Lines Matching refs:smu

263 static int smu_v13_0_6_init_microcode(struct smu_context *smu)
269 struct amdgpu_device *adev = smu->adev;
304 smu->pptable_firmware.data =
307 smu->pptable_firmware.size =
313 if (smu->pptable_firmware.data && smu->pptable_firmware.size) {
316 ucode->fw = &smu->pptable_firmware;
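
The matches at 304-316 suggest the driver exposes its in-memory pptable through a struct firmware descriptor so the generic ucode path can upload it like any other blob. A minimal sketch of that wrapping pattern, with stand-in types and names (nothing here is the driver's actual API):

/* Hypothetical stand-ins for struct firmware / amdgpu_firmware_info. */
#include <stddef.h>
#include <stdint.h>

struct fake_firmware {
	const uint8_t *data;
	size_t size;
};

struct fake_ucode {
	const struct fake_firmware *fw;
};

void register_pptable(struct fake_ucode *ucode,
		      struct fake_firmware *pptable_fw,
		      const uint8_t *table, size_t len)
{
	pptable_fw->data = table;
	pptable_fw->size = len;

	/* Only hand the descriptor to the loader if both fields are
	 * valid, mirroring the "data && size" guard at line 313. */
	if (pptable_fw->data && pptable_fw->size)
		ucode->fw = pptable_fw;
}

int main(void)
{
	static const uint8_t table[16];
	struct fake_firmware fw = { 0 };
	struct fake_ucode ucode = { 0 };

	register_pptable(&ucode, &fw, table, sizeof(table));
	return ucode.fw ? 0 : 1;
}
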
327 static int smu_v13_0_6_tables_init(struct smu_context *smu)
329 struct smu_table_context *smu_table = &smu->smu_table;
331 struct amdgpu_device *adev = smu->adev;
371 static int smu_v13_0_6_allocate_dpm_context(struct smu_context *smu)
373 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
384 static int smu_v13_0_6_init_smc_tables(struct smu_context *smu)
388 ret = smu_v13_0_6_tables_init(smu);
392 ret = smu_v13_0_6_allocate_dpm_context(smu);
397 static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu,
410 static int smu_v13_0_6_get_metrics_table(struct smu_context *smu,
413 struct smu_table_context *smu_table = &smu->smu_table;
421 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsTable, NULL);
423 dev_info(smu->adev->dev,
428 amdgpu_asic_invalidate_hdp(smu->adev, NULL);
440 static ssize_t smu_v13_0_6_get_pm_metrics(struct smu_context *smu,
443 struct smu_table_context *smu_tbl_ctxt = &smu->smu_table;
457 ret = smu_v13_0_6_get_metrics_table(smu, pm_metrics->data, true);
461 smu_cmn_get_smc_version(smu, NULL, &pmfw_version);
475 static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu)
477 struct smu_table_context *smu_table = &smu->smu_table;
482 struct amdgpu_device *adev = smu->adev;
489 ret = smu_v13_0_6_get_metrics_table(smu, NULL, true);
503 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetMetricsVersion,
541 static int smu_v13_0_6_get_dpm_ultimate_freq(struct smu_context *smu,
545 struct smu_table_context *smu_table = &smu->smu_table;
551 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
594 smu, CMN2ASIC_MAPPING_CLK, clk_type);
605 smu, SMU_MSG_GetMaxGfxclkFrequency, max);
608 smu, SMU_MSG_GetMaxDpmFreq, param, max);
616 smu, SMU_MSG_GetMinGfxclkFrequency, min);
619 smu, SMU_MSG_GetMinDpmFreq, param, min);
626 static int smu_v13_0_6_get_dpm_level_count(struct smu_context *smu,
632 ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, levels);
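
At 632 the level index 0xff is passed to smu_v13_0_get_dpm_freq_by_index and the result lands in levels, so 0xff evidently acts as a "return the level count" sentinel rather than a real index. A mocked sketch of that convention, with invented frequencies (the real call goes through PMFW messages):

#include <stdint.h>
#include <stdio.h>

#define LEVEL_COUNT_SENTINEL 0xff

int get_dpm_freq_by_index(uint32_t index, uint32_t *out)
{
	static const uint32_t freq_mhz[] = { 500, 800, 1100, 1400 };
	uint32_t nlevels = sizeof(freq_mhz) / sizeof(freq_mhz[0]);

	if (index == LEVEL_COUNT_SENTINEL) {
		*out = nlevels;         /* level count, not a frequency */
		return 0;
	}
	if (index >= nlevels)
		return -1;
	*out = freq_mhz[index];         /* frequency of one DPM level */
	return 0;
}

int main(void)
{
	uint32_t levels;

	get_dpm_freq_by_index(LEVEL_COUNT_SENTINEL, &levels);
	printf("%u DPM levels\n", levels);
	return 0;
}
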
639 static int smu_v13_0_6_set_default_dpm_table(struct smu_context *smu)
641 struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
642 struct smu_table_context *smu_table = &smu->smu_table;
666 smu_v13_0_6_setup_driver_pptable(smu);
670 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
674 ret = smu_v13_0_6_get_dpm_ultimate_freq(smu, SMU_GFXCLK,
697 if (smu_cmn_feature_is_enabled(smu, dpm_map[j].feature_num)) {
699 smu, dpm_map[j].clk_type, &levels);
718 static int smu_v13_0_6_setup_pptable(struct smu_context *smu)
720 struct smu_table_context *table_context = &smu->smu_table;
731 static int smu_v13_0_6_check_fw_status(struct smu_context *smu)
733 struct amdgpu_device *adev = smu->adev;
746 static int smu_v13_0_6_populate_umd_state_clk(struct smu_context *smu)
748 struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
755 struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
793 static int smu_v13_0_6_get_clk_table(struct smu_context *smu,
818 static uint32_t smu_v13_0_6_get_throttler_status(struct smu_context *smu)
820 struct smu_power_context *smu_power = &smu->smu_power;
825 dev_dbg(smu->adev->dev, "SMU Throttler status: %u", throttler_status);
830 static int smu_v13_0_6_get_smu_metrics_data(struct smu_context *smu,
834 struct smu_table_context *smu_table = &smu->smu_table;
837 struct amdgpu_device *adev = smu->adev;
841 ret = smu_v13_0_6_get_metrics_table(smu, NULL, false);
849 if (smu->smc_fw_version >= 0x552F00) {
905 static int smu_v13_0_6_get_current_clk_freq_by_table(struct smu_context *smu,
937 return smu_v13_0_6_get_smu_metrics_data(smu, member_type, value);
940 static int smu_v13_0_6_print_clks(struct smu_context *smu, char *buf, int size,
948 ret = smu_v13_0_6_get_clk_table(smu, &clocks, single_dpm_table);
950 dev_err(smu->adev->dev, "Attempt to get %s clk levels failed!",
991 static int smu_v13_0_6_print_clk_levels(struct smu_context *smu,
996 struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
998 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
1019 ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_GFXCLK,
1022 dev_err(smu->adev->dev,
1064 ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_UCLK,
1067 dev_err(smu->adev->dev,
1074 return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
1078 ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_SOCCLK,
1081 dev_err(smu->adev->dev,
1088 return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
1092 ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_FCLK,
1095 dev_err(smu->adev->dev,
1102 return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
1106 ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_VCLK,
1109 dev_err(smu->adev->dev,
1116 return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
1120 ret = smu_v13_0_6_get_current_clk_freq_by_table(smu, SMU_DCLK,
1123 dev_err(smu->adev->dev,
1130 return smu_v13_0_6_print_clks(smu, buf, size, single_dpm_table,
1140 static int smu_v13_0_6_upload_dpm_level(struct smu_context *smu, bool max,
1143 struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
1147 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
1151 smu,
1156 dev_err(smu->adev->dev,
1163 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
1168 smu,
1173 dev_err(smu->adev->dev,
1180 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) &&
1184 smu,
1189 dev_err(smu->adev->dev,
1199 static int smu_v13_0_6_force_clk_levels(struct smu_context *smu,
1202 struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
1214 dev_err(smu->adev->dev,
1222 smu, false, FEATURE_MASK(FEATURE_DPM_GFXCLK),
1225 dev_err(smu->adev->dev,
1231 smu, true, FEATURE_MASK(FEATURE_DPM_GFXCLK),
1234 dev_err(smu->adev->dev,
1256 static int smu_v13_0_6_get_current_activity_percent(struct smu_context *smu,
1268 smu, METRICS_AVERAGE_GFXACTIVITY, value);
1272 smu, METRICS_AVERAGE_MEMACTIVITY, value);
1275 dev_err(smu->adev->dev,
1283 static int smu_v13_0_6_thermal_get_temperature(struct smu_context *smu,
1295 smu, METRICS_TEMPERATURE_HOTSPOT, value);
1299 smu, METRICS_TEMPERATURE_MEM, value);
1302 dev_err(smu->adev->dev, "Invalid sensor for retrieving temp\n");
1309 static int smu_v13_0_6_read_sensor(struct smu_context *smu,
1324 ret = smu_v13_0_6_get_current_activity_percent(smu, sensor,
1329 ret = smu_v13_0_6_get_smu_metrics_data(smu,
1336 ret = smu_v13_0_6_thermal_get_temperature(smu, sensor,
1342 smu, SMU_UCLK, (uint32_t *)data);
1349 smu, SMU_GFXCLK, (uint32_t *)data);
1354 ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data);
1366 static int smu_v13_0_6_get_power_limit(struct smu_context *smu,
1372 struct smu_table_context *smu_table = &smu->smu_table;
1378 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit, &power_limit);
1381 dev_err(smu->adev->dev, "Couldn't get PPT limit");
1399 static int smu_v13_0_6_set_power_limit(struct smu_context *smu,
1403 return smu_v13_0_set_power_limit(smu, limit_type, limit);
1410 struct smu_context *smu = adev->powerplay.pp_handle;
1411 struct smu_power_context *smu_power = &smu->smu_power;
1433 atomic64_inc(&smu->throttle_int_counter);
1447 schedule_work(&smu->throttling_logging_work);
1500 static int smu_v13_0_6_register_irq_handler(struct smu_context *smu)
1502 struct amdgpu_device *adev = smu->adev;
1503 struct amdgpu_irq_src *irq_src = &smu->irq_source;
1521 static int smu_v13_0_6_notify_unload(struct smu_context *smu)
1523 if (amdgpu_in_reset(smu->adev))
1526 dev_dbg(smu->adev->dev, "Notify PMFW about driver unload");
1528 smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
1533 static int smu_v13_0_6_mca_set_debug_mode(struct smu_context *smu, bool enable)
1535 /* NOTE: the ClearMcaOnRead message is only supported on SMU firmware 85.72.0 or higher */
1536 if (smu->smc_fw_version < 0x554800)
1539 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ClearMcaOnRead,
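
The gates at 1536, 2403, and 2246 line up with the versions named in the comments (85.72.0 against 0x554800, 85.69 against 0x554500, 85.99.0 against 0x556300), which is consistent with the PMFW version being packed one byte per component. A small worked check, assuming that encoding:

#include <stdint.h>
#include <stdio.h>

/* Assumed packing: (major << 16) | (minor << 8) | patch. */
uint32_t pmfw_version(uint8_t major, uint8_t minor, uint8_t patch)
{
	return ((uint32_t)major << 16) | ((uint32_t)minor << 8) | patch;
}

int main(void)
{
	/* 85.72.0 -> 0x554800, matching the gate at line 1536. */
	printf("85.72.0 = 0x%06X\n", pmfw_version(85, 72, 0));
	/* 85.69.x -> 0x5545xx, matching the gate at line 2403. */
	printf("85.69.0 = 0x%06X\n", pmfw_version(85, 69, 0));
	/* 85.99.0 -> 0x556300, matching the gate at line 2246. */
	printf("85.99.0 = 0x%06X\n", pmfw_version(85, 99, 0));
	return 0;
}
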
1544 static int smu_v13_0_6_system_features_control(struct smu_context *smu,
1547 struct amdgpu_device *adev = smu->adev;
1555 ret = smu_v13_0_system_features_control(smu, enable);
1558 smu_v13_0_6_notify_unload(smu);
1564 static int smu_v13_0_6_set_gfx_soft_freq_limited_range(struct smu_context *smu,
1570 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
1575 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinGfxclk,
1581 static int smu_v13_0_6_set_performance_level(struct smu_context *smu,
1584 struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
1590 struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
1596 smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL);
1608 smu, gfx_table->min, gfx_table->max);
1619 smu, SMU_UCLK, 0, uclk_table->max);
1636 static int smu_v13_0_6_set_soft_freq_limited_range(struct smu_context *smu,
1640 struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
1642 struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
1643 struct amdgpu_device *adev = smu->adev;
1658 dev_err(smu->adev->dev,
1669 smu, min, max);
1681 ((smu->smc_fw_version < 0x556600) ||
1686 smu, SMU_UCLK, 0, max);
1707 ret = smu_v13_0_6_set_gfx_soft_freq_limited_range(smu, min_clk,
1712 smu, SMU_MSG_EnableDeterminism, max, NULL);
1727 static int smu_v13_0_6_usr_edit_dpm_table(struct smu_context *smu,
1731 struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
1733 struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
1746 dev_err(smu->adev->dev,
1754 smu->adev->dev,
1767 smu->adev->dev,
1783 dev_err(smu->adev->dev,
1788 if (!smu_cmn_feature_is_enabled(smu,
1790 dev_warn(smu->adev->dev,
1796 dev_info(smu->adev->dev,
1802 smu->adev->dev,
1817 dev_err(smu->adev->dev,
1826 smu, SMU_GFXCLK, min_clk, max_clk);
1834 smu, SMU_UCLK, min_clk, max_clk);
1842 dev_err(smu->adev->dev,
1858 smu, SMU_GFXCLK, min_clk, max_clk);
1869 smu, SMU_UCLK, min_clk, max_clk);
1879 static int smu_v13_0_6_get_enabled_mask(struct smu_context *smu,
1884 ret = smu_cmn_get_enabled_mask(smu, feature_mask);
1886 if (ret == -EIO && smu->smc_fw_version < 0x552F00) {
1894 static bool smu_v13_0_6_is_dpm_running(struct smu_context *smu)
1899 ret = smu_v13_0_6_get_enabled_mask(smu, &feature_enabled);
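
Lines 1884-1899 read the firmware's enabled-feature mask (with an -EIO fallback on firmware older than 0x552F00) and derive is_dpm_running from it; together with FEATURE_MASK(...) at line 1222, this looks like a plain bit test on a 64-bit bitmap. A sketch with an invented bit position (the driver may combine several DPM bits):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FEATURE_MASK(bit) (1ULL << (bit))
#define FEATURE_DPM_GFXCLK 1    /* hypothetical bit position */

bool is_dpm_running(uint64_t feature_enabled)
{
	/* DPM counts as running if its feature bit is set in the
	 * firmware-reported bitmap. */
	return !!(feature_enabled & FEATURE_MASK(FEATURE_DPM_GFXCLK));
}

int main(void)
{
	uint64_t mask = FEATURE_MASK(FEATURE_DPM_GFXCLK);

	printf("dpm running: %d\n", is_dpm_running(mask));
	return 0;
}
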
1907 static int smu_v13_0_6_request_i2c_xfer(struct smu_context *smu,
1910 struct smu_table_context *smu_table = &smu->smu_table;
1912 struct amdgpu_device *adev = smu->adev;
1924 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RequestI2cTransaction,
1935 struct smu_context *smu = adev->powerplay.pp_handle;
1936 struct smu_table_context *smu_table = &smu->smu_table;
1986 r = smu_v13_0_6_request_i2c_xfer(smu, req);
2026 static int smu_v13_0_6_i2c_control_init(struct smu_context *smu)
2028 struct amdgpu_device *adev = smu->adev;
2066 static void smu_v13_0_6_i2c_control_fini(struct smu_context *smu)
2068 struct amdgpu_device *adev = smu->adev;
2081 static void smu_v13_0_6_get_unique_id(struct smu_context *smu)
2083 struct amdgpu_device *adev = smu->adev;
2084 struct smu_table_context *smu_table = &smu->smu_table;
2091 static int smu_v13_0_6_get_bamaco_support(struct smu_context *smu)
2106 static void smu_v13_0_6_log_thermal_throttling_event(struct smu_context *smu)
2109 struct amdgpu_device *adev = smu->adev;
2113 throttler_status = smu_v13_0_6_get_throttler_status(smu);
2139 smu->adev->kfd.dev,
2145 smu_v13_0_6_get_current_pcie_link_width_level(struct smu_context *smu)
2147 struct amdgpu_device *adev = smu->adev;
2153 static int smu_v13_0_6_get_current_pcie_link_speed(struct smu_context *smu)
2155 struct amdgpu_device *adev = smu->adev;
2173 static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table)
2175 struct smu_table_context *smu_table = &smu->smu_table;
2178 struct amdgpu_device *adev = smu->adev;
2185 ret = smu_v13_0_6_get_metrics_table(smu, metrics_x, true);
2242 /* Check SMU version: PCIe link speed and width are reported from the PMFW metrics
2243 * table for both PF and one VF on SMU firmware 85.99.0 or higher; otherwise report only
2246 if (smu->smc_fw_version >= 0x556300) {
2251 link_width_level = smu_v13_0_6_get_current_pcie_link_width_level(smu);
2258 smu_v13_0_6_get_current_pcie_link_speed(smu);
2317 static void smu_v13_0_6_restore_pci_config(struct smu_context *smu)
2319 struct amdgpu_device *adev = smu->adev;
2328 static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
2331 struct amdgpu_device *adev = smu->adev;
2334 index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
2337 mutex_lock(&smu->message_lock);
2339 ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index,
2345 dev_dbg(smu->adev->dev, "restore config space...\n");
2361 smu_v13_0_6_restore_pci_config(smu);
2363 dev_dbg(smu->adev->dev, "wait for reset ack\n");
2365 ret = smu_cmn_wait_for_response(smu);
2379 mutex_unlock(&smu->message_lock);
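
The mode-2 reset matches at 2334-2379 show the ordering: send the reset message without waiting, restore PCI config space while the reset is in flight (presumably because config space is lost across the reset), then wait for the firmware's ack, all under message_lock. A stubbed sketch of that sequence; the message id and helpers are stand-ins:

#include <stdio.h>

int send_msg_without_waiting(int msg)
{
	printf("send message %d without blocking\n", msg);
	return 0;
}

void restore_pci_config(void)
{
	printf("restore config space...\n");
}

int wait_for_response(void)
{
	printf("wait for reset ack\n");
	return 0;
}

int mode2_reset_sketch(void)
{
	int ret;

	/* The real driver holds smu->message_lock across this sequence. */
	ret = send_msg_without_waiting(42 /* stand-in message id */);
	if (ret)
		return ret;

	/* Config space is restored before polling for the ack, while
	 * the reset is still in flight. */
	restore_pci_config();
	return wait_for_response();
}

int main(void)
{
	return mode2_reset_sketch();
}
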
2388 static int smu_v13_0_6_get_thermal_temperature_range(struct smu_context *smu,
2391 struct amdgpu_device *adev = smu->adev;
2396 if (amdgpu_sriov_vf(smu->adev))
2402 /* Check SMU version: the GetCtfLimit message is only supported on SMU firmware 85.69 or higher */
2403 if (smu->smc_fw_version < 0x554500)
2407 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
2412 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
2417 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
2425 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetCTFLimit,
2433 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetThermalLimit,
2442 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetThermalLimit,
2454 static int smu_v13_0_6_mode1_reset(struct smu_context *smu)
2456 struct amdgpu_device *adev = smu->adev;
2478 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
2487 static bool smu_v13_0_6_is_mode1_reset_supported(struct smu_context *smu)
2492 static bool smu_v13_0_6_is_mode2_reset_supported(struct smu_context *smu)
2497 static int smu_v13_0_6_smu_send_hbm_bad_page_num(struct smu_context *smu,
2504 smu, SMU_MSG_SetNumBadHbmPagesRetired, size, NULL);
2506 dev_err(smu->adev->dev,
2513 static int smu_v13_0_6_send_rma_reason(struct smu_context *smu)
2515 struct amdgpu_device *adev = smu->adev;
2519 if ((adev->flags & AMD_IS_APU) || smu->smc_fw_version < 0x00555a00)
2522 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RmaDueToBadPageThreshold, NULL);
2524 dev_err(smu->adev->dev,
2533 struct smu_context *smu = adev->powerplay.pp_handle;
2535 return smu_v13_0_6_mca_set_debug_mode(smu, enable);
2538 static int smu_v13_0_6_get_valid_mca_count(struct smu_context *smu, enum amdgpu_mca_error_type type, uint32_t *count)
2557 ret = smu_cmn_send_smc_msg(smu, msg, count);
2566 static int __smu_v13_0_6_mca_dump_bank(struct smu_context *smu, enum amdgpu_mca_error_type type,
2584 return smu_cmn_send_smc_msg_with_param(smu, msg, param, val);
2587 static int smu_v13_0_6_mca_dump_bank(struct smu_context *smu, enum amdgpu_mca_error_type type,
2596 ret = __smu_v13_0_6_mca_dump_bank(smu, type, idx, offset + (i << 2), &val[i]);
2637 struct smu_context *smu = adev->powerplay.pp_handle;
2644 ret = smu_v13_0_6_mca_dump_bank(smu, type, idx, reg_idx * 8, data, ARRAY_SIZE(data));
2833 struct smu_context *smu = adev->powerplay.pp_handle;
2841 if (!(adev->flags & AMD_IS_APU) && smu->smc_fw_version >= 0x00555600) {
2903 struct smu_context *smu = adev->powerplay.pp_handle;
2909 ret = smu_v13_0_6_get_valid_mca_count(smu, type, count);
3024 struct smu_context *smu = adev->powerplay.pp_handle;
3026 return smu_v13_0_6_mca_set_debug_mode(smu, enable);
3029 static int smu_v13_0_6_get_valid_aca_count(struct smu_context *smu, enum aca_smu_type type, u32 *count)
3048 ret = smu_cmn_send_smc_msg(smu, msg, count);
3060 struct smu_context *smu = adev->powerplay.pp_handle;
3066 ret = smu_v13_0_6_get_valid_aca_count(smu, type, count);
3076 static int __smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_smu_type type,
3094 return smu_cmn_send_smc_msg_with_param(smu, msg, param, (uint32_t *)val);
3097 static int smu_v13_0_6_aca_bank_dump(struct smu_context *smu, enum aca_smu_type type,
3106 ret = __smu_v13_0_6_aca_bank_dump(smu, type, idx, offset + (i << 2), &val[i]);
3117 struct smu_context *smu = adev->powerplay.pp_handle;
3124 ret = smu_v13_0_6_aca_bank_dump(smu, type, idx, reg_idx * 8, data, ARRAY_SIZE(data));
3172 static int smu_v13_0_6_select_xgmi_plpd_policy(struct smu_context *smu,
3175 struct amdgpu_device *adev = smu->adev;
3193 ret = smu_cmn_send_smc_msg_with_param(smu,
3198 ret = smu_cmn_send_smc_msg_with_param(smu,
3267 void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu)
3269 smu->ppt_funcs = &smu_v13_0_6_ppt_funcs;
3270 smu->message_map = smu_v13_0_6_message_map;
3271 smu->clock_map = smu_v13_0_6_clk_map;
3272 smu->feature_map = smu_v13_0_6_feature_mask_map;
3273 smu->table_map = smu_v13_0_6_table_map;
3274 smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION;
3275 smu->smc_fw_caps |= SMU_FW_CAP_RAS_PRI;
3276 smu_v13_0_set_smu_mailbox_registers(smu);
3277 amdgpu_mca_smu_init_funcs(smu->adev, &smu_v13_0_6_mca_smu_funcs);
3278 amdgpu_aca_set_smu_funcs(smu->adev, &smu_v13_0_6_aca_smu_funcs);
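
The closing matches at 3267-3278 wire a version-specific ops table plus message/clock/feature/table translation maps into the SMU context, letting common SMU code stay generic across ASICs. A stand-in sketch of that wiring pattern; all types here are hypothetical:

#include <stddef.h>

struct ppt_funcs_sketch {
	int (*mode1_reset)(void *smu);
	int (*mode2_reset)(void *smu);
};

struct msg_map_sketch {
	int common_msg;    /* generic message id used by common code */
	int asic_msg;      /* ASIC-specific id the firmware expects */
};

struct smu_sketch {
	const struct ppt_funcs_sketch *ppt_funcs;
	const struct msg_map_sketch *message_map;
};

void set_ppt_funcs_sketch(struct smu_sketch *smu,
			  const struct ppt_funcs_sketch *funcs,
			  const struct msg_map_sketch *map)
{
	smu->ppt_funcs = funcs;     /* version-specific implementations */
	smu->message_map = map;     /* common -> ASIC message translation */
}

int main(void)
{
	static const struct ppt_funcs_sketch funcs = { 0 };
	static const struct msg_map_sketch map[] = { { 0, 0 } };
	struct smu_sketch smu;

	set_ppt_funcs_sketch(&smu, &funcs, map);
	return smu.ppt_funcs ? 0 : 1;
}
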