Lines Matching defs:smu (definitions and uses of the smu context in smu_v11_0.c; the number on each entry is the source line in that file)

66 static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
69 struct amdgpu_device *adev = smu->adev;
74 int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
76 struct amdgpu_device *adev = smu->adev;
82 static int smu_v11_0_wait_for_response(struct smu_context *smu)
84 struct amdgpu_device *adev = smu->adev;
100 smu_v11_0_send_msg_with_param(struct smu_context *smu,
104 struct amdgpu_device *adev = smu->adev;
107 index = smu_msg_get_index(smu, msg);
111 ret = smu_v11_0_wait_for_response(smu);
122 smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
124 ret = smu_v11_0_wait_for_response(smu);
127 smu_get_message_name(smu, msg), index, param, ret);
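
These helpers implement the SMU11 mailbox protocol used throughout the file:
wait for the previous response, clear the response register, stage the
argument, fire the message index, then poll for completion. A minimal sketch
of the smu_v11_0_send_msg_with_param() flow, assuming the MP1 C2PMSG_66/82/90
registers this file uses for message, argument, and response:

    /* Sketch of the mailbox round trip; error logging omitted. */
    index = smu_msg_get_index(smu, msg);      /* map generic msg to ASIC index */
    if (index < 0)
            return index;

    ret = smu_v11_0_wait_for_response(smu);   /* drain any previous response */
    if (ret)
            return ret;

    WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);     /* clear response reg */
    WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param); /* stage the argument */
    smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);

    ret = smu_v11_0_wait_for_response(smu);   /* poll until firmware answers */
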
132 int smu_v11_0_init_microcode(struct smu_context *smu)
134 struct amdgpu_device *adev = smu->adev;
194 int smu_v11_0_load_microcode(struct smu_context *smu)
196 struct amdgpu_device *adev = smu->adev;
232 int smu_v11_0_check_fw_status(struct smu_context *smu)
234 struct amdgpu_device *adev = smu->adev;
247 int smu_v11_0_check_fw_version(struct smu_context *smu)
254 ret = smu_get_smc_version(smu, &if_version, &smu_version);
262 switch (smu->adev->asic_type) {
264 smu->smc_if_version = SMU11_DRIVER_IF_VERSION_VG20;
267 smu->smc_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
270 smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV10;
273 smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV14;
276 pr_err("smu unsupported asic type:%d.\n", smu->adev->asic_type);
277 smu->smc_if_version = SMU11_DRIVER_IF_VERSION_INV;
289 if (if_version != smu->smc_if_version) {
290 pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
291 "smu fw version = 0x%08x (%d.%d.%d)\n",
292 smu->smc_if_version, if_version,
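
The version check is advisory: an if-version mismatch between driver and
firmware is logged rather than treated as fatal, since the firmware interface
is designed to be backward compatible. The packed smu_version is unpacked for
the "%d.%d.%d" print roughly like this (a sketch; field layout assumed from
the print format):

    /* smu_version packs major/minor/debug into one 32-bit word. */
    smu_major = (smu_version >> 16) & 0xff;
    smu_minor = (smu_version >> 8)  & 0xff;
    smu_debug = (smu_version >> 0)  & 0xff;
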
300 static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, const void **table, uint32_t *size)
302 struct amdgpu_device *adev = smu->adev;
315 static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, const void **table,
318 struct amdgpu_device *adev = smu->adev;
342 int smu_v11_0_setup_pptable(struct smu_context *smu)
344 struct amdgpu_device *adev = smu->adev;
356 if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
357 pr_info("use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
360 ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
363 ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size,
364 smu->smu_table.boot_values.pp_table_id);
378 ret = smu_get_atom_data_table(smu, index, &atom_table_size, &frev, &crev,
385 if (!smu->smu_table.power_play_table)
386 smu->smu_table.power_play_table = table;
387 if (!smu->smu_table.power_play_table_size)
388 smu->smu_table.power_play_table_size = size;
393 static int smu_v11_0_init_dpm_context(struct smu_context *smu)
395 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
400 return smu_alloc_dpm_context(smu);
403 static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
405 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
423 int smu_v11_0_init_smc_tables(struct smu_context *smu)
425 struct smu_table_context *smu_table = &smu->smu_table;
439 ret = smu_tables_init(smu, tables);
443 ret = smu_v11_0_init_dpm_context(smu);
450 int smu_v11_0_fini_smc_tables(struct smu_context *smu)
452 struct smu_table_context *smu_table = &smu->smu_table;
466 ret = smu_v11_0_fini_dpm_context(smu);
472 int smu_v11_0_init_power(struct smu_context *smu)
474 struct smu_power_context *smu_power = &smu->smu_power;
476 if (!smu->pm_enabled)
490 int smu_v11_0_fini_power(struct smu_context *smu)
492 struct smu_power_context *smu_power = &smu->smu_power;
494 if (!smu->pm_enabled)
506 int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
518 ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
533 smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
534 smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
535 smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
536 smu->smu_table.boot_values.socclk = 0;
537 smu->smu_table.boot_values.dcefclk = 0;
538 smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
539 smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
540 smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
541 smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
542 smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
543 smu->smu_table.boot_values.pp_table_id = 0;
548 smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
549 smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
550 smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
551 smu->smu_table.boot_values.socclk = 0;
552 smu->smu_table.boot_values.dcefclk = 0;
553 smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
554 smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
555 smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
556 smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
557 smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
558 smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
561 smu->smu_table.boot_values.format_revision = header->format_revision;
562 smu->smu_table.boot_values.content_revision = header->content_revision;
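
Lines 533-558 fill boot_values twice because the VBIOS firmwareinfo table has
two layouts; the content revision in the common header decides which struct
the same buffer is cast to. A sketch of that dispatch (struct names assumed
from atomfirmware.h):

    /* Sketch: one buffer, two layouts, selected by content_revision. */
    switch (header->content_revision) {
    case 0:
    case 1:
    case 2:
            v_3_1 = (struct atom_firmware_info_v3_1 *)header;
            /* v3.1 carries no pptable id, so pp_table_id stays 0 (line 543) */
            break;
    case 3:
    default:
            v_3_3 = (struct atom_firmware_info_v3_3 *)header;
            /* v3.3 adds pplib_pptable_id (line 558) */
            break;
    }
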
567 int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
570 struct amdgpu_device *adev = smu->adev;
585 smu->smu_table.boot_values.socclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
599 smu->smu_table.boot_values.dcefclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
613 smu->smu_table.boot_values.eclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
627 smu->smu_table.boot_values.vclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
641 smu->smu_table.boot_values.dclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
643 if ((smu->smu_table.boot_values.format_revision == 3) &&
644 (smu->smu_table.boot_values.content_revision >= 2)) {
658 smu->smu_table.boot_values.fclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
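
Lines 585-658 repeat one pattern per clock: run the VBIOS getsmuclockinfo
command table for a syspll clock ID, then convert the returned frequency from
Hz into the 10 kHz units boot_values uses elsewhere. A sketch of a single
round trip (parameter struct and clock-ID names assumed from atomfirmware.h):

    /* Sketch: query one clock (SOCCLK shown) from the VBIOS. */
    struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
    struct atom_get_smu_clock_info_output_parameters_v3_1 *output;

    input.clk_id  = SMU11_SYSPLL0_SOCCLK_ID;
    input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;

    index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
                                        getsmuclockinfo);
    if (amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
                                  (uint32_t *)&input))
            return -EINVAL;

    output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
    smu->smu_table.boot_values.socclk =
            le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
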
664 int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
666 struct smu_table_context *smu_table = &smu->smu_table;
679 ret = smu_send_smc_msg_with_param(smu,
684 ret = smu_send_smc_msg_with_param(smu,
694 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
698 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
702 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
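
The SMU mailbox argument is a single 32-bit word, so every 64-bit GPU address
here is delivered as an upper/lower pair of messages. A sketch of the pattern
behind lines 694-702 (the address and size variables are stand-ins):

    /* Sketch: a 64-bit DRAM-log address crosses the mailbox in halves. */
    ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
                                      upper_32_bits(address));
    if (ret)
            return ret;
    ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
                                      lower_32_bits(address));
    if (ret)
            return ret;
    ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
                                      (uint32_t)size);
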
710 int smu_v11_0_check_pptable(struct smu_context *smu)
714 ret = smu_check_powerplay_table(smu);
718 int smu_v11_0_parse_pptable(struct smu_context *smu)
722 struct smu_table_context *table_context = &smu->smu_table;
733 ret = smu_store_powerplay_table(smu);
737 ret = smu_append_powerplay_table(smu);
742 int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
746 ret = smu_set_default_dpm_table(smu);
751 int smu_v11_0_write_pptable(struct smu_context *smu)
753 struct smu_table_context *table_context = &smu->smu_table;
756 ret = smu_update_table(smu, SMU_TABLE_PPTABLE, 0,
762 int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
766 ret = smu_send_smc_msg_with_param(smu,
774 int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
776 struct smu_table_context *table_context = &smu->smu_table;
778 if (!smu->pm_enabled)
783 return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100);
786 int smu_v11_0_set_driver_table_location(struct smu_context *smu)
788 struct smu_table *driver_table = &smu->smu_table.driver_table;
792 ret = smu_send_smc_msg_with_param(smu,
796 ret = smu_send_smc_msg_with_param(smu,
804 int smu_v11_0_set_tool_table_location(struct smu_context *smu)
807 struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
810 ret = smu_send_smc_msg_with_param(smu,
814 ret = smu_send_smc_msg_with_param(smu,
822 int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
826 if (!smu->pm_enabled)
829 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count);
834 int smu_v11_0_set_allowed_mask(struct smu_context *smu)
836 struct smu_feature *feature = &smu->smu_feature;
846 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
851 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
861 int smu_v11_0_get_enabled_mask(struct smu_context *smu,
865 struct smu_feature *feature = &smu->smu_feature;
872 ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
875 ret = smu_read_smc_arg(smu, &feature_mask_high);
879 ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
882 ret = smu_read_smc_arg(smu, &feature_mask_low);
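
The enabled-feature mask is 64 bits wide and is likewise fetched as two 32-bit
halves; the caller's two-element array receives the low word first. A sketch
following lines 872-882:

    /* Sketch: assemble the 64-bit feature mask from two mailbox reads. */
    ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
    if (!ret)
            ret = smu_read_smc_arg(smu, &feature_mask_high);
    if (!ret)
            ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
    if (!ret)
            ret = smu_read_smc_arg(smu, &feature_mask_low);
    if (ret)
            return ret;

    feature_mask[0] = feature_mask_low;   /* bits  0..31 */
    feature_mask[1] = feature_mask_high;  /* bits 32..63 */
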
896 int smu_v11_0_system_features_control(struct smu_context *smu,
899 struct smu_feature *feature = &smu->smu_feature;
903 ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
912 ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
925 int smu_v11_0_notify_display_change(struct smu_context *smu)
929 if (!smu->pm_enabled)
931 if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
932 smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
933 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1);
939 smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
945 if (!smu->pm_enabled)
948 if ((smu_msg_get_index(smu, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
949 (smu_msg_get_index(smu, SMU_MSG_GetMaxDpmFreq) < 0))
952 clk_id = smu_clk_get_index(smu, clock_select);
956 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
963 ret = smu_read_smc_arg(smu, clock);
971 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
978 ret = smu_read_smc_arg(smu, clock);
983 int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
990 smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks;
992 max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
993 max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
994 max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
999 if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
1000 ret = smu_v11_0_get_max_sustainable_clock(smu,
1010 if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
1011 ret = smu_v11_0_get_max_sustainable_clock(smu,
1021 if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
1022 ret = smu_v11_0_get_max_sustainable_clock(smu,
1031 ret = smu_v11_0_get_max_sustainable_clock(smu,
1039 ret = smu_v11_0_get_max_sustainable_clock(smu,
1047 ret = smu_v11_0_get_max_sustainable_clock(smu,
1063 uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu) {
1066 struct smu_table_context *table_context = &smu->smu_table;
1069 max_power_limit = smu_get_pptable_power_limit(smu);
1073 if (!smu->default_power_limit)
1074 smu->default_power_limit = smu->power_limit;
1075 max_power_limit = smu->default_power_limit;
1078 if (smu->od_enabled) {
1081 pr_debug("ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_limit, smu->default_power_limit);
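
With overdrive enabled, the maximum power limit is the default limit scaled up
by the ODSETTING_POWERPERCENTAGE headroom read at line 1081. A sketch of that
scaling (od_limit assumed to hold the percentage):

    /* Sketch: OD headroom is a percentage on top of the default limit. */
    max_power_limit = smu->default_power_limit;
    if (smu->od_enabled && od_limit > 0) {
            max_power_limit *= (100 + od_limit);
            max_power_limit /= 100;
    }
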
1090 int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
1095 max_power_limit = smu_v11_0_get_max_power_limit(smu);
1105 n = smu->default_power_limit;
1107 if (!smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
1112 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n);
1117 smu->power_limit = n;
1122 int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
1133 asic_clk_id = smu_clk_get_index(smu, clk_id);
1138 if (smu_msg_get_index(smu, SMU_MSG_GetDpmClockFreq) < 0)
1139 ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
1141 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmClockFreq,
1146 ret = smu_read_smc_arg(smu, &freq);
1157 static int smu_v11_0_set_thermal_range(struct smu_context *smu,
1160 struct amdgpu_device *adev = smu->adev;
1164 struct smu_table_context *table_context = &smu->smu_table;
1188 static int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
1190 struct amdgpu_device *adev = smu->adev;
1202 int smu_v11_0_start_thermal_control(struct smu_context *smu)
1206 struct amdgpu_device *adev = smu->adev;
1208 if (!smu->pm_enabled)
1213 ret = smu_get_thermal_temperature_range(smu, &range);
1217 if (smu->smu_table.thermal_controller_type) {
1218 ret = smu_v11_0_set_thermal_range(smu, range);
1222 ret = smu_v11_0_enable_thermal_alert(smu);
1226 ret = smu_set_thermal_fan_table(smu);
1244 int smu_v11_0_stop_thermal_control(struct smu_context *smu)
1246 struct amdgpu_device *adev = smu->adev;
1258 static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
1260 struct amdgpu_device *adev = smu->adev;
1277 int smu_v11_0_read_sensor(struct smu_context *smu,
1288 ret = smu_get_current_clk_freq(smu, SMU_UCLK, (uint32_t *)data);
1292 ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, (uint32_t *)data);
1296 ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
1304 ret = smu_common_read_sensor(smu, sensor, data, size);
1315 smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
1324 if (!smu->pm_enabled)
1327 if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
1328 smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
1354 if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
1357 ret = smu_set_hard_freq_range(smu, clk_select, clk_freq, 0);
1360 smu->hard_min_uclk_req_from_dal = clk_freq;
1367 int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
1370 struct amdgpu_device *adev = smu->adev;
1381 ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
1383 ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
1393 smu_v11_0_get_fan_control_mode(struct smu_context *smu)
1395 if (!smu_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
1402 smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
1406 if (!smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
1409 ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
1418 smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
1420 struct amdgpu_device *adev = smu->adev;
1433 smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
1435 struct amdgpu_device *adev = smu->adev;
1442 if (smu_v11_0_auto_fan_control(smu, 0))
1458 return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
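
smu_v11_0_set_fan_speed_percent() first drops out of automatic fan control
(line 1442), converts the percentage into a PWM duty scaled against the
controller's 100%-duty field, then locks the fan into static mode (line 1458).
A sketch of the conversion (register and field names assumed from the THM
block):

    /* Sketch: percent -> PWM duty, scaled by FMAX_DUTY100. */
    if (speed > 100)
            speed = 100;

    duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
                            CG_FDO_CTRL1, FMAX_DUTY100);
    if (!duty100)
            return -EINVAL;

    tmp64 = (uint64_t)speed * duty100;
    do_div(tmp64, 100);           /* duty = speed% of the full-scale duty */
    duty  = (uint32_t)tmp64;
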
1462 smu_v11_0_set_fan_control_mode(struct smu_context *smu,
1469 ret = smu_v11_0_set_fan_speed_percent(smu, 100);
1472 ret = smu_v11_0_auto_fan_control(smu, 0);
1475 ret = smu_v11_0_auto_fan_control(smu, 1);
1489 int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
1492 struct amdgpu_device *adev = smu->adev;
1499 ret = smu_v11_0_auto_fan_control(smu, 0);
1510 ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
1515 int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
1519 ret = smu_send_smc_msg_with_param(smu,
1568 int smu_v11_0_register_irq_handler(struct smu_context *smu)
1570 struct amdgpu_device *adev = smu->adev;
1571 struct amdgpu_irq_src *irq_src = smu->irq_source;
1581 smu->irq_source = irq_src;
1600 int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
1603 struct smu_table_context *table_context = &smu->smu_table;
1630 int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
1634 ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME);
1639 static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
1641 return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq);
1644 bool smu_v11_0_baco_is_support(struct smu_context *smu)
1646 struct amdgpu_device *adev = smu->adev;
1647 struct smu_baco_context *smu_baco = &smu->smu_baco;
1659 if (smu_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
1660 !smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
1670 enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
1672 struct smu_baco_context *smu_baco = &smu->smu_baco;
1682 int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
1685 struct smu_baco_context *smu_baco = &smu->smu_baco;
1686 struct amdgpu_device *adev = smu->adev;
1692 if (smu_v11_0_baco_get_state(smu) == state)
1710 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0);
1712 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1);
1715 ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco);
1737 int smu_v11_0_baco_enter(struct smu_context *smu)
1739 struct amdgpu_device *adev = smu->adev;
1744 ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
1749 ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
1758 int smu_v11_0_baco_exit(struct smu_context *smu)
1762 ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
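
BACO entry is a two-step handshake: arm the D3 sequence, then request the
state transition; exit sends SMU_MSG_ExitBaco (line 1715). A sketch of the
enter path at lines 1744-1749 (the Arcturus guard reflects this file's
skipping of the audio ArmD3 step on that ASIC):

    /* Sketch of smu_v11_0_baco_enter(). */
    if (adev->asic_type != CHIP_ARCTURUS) {
            ret = smu_v11_0_baco_set_armd3_sequence(smu, BACO_SEQ_BACO);
            if (ret)
                    return ret;
    }

    ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
    if (ret)
            return ret;

    msleep(10);    /* give the transition time to settle */
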
1769 int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
1775 clk_id = smu_clk_get_index(smu, clk_type);
1783 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
1786 ret = smu_read_smc_arg(smu, max);
1792 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
1795 ret = smu_read_smc_arg(smu, min);
1804 int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
1810 clk_id = smu_clk_get_index(smu, clk_type);
1816 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
1824 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
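
The DPM frequency messages pack the clock ID and a frequency into one 32-bit
argument: clock ID in the upper 16 bits, MHz value in the lower 16 (queries
leave the low half zero). A sketch of the packing behind lines 1783-1824
(max_mhz is a stand-in for the value being read back and re-applied):

    /* Sketch: 16.16 packing of the mailbox argument. */
    param = (clk_id & 0xffff) << 16;                    /* query max freq */
    ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
    if (!ret)
            ret = smu_read_smc_arg(smu, &max_mhz);      /* reply in arg reg */

    param = (clk_id << 16) | (max_mhz & 0xffff);        /* set soft max */
    ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, param);
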
1833 int smu_v11_0_override_pcie_parameters(struct smu_context *smu)
1835 struct amdgpu_device *adev = smu->adev;
1865 ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
1874 int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size)
1876 struct smu_table_context *table_context = &smu->smu_table;
1887 ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false);
1899 ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true);
1907 int smu_v11_0_set_performance_level(struct smu_context *smu,
1915 ret = smu_force_dpm_limit_value(smu, true);
1918 ret = smu_force_dpm_limit_value(smu, false);
1922 ret = smu_unforce_dpm_levels(smu);
1927 ret = smu_get_profiling_clk_mask(smu, level,
1933 smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
1934 smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
1935 smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
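
For the profiling levels, smu_get_profiling_clk_mask() returns the index of
the DPM level to pin per clock domain, and the `1 << mask` arguments above
turn each index into a one-hot selection mask, e.g.:

    /* Sketch: pin SCLK to DPM level 2 -> selection mask 0x4. */
    uint32_t sclk_mask = 2;    /* level index from the profiling query */
    smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
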