Lines matching defs:smu in drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c

82 static void smu_v11_0_poll_baco_exit(struct smu_context *smu)
84 struct amdgpu_device *adev = smu->adev;
93 int smu_v11_0_init_microcode(struct smu_context *smu)
95 struct amdgpu_device *adev = smu->adev;
135 void smu_v11_0_fini_microcode(struct smu_context *smu)
137 struct amdgpu_device *adev = smu->adev;
143 int smu_v11_0_load_microcode(struct smu_context *smu)
145 struct amdgpu_device *adev = smu->adev;
183 int smu_v11_0_check_fw_status(struct smu_context *smu)
185 struct amdgpu_device *adev = smu->adev;
198 int smu_v11_0_check_fw_version(struct smu_context *smu)
200 struct amdgpu_device *adev = smu->adev;
205 ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
213 if (smu->is_apu)
218 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10;
221 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12;
224 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14;
227 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Sienna_Cichlid;
230 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder;
233 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VANGOGH;
236 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish;
239 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Beige_Goby;
242 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Cyan_Skillfish;
245 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
248 dev_err(smu->adev->dev, "smu unsupported IP version: 0x%x.\n",
250 smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV;
262 if (if_version != smu->smc_driver_if_version) {
263 dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
264 "smu fw program = %d, version = 0x%08x (%d.%d.%d)\n",
265 smu->smc_driver_if_version, if_version,
267 dev_info(smu->adev->dev, "SMU driver if version not matched\n");
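
The check above pins a per-ASIC expected driver interface version and only warns when the firmware reports a different one. A minimal user-space sketch of that pattern, assuming illustrative stand-ins (EXPECTED_IF_VERSION and struct smu_ctx are not the kernel's definitions):

#include <stdint.h>
#include <stdio.h>

#define EXPECTED_IF_VERSION 0x40	/* hypothetical per-ASIC constant */

struct smu_ctx {
	uint32_t smc_driver_if_version;
};

/* Record the expected version, then compare against what the firmware
 * reports. A mismatch is logged, not fatal, since a newer firmware
 * interface is usually backward compatible with an older driver. */
static int check_fw_version(struct smu_ctx *smu, uint32_t fw_if_version)
{
	smu->smc_driver_if_version = EXPECTED_IF_VERSION;

	if (fw_if_version != smu->smc_driver_if_version) {
		printf("smu driver if version = 0x%08x, smu fw if version = 0x%08x\n",
		       smu->smc_driver_if_version, fw_if_version);
		printf("SMU driver if version not matched\n");
	}
	return 0;	/* tolerated, as in the driver */
}
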
273 static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
275 struct amdgpu_device *adev = smu->adev;
288 static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table,
291 struct amdgpu_device *adev = smu->adev;
315 int smu_v11_0_setup_pptable(struct smu_context *smu)
317 struct amdgpu_device *adev = smu->adev;
330 if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
331 dev_info(adev->dev, "use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
334 ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
337 ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size,
338 smu->smu_table.boot_values.pp_table_id);
361 if (!smu->smu_table.power_play_table)
362 smu->smu_table.power_play_table = table;
363 if (!smu->smu_table.power_play_table_size)
364 smu->smu_table.power_play_table_size = size;
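
setup_pptable chooses between a driver-provided powerplay table and the one embedded in the VBIOS: a v2.x header on the SMU firmware blob plus a non-zero pp_table_id selects the driver table, with the minor version picking the v2.0 or v2.1 parser. A sketch of that decision, with stand-in setters (set_pptable_v2_0/v2_1 and get_vbios_pptable here are simplified placeholders, not the kernel signatures):

#include <stdint.h>
#include <stddef.h>

static int set_pptable_v2_0(void **table, uint32_t *size)
{ *table = NULL; *size = 0; return 0; }	/* stand-in */
static int set_pptable_v2_1(void **table, uint32_t *size, uint32_t id)
{ (void)id; *table = NULL; *size = 0; return 0; }	/* stand-in */
static int get_vbios_pptable(void **table, uint32_t *size)
{ *table = NULL; *size = 0; return 0; }	/* stand-in */

static int setup_pptable(uint16_t version_major, uint16_t version_minor,
			 uint32_t pp_table_id, void **table, uint32_t *size)
{
	/* v2.x firmware header + non-zero pp_table_id from the VBIOS
	 * means "use driver provided pptable". */
	if (version_major == 2 && pp_table_id > 0) {
		switch (version_minor) {
		case 0:
			return set_pptable_v2_0(table, size);
		case 1:
			return set_pptable_v2_1(table, size, pp_table_id);
		default:
			return -1;
		}
	}
	/* Otherwise fall back to the pptable embedded in the VBIOS. */
	return get_vbios_pptable(table, size);
}

Note also (lines 361-364) that the resulting pointer and size are cached only if not already set, so an earlier override survives.
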
369 int smu_v11_0_init_smc_tables(struct smu_context *smu)
371 struct smu_table_context *smu_table = &smu->smu_table;
428 int smu_v11_0_fini_smc_tables(struct smu_context *smu)
430 struct smu_table_context *smu_table = &smu->smu_table;
431 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
473 int smu_v11_0_init_power(struct smu_context *smu)
475 struct amdgpu_device *adev = smu->adev;
476 struct smu_power_context *smu_power = &smu->smu_power;
490 int smu_v11_0_fini_power(struct smu_context *smu)
492 struct smu_power_context *smu_power = &smu->smu_power;
527 int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
539 ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
545 dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu11\n");
554 smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
555 smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
556 smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
557 smu->smu_table.boot_values.socclk = 0;
558 smu->smu_table.boot_values.dcefclk = 0;
559 smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
560 smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
561 smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
562 smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
563 smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
564 smu->smu_table.boot_values.pp_table_id = 0;
565 smu->smu_table.boot_values.firmware_caps = v_3_1->firmware_capability;
571 smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
572 smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
573 smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
574 smu->smu_table.boot_values.socclk = 0;
575 smu->smu_table.boot_values.dcefclk = 0;
576 smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
577 smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
578 smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
579 smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
580 smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
581 smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
582 smu->smu_table.boot_values.firmware_caps = v_3_3->firmware_capability;
585 smu->smu_table.boot_values.format_revision = header->format_revision;
586 smu->smu_table.boot_values.content_revision = header->content_revision;
588 smu_v11_0_atom_get_smu_clockinfo(smu->adev,
591 &smu->smu_table.boot_values.socclk);
593 smu_v11_0_atom_get_smu_clockinfo(smu->adev,
596 &smu->smu_table.boot_values.dcefclk);
598 smu_v11_0_atom_get_smu_clockinfo(smu->adev,
601 &smu->smu_table.boot_values.eclk);
603 smu_v11_0_atom_get_smu_clockinfo(smu->adev,
606 &smu->smu_table.boot_values.vclk);
608 smu_v11_0_atom_get_smu_clockinfo(smu->adev,
611 &smu->smu_table.boot_values.dclk);
613 if ((smu->smu_table.boot_values.format_revision == 3) &&
614 (smu->smu_table.boot_values.content_revision >= 2))
615 smu_v11_0_atom_get_smu_clockinfo(smu->adev,
618 &smu->smu_table.boot_values.fclk);
620 smu_v11_0_atom_get_smu_clockinfo(smu->adev,
623 &smu->smu_table.boot_values.lclk);
628 int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
630 struct smu_table_context *smu_table = &smu->smu_table;
643 ret = smu_cmn_send_smc_msg_with_param(smu,
649 ret = smu_cmn_send_smc_msg_with_param(smu,
660 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
664 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
668 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
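
Each SMU message carries a single 32-bit argument, so notify_memory_pool_location delivers a 64-bit GPU address as two messages, high word first, followed by the size. A minimal sketch, assuming a stand-in sender and hypothetical message ids:

#include <stdint.h>

enum { MSG_ADDR_HIGH, MSG_ADDR_LOW, MSG_SIZE };	/* hypothetical ids */

static int send_msg(int msg, uint32_t param)
{ (void)msg; (void)param; return 0; }	/* stand-in sender */

/* Split the 64-bit DMA address across two 32-bit message arguments. */
static int notify_pool_location(uint64_t mc_addr, uint32_t size)
{
	int ret;

	ret = send_msg(MSG_ADDR_HIGH, (uint32_t)(mc_addr >> 32));
	if (ret)
		return ret;
	ret = send_msg(MSG_ADDR_LOW, (uint32_t)mc_addr);
	if (ret)
		return ret;
	return send_msg(MSG_SIZE, size);
}

The same high/low split reappears below for the 64-bit allowed-feature mask (SetAllowedFeaturesMaskHigh/Low).
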
676 int smu_v11_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
680 ret = smu_cmn_send_smc_msg_with_param(smu,
683 dev_err(smu->adev->dev, "SMU11 attempt to set divider for DCEFCLK Failed!");
688 int smu_v11_0_set_driver_table_location(struct smu_context *smu)
690 struct smu_table *driver_table = &smu->smu_table.driver_table;
694 ret = smu_cmn_send_smc_msg_with_param(smu,
699 ret = smu_cmn_send_smc_msg_with_param(smu,
708 int smu_v11_0_set_tool_table_location(struct smu_context *smu)
711 struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
714 ret = smu_cmn_send_smc_msg_with_param(smu,
719 ret = smu_cmn_send_smc_msg_with_param(smu,
728 int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
730 struct amdgpu_device *adev = smu->adev;
741 return smu_cmn_send_smc_msg_with_param(smu,
748 int smu_v11_0_set_allowed_mask(struct smu_context *smu)
750 struct smu_feature *feature = &smu->smu_feature;
761 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
766 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
775 int smu_v11_0_system_features_control(struct smu_context *smu,
778 return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
782 int smu_v11_0_notify_display_change(struct smu_context *smu)
786 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
787 smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
788 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
794 smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
800 if ((smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
801 (smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetMaxDpmFreq) < 0))
804 clk_id = smu_cmn_to_asic_specific_index(smu,
810 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
813 dev_err(smu->adev->dev, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
821 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
824 dev_err(smu->adev->dev, "[GetMaxSustainableClock] failed to get max AC clock from SMC!");
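
get_max_sustainable_clock queries the DC-mode ceiling first and falls back to the AC limit only when the DC limit reads back as zero. A sketch of that fallback, with a stand-in sender and hypothetical message ids:

#include <stdint.h>

enum { MSG_GET_DC_MAX, MSG_GET_AC_MAX };	/* hypothetical ids */

static int send_msg(int msg, uint32_t param, uint32_t *out)
{ (void)msg; (void)param; *out = 0; return 0; }	/* stand-in sender */

static int get_max_sustainable_clock(uint32_t clk_id, uint32_t *clock)
{
	int ret;

	/* Ask for the DC-mode ceiling first. */
	ret = send_msg(MSG_GET_DC_MAX, clk_id << 16, clock);
	if (ret)
		return ret;

	/* A zero DC limit means the clock is unconstrained in DC mode,
	 * so report the AC ceiling instead. */
	if (*clock != 0)
		return 0;

	return send_msg(MSG_GET_AC_MAX, clk_id << 16, clock);
}
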
831 int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
834 smu->smu_table.max_sustainable_clocks;
837 max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
838 max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
839 max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
844 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
845 ret = smu_v11_0_get_max_sustainable_clock(smu,
849 dev_err(smu->adev->dev, "[%s] failed to get max UCLK from SMC!",
855 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
856 ret = smu_v11_0_get_max_sustainable_clock(smu,
860 dev_err(smu->adev->dev, "[%s] failed to get max SOCCLK from SMC!",
866 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
867 ret = smu_v11_0_get_max_sustainable_clock(smu,
871 dev_err(smu->adev->dev, "[%s] failed to get max DCEFCLK from SMC!",
876 ret = smu_v11_0_get_max_sustainable_clock(smu,
880 dev_err(smu->adev->dev, "[%s] failed to get max DISPCLK from SMC!",
884 ret = smu_v11_0_get_max_sustainable_clock(smu,
888 dev_err(smu->adev->dev, "[%s] failed to get max PHYCLK from SMC!",
892 ret = smu_v11_0_get_max_sustainable_clock(smu,
896 dev_err(smu->adev->dev, "[%s] failed to get max PIXCLK from SMC!",
908 int smu_v11_0_get_current_power_limit(struct smu_context *smu,
914 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
917 power_src = smu_cmn_to_asic_specific_index(smu,
919 smu->adev->pm.ac_power ?
929 ret = smu_cmn_send_smc_msg_with_param(smu,
934 dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__);
939 int smu_v11_0_set_power_limit(struct smu_context *smu,
950 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
951 dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
955 power_src = smu_cmn_to_asic_specific_index(smu,
957 smu->adev->pm.ac_power ?
971 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit_param, NULL);
973 dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
977 smu->current_power_limit = limit;
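
set_power_limit folds the power source into the SetPptLimit argument alongside the wattage. A sketch of the packing, assuming the field layout described in recent kernels (power limit in bits 0-15, power source in bits 16-23, controller id in bits 24-31); treat the exact layout as an assumption:

#include <stdint.h>

static uint32_t pack_ppt_limit(uint32_t limit, uint32_t power_src)
{
	uint32_t param;

	param  = limit & 0xFFFF;		/* bits 0-15: power limit */
	param |= (power_src & 0xFF) << 16;	/* bits 16-23: AC or DC */
	param |= 0u << 24;			/* bits 24-31: controller 0 (PPT0) */
	return param;
}
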
982 static int smu_v11_0_ack_ac_dc_interrupt(struct smu_context *smu)
984 return smu_cmn_send_smc_msg(smu,
989 static int smu_v11_0_process_pending_interrupt(struct smu_context *smu)
993 if (smu->dc_controlled_by_gpio &&
994 smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
995 ret = smu_v11_0_ack_ac_dc_interrupt(smu);
1000 void smu_v11_0_interrupt_work(struct smu_context *smu)
1002 if (smu_v11_0_ack_ac_dc_interrupt(smu))
1003 dev_err(smu->adev->dev, "Ack AC/DC interrupt Failed!\n");
1006 int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
1010 if (smu->smu_table.thermal_controller_type) {
1011 ret = amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
1020 return smu_v11_0_process_pending_interrupt(smu);
1023 int smu_v11_0_disable_thermal_alert(struct smu_context *smu)
1025 return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
1033 int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
1035 struct amdgpu_device *adev = smu->adev;
1053 smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
1062 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
1063 smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
1081 dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__);
1089 if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
1092 ret = smu_v11_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0);
1095 smu->hard_min_uclk_req_from_dal = clk_freq;
1102 int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
1105 struct amdgpu_device *adev = smu->adev;
1119 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
1121 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
1131 smu_v11_0_get_fan_control_mode(struct smu_context *smu)
1133 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
1136 return smu->user_dpm_profile.fan_mode;
1140 smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
1144 if (!smu_cmn_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
1147 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
1149 dev_err(smu->adev->dev, "[%s]%s smc FAN CONTROL feature failed!",
1156 smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
1158 struct amdgpu_device *adev = smu->adev;
1171 smu_v11_0_set_fan_speed_pwm(struct smu_context *smu, uint32_t speed)
1173 struct amdgpu_device *adev = smu->adev;
1192 return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
1195 int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
1198 struct amdgpu_device *adev = smu->adev;
1222 return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
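
set_fan_speed_pwm rescales the 0..255 request into the fan controller's duty range before forcing static mode; the RPM variant instead converts the target speed into a tachometer period. A sketch of the PWM scaling (duty100 stands in for the controller's full-scale FMAX_DUTY100 field):

#include <stdint.h>

/* Map a 0..255 PWM request onto the controller's full-scale duty
 * value; the 64-bit intermediate avoids overflow. */
static uint32_t pwm_to_duty(uint32_t speed, uint32_t duty100)
{
	if (speed > 255)
		speed = 255;
	return (uint32_t)(((uint64_t)speed * duty100) / 255);
}
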
1225 int smu_v11_0_get_fan_speed_pwm(struct smu_context *smu,
1228 struct amdgpu_device *adev = smu->adev;
1237 if ((smu->user_dpm_profile.flags & SMU_CUSTOM_FAN_SPEED_PWM)
1238 && !smu->user_dpm_profile.fan_speed_pwm) {
1257 int smu_v11_0_get_fan_speed_rpm(struct smu_context *smu,
1260 struct amdgpu_device *adev = smu->adev;
1270 if ((smu->user_dpm_profile.flags & SMU_CUSTOM_FAN_SPEED_RPM)
1271 && !smu->user_dpm_profile.fan_speed_rpm) {
1291 smu_v11_0_set_fan_control_mode(struct smu_context *smu,
1298 ret = smu_v11_0_auto_fan_control(smu, 0);
1300 ret = smu_v11_0_set_fan_speed_pwm(smu, 255);
1303 ret = smu_v11_0_auto_fan_control(smu, 0);
1306 ret = smu_v11_0_auto_fan_control(smu, 1);
1313 dev_err(smu->adev->dev, "[%s]Set fan control mode failed!", __func__);
1320 int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
1323 return smu_cmn_send_smc_msg_with_param(smu,
1334 struct smu_context *smu = adev->powerplay.pp_handle;
1357 smu->thermal_range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
1359 smu->thermal_range.software_shutdown_temp);
1403 struct smu_context *smu = adev->powerplay.pp_handle;
1416 schedule_delayed_work(&smu->swctf_delayed_work,
1444 schedule_work(&smu->interrupt_work);
1449 schedule_work(&smu->interrupt_work);
1456 atomic64_inc(&smu->throttle_int_counter);
1462 schedule_work(&smu->throttling_logging_work);
1482 int smu_v11_0_register_irq_handler(struct smu_context *smu)
1484 struct amdgpu_device *adev = smu->adev;
1485 struct amdgpu_irq_src *irq_src = &smu->irq_source;
1519 int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
1522 struct smu_table_context *table_context = &smu->smu_table;
1549 int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
1551 return smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
1554 int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu,
1557 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
1560 bool smu_v11_0_baco_is_support(struct smu_context *smu)
1562 struct smu_baco_context *smu_baco = &smu->smu_baco;
1564 if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support)
1568 if (smu_v11_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
1572 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
1573 !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
1579 enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
1581 struct smu_baco_context *smu_baco = &smu->smu_baco;
1589 int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
1591 struct smu_baco_context *smu_baco = &smu->smu_baco;
1592 struct amdgpu_device *adev = smu->adev;
1597 if (smu_v11_0_baco_get_state(smu) == state)
1607 ret = smu_cmn_send_smc_msg_with_param(smu,
1612 ret = smu_cmn_send_smc_msg_with_param(smu,
1631 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
1633 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL);
1639 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
1654 int smu_v11_0_baco_enter(struct smu_context *smu)
1658 ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
1667 int smu_v11_0_baco_exit(struct smu_context *smu)
1671 ret = smu_v11_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
1677 smu_v11_0_poll_baco_exit(smu);
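
baco_set_state is idempotent: it compares the cached state against the request (line 1597) and returns early on a match; entry and exit then go through the EnterBaco/ExitBaco messages, and baco_exit additionally polls until the hardware has actually left BACO. A condensed sketch with stand-in message senders:

enum baco_state { BACO_STATE_EXIT = 0, BACO_STATE_ENTER = 1 };

struct baco_ctx { enum baco_state state; };

static int send_enter_baco(void) { return 0; }	/* stand-in for EnterBaco */
static int send_exit_baco(void)  { return 0; }	/* stand-in for ExitBaco */

static int baco_set_state(struct baco_ctx *baco, enum baco_state state)
{
	int ret;

	/* Requesting the current state is a no-op. */
	if (baco->state == state)
		return 0;

	ret = (state == BACO_STATE_ENTER) ? send_enter_baco()
					  : send_exit_baco();
	if (ret)
		return ret;

	baco->state = state;	/* cache only after the message succeeds */
	return 0;
}
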
1683 int smu_v11_0_mode1_reset(struct smu_context *smu)
1687 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
1694 int smu_v11_0_handle_passthrough_sbr(struct smu_context *smu, bool enable)
1698 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LightSBR, enable ? 1 : 0, NULL);
1704 int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
1711 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
1715 clock_limit = smu->smu_table.boot_values.uclk;
1719 clock_limit = smu->smu_table.boot_values.gfxclk;
1722 clock_limit = smu->smu_table.boot_values.socclk;
1738 clk_id = smu_cmn_to_asic_specific_index(smu,
1748 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max);
1754 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
1763 int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
1771 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
1774 clk_id = smu_cmn_to_asic_specific_index(smu,
1782 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
1790 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
1800 int smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu,
1811 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
1814 clk_id = smu_cmn_to_asic_specific_index(smu,
1822 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
1830 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
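
Both range setters pack the clock id into the upper 16 bits of the message argument and the frequency (in MHz) into the lower 16, sending max and min as separate messages; a bound of zero is skipped. A sketch for the soft limits (message ids and the sender are stand-ins; the hard variant only differs in the messages used):

#include <stdint.h>

enum { MSG_SET_SOFT_MAX, MSG_SET_SOFT_MIN };	/* hypothetical ids */

static int send_msg(int msg, uint32_t param)
{ (void)msg; (void)param; return 0; }	/* stand-in sender */

static int set_soft_freq_range(uint32_t clk_id, uint32_t min_mhz,
			       uint32_t max_mhz)
{
	int ret;

	if (max_mhz > 0) {
		ret = send_msg(MSG_SET_SOFT_MAX,
			       (clk_id << 16) | (max_mhz & 0xffff));
		if (ret)
			return ret;
	}
	if (min_mhz > 0) {
		ret = send_msg(MSG_SET_SOFT_MIN,
			       (clk_id << 16) | (min_mhz & 0xffff));
		if (ret)
			return ret;
	}
	return 0;
}
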
1839 int smu_v11_0_set_performance_level(struct smu_context *smu,
1843 smu->smu_dpm.dpm_context;
1851 &smu->pstate_table;
1852 struct amdgpu_device *adev = smu->adev;
1911 ret = smu_v11_0_set_soft_freq_limited_range(smu,
1920 ret = smu_v11_0_set_soft_freq_limited_range(smu,
1929 ret = smu_v11_0_set_soft_freq_limited_range(smu,
1940 int smu_v11_0_set_power_source(struct smu_context *smu,
1945 pwr_source = smu_cmn_to_asic_specific_index(smu,
1951 return smu_cmn_send_smc_msg_with_param(smu,
1957 int smu_v11_0_get_dpm_freq_by_index(struct smu_context *smu,
1968 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
1971 clk_id = smu_cmn_to_asic_specific_index(smu,
1979 ret = smu_cmn_send_smc_msg_with_param(smu,
1995 int smu_v11_0_get_dpm_level_count(struct smu_context *smu,
1999 return smu_v11_0_get_dpm_freq_by_index(smu,
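
get_dpm_level_count is a thin wrapper: asking GetDpmFreqByIndex for level 0xff makes the firmware return the number of DPM levels instead of a frequency, and bit 31 of any reply (which flags discrete vs fine-grained DPM) is masked off. A sketch with a stand-in sender:

#include <stdint.h>

enum { MSG_GET_DPM_FREQ_BY_INDEX };	/* hypothetical id */

static int send_msg(int msg, uint32_t param, uint32_t *out)
{ (void)msg; (void)param; *out = 0; return 0; }	/* stand-in sender */

static int get_dpm_freq_by_index(uint32_t clk_id, uint32_t level,
				 uint32_t *value)
{
	int ret;

	/* clk id in the high half, level index in the low half */
	ret = send_msg(MSG_GET_DPM_FREQ_BY_INDEX,
		       ((clk_id & 0xffff) << 16) | (level & 0xffff), value);
	if (ret)
		return ret;

	/* Bit 31 flags fine-grained vs discrete DPM; strip it so
	 * callers see a plain frequency (or count). */
	*value &= 0x7fffffff;
	return 0;
}

/* Index 0xff is special: the reply is the level count. */
static int get_dpm_level_count(uint32_t clk_id, uint32_t *count)
{
	return get_dpm_freq_by_index(clk_id, 0xff, count);
}
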
2005 int smu_v11_0_set_single_dpm_table(struct smu_context *smu,
2013 ret = smu_v11_0_get_dpm_level_count(smu,
2017 dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__);
2022 ret = smu_v11_0_get_dpm_freq_by_index(smu,
2027 dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__);
2043 int smu_v11_0_get_dpm_level_range(struct smu_context *smu,
2056 ret = smu_v11_0_get_dpm_freq_by_index(smu,
2065 ret = smu_v11_0_get_dpm_level_count(smu,
2071 ret = smu_v11_0_get_dpm_freq_by_index(smu,
2082 int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu)
2084 struct amdgpu_device *adev = smu->adev;
2091 uint16_t smu_v11_0_get_current_pcie_link_width(struct smu_context *smu)
2095 width_level = smu_v11_0_get_current_pcie_link_width_level(smu);
2102 int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu)
2104 struct amdgpu_device *adev = smu->adev;
2111 uint16_t smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu)
2115 speed_level = smu_v11_0_get_current_pcie_link_speed_level(smu);
2122 int smu_v11_0_gfx_ulv_control(struct smu_context *smu,
2127 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
2128 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement);
2133 int smu_v11_0_deep_sleep_control(struct smu_context *smu,
2136 struct amdgpu_device *adev = smu->adev;
2139 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) {
2140 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement);
2147 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) {
2148 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement);
2155 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) {
2156 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement);
2163 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
2164 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement);
2171 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) {
2172 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, enablement);
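
deep_sleep_control repeats one gate per clock domain: a feature is toggled only when the firmware advertises support for it, so absent domains are skipped rather than treated as errors. A condensed sketch over stand-in feature ids (the enum and helpers below are illustrative, not the SMU_FEATURE_DS_* bits themselves):

#include <stdbool.h>

enum ds_feature { DS_GFXCLK, DS_UCLK, DS_FCLK, DS_SOCCLK, DS_LCLK,
		  DS_COUNT };

static bool feature_is_supported(int f) { (void)f; return true; }
static int feature_set_enabled(int f, bool on)
{ (void)f; (void)on; return 0; }

static int deep_sleep_control(bool enable)
{
	int ret;

	for (int f = 0; f < DS_COUNT; f++) {
		if (!feature_is_supported(f))
			continue;	/* absent domains are not errors */
		ret = feature_set_enabled(f, enable);
		if (ret)
			return ret;
	}
	return 0;
}
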
2182 int smu_v11_0_restore_user_od_settings(struct smu_context *smu)
2184 struct smu_table_context *table_context = &smu->smu_table;
2188 ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)user_od_table, true);
2190 dev_err(smu->adev->dev, "Failed to import overdrive table!\n");
2195 void smu_v11_0_set_smu_mailbox_registers(struct smu_context *smu)
2197 struct amdgpu_device *adev = smu->adev;
2199 smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
2200 smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
2201 smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
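
set_smu_mailbox_registers records the three MP1 scratch registers the message path uses: C2PMSG_82 carries the 32-bit argument, C2PMSG_66 the message index, and C2PMSG_90 the response code. A user-space sketch of the send/poll handshake these registers enable, assuming a fake register file (the real transaction, with timeouts and error decoding, lives in smu_cmn_send_smc_msg_with_param()):

#include <stdint.h>

static uint32_t regs[3];			/* fake register file */
enum { PARAM_REG, MSG_REG, RESP_REG };		/* C2PMSG_82/66/90 roles */

static void wreg(int r, uint32_t v) { regs[r] = v; }
static uint32_t rreg(int r) { return regs[r]; }

/* Handshake: clear the response register, post the argument and the
 * message index, then poll for the firmware's response code. */
static int send_smc_msg(uint16_t msg, uint32_t param, uint32_t *read_arg)
{
	wreg(RESP_REG, 0);
	wreg(PARAM_REG, param);
	wreg(MSG_REG, msg);

	regs[RESP_REG] = 1;	/* fake firmware: answer immediately */

	while (rreg(RESP_REG) == 0)
		;		/* the driver bounds this with a timeout */

	if (read_arg)
		*read_arg = rreg(PARAM_REG);	/* reply reuses the param reg */

	return rreg(RESP_REG) == 1 ? 0 : -1;	/* 1 == OK on SMU11 */
}
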