Lines matching references to "smu" in smu_v13_0.c (AMD amdgpu swSMU v13.0 driver); each entry below is prefixed with its line number in that file.

92 int smu_v13_0_init_microcode(struct smu_context *smu)
94 struct amdgpu_device *adev = smu->adev;
102 /* doesn't need to load smu firmware in IOV mode */
133 void smu_v13_0_fini_microcode(struct smu_context *smu)
135 struct amdgpu_device *adev = smu->adev;
141 int smu_v13_0_load_microcode(struct smu_context *smu)
144 struct amdgpu_device *adev = smu->adev;
183 int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
185 struct amdgpu_device *adev = smu->adev;
191 /* doesn't need to load smu firmware in IOV mode */
211 pptable_id = smu->smu_table.boot_values.pp_table_id;
218 ret = smu_v13_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
222 smu->pptable_firmware.data = table;
223 smu->pptable_firmware.size = size;
227 ucode->fw = &smu->pptable_firmware;
229 ALIGN(smu->pptable_firmware.size, PAGE_SIZE);
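The block above (lines 183-229) registers the firmware-embedded pptable as one more ucode blob for the PSP loader. A minimal sketch of that tail end, where the AMDGPU_UCODE_ID_PPTABLE slot and the surrounding declarations are assumptions rather than verbatim source:

    /* Sketch: wrap the pptable extracted from the SMU firmware image in
     * a struct firmware so the PSP front-loader can upload it.
     * AMDGPU_UCODE_ID_PPTABLE is an assumed ucode id. */
    struct amdgpu_device *adev = smu->adev;
    struct amdgpu_firmware_info *ucode;
    uint32_t pptable_id = smu->smu_table.boot_values.pp_table_id;
    uint32_t size = 0;
    void *table;
    int ret;

    ret = smu_v13_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
    if (ret)
        return ret;

    smu->pptable_firmware.data = table;
    smu->pptable_firmware.size = size;

    ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_PPTABLE];
    ucode->ucode_id = AMDGPU_UCODE_ID_PPTABLE;
    ucode->fw = &smu->pptable_firmware;
    adev->firmware.fw_size += ALIGN(smu->pptable_firmware.size, PAGE_SIZE);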
234 int smu_v13_0_check_fw_status(struct smu_context *smu)
236 struct amdgpu_device *adev = smu->adev;
258 int smu_v13_0_check_fw_version(struct smu_context *smu)
260 struct amdgpu_device *adev = smu->adev;
265 ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
273 if (smu->is_apu ||
279 dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n",
290 if (if_version != smu->smc_driver_if_version) {
291 dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
292 "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
293 smu->smc_driver_if_version, if_version,
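Lines 258-293 compare the driver's compiled-in interface version against what the firmware reports. A minimal sketch of that check, assuming (as the dev_info rather than dev_err at line 291 suggests) a mismatch is logged but not treated as fatal:

    /* Sketch of the version check in smu_v13_0_check_fw_version(). */
    struct amdgpu_device *adev = smu->adev;
    uint32_t if_version, smu_version;
    int ret;

    ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
    if (ret)
        return ret;

    /* a mismatch only earns a log line; the driver keeps going for
     * backward compatibility (assumption based on line 291) */
    if (if_version != smu->smc_driver_if_version)
        dev_info(adev->dev,
                 "smu driver if version = 0x%08x, smu fw if version = 0x%08x\n",
                 smu->smc_driver_if_version, if_version);

    return 0;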
301 static int smu_v13_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
303 struct amdgpu_device *adev = smu->adev;
316 static int smu_v13_0_set_pptable_v2_1(struct smu_context *smu, void **table,
319 struct amdgpu_device *adev = smu->adev;
343 static int smu_v13_0_get_pptable_from_vbios(struct smu_context *smu, void **table, uint32_t *size)
345 struct amdgpu_device *adev = smu->adev;
365 int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
371 struct amdgpu_device *adev = smu->adev;
384 dev_err(adev->dev, "Unsupported smu firmware version %d.%d\n",
391 ret = smu_v13_0_set_pptable_v2_0(smu, table, size);
394 ret = smu_v13_0_set_pptable_v2_1(smu, table, size, pptable_id);
404 int smu_v13_0_setup_pptable(struct smu_context *smu)
406 struct amdgpu_device *adev = smu->adev;
416 pptable_id = smu->smu_table.boot_values.pp_table_id;
421 ret = smu_v13_0_get_pptable_from_vbios(smu, &table, &size);
423 ret = smu_v13_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
428 if (!smu->smu_table.power_play_table)
429 smu->smu_table.power_play_table = table;
430 if (!smu->smu_table.power_play_table_size)
431 smu->smu_table.power_play_table_size = size;
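Lines 404-431 pick where the pptable comes from: the VBIOS copy when no pp_table_id is set, otherwise a matching entry inside the SMU firmware binary. A sketch of that decision, with the exact guard conditions (SR-IOV forcing, module-parameter override) left out as assumptions:

    /* Sketch of the source selection in smu_v13_0_setup_pptable().
     * Assumption: pp_table_id == 0 means "use the VBIOS table". */
    uint32_t pptable_id = smu->smu_table.boot_values.pp_table_id;
    uint32_t size = 0;
    void *table;
    int ret;

    if (!pptable_id)
        ret = smu_v13_0_get_pptable_from_vbios(smu, &table, &size);
    else
        ret = smu_v13_0_get_pptable_from_firmware(smu, &table, &size,
                                                  pptable_id);
    if (ret)
        return ret;

    /* only fill in what the ASIC-specific code hasn't set already */
    if (!smu->smu_table.power_play_table)
        smu->smu_table.power_play_table = table;
    if (!smu->smu_table.power_play_table_size)
        smu->smu_table.power_play_table_size = size;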
436 int smu_v13_0_init_smc_tables(struct smu_context *smu)
438 struct smu_table_context *smu_table = &smu->smu_table;
503 int smu_v13_0_fini_smc_tables(struct smu_context *smu)
505 struct smu_table_context *smu_table = &smu->smu_table;
506 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
546 int smu_v13_0_init_power(struct smu_context *smu)
548 struct smu_power_context *smu_power = &smu->smu_power;
562 int smu_v13_0_fini_power(struct smu_context *smu)
564 struct smu_power_context *smu_power = &smu->smu_power;
576 int smu_v13_0_get_vbios_bootup_values(struct smu_context *smu)
591 ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
597 dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu13\n");
606 smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
607 smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
608 smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
609 smu->smu_table.boot_values.socclk = 0;
610 smu->smu_table.boot_values.dcefclk = 0;
611 smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
612 smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
613 smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
614 smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
615 smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
616 smu->smu_table.boot_values.pp_table_id = 0;
620 smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
621 smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
622 smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
623 smu->smu_table.boot_values.socclk = 0;
624 smu->smu_table.boot_values.dcefclk = 0;
625 smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
626 smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
627 smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
628 smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
629 smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
630 smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
635 smu->smu_table.boot_values.revision = v_3_4->firmware_revision;
636 smu->smu_table.boot_values.gfxclk = v_3_4->bootup_sclk_in10khz;
637 smu->smu_table.boot_values.uclk = v_3_4->bootup_mclk_in10khz;
638 smu->smu_table.boot_values.socclk = 0;
639 smu->smu_table.boot_values.dcefclk = 0;
640 smu->smu_table.boot_values.vddc = v_3_4->bootup_vddc_mv;
641 smu->smu_table.boot_values.vddci = v_3_4->bootup_vddci_mv;
642 smu->smu_table.boot_values.mvddc = v_3_4->bootup_mvddc_mv;
643 smu->smu_table.boot_values.vdd_gfx = v_3_4->bootup_vddgfx_mv;
644 smu->smu_table.boot_values.cooling_id = v_3_4->coolingsolution_id;
645 smu->smu_table.boot_values.pp_table_id = v_3_4->pplib_pptable_id;
649 smu->smu_table.boot_values.format_revision = header->format_revision;
650 smu->smu_table.boot_values.content_revision = header->content_revision;
654 if (!amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
660 smu->smu_table.boot_values.socclk = smu_info_v3_6->bootup_socclk_10khz;
661 smu->smu_table.boot_values.vclk = smu_info_v3_6->bootup_vclk_10khz;
662 smu->smu_table.boot_values.dclk = smu_info_v3_6->bootup_dclk_10khz;
663 smu->smu_table.boot_values.fclk = smu_info_v3_6->bootup_fclk_10khz;
669 smu->smu_table.boot_values.socclk = smu_info_v4_0->bootup_socclk_10khz;
670 smu->smu_table.boot_values.dcefclk = smu_info_v4_0->bootup_dcefclk_10khz;
671 smu->smu_table.boot_values.vclk = smu_info_v4_0->bootup_vclk0_10khz;
672 smu->smu_table.boot_values.dclk = smu_info_v4_0->bootup_dclk0_10khz;
673 smu->smu_table.boot_values.fclk = smu_info_v4_0->bootup_fclk_10khz;
675 dev_warn(smu->adev->dev, "Unexpected and unhandled version: %d.%d\n",
684 int smu_v13_0_notify_memory_pool_location(struct smu_context *smu)
686 struct smu_table_context *smu_table = &smu->smu_table;
699 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
703 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
707 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
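The three messages at lines 699-707 are the standard swSMU idiom for handing the firmware a 64-bit GPU address plus a size: the address travels as two 32-bit halves. The same pattern recurs for the driver table (lines 733-738) and the tool table (lines 753-758). A sketch, where the memory_pool/mc_address field names are assumptions about struct smu_table:

    /* Sketch of smu_v13_0_notify_memory_pool_location(): split a 64-bit
     * address with the standard upper_32_bits()/lower_32_bits() helpers. */
    struct smu_table *memory_pool = &smu->smu_table.memory_pool;
    uint64_t address = memory_pool->mc_address;
    int ret;

    ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
                                          upper_32_bits(address), NULL);
    if (ret)
        return ret;

    ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
                                          lower_32_bits(address), NULL);
    if (ret)
        return ret;

    return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
                                           (uint32_t)memory_pool->size, NULL);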
715 int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
719 ret = smu_cmn_send_smc_msg_with_param(smu,
722 dev_err(smu->adev->dev, "SMU13 attempt to set divider for DCEFCLK Failed!");
727 int smu_v13_0_set_driver_table_location(struct smu_context *smu)
729 struct smu_table *driver_table = &smu->smu_table.driver_table;
733 ret = smu_cmn_send_smc_msg_with_param(smu,
738 ret = smu_cmn_send_smc_msg_with_param(smu,
747 int smu_v13_0_set_tool_table_location(struct smu_context *smu)
750 struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];
753 ret = smu_cmn_send_smc_msg_with_param(smu,
758 ret = smu_cmn_send_smc_msg_with_param(smu,
767 int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count)
771 if (!smu->pm_enabled)
774 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
779 int smu_v13_0_set_allowed_mask(struct smu_context *smu)
781 struct smu_feature *feature = &smu->smu_feature;
791 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
796 return smu_cmn_send_smc_msg_with_param(smu,
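Lines 779-796 push the 64-bit allowed-feature bitmap to the firmware as two 32-bit words, high half first. A sketch, assuming the usual bitmap_to_arr32() conversion with word 0 holding the low 32 bits:

    /* Sketch of smu_v13_0_set_allowed_mask(). */
    struct smu_feature *feature = &smu->smu_feature;
    uint32_t feature_mask[2];
    int ret;

    bitmap_to_arr32(feature_mask, feature->allowed, 64);

    ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
                                          feature_mask[1], NULL);
    if (ret)
        return ret;

    return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
                                           feature_mask[0], NULL);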
802 int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
805 struct amdgpu_device *adev = smu->adev;
820 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
822 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
831 int smu_v13_0_system_features_control(struct smu_context *smu,
834 return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
838 int smu_v13_0_notify_display_change(struct smu_context *smu)
842 if (!amdgpu_device_has_dc_support(smu->adev))
843 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DALNotPresent, NULL);
849 smu_v13_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
855 if ((smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
856 (smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG, SMU_MSG_GetMaxDpmFreq) < 0))
859 clk_id = smu_cmn_to_asic_specific_index(smu,
865 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
868 dev_err(smu->adev->dev, "[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
876 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
879 dev_err(smu->adev->dev, "[GetMaxSustainableClock] failed to get max AC clock from SMC!");
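Lines 849-879 query a per-clock sustainable maximum: first the DC (battery) limit, then the AC limit as a fallback. A sketch, assuming the clock id is packed into the upper 16 bits of the message argument and that a DC reading of zero means "unset" (the clock_select parameter name is also an assumption):

    /* Sketch of smu_v13_0_get_max_sustainable_clock(). */
    int clk_id = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_CLK,
                                                clock_select);
    int ret;

    if (clk_id < 0)
        return clk_id;

    ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
                                          clk_id << 16, clock);
    if (ret)
        return ret;

    if (*clock != 0)
        return 0;

    /* a DC limit of 0 means "not configured"; use the AC limit instead */
    return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
                                           clk_id << 16, clock);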
886 int smu_v13_0_init_max_sustainable_clocks(struct smu_context *smu)
889 smu->smu_table.max_sustainable_clocks;
892 max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
893 max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
894 max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
899 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
900 ret = smu_v13_0_get_max_sustainable_clock(smu,
904 dev_err(smu->adev->dev, "[%s] failed to get max UCLK from SMC!",
910 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
911 ret = smu_v13_0_get_max_sustainable_clock(smu,
915 dev_err(smu->adev->dev, "[%s] failed to get max SOCCLK from SMC!",
921 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
922 ret = smu_v13_0_get_max_sustainable_clock(smu,
926 dev_err(smu->adev->dev, "[%s] failed to get max DCEFCLK from SMC!",
931 ret = smu_v13_0_get_max_sustainable_clock(smu,
935 dev_err(smu->adev->dev, "[%s] failed to get max DISPCLK from SMC!",
939 ret = smu_v13_0_get_max_sustainable_clock(smu,
943 dev_err(smu->adev->dev, "[%s] failed to get max PHYCLK from SMC!",
947 ret = smu_v13_0_get_max_sustainable_clock(smu,
951 dev_err(smu->adev->dev, "[%s] failed to get max PIXCLK from SMC!",
963 int smu_v13_0_get_current_power_limit(struct smu_context *smu,
969 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT))
972 power_src = smu_cmn_to_asic_specific_index(smu,
974 smu->adev->pm.ac_power ?
980 ret = smu_cmn_send_smc_msg_with_param(smu,
985 dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__);
990 int smu_v13_0_set_power_limit(struct smu_context *smu,
999 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
1000 dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
1004 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit, NULL);
1006 dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
1010 smu->current_power_limit = limit;
1015 static int smu_v13_0_allow_ih_interrupt(struct smu_context *smu)
1017 return smu_cmn_send_smc_msg(smu,
1022 static int smu_v13_0_process_pending_interrupt(struct smu_context *smu)
1026 if (smu->dc_controlled_by_gpio &&
1027 smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
1028 ret = smu_v13_0_allow_ih_interrupt(smu);
1033 int smu_v13_0_enable_thermal_alert(struct smu_context *smu)
1037 if (!smu->irq_source.num_types)
1040 ret = amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
1044 return smu_v13_0_process_pending_interrupt(smu);
1047 int smu_v13_0_disable_thermal_alert(struct smu_context *smu)
1049 if (!smu->irq_source.num_types)
1052 return amdgpu_irq_put(smu->adev, &smu->irq_source, 0);
1060 int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
1062 struct amdgpu_device *adev = smu->adev;
1080 smu_v13_0_display_clock_voltage_request(struct smu_context *smu,
1089 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
1090 smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
1108 dev_info(smu->adev->dev, "[%s] Invalid Clock Type!", __func__);
1116 if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
1119 ret = smu_v13_0_set_hard_freq_limited_range(smu, clk_select, clk_freq, 0);
1122 smu->hard_min_uclk_req_from_dal = clk_freq;
1129 uint32_t smu_v13_0_get_fan_control_mode(struct smu_context *smu)
1131 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
1138 smu_v13_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
1142 if (!smu_cmn_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
1145 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
1147 dev_err(smu->adev->dev, "[%s]%s smc FAN CONTROL feature failed!",
1154 smu_v13_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
1156 struct amdgpu_device *adev = smu->adev;
1168 int smu_v13_0_set_fan_speed_pwm(struct smu_context *smu,
1171 struct amdgpu_device *adev = smu->adev;
1177 if (smu_v13_0_auto_fan_control(smu, 0))
1193 return smu_v13_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
1197 smu_v13_0_set_fan_control_mode(struct smu_context *smu,
1204 ret = smu_v13_0_set_fan_speed_pwm(smu, 255);
1207 ret = smu_v13_0_auto_fan_control(smu, 0);
1210 ret = smu_v13_0_auto_fan_control(smu, 1);
1217 dev_err(smu->adev->dev, "[%s]Set fan control mode failed!", __func__);
1224 int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu,
1227 struct amdgpu_device *adev = smu->adev;
1235 ret = smu_v13_0_auto_fan_control(smu, 0);
1245 return smu_v13_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);
1248 int smu_v13_0_set_xgmi_pstate(struct smu_context *smu,
1252 ret = smu_cmn_send_smc_msg_with_param(smu,
1264 struct smu_context *smu = adev->powerplay.pp_handle;
1287 smu->thermal_range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
1289 smu->thermal_range.software_shutdown_temp);
1324 static int smu_v13_0_ack_ac_dc_interrupt(struct smu_context *smu)
1326 return smu_cmn_send_smc_msg(smu,
1339 struct smu_context *smu = adev->powerplay.pp_handle;
1353 schedule_delayed_work(&smu->swctf_delayed_work,
1381 smu_v13_0_ack_ac_dc_interrupt(smu);
1386 smu_v13_0_ack_ac_dc_interrupt(smu);
1393 atomic64_inc(&smu->throttle_int_counter);
1399 schedule_work(&smu->throttling_logging_work);
1403 high = smu->thermal_range.software_shutdown_temp +
1404 smu->thermal_range.software_shutdown_temp_offset;
1410 smu->thermal_range.software_shutdown_temp_offset);
1422 smu->thermal_range.software_shutdown_temp);
1448 int smu_v13_0_register_irq_handler(struct smu_context *smu)
1450 struct amdgpu_device *adev = smu->adev;
1451 struct amdgpu_irq_src *irq_src = &smu->irq_source;
1488 int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
1491 struct smu_table_context *table_context = &smu->smu_table;
1518 int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu)
1522 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
1527 static int smu_v13_0_wait_for_reset_complete(struct smu_context *smu,
1532 dev_dbg(smu->adev->dev, "waiting for smu reset complete\n");
1533 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GfxDriverResetRecovery, NULL);
1538 int smu_v13_0_wait_for_event(struct smu_context *smu, enum smu_event_type event,
1545 ret = smu_v13_0_wait_for_reset_complete(smu, event_arg);
1554 int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
1561 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) {
1565 clock_limit = smu->smu_table.boot_values.uclk;
1569 clock_limit = smu->smu_table.boot_values.gfxclk;
1572 clock_limit = smu->smu_table.boot_values.socclk;
1588 clk_id = smu_cmn_to_asic_specific_index(smu,
1598 if (smu->adev->pm.ac_power)
1599 ret = smu_cmn_send_smc_msg_with_param(smu,
1604 ret = smu_cmn_send_smc_msg_with_param(smu,
1613 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
1622 int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu,
1630 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
1633 clk_id = smu_cmn_to_asic_specific_index(smu,
1641 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
1649 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
1659 int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu,
1670 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
1673 clk_id = smu_cmn_to_asic_specific_index(smu,
1681 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
1689 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
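Lines 1622-1689 set soft and hard frequency limits with the same message-argument packing: ASIC clock id in the upper 16 bits, frequency in MHz in the lower 16. A sketch of the soft-limit path under that packing assumption (the hard path at 1659-1689 only swaps in the SetHardMax/MinByFreq messages):

    /* Sketch of smu_v13_0_set_soft_freq_limited_range(); min/max of 0
     * are taken to mean "leave this bound alone". */
    int clk_id = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_CLK,
                                                clk_type);
    uint32_t param;
    int ret = 0;

    if (clk_id < 0)
        return clk_id;

    if (max > 0) {
        param = (uint32_t)((clk_id << 16) | (max & 0xffff));
        ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
                                              param, NULL);
        if (ret)
            return ret;
    }

    if (min > 0) {
        param = (uint32_t)((clk_id << 16) | (min & 0xffff));
        ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
                                              param, NULL);
    }

    return ret;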
1698 int smu_v13_0_set_performance_level(struct smu_context *smu,
1702 smu->smu_dpm.dpm_context;
1716 &smu->pstate_table;
1717 struct amdgpu_device *adev = smu->adev;
1791 if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2)) {
1800 ret = smu_v13_0_set_soft_freq_limited_range(smu,
1812 ret = smu_v13_0_set_soft_freq_limited_range(smu,
1824 ret = smu_v13_0_set_soft_freq_limited_range(smu,
1839 ret = smu_v13_0_set_soft_freq_limited_range(smu,
1854 ret = smu_v13_0_set_soft_freq_limited_range(smu,
1866 ret = smu_v13_0_set_soft_freq_limited_range(smu,
1880 int smu_v13_0_set_power_source(struct smu_context *smu,
1885 pwr_source = smu_cmn_to_asic_specific_index(smu,
1891 return smu_cmn_send_smc_msg_with_param(smu,
1897 int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu,
1907 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
1910 clk_id = smu_cmn_to_asic_specific_index(smu,
1918 ret = smu_cmn_send_smc_msg_with_param(smu,
1930 static int smu_v13_0_get_dpm_level_count(struct smu_context *smu,
1936 ret = smu_v13_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
1938 if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 2)) && (!ret && value))
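Line 1936 shows the trick behind smu_v13_0_get_dpm_level_count(): asking GetDpmFreqByIndex for level 0xff returns the number of DPM levels instead of a frequency. A sketch, with the bit-31 masking carried over from other swSMU generations as an assumption:

    /* Sketch: query the DPM level count for a clock domain. */
    int clk_id = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_CLK,
                                                clk_type);
    uint32_t param;
    int ret;

    if (clk_id < 0)
        return clk_id;

    param = (uint32_t)((clk_id << 16) | 0xff);   /* 0xff: level count */
    ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
                                          param, value);
    if (ret)
        return ret;

    /* bit 31 flags fine-grained DPM; the payload is the low 31 bits */
    *value &= 0x7fffffff;
    return 0;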
1944 static int smu_v13_0_get_fine_grained_status(struct smu_context *smu,
1955 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
1958 clk_id = smu_cmn_to_asic_specific_index(smu,
1966 ret = smu_cmn_send_smc_msg_with_param(smu,
1982 int smu_v13_0_set_single_dpm_table(struct smu_context *smu,
1990 ret = smu_v13_0_get_dpm_level_count(smu,
1994 dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__);
1998 if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) != IP_VERSION(13, 0, 2)) {
1999 ret = smu_v13_0_get_fine_grained_status(smu,
2003 dev_err(smu->adev->dev, "[%s] failed to get fine grained status!\n", __func__);
2009 ret = smu_v13_0_get_dpm_freq_by_index(smu,
2014 dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__);
2030 int smu_v13_0_get_current_pcie_link_width_level(struct smu_context *smu)
2032 struct amdgpu_device *adev = smu->adev;
2039 int smu_v13_0_get_current_pcie_link_width(struct smu_context *smu)
2043 width_level = smu_v13_0_get_current_pcie_link_width_level(smu);
2050 int smu_v13_0_get_current_pcie_link_speed_level(struct smu_context *smu)
2052 struct amdgpu_device *adev = smu->adev;
2059 int smu_v13_0_get_current_pcie_link_speed(struct smu_context *smu)
2063 speed_level = smu_v13_0_get_current_pcie_link_speed_level(smu);
2070 int smu_v13_0_set_vcn_enable(struct smu_context *smu,
2073 struct amdgpu_device *adev = smu->adev;
2080 ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
2090 int smu_v13_0_set_jpeg_enable(struct smu_context *smu,
2093 return smu_cmn_send_smc_msg_with_param(smu, enable ?
2098 int smu_v13_0_run_btc(struct smu_context *smu)
2102 res = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
2104 dev_err(smu->adev->dev, "RunDcBtc failed!\n");
2109 int smu_v13_0_gpo_control(struct smu_context *smu,
2114 res = smu_cmn_send_smc_msg_with_param(smu,
2119 dev_err(smu->adev->dev, "SetGpoAllow %d failed!\n", enablement);
2124 int smu_v13_0_deep_sleep_control(struct smu_context *smu,
2127 struct amdgpu_device *adev = smu->adev;
2130 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) {
2131 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement);
2138 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) {
2139 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement);
2146 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) {
2147 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement);
2154 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
2155 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement);
2162 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) {
2163 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, enablement);
2170 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_VCN_BIT)) {
2171 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_VCN_BIT, enablement);
2178 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP0CLK_BIT)) {
2179 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP0CLK_BIT, enablement);
2186 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP1CLK_BIT)) {
2187 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP1CLK_BIT, enablement);
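Lines 2130-2187 repeat one pattern eight times: if a deep-sleep feature bit is supported, toggle it to match enablement. A table-driven condensation of the same logic (the loop is editorial; the source spells each case out):

    /* Condensed sketch of smu_v13_0_deep_sleep_control(). */
    static const enum smu_feature_mask ds_features[] = {
        SMU_FEATURE_DS_GFXCLK_BIT, SMU_FEATURE_DS_UCLK_BIT,
        SMU_FEATURE_DS_FCLK_BIT,   SMU_FEATURE_DS_SOCCLK_BIT,
        SMU_FEATURE_DS_LCLK_BIT,   SMU_FEATURE_DS_VCN_BIT,
        SMU_FEATURE_DS_MP0CLK_BIT, SMU_FEATURE_DS_MP1CLK_BIT,
    };
    int i, ret;

    for (i = 0; i < ARRAY_SIZE(ds_features); i++) {
        if (!smu_cmn_feature_is_supported(smu, ds_features[i]))
            continue;
        ret = smu_cmn_feature_set_enabled(smu, ds_features[i], enablement);
        if (ret)
            return ret;
    }

    return 0;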
2197 int smu_v13_0_gfx_ulv_control(struct smu_context *smu,
2202 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
2203 ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement);
2208 static int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu,
2211 struct smu_baco_context *smu_baco = &smu->smu_baco;
2214 ret = smu_cmn_send_smc_msg_with_param(smu,
2230 static enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu)
2232 struct smu_baco_context *smu_baco = &smu->smu_baco;
2237 static int smu_v13_0_baco_set_state(struct smu_context *smu,
2240 struct smu_baco_context *smu_baco = &smu->smu_baco;
2241 struct amdgpu_device *adev = smu->adev;
2244 if (smu_v13_0_baco_get_state(smu) == state)
2248 ret = smu_cmn_send_smc_msg_with_param(smu,
2254 ret = smu_cmn_send_smc_msg(smu,
2271 bool smu_v13_0_baco_is_support(struct smu_context *smu)
2273 struct smu_baco_context *smu_baco = &smu->smu_baco;
2275 if (amdgpu_sriov_vf(smu->adev) || !smu_baco->platform_support)
2279 if (smu_v13_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
2282 if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
2283 !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
2289 int smu_v13_0_baco_enter(struct smu_context *smu)
2291 struct smu_baco_context *smu_baco = &smu->smu_baco;
2292 struct amdgpu_device *adev = smu->adev;
2296 return smu_v13_0_baco_set_armd3_sequence(smu,
2300 ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_ENTER);
2308 int smu_v13_0_baco_exit(struct smu_context *smu)
2310 struct amdgpu_device *adev = smu->adev;
2316 ret = smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS);
2318 ret = smu_v13_0_baco_set_state(smu, SMU_BACO_STATE_EXIT);
2327 int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu)
2330 struct amdgpu_device *adev = smu->adev;
2333 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableGfxImu,
2337 index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
2339 return smu_cmn_send_msg_without_waiting(smu, index,
2343 int smu_v13_0_od_edit_dpm_table(struct smu_context *smu,
2347 struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
2357 dev_err(smu->adev->dev, "Input parameter number not correct\n");
2362 if (input[1] < smu->gfx_default_hard_min_freq) {
2363 dev_warn(smu->adev->dev,
2365 input[1], smu->gfx_default_hard_min_freq);
2368 smu->gfx_actual_hard_min_freq = input[1];
2370 if (input[1] > smu->gfx_default_soft_max_freq) {
2371 dev_warn(smu->adev->dev,
2373 input[1], smu->gfx_default_soft_max_freq);
2376 smu->gfx_actual_soft_max_freq = input[1];
2383 dev_err(smu->adev->dev, "Input parameter number not correct\n");
2386 smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
2387 smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
2391 dev_err(smu->adev->dev, "Input parameter number not correct\n");
2394 if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
2395 dev_err(smu->adev->dev,
2397 smu->gfx_actual_hard_min_freq,
2398 smu->gfx_actual_soft_max_freq);
2402 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
2403 smu->gfx_actual_hard_min_freq,
2406 dev_err(smu->adev->dev, "Set hard min sclk failed!");
2410 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
2411 smu->gfx_actual_soft_max_freq,
2414 dev_err(smu->adev->dev, "Set soft max sclk failed!");
2425 int smu_v13_0_set_default_dpm_tables(struct smu_context *smu)
2427 struct smu_table_context *smu_table = &smu->smu_table;
2429 return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0,
2433 void smu_v13_0_set_smu_mailbox_registers(struct smu_context *smu)
2435 struct amdgpu_device *adev = smu->adev;
2437 smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
2438 smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
2439 smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
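Lines 2437-2439 select the three MP1 C2PMSG mailbox registers the common message path uses: param_reg carries the argument, msg_reg the message index, resp_reg the firmware's status. The handshake itself lives in smu_cmn.c; a sketch of it with hypothetical helper names and an illustrative poll loop:

    /* Hypothetical helpers sketching the mailbox protocol built on the
     * registers set in smu_v13_0_set_smu_mailbox_registers(). */
    static void smu_v13_0_mailbox_send(struct smu_context *smu,
                                       uint32_t msg_index, uint32_t param)
    {
        struct amdgpu_device *adev = smu->adev;

        WREG32(smu->resp_reg, 0);        /* clear the previous response */
        WREG32(smu->param_reg, param);   /* stage the message argument  */
        WREG32(smu->msg_reg, msg_index); /* writing the index fires it  */
    }

    static uint32_t smu_v13_0_mailbox_wait(struct smu_context *smu)
    {
        struct amdgpu_device *adev = smu->adev;
        uint32_t resp, timeout = 100000; /* illustrative timeout */

        do {
            resp = RREG32(smu->resp_reg);
            if (resp)
                return resp;             /* nonzero: firmware answered */
            udelay(10);
        } while (--timeout);

        return 0;                        /* timed out */
    }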
2442 int smu_v13_0_mode1_reset(struct smu_context *smu)
2446 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
2453 int smu_v13_0_update_pcie_parameters(struct smu_context *smu,
2457 struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
2467 if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) {
2493 ret = smu_cmn_send_smc_msg_with_param(smu,
2504 int smu_v13_0_disable_pmfw_state(struct smu_context *smu)
2507 struct amdgpu_device *adev = smu->adev;
2517 int smu_v13_0_enable_uclk_shadow(struct smu_context *smu, bool enable)
2519 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableUCLKShadow, enable, NULL);
2522 int smu_v13_0_set_wbrf_exclusion_ranges(struct smu_context *smu,
2558 ret = smu_cmn_update_table(smu, SMU_TABLE_WIFIBAND, 0, &wifi_bands, true);
2560 dev_warn(smu->adev->dev, "Failed to set wifiband!");