Lines Matching refs:smu

61 static int smu_force_smuclk_levels(struct smu_context *smu,
64 static int smu_handle_task(struct smu_context *smu,
67 static int smu_reset(struct smu_context *smu);
72 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
78 struct smu_context *smu = handle;
80 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
83 return smu_get_pp_feature_mask(smu, buf);
89 struct smu_context *smu = handle;
91 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
94 return smu_set_pp_feature_mask(smu, new_mask);
97 int smu_set_residency_gfxoff(struct smu_context *smu, bool value)
99 if (!smu->ppt_funcs->set_gfx_off_residency)
102 return smu_set_gfx_off_residency(smu, value);
105 int smu_get_residency_gfxoff(struct smu_context *smu, u32 *value)
107 if (!smu->ppt_funcs->get_gfx_off_residency)
110 return smu_get_gfx_off_residency(smu, value);
113 int smu_get_entrycount_gfxoff(struct smu_context *smu, u64 *value)
115 if (!smu->ppt_funcs->get_gfx_off_entrycount)
118 return smu_get_gfx_off_entrycount(smu, value);
121 int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
123 if (!smu->ppt_funcs->get_gfx_off_status)
126 *value = smu_get_gfx_off_status(smu);
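
The gfxoff helpers above (set_residency, get_residency, get_entrycount, get_status) all share one shape: check that the ASIC-specific ppt_funcs hook is implemented, fail cleanly if it is not, otherwise dispatch to it. A minimal userspace sketch of that optional-callback pattern follows; the widget/widget_ops names and the exact error code are illustrative only, not taken from the driver.

#include <errno.h>

/* Hypothetical ops table mirroring smu->ppt_funcs: every hook is optional. */
struct widget_ops {
	int (*get_status)(unsigned int *value);
};

struct widget {
	const struct widget_ops *ops;
};

/* Guard-and-dispatch wrapper in the style of smu_get_status_gfxoff():
 * bail out with an error when the backend does not implement the hook,
 * otherwise forward to the backend-specific implementation. */
static int widget_get_status(struct widget *w, unsigned int *value)
{
	if (!w->ops || !w->ops->get_status)
		return -EOPNOTSUPP;

	return w->ops->get_status(value);
}

int main(void)
{
	struct widget w = { .ops = NULL };
	unsigned int status;

	/* No backend registered: the wrapper degrades gracefully. */
	return widget_get_status(&w, &status) == -EOPNOTSUPP ? 0 : 1;
}

Keeping every hook optional lets one generic front end serve many ASIC backends; callers only need to handle the "not supported" return.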
131 int smu_set_soft_freq_range(struct smu_context *smu,
138 if (smu->ppt_funcs->set_soft_freq_limited_range)
139 ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
147 int smu_get_dpm_freq_range(struct smu_context *smu,
157 if (smu->ppt_funcs->get_dpm_ultimate_freq)
158 ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
166 int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
169 struct amdgpu_device *adev = smu->adev;
171 if (smu->ppt_funcs->set_gfx_power_up_by_imu) {
172 ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
181 struct smu_context *smu = handle;
185 ret = smu_get_dpm_freq_range(smu, SMU_UCLK,
195 struct smu_context *smu = handle;
199 ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK,
207 static int smu_set_gfx_imu_enable(struct smu_context *smu)
209 struct amdgpu_device *adev = smu->adev;
214 if (amdgpu_in_reset(smu->adev) || adev->in_s0ix)
217 return smu_set_gfx_power_up_by_imu(smu);
234 static int smu_dpm_set_vcn_enable(struct smu_context *smu,
237 struct smu_power_context *smu_power = &smu->smu_power;
244 if (!is_vcn_enabled(smu->adev))
247 if (!smu->ppt_funcs->dpm_set_vcn_enable)
253 ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
260 static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
263 struct smu_power_context *smu_power = &smu->smu_power;
267 if (!is_vcn_enabled(smu->adev))
270 if (!smu->ppt_funcs->dpm_set_jpeg_enable)
276 ret = smu->ppt_funcs->dpm_set_jpeg_enable(smu, enable);
283 static int smu_dpm_set_vpe_enable(struct smu_context *smu,
286 struct smu_power_context *smu_power = &smu->smu_power;
290 if (!smu->ppt_funcs->dpm_set_vpe_enable)
296 ret = smu->ppt_funcs->dpm_set_vpe_enable(smu, enable);
303 static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu,
306 struct smu_power_context *smu_power = &smu->smu_power;
310 if (!smu->adev->enable_umsch_mm)
313 if (!smu->ppt_funcs->dpm_set_umsch_mm_enable)
319 ret = smu->ppt_funcs->dpm_set_umsch_mm_enable(smu, enable);
333 * This API uses no smu->mutex lock protection due to:
337 * Under this case, the smu->mutex lock protection is already enforced on
344 struct smu_context *smu = handle;
347 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) {
348 dev_WARN(smu->adev->dev,
361 ret = smu_dpm_set_vcn_enable(smu, !gate);
363 dev_err(smu->adev->dev, "Failed to power %s VCN!\n",
367 ret = smu_gfx_off_control(smu, gate);
369 dev_err(smu->adev->dev, "Failed to %s gfxoff!\n",
373 ret = smu_powergate_sdma(smu, gate);
375 dev_err(smu->adev->dev, "Failed to power %s SDMA!\n",
379 ret = smu_dpm_set_jpeg_enable(smu, !gate);
381 dev_err(smu->adev->dev, "Failed to power %s JPEG!\n",
385 ret = smu_dpm_set_vpe_enable(smu, !gate);
387 dev_err(smu->adev->dev, "Failed to power %s VPE!\n",
391 dev_err(smu->adev->dev, "Unsupported block type!\n");
401 * @smu: smu_context pointer
406 static void smu_set_user_clk_dependencies(struct smu_context *smu, enum smu_clk_type clk)
408 if (smu->adev->in_suspend)
412 smu->user_dpm_profile.clk_dependency = 0;
413 smu->user_dpm_profile.clk_dependency = BIT(SMU_FCLK) | BIT(SMU_SOCCLK);
416 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
419 smu->user_dpm_profile.clk_dependency = 0;
420 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_SOCCLK);
423 if (smu->user_dpm_profile.clk_dependency == (BIT(SMU_FCLK) | BIT(SMU_SOCCLK)))
426 smu->user_dpm_profile.clk_dependency = 0;
427 smu->user_dpm_profile.clk_dependency = BIT(SMU_MCLK) | BIT(SMU_FCLK);
436 * @smu: smu_context pointer
441 static void smu_restore_dpm_user_profile(struct smu_context *smu)
443 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
446 if (!smu->adev->in_suspend)
449 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
453 smu->user_dpm_profile.flags |= SMU_DPM_USER_PROFILE_RESTORE;
456 if (smu->user_dpm_profile.power_limit) {
457 ret = smu_set_power_limit(smu, smu->user_dpm_profile.power_limit);
459 dev_err(smu->adev->dev, "Failed to set power limit value\n");
468 * Iterate over smu clk type and force the saved user clk
471 if (!(smu->user_dpm_profile.clk_dependency & BIT(clk_type)) &&
472 smu->user_dpm_profile.clk_mask[clk_type]) {
473 ret = smu_force_smuclk_levels(smu, clk_type,
474 smu->user_dpm_profile.clk_mask[clk_type]);
476 dev_err(smu->adev->dev,
483 if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
484 smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
485 ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
487 smu->user_dpm_profile.fan_speed_pwm = 0;
488 smu->user_dpm_profile.fan_speed_rpm = 0;
489 smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
490 dev_err(smu->adev->dev, "Failed to set manual fan control mode\n");
493 if (smu->user_dpm_profile.fan_speed_pwm) {
494 ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
496 dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
499 if (smu->user_dpm_profile.fan_speed_rpm) {
500 ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
502 dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
507 if (smu->user_dpm_profile.user_od) {
508 if (smu->ppt_funcs->restore_user_od_settings) {
509 ret = smu->ppt_funcs->restore_user_od_settings(smu);
511 dev_err(smu->adev->dev, "Failed to upload customized OD settings\n");
516 smu->user_dpm_profile.flags &= ~SMU_DPM_USER_PROFILE_RESTORE;
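
The restore path above replays cached user settings (power limit, clock masks, fan mode and speed, OD table) while SMU_DPM_USER_PROFILE_RESTORE is set, and the individual setters only record new values when that flag is clear. A small self-contained sketch of that save/replay guard, with hypothetical names standing in for the driver structures:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical cache of user settings, loosely modeled on smu->user_dpm_profile. */
struct user_profile {
	bool restoring;        /* stands in for SMU_DPM_USER_PROFILE_RESTORE */
	unsigned int fan_pwm;  /* last fan duty cycle the user requested */
};

/* Setter records the value only when it came from the user, not from replay,
 * mirroring the "!(flags & SMU_DPM_USER_PROFILE_RESTORE)" checks above. */
static int set_fan_pwm(struct user_profile *p, unsigned int pwm)
{
	/* ... program the hardware here ... */
	if (!p->restoring)
		p->fan_pwm = pwm;
	return 0;
}

/* On resume, replay each cached value with the restore flag held so the
 * setters do not re-record what they are replaying. */
static void restore_profile(struct user_profile *p)
{
	p->restoring = true;
	if (p->fan_pwm)
		set_fan_pwm(p, p->fan_pwm);
	p->restoring = false;
}

int main(void)
{
	struct user_profile p = { .restoring = false };

	set_fan_pwm(&p, 128);   /* user request: cached */
	restore_profile(&p);    /* resume: replayed, not re-cached */
	printf("cached pwm: %u\n", p.fan_pwm);
	return 0;
}
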
547 struct smu_context *smu = adev->powerplay.pp_handle;
549 if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
559 struct smu_context *smu = handle;
560 struct smu_table_context *smu_table = &smu->smu_table;
562 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
580 struct smu_context *smu = handle;
581 struct smu_table_context *smu_table = &smu->smu_table;
585 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
589 dev_err(smu->adev->dev, "pp table size not matched!\n");
607 smu->uploading_custom_pp_table = true;
609 ret = smu_reset(smu);
611 dev_info(smu->adev->dev, "smu reset failed, ret = %d\n", ret);
613 smu->uploading_custom_pp_table = false;
618 static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
620 struct smu_feature *feature = &smu->smu_feature;
631 if (smu->adev->scpm_enabled) {
638 ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
652 struct smu_context *smu = adev->powerplay.pp_handle;
655 smu->od_enabled = true;
661 navi10_set_ppt_funcs(smu);
667 sienna_cichlid_set_ppt_funcs(smu);
671 renoir_set_ppt_funcs(smu);
674 vangogh_set_ppt_funcs(smu);
679 yellow_carp_set_ppt_funcs(smu);
683 smu_v13_0_4_set_ppt_funcs(smu);
686 smu_v13_0_5_set_ppt_funcs(smu);
689 cyan_skillfish_set_ppt_funcs(smu);
693 arcturus_set_ppt_funcs(smu);
695 smu->od_enabled = false;
698 aldebaran_set_ppt_funcs(smu);
700 smu->od_enabled = true;
704 smu_v13_0_0_set_ppt_funcs(smu);
707 smu_v13_0_6_set_ppt_funcs(smu);
709 smu->od_enabled = true;
712 smu_v13_0_7_set_ppt_funcs(smu);
716 smu_v14_0_0_set_ppt_funcs(smu);
728 struct smu_context *smu;
731 smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
732 if (!smu)
735 smu->adev = adev;
736 smu->pm_enabled = !!amdgpu_dpm;
737 smu->is_apu = false;
738 smu->smu_baco.state = SMU_BACO_STATE_NONE;
739 smu->smu_baco.platform_support = false;
740 smu->user_dpm_profile.fan_mode = -1;
742 mutex_init(&smu->message_lock);
744 adev->powerplay.pp_handle = smu;
750 return smu_init_microcode(smu);
753 static int smu_set_default_dpm_table(struct smu_context *smu)
755 struct amdgpu_device *adev = smu->adev;
756 struct smu_power_context *smu_power = &smu->smu_power;
761 if (!smu->ppt_funcs->set_default_dpm_table)
770 ret = smu_dpm_set_vcn_enable(smu, true);
776 ret = smu_dpm_set_jpeg_enable(smu, true);
781 ret = smu->ppt_funcs->set_default_dpm_table(smu);
783 dev_err(smu->adev->dev,
787 smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
790 smu_dpm_set_vcn_enable(smu, !vcn_gate);
795 static int smu_apply_default_config_table_settings(struct smu_context *smu)
797 struct amdgpu_device *adev = smu->adev;
800 ret = smu_get_default_config_table_settings(smu,
805 return smu_set_config_table(smu, &adev->pm.config_table);
811 struct smu_context *smu = adev->powerplay.pp_handle;
814 smu_set_fine_grain_gfx_freq_parameters(smu);
816 if (!smu->pm_enabled)
819 ret = smu_post_init(smu);
821 dev_err(adev->dev, "Failed to post smu init!\n");
833 smu_set_ac_dc(smu);
839 if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
840 ret = smu_set_default_od_settings(smu);
847 ret = smu_populate_umd_state_clk(smu);
853 ret = smu_get_asic_power_limits(smu,
854 &smu->current_power_limit,
855 &smu->default_power_limit,
856 &smu->max_power_limit,
857 &smu->min_power_limit);
864 smu_get_unique_id(smu);
866 smu_get_fan_parameters(smu);
868 smu_handle_task(smu,
869 smu->smu_dpm.dpm_level,
872 ret = smu_apply_default_config_table_settings(smu);
878 smu_restore_dpm_user_profile(smu);
883 static int smu_init_fb_allocations(struct smu_context *smu)
885 struct amdgpu_device *adev = smu->adev;
886 struct smu_table_context *smu_table = &smu->smu_table;
949 static int smu_fini_fb_allocations(struct smu_context *smu)
951 struct smu_table_context *smu_table = &smu->smu_table;
970 * @smu: smu_context pointer
977 static int smu_alloc_memory_pool(struct smu_context *smu)
979 struct amdgpu_device *adev = smu->adev;
980 struct smu_table_context *smu_table = &smu->smu_table;
982 uint64_t pool_size = smu->pool_size;
1014 static int smu_free_memory_pool(struct smu_context *smu)
1016 struct smu_table_context *smu_table = &smu->smu_table;
1031 static int smu_alloc_dummy_read_table(struct smu_context *smu)
1033 struct smu_table_context *smu_table = &smu->smu_table;
1036 struct amdgpu_device *adev = smu->adev;
1055 static void smu_free_dummy_read_table(struct smu_context *smu)
1057 struct smu_table_context *smu_table = &smu->smu_table;
1069 static int smu_smc_table_sw_init(struct smu_context *smu)
1077 ret = smu_init_smc_tables(smu);
1079 dev_err(smu->adev->dev, "Failed to init smc tables!\n");
1087 ret = smu_init_power(smu);
1089 dev_err(smu->adev->dev, "Failed to init smu_init_power!\n");
1096 ret = smu_init_fb_allocations(smu);
1100 ret = smu_alloc_memory_pool(smu);
1104 ret = smu_alloc_dummy_read_table(smu);
1108 ret = smu_i2c_init(smu);
1115 static int smu_smc_table_sw_fini(struct smu_context *smu)
1119 smu_i2c_fini(smu);
1121 smu_free_dummy_read_table(smu);
1123 ret = smu_free_memory_pool(smu);
1127 ret = smu_fini_fb_allocations(smu);
1131 ret = smu_fini_power(smu);
1133 dev_err(smu->adev->dev, "Failed to smu_fini_power!\n");
1137 ret = smu_fini_smc_tables(smu);
1139 dev_err(smu->adev->dev, "Failed to smu_fini_smc_tables!\n");
1148 struct smu_context *smu = container_of(work, struct smu_context,
1151 smu_log_thermal_throttling(smu);
1156 struct smu_context *smu = container_of(work, struct smu_context,
1159 if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
1160 smu->ppt_funcs->interrupt_work(smu);
1165 struct smu_context *smu =
1168 &smu->thermal_range;
1169 struct amdgpu_device *adev = smu->adev;
1178 smu->ppt_funcs->read_sensor &&
1179 !smu->ppt_funcs->read_sensor(smu,
1191 static void smu_init_xgmi_plpd_mode(struct smu_context *smu)
1193 if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(11, 0, 2)) {
1194 smu->plpd_mode = XGMI_PLPD_DEFAULT;
1199 if (smu_feature_is_enabled(smu,
1201 smu->plpd_mode = XGMI_PLPD_DEFAULT;
1203 smu->plpd_mode = XGMI_PLPD_NONE;
1209 struct smu_context *smu = adev->powerplay.pp_handle;
1212 smu->pool_size = adev->pm.smu_prv_buffer_size;
1213 smu->smu_feature.feature_num = SMU_FEATURE_MAX;
1214 bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
1215 bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
1217 INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
1218 INIT_WORK(&smu->interrupt_work, smu_interrupt_work_fn);
1219 atomic64_set(&smu->throttle_int_counter, 0);
1220 smu->watermarks_bitmap = 0;
1221 smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
1222 smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
1224 atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
1225 atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
1226 atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
1227 atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
1229 smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
1230 smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
1231 smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
1232 smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
1233 smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
1234 smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
1235 smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
1236 smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
1238 smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
1239 smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
1240 smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
1241 smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
1242 smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
1243 smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
1244 smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
1245 smu->display_config = &adev->pm.pm_display_cfg;
1247 smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
1248 smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
1250 INIT_DELAYED_WORK(&smu->swctf_delayed_work,
1253 ret = smu_smc_table_sw_init(smu);
1260 ret = smu_get_vbios_bootup_values(smu);
1266 ret = smu_init_pptable_microcode(smu);
1272 ret = smu_register_irq_handler(smu);
1279 if (!smu->ppt_funcs->get_fan_control_mode)
1280 smu->adev->pm.no_fan = true;
1288 struct smu_context *smu = adev->powerplay.pp_handle;
1291 ret = smu_smc_table_sw_fini(smu);
1297 smu_fini_microcode(smu);
1302 static int smu_get_thermal_temperature_range(struct smu_context *smu)
1304 struct amdgpu_device *adev = smu->adev;
1306 &smu->thermal_range;
1309 if (!smu->ppt_funcs->get_thermal_temperature_range)
1312 ret = smu->ppt_funcs->get_thermal_temperature_range(smu, range);
1332 * @smu: smu_context pointer
1337 static int smu_wbrf_handle_exclusion_ranges(struct smu_context *smu)
1341 struct amdgpu_device *adev = smu->adev;
1390 ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
1394 ret = smu_set_wbrf_exclusion_ranges(smu, wifi_bands);
1413 struct smu_context *smu = container_of(nb, struct smu_context, wbrf_notifier);
1417 schedule_delayed_work(&smu->wbrf_delayed_work,
1436 struct smu_context *smu = container_of(work, struct smu_context, wbrf_delayed_work.work);
1438 smu_wbrf_handle_exclusion_ranges(smu);
1444 * @smu: smu_context pointer
1448 static void smu_wbrf_support_check(struct smu_context *smu)
1450 struct amdgpu_device *adev = smu->adev;
1452 smu->wbrf_supported = smu_is_asic_wbrf_supported(smu) && amdgpu_wbrf &&
1455 if (smu->wbrf_supported)
1462 * @smu: smu_context pointer
1468 static int smu_wbrf_init(struct smu_context *smu)
1472 if (!smu->wbrf_supported)
1475 INIT_DELAYED_WORK(&smu->wbrf_delayed_work, smu_wbrf_delayed_work_handler);
1477 smu->wbrf_notifier.notifier_call = smu_wbrf_event_handler;
1478 ret = amd_wbrf_register_notifier(&smu->wbrf_notifier);
1487 schedule_delayed_work(&smu->wbrf_delayed_work,
1496 * @smu: smu_context pointer
1500 static void smu_wbrf_fini(struct smu_context *smu)
1502 if (!smu->wbrf_supported)
1505 amd_wbrf_unregister_notifier(&smu->wbrf_notifier);
1507 cancel_delayed_work_sync(&smu->wbrf_delayed_work);
1510 static int smu_smc_hw_setup(struct smu_context *smu)
1512 struct smu_feature *feature = &smu->smu_feature;
1513 struct amdgpu_device *adev = smu->adev;
1523 if (adev->in_suspend && smu_is_dpm_running(smu)) {
1525 ret = smu_system_features_control(smu, true);
1535 ret = smu_init_display_count(smu, 0);
1541 ret = smu_set_driver_table_location(smu);
1550 ret = smu_set_tool_table_location(smu);
1560 ret = smu_notify_memory_pool_location(smu);
1572 ret = smu_setup_pptable(smu);
1579 /* smu_dump_pptable(smu); */
1590 ret = smu_write_pptable(smu);
1598 ret = smu_run_btc(smu);
1603 if (smu->wbrf_supported) {
1604 ret = smu_enable_uclk_shadow(smu, true);
1616 ret = smu_feature_set_allowed_mask(smu);
1623 ret = smu_system_features_control(smu, true);
1629 smu_init_xgmi_plpd_mode(smu);
1631 ret = smu_feature_get_enabled_mask(smu, &features_supported);
1640 if (!smu_is_dpm_running(smu))
1648 ret = smu_set_default_dpm_table(smu);
1679 ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);
1685 ret = smu_get_thermal_temperature_range(smu);
1691 ret = smu_enable_thermal_alert(smu);
1697 ret = smu_notify_display_change(smu);
1707 ret = smu_set_min_dcef_deep_sleep(smu,
1708 smu->smu_table.boot_values.dcefclk / 100);
1715 ret = smu_wbrf_init(smu);
1722 static int smu_start_smc_engine(struct smu_context *smu)
1724 struct amdgpu_device *adev = smu->adev;
1729 if (smu->ppt_funcs->load_microcode) {
1730 ret = smu->ppt_funcs->load_microcode(smu);
1737 if (smu->ppt_funcs->check_fw_status) {
1738 ret = smu->ppt_funcs->check_fw_status(smu);
1749 ret = smu_check_fw_version(smu);
1760 struct smu_context *smu = adev->powerplay.pp_handle;
1763 smu->pm_enabled = false;
1767 ret = smu_start_smc_engine(smu);
1778 smu_wbrf_support_check(smu);
1780 if (smu->is_apu) {
1781 ret = smu_set_gfx_imu_enable(smu);
1784 smu_dpm_set_vcn_enable(smu, true);
1785 smu_dpm_set_jpeg_enable(smu, true);
1786 smu_dpm_set_vpe_enable(smu, true);
1787 smu_dpm_set_umsch_mm_enable(smu, true);
1788 smu_set_gfx_cgpg(smu, true);
1791 if (!smu->pm_enabled)
1794 ret = smu_get_driver_allowed_feature_mask(smu);
1798 ret = smu_smc_hw_setup(smu);
1811 ret = smu_init_max_sustainable_clocks(smu);
1824 static int smu_disable_dpms(struct smu_context *smu)
1826 struct amdgpu_device *adev = smu->adev;
1828 bool use_baco = !smu->is_apu &&
1857 if (smu->uploading_custom_pp_table) {
1910 if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
1911 ret = smu_disable_all_features_with_exception(smu,
1914 dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
1918 ret = smu_system_features_control(smu, false);
1920 dev_err(adev->dev, "Failed to disable smu features.\n");
1928 ret = smu_notify_rlc_state(smu, false);
1942 static int smu_smc_hw_cleanup(struct smu_context *smu)
1944 struct amdgpu_device *adev = smu->adev;
1947 smu_wbrf_fini(smu);
1949 cancel_work_sync(&smu->throttling_logging_work);
1950 cancel_work_sync(&smu->interrupt_work);
1952 ret = smu_disable_thermal_alert(smu);
1958 cancel_delayed_work_sync(&smu->swctf_delayed_work);
1960 ret = smu_disable_dpms(smu);
1969 static int smu_reset_mp1_state(struct smu_context *smu)
1971 struct amdgpu_device *adev = smu->adev;
1978 ret = smu_set_mp1_state(smu, PP_MP1_STATE_UNLOAD);
1986 struct smu_context *smu = adev->powerplay.pp_handle;
1992 smu_dpm_set_vcn_enable(smu, false);
1993 smu_dpm_set_jpeg_enable(smu, false);
1994 smu_dpm_set_vpe_enable(smu, false);
1995 smu_dpm_set_umsch_mm_enable(smu, false);
2000 if (!smu->pm_enabled)
2005 ret = smu_smc_hw_cleanup(smu);
2009 ret = smu_reset_mp1_state(smu);
2019 struct smu_context *smu = adev->powerplay.pp_handle;
2021 kfree(smu);
2024 static int smu_reset(struct smu_context *smu)
2026 struct amdgpu_device *adev = smu->adev;
2047 struct smu_context *smu = adev->powerplay.pp_handle;
2054 if (!smu->pm_enabled)
2059 ret = smu_smc_hw_cleanup(smu);
2063 smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
2065 smu_set_gfx_cgpg(smu, false);
2071 ret = smu_get_entrycount_gfxoff(smu, &count);
2082 struct smu_context *smu = adev->powerplay.pp_handle;
2087 if (!smu->pm_enabled)
2092 ret = smu_start_smc_engine(smu);
2098 ret = smu_smc_hw_setup(smu);
2104 ret = smu_set_gfx_imu_enable(smu);
2108 smu_set_gfx_cgpg(smu, true);
2110 smu->disable_uclk_switch = 0;
2122 struct smu_context *smu = handle;
2124 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2130 smu_set_min_dcef_deep_sleep(smu,
2156 struct smu_context *smu = (struct smu_context*)(handle);
2157 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2159 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2166 smu_gpo_control(smu, false);
2167 smu_gfx_ulv_control(smu, false);
2168 smu_deep_sleep_control(smu, false);
2169 amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
2176 amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
2177 smu_deep_sleep_control(smu, true);
2178 smu_gfx_ulv_control(smu, true);
2179 smu_gpo_control(smu, true);
2186 static int smu_bump_power_profile_mode(struct smu_context *smu,
2192 if (smu->ppt_funcs->set_power_profile_mode)
2193 ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);
2198 static int smu_adjust_power_state_dynamic(struct smu_context *smu,
2205 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2208 ret = smu_display_config_changed(smu);
2210 dev_err(smu->adev->dev, "Failed to change display config!");
2215 ret = smu_apply_clocks_adjust_rules(smu);
2217 dev_err(smu->adev->dev, "Failed to apply clocks adjust rules!");
2222 ret = smu_notify_smc_display_config(smu);
2224 dev_err(smu->adev->dev, "Failed to notify smc display config!");
2230 ret = smu_asic_set_performance_level(smu, level);
2232 dev_err(smu->adev->dev, "Failed to set performance level!");
2242 index = fls(smu->workload_mask);
2244 workload = smu->workload_setting[index];
2246 if (smu->power_profile_mode != workload)
2247 smu_bump_power_profile_mode(smu, &workload, 0);
2253 static int smu_handle_task(struct smu_context *smu,
2259 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2264 ret = smu_pre_display_config_changed(smu);
2267 ret = smu_adjust_power_state_dynamic(smu, level, false);
2271 ret = smu_adjust_power_state_dynamic(smu, level, true);
2284 struct smu_context *smu = handle;
2285 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
2287 return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
2295 struct smu_context *smu = handle;
2296 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2300 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2307 smu->workload_mask &= ~(1 << smu->workload_prority[type]);
2308 index = fls(smu->workload_mask);
2310 workload = smu->workload_setting[index];
2312 smu->workload_mask |= (1 << smu->workload_prority[type]);
2313 index = fls(smu->workload_mask);
2315 workload = smu->workload_setting[index];
2320 smu_bump_power_profile_mode(smu, &workload, 0);
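
The workload bookkeeping seen above (workload_prority, workload_setting, workload_mask, and the fls()-based index around source lines 2242-2247 and 2307-2315) boils down to: each profile owns one bit, requests set or clear that bit, and the active profile is whichever bit of highest priority is still set. A compilable stand-alone illustration, with __builtin_clz standing in for the kernel's fls() and illustrative profile names:

#include <stdio.h>

enum profile { PROF_DEFAULT, PROF_FULLSCREEN3D, PROF_POWERSAVING, PROF_COUNT };

static const int priority[PROF_COUNT] = { 0, 1, 2 };
static const enum profile setting[PROF_COUNT] = {
	PROF_DEFAULT, PROF_FULLSCREEN3D, PROF_POWERSAVING,
};

/* Userspace stand-in for the kernel's fls(): 1-based index of the highest set bit. */
static int fls_stub(unsigned int mask)
{
	return mask ? 32 - __builtin_clz(mask) : 0;
}

/* Map the highest set bit back through the setting table, as the driver does
 * with workload = smu->workload_setting[fls(smu->workload_mask) - 1]. */
static enum profile active_profile(unsigned int mask)
{
	int index = fls_stub(mask);

	return setting[index > 0 ? index - 1 : 0];
}

int main(void)
{
	unsigned int mask = 1u << priority[PROF_DEFAULT];

	mask |= 1u << priority[PROF_POWERSAVING];     /* user asks for POWERSAVING */
	printf("active profile: %d\n", active_profile(mask));   /* prints 2 */

	mask &= ~(1u << priority[PROF_POWERSAVING]);  /* request dropped */
	printf("active profile: %d\n", active_profile(mask));   /* back to 0 */
	return 0;
}
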
2327 struct smu_context *smu = handle;
2328 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2330 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2333 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2342 struct smu_context *smu = handle;
2343 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2346 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2349 if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
2352 ret = smu_enable_umd_pstate(smu, &level);
2356 ret = smu_handle_task(smu, level,
2361 memset(smu->user_dpm_profile.clk_mask, 0, sizeof(smu->user_dpm_profile.clk_mask));
2362 smu->user_dpm_profile.clk_dependency = 0;
2370 struct smu_context *smu = handle;
2372 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2375 return smu_init_display_count(smu, count);
2378 static int smu_force_smuclk_levels(struct smu_context *smu,
2382 struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
2385 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2389 dev_dbg(smu->adev->dev, "force clock level is for dpm manual mode only.\n");
2393 if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
2394 ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
2395 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2396 smu->user_dpm_profile.clk_mask[clk_type] = mask;
2397 smu_set_user_clk_dependencies(smu, clk_type);
2408 struct smu_context *smu = handle;
2444 return smu_force_smuclk_levels(smu, clk_type, mask);
2457 struct smu_context *smu = handle;
2460 if (!smu->pm_enabled)
2463 if (smu->ppt_funcs &&
2464 smu->ppt_funcs->set_mp1_state)
2465 ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
2473 struct smu_context *smu = handle;
2476 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2479 if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
2482 ret = smu->ppt_funcs->set_df_cstate(smu, state);
2484 dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
2489 int smu_write_watermarks_table(struct smu_context *smu)
2491 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2494 return smu_set_watermarks_table(smu, NULL);
2500 struct smu_context *smu = handle;
2502 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2505 if (smu->disable_watermark)
2508 return smu_set_watermarks_table(smu, clock_ranges);
2511 int smu_set_ac_dc(struct smu_context *smu)
2515 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2519 if (smu->dc_controlled_by_gpio)
2522 ret = smu_set_power_source(smu,
2523 smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
2526 dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
2527 smu->adev->pm.ac_power ? "AC" : "DC");
2533 .name = "smu",
2585 struct smu_context *smu = handle;
2586 struct amdgpu_device *adev = smu->adev;
2589 if (!smu->pm_enabled)
2596 if (smu->ppt_funcs->load_microcode) {
2597 ret = smu->ppt_funcs->load_microcode(smu);
2604 if (smu->ppt_funcs->check_fw_status) {
2605 ret = smu->ppt_funcs->check_fw_status(smu);
2615 static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
2619 if (smu->ppt_funcs->set_gfx_cgpg)
2620 ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
2627 struct smu_context *smu = handle;
2630 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2633 if (!smu->ppt_funcs->set_fan_speed_rpm)
2639 ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
2640 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
2641 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
2642 smu->user_dpm_profile.fan_speed_rpm = speed;
2645 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
2646 smu->user_dpm_profile.fan_speed_pwm = 0;
2655 * @handle: pointer to smu context
2667 struct smu_context *smu = handle;
2668 struct amdgpu_device *adev = smu->adev;
2673 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2705 if (smu->ppt_funcs->get_ppt_limit)
2706 ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
2717 ret = smu_get_asic_power_limits(smu,
2718 &smu->current_power_limit,
2724 *limit = smu->current_power_limit;
2727 *limit = smu->default_power_limit;
2730 *limit = smu->max_power_limit;
2733 *limit = smu->min_power_limit;
2745 struct smu_context *smu = handle;
2749 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2754 if (smu->ppt_funcs->set_power_limit)
2755 return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2757 if ((limit > smu->max_power_limit) || (limit < smu->min_power_limit)) {
2758 dev_err(smu->adev->dev,
2760 limit, smu->min_power_limit, smu->max_power_limit);
2765 limit = smu->current_power_limit;
2767 if (smu->ppt_funcs->set_power_limit) {
2768 ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
2769 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
2770 smu->user_dpm_profile.power_limit = limit;
2776 static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
2780 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2783 if (smu->ppt_funcs->print_clk_levels)
2784 ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
2847 struct smu_context *smu = handle;
2854 return smu_print_smuclk_levels(smu, clk_type, buf);
2859 struct smu_context *smu = handle;
2866 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2869 if (!smu->ppt_funcs->emit_clk_levels)
2872 return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset);
2880 struct smu_context *smu = handle;
2883 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2886 if (smu->ppt_funcs->od_edit_dpm_table) {
2887 ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
2898 struct smu_context *smu = handle;
2900 &smu->pstate_table;
2904 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
2913 if (smu->ppt_funcs->read_sensor)
2914 if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
2935 ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
2939 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
2943 *(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
2947 *(uint32_t *)data = atomic_read(&smu->smu_power.power_gate.vcn_gated) ? 0 : 1;
2970 struct smu_context *smu = handle;
2972 if (smu->ppt_funcs && smu->ppt_funcs->get_apu_thermal_limit)
2973 ret = smu->ppt_funcs->get_apu_thermal_limit(smu, limit);
2981 struct smu_context *smu = handle;
2983 if (smu->ppt_funcs && smu->ppt_funcs->set_apu_thermal_limit)
2984 ret = smu->ppt_funcs->set_apu_thermal_limit(smu, limit);
2991 struct smu_context *smu = handle;
2993 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
2994 !smu->ppt_funcs->get_power_profile_mode)
2999 return smu->ppt_funcs->get_power_profile_mode(smu, buf);
3006 struct smu_context *smu = handle;
3008 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
3009 !smu->ppt_funcs->set_power_profile_mode)
3012 return smu_bump_power_profile_mode(smu, param, param_size);
3017 struct smu_context *smu = handle;
3019 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3022 if (!smu->ppt_funcs->get_fan_control_mode)
3028 *fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);
3035 struct smu_context *smu = handle;
3038 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3041 if (!smu->ppt_funcs->set_fan_control_mode)
3047 ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
3051 if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
3052 smu->user_dpm_profile.fan_mode = value;
3056 smu->user_dpm_profile.fan_speed_pwm = 0;
3057 smu->user_dpm_profile.fan_speed_rpm = 0;
3058 smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
3068 struct smu_context *smu = handle;
3071 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3074 if (!smu->ppt_funcs->get_fan_speed_pwm)
3080 ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
3087 struct smu_context *smu = handle;
3090 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3093 if (!smu->ppt_funcs->set_fan_speed_pwm)
3099 ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
3100 if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
3101 smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
3102 smu->user_dpm_profile.fan_speed_pwm = speed;
3105 smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
3106 smu->user_dpm_profile.fan_speed_rpm = 0;
3114 struct smu_context *smu = handle;
3117 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3120 if (!smu->ppt_funcs->get_fan_speed_rpm)
3126 ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
3133 struct smu_context *smu = handle;
3135 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3138 return smu_set_min_dcef_deep_sleep(smu, clk);
3145 struct smu_context *smu = handle;
3149 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3152 if (smu->ppt_funcs->get_clock_by_type_with_latency) {
3167 dev_err(smu->adev->dev, "Invalid clock type!\n");
3171 ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
3180 struct smu_context *smu = handle;
3183 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3186 if (smu->ppt_funcs->display_clock_voltage_request)
3187 ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
3196 struct smu_context *smu = handle;
3199 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3202 if (smu->ppt_funcs->display_disable_memory_clock_switch)
3203 ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
3211 struct smu_context *smu = handle;
3214 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3217 if (smu->ppt_funcs->set_xgmi_pstate)
3218 ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
3221 dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
3228 struct smu_context *smu = handle;
3230 if (!smu->pm_enabled)
3233 if (!smu->ppt_funcs || !smu->ppt_funcs->baco_is_support)
3236 return smu->ppt_funcs->baco_is_support(smu);
3241 struct smu_context *smu = handle;
3244 if (!smu->pm_enabled)
3248 if (smu->ppt_funcs->baco_exit)
3249 ret = smu->ppt_funcs->baco_exit(smu);
3251 if (smu->ppt_funcs->baco_enter)
3252 ret = smu->ppt_funcs->baco_enter(smu);
3258 dev_err(smu->adev->dev, "Failed to %s BACO state!\n",
3264 bool smu_mode1_reset_is_support(struct smu_context *smu)
3268 if (!smu->pm_enabled)
3271 if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
3272 ret = smu->ppt_funcs->mode1_reset_is_support(smu);
3277 bool smu_mode2_reset_is_support(struct smu_context *smu)
3281 if (!smu->pm_enabled)
3284 if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
3285 ret = smu->ppt_funcs->mode2_reset_is_support(smu);
3290 int smu_mode1_reset(struct smu_context *smu)
3294 if (!smu->pm_enabled)
3297 if (smu->ppt_funcs->mode1_reset)
3298 ret = smu->ppt_funcs->mode1_reset(smu);
3305 struct smu_context *smu = handle;
3308 if (!smu->pm_enabled)
3311 if (smu->ppt_funcs->mode2_reset)
3312 ret = smu->ppt_funcs->mode2_reset(smu);
3315 dev_err(smu->adev->dev, "Mode2 reset failed!\n");
3322 struct smu_context *smu = handle;
3325 if (!smu->pm_enabled)
3328 if (smu->ppt_funcs->enable_gfx_features)
3329 ret = smu->ppt_funcs->enable_gfx_features(smu);
3332 dev_err(smu->adev->dev, "enable gfx features failed!\n");
3340 struct smu_context *smu = handle;
3343 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3346 if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
3347 ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
3356 struct smu_context *smu = handle;
3359 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3362 if (smu->ppt_funcs->get_uclk_dpm_states)
3363 ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
3370 struct smu_context *smu = handle;
3373 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3376 if (smu->ppt_funcs->get_current_power_state)
3377 pm_state = smu->ppt_funcs->get_current_power_state(smu);
3385 struct smu_context *smu = handle;
3388 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3391 if (smu->ppt_funcs->get_dpm_clock_table)
3392 ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
3399 struct smu_context *smu = handle;
3401 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3404 if (!smu->ppt_funcs->get_gpu_metrics)
3407 return smu->ppt_funcs->get_gpu_metrics(smu, table);
3413 struct smu_context *smu = handle;
3415 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3418 if (!smu->ppt_funcs->get_pm_metrics)
3421 return smu->ppt_funcs->get_pm_metrics(smu, pm_metrics, size);
3426 struct smu_context *smu = handle;
3429 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3432 if (smu->ppt_funcs->enable_mgpu_fan_boost)
3433 ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
3441 struct smu_context *smu = handle;
3444 if (smu->ppt_funcs->gfx_state_change_set)
3445 ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
3450 int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
3454 if (smu->ppt_funcs->smu_handle_passthrough_sbr)
3455 ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);
3460 int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
3464 if (smu->ppt_funcs &&
3465 smu->ppt_funcs->get_ecc_info)
3466 ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);
3474 struct smu_context *smu = handle;
3475 struct smu_table_context *smu_table = &smu->smu_table;
3491 int smu_set_xgmi_plpd_mode(struct smu_context *smu,
3496 if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
3500 if (smu->plpd_mode == XGMI_PLPD_NONE)
3503 if (smu->plpd_mode == mode)
3506 if (smu->ppt_funcs && smu->ppt_funcs->select_xgmi_plpd_policy)
3507 ret = smu->ppt_funcs->select_xgmi_plpd_policy(smu, mode);
3510 smu->plpd_mode = mode;
3574 int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
3579 if (smu->ppt_funcs->wait_for_event)
3580 ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);
3585 int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
3588 if (!smu->ppt_funcs->stb_collect_info || !smu->stb_context.enabled)
3592 if (size != smu->stb_context.stb_buf_size)
3596 * No need to lock smu mutex as we access STB directly through MMIO
3600 return smu->ppt_funcs->stb_collect_info(smu, buf, size);
3608 struct smu_context *smu = adev->powerplay.pp_handle;
3612 buf = kvmalloc_array(smu->stb_context.stb_buf_size, sizeof(*buf), GFP_KERNEL);
3616 r = smu_stb_collect_info(smu, buf, smu->stb_context.stb_buf_size);
3633 struct smu_context *smu = adev->powerplay.pp_handle;
3642 smu->stb_context.stb_buf_size);
3674 struct smu_context *smu = adev->powerplay.pp_handle;
3676 if (!smu || (!smu->stb_context.stb_buf_size))
3684 smu->stb_context.stb_buf_size);
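
smu_stb_collect_info() above refuses to copy unless the caller's buffer size matches stb_context.stb_buf_size exactly, and the neighbouring comment notes that no SMU mutex is needed because the data is read straight from MMIO. A minimal sketch of that size-checked snapshot copy, with hypothetical names in place of the driver's STB context:

#include <errno.h>
#include <string.h>

/* Hypothetical snapshot buffer, loosely modeled on smu->stb_context: data is
 * read out of an MMIO-backed region, and the caller must supply a buffer of
 * exactly the advertised size. */
struct snapshot_ctx {
	int enabled;
	size_t buf_size;
	const unsigned char *mmio_shadow;   /* stands in for the STB MMIO window */
};

static int snapshot_collect(const struct snapshot_ctx *ctx, void *buf, size_t size)
{
	if (!ctx->enabled)
		return -EOPNOTSUPP;

	/* Reject mismatched sizes up front rather than truncating silently. */
	if (size != ctx->buf_size)
		return -EINVAL;

	memcpy(buf, ctx->mmio_shadow, size);
	return 0;
}

int main(void)
{
	static const unsigned char stb[16];
	unsigned char out[16];
	struct snapshot_ctx ctx = {
		.enabled = 1,
		.buf_size = sizeof(stb),
		.mmio_shadow = stb,
	};

	return snapshot_collect(&ctx, out, sizeof(out));
}
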
3688 int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
3692 if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
3693 ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);
3698 int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
3702 if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag)
3703 ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size);
3708 int smu_send_rma_reason(struct smu_context *smu)
3712 if (smu->ppt_funcs && smu->ppt_funcs->send_rma_reason)
3713 ret = smu->ppt_funcs->send_rma_reason(smu);