Lines Matching refs:smu

227 static int aldebaran_tables_init(struct smu_context *smu)
229 struct smu_table_context *smu_table = &smu->smu_table;
269 static int aldebaran_allocate_dpm_context(struct smu_context *smu)
271 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
282 static int aldebaran_init_smc_tables(struct smu_context *smu)
286 ret = aldebaran_tables_init(smu);
290 ret = aldebaran_allocate_dpm_context(smu);
294 return smu_v13_0_init_smc_tables(smu);
297 static int aldebaran_get_allowed_feature_mask(struct smu_context *smu,
309 static int aldebaran_set_default_dpm_table(struct smu_context *smu)
311 struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
313 PPTable_t *pptable = smu->smu_table.driver_pptable;
318 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
319 ret = smu_v13_0_set_single_dpm_table(smu,
326 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.socclk / 100;
334 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT)) {
345 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.gfxclk / 100;
353 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
354 ret = smu_v13_0_set_single_dpm_table(smu,
361 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.uclk / 100;
369 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_FCLK_BIT)) {
370 ret = smu_v13_0_set_single_dpm_table(smu,
377 dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.fclk / 100;
386 static int aldebaran_check_powerplay_table(struct smu_context *smu)
388 struct smu_table_context *table_context = &smu->smu_table;
398 static int aldebaran_store_powerplay_table(struct smu_context *smu)
400 struct smu_table_context *table_context = &smu->smu_table;
409 static int aldebaran_append_powerplay_table(struct smu_context *smu)
411 struct smu_table_context *table_context = &smu->smu_table;
419 ret = amdgpu_atombios_get_data_table(smu->adev, index, NULL, NULL, NULL,
424 dev_info(smu->adev->dev, "smc_dpm_info table revision(format.content): %d.%d\n",
435 static int aldebaran_setup_pptable(struct smu_context *smu)
440 smu->smu_table.boot_values.pp_table_id = 0;
442 ret = smu_v13_0_setup_pptable(smu);
446 ret = aldebaran_store_powerplay_table(smu);
450 ret = aldebaran_append_powerplay_table(smu);
454 ret = aldebaran_check_powerplay_table(smu);
461 static bool aldebaran_is_primary(struct smu_context *smu)
463 struct amdgpu_device *adev = smu->adev;
471 static int aldebaran_run_board_btc(struct smu_context *smu)
475 if (!aldebaran_is_primary(smu))
478 if (smu->smc_fw_version <= 0x00441d00)
481 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_BoardPowerCalibration, NULL);
483 dev_err(smu->adev->dev, "Board power calibration failed!\n");
488 static int aldebaran_run_btc(struct smu_context *smu)
492 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
494 dev_err(smu->adev->dev, "RunDcBtc failed!\n");
496 ret = aldebaran_run_board_btc(smu);
501 static int aldebaran_populate_umd_state_clk(struct smu_context *smu)
504 smu->smu_dpm.dpm_context;
512 &smu->pstate_table;
550 static void aldebaran_get_clk_table(struct smu_context *smu,
574 static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
578 struct smu_table_context *smu_table = &smu->smu_table;
582 ret = smu_cmn_get_metrics_table(smu,
624 if (aldebaran_is_primary(smu))
670 static int aldebaran_get_current_clk_freq_by_table(struct smu_context *smu,
680 clk_id = smu_cmn_to_asic_specific_index(smu,
694 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT))
700 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
706 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT))
712 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
718 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT))
730 return aldebaran_get_smu_metrics_data(smu,
735 static int aldebaran_emit_clk_levels(struct smu_context *smu,
739 struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
742 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
768 ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, &cur_value);
770 dev_err(smu->adev->dev, "%s gfx clk Failed!", attempt_string);
775 aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
800 ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, &cur_value);
802 dev_err(smu->adev->dev, "%s mclk Failed!", attempt_string);
807 aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
811 ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_SOCCLK, &cur_value);
813 dev_err(smu->adev->dev, "%s socclk Failed!", attempt_string);
818 aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
822 ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_FCLK, &cur_value);
824 dev_err(smu->adev->dev, "%s fclk Failed!", attempt_string);
829 aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
833 ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_VCLK, &cur_value);
835 dev_err(smu->adev->dev, "%s vclk Failed!", attempt_string);
840 aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
844 ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_DCLK, &cur_value);
846 dev_err(smu->adev->dev, "%s dclk Failed!", attempt_string);
851 aldebaran_get_clk_table(smu, &clocks, single_dpm_table);
893 static int aldebaran_upload_dpm_level(struct smu_context *smu,
899 smu->smu_dpm.dpm_context;
903 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
906 ret = smu_cmn_send_smc_msg_with_param(smu,
911 dev_err(smu->adev->dev, "Failed to set soft %s gfxclk !\n",
917 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
920 ret = smu_cmn_send_smc_msg_with_param(smu,
925 dev_err(smu->adev->dev, "Failed to set soft %s memclk !\n",
931 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT) &&
934 ret = smu_cmn_send_smc_msg_with_param(smu,
939 dev_err(smu->adev->dev, "Failed to set soft %s socclk !\n",
948 static int aldebaran_force_clk_levels(struct smu_context *smu,
951 struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
963 dev_err(smu->adev->dev, "Clock level specified %d is over max allowed %d\n",
969 ret = aldebaran_upload_dpm_level(smu,
974 dev_err(smu->adev->dev, "Failed to upload boot level to lowest!\n");
978 ret = aldebaran_upload_dpm_level(smu,
983 dev_err(smu->adev->dev, "Failed to upload dpm max level to highest!\n");
1004 static int aldebaran_get_thermal_temperature_range(struct smu_context *smu,
1007 struct smu_table_context *table_context = &smu->smu_table;
1010 PPTable_t *pptable = smu->smu_table.driver_pptable;
1030 static int aldebaran_get_current_activity_percent(struct smu_context *smu,
1041 ret = aldebaran_get_smu_metrics_data(smu,
1046 ret = aldebaran_get_smu_metrics_data(smu,
1051 dev_err(smu->adev->dev, "Invalid sensor for retrieving clock activity\n");
1058 static int aldebaran_thermal_get_temperature(struct smu_context *smu,
1069 ret = aldebaran_get_smu_metrics_data(smu,
1074 ret = aldebaran_get_smu_metrics_data(smu,
1079 ret = aldebaran_get_smu_metrics_data(smu,
1084 dev_err(smu->adev->dev, "Invalid sensor for retrieving temp\n");
1091 static int aldebaran_read_sensor(struct smu_context *smu,
1106 ret = aldebaran_get_current_activity_percent(smu,
1112 ret = aldebaran_get_smu_metrics_data(smu,
1120 ret = aldebaran_thermal_get_temperature(smu, sensor,
1125 ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_UCLK, (uint32_t *)data);
1131 ret = aldebaran_get_current_clk_freq_by_table(smu, SMU_GFXCLK, (uint32_t *)data);
1136 ret = smu_v13_0_get_gfx_vdd(smu, (uint32_t *)data);
1148 static int aldebaran_get_power_limit(struct smu_context *smu,
1154 PPTable_t *pptable = smu->smu_table.driver_pptable;
1158 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
1167 dev_warn(smu->adev->dev,
1176 if (aldebaran_is_primary(smu)) {
1177 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetPptLimit,
1183 dev_err(smu->adev->dev,
1207 static int aldebaran_set_power_limit(struct smu_context *smu,
1212 if (aldebaran_is_primary(smu))
1213 return smu_v13_0_set_power_limit(smu, limit_type, limit);
1218 static int aldebaran_system_features_control(struct smu_context *smu, bool enable)
1222 ret = smu_v13_0_system_features_control(smu, enable);
1224 ret = aldebaran_run_btc(smu);
1229 static int aldebaran_set_performance_level(struct smu_context *smu,
1232 struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
1236 struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
1241 smu_cmn_send_smc_msg(smu, SMU_MSG_DisableDeterminism, NULL);
1260 return smu_v13_0_set_performance_level(smu, level);
1263 static int aldebaran_set_soft_freq_limited_range(struct smu_context *smu,
1268 struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
1270 struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
1271 struct amdgpu_device *adev = smu->adev;
1285 dev_err(smu->adev->dev,
1294 ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK,
1315 ret = smu_v13_0_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
1318 ret = smu_cmn_send_smc_msg_with_param(smu,
1334 static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
1337 struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
1339 struct smu_umd_pstate_table *pstate_table = &smu->pstate_table;
1352 dev_err(smu->adev->dev, "Input parameter number not correct\n");
1358 dev_warn(smu->adev->dev, "Minimum GFX clk (%ld) MHz specified is less than the minimum allowed (%d) MHz\n",
1368 dev_warn(smu->adev->dev, "Maximum GFX clk (%ld) MHz specified is greater than the maximum allowed (%d) MHz\n",
1382 dev_err(smu->adev->dev, "Input parameter number not correct\n");
1389 return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
1394 dev_err(smu->adev->dev, "Input parameter number not correct\n");
1408 return aldebaran_set_soft_freq_limited_range(smu, SMU_GFXCLK, min_clk, max_clk);
1418 static bool aldebaran_is_dpm_running(struct smu_context *smu)
1423 ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
1434 struct smu_context *smu = adev->powerplay.pp_handle;
1435 struct smu_table_context *smu_table = &smu->smu_table;
1485 r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
1526 static int aldebaran_i2c_control_init(struct smu_context *smu)
1528 struct amdgpu_device *adev = smu->adev;
1559 static void aldebaran_i2c_control_fini(struct smu_context *smu)
1561 struct amdgpu_device *adev = smu->adev;
1574 static void aldebaran_get_unique_id(struct smu_context *smu)
1576 struct amdgpu_device *adev = smu->adev;
1579 if (aldebaran_get_smu_metrics_data(smu, METRICS_UNIQUE_ID_UPPER32, &upper32))
1581 if (aldebaran_get_smu_metrics_data(smu, METRICS_UNIQUE_ID_LOWER32, &lower32))
1588 static int aldebaran_get_bamaco_support(struct smu_context *smu)
1595 static int aldebaran_set_df_cstate(struct smu_context *smu,
1598 struct amdgpu_device *adev = smu->adev;
1607 return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
1610 static int aldebaran_select_xgmi_plpd_policy(struct smu_context *smu,
1613 struct amdgpu_device *adev = smu->adev;
1622 return smu_cmn_send_smc_msg_with_param(smu,
1626 return smu_cmn_send_smc_msg_with_param(smu,
1643 static void aldebaran_log_thermal_throttling_event(struct smu_context *smu)
1647 struct amdgpu_device *adev = smu->adev;
1651 ret = aldebaran_get_smu_metrics_data(smu,
1677 kgd2kfd_smi_event_throttle(smu->adev->kfd.dev,
1682 static int aldebaran_get_current_pcie_link_speed(struct smu_context *smu)
1684 struct amdgpu_device *adev = smu->adev;
1692 return smu_v13_0_get_current_pcie_link_speed(smu);
1695 static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
1698 struct smu_table_context *smu_table = &smu->smu_table;
1704 ret = smu_cmn_get_metrics_table(smu,
1724 if (aldebaran_is_primary(smu)) {
1753 if (!amdgpu_sriov_vf(smu->adev)) {
1755 smu_v13_0_get_current_pcie_link_width(smu);
1757 aldebaran_get_current_pcie_link_speed(smu);
1776 static int aldebaran_check_ecc_table_support(struct smu_context *smu,
1779 if (smu->smc_fw_version < SUPPORT_ECCTABLE_SMU_VERSION)
1781 else if (smu->smc_fw_version >= SUPPORT_ECCTABLE_SMU_VERSION &&
1782 smu->smc_fw_version < SUPPORT_ECCTABLE_V2_SMU_VERSION)
1790 static ssize_t aldebaran_get_ecc_info(struct smu_context *smu,
1793 struct smu_table_context *smu_table = &smu->smu_table;
1800 ret = aldebaran_check_ecc_table_support(smu, &table_version);
1804 ret = smu_cmn_update_table(smu,
1810 dev_info(smu->adev->dev, "Failed to export SMU ecc table!\n");
1848 static int aldebaran_mode1_reset(struct smu_context *smu)
1852 struct amdgpu_device *adev = smu->adev;
1861 if (smu->smc_fw_version < 0x00440700) {
1862 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
1866 if ((smu->smc_fw_version >= 0x00442c00) && ras &&
1871 ret = smu_cmn_send_smc_msg_with_param(smu,
1881 static int aldebaran_mode2_reset(struct smu_context *smu)
1884 struct amdgpu_device *adev = smu->adev;
1887 index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
1890 mutex_lock(&smu->message_lock);
1891 if (smu->smc_fw_version >= 0x00441400) {
1892 ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, SMU_RESET_MODE_2);
1895 dev_dbg(smu->adev->dev, "restore config space...\n");
1899 dev_dbg(smu->adev->dev, "wait for reset ack\n");
1901 ret = smu_cmn_wait_for_response(smu);
1917 dev_err(adev->dev, "smu fw 0x%x does not support MSG_GfxDeviceDriverReset MSG\n",
1918 smu->smc_fw_version);
1924 mutex_unlock(&smu->message_lock);
1929 static int aldebaran_smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
1932 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_HeavySBR, enable ? 1 : 0, NULL);
1937 static bool aldebaran_is_mode1_reset_supported(struct smu_context *smu)
1940 struct amdgpu_device *adev = smu->adev;
1948 ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
1966 static bool aldebaran_is_mode2_reset_supported(struct smu_context *smu)
1971 static int aldebaran_set_mp1_state(struct smu_context *smu,
1976 return smu_cmn_set_mp1_state(smu, mp1_state);
1982 static int aldebaran_smu_send_hbm_bad_page_num(struct smu_context *smu,
1988 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetNumBadHbmPagesRetired, size, NULL);
1990 dev_err(smu->adev->dev, "[%s] failed to message SMU to update HBM bad pages number\n",
1996 static int aldebaran_check_bad_channel_info_support(struct smu_context *smu)
1998 if (smu->smc_fw_version < SUPPORT_BAD_CHANNEL_INFO_MSG_VERSION)
2004 static int aldebaran_send_hbm_bad_channel_flag(struct smu_context *smu,
2009 ret = aldebaran_check_bad_channel_info_support(smu);
2014 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetBadHBMPagesRetiredFlagsPerChannel, size, NULL);
2016 dev_err(smu->adev->dev, "[%s] failed to message SMU to update HBM bad channel info\n",
2090 void aldebaran_set_ppt_funcs(struct smu_context *smu)
2092 smu->ppt_funcs = &aldebaran_ppt_funcs;
2093 smu->message_map = aldebaran_message_map;
2094 smu->clock_map = aldebaran_clk_map;
2095 smu->feature_map = aldebaran_feature_mask_map;
2096 smu->table_map = aldebaran_table_map;
2097 smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE;
2098 smu_v13_0_set_smu_mailbox_registers(smu);
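
Note: the final entries above (aldebaran_set_ppt_funcs, file lines 2090-2098) show the driver binding its ASIC-specific callback table and its message/clock/feature/table maps onto the shared smu_context. The following is a minimal, self-contained sketch of that function-pointer binding pattern only, not the kernel code itself; the struct layouts and the my_* names are illustrative stand-ins, and only the overall shape mirrors aldebaran_set_ppt_funcs().

	/* Illustrative stand-ins only -- not the kernel's real definitions. */
	#include <stdio.h>

	struct smu_context;                         /* forward declaration           */

	struct pptable_funcs {                      /* ASIC-specific callback table  */
		int (*run_btc)(struct smu_context *smu);
		int (*mode1_reset)(struct smu_context *smu);
	};

	struct smu_context {
		const struct pptable_funcs *ppt_funcs;  /* bound once per ASIC           */
		unsigned int smc_driver_if_version;
	};

	static int my_run_btc(struct smu_context *smu)     { (void)smu; return 0; }
	static int my_mode1_reset(struct smu_context *smu) { (void)smu; return 0; }

	static const struct pptable_funcs my_ppt_funcs = {
		.run_btc     = my_run_btc,
		.mode1_reset = my_mode1_reset,
	};

	/* Mirrors the shape of aldebaran_set_ppt_funcs(): attach the callback
	 * table (and, in the real driver, the message/clock/feature/table maps)
	 * to the shared context. */
	static void my_set_ppt_funcs(struct smu_context *smu)
	{
		smu->ppt_funcs = &my_ppt_funcs;
		smu->smc_driver_if_version = 0x08;      /* placeholder version value     */
	}

	int main(void)
	{
		struct smu_context smu = { 0 };

		my_set_ppt_funcs(&smu);
		/* Core code calls through the bound table without knowing the ASIC. */
		printf("mode1_reset -> %d\n", smu.ppt_funcs->mode1_reset(&smu));
		return 0;
	}

In the real driver the table carries many more callbacks (several of which appear in the listing, e.g. aldebaran_run_btc, aldebaran_mode1_reset, aldebaran_get_gpu_metrics), and common SMU code reaches them through smu->ppt_funcs without needing to know which ASIC is present.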