Lines Matching defs:smu

304 static int navi10_get_workload_type(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile)
319 static bool is_asic_secure(struct smu_context *smu)
321 struct amdgpu_device *adev = smu->adev;
335 navi10_get_allowed_feature_mask(struct smu_context *smu,
338 struct amdgpu_device *adev = smu->adev;
393 if (smu->adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
396 if (smu->adev->pg_flags & AMD_PG_SUPPORT_ATHUB)
399 if (smu->adev->pg_flags & AMD_PG_SUPPORT_VCN)
402 if (smu->adev->pg_flags & AMD_PG_SUPPORT_JPEG)
406 if (is_asic_secure(smu)) {
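
The pg_flags checks at 393-402 are the tail of navi10_get_allowed_feature_mask(): optional power-gating features are only advertised to the SMC when the device actually supports them. A minimal sketch of that shape, assuming the FEATURE_MASK() helper and FEATURE_*_PG_BIT names from the SMU11 driver-interface header:

    static int
    navi10_get_allowed_feature_mask(struct smu_context *smu,
                                    uint32_t *feature_mask, uint32_t num)
    {
        struct amdgpu_device *adev = smu->adev;

        if (num > 2)
            return -EINVAL;        /* mask is two dwords wide */

        memset(feature_mask, 0, sizeof(uint32_t) * num);

        /* always-on feature bits elided; pg_flags gate the optional ones */
        if (adev->pg_flags & AMD_PG_SUPPORT_MMHUB)
            *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MMHUB_PG_BIT);
        if (adev->pg_flags & AMD_PG_SUPPORT_ATHUB)
            *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ATHUB_PG_BIT);
        if (adev->pg_flags & AMD_PG_SUPPORT_VCN)
            *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VCN_PG_BIT);
        if (adev->pg_flags & AMD_PG_SUPPORT_JPEG)
            *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_JPEG_PG_BIT);

        return 0;
    }
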
422 static int navi10_check_powerplay_table(struct smu_context *smu)
427 static int navi10_append_powerplay_table(struct smu_context *smu)
429 struct amdgpu_device *adev = smu->adev;
430 struct smu_table_context *table_context = &smu->smu_table;
438 ret = smu_get_atom_data_table(smu, index, NULL, NULL, NULL,
523 static int navi10_store_powerplay_table(struct smu_context *smu)
526 struct smu_table_context *table_context = &smu->smu_table;
527 struct smu_baco_context *smu_baco = &smu->smu_baco;
548 static int navi10_tables_init(struct smu_context *smu, struct smu_table *tables)
550 struct smu_table_context *smu_table = &smu->smu_table;
578 static int navi10_get_metrics_table(struct smu_context *smu,
581 struct smu_table_context *smu_table = &smu->smu_table;
584 mutex_lock(&smu->metrics_lock);
586 ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
590 mutex_unlock(&smu->metrics_lock);
597 mutex_unlock(&smu->metrics_lock);
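
Lines 584-597 show the locking shape of navi10_get_metrics_table(): a mutex-guarded, time-stamped cache in front of smu_update_table(), so hot sysfs paths don't hammer the SMC for every sensor read. A sketch of the likely body, assuming a jiffies-based metrics_time stamp and a ~100 ms refresh window:

    static int navi10_get_metrics_table(struct smu_context *smu,
                                        SmuMetrics_t *metrics_table)
    {
        struct smu_table_context *smu_table = &smu->smu_table;
        int ret = 0;

        mutex_lock(&smu->metrics_lock);
        if (!smu_table->metrics_time ||
            time_after(jiffies,
                       smu_table->metrics_time + msecs_to_jiffies(100))) {
            /* pull a fresh copy of the metrics table from the SMC */
            ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0,
                                   smu_table->metrics_table, false);
            if (ret) {
                mutex_unlock(&smu->metrics_lock);
                return ret;
            }
            smu_table->metrics_time = jiffies;
        }
        memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t));
        mutex_unlock(&smu->metrics_lock);

        return ret;
    }
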
602 static int navi10_allocate_dpm_context(struct smu_context *smu)
604 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
619 static int navi10_set_default_dpm_table(struct smu_context *smu)
621 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
622 struct smu_table_context *table_context = &smu->smu_table;
664 static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
666 struct smu_power_context *smu_power = &smu->smu_power;
672 if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
673 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
679 if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
680 ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
690 static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
692 struct smu_power_context *smu_power = &smu->smu_power;
697 if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
698 ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg);
704 if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
705 ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg);
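
The UVD/VCN and JPEG handlers at 664-705 share one pattern: only message the SMC when the corresponding power-gating feature bit is enabled, with power-up taking a parameter and power-down taking none. A sketch of the VCN half, assuming a vcn_gated flag in the driver's smu_power_gate bookkeeping:

    static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
    {
        struct smu_power_context *smu_power = &smu->smu_power;
        struct smu_power_gate *power_gate = &smu_power->power_gate;
        int ret = 0;

        if (enable) {
            if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
                ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
                if (ret)
                    return ret;
            }
            power_gate->vcn_gated = false;
        } else {
            if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
                ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
                if (ret)
                    return ret;
            }
            power_gate->vcn_gated = true;
        }

        return ret;
    }

The JPEG variant at 690-705 is identical apart from the message IDs (SMU_MSG_PowerUpJpeg / SMU_MSG_PowerDownJpeg).
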
715 static int navi10_get_current_clk_freq_by_table(struct smu_context *smu,
722 ret = navi10_get_metrics_table(smu, &metrics);
726 clk_id = smu_clk_get_index(smu, clk_type);
735 static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu_clk_type clk_type)
737 PPTable_t *pptable = smu->smu_table.driver_pptable;
741 clk_index = smu_clk_get_index(smu, clk_type);
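
navi10_is_support_fine_grained_dpm() at 735-741 decides whether a clock exposes a continuous range or discrete levels by inspecting its DPM descriptor in the driver pptable. A sketch, assuming the DpmDescriptor/SnapToDiscrete layout from the Navi10 driver-interface header:

    static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu,
                                                   enum smu_clk_type clk_type)
    {
        PPTable_t *pptable = smu->smu_table.driver_pptable;
        DpmDescriptor_t *dpm_desc;
        uint32_t clk_index;

        clk_index = smu_clk_get_index(smu, clk_type);
        dpm_desc = &pptable->DpmDescriptor[clk_index];

        /* 0 - fine-grained DPM, 1 - discrete DPM */
        return dpm_desc->SnapToDiscrete == 0;
    }

Both print_clk_levels (763) and force_clk_levels (950) branch on this: fine-grained clocks are reported as a min/current/max triplet rather than a fixed level list.
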
763 static int navi10_print_clk_levels(struct smu_context *smu,
771 struct smu_table_context *table_context = &smu->smu_table;
773 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
775 struct amdgpu_device *adev = smu->adev;
779 struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
790 ret = smu_get_current_clk_freq(smu, clk_type, &cur_value);
797 ret = smu_get_dpm_level_count(smu, clk_type, &count);
801 if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
803 ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &value);
811 ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, &freq_values[0]);
814 ret = smu_get_dpm_freq_by_index(smu, clk_type, count - 1, &freq_values[2]);
856 if (!smu->od_enabled || !od_table || !od_settings)
864 if (!smu->od_enabled || !od_table || !od_settings)
872 if (!smu->od_enabled || !od_table || !od_settings)
895 if (!smu->od_enabled || !od_table || !od_settings)
950 static int navi10_force_clk_levels(struct smu_context *smu,
969 if (navi10_is_support_fine_grained_dpm(smu, clk_type)) {
974 ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
978 ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
982 ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
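
In navi10_force_clk_levels() at 969-982, the userspace level mask is translated into a [min, max] soft frequency window rather than a single pinned level. A sketch of the discrete-DPM path, assuming the usual ffs()/fls() mask decode used elsewhere in powerplay:

    uint32_t soft_min_level, soft_max_level;
    uint32_t min_freq = 0, max_freq = 0;
    int ret;

    soft_min_level = mask ? (ffs(mask) - 1) : 0;   /* lowest set bit  */
    soft_max_level = mask ? (fls(mask) - 1) : 0;   /* highest set bit */

    ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
    if (ret)
        return ret;
    ret = smu_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
    if (ret)
        return ret;

    /* clamp the DPM policy to the selected window */
    ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
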
993 static int navi10_populate_umd_state_clk(struct smu_context *smu)
998 ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL, false);
1002 smu->pstate_sclk = min_sclk_freq * 100;
1004 ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL, false);
1008 smu->pstate_mclk = min_mclk_freq * 100;
1013 static int navi10_get_clock_by_type_with_latency(struct smu_context *smu,
1026 ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
1034 ret = smu_get_dpm_freq_by_index(smu, clk_type, i, &freq);
1049 static int navi10_pre_display_config_changed(struct smu_context *smu)
1054 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
1058 if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
1059 ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq, false);
1062 ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, max_freq);
1070 static int navi10_display_config_changed(struct smu_context *smu)
1074 if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1075 !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
1076 ret = smu_write_watermarks_table(smu);
1080 smu->watermarks_bitmap |= WATERMARKS_LOADED;
1083 if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
1084 smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
1085 smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
1086 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
1087 smu->display_config->num_display);
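
navi10_display_config_changed() at 1074-1087 is a two-step handshake with the display code: flush the watermark table to the SMC once, then report the active display count. Reassembled from the matched lines (the error paths are assumptions):

    static int navi10_display_config_changed(struct smu_context *smu)
    {
        int ret = 0;

        if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
            !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
            ret = smu_write_watermarks_table(smu);
            if (ret)
                return ret;
            smu->watermarks_bitmap |= WATERMARKS_LOADED;
        }

        if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
            smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
            smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
            ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
                                              smu->display_config->num_display);
            if (ret)
                return ret;
        }

        return ret;
    }
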
1095 static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest)
1109 ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
1114 ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq);
1122 static int navi10_unforce_dpm_levels(struct smu_context *smu)
1136 ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false);
1140 ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
1148 static int navi10_get_gpu_power(struct smu_context *smu, uint32_t *value)
1156 ret = navi10_get_metrics_table(smu, &metrics);
1165 static int navi10_get_current_activity_percent(struct smu_context *smu,
1175 ret = navi10_get_metrics_table(smu, &metrics);
1194 static bool navi10_is_dpm_running(struct smu_context *smu)
1199 ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
1205 static int navi10_get_fan_speed_rpm(struct smu_context *smu,
1214 ret = navi10_get_metrics_table(smu, &metrics);
1223 static int navi10_get_fan_speed_percent(struct smu_context *smu,
1229 PPTable_t *pptable = smu->smu_table.driver_pptable;
1231 ret = navi10_get_fan_speed_rpm(smu, &current_rpm);
1241 static int navi10_get_power_profile_mode(struct smu_context *smu, char *buf)
1277 workload_type = smu_workload_get_type(smu, i);
1281 result = smu_update_table(smu,
1290 i, profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
1338 static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
1343 smu->power_profile_mode = input[size];
1345 if (smu->power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
1346 pr_err("Invalid power profile mode %d\n", smu->power_profile_mode);
1350 if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
1354 ret = smu_update_table(smu,
1398 ret = smu_update_table(smu,
1408 workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
1411 smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
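
The tail of navi10_set_power_profile_mode() at 1408-1411 maps the generic PP_SMC profile index to the SMC's workload bit and applies it as a one-hot mask. A sketch of those two steps:

    /* convert PP_SMC_POWER_PROFILE_* to the SMC's WORKLOAD_PPLIB_*_BIT */
    workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
    if (workload_type < 0)
        return -EINVAL;

    smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
                                1 << workload_type);
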
1417 static int navi10_get_profiling_clk_mask(struct smu_context *smu,
1434 ret = smu_get_dpm_level_count(smu, SMU_SCLK, &level_count);
1441 ret = smu_get_dpm_level_count(smu, SMU_MCLK, &level_count);
1448 ret = smu_get_dpm_level_count(smu, SMU_SOCCLK, &level_count);
1458 static int navi10_notify_smc_display_config(struct smu_context *smu)
1464 min_clocks.dcef_clock = smu->display_config->min_dcef_set_clk;
1465 min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk;
1466 min_clocks.memory_clock = smu->display_config->min_mem_set_clock;
1468 if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
1472 ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
1474 if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
1475 ret = smu_send_smc_msg_with_param(smu,
1488 if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
1489 ret = smu_set_hard_freq_range(smu, SMU_UCLK, min_clocks.memory_clock/100, 0);
1499 static int navi10_set_watermarks_table(struct smu_context *smu,
1559 static int navi10_thermal_get_temperature(struct smu_context *smu,
1569 ret = navi10_get_metrics_table(smu, &metrics);
1594 static int navi10_read_sensor(struct smu_context *smu,
1599 struct smu_table_context *table_context = &smu->smu_table;
1605 mutex_lock(&smu->sensor_lock);
1613 ret = navi10_get_current_activity_percent(smu, sensor, (uint32_t *)data);
1617 ret = navi10_get_gpu_power(smu, (uint32_t *)data);
1623 ret = navi10_thermal_get_temperature(smu, sensor, (uint32_t *)data);
1627 ret = smu_v11_0_read_sensor(smu, sensor, data, size);
1629 mutex_unlock(&smu->sensor_lock);
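
navi10_read_sensor() at 1605-1629 serializes on sensor_lock, answers the metrics-backed sensors locally (activity, power, temperature, in that order per the matches), and defers everything else to the common v11 handler. A sketch of that dispatch; the specific AMDGPU_PP_SENSOR_* case labels are assumptions:

    mutex_lock(&smu->sensor_lock);
    switch (sensor) {
    case AMDGPU_PP_SENSOR_GPU_LOAD:
        ret = navi10_get_current_activity_percent(smu, sensor,
                                                  (uint32_t *)data);
        *size = 4;
        break;
    case AMDGPU_PP_SENSOR_GPU_POWER:
        ret = navi10_get_gpu_power(smu, (uint32_t *)data);
        *size = 4;
        break;
    case AMDGPU_PP_SENSOR_EDGE_TEMP:
        ret = navi10_thermal_get_temperature(smu, sensor, (uint32_t *)data);
        *size = 4;
        break;
    default:
        /* everything else is common across SMU v11 parts */
        ret = smu_v11_0_read_sensor(smu, sensor, data, size);
    }
    mutex_unlock(&smu->sensor_lock);
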
1634 static int navi10_get_uclk_dpm_states(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states)
1639 struct smu_table_context *table_context = &smu->smu_table;
1663 static int navi10_set_performance_level(struct smu_context *smu,
1666 static int navi10_set_standard_performance_level(struct smu_context *smu)
1668 struct amdgpu_device *adev = smu->adev;
1683 return navi10_set_performance_level(smu, AMD_DPM_FORCED_LEVEL_AUTO);
1686 ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
1689 ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
1696 static int navi10_set_peak_performance_level(struct smu_context *smu)
1698 struct amdgpu_device *adev = smu->adev;
1745 ret = smu_get_dpm_level_range(smu, SMU_SCLK, NULL, &sclk_freq);
1750 ret = smu_get_dpm_level_range(smu, SMU_UCLK, NULL, &uclk_freq);
1754 ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
1757 ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
1764 static int navi10_set_performance_level(struct smu_context *smu,
1772 ret = smu_force_dpm_limit_value(smu, true);
1775 ret = smu_force_dpm_limit_value(smu, false);
1778 ret = smu_unforce_dpm_levels(smu);
1781 ret = navi10_set_standard_performance_level(smu);
1785 ret = smu_get_profiling_clk_mask(smu, level,
1791 smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
1792 smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
1793 smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
1796 ret = navi10_set_peak_performance_level(smu);
1806 static int navi10_get_thermal_temperature_range(struct smu_context *smu,
1809 struct smu_table_context *table_context = &smu->smu_table;
1821 static int navi10_display_disable_memory_clock_switch(struct smu_context *smu,
1827 smu->smu_table.max_sustainable_clocks;
1828 uint32_t min_memory_clock = smu->hard_min_uclk_req_from_dal;
1831 if (smu->disable_uclk_switch == disable_memory_clock_switch)
1835 ret = smu_set_hard_freq_range(smu, SMU_UCLK, max_memory_clock, 0);
1837 ret = smu_set_hard_freq_range(smu, SMU_UCLK, min_memory_clock, 0);
1840 smu->disable_uclk_switch = disable_memory_clock_switch;
1845 static uint32_t navi10_get_pptable_power_limit(struct smu_context *smu)
1847 PPTable_t *pptable = smu->smu_table.driver_pptable;
1851 static int navi10_get_power_limit(struct smu_context *smu,
1855 PPTable_t *pptable = smu->smu_table.driver_pptable;
1860 if (!smu->power_limit) {
1861 if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
1862 power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
1866 ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
1872 smu_read_smc_arg(smu, &asic_default_power_limit);
1883 smu->power_limit = asic_default_power_limit;
1887 *limit = smu_v11_0_get_max_power_limit(smu);
1889 *limit = smu->power_limit;
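
navi10_get_power_limit() at 1860-1889 lazily populates smu->power_limit: if the PPT feature is live it queries the SMC (power source encoded in the upper half of the parameter), otherwise it falls back to the pptable default; callers asking for the maximum (a bool cap argument is assumed) get the v11 ceiling instead. A sketch under those assumptions:

    if (!smu->power_limit) {
        if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
            power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC);
            if (power_src < 0)
                return -EINVAL;

            ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
                                              power_src << 16);
            if (ret)
                return ret;
            smu_read_smc_arg(smu, &asic_default_power_limit);
        } else {
            /* PPT disabled: trust the static pptable value */
            asic_default_power_limit = navi10_get_pptable_power_limit(smu);
        }
        smu->power_limit = asic_default_power_limit;
    }

    if (cap)
        *limit = smu_v11_0_get_max_power_limit(smu);
    else
        *limit = smu->power_limit;
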
1894 static int navi10_update_pcie_parameters(struct smu_context *smu,
1898 PPTable_t *pptable = smu->smu_table.driver_pptable;
1902 struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
1910 ret = smu_send_smc_msg_with_param(smu,
1948 static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
1956 ret = smu_send_smc_msg_with_param(smu,
1964 smu_read_smc_arg(smu, &value);
1970 static int navi10_setup_od_limits(struct smu_context *smu) {
1974 if (!smu->smu_table.power_play_table) {
1978 powerplay_table = (const struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table;
1980 if (!smu->od_settings) {
1981 smu->od_settings = kmemdup(overdrive_table, sizeof(struct smu_11_0_overdrive_table), GFP_KERNEL);
1983 memcpy(smu->od_settings, overdrive_table, sizeof(struct smu_11_0_overdrive_table));
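
navi10_setup_od_limits() at 1970-1983 keeps a private copy of the overdrive-limits table so later OD edits never touch the parsed powerplay table. A sketch, assuming the limits live in an overdrive_table member of smu_11_0_powerplay_table:

    const struct smu_11_0_overdrive_table *overdrive_table;
    const struct smu_11_0_powerplay_table *powerplay_table;

    if (!smu->smu_table.power_play_table)
        return -ENOENT;        /* pptable must be parsed first */

    powerplay_table = (const struct smu_11_0_powerplay_table *)
                      smu->smu_table.power_play_table;
    overdrive_table = &powerplay_table->overdrive_table;

    if (!smu->od_settings)
        smu->od_settings = kmemdup(overdrive_table,
                                   sizeof(struct smu_11_0_overdrive_table),
                                   GFP_KERNEL);
    else
        memcpy(smu->od_settings, overdrive_table,
               sizeof(struct smu_11_0_overdrive_table));

    return smu->od_settings ? 0 : -ENOMEM;
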
1988 static int navi10_set_default_od_settings(struct smu_context *smu, bool initialize) {
1992 ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t));
1996 od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table;
1997 boot_od_table = (OverDriveTable_t *)smu->smu_table.boot_overdrive_table;
1999 ret = navi10_setup_od_limits(smu);
2006 ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
2016 ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
2026 ret = navi10_overdrive_get_gfx_clk_base_voltage(smu,
2044 static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size) {
2047 struct smu_table_context *table_context = &smu->smu_table;
2054 if (!smu->od_enabled) {
2059 if (!smu->od_settings) {
2064 od_settings = smu->od_settings;
2141 ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true);
2147 ret = smu_handle_task(smu, smu->smu_dpm.dpm_level,
2217 static int navi10_run_btc(struct smu_context *smu)
2221 ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc);
2228 static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable)
2233 result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE);
2235 result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE);
2240 static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu)
2246 ret = smu_get_smc_version(smu, NULL, &smu_version);
2254 ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_count);
2258 ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)0, &uclk_min);
2262 ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)(uclk_count - 1), &uclk_max);
2267 ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_min);
2272 ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_max);
2280 return navi10_dummy_pstate_control(smu, true);
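
The 12 Gbps UMC CDR workaround at 2254-2280 bounces the UCLK hard limit from its lowest to its highest level so the SMC retrains memory, then re-enables the dummy-pstate change for DAL. Reassembled from the matched lines (the firmware-version gate at 2246 is elided):

    ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_count);
    if (ret)
        return ret;

    ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)0, &uclk_min);
    if (ret)
        return ret;

    ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK,
                                    (uint16_t)(uclk_count - 1), &uclk_max);
    if (ret)
        return ret;

    /* force UCLK to the bottom of its range, then back to the top */
    ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_min);
    if (ret)
        return ret;
    ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_max);
    if (ret)
        return ret;

    return navi10_dummy_pstate_control(smu, true);
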
2380 void navi10_set_ppt_funcs(struct smu_context *smu)
2382 smu->ppt_funcs = &navi10_ppt_funcs;
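
Everything above funnels into one vtable: navi10_set_ppt_funcs() at 2380-2382 is the file's only export, wiring the static callback table into the common SMU core. A sketch with a few representative entries; the pptable_funcs member names are assumptions taken from the common amdgpu_smu header:

    static struct pptable_funcs navi10_ppt_funcs = {
        .get_workload_type      = navi10_get_workload_type,
        .print_clk_levels       = navi10_print_clk_levels,
        .force_clk_levels       = navi10_force_clk_levels,
        .read_sensor            = navi10_read_sensor,
        .set_power_profile_mode = navi10_set_power_profile_mode,
        /* ... remaining callbacks elided ... */
    };

    void navi10_set_ppt_funcs(struct smu_context *smu)
    {
        smu->ppt_funcs = &navi10_ppt_funcs;
    }
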