/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * smu_v13_0_4_ppt.c - SWSMU layer-2 (ASIC-specific) powerplay table
 * implementation for SMU v13.0.4 APUs.  Provides the cmn2asic message /
 * feature / table mappings plus the pptable_funcs callbacks wired up in
 * smu_v13_0_4_set_ppt_funcs() at the bottom of this file.
 */

#include "smu_types.h"
#define SWSMU_CODE_LAYER_L2

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_v13_0.h"
#include "smu13_driver_if_v13_0_4.h"
#include "smu_v13_0_4_ppt.h"
#include "smu_v13_0_4_ppsmc.h"
#include "smu_v13_0_4_pmfw.h"
#include "smu_cmn.h"

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

/* MP1 mailbox registers used for driver <-> PMFW message exchange. */
#define mmMP1_SMN_C2PMSG_66			0x0282
#define mmMP1_SMN_C2PMSG_66_BASE_IDX		1

#define mmMP1_SMN_C2PMSG_82			0x0292
#define mmMP1_SMN_C2PMSG_82_BASE_IDX		1

#define mmMP1_SMN_C2PMSG_90			0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX		1

#define FEATURE_MASK(feature) (1ULL << feature)

/* Fixed UMD pstate ("profile_standard") clocks in MHz. */
#define SMU_13_0_4_UMD_PSTATE_GFXCLK			938
#define SMU_13_0_4_UMD_PSTATE_SOCCLK			938
#define SMU_13_0_4_UMD_PSTATE_FCLK			1875

/*
 * Union of all DPM-related feature bits; if any of these is enabled in the
 * PMFW feature mask, smu_v13_0_4_is_dpm_running() reports DPM as running.
 */
#define SMC_DPM_FEATURE ( \
	FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \
	FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_ISP_DPM_BIT) | \
	FEATURE_MASK(FEATURE_IPU_DPM_BIT) | \
	FEATURE_MASK(FEATURE_GFX_DPM_BIT))

/* Map generic SMU_MSG_* identifiers to this ASIC's PPSMC message IDs. */
static struct cmn2asic_msg_mapping smu_v13_0_4_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage,                    PPSMC_MSG_TestMessage,			1),
	MSG_MAP(GetSmuVersion,                  PPSMC_MSG_GetPmfwVersion,		1),
	MSG_MAP(GetDriverIfVersion,             PPSMC_MSG_GetDriverIfVersion,		1),
	MSG_MAP(AllowGfxOff,                    PPSMC_MSG_AllowGfxOff,			1),
	MSG_MAP(DisallowGfxOff,                 PPSMC_MSG_DisallowGfxOff,		1),
	MSG_MAP(PowerDownVcn,                   PPSMC_MSG_PowerDownVcn,			1),
	MSG_MAP(PowerUpVcn,                     PPSMC_MSG_PowerUpVcn,			1),
	MSG_MAP(SetHardMinVcn,                  PPSMC_MSG_SetHardMinVcn,		1),
	MSG_MAP(PrepareMp1ForUnload,            PPSMC_MSG_PrepareMp1ForUnload,		1),
	MSG_MAP(SetDriverDramAddrHigh,          PPSMC_MSG_SetDriverDramAddrHigh,	1),
	MSG_MAP(SetDriverDramAddrLow,           PPSMC_MSG_SetDriverDramAddrLow,		1),
	MSG_MAP(TransferTableSmu2Dram,          PPSMC_MSG_TransferTableSmu2Dram,	1),
	MSG_MAP(TransferTableDram2Smu,          PPSMC_MSG_TransferTableDram2Smu,	1),
	MSG_MAP(GfxDeviceDriverReset,           PPSMC_MSG_GfxDeviceDriverReset,		1),
	MSG_MAP(GetEnabledSmuFeatures,          PPSMC_MSG_GetEnabledSmuFeatures,	1),
	MSG_MAP(SetHardMinSocclkByFreq,         PPSMC_MSG_SetHardMinSocclkByFreq,	1),
	MSG_MAP(SetSoftMinVcn,                  PPSMC_MSG_SetSoftMinVcn,		1),
	MSG_MAP(GetGfxclkFrequency,             PPSMC_MSG_GetGfxclkFrequency,		1),
	MSG_MAP(GetFclkFrequency,               PPSMC_MSG_GetFclkFrequency,		1),
	MSG_MAP(SetSoftMaxGfxClk,               PPSMC_MSG_SetSoftMaxGfxClk,		1),
	MSG_MAP(SetHardMinGfxClk,               PPSMC_MSG_SetHardMinGfxClk,		1),
	MSG_MAP(SetSoftMaxSocclkByFreq,         PPSMC_MSG_SetSoftMaxSocclkByFreq,	1),
	MSG_MAP(SetSoftMaxFclkByFreq,           PPSMC_MSG_SetSoftMaxFclkByFreq,		1),
	MSG_MAP(SetSoftMaxVcn,                  PPSMC_MSG_SetSoftMaxVcn,		1),
	MSG_MAP(SetPowerLimitPercentage,        PPSMC_MSG_SetPowerLimitPercentage,	1),
	MSG_MAP(PowerDownJpeg,                  PPSMC_MSG_PowerDownJpeg,		1),
	MSG_MAP(PowerUpJpeg,                    PPSMC_MSG_PowerUpJpeg,			1),
	MSG_MAP(SetHardMinFclkByFreq,           PPSMC_MSG_SetHardMinFclkByFreq,		1),
	MSG_MAP(SetSoftMinSocclkByFreq,         PPSMC_MSG_SetSoftMinSocclkByFreq,	1),
	MSG_MAP(EnableGfxImu,                   PPSMC_MSG_EnableGfxImu,			1),
	MSG_MAP(PowerUpIspByTile,               PPSMC_MSG_PowerUpIspByTile,		1),
	MSG_MAP(PowerDownIspByTile,             PPSMC_MSG_PowerDownIspByTile,		1),
};

/* Map generic SMU feature identifiers to this ASIC's FEATURE_*_BIT values. */
static struct cmn2asic_mapping smu_v13_0_4_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(CCLK_DPM),
	FEA_MAP(FAN_CONTROLLER),
	FEA_MAP(PPT),
	FEA_MAP(TDC),
	FEA_MAP(THERMAL),
	FEA_MAP(VCN_DPM),
	FEA_MAP_REVERSE(FCLK),
	FEA_MAP_REVERSE(SOCCLK),
	FEA_MAP(LCLK_DPM),
	FEA_MAP(SHUBCLK_DPM),
	FEA_MAP(DCFCLK_DPM),
	FEA_MAP_HALF_REVERSE(GFX),
	FEA_MAP(DS_GFXCLK),
	FEA_MAP(DS_SOCCLK),
	FEA_MAP(DS_LCLK),
	FEA_MAP(DS_DCFCLK),
	FEA_MAP(DS_FCLK),
	FEA_MAP(DS_MP1CLK),
	FEA_MAP(DS_MP0CLK),
	FEA_MAP(GFX_DEM),
	FEA_MAP(PSI),
	FEA_MAP(PROCHOT),
	FEA_MAP(CPUOFF),
	FEA_MAP(STAPM),
	FEA_MAP(S0I3),
	FEA_MAP(PERF_LIMIT),
	FEA_MAP(CORE_DLDO),
	FEA_MAP(DS_VCN),
	FEA_MAP(CPPC),
	FEA_MAP(DF_CSTATES),
	FEA_MAP(ATHUB_PG),
};

/* SMU tables exchanged with PMFW that this ASIC supports. */
static struct cmn2asic_mapping smu_v13_0_4_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP_VALID(WATERMARKS),
	TAB_MAP_VALID(SMU_METRICS),
	TAB_MAP_VALID(CUSTOM_DPM),
	TAB_MAP_VALID(DPMCLOCKS),
};

/*
 * Register the PMFW-shared table layouts and allocate the driver-side
 * (system memory) copies of them.  On any allocation failure all prior
 * allocations are unwound via the goto labels and -ENOMEM is returned.
 */
static int smu_v13_0_4_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;

	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
		PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
		PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
		PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);

	smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
	if (!smu_table->clocks_table)
		goto err0_out;

	smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
	if (!smu_table->metrics_table)
		goto err1_out;
	/* Force a fresh metrics fetch on first smu_cmn_get_metrics_table() */
	smu_table->metrics_time = 0;

	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
	if (!smu_table->watermarks_table)
		goto err2_out;

	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_1);
	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
	if (!smu_table->gpu_metrics_table)
		goto err3_out;

	return 0;

err3_out:
	kfree(smu_table->watermarks_table);
err2_out:
	kfree(smu_table->metrics_table);
err1_out:
	kfree(smu_table->clocks_table);
err0_out:
	return -ENOMEM;
}

/*
 * Free the driver-side table copies allocated by
 * smu_v13_0_4_init_smc_tables().  Pointers are NULLed to guard against
 * use-after-free / double-free on repeated teardown.
 */
static int smu_v13_0_4_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	kfree(smu_table->clocks_table);
	smu_table->clocks_table = NULL;

	kfree(smu_table->metrics_table);
	smu_table->metrics_table = NULL;

	kfree(smu_table->watermarks_table);
	smu_table->watermarks_table = NULL;

	kfree(smu_table->gpu_metrics_table);
	smu_table->gpu_metrics_table = NULL;

	return 0;
}

/*
 * DPM is considered "running" when at least one of the SMC_DPM_FEATURE
 * bits is set in the PMFW enabled-feature mask.  Returns false if the
 * mask cannot be queried.
 */
static bool smu_v13_0_4_is_dpm_running(struct smu_context *smu)
{
	int ret = 0;
	uint64_t feature_enabled;

	ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);

	if (ret)
		return false;

	return !!(feature_enabled & SMC_DPM_FEATURE);
}

/*
 * Feature enable/disable hook.  Only the disable path (driver unload /
 * suspend, but not S0ix) does anything: it notifies MP1 that the driver
 * is going away, with an extra GFX mode-2 reset first when entering S4.
 */
static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (!en && !adev->in_s0ix) {
		if (adev->in_s4) {
			/* Adds a GFX reset as workaround just before sending the
			 * MP1_UNLOAD message to prevent GC/RLC/PMFW from entering
			 * an invalid state.
			 */
			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
							      SMU_RESET_MODE_2, NULL);
			if (ret)
				return ret;
		}

		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
	}

	return ret;
}

/*
 * Fill the preallocated gpu_metrics_v2_1 table from a freshly fetched
 * SmuMetrics_t snapshot and hand the caller a pointer to it.  Returns the
 * table size on success or a negative error code.
 */
static ssize_t smu_v13_0_4_get_gpu_metrics(struct smu_context *smu,
					   void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_1 *gpu_metrics =
		(struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table;
	SmuMetrics_t metrics;
	int ret = 0;

	/* bypass_cache=true: always pull current data from PMFW */
	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1);

	gpu_metrics->temperature_gfx = metrics.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.SocTemperature;
	memcpy(&gpu_metrics->temperature_core[0],
	       &metrics.CoreTemperature[0],
	       sizeof(uint16_t) * 8);
	gpu_metrics->temperature_l3[0] = metrics.L3Temperature;

	gpu_metrics->average_gfx_activity = metrics.GfxActivity;
	gpu_metrics->average_mm_activity = metrics.UvdActivity;

	gpu_metrics->average_socket_power = metrics.AverageSocketPower;
	gpu_metrics->average_gfx_power = metrics.Power[0];
	gpu_metrics->average_soc_power = metrics.Power[1];
	memcpy(&gpu_metrics->average_core_power[0],
	       &metrics.CorePower[0],
	       sizeof(uint16_t) * 8);

	gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
	/* uclk and fclk both report MemclkFrequency on this APU */
	gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
	gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
	       &metrics.CoreFrequency[0],
	       sizeof(uint16_t) * 8);
	gpu_metrics->current_l3clk[0] = metrics.L3Frequency;

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_1);
}

/*
 * Read a single metric out of the (possibly cached) SmuMetrics_t table.
 * Unknown members yield UINT_MAX rather than an error.
 */
static int smu_v13_0_4_get_smu_metrics_data(struct smu_context *smu,
					    MetricsMember_t member,
					    uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
	int ret = 0;

	/* Refresh the cached metrics table only if it is stale */
	ret = smu_cmn_get_metrics_table(smu, NULL, false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_AVERAGE_GFXCLK:
		*value = metrics->GfxclkFrequency;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->SocclkFrequency;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->VclkFrequency;
		break;
	case METRICS_AVERAGE_DCLK:
		*value = metrics->DclkFrequency;
		break;
	case METRICS_AVERAGE_UCLK:
		*value = metrics->MemclkFrequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->GfxActivity / 100;
		break;
	case METRICS_AVERAGE_VCNACTIVITY:
		*value = metrics->UvdActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		/* NOTE(review): (x << 8) / 1000 suggests PMFW reports power in
		 * mW with an extra 2^-8 scale — confirm against PMFW spec */
		*value = (metrics->AverageSocketPower << 8) / 1000;
		break;
	case METRICS_CURR_SOCKETPOWER:
		*value = (metrics->CurrentSocketPower << 8) / 1000;
		break;
	case METRICS_TEMPERATURE_EDGE:
		/* centi-degrees C -> driver temperature units */
		*value = metrics->GfxTemperature / 100 *
			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->SocTemperature / 100 *
			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = metrics->ThrottlerStatus;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		*value = metrics->Voltage[0];
		break;
	case METRICS_VOLTAGE_VDDSOC:
		*value = metrics->Voltage[1];
		break;
	case METRICS_SS_APU_SHARE:
		/* return the percentage of APU power with respect to APU's power limit.
		 * percentage is reported, this isn't boost value. Smartshift power
		 * boost/shift is only when the percentage is more than 100.
		 */
		if (metrics->StapmOpnLimit > 0)
			*value = (metrics->ApuPower * 100) / metrics->StapmOpnLimit;
		else
			*value = 0;
		break;
	case METRICS_SS_DGPU_SHARE:
		/* return the percentage of dGPU power with respect to dGPU's power limit.
		 * percentage is reported, this isn't boost value. Smartshift power
		 * boost/shift is only when the percentage is more than 100.
		 */
		if ((metrics->dGpuPower > 0) &&
		    (metrics->StapmCurrentLimit > metrics->StapmOpnLimit))
			*value = (metrics->dGpuPower * 100) /
				 (metrics->StapmCurrentLimit - metrics->StapmOpnLimit);
		else
			*value = 0;
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}

/*
 * Current frequency for a clock domain.  Most domains come from the
 * metrics table; FCLK and GFXCLK/SCLK are queried directly from PMFW
 * via dedicated messages.
 */
static int smu_v13_0_4_get_current_clk_freq(struct smu_context *smu,
					    enum smu_clk_type clk_type,
					    uint32_t *value)
{
	MetricsMember_t member_type;

	switch (clk_type) {
	case SMU_SOCCLK:
		member_type = METRICS_AVERAGE_SOCCLK;
		break;
	case SMU_VCLK:
		member_type = METRICS_AVERAGE_VCLK;
		break;
	case SMU_DCLK:
		member_type = METRICS_AVERAGE_DCLK;
		break;
	case SMU_MCLK:
		member_type = METRICS_AVERAGE_UCLK;
		break;
	case SMU_FCLK:
		return smu_cmn_send_smc_msg_with_param(smu,
						       SMU_MSG_GetFclkFrequency,
						       0, value);
	case SMU_GFXCLK:
	case SMU_SCLK:
		return smu_cmn_send_smc_msg_with_param(smu,
						       SMU_MSG_GetGfxclkFrequency,
						       0, value);
		break;
	default:
		return -EINVAL;
	}

	return smu_v13_0_4_get_smu_metrics_data(smu, member_type, value);
}

/*
 * Look up the frequency of a given DPM level from the cached DpmClocks_t
 * table.  Bounds-checks dpm_level against the per-domain enabled level
 * count; VCLK and DCLK share VcnClkLevelsEnabled, MCLK/UCLK and FCLK
 * share the DF pstate table.
 */
static int smu_v13_0_4_get_dpm_freq_by_index(struct smu_context *smu,
					     enum smu_clk_type clk_type,
					     uint32_t dpm_level,
					     uint32_t *freq)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;

	if (!clk_table || clk_type >= SMU_CLK_COUNT)
		return -EINVAL;

	switch (clk_type) {
	case SMU_SOCCLK:
		if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->SocClocks[dpm_level];
		break;
	case SMU_VCLK:
		if (dpm_level >= clk_table->VcnClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->VClocks[dpm_level];
		break;
	case SMU_DCLK:
		if (dpm_level >= clk_table->VcnClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->DClocks[dpm_level];
		break;
	case SMU_UCLK:
	case SMU_MCLK:
		if (dpm_level >= clk_table->NumDfPstatesEnabled)
			return -EINVAL;
		*freq = clk_table->DfPstateTable[dpm_level].MemClk;
		break;
	case SMU_FCLK:
		if (dpm_level >= clk_table->NumDfPstatesEnabled)
			return -EINVAL;
		*freq = clk_table->DfPstateTable[dpm_level].FClk;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Number of enabled DPM levels for a clock domain, read from the cached
 * DpmClocks_t table.  Unknown domains leave *count untouched and still
 * return 0.
 */
static int smu_v13_0_4_get_dpm_level_count(struct smu_context *smu,
					   enum smu_clk_type clk_type,
					   uint32_t *count)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;

	switch (clk_type) {
	case SMU_SOCCLK:
		*count = clk_table->NumSocClkLevelsEnabled;
		break;
	case SMU_VCLK:
		*count = clk_table->VcnClkLevelsEnabled;
		break;
	case SMU_DCLK:
		*count = clk_table->VcnClkLevelsEnabled;
		break;
	case SMU_MCLK:
		*count = clk_table->NumDfPstatesEnabled;
		break;
	case SMU_FCLK:
		*count = clk_table->NumDfPstatesEnabled;
		break;
	default:
		break;
	}

	return 0;
}

/*
 * sysfs pp_dpm_* / pp_od_clk_voltage backend: print the level table for a
 * clock domain into @buf, marking the current level with '*'.  Returns the
 * number of bytes emitted.
 */
static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type, char *buf)
{
	int i, idx, size = 0, ret = 0;
	uint32_t cur_value = 0, value = 0, count = 0;
	uint32_t min, max;

	smu_cmn_get_sysfs_buf(&buf, &size);

	switch (clk_type) {
	case SMU_OD_SCLK:
		size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
		size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
		(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
		size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
		(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
		break;
	case SMU_OD_RANGE:
		size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
		size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
				      smu->gfx_default_hard_min_freq,
				      smu->gfx_default_soft_max_freq);
		break;
	case SMU_SOCCLK:
	case SMU_VCLK:
	case SMU_DCLK:
	case SMU_MCLK:
	case SMU_FCLK:
		ret = smu_v13_0_4_get_current_clk_freq(smu, clk_type, &cur_value);
		if (ret)
			break;

		ret = smu_v13_0_4_get_dpm_level_count(smu, clk_type, &count);
		if (ret)
			break;

		for (i = 0; i < count; i++) {
			/* DF pstate tables (FCLK/MCLK) are stored highest
			 * frequency first, so walk them in reverse to print
			 * levels in ascending frequency order */
			idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
			ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, idx, &value);
			if (ret)
				break;

			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
					      cur_value == value ? "*" : "");
		}
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		/* GFX has no discrete level table: synthesize a 3-level view
		 * (min / current-or-UMD-pstate / max) */
		ret = smu_v13_0_4_get_current_clk_freq(smu, clk_type, &cur_value);
		if (ret)
			break;
		min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
		max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
		if (cur_value == max)
			i = 2;
		else if (cur_value == min)
			i = 0;
		else
			i = 1;
		size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
				      i == 0 ? "*" : "");
		size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
				      i == 1 ? cur_value : 1100, /* UMD PSTATE GFXCLK 1100 */
				      i == 1 ? "*" : "");
		size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
				      i == 2 ? "*" : "");
		break;
	default:
		break;
	}

	return size;
}

/*
 * amd_pp sensor read entry point.  Dispatches to the metrics helper;
 * clocks are converted from MHz to the 10 kHz units the sensor interface
 * expects (*= 100).  *size is always 4 bytes for supported sensors.
 */
static int smu_v13_0_4_read_sensor(struct smu_context *smu,
				   enum amd_pp_sensors sensor,
				   void *data, uint32_t *size)
{
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = smu_v13_0_4_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_GFXACTIVITY,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
		ret = smu_v13_0_4_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_SOCKETPOWER,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
		ret = smu_v13_0_4_get_smu_metrics_data(smu,
						       METRICS_CURR_SOCKETPOWER,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
		ret = smu_v13_0_4_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_EDGE,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		ret = smu_v13_0_4_get_smu_metrics_data(smu,
						       METRICS_TEMPERATURE_HOTSPOT,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = smu_v13_0_4_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_UCLK,
						       (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = smu_v13_0_4_get_smu_metrics_data(smu,
						       METRICS_AVERAGE_GFXCLK,
						       (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v13_0_4_get_smu_metrics_data(smu,
						       METRICS_VOLTAGE_VDDGFX,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDNB:
		ret = smu_v13_0_4_get_smu_metrics_data(smu,
						       METRICS_VOLTAGE_VDDSOC,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_SS_APU_SHARE:
		ret = smu_v13_0_4_get_smu_metrics_data(smu,
						       METRICS_SS_APU_SHARE,
						       (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_SS_DGPU_SHARE:
		ret = smu_v13_0_4_get_smu_metrics_data(smu,
						       METRICS_SS_DGPU_SHARE,
						       (uint32_t *)data);
		*size = 4;
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

/*
 * Translate display-supplied watermark ranges into the PMFW Watermarks_t
 * layout (readers -> WM_DCFCLK row, writers -> WM_SOCCLK row) and upload
 * the table once, tracked via the WATERMARKS_EXIST/LOADED bitmap.
 */
static int smu_v13_0_4_set_watermarks_table(struct smu_context *smu,
					    struct pp_smu_wm_range_sets *clock_ranges)
{
	int i;
	int ret = 0;
	Watermarks_t *table = smu->smu_table.watermarks_table;

	if (!table || !clock_ranges)
		return -EINVAL;

	if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
	    clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
		return -EINVAL;

	for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
		table->WatermarkRow[WM_DCFCLK][i].MinClock =
			clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
		table->WatermarkRow[WM_DCFCLK][i].MaxClock =
			clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
		table->WatermarkRow[WM_DCFCLK][i].MinMclk =
			clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
		table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
			clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;

		table->WatermarkRow[WM_DCFCLK][i].WmSetting =
			clock_ranges->reader_wm_sets[i].wm_inst;
	}

	for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
		table->WatermarkRow[WM_SOCCLK][i].MinClock =
			clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
		table->WatermarkRow[WM_SOCCLK][i].MaxClock =
			clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
		table->WatermarkRow[WM_SOCCLK][i].MinMclk =
			clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
		table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
			clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;

		table->WatermarkRow[WM_SOCCLK][i].WmSetting =
			clock_ranges->writer_wm_sets[i].wm_inst;
	}

	smu->watermarks_bitmap |= WATERMARKS_EXIST;

	/* pass data to smu controller */
	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
	    !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		ret = smu_cmn_write_watermarks_table(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to update WMTABLE!");
			return ret;
		}
		smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}

	return 0;
}

/*
 * Whether DPM is enabled for a given clock domain, based on the PMFW
 * feature mask.  Domains without a dedicated feature bit default to true.
 */
static bool smu_v13_0_4_clk_dpm_is_enabled(struct smu_context *smu,
					   enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
	case SMU_FCLK:
		feature_id = SMU_FEATURE_DPM_FCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	case SMU_VCLK:
	case SMU_DCLK:
		feature_id = SMU_FEATURE_VCN_DPM_BIT;
		break;
	default:
		return true;
	}

	return smu_cmn_feature_is_enabled(smu, feature_id);
}

/*
 * Report the ultimate (absolute) min/max frequency for a clock domain.
 * Either @min or @max may be NULL.  If DPM is disabled for the domain both
 * bounds collapse to the VBIOS boot clock.  GFXCLK limits come straight
 * from the DpmClocks_t table; other domains are looked up by DPM level
 * (note the DF pstate table is ordered highest frequency first, so its
 * max is level 0 and its min is the last level).
 */
static int smu_v13_0_4_get_dpm_ultimate_freq(struct smu_context *smu,
					     enum smu_clk_type clk_type,
					     uint32_t *min,
					     uint32_t *max)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
	uint32_t clock_limit;
	uint32_t max_dpm_level, min_dpm_level;
	int ret = 0;

	if (!smu_v13_0_4_clk_dpm_is_enabled(smu, clk_type)) {
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			clock_limit = smu->smu_table.boot_values.uclk;
			break;
		case SMU_FCLK:
			clock_limit = smu->smu_table.boot_values.fclk;
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			clock_limit = smu->smu_table.boot_values.gfxclk;
			break;
		case SMU_SOCCLK:
			clock_limit = smu->smu_table.boot_values.socclk;
			break;
		case SMU_VCLK:
			clock_limit = smu->smu_table.boot_values.vclk;
			break;
		case SMU_DCLK:
			clock_limit = smu->smu_table.boot_values.dclk;
			break;
		default:
			clock_limit = 0;
			break;
		}

		/* clock in Mhz unit */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;

		return 0;
	}

	if (max) {
		switch (clk_type) {
		case SMU_GFXCLK:
		case SMU_SCLK:
			*max = clk_table->MaxGfxClk;
			break;
		case SMU_MCLK:
		case SMU_UCLK:
		case SMU_FCLK:
			max_dpm_level = 0;
			break;
		case SMU_SOCCLK:
			max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1;
			break;
		case SMU_VCLK:
		case SMU_DCLK:
			max_dpm_level = clk_table->VcnClkLevelsEnabled - 1;
			break;
		default:
			return -EINVAL;
		}

		if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
			ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type,
								max_dpm_level,
								max);
			if (ret)
				return ret;
		}
	}

	if (min) {
		switch (clk_type) {
		case SMU_GFXCLK:
		case SMU_SCLK:
			*min = clk_table->MinGfxClk;
			break;
		case SMU_MCLK:
		case SMU_UCLK:
		case SMU_FCLK:
			min_dpm_level = clk_table->NumDfPstatesEnabled - 1;
			break;
		case SMU_SOCCLK:
			min_dpm_level = 0;
			break;
		case SMU_VCLK:
		case SMU_DCLK:
			min_dpm_level = 0;
			break;
		default:
			return -EINVAL;
		}

		if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
			ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type,
								min_dpm_level,
								min);
		}
	}

	return ret;
}

/*
 * Clamp a clock domain to [min, max] by sending the domain's hard-min and
 * soft-max messages.  For VCLK the values are shifted by SMU_13_VCLK_SHIFT
 * to encode the VCN clock selector expected by the Vcn min/max messages.
 */
static int smu_v13_0_4_set_soft_freq_limited_range(struct smu_context *smu,
						   enum smu_clk_type clk_type,
						   uint32_t min,
						   uint32_t max)
{
	enum smu_message_type msg_set_min, msg_set_max;
	uint32_t min_clk = min;
	uint32_t max_clk = max;
	int ret = 0;

	if (!smu_v13_0_4_clk_dpm_is_enabled(smu, clk_type))
		return -EINVAL;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
		msg_set_min = SMU_MSG_SetHardMinGfxClk;
		msg_set_max = SMU_MSG_SetSoftMaxGfxClk;
		break;
	case SMU_FCLK:
		msg_set_min = SMU_MSG_SetHardMinFclkByFreq;
		msg_set_max = SMU_MSG_SetSoftMaxFclkByFreq;
		break;
	case SMU_SOCCLK:
		msg_set_min = SMU_MSG_SetHardMinSocclkByFreq;
		msg_set_max = SMU_MSG_SetSoftMaxSocclkByFreq;
		break;
	case SMU_VCLK:
	case SMU_DCLK:
		msg_set_min = SMU_MSG_SetHardMinVcn;
		msg_set_max = SMU_MSG_SetSoftMaxVcn;
		break;
	default:
		return -EINVAL;
	}

	if (clk_type == SMU_VCLK) {
		min_clk = min << SMU_13_VCLK_SHIFT;
		max_clk = max << SMU_13_VCLK_SHIFT;
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min_clk, NULL);
	if (ret)
		return ret;

	return smu_cmn_send_smc_msg_with_param(smu, msg_set_max,
					       max_clk, NULL);
}

/*
 * sysfs "force clock level" backend: interpret @mask as a range of DPM
 * levels (lowest set bit .. highest set bit), translate both ends to
 * frequencies and apply them as the new soft range.  Only the level-table
 * domains (SOCCLK/FCLK/VCLK/DCLK) are supported.
 */
static int smu_v13_0_4_force_clk_levels(struct smu_context *smu,
					enum smu_clk_type clk_type,
					uint32_t mask)
{
	uint32_t soft_min_level = 0, soft_max_level = 0;
	uint32_t min_freq = 0, max_freq = 0;
	int ret = 0;

	soft_min_level = mask ? (ffs(mask) - 1) : 0;
	soft_max_level = mask ? (fls(mask) - 1) : 0;

	switch (clk_type) {
	case SMU_SOCCLK:
	case SMU_FCLK:
	case SMU_VCLK:
	case SMU_DCLK:
		ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
		if (ret)
			break;

		ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
		if (ret)
			break;

		ret = smu_v13_0_4_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/*
 * Pick the single clock to pin a domain at for the UMD profile levels:
 * the fixed UMD pstate value by default, overridden by the domain's
 * ultimate max (PROFILE_PEAK) or min (PROFILE_MIN_*).  Both *min_clk and
 * *max_clk are set to the same value.
 */
static int smu_v13_0_4_get_dpm_profile_freq(struct smu_context *smu,
					    enum amd_dpm_forced_level level,
					    enum smu_clk_type clk_type,
					    uint32_t *min_clk,
					    uint32_t *max_clk)
{
	int ret = 0;
	uint32_t clk_limit = 0;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
		clk_limit = SMU_13_0_4_UMD_PSTATE_GFXCLK;
		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &clk_limit);
		else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK)
			smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SCLK, &clk_limit, NULL);
		break;
	case SMU_SOCCLK:
		clk_limit = SMU_13_0_4_UMD_PSTATE_SOCCLK;
		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &clk_limit);
		break;
	case SMU_FCLK:
		clk_limit = SMU_13_0_4_UMD_PSTATE_FCLK;
		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &clk_limit);
		else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK)
			smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_FCLK, &clk_limit, NULL);
		break;
	case SMU_VCLK:
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &clk_limit);
		break;
	case SMU_DCLK:
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &clk_limit);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	*min_clk = *max_clk = clk_limit;
	return ret;
}

/*
 * Apply a forced performance level by computing per-domain [min, max]
 * ranges (HIGH pins both to max, LOW to min, AUTO to the full range,
 * profile levels via smu_v13_0_4_get_dpm_profile_freq()) and pushing each
 * non-zero range to PMFW.  MANUAL/PROFILE_EXIT intentionally change
 * nothing.  The applied SCLK range is mirrored into gfx_actual_* for the
 * OD sysfs views.
 */
static int smu_v13_0_4_set_performance_level(struct smu_context *smu,
					     enum amd_dpm_forced_level level)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t sclk_min = 0, sclk_max = 0;
	uint32_t fclk_min = 0, fclk_max = 0;
	uint32_t socclk_min = 0, socclk_max = 0;
	uint32_t vclk_min = 0, vclk_max = 0;
	uint32_t dclk_min = 0, dclk_max = 0;
	int ret = 0;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max);
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_max);
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_max);
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_max);
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_max);
		sclk_min = sclk_max;
		fclk_min = fclk_max;
		socclk_min = socclk_max;
		vclk_min = vclk_max;
		dclk_min = dclk_max;
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL);
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, NULL);
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, NULL);
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_VCLK, &vclk_min, NULL);
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_DCLK, &dclk_min, NULL);
		sclk_max = sclk_min;
		fclk_max = fclk_min;
		socclk_max = socclk_min;
		vclk_max = vclk_min;
		dclk_max = dclk_min;
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max);
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_FCLK, &fclk_min, &fclk_max);
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_SOCCLK, &socclk_min, &socclk_max);
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_VCLK, &vclk_min, &vclk_max);
		smu_v13_0_4_get_dpm_ultimate_freq(smu, SMU_DCLK, &dclk_min, &dclk_max);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		smu_v13_0_4_get_dpm_profile_freq(smu, level, SMU_SCLK, &sclk_min, &sclk_max);
		smu_v13_0_4_get_dpm_profile_freq(smu, level, SMU_FCLK, &fclk_min, &fclk_max);
		smu_v13_0_4_get_dpm_profile_freq(smu, level, SMU_SOCCLK, &socclk_min, &socclk_max);
		smu_v13_0_4_get_dpm_profile_freq(smu, level, SMU_VCLK, &vclk_min, &vclk_max);
		smu_v13_0_4_get_dpm_profile_freq(smu, level, SMU_DCLK, &dclk_min, &dclk_max);
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
		return 0;
	default:
		dev_err(adev->dev, "Invalid performance level %d\n", level);
		return -EINVAL;
	}

	if (sclk_min && sclk_max) {
		ret = smu_v13_0_4_set_soft_freq_limited_range(smu,
							      SMU_SCLK,
							      sclk_min,
							      sclk_max);
		if (ret)
			return ret;

		smu->gfx_actual_hard_min_freq = sclk_min;
		smu->gfx_actual_soft_max_freq = sclk_max;
	}

	if (fclk_min && fclk_max) {
		ret = smu_v13_0_4_set_soft_freq_limited_range(smu,
							      SMU_FCLK,
							      fclk_min,
							      fclk_max);
		if (ret)
			return ret;
	}

	if (socclk_min && socclk_max) {
		ret = smu_v13_0_4_set_soft_freq_limited_range(smu,
							      SMU_SOCCLK,
							      socclk_min,
							      socclk_max);
		if (ret)
			return ret;
	}

	if (vclk_min && vclk_max) {
		ret = smu_v13_0_4_set_soft_freq_limited_range(smu,
							      SMU_VCLK,
							      vclk_min,
							      vclk_max);
		if (ret)
			return ret;
	}

	if (dclk_min && dclk_max) {
		ret = smu_v13_0_4_set_soft_freq_limited_range(smu,
							      SMU_DCLK,
							      dclk_min,
							      dclk_max);
		if (ret)
			return ret;
	}
	return ret;
}

/* Ask PMFW to perform a GFX mode-2 (engine-level) reset. */
static int smu_v13_0_4_mode2_reset(struct smu_context *smu)
{
	return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset,
					       SMU_RESET_MODE_2, NULL);
}

/*
 * Seed the fine-grain (OD) GFX clock bookkeeping from the DpmClocks_t
 * limits; the "actual" values start at 0 meaning "not overridden yet".
 */
static int smu_v13_0_4_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;

	smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
	smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
	smu->gfx_actual_hard_min_freq = 0;
	smu->gfx_actual_soft_max_freq = 0;

	return 0;
}

/* pptable_funcs callbacks for SMU v13.0.4: mix of common smu_v13_0_* /
 * smu_cmn_* helpers and the ASIC-specific implementations above. */
static const struct pptable_funcs smu_v13_0_4_ppt_funcs = {
	.check_fw_status = smu_v13_0_check_fw_status,
	.check_fw_version = smu_v13_0_check_fw_version,
	.init_smc_tables = smu_v13_0_4_init_smc_tables,
	.fini_smc_tables = smu_v13_0_4_fini_smc_tables,
	.get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
	.system_features_control = smu_v13_0_4_system_features_control,
	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
	.send_smc_msg = smu_cmn_send_smc_msg,
	.dpm_set_vcn_enable = smu_v13_0_set_vcn_enable,
	.dpm_set_jpeg_enable = smu_v13_0_set_jpeg_enable,
	.set_default_dpm_table = smu_v13_0_set_default_dpm_tables,
	.read_sensor = smu_v13_0_4_read_sensor,
	.is_dpm_running = smu_v13_0_4_is_dpm_running,
	.set_watermarks_table = smu_v13_0_4_set_watermarks_table,
	.get_gpu_metrics = smu_v13_0_4_get_gpu_metrics,
	.get_enabled_mask = smu_cmn_get_enabled_mask,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_driver_table_location = smu_v13_0_set_driver_table_location,
	.gfx_off_control = smu_v13_0_gfx_off_control,
	.mode2_reset = smu_v13_0_4_mode2_reset,
	.get_dpm_ultimate_freq = smu_v13_0_4_get_dpm_ultimate_freq,
	.od_edit_dpm_table = smu_v13_0_od_edit_dpm_table,
	.print_clk_levels = smu_v13_0_4_print_clk_levels,
	.force_clk_levels = smu_v13_0_4_force_clk_levels,
	.set_performance_level = smu_v13_0_4_set_performance_level,
	.set_fine_grain_gfx_freq_parameters = smu_v13_0_4_set_fine_grain_gfx_freq_parameters,
	.set_gfx_power_up_by_imu = smu_v13_0_set_gfx_power_up_by_imu,
};

/* Point the message/param/response mailbox registers at this ASIC's
 * MP1 C2PMSG offsets (13.0.4 uses different offsets than the common
 * smu_v13_0 set — see smu_v13_0_4_set_ppt_funcs()). */
static void smu_v13_0_4_set_smu_mailbox_registers(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
	smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
	smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
}

/*
 * Entry point called from the common SWSMU code: install the v13.0.4
 * callback table, cmn2asic maps and driver-if version, mark the part as
 * an APU, and select the mailbox register set by MP1 IP version.
 */
void smu_v13_0_4_set_ppt_funcs(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	smu->ppt_funcs = &smu_v13_0_4_ppt_funcs;
	smu->message_map = smu_v13_0_4_message_map;
	smu->feature_map = smu_v13_0_4_feature_mask_map;
	smu->table_map = smu_v13_0_4_table_map;
	smu->smc_driver_if_version = SMU13_0_4_DRIVER_IF_VERSION;
	smu->is_apu = true;

	if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 4))
		smu_v13_0_4_set_smu_mailbox_registers(smu);
	else
		smu_v13_0_set_smu_mailbox_registers(smu);
}