1/* 2 * Copyright 2023 Advanced Micro Devices, Inc. 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice shall be included in 12 * all copies or substantial portions of the Software. 13 * 14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 * OTHER DEALINGS IN THE SOFTWARE. 21 */ 22 23#include <linux/firmware.h> 24#include <linux/module.h> 25#include <linux/pci.h> 26#include <linux/reboot.h> 27 28#define SWSMU_CODE_LAYER_L3 29 30#include "amdgpu.h" 31#include "amdgpu_smu.h" 32#include "atomfirmware.h" 33#include "amdgpu_atomfirmware.h" 34#include "amdgpu_atombios.h" 35#include "smu_v14_0.h" 36#include "soc15_common.h" 37#include "atom.h" 38#include "amdgpu_ras.h" 39#include "smu_cmn.h" 40 41#include "asic_reg/mp/mp_14_0_0_offset.h" 42#include "asic_reg/mp/mp_14_0_0_sh_mask.h" 43 44/* 45 * DO NOT use these for err/warn/info/debug messages. 46 * Use dev_err, dev_warn, dev_info and dev_dbg instead. 47 * They are more MGPU friendly. 
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug

MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin");

#define ENABLE_IMU_ARG_GFXOFF_ENABLE 1

/*
 * Request the SMU (MP1) firmware image for this IP version and, when PSP
 * front-door loading is in use, register it so PSP can load it.
 * Returns 0 on success or a negative errno; on failure the firmware
 * reference is released before returning.
 */
int smu_v14_0_init_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	char fw_name[30];
	char ucode_prefix[15];
	int err = 0;
	const struct smc_firmware_header_v1_0 *hdr;
	const struct common_firmware_header *header;
	struct amdgpu_firmware_info *ucode = NULL;

	/* doesn't need to load smu firmware in IOV mode */
	if (amdgpu_sriov_vf(adev))
		return 0;

	/* Build "amdgpu/<ip_prefix>.bin" from the MP1 IP version. */
	amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix));

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s.bin", ucode_prefix);

	err = amdgpu_ucode_request(adev, &adev->pm.fw, fw_name);
	if (err)
		goto out;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	amdgpu_ucode_print_smc_hdr(&hdr->header);
	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);

	/* PSP front-door load: account the image in the global fw list. */
	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
		ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
		ucode->fw = adev->pm.fw;
		header = (const struct common_firmware_header *)ucode->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
	}

out:
	if (err)
		amdgpu_ucode_release(&adev->pm.fw);
	return err;
}

/* Release the firmware reference taken by smu_v14_0_init_microcode(). */
void smu_v14_0_fini_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	amdgpu_ucode_release(&adev->pm.fw);
	adev->pm.fw_version = 0;
}

/*
 * Back-door (driver direct-to-SRAM) SMU firmware load. The whole body is
 * compiled out (#if 0) — presumably kept as a template until this path is
 * validated on SMU v14 hardware; today the function is a no-op returning 0.
 */
int smu_v14_0_load_microcode(struct smu_context *smu)
{
#if 0
	struct amdgpu_device *adev = smu->adev;
	const uint32_t *src;
	const struct smc_firmware_header_v1_0 *hdr;
	uint32_t addr_start = MP1_SRAM;
	uint32_t i;
	uint32_t smc_fw_size;
	uint32_t mp1_fw_flags;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	src = (const uint32_t *)(adev->pm.fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	smc_fw_size = hdr->header.ucode_size_bytes;

	/* Copy the image dword-by-dword into MP1 SRAM. */
	for (i = 1; i < smc_fw_size/4 - 1; i++) {
		WREG32_PCIE(addr_start, src[i]);
		addr_start += 4;
	}

	/* Pulse the LX3 reset bit to restart the MP1 core on the new image. */
	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		    1 & MP1_SMN_PUB_CTRL__LX3_RESET_MASK);
	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		    1 & ~MP1_SMN_PUB_CTRL__LX3_RESET_MASK);

	/* Poll until the firmware reports interrupts enabled (i.e. alive). */
	for (i = 0; i < adev->usec_timeout; i++) {
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
					   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
		if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
		    MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout)
		return -ETIME;

#endif
	return 0;

}

/*
 * When SCPM is enabled and PSP loads firmware, the soft pptable must be
 * delivered to PSP as a pseudo-ucode entry rather than set by the driver.
 * A pptable_id of 0 means the vbios carries the pptable, so nothing to do.
 */
int smu_v14_0_init_pptable_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_firmware_info *ucode = NULL;
	uint32_t size = 0, pptable_id = 0;
	int ret = 0;
	void *table;

	/* doesn't need to load smu firmware in IOV mode */
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
		return 0;

	if (!adev->scpm_enabled)
		return 0;

	/* override pptable_id from driver parameter */
	if (amdgpu_smu_pptable_id >= 0) {
		pptable_id = amdgpu_smu_pptable_id;
		dev_info(adev->dev, "override pptable id %d\n", pptable_id);
	} else {
		pptable_id = smu->smu_table.boot_values.pp_table_id;
	}

	/* "pptable_id == 0" means vbios carries the pptable. */
	if (!pptable_id)
		return 0;

	ret = smu_v14_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);
	if (ret)
		return ret;

	/* Hand the table to PSP through the AMDGPU_UCODE_ID_PPTABLE slot. */
	smu->pptable_firmware.data = table;
	smu->pptable_firmware.size = size;

	ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_PPTABLE];
	ucode->ucode_id = AMDGPU_UCODE_ID_PPTABLE;
	ucode->fw = &smu->pptable_firmware;
	adev->firmware.fw_size +=
		ALIGN(smu->pptable_firmware.size, PAGE_SIZE);

	return 0;
}

/*
 * Check whether MP1 firmware is up: the INTERRUPTS_ENABLED flag in the
 * firmware-flags register is the liveness indicator.
 * Returns 0 when alive, -EIO otherwise.
 */
int smu_v14_0_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags = RREG32_PCIE(MP1_Public |
				   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	if ((mp1_fw_flags & MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_CRU1_MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}

/*
 * Read the firmware-reported version and record the driver-interface
 * version expected for this MP1 IP revision; a mismatch is only logged,
 * never fatal (see comment below near the comparison).
 */
int smu_v14_0_check_fw_version(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t if_version = 0xff, smu_version = 0xff;
	uint8_t smu_program, smu_major, smu_minor, smu_debug;
	int ret = 0;

	ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
	if (ret)
		return ret;

	/* Version dword is packed program.major.minor.debug, one byte each. */
	smu_program = (smu_version >> 24) & 0xff;
	smu_major = (smu_version >> 16) & 0xff;
	smu_minor = (smu_version >> 8) & 0xff;
	smu_debug = (smu_version >> 0) & 0xff;
	if (smu->is_apu)
		adev->pm.fw_version = smu_version;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(14, 0, 2):
		smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_2;
		break;
	case IP_VERSION(14, 0, 0):
		smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_0;
		break;
	case IP_VERSION(14, 0, 1):
		smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_SMU_V14_0_1;
		break;

	default:
		dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
			amdgpu_ip_version(adev, MP1_HWIP, 0));
		smu->smc_driver_if_version = SMU14_DRIVER_IF_VERSION_INV;
		break;
	}

	if (adev->pm.fw)
		dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n",
			smu_program, smu_version, smu_major, smu_minor, smu_debug);

	/*
	 * 1. if_version mismatch is not critical as our fw is designed
	 * to be backward compatible.
	 * 2. New fw usually brings some optimizations. But that's visible
	 * only on the paired driver.
	 * Considering above, we just leave user a verbal message instead
	 * of halt driver loading.
	 */
	if (if_version != smu->smc_driver_if_version) {
		dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
			 "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
			 smu->smc_driver_if_version, if_version,
			 smu_program, smu_version, smu_major, smu_minor, smu_debug);
		dev_info(adev->dev, "SMU driver if version not matched\n");
	}

	return ret;
}

/*
 * v2.0 SMC firmware header: a single embedded pptable located by
 * ppt_offset_bytes/ppt_size_bytes. Returns a pointer INTO the firmware
 * image — no copy is made, so the fw must stay loaded while used.
 */
static int smu_v14_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t ppt_offset_bytes;
	const struct smc_firmware_header_v2_0 *v2;

	v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;

	ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
	*size = le32_to_cpu(v2->ppt_size_bytes);
	*table = (uint8_t *)v2 + ppt_offset_bytes;

	return 0;
}

/*
 * v2.1 SMC firmware header: an array of pptable entries; select the one
 * whose id matches pptable_id. Returns -EINVAL when no entry matches.
 * As with v2.0, the returned pointer aliases the firmware image.
 */
static int smu_v14_0_set_pptable_v2_1(struct smu_context *smu, void **table,
				      uint32_t *size, uint32_t pptable_id)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v2_1 *v2_1;
	struct smc_soft_pptable_entry *entries;
	uint32_t pptable_count = 0;
	int i = 0;

	v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
	entries = (struct smc_soft_pptable_entry *)
		((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
	pptable_count = le32_to_cpu(v2_1->pptable_count);
	for (i = 0; i < pptable_count; i++) {
		if (le32_to_cpu(entries[i].id) == pptable_id) {
			*table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
			*size = le32_to_cpu(entries[i].ppt_size_bytes);
			break;
		}
	}

	/* Loop ran to completion without a match. */
	if (i == pptable_count)
		return -EINVAL;

	return 0;
}

/* Fetch the pptable from the vbios powerplayinfo data table. */
static int smu_v14_0_get_pptable_from_vbios(struct smu_context *smu, void **table, uint32_t *size)
{
	struct amdgpu_device *adev = smu->adev;
	uint16_t atom_table_size;
	uint8_t frev, crev;
	int ret, index;

	dev_info(adev->dev, "use vbios provided pptable\n");
	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    powerplayinfo);

	ret = amdgpu_atombios_get_data_table(adev, index, &atom_table_size, &frev, &crev,
					     (uint8_t **)table);
	if (ret)
		return ret;

	if (size)
		*size = atom_table_size;

	return 0;
}

/*
 * Locate the soft pptable embedded in the SMU firmware image, dispatching
 * on the firmware header minor version (only header version 2.x carries
 * an embedded pptable).
 */
int smu_v14_0_get_pptable_from_firmware(struct smu_context *smu,
					void **table,
					uint32_t *size,
					uint32_t pptable_id)
{
	const struct smc_firmware_header_v1_0 *hdr;
	struct amdgpu_device *adev = smu->adev;
	uint16_t version_major, version_minor;
	int ret;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	if (!hdr)
		return -EINVAL;

	dev_info(adev->dev, "use driver provided pptable %d\n", pptable_id);

	version_major = le16_to_cpu(hdr->header.header_version_major);
	version_minor = le16_to_cpu(hdr->header.header_version_minor);
	if (version_major != 2) {
		dev_err(adev->dev, "Unsupported smu firmware version %d.%d\n",
			version_major, version_minor);
		return -EINVAL;
	}

	switch (version_minor) {
	case 0:
		ret = smu_v14_0_set_pptable_v2_0(smu, table, size);
		break;
	case 1:
		ret = smu_v14_0_set_pptable_v2_1(smu, table, size, pptable_id);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/*
 * Pick the pptable source (vbios vs firmware image) and publish it to
 * smu_table. Existing power_play_table/size values are not overwritten,
 * so an earlier setup (e.g. a hardcoded table) takes precedence.
 */
int smu_v14_0_setup_pptable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t size = 0, pptable_id = 0;
	void *table;
	int ret = 0;

	/* override pptable_id from driver parameter */
	if (amdgpu_smu_pptable_id >= 0) {
		pptable_id = amdgpu_smu_pptable_id;
		dev_info(adev->dev, "override pptable id %d\n", pptable_id);
	} else {
		pptable_id = smu->smu_table.boot_values.pp_table_id;
	}

	/* force using vbios pptable in sriov mode */
	if ((amdgpu_sriov_vf(adev) || !pptable_id) && (amdgpu_emu_mode != 1))
		ret = smu_v14_0_get_pptable_from_vbios(smu, &table, &size);
	else
		ret = smu_v14_0_get_pptable_from_firmware(smu, &table, &size, pptable_id);

	if (ret)
		return ret;

	if (!smu->smu_table.power_play_table)
		smu->smu_table.power_play_table = table;
	if (!smu->smu_table.power_play_table_size)
		smu->smu_table.power_play_table_size = size;

	return 0;
}

/*
 * Allocate the driver-side SMC table buffers. On any allocation failure
 * the goto-chain below unwinds everything allocated so far (kfree(NULL)
 * is a no-op, so partially-taken paths are safe).
 */
int smu_v14_0_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	int ret = 0;

	smu_table->driver_pptable =
		kzalloc(tables[SMU_TABLE_PPTABLE].size, GFP_KERNEL);
	if (!smu_table->driver_pptable) {
		ret = -ENOMEM;
		goto err0_out;
	}

	smu_table->max_sustainable_clocks =
		kzalloc(sizeof(struct smu_14_0_max_sustainable_clocks), GFP_KERNEL);
	if (!smu_table->max_sustainable_clocks) {
		ret = -ENOMEM;
		goto err1_out;
	}

	/* Overdrive tables exist only when the ASIC declares a size. */
	if (tables[SMU_TABLE_OVERDRIVE].size) {
		smu_table->overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->overdrive_table) {
			ret = -ENOMEM;
			goto err2_out;
		}

		smu_table->boot_overdrive_table =
			kzalloc(tables[SMU_TABLE_OVERDRIVE].size, GFP_KERNEL);
		if (!smu_table->boot_overdrive_table) {
			ret = -ENOMEM;
			goto err3_out;
		}
	}
	smu_table->combo_pptable =
		kzalloc(tables[SMU_TABLE_COMBO_PPTABLE].size, GFP_KERNEL);
	if (!smu_table->combo_pptable) {
		ret = -ENOMEM;
		goto err4_out;
	}

	return 0;

	/* Unwind in reverse allocation order; kfree(NULL) is harmless. */
err4_out:
	kfree(smu_table->boot_overdrive_table);
err3_out:
	kfree(smu_table->overdrive_table);
err2_out:
	kfree(smu_table->max_sustainable_clocks);
err1_out:
	kfree(smu_table->driver_pptable);
err0_out:
	return ret;
}

/*
 * Free every buffer allocated by smu_v14_0_init_smc_tables() plus the
 * lazily-allocated metrics/watermarks/ecc tables and the DPM contexts,
 * NULLing each pointer so a double fini is safe.
 */
int smu_v14_0_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	kfree(smu_table->gpu_metrics_table);
	kfree(smu_table->combo_pptable);
	kfree(smu_table->boot_overdrive_table);
	kfree(smu_table->overdrive_table);
	kfree(smu_table->max_sustainable_clocks);
	kfree(smu_table->driver_pptable);
	smu_table->gpu_metrics_table = NULL;
	smu_table->combo_pptable = NULL;
	smu_table->boot_overdrive_table = NULL;
	smu_table->overdrive_table = NULL;
	smu_table->max_sustainable_clocks = NULL;
	smu_table->driver_pptable = NULL;
	kfree(smu_table->hardcode_pptable);
	smu_table->hardcode_pptable = NULL;

	kfree(smu_table->ecc_table);
	kfree(smu_table->metrics_table);
	kfree(smu_table->watermarks_table);
	smu_table->ecc_table = NULL;
	smu_table->metrics_table = NULL;
	smu_table->watermarks_table = NULL;
	smu_table->metrics_time = 0;

	kfree(smu_dpm->dpm_context);
	kfree(smu_dpm->golden_dpm_context);
	kfree(smu_dpm->dpm_current_power_state);
	kfree(smu_dpm->dpm_request_power_state);
	smu_dpm->dpm_context = NULL;
	smu_dpm->golden_dpm_context = NULL;
	smu_dpm->dpm_context_size = 0;
	smu_dpm->dpm_current_power_state = NULL;
	smu_dpm->dpm_request_power_state = NULL;

	return 0;
}

/*
 * Allocate the zeroed per-device power (DPM) context. Rejects a second
 * init while a context is already present.
 */
int smu_v14_0_init_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (smu_power->power_context || smu_power->power_context_size != 0)
		return -EINVAL;

	smu_power->power_context = kzalloc(sizeof(struct smu_14_0_dpm_context),
					   GFP_KERNEL);
	if (!smu_power->power_context)
		return -ENOMEM;
	smu_power->power_context_size = sizeof(struct smu_14_0_dpm_context);

	return 0;
}

/* Free the power context allocated by smu_v14_0_init_power(). */
int smu_v14_0_fini_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (!smu_power->power_context || smu_power->power_context_size == 0)
		return -EINVAL;

	kfree(smu_power->power_context);
	smu_power->power_context = NULL;
	smu_power->power_context_size = 0;

	return 0;
}

/*
 * Populate smu_table.boot_values from the vbios firmwareinfo data table
 * (format revision 3 required), dispatching on content revision, then
 * optionally refine the boot clocks from the smu_info table when present.
 */
int smu_v14_0_get_vbios_bootup_values(struct smu_context *smu)
{
	int ret, index;
	uint16_t size;
	uint8_t frev, crev;
	struct atom_common_table_header *header;
	struct atom_firmware_info_v3_4 *v_3_4;
	struct atom_firmware_info_v3_3 *v_3_3;
	struct atom_firmware_info_v3_1 *v_3_1;
	struct atom_smu_info_v3_6 *smu_info_v3_6;
	struct atom_smu_info_v4_0 *smu_info_v4_0;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
					     (uint8_t **)&header);
	if (ret)
		return ret;

	if (header->format_revision != 3) {
		dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu14\n");
		return -EINVAL;
	}

	switch (header->content_revision) {
	case 0:
	case 1:
	case 2:
		v_3_1 = (struct atom_firmware_info_v3_1 *)header;
		smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
		/* v3.1 carries no pptable id. */
		smu->smu_table.boot_values.pp_table_id = 0;
		break;
	case 3:
		v_3_3 = (struct atom_firmware_info_v3_3 *)header;
		smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
		break;
	case 4:
	default:
		/* Revisions >= 4 are parsed with the v3.4 layout. */
		v_3_4 = (struct atom_firmware_info_v3_4 *)header;
		smu->smu_table.boot_values.revision = v_3_4->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_4->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_4->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_4->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_4->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_4->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_4->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_4->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_4->pplib_pptable_id;
		break;
	}

	smu->smu_table.boot_values.format_revision = header->format_revision;
	smu->smu_table.boot_values.content_revision = header->content_revision;

	/* Refine boot clocks from the optional smu_info table. */
	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smu_info);
	if (!amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev,
					    (uint8_t **)&header)) {

		if ((frev == 3) && (crev == 6)) {
			smu_info_v3_6 = (struct atom_smu_info_v3_6 *)header;

			smu->smu_table.boot_values.socclk = smu_info_v3_6->bootup_socclk_10khz;
			smu->smu_table.boot_values.vclk = smu_info_v3_6->bootup_vclk_10khz;
			smu->smu_table.boot_values.dclk = smu_info_v3_6->bootup_dclk_10khz;
			smu->smu_table.boot_values.fclk = smu_info_v3_6->bootup_fclk_10khz;
		} else if ((frev == 3) && (crev == 1)) {
			/* smu_info v3.1 carries none of these clocks; done. */
			return 0;
		} else if ((frev == 4) && (crev == 0)) {
			smu_info_v4_0 = (struct atom_smu_info_v4_0 *)header;

			smu->smu_table.boot_values.socclk = smu_info_v4_0->bootup_socclk_10khz;
			smu->smu_table.boot_values.dcefclk = smu_info_v4_0->bootup_dcefclk_10khz;
			smu->smu_table.boot_values.vclk = smu_info_v4_0->bootup_vclk0_10khz;
			smu->smu_table.boot_values.dclk = smu_info_v4_0->bootup_dclk0_10khz;
			smu->smu_table.boot_values.fclk = smu_info_v4_0->bootup_fclk_10khz;
		} else {
			dev_warn(smu->adev->dev, "Unexpected and unhandled version: %d.%d\n",
				 (uint32_t)frev, (uint32_t)crev);
		}
	}

	return 0;
}


/*
 * Tell the SMU where the driver-allocated DRAM log pool lives: the
 * 64-bit MC address is split into high/low message parameters, followed
 * by the pool size. Silently succeeds when no pool was allocated.
 */
int smu_v14_0_notify_memory_pool_location(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;
	uint64_t address;
	uint32_t address_low, address_high;

	if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
		return ret;

	address = memory_pool->mc_address;
	address_high = (uint32_t)upper_32_bits(address);
	address_low = (uint32_t)lower_32_bits(address);

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
					      address_high, NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
					      address_low, NULL);
	if (ret)
		return ret;
	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
					      (uint32_t)memory_pool->size, NULL);
	if (ret)
		return ret;

	return ret;
}

/*
 * Publish the driver table MC address to the SMU (high dword first).
 * No-op (returns 0) when no driver table has been allocated.
 */
int smu_v14_0_set_driver_table_location(struct smu_context *smu)
{
	struct smu_table *driver_table = &smu->smu_table.driver_table;
	int ret = 0;

	if (driver_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetDriverDramAddrHigh,
						      upper_32_bits(driver_table->mc_address),
						      NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_SetDriverDramAddrLow,
							      lower_32_bits(driver_table->mc_address),
							      NULL);
	}

	return ret;
}

/*
 * Publish the tools (PM status log) table MC address to the SMU,
 * mirroring smu_v14_0_set_driver_table_location().
 */
int smu_v14_0_set_tool_table_location(struct smu_context *smu)
{
	int ret = 0;
	struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];

	if (tool_table->mc_address) {
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetToolsDramAddrHigh,
						      upper_32_bits(tool_table->mc_address),
						      NULL);
		if (!ret)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_SetToolsDramAddrLow,
							      lower_32_bits(tool_table->mc_address),
							      NULL);
	}

	return ret;
}

/*
 * Send the 64-bit allowed-features bitmap to the SMU as two 32-bit
 * message parameters (high word first).
 */
int smu_v14_0_set_allowed_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
uint32_t feature_mask[2]; 717 718 if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || 719 feature->feature_num < 64) 720 return -EINVAL; 721 722 bitmap_to_arr32(feature_mask, feature->allowed, 64); 723 724 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh, 725 feature_mask[1], NULL); 726 if (ret) 727 return ret; 728 729 return smu_cmn_send_smc_msg_with_param(smu, 730 SMU_MSG_SetAllowedFeaturesMaskLow, 731 feature_mask[0], 732 NULL); 733} 734 735int smu_v14_0_gfx_off_control(struct smu_context *smu, bool enable) 736{ 737 int ret = 0; 738 struct amdgpu_device *adev = smu->adev; 739 740 switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { 741 case IP_VERSION(14, 0, 2): 742 case IP_VERSION(14, 0, 0): 743 case IP_VERSION(14, 0, 1): 744 if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) 745 return 0; 746 if (enable) 747 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL); 748 else 749 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL); 750 break; 751 default: 752 break; 753 } 754 755 return ret; 756} 757 758int smu_v14_0_system_features_control(struct smu_context *smu, 759 bool en) 760{ 761 return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures : 762 SMU_MSG_DisableAllSmuFeatures), NULL); 763} 764 765int smu_v14_0_notify_display_change(struct smu_context *smu) 766{ 767 int ret = 0; 768 769 if (!smu->pm_enabled) 770 return ret; 771 772 if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) && 773 smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM) 774 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL); 775 776 return ret; 777} 778 779int smu_v14_0_get_current_power_limit(struct smu_context *smu, 780 uint32_t *power_limit) 781{ 782 int power_src; 783 int ret = 0; 784 785 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) 786 return -EINVAL; 787 788 power_src = smu_cmn_to_asic_specific_index(smu, 789 CMN2ASIC_MAPPING_PWR, 790 smu->adev->pm.ac_power ? 
791 SMU_POWER_SOURCE_AC : 792 SMU_POWER_SOURCE_DC); 793 if (power_src < 0) 794 return -EINVAL; 795 796 ret = smu_cmn_send_smc_msg_with_param(smu, 797 SMU_MSG_GetPptLimit, 798 power_src << 16, 799 power_limit); 800 if (ret) 801 dev_err(smu->adev->dev, "[%s] get PPT limit failed!", __func__); 802 803 return ret; 804} 805 806int smu_v14_0_set_power_limit(struct smu_context *smu, 807 enum smu_ppt_limit_type limit_type, 808 uint32_t limit) 809{ 810 int ret = 0; 811 812 if (limit_type != SMU_DEFAULT_PPT_LIMIT) 813 return -EINVAL; 814 815 if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { 816 dev_err(smu->adev->dev, "Setting new power limit is not supported!\n"); 817 return -EOPNOTSUPP; 818 } 819 820 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit, NULL); 821 if (ret) { 822 dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__); 823 return ret; 824 } 825 826 smu->current_power_limit = limit; 827 828 return 0; 829} 830 831static int smu_v14_0_set_irq_state(struct amdgpu_device *adev, 832 struct amdgpu_irq_src *source, 833 unsigned tyep, 834 enum amdgpu_interrupt_state state) 835{ 836 uint32_t val = 0; 837 838 switch (state) { 839 case AMDGPU_IRQ_STATE_DISABLE: 840 /* For THM irqs */ 841 // TODO 842 843 /* For MP1 SW irqs */ 844 val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); 845 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 1); 846 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); 847 848 break; 849 case AMDGPU_IRQ_STATE_ENABLE: 850 /* For THM irqs */ 851 // TODO 852 853 /* For MP1 SW irqs */ 854 val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT); 855 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, ID, 0xFE); 856 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT, VALID, 0); 857 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT, val); 858 859 val = RREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL); 860 val = REG_SET_FIELD(val, MP1_SMN_IH_SW_INT_CTRL, INT_MASK, 0); 861 WREG32_SOC15(MP1, 0, regMP1_SMN_IH_SW_INT_CTRL, val); 862 863 
break; 864 default: 865 break; 866 } 867 868 return 0; 869} 870 871static int smu_v14_0_irq_process(struct amdgpu_device *adev, 872 struct amdgpu_irq_src *source, 873 struct amdgpu_iv_entry *entry) 874{ 875 // TODO 876 877 return 0; 878} 879 880static const struct amdgpu_irq_src_funcs smu_v14_0_irq_funcs = { 881 .set = smu_v14_0_set_irq_state, 882 .process = smu_v14_0_irq_process, 883}; 884 885int smu_v14_0_register_irq_handler(struct smu_context *smu) 886{ 887 struct amdgpu_device *adev = smu->adev; 888 struct amdgpu_irq_src *irq_src = &smu->irq_source; 889 int ret = 0; 890 891 if (amdgpu_sriov_vf(adev)) 892 return 0; 893 894 irq_src->num_types = 1; 895 irq_src->funcs = &smu_v14_0_irq_funcs; 896 897 // TODO: THM related 898 899 ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_MP1, 900 SMU_IH_INTERRUPT_ID_TO_DRIVER, 901 irq_src); 902 if (ret) 903 return ret; 904 905 return ret; 906} 907 908static int smu_v14_0_wait_for_reset_complete(struct smu_context *smu, 909 uint64_t event_arg) 910{ 911 int ret = 0; 912 913 dev_dbg(smu->adev->dev, "waiting for smu reset complete\n"); 914 ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GfxDriverResetRecovery, NULL); 915 916 return ret; 917} 918 919int smu_v14_0_wait_for_event(struct smu_context *smu, enum smu_event_type event, 920 uint64_t event_arg) 921{ 922 int ret = -EINVAL; 923 924 switch (event) { 925 case SMU_EVENT_RESET_COMPLETE: 926 ret = smu_v14_0_wait_for_reset_complete(smu, event_arg); 927 break; 928 default: 929 break; 930 } 931 932 return ret; 933} 934 935int smu_v14_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, 936 uint32_t *min, uint32_t *max) 937{ 938 int ret = 0, clk_id = 0; 939 uint32_t param = 0; 940 uint32_t clock_limit; 941 942 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) { 943 switch (clk_type) { 944 case SMU_MCLK: 945 case SMU_UCLK: 946 clock_limit = smu->smu_table.boot_values.uclk; 947 break; 948 case SMU_GFXCLK: 949 case SMU_SCLK: 950 clock_limit = 
				smu->smu_table.boot_values.gfxclk;
			break;
		case SMU_SOCCLK:
			clock_limit = smu->smu_table.boot_values.socclk;
			break;
		default:
			clock_limit = 0;
			break;
		}

		/* clock in Mhz unit */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;

		return 0;
	}

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0) {
		ret = -EINVAL;
		goto failed;
	}
	/* Clock domain id travels in the upper 16 bits of the parameter. */
	param = (clk_id & 0xffff) << 16;

	if (max) {
		/* DC mode has its own (lower) max DPM frequency. */
		if (smu->adev->pm.ac_power)
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_GetMaxDpmFreq,
							      param,
							      max);
		else
			ret = smu_cmn_send_smc_msg_with_param(smu,
							      SMU_MSG_GetDcModeMaxDpmFreq,
							      param,
							      max);
		if (ret)
			goto failed;
	}

	if (min) {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
		if (ret)
			goto failed;
	}

failed:
	return ret;
}

/*
 * Apply a soft min/max frequency window to a clock domain. A zero min or
 * max leaves that bound untouched; silently succeeds when DPM is disabled
 * for the domain. Parameter layout: clk_id in bits 31:16, freq (MHz) in
 * bits 15:0.
 */
int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t min,
					  uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
						      param, NULL);
		if (ret)
			goto out;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
						      param, NULL);
		if (ret)
			goto out;
	}

out:
	return ret;
}

/* Hard (enforced) min/max counterpart of the soft-range setter above. */
int smu_v14_0_set_hard_freq_limited_range(struct smu_context *smu,
					  enum smu_clk_type clk_type,
					  uint32_t min,
uint32_t max) 1044{ 1045 int ret = 0, clk_id = 0; 1046 uint32_t param; 1047 1048 if (min <= 0 && max <= 0) 1049 return -EINVAL; 1050 1051 if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) 1052 return 0; 1053 1054 clk_id = smu_cmn_to_asic_specific_index(smu, 1055 CMN2ASIC_MAPPING_CLK, 1056 clk_type); 1057 if (clk_id < 0) 1058 return clk_id; 1059 1060 if (max > 0) { 1061 param = (uint32_t)((clk_id << 16) | (max & 0xffff)); 1062 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq, 1063 param, NULL); 1064 if (ret) 1065 return ret; 1066 } 1067 1068 if (min > 0) { 1069 param = (uint32_t)((clk_id << 16) | (min & 0xffff)); 1070 ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq, 1071 param, NULL); 1072 if (ret) 1073 return ret; 1074 } 1075 1076 return ret; 1077} 1078 1079int smu_v14_0_set_performance_level(struct smu_context *smu, 1080 enum amd_dpm_forced_level level) 1081{ 1082 struct smu_14_0_dpm_context *dpm_context = 1083 smu->smu_dpm.dpm_context; 1084 struct smu_14_0_dpm_table *gfx_table = 1085 &dpm_context->dpm_tables.gfx_table; 1086 struct smu_14_0_dpm_table *mem_table = 1087 &dpm_context->dpm_tables.uclk_table; 1088 struct smu_14_0_dpm_table *soc_table = 1089 &dpm_context->dpm_tables.soc_table; 1090 struct smu_14_0_dpm_table *vclk_table = 1091 &dpm_context->dpm_tables.vclk_table; 1092 struct smu_14_0_dpm_table *dclk_table = 1093 &dpm_context->dpm_tables.dclk_table; 1094 struct smu_14_0_dpm_table *fclk_table = 1095 &dpm_context->dpm_tables.fclk_table; 1096 struct smu_umd_pstate_table *pstate_table = 1097 &smu->pstate_table; 1098 struct amdgpu_device *adev = smu->adev; 1099 uint32_t sclk_min = 0, sclk_max = 0; 1100 uint32_t mclk_min = 0, mclk_max = 0; 1101 uint32_t socclk_min = 0, socclk_max = 0; 1102 uint32_t vclk_min = 0, vclk_max = 0; 1103 uint32_t dclk_min = 0, dclk_max = 0; 1104 uint32_t fclk_min = 0, fclk_max = 0; 1105 int ret = 0, i; 1106 1107 switch (level) { 1108 case AMD_DPM_FORCED_LEVEL_HIGH: 1109 sclk_min = sclk_max = 
			gfx_table->max;
		mclk_min = mclk_max = mem_table->max;
		socclk_min = socclk_max = soc_table->max;
		vclk_min = vclk_max = vclk_table->max;
		dclk_min = dclk_max = dclk_table->max;
		fclk_min = fclk_max = fclk_table->max;
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		/* pin every domain to its lowest DPM level */
		sclk_min = sclk_max = gfx_table->min;
		mclk_min = mclk_max = mem_table->min;
		socclk_min = socclk_max = soc_table->min;
		vclk_min = vclk_max = vclk_table->min;
		dclk_min = dclk_max = dclk_table->min;
		fclk_min = fclk_max = fclk_table->min;
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		/* let the SMU range over the full min..max window */
		sclk_min = gfx_table->min;
		sclk_max = gfx_table->max;
		mclk_min = mem_table->min;
		mclk_max = mem_table->max;
		socclk_min = soc_table->min;
		socclk_max = soc_table->max;
		vclk_min = vclk_table->min;
		vclk_max = vclk_table->max;
		dclk_min = dclk_table->min;
		dclk_max = dclk_table->max;
		fclk_min = fclk_table->min;
		fclk_max = fclk_table->max;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.standard;
		mclk_min = mclk_max = pstate_table->uclk_pstate.standard;
		socclk_min = socclk_max = pstate_table->socclk_pstate.standard;
		vclk_min = vclk_max = pstate_table->vclk_pstate.standard;
		dclk_min = dclk_max = pstate_table->dclk_pstate.standard;
		fclk_min = fclk_max = pstate_table->fclk_pstate.standard;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.min;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		mclk_min = mclk_max = pstate_table->uclk_pstate.min;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		sclk_min = sclk_max = pstate_table->gfxclk_pstate.peak;
		mclk_min = mclk_max = pstate_table->uclk_pstate.peak;
		socclk_min = socclk_max = pstate_table->socclk_pstate.peak;
		vclk_min = vclk_max = pstate_table->vclk_pstate.peak;
		dclk_min = dclk_max = pstate_table->dclk_pstate.peak;
		fclk_min = fclk_max = pstate_table->fclk_pstate.peak;
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
		/* nothing to reprogram for these levels */
		return 0;
	default:
		dev_err(adev->dev, "Invalid performance level %d\n", level);
		return -EINVAL;
	}

	/* a zero min or max means "leave this clock domain alone" */
	if (sclk_min && sclk_max) {
		ret = smu_v14_0_set_soft_freq_limited_range(smu,
							    SMU_GFXCLK,
							    sclk_min,
							    sclk_max);
		if (ret)
			return ret;

		pstate_table->gfxclk_pstate.curr.min = sclk_min;
		pstate_table->gfxclk_pstate.curr.max = sclk_max;
	}

	if (mclk_min && mclk_max) {
		ret = smu_v14_0_set_soft_freq_limited_range(smu,
							    SMU_MCLK,
							    mclk_min,
							    mclk_max);
		if (ret)
			return ret;

		pstate_table->uclk_pstate.curr.min = mclk_min;
		pstate_table->uclk_pstate.curr.max = mclk_max;
	}

	if (socclk_min && socclk_max) {
		ret = smu_v14_0_set_soft_freq_limited_range(smu,
							    SMU_SOCCLK,
							    socclk_min,
							    socclk_max);
		if (ret)
			return ret;

		pstate_table->socclk_pstate.curr.min = socclk_min;
		pstate_table->socclk_pstate.curr.max = socclk_max;
	}

	if (vclk_min && vclk_max) {
		/* VCLK/DCLK are per VCN instance; skip harvested instances */
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;
			ret = smu_v14_0_set_soft_freq_limited_range(smu,
								    i ? SMU_VCLK1 : SMU_VCLK,
								    vclk_min,
								    vclk_max);
			if (ret)
				return ret;
		}
		pstate_table->vclk_pstate.curr.min = vclk_min;
		pstate_table->vclk_pstate.curr.max = vclk_max;
	}

	if (dclk_min && dclk_max) {
		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
			if (adev->vcn.harvest_config & (1 << i))
				continue;
			ret = smu_v14_0_set_soft_freq_limited_range(smu,
								    i ? SMU_DCLK1 : SMU_DCLK,
								    dclk_min,
								    dclk_max);
			if (ret)
				return ret;
		}
		pstate_table->dclk_pstate.curr.min = dclk_min;
		pstate_table->dclk_pstate.curr.max = dclk_max;
	}

	if (fclk_min && fclk_max) {
		ret = smu_v14_0_set_soft_freq_limited_range(smu,
							    SMU_FCLK,
							    fclk_min,
							    fclk_max);
		if (ret)
			return ret;

		pstate_table->fclk_pstate.curr.min = fclk_min;
		pstate_table->fclk_pstate.curr.max = fclk_max;
	}

	return ret;
}

/*
 * Notify the SMU which power source (AC or DC) is active so it can apply
 * the matching power limits.
 */
int smu_v14_0_set_power_source(struct smu_context *smu,
			       enum smu_power_src_type power_src)
{
	int pwr_source;

	pwr_source = smu_cmn_to_asic_specific_index(smu,
						    CMN2ASIC_MAPPING_PWR,
						    (uint32_t)power_src);
	if (pwr_source < 0)
		return -EINVAL;

	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_NotifyPowerSource,
					       pwr_source,
					       NULL);
}

/*
 * Query the frequency of one DPM level of a clock domain. BIT31 of the
 * reply is a fine-grained-DPM flag (see smu_v14_0_get_fine_grained_status)
 * and is masked off before the frequency is stored in *value.
 */
static int smu_v14_0_get_dpm_freq_by_index(struct smu_context *smu,
					   enum smu_clk_type clk_type,
					   uint16_t level,
					   uint32_t *value)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!value)
		return -EINVAL;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	/* clock id in the high word, level index in the low word */
	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetDpmFreqByIndex,
					      param,
					      value);
	if (ret)
		return ret;

	/* strip the BIT31 flag, keep the frequency */
	*value = *value & 0x7fffffff;

	return ret;
}

/* Level index 0xff asks the SMU for the number of DPM levels instead. */
static int smu_v14_0_get_dpm_level_count(struct smu_context *smu,
					 enum smu_clk_type clk_type,
					 uint32_t *value)
{
	int ret;

	ret = smu_v14_0_get_dpm_freq_by_index(smu, clk_type, 0xff, value);

	return ret;
}

static int smu_v14_0_get_fine_grained_status(struct
					     smu_context *smu,
					     enum smu_clk_type clk_type,
					     bool *is_fine_grained_dpm)
{
	int ret = 0, clk_id = 0;
	uint32_t param;
	uint32_t value;

	if (!is_fine_grained_dpm)
		return -EINVAL;

	if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_cmn_to_asic_specific_index(smu,
						CMN2ASIC_MAPPING_CLK,
						clk_type);
	if (clk_id < 0)
		return clk_id;

	/* query level 0xff; the reply's BIT31 carries the fine-grained flag */
	param = (uint32_t)(((clk_id & 0xffff) << 16) | 0xff);

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_GetDpmFreqByIndex,
					      param,
					      &value);
	if (ret)
		return ret;

	/*
	 * BIT31: 1 - fine-grained DPM, 0 - discrete DPM.
	 * Fine-grained DPM is not supported for now.
	 */
	*is_fine_grained_dpm = value & 0x80000000;

	return 0;
}

/*
 * Populate a smu_14_0_dpm_table for one clock domain: level count,
 * fine-grained flag and the frequency of every level. min/max are taken
 * from the first/last level (levels are reported lowest first).
 */
int smu_v14_0_set_single_dpm_table(struct smu_context *smu,
				   enum smu_clk_type clk_type,
				   struct smu_14_0_dpm_table *single_dpm_table)
{
	int ret = 0;
	uint32_t clk;
	int i;

	ret = smu_v14_0_get_dpm_level_count(smu,
					    clk_type,
					    &single_dpm_table->count);
	if (ret) {
		dev_err(smu->adev->dev, "[%s] failed to get dpm levels!\n", __func__);
		return ret;
	}

	ret = smu_v14_0_get_fine_grained_status(smu,
						clk_type,
						&single_dpm_table->is_fine_grained);
	if (ret) {
		dev_err(smu->adev->dev, "[%s] failed to get fine grained status!\n", __func__);
		return ret;
	}

	for (i = 0; i < single_dpm_table->count; i++) {
		ret = smu_v14_0_get_dpm_freq_by_index(smu,
						      clk_type,
						      i,
						      &clk);
		if (ret) {
			dev_err(smu->adev->dev, "[%s] failed to get dpm freq by index!\n", __func__);
			return ret;
		}

		single_dpm_table->dpm_levels[i].value = clk;
		single_dpm_table->dpm_levels[i].enabled = true;

		if (i == 0)
			single_dpm_table->min = clk;
		else if (i == single_dpm_table->count - 1)
			single_dpm_table->max = clk;
	}

	return 0;
}

/*
 * Power the VCN instances up or down. MP1 14.0.0/14.0.1 use per-instance
 * messages (PowerUpVcn0/1); other versions use a single message with the
 * instance id in the upper 16 bits of the argument.
 */
int smu_v14_0_set_vcn_enable(struct smu_context *smu,
			     bool enable)
{
	struct amdgpu_device *adev = smu->adev;
	int i, ret = 0;

	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
		if (adev->vcn.harvest_config & (1 << i))
			continue;

		if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) ||
		    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) {
			if (i == 0)
				ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
								      SMU_MSG_PowerUpVcn0 : SMU_MSG_PowerDownVcn0,
								      i << 16U, NULL);
			else if (i == 1)
				ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
								      SMU_MSG_PowerUpVcn1 : SMU_MSG_PowerDownVcn1,
								      i << 16U, NULL);
		} else {
			ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
							      SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
							      i << 16U, NULL);
		}

		if (ret)
			return ret;
	}

	return ret;
}

/*
 * Same pattern as smu_v14_0_set_vcn_enable(), for the JPEG engines.
 * Note the Jpeg1 message is only sent on MP1 14.0.1.
 */
int smu_v14_0_set_jpeg_enable(struct smu_context *smu,
			      bool enable)
{
	struct amdgpu_device *adev = smu->adev;
	int i, ret = 0;

	for (i = 0; i < adev->jpeg.num_jpeg_inst; i++) {
		if (adev->jpeg.harvest_config & (1 << i))
			continue;

		if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 0) ||
		    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1)) {
			if (i == 0)
				ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
								      SMU_MSG_PowerUpJpeg0 : SMU_MSG_PowerDownJpeg0,
								      i << 16U, NULL);
			else if (i == 1 && amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(14, 0, 1))
				ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
								      SMU_MSG_PowerUpJpeg1 : SMU_MSG_PowerDownJpeg1,
								      i << 16U, NULL);
		} else {
			ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
							      SMU_MSG_PowerUpJpeg : SMU_MSG_PowerDownJpeg,
							      i << 16U, NULL);
		}

		if (ret)
			return ret;
	}

	return ret;
}

/* Ask the SMU to run its DC BTC sequence; log on failure. */
int smu_v14_0_run_btc(struct smu_context *smu)
{
	int res;

	res = smu_cmn_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
	if (res)
		dev_err(smu->adev->dev, "RunDcBtc failed!\n");

	return res;
}

/* Allow (1) or disallow (0) the SMU's GPO feature via SMU_MSG_AllowGpo. */
int smu_v14_0_gpo_control(struct smu_context *smu,
			  bool enablement)
{
	int res;

	res = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_AllowGpo,
					      enablement ? 1 : 0,
					      NULL);
	if (res)
		dev_err(smu->adev->dev, "SetGpoAllow %d failed!\n", enablement);

	return res;
}

/*
 * Enable/disable deep-sleep for every clock domain whose DS feature the
 * SMU reports as supported; bail out on the first failure.
 */
int smu_v14_0_deep_sleep_control(struct smu_context *smu,
				 bool enablement)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_GFXCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_GFXCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s GFXCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_UCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_UCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s UCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_FCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_FCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s FCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_SOCCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_SOCCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s SOCCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_LCLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_LCLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s LCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_VCN_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_VCN_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s VCN DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP0CLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP0CLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s MP0/MPIOCLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_MP1CLK_BIT)) {
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_DS_MP1CLK_BIT, enablement);
		if (ret) {
			dev_err(adev->dev, "Failed to %s MP1CLK DS!\n", enablement ? "enable" : "disable");
			return ret;
		}
	}

	return ret;
}

/* Enable/disable GFX ULV if the SMU reports the feature as supported. */
int smu_v14_0_gfx_ulv_control(struct smu_context *smu,
			      bool enablement)
{
	int ret = 0;

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_GFX_ULV_BIT))
		ret = smu_cmn_feature_set_enabled(smu, SMU_FEATURE_GFX_ULV_BIT, enablement);

	return ret;
}

/*
 * Pre-arm the D3 sequence in the SMU and track the BACO state we expect
 * the ASIC to land in for the given sequence.
 */
int smu_v14_0_baco_set_armd3_sequence(struct smu_context *smu,
				      enum smu_baco_seq baco_seq)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	int ret;

	ret = smu_cmn_send_smc_msg_with_param(smu,
					      SMU_MSG_ArmD3,
					      baco_seq,
					      NULL);
	if (ret)
		return ret;

	if (baco_seq == BACO_SEQ_BAMACO ||
	    baco_seq == BACO_SEQ_BACO)
		smu_baco->state = SMU_BACO_STATE_ENTER;
	else
		smu_baco->state = SMU_BACO_STATE_EXIT;

	return 0;
}

/*
 * BACO is usable when the platform supports it, we are not a SR-IOV VF,
 * and the BACO feature (if supported) is actually enabled.
 */
bool smu_v14_0_baco_is_support(struct smu_context *smu)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;

	if (amdgpu_sriov_vf(smu->adev) ||
	    !smu_baco->platform_support)
		return false;

	/* return true if ASIC is in BACO state already */
	if (smu_v14_0_baco_get_state(smu) == SMU_BACO_STATE_ENTER)
		return true;

	if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
	    !smu_cmn_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
		return false;

	return true;
}

/* Return the driver-tracked (not hardware-queried) BACO state. */
enum smu_baco_state smu_v14_0_baco_get_state(struct smu_context *smu)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;

	return smu_baco->state;
}

/*
 * Transition the ASIC into or out of BACO. A no-op if the tracked state
 * already matches; the tracked state is updated only on success.
 */
int smu_v14_0_baco_set_state(struct smu_context *smu,
			     enum smu_baco_state state)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (smu_v14_0_baco_get_state(smu) == state)
		return 0;

	if (state == SMU_BACO_STATE_ENTER) {
		ret =
		      smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_EnterBaco,
						      smu_baco->maco_support ?
						      BACO_SEQ_BAMACO : BACO_SEQ_BACO,
						      NULL);
	} else {
		ret = smu_cmn_send_smc_msg(smu,
					   SMU_MSG_ExitBaco,
					   NULL);
		if (ret)
			return ret;

		/* clear vbios scratch 6 and 7 for coming asic reinit */
		WREG32(adev->bios_scratch_reg_offset + 6, 0);
		WREG32(adev->bios_scratch_reg_offset + 7, 0);
	}

	if (!ret)
		smu_baco->state = state;

	return ret;
}

/* Enter BACO and give the transition a short time to settle. */
int smu_v14_0_baco_enter(struct smu_context *smu)
{
	int ret = 0;

	ret = smu_v14_0_baco_set_state(smu,
				       SMU_BACO_STATE_ENTER);
	if (ret)
		return ret;

	msleep(10);

	return ret;
}

int smu_v14_0_baco_exit(struct smu_context *smu)
{
	return smu_v14_0_baco_set_state(smu,
					SMU_BACO_STATE_EXIT);
}

/*
 * Bring GFX up through the IMU. With PSP-loaded firmware the message is
 * sent with a wait for the reply; otherwise it is sent without waiting.
 */
int smu_v14_0_set_gfx_power_up_by_imu(struct smu_context *smu)
{
	uint16_t index;
	struct amdgpu_device *adev = smu->adev;

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnableGfxImu,
						       ENABLE_IMU_ARG_GFXOFF_ENABLE, NULL);
	}

	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
					       SMU_MSG_EnableGfxImu);
	return smu_cmn_send_msg_without_waiting(smu, index, ENABLE_IMU_ARG_GFXOFF_ENABLE);
}

/* Fetch the default DPM clock table from the SMU into clocks_table. */
int smu_v14_0_set_default_dpm_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0,
				    smu_table->clocks_table, false);
}

/*
 * Handle the sysfs overdrive (OD) table commands for the gfx clock:
 * edit the min/max (input[0] selects which, input[1] is the MHz value),
 * restore the defaults, or commit the staged values to the SMU.
 * Only permitted while the DPM level is MANUAL.
 */
int smu_v14_0_od_edit_dpm_table(struct smu_context *smu,
				enum PP_OD_DPM_TABLE_COMMAND type,
				long input[], uint32_t size)
{
	struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
	int ret = 0;

	/* Only allowed in manual mode */
	if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		return -EINVAL;

	switch (type) {
	case PP_OD_EDIT_SCLK_VDDC_TABLE:
		if (size != 2) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		}

		if (input[0] == 0) {
			/* input[0] == 0: stage a new hard minimum gfx clock */
			if (input[1] < smu->gfx_default_hard_min_freq) {
				dev_warn(smu->adev->dev,
					 "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
					 input[1], smu->gfx_default_hard_min_freq);
				return -EINVAL;
			}
			smu->gfx_actual_hard_min_freq = input[1];
		} else if (input[0] == 1) {
			/* input[0] == 1: stage a new soft maximum gfx clock */
			if (input[1] > smu->gfx_default_soft_max_freq) {
				dev_warn(smu->adev->dev,
					 "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
					 input[1], smu->gfx_default_soft_max_freq);
				return -EINVAL;
			}
			smu->gfx_actual_soft_max_freq = input[1];
		} else {
			return -EINVAL;
		}
		break;
	case PP_OD_RESTORE_DEFAULT_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		}
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
		break;
	case PP_OD_COMMIT_DPM_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		}
		if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
			dev_err(smu->adev->dev,
				"The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
				smu->gfx_actual_hard_min_freq,
				smu->gfx_actual_soft_max_freq);
			return -EINVAL;
		}

		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
						      smu->gfx_actual_hard_min_freq,
						      NULL);
		if (ret) {
			dev_err(smu->adev->dev, "Set hard min sclk failed!");
			return ret;
		}

		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
						      smu->gfx_actual_soft_max_freq,
						      NULL);
		if (ret) {
			dev_err(smu->adev->dev, "Set soft max sclk failed!");
			return ret;
		}
		break;
	default:
		return -ENOSYS;
	}

	return ret;
}