/*	$NetBSD: amdgpu_soc15.c,v 1.3 2021/12/19 12:21:29 riastradh Exp $	*/

/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_soc15.c,v 1.3 2021/12/19 12:21:29 riastradh Exp $");

#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_ih.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_ucode.h"
#include "amdgpu_psp.h"
#include "atom.h"
#include "amd_pcie.h"

#include "uvd/uvd_7_0_offset.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_offset.h"
#include "sdma1/sdma1_4_0_offset.h"
#include "hdp/hdp_4_0_offset.h"
#include "hdp/hdp_4_0_sh_mask.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
#include "nbio/nbio_7_0_default.h"
#include "nbio/nbio_7_0_offset.h"
#include "nbio/nbio_7_0_sh_mask.h"
#include "nbio/nbio_7_0_smn.h"
#include "mp/mp_9_0_offset.h"

#include "soc15.h"
#include "soc15_common.h"
#include "gfx_v9_0.h"
#include "gmc_v9_0.h"
#include "gfxhub_v1_0.h"
#include "mmhub_v1_0.h"
#include "df_v1_7.h"
#include "df_v3_6.h"
#include "nbio_v6_1.h"
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
#include "vega10_ih.h"
#include "sdma_v4_0.h"
#include "uvd_v7_0.h"
#include "vce_v4_0.h"
#include "vcn_v1_0.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v2_5.h"
#include "jpeg_v2_5.h"
#include "dce_virtual.h"
#include "mxgpu_ai.h"
#include "amdgpu_smu.h"
#include "amdgpu_ras.h"
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>

#include <linux/nbsd-namespace.h>

#define mmMP0_MISC_CGTT_CTRL0				0x01b9
#define mmMP0_MISC_CGTT_CTRL0_BASE_IDX			0
#define mmMP0_MISC_LIGHT_SLEEP_CTRL			0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX		0

/* for Vega20 register name change */
#define mmHDP_MEM_POWER_CTRL	0x00d4
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK	0x00000001L
#define HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK	0x00000002L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK	0x00010000L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK	0x00020000L
#define mmHDP_MEM_POWER_CTRL_BASE_IDX	0
/*
 * Indirect registers accessor
 */
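/*
 * Editor's note (sketch of the scheme below): these helpers go through
 * an index/data register pair exposed by NBIO.  Writing the target
 * offset to the index register selects it, after which the data
 * register aliases the selected register; the (void) read-back of the
 * index register flushes the posted write before the data access, and
 * pcie_idx_lock serializes the two-step sequence against other CPUs.
 */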
static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;
	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, v);
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}

static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u64 r;
	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	/* read low 32 bits */
	WREG32(address, reg);
	(void)RREG32(address);
	r = RREG32(data);

	/* read high 32 bits */
	WREG32(address, reg + 4);
	(void)RREG32(address);
	r |= ((u64)RREG32(data) << 32);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
	return r;
}

static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v)
{
	unsigned long flags, address, data;

	address = adev->nbio.funcs->get_pcie_index_offset(adev);
	data = adev->nbio.funcs->get_pcie_data_offset(adev);

	spin_lock_irqsave(&adev->pcie_idx_lock, flags);
	/* write low 32 bits */
	WREG32(address, reg);
	(void)RREG32(address);
	WREG32(data, (u32)(v & 0xffffffffULL));
	(void)RREG32(data);

	/* write high 32 bits */
	WREG32(address, reg + 4);
	(void)RREG32(address);
	WREG32(data, (u32)(v >> 32));
	(void)RREG32(data);
	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
}
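/*
 * Editor's note: the 64-bit accessors above issue two separate 32-bit
 * index/data cycles under one spinlock.  That keeps the pair atomic
 * with respect to other CPUs using these helpers, but not with respect
 * to the hardware itself, so a 64-bit register that changes between
 * the two halves can still be observed torn.
 */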
static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
	return r;
}

static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX);
	data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA);

	spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags);
	WREG32(address, ((reg) & 0x1ff));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags);
}

static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags, address, data;
	u32 r;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	r = RREG32(data);
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
	return r;
}

static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags, address, data;

	address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX);
	data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA);

	spin_lock_irqsave(&adev->didt_idx_lock, flags);
	WREG32(address, (reg));
	WREG32(data, (v));
	spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
}

static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
	return r;
}

static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
}

static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA);
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
	return r;
}

static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&adev->se_cac_idx_lock, flags);
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg));
	WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v));
	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags);
}

static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_memsize(adev);
}

static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
	u32 reference_clock = adev->clock.spll.reference_freq;

	if (adev->asic_type == CHIP_RAVEN)
		return reference_clock / 4;

	return reference_clock;
}

void soc15_grbm_select(struct amdgpu_device *adev,
		       u32 me, u32 pipe, u32 queue, u32 vmid)
{
	u32 grbm_gfx_cntl = 0;
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid);
	grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue);

	WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_CNTL, grbm_gfx_cntl);
}

static void soc15_vga_set_state(struct amdgpu_device *adev, bool state)
{
	/* todo */
}

static bool soc15_read_disabled_bios(struct amdgpu_device *adev)
{
	/* todo */
	return false;
}

static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
				     u8 *bios, u32 length_bytes)
{
	u32 *dw_ptr;
	u32 i, length_dw;

	if (bios == NULL)
		return false;
	if (length_bytes == 0)
		return false;
	/* APU vbios image is part of sbios image */
	if (adev->flags & AMD_IS_APU)
		return false;

	dw_ptr = (u32 *)bios;
	length_dw = ALIGN(length_bytes, 4) / 4;

	/* set rom index to 0 */
	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
	/* read out the rom data */
	for (i = 0; i < length_dw; i++)
		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));

	return true;
}
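/*
 * Editor's note: the VBIOS copy loop above pulls the whole image
 * through a single data port.  ROM_INDEX is seeded with 0 once, and
 * the SMUIO block then presumably advances the index on every
 * ROM_DATA read, one dword at a time.
 */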
static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = {
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)},
	{ SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)},
	{ SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)},
	{ SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)},
	{ SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)},
	{ SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)},
	{ SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)},
};

static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
					    u32 sh_num, u32 reg_offset)
{
	uint32_t val;

	mutex_lock(&adev->grbm_idx_mutex);
	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);

	val = RREG32(reg_offset);

	if (se_num != 0xffffffff || sh_num != 0xffffffff)
		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
	return val;
}

static uint32_t soc15_get_register_value(struct amdgpu_device *adev,
					 bool indexed, u32 se_num,
					 u32 sh_num, u32 reg_offset)
{
	if (indexed) {
		return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset);
	} else {
		if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG))
			return adev->gfx.config.gb_addr_config;
		else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2))
			return adev->gfx.config.db_debug2;
		return RREG32(reg_offset);
	}
}

static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
			       u32 sh_num, u32 reg_offset, u32 *value)
{
	uint32_t i;
	struct soc15_allowed_register_entry *en;

	*value = 0;
	for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) {
		en = &soc15_allowed_read_registers[i];
		if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg]
				   + en->reg_offset))
			continue;

		*value = soc15_get_register_value(adev,
						  soc15_allowed_read_registers[i].grbm_indexed,
						  se_num, sh_num, reg_offset);
		return 0;
	}
	return -EINVAL;
}
/**
 * soc15_program_register_sequence - program an array of registers.
 *
 * @adev: amdgpu_device pointer
 * @regs: pointer to the register array
 * @array_size: size of the register array
 *
 * Programs an array of registers with AND and OR masks.
 * This is a helper for setting golden registers.
 */
void soc15_program_register_sequence(struct amdgpu_device *adev,
				     const struct soc15_reg_golden *regs,
				     const u32 array_size)
{
	const struct soc15_reg_golden *entry;
	u32 tmp, reg;
	int i;

	for (i = 0; i < array_size; ++i) {
		entry = &regs[i];
		reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;

		if (entry->and_mask == 0xffffffff) {
			tmp = entry->or_mask;
		} else {
			tmp = RREG32(reg);
			tmp &= ~(entry->and_mask);
			tmp |= (entry->or_mask & entry->and_mask);
		}

		if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) ||
		    reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG))
			WREG32_RLC(reg, tmp);
		else
			WREG32(reg, tmp);
	}
}

static int soc15_asic_mode1_reset(struct amdgpu_device *adev)
{
	u32 i;
	int ret = 0;

	amdgpu_atombios_scratch_regs_engine_hung(adev, true);

	dev_info(adev->dev, "GPU mode1 reset\n");

	/* disable BM */
	pci_clear_master(adev->pdev);

	pci_save_state(adev->pdev);

	ret = psp_gpu_reset(adev);
	if (ret)
		dev_err(adev->dev, "GPU mode1 reset failed\n");

	pci_restore_state(adev->pdev);

	/* wait for asic to come out of reset */
	for (i = 0; i < adev->usec_timeout; i++) {
		u32 memsize = adev->nbio.funcs->get_memsize(adev);

		if (memsize != 0xffffffff)
			break;
		udelay(1);
	}

	amdgpu_atombios_scratch_regs_engine_hung(adev, false);

	return ret;
}

static int soc15_asic_baco_reset(struct amdgpu_device *adev)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	int ret = 0;

	/* avoid NBIF getting stuck when doing RAS recovery in BACO reset */
	if (ras && ras->supported)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, false);

	ret = amdgpu_dpm_baco_reset(adev);
	if (ret)
		return ret;

	/* re-enable doorbell interrupt after BACO exit */
	if (ras && ras->supported)
		adev->nbio.funcs->enable_doorbell_interrupt(adev, true);

	return 0;
}
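/*
 * Editor's note: BACO (short for "bus active, chip off") resets the
 * ASIC by powering it down while its PCIe presence stays alive, which
 * is why the doorbell interrupt is quiesced around the transition when
 * RAS recovery may be in flight.
 */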
static enum amd_reset_method
soc15_asic_reset_method(struct amdgpu_device *adev)
{
	bool baco_reset = false;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	switch (adev->asic_type) {
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		return AMD_RESET_METHOD_MODE2;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_ARCTURUS:
		baco_reset = amdgpu_dpm_is_baco_supported(adev);
		break;
	case CHIP_VEGA20:
		if (adev->psp.sos_fw_version >= 0x80067)
			baco_reset = amdgpu_dpm_is_baco_supported(adev);

		/*
		 * 1. PMFW version > 0x284300: all cases use baco
		 * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
		 */
		if ((ras && ras->supported) && adev->pm.fw_version <= 0x283400)
			baco_reset = false;
		break;
	default:
		break;
	}

	if (baco_reset)
		return AMD_RESET_METHOD_BACO;
	else
		return AMD_RESET_METHOD_MODE1;
}

static int soc15_asic_reset(struct amdgpu_device *adev)
{
	/* original raven doesn't have full asic reset */
	if (adev->pdev->device == 0x15dd && adev->rev_id < 0x8)
		return 0;

	switch (soc15_asic_reset_method(adev)) {
	case AMD_RESET_METHOD_BACO:
		if (!adev->in_suspend)
			amdgpu_inc_vram_lost(adev);
		return soc15_asic_baco_reset(adev);
	case AMD_RESET_METHOD_MODE2:
		return amdgpu_dpm_mode2_reset(adev);
	default:
		if (!adev->in_suspend)
			amdgpu_inc_vram_lost(adev);
		return soc15_asic_mode1_reset(adev);
	}
}

static bool soc15_supports_baco(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_ARCTURUS:
		return amdgpu_dpm_is_baco_supported(adev);
	case CHIP_VEGA20:
		if (adev->psp.sos_fw_version >= 0x80067)
			return amdgpu_dpm_is_baco_supported(adev);
		return false;
	default:
		return false;
	}
}

/*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
			u32 cntl_reg, u32 status_reg)
{
	return 0;
}*/

static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk)
{
	/*int r;

	r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS);
	if (r)
		return r;

	r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS);
	*/
	return 0;
}

static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
{
	/* todo */

	return 0;
}

static void soc15_pcie_gen3_enable(struct amdgpu_device *adev)
{
	if (pci_is_root_bus(adev->pdev->bus))
		return;

	if (amdgpu_pcie_gen2 == 0)
		return;

	if (adev->flags & AMD_IS_APU)
		return;

	if (!(adev->pm.pcie_gen_mask & (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 |
					CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)))
		return;

	/* todo */
}

static void soc15_program_aspm(struct amdgpu_device *adev)
{

	if (amdgpu_aspm == 0)
		return;

	/* todo */
}

static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
					   bool enable)
{
	adev->nbio.funcs->enable_doorbell_aperture(adev, enable);
	adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}

static const struct amdgpu_ip_block_version vega10_common_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_COMMON,
	.major = 2,
	.minor = 0,
	.rev = 0,
	.funcs = &soc15_common_ip_funcs,
};

static uint32_t soc15_get_rev_id(struct amdgpu_device *adev)
{
	return adev->nbio.funcs->get_rev_id(adev);
}
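/*
 * Editor's note: soc15_set_ip_blocks() below wires up everything that
 * depends on the ASIC flavour: the per-ASIC register base tables, the
 * NBIO/DF callback sets, and the list of IP blocks in the order their
 * hw_init hooks must run.
 */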
int soc15_set_ip_blocks(struct amdgpu_device *adev)
{
	/* Set IP register base before any HW register access */
	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		vega10_reg_base_init(adev);
		break;
	case CHIP_VEGA20:
		vega20_reg_base_init(adev);
		break;
	case CHIP_ARCTURUS:
		arct_reg_base_init(adev);
		break;
	default:
		return -EINVAL;
	}

	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
		adev->gmc.xgmi.supported = true;

	if (adev->flags & AMD_IS_APU) {
		adev->nbio.funcs = &nbio_v7_0_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
	} else if (adev->asic_type == CHIP_VEGA20 ||
		   adev->asic_type == CHIP_ARCTURUS) {
		adev->nbio.funcs = &nbio_v7_4_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
	} else {
		adev->nbio.funcs = &nbio_v6_1_funcs;
		adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
	}

	if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS)
		adev->df.funcs = &df_v3_6_funcs;
	else
		adev->df.funcs = &df_v1_7_funcs;

	adev->rev_id = soc15_get_rev_id(adev);
	adev->nbio.funcs->detect_hw_virt(adev);

	if (amdgpu_sriov_vf(adev))
		adev->virt.ops = &xgpu_ai_virt_ops;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);

		/* For Vega10 SR-IOV, PSP needs to be initialized before IH */
		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
				if (adev->asic_type == CHIP_VEGA20)
					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
				else
					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
			}
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
				if (adev->asic_type == CHIP_VEGA20)
					amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
				else
					amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
			}
		}
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (is_support_sw_smu(adev)) {
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		}
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
			amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
		}
		break;
	case CHIP_RAVEN:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
		break;
	case CHIP_ARCTURUS:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);

		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		}

		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);

		if (amdgpu_sriov_vf(adev)) {
			if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
				amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
		} else {
			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
		}
		if (!amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
		break;
	case CHIP_RENOIR:
		amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
		amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
			amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
#if defined(CONFIG_DRM_AMD_DC)
		else if (amdgpu_device_has_dc_support(adev))
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
#endif
		amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
		amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
	adev->nbio.funcs->hdp_flush(adev, ring);
}

static void soc15_invalidate_hdp(struct amdgpu_device *adev,
				 struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg)
		WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
	else
		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
			HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
}

static bool soc15_need_full_reset(struct amdgpu_device *adev)
{
	/* change this when we implement soft reset */
	return true;
}

static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
				 uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs */
	/* Reg 104 is # of posted requests sent */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104);

	/* Write to enable desired perf counters */
	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32);
}

static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
				  uint64_t *count1)
{
	uint32_t perfctr = 0;
	uint64_t cnt0_of, cnt1_of;
	int tmp;

	/* This reports 0 on APUs, so return to avoid writing/reading registers
	 * that may or may not be different from their GPU counterparts
	 */
	if (adev->flags & AMD_IS_APU)
		return;

	/* Set the 2 events that we wish to watch, defined above */
	/* Reg 40 is # received msgs */
	/* Reg 108 is # of posted requests sent on VG20 */
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
				EVENT0_SEL, 40);
	perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3,
				EVENT1_SEL, 108);

	/* Write to enable desired perf counters */
	WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr);
	/* Zero out and enable the perf counters
	 * Write 0x5:
	 * Bit 0 = Start all counters(1)
	 * Bit 2 = Global counter reset enable(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005);

	msleep(1000);

	/* Load the shadow and disable the perf counters
	 * Write 0x2:
	 * Bit 0 = Stop counters(0)
	 * Bit 1 = Load the shadow counters(1)
	 */
	WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002);

	/* Read register values to get any >32bit overflow */
	tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3);
	cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER);
	cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER);

	/* Get the values and add the overflow */
	*count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32);
	*count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32);
}
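/*
 * Editor's note: both PCIe usage helpers above sample their event
 * counters over a fixed one-second window (the msleep(1000) between
 * counter enable and shadow load), so the returned counts are
 * effectively per-second rates; the COUNTERn_UPPER fields supply the
 * bits beyond 32 for each counter.
 */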
static bool soc15_need_reset_on_init(struct amdgpu_device *adev)
{
	u32 sol_reg;

	/* Just return false for soc15 GPUs. Reset does not seem to
	 * be necessary.
	 */
	if (!amdgpu_passthrough(adev))
		return false;

	if (adev->flags & AMD_IS_APU)
		return false;

	/* Check sOS sign of life register to confirm sys driver and sOS
	 * have already been loaded.
	 */
	sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
	if (sol_reg)
		return true;

	return false;
}

static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev)
{
	uint64_t nak_r, nak_g;

	/* Get the number of NAKs received and generated */
	nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK);
	nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED);

	/* Add the total number of NAKs, i.e. the number of replays */
	return (nak_r + nak_g);
}

static const struct amdgpu_asic_funcs soc15_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.reset_method = &soc15_asic_reset_method,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.flush_hdp = &soc15_flush_hdp,
	.invalidate_hdp = &soc15_invalidate_hdp,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega10_doorbell_index_init,
	.get_pcie_usage = &soc15_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
	.supports_baco = &soc15_supports_baco,
};

static const struct amdgpu_asic_funcs vega20_asic_funcs =
{
	.read_disabled_bios = &soc15_read_disabled_bios,
	.read_bios_from_rom = &soc15_read_bios_from_rom,
	.read_register = &soc15_read_register,
	.reset = &soc15_asic_reset,
	.reset_method = &soc15_asic_reset_method,
	.set_vga_state = &soc15_vga_set_state,
	.get_xclk = &soc15_get_xclk,
	.set_uvd_clocks = &soc15_set_uvd_clocks,
	.set_vce_clocks = &soc15_set_vce_clocks,
	.get_config_memsize = &soc15_get_config_memsize,
	.flush_hdp = &soc15_flush_hdp,
	.invalidate_hdp = &soc15_invalidate_hdp,
	.need_full_reset = &soc15_need_full_reset,
	.init_doorbell_index = &vega20_doorbell_index_init,
	.get_pcie_usage = &vega20_get_pcie_usage,
	.need_reset_on_init = &soc15_need_reset_on_init,
	.get_pcie_replay_count = &soc15_get_pcie_replay_count,
	.supports_baco = &soc15_supports_baco,
};
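/*
 * Editor's note: soc15_common_early_init() below fills in the
 * indirect-register accessors and the per-ASIC clock/power-gating
 * capability masks.  MMIO_REG_HOLE_OFFSET picks the last page below
 * the 512KB register aperture; remap_hdp_registers() later aliases
 * the HDP flush/invalidate registers there so they can be exposed to
 * process space (presumably the reason kfd_ioctl.h is included above).
 */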
static int soc15_common_early_init(void *handle)
{
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
	adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
	adev->smc_rreg = NULL;
	adev->smc_wreg = NULL;
	adev->pcie_rreg = &soc15_pcie_rreg;
	adev->pcie_wreg = &soc15_pcie_wreg;
	adev->pcie_rreg64 = &soc15_pcie_rreg64;
	adev->pcie_wreg64 = &soc15_pcie_wreg64;
	adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg;
	adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg;
	adev->didt_rreg = &soc15_didt_rreg;
	adev->didt_wreg = &soc15_didt_wreg;
	adev->gc_cac_rreg = &soc15_gc_cac_rreg;
	adev->gc_cac_wreg = &soc15_gc_cac_wreg;
	adev->se_cac_rreg = &soc15_se_cac_rreg;
	adev->se_cac_wreg = &soc15_se_cac_wreg;

	adev->external_rev_id = 0xFF;
	switch (adev->asic_type) {
	case CHIP_VEGA10:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_RLC_LS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_DRM_MGCG |
			AMD_CG_SUPPORT_DRM_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_DF_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS;
		adev->pg_flags = 0;
		adev->external_rev_id = 0x1;
		break;
	case CHIP_VEGA12:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x14;
		break;
	case CHIP_VEGA20:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_MGCG |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCE_MGCG |
			AMD_CG_SUPPORT_UVD_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x28;
		break;
	case CHIP_RAVEN:
		adev->asic_funcs = &soc15_asic_funcs;
		if (adev->rev_id >= 0x8)
			adev->external_rev_id = adev->rev_id + 0x79;
		else if (adev->pdev->device == 0x15d8)
			adev->external_rev_id = adev->rev_id + 0x41;
		else if (adev->rev_id == 1)
			adev->external_rev_id = adev->rev_id + 0x20;
		else
			adev->external_rev_id = adev->rev_id + 0x01;

		if (adev->rev_id >= 0x8) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		} else if (adev->pdev->device == 0x15d8) {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA |
				AMD_PG_SUPPORT_MMHUB |
				AMD_PG_SUPPORT_VCN |
				AMD_PG_SUPPORT_VCN_DPG;
		} else {
			adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
				AMD_CG_SUPPORT_GFX_MGLS |
				AMD_CG_SUPPORT_GFX_RLC_LS |
				AMD_CG_SUPPORT_GFX_CP_LS |
				AMD_CG_SUPPORT_GFX_3D_CGCG |
				AMD_CG_SUPPORT_GFX_3D_CGLS |
				AMD_CG_SUPPORT_GFX_CGCG |
				AMD_CG_SUPPORT_GFX_CGLS |
				AMD_CG_SUPPORT_BIF_MGCG |
				AMD_CG_SUPPORT_BIF_LS |
				AMD_CG_SUPPORT_HDP_MGCG |
				AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_DRM_MGCG |
				AMD_CG_SUPPORT_DRM_LS |
				AMD_CG_SUPPORT_ROM_MGCG |
				AMD_CG_SUPPORT_MC_MGCG |
				AMD_CG_SUPPORT_MC_LS |
				AMD_CG_SUPPORT_SDMA_MGCG |
				AMD_CG_SUPPORT_SDMA_LS |
				AMD_CG_SUPPORT_VCN_MGCG;

			adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
		}
		break;
	case CHIP_ARCTURUS:
		adev->asic_funcs = &vega20_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags = 0;
		adev->external_rev_id = adev->rev_id + 0x32;
		break;
	case CHIP_RENOIR:
		adev->asic_funcs = &soc15_asic_funcs;
		adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_MGLS |
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_CP_LS |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_SDMA_MGCG |
			AMD_CG_SUPPORT_SDMA_LS |
			AMD_CG_SUPPORT_BIF_LS |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ROM_MGCG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_DF_MGCG;
		adev->pg_flags = AMD_PG_SUPPORT_SDMA |
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_JPEG |
			AMD_PG_SUPPORT_VCN_DPG;
		adev->external_rev_id = adev->rev_id + 0x91;
		break;
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}

	if (amdgpu_sriov_vf(adev)) {
		amdgpu_virt_init_setting(adev);
		xgpu_ai_mailbox_set_irq_funcs(adev);
	}

	return 0;
}

static int soc15_common_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r = 0;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_get_irq(adev);

	if (adev->nbio.funcs->ras_late_init)
		r = adev->nbio.funcs->ras_late_init(adev);

	return r;
}

static int soc15_common_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_add_irq_id(adev);

	adev->df.funcs->sw_init(adev);

	return 0;
}

static int soc15_common_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_nbio_ras_fini(adev);
	adev->df.funcs->sw_fini(adev);
	return 0;
}
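/*
 * Editor's note: doorbell ranges tell NBIO which stretches of the
 * doorbell BAR belong to each engine.  Under SR-IOV the hypervisor
 * owns this programming, which is why the function below does nothing
 * for VFs.
 */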
static void soc15_doorbell_range_init(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_ring *ring;

	/* sdma/ih doorbell ranges are programmed by the hypervisor */
	if (!amdgpu_sriov_vf(adev)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			ring = &adev->sdma.instance[i].ring;
			adev->nbio.funcs->sdma_doorbell_range(adev, i,
				ring->use_doorbell, ring->doorbell_index,
				adev->doorbell_index.sdma_doorbell_range);
		}

		adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell,
						    adev->irq.ih.doorbell_index);
	}
}

static int soc15_common_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* enable pcie gen2/3 link */
	soc15_pcie_gen3_enable(adev);
	/* enable aspm */
	soc15_program_aspm(adev);
	/* setup nbio registers */
	adev->nbio.funcs->init_registers(adev);
	/* remap HDP registers to a hole in mmio space,
	 * in order to expose those registers to process space
	 */
	if (adev->nbio.funcs->remap_hdp_registers)
		adev->nbio.funcs->remap_hdp_registers(adev);

	/* enable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, true);
	/* HW doorbell routing policy: doorbell writes not in the
	 * SDMA/IH/MM/ACV ranges will be routed to CP, so we need to
	 * init the SDMA/IH/MM/ACV doorbell ranges prior to the CP IP
	 * block init and ring test.
	 */
	soc15_doorbell_range_init(adev);

	return 0;
}

static int soc15_common_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* disable the doorbell aperture */
	soc15_enable_doorbell_aperture(adev, false);
	if (amdgpu_sriov_vf(adev))
		xgpu_ai_mailbox_put_irq(adev);

	if (adev->nbio.ras_if &&
	    amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) {
		if (adev->nbio.funcs->init_ras_controller_interrupt)
			amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0);
		if (adev->nbio.funcs->init_ras_err_event_athub_interrupt)
			amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0);
	}

	return 0;
}

static int soc15_common_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_fini(adev);
}

static int soc15_common_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return soc15_common_hw_init(adev);
}

static bool soc15_common_is_idle(void *handle)
{
	return true;
}

static int soc15_common_wait_for_idle(void *handle)
{
	return 0;
}

static int soc15_common_soft_reset(void *handle)
{
	return 0;
}
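/*
 * Editor's note: the clock- and power-gating helpers below all follow
 * the same read-modify-write pattern: capture the register in both
 * def and data, flip the relevant bits in data according to the
 * cg_flags capability mask, and write back only when something
 * actually changed, sparing a register write on the common no-op path.
 */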
static void soc15_update_hdp_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	if (adev->asic_type == CHIP_VEGA20 ||
	    adev->asic_type == CHIP_ARCTURUS) {
		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL));

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
			data |= HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
				HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
				HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
				HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK;
		else
			data &= ~(HDP_MEM_POWER_CTRL__IPH_MEM_POWER_CTRL_EN_MASK |
				  HDP_MEM_POWER_CTRL__IPH_MEM_POWER_LS_EN_MASK |
				  HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK |
				  HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK);

		if (def != data)
			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_CTRL), data);
	} else {
		def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));

		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
			data |= HDP_MEM_POWER_LS__LS_ENABLE_MASK;
		else
			data &= ~HDP_MEM_POWER_LS__LS_ENABLE_MASK;

		if (def != data)
			WREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS), data);
	}
}

static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG))
		data &= ~(0x01000000 |
			  0x02000000 |
			  0x04000000 |
			  0x08000000 |
			  0x10000000 |
			  0x20000000 |
			  0x40000000 |
			  0x80000000);
	else
		data |= (0x01000000 |
			 0x02000000 |
			 0x04000000 |
			 0x08000000 |
			 0x10000000 |
			 0x20000000 |
			 0x40000000 |
			 0x80000000);

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data);
}

static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS))
		data |= 1;
	else
		data &= ~1;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data);
}

static void soc15_update_rom_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	uint32_t def, data;

	def = data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ROM_MGCG))
		data &= ~(CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			  CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK);
	else
		data |= CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK |
			CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE1_MASK;

	if (def != data)
		WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0), data);
}

static int soc15_common_set_clockgating_state(void *handle,
					      enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->df.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_RAVEN:
	case CHIP_RENOIR:
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_drm_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		soc15_update_rom_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case CHIP_ARCTURUS:
		soc15_update_hdp_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	adev->nbio.funcs->get_clockgating_state(adev, flags);

	/* AMD_CG_SUPPORT_HDP_LS */
	data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
	if (data & HDP_MEM_POWER_LS__LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;

	/* AMD_CG_SUPPORT_DRM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
	if (!(data & 0x01000000))
		*flags |= AMD_CG_SUPPORT_DRM_MGCG;

	/* AMD_CG_SUPPORT_DRM_LS */
	data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL));
	if (data & 0x1)
		*flags |= AMD_CG_SUPPORT_DRM_LS;

	/* AMD_CG_SUPPORT_ROM_MGCG */
	data = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmCGTT_ROM_CLK_CTRL0));
	if (!(data & CGTT_ROM_CLK_CTRL0__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_ROM_MGCG;

	adev->df.funcs->get_clockgating_state(adev, flags);
}

static int soc15_common_set_powergating_state(void *handle,
					      enum amd_powergating_state state)
{
	/* todo */
	return 0;
}

const struct amd_ip_funcs soc15_common_ip_funcs = {
	.name = "soc15_common",
	.early_init = soc15_common_early_init,
	.late_init = soc15_common_late_init,
	.sw_init = soc15_common_sw_init,
	.sw_fini = soc15_common_sw_fini,
	.hw_init = soc15_common_hw_init,
	.hw_fini = soc15_common_hw_fini,
	.suspend = soc15_common_suspend,
	.resume = soc15_common_resume,
	.is_idle = soc15_common_is_idle,
	.wait_for_idle = soc15_common_wait_for_idle,
	.soft_reset = soc15_common_soft_reset,
	.set_clockgating_state = soc15_common_set_clockgating_state,
	.set_powergating_state = soc15_common_set_powergating_state,
	.get_clockgating_state = soc15_common_get_clockgating_state,
};