1/* $NetBSD: amdgpu_gmc_v7_0.c,v 1.6 2021/12/19 12:21:29 riastradh Exp $ */ 2 3/* 4 * Copyright 2014 Advanced Micro Devices, Inc. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 23 * 24 */ 25 26#include <sys/cdefs.h> 27__KERNEL_RCSID(0, "$NetBSD: amdgpu_gmc_v7_0.c,v 1.6 2021/12/19 12:21:29 riastradh Exp $"); 28 29#include <linux/firmware.h> 30#include <linux/module.h> 31#include <linux/pci.h> 32 33#include <drm/drm_cache.h> 34#include "amdgpu.h" 35#include "cikd.h" 36#include "cik.h" 37#include "gmc_v7_0.h" 38#include "amdgpu_ucode.h" 39#include "amdgpu_amdkfd.h" 40#include "amdgpu_gem.h" 41 42#include "bif/bif_4_1_d.h" 43#include "bif/bif_4_1_sh_mask.h" 44 45#include "gmc/gmc_7_1_d.h" 46#include "gmc/gmc_7_1_sh_mask.h" 47 48#include "oss/oss_2_0_d.h" 49#include "oss/oss_2_0_sh_mask.h" 50 51#include "dce/dce_8_0_d.h" 52#include "dce/dce_8_0_sh_mask.h" 53 54#include "amdgpu_atombios.h" 55 56#include "ivsrcid/ivsrcid_vislands30.h" 57 58static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev); 59static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); 60static int gmc_v7_0_wait_for_idle(void *handle); 61 62MODULE_FIRMWARE("amdgpu/bonaire_mc.bin"); 63MODULE_FIRMWARE("amdgpu/hawaii_mc.bin"); 64MODULE_FIRMWARE("amdgpu/topaz_mc.bin"); 65 66static const u32 golden_settings_iceland_a11[] = 67{ 68 mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff, 69 mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff, 70 mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff, 71 mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff 72}; 73 74static const u32 iceland_mgcg_cgcg_init[] = 75{ 76 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104 77}; 78 79static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev) 80{ 81 switch (adev->asic_type) { 82 case CHIP_TOPAZ: 83 amdgpu_device_program_register_sequence(adev, 84 iceland_mgcg_cgcg_init, 85 ARRAY_SIZE(iceland_mgcg_cgcg_init)); 86 amdgpu_device_program_register_sequence(adev, 87 golden_settings_iceland_a11, 88 ARRAY_SIZE(golden_settings_iceland_a11)); 89 break; 90 default: 91 break; 92 } 93} 94 95static void gmc_v7_0_mc_stop(struct amdgpu_device *adev) 96{ 97 u32 blackout; 98 99 gmc_v7_0_wait_for_idle((void *)adev); 100 101 blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); 102 if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) { 103 /* Block CPU access */ 104 WREG32(mmBIF_FB_EN, 0); 105 /* blackout the MC */ 106 blackout = REG_SET_FIELD(blackout, 107 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0); 108 WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1); 109 } 110 /* wait for the MC to settle */ 111 udelay(100); 112} 113 114static void gmc_v7_0_mc_resume(struct amdgpu_device *adev) 115{ 116 u32 tmp; 117 118 /* unblackout the MC */ 119 tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL); 120 tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0); 121 WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp); 122 /* allow CPU access */ 123 tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1); 124 tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1); 125 WREG32(mmBIF_FB_EN, tmp); 126} 127 128/** 129 * gmc_v7_0_init_microcode - load ucode images from disk 130 * 131 * @adev: amdgpu_device pointer 132 * 133 * Use the firmware interface to load the ucode images into 134 * the driver (not loaded into hw). 135 * Returns 0 on success, error on failure. 136 */ 137static int gmc_v7_0_init_microcode(struct amdgpu_device *adev) 138{ 139 const char *chip_name; 140 char fw_name[30]; 141 int err; 142 143 DRM_DEBUG("\n"); 144 145 switch (adev->asic_type) { 146 case CHIP_BONAIRE: 147 chip_name = "bonaire"; 148 break; 149 case CHIP_HAWAII: 150 chip_name = "hawaii"; 151 break; 152 case CHIP_TOPAZ: 153 chip_name = "topaz"; 154 break; 155 case CHIP_KAVERI: 156 case CHIP_KABINI: 157 case CHIP_MULLINS: 158 return 0; 159 default: BUG(); 160 } 161 162 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name); 163 164 err = request_firmware(&adev->gmc.fw, fw_name, adev->dev); 165 if (err) 166 goto out; 167 err = amdgpu_ucode_validate(adev->gmc.fw); 168 169out: 170 if (err) { 171 pr_err("cik_mc: Failed to load firmware \"%s\"\n", fw_name); 172 release_firmware(adev->gmc.fw); 173 adev->gmc.fw = NULL; 174 } 175 return err; 176} 177 178/** 179 * gmc_v7_0_mc_load_microcode - load MC ucode into the hw 180 * 181 * @adev: amdgpu_device pointer 182 * 183 * Load the GDDR MC ucode into the hw (CIK). 184 * Returns 0 on success, error on failure. 185 */ 186static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev) 187{ 188 const struct mc_firmware_header_v1_0 *hdr; 189 const __le32 *fw_data = NULL; 190 const __le32 *io_mc_regs = NULL; 191 u32 running; 192 int i, ucode_size, regs_size; 193 194 if (!adev->gmc.fw) 195 return -EINVAL; 196 197 hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data; 198 amdgpu_ucode_print_mc_hdr(&hdr->header); 199 200 adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version); 201 regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2); 202 io_mc_regs = (const __le32 *) 203 (adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes)); 204 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 205 fw_data = (const __le32 *) 206 (adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 207 208 running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN); 209 210 if (running == 0) { 211 /* reset the engine and set to writable */ 212 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008); 213 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010); 214 215 /* load mc io regs */ 216 for (i = 0; i < regs_size; i++) { 217 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++)); 218 WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++)); 219 } 220 /* load the MC ucode */ 221 for (i = 0; i < ucode_size; i++) 222 WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++)); 223 224 /* put the engine back into the active state */ 225 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008); 226 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004); 227 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001); 228 229 /* wait for training to complete */ 230 for (i = 0; i < adev->usec_timeout; i++) { 231 if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL), 232 MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0)) 233 break; 234 udelay(1); 235 } 236 for (i = 0; i < adev->usec_timeout; i++) { 237 if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL), 238 MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1)) 239 break; 240 udelay(1); 241 } 242 } 243 244 return 0; 245} 246 247static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev, 248 struct amdgpu_gmc *mc) 249{ 250 u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; 251 base <<= 24; 252 253 amdgpu_gmc_vram_location(adev, mc, base); 254 amdgpu_gmc_gart_location(adev, mc); 255} 256 257/** 258 * gmc_v7_0_mc_program - program the GPU memory controller 259 * 260 * @adev: amdgpu_device pointer 261 * 262 * Set the location of vram, gart, and AGP in the GPU's 263 * physical address space (CIK). 264 */ 265static void gmc_v7_0_mc_program(struct amdgpu_device *adev) 266{ 267 u32 tmp; 268 int i, j; 269 270 /* Initialize HDP */ 271 for (i = 0, j = 0; i < 32; i++, j += 0x6) { 272 WREG32((0xb05 + j), 0x00000000); 273 WREG32((0xb06 + j), 0x00000000); 274 WREG32((0xb07 + j), 0x00000000); 275 WREG32((0xb08 + j), 0x00000000); 276 WREG32((0xb09 + j), 0x00000000); 277 } 278 WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); 279 280 if (gmc_v7_0_wait_for_idle((void *)adev)) { 281 dev_warn(adev->dev, "Wait for MC idle timedout !\n"); 282 } 283 if (adev->mode_info.num_crtc) { 284 /* Lockout access through VGA aperture*/ 285 tmp = RREG32(mmVGA_HDP_CONTROL); 286 tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); 287 WREG32(mmVGA_HDP_CONTROL, tmp); 288 289 /* disable VGA render */ 290 tmp = RREG32(mmVGA_RENDER_CONTROL); 291 tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); 292 WREG32(mmVGA_RENDER_CONTROL, tmp); 293 } 294 /* Update configuration */ 295 WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, 296 adev->gmc.vram_start >> 12); 297 WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 298 adev->gmc.vram_end >> 12); 299 WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 300 adev->vram_scratch.gpu_addr >> 12); 301 WREG32(mmMC_VM_AGP_BASE, 0); 302 WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); 303 WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); 304 if (gmc_v7_0_wait_for_idle((void *)adev)) { 305 dev_warn(adev->dev, "Wait for MC idle timedout !\n"); 306 } 307 308 WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK); 309 310 tmp = RREG32(mmHDP_MISC_CNTL); 311 tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0); 312 WREG32(mmHDP_MISC_CNTL, tmp); 313 314 tmp = RREG32(mmHDP_HOST_PATH_CNTL); 315 WREG32(mmHDP_HOST_PATH_CNTL, tmp); 316} 317 318/** 319 * gmc_v7_0_mc_init - initialize the memory controller driver params 320 * 321 * @adev: amdgpu_device pointer 322 * 323 * Look up the amount of vram, vram width, and decide how to place 324 * vram and gart within the GPU's physical address space (CIK). 325 * Returns 0 for success. 326 */ 327static int gmc_v7_0_mc_init(struct amdgpu_device *adev) 328{ 329 int r; 330 331 adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev); 332 if (!adev->gmc.vram_width) { 333 u32 tmp; 334 int chansize, numchan; 335 336 /* Get VRAM informations */ 337 tmp = RREG32(mmMC_ARB_RAMCFG); 338 if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) { 339 chansize = 64; 340 } else { 341 chansize = 32; 342 } 343 tmp = RREG32(mmMC_SHARED_CHMAP); 344 switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) { 345 case 0: 346 default: 347 numchan = 1; 348 break; 349 case 1: 350 numchan = 2; 351 break; 352 case 2: 353 numchan = 4; 354 break; 355 case 3: 356 numchan = 8; 357 break; 358 case 4: 359 numchan = 3; 360 break; 361 case 5: 362 numchan = 6; 363 break; 364 case 6: 365 numchan = 10; 366 break; 367 case 7: 368 numchan = 12; 369 break; 370 case 8: 371 numchan = 16; 372 break; 373 } 374 adev->gmc.vram_width = numchan * chansize; 375 } 376 /* size in MB on si */ 377 adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; 378 adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; 379 380 if (!(adev->flags & AMD_IS_APU)) { 381 r = amdgpu_device_resize_fb_bar(adev); 382 if (r) 383 return r; 384 } 385 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0); 386 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); 387 388#ifdef __NetBSD__ 389 adev->gmc.aper_tag = adev->pdev->pd_pa.pa_memt; 390#endif 391 392#ifdef CONFIG_X86_64 393 if (adev->flags & AMD_IS_APU && 394 adev->gmc.real_vram_size > adev->gmc.aper_size) { 395 adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22; 396 adev->gmc.aper_size = adev->gmc.real_vram_size; 397 } 398#endif 399 400 /* In case the PCI BAR is larger than the actual amount of vram */ 401 adev->gmc.visible_vram_size = adev->gmc.aper_size; 402 if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size) 403 adev->gmc.visible_vram_size = adev->gmc.real_vram_size; 404 405 /* set the gart size */ 406 if (amdgpu_gart_size == -1) { 407 switch (adev->asic_type) { 408 case CHIP_TOPAZ: /* no MM engines */ 409 default: 410 adev->gmc.gart_size = 256ULL << 20; 411 break; 412#ifdef CONFIG_DRM_AMDGPU_CIK 413 case CHIP_BONAIRE: /* UVD, VCE do not support GPUVM */ 414 case CHIP_HAWAII: /* UVD, VCE do not support GPUVM */ 415 case CHIP_KAVERI: /* UVD, VCE do not support GPUVM */ 416 case CHIP_KABINI: /* UVD, VCE do not support GPUVM */ 417 case CHIP_MULLINS: /* UVD, VCE do not support GPUVM */ 418 adev->gmc.gart_size = 1024ULL << 20; 419 break; 420#endif 421 } 422 } else { 423 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20; 424 } 425 426 gmc_v7_0_vram_gtt_location(adev, &adev->gmc); 427 428 return 0; 429} 430 431/** 432 * gmc_v7_0_flush_gpu_tlb_pasid - tlb flush via pasid 433 * 434 * @adev: amdgpu_device pointer 435 * @pasid: pasid to be flush 436 * 437 * Flush the TLB for the requested pasid. 438 */ 439static int gmc_v7_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev, 440 uint16_t pasid, uint32_t flush_type, 441 bool all_hub) 442{ 443 int vmid; 444 unsigned int tmp; 445 446 if (adev->in_gpu_reset) 447 return -EIO; 448 449 for (vmid = 1; vmid < 16; vmid++) { 450 451 tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); 452 if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) && 453 (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) { 454 WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); 455 RREG32(mmVM_INVALIDATE_RESPONSE); 456 break; 457 } 458 } 459 460 return 0; 461} 462 463/* 464 * GART 465 * VMID 0 is the physical GPU addresses as used by the kernel. 466 * VMIDs 1-15 are used for userspace clients and are handled 467 * by the amdgpu vm/hsa code. 468 */ 469 470/** 471 * gmc_v7_0_flush_gpu_tlb - gart tlb flush callback 472 * 473 * @adev: amdgpu_device pointer 474 * @vmid: vm instance to flush 475 * 476 * Flush the TLB for the requested page table (CIK). 477 */ 478static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, 479 uint32_t vmhub, uint32_t flush_type) 480{ 481 /* bits 0-15 are the VM contexts0-15 */ 482 WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid); 483} 484 485static uint64_t gmc_v7_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, 486 unsigned vmid, uint64_t pd_addr) 487{ 488 uint32_t reg; 489 490 if (vmid < 8) 491 reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid; 492 else 493 reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8; 494 amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12); 495 496 /* bits 0-15 are the VM contexts0-15 */ 497 amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid); 498 499 return pd_addr; 500} 501 502static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, 503 unsigned pasid) 504{ 505 amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid); 506} 507 508static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level, 509 uint64_t *addr, uint64_t *flags) 510{ 511 BUG_ON(*addr & 0xFFFFFF0000000FFFULL); 512} 513 514static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev, 515 struct amdgpu_bo_va_mapping *mapping, 516 uint64_t *flags) 517{ 518 *flags &= ~AMDGPU_PTE_EXECUTABLE; 519 *flags &= ~AMDGPU_PTE_PRT; 520} 521 522#ifdef __NetBSD__ 523# undef __iomem 524# undef writeq 525#endif 526 527/** 528 * gmc_v8_0_set_fault_enable_default - update VM fault handling 529 * 530 * @adev: amdgpu_device pointer 531 * @value: true redirects VM faults to the default page 532 */ 533static void gmc_v7_0_set_fault_enable_default(struct amdgpu_device *adev, 534 bool value) 535{ 536 u32 tmp; 537 538 tmp = RREG32(mmVM_CONTEXT1_CNTL); 539 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, 540 RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); 541 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, 542 DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value); 543 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, 544 PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value); 545 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, 546 VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value); 547 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, 548 READ_PROTECTION_FAULT_ENABLE_DEFAULT, value); 549 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, 550 WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value); 551 WREG32(mmVM_CONTEXT1_CNTL, tmp); 552} 553 554/** 555 * gmc_v7_0_set_prt - set PRT VM fault 556 * 557 * @adev: amdgpu_device pointer 558 * @enable: enable/disable VM fault handling for PRT 559 */ 560static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable) 561{ 562 uint32_t tmp; 563 564 if (enable && !adev->gmc.prt_warning) { 565 dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n"); 566 adev->gmc.prt_warning = true; 567 } 568 569 tmp = RREG32(mmVM_PRT_CNTL); 570 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL, 571 CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable); 572 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL, 573 CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable); 574 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL, 575 TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable); 576 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL, 577 TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable); 578 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL, 579 L2_CACHE_STORE_INVALID_ENTRIES, enable); 580 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL, 581 L1_TLB_STORE_INVALID_ENTRIES, enable); 582 tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL, 583 MASK_PDE0_FAULT, enable); 584 WREG32(mmVM_PRT_CNTL, tmp); 585 586 if (enable) { 587 uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT; 588 uint32_t high = adev->vm_manager.max_pfn - 589 (AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT); 590 591 WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low); 592 WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low); 593 WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low); 594 WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low); 595 WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high); 596 WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high); 597 WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high); 598 WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high); 599 } else { 600 WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff); 601 WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff); 602 WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff); 603 WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff); 604 WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0); 605 WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0); 606 WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0); 607 WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0); 608 } 609} 610 611/** 612 * gmc_v7_0_gart_enable - gart enable 613 * 614 * @adev: amdgpu_device pointer 615 * 616 * This sets up the TLBs, programs the page tables for VMID0, 617 * sets up the hw for VMIDs 1-15 which are allocated on 618 * demand, and sets up the global locations for the LDS, GDS, 619 * and GPUVM for FSA64 clients (CIK). 620 * Returns 0 for success, errors for failure. 621 */ 622static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) 623{ 624 uint64_t table_addr; 625 int r, i; 626 u32 tmp, field; 627 628 if (adev->gart.bo == NULL) { 629 dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); 630 return -EINVAL; 631 } 632 r = amdgpu_gart_table_vram_pin(adev); 633 if (r) 634 return r; 635 636 table_addr = amdgpu_bo_gpu_offset(adev->gart.bo); 637 638 /* Setup TLB control */ 639 tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL); 640 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1); 641 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1); 642 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3); 643 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1); 644 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); 645 WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp); 646 /* Setup L2 cache */ 647 tmp = RREG32(mmVM_L2_CNTL); 648 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1); 649 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1); 650 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1); 651 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1); 652 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7); 653 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1); 654 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1); 655 WREG32(mmVM_L2_CNTL, tmp); 656 tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); 657 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1); 658 WREG32(mmVM_L2_CNTL2, tmp); 659 660 field = adev->vm_manager.fragment_size; 661 tmp = RREG32(mmVM_L2_CNTL3); 662 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1); 663 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field); 664 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field); 665 WREG32(mmVM_L2_CNTL3, tmp); 666 /* setup context0 */ 667 WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12); 668 WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12); 669 WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12); 670 WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR, 671 (u32)(adev->dummy_page_addr >> 12)); 672 WREG32(mmVM_CONTEXT0_CNTL2, 0); 673 tmp = RREG32(mmVM_CONTEXT0_CNTL); 674 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); 675 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); 676 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1); 677 WREG32(mmVM_CONTEXT0_CNTL, tmp); 678 679 WREG32(0x575, 0); 680 WREG32(0x576, 0); 681 WREG32(0x577, 0); 682 683 /* empty context1-15 */ 684 /* FIXME start with 4G, once using 2 level pt switch to full 685 * vm size space 686 */ 687 /* set vm size, must be a multiple of 4 */ 688 WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); 689 WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1); 690 for (i = 1; i < 16; i++) { 691 if (i < 8) 692 WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i, 693 table_addr >> 12); 694 else 695 WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8, 696 table_addr >> 12); 697 } 698 699 /* enable context1-15 */ 700 WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR, 701 (u32)(adev->dummy_page_addr >> 12)); 702 WREG32(mmVM_CONTEXT1_CNTL2, 4); 703 tmp = RREG32(mmVM_CONTEXT1_CNTL); 704 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1); 705 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1); 706 tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE, 707 adev->vm_manager.block_size - 9); 708 WREG32(mmVM_CONTEXT1_CNTL, tmp); 709 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) 710 gmc_v7_0_set_fault_enable_default(adev, false); 711 else 712 gmc_v7_0_set_fault_enable_default(adev, true); 713 714 if (adev->asic_type == CHIP_KAVERI) { 715 tmp = RREG32(mmCHUB_CONTROL); 716 tmp &= ~BYPASS_VM; 717 WREG32(mmCHUB_CONTROL, tmp); 718 } 719 720 gmc_v7_0_flush_gpu_tlb(adev, 0, 0, 0); 721 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", 722 (unsigned)(adev->gmc.gart_size >> 20), 723 (unsigned long long)table_addr); 724 adev->gart.ready = true; 725 return 0; 726} 727 728static int gmc_v7_0_gart_init(struct amdgpu_device *adev) 729{ 730 int r; 731 732 if (adev->gart.bo) { 733 WARN(1, "R600 PCIE GART already initialized\n"); 734 return 0; 735 } 736 /* Initialize common gart structure */ 737 r = amdgpu_gart_init(adev); 738 if (r) 739 return r; 740 adev->gart.table_size = adev->gart.num_gpu_pages * 8; 741 adev->gart.gart_pte_flags = 0; 742 return amdgpu_gart_table_vram_alloc(adev); 743} 744 745/** 746 * gmc_v7_0_gart_disable - gart disable 747 * 748 * @adev: amdgpu_device pointer 749 * 750 * This disables all VM page table (CIK). 751 */ 752static void gmc_v7_0_gart_disable(struct amdgpu_device *adev) 753{ 754 u32 tmp; 755 756 /* Disable all tables */ 757 WREG32(mmVM_CONTEXT0_CNTL, 0); 758 WREG32(mmVM_CONTEXT1_CNTL, 0); 759 /* Setup TLB control */ 760 tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL); 761 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0); 762 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0); 763 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0); 764 WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp); 765 /* Setup L2 cache */ 766 tmp = RREG32(mmVM_L2_CNTL); 767 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0); 768 WREG32(mmVM_L2_CNTL, tmp); 769 WREG32(mmVM_L2_CNTL2, 0); 770 amdgpu_gart_table_vram_unpin(adev); 771} 772 773/** 774 * gmc_v7_0_vm_decode_fault - print human readable fault info 775 * 776 * @adev: amdgpu_device pointer 777 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value 778 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value 779 * 780 * Print human readable fault information (CIK). 781 */ 782static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev, u32 status, 783 u32 addr, u32 mc_client, unsigned pasid) 784{ 785 u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID); 786 u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, 787 PROTECTIONS); 788 char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff, 789 (mc_client >> 8) & 0xff, mc_client & 0xff, 0 }; 790 u32 mc_id; 791 792 mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, 793 MEMORY_CLIENT_ID); 794 795 dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n", 796 protections, vmid, pasid, addr, 797 REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, 798 MEMORY_CLIENT_RW) ? 799 "write" : "read", block, mc_client, mc_id); 800} 801 802 803static const u32 mc_cg_registers[] = { 804 mmMC_HUB_MISC_HUB_CG, 805 mmMC_HUB_MISC_SIP_CG, 806 mmMC_HUB_MISC_VM_CG, 807 mmMC_XPB_CLK_GAT, 808 mmATC_MISC_CG, 809 mmMC_CITF_MISC_WR_CG, 810 mmMC_CITF_MISC_RD_CG, 811 mmMC_CITF_MISC_VM_CG, 812 mmVM_L2_CG, 813}; 814 815static const u32 mc_cg_ls_en[] = { 816 MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK, 817 MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK, 818 MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK, 819 MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK, 820 ATC_MISC_CG__MEM_LS_ENABLE_MASK, 821 MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK, 822 MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK, 823 MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK, 824 VM_L2_CG__MEM_LS_ENABLE_MASK, 825}; 826 827static const u32 mc_cg_en[] = { 828 MC_HUB_MISC_HUB_CG__ENABLE_MASK, 829 MC_HUB_MISC_SIP_CG__ENABLE_MASK, 830 MC_HUB_MISC_VM_CG__ENABLE_MASK, 831 MC_XPB_CLK_GAT__ENABLE_MASK, 832 ATC_MISC_CG__ENABLE_MASK, 833 MC_CITF_MISC_WR_CG__ENABLE_MASK, 834 MC_CITF_MISC_RD_CG__ENABLE_MASK, 835 MC_CITF_MISC_VM_CG__ENABLE_MASK, 836 VM_L2_CG__ENABLE_MASK, 837}; 838 839static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev, 840 bool enable) 841{ 842 int i; 843 u32 orig, data; 844 845 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { 846 orig = data = RREG32(mc_cg_registers[i]); 847 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) 848 data |= mc_cg_ls_en[i]; 849 else 850 data &= ~mc_cg_ls_en[i]; 851 if (data != orig) 852 WREG32(mc_cg_registers[i], data); 853 } 854} 855 856static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev, 857 bool enable) 858{ 859 int i; 860 u32 orig, data; 861 862 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { 863 orig = data = RREG32(mc_cg_registers[i]); 864 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) 865 data |= mc_cg_en[i]; 866 else 867 data &= ~mc_cg_en[i]; 868 if (data != orig) 869 WREG32(mc_cg_registers[i], data); 870 } 871} 872 873static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev, 874 bool enable) 875{ 876 u32 orig, data; 877 878 orig = data = RREG32_PCIE(ixPCIE_CNTL2); 879 880 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) { 881 data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1); 882 data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1); 883 data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1); 884 data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1); 885 } else { 886 data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0); 887 data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0); 888 data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0); 889 data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0); 890 } 891 892 if (orig != data) 893 WREG32_PCIE(ixPCIE_CNTL2, data); 894} 895 896static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev, 897 bool enable) 898{ 899 u32 orig, data; 900 901 orig = data = RREG32(mmHDP_HOST_PATH_CNTL); 902 903 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG)) 904 data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0); 905 else 906 data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1); 907 908 if (orig != data) 909 WREG32(mmHDP_HOST_PATH_CNTL, data); 910} 911 912static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev, 913 bool enable) 914{ 915 u32 orig, data; 916 917 orig = data = RREG32(mmHDP_MEM_POWER_LS); 918 919 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) 920 data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1); 921 else 922 data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0); 923 924 if (orig != data) 925 WREG32(mmHDP_MEM_POWER_LS, data); 926} 927 928static int gmc_v7_0_convert_vram_type(int mc_seq_vram_type) 929{ 930 switch (mc_seq_vram_type) { 931 case MC_SEQ_MISC0__MT__GDDR1: 932 return AMDGPU_VRAM_TYPE_GDDR1; 933 case MC_SEQ_MISC0__MT__DDR2: 934 return AMDGPU_VRAM_TYPE_DDR2; 935 case MC_SEQ_MISC0__MT__GDDR3: 936 return AMDGPU_VRAM_TYPE_GDDR3; 937 case MC_SEQ_MISC0__MT__GDDR4: 938 return AMDGPU_VRAM_TYPE_GDDR4; 939 case MC_SEQ_MISC0__MT__GDDR5: 940 return AMDGPU_VRAM_TYPE_GDDR5; 941 case MC_SEQ_MISC0__MT__HBM: 942 return AMDGPU_VRAM_TYPE_HBM; 943 case MC_SEQ_MISC0__MT__DDR3: 944 return AMDGPU_VRAM_TYPE_DDR3; 945 default: 946 return AMDGPU_VRAM_TYPE_UNKNOWN; 947 } 948} 949 950static int gmc_v7_0_early_init(void *handle) 951{ 952 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 953 954 gmc_v7_0_set_gmc_funcs(adev); 955 gmc_v7_0_set_irq_funcs(adev); 956 957 adev->gmc.shared_aperture_start = 0x2000000000000000ULL; 958 adev->gmc.shared_aperture_end = 959 adev->gmc.shared_aperture_start + (4ULL << 30) - 1; 960 adev->gmc.private_aperture_start = 961 adev->gmc.shared_aperture_end + 1; 962 adev->gmc.private_aperture_end = 963 adev->gmc.private_aperture_start + (4ULL << 30) - 1; 964 965 return 0; 966} 967 968static int gmc_v7_0_late_init(void *handle) 969{ 970 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 971 972 amdgpu_bo_late_init(adev); 973 974 if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS) 975 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); 976 else 977 return 0; 978} 979 980static unsigned gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev) 981{ 982 u32 d1vga_control = RREG32(mmD1VGA_CONTROL); 983 unsigned size; 984 985 if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) { 986 size = 9 * 1024 * 1024; /* reserve 8MB for vga emulator and 1 MB for FB */ 987 } else { 988 u32 viewport = RREG32(mmVIEWPORT_SIZE); 989 size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) * 990 REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) * 991 4); 992 } 993 /* return 0 if the pre-OS buffer uses up most of vram */ 994 if ((adev->gmc.real_vram_size - size) < (8 * 1024 * 1024)) 995 return 0; 996 return size; 997} 998 999static int gmc_v7_0_sw_init(void *handle) 1000{ 1001 int r; 1002 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1003 1004 adev->num_vmhubs = 1; 1005 1006 if (adev->flags & AMD_IS_APU) { 1007 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; 1008 } else { 1009 u32 tmp = RREG32(mmMC_SEQ_MISC0); 1010 tmp &= MC_SEQ_MISC0__MT__MASK; 1011 adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp); 1012 } 1013 1014 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault); 1015 if (r) 1016 return r; 1017 1018 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault); 1019 if (r) 1020 return r; 1021 1022 /* Adjust VM size here. 1023 * Currently set to 4GB ((1 << 20) 4k pages). 1024 * Max GPUVM size for cayman and SI is 40 bits. 1025 */ 1026 amdgpu_vm_adjust_size(adev, 64, 9, 1, 40); 1027 1028 /* Set the internal MC address mask 1029 * This is the max address of the GPU's 1030 * internal address space. 1031 */ 1032 adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */ 1033 1034#ifdef __NetBSD__ 1035 r = drm_limit_dma_space(adev->ddev, 0, DMA_BIT_MASK(40)); 1036#else 1037 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40)); 1038#endif 1039 if (r) { 1040 pr_warn("amdgpu: No suitable DMA available\n"); 1041 return r; 1042 } 1043 adev->need_swiotlb = drm_need_swiotlb(40); 1044 1045 r = gmc_v7_0_init_microcode(adev); 1046 if (r) { 1047 DRM_ERROR("Failed to load mc firmware!\n"); 1048 return r; 1049 } 1050 1051 r = gmc_v7_0_mc_init(adev); 1052 if (r) 1053 return r; 1054 1055 adev->gmc.stolen_size = gmc_v7_0_get_vbios_fb_size(adev); 1056 1057 /* Memory manager */ 1058 r = amdgpu_bo_init(adev); 1059 if (r) 1060 return r; 1061 1062 r = gmc_v7_0_gart_init(adev); 1063 if (r) 1064 return r; 1065 1066 /* 1067 * number of VMs 1068 * VMID 0 is reserved for System 1069 * amdgpu graphics/compute will use VMIDs 1-7 1070 * amdkfd will use VMIDs 8-15 1071 */ 1072 adev->vm_manager.id_mgr[0].num_ids = AMDGPU_NUM_OF_VMIDS; 1073 amdgpu_vm_manager_init(adev); 1074 1075 /* base offset of vram pages */ 1076 if (adev->flags & AMD_IS_APU) { 1077 u64 tmp = RREG32(mmMC_VM_FB_OFFSET); 1078 1079 tmp <<= 22; 1080 adev->vm_manager.vram_base_offset = tmp; 1081 } else { 1082 adev->vm_manager.vram_base_offset = 0; 1083 } 1084 1085 adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info), 1086 GFP_KERNEL); 1087 if (!adev->gmc.vm_fault_info) 1088 return -ENOMEM; 1089 atomic_set(&adev->gmc.vm_fault_info_updated, 0); 1090 1091 return 0; 1092} 1093 1094static int gmc_v7_0_sw_fini(void *handle) 1095{ 1096 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1097 1098 amdgpu_gem_force_release(adev); 1099 amdgpu_vm_manager_fini(adev); 1100 kfree(adev->gmc.vm_fault_info); 1101 amdgpu_gart_table_vram_free(adev); 1102 amdgpu_bo_fini(adev); 1103 amdgpu_gart_fini(adev); 1104 release_firmware(adev->gmc.fw); 1105 adev->gmc.fw = NULL; 1106 1107 return 0; 1108} 1109 1110static int gmc_v7_0_hw_init(void *handle) 1111{ 1112 int r; 1113 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1114 1115 gmc_v7_0_init_golden_registers(adev); 1116 1117 gmc_v7_0_mc_program(adev); 1118 1119 if (!(adev->flags & AMD_IS_APU)) { 1120 r = gmc_v7_0_mc_load_microcode(adev); 1121 if (r) { 1122 DRM_ERROR("Failed to load MC firmware!\n"); 1123 return r; 1124 } 1125 } 1126 1127 r = gmc_v7_0_gart_enable(adev); 1128 if (r) 1129 return r; 1130 1131 return r; 1132} 1133 1134static int gmc_v7_0_hw_fini(void *handle) 1135{ 1136 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1137 1138 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); 1139 gmc_v7_0_gart_disable(adev); 1140 1141 return 0; 1142} 1143 1144static int gmc_v7_0_suspend(void *handle) 1145{ 1146 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1147 1148 gmc_v7_0_hw_fini(adev); 1149 1150 return 0; 1151} 1152 1153static int gmc_v7_0_resume(void *handle) 1154{ 1155 int r; 1156 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1157 1158 r = gmc_v7_0_hw_init(adev); 1159 if (r) 1160 return r; 1161 1162 amdgpu_vmid_reset_all(adev); 1163 1164 return 0; 1165} 1166 1167static bool gmc_v7_0_is_idle(void *handle) 1168{ 1169 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1170 u32 tmp = RREG32(mmSRBM_STATUS); 1171 1172 if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | 1173 SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK)) 1174 return false; 1175 1176 return true; 1177} 1178 1179static int gmc_v7_0_wait_for_idle(void *handle) 1180{ 1181 unsigned i; 1182 u32 tmp; 1183 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1184 1185 for (i = 0; i < adev->usec_timeout; i++) { 1186 /* read MC_STATUS */ 1187 tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK | 1188 SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | 1189 SRBM_STATUS__MCC_BUSY_MASK | 1190 SRBM_STATUS__MCD_BUSY_MASK | 1191 SRBM_STATUS__VMC_BUSY_MASK); 1192 if (!tmp) 1193 return 0; 1194 udelay(1); 1195 } 1196 return -ETIMEDOUT; 1197 1198} 1199 1200static int gmc_v7_0_soft_reset(void *handle) 1201{ 1202 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1203 u32 srbm_soft_reset = 0; 1204 u32 tmp = RREG32(mmSRBM_STATUS); 1205 1206 if (tmp & SRBM_STATUS__VMC_BUSY_MASK) 1207 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, 1208 SRBM_SOFT_RESET, SOFT_RESET_VMC, 1); 1209 1210 if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | 1211 SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) { 1212 if (!(adev->flags & AMD_IS_APU)) 1213 srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, 1214 SRBM_SOFT_RESET, SOFT_RESET_MC, 1); 1215 } 1216 1217 if (srbm_soft_reset) { 1218 gmc_v7_0_mc_stop(adev); 1219 if (gmc_v7_0_wait_for_idle((void *)adev)) { 1220 dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); 1221 } 1222 1223 1224 tmp = RREG32(mmSRBM_SOFT_RESET); 1225 tmp |= srbm_soft_reset; 1226 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); 1227 WREG32(mmSRBM_SOFT_RESET, tmp); 1228 tmp = RREG32(mmSRBM_SOFT_RESET); 1229 1230 udelay(50); 1231 1232 tmp &= ~srbm_soft_reset; 1233 WREG32(mmSRBM_SOFT_RESET, tmp); 1234 tmp = RREG32(mmSRBM_SOFT_RESET); 1235 1236 /* Wait a little for things to settle down */ 1237 udelay(50); 1238 1239 gmc_v7_0_mc_resume(adev); 1240 udelay(50); 1241 } 1242 1243 return 0; 1244} 1245 1246static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev, 1247 struct amdgpu_irq_src *src, 1248 unsigned type, 1249 enum amdgpu_interrupt_state state) 1250{ 1251 u32 tmp; 1252 u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 1253 VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 1254 VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 1255 VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 1256 VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK | 1257 VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK); 1258 1259 switch (state) { 1260 case AMDGPU_IRQ_STATE_DISABLE: 1261 /* system context */ 1262 tmp = RREG32(mmVM_CONTEXT0_CNTL); 1263 tmp &= ~bits; 1264 WREG32(mmVM_CONTEXT0_CNTL, tmp); 1265 /* VMs */ 1266 tmp = RREG32(mmVM_CONTEXT1_CNTL); 1267 tmp &= ~bits; 1268 WREG32(mmVM_CONTEXT1_CNTL, tmp); 1269 break; 1270 case AMDGPU_IRQ_STATE_ENABLE: 1271 /* system context */ 1272 tmp = RREG32(mmVM_CONTEXT0_CNTL); 1273 tmp |= bits; 1274 WREG32(mmVM_CONTEXT0_CNTL, tmp); 1275 /* VMs */ 1276 tmp = RREG32(mmVM_CONTEXT1_CNTL); 1277 tmp |= bits; 1278 WREG32(mmVM_CONTEXT1_CNTL, tmp); 1279 break; 1280 default: 1281 break; 1282 } 1283 1284 return 0; 1285} 1286 1287static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev, 1288 struct amdgpu_irq_src *source, 1289 struct amdgpu_iv_entry *entry) 1290{ 1291 u32 addr, status, mc_client, vmid; 1292 1293 addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR); 1294 status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS); 1295 mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); 1296 /* reset addr and status */ 1297 WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1); 1298 1299 if (!addr && !status) 1300 return 0; 1301 1302 if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST) 1303 gmc_v7_0_set_fault_enable_default(adev, false); 1304 1305 if (printk_ratelimit()) { 1306 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", 1307 entry->src_id, entry->src_data[0]); 1308 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 1309 addr); 1310 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 1311 status); 1312 gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client, 1313 entry->pasid); 1314 } 1315 1316 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, 1317 VMID); 1318 if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid) 1319 && !atomic_read(&adev->gmc.vm_fault_info_updated)) { 1320 struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info; 1321 u32 protections = REG_GET_FIELD(status, 1322 VM_CONTEXT1_PROTECTION_FAULT_STATUS, 1323 PROTECTIONS); 1324 1325 info->vmid = vmid; 1326 info->mc_id = REG_GET_FIELD(status, 1327 VM_CONTEXT1_PROTECTION_FAULT_STATUS, 1328 MEMORY_CLIENT_ID); 1329 info->status = status; 1330 info->page_addr = addr; 1331 info->prot_valid = protections & 0x7 ? true : false; 1332 info->prot_read = protections & 0x8 ? true : false; 1333 info->prot_write = protections & 0x10 ? true : false; 1334 info->prot_exec = protections & 0x20 ? true : false; 1335 mb(); 1336 atomic_set(&adev->gmc.vm_fault_info_updated, 1); 1337 } 1338 1339 return 0; 1340} 1341 1342static int gmc_v7_0_set_clockgating_state(void *handle, 1343 enum amd_clockgating_state state) 1344{ 1345 bool gate = false; 1346 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1347 1348 if (state == AMD_CG_STATE_GATE) 1349 gate = true; 1350 1351 if (!(adev->flags & AMD_IS_APU)) { 1352 gmc_v7_0_enable_mc_mgcg(adev, gate); 1353 gmc_v7_0_enable_mc_ls(adev, gate); 1354 } 1355 gmc_v7_0_enable_bif_mgls(adev, gate); 1356 gmc_v7_0_enable_hdp_mgcg(adev, gate); 1357 gmc_v7_0_enable_hdp_ls(adev, gate); 1358 1359 return 0; 1360} 1361 1362static int gmc_v7_0_set_powergating_state(void *handle, 1363 enum amd_powergating_state state) 1364{ 1365 return 0; 1366} 1367 1368static const struct amd_ip_funcs gmc_v7_0_ip_funcs = { 1369 .name = "gmc_v7_0", 1370 .early_init = gmc_v7_0_early_init, 1371 .late_init = gmc_v7_0_late_init, 1372 .sw_init = gmc_v7_0_sw_init, 1373 .sw_fini = gmc_v7_0_sw_fini, 1374 .hw_init = gmc_v7_0_hw_init, 1375 .hw_fini = gmc_v7_0_hw_fini, 1376 .suspend = gmc_v7_0_suspend, 1377 .resume = gmc_v7_0_resume, 1378 .is_idle = gmc_v7_0_is_idle, 1379 .wait_for_idle = gmc_v7_0_wait_for_idle, 1380 .soft_reset = gmc_v7_0_soft_reset, 1381 .set_clockgating_state = gmc_v7_0_set_clockgating_state, 1382 .set_powergating_state = gmc_v7_0_set_powergating_state, 1383}; 1384 1385static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = { 1386 .flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb, 1387 .flush_gpu_tlb_pasid = gmc_v7_0_flush_gpu_tlb_pasid, 1388 .emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb, 1389 .emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping, 1390 .set_prt = gmc_v7_0_set_prt, 1391 .get_vm_pde = gmc_v7_0_get_vm_pde, 1392 .get_vm_pte = gmc_v7_0_get_vm_pte 1393}; 1394 1395static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = { 1396 .set = gmc_v7_0_vm_fault_interrupt_state, 1397 .process = gmc_v7_0_process_interrupt, 1398}; 1399 1400static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev) 1401{ 1402 adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs; 1403} 1404 1405static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev) 1406{ 1407 adev->gmc.vm_fault.num_types = 1; 1408 adev->gmc.vm_fault.funcs = &gmc_v7_0_irq_funcs; 1409} 1410 1411const struct amdgpu_ip_block_version gmc_v7_0_ip_block = 1412{ 1413 .type = AMD_IP_BLOCK_TYPE_GMC, 1414 .major = 7, 1415 .minor = 0, 1416 .rev = 0, 1417 .funcs = &gmc_v7_0_ip_funcs, 1418}; 1419 1420const struct amdgpu_ip_block_version gmc_v7_4_ip_block = 1421{ 1422 .type = AMD_IP_BLOCK_TYPE_GMC, 1423 .major = 7, 1424 .minor = 4, 1425 .rev = 0, 1426 .funcs = &gmc_v7_0_ip_funcs, 1427}; 1428