/*	$NetBSD: amdgpu_sdma_v3_0.c,v 1.5 2021/12/19 12:21:29 riastradh Exp $	*/

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_sdma_v3_0.c,v 1.5 2021/12/19 12:21:29 riastradh Exp $");

#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/module.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "vi.h"
#include "vid.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "tonga_sdma_pkt_open.h"

#include "ivsrcid/ivsrcid_vislands30.h"
#include <linux/nbsd-namespace.h>

static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("amdgpu/tonga_sdma.bin");
MODULE_FIRMWARE("amdgpu/tonga_sdma1.bin");
MODULE_FIRMWARE("amdgpu/carrizo_sdma.bin");
MODULE_FIRMWARE("amdgpu/carrizo_sdma1.bin");
MODULE_FIRMWARE("amdgpu/fiji_sdma.bin");
MODULE_FIRMWARE("amdgpu/fiji_sdma1.bin");
MODULE_FIRMWARE("amdgpu/stoney_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris10_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris10_sdma1.bin");
MODULE_FIRMWARE("amdgpu/polaris11_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin");
MODULE_FIRMWARE("amdgpu/polaris12_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris12_sdma1.bin");
MODULE_FIRMWARE("amdgpu/vegam_sdma.bin");
MODULE_FIRMWARE("amdgpu/vegam_sdma1.bin");


static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	SDMA0_REGISTER_OFFSET,
	SDMA1_REGISTER_OFFSET
};

static const u32 golden_settings_tonga_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

static const u32 golden_settings_fiji_a10[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

static const u32 golden_settings_polaris11_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 golden_settings_polaris10_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 cz_golden_settings_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800,
	mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA1_POWER_CNTL, 0x00000800, 0x0003c800,
	mmSDMA1_RLC0_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x00000100, 0x00000100,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};

static const u32 stoney_golden_settings_a11[] =
{
	mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800,
	mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xffffffff, 0x00000100,
};

/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines. These engines are used for compute
 * and gfx. There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP. sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things. It also has support for tiling/detiling of
 * buffers.
 */

static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_device_program_register_sequence(adev,
							fiji_mgcg_cgcg_init,
							ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_fiji_a10,
							ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_device_program_register_sequence(adev,
							tonga_mgcg_cgcg_init,
							ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_tonga_a11,
							ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris11_a11,
							ARRAY_SIZE(golden_settings_polaris11_a11));
		break;
	case CHIP_POLARIS10:
		amdgpu_device_program_register_sequence(adev,
							golden_settings_polaris10_a11,
							ARRAY_SIZE(golden_settings_polaris10_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_device_program_register_sequence(adev,
							cz_mgcg_cgcg_init,
							ARRAY_SIZE(cz_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							cz_golden_settings_a11,
							ARRAY_SIZE(cz_golden_settings_a11));
		break;
	case CHIP_STONEY:
		amdgpu_device_program_register_sequence(adev,
							stoney_mgcg_cgcg_init,
							ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							stoney_golden_settings_a11,
							ARRAY_SIZE(stoney_golden_settings_a11));
		break;
	default:
		break;
	}
}

static void sdma_v3_0_free_microcode(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		release_firmware(adev->sdma.instance[i].fw);
		adev->sdma.instance[i].fw = NULL;
	}
}

/**
 * sdma_v3_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err = 0, i;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct sdma_firmware_header_v1_0 *hdr;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_FIJI:
		chip_name = "fiji";
		break;
	case CHIP_POLARIS10:
		chip_name = "polaris10";
		break;
	case CHIP_POLARIS11:
		chip_name = "polaris11";
		break;
	case CHIP_POLARIS12:
		chip_name = "polaris12";
		break;
	case CHIP_VEGAM:
		chip_name = "vegam";
		break;
	case CHIP_CARRIZO:
		chip_name = "carrizo";
		break;
	case CHIP_STONEY:
		chip_name = "stoney";
		break;
	default: BUG();
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
		if (err)
			goto out;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
		if (adev->sdma.instance[i].feature_version >= 20)
			adev->sdma.instance[i].burst_nop = true;

		info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
		info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
		info->fw = adev->sdma.instance[i].fw;
		header = (const struct common_firmware_header *)info->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);

	}
out:
	if (err) {
		pr_err("sdma_v3_0: Failed to load firmware \"%s\"\n", fw_name);
		for (i = 0; i < adev->sdma.num_instances; i++) {
			release_firmware(adev->sdma.instance[i].fw);
			adev->sdma.instance[i].fw = NULL;
		}
	}
	return err;
}

/**
 * sdma_v3_0_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (VI+).
 */
static uint64_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	/* XXX check if swapping is necessary on BE */
	return ring->adev->wb.wb[ring->rptr_offs] >> 2;
}

/**
 * sdma_v3_0_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VI+).
 */
static uint64_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 wptr;

	if (ring->use_doorbell || ring->use_pollmem) {
		/* XXX check if swapping is necessary on BE */
		wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
	} else {
		wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me]) >> 2;
	}

	return wptr;
}

/**
 * sdma_v3_0_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VI+).
 */
static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		volatile u32 *wb = (volatile u32 *)&adev->wb.wb[ring->wptr_offs];
		/* XXX check if swapping is necessary on BE */
		WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr) << 2);
	} else if (ring->use_pollmem) {
		volatile u32 *wb = (volatile u32 *)&adev->wb.wb[ring->wptr_offs];

		WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
	} else {
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[ring->me], lower_32_bits(ring->wptr) << 2);
	}
}
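
/**
 * sdma_v3_0_ring_insert_nop - insert NOP packets into the ring
 *
 * @ring: amdgpu ring pointer
 * @count: number of NOP dwords to insert
 *
 * Pad the ring with NOP packets. When the loaded microcode supports
 * burst NOPs, the first packet is emitted as a burst NOP header that
 * covers the remaining dwords (VI).
 */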
static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	int i;

	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->funcs->nop |
				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->funcs->nop);
}

/**
 * sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @job: job to retrieve the vmid from
 * @ib: IB object to schedule
 * @flags: unused
 *
 * Schedule an IB in the DMA ring (VI).
 */
static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_job *job,
				   struct amdgpu_ib *ib,
				   uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	/* IB packet must end on an 8 DW boundary */
	sdma_v3_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf));
	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);

}

/**
 * sdma_v3_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 ref_and_mask = 0;

	if (ring->me == 0)
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
	else
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
	amdgpu_ring_write(ring, ref_and_mask); /* reference */
	amdgpu_ring_write(ring, ref_and_mask); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

/**
 * sdma_v3_0_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @addr: address where the fence value is written
 * @seq: fence sequence number to write
 * @flags: fence flags
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (VI).
 */
static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}

/**
 * sdma_v3_0_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (VI).
 */
static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
	u32 rb_cntl, ib_cntl;
	int i;

	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_buffer_funcs_status(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
	}
	sdma0->sched.ready = false;
	sdma1->sched.ready = false;
}

/**
 * sdma_v3_0_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (VI).
 */
static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}

/**
 * sdma_v3_0_ctx_switch_enable - stop the async dma engines context switch
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs context switch.
 *
 * Halt or unhalt the async dma engines context switch (VI).
 */
static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl, phase_quantum = 0;
	int i;

	if (amdgpu_sdma_phase_quantum) {
		unsigned value = amdgpu_sdma_phase_quantum;
		unsigned unit = 0;

		/* reduce the requested quantum to a VALUE/UNIT pair
		 * (quantum ~= value << unit) that fits the register fields
		 */
		while (value > (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
				SDMA0_PHASE0_QUANTUM__VALUE__SHIFT)) {
			value = (value + 1) >> 1;
			unit++;
		}
		if (unit > (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
			    SDMA0_PHASE0_QUANTUM__UNIT__SHIFT)) {
			value = (SDMA0_PHASE0_QUANTUM__VALUE_MASK >>
				 SDMA0_PHASE0_QUANTUM__VALUE__SHIFT);
			unit = (SDMA0_PHASE0_QUANTUM__UNIT_MASK >>
				SDMA0_PHASE0_QUANTUM__UNIT__SHIFT);
			WARN_ONCE(1,
			"clamping sdma_phase_quantum to %uK clock cycles\n",
				  value << unit);
		}
		phase_quantum =
			value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT |
			unit  << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
		if (enable) {
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
					AUTO_CTXSW_ENABLE, 1);
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
					ATC_L1_ENABLE, 1);
			if (amdgpu_sdma_phase_quantum) {
				WREG32(mmSDMA0_PHASE0_QUANTUM + sdma_offsets[i],
				       phase_quantum);
				WREG32(mmSDMA0_PHASE1_QUANTUM + sdma_offsets[i],
				       phase_quantum);
			}
		} else {
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
					AUTO_CTXSW_ENABLE, 0);
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
					ATC_L1_ENABLE, 1);
		}

		WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl);
	}
}

/**
 * sdma_v3_0_enable - stop the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (VI).
 */
static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	if (!enable) {
		sdma_v3_0_gfx_stop(adev);
		sdma_v3_0_rlc_stop(adev);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
		if (enable)
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
		else
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
	}
}

/**
 * sdma_v3_0_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl, wptr_poll_cntl;
	u32 rb_bufsz;
	u32 wb_offset;
	u32 doorbell;
	u64 wptr_gpu_addr;
	int i, j, r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		amdgpu_ring_clear_ring(ring);
		wb_offset = (ring->rptr_offs * 4);

		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			vi_srbm_select(adev, 0, 0, 0, j);
			/* SDMA GFX */
			WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
			WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
		}
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		WREG32(mmSDMA0_TILING_CONFIG + sdma_offsets[i],
		       adev->gfx.config.gb_addr_config & 0x70);

		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
					RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		ring->wptr = 0;
		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
		sdma_v3_0_ring_set_wptr(ring);
		WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0);

		/* set the wb address whether it's enabled or not */
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
		       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);

		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);

		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);

		doorbell = RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]);

		if (ring->use_doorbell) {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL,
						 OFFSET, ring->doorbell_index);
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
		} else {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
		}
		WREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i], doorbell);

		/* setup the wptr shadow polling */
		wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);

		WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO + sdma_offsets[i],
		       lower_32_bits(wptr_gpu_addr));
		WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI + sdma_offsets[i],
		       upper_32_bits(wptr_gpu_addr));
		wptr_poll_cntl = RREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i]);
		if (ring->use_pollmem) {
			/* wptr polling is not fast enough, directly clean the wptr register */
			WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);
			wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
						       SDMA0_GFX_RB_WPTR_POLL_CNTL,
						       ENABLE, 1);
		} else {
			wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl,
						       SDMA0_GFX_RB_WPTR_POLL_CNTL,
						       ENABLE, 0);
		}
		WREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i], wptr_poll_cntl);

		/* enable DMA RB */
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
		/* enable DMA IBs */
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

		ring->sched.ready = true;
	}

	/* unhalt the MEs */
	sdma_v3_0_enable(adev, true);
	/* enable sdma ring preemption */
	sdma_v3_0_ctx_switch_enable(adev, true);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		r = amdgpu_ring_test_helper(ring);
		if (r)
			return r;

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_buffer_funcs_status(adev, true);
	}

	return 0;
}

/**
 * sdma_v3_0_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_rlc_resume(struct amdgpu_device *adev)
{
	/* XXX todo */
	return 0;
}

/**
 * sdma_v3_0_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_start(struct amdgpu_device *adev)
{
	int r;

	/* disable sdma engine before programming it */
	sdma_v3_0_ctx_switch_enable(adev, false);
	sdma_v3_0_enable(adev, false);

	/* start the gfx rings and rlc compute queues */
	r = sdma_v3_0_gfx_resume(adev);
	if (r)
		return r;
	r = sdma_v3_0_rlc_resume(adev);
	if (r)
		return r;

	return 0;
}

/**
 * sdma_v3_0_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 5);
	if (r)
		goto error_free_wb;

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

error_free_wb:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/**
 * sdma_v3_0_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies
 *
 * Test a simple IB in the DMA ring (VI).
 * Returns 0 on success, error on failure.
 */
static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	unsigned index;
	u32 tmp = 0;
	u64 gpu_addr;
	long r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r)
		goto err0;

	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1);
	ib.ptr[4] = 0xDEADBEEF;
	ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.length_dw = 8;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err1;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err1;
	} else if (r < 0) {
		goto err1;
	}
	tmp = le32_to_cpu(adev->wb.wb[index]);
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;
err1:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err0:
	amdgpu_device_wb_free(adev, index);
	return r;
}

/**
 * sdma_v3_0_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (CIK).
 */
static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{
	unsigned bytes = count * 8;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = bytes;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src);
	ib->ptr[ib->length_dw++] = upper_32_bits(src);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
}

/**
 * sdma_v3_0_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @value: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 *
 * Update PTEs by writing them manually using sDMA (CIK).
 */
static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib, uint64_t pe,
				   uint64_t value, unsigned count,
				   uint32_t incr)
{
	unsigned ndw = count * 2;

	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe);
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = ndw;
	for (; ndw > 0; ndw -= 2) {
		ib->ptr[ib->length_dw++] = lower_32_bits(value);
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		value += incr;
	}
}

/**
 * sdma_v3_0_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (CIK).
 */
static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib, uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint64_t flags)
{
	/* for physically contiguous pages (vram) */
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
	ib->ptr[ib->length_dw++] = lower_32_bits(pe); /* dst addr */
	ib->ptr[ib->length_dw++] = upper_32_bits(pe);
	ib->ptr[ib->length_dw++] = lower_32_bits(flags); /* mask */
	ib->ptr[ib->length_dw++] = upper_32_bits(flags);
	ib->ptr[ib->length_dw++] = lower_32_bits(addr); /* value */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = incr; /* increment size */
	ib->ptr[ib->length_dw++] = 0;
	ib->ptr[ib->length_dw++] = count; /* number of entries */
}

/**
 * sdma_v3_0_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to fill with padding
 *
 * Pad the IB with NOPs to a multiple of 8 dwords.
 */
static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_sdma_get_instance_from_ring(ring);
	u32 pad_count;
	int i;

	pad_count = (-ib->length_dw) & 7;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}

/**
 * sdma_v3_0_ring_emit_pipeline_sync - sync the pipeline
 *
 * @ring: amdgpu_ring pointer
 *
 * Make sure all previous operations are completed (CIK).
 */
static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	/* wait for idle */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3) | /* equal */
			  SDMA_PKT_POLL_REGMEM_HEADER_MEM_POLL(1));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq); /* reference */
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
}

/**
 * sdma_v3_0_ring_emit_vm_flush - cik vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: address of the page directory
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (VI).
 */
static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for flush */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* reference */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}

static void sdma_v3_0_ring_emit_wreg(struct amdgpu_ring *ring,
				     uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, val);
}

static int sdma_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_STONEY:
		adev->sdma.num_instances = 1;
		break;
	default:
		adev->sdma.num_instances = SDMA_MAX_INSTANCE;
		break;
	}

	sdma_v3_0_set_ring_funcs(adev);
	sdma_v3_0_set_buffer_funcs(adev);
	sdma_v3_0_set_vm_pte_funcs(adev);
	sdma_v3_0_set_irq_funcs(adev);

	return 0;
}

static int sdma_v3_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_TRAP,
			      &adev->sdma.trap_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 241,
			      &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_SDMA_SRBM_WRITE,
			      &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	r = sdma_v3_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load sdma firmware!\n");
		return r;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		if (!amdgpu_sriov_vf(adev)) {
			ring->use_doorbell = true;
			ring->doorbell_index = adev->doorbell_index.sdma_engine[i];
		} else {
			ring->use_pollmem = true;
		}

		snprintf(ring->name, sizeof ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_INSTANCE0 :
				     AMDGPU_SDMA_IRQ_INSTANCE1);
		if (r)
			return r;
	}

	return r;
}

static int sdma_v3_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	sdma_v3_0_free_microcode(adev);
	return 0;
}

static int sdma_v3_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v3_0_init_golden_registers(adev);

	r = sdma_v3_0_start(adev);
	if (r)
		return r;

	return r;
}

static int sdma_v3_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v3_0_ctx_switch_enable(adev, false);
	sdma_v3_0_enable(adev, false);

	return 0;
}

static int sdma_v3_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v3_0_hw_fini(adev);
}

static int sdma_v3_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v3_0_hw_init(adev);
}

static bool sdma_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
		   SRBM_STATUS2__SDMA1_BUSY_MASK))
		return false;

	return true;
}

static int sdma_v3_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
				SRBM_STATUS2__SDMA1_BUSY_MASK);

		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static bool sdma_v3_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if ((tmp & SRBM_STATUS2__SDMA_BUSY_MASK) ||
	    (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK)) {
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
	}

	if (srbm_soft_reset) {
		adev->sdma.srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->sdma.srbm_soft_reset = 0;
		return false;
	}
}

static int sdma_v3_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	if (!adev->sdma.srbm_soft_reset)
		return 0;

	srbm_soft_reset = adev->sdma.srbm_soft_reset;

	if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
	    REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
		sdma_v3_0_ctx_switch_enable(adev, false);
		sdma_v3_0_enable(adev, false);
	}

	return 0;
}

static int sdma_v3_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;

	if (!adev->sdma.srbm_soft_reset)
		return 0;

	srbm_soft_reset = adev->sdma.srbm_soft_reset;

	if (REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA) ||
	    REG_GET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_SDMA1)) {
		sdma_v3_0_gfx_resume(adev);
		sdma_v3_0_rlc_resume(adev);
	}

	return 0;
}

static int sdma_v3_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp;

	if (!adev->sdma.srbm_soft_reset)
		return 0;

	srbm_soft_reset = adev->sdma.srbm_soft_reset;

	if (srbm_soft_reset) {
		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_INSTANCE0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_INSTANCE1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}

static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u8 instance_id, queue_id;

	instance_id = (entry->ring_id & 0x3) >> 0;
	queue_id = (entry->ring_id & 0xc) >> 2;
	DRM_DEBUG("IH: SDMA trap\n");
	switch (instance_id) {
	case 0:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	case 1:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	}
	return 0;
}
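
/**
 * sdma_v3_0_process_illegal_inst_irq - process an illegal instruction irq
 *
 * @adev: amdgpu_device pointer
 * @source: irq source
 * @entry: interrupt vector entry
 *
 * Report the fault to the GPU scheduler of the affected SDMA gfx ring (VI).
 */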
static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	u8 instance_id, queue_id;

	DRM_ERROR("Illegal instruction in SDMA command stream\n");
	instance_id = (entry->ring_id & 0x3) >> 0;
	queue_id = (entry->ring_id & 0xc) >> 2;

	if (instance_id <= 1 && queue_id == 0)
		drm_sched_fault(&adev->sdma.instance[instance_id].ring.sched);
	return 0;
}

static void sdma_v3_0_update_sdma_medium_grain_clock_gating(
		struct amdgpu_device *adev,
		bool enable)
{
	uint32_t temp, data;
	int i;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			temp = data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i]);
			data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				  SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
			if (data != temp)
				WREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i], data);
		}
	} else {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			temp = data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i]);
			data |= SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK;

			if (data != temp)
				WREG32(mmSDMA0_CLK_CTRL + sdma_offsets[i], data);
		}
	}
}

static void sdma_v3_0_update_sdma_medium_grain_light_sleep(
		struct amdgpu_device *adev,
		bool enable)
{
	uint32_t temp, data;
	int i;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			temp = data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i]);
			data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;

			if (temp != data)
				WREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i], data);
		}
	} else {
		for (i = 0; i < adev->sdma.num_instances; i++) {
			temp = data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i]);
			data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;

			if (temp != data)
				WREG32(mmSDMA0_POWER_CNTL + sdma_offsets[i], data);
		}
	}
}

static int sdma_v3_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_sriov_vf(adev))
		return 0;

	switch (adev->asic_type) {
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		sdma_v3_0_update_sdma_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		sdma_v3_0_update_sdma_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}

static int sdma_v3_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}

static void sdma_v3_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	/* AMD_CG_SUPPORT_SDMA_MGCG */
	data = RREG32(mmSDMA0_CLK_CTRL + sdma_offsets[0]);
	if (!(data & SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK))
		*flags |= AMD_CG_SUPPORT_SDMA_MGCG;

	/* AMD_CG_SUPPORT_SDMA_LS */
	data = RREG32(mmSDMA0_POWER_CNTL + sdma_offsets[0]);
	if (data & SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK)
		*flags |= AMD_CG_SUPPORT_SDMA_LS;
}

static const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
	.name = "sdma_v3_0",
	.early_init = sdma_v3_0_early_init,
	.late_init = NULL,
	.sw_init = sdma_v3_0_sw_init,
	.sw_fini = sdma_v3_0_sw_fini,
	.hw_init = sdma_v3_0_hw_init,
	.hw_fini = sdma_v3_0_hw_fini,
	.suspend = sdma_v3_0_suspend,
	.resume = sdma_v3_0_resume,
	.is_idle = sdma_v3_0_is_idle,
	.wait_for_idle = sdma_v3_0_wait_for_idle,
	.check_soft_reset = sdma_v3_0_check_soft_reset,
	.pre_soft_reset = sdma_v3_0_pre_soft_reset,
	.post_soft_reset = sdma_v3_0_post_soft_reset,
	.soft_reset = sdma_v3_0_soft_reset,
	.set_clockgating_state = sdma_v3_0_set_clockgating_state,
	.set_powergating_state = sdma_v3_0_set_powergating_state,
	.get_clockgating_state = sdma_v3_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_SDMA,
	.align_mask = 0xf,
	.nop = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP),
	.support_64bit_ptrs = false,
	.get_rptr = sdma_v3_0_ring_get_rptr,
	.get_wptr = sdma_v3_0_ring_get_wptr,
	.set_wptr = sdma_v3_0_ring_set_wptr,
	.emit_frame_size =
		6 + /* sdma_v3_0_ring_emit_hdp_flush */
		3 + /* hdp invalidate */
		6 + /* sdma_v3_0_ring_emit_pipeline_sync */
		VI_FLUSH_GPU_TLB_NUM_WREG * 3 + 6 + /* sdma_v3_0_ring_emit_vm_flush */
		10 + 10 + 10, /* sdma_v3_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 7 + 6, /* sdma_v3_0_ring_emit_ib */
	.emit_ib = sdma_v3_0_ring_emit_ib,
	.emit_fence = sdma_v3_0_ring_emit_fence,
	.emit_pipeline_sync = sdma_v3_0_ring_emit_pipeline_sync,
	.emit_vm_flush = sdma_v3_0_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush,
	.test_ring = sdma_v3_0_ring_test_ring,
	.test_ib = sdma_v3_0_ring_test_ib,
	.insert_nop = sdma_v3_0_ring_insert_nop,
	.pad_ib = sdma_v3_0_ring_pad_ib,
	.emit_wreg = sdma_v3_0_ring_emit_wreg,
};

static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs;
		adev->sdma.instance[i].ring.me = i;
	}
}

static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {
	.set = sdma_v3_0_set_trap_irq_state,
	.process = sdma_v3_0_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v3_0_illegal_inst_irq_funcs = {
	.process = sdma_v3_0_process_illegal_inst_irq,
};

static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &sdma_v3_0_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs;
}

/**
 * sdma_v3_0_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ib: indirect buffer to copy to
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (VI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = byte_count;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}

/**
 * sdma_v3_0_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ib: indirect buffer to fill
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (VI).
 */
static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count;
}

static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = {
	.copy_max_bytes = 0x3fffe0, /* not 0x3fffff due to HW limitation */
	.copy_num_dw = 7,
	.emit_copy_buffer = sdma_v3_0_emit_copy_buffer,

	.fill_max_bytes = 0x3fffe0, /* not 0x3fffff due to HW limitation */
	.fill_num_dw = 5,
	.emit_fill_buffer = sdma_v3_0_emit_fill_buffer,
};

static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
{
	adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
	adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
}

static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
	.copy_pte_num_dw = 7,
	.copy_pte = sdma_v3_0_vm_copy_pte,

	.write_pte = sdma_v3_0_vm_write_pte,
	.set_pte_pde = sdma_v3_0_vm_set_pte_pde,
};

static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	unsigned i;

	adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
	for (i = 0; i < adev->sdma.num_instances; i++) {
		adev->vm_manager.vm_pte_scheds[i] =
			&adev->sdma.instance[i].ring.sched;
	}
	adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances;
}

const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 3,
	.minor = 0,
	.rev = 0,
	.funcs = &sdma_v3_0_ip_funcs,
};

const struct amdgpu_ip_block_version sdma_v3_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SDMA,
	.major = 3,
	.minor = 1,
	.rev = 0,
	.funcs = &sdma_v3_0_ip_funcs,
};