/* $NetBSD: amdgpu_uvd_v6_0.c,v 1.6 2021/12/19 12:21:29 riastradh Exp $ */

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_uvd_v6_0.c,v 1.6 2021/12/19 12:21:29 riastradh Exp $");

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "smu/smu_7_1_3_d.h"
#include "smu/smu_7_1_3_sh_mask.h"
#include "bif/bif_5_1_d.h"
#include "gmc/gmc_8_1_d.h"
#include "vi.h"
#include "ivsrcid/ivsrcid_vislands30.h"

/* Polaris10/11/12 firmware version 1.130.16, packed as
 * (major << 24) | (minor << 16) | (rev << 8) */
#define FW_1_130_16 ((1 << 24) | (130 << 16) | (16 << 8))

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev);

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);
static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev);
static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state);
static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable);

/**
 * uvd_v6_0_enc_support - get encode support status
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the current hardware encode support status
 */
static inline bool uvd_v6_0_enc_support(struct amdgpu_device *adev)
{
	return ((adev->asic_type >= CHIP_POLARIS10) &&
			(adev->asic_type <= CHIP_VEGAM) &&
			(!adev->uvd.fw_version || adev->uvd.fw_version >= FW_1_130_16));
}
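
/*
 * Note: encode rings are only exposed on Polaris10 through VegaM, and only
 * once the loaded firmware is at least 1.130.16; a fw_version of 0 just
 * means the firmware has not been queried yet, so support is assumed until
 * a real version is known.
 */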

/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v6_0_enc_ring_get_rptr - get enc read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc read pointer
 */
static uint64_t uvd_v6_0_enc_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		return RREG32(mmUVD_RB_RPTR);
	else
		return RREG32(mmUVD_RB_RPTR2);
}

/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v6_0_enc_ring_get_wptr - get enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware enc write pointer
 */
static uint64_t uvd_v6_0_enc_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		return RREG32(mmUVD_RB_WPTR);
	else
		return RREG32(mmUVD_RB_WPTR2);
}

/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * uvd_v6_0_enc_ring_set_wptr - set enc write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the enc write pointer to the hardware
 */
static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->uvd.inst->ring_enc[0])
		WREG32(mmUVD_RB_WPTR,
			lower_32_bits(ring->wptr));
	else
		WREG32(mmUVD_RB_WPTR2,
			lower_32_bits(ring->wptr));
}

/**
 * uvd_v6_0_enc_ring_test_ring - test if UVD ENC ring is working
 *
 * @ring: the engine to test on
 */
static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned i;
	int r;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}
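
/*
 * The ENC create/destroy messages below share one layout: a 0x18-byte
 * session-info command (0x00000001) carrying the session handle and the
 * message buffer address, a 0x14-byte task-info command (0x00000002), and
 * an 8-byte operation command (0x08000001 opens a stream, 0x08000002
 * closes one); the rest of the IB is padded with zeros up to ib_size_dw.
 */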

/**
 * uvd_v6_0_enc_get_create_msg - generate a UVD ENC create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: buffer object backing the msg
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				       struct amdgpu_bo *bo,
				       struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000001; /* op initialize */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v6_0_enc_get_destroy_msg - generate a UVD ENC destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: session handle to use
 * @bo: buffer object backing the msg
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
					uint32_t handle,
					struct amdgpu_bo *bo,
					struct dma_fence **fence)
{
	const unsigned ib_size_dw = 16;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
	if (r)
		return r;

	ib = &job->ibs[0];
	addr = amdgpu_bo_gpu_offset(bo);

	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x00000018;
	ib->ptr[ib->length_dw++] = 0x00000001; /* session info */
	ib->ptr[ib->length_dw++] = handle;
	ib->ptr[ib->length_dw++] = 0x00010000;
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;

	ib->ptr[ib->length_dw++] = 0x00000014;
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0x0000001c;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008;
	ib->ptr[ib->length_dw++] = 0x08000002; /* op close session */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * uvd_v6_0_enc_ring_test_ib - test if UVD ENC IBs are working
 *
 * @ring: the engine to test on
 * @timeout: how long to wait for the fence, in jiffies
 */
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	struct amdgpu_bo *bo = NULL;
	long r;

	r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_VRAM,
				      &bo, NULL, NULL);
	if (r)
		return r;

	r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
	if (r)
		goto error;

	r = uvd_v6_0_enc_get_destroy_msg(ring, 1, bo, &fence);
	if (r)
		goto error;

	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	amdgpu_bo_unreserve(bo);
	amdgpu_bo_unref(&bo);
	return r;
}
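
/*
 * On dGPUs the UVD block can be fused off entirely; early init bails out
 * with -ENOENT when the harvest fuses report UVD_DISABLE, so the IP block
 * is never brought up on such parts.
 */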

static int uvd_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	adev->uvd.num_uvd_inst = 1;

	if (!(adev->flags & AMD_IS_APU) &&
	    (RREG32_SMC(ixCC_HARVEST_FUSES) & CC_HARVEST_FUSES__UVD_DISABLE_MASK))
		return -ENOENT;

	uvd_v6_0_set_ring_funcs(adev);

	if (uvd_v6_0_enc_support(adev)) {
		adev->uvd.num_enc_rings = 2;
		uvd_v6_0_set_enc_ring_funcs(adev);
	}

	uvd_v6_0_set_irq_funcs(adev);

	return 0;
}

static int uvd_v6_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY,
			      VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE,
			      &adev->uvd.inst->irq);
	if (r)
		return r;

	/* UVD ENC TRAP */
	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY,
					      i + VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP,
					      &adev->uvd.inst->irq);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	if (!uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			adev->uvd.inst->ring_enc[i].funcs = NULL;

		adev->uvd.inst->irq.num_types = 1;
		adev->uvd.num_enc_rings = 0;

		DRM_INFO("UVD ENC is disabled\n");
	}

	ring = &adev->uvd.inst->ring;
	snprintf(ring->name, sizeof ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst->ring_enc[i];
			snprintf(ring->name, sizeof ring->name, "uvd_enc%d", i);
			r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
			if (r)
				return r;
		}
	}

	r = amdgpu_uvd_entity_init(adev);

	return r;
}

static int uvd_v6_0_sw_fini(void *handle)
{
	int i, r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
			amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
	}

	return amdgpu_uvd_sw_fini(adev);
}
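
/*
 * The ring writes below use type-0 packets: PACKET0(reg, n) is a header
 * telling the engine to treat the following n + 1 dwords as writes to
 * consecutive registers starting at reg, so each PACKET0(..., 0) pairs
 * with exactly one value dword.
 */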

/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @handle: amdgpu_device pointer (passed as void *)
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t tmp;
	int i, r;

	amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
	uvd_v6_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
	uvd_v6_0_enable_mgcg(adev, true);

	r = amdgpu_ring_test_helper(ring);
	if (r)
		goto done;

	r = amdgpu_ring_alloc(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_commit(ring);

	if (uvd_v6_0_enc_support(adev)) {
		for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
			ring = &adev->uvd.inst->ring_enc[i];
			r = amdgpu_ring_test_helper(ring);
			if (r)
				goto done;
		}
	}

done:
	if (!r) {
		if (uvd_v6_0_enc_support(adev))
			DRM_INFO("UVD and UVD ENC initialized successfully.\n");
		else
			DRM_INFO("UVD initialized successfully.\n");
	}

	return r;
}

/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer (passed as void *)
 *
 * Stop the UVD block, mark the ring as not ready any more
 */
static int uvd_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;

	if (RREG32(mmUVD_STATUS) != 0)
		uvd_v6_0_stop(adev);

	ring->sched.ready = false;

	return 0;
}

static int uvd_v6_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = uvd_v6_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_uvd_suspend(adev);
}

static int uvd_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	return uvd_v6_0_hw_init(adev);
}

/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.inst->gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.inst->gpu_addr));

	/* firmware, heap and stack/session segments; the cache offsets are
	 * written in 8-byte units, hence the >> 3 */
	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_UVD_FIRMWARE_SIZE(adev);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE +
	       (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

	WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);

	WREG32(mmUVD_GP_SCRATCH4, adev->uvd.max_handles);
}
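
/*
 * The cz_set_uvd_clock_gating_branches() helper below is compiled out; it
 * looks to be an older, Carrizo-style way of driving the same
 * UVD_CGC_GATE/UVD_SUVD_CGC_GATE masks and is presumably kept only for
 * reference.
 */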

#if 0
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
		bool enable)
{
	u32 data, data1;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	if (enable) {
		data |= UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK;
		data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK |
				UVD_SUVD_CGC_GATE__SRE_H264_MASK |
				UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SIT_H264_MASK |
				UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SCM_H264_MASK |
				UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SDB_H264_MASK |
				UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
	} else {
		data &= ~(UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__LMI_UMC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK);
		data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK |
				UVD_SUVD_CGC_GATE__SRE_H264_MASK |
				UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SIT_H264_MASK |
				UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SCM_H264_MASK |
				UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SDB_H264_MASK |
				UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
	}
	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif
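
/*
 * Bring-up order matters here: the UMC is stalled and the subblocks are
 * soft-reset before the LMI/MPC registers are programmed, then everything
 * but the VCPU is released, the VCPU clock is enabled, and only then is
 * the VCPU itself taken out of reset and polled for a ready status.
 */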

/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.inst->ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v6_0_mc_resume(adev);

	/* disable interrupt */
	WREG32_FIELD(UVD_MASTINT_EN, VCPU_EN, 0);

	/* stall UMC and register bus before resetting VCPU */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 1);
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET,
		UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
		UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_UVD, 0);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL,
		(0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
		UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
		UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
		UVD_LMI_CTRL__REQ_MODE_MASK |
		UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);

	/* enable UMC */
	WREG32_FIELD(UVD_LMI_CTRL2, STALL_ARB_UMC, 0);

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

	/* poll for the VCPU to come up, resetting it if it does not respond */
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 1);
		mdelay(10);
		WREG32_FIELD(UVD_SOFT_RESET, VCPU_SOFT_RESET, 0);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}
	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN,
		(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
		~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));

	/* force RBC into idle state */
	rb_bufsz = order_base_2(ring->ring_size);
	tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for the ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

	WREG32_FIELD(UVD_RBC_RB_CNTL, RB_NO_FETCH, 0);

	if (uvd_v6_0_enc_support(adev)) {
		ring = &adev->uvd.inst->ring_enc[0];
		WREG32(mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE, ring->ring_size / 4);

		ring = &adev->uvd.inst->ring_enc[1];
		WREG32(mmUVD_RB_RPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_WPTR2, lower_32_bits(ring->wptr));
		WREG32(mmUVD_RB_BASE_LO2, ring->gpu_addr);
		WREG32(mmUVD_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
		WREG32(mmUVD_RB_SIZE2, ring->ring_size / 4);
	}

	return 0;
}

/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	WREG32(mmUVD_STATUS, 0);
}
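
/*
 * Decode-ring fences go through the GPCOM VCPU registers: the sequence
 * number is written via mmUVD_CONTEXT_ID, the fence address via the two
 * DATA registers, and mmUVD_GPCOM_VCPU_CMD selects the operation
 * (0 apparently for the fence write, 2 for the trap that raises the
 * interrupt).
 */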

/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address the fence value is written to
 * @seq: sequence number to write
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v6_0_enc_ring_emit_fence - emit an enc fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address the fence value is written to
 * @seq: sequence number to write
 * @flags: fence flags
 *
 * Write an enc fence and a trap command to the ring.
 */
static void uvd_v6_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, HEVC_ENC_CMD_TRAP);
}

/**
 * uvd_v6_0_ring_emit_hdp_flush - skip HDP flushing
 *
 * @ring: amdgpu_ring pointer
 */
static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	/* The firmware doesn't seem to like touching registers at this point. */
}
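
/*
 * The decode ring test below seeds mmUVD_CONTEXT_ID with 0xCAFEDEAD from
 * the CPU, then asks the ring to overwrite it with 0xDEADBEEF; seeing the
 * new value come back proves the engine is fetching and executing ring
 * packets.
 */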

/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		return r;

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job the IB belongs to, used to look up the VMID
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
	amdgpu_ring_write(ring, vmid);

	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v6_0_enc_ring_emit_ib - enc execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job the IB belongs to, used to look up the VMID
 * @ib: indirect buffer to execute
 * @flags: unused
 *
 * Write enc ring commands to execute the indirect buffer
 */
static void uvd_v6_0_enc_ring_emit_ib(struct amdgpu_ring *ring,
				      struct amdgpu_job *job,
				      struct amdgpu_ib *ib,
				      uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_IB_VM);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}
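
/*
 * The helpers below drive registers indirectly through GPCOM commands;
 * judging by their use, command 0x8 performs the register write staged in
 * DATA0/DATA1, 0xC waits on a register against the mask in GP_SCRATCH8,
 * and 0xE waits on a memory location (the fence address) to pass the
 * value staged in GP_SCRATCH9.
 */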

static void uvd_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, reg << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);
}

static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 1 << vmid); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xC);
}

static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
	amdgpu_ring_write(ring, 0xffffffff); /* mask */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0xE);
}

static void uvd_v6_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	int i;

	WARN_ON(ring->wptr % 2 || count % 2);

	for (i = 0; i < count / 2; i++) {
		amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
		amdgpu_ring_write(ring, 0);
	}
}

static void uvd_v6_0_enc_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, HEVC_ENC_CMD_WAIT_GE);
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
}

static void uvd_v6_0_enc_ring_insert_end(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
}

static void uvd_v6_0_enc_ring_emit_vm_flush(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, HEVC_ENC_CMD_UPDATE_PTB);
	amdgpu_ring_write(ring, vmid);
	amdgpu_ring_write(ring, pd_addr >> 12);

	amdgpu_ring_write(ring, HEVC_ENC_CMD_FLUSH_TLB);
	amdgpu_ring_write(ring, vmid);
}

static bool uvd_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (uvd_v6_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}
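
/*
 * Soft reset is a four-step handshake with the IP framework:
 * check_soft_reset() latches whether UVD is stuck, pre_soft_reset() stops
 * the block, soft_reset() pulses the UVD bit in SRBM_SOFT_RESET with a
 * 50us settle on either side, and post_soft_reset() restarts the block.
 */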

#define AMDGPU_UVD_STATUS_BUSY_MASK    0xfd
static bool uvd_v6_0_check_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (REG_GET_FIELD(tmp, SRBM_STATUS, UVD_RQ_PENDING) ||
	    REG_GET_FIELD(tmp, SRBM_STATUS, UVD_BUSY) ||
	    (RREG32(mmUVD_STATUS) & AMDGPU_UVD_STATUS_BUSY_MASK))
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
				SRBM_SOFT_RESET, SOFT_RESET_UVD, 1);

	if (srbm_soft_reset) {
		adev->uvd.inst->srbm_soft_reset = srbm_soft_reset;
		return true;
	} else {
		adev->uvd.inst->srbm_soft_reset = 0;
		return false;
	}
}

static int uvd_v6_0_pre_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;

	uvd_v6_0_stop(adev);
	return 0;
}

static int uvd_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;
	srbm_soft_reset = adev->uvd.inst->srbm_soft_reset;

	if (srbm_soft_reset) {
		u32 tmp;

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);
	}

	return 0;
}

static int uvd_v6_0_post_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->uvd.inst->srbm_soft_reset)
		return 0;

	mdelay(5);

	return uvd_v6_0_start(adev);
}

static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	// TODO
	return 0;
}

static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	bool int_handled = true;
	DRM_DEBUG("IH: UVD TRAP\n");

	switch (entry->src_id) {
	case 124:	/* VISLANDS30_IV_SRCID_UVD_SYSTEM_MESSAGE */
		amdgpu_fence_process(&adev->uvd.inst->ring);
		break;
	case 119:	/* VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP */
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.inst->ring_enc[0]);
		else
			int_handled = false;
		break;
	case 120:	/* VISLANDS30_IV_SRCID_UVD_ENC_GEN_PURP + 1 */
		if (likely(uvd_v6_0_enc_support(adev)))
			amdgpu_fence_process(&adev->uvd.inst->ring_enc[1]);
		else
			int_handled = false;
		break;
	}

	if (!int_handled)
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);

	return 0;
}

static void uvd_v6_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
	uint32_t data1, data3;

	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	data3 = RREG32(mmUVD_CGC_GATE);

	data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
		 UVD_SUVD_CGC_GATE__SIT_MASK |
		 UVD_SUVD_CGC_GATE__SMP_MASK |
		 UVD_SUVD_CGC_GATE__SCM_MASK |
		 UVD_SUVD_CGC_GATE__SDB_MASK |
		 UVD_SUVD_CGC_GATE__SRE_H264_MASK |
		 UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
		 UVD_SUVD_CGC_GATE__SIT_H264_MASK |
		 UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
		 UVD_SUVD_CGC_GATE__SCM_H264_MASK |
		 UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
		 UVD_SUVD_CGC_GATE__SDB_H264_MASK |
		 UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;

	if (enable) {
		data3 |= (UVD_CGC_GATE__SYS_MASK |
			UVD_CGC_GATE__UDEC_MASK |
			UVD_CGC_GATE__MPEG2_MASK |
			UVD_CGC_GATE__RBC_MASK |
			UVD_CGC_GATE__LMI_MC_MASK |
			UVD_CGC_GATE__LMI_UMC_MASK |
			UVD_CGC_GATE__IDCT_MASK |
			UVD_CGC_GATE__MPRD_MASK |
			UVD_CGC_GATE__MPC_MASK |
			UVD_CGC_GATE__LBSI_MASK |
			UVD_CGC_GATE__LRBBM_MASK |
			UVD_CGC_GATE__UDEC_RE_MASK |
			UVD_CGC_GATE__UDEC_CM_MASK |
			UVD_CGC_GATE__UDEC_IT_MASK |
			UVD_CGC_GATE__UDEC_DB_MASK |
			UVD_CGC_GATE__UDEC_MP_MASK |
			UVD_CGC_GATE__WCB_MASK |
			UVD_CGC_GATE__JPEG_MASK |
			UVD_CGC_GATE__SCPU_MASK |
			UVD_CGC_GATE__JPEG2_MASK);
		/* only when PG is enabled can we gate the clock to the VCPU */
		if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
			data3 |= UVD_CGC_GATE__VCPU_MASK;

		data3 &= ~UVD_CGC_GATE__REGS_MASK;
	} else {
		data3 = 0;
	}

	WREG32(mmUVD_SUVD_CGC_GATE, data1);
	WREG32(mmUVD_CGC_GATE, data3);
}
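
/*
 * UVD_CGC_GATE picks which subblocks may have their clocks gated at all,
 * while the *_MODE fields in UVD_CGC_CTRL (cleared below) appear to select
 * between static and dynamic gating per subblock, with DYN_CLOCK_MODE and
 * the delay/timer fields tuning the dynamic behaviour.
 */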

static void uvd_v6_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data2;

	data = RREG32(mmUVD_CGC_CTRL);
	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
		  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
		(1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
		(4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

	data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
			UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
			UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
			UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
			UVD_CGC_CTRL__SYS_MODE_MASK |
			UVD_CGC_CTRL__UDEC_MODE_MASK |
			UVD_CGC_CTRL__MPEG2_MODE_MASK |
			UVD_CGC_CTRL__REGS_MODE_MASK |
			UVD_CGC_CTRL__RBC_MODE_MASK |
			UVD_CGC_CTRL__LMI_MC_MODE_MASK |
			UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
			UVD_CGC_CTRL__IDCT_MODE_MASK |
			UVD_CGC_CTRL__MPRD_MODE_MASK |
			UVD_CGC_CTRL__MPC_MODE_MASK |
			UVD_CGC_CTRL__LBSI_MODE_MASK |
			UVD_CGC_CTRL__LRBBM_MODE_MASK |
			UVD_CGC_CTRL__WCB_MODE_MASK |
			UVD_CGC_CTRL__VCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG_MODE_MASK |
			UVD_CGC_CTRL__SCPU_MODE_MASK |
			UVD_CGC_CTRL__JPEG2_MODE_MASK);
	data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
			UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v6_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
	uint32_t data, data1, cgc_flags, suvd_flags;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);

	cgc_flags = UVD_CGC_GATE__SYS_MASK |
		UVD_CGC_GATE__UDEC_MASK |
		UVD_CGC_GATE__MPEG2_MASK |
		UVD_CGC_GATE__RBC_MASK |
		UVD_CGC_GATE__LMI_MC_MASK |
		UVD_CGC_GATE__IDCT_MASK |
		UVD_CGC_GATE__MPRD_MASK |
		UVD_CGC_GATE__MPC_MASK |
		UVD_CGC_GATE__LBSI_MASK |
		UVD_CGC_GATE__LRBBM_MASK |
		UVD_CGC_GATE__UDEC_RE_MASK |
		UVD_CGC_GATE__UDEC_CM_MASK |
		UVD_CGC_GATE__UDEC_IT_MASK |
		UVD_CGC_GATE__UDEC_DB_MASK |
		UVD_CGC_GATE__UDEC_MP_MASK |
		UVD_CGC_GATE__WCB_MASK |
		UVD_CGC_GATE__VCPU_MASK |
		UVD_CGC_GATE__SCPU_MASK |
		UVD_CGC_GATE__JPEG_MASK |
		UVD_CGC_GATE__JPEG2_MASK;

	suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK;

	data |= cgc_flags;
	data1 |= suvd_flags;

	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

static void uvd_v6_0_enable_mgcg(struct amdgpu_device *adev,
				 bool enable)
{
	u32 orig, data;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data |= 0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	} else {
		data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
		data &= ~0xfff;
		WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

		orig = data = RREG32(mmUVD_CGC_CTRL);
		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		if (orig != data)
			WREG32(mmUVD_CGC_CTRL, data);
	}
}

static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE);

	if (enable) {
		/* wait for STATUS to clear */
		if (uvd_v6_0_wait_for_idle(handle))
			return -EBUSY;
		uvd_v6_0_enable_clock_gating(adev, true);
		/* enable HW gates because UVD is idle */
/*		uvd_v6_0_set_hw_clock_gating(adev); */
	} else {
		/* disable HW gating and enable SW gating */
		uvd_v6_0_enable_clock_gating(adev, false);
	}
	uvd_v6_0_set_sw_clock_gating(adev);
	return 0;
}

static int uvd_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int ret = 0;

	WREG32(mmUVD_POWER_STATUS, UVD_POWER_STATUS__UVD_PG_EN_MASK);

	if (state == AMD_PG_STATE_GATE) {
		uvd_v6_0_stop(adev);
	} else {
		ret = uvd_v6_0_start(adev);
		if (ret)
			goto out;
	}

out:
	return ret;
}

static void uvd_v6_0_get_clockgating_state(void *handle, u32 *flags)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int data;

	mutex_lock(&adev->pm.mutex);

	if (adev->flags & AMD_IS_APU)
		data = RREG32_SMC(ixCURRENT_PG_STATUS_APU);
	else
		data = RREG32_SMC(ixCURRENT_PG_STATUS);

	if (data & CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
		DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
		goto out;
	}

	/* AMD_CG_SUPPORT_UVD_MGCG */
	data = RREG32(mmUVD_CGC_CTRL);
	if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
		*flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
	mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
	.name = "uvd_v6_0",
	.early_init = uvd_v6_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v6_0_sw_init,
	.sw_fini = uvd_v6_0_sw_fini,
	.hw_init = uvd_v6_0_hw_init,
	.hw_fini = uvd_v6_0_hw_fini,
	.suspend = uvd_v6_0_suspend,
	.resume = uvd_v6_0_resume,
	.is_idle = uvd_v6_0_is_idle,
	.wait_for_idle = uvd_v6_0_wait_for_idle,
	.check_soft_reset = uvd_v6_0_check_soft_reset,
	.pre_soft_reset = uvd_v6_0_pre_soft_reset,
	.soft_reset = uvd_v6_0_soft_reset,
	.post_soft_reset = uvd_v6_0_post_soft_reset,
	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
	.set_powergating_state = uvd_v6_0_set_powergating_state,
	.get_clockgating_state = uvd_v6_0_get_clockgating_state,
};
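
/*
 * Two decode-ring variants follow: the "phys" one is used on pre-Polaris
 * parts and runs command streams at physical addresses, which is why it
 * keeps the amdgpu_uvd_ring_parse_cs hook to validate and patch IBs; the
 * VM variant drops the parser and adds the vm_flush/pipeline_sync emits.
 */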

static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_frame_size =
		6 + /* hdp invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		14, /* uvd_v6_0_ring_emit_fence x1 no user fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v6_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD,
	.align_mask = 0xf,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.emit_frame_size =
		6 + /* hdp invalidate */
		10 + /* uvd_v6_0_ring_emit_pipeline_sync */
		VI_FLUSH_GPU_TLB_NUM_WREG * 6 + 8 + /* uvd_v6_0_ring_emit_vm_flush */
		14 + 14, /* uvd_v6_0_ring_emit_fence x2 vm fence */
	.emit_ib_size = 8, /* uvd_v6_0_ring_emit_ib */
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
	.emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = amdgpu_uvd_ring_test_ib,
	.insert_nop = uvd_v6_0_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
	.emit_wreg = uvd_v6_0_ring_emit_wreg,
};

static const struct amdgpu_ring_funcs uvd_v6_0_enc_ring_vm_funcs = {
	.type = AMDGPU_RING_TYPE_UVD_ENC,
	.align_mask = 0x3f,
	.nop = HEVC_ENC_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = uvd_v6_0_enc_ring_get_rptr,
	.get_wptr = uvd_v6_0_enc_ring_get_wptr,
	.set_wptr = uvd_v6_0_enc_ring_set_wptr,
	.emit_frame_size =
		4 + /* uvd_v6_0_enc_ring_emit_pipeline_sync */
		5 + /* uvd_v6_0_enc_ring_emit_vm_flush */
		5 + 5 + /* uvd_v6_0_enc_ring_emit_fence x2 vm fence */
		1, /* uvd_v6_0_enc_ring_insert_end */
	.emit_ib_size = 5, /* uvd_v6_0_enc_ring_emit_ib */
	.emit_ib = uvd_v6_0_enc_ring_emit_ib,
	.emit_fence = uvd_v6_0_enc_ring_emit_fence,
	.emit_vm_flush = uvd_v6_0_enc_ring_emit_vm_flush,
	.emit_pipeline_sync = uvd_v6_0_enc_ring_emit_pipeline_sync,
	.test_ring = uvd_v6_0_enc_ring_test_ring,
	.test_ib = uvd_v6_0_enc_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.insert_end = uvd_v6_0_enc_ring_insert_end,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_uvd_ring_begin_use,
	.end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
	if (adev->asic_type >= CHIP_POLARIS10) {
		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_vm_funcs;
		DRM_INFO("UVD is enabled in VM mode\n");
	} else {
		adev->uvd.inst->ring.funcs = &uvd_v6_0_ring_phys_funcs;
		DRM_INFO("UVD is enabled in physical mode\n");
	}
}

static void uvd_v6_0_set_enc_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->uvd.num_enc_rings; ++i)
		adev->uvd.inst->ring_enc[i].funcs = &uvd_v6_0_enc_ring_vm_funcs;

	DRM_INFO("UVD ENC is enabled in VM mode\n");
}

static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
	.set = uvd_v6_0_set_interrupt_state,
	.process = uvd_v6_0_process_interrupt,
};
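
/*
 * The single interrupt source covers the whole block: it exposes one type
 * for the decode ring's system-message trap, plus one more per enc ring
 * when encode is supported, matching the src_ids wired up in sw_init.
 */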

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	if (uvd_v6_0_enc_support(adev))
		adev->uvd.inst->irq.num_types = adev->uvd.num_enc_rings + 1;
	else
		adev->uvd.inst->irq.num_types = 1;

	adev->uvd.inst->irq.funcs = &uvd_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v6_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 2,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};

const struct amdgpu_ip_block_version uvd_v6_3_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_UVD,
	.major = 6,
	.minor = 3,
	.rev = 0,
	.funcs = &uvd_v6_0_ip_funcs,
};