// SPDX-License-Identifier: MIT
/*
 * Copyright © 2008-2021 Intel Corporation
 */

#include <drm/drm_cache.h>

#include "gem/i915_gem_internal.h"

#include "gen2_engine_cs.h"
#include "gen6_engine_cs.h"
#include "gen6_ppgtt.h"
#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_mitigations.h"
#include "i915_reg.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
#include "intel_engine_regs.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
#include "intel_gt_regs.h"
#include "intel_reset.h"
#include "intel_ring.h"
#include "shmem_utils.h"
#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"

/* Rough estimate of the typical request size, performing a flush,
 * set-context and then emitting the batch.
 */
#define LEGACY_REQUEST_SIZE 200

static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
{
	/*
	 * Keep the render interrupt unmasked as this papers over
	 * lost interrupts following a reset.
	 */
	if (engine->class == RENDER_CLASS) {
		if (GRAPHICS_VER(engine->i915) >= 6)
			mask &= ~BIT(0);
		else
			mask &= ~I915_USER_INTERRUPT;
	}

	intel_engine_set_hwsp_writemask(engine, mask);
}

static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
{
	u32 addr;

	addr = lower_32_bits(phys);
	if (GRAPHICS_VER(engine->i915) >= 4)
		addr |= (phys >> 28) & 0xf0;

	intel_uncore_write(engine->uncore, HWS_PGA, addr);
}

static struct vm_page *status_page(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj = engine->status_page.vma->obj;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
	return sg_page(obj->mm.pages->sgl);
}

static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
{
	set_hws_pga(engine, PFN_PHYS(page_to_pfn(status_page(engine))));
	set_hwstam(engine, ~0u);
}

static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
{
	i915_reg_t hwsp;

	/*
	 * The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (GRAPHICS_VER(engine->i915) == 7) {
		switch (engine->id) {
		/*
		 * No more rings exist on Gen7. Default case is only to shut up
		 * gcc switch check warning.
		 */
		default:
			GEM_BUG_ON(engine->id);
			fallthrough;
		case RCS0:
			hwsp = RENDER_HWS_PGA_GEN7;
			break;
		case BCS0:
			hwsp = BLT_HWS_PGA_GEN7;
			break;
		case VCS0:
			hwsp = BSD_HWS_PGA_GEN7;
			break;
		case VECS0:
			hwsp = VEBOX_HWS_PGA_GEN7;
			break;
		}
	} else if (GRAPHICS_VER(engine->i915) == 6) {
		hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
	} else {
		hwsp = RING_HWS_PGA(engine->mmio_base);
	}

	intel_uncore_write_fw(engine->uncore, hwsp, offset);
	intel_uncore_posting_read_fw(engine->uncore, hwsp);
}

static void flush_cs_tlb(struct intel_engine_cs *engine)
{
	if (!IS_GRAPHICS_VER(engine->i915, 6, 7))
		return;

	/* ring should be idle before issuing a sync flush */
	if ((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0)
		drm_warn(&engine->i915->drm, "%s not idle before sync flush!\n",
			 engine->name);

	ENGINE_WRITE_FW(engine, RING_INSTPM,
			_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
					   INSTPM_SYNC_FLUSH));
	if (__intel_wait_for_register_fw(engine->uncore,
					 RING_INSTPM(engine->mmio_base),
					 INSTPM_SYNC_FLUSH, 0,
					 2000, 0, NULL))
		ENGINE_TRACE(engine,
			     "wait for SyncFlush to complete for TLB invalidation timed out\n");
}

static void ring_setup_status_page(struct intel_engine_cs *engine)
{
	set_hwsp(engine, i915_ggtt_offset(engine->status_page.vma));
	set_hwstam(engine, ~0u);

	flush_cs_tlb(engine);
}

static struct i915_address_space *vm_alias(struct i915_address_space *vm)
{
	if (i915_is_ggtt(vm))
		vm = &i915_vm_to_ggtt(vm)->alias->vm;

	return vm;
}

static u32 pp_dir(struct i915_address_space *vm)
{
	return to_gen6_ppgtt(i915_vm_to_ppgtt(vm))->pp_dir;
}

static void set_pp_dir(struct intel_engine_cs *engine)
{
	struct i915_address_space *vm = vm_alias(engine->gt->vm);

	if (!vm)
		return;

	ENGINE_WRITE_FW(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
	ENGINE_WRITE_FW(engine, RING_PP_DIR_BASE, pp_dir(vm));

	if (GRAPHICS_VER(engine->i915) >= 7) {
		ENGINE_WRITE_FW(engine,
				RING_MODE_GEN7,
				_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	}
}

static bool stop_ring(struct intel_engine_cs *engine)
{
	/* Empty the ring by skipping to the end */
	ENGINE_WRITE_FW(engine, RING_HEAD, ENGINE_READ_FW(engine, RING_TAIL));
	ENGINE_POSTING_READ(engine, RING_HEAD);

	/* The ring must be empty before it is disabled */
	ENGINE_WRITE_FW(engine, RING_CTL, 0);
	ENGINE_POSTING_READ(engine, RING_CTL);

	/* Then reset the disabled ring */
	ENGINE_WRITE_FW(engine, RING_HEAD, 0);
	ENGINE_WRITE_FW(engine, RING_TAIL, 0);

	return (ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) == 0;
}

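/*
 * Bring the legacy ringbuffer back to life after reset or resume:
 * (re)program the status page, point RING_START at the ring vma,
 * restore HEAD/TAIL and wait for the ring to report itself VALID.
 */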
static int xcs_resume(struct intel_engine_cs *engine)
{
	struct intel_ring *ring = engine->legacy.ring;

	ENGINE_TRACE(engine, "ring:{HEAD:%04x, TAIL:%04x}\n",
		     ring->head, ring->tail);

	/*
	 * Double check the ring is empty & disabled before we resume. Called
	 * from atomic context during PCI probe, so _hardirq().
	 */
	intel_synchronize_hardirq(engine->i915);
	if (!stop_ring(engine))
		goto err;

	if (HWS_NEEDS_PHYSICAL(engine->i915))
		ring_setup_phys_status_page(engine);
	else
		ring_setup_status_page(engine);

	intel_breadcrumbs_reset(engine->breadcrumbs);

	/* Enforce ordering by reading HEAD register back */
	ENGINE_POSTING_READ(engine, RING_HEAD);

	/*
	 * Initialize the ring. This must happen _after_ we've cleared the ring
	 * registers with the above sequence (the readback of the HEAD registers
	 * also enforces ordering), otherwise the hw might lose the new ring
	 * register values.
	 */
	ENGINE_WRITE_FW(engine, RING_START, i915_ggtt_offset(ring->vma));

	/* Check that the ring offsets point within the ring! */
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
	GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
	intel_ring_update_space(ring);

	set_pp_dir(engine);

	/* First wake the ring up to an empty/idle ring */
	ENGINE_WRITE_FW(engine, RING_HEAD, ring->head);
	ENGINE_WRITE_FW(engine, RING_TAIL, ring->head);
	ENGINE_POSTING_READ(engine, RING_TAIL);

	ENGINE_WRITE_FW(engine, RING_CTL,
			RING_CTL_SIZE(ring->size) | RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (__intel_wait_for_register_fw(engine->uncore,
					 RING_CTL(engine->mmio_base),
					 RING_VALID, RING_VALID,
					 5000, 0, NULL))
		goto err;

	if (GRAPHICS_VER(engine->i915) > 2)
		ENGINE_WRITE_FW(engine,
				RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));

	/* Now awake, let it get started */
	if (ring->tail != ring->head) {
		ENGINE_WRITE_FW(engine, RING_TAIL, ring->tail);
		ENGINE_POSTING_READ(engine, RING_TAIL);
	}

	/* Papering over lost _interrupts_ immediately following the restart */
	intel_engine_signal_breadcrumbs(engine);
	return 0;

err:
	drm_err(&engine->i915->drm,
		"%s initialization failed; "
		"ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
		engine->name,
		ENGINE_READ(engine, RING_CTL),
		ENGINE_READ(engine, RING_CTL) & RING_VALID,
		ENGINE_READ(engine, RING_HEAD), ring->head,
		ENGINE_READ(engine, RING_TAIL), ring->tail,
		ENGINE_READ(engine, RING_START),
		i915_ggtt_offset(ring->vma));
	return -EIO;
}

static void sanitize_hwsp(struct intel_engine_cs *engine)
{
	struct intel_timeline *tl;

	list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
		intel_timeline_reset_seqno(tl);
}

static void xcs_sanitize(struct intel_engine_cs *engine)
{
	/*
	 * Poison residual state on resume, in case the suspend didn't!
	 *
	 * We have to assume that across suspend/resume (or other loss
	 * of control) that the contents of our pinned buffers have been
	 * lost, replaced by garbage. Since this doesn't always happen,
	 * let's poison such state so that we more quickly spot when
	 * we falsely assume it has been preserved.
	 */
	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);

	/*
	 * The kernel_context HWSP is stored in the status_page. As above,
	 * that may be lost on resume/initialisation, and so we need to
	 * reset the value in the HWSP.
	 */
	sanitize_hwsp(engine);

	/* And scrub the dirty cachelines for the HWSP */
	drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);

	intel_engine_reset_pinned_contexts(engine);
}

static void reset_prepare(struct intel_engine_cs *engine)
{
	/*
	 * We stop engines, otherwise we might get a failed reset and a
	 * dead gpu (on elk). Also, even a modern gpu such as kbl can suffer
	 * from a system hang if the batchbuffer is progressing when
	 * the reset is issued, regardless of the READY_TO_RESET ack.
	 * Thus assume it is best to stop the engines on all gens
	 * where we have a gpu reset.
	 *
	 * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
	 *
	 * WaMediaResetMainRingCleanup:ctg,elk (presumably)
	 * WaClearRingBufHeadRegAtInit:ctg,elk
	 *
	 * FIXME: Wa for more modern gens needs to be validated
	 */
	ENGINE_TRACE(engine, "\n");
	intel_engine_stop_cs(engine);

	if (!stop_ring(engine)) {
		/* G45 ring initialization often fails to reset head to zero */
		ENGINE_TRACE(engine,
			     "HEAD not reset to zero, "
			     "{ CTL:%08x, HEAD:%08x, TAIL:%08x, START:%08x }\n",
			     ENGINE_READ_FW(engine, RING_CTL),
			     ENGINE_READ_FW(engine, RING_HEAD),
			     ENGINE_READ_FW(engine, RING_TAIL),
			     ENGINE_READ_FW(engine, RING_START));
		if (!stop_ring(engine)) {
			drm_err(&engine->i915->drm,
				"failed to set %s head to zero "
				"ctl %08x head %08x tail %08x start %08x\n",
				engine->name,
				ENGINE_READ_FW(engine, RING_CTL),
				ENGINE_READ_FW(engine, RING_HEAD),
				ENGINE_READ_FW(engine, RING_TAIL),
				ENGINE_READ_FW(engine, RING_START));
		}
	}
}

static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
	struct i915_request *pos, *rq;
	unsigned long flags;
	u32 head;

	rq = NULL;
	spin_lock_irqsave(&engine->sched_engine->lock, flags);
	rcu_read_lock();
	list_for_each_entry(pos, &engine->sched_engine->requests, sched.link) {
		if (!__i915_request_is_complete(pos)) {
			rq = pos;
			break;
		}
	}
	rcu_read_unlock();

	/*
	 * The guilty request will get skipped on a hung engine.
	 *
	 * Users of client default contexts do not rely on logical
	 * state preserved between batches so it is safe to execute
	 * queued requests following the hang. Non default contexts
	 * rely on preserved state, so skipping a batch loses the
	 * evolution of the state and it needs to be considered corrupted.
	 * Executing more queued batches on top of corrupted state is
	 * risky. But we take the risk by trying to advance through
	 * the queued requests in order to make the client behaviour
	 * more predictable around resets, by not throwing away a random
	 * amount of the batches it has prepared for execution. Sophisticated
	 * clients can use the gem_reset_stats_ioctl and dma fence status
	 * (exported via the sync_file info ioctl on explicit fences) to observe
	 * when they lose the context state and should rebuild accordingly.
	 *
	 * The context ban, and ultimately the client ban, mechanisms are safety
	 * valves if client submission ends up resulting in nothing more than
	 * subsequent hangs.
	 */

	if (rq) {
		/*
		 * Try to restore the logical GPU state to match the
		 * continuation of the request queue. If we skip the
		 * context/PD restore, then the next request may try to execute
		 * assuming that its context is valid and loaded on the GPU and
		 * so may try to access invalid memory, prompting repeated GPU
		 * hangs.
		 *
		 * If the request was guilty, we still restore the logical
		 * state in case the next request requires it (e.g. the
		 * aliasing ppgtt), but skip over the hung batch.
		 *
		 * If the request was innocent, we try to replay the request
		 * with the restored context.
		 */
		__i915_request_reset(rq, stalled);

		GEM_BUG_ON(rq->ring != engine->legacy.ring);
		head = rq->head;
	} else {
		head = engine->legacy.ring->tail;
	}
	engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);

	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}

static void reset_finish(struct intel_engine_cs *engine)
{
}

static void reset_cancel(struct intel_engine_cs *engine)
{
	struct i915_request *request;
	unsigned long flags;

	spin_lock_irqsave(&engine->sched_engine->lock, flags);

	/* Mark all submitted requests as skipped. */
	list_for_each_entry(request, &engine->sched_engine->requests, sched.link)
		i915_request_put(i915_request_mark_eio(request));
	intel_engine_signal_breadcrumbs(engine);

	/* Remaining _unready_ requests will be nop'ed when submitted */

	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}

static void i9xx_submit_request(struct i915_request *request)
{
	i915_request_submit(request);
	wmb(); /* paranoid flush writes out of the WCB before mmio */

	ENGINE_WRITE(request->engine, RING_TAIL,
		     intel_ring_set_tail(request->ring, request->tail));
}

static void __ring_context_fini(struct intel_context *ce)
{
	i915_vma_put(ce->state);
}

static void ring_context_destroy(struct kref *ref)
{
	struct intel_context *ce = container_of(ref, typeof(*ce), ref);

	GEM_BUG_ON(intel_context_is_pinned(ce));

	if (ce->state)
		__ring_context_fini(ce);

	intel_context_fini(ce);
	intel_context_free(ce);
}

static int ring_context_init_default_state(struct intel_context *ce,
					   struct i915_gem_ww_ctx *ww)
{
	struct drm_i915_gem_object *obj = ce->state->obj;
	void *vaddr;

	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

#ifdef __linux__
	shmem_read(ce->engine->default_state, 0,
		   vaddr, ce->engine->context_size);
#else
	uao_read(ce->engine->default_state, 0,
		 vaddr, ce->engine->context_size);
#endif

	i915_gem_object_flush_map(obj);
	__i915_gem_object_release_map(obj);

	__set_bit(CONTEXT_VALID_BIT, &ce->flags);
	return 0;
}

static int ring_context_pre_pin(struct intel_context *ce,
				struct i915_gem_ww_ctx *ww,
				void **unused)
{
	struct i915_address_space *vm;
	int err = 0;

	if (ce->engine->default_state &&
	    !test_bit(CONTEXT_VALID_BIT, &ce->flags)) {
		err = ring_context_init_default_state(ce, ww);
		if (err)
			return err;
	}

	vm = vm_alias(ce->vm);
	if (vm)
		err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm)), ww);

	return err;
}

static void __context_unpin_ppgtt(struct intel_context *ce)
{
	struct i915_address_space *vm;

	vm = vm_alias(ce->vm);
	if (vm)
		gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
}

static void ring_context_unpin(struct intel_context *ce)
{
}

static void ring_context_post_unpin(struct intel_context *ce)
{
	__context_unpin_ppgtt(ce);
}

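/*
 * Allocate the backing store for the legacy (MI_SET_CONTEXT) context
 * image, sized by engine->context_size, and return a GGTT vma for it.
 */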
static struct i915_vma *
alloc_context_vma(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_shmem(i915, engine->context_size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/*
	 * Try to make the context utilize L3 as well as LLC.
	 *
	 * On VLV we don't have L3 controls in the PTEs so we
	 * shouldn't touch the cache level, especially as that
	 * would make the object snooped which might have a
	 * negative performance impact.
	 *
	 * Snooping is required on non-llc platforms in execlist
	 * mode, but since all GGTT accesses use PAT entry 0 we
	 * get snooping anyway regardless of cache_level.
	 *
	 * This is only applicable for Ivy Bridge devices since
	 * later platforms don't have L3 control bits in the PTE.
	 */
	if (IS_IVYBRIDGE(i915))
		i915_gem_object_set_cache_coherency(obj, I915_CACHE_L3_LLC);

	vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	return vma;

err_obj:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int ring_context_alloc(struct intel_context *ce)
{
	struct intel_engine_cs *engine = ce->engine;

	/* One ringbuffer to rule them all */
	GEM_BUG_ON(!engine->legacy.ring);
	ce->ring = engine->legacy.ring;
	ce->timeline = intel_timeline_get(engine->legacy.timeline);

	GEM_BUG_ON(ce->state);
	if (engine->context_size) {
		struct i915_vma *vma;

		vma = alloc_context_vma(engine);
		if (IS_ERR(vma))
			return PTR_ERR(vma);

		ce->state = vma;
	}

	return 0;
}

static int ring_context_pin(struct intel_context *ce, void *unused)
{
	return 0;
}

static void ring_context_reset(struct intel_context *ce)
{
	intel_ring_reset(ce->ring, ce->ring->emit);
	clear_bit(CONTEXT_VALID_BIT, &ce->flags);
}

static void ring_context_revoke(struct intel_context *ce,
				struct i915_request *rq,
				unsigned int preempt_timeout_ms)
{
	struct intel_engine_cs *engine;

	if (!rq || !i915_request_is_active(rq))
		return;

	engine = rq->engine;
	lockdep_assert_held(&engine->sched_engine->lock);
	list_for_each_entry_continue(rq, &engine->sched_engine->requests,
				     sched.link)
		if (rq->context == ce) {
			i915_request_set_error_once(rq, -EIO);
			__i915_request_skip(rq);
		}
}

static void ring_context_cancel_request(struct intel_context *ce,
					struct i915_request *rq)
{
	struct intel_engine_cs *engine = NULL;

	i915_request_active_engine(rq, &engine);

	if (engine && intel_engine_pulse(engine))
		intel_gt_handle_error(engine->gt, engine->mask, 0,
				      "request cancellation by %s",
				      curproc->p_p->ps_comm);
}

static const struct intel_context_ops ring_context_ops = {
	.alloc = ring_context_alloc,

	.cancel_request = ring_context_cancel_request,

	.revoke = ring_context_revoke,

	.pre_pin = ring_context_pre_pin,
	.pin = ring_context_pin,
	.unpin = ring_context_unpin,
	.post_unpin = ring_context_post_unpin,

	.enter = intel_context_enter_engine,
	.exit = intel_context_exit_engine,

	.reset = ring_context_reset,
	.destroy = ring_context_destroy,
};

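/*
 * Emit the commands to point the ring's PP_DIR_DCLV/PP_DIR_BASE at the
 * ppgtt's page directory, then force a TLB invalidation so that the new
 * page tables take effect before the following batch.
 */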
static int load_pd_dir(struct i915_request *rq,
		       struct i915_address_space *vm,
		       u32 valid)
{
	const struct intel_engine_cs * const engine = rq->engine;
	u32 *cs;

	cs = intel_ring_begin(rq, 12);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine->mmio_base));
	*cs++ = valid;

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = pp_dir(vm);

	/* Stall until the page table load is complete? */
	*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
	*cs++ = intel_gt_scratch_offset(engine->gt,
					INTEL_GT_SCRATCH_FIELD_DEFAULT);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base));
	*cs++ = _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE);

	intel_ring_advance(rq, cs);

	return rq->engine->emit_flush(rq, EMIT_FLUSH);
}

/*
 * Emit an MI_SET_CONTEXT pointing at @ce's context image, wrapped in the
 * gen-specific workarounds: MI_ARB off/on and PSMI sleep-message toggling
 * on the other engines for gen7, and a suspend flush on ilk.
 */
static int mi_set_context(struct i915_request *rq,
			  struct intel_context *ce,
			  u32 flags)
{
	struct intel_engine_cs *engine = rq->engine;
	struct drm_i915_private *i915 = engine->i915;
	enum intel_engine_id id;
	const int num_engines =
		IS_HASWELL(i915) ? engine->gt->info.num_engines - 1 : 0;
	bool force_restore = false;
	int len;
	u32 *cs;

	len = 4;
	if (GRAPHICS_VER(i915) == 7)
		len += 2 + (num_engines ? 4 * num_engines + 6 : 0);
	else if (GRAPHICS_VER(i915) == 5)
		len += 2;
	if (flags & MI_FORCE_RESTORE) {
		GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
		flags &= ~MI_FORCE_RESTORE;
		force_restore = true;
		len += 2;
	}

	cs = intel_ring_begin(rq, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
	if (GRAPHICS_VER(i915) == 7) {
		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
		if (num_engines) {
			struct intel_engine_cs *signaller;

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				*cs++ = i915_mmio_reg_offset(
					   RING_PSMI_CTL(signaller->mmio_base));
				*cs++ = _MASKED_BIT_ENABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}
		}
	} else if (GRAPHICS_VER(i915) == 5) {
		/*
		 * This w/a is only listed for pre-production ilk a/b steppings,
		 * but is also mentioned for programming the powerctx. To be
		 * safe, just apply the workaround; we do not use SyncFlush so
		 * this should never take effect and so be a no-op!
		 */
		*cs++ = MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN;
	}

	if (force_restore) {
		/*
		 * The HW doesn't handle being told to restore the current
		 * context very well. Quite often it likes to go off and
		 * sulk, especially when it is meant to be reloading PP_DIR.
		 * A very simple way to force the reload is to switch
		 * away from the current context and back again.
		 *
		 * Note that the kernel_context will contain random state
		 * following the INHIBIT_RESTORE. We accept this since we
		 * never use the kernel_context state; it is merely a
		 * placeholder we use to flush other contexts.
		 */
		*cs++ = MI_SET_CONTEXT;
		*cs++ = i915_ggtt_offset(engine->kernel_context->state) |
			MI_MM_SPACE_GTT |
			MI_RESTORE_INHIBIT;
	}

	*cs++ = MI_NOOP;
	*cs++ = MI_SET_CONTEXT;
	*cs++ = i915_ggtt_offset(ce->state) | flags;
	/*
	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
	 * WaMiSetContext_Hang:snb,ivb,vlv
	 */
	*cs++ = MI_NOOP;

	if (GRAPHICS_VER(i915) == 7) {
		if (num_engines) {
			struct intel_engine_cs *signaller;
			i915_reg_t last_reg = INVALID_MMIO_REG; /* keep gcc quiet */

			*cs++ = MI_LOAD_REGISTER_IMM(num_engines);
			for_each_engine(signaller, engine->gt, id) {
				if (signaller == engine)
					continue;

				last_reg = RING_PSMI_CTL(signaller->mmio_base);
				*cs++ = i915_mmio_reg_offset(last_reg);
				*cs++ = _MASKED_BIT_DISABLE(
						GEN6_PSMI_SLEEP_MSG_DISABLE);
			}

			/* Insert a delay before the next switch! */
			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
			*cs++ = i915_mmio_reg_offset(last_reg);
			*cs++ = intel_gt_scratch_offset(engine->gt,
							INTEL_GT_SCRATCH_FIELD_DEFAULT);
			*cs++ = MI_NOOP;
		}
		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	} else if (GRAPHICS_VER(i915) == 5) {
		*cs++ = MI_SUSPEND_FLUSH;
	}

	intel_ring_advance(rq, cs);

	return 0;
}

static int remap_l3_slice(struct i915_request *rq, int slice)
{
#define L3LOG_DW (GEN7_L3LOG_SIZE / sizeof(u32))
	u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
	int i;

	if (!remap_info)
		return 0;

	cs = intel_ring_begin(rq, L3LOG_DW * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * Note: We do not worry about the concurrent register cacheline hang
	 * here because no other code should access these registers other than
	 * at initialization time.
	 */
	*cs++ = MI_LOAD_REGISTER_IMM(L3LOG_DW);
	for (i = 0; i < L3LOG_DW; i++) {
		*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
		*cs++ = remap_info[i];
	}
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	return 0;
#undef L3LOG_DW
}

static int remap_l3(struct i915_request *rq)
{
	struct i915_gem_context *ctx = i915_request_gem_context(rq);
	int i, err;

	if (!ctx || !ctx->remap_slice)
		return 0;

	for (i = 0; i < MAX_L3_SLICES; i++) {
		if (!(ctx->remap_slice & BIT(i)))
			continue;

		err = remap_l3_slice(rq, i);
		if (err)
			return err;
	}

	ctx->remap_slice = 0;
	return 0;
}

static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
{
	int ret;

	if (!vm)
		return 0;

	ret = rq->engine->emit_flush(rq, EMIT_FLUSH);
	if (ret)
		return ret;

	/*
	 * Not only do we need a full barrier (post-sync write) after
	 * invalidating the TLBs, but we need to wait a little bit
	 * longer. Whether this is merely delaying us, or the
	 * subsequent flush is a key part of serialising with the
	 * post-sync op, this extra pass appears vital before a
	 * mm switch!
	 */
	ret = load_pd_dir(rq, vm, PP_DIR_DCLV_2G);
	if (ret)
		return ret;

	return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
}

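/*
 * Scrub the residual state left behind by the previous context: switch to
 * the kernel context (and its mm) and run the engine->wa_ctx batch built by
 * gen7_setup_clear_gpr_bb() before another user context is loaded. Only used
 * when i915_mitigate_clear_residuals() is enabled, see switch_context().
 */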
static int clear_residuals(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	int ret;

	ret = switch_mm(rq, vm_alias(engine->kernel_context->vm));
	if (ret)
		return ret;

	if (engine->kernel_context->state) {
		ret = mi_set_context(rq,
				     engine->kernel_context,
				     MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT);
		if (ret)
			return ret;
	}

	ret = engine->emit_bb_start(rq,
				    i915_vma_offset(engine->wa_ctx.vma), 0,
				    0);
	if (ret)
		return ret;

	ret = engine->emit_flush(rq, EMIT_FLUSH);
	if (ret)
		return ret;

	/* Always invalidate before the next switch_mm() */
	return engine->emit_flush(rq, EMIT_INVALIDATE);
}

static int switch_context(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct intel_context *ce = rq->context;
	void **residuals = NULL;
	int ret;

	GEM_BUG_ON(HAS_EXECLISTS(engine->i915));

	if (engine->wa_ctx.vma && ce != engine->kernel_context) {
		if (engine->wa_ctx.vma->private != ce &&
		    i915_mitigate_clear_residuals()) {
			ret = clear_residuals(rq);
			if (ret)
				return ret;

			residuals = &engine->wa_ctx.vma->private;
		}
	}

	ret = switch_mm(rq, vm_alias(ce->vm));
	if (ret)
		return ret;

	if (ce->state) {
		u32 flags;

		GEM_BUG_ON(engine->id != RCS0);

		/* For resource streamer on HSW+ and power context elsewhere */
		BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
		BUILD_BUG_ON(HSW_MI_RS_RESTORE_STATE_EN != MI_RESTORE_EXT_STATE_EN);

		flags = MI_SAVE_EXT_STATE_EN | MI_MM_SPACE_GTT;
		if (test_bit(CONTEXT_VALID_BIT, &ce->flags))
			flags |= MI_RESTORE_EXT_STATE_EN;
		else
			flags |= MI_RESTORE_INHIBIT;

		ret = mi_set_context(rq, ce, flags);
		if (ret)
			return ret;
	}

	ret = remap_l3(rq);
	if (ret)
		return ret;

	/*
	 * Now past the point of no return, this request _will_ be emitted.
	 *
	 * Or at least this preamble will be emitted, the request may be
	 * interrupted prior to submitting the user payload. If so, we
	 * still submit the "empty" request in order to preserve global
	 * state tracking such as this, our tracking of the current
	 * dirty context.
	 */
	if (residuals) {
		intel_context_put(*residuals);
		*residuals = intel_context_get(ce);
	}

	return 0;
}

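/*
 * Per-request preamble for the legacy ring: reserve space for the final
 * breadcrumb, invalidate the GPU caches/TLBs and emit the context switch
 * (mm switch, MI_SET_CONTEXT, L3 remap) before the caller adds its payload.
 */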
static int ring_request_alloc(struct i915_request *request)
{
	int ret;

	GEM_BUG_ON(!intel_context_is_pinned(request->context));
	GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb);

	/*
	 * Flush enough space to reduce the likelihood of waiting after
	 * we start building the request - in which case we will just
	 * have to repeat work.
	 */
	request->reserved_space += LEGACY_REQUEST_SIZE;

	/* Unconditionally invalidate GPU caches and TLBs. */
	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
	if (ret)
		return ret;

	ret = switch_context(request);
	if (ret)
		return ret;

	request->reserved_space -= LEGACY_REQUEST_SIZE;
	return 0;
}

static void gen6_bsd_submit_request(struct i915_request *request)
{
	struct intel_uncore *uncore = request->engine->uncore;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);

	/* Every tail move must follow the sequence below */

	/* Disable notification that the ring is IDLE. The GT
	 * will then assume that it is busy and bring it out of rc6.
	 */
	intel_uncore_write_fw(uncore, RING_PSMI_CTL(GEN6_BSD_RING_BASE),
			      _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));

	/* Clear the context id. Here be magic! */
	intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);

	/* Wait for the ring not to be idle, i.e. for it to wake up. */
	if (__intel_wait_for_register_fw(uncore,
					 RING_PSMI_CTL(GEN6_BSD_RING_BASE),
					 GEN6_BSD_SLEEP_INDICATOR,
					 0,
					 1000, 0, NULL))
		drm_err(&uncore->i915->drm,
			"timed out waiting for the BSD ring to wake up\n");

	/* Now that the ring is fully powered up, update the tail */
	i9xx_submit_request(request);

	/* Let the ring send IDLE messages to the GT again,
	 * and so let it sleep to conserve power when idle.
	 */
	intel_uncore_write_fw(uncore, RING_PSMI_CTL(GEN6_BSD_RING_BASE),
			      _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));

	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}

static void i9xx_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = i9xx_submit_request;
}

static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
{
	engine->submit_request = gen6_bsd_submit_request;
}

static void ring_release(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	drm_WARN_ON(&i915->drm, GRAPHICS_VER(i915) > 2 &&
		    (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);

	intel_engine_cleanup_common(engine);

	if (engine->wa_ctx.vma) {
		intel_context_put(engine->wa_ctx.vma->private);
		i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
	}

	intel_ring_unpin(engine->legacy.ring);
	intel_ring_put(engine->legacy.ring);

	intel_timeline_unpin(engine->legacy.timeline);
	intel_timeline_put(engine->legacy.timeline);
}

static void irq_handler(struct intel_engine_cs *engine, u16 iir)
{
	intel_engine_signal_breadcrumbs(engine);
}

static void setup_irq(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	intel_engine_set_irq_handler(engine, irq_handler);

	if (GRAPHICS_VER(i915) >= 6) {
		engine->irq_enable = gen6_irq_enable;
		engine->irq_disable = gen6_irq_disable;
	} else if (GRAPHICS_VER(i915) >= 5) {
		engine->irq_enable = gen5_irq_enable;
		engine->irq_disable = gen5_irq_disable;
	} else if (GRAPHICS_VER(i915) >= 3) {
		engine->irq_enable = gen3_irq_enable;
		engine->irq_disable = gen3_irq_disable;
	} else {
		engine->irq_enable = gen2_irq_enable;
		engine->irq_disable = gen2_irq_disable;
	}
}

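/*
 * add_to_engine/remove_from_engine keep engine->sched_engine->requests in
 * submission order for the legacy ring; reset_rewind() and reset_cancel()
 * walk this list when recovering from a hang.
 */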
static void add_to_engine(struct i915_request *rq)
{
	lockdep_assert_held(&rq->engine->sched_engine->lock);
	list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
}

static void remove_from_engine(struct i915_request *rq)
{
	spin_lock_irq(&rq->engine->sched_engine->lock);
	list_del_init(&rq->sched.link);

	/* Prevent further __await_execution() registering a cb, then flush */
	set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);

	spin_unlock_irq(&rq->engine->sched_engine->lock);

	i915_request_notify_execute_cb_imm(rq);
}

static void setup_common(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	/* gen8+ are only supported with execlists */
	GEM_BUG_ON(GRAPHICS_VER(i915) >= 8);

	setup_irq(engine);

	engine->resume = xcs_resume;
	engine->sanitize = xcs_sanitize;

	engine->reset.prepare = reset_prepare;
	engine->reset.rewind = reset_rewind;
	engine->reset.cancel = reset_cancel;
	engine->reset.finish = reset_finish;

	engine->add_active_request = add_to_engine;
	engine->remove_active_request = remove_from_engine;

	engine->cops = &ring_context_ops;
	engine->request_alloc = ring_request_alloc;

	/*
	 * Using a global execution timeline; the previous final breadcrumb is
	 * equivalent to our next initial breadcrumb, so we can elide
	 * engine->emit_init_breadcrumb().
	 */
	engine->emit_fini_breadcrumb = gen3_emit_breadcrumb;
	if (GRAPHICS_VER(i915) == 5)
		engine->emit_fini_breadcrumb = gen5_emit_breadcrumb;

	engine->set_default_submission = i9xx_set_default_submission;

	if (GRAPHICS_VER(i915) >= 6)
		engine->emit_bb_start = gen6_emit_bb_start;
	else if (GRAPHICS_VER(i915) >= 4)
		engine->emit_bb_start = gen4_emit_bb_start;
	else if (IS_I830(i915) || IS_I845G(i915))
		engine->emit_bb_start = i830_emit_bb_start;
	else
		engine->emit_bb_start = gen3_emit_bb_start;
}

static void setup_rcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (HAS_L3_DPF(i915))
		engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;

	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;

	if (GRAPHICS_VER(i915) >= 7) {
		engine->emit_flush = gen7_emit_flush_rcs;
		engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_rcs;
	} else if (GRAPHICS_VER(i915) == 6) {
		engine->emit_flush = gen6_emit_flush_rcs;
		engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_rcs;
	} else if (GRAPHICS_VER(i915) == 5) {
		engine->emit_flush = gen4_emit_flush_rcs;
	} else {
		if (GRAPHICS_VER(i915) < 4)
			engine->emit_flush = gen2_emit_flush;
		else
			engine->emit_flush = gen4_emit_flush_rcs;
		engine->irq_enable_mask = I915_USER_INTERRUPT;
	}

	if (IS_HASWELL(i915))
		engine->emit_bb_start = hsw_emit_bb_start;
}

static void setup_vcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	if (GRAPHICS_VER(i915) >= 6) {
		/* gen6 bsd needs a special wa for tail updates */
		if (GRAPHICS_VER(i915) == 6)
			engine->set_default_submission = gen6_bsd_set_default_submission;
		engine->emit_flush = gen6_emit_flush_vcs;
		engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;

		if (GRAPHICS_VER(i915) == 6)
			engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
		else
			engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
	} else {
		engine->emit_flush = gen4_emit_flush_vcs;
		if (GRAPHICS_VER(i915) == 5)
			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
		else
			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
	}
}

static void setup_bcs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	engine->emit_flush = gen6_emit_flush_xcs;
	engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;

	if (GRAPHICS_VER(i915) == 6)
		engine->emit_fini_breadcrumb = gen6_emit_breadcrumb_xcs;
	else
		engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}

static void setup_vecs(struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;

	GEM_BUG_ON(GRAPHICS_VER(i915) < 7);

	engine->emit_flush = gen6_emit_flush_xcs;
	engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
	engine->irq_enable = hsw_irq_enable_vecs;
	engine->irq_disable = hsw_irq_disable_vecs;

	engine->emit_fini_breadcrumb = gen7_emit_breadcrumb_xcs;
}

static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine,
				    struct i915_vma * const vma)
{
	return gen7_setup_clear_gpr_bb(engine, vma);
}

static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine,
				   struct i915_gem_ww_ctx *ww,
				   struct i915_vma *vma)
{
	int err;

	err = i915_vma_pin_ww(vma, ww, 0, 0, PIN_USER | PIN_HIGH);
	if (err)
		return err;

	err = i915_vma_sync(vma);
	if (err)
		goto err_unpin;

	err = gen7_ctx_switch_bb_setup(engine, vma);
	if (err)
		goto err_unpin;

	engine->wa_ctx.vma = vma;
	return 0;

err_unpin:
	i915_vma_unpin(vma);
	return err;
}

static struct i915_vma *gen7_ctx_vma(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int size, err;

	if (GRAPHICS_VER(engine->i915) != 7 || engine->class != RENDER_CLASS)
		return NULL;

	err = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
	if (err < 0)
		return ERR_PTR(err);
	if (!err)
		return NULL;

	size = ALIGN(err, PAGE_SIZE);

	obj = i915_gem_object_create_internal(engine->i915, size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	vma = i915_vma_instance(obj, engine->gt->vm, NULL);
	if (IS_ERR(vma)) {
		i915_gem_object_put(obj);
		return ERR_CAST(vma);
	}

	vma->private = intel_context_create(engine); /* dummy residuals */
	if (IS_ERR(vma->private)) {
		err = PTR_ERR(vma->private);
		vma->private = NULL;
		i915_gem_object_put(obj);
		return ERR_PTR(err);
	}

	return vma;
}

int intel_ring_submission_setup(struct intel_engine_cs *engine)
{
	struct i915_gem_ww_ctx ww;
	struct intel_timeline *timeline;
	struct intel_ring *ring;
	struct i915_vma *gen7_wa_vma;
	int err;

	setup_common(engine);

	switch (engine->class) {
	case RENDER_CLASS:
		setup_rcs(engine);
		break;
	case VIDEO_DECODE_CLASS:
		setup_vcs(engine);
		break;
	case COPY_ENGINE_CLASS:
		setup_bcs(engine);
		break;
	case VIDEO_ENHANCEMENT_CLASS:
		setup_vecs(engine);
		break;
	default:
		MISSING_CASE(engine->class);
		return -ENODEV;
	}

	timeline = intel_timeline_create_from_engine(engine,
						     I915_GEM_HWS_SEQNO_ADDR);
	if (IS_ERR(timeline)) {
		err = PTR_ERR(timeline);
		goto err;
	}
	GEM_BUG_ON(timeline->has_initial_breadcrumb);

	ring = intel_engine_create_ring(engine, SZ_16K);
	if (IS_ERR(ring)) {
		err = PTR_ERR(ring);
		goto err_timeline;
	}

	GEM_BUG_ON(engine->legacy.ring);
	engine->legacy.ring = ring;
	engine->legacy.timeline = timeline;

	gen7_wa_vma = gen7_ctx_vma(engine);
	if (IS_ERR(gen7_wa_vma)) {
		err = PTR_ERR(gen7_wa_vma);
		goto err_ring;
	}

	i915_gem_ww_ctx_init(&ww, false);

retry:
	err = i915_gem_object_lock(timeline->hwsp_ggtt->obj, &ww);
	if (!err && gen7_wa_vma)
		err = i915_gem_object_lock(gen7_wa_vma->obj, &ww);
	if (!err)
		err = i915_gem_object_lock(engine->legacy.ring->vma->obj, &ww);
	if (!err)
		err = intel_timeline_pin(timeline, &ww);
	if (!err) {
		err = intel_ring_pin(ring, &ww);
		if (err)
			intel_timeline_unpin(timeline);
	}
	if (err)
		goto out;

	GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);

	if (gen7_wa_vma) {
		err = gen7_ctx_switch_bb_init(engine, &ww, gen7_wa_vma);
		if (err) {
			intel_ring_unpin(ring);
			intel_timeline_unpin(timeline);
		}
	}

out:
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	if (err)
		goto err_gen7_put;

	/* Finally, take ownership and responsibility for cleanup! */
	engine->release = ring_release;

	return 0;

err_gen7_put:
	if (gen7_wa_vma) {
		intel_context_put(gen7_wa_vma->private);
		i915_gem_object_put(gen7_wa_vma->obj);
	}
err_ring:
	intel_ring_put(ring);
err_timeline:
	intel_timeline_put(timeline);
err:
	intel_engine_cleanup_common(engine);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_ring_submission.c"
#endif