Lines matching refs: rq

28 static bool is_active(struct i915_request *rq)
30 if (i915_request_is_active(rq))
33 if (i915_request_on_hold(rq))
36 if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))
43 struct i915_request *rq,
53 if (i915_request_completed(rq)) /* that was quick! */
58 if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
69 struct i915_request *rq,
81 if (i915_request_completed(rq))
84 if (READ_ONCE(rq->fence.error))
88 if (rq->fence.error != -EIO) {
91 rq->fence.context,
92 rq->fence.seqno);
97 if (i915_request_wait(rq, 0,
101 rq->fence.context,
102 rq->fence.seqno);
125 struct i915_request *rq;
133 rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
134 if (IS_ERR(rq)) {
135 err = PTR_ERR(rq);
139 i915_request_add(rq);
140 if (!igt_wait_for_spinner(&spin, rq)) {
182 struct i915_request *rq[2];
228 rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
229 if (IS_ERR(rq[0])) {
230 err = PTR_ERR(rq[0]);
234 i915_request_get(rq[0]);
235 i915_request_add(rq[0]);
236 GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit);
238 if (!igt_wait_for_spinner(&spin, rq[0])) {
239 i915_request_put(rq[0]);
243 rq[1] = i915_request_create(ce[1]);
244 if (IS_ERR(rq[1])) {
245 err = PTR_ERR(rq[1]);
246 i915_request_put(rq[0]);
254 * rq[0] is already submitted, so this should reduce
257 * but it will install a dependency on rq[1] for rq[0]
261 i915_request_await_dma_fence(rq[1], &rq[0]->fence);
264 i915_request_get(rq[1]);
265 i915_request_add(rq[1]);
266 GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix);
267 i915_request_put(rq[0]);
275 engine->sched_engine->schedule(rq[1], &attr);
279 rq[0] = i915_request_create(ce[0]);
280 if (IS_ERR(rq[0])) {
281 err = PTR_ERR(rq[0]);
282 i915_request_put(rq[1]);
286 i915_request_await_dma_fence(rq[0], &rq[1]->fence);
287 i915_request_get(rq[0]);
288 i915_request_add(rq[0]);
289 GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix);
290 i915_request_put(rq[1]);
291 i915_request_put(rq[0]);
344 struct i915_request *rq;
383 rq = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
384 if (IS_ERR(rq)) {
385 err = PTR_ERR(rq);
389 i915_request_get(rq);
390 rq->sched.attr.priority = I915_PRIORITY_BARRIER;
391 i915_request_add(rq);
393 if (!igt_wait_for_spinner(&spin, rq)) {
395 i915_request_put(rq);
403 rq->wa_tail,
410 i915_request_put(rq);
419 pr_debug("%s: Filled ring with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
424 rq->tail);
426 rq->tail,
428 i915_request_put(rq);
430 /* Create a second ring to preempt the first ring after rq[0] */
431 rq = intel_context_create_request(ce[1]);
432 if (IS_ERR(rq)) {
433 err = PTR_ERR(rq);
437 rq->sched.attr.priority = I915_PRIORITY_BARRIER;
438 i915_request_get(rq);
439 i915_request_add(rq);
441 err = wait_for_submit(engine, rq, HZ / 2);
442 i915_request_put(rq);
493 struct i915_request *rq;
533 rq = intel_context_create_request(ce);
536 if (IS_ERR(rq)) {
537 err = PTR_ERR(rq);
540 GEM_BUG_ON(!rq->head);
541 i915_request_add(rq);
601 struct i915_request *rq;
611 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
612 if (IS_ERR(rq)) {
613 err = PTR_ERR(rq);
616 i915_request_add(rq);
618 if (!igt_wait_for_spinner(&spin, rq)) {
631 GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
633 i915_request_get(rq);
634 execlists_hold(engine, rq);
635 GEM_BUG_ON(!i915_request_on_hold(rq));
638 GEM_BUG_ON(rq->fence.error != -EIO);
643 if (!i915_request_wait(rq, 0, HZ / 5)) {
646 i915_request_put(rq);
650 GEM_BUG_ON(!i915_request_on_hold(rq));
653 execlists_unhold(engine, rq);
654 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
660 i915_request_put(rq);
720 struct i915_request *rq;
728 rq = intel_context_create_request(ce);
730 if (IS_ERR(rq)) {
731 err = PTR_ERR(rq);
735 if (rq->engine->emit_init_breadcrumb) {
736 err = rq->engine->emit_init_breadcrumb(rq);
738 i915_request_add(rq);
743 cs = intel_ring_begin(rq, 2);
745 i915_request_add(rq);
758 client[i] = i915_request_get(rq);
759 i915_request_add(rq);
820 emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
824 cs = intel_ring_begin(rq, 10);
852 intel_ring_advance(rq, cs);
860 struct i915_request *rq;
867 rq = intel_context_create_request(ce);
868 if (IS_ERR(rq))
872 if (rq->engine->emit_init_breadcrumb)
873 err = rq->engine->emit_init_breadcrumb(rq);
875 err = emit_semaphore_chain(rq, vma, idx);
877 i915_request_get(rq);
878 i915_request_add(rq);
880 rq = ERR_PTR(err);
884 return rq;
895 struct i915_request *rq;
898 rq = intel_engine_create_kernel_request(engine);
899 if (IS_ERR(rq))
900 return PTR_ERR(rq);
902 cs = intel_ring_begin(rq, 4);
904 i915_request_add(rq);
913 intel_ring_advance(rq, cs);
915 i915_request_get(rq);
916 i915_request_add(rq);
919 engine->sched_engine->schedule(rq, &attr);
922 i915_request_put(rq);
946 struct i915_request *rq;
948 rq = semaphore_queue(engine, vma, n++);
949 if (IS_ERR(rq)) {
950 err = PTR_ERR(rq);
954 i915_request_put(rq);
1056 struct i915_request *rq;
1060 rq = intel_context_create_request(ce);
1061 if (IS_ERR(rq))
1062 return rq;
1065 err = i915_request_await_dma_fence(rq, &wait->fence);
1070 cs = intel_ring_begin(rq, 14);
1088 *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
1097 intel_ring_advance(rq, cs);
1101 i915_request_get(rq);
1102 i915_request_add(rq);
1104 i915_request_put(rq);
1108 return rq;
1130 struct i915_request *rq[3] = {};
1161 rq[A1] = create_rewinder(ce, NULL, slot, X);
1162 if (IS_ERR(rq[A1])) {
1167 rq[A2] = create_rewinder(ce, NULL, slot, Y);
1169 if (IS_ERR(rq[A2]))
1172 err = wait_for_submit(engine, rq[A2], HZ / 2);
1185 rq[B1] = create_rewinder(ce, rq[A1], slot, Z);
1187 if (IS_ERR(rq[2]))
1190 err = wait_for_submit(engine, rq[B1], HZ / 2);
1199 while (i915_request_is_active(rq[A2])) { /* semaphore yield! */
1206 GEM_BUG_ON(!i915_request_is_active(rq[A1]));
1207 GEM_BUG_ON(!i915_request_is_active(rq[B1]));
1208 GEM_BUG_ON(i915_request_is_active(rq[A2]));
1222 pr_err("%s: rq[%d] timed out\n",
1247 i915_request_put(rq[i]);
1259 struct i915_request *rq;
1261 rq = intel_engine_create_kernel_request(engine);
1262 if (IS_ERR(rq))
1263 return rq;
1265 i915_request_get(rq);
1266 i915_request_add(rq);
1268 return rq;
1330 struct i915_request *rq, *nop;
1339 rq = semaphore_queue(engine, vma, 0);
1340 if (IS_ERR(rq)) {
1341 err = PTR_ERR(rq);
1344 engine->sched_engine->schedule(rq, &attr);
1345 err = wait_for_submit(engine, rq, HZ / 2);
1366 GEM_BUG_ON(i915_request_completed(rq));
1367 GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
1370 err = release_queue(engine, vma, 1, effective_prio(rq));
1381 if (i915_request_wait(rq, 0, slice_timeout(engine)) < 0) {
1394 i915_request_put(rq);
1430 struct i915_request *rq;
1447 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
1449 if (IS_ERR(rq)) {
1450 err = PTR_ERR(rq);
1454 i915_request_get(rq);
1455 i915_request_add(rq);
1457 if (!igt_wait_for_spinner(&spin, rq)) {
1458 i915_request_put(rq);
1463 set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags);
1464 i915_request_put(rq);
1474 rq = intel_context_create_request(ce);
1476 if (IS_ERR(rq)) {
1477 err = PTR_ERR(rq);
1481 rq->sched.attr.priority = I915_PRIORITY_BARRIER;
1482 i915_request_get(rq);
1483 i915_request_add(rq);
1489 if (wait_for_submit(engine, rq, HZ / 2)) {
1490 i915_request_put(rq);
1500 if (i915_request_wait(rq, 0, slice_timeout(engine)) >= 0) {
1505 i915_request_put(rq);
1721 struct i915_request *rq;
1727 rq = igt_spinner_create_request(spin, ce, arb);
1729 return rq;
1759 struct i915_request *rq;
1769 rq = spinner_create_request(&spin_lo, ctx_lo, engine,
1771 if (IS_ERR(rq)) {
1772 err = PTR_ERR(rq);
1776 i915_request_add(rq);
1777 if (!igt_wait_for_spinner(&spin_lo, rq)) {
1785 rq = spinner_create_request(&spin_hi, ctx_hi, engine,
1787 if (IS_ERR(rq)) {
1789 err = PTR_ERR(rq);
1793 i915_request_add(rq);
1794 if (!igt_wait_for_spinner(&spin_hi, rq)) {
1852 struct i915_request *rq;
1862 rq = spinner_create_request(&spin_lo, ctx_lo, engine,
1864 if (IS_ERR(rq)) {
1865 err = PTR_ERR(rq);
1869 i915_request_add(rq);
1870 if (!igt_wait_for_spinner(&spin_lo, rq)) {
1875 rq = spinner_create_request(&spin_hi, ctx_hi, engine,
1877 if (IS_ERR(rq)) {
1879 err = PTR_ERR(rq);
1883 i915_request_add(rq);
1884 if (igt_wait_for_spinner(&spin_hi, rq)) {
1890 engine->sched_engine->schedule(rq, &attr);
1892 if (!igt_wait_for_spinner(&spin_hi, rq)) {
2058 struct i915_request *rq;
2068 rq = spinner_create_request(&arg->a.spin,
2071 if (IS_ERR(rq))
2072 return PTR_ERR(rq);
2074 clear_bit(CONTEXT_BANNED, &rq->context->flags);
2075 i915_request_get(rq);
2076 i915_request_add(rq);
2077 if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
2082 intel_context_ban(rq->context, rq);
2087 err = wait_for_reset(arg->engine, rq, HZ / 2);
2094 i915_request_put(rq);
2102 struct i915_request *rq[2] = {};
2112 rq[0] = spinner_create_request(&arg->a.spin,
2115 if (IS_ERR(rq[0]))
2116 return PTR_ERR(rq[0]);
2118 clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
2119 i915_request_get(rq[0]);
2120 i915_request_add(rq[0]);
2121 if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
2126 rq[1] = spinner_create_request(&arg->b.spin,
2129 if (IS_ERR(rq[1])) {
2130 err = PTR_ERR(rq[1]);
2134 clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
2135 i915_request_get(rq[1]);
2136 err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
2137 i915_request_add(rq[1]);
2141 intel_context_ban(rq[1]->context, rq[1]);
2147 err = wait_for_reset(arg->engine, rq[1], HZ / 2);
2151 if (rq[0]->fence.error != 0) {
2157 if (rq[1]->fence.error != -EIO) {
2164 i915_request_put(rq[1]);
2165 i915_request_put(rq[0]);
2173 struct i915_request *rq[3] = {};
2183 rq[0] = spinner_create_request(&arg->a.spin,
2186 if (IS_ERR(rq[0]))
2187 return PTR_ERR(rq[0]);
2189 clear_bit(CONTEXT_BANNED, &rq[0]->context->flags);
2190 i915_request_get(rq[0]);
2191 i915_request_add(rq[0]);
2192 if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
2197 rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
2198 if (IS_ERR(rq[1])) {
2199 err = PTR_ERR(rq[1]);
2203 clear_bit(CONTEXT_BANNED, &rq[1]->context->flags);
2204 i915_request_get(rq[1]);
2205 err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
2206 i915_request_add(rq[1]);
2210 rq[2] = spinner_create_request(&arg->b.spin,
2213 if (IS_ERR(rq[2])) {
2214 err = PTR_ERR(rq[2]);
2218 i915_request_get(rq[2]);
2219 err = i915_request_await_dma_fence(rq[2], &rq[1]->fence);
2220 i915_request_add(rq[2]);
2224 intel_context_ban(rq[2]->context, rq[2]);
2229 err = wait_for_reset(arg->engine, rq[2], HZ / 2);
2233 if (rq[0]->fence.error != -EIO) {
2244 if (intel_engine_has_semaphores(rq[1]->engine) &&
2245 rq[1]->fence.error != 0) {
2251 if (rq[2]->fence.error != -EIO) {
2258 i915_request_put(rq[2]);
2259 i915_request_put(rq[1]);
2260 i915_request_put(rq[0]);
2268 struct i915_request *rq;
2279 rq = spinner_create_request(&arg->a.spin,
2282 if (IS_ERR(rq))
2283 return PTR_ERR(rq);
2285 clear_bit(CONTEXT_BANNED, &rq->context->flags);
2286 i915_request_get(rq);
2287 i915_request_add(rq);
2288 if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
2293 intel_context_ban(rq->context, rq);
2298 err = wait_for_reset(arg->engine, rq, HZ / 2);
2305 i915_request_put(rq);
2325 struct i915_request *rq;
2335 rq = spinner_create_request(&arg->a.spin,
2338 if (IS_ERR(rq))
2339 return PTR_ERR(rq);
2341 clear_bit(CONTEXT_BANNED, &rq->context->flags);
2342 i915_request_get(rq);
2343 i915_request_add(rq);
2344 if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
2349 intel_context_set_banned(rq->context);
2367 err = wait_for_reset(engine, rq, HZ / 2);
2376 i915_request_put(rq);
2576 struct i915_request *rq;
2582 rq = spinner_create_request(&lo.spin,
2585 if (IS_ERR(rq))
2588 i915_request_get(rq);
2589 i915_request_add(rq);
2591 ring_size = rq->wa_tail - rq->head;
2593 ring_size += rq->ring->size;
2594 ring_size = rq->ring->size / ring_size;
2599 if (i915_request_wait(rq, 0, HZ / 2) < 0) {
2601 i915_request_put(rq);
2604 i915_request_put(rq);
2612 rq = spinner_create_request(&hi.spin,
2615 if (IS_ERR(rq))
2617 i915_request_add(rq);
2618 if (!igt_wait_for_spinner(&hi.spin, rq))
2621 rq = spinner_create_request(&lo.spin,
2624 if (IS_ERR(rq))
2626 i915_request_add(rq);
2629 rq = igt_request_alloc(lo.ctx, engine);
2630 if (IS_ERR(rq))
2632 i915_request_add(rq);
2635 rq = igt_request_alloc(hi.ctx, engine);
2636 if (IS_ERR(rq))
2639 i915_request_get(rq);
2640 i915_request_add(rq);
2641 engine->sched_engine->schedule(rq, &attr);
2644 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
2652 i915_request_put(rq);
2656 i915_request_put(rq);
2658 rq = igt_request_alloc(lo.ctx, engine);
2659 if (IS_ERR(rq))
2662 i915_request_get(rq);
2663 i915_request_add(rq);
2665 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
2674 i915_request_put(rq);
2677 i915_request_put(rq);
2706 struct i915_request *rq;
2761 rq = intel_context_create_request(ce);
2762 if (IS_ERR(rq)) {
2763 err = PTR_ERR(rq);
2767 rq->batch = i915_vma_get(vma);
2768 i915_request_get(rq);
2770 err = igt_vma_move_to_active_unlocked(vma, rq, 0);
2772 err = rq->engine->emit_bb_start(rq,
2775 i915_request_add(rq);
2782 rq->mock.link.next = &(*prev)->mock.link;
2783 *prev = rq;
2787 i915_vma_put(rq->batch);
2788 i915_request_put(rq);
2801 struct i915_request *rq;
2833 rq = igt_spinner_create_request(spin, ce[0], MI_ARB_CHECK);
2834 if (IS_ERR(rq)) {
2835 err = PTR_ERR(rq);
2839 i915_request_get(rq);
2840 rq->sched.attr.priority = I915_PRIORITY_BARRIER;
2841 i915_request_add(rq);
2843 if (!igt_wait_for_spinner(spin, rq)) {
2845 i915_request_put(rq);
2852 while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) {
2858 i915_request_put(rq);
2867 pr_debug("%s: Filled %d with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
2872 rq->tail);
2873 i915_request_put(rq);
2876 rq = intel_context_create_request(ce[1]);
2877 if (IS_ERR(rq)) {
2878 err = PTR_ERR(rq);
2882 rq->sched.attr.priority = I915_PRIORITY_BARRIER;
2883 i915_request_get(rq);
2884 i915_request_add(rq);
2886 err = wait_for_submit(engine, rq, HZ / 2);
2887 i915_request_put(rq);
2975 struct i915_request *rq = NULL;
2991 err = create_gang(engine, &rq);
2996 engine->sched_engine->schedule(rq, &attr);
3008 cs = i915_gem_object_pin_map_unlocked(rq->batch->obj, I915_MAP_WC);
3011 i915_gem_object_unpin_map(rq->batch->obj);
3017 while (rq) { /* wait for each rq from highest to lowest prio */
3018 struct i915_request *n = list_next_entry(rq, mock.link);
3020 if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) {
3025 prio, rq_prio(rq));
3032 i915_vma_put(rq->batch);
3033 i915_request_put(rq);
3034 rq = n;
3153 struct i915_request *rq;
3176 rq = intel_context_create_request(ce);
3177 if (IS_ERR(rq)) {
3178 err = PTR_ERR(rq);
3182 err = igt_vma_move_to_active_unlocked(vma, rq, 0);
3186 err = i915_vma_move_to_active(batch, rq, 0);
3188 err = rq->engine->emit_bb_start(rq,
3195 i915_request_get(rq);
3196 i915_request_add(rq);
3204 return err ? ERR_PTR(err) : rq;
3214 struct i915_request *rq;
3218 rq = intel_engine_create_kernel_request(engine);
3219 if (IS_ERR(rq))
3220 return PTR_ERR(rq);
3222 cs = intel_ring_begin(rq, 4);
3224 i915_request_add(rq);
3233 intel_ring_advance(rq, cs);
3235 i915_request_get(rq);
3236 i915_request_add(rq);
3238 engine->sched_engine->schedule(rq, &attr);
3240 if (i915_request_wait(rq, 0, HZ / 2) < 0)
3242 i915_request_put(rq);
3299 struct i915_request *rq;
3301 rq = create_gpr_client(engine, global,
3303 if (IS_ERR(rq)) {
3304 err = PTR_ERR(rq);
3308 client[i] = rq;
3398 struct i915_request *rq;
3403 rq = spinner_create_request(&spin_lo, ctx_lo, engine,
3405 if (IS_ERR(rq)) {
3406 err = PTR_ERR(rq);
3410 i915_request_add(rq);
3411 if (!igt_wait_for_spinner(&spin_lo, rq)) {
3417 rq = igt_request_alloc(ctx_hi, engine);
3418 if (IS_ERR(rq)) {
3420 err = PTR_ERR(rq);
3431 i915_request_get(rq);
3432 i915_request_add(rq);
3437 if (i915_request_wait(rq, 0, HZ / 10) < 0) {
3439 i915_request_put(rq);
3445 i915_request_put(rq);
3490 struct i915_request *rq;
3510 rq = igt_request_alloc(ctx, smoke->engine);
3511 if (IS_ERR(rq)) {
3512 err = PTR_ERR(rq);
3517 err = igt_vma_move_to_active_unlocked(vma, rq, 0);
3519 err = rq->engine->emit_bb_start(rq,
3524 i915_request_add(rq);
3754 struct i915_request *rq;
3756 rq = i915_request_create(ve[nc]);
3757 if (IS_ERR(rq)) {
3758 err = PTR_ERR(rq);
3764 request[nc] = i915_request_get(rq);
3765 i915_request_add(rq);
3771 struct i915_request *rq;
3773 rq = i915_request_create(ve[nc]);
3774 if (IS_ERR(rq)) {
3775 err = PTR_ERR(rq);
3781 request[nc] = i915_request_get(rq);
3782 i915_request_add(rq);
4027 struct i915_request *rq;
4046 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
4048 if (IS_ERR(rq)) {
4049 err = PTR_ERR(rq);
4053 i915_request_add(rq);
4062 rq = intel_context_create_request(ce);
4064 if (IS_ERR(rq)) {
4065 err = PTR_ERR(rq);
4069 i915_request_get(rq);
4070 i915_request_add(rq);
4071 if (i915_request_wait(rq, 0, timeout) < 0) {
4073 __func__, rq->engine->name);
4078 i915_request_put(rq);
4094 struct i915_request *rq;
4106 /* XXX We do not handle oversubscription and fairness with normal rq */
4114 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
4116 if (IS_ERR(rq)) {
4117 err = PTR_ERR(rq);
4121 i915_request_add(rq);
4131 rq = intel_context_create_request(ce);
4133 if (IS_ERR(rq)) {
4134 err = PTR_ERR(rq);
4138 i915_request_get(rq);
4139 i915_request_add(rq);
4140 if (i915_request_wait(rq, 0, timeout) < 0) {
4147 i915_request_put(rq);
4226 struct i915_request *rq;
4228 rq = i915_request_create(ve);
4229 if (IS_ERR(rq)) {
4230 err = PTR_ERR(rq);
4235 last = i915_request_get(rq);
4237 cs = intel_ring_begin(rq, 8);
4239 i915_request_add(rq);
4254 intel_ring_advance(rq, cs);
4257 rq->execution_mask = engine->mask;
4258 i915_request_add(rq);
4337 struct i915_request *rq;
4359 rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK);
4360 if (IS_ERR(rq)) {
4361 err = PTR_ERR(rq);
4364 i915_request_add(rq);
4366 if (!igt_wait_for_spinner(&spin, rq)) {
4372 engine = rq->engine;
4381 GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
4387 GEM_BUG_ON(rq->engine != engine);
4390 execlists_hold(engine, rq);
4391 GEM_BUG_ON(!i915_request_on_hold(rq));
4394 GEM_BUG_ON(rq->fence.error != -EIO);
4400 i915_request_get(rq);
4401 if (!i915_request_wait(rq, 0, HZ / 5)) {
4408 GEM_BUG_ON(!i915_request_on_hold(rq));
4411 execlists_unhold(engine, rq);
4412 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
4420 i915_request_put(rq);
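
The matches above come from the i915 execlists selftests and largely repeat one idiom: build a request (often via the igt_spinner helpers), take an extra reference with i915_request_get() before i915_request_add(), order a second request behind it with i915_request_await_dma_fence(), then i915_request_wait() on it and drop the references with i915_request_put(). A minimal sketch of that pattern follows, assuming the usual selftest setup (an initialised struct igt_spinner and two pinned intel_contexts on the same engine). The function name sketch_request_dependency and the hog/waiter naming are illustrative only; the calls mirror the ones visible in the listing, and igt_spinner_end() is assumed from the spinner helpers rather than taken from the lines above.

/*
 * Illustrative sketch only, not code from the file above: one context
 * ("hog") is kept busy by a spinner while a second request ("waiter")
 * is queued behind it via a fence dependency.
 */
static int sketch_request_dependency(struct intel_context *hog,
				     struct intel_context *waiter,
				     struct igt_spinner *spin)
{
	struct i915_request *rq, *dep;
	int err = 0;

	/* A spinner request keeps the first context busy on the engine. */
	rq = igt_spinner_create_request(spin, hog, MI_ARB_CHECK);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_get(rq);	/* keep a reference past i915_request_add() */
	i915_request_add(rq);

	if (!igt_wait_for_spinner(spin, rq)) {
		err = -ETIME;
		goto out_spin;
	}

	/* A second request on another context, ordered behind the spinner. */
	dep = intel_context_create_request(waiter);
	if (IS_ERR(dep)) {
		err = PTR_ERR(dep);
		goto out_spin;
	}

	/* Dependencies must be installed before i915_request_add(). */
	err = i915_request_await_dma_fence(dep, &rq->fence);
	i915_request_get(dep);
	i915_request_add(dep);

	/* Release the spinner so the dependent request can run. */
	igt_spinner_end(spin);

	if (err == 0 && i915_request_wait(dep, 0, HZ / 2) < 0)
		err = -ETIME;

	i915_request_put(dep);
	i915_request_put(rq);
	return err;

out_spin:
	igt_spinner_end(spin);
	i915_request_put(rq);
	return err;
}

The get-before-add / put-after-wait pairing recurs throughout the listing because i915_request_add() hands the caller's reference over to the submission backend; without the extra i915_request_get(), the request may be retired and freed as soon as it completes, before the test can wait on it or inspect rq->fence.error.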