Lines matching refs: rq

367 struct i915_request *rq;
372 rq = t->request_alloc(ce);
374 if (IS_ERR(rq)) {
375 err = PTR_ERR(rq);
380 err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
384 requests[n] = i915_request_get(rq);
385 i915_request_add(rq);
389 &rq->fence,
394 i915_request_put(rq);
406 struct i915_request *rq = requests[count - 1];
410 rq->fence.context, rq->fence.seqno,
415 GEM_BUG_ON(!i915_request_completed(rq));
421 struct i915_request *rq = requests[n];
424 &rq->fence.flags)) {
426 rq->fence.context, rq->fence.seqno);
430 i915_request_put(rq);
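
The cluster at 367-430 is the submit-fence smoke pattern: every request's submit fence is chained to one common i915_sw_fence so nothing executes until the test releases the gate, each request is pinned with an extra reference before submission, and afterwards every fence is waited on and checked for completion. A minimal sketch, assuming a pinned intel_context *ce, a gating sw_fence called submit, and a requests[] array (all three names illustrative; the test allocates via t->request_alloc(ce)):

	struct i915_request *rq;
	int err;

	rq = i915_request_create(ce);	/* the test uses t->request_alloc(ce) */
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* gate actual submission on the shared sw_fence */
	err = i915_sw_fence_await_sw_fence_gfp(&rq->submit, &submit, GFP_KERNEL);
	if (err < 0) {
		i915_request_add(rq);	/* a created request must still be added */
		return err;
	}

	requests[n] = i915_request_get(rq);	/* rq may retire once added */
	i915_request_add(rq);

	/* ... fire the gate, then for each request: ... */
	if (i915_request_wait(requests[n], 0, HZ / 5) < 0)
		err = -ETIME;
	GEM_BUG_ON(!i915_request_completed(requests[n]));
	i915_request_put(requests[n]);
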
647 struct i915_request *rq;
659 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
660 if (IS_ERR(rq)) {
661 err = PTR_ERR(rq);
666 i915_request_cancel(rq, -EINTR);
667 i915_request_get(rq);
668 i915_request_add(rq);
670 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
679 if (rq->fence.error != -EINTR) {
681 engine->name, rq->fence.error);
686 i915_request_put(rq);
700 struct i915_request *rq;
712 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
713 if (IS_ERR(rq)) {
714 err = PTR_ERR(rq);
719 i915_request_get(rq);
720 i915_request_add(rq);
721 if (!igt_wait_for_spinner(&spin, rq)) {
729 i915_request_cancel(rq, -EINTR);
731 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
740 if (rq->fence.error != -EINTR) {
742 engine->name, rq->fence.error);
747 i915_request_put(rq);
761 struct i915_request *rq;
773 rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
774 if (IS_ERR(rq)) {
775 err = PTR_ERR(rq);
779 i915_request_get(rq);
780 i915_request_add(rq);
782 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
788 i915_request_cancel(rq, -EINTR);
789 if (rq->fence.error) {
791 engine->name, rq->fence.error);
796 i915_request_put(rq);
822 struct i915_request *rq, *nop;
842 rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
843 if (IS_ERR(rq)) {
844 err = PTR_ERR(rq);
850 i915_request_get(rq);
851 i915_request_add(rq);
852 if (!igt_wait_for_spinner(&spin, rq)) {
867 i915_request_cancel(rq, -EINTR);
869 if (i915_request_wait(rq, 0, HZ) < 0) {
878 if (rq->fence.error != -EINTR) {
880 engine->name, rq->fence.error);
903 i915_request_put(rq);
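
The four clusters at 647-686, 700-747, 761-796 and 822-903 exercise i915_request_cancel() at different points in a request's life: before it executes, while it spins on the GPU, after it has already completed, and while it is non-preemptable. A hedged summary of the expected outcomes (spinner and context setup elided, error paths compressed; each scenario uses its own rq):

	/* 1) cancel before execution: the error is recorded and the
	 *    request must still signal promptly once added */
	i915_request_cancel(rq, -EINTR);
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -ETIME;
	else if (rq->fence.error != -EINTR)
		err = -EINVAL;
	i915_request_put(rq);

	/* 2) cancel while running: same expectation, but only after
	 *    igt_wait_for_spinner() has confirmed the request is on the GPU */

	/* 3) cancel after completion: a no-op, fence.error must remain 0 */
	i915_request_cancel(rq, -EINTR);
	if (rq->fence.error)
		err = -EINVAL;
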
1008 static int emit_bb_start(struct i915_request *rq, struct i915_vma *batch)
1010 return rq->engine->emit_bb_start(rq,
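
The match at 1010 shows only the first line of the call; the continuation arguments do not reference rq and so are absent from this listing. For context, the emit_bb_start vfunc takes the batch address, its length and dispatch flags, so a plausible form of the helper (assuming the vma accessors of recent trees; older trees read batch->node.start and batch->node.size directly) is:

	static int emit_bb_start(struct i915_request *rq, struct i915_vma *batch)
	{
		/* address/length of the batch in the context's address space */
		return rq->engine->emit_bb_start(rq,
						 i915_vma_offset(batch),
						 i915_vma_size(batch),
						 0);
	}
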
1275 struct i915_request *rq = request[idx];
1278 timeout = i915_request_wait(rq, 0,
1287 GEM_BUG_ON(!i915_request_completed(rq));
1288 i915_vma_unpin(rq->batch);
1289 i915_vma_put(rq->batch);
1290 i915_request_put(rq);
1300 struct i915_request *rq = request[idx];
1302 if (!rq)
1305 if (rq->batch) {
1306 i915_vma_unpin(rq->batch);
1307 i915_vma_put(rq->batch);
1309 i915_request_put(rq);
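
1275-1309 are the two halves of tearing down batch-carrying requests: the success path at 1275 waits with a timeout, asserts completion, then releases the batch before dropping the request, while the error path at 1300 does the same defensively for every slot that was populated. The shared shape, assuming rq->batch was assigned when each request was built:

	/* success path: the request must have completed before teardown */
	GEM_BUG_ON(!i915_request_completed(rq));
	i915_vma_unpin(rq->batch);	/* drop the execution pin */
	i915_vma_put(rq->batch);	/* and the vma reference */
	i915_request_put(rq);

	/* error path, per slot in the loop: tolerate half-built entries */
	if (!rq)
		continue;
	if (rq->batch) {
		i915_vma_unpin(rq->batch);
		i915_vma_put(rq->batch);
	}
	i915_request_put(rq);
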
1476 struct i915_request *rq;
1478 rq = i915_request_create(engine->kernel_context);
1479 if (IS_ERR(rq)) {
1480 err = PTR_ERR(rq);
1484 i915_request_get(rq);
1485 i915_request_add(rq);
1488 if (i915_request_wait(rq, 0, HZ) < 0)
1490 i915_request_put(rq);
1514 struct i915_request *rq;
1516 rq = i915_request_create(engine->kernel_context);
1517 if (IS_ERR(rq)) {
1518 err = PTR_ERR(rq);
1522 i915_request_add(rq);
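
1476-1490 and 1514-1522 are the minimal kernel-context forms: the first holds a reference so it can wait for completion, the second is fire-and-forget (i915_request_add() alone, with no get/wait/put). The waiting form reduces to:

	struct i915_request *rq;
	int err = 0;

	rq = i915_request_create(engine->kernel_context);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_get(rq);	/* keep rq alive across the wait */
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ) < 0)
		err = -ETIME;
	i915_request_put(rq);
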
1560 struct i915_request *rq;
1576 rq = igt_spinner_create_request(&spin,
1580 if (IS_ERR(rq)) {
1581 err = PTR_ERR(rq);
1588 i915_request_get(rq);
1589 i915_request_add(rq);
1590 if (igt_wait_for_spinner(&spin, rq)) {
1599 if (err == 0 && i915_request_wait(rq, 0, HZ) < 0)
1601 i915_request_put(rq);
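
1560-1601 use the igt spinner to keep the engine verifiably busy: the request spins on the GPU until igt_spinner_end() releases it, and igt_wait_for_spinner() confirms it actually started. The usual shape, assuming spin was prepared with igt_spinner_init() (the probe step is illustrative):

	rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_get(rq);
	i915_request_add(rq);
	if (!igt_wait_for_spinner(&spin, rq)) {	/* ensure it is on the GPU */
		i915_request_put(rq);
		return -ETIME;
	}

	/* ... probe the busy engine here ... */

	igt_spinner_end(&spin);			/* release the spin batch */
	if (i915_request_wait(rq, 0, HZ) < 0)
		err = -ETIME;
	i915_request_put(rq);
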
1690 struct i915_request *rq;
1705 rq = igt_request_alloc(ctx, engine);
1706 if (IS_ERR(rq)) {
1707 ret = PTR_ERR(rq);
1711 ret = rq->ring->size - rq->reserved_space;
1712 i915_request_add(rq);
1714 sz = rq->ring->emit - rq->head;
1716 sz += rq->ring->size;
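
1690-1716 measure how much ring space a single empty request consumes. rq->head is where the request's commands start and rq->ring->emit is where emission finished, so their difference is the per-request overhead; when emission wraps past the end of the ring buffer the subtraction goes negative and one full ring size is added back. As a worked sketch:

	int ret, sz;

	ret = rq->ring->size - rq->reserved_space;	/* usable ring space */
	i915_request_add(rq);

	sz = rq->ring->emit - rq->head;	/* bytes this request emitted */
	if (sz < 0)			/* emission wrapped past the end */
		sz += rq->ring->size;
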
1891 struct i915_request *rq;
1894 rq = intel_engine_create_kernel_request(ce->engine);
1895 if (IS_ERR(rq))
1896 return PTR_ERR(rq);
1900 i915_request_await_dma_fence(rq, fence);
1904 rq = i915_request_get(rq);
1905 i915_request_add(rq);
1906 if (i915_request_wait(rq, 0, HZ / 2) < 0 && !err)
1908 i915_request_put(rq);
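
1891-1908 order a kernel-context request behind an external fence: i915_request_await_dma_fence() blocks the new request's execution until the fence signals, and the bounded wait then checks that signalling happened within HZ/2. Sketch, assuming a valid struct dma_fence *fence and an err accumulated by the surrounding test:

	rq = intel_engine_create_kernel_request(ce->engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* order rq after the external fence */
	i915_request_await_dma_fence(rq, fence);

	rq = i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 2) < 0 && !err)
		err = -ETIME;
	i915_request_put(rq);
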
2020 struct i915_request *rq;
2038 rq = i915_request_create(ce);
2039 if (IS_ERR(rq))
2040 return PTR_ERR(rq);
2042 cs = intel_ring_begin(rq, 4 + 12 * ARRAY_SIZE(elapsed));
2044 i915_request_add(rq);
2056 intel_ring_advance(rq, cs);
2057 i915_request_add(rq);
2111 struct i915_request *rq;
2117 rq = i915_request_create(ce);
2118 if (IS_ERR(rq)) {
2119 err = PTR_ERR(rq);
2123 cs = intel_ring_begin(rq, 4);
2125 i915_request_add(rq);
2132 intel_ring_advance(rq, cs);
2137 i915_request_add(rq);
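
From 2020 onward the matches follow the raw-emission pattern: intel_ring_begin() reserves a dword count in the request's ring and returns an ERR_PTR on failure, in which case the request must still be added so the ring state stays consistent; on success the dwords are written through the returned pointer and intel_ring_advance() commits them. With four illustrative MI_NOOPs standing in for the test-specific payload:

	u32 *cs;

	cs = intel_ring_begin(rq, 4);	/* reserve 4 dwords */
	if (IS_ERR(cs)) {
		i915_request_add(rq);	/* the request must still be added */
		return PTR_ERR(cs);
	}

	*cs++ = MI_NOOP;		/* payload is test-specific */
	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);	/* commit exactly what was reserved */
	i915_request_add(rq);
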
2183 struct i915_request *rq;
2185 rq = i915_request_create(ce);
2186 if (IS_ERR(rq)) {
2187 err = PTR_ERR(rq);
2191 cs = intel_ring_begin(rq, 12);
2193 i915_request_add(rq);
2202 intel_ring_advance(rq, cs);
2212 i915_request_add(rq);
2243 struct i915_request *rq;
2246 rq = i915_request_create(engine->kernel_context);
2247 if (IS_ERR(rq))
2248 return PTR_ERR(rq);
2250 cs = intel_ring_begin(rq, 4);
2252 i915_request_add(rq);
2258 intel_ring_advance(rq, cs);
2259 i915_request_add(rq);
2298 struct i915_request *rq;
2301 rq = i915_request_create(ce);
2302 if (IS_ERR(rq)) {
2303 err = PTR_ERR(rq);
2307 err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
2311 i915_request_add(rq);
2315 cs = intel_ring_begin(rq, 4);
2317 i915_request_add(rq);
2324 intel_ring_advance(rq, cs);
2325 i915_request_add(rq);
2388 struct i915_request *rq;
2390 rq = i915_request_create(arr[j]);
2391 if (IS_ERR(rq)) {
2392 err = PTR_ERR(rq);
2397 err = i915_request_await_dma_fence(rq,
2400 i915_request_add(rq);
2405 cs = intel_ring_begin(rq, 4);
2407 i915_request_add(rq);
2415 intel_ring_advance(rq, cs);
2418 fence = i915_request_get(rq);
2420 i915_request_add(rq);
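
2388-2420 build a request chain across an array of contexts: each new request awaits the fence of its predecessor, emits its payload, then becomes the link the next iteration waits on. Per step, roughly (payload emission elided; fence starts out NULL and is a request pointer, as at 2418):

	struct i915_request *rq, *fence = NULL;

	rq = i915_request_create(arr[j]);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	if (fence) {	/* order after the previous link */
		err = i915_request_await_dma_fence(rq, &fence->fence);
		if (err) {
			i915_request_add(rq);
			return err;
		}
		i915_request_put(fence);
	}

	/* ... emit dwords between intel_ring_begin()/intel_ring_advance() ... */

	fence = i915_request_get(rq);	/* the next link will await this one */
	i915_request_add(rq);
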
2479 struct i915_request *rq;
2481 rq = i915_request_create(ce);
2482 if (IS_ERR(rq)) {
2483 err = PTR_ERR(rq);
2487 cs = intel_ring_begin(rq, 12);
2489 i915_request_add(rq);
2498 intel_ring_advance(rq, cs);
2499 i915_request_add(rq);
2506 rq = i915_request_create(ce->engine->kernel_context);
2507 if (IS_ERR(rq)) {
2508 err = PTR_ERR(rq);
2512 cs = intel_ring_begin(rq, 8);
2514 i915_request_add(rq);
2522 intel_ring_advance(rq, cs);
2523 rq->sched.attr.priority = I915_PRIORITY_BARRIER;
2526 i915_request_add(rq);
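
2479-2526 pair a user-context request with a follow-up on the engine's kernel context whose priority is raised to I915_PRIORITY_BARRIER, so it is scheduled ahead of all ordinary work and can serve as a synchronisation point. The tail of that sequence:

	rq = i915_request_create(ce->engine->kernel_context);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* ... emit the follow-up payload (8 dwords in the test) ... */

	/* jump the queue: barriers run ahead of all ordinary work */
	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
	i915_request_add(rq);
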
2591 struct i915_request *rq;
2593 rq = i915_request_create(ce);
2594 if (IS_ERR(rq)) {
2595 err = PTR_ERR(rq);
2599 cs = intel_ring_begin(rq, 12);
2601 i915_request_add(rq);
2610 intel_ring_advance(rq, cs);
2612 dma_fence_add_callback(&rq->fence, &cb.base, signal_cb);
2613 i915_request_add(rq);
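
2591-2613 attach a dma-fence callback before submitting, giving the test an upcall the moment the request's fence signals; cb and signal_cb at 2612 are the test's own container and function. A minimal hedged equivalent (struct layout assumed):

	struct signal_cb {
		struct dma_fence_cb base;
		bool seen;
	};

	static void signal_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
	{
		struct signal_cb *s = container_of(cb, typeof(*s), base);

		smp_store_mb(s->seen, true);	/* runs when the fence signals */
	}

	/* with the request built but not yet added: */
	dma_fence_add_callback(&rq->fence, &cb.base, signal_cb);
	i915_request_add(rq);
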
2739 struct i915_request *rq;
2741 rq = i915_request_create(ps->ce[idx]);
2742 if (IS_ERR(rq)) {
2743 err = PTR_ERR(rq);
2747 i915_request_get(rq);
2748 i915_request_add(rq);
2750 if (i915_request_wait(rq, 0, HZ / 5) < 0)
2752 i915_request_put(rq);
2773 struct i915_request *rq;
2775 rq = i915_request_create(ps->ce[idx]);
2776 if (IS_ERR(rq)) {
2777 err = PTR_ERR(rq);
2781 i915_request_get(rq);
2782 i915_request_add(rq);
2787 prev = rq;
2807 struct i915_request *rq;
2809 rq = i915_request_create(ps->ce[idx]);
2810 if (IS_ERR(rq))
2811 return PTR_ERR(rq);
2813 i915_request_add(rq);
2998 struct i915_request *rq;
3000 rq = i915_request_create(ce);
3001 if (IS_ERR(rq)) {
3002 err = PTR_ERR(rq);
3006 i915_request_get(rq);
3007 i915_request_add(rq);
3010 if (i915_request_wait(rq, 0, HZ) < 0)
3012 i915_request_put(rq);
3073 struct i915_request *rq;
3075 rq = i915_request_create(ce);
3076 if (IS_ERR(rq)) {
3077 err = PTR_ERR(rq);
3081 i915_request_get(rq);
3082 i915_request_add(rq);
3088 prev = rq;
3149 struct i915_request *rq;
3151 rq = i915_request_create(ce);
3152 if (IS_ERR(rq)) {
3153 err = PTR_ERR(rq);
3157 i915_request_add(rq);