Lines Matching refs:rq

38 static bool is_active(struct i915_request *rq)
40 if (i915_request_is_active(rq))
43 if (i915_request_on_hold(rq))
46 if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq))
53 struct i915_request *rq,
63 if (i915_request_completed(rq)) /* that was quick! */
68 if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
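
A minimal sketch of the submission-poll helper these fragments (38-68) outline: an is_active() predicate built from the request-state queries above, wrapped in a wait_for_submit() loop that returns once the request has completed or the engine has acknowledged the submission. This and the sketches that follow are kernel-internal C reconstructions assuming the surrounding i915 GT selftest environment, not the file's actual code; here the intel_engine_flush_submission() call, the timeout plumbing and the -ETIME return are assumptions rather than matched lines.

static int wait_for_submit(struct intel_engine_cs *engine,
                           struct i915_request *rq,
                           unsigned long timeout)
{
        timeout += jiffies;
        do {
                bool done = time_after(jiffies, timeout);

                if (i915_request_completed(rq)) /* that was quick! */
                        return 0;

                /* Wait until the HW has acknowledged the submission (or bailed) */
                intel_engine_flush_submission(engine);
                if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
                        return 0;

                if (done)
                        return -ETIME;

                cond_resched();
        } while (1);
}
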
83 struct i915_request *rq;
86 rq = intel_context_create_request(ce);
87 if (IS_ERR(rq))
88 return PTR_ERR(rq);
90 cs = intel_ring_begin(rq, 4);
92 i915_request_add(rq);
101 intel_ring_advance(rq, cs);
103 rq->sched.attr.priority = I915_PRIORITY_BARRIER;
104 i915_request_add(rq);
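
Lines 83-104 show the recurring emit-and-submit idiom: create a request on the context, reserve ring space, write the command dwords, advance, then raise the request to I915_PRIORITY_BARRIER before i915_request_add(). Note line 92: when intel_ring_begin() fails, the request is still handed to i915_request_add() so it is retired cleanly. A hedged sketch of that shape; the name emit_dword_to_slot(), the MI_STORE_DWORD_IMM_GEN4 payload and the ggtt_offset parameter are illustrative, not taken from the file.

static int emit_dword_to_slot(struct intel_context *ce, u32 ggtt_offset)
{
        struct i915_request *rq;
        u32 *cs;

        rq = intel_context_create_request(ce);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        cs = intel_ring_begin(rq, 4);
        if (IS_ERR(cs)) {
                i915_request_add(rq); /* still retire the request on error */
                return PTR_ERR(cs);
        }

        /* Write a single dword (1) into a GGTT slot from the GPU */
        *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
        *cs++ = ggtt_offset;
        *cs++ = 0;
        *cs++ = 1;

        intel_ring_advance(rq, cs);

        rq->sched.attr.priority = I915_PRIORITY_BARRIER;
        i915_request_add(rq);
        return 0;
}
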
110 struct i915_request *rq;
114 rq = intel_engine_create_kernel_request(ce->engine);
115 if (IS_ERR(rq))
116 return PTR_ERR(rq);
120 i915_request_await_dma_fence(rq, fence);
124 rq = i915_request_get(rq);
125 i915_request_add(rq);
126 if (i915_request_wait(rq, 0, timeout) < 0)
128 i915_request_put(rq);
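
Lines 110-128 are the flush/sync pattern: build a kernel request on the context's engine, order it after a dma_fence with i915_request_await_dma_fence(), then take a reference, submit, block in i915_request_wait() and drop the reference. A sketch under those assumptions; the name flush_with_fence() and the explicit fence parameter are mine (the original presumably derives the fence itself, e.g. from the context's timeline).

static int flush_with_fence(struct intel_context *ce,
                            struct dma_fence *fence,
                            long timeout)
{
        struct i915_request *rq;
        int err = 0;

        rq = intel_engine_create_kernel_request(ce->engine);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        if (fence)
                i915_request_await_dma_fence(rq, fence);

        rq = i915_request_get(rq);
        i915_request_add(rq);
        if (i915_request_wait(rq, 0, timeout) < 0)
                err = -ETIME;
        i915_request_put(rq);

        return err;
}
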
406 struct i915_request *rq;
430 rq = i915_request_create(ce);
431 if (IS_ERR(rq)) {
432 err = PTR_ERR(rq);
436 cs = intel_ring_begin(rq, 4 * MAX_IDX);
439 i915_request_add(rq);
455 err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
457 i915_request_get(rq);
458 i915_request_add(rq);
465 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
488 i915_request_put(rq);
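
The group at 406-488 reads a block of state into a scratch VMA and waits up to HZ/5 for the result: reserve 4 * MAX_IDX dwords, mark the scratch buffer as a GPU write via i915_vma_move_to_active(EXEC_OBJECT_WRITE), and hold a request reference across the wait. A condensed sketch, assuming MAX_IDX from the surrounding file, the scratch object lock already held, and the per-register emission replaced by MI_NOOP padding:

static int read_state_into_scratch(struct intel_context *ce,
                                   struct i915_vma *scratch)
{
        struct i915_request *rq;
        u32 *cs;
        int err;

        rq = i915_request_create(ce);
        if (IS_ERR(rq))
                return PTR_ERR(rq);

        cs = intel_ring_begin(rq, 4 * MAX_IDX);
        if (IS_ERR(cs)) {
                i915_request_add(rq);
                return PTR_ERR(cs);
        }

        /* Placeholder: the real test emits one register read per index here */
        memset32(cs, MI_NOOP, 4 * MAX_IDX);
        cs += 4 * MAX_IDX;
        intel_ring_advance(rq, cs);

        err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);

        i915_request_get(rq);
        i915_request_add(rq);
        if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0)
                err = -ETIME;
        i915_request_put(rq);

        return err;
}
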
534 struct i915_request *rq;
538 rq = intel_context_create_request(ce);
539 if (IS_ERR(rq))
540 return PTR_ERR(rq);
542 cs = intel_ring_begin(rq, 2 * NUM_GPR_DW + 2);
544 i915_request_add(rq);
555 intel_ring_advance(rq, cs);
557 rq->sched.attr.priority = I915_PRIORITY_BARRIER;
558 i915_request_add(rq);
569 struct i915_request *rq;
574 rq = intel_context_create_request(ce);
575 if (IS_ERR(rq))
576 return rq;
578 cs = intel_ring_begin(rq, 6 + 4 * NUM_GPR_DW);
580 i915_request_add(rq);
602 err = igt_vma_move_to_active_unlocked(scratch, rq, EXEC_OBJECT_WRITE);
604 i915_request_get(rq);
605 i915_request_add(rq);
607 i915_request_put(rq);
608 rq = ERR_PTR(err);
611 return rq;
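
Lines 534-611 repeat the emit pattern for the general-purpose registers (NUM_GPR_DW), but the reader at 569-611 hands the in-flight request back to the caller rather than waiting on it, converting any failure into an ERR_PTR. A sketch of that return-the-request idiom; submit_gpr_read() is an illustrative name and the actual payload (semaphore wait plus register stores) is replaced by MI_NOOP padding.

static struct i915_request *
submit_gpr_read(struct intel_context *ce, struct i915_vma *scratch)
{
        struct i915_request *rq;
        u32 *cs;
        int err;

        rq = intel_context_create_request(ce);
        if (IS_ERR(rq))
                return rq;

        cs = intel_ring_begin(rq, 6 + 4 * NUM_GPR_DW);
        if (IS_ERR(cs)) {
                i915_request_add(rq);
                return ERR_CAST(cs);
        }

        /* Placeholder for the semaphore wait + NUM_GPR_DW register stores */
        memset32(cs, MI_NOOP, 6 + 4 * NUM_GPR_DW);
        cs += 6 + 4 * NUM_GPR_DW;
        intel_ring_advance(rq, cs);

        err = igt_vma_move_to_active_unlocked(scratch, rq, EXEC_OBJECT_WRITE);

        i915_request_get(rq);
        i915_request_add(rq);
        if (err) {
                i915_request_put(rq);
                rq = ERR_PTR(err);
        }

        return rq;
}

The caller at 620-690 then drives the returned request with wait_for_submit() and i915_request_wait() before dropping it with i915_request_put().
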
620 struct i915_request *rq;
636 rq = __gpr_read(ce, scratch, slot);
637 if (IS_ERR(rq)) {
638 err = PTR_ERR(rq);
642 err = wait_for_submit(engine, rq, HZ / 2);
655 err = wait_for_submit(engine, rq, HZ / 2);
663 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
690 i915_request_put(rq);
742 struct i915_request *rq;
746 rq = intel_context_create_request(ce);
747 if (IS_ERR(rq))
748 return rq;
750 cs = intel_ring_begin(rq, 10);
768 *cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(rq->engine->mmio_base));
772 intel_ring_advance(rq, cs);
776 i915_request_get(rq);
777 i915_request_add(rq);
779 i915_request_put(rq);
783 return rq;
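
create_timestamp() at 742-783 reserves 10 dwords and, per line 768, stores the context-local RING_CTX_TIMESTAMP register into a slot, again returning the live request to the caller. A simplified sketch of just the register store; the MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT encoding, the 4-dword reservation and the helper name are assumptions (the original's remaining dwords presumably set up a semaphore wait before the sample).

static struct i915_request *
emit_ctx_timestamp_read(struct intel_context *ce, u32 ggtt_slot)
{
        struct i915_request *rq;
        u32 *cs;

        rq = intel_context_create_request(ce);
        if (IS_ERR(rq))
                return rq;

        cs = intel_ring_begin(rq, 4);
        if (IS_ERR(cs)) {
                i915_request_add(rq);
                return ERR_CAST(cs);
        }

        /* Sample the per-context timestamp register into a GGTT slot */
        *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
        *cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(rq->engine->mmio_base));
        *cs++ = ggtt_slot;
        *cs++ = 0;

        intel_ring_advance(rq, cs);

        i915_request_get(rq);
        i915_request_add(rq);

        return rq;
}
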
800 struct i915_request *rq;
805 rq = create_timestamp(arg->ce[0], slot, 1);
806 if (IS_ERR(rq))
807 return PTR_ERR(rq);
809 err = wait_for_submit(rq->engine, rq, HZ / 2);
845 i915_request_put(rq);
1057 struct i915_request *rq;
1067 rq = ERR_CAST(b_after);
1071 rq = intel_context_create_request(ce);
1072 if (IS_ERR(rq))
1075 err = igt_vma_move_to_active_unlocked(before, rq, EXEC_OBJECT_WRITE);
1079 err = igt_vma_move_to_active_unlocked(b_before, rq, 0);
1083 err = igt_vma_move_to_active_unlocked(after, rq, EXEC_OBJECT_WRITE);
1087 err = igt_vma_move_to_active_unlocked(b_after, rq, 0);
1091 cs = intel_ring_begin(rq, 14);
1118 intel_ring_advance(rq, cs);
1121 i915_request_get(rq);
1122 i915_request_add(rq);
1127 return rq;
1130 i915_request_add(rq);
1131 rq = ERR_PTR(err);
1210 struct i915_request *rq;
1219 rq = intel_context_create_request(ce);
1220 if (IS_ERR(rq)) {
1221 err = PTR_ERR(rq);
1225 err = igt_vma_move_to_active_unlocked(batch, rq, 0);
1229 cs = intel_ring_begin(rq, 8);
1246 intel_ring_advance(rq, cs);
1248 rq->sched.attr.priority = I915_PRIORITY_BARRIER;
1250 i915_request_add(rq);
1406 struct i915_request *rq;
1431 rq = record_registers(A, ref[0], ref[1], sema);
1432 if (IS_ERR(rq)) {
1433 err = PTR_ERR(rq);
1440 if (i915_request_wait(rq, 0, HZ / 2) < 0) {
1441 i915_request_put(rq);
1445 i915_request_put(rq);
1459 rq = record_registers(A, result[0], result[1], sema);
1460 if (IS_ERR(rq)) {
1461 err = PTR_ERR(rq);
1466 if (err == 0 && i915_request_wait(rq, 0, HZ / 2) < 0) {
1474 i915_request_put(rq);
1560 struct i915_request *rq;
1563 rq = intel_context_create_request(ce);
1564 if (IS_ERR(rq))
1565 return PTR_ERR(rq);
1567 i915_request_get(rq);
1568 i915_request_add(rq);
1570 if (i915_request_wait(rq, 0, HZ / 5) < 0)
1573 i915_request_put(rq);
1742 struct i915_request *rq)
1751 if (!rq->fence.error)
1763 struct i915_request *rq;
1775 rq = intel_context_create_request(ce);
1776 if (IS_ERR(rq)) {
1777 err = PTR_ERR(rq);
1781 i915_request_get(rq);
1782 i915_request_add(rq);
1783 return rq;
1879 struct i915_request *rq;
1894 rq = intel_context_create_request(ce);
1895 if (IS_ERR(rq)) {
1896 err = PTR_ERR(rq);
1901 i915_request_get(rq);
1903 i915_request_add(rq);
1909 i915_request_put(rq);
1912 err = i915_request_wait(rq, 0, HZ / 5);
1936 i915_request_put(rq);