Lines matching refs: rq — cross-reference hits for the identifier 'rq' in the i915 timeline selftests; the leading number on each line is the source-file line number.

454 static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
458 cs = intel_ring_begin(rq, 4);
462 if (GRAPHICS_VER(rq->i915) >= 8) {
467 } else if (GRAPHICS_VER(rq->i915) >= 4) {
479 intel_ring_advance(rq, cs);
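
The hits at 454–479 outline emit_ggtt_store_dw(): reserve four dwords in the request's ring, emit one MI_STORE_DWORD_IMM with the addressing form selected by graphics version, and close the emission. A sketch of how the full function plausibly reads — the matches confirm only the ring_begin(rq, 4), the two GRAPHICS_VER() branches, and the ring_advance(); the specific MI_* opcodes per generation are an assumption based on common i915 command-emission patterns:

static int emit_ggtt_store_dw(struct i915_request *rq, u32 addr, u32 value)
{
        u32 *cs;

        cs = intel_ring_begin(rq, 4);
        if (IS_ERR(cs))
                return PTR_ERR(cs);

        if (GRAPHICS_VER(rq->i915) >= 8) {
                *cs++ = MI_STORE_DWORD_IMM_GEN4;
                *cs++ = addr;
                *cs++ = 0;
                *cs++ = value;
        } else if (GRAPHICS_VER(rq->i915) >= 4) {
                *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
                *cs++ = 0;
                *cs++ = addr;
                *cs++ = value;
        } else {
                *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
                *cs++ = addr;
                *cs++ = value;
                *cs++ = MI_NOOP; /* pad out the 4 dwords we reserved */
        }

        intel_ring_advance(rq, cs);

        return 0;
}
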
487 struct i915_request *rq;
492 rq = ERR_PTR(err);
503 rq = intel_engine_create_kernel_request(engine);
504 if (IS_ERR(rq))
507 i915_request_get(rq);
509 err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value);
510 i915_request_add(rq);
512 i915_request_put(rq);
513 rq = ERR_PTR(err);
519 if (IS_ERR(rq))
521 return rq;
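
Lines 487–521 belong to checked_tl_write(): create a kernel request on the engine, hold an extra reference across submission, emit the seqno store into the timeline's hwsp slot, and fold any emission failure back into an ERR_PTR return. A condensed sketch under those assumptions (the timeline pin/validate steps hinted at by line 492 are elided, and the error message text is a guess):

static struct i915_request *
checked_tl_write(struct intel_timeline *tl,
                 struct intel_engine_cs *engine, u32 value)
{
        struct i915_request *rq;
        int err;

        /* timeline pinning and hwsp sanity checks elided (see ~488-501) */

        rq = intel_engine_create_kernel_request(engine);
        if (IS_ERR(rq))
                goto out;

        i915_request_get(rq); /* keep rq alive past i915_request_add() */

        err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value);
        i915_request_add(rq);
        if (err) {
                i915_request_put(rq);
                rq = ERR_PTR(err);
        }

out:
        if (IS_ERR(rq))
                pr_err("Failed to write to timeline!\n");
        return rq;
}
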
554 struct i915_request *rq;
562 rq = checked_tl_write(tl, engine, count);
563 if (IS_ERR(rq)) {
565 err = PTR_ERR(rq);
570 i915_request_put(rq);
624 struct i915_request *rq;
636 rq = checked_tl_write(tl, engine, count);
638 if (IS_ERR(rq)) {
640 err = PTR_ERR(rq);
645 i915_request_put(rq);
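
The two clusters at 554–570 and 624–645 are the same caller pattern in two live tests: write a value through checked_tl_write(), bail out on error, then drop the local reference once the timeline has been recorded. A sketch of that loop body — the intel_timeline_put() on the error path and the timelines[] bookkeeping array are assumptions, not shown by the matches:

        rq = checked_tl_write(tl, engine, count);
        if (IS_ERR(rq)) {
                intel_timeline_put(tl); /* assumed cleanup of the fresh timeline */
                err = PTR_ERR(rq);
                goto out;
        }

        timelines[count++] = tl; /* hypothetical per-test bookkeeping */
        i915_request_put(rq);    /* drop our ref; the hwsp write outlives it */
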
696 struct i915_request *rq;
702 rq = intel_engine_create_kernel_request(engine);
703 if (IS_ERR(rq)) {
704 err = PTR_ERR(rq);
711 err = intel_timeline_get_seqno(tl, rq, &seqno[0]);
714 i915_request_add(rq);
720 err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[0]);
722 i915_request_add(rq);
728 err = intel_timeline_get_seqno(tl, rq, &seqno[1]);
731 i915_request_add(rq);
737 err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[1]);
739 i915_request_add(rq);
748 i915_request_add(rq);
750 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
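
Lines 696–750 form a single test body that allocates two seqnos from one timeline within the same request, storing each to whatever hwsp slot the timeline points at when the seqno is handed out; the second allocation may wrap and relocate tl->hwsp_offset. A condensed, partly assumed reconstruction (tl->mutex locking around get_seqno and the wrap setup are elided; the -ETIME code is a guess):

        rq = intel_engine_create_kernel_request(engine);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto out;
        }

        err = intel_timeline_get_seqno(tl, rq, &seqno[0]);
        if (err) {
                i915_request_add(rq);
                goto out;
        }

        err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[0]);
        if (err) {
                i915_request_add(rq);
                goto out;
        }

        /* second allocation may wrap and move tl->hwsp_offset */
        err = intel_timeline_get_seqno(tl, rq, &seqno[1]);
        if (err) {
                i915_request_add(rq);
                goto out;
        }

        err = emit_ggtt_store_dw(rq, tl->hwsp_offset, seqno[1]);
        if (err) {
                i915_request_add(rq);
                goto out;
        }

        i915_request_add(rq);
        if (i915_request_wait(rq, 0, HZ / 5) < 0) {
                err = -ETIME; /* assumed error code */
                goto out;
        }
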
778 static int emit_read_hwsp(struct i915_request *rq,
782 const u32 gpr = i915_mmio_reg_offset(GEN8_RING_CS_GPR(rq->engine->mmio_base, 0));
785 cs = intel_ring_begin(rq, 12);
806 intel_ring_advance(rq, cs);
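
Lines 778–806 show emit_read_hwsp() reserving 12 dwords: stamp the caller's seqno into a scratch slot, load the current hwsp value into a CS general-purpose register (GEN8_RING_CS_GPR, confirmed at 782), then spill that register next to the seqno, so a later CPU pass can pair each seqno with the hwsp value the GPU observed at that instant. A sketch filling the 12 dwords — the particular MI_LOAD/STORE_REGISTER_MEM opcodes and the addr bookkeeping are assumptions:

static int emit_read_hwsp(struct i915_request *rq,
                          u32 seqno, u32 hwsp,
                          u32 *addr)
{
        const u32 gpr = i915_mmio_reg_offset(GEN8_RING_CS_GPR(rq->engine->mmio_base, 0));
        u32 *cs;

        cs = intel_ring_begin(rq, 12);
        if (IS_ERR(cs))
                return PTR_ERR(cs);

        /* record our seqno beside the hwsp sample */
        *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
        *cs++ = *addr;
        *cs++ = 0;
        *cs++ = seqno;
        *addr += 4;

        /* load the live hwsp value into the CS GPR */
        *cs++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_USE_GGTT;
        *cs++ = gpr;
        *cs++ = hwsp;
        *cs++ = 0;

        /* spill the GPR next to the seqno we just wrote */
        *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
        *cs++ = gpr;
        *cs++ = *addr;
        *cs++ = 0;
        *addr += 4;

        intel_ring_advance(rq, cs);

        return 0;
}
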
813 struct i915_request *rq;
884 w->rq = intel_context_create_request(ce);
886 if (IS_ERR(w->rq))
887 return PTR_ERR(w->rq);
891 switch_tl_lock(w->rq, NULL);
899 struct i915_request *rq = fetch_and_zero(&w->rq);
905 i915_request_get(rq);
906 switch_tl_lock(NULL, rq);
907 i915_request_add(rq);
909 if (i915_request_wait(rq, 0, HZ) < 0) {
928 i915_request_put(rq);
934 if (w->rq) {
935 switch_tl_lock(NULL, w->rq);
937 i915_request_add(w->rq);
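
Lines 813–937 describe a watcher helper: its request is created up front (884–887) and parked with switch_tl_lock() while other work is queued; at check time, fetch_and_zero(&w->rq) claims it, it is submitted and waited on, and the cleanup path at 934–937 flushes only a request that was never checked. A sketch of the check-side teardown — the names hwsp_watcher and check_watcher, the op() callback, and the scan loop are all assumptions inferred from the matched lines:

static int check_watcher(struct hwsp_watcher *w, const char *name,
                         bool (*op)(u32 hwsp, u32 seqno))
{
        struct i915_request *rq = fetch_and_zero(&w->rq);
        int err = 0;

        i915_request_get(rq);
        switch_tl_lock(NULL, rq); /* reacquire the request's own timeline lock */
        i915_request_add(rq);

        if (i915_request_wait(rq, 0, HZ) < 0) {
                err = -ETIME;
                goto out;
        }

        /* walk the recorded (seqno, hwsp) pairs and verify op(); elided */

out:
        i915_request_put(rq);
        return err;
}
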
945 struct i915_request *rq, *rn;
948 list_for_each_entry_safe(rq, rn, &tl->requests, link)
949 if (!i915_request_retire(rq))
956 static struct i915_request *wrap_timeline(struct i915_request *rq)
958 struct intel_context *ce = rq->context;
960 u32 seqno = rq->fence.seqno;
963 i915_request_put(rq);
964 rq = intel_context_create_request(ce);
965 if (IS_ERR(rq))
966 return rq;
968 i915_request_get(rq);
969 i915_request_add(rq);
972 i915_request_put(rq);
973 rq = i915_request_create(ce);
974 if (IS_ERR(rq))
975 return rq;
977 i915_request_get(rq);
978 i915_request_add(rq);
980 return rq;
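
Lines 956–980 pin down wrap_timeline() almost completely: keep creating and submitting requests on the same context until the 32-bit timeline seqno wraps past the starting request's seqno, then return one fresh request from the far side of the wrap. A sketch; the loop condition and the tl local are assumptions, the put/create/get/add skeleton is confirmed by the matches:

static struct i915_request *wrap_timeline(struct i915_request *rq)
{
        struct intel_context *ce = rq->context;
        struct intel_timeline *tl = ce->timeline;
        u32 seqno = rq->fence.seqno;

        while (tl->seqno >= seqno) { /* spin submissions until the u32 wraps */
                i915_request_put(rq);
                rq = intel_context_create_request(ce);
                if (IS_ERR(rq))
                        return rq;

                i915_request_get(rq);
                i915_request_add(rq);
        }

        i915_request_put(rq);
        rq = i915_request_create(ce); /* first request past the wrap */
        if (IS_ERR(rq))
                return rq;

        i915_request_get(rq);
        i915_request_add(rq);

        return rq;
}
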
1031 struct i915_request *rq;
1066 rq = i915_request_create(ce);
1067 if (IS_ERR(rq)) {
1068 err = PTR_ERR(rq);
1074 err = i915_sw_fence_await_dma_fence(&rq->submit,
1075 &watcher[0].rq->fence, 0,
1078 i915_request_add(rq);
1084 switch_tl_lock(rq, watcher[0].rq);
1085 err = intel_timeline_read_hwsp(rq, watcher[0].rq, &hwsp);
1087 err = emit_read_hwsp(watcher[0].rq, /* before */
1088 rq->fence.seqno, hwsp,
1090 switch_tl_lock(watcher[0].rq, rq);
1092 i915_request_add(rq);
1098 switch_tl_lock(rq, watcher[1].rq);
1099 err = intel_timeline_read_hwsp(rq, watcher[1].rq, &hwsp);
1101 err = emit_read_hwsp(watcher[1].rq, /* after */
1102 rq->fence.seqno, hwsp,
1104 switch_tl_lock(watcher[1].rq, rq);
1106 i915_request_add(rq);
1112 i915_request_get(rq);
1113 i915_request_add(rq);
1115 rq = wrap_timeline(rq);
1118 if (IS_ERR(rq)) {
1119 err = PTR_ERR(rq);
1123 err = i915_sw_fence_await_dma_fence(&watcher[1].rq->submit,
1124 &rq->fence, 0,
1127 i915_request_put(rq);
1135 i915_request_put(rq);
1141 if (i915_request_wait(rq,
1145 i915_request_put(rq);
1149 i915_request_put(rq);
1152 if (8 * watcher[1].rq->ring->emit >
1153 3 * watcher[1].rq->ring->size)
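
Lines 1031–1153 are the heart of the read test: each probe request is ordered behind the "before" watcher via an i915_sw_fence await on its submit fence, the probe's hwsp address is sampled under the watcher's timeline lock, and the loop bails once the "after" watcher's ring is more than 3/8 full (1152–1153). A condensed excerpt of that ordering step — declarations and the surrounding loop are elided, and the error-path add() is an assumption:

        /* inside the probe loop of the read test; rq runs on ce */
        rq = i915_request_create(ce);
        if (IS_ERR(rq)) {
                err = PTR_ERR(rq);
                goto out;
        }

        /* order rq behind the 'before' watcher's request (line 1074) */
        err = i915_sw_fence_await_dma_fence(&rq->submit,
                                            &watcher[0].rq->fence, 0,
                                            GFP_KERNEL);
        if (err < 0) {
                i915_request_add(rq);
                goto out;
        }

        /* sample rq's hwsp address while holding the watcher's tl lock */
        switch_tl_lock(rq, watcher[0].rq);
        err = intel_timeline_read_hwsp(rq, watcher[0].rq, &hwsp);
        if (err == 0)
                err = emit_read_hwsp(watcher[0].rq, /* before */
                                     rq->fence.seqno, hwsp,
                                     &watcher[0].addr);
        switch_tl_lock(watcher[0].rq, rq);

        /* ...mirror the same dance for watcher[1] after the wrap... */

        /* stop before the watcher ring fills (lines 1152-1153) */
        if (8 * watcher[1].rq->ring->emit >
            3 * watcher[1].rq->ring->size)
                break;
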
1194 struct i915_request *rq[3] = {};
1207 for (i = 0; i < ARRAY_SIZE(rq); i++) {
1222 rq[i] = i915_request_get(this);
1227 GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno);
1229 if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
1235 for (i = 0; i < ARRAY_SIZE(rq); i++) {
1236 if (!i915_request_completed(rq[i])) {
1244 for (i = 0; i < ARRAY_SIZE(rq); i++)
1245 i915_request_put(rq[i]);
1270 struct i915_request *rq[3] = {};
1294 for (i = 0; i < ARRAY_SIZE(rq); i++) {
1309 rq[i] = i915_request_get(this);
1314 GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno);
1316 if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
1322 for (i = 0; i < ARRAY_SIZE(rq); i++) {
1323 if (!i915_request_completed(rq[i])) {
1332 for (i = 0; i < ARRAY_SIZE(rq); i++)
1333 i915_request_put(rq[i]);
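
The twin clusters at 1194–1245 and 1270–1333 repeat one rollover pattern: submit three back-to-back requests across a seqno wrap, assert via GEM_BUG_ON that the third request's seqno landed below the first's, wait on the last, and require all three to have completed in order. A sketch of that shared body — ce/tl setup is elided and the -EIO/-EINVAL codes are assumptions:

        /* ce prepared so tl->seqno sits just below the wrap point */
        for (i = 0; i < ARRAY_SIZE(rq); i++) {
                struct i915_request *this;

                this = i915_request_create(ce);
                if (IS_ERR(this)) {
                        err = PTR_ERR(this);
                        goto out;
                }

                rq[i] = i915_request_get(this);
                i915_request_add(this);
        }

        /* the third request must have wrapped past the first */
        GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno);

        if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
                err = -EIO; /* assumed error code */
                goto out;
        }

        for (i = 0; i < ARRAY_SIZE(rq); i++) {
                if (!i915_request_completed(rq[i])) {
                        err = -EINVAL; /* assumed error code */
                        goto out;
                }
        }
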
1370 struct i915_request *rq;
1378 rq = checked_tl_write(tl, engine, count);
1379 if (IS_ERR(rq)) {
1381 err = PTR_ERR(rq);
1385 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
1387 i915_request_put(rq);
1401 i915_request_put(rq);