Lines Matching refs:rq

97 const struct i915_request *rq)
100 offset_in_page(sizeof(u32) * rq->fence.context);
109 struct i915_request *rq = NULL;
157 rq = igt_request_alloc(h->ctx, engine);
158 if (IS_ERR(rq)) {
159 err = PTR_ERR(rq);
163 err = igt_vma_move_to_active_unlocked(vma, rq, 0);
167 err = igt_vma_move_to_active_unlocked(hws, rq, 0);
174 *batch++ = lower_32_bits(hws_address(hws, rq));
175 *batch++ = upper_32_bits(hws_address(hws, rq));
176 *batch++ = rq->fence.seqno;
189 *batch++ = lower_32_bits(hws_address(hws, rq));
190 *batch++ = rq->fence.seqno;
202 *batch++ = lower_32_bits(hws_address(hws, rq));
203 *batch++ = rq->fence.seqno;
214 *batch++ = lower_32_bits(hws_address(hws, rq));
215 *batch++ = rq->fence.seqno;
228 if (rq->engine->emit_init_breadcrumb) {
229 err = rq->engine->emit_init_breadcrumb(rq);
238 err = rq->engine->emit_bb_start(rq, i915_vma_offset(vma), PAGE_SIZE, flags);
242 i915_request_set_error_once(rq, err);
243 i915_request_add(rq);
250 return err ? ERR_PTR(err) : rq;
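
The matches at 174–191 are the heart of the hanging batch: it stores the request's seqno into its per-context HWS slot and then jumps back into itself so it never completes. A rough sketch of the gen8+ branch follows; this is a simplification, not a quote of the file, and the NOOP padding and flag bits are assumptions:

	*batch++ = MI_STORE_DWORD_IMM_GEN4;
	*batch++ = lower_32_bits(hws_address(hws, rq));	/* per-context HWS slot */
	*batch++ = upper_32_bits(hws_address(hws, rq));
	*batch++ = rq->fence.seqno;			/* marks "I am running" */
	*batch++ = MI_NOOP;

	*batch++ = MI_ARB_CHECK;			/* preemption point */
	*batch++ = MI_BATCH_BUFFER_START | BIT(8) | 1;	/* loop back into this batch: spin forever */
	*batch++ = lower_32_bits(i915_vma_offset(vma));
	*batch++ = upper_32_bits(i915_vma_offset(vma));
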
253 static u32 hws_seqno(const struct hang *h, const struct i915_request *rq)
255 return READ_ONCE(h->seqno[rq->fence.context % (PAGE_SIZE/sizeof(u32))]);
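
hws_address() (lines 97–100) and hws_seqno() (253–255) pick out the same per-context u32 slot in the status page: the former as a byte offset for the batch's store, the latter as an array index for the CPU read-back. A tiny userspace sketch (PAGE_SIZE assumed to be 4096; the context value is invented) showing the two calculations agree:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u	/* assumption for the demo */

int main(void)
{
	uint64_t context = 1234;	/* stand-in for rq->fence.context */

	/* byte offset within the page, as in hws_address() */
	uint64_t byte_off = (sizeof(uint32_t) * context) % PAGE_SIZE;
	/* u32 index into the page, as in hws_seqno() */
	uint64_t slot = context % (PAGE_SIZE / sizeof(uint32_t));

	printf("byte offset %llu -> slot %llu, hws_seqno slot %llu\n",
	       (unsigned long long)byte_off,
	       (unsigned long long)(byte_off / sizeof(uint32_t)),
	       (unsigned long long)slot);
	return 0;
}
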
274 static bool wait_until_running(struct hang *h, struct i915_request *rq)
276 return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
277 rq->fence.seqno),
279 wait_for(i915_seqno_passed(hws_seqno(h, rq),
280 rq->fence.seqno),
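
The matches at 274–280 show wait_until_running() polling that slot until the batch's store lands: a brief busy-poll first, then a sleeping wait, returning true only if the seqno appeared. Reconstructed as a sketch; the 10 µs and 1000 ms bounds are assumptions, not copied from the file:

static bool wait_until_running(struct hang *h, struct i915_request *rq)
{
	/* Busy-poll briefly, then wait with sleeps; true once the batch
	 * has written rq->fence.seqno into its HWS slot. */
	return !(wait_for_us(i915_seqno_passed(hws_seqno(h, rq),
					       rq->fence.seqno),
			     10) &&
		 wait_for(i915_seqno_passed(hws_seqno(h, rq),
					    rq->fence.seqno),
			  1000));
}
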
287 struct i915_request *rq;
306 rq = hang_create_request(&h, engine);
307 if (IS_ERR(rq)) {
308 err = PTR_ERR(rq);
314 i915_request_get(rq);
319 i915_request_add(rq);
323 timeout = i915_request_wait(rq, 0,
328 i915_request_put(rq);
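
The block around lines 287–328 establishes the request lifecycle every later test repeats: allocate a hanging request, take a reference, submit, wait, drop the reference. Condensed into a sketch; the helper name is invented and the surrounding fixture setup is assumed to exist, as in the rest of the file:

static int __sketch_submit_and_wait(struct hang *h,
				    struct intel_engine_cs *engine)
{
	struct i915_request *rq;
	long timeout;

	rq = hang_create_request(h, engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_get(rq);	/* hold a reference across the wait */
	i915_request_add(rq);	/* submit; execution now belongs to the GPU */

	timeout = i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);

	i915_request_put(rq);	/* drop our reference */
	return timeout < 0 ? timeout : 0;
}
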
375 struct i915_request *rq;
377 rq = intel_context_create_request(ce);
378 if (IS_ERR(rq)) {
379 err = PTR_ERR(rq);
385 i915_request_add(rq);
474 struct i915_request *rq;
476 rq = intel_context_create_request(ce);
477 if (IS_ERR(rq)) {
492 err = PTR_ERR(rq);
496 i915_request_add(rq);
595 struct i915_request *rq;
597 rq = intel_context_create_request(ce);
598 if (IS_ERR(rq)) {
615 err = PTR_ERR(rq);
621 last = i915_request_get(rq);
622 i915_request_add(rq);
730 struct i915_request *rq = NULL;
742 rq = hang_create_request(&h, engine);
743 if (IS_ERR(rq)) {
744 err = PTR_ERR(rq);
750 i915_request_get(rq);
751 i915_request_add(rq);
753 if (!wait_until_running(&h, rq)) {
757 __func__, rq->fence.seqno, hws_seqno(&h, rq));
761 i915_request_put(rq);
776 if (rq) {
778 err = intel_selftest_wait_for_rq(rq);
781 engine->name, rq->fence.context,
782 rq->fence.seqno, rq->context->guc_id.id, err);
786 if (rq)
787 i915_request_put(rq);
868 static int active_request_put(struct i915_request *rq)
872 if (!rq)
875 if (i915_request_wait(rq, 0, 10 * HZ) < 0) {
877 rq->engine->name,
878 rq->fence.context,
879 rq->fence.seqno);
882 intel_gt_set_wedged(rq->engine->gt);
886 i915_request_put(rq);
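
The matches at 868–886 nearly spell out active_request_put() in full: wait up to ten seconds for the request, wedge the GT if it never completes, then drop the reference. Filled in as a sketch; the trace message is an assumption, not quoted from the file:

static int active_request_put(struct i915_request *rq)
{
	int err = 0;

	if (!rq)
		return 0;

	if (i915_request_wait(rq, 0, 10 * HZ) < 0) {
		GEM_TRACE("%s timed out waiting for fence %llx:%lld\n",
			  rq->engine->name,
			  rq->fence.context,
			  rq->fence.seqno);
		GEM_TRACE_DUMP();

		/* The request is stuck: declare the whole GT unusable. */
		intel_gt_set_wedged(rq->engine->gt);
		err = -EIO;
	}

	i915_request_put(rq);

	return err;
}
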
896 struct i915_request *rq[8] = {};
897 struct intel_context *ce[ARRAY_SIZE(rq)];
915 unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1);
916 struct i915_request *old = rq[idx];
926 rq[idx] = i915_request_get(new);
934 engine->sched_engine->schedule(rq[idx], &attr);
946 for (count = 0; count < ARRAY_SIZE(rq); count++) {
947 int err__ = active_request_put(rq[count]);
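
Lines 896–947 outline the background-load thread: a power-of-two ring of eight requests is kept in flight, so the running count can be masked with ARRAY_SIZE(rq) - 1 instead of taking a modulus, and the slot being overwritten is reaped with active_request_put(). A condensed sketch of that loop; the stop condition, context setup and the optional priority bump via sched_engine->schedule() are assumed:

	unsigned int count = 0;
	int err = 0;

	while (!kthread_should_stop()) {
		unsigned int idx = count++ & (ARRAY_SIZE(rq) - 1);
		struct i915_request *old = rq[idx];
		struct i915_request *new;

		new = intel_context_create_request(ce[idx]);
		if (IS_ERR(new)) {
			err = PTR_ERR(new);
			break;
		}

		rq[idx] = i915_request_get(new);	/* newest request takes this slot */
		i915_request_add(new);

		err = active_request_put(old);		/* wait for / release the oldest occupant */
		if (err)
			break;

		cond_resched();
	}
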
1048 struct i915_request *rq = NULL;
1060 rq = hang_create_request(&h, engine);
1061 if (IS_ERR(rq)) {
1062 err = PTR_ERR(rq);
1068 i915_request_get(rq);
1069 i915_request_add(rq);
1071 if (!wait_until_running(&h, rq)) {
1075 __func__, rq->fence.seqno, hws_seqno(&h, rq));
1079 i915_request_put(rq);
1096 if (rq) {
1098 err = intel_selftest_wait_for_rq(rq);
1101 engine->name, rq->fence.context,
1102 rq->fence.seqno, rq->context->guc_id.id, err);
1107 if (rq) {
1108 if (rq->fence.error != -EIO) {
1111 rq->fence.context,
1112 rq->fence.seqno, rq->context->guc_id.id);
1113 i915_request_put(rq);
1121 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
1128 rq->fence.context,
1129 rq->fence.seqno);
1132 i915_request_put(rq);
1140 i915_request_put(rq);
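
The checks at 1107–1140 encode the contract after a forced reset: the guilty request must carry fence.error == -EIO and must then complete within a short wait. As a sketch; the message text and return codes here are assumptions:

	if (rq->fence.error != -EIO) {
		pr_err("guilty request %llx:%lld not marked -EIO (error = %d)\n",
		       rq->fence.context, rq->fence.seqno, rq->fence.error);
		i915_request_put(rq);
		return -EINVAL;
	}

	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
		pr_err("guilty request %llx:%lld did not complete after reset\n",
		       rq->fence.context, rq->fence.seqno);
		i915_request_put(rq);
		return -EIO;
	}

	i915_request_put(rq);
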
1300 struct i915_request *rq;
1321 rq = hang_create_request(&h, engine);
1322 if (IS_ERR(rq)) {
1323 err = PTR_ERR(rq);
1328 i915_request_get(rq);
1329 i915_request_add(rq);
1331 if (!wait_until_running(&h, rq)) {
1335 __func__, rq->fence.seqno, hws_seqno(&h, rq));
1336 intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
1346 timeout = i915_request_wait(rq, 0, 10);
1361 i915_request_put(rq);
1434 struct i915_request *rq;
1478 rq = hang_create_request(&h, engine);
1479 if (IS_ERR(rq)) {
1480 err = PTR_ERR(rq);
1492 i915_request_add(rq);
1502 i915_request_add(rq);
1507 err = igt_vma_move_to_active_unlocked(arg.vma, rq, flags);
1515 i915_request_get(rq);
1516 i915_request_add(rq);
1520 if (!wait_until_running(&h, rq)) {
1524 __func__, rq->fence.seqno, hws_seqno(&h, rq));
1525 intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
1544 if (wait_for(!list_empty(&rq->fence.cb_list), 10)) {
1548 intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
1556 fake_hangcheck(gt, rq->engine->mask);
1570 i915_request_put(rq);
1683 struct i915_request *rq;
1686 rq = hang_create_request(&h, engine);
1687 if (IS_ERR(rq)) {
1688 err = PTR_ERR(rq);
1693 i915_request_get(rq);
1694 i915_request_add(rq);
1710 i915_request_put(rq);
1727 i915_request_put(rq);
1741 i915_request_put(rq);
1747 if (rq->fence.error) {
1749 rq->fence.error);
1750 i915_request_put(rq);
1758 i915_request_put(rq);
1765 prev = rq;
1813 struct i915_request *rq;
1833 rq = hang_create_request(&h, engine);
1834 if (IS_ERR(rq)) {
1835 err = PTR_ERR(rq);
1840 i915_request_get(rq);
1841 i915_request_add(rq);
1843 if (!wait_until_running(&h, rq)) {
1847 __func__, rq->fence.seqno, hws_seqno(&h, rq));
1848 intel_engine_dump(rq->engine, &p, "%s\n", rq->engine->name);
1863 if (rq->fence.error != -EIO) {
1870 i915_request_put(rq);
1912 struct i915_request *rq;
1926 rq = hang_create_request(&h, engine);
1927 if (IS_ERR(rq)) {
1928 err = PTR_ERR(rq);
1933 i915_request_get(rq);
1934 i915_request_add(rq);
1936 if (wait_until_running(&h, rq)) {
1941 rq->fence.seqno, hws_seqno(&h, rq));
1950 i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
1955 i915_request_put(rq);
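
The final group, 1912–1955, shows the common prologue to every reset flavour in this file: confirm via the HWS slot that the hanging batch is actually executing before the reset path under test is poked, then wait the request out and release it. As a sketch; the reset call itself is elided and the error code is an assumption:

	rq = hang_create_request(&h, engine);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_get(rq);
	i915_request_add(rq);

	if (!wait_until_running(&h, rq)) {
		pr_err("%s: request never started executing\n", engine->name);
		err = -ETIME;
	} else {
		/* ... exercise the reset / error handling under test ... */
	}

	i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
	i915_request_put(rq);
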