Lines matching refs:rq (cross-references to the identifier rq in the i915 execlists submission code)

213 		 struct i915_request *rq,
216 struct i915_request *active = rq;
218 list_for_each_entry_from_reverse(rq, &tl->requests, link) {
219 if (__i915_request_is_complete(rq))
223 i915_request_set_error_once(rq, error);
224 __i915_request_skip(rq);
226 active = rq;
233 active_request(const struct intel_timeline * const tl, struct i915_request *rq)
235 return __active_request(tl, rq, 0);
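The matches above (source lines 213-235) come from __active_request()/active_request(), which walk a timeline's request list backwards from the given request to find the oldest one not yet completed, optionally marking the newer ones with an error and skipping them. A minimal sketch of how these fragments fit together; the error guard and the final return are inferred rather than matched lines.

	static struct i915_request *
	__active_request(const struct intel_timeline * const tl,
			 struct i915_request *rq,
			 int error)
	{
		struct i915_request *active = rq;

		list_for_each_entry_from_reverse(rq, &tl->requests, link) {
			if (__i915_request_is_complete(rq))
				break;

			if (error) {
				/* Inferred: only poison newer requests when asked to */
				i915_request_set_error_once(rq, error);
				__i915_request_skip(rq);
			}
			active = rq;
		}

		return active;
	}

	static struct i915_request *
	active_request(const struct intel_timeline * const tl, struct i915_request *rq)
	{
		return __active_request(tl, rq, 0);
	}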
256 static int rq_prio(const struct i915_request *rq)
258 return READ_ONCE(rq->sched.attr.priority);
261 static int effective_prio(const struct i915_request *rq)
263 int prio = rq_prio(rq);
273 if (i915_request_has_nopreempt(rq))
298 const struct i915_request *rq)
323 last_prio = max(effective_prio(rq), I915_PRIORITY_NORMAL - 1);
331 if (!list_is_last(&rq->sched.link, &engine->sched_engine->requests) &&
332 rq_prio(list_next_entry(rq, sched.link)) > last_prio)
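Source lines 256-332 cover the priority helpers feeding the preemption decision: rq_prio() reads the scheduler attribute, effective_prio() promotes a nopreempt request so it is never preempted, and need_preempt() compares the running request against the next queued one. The sketch below reassembles the two small helpers; the unpreemptable priority constant is an assumption, only the nopreempt check itself is a matched line.

	static int rq_prio(const struct i915_request *rq)
	{
		return READ_ONCE(rq->sched.attr.priority);
	}

	static int effective_prio(const struct i915_request *rq)
	{
		int prio = rq_prio(rq);

		/*
		 * Assumption: a nopreempt request is promoted to an unpreemptable
		 * priority so need_preempt() never tries to kick it off the HW.
		 */
		if (i915_request_has_nopreempt(rq))
			prio = I915_PRIORITY_UNPREEMPTABLE;

		return prio;
	}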
369 struct i915_request *rq, *rn, *active = NULL;
375 list_for_each_entry_safe_reverse(rq, rn,
378 if (__i915_request_is_complete(rq)) {
379 list_del_init(&rq->sched.link);
383 __i915_request_unsubmit(rq);
385 GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
386 if (rq_prio(rq) != prio) {
387 prio = rq_prio(rq);
393 list_move(&rq->sched.link, pl);
394 set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
397 if (intel_ring_direction(rq->ring,
398 rq->tail,
399 rq->ring->tail + 8) > 0)
400 rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE;
402 active = rq;
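Source lines 369-402 are __unwind_incomplete_requests(): after a preemption or reset, every request still in flight on the engine is unsubmitted and moved back onto the priority lists so it can be resubmitted, and if the ring has been rewound far enough the context is flagged for a full restore. A sketch pieced together from the fragments; the lockdep assert and the priolist lookup on a priority change are inferred.

	static struct i915_request *
	__unwind_incomplete_requests(struct intel_engine_cs *engine)
	{
		struct i915_request *rq, *rn, *active = NULL;
		struct list_head *pl;
		int prio = I915_PRIORITY_INVALID;

		lockdep_assert_held(&engine->sched_engine->lock);	/* inferred */

		list_for_each_entry_safe_reverse(rq, rn,
						 &engine->sched_engine->requests,
						 sched.link) {
			if (__i915_request_is_complete(rq)) {
				list_del_init(&rq->sched.link);
				continue;
			}

			__i915_request_unsubmit(rq);

			GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
			if (rq_prio(rq) != prio) {
				prio = rq_prio(rq);
				pl = i915_sched_lookup_priolist(engine->sched_engine,
								prio);	/* inferred */
			}

			list_move(&rq->sched.link, pl);
			set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);

			/* Rewound past the ring tail? Force a full context restore */
			if (intel_ring_direction(rq->ring,
						 rq->tail,
						 rq->ring->tail + 8) > 0)
				rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE;

			active = rq;
		}

		return active;
	}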
418 execlists_context_status_change(struct i915_request *rq, unsigned long status)
427 atomic_notifier_call_chain(&rq->engine->context_status_notifier,
428 status, rq);
431 static void reset_active(struct i915_request *rq,
434 struct intel_context * const ce = rq->context;
452 ENGINE_TRACE(engine, "{ reset rq=%llx:%lld }\n",
453 rq->fence.context, rq->fence.seqno);
456 if (__i915_request_is_complete(rq))
457 head = rq->tail;
459 head = __active_request(ce->timeline, rq, -EIO)->head;
469 static bool bad_request(const struct i915_request *rq)
471 return rq->fence.error && i915_request_started(rq);
475 __execlists_schedule_in(struct i915_request *rq)
477 struct intel_engine_cs * const engine = rq->engine;
478 struct intel_context * const ce = rq->context;
486 if (unlikely(!intel_context_is_schedulable(ce) || bad_request(rq)))
487 reset_active(rq, engine);
522 execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
530 static void execlists_schedule_in(struct i915_request *rq, int idx)
532 struct intel_context * const ce = rq->context;
535 GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine));
536 trace_i915_request_in(rq, idx);
540 old = __execlists_schedule_in(rq);
543 GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
547 resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve)
549 struct intel_engine_cs *engine = rq->engine;
553 clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
554 WRITE_ONCE(rq->engine, &ve->base);
555 ve->base.submit_request(rq);
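Source lines 547-555 are resubmit_virtual_request(): a request that was unwound but can no longer run on this physical engine is handed back to its virtual engine for rescheduling. A sketch; the locking around the handover is inferred.

	static void
	resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve)
	{
		struct intel_engine_cs *engine = rq->engine;

		spin_lock_irq(&engine->sched_engine->lock);	/* inferred */

		clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
		WRITE_ONCE(rq->engine, &ve->base);
		ve->base.submit_request(rq);

		spin_unlock_irq(&engine->sched_engine->lock);	/* inferred */
	}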
560 static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
563 struct intel_engine_cs *engine = rq->engine;
566 * After this point, the rq may be transferred to a new sibling, so
581 if (i915_request_in_priority_queue(rq) &&
582 rq->execution_mask != engine->mask)
583 resubmit_virtual_request(rq, ve);
589 static void __execlists_schedule_out(struct i915_request * const rq,
592 struct intel_engine_cs * const engine = rq->engine;
611 if (intel_timeline_is_last(ce->timeline, rq) &&
612 __i915_request_is_complete(rq))
630 execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
645 kick_siblings(rq, ce);
651 static inline void execlists_schedule_out(struct i915_request *rq)
653 struct intel_context * const ce = rq->context;
655 trace_i915_request_out(rq);
660 __execlists_schedule_out(rq, ce);
662 i915_request_put(rq);
675 static u64 execlists_update_context(struct i915_request *rq)
677 struct intel_context *ce = rq->context;
682 if (rq->engine->flags & I915_ENGINE_HAS_EU_PRIORITY)
683 desc |= map_i915_prio_to_lrc_desc_prio(rq_prio(rq));
702 GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail);
703 prev = rq->ring->tail;
704 tail = intel_ring_set_tail(rq->ring, rq->tail);
705 if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
708 rq->tail = rq->wa_tail;
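Source lines 675-708 are execlists_update_context(): before a request's context is written into the ELSP, its ring tail is folded into the context image, and if the new tail would not advance past the previous one the descriptor is flagged to force a full restore. A sketch; the trailing write barrier and force-restore clearing are abbreviated, and the lines marked inferred are not matched lines.

	static u64 execlists_update_context(struct i915_request *rq)
	{
		struct intel_context *ce = rq->context;
		u64 desc = ce->lrc.desc;
		u32 tail, prev;

		if (rq->engine->flags & I915_ENGINE_HAS_EU_PRIORITY)
			desc |= map_i915_prio_to_lrc_desc_prio(rq_prio(rq));

		/* Fold the request's tail into the context image before submission */
		GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail);
		prev = rq->ring->tail;
		tail = intel_ring_set_tail(rq->ring, rq->tail);
		if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
			desc |= CTX_DESC_FORCE_RESTORE;
		ce->lrc_reg_state[CTX_RING_TAIL] = tail;	/* inferred */

		/* Leave wa_tail behind for any future lite-restore resubmission */
		rq->tail = rq->wa_tail;

		/* Write barrier and force-restore bookkeeping omitted here */

		return desc;
	}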
738 dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq)
740 if (!rq)
745 rq->context->lrc.ccid,
746 rq->fence.context, rq->fence.seqno,
747 __i915_request_is_complete(rq) ? "!" :
748 __i915_request_has_started(rq) ? "*" :
750 rq_prio(rq));
784 struct i915_request * const *port, *rq, *prev = NULL;
806 for (port = execlists->pending; (rq = *port); port++) {
810 GEM_BUG_ON(!kref_read(&rq->fence.refcount));
811 GEM_BUG_ON(!i915_request_is_active(rq));
813 if (ce == rq->context) {
820 ce = rq->context;
848 prev = rq;
855 if (rq->execution_mask != engine->mask &&
865 if (!spin_trylock_irqsave(&rq->lock, flags))
868 if (__i915_request_is_complete(rq))
900 spin_unlock_irqrestore(&rq->lock, flags);
932 struct i915_request *rq = execlists->pending[n];
935 rq ? execlists_update_context(rq) : 0,
962 static unsigned long i915_request_flags(const struct i915_request *rq)
964 return READ_ONCE(rq->fence.flags);
997 const struct i915_request *rq,
1002 if (!rq)
1005 if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */
1033 struct i915_request *rq = READ_ONCE(ve->request);
1035 /* lazily cleanup after another engine handled rq */
1036 if (!rq || !virtual_matches(ve, rq, engine)) {
1075 static void defer_request(struct i915_request *rq, struct list_head * const pl)
1089 GEM_BUG_ON(i915_request_is_active(rq));
1090 list_move_tail(&rq->sched.link, pl);
1092 for_each_waiter(p, rq) {
1100 if (w->engine != rq->engine)
1106 !__i915_request_is_complete(rq));
1111 if (rq_prio(w) < rq_prio(rq))
1114 GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
1119 rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
1120 } while (rq);
1125 struct i915_request *rq;
1127 rq = __unwind_incomplete_requests(engine);
1128 if (!rq)
1131 defer_request(rq, i915_sched_lookup_priolist(engine->sched_engine,
1132 rq_prio(rq)));
1137 const struct i915_request *rq)
1151 return rq->context->lrc.ccid == READ_ONCE(el->yield);
1155 const struct i915_request *rq)
1161 if (!rq || __i915_request_is_complete(rq))
1169 if (!list_is_last_rcu(&rq->sched.link,
1190 timeslice_expired(struct intel_engine_cs *engine, const struct i915_request *rq)
1194 if (i915_request_has_nopreempt(rq) && __i915_request_has_started(rq))
1197 if (!needs_timeslice(engine, rq))
1200 return timer_expired(&el->timer) || timeslice_yield(el, rq);
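Source lines 1137-1200 are the timeslicing helpers: timeslice_yield() checks whether the hardware asked the running context (matched by ccid) to yield, and timeslice_expired() combines the nopreempt flag, needs_timeslice() and the timeslice timer. A sketch of the latter; only the local execlists pointer is inferred.

	static bool
	timeslice_expired(struct intel_engine_cs *engine, const struct i915_request *rq)
	{
		const struct intel_engine_execlists *el = &engine->execlists;	/* inferred */

		if (i915_request_has_nopreempt(rq) && __i915_request_has_started(rq))
			return false;

		if (!needs_timeslice(engine, rq))
			return false;

		return timer_expired(&el->timer) || timeslice_yield(el, rq);
	}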
1240 const struct i915_request *rq)
1242 if (!rq)
1246 engine->execlists.preempt_target = rq;
1249 if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq)))
1256 const struct i915_request *rq)
1262 active_preempt_timeout(engine, rq));
1265 static bool completed(const struct i915_request *rq)
1267 if (i915_request_has_sentinel(rq))
1270 return __i915_request_is_complete(rq);
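Source lines 1265-1270 are completed(), used while stepping past finished inflight requests: a sentinel request is deliberately reported as not completed, presumably so the dequeue loop never coalesces past it. A sketch; only the early return is inferred.

	static bool completed(const struct i915_request *rq)
	{
		/* Never step past a sentinel request (inferred early-out) */
		if (i915_request_has_sentinel(rq))
			return false;

		return __i915_request_is_complete(rq);
	}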
1414 struct i915_request *rq;
1418 rq = ve->request;
1419 if (unlikely(!virtual_matches(ve, rq, engine)))
1422 GEM_BUG_ON(rq->engine != &ve->base);
1423 GEM_BUG_ON(rq->context != &ve->context);
1425 if (unlikely(rq_prio(rq) < queue_prio(sched_engine))) {
1430 if (last && !can_merge_rq(last, rq)) {
1437 "virtual rq=%llx:%lld%s, new engine? %s\n",
1438 rq->fence.context,
1439 rq->fence.seqno,
1440 __i915_request_is_complete(rq) ? "!" :
1441 __i915_request_has_started(rq) ? "*" :
1452 GEM_BUG_ON(!(rq->execution_mask & engine->mask));
1453 WRITE_ONCE(rq->engine, engine);
1455 if (__i915_request_submit(rq)) {
1473 last = rq;
1476 i915_request_put(rq);
1493 struct i915_request *rq, *rn;
1495 priolist_for_each_request_consume(rq, rn, p) {
1509 if (last && !can_merge_rq(last, rq)) {
1523 if (last->context == rq->context)
1535 if (rq->execution_mask != engine->mask)
1546 ctx_single_port_submission(rq->context))
1552 if (__i915_request_submit(rq)) {
1560 rq->context));
1563 rq->fence.seqno));
1566 last = rq;
1968 struct i915_request *rq = *execlists->active;
1970 rq->context->lrc_reg_state;
1982 "rq:{start:%08x, head:%04x, tail:%04x, seqno:%llx:%d, hwsp:%d}, ",
1983 i915_ggtt_offset(rq->ring->vma),
1984 rq->head, rq->tail,
1985 rq->fence.context,
1986 lower_32_bits(rq->fence.seqno),
1987 hwsp_seqno(rq));
2056 static void __execlists_hold(struct i915_request *rq)
2063 if (i915_request_is_active(rq))
2064 __i915_request_unsubmit(rq);
2066 clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
2067 list_move_tail(&rq->sched.link,
2068 &rq->engine->sched_engine->hold);
2069 i915_request_set_hold(rq);
2070 RQ_TRACE(rq, "on hold\n");
2072 for_each_waiter(p, rq) {
2080 if (w->engine != rq->engine)
2095 rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
2096 } while (rq);
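Source lines 2056-2096 are __execlists_hold(): the request is unsubmitted if active, moved from the priority lists onto sched_engine->hold, and the same treatment is applied iteratively (via a local list rather than recursion) to its waiters on the same engine. A sketch; the full set of waiter filters is abbreviated into the checks shown.

	static void __execlists_hold(struct i915_request *rq)
	{
		LIST_HEAD(list);

		do {
			struct i915_dependency *p;

			if (i915_request_is_active(rq))
				__i915_request_unsubmit(rq);

			clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
			list_move_tail(&rq->sched.link,
				       &rq->engine->sched_engine->hold);
			i915_request_set_hold(rq);
			RQ_TRACE(rq, "on hold\n");

			/* Pull same-engine waiters onto the hold list as well */
			for_each_waiter(p, rq) {
				struct i915_request *w =
					container_of(p->waiter, typeof(*w), sched);

				/* Leave waiters on other engines to their semaphores */
				if (w->engine != rq->engine)
					continue;

				/*
				 * Further filters (weak dependencies, unready or
				 * completed waiters) omitted from this sketch.
				 */
				if (i915_request_on_hold(w))
					continue;

				list_move_tail(&w->sched.link, &list);
			}

			rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
		} while (rq);
	}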
2100 struct i915_request *rq)
2102 if (i915_request_on_hold(rq))
2107 if (__i915_request_is_complete(rq)) { /* too late! */
2108 rq = NULL;
2118 GEM_BUG_ON(i915_request_on_hold(rq));
2119 GEM_BUG_ON(rq->engine != engine);
2120 __execlists_hold(rq);
2125 return rq;
2128 static bool hold_request(const struct i915_request *rq)
2138 for_each_signaler(p, rq) {
2142 if (s->engine != rq->engine)
2154 static void __execlists_unhold(struct i915_request *rq)
2161 RQ_TRACE(rq, "hold release\n");
2163 GEM_BUG_ON(!i915_request_on_hold(rq));
2164 GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
2166 i915_request_clear_hold(rq);
2167 list_move_tail(&rq->sched.link,
2168 i915_sched_lookup_priolist(rq->engine->sched_engine,
2169 rq_prio(rq)));
2170 set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
2173 for_each_waiter(p, rq) {
2180 if (w->engine != rq->engine)
2193 rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
2194 } while (rq);
2198 struct i915_request *rq)
2206 __execlists_unhold(rq);
2208 if (rq_prio(rq) > engine->sched_engine->queue_priority_hint) {
2209 engine->sched_engine->queue_priority_hint = rq_prio(rq);
2218 struct i915_request *rq;
2227 struct intel_engine_cs *engine = cap->rq->engine;
2232 vma = intel_engine_coredump_add_request(gt->engine, cap->rq, gfp);
2249 execlists_unhold(engine, cap->rq);
2250 i915_request_put(cap->rq);
2293 struct i915_request * const *port, *rq;
2301 for (port = el->active; (rq = *port); port++) {
2302 if (rq->context->lrc.ccid == ccid) {
2306 return rq;
2310 for (port = el->pending; (rq = *port); port++) {
2311 if (rq->context->lrc.ccid == ccid) {
2315 return rq;
2346 cap->rq = active_context(engine, active_ccid(engine));
2347 if (cap->rq) {
2348 cap->rq = active_request(cap->rq->context->timeline, cap->rq);
2349 cap->rq = i915_request_get_rcu(cap->rq);
2352 if (!cap->rq)
2375 if (!execlists_hold(engine, cap->rq))
2383 i915_request_put(cap->rq);
2443 const struct i915_request *rq = *engine->execlists.active;
2456 if (rq == engine->execlists.preempt_target)
2460 active_preempt_timeout(engine, rq));
2551 struct i915_request *rq)
2553 GEM_BUG_ON(!list_empty(&rq->sched.link));
2554 list_add_tail(&rq->sched.link,
2556 rq_prio(rq)));
2557 set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
2561 const struct i915_request *rq)
2565 if (rq_prio(rq) <= sched_engine->queue_priority_hint)
2568 sched_engine->queue_priority_hint = rq_prio(rq);
2573 const struct i915_request *rq)
2575 GEM_BUG_ON(i915_request_on_hold(rq));
2576 return !list_empty(&engine->sched_engine->hold) && hold_request(rq);
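Source lines 2551-2576 are the submission-side helpers that place a new request onto the priority lists, bump the queue priority hint, and divert it to the hold list when an ancestor is already held. In the sketch below the helper names (queue_request, submit_queue, ancestor_on_hold) and submit_queue's boolean return are inferred from the bodies, not from matched lines.

	static void queue_request(struct intel_engine_cs *engine,
				  struct i915_request *rq)
	{
		GEM_BUG_ON(!list_empty(&rq->sched.link));
		list_add_tail(&rq->sched.link,
			      i915_sched_lookup_priolist(engine->sched_engine,
							 rq_prio(rq)));
		set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
	}

	static bool submit_queue(struct intel_engine_cs *engine,
				 const struct i915_request *rq)
	{
		struct i915_sched_engine *sched_engine = engine->sched_engine;

		if (rq_prio(rq) <= sched_engine->queue_priority_hint)
			return false;

		sched_engine->queue_priority_hint = rq_prio(rq);
		return true;	/* inferred: caller kicks the tasklet on true */
	}

	static bool ancestor_on_hold(const struct intel_engine_cs *engine,
				     const struct i915_request *rq)
	{
		GEM_BUG_ON(i915_request_on_hold(rq));
		return !list_empty(&engine->sched_engine->hold) && hold_request(rq);
	}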
2643 struct i915_request *rq)
2647 i915_request_active_engine(rq, &engine);
2714 static int emit_pdps(struct i915_request *rq)
2716 const struct intel_engine_cs * const engine = rq->engine;
2717 struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->context->vm);
2721 GEM_BUG_ON(intel_vgpu_active(rq->i915));
2730 cs = intel_ring_begin(rq, 2);
2736 intel_ring_advance(rq, cs);
2739 err = engine->emit_flush(rq, EMIT_FLUSH);
2744 err = engine->emit_flush(rq, EMIT_INVALIDATE);
2748 cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
2764 intel_ring_advance(rq, cs);
2766 intel_ring_advance(rq, cs);
3030 struct i915_request *rq;
3038 rq = active_context(engine, engine->execlists.reset_ccid);
3039 if (!rq)
3042 ce = rq->context;
3045 if (__i915_request_is_complete(rq)) {
3047 head = intel_ring_wrap(ce->ring, rq->tail);
3057 rq = active_request(ce->timeline, rq);
3058 head = intel_ring_wrap(ce->ring, rq->head);
3073 if (!__i915_request_has_started(rq))
3087 __i915_request_reset(rq, stalled);
3151 struct i915_request *rq, *rn;
3177 list_for_each_entry(rq, &engine->sched_engine->requests, sched.link)
3178 i915_request_put(i915_request_mark_eio(rq));
3185 priolist_for_each_request_consume(rq, rn, p) {
3186 if (i915_request_mark_eio(rq)) {
3187 __i915_request_submit(rq);
3188 i915_request_put(rq);
3197 list_for_each_entry(rq, &sched_engine->hold, sched.link)
3198 i915_request_put(i915_request_mark_eio(rq));
3209 rq = fetch_and_zero(&ve->request);
3210 if (rq) {
3211 if (i915_request_mark_eio(rq)) {
3212 rq->engine = engine;
3213 __i915_request_submit(rq);
3214 i915_request_put(rq);
3216 i915_request_put(rq);
3280 static void add_to_engine(struct i915_request *rq)
3282 lockdep_assert_held(&rq->engine->sched_engine->lock);
3283 list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
3286 static void remove_from_engine(struct i915_request *rq)
3292 * as their rq->engine pointer is not stable until under that
3294 * check that the rq still belongs to the newly locked engine.
3296 locked = READ_ONCE(rq->engine);
3298 while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
3303 list_del_init(&rq->sched.link);
3305 clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
3306 clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
3309 set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
3313 i915_request_notify_execute_cb_imm(rq);
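Source lines 3286-3313 are remove_from_engine(): because rq->engine is not stable for virtual requests until the engine lock is held, the function loops taking the lock of whichever engine the request currently claims before unlinking it. A sketch; the lock/unlock calls between the matched lines are inferred.

	static void remove_from_engine(struct i915_request *rq)
	{
		struct intel_engine_cs *engine, *locked;

		/*
		 * Virtual engines complicate acquiring the engine timeline lock,
		 * as their rq->engine pointer is not stable until under that
		 * lock, so take the lock and then check that the rq still
		 * belongs to the newly locked engine.
		 */
		locked = READ_ONCE(rq->engine);
		spin_lock_irq(&locked->sched_engine->lock);	/* inferred */
		while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
			spin_unlock(&locked->sched_engine->lock);	/* inferred */
			spin_lock(&engine->sched_engine->lock);		/* inferred */
			locked = engine;
		}
		list_del_init(&rq->sched.link);

		clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
		clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);

		/* Prevent further __await_execution() registering a cb, then flush */
		set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);

		spin_unlock_irq(&locked->sched_engine->lock);	/* inferred */

		i915_request_notify_execute_cb_imm(rq);
	}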
3325 static void kick_execlists(const struct i915_request *rq, int prio)
3327 struct intel_engine_cs *engine = rq->engine;
3349 if (inflight->context == rq->context)
3353 "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
3355 rq->fence.context, rq->fence.seqno,
3791 struct i915_request *rq;
3794 rq = READ_ONCE(ve->request);
3795 if (!rq)
3798 /* The rq is ready for submission; rq->execution_mask is now stable. */
3799 mask = rq->execution_mask;
3802 i915_request_set_error_once(rq, -ENODEV);
3806 ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n",
3807 rq->fence.context, rq->fence.seqno,
3898 static void virtual_submit_request(struct i915_request *rq)
3900 struct virtual_engine *ve = to_virtual_engine(rq->engine);
3903 ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n",
3904 rq->fence.context,
3905 rq->fence.seqno);
3912 if (__i915_request_is_complete(rq)) {
3913 __i915_request_submit(rq);
3923 ve->base.sched_engine->queue_priority_hint = rq_prio(rq);
3924 ve->request = i915_request_get(rq);
3927 list_move_tail(&rq->sched.link, virtual_queue(ve));
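Source lines 3898-3927 are virtual_submit_request(): a request submitted to a virtual engine either completes immediately (if it already finished while being shuffled between engines) or becomes the single pending ve->request and bumps the queue priority hint. A sketch; the locking, the handling of a previously queued ve->request and the tasklet kick are inferred or abbreviated.

	static void virtual_submit_request(struct i915_request *rq)
	{
		struct virtual_engine *ve = to_virtual_engine(rq->engine);
		unsigned long flags;

		ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n",
			     rq->fence.context,
			     rq->fence.seqno);

		spin_lock_irqsave(&ve->base.sched_engine->lock, flags);	/* inferred */

		/* By the time we resubmit a request, it may already be completed */
		if (__i915_request_is_complete(rq)) {
			__i915_request_submit(rq);
			goto unlock;
		}

		ve->base.sched_engine->queue_priority_hint = rq_prio(rq);
		ve->request = i915_request_get(rq);

		list_move_tail(&rq->sched.link, virtual_queue(ve));

		tasklet_hi_schedule(&ve->base.sched_engine->tasklet);	/* inferred */

	unlock:
		spin_unlock_irqrestore(&ve->base.sched_engine->lock, flags);	/* inferred */
	}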
4083 const struct i915_request *rq,
4090 struct i915_request *rq, *last;
4099 list_for_each_entry(rq, &sched_engine->requests, sched.link) {
4101 show_request(m, rq, "\t\t", 0);
4103 last = rq;
4123 priolist_for_each_request(rq, p) {
4125 show_request(m, rq, "\t\t", 0);
4127 last = rq;
4144 struct i915_request *rq = READ_ONCE(ve->request);
4146 if (rq) {
4148 show_request(m, rq, "\t\t", 0);
4150 last = rq;