Lines matching refs: rq

114 	struct i915_request *rq = to_request(fence);
116 GEM_BUG_ON(rq->guc_prio != GUC_PRIO_INIT &&
117 rq->guc_prio != GUC_PRIO_FINI);
119 i915_request_free_capture_list(fetch_and_zero(&rq->capture_list));
120 if (rq->batch_res) {
121 i915_vma_resource_put(rq->batch_res);
122 rq->batch_res = NULL;
132 i915_sw_fence_fini(&rq->submit);
133 i915_sw_fence_fini(&rq->semaphore);
139 * very careful in what rq->engine we poke. The virtual engine is
140 * referenced via the rq->context and we released that ref during
148 * not be unsubmitted again, so rq->engine and rq->execution_mask
149 * at this point is stable. rq->execution_mask will be a single
153 * power-of-two we assume that rq->engine may still be a virtual
163 * know that if the rq->execution_mask is a single bit, rq->engine
166 if (is_power_of_2(rq->execution_mask) &&
167 !cmpxchg(&rq->engine->request_pool, NULL, rq))
170 kmem_cache_free(slab_requests, rq);
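The comment fragments above (in i915_fence_release) explain why a retired request's rq->engine can be trusted when rq->execution_mask is a single bit, which is what lets the freed request be parked in engine->request_pool via cmpxchg() instead of going straight back to the slab. Below is a minimal userspace sketch of that single-slot cache pattern, assuming C11 atomics in place of the kernel's cmpxchg()/xchg() and malloc()/free() in place of the slab; the request_free()/request_alloc() names are illustrative, not the driver's API.

/* Sketch of a single-slot object cache, modelled on the engine->request_pool
 * reuse in i915_fence_release(). C11 atomics stand in for the kernel's
 * cmpxchg()/xchg(); malloc()/free() stand in for the slab cache.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct request { int payload; };

static _Atomic(struct request *) request_pool; /* one cached free object */

static void request_free(struct request *rq)
{
	struct request *expected = NULL;

	/* Park the object in the empty slot; if the slot is already taken,
	 * fall back to really freeing it.
	 */
	if (!atomic_compare_exchange_strong(&request_pool, &expected, rq))
		free(rq);
}

static struct request *request_alloc(void)
{
	/* Take whatever is cached, atomically leaving the slot empty. */
	struct request *rq = atomic_exchange(&request_pool, NULL);

	return rq ? rq : malloc(sizeof(*rq));
}
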
191 __notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk))
195 if (llist_empty(&rq->execute_cb))
199 llist_del_all(&rq->execute_cb),
204 static void __notify_execute_cb_irq(struct i915_request *rq)
206 __notify_execute_cb(rq, irq_work_queue);
215 void i915_request_notify_execute_cb_imm(struct i915_request *rq)
217 __notify_execute_cb(rq, irq_work_imm);
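__notify_execute_cb() detaches the whole execute_cb list in one atomic step (llist_del_all) and then runs each callback, either deferred through irq_work_queue() or immediately via irq_work_imm. A hedged sketch of that drain-and-dispatch shape follows, with a hand-rolled C11 atomic stack standing in for the kernel's llist and a plain function pointer in place of irq_work; all names are illustrative.

/* Sketch of the "detach the whole callback list, then run it" pattern used by
 * __notify_execute_cb(). A C11 atomic stack stands in for the kernel's llist;
 * the names are illustrative, not the driver's API.
 */
#include <stdatomic.h>
#include <stddef.h>

struct execute_cb {
	struct execute_cb *next;
	void (*work)(struct execute_cb *cb);
};

static void cb_list_add(_Atomic(struct execute_cb *) *head, struct execute_cb *cb)
{
	cb->next = atomic_load(head);
	/* On failure, cb->next is refreshed with the current head; retry. */
	while (!atomic_compare_exchange_weak(head, &cb->next, cb))
		;
}

static void notify_execute_cb(_Atomic(struct execute_cb *) *head)
{
	/* Claim every pending callback in one atomic step ... */
	struct execute_cb *cb = atomic_exchange(head, NULL);

	/* ... then dispatch them without touching the shared list again. */
	while (cb) {
		struct execute_cb *next = cb->next;

		cb->work(cb);
		cb = next;
	}
}
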
220 static void __i915_request_fill(struct i915_request *rq, u8 val)
222 void *vaddr = rq->ring->vaddr;
225 head = rq->infix;
226 if (rq->postfix < head) {
227 memset(vaddr + head, val, rq->ring->size - head);
230 memset(vaddr + head, val, rq->postfix - head);
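__i915_request_fill() poisons the request's payload in the ring from rq->infix up to rq->postfix, splitting the memset() in two when the payload wraps past the end of the ring. A small sketch of that wrap-aware fill over a plain circular buffer, with illustrative names:

/* Sketch of the wrap-aware fill in __i915_request_fill(): poison the region
 * [head, tail) of a circular buffer, splitting the memset() when the region
 * wraps past the end of the buffer.
 */
#include <stddef.h>
#include <string.h>

static void ring_fill(void *vaddr, size_t size,
		      size_t head, size_t tail, unsigned char val)
{
	if (tail < head) {
		/* The region wraps: fill to the end, then restart at 0. */
		memset((char *)vaddr + head, val, size - head);
		head = 0;
	}
	memset((char *)vaddr + head, val, tail - head);
}
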
235 * @rq: request to inspect
244 i915_request_active_engine(struct i915_request *rq,
254 * Note that rq->engine is unstable, and so we double
257 locked = READ_ONCE(rq->engine);
259 while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
265 if (i915_request_is_active(rq)) {
266 if (!__i915_request_is_complete(rq))
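i915_request_active_engine() has to cope with rq->engine changing underneath it (a virtual request can be moved between siblings), so it reads the pointer, takes that engine's lock, and re-checks that the request still points at the locked engine, retrying otherwise. A hedged sketch of that read/lock/re-check loop, assuming pthread mutexes in place of the engine's irq-safe spinlock and illustrative type names:

/* Sketch of the "lock the engine the request currently points at, then
 * re-check" retry loop from i915_request_active_engine().
 */
#include <pthread.h>
#include <stdatomic.h>

struct engine {
	pthread_mutex_t lock;
};

struct request {
	_Atomic(struct engine *) engine; /* may be switched concurrently */
};

static struct engine *lock_request_engine(struct request *rq)
{
	struct engine *locked, *engine;

	locked = atomic_load(&rq->engine);
	pthread_mutex_lock(&locked->lock);
	/* The request may have moved between the read and the lock; if so,
	 * drop the stale lock and chase the new engine.
	 */
	while ((engine = atomic_load(&rq->engine)) != locked) {
		pthread_mutex_unlock(&locked->lock);
		locked = engine;
		pthread_mutex_lock(&locked->lock);
	}

	return locked; /* the caller unlocks once it is done inspecting rq */
}
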
276 static void __rq_init_watchdog(struct i915_request *rq)
278 rq->watchdog.timer.function = NULL;
283 struct i915_request *rq =
285 struct intel_gt *gt = rq->engine->gt;
287 if (!i915_request_completed(rq)) {
288 if (llist_add(&rq->watchdog.link, &gt->watchdog.list))
291 i915_request_put(rq);
297 static void __rq_arm_watchdog(struct i915_request *rq)
299 struct i915_request_watchdog *wdg = &rq->watchdog;
300 struct intel_context *ce = rq->context;
305 i915_request_get(rq);
316 static void __rq_cancel_watchdog(struct i915_request *rq)
318 struct i915_request_watchdog *wdg = &rq->watchdog;
321 i915_request_put(rq);
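The watchdog fragments show the request taking an extra reference when the timer is armed (__rq_arm_watchdog) and dropping it when the timer fires or is cancelled (__rq_cancel_watchdog), so the request cannot be freed while the callback might still run. A minimal sketch of that hold-a-reference-while-armed pattern with an explicit refcount; the timer machinery itself is elided and all names are illustrative.

/* Sketch of the reference held across an armed watchdog, as in
 * __rq_arm_watchdog()/__rq_cancel_watchdog(): the timer callback may run long
 * after the submitter is done with the request, so arming takes a reference
 * and both the expiry and the cancel path drop it.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct request {
	atomic_int refcount;
	bool watchdog_armed;
};

static void request_put(struct request *rq)
{
	if (atomic_fetch_sub(&rq->refcount, 1) == 1)
		free(rq); /* last reference dropped */
}

static void watchdog_arm(struct request *rq)
{
	atomic_fetch_add(&rq->refcount, 1); /* the timer now owns a reference */
	rq->watchdog_armed = true;
	/* ... start the real timer here ... */
}

static void watchdog_expired_or_cancelled(struct request *rq)
{
	if (!rq->watchdog_armed)
		return;

	rq->watchdog_armed = false;
	request_put(rq); /* release the reference taken at arm time */
}
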
356 bool i915_request_retire(struct i915_request *rq)
358 if (!__i915_request_is_complete(rq))
361 RQ_TRACE(rq, "\n");
363 GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
364 trace_i915_request_retire(rq);
365 i915_request_mark_complete(rq);
367 __rq_cancel_watchdog(rq);
378 GEM_BUG_ON(!list_is_first(&rq->link,
379 &i915_request_timeline(rq)->requests));
382 __i915_request_fill(rq, POISON_FREE);
383 rq->ring->head = rq->postfix;
385 if (!i915_request_signaled(rq)) {
386 spin_lock_irq(&rq->lock);
387 dma_fence_signal_locked(&rq->fence);
388 spin_unlock_irq(&rq->lock);
391 if (test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags))
392 intel_rps_dec_waiters(&rq->engine->gt->rps);
404 rq->engine->remove_active_request(rq);
405 GEM_BUG_ON(!llist_empty(&rq->execute_cb));
407 __list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */
409 intel_context_exit(rq->context);
410 intel_context_unpin(rq->context);
412 i915_sched_node_fini(&rq->sched);
413 i915_request_put(rq);
418 void i915_request_retire_upto(struct i915_request *rq)
420 struct intel_timeline * const tl = i915_request_timeline(rq);
423 RQ_TRACE(rq, "\n");
424 GEM_BUG_ON(!__i915_request_is_complete(rq));
429 } while (i915_request_retire(tmp) && tmp != rq);
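i915_request_retire_upto() walks the timeline's request list from the oldest entry, retiring each request in order until the (already complete) target request has itself been retired. A compressed sketch of that walk over a singly linked stand-in for the timeline list; types and helpers are illustrative.

/* Sketch of the retire-upto walk: starting from the oldest request on the
 * timeline, retire requests in order until the target (known to be complete)
 * has itself been retired.
 */
#include <stdbool.h>
#include <stddef.h>

struct request {
	struct request *next; /* older -> newer along the timeline */
	bool complete;
};

struct timeline {
	struct request *oldest;
};

/* Returns false if the request is not complete and so cannot be retired. */
static bool request_retire(struct timeline *tl, struct request *rq)
{
	if (!rq->complete)
		return false;

	tl->oldest = rq->next; /* unlink; release its resources here */
	return true;
}

static void request_retire_upto(struct timeline *tl, struct request *target)
{
	struct request *tmp;

	do {
		tmp = tl->oldest;
	} while (request_retire(tl, tmp) && tmp != target);
}
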
440 struct i915_request * const *port, *rq;
487 (rq = READ_ONCE(*port)); /* may race with promotion of pending[] */
489 if (rq->context == signal->context) {
490 inflight = i915_seqno_passed(rq->fence.seqno,
501 __await_execution(struct i915_request *rq,
514 cb->fence = &rq->submit;
552 void __i915_request_skip(struct i915_request *rq)
554 GEM_BUG_ON(!fatal_error(rq->fence.error));
556 if (rq->infix == rq->postfix)
559 RQ_TRACE(rq, "error: %d\n", rq->fence.error);
566 __i915_request_fill(rq, 0);
567 rq->infix = rq->postfix;
570 bool i915_request_set_error_once(struct i915_request *rq, int error)
576 if (i915_request_signaled(rq))
579 old = READ_ONCE(rq->fence.error);
583 } while (!try_cmpxchg(&rq->fence.error, &old, error));
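i915_request_set_error_once() publishes an error on the fence with a try_cmpxchg() loop so concurrent callers cannot clobber each other; the driver distinguishes fatal from recoverable errors via fatal_error(), which the sketch below collapses to "any recorded error wins". A minimal sketch, assuming C11 atomics in place of try_cmpxchg():

/* Sketch of the "set the error once" loop in i915_request_set_error_once(),
 * simplified so that the first recorded error is kept.
 */
#include <stdatomic.h>
#include <stdbool.h>

static bool set_error_once(atomic_int *fence_error, int error)
{
	int old = atomic_load(fence_error);

	do {
		if (old) /* an error is already recorded; keep it */
			return false;
	} while (!atomic_compare_exchange_weak(fence_error, &old, error));

	return true;
}
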
588 struct i915_request *i915_request_mark_eio(struct i915_request *rq)
590 if (__i915_request_is_complete(rq))
593 GEM_BUG_ON(i915_request_signaled(rq));
596 rq = i915_request_get(rq);
598 i915_request_set_error_once(rq, -EIO);
599 i915_request_mark_complete(rq);
601 return rq;
761 void i915_request_cancel(struct i915_request *rq, int error)
763 if (!i915_request_set_error_once(rq, error))
766 set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
768 intel_context_cancel_request(rq->context, rq);
810 struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);
817 i915_request_put(rq);
826 struct i915_request *rq, *rn;
828 list_for_each_entry_safe(rq, rn, &tl->requests, link)
829 if (!i915_request_retire(rq))
838 struct i915_request *rq;
842 rq = xchg(rsvd, NULL);
843 if (!rq) /* Use the normal failure path for one final WARN */
846 return rq;
853 rq = list_first_entry(&tl->requests, typeof(*rq), link);
854 i915_request_retire(rq);
856 rq = kmem_cache_alloc(slab_requests,
858 if (rq)
859 return rq;
862 rq = list_last_entry(&tl->requests, typeof(*rq), link);
863 cond_synchronize_rcu(rq->rcustate);
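request_alloc_slow() escalates when the fast slab allocation fails: a non-blocking caller dips into the single reserved request, while a blocking caller retires the oldest request on the timeline, retries the cheap allocation, and only then pays for a blocking allocation, waiting out the RCU grace period recorded in rq->rcustate via cond_synchronize_rcu() along the way. The sketch below compresses that escalation and elides the RCU and gfp details; every name in it is illustrative.

/* Sketch of the escalating slow path in request_alloc_slow(). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct request { int payload; };

struct timeline {
	struct request *oldest; /* NULL when the timeline is idle */
};

static void retire_oldest(struct timeline *tl)
{
	struct request *rq = tl->oldest;

	tl->oldest = NULL; /* single-entry stand-in for the list walk */
	free(rq);
}

static struct request *request_alloc_fast(void)
{
	return NULL; /* pretend the cache is still exhausted */
}

static struct request *request_alloc_slow(struct timeline *tl,
					  _Atomic(struct request *) *rsvd,
					  bool can_block)
{
	struct request *rq;

	if (!can_block) {
		/* Cannot sleep: dip into the single reserved request (which
		 * may already be gone, leaving the normal failure path).
		 */
		return atomic_exchange(rsvd, NULL);
	}

	if (tl->oldest) {
		/* Retire the oldest request to push one object back to the
		 * cache, then retry the cheap allocation.
		 */
		retire_oldest(tl);
		rq = request_alloc_fast();
		if (rq)
			return rq;
		/* In the driver, cond_synchronize_rcu(rq->rcustate) waits out
		 * a grace period here before the final attempt.
		 */
	}

	return malloc(sizeof(*rq)); /* last resort: blocking allocation */
}
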
874 struct i915_request *rq = arg;
876 spin_lock_init(&rq->lock);
877 i915_sched_node_init(&rq->sched);
878 i915_sw_fence_init(&rq->submit, submit_notify);
879 i915_sw_fence_init(&rq->semaphore, semaphore_notify);
881 clear_capture_list(rq);
882 rq->batch_res = NULL;
884 init_llist_head(&rq->execute_cb);
897 struct i915_request *rq;
935 rq = kmem_cache_alloc(slab_requests,
937 if (unlikely(!rq)) {
938 rq = request_alloc_slow(tl, &ce->engine->request_pool, gfp);
939 if (!rq) {
945 rq->context = ce;
946 rq->engine = ce->engine;
947 rq->ring = ce->ring;
948 rq->execution_mask = ce->engine->mask;
949 rq->i915 = ce->engine->i915;
951 ret = intel_timeline_get_seqno(tl, rq, &seqno);
955 dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
958 RCU_INIT_POINTER(rq->timeline, tl);
959 rq->hwsp_seqno = tl->hwsp_seqno;
960 GEM_BUG_ON(__i915_request_is_complete(rq));
962 rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
964 rq->guc_prio = GUC_PRIO_INIT;
967 i915_sw_fence_reinit(&i915_request_get(rq)->submit);
968 i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);
970 i915_sched_node_reinit(&rq->sched);
973 clear_batch_ptr(rq);
974 __rq_init_watchdog(rq);
975 assert_capture_list_is_null(rq);
976 GEM_BUG_ON(!llist_empty(&rq->execute_cb));
977 GEM_BUG_ON(rq->batch_res);
991 rq->reserved_space =
992 2 * rq->engine->emit_fini_breadcrumb_dw * sizeof(u32);
1000 rq->head = rq->ring->emit;
1002 ret = rq->engine->request_alloc(rq);
1006 rq->infix = rq->ring->emit; /* end of header; start of user payload */
1009 list_add_tail_rcu(&rq->link, &tl->requests);
1011 return rq;
1014 ce->ring->emit = rq->head;
1017 GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
1018 GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));
1021 kmem_cache_free(slab_requests, rq);
1030 struct i915_request *rq;
1038 rq = list_first_entry(&tl->requests, typeof(*rq), link);
1039 if (!list_is_last(&rq->link, &tl->requests))
1040 i915_request_retire(rq);
1043 rq = __i915_request_create(ce, GFP_KERNEL);
1045 if (IS_ERR(rq))
1049 rq->cookie = lockdep_pin_lock(&tl->mutex);
1051 return rq;
1055 return rq;
1059 i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
1064 if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline))
1115 if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
1116 err = i915_sw_fence_await_dma_fence(&rq->submit,
1125 already_busywaiting(struct i915_request *rq)
1139 return rq->sched.semaphores | READ_ONCE(rq->engine->saturated);
1334 static void mark_external(struct i915_request *rq)
1344 rq->sched.flags |= I915_SCHED_HAS_EXTERNAL_CHAIN;
1348 __i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
1350 mark_external(rq);
1351 return i915_sw_fence_await_dma_fence(&rq->submit, fence,
1352 i915_fence_context_timeout(rq->i915,
1358 i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
1364 return __i915_request_await_external(rq, fence);
1370 err = __i915_request_await_external(rq, iter);
1374 err = i915_request_await_dma_fence(rq, chain->fence);
1383 static inline bool is_parallel_rq(struct i915_request *rq)
1385 return intel_context_is_parallel(rq->context);
1388 static inline struct intel_context *request_to_parent(struct i915_request *rq)
1390 return intel_context_to_parent(rq->context);
1403 i915_request_await_execution(struct i915_request *rq,
1425 if (fence->context == rq->fence.context)
1434 if (is_same_parallel_context(rq, to_request(fence)))
1436 ret = __i915_request_await_execution(rq,
1439 ret = i915_request_await_external(rq, fence);
1500 i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
1532 if (fence->context == rq->fence.context)
1537 intel_timeline_sync_is_later(i915_request_timeline(rq),
1542 if (is_same_parallel_context(rq, to_request(fence)))
1544 ret = i915_request_await_request(rq, to_request(fence));
1546 ret = i915_request_await_external(rq, fence);
1553 intel_timeline_sync_set(i915_request_timeline(rq),
1563 * @rq: request we are wishing to use
1568 int i915_request_await_deps(struct i915_request *rq, const struct i915_deps *deps)
1573 err = i915_request_await_dma_fence(rq, deps->fences[i]);
1620 static void i915_request_await_huc(struct i915_request *rq)
1622 struct intel_huc *huc = &rq->context->engine->gt->uc.huc;
1625 if (!rcu_access_pointer(rq->context->gem_context))
1629 i915_sw_fence_await_sw_fence(&rq->submit,
1631 &rq->hucq);
1635 __i915_request_ensure_parallel_ordering(struct i915_request *rq,
1640 GEM_BUG_ON(!is_parallel_rq(rq));
1642 prev = request_to_parent(rq)->parallel.last_rq;
1645 i915_sw_fence_await_sw_fence(&rq->submit,
1647 &rq->submitq);
1649 if (rq->engine->sched_engine->schedule)
1650 __i915_sched_node_add_dependency(&rq->sched,
1652 &rq->dep,
1658 request_to_parent(rq)->parallel.last_rq = i915_request_get(rq);
1666 &rq->fence));
1670 __i915_request_ensure_ordering(struct i915_request *rq,
1675 GEM_BUG_ON(is_parallel_rq(rq));
1678 &rq->fence));
1681 bool uses_guc = intel_engine_uses_guc(rq->engine);
1683 rq->engine->mask);
1684 bool same_context = prev->context == rq->context;
1694 rq->fence.seqno));
1697 i915_sw_fence_await_sw_fence(&rq->submit,
1699 &rq->submitq);
1701 __i915_sw_fence_await_dma_fence(&rq->submit,
1703 &rq->dmaq);
1704 if (rq->engine->sched_engine->schedule)
1705 __i915_sched_node_add_dependency(&rq->sched,
1707 &rq->dep,
1719 __i915_request_add_to_timeline(struct i915_request *rq)
1721 struct intel_timeline *timeline = i915_request_timeline(rq);
1731 if (rq->engine->class == VIDEO_DECODE_CLASS)
1732 i915_request_await_huc(rq);
1764 if (likely(!is_parallel_rq(rq)))
1765 prev = __i915_request_ensure_ordering(rq, timeline);
1767 prev = __i915_request_ensure_parallel_ordering(rq, timeline);
1776 GEM_BUG_ON(timeline->seqno != rq->fence.seqno);
1786 struct i915_request *__i915_request_commit(struct i915_request *rq)
1788 struct intel_engine_cs *engine = rq->engine;
1789 struct intel_ring *ring = rq->ring;
1792 RQ_TRACE(rq, "\n");
1799 GEM_BUG_ON(rq->reserved_space > ring->space);
1800 rq->reserved_space = 0;
1801 rq->emitted_jiffies = jiffies;
1809 cs = intel_ring_begin(rq, engine->emit_fini_breadcrumb_dw);
1811 rq->postfix = intel_ring_offset(rq, cs);
1813 return __i915_request_add_to_timeline(rq);
1816 void __i915_request_queue_bh(struct i915_request *rq)
1818 i915_sw_fence_commit(&rq->semaphore);
1819 i915_sw_fence_commit(&rq->submit);
1822 void __i915_request_queue(struct i915_request *rq,
1836 if (attr && rq->engine->sched_engine->schedule)
1837 rq->engine->sched_engine->schedule(rq, attr);
1840 __i915_request_queue_bh(rq);
1844 void i915_request_add(struct i915_request *rq)
1846 struct intel_timeline * const tl = i915_request_timeline(rq);
1851 lockdep_unpin_lock(&tl->mutex, rq->cookie);
1853 trace_i915_request_add(rq);
1854 __i915_request_commit(rq);
1858 ctx = rcu_dereference(rq->context->gem_context);
1863 __i915_request_queue(rq, &attr);
1901 static bool __i915_spin_request(struct i915_request * const rq, int state)
1917 if (!i915_request_is_running(rq))
1931 timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns);
1934 if (dma_fence_is_signaled(&rq->fence))
1963 * @rq: the request to wait upon
1980 long i915_request_wait_timeout(struct i915_request *rq,
1991 if (dma_fence_is_signaled(&rq->fence))
1997 trace_i915_request_wait_begin(rq, flags);
2005 mutex_acquire(&rq->engine->gt->reset.mutex.dep_map, 0, 0, _THIS_IP_);
2031 __i915_spin_request(rq, state))
2046 if (flags & I915_WAIT_PRIORITY && !i915_request_started(rq))
2047 intel_rps_boost(rq);
2050 if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
2068 if (i915_request_is_ready(rq))
2069 __intel_engine_flush_submission(rq->engine, false);
2074 if (dma_fence_is_signaled(&rq->fence))
2092 dma_fence_remove_callback(&rq->fence, &wait.cb);
2096 mutex_release(&rq->engine->gt->reset.mutex.dep_map, _THIS_IP_);
2097 trace_i915_request_wait_end(rq);
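i915_request_wait_timeout() is a two-stage wait: if the request is already running it busy-spins for a short, per-engine-tunable window (__i915_spin_request, bounded by max_busywait_duration_ns), and only then installs a dma-fence callback and sleeps until woken, interrupted, or timed out. A hedged sketch of that spin-then-block shape, assuming a condition variable in place of the fence callback and an illustrative 5us spin budget:

/* Sketch of the two-stage wait in i915_request_wait_timeout(): spin briefly
 * in the hope the request completes at low latency, then fall back to
 * sleeping until a completion signal.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>

struct waitable_request {
	atomic_bool signaled;
	pthread_mutex_t lock;
	pthread_cond_t wake;
};

static bool spin_for_completion(struct waitable_request *rq, long budget_ns)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		if (atomic_load(&rq->signaled))
			return true;
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while ((now.tv_sec - start.tv_sec) * 1000000000L +
		 (now.tv_nsec - start.tv_nsec) < budget_ns);

	return false;
}

static void request_wait(struct waitable_request *rq)
{
	/* Stage 1: optimistic spin, bounded by a small budget. */
	if (spin_for_completion(rq, 5000))
		return;

	/* Stage 2: block until the "fence" signals completion. */
	pthread_mutex_lock(&rq->lock);
	while (!atomic_load(&rq->signaled))
		pthread_cond_wait(&rq->wake, &rq->lock);
	pthread_mutex_unlock(&rq->lock);
}

static void request_signal(struct waitable_request *rq)
{
	/* The completing side: mark the request done and wake any waiters. */
	pthread_mutex_lock(&rq->lock);
	atomic_store(&rq->signaled, true);
	pthread_cond_broadcast(&rq->wake);
	pthread_mutex_unlock(&rq->lock);
}
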
2103 * @rq: the request to wait upon
2119 long i915_request_wait(struct i915_request *rq,
2123 long ret = i915_request_wait_timeout(rq, flags, timeout);
2146 static char queue_status(const struct i915_request *rq)
2148 if (i915_request_is_active(rq))
2151 if (i915_request_is_ready(rq))
2152 return intel_engine_is_virtual(rq->engine) ? 'V' : 'R';
2157 static const char *run_status(const struct i915_request *rq)
2159 if (__i915_request_is_complete(rq))
2162 if (__i915_request_has_started(rq))
2165 if (!i915_sw_fence_signaled(&rq->semaphore))
2171 static const char *fence_status(const struct i915_request *rq)
2173 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
2176 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
2183 const struct i915_request *rq,
2187 const char *name = rq->fence.ops->get_timeline_name((struct dma_fence *)&rq->fence);
2221 x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf));
2225 queue_status(rq),
2226 rq->fence.context, rq->fence.seqno,
2227 run_status(rq),
2228 fence_status(rq),
2230 jiffies_to_msecs(jiffies - rq->emitted_jiffies),
2234 static bool engine_match_ring(struct intel_engine_cs *engine, struct i915_request *rq)
2238 return ring == i915_ggtt_offset(rq->ring->vma);
2241 static bool match_ring(struct i915_request *rq)
2247 if (!intel_engine_is_virtual(rq->engine))
2248 return engine_match_ring(rq->engine, rq);
2252 while ((engine = intel_engine_get_sibling(rq->engine, i++))) {
2253 found = engine_match_ring(engine, rq);
2261 enum i915_request_state i915_test_request_state(struct i915_request *rq)
2263 if (i915_request_completed(rq))
2266 if (!i915_request_started(rq))
2269 if (match_ring(rq))