Lines matching refs:ce (references to struct intel_context *ce in the i915 GuC submission code); each entry below is the source line number followed by the matching line.

112  * ce->guc_state.lock
113 * Protects everything under ce->guc_state. Ensures that a context is in the
121 * sched_engine->lock -> ce->guc_state.lock
122 * guc->submission_state.lock -> ce->guc_state.lock
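
The two ordering rules above make sched_engine->lock and guc->submission_state.lock the outer locks and ce->guc_state.lock the inner one. A minimal user-space sketch of that nesting discipline, using hypothetical stand-in types rather than the real i915 structures, is:

/* Minimal sketch of the documented lock ordering: the outer lock is
 * always taken before the per-context guc_state lock, never the
 * reverse. Types and names are illustrative stand-ins, not i915 code.
 */
#include <pthread.h>
#include <stdio.h>

struct fake_sched_engine { pthread_mutex_t lock; };
struct fake_context      { pthread_mutex_t guc_state_lock; int sched_state; };

static void submit(struct fake_sched_engine *se, struct fake_context *ce)
{
	/* Outer lock first ... */
	pthread_mutex_lock(&se->lock);
	/* ... then the per-context state lock. */
	pthread_mutex_lock(&ce->guc_state_lock);

	ce->sched_state |= 0x1;		/* e.g. mark enabled */

	pthread_mutex_unlock(&ce->guc_state_lock);
	pthread_mutex_unlock(&se->lock);
}

int main(void)
{
	struct fake_sched_engine se = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct fake_context ce = { .guc_state_lock = PTHREAD_MUTEX_INITIALIZER };

	submit(&se, &ce);
	printf("sched_state = 0x%x\n", ce.sched_state);
	return 0;
}

Any path that needs both locks must take them in this order; acquiring ce->guc_state.lock first would invert the documented hierarchy and risk deadlock.
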
181 static inline void init_sched_state(struct intel_context *ce)
183 lockdep_assert_held(&ce->guc_state.lock);
184 ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK;
198 static bool sched_state_is_init(struct intel_context *ce)
200 return !(ce->guc_state.sched_state & ~SCHED_STATE_VALID_INIT);
204 context_wait_for_deregister_to_register(struct intel_context *ce)
206 return ce->guc_state.sched_state &
211 set_context_wait_for_deregister_to_register(struct intel_context *ce)
213 lockdep_assert_held(&ce->guc_state.lock);
214 ce->guc_state.sched_state |=
219 clr_context_wait_for_deregister_to_register(struct intel_context *ce)
221 lockdep_assert_held(&ce->guc_state.lock);
222 ce->guc_state.sched_state &=
227 context_destroyed(struct intel_context *ce)
229 return ce->guc_state.sched_state & SCHED_STATE_DESTROYED;
233 set_context_destroyed(struct intel_context *ce)
235 lockdep_assert_held(&ce->guc_state.lock);
236 ce->guc_state.sched_state |= SCHED_STATE_DESTROYED;
240 clr_context_destroyed(struct intel_context *ce)
242 lockdep_assert_held(&ce->guc_state.lock);
243 ce->guc_state.sched_state &= ~SCHED_STATE_DESTROYED;
246 static inline bool context_pending_disable(struct intel_context *ce)
248 return ce->guc_state.sched_state & SCHED_STATE_PENDING_DISABLE;
251 static inline void set_context_pending_disable(struct intel_context *ce)
253 lockdep_assert_held(&ce->guc_state.lock);
254 ce->guc_state.sched_state |= SCHED_STATE_PENDING_DISABLE;
257 static inline void clr_context_pending_disable(struct intel_context *ce)
259 lockdep_assert_held(&ce->guc_state.lock);
260 ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_DISABLE;
263 static inline bool context_banned(struct intel_context *ce)
265 return ce->guc_state.sched_state & SCHED_STATE_BANNED;
268 static inline void set_context_banned(struct intel_context *ce)
270 lockdep_assert_held(&ce->guc_state.lock);
271 ce->guc_state.sched_state |= SCHED_STATE_BANNED;
274 static inline void clr_context_banned(struct intel_context *ce)
276 lockdep_assert_held(&ce->guc_state.lock);
277 ce->guc_state.sched_state &= ~SCHED_STATE_BANNED;
280 static inline bool context_enabled(struct intel_context *ce)
282 return ce->guc_state.sched_state & SCHED_STATE_ENABLED;
285 static inline void set_context_enabled(struct intel_context *ce)
287 lockdep_assert_held(&ce->guc_state.lock);
288 ce->guc_state.sched_state |= SCHED_STATE_ENABLED;
291 static inline void clr_context_enabled(struct intel_context *ce)
293 lockdep_assert_held(&ce->guc_state.lock);
294 ce->guc_state.sched_state &= ~SCHED_STATE_ENABLED;
297 static inline bool context_pending_enable(struct intel_context *ce)
299 return ce->guc_state.sched_state & SCHED_STATE_PENDING_ENABLE;
302 static inline void set_context_pending_enable(struct intel_context *ce)
304 lockdep_assert_held(&ce->guc_state.lock);
305 ce->guc_state.sched_state |= SCHED_STATE_PENDING_ENABLE;
308 static inline void clr_context_pending_enable(struct intel_context *ce)
310 lockdep_assert_held(&ce->guc_state.lock);
311 ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_ENABLE;
314 static inline bool context_registered(struct intel_context *ce)
316 return ce->guc_state.sched_state & SCHED_STATE_REGISTERED;
319 static inline void set_context_registered(struct intel_context *ce)
321 lockdep_assert_held(&ce->guc_state.lock);
322 ce->guc_state.sched_state |= SCHED_STATE_REGISTERED;
325 static inline void clr_context_registered(struct intel_context *ce)
327 lockdep_assert_held(&ce->guc_state.lock);
328 ce->guc_state.sched_state &= ~SCHED_STATE_REGISTERED;
331 static inline bool context_policy_required(struct intel_context *ce)
333 return ce->guc_state.sched_state & SCHED_STATE_POLICY_REQUIRED;
336 static inline void set_context_policy_required(struct intel_context *ce)
338 lockdep_assert_held(&ce->guc_state.lock);
339 ce->guc_state.sched_state |= SCHED_STATE_POLICY_REQUIRED;
342 static inline void clr_context_policy_required(struct intel_context *ce)
344 lockdep_assert_held(&ce->guc_state.lock);
345 ce->guc_state.sched_state &= ~SCHED_STATE_POLICY_REQUIRED;
348 static inline bool context_close_done(struct intel_context *ce)
350 return ce->guc_state.sched_state & SCHED_STATE_CLOSED;
353 static inline void set_context_close_done(struct intel_context *ce)
355 lockdep_assert_held(&ce->guc_state.lock);
356 ce->guc_state.sched_state |= SCHED_STATE_CLOSED;
359 static inline u32 context_blocked(struct intel_context *ce)
361 return (ce->guc_state.sched_state & SCHED_STATE_BLOCKED_MASK) >>
365 static inline void incr_context_blocked(struct intel_context *ce)
367 lockdep_assert_held(&ce->guc_state.lock);
369 ce->guc_state.sched_state += SCHED_STATE_BLOCKED;
371 GEM_BUG_ON(!context_blocked(ce)); /* Overflow check */
374 static inline void decr_context_blocked(struct intel_context *ce)
376 lockdep_assert_held(&ce->guc_state.lock);
378 GEM_BUG_ON(!context_blocked(ce)); /* Underflow check */
380 ce->guc_state.sched_state -= SCHED_STATE_BLOCKED;
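
Everything from init_sched_state() down to decr_context_blocked() operates on the single ce->guc_state.sched_state word under ce->guc_state.lock: boolean flags live in the low bits and the block depth is a counter packed above them (SCHED_STATE_BLOCKED is the counter increment, SCHED_STATE_BLOCKED_MASK/SHIFT extract it). A stand-alone model of that packing, with made-up bit positions rather than the driver's definitions, is:

/* Stand-alone model of a packed "sched_state" word: low bits are flags,
 * the bits above BLOCKED_SHIFT form a small block-depth counter.
 * Values and names are illustrative, not the i915 definitions.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define STATE_ENABLED		(1u << 0)
#define STATE_PENDING_DISABLE	(1u << 1)
#define STATE_REGISTERED	(1u << 2)
#define STATE_BLOCKED_SHIFT	4
#define STATE_BLOCKED		(1u << STATE_BLOCKED_SHIFT)
#define STATE_BLOCKED_MASK	(0xfu << STATE_BLOCKED_SHIFT)

struct ctx_state { uint32_t sched_state; };

static bool ctx_enabled(struct ctx_state *c)
{
	return c->sched_state & STATE_ENABLED;
}

static void set_ctx_enabled(struct ctx_state *c)
{
	c->sched_state |= STATE_ENABLED;
}

static void clr_ctx_enabled(struct ctx_state *c)
{
	c->sched_state &= ~STATE_ENABLED;
}

static uint32_t ctx_blocked(struct ctx_state *c)
{
	return (c->sched_state & STATE_BLOCKED_MASK) >> STATE_BLOCKED_SHIFT;
}

static void incr_ctx_blocked(struct ctx_state *c)
{
	c->sched_state += STATE_BLOCKED;
	assert(ctx_blocked(c));		/* overflow check */
}

static void decr_ctx_blocked(struct ctx_state *c)
{
	assert(ctx_blocked(c));		/* underflow check */
	c->sched_state -= STATE_BLOCKED;
}

int main(void)
{
	struct ctx_state c = { 0 };

	set_ctx_enabled(&c);
	incr_ctx_blocked(&c);
	incr_ctx_blocked(&c);
	printf("enabled=%d blocked=%u\n", ctx_enabled(&c), ctx_blocked(&c));
	decr_ctx_blocked(&c);
	clr_ctx_enabled(&c);
	printf("enabled=%d blocked=%u\n", ctx_enabled(&c), ctx_blocked(&c));
	return 0;
}
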
389 static inline bool context_guc_id_invalid(struct intel_context *ce)
391 return ce->guc_id.id == GUC_INVALID_CONTEXT_ID;
394 static inline void set_context_guc_id_invalid(struct intel_context *ce)
396 ce->guc_id.id = GUC_INVALID_CONTEXT_ID;
399 static inline struct intel_guc *ce_to_guc(struct intel_context *ce)
401 return &ce->engine->gt->uc.guc;
449 static u32 __get_parent_scratch_offset(struct intel_context *ce)
451 GEM_BUG_ON(!ce->parallel.guc.parent_page);
453 return ce->parallel.guc.parent_page * PAGE_SIZE;
456 static u32 __get_wq_offset(struct intel_context *ce)
460 return __get_parent_scratch_offset(ce) + WQ_OFFSET;
464 __get_parent_scratch(struct intel_context *ce)
471 * parallel.guc.parent_page is the offset into ce->state while
472 * ce->lrc_reg_state is ce->state + LRC_STATE_OFFSET.
475 (ce->lrc_reg_state +
476 ((__get_parent_scratch_offset(ce) -
481 __get_process_desc_v69(struct intel_context *ce)
483 struct parent_scratch *ps = __get_parent_scratch(ce);
489 __get_wq_desc_v70(struct intel_context *ce)
491 struct parent_scratch *ps = __get_parent_scratch(ce);
496 static u32 *get_wq_pointer(struct intel_context *ce, u32 wqi_size)
504 CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE)
506 ce->parallel.guc.wqi_head = READ_ONCE(*ce->parallel.guc.wq_head);
513 return &__get_parent_scratch(ce)->wq[ce->parallel.guc.wqi_tail / sizeof(u32)];
518 struct intel_context *ce = xa_load(&guc->context_lookup, id);
522 return ce;
580 struct intel_context *ce)
589 __xa_store(&guc->context_lookup, id, ce, GFP_ATOMIC);
693 static int guc_context_policy_init_v70(struct intel_context *ce, bool loop);
694 static int try_context_registration(struct intel_context *ce, bool loop);
699 struct intel_context *ce = request_to_scheduling_context(rq);
711 if (unlikely(!intel_context_is_schedulable(ce))) {
713 intel_engine_signal_breadcrumbs(ce->engine);
717 GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
718 GEM_BUG_ON(context_guc_id_invalid(ce));
720 if (context_policy_required(ce)) {
721 err = guc_context_policy_init_v70(ce, false);
726 spin_lock(&ce->guc_state.lock);
733 if (unlikely(context_blocked(ce) && !intel_context_is_parent(ce)))
736 enabled = context_enabled(ce) || context_blocked(ce);
740 action[len++] = ce->guc_id.id;
742 set_context_pending_enable(ce);
743 intel_context_get(ce);
747 action[len++] = ce->guc_id.id;
752 trace_intel_context_sched_enable(ce);
754 set_context_enabled(ce);
763 if (intel_context_is_parent(ce)) {
768 clr_context_pending_enable(ce);
769 intel_context_put(ce);
775 spin_unlock(&ce->guc_state.lock);
814 static u32 wq_space_until_wrap(struct intel_context *ce)
816 return (WQ_SIZE - ce->parallel.guc.wqi_tail);
819 static void write_wqi(struct intel_context *ce, u32 wqi_size)
826 intel_guc_write_barrier(ce_to_guc(ce));
828 ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) &
830 WRITE_ONCE(*ce->parallel.guc.wq_tail, ce->parallel.guc.wqi_tail);
833 static int guc_wq_noop_append(struct intel_context *ce)
835 u32 *wqi = get_wq_pointer(ce, wq_space_until_wrap(ce));
836 u32 len_dw = wq_space_until_wrap(ce) / sizeof(u32) - 1;
845 ce->parallel.guc.wqi_tail = 0;
852 struct intel_context *ce = request_to_scheduling_context(rq);
854 unsigned int wqi_size = (ce->parallel.number_children + 4) *
861 GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
862 GEM_BUG_ON(context_guc_id_invalid(ce));
863 GEM_BUG_ON(context_wait_for_deregister_to_register(ce));
864 GEM_BUG_ON(!ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id));
867 if (wqi_size > wq_space_until_wrap(ce)) {
868 ret = guc_wq_noop_append(ce);
873 wqi = get_wq_pointer(ce, wqi_size);
881 *wqi++ = ce->lrc.lrca;
882 *wqi++ = FIELD_PREP(WQ_GUC_ID_MASK, ce->guc_id.id) |
883 FIELD_PREP(WQ_RING_TAIL_MASK, ce->ring->tail / sizeof(u64));
885 for_each_child(ce, child)
888 write_wqi(ce, wqi_size);
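
get_wq_pointer(), wq_space_until_wrap(), guc_wq_noop_append() and write_wqi() above manage the parallel-submission work queue as a fixed, power-of-two ring: free space is computed CIRC_SPACE-style from the firmware-owned head and the driver-owned tail, an item is never allowed to wrap (the remainder of the ring is padded with a noop instead), and the new tail is masked by WQ_SIZE - 1. A self-contained sketch of that scheme, with an invented WQ_SIZE and item layout rather than the GuC ABI, is:

/* User-space sketch of the work-queue handling above: a power-of-two
 * ring of u32s where an item never wraps; when it would, the remaining
 * space is padded and the tail restarts at 0. WQ_SIZE and the item
 * contents here are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WQ_SIZE		256			/* bytes, power of two */
#define WQ_WORDS	(WQ_SIZE / 4)
#define CIRC_SPACE(t, h, s)	(((h) - (t) - 1) & ((s) - 1))

struct wq {
	uint32_t ring[WQ_WORDS];
	uint32_t head;			/* bytes, consumed by "firmware" */
	uint32_t tail;			/* bytes, produced by "driver" */
};

static uint32_t wq_space_until_wrap(const struct wq *wq)
{
	return WQ_SIZE - wq->tail;
}

static int wq_append(struct wq *wq, const uint32_t *item, uint32_t size)
{
	if (size > wq_space_until_wrap(wq)) {
		/* Not enough room before the end: pad with a noop and wrap. */
		if (CIRC_SPACE(wq->tail, wq->head, WQ_SIZE) <
		    wq_space_until_wrap(wq) + size)
			return -1;	/* not enough room yet */
		memset(&wq->ring[wq->tail / 4], 0, wq_space_until_wrap(wq));
		wq->tail = 0;
	}

	if (CIRC_SPACE(wq->tail, wq->head, WQ_SIZE) < size)
		return -1;

	memcpy(&wq->ring[wq->tail / 4], item, size);
	wq->tail = (wq->tail + size) & (WQ_SIZE - 1);
	return 0;
}

int main(void)
{
	struct wq wq = { .head = 0, .tail = 0 };
	uint32_t item[4] = { 0x1, 0x2, 0x3, 0x4 };

	for (int i = 0; i < 10; i++)
		if (wq_append(&wq, item, sizeof(item)))
			printf("queue full at item %d\n", i);
	printf("tail=%u head=%u\n", wq.tail, wq.head);
	return 0;
}
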
896 struct intel_context *ce = request_to_scheduling_context(rq);
899 if (unlikely(!intel_context_is_schedulable(ce)))
913 struct intel_context *ce = request_to_scheduling_context(rq);
924 !intel_context_is_schedulable(ce);
990 struct intel_context *ce = request_to_scheduling_context(last);
992 if (unlikely(!ctx_id_mapped(guc, ce->guc_id.id) &&
993 intel_context_is_schedulable(ce))) {
994 ret = try_context_registration(ce, false);
1071 static void __guc_context_destroy(struct intel_context *ce);
1072 static void release_guc_id(struct intel_guc *guc, struct intel_context *ce);
1073 static void guc_signal_context_fence(struct intel_context *ce);
1074 static void guc_cancel_context_requests(struct intel_context *ce);
1075 static void guc_blocked_fence_complete(struct intel_context *ce);
1079 struct intel_context *ce;
1084 xa_for_each(&guc->context_lookup, index, ce) {
1090 bool do_put = kref_get_unless_zero(&ce->ref);
1094 if (test_bit(CONTEXT_GUC_INIT, &ce->flags) &&
1095 (cancel_delayed_work(&ce->guc_state.sched_disable_delay_work))) {
1097 intel_context_sched_disable_unpin(ce);
1100 spin_lock(&ce->guc_state.lock);
1109 destroyed = context_destroyed(ce);
1110 pending_enable = context_pending_enable(ce);
1111 pending_disable = context_pending_disable(ce);
1112 deregister = context_wait_for_deregister_to_register(ce);
1113 banned = context_banned(ce);
1114 init_sched_state(ce);
1116 spin_unlock(&ce->guc_state.lock);
1121 guc_signal_context_fence(ce);
1124 release_guc_id(guc, ce);
1125 __guc_context_destroy(ce);
1128 intel_context_put(ce);
1133 guc_signal_context_fence(ce);
1135 guc_cancel_context_requests(ce);
1136 intel_engine_signal_breadcrumbs(ce->engine);
1138 intel_context_sched_disable_unpin(ce);
1141 spin_lock(&ce->guc_state.lock);
1142 guc_blocked_fence_complete(ce);
1143 spin_unlock(&ce->guc_state.lock);
1145 intel_context_put(ce);
1149 intel_context_put(ce);
1460 static void __guc_context_update_stats(struct intel_context *ce)
1462 struct intel_guc *guc = ce_to_guc(ce);
1466 lrc_update_runtime(ce);
1470 static void guc_context_update_stats(struct intel_context *ce)
1472 if (!intel_context_pin_if_active(ce))
1475 __guc_context_update_stats(ce);
1476 intel_context_unpin(ce);
1485 struct intel_context *ce;
1533 xa_for_each(&guc->context_lookup, index, ce)
1534 guc_context_update_stats(ce);
1713 __context_to_physical_engine(struct intel_context *ce)
1715 struct intel_engine_cs *engine = ce->engine;
1723 static void guc_reset_state(struct intel_context *ce, u32 head, bool scrub)
1725 struct intel_engine_cs *engine = __context_to_physical_engine(ce);
1727 if (!intel_context_is_schedulable(ce))
1730 GEM_BUG_ON(!intel_context_is_pinned(ce));
1741 lrc_init_regs(ce, engine, true);
1744 lrc_update_regs(ce, engine, head);
1768 __unwind_incomplete_requests(struct intel_context *ce)
1774 ce->engine->sched_engine;
1778 spin_lock(&ce->guc_state.lock);
1780 &ce->guc_state.requests,
1799 spin_unlock(&ce->guc_state.lock);
1803 static void __guc_reset_context(struct intel_context *ce, intel_engine_mask_t stalled)
1809 int i, number_children = ce->parallel.number_children;
1810 struct intel_context *parent = ce;
1812 GEM_BUG_ON(intel_context_is_child(ce));
1814 intel_context_get(ce);
1821 spin_lock_irqsave(&ce->guc_state.lock, flags);
1822 clr_context_enabled(ce);
1823 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
1830 if (!intel_context_is_pinned(ce))
1834 rq = intel_context_get_active_request(ce);
1836 head = ce->ring->tail;
1841 guilty = stalled & ce->engine->mask;
1843 GEM_BUG_ON(i915_active_is_idle(&ce->active));
1844 head = intel_ring_wrap(ce->ring, rq->head);
1849 guc_reset_state(ce, head, guilty);
1852 ce = list_next_entry(ce, parallel.child_link);
1875 struct intel_context *ce;
1885 xa_for_each(&guc->context_lookup, index, ce) {
1886 if (!kref_get_unless_zero(&ce->ref))
1891 if (intel_context_is_pinned(ce) &&
1892 !intel_context_is_child(ce))
1893 __guc_reset_context(ce, stalled);
1895 intel_context_put(ce);
1905 static void guc_cancel_context_requests(struct intel_context *ce)
1907 struct i915_sched_engine *sched_engine = ce_to_guc(ce)->sched_engine;
1913 spin_lock(&ce->guc_state.lock);
1914 list_for_each_entry(rq, &ce->guc_state.requests, sched.link)
1916 spin_unlock(&ce->guc_state.lock);
1973 struct intel_context *ce;
1978 xa_for_each(&guc->context_lookup, index, ce) {
1979 if (!kref_get_unless_zero(&ce->ref))
1984 if (intel_context_is_pinned(ce) &&
1985 !intel_context_is_child(ce))
1986 guc_cancel_context_requests(ce);
1988 intel_context_put(ce);
2182 struct intel_context *ce = request_to_scheduling_context(rq);
2186 !ctx_id_mapped(guc, ce->guc_id.id);
2206 static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
2210 GEM_BUG_ON(intel_context_is_child(ce));
2212 if (intel_context_is_parent(ce))
2215 order_base_2(ce->parallel.number_children
2226 if (!intel_context_is_parent(ce))
2229 ce->guc_id.id = ret;
2233 static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
2235 GEM_BUG_ON(intel_context_is_child(ce));
2237 if (!context_guc_id_invalid(ce)) {
2238 if (intel_context_is_parent(ce)) {
2240 ce->guc_id.id,
2241 order_base_2(ce->parallel.number_children
2246 ce->guc_id.id);
2248 clr_ctx_id_mapping(guc, ce->guc_id.id);
2249 set_context_guc_id_invalid(ce);
2251 if (!list_empty(&ce->guc_id.link))
2252 list_del_init(&ce->guc_id.link);
2255 static void release_guc_id(struct intel_guc *guc, struct intel_context *ce)
2260 __release_guc_id(guc, ce);
2264 static int steal_guc_id(struct intel_guc *guc, struct intel_context *ce)
2269 GEM_BUG_ON(intel_context_is_child(ce));
2270 GEM_BUG_ON(intel_context_is_parent(ce));
2283 ce->guc_id.id = cn->guc_id.id;
2301 static int assign_guc_id(struct intel_guc *guc, struct intel_context *ce)
2306 GEM_BUG_ON(intel_context_is_child(ce));
2308 ret = new_guc_id(guc, ce);
2310 if (intel_context_is_parent(ce))
2313 ret = steal_guc_id(guc, ce);
2318 if (intel_context_is_parent(ce)) {
2322 for_each_child(ce, child)
2323 child->guc_id.id = ce->guc_id.id + i++;
2330 static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
2335 GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
2340 might_lock(&ce->guc_state.lock);
2342 if (context_guc_id_invalid(ce)) {
2343 ret = assign_guc_id(guc, ce);
2348 if (!list_empty(&ce->guc_id.link))
2349 list_del_init(&ce->guc_id.link);
2350 atomic_inc(&ce->guc_id.ref);
2366 ce->engine->props.timeslice_duration_ms <<
2380 static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
2384 GEM_BUG_ON(atomic_read(&ce->guc_id.ref) < 0);
2385 GEM_BUG_ON(intel_context_is_child(ce));
2387 if (unlikely(context_guc_id_invalid(ce) ||
2388 intel_context_is_parent(ce)))
2392 if (!context_guc_id_invalid(ce) && list_empty(&ce->guc_id.link) &&
2393 !atomic_read(&ce->guc_id.ref))
2394 list_add_tail(&ce->guc_id.link,
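
pin_guc_id() and unpin_guc_id() above implement guc_id recycling: a context keeps its id while guc_id.ref is non-zero; on the last unpin the id is not released but the context is parked on submission_state.guc_id_list, from where steal_guc_id() can take the id for another context once new_guc_id() can no longer hand out fresh ones. A simplified, lock-free model of that lifecycle (invented names, a plain singly-linked idle list) is:

/* Stand-alone model of guc_id recycling: an id stays with its context
 * while pinned (ref > 0); at ref == 0 the context parks on an idle list
 * and its id may be stolen when no fresh ids are left. Illustrative only.
 */
#include <stdio.h>

#define NUM_IDS		4
#define INVALID_ID	-1

struct ctx {
	int id;
	int ref;
	struct ctx *next_idle;
};

static int next_fresh_id;
static struct ctx *idle_head;		/* contexts with an id but ref == 0 */

static void idle_list_del(struct ctx *c)
{
	struct ctx **p;

	for (p = &idle_head; *p; p = &(*p)->next_idle) {
		if (*p == c) {
			*p = c->next_idle;
			return;
		}
	}
}

static int pin_id(struct ctx *c)
{
	if (c->id == INVALID_ID) {
		if (next_fresh_id < NUM_IDS) {
			c->id = next_fresh_id++;	/* brand new id */
		} else if (idle_head) {
			struct ctx *victim = idle_head;	/* steal an idle id */

			idle_list_del(victim);
			c->id = victim->id;
			victim->id = INVALID_ID;
		} else {
			return -1;			/* all ids are pinned */
		}
	} else {
		idle_list_del(c);	/* re-pin: no longer stealable */
	}
	c->ref++;
	return 0;
}

static void unpin_id(struct ctx *c)
{
	if (--c->ref == 0) {		/* keep the id but allow stealing */
		c->next_idle = idle_head;
		idle_head = c;
	}
}

int main(void)
{
	struct ctx a = { INVALID_ID }, b = { INVALID_ID };

	pin_id(&a);
	unpin_id(&a);			/* a parks its id */
	next_fresh_id = NUM_IDS;	/* pretend the fresh pool ran out */
	pin_id(&b);			/* b steals the id a parked */
	printf("a.id=%d b.id=%d\n", a.id, b.id);
	return 0;
}
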
2400 struct intel_context *ce,
2409 GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
2413 action[len++] = ce->parallel.number_children + 1;
2415 for_each_child(ce, child) {
2424 struct intel_context *ce,
2433 GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
2445 action[len++] = ce->parallel.number_children + 1;
2450 for_each_child(ce, child) {
2504 static void prepare_context_registration_info_v69(struct intel_context *ce);
2505 static void prepare_context_registration_info_v70(struct intel_context *ce,
2509 register_context_v69(struct intel_guc *guc, struct intel_context *ce, bool loop)
2512 ce->guc_id.id * sizeof(struct guc_lrc_desc_v69);
2514 prepare_context_registration_info_v69(ce);
2516 if (intel_context_is_parent(ce))
2517 return __guc_action_register_multi_lrc_v69(guc, ce, ce->guc_id.id,
2520 return __guc_action_register_context_v69(guc, ce->guc_id.id,
2525 register_context_v70(struct intel_guc *guc, struct intel_context *ce, bool loop)
2529 prepare_context_registration_info_v70(ce, &info);
2531 if (intel_context_is_parent(ce))
2532 return __guc_action_register_multi_lrc_v70(guc, ce, &info, loop);
2537 static int register_context(struct intel_context *ce, bool loop)
2539 struct intel_guc *guc = ce_to_guc(ce);
2542 GEM_BUG_ON(intel_context_is_child(ce));
2543 trace_intel_context_register(ce);
2546 ret = register_context_v70(guc, ce, loop);
2548 ret = register_context_v69(guc, ce, loop);
2553 spin_lock_irqsave(&ce->guc_state.lock, flags);
2554 set_context_registered(ce);
2555 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2558 guc_context_policy_init_v70(ce, loop);
2577 static int deregister_context(struct intel_context *ce, u32 guc_id)
2579 struct intel_guc *guc = ce_to_guc(ce);
2581 GEM_BUG_ON(intel_context_is_child(ce));
2582 trace_intel_context_deregister(ce);
2587 static inline void clear_children_join_go_memory(struct intel_context *ce)
2589 struct parent_scratch *ps = __get_parent_scratch(ce);
2593 for (i = 0; i < ce->parallel.number_children + 1; ++i)
2597 static inline u32 get_children_go_value(struct intel_context *ce)
2599 return __get_parent_scratch(ce)->go.semaphore;
2602 static inline u32 get_children_join_value(struct intel_context *ce,
2605 return __get_parent_scratch(ce)->join[child_index].semaphore;
2655 static int guc_context_policy_init_v70(struct intel_context *ce, bool loop)
2657 struct intel_engine_cs *engine = ce->engine;
2673 __guc_context_policy_start_klv(&policy, ce->guc_id.id);
2675 __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
2684 spin_lock_irqsave(&ce->guc_state.lock, flags);
2686 set_context_policy_required(ce);
2688 clr_context_policy_required(ce);
2689 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2731 static void prepare_context_registration_info_v69(struct intel_context *ce)
2733 struct intel_engine_cs *engine = ce->engine;
2735 u32 ctx_id = ce->guc_id.id;
2746 i915_gem_object_is_lmem(ce->ring->vma->obj));
2752 desc->hw_context_desc = ce->lrc.lrca;
2753 desc->priority = ce->guc_state.prio;
2761 if (intel_context_is_parent(ce)) {
2764 ce->parallel.guc.wqi_tail = 0;
2765 ce->parallel.guc.wqi_head = 0;
2767 desc->process_desc = i915_ggtt_offset(ce->state) +
2768 __get_parent_scratch_offset(ce);
2769 desc->wq_addr = i915_ggtt_offset(ce->state) +
2770 __get_wq_offset(ce);
2773 pdesc = __get_process_desc_v69(ce);
2775 pdesc->stage_id = ce->guc_id.id;
2780 ce->parallel.guc.wq_head = &pdesc->head;
2781 ce->parallel.guc.wq_tail = &pdesc->tail;
2782 ce->parallel.guc.wq_status = &pdesc->wq_status;
2784 for_each_child(ce, child) {
2790 desc->priority = ce->guc_state.prio;
2795 clear_children_join_go_memory(ce);
2799 static void prepare_context_registration_info_v70(struct intel_context *ce,
2802 struct intel_engine_cs *engine = ce->engine;
2804 u32 ctx_id = ce->guc_id.id;
2813 i915_gem_object_is_lmem(ce->ring->vma->obj));
2823 info->hwlrca_lo = lower_32_bits(ce->lrc.lrca);
2824 info->hwlrca_hi = upper_32_bits(ce->lrc.lrca);
2826 info->hwlrca_lo |= map_guc_prio_to_lrc_desc_prio(ce->guc_state.prio);
2833 if (intel_context_is_parent(ce)) {
2837 ce->parallel.guc.wqi_tail = 0;
2838 ce->parallel.guc.wqi_head = 0;
2840 wq_desc_offset = i915_ggtt_offset(ce->state) +
2841 __get_parent_scratch_offset(ce);
2842 wq_base_offset = i915_ggtt_offset(ce->state) +
2843 __get_wq_offset(ce);
2850 wq_desc = __get_wq_desc_v70(ce);
2854 ce->parallel.guc.wq_head = &wq_desc->head;
2855 ce->parallel.guc.wq_tail = &wq_desc->tail;
2856 ce->parallel.guc.wq_status = &wq_desc->wq_status;
2858 clear_children_join_go_memory(ce);
2862 static int try_context_registration(struct intel_context *ce, bool loop)
2864 struct intel_engine_cs *engine = ce->engine;
2868 u32 ctx_id = ce->guc_id.id;
2872 GEM_BUG_ON(!sched_state_is_init(ce));
2877 set_ctx_id_mapping(guc, ctx_id, ce);
2891 trace_intel_context_steal_guc_id(ce);
2895 spin_lock_irqsave(&ce->guc_state.lock, flags);
2898 set_context_wait_for_deregister_to_register(ce);
2899 intel_context_get(ce);
2901 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
2908 * If stealing the guc_id, this ce has the same guc_id as the
2912 ret = deregister_context(ce, ce->guc_id.id);
2917 ret = register_context(ce, loop);
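
try_context_registration() has to handle a stolen guc_id whose previous owner is still registered with the firmware: it sets the wait-for-deregister flag, takes a reference and sends a deregister for the old mapping, and the actual register_context() call is only issued from the deregister-done G2H handler (see the lines around source line 4993 below). A toy, single-threaded model of that handshake, with the firmware replaced by a direct callback and all names invented, is:

/* Toy model of the deregister-then-register handshake: if the id being
 * taken over is still known to the "firmware", the new owner waits for
 * a deregister-done notification before registering. Illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

struct ctx {
	int id;
	bool wait_for_dereg;
	bool registered;
};

struct fw {
	struct ctx *owner[8];		/* id -> currently registered context */
};

static void fw_register(struct fw *fw, struct ctx *c)
{
	fw->owner[c->id] = c;
	c->registered = true;
}

/* The "G2H" completion for a deregister request. */
static void deregister_done(struct fw *fw, struct ctx *c)
{
	if (c->wait_for_dereg) {
		c->wait_for_dereg = false;
		fw_register(fw, c);	/* now safe to register the new owner */
	}
}

static void fw_deregister(struct fw *fw, int id, struct ctx *next_owner)
{
	fw->owner[id]->registered = false;
	fw->owner[id] = NULL;
	deregister_done(fw, next_owner);	/* reply arrives immediately here */
}

static void try_registration(struct fw *fw, struct ctx *c)
{
	if (fw->owner[c->id] && fw->owner[c->id] != c) {
		/* Stale mapping: request a deregister and wait for the reply. */
		c->wait_for_dereg = true;
		fw_deregister(fw, c->id, c);
	} else {
		fw_register(fw, c);
	}
}

int main(void)
{
	struct fw fw = { 0 };
	struct ctx old = { .id = 3 }, takeover = { .id = 3 };

	fw_register(&fw, &old);		/* old owner still holds id 3 */
	try_registration(&fw, &takeover);
	printf("old registered=%d takeover registered=%d\n",
	       old.registered, takeover.registered);
	return 0;
}
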
2929 static int __guc_context_pre_pin(struct intel_context *ce,
2934 return lrc_pre_pin(ce, engine, ww, vaddr);
2937 static int __guc_context_pin(struct intel_context *ce,
2941 if (i915_ggtt_offset(ce->state) !=
2942 (ce->lrc.lrca & CTX_GTT_ADDRESS_MASK))
2943 set_bit(CONTEXT_LRCA_DIRTY, &ce->flags);
2950 return lrc_pin(ce, engine, vaddr);
2953 static int guc_context_pre_pin(struct intel_context *ce,
2957 return __guc_context_pre_pin(ce, ce->engine, ww, vaddr);
2960 static int guc_context_pin(struct intel_context *ce, void *vaddr)
2962 int ret = __guc_context_pin(ce, ce->engine, vaddr);
2964 if (likely(!ret && !intel_context_is_barrier(ce)))
2965 intel_engine_pm_get(ce->engine);
2970 static void guc_context_unpin(struct intel_context *ce)
2972 struct intel_guc *guc = ce_to_guc(ce);
2974 __guc_context_update_stats(ce);
2975 unpin_guc_id(guc, ce);
2976 lrc_unpin(ce);
2978 if (likely(!intel_context_is_barrier(ce)))
2979 intel_engine_pm_put_async(ce->engine);
2982 static void guc_context_post_unpin(struct intel_context *ce)
2984 lrc_post_unpin(ce);
2988 struct intel_context *ce)
2992 ce->guc_id.id,
2996 trace_intel_context_sched_enable(ce);
3003 struct intel_context *ce,
3008 guc_id, /* ce->guc_id.id not stable */
3014 GEM_BUG_ON(intel_context_is_child(ce));
3015 trace_intel_context_sched_disable(ce);
3021 static void guc_blocked_fence_complete(struct intel_context *ce)
3023 lockdep_assert_held(&ce->guc_state.lock);
3025 if (!i915_sw_fence_done(&ce->guc_state.blocked))
3026 i915_sw_fence_complete(&ce->guc_state.blocked);
3029 static void guc_blocked_fence_reinit(struct intel_context *ce)
3031 lockdep_assert_held(&ce->guc_state.lock);
3032 GEM_BUG_ON(!i915_sw_fence_done(&ce->guc_state.blocked));
3039 i915_sw_fence_fini(&ce->guc_state.blocked);
3040 i915_sw_fence_reinit(&ce->guc_state.blocked);
3041 i915_sw_fence_await(&ce->guc_state.blocked);
3042 i915_sw_fence_commit(&ce->guc_state.blocked);
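
guc_blocked_fence_complete() and guc_blocked_fence_reinit() above keep ce->guc_state.blocked signalled by default; blocking re-arms it and the schedule-disable acknowledgement completes it, so waiters only sleep while a disable is actually outstanding. A tiny pthread-based model of that re-armable completion (illustrative only) is:

/* Model of a re-armed "blocked" fence: signalled by default, re-armed
 * when a block is issued, completed when the disable ack arrives.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool blocked_done = true;	/* signalled by default */

static void fence_reinit(void)
{
	pthread_mutex_lock(&lock);
	blocked_done = false;		/* re-arm */
	pthread_mutex_unlock(&lock);
}

static void fence_complete(void)
{
	pthread_mutex_lock(&lock);
	if (!blocked_done) {
		blocked_done = true;
		pthread_cond_broadcast(&cond);
	}
	pthread_mutex_unlock(&lock);
}

static void fence_wait(void)
{
	pthread_mutex_lock(&lock);
	while (!blocked_done)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	fence_wait();		/* returns immediately: already signalled */
	fence_reinit();
	fence_complete();	/* e.g. the disable ack arrived */
	fence_wait();
	printf("unblocked\n");
	return 0;
}
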
3045 static u16 prep_context_pending_disable(struct intel_context *ce)
3047 lockdep_assert_held(&ce->guc_state.lock);
3049 set_context_pending_disable(ce);
3050 clr_context_enabled(ce);
3051 guc_blocked_fence_reinit(ce);
3052 intel_context_get(ce);
3054 return ce->guc_id.id;
3057 static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
3059 struct intel_guc *guc = ce_to_guc(ce);
3061 struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
3066 GEM_BUG_ON(intel_context_is_child(ce));
3068 spin_lock_irqsave(&ce->guc_state.lock, flags);
3070 incr_context_blocked(ce);
3072 enabled = context_enabled(ce);
3075 clr_context_enabled(ce);
3076 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3077 return &ce->guc_state.blocked;
3084 atomic_add(2, &ce->pin_count);
3086 guc_id = prep_context_pending_disable(ce);
3088 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3091 __guc_context_sched_disable(guc, ce, guc_id);
3093 return &ce->guc_state.blocked;
3103 static bool context_cant_unblock(struct intel_context *ce)
3105 lockdep_assert_held(&ce->guc_state.lock);
3107 return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) ||
3108 context_guc_id_invalid(ce) ||
3109 !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id) ||
3110 !intel_context_is_pinned(ce);
3113 static void guc_context_unblock(struct intel_context *ce)
3115 struct intel_guc *guc = ce_to_guc(ce);
3117 struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
3121 GEM_BUG_ON(context_enabled(ce));
3122 GEM_BUG_ON(intel_context_is_child(ce));
3124 spin_lock_irqsave(&ce->guc_state.lock, flags);
3127 context_cant_unblock(ce))) {
3131 set_context_pending_enable(ce);
3132 set_context_enabled(ce);
3133 intel_context_get(ce);
3136 decr_context_blocked(ce);
3138 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3142 __guc_context_sched_enable(guc, ce);
3146 static void guc_context_cancel_request(struct intel_context *ce,
3155 intel_context_get(ce);
3160 guc_reset_state(ce, intel_ring_wrap(ce->ring, rq->head),
3165 intel_context_put(ce);
3191 guc_context_revoke(struct intel_context *ce, struct i915_request *rq,
3194 struct intel_guc *guc = ce_to_guc(ce);
3196 &ce->engine->gt->i915->runtime_pm;
3200 GEM_BUG_ON(intel_context_is_child(ce));
3204 spin_lock_irqsave(&ce->guc_state.lock, flags);
3205 set_context_banned(ce);
3208 (!context_enabled(ce) && !context_pending_disable(ce))) {
3209 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3211 guc_cancel_context_requests(ce);
3212 intel_engine_signal_breadcrumbs(ce->engine);
3213 } else if (!context_pending_disable(ce)) {
3220 atomic_add(2, &ce->pin_count);
3222 guc_id = prep_context_pending_disable(ce);
3223 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3233 __guc_context_sched_disable(guc, ce, guc_id);
3236 if (!context_guc_id_invalid(ce))
3239 ce->guc_id.id,
3241 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3245 static void do_sched_disable(struct intel_guc *guc, struct intel_context *ce,
3247 __releases(ce->guc_state.lock)
3249 struct intel_runtime_pm *runtime_pm = &ce->engine->gt->i915->runtime_pm;
3253 lockdep_assert_held(&ce->guc_state.lock);
3254 guc_id = prep_context_pending_disable(ce);
3256 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3259 __guc_context_sched_disable(guc, ce, guc_id);
3263 struct intel_context *ce)
3265 lockdep_assert_held(&ce->guc_state.lock);
3266 GEM_BUG_ON(intel_context_is_child(ce));
3268 if (submission_disabled(guc) || context_guc_id_invalid(ce) ||
3269 !ctx_id_mapped(guc, ce->guc_id.id)) {
3270 clr_context_enabled(ce);
3274 return !context_enabled(ce);
3279 struct intel_context *ce =
3280 container_of(wrk, typeof(*ce), guc_state.sched_disable_delay_work.work);
3281 struct intel_guc *guc = ce_to_guc(ce);
3284 spin_lock_irqsave(&ce->guc_state.lock, flags);
3286 if (bypass_sched_disable(guc, ce)) {
3287 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3288 intel_context_sched_disable_unpin(ce);
3290 do_sched_disable(guc, ce, flags);
3294 static bool guc_id_pressure(struct intel_guc *guc, struct intel_context *ce)
3300 if (intel_context_is_parent(ce))
3310 static void guc_context_sched_disable(struct intel_context *ce)
3312 struct intel_guc *guc = ce_to_guc(ce);
3316 spin_lock_irqsave(&ce->guc_state.lock, flags);
3318 if (bypass_sched_disable(guc, ce)) {
3319 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3320 intel_context_sched_disable_unpin(ce);
3321 } else if (!intel_context_is_closed(ce) && !guc_id_pressure(guc, ce) &&
3323 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3325 &ce->guc_state.sched_disable_delay_work,
3328 do_sched_disable(guc, ce, flags);
3332 static void guc_context_close(struct intel_context *ce)
3336 if (test_bit(CONTEXT_GUC_INIT, &ce->flags) &&
3337 cancel_delayed_work(&ce->guc_state.sched_disable_delay_work))
3338 __delay_sched_disable(&ce->guc_state.sched_disable_delay_work.work);
3340 spin_lock_irqsave(&ce->guc_state.lock, flags);
3341 set_context_close_done(ce);
3342 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3345 static inline int guc_lrc_desc_unpin(struct intel_context *ce)
3347 struct intel_guc *guc = ce_to_guc(ce);
3354 GEM_BUG_ON(!ctx_id_mapped(guc, ce->guc_id.id));
3355 GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id));
3356 GEM_BUG_ON(context_enabled(ce));
3359 spin_lock_irqsave(&ce->guc_state.lock, flags);
3367 set_context_destroyed(ce);
3368 clr_context_registered(ce);
3370 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3373 release_guc_id(guc, ce);
3374 __guc_context_destroy(ce);
3383 ret = deregister_context(ce, ce->guc_id.id);
3385 spin_lock(&ce->guc_state.lock);
3386 set_context_registered(ce);
3387 clr_context_destroyed(ce);
3388 spin_unlock(&ce->guc_state.lock);
3399 static void __guc_context_destroy(struct intel_context *ce)
3401 GEM_BUG_ON(ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] ||
3402 ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
3403 ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
3404 ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
3406 lrc_fini(ce);
3407 intel_context_fini(ce);
3409 if (intel_engine_is_virtual(ce->engine)) {
3411 container_of(ce, typeof(*ve), context);
3418 intel_context_free(ce);
3424 struct intel_context *ce;
3432 ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts,
3435 if (ce)
3436 list_del_init(&ce->destroyed_link);
3439 if (!ce)
3442 release_guc_id(guc, ce);
3443 __guc_context_destroy(ce);
3449 struct intel_context *ce;
3454 ce = list_first_entry_or_null(&guc->submission_state.destroyed_contexts,
3457 if (ce)
3458 list_del_init(&ce->destroyed_link);
3461 if (!ce)
3464 if (guc_lrc_desc_unpin(ce)) {
3473 list_add_tail(&ce->destroyed_link,
3507 struct intel_context *ce = container_of(kref, typeof(*ce), ref);
3508 struct intel_guc *guc = ce_to_guc(ce);
3518 destroy = submission_disabled(guc) || context_guc_id_invalid(ce) ||
3519 !ctx_id_mapped(guc, ce->guc_id.id);
3521 if (!list_empty(&ce->guc_id.link))
3522 list_del_init(&ce->guc_id.link);
3523 list_add_tail(&ce->destroyed_link,
3526 __release_guc_id(guc, ce);
3530 __guc_context_destroy(ce);
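
guc_context_destroy() and the destroyed_worker paths above defer the real teardown: unless submission is already disabled or the guc_id was never mapped, the final reference only moves the context onto submission_state.destroyed_contexts, and a worker later pops entries, deregisters them with the firmware via guc_lrc_desc_unpin() and frees them, re-queueing any entry whose deregistration could not be sent yet. A minimal single-threaded model of that deferred-free list, with invented names, is:

/* Minimal model of deferred context teardown: the last reference only
 * queues the context; a worker later pops entries, tries to deregister
 * them and frees them, putting them back on transient failure.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
	int id;
	struct ctx *next;
};

static struct ctx *destroyed_list;

static void queue_destroy(struct ctx *c)
{
	c->next = destroyed_list;
	destroyed_list = c;
}

/* Pretend deregistration can transiently fail (e.g. no CT space). */
static bool try_deregister(struct ctx *c, bool allow)
{
	printf("deregister id %d: %s\n", c->id, allow ? "sent" : "deferred");
	return allow;
}

static void destroyed_worker(bool ct_space_available)
{
	while (destroyed_list) {
		struct ctx *c = destroyed_list;

		destroyed_list = c->next;
		if (!try_deregister(c, ct_space_available)) {
			queue_destroy(c);	/* retry on a later pass */
			return;
		}
		free(c);
	}
}

int main(void)
{
	struct ctx *c = malloc(sizeof(*c));

	c->id = 7;
	queue_destroy(c);		/* final reference dropped */
	destroyed_worker(false);	/* first pass cannot send it */
	destroyed_worker(true);		/* second pass succeeds and frees */
	return 0;
}
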
3542 static int guc_context_alloc(struct intel_context *ce)
3544 return lrc_alloc(ce, ce->engine);
3548 struct intel_context *ce)
3553 __guc_context_policy_start_klv(&policy, ce->guc_id.id);
3554 __guc_context_policy_add_priority(&policy, ce->guc_state.prio);
3559 ce->guc_id.id,
3560 ce->guc_state.prio,
3568 struct intel_context *ce,
3573 lockdep_assert_held(&ce->guc_state.lock);
3575 if (ce->guc_state.prio == prio || submission_disabled(guc) ||
3576 !context_registered(ce)) {
3577 ce->guc_state.prio = prio;
3581 ce->guc_state.prio = prio;
3582 __guc_context_set_prio(guc, ce);
3584 trace_intel_context_set_prio(ce);
3599 static inline void add_context_inflight_prio(struct intel_context *ce,
3602 lockdep_assert_held(&ce->guc_state.lock);
3603 GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
3605 ++ce->guc_state.prio_count[guc_prio];
3608 GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]);
3611 static inline void sub_context_inflight_prio(struct intel_context *ce,
3614 lockdep_assert_held(&ce->guc_state.lock);
3615 GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
3618 GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]);
3620 --ce->guc_state.prio_count[guc_prio];
3623 static inline void update_context_prio(struct intel_context *ce)
3625 struct intel_guc *guc = &ce->engine->gt->uc.guc;
3631 lockdep_assert_held(&ce->guc_state.lock);
3633 for (i = 0; i < ARRAY_SIZE(ce->guc_state.prio_count); ++i) {
3634 if (ce->guc_state.prio_count[i]) {
3635 guc_context_set_prio(guc, ce, i);
3649 struct intel_context *ce = request_to_scheduling_context(rq);
3652 GEM_BUG_ON(intel_context_is_child(ce));
3655 spin_lock(&ce->guc_state.lock);
3656 list_move_tail(&rq->sched.link, &ce->guc_state.requests);
3660 add_context_inflight_prio(ce, rq->guc_prio);
3662 sub_context_inflight_prio(ce, rq->guc_prio);
3664 add_context_inflight_prio(ce, rq->guc_prio);
3666 update_context_prio(ce);
3668 spin_unlock(&ce->guc_state.lock);
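
add_context_inflight_prio(), sub_context_inflight_prio() and update_context_prio() above keep a per-context count of in-flight requests at each GuC priority level and push the most important populated level (lowest index) to the firmware whenever it changes. A compressed model of that bookkeeping, with a placeholder level count and a printf standing in for the firmware update, is:

/* Model of in-flight priority bookkeeping: each request adds to a
 * per-priority counter; the context's effective priority is the
 * lowest-index level with a non-zero count. Level count is illustrative.
 */
#include <stdio.h>

#define PRIO_LEVELS	4		/* 0 = most important */

struct ctx {
	unsigned int prio_count[PRIO_LEVELS];
	unsigned int prio;		/* last value pushed to "firmware" */
};

static void set_prio(struct ctx *c, unsigned int prio)
{
	if (c->prio != prio) {
		c->prio = prio;
		printf("firmware: context priority -> %u\n", prio);
	}
}

static void update_prio(struct ctx *c)
{
	for (unsigned int i = 0; i < PRIO_LEVELS; i++) {
		if (c->prio_count[i]) {
			set_prio(c, i);
			return;
		}
	}
}

static void add_inflight(struct ctx *c, unsigned int prio)
{
	c->prio_count[prio]++;
	update_prio(c);
}

static void sub_inflight(struct ctx *c, unsigned int prio)
{
	c->prio_count[prio]--;
	update_prio(c);
}

int main(void)
{
	struct ctx c = { .prio = PRIO_LEVELS - 1 };

	add_inflight(&c, 2);		/* effective priority -> 2 */
	add_inflight(&c, 0);		/* -> 0 */
	sub_inflight(&c, 0);		/* back to 2 */
	sub_inflight(&c, 2);
	return 0;
}
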
3671 static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce)
3673 lockdep_assert_held(&ce->guc_state.lock);
3677 sub_context_inflight_prio(ce, rq->guc_prio);
3678 update_context_prio(ce);
3685 struct intel_context *ce = request_to_scheduling_context(rq);
3687 GEM_BUG_ON(intel_context_is_child(ce));
3689 spin_lock_irq(&ce->guc_state.lock);
3697 guc_prio_fini(rq, ce);
3699 spin_unlock_irq(&ce->guc_state.lock);
3701 atomic_dec(&ce->guc_id.ref);
3742 static void __guc_signal_context_fence(struct intel_context *ce)
3746 lockdep_assert_held(&ce->guc_state.lock);
3748 if (!list_empty(&ce->guc_state.fences))
3749 trace_intel_context_fence_release(ce);
3753 * ce->guc_state.lock is preserved.
3755 list_for_each_entry_safe(rq, rn, &ce->guc_state.fences,
3761 INIT_LIST_HEAD(&ce->guc_state.fences);
3764 static void guc_signal_context_fence(struct intel_context *ce)
3768 GEM_BUG_ON(intel_context_is_child(ce));
3770 spin_lock_irqsave(&ce->guc_state.lock, flags);
3771 clr_context_wait_for_deregister_to_register(ce);
3772 __guc_signal_context_fence(ce);
3773 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3776 static bool context_needs_register(struct intel_context *ce, bool new_guc_id)
3778 return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) ||
3779 !ctx_id_mapped(ce_to_guc(ce), ce->guc_id.id)) &&
3780 !submission_disabled(ce_to_guc(ce));
3783 static void guc_context_init(struct intel_context *ce)
3789 ctx = rcu_dereference(ce->gem_context);
3794 ce->guc_state.prio = map_i915_prio_to_guc_prio(prio);
3796 INIT_DELAYED_WORK(&ce->guc_state.sched_disable_delay_work,
3799 set_bit(CONTEXT_GUC_INIT, &ce->flags);
3804 struct intel_context *ce = request_to_scheduling_context(rq);
3805 struct intel_guc *guc = ce_to_guc(ce);
3833 if (unlikely(!test_bit(CONTEXT_GUC_INIT, &ce->flags)))
3834 guc_context_init(ce);
3850 if (cancel_delayed_work_sync(&ce->guc_state.sched_disable_delay_work))
3851 intel_context_sched_disable_unpin(ce);
3852 else if (intel_context_is_closed(ce))
3853 if (wait_for(context_close_done(ce), 1500))
3872 if (atomic_add_unless(&ce->guc_id.ref, 1, 0))
3875 ret = pin_guc_id(guc, ce); /* returns 1 if new guc_id assigned */
3878 if (context_needs_register(ce, !!ret)) {
3879 ret = try_context_registration(ce, true);
3885 atomic_dec(&ce->guc_id.ref);
3886 unpin_guc_id(guc, ce);
3891 clear_bit(CONTEXT_LRCA_DIRTY, &ce->flags);
3901 spin_lock_irqsave(&ce->guc_state.lock, flags);
3902 if (context_wait_for_deregister_to_register(ce) ||
3903 context_pending_disable(ce)) {
3907 list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
3909 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
3914 static int guc_virtual_context_pre_pin(struct intel_context *ce,
3918 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3920 return __guc_context_pre_pin(ce, engine, ww, vaddr);
3923 static int guc_virtual_context_pin(struct intel_context *ce, void *vaddr)
3925 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3926 int ret = __guc_context_pin(ce, engine, vaddr);
3927 intel_engine_mask_t tmp, mask = ce->engine->mask;
3930 for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3936 static void guc_virtual_context_unpin(struct intel_context *ce)
3938 intel_engine_mask_t tmp, mask = ce->engine->mask;
3940 struct intel_guc *guc = ce_to_guc(ce);
3942 GEM_BUG_ON(context_enabled(ce));
3943 GEM_BUG_ON(intel_context_is_barrier(ce));
3945 unpin_guc_id(guc, ce);
3946 lrc_unpin(ce);
3948 for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3952 static void guc_virtual_context_enter(struct intel_context *ce)
3954 intel_engine_mask_t tmp, mask = ce->engine->mask;
3957 for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3960 intel_timeline_enter(ce->timeline);
3963 static void guc_virtual_context_exit(struct intel_context *ce)
3965 intel_engine_mask_t tmp, mask = ce->engine->mask;
3968 for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
3971 intel_timeline_exit(ce->timeline);
3974 static int guc_virtual_context_alloc(struct intel_context *ce)
3976 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
3978 return lrc_alloc(ce, engine);
4007 static int guc_parent_context_pin(struct intel_context *ce, void *vaddr)
4009 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
4010 struct intel_guc *guc = ce_to_guc(ce);
4013 GEM_BUG_ON(!intel_context_is_parent(ce));
4014 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
4016 ret = pin_guc_id(guc, ce);
4020 return __guc_context_pin(ce, engine, vaddr);
4023 static int guc_child_context_pin(struct intel_context *ce, void *vaddr)
4025 struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
4027 GEM_BUG_ON(!intel_context_is_child(ce));
4028 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
4030 __intel_context_pin(ce->parallel.parent);
4031 return __guc_context_pin(ce, engine, vaddr);
4034 static void guc_parent_context_unpin(struct intel_context *ce)
4036 struct intel_guc *guc = ce_to_guc(ce);
4038 GEM_BUG_ON(context_enabled(ce));
4039 GEM_BUG_ON(intel_context_is_barrier(ce));
4040 GEM_BUG_ON(!intel_context_is_parent(ce));
4041 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
4043 unpin_guc_id(guc, ce);
4044 lrc_unpin(ce);
4047 static void guc_child_context_unpin(struct intel_context *ce)
4049 GEM_BUG_ON(context_enabled(ce));
4050 GEM_BUG_ON(intel_context_is_barrier(ce));
4051 GEM_BUG_ON(!intel_context_is_child(ce));
4052 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
4054 lrc_unpin(ce);
4057 static void guc_child_context_post_unpin(struct intel_context *ce)
4059 GEM_BUG_ON(!intel_context_is_child(ce));
4060 GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent));
4061 GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
4063 lrc_post_unpin(ce);
4064 intel_context_unpin(ce->parallel.parent);
4069 struct intel_context *ce = container_of(kref, typeof(*ce), ref);
4071 __guc_context_destroy(ce);
4147 struct intel_context *parent = NULL, *ce, *err;
4160 ce = intel_engine_create_virtual(siblings, num_siblings,
4162 if (IS_ERR(ce)) {
4163 err = ERR_CAST(ce);
4168 parent = ce;
4171 ce->ops = &virtual_child_context_ops;
4172 intel_context_bind_parent_child(parent, ce);
4184 for_each_child(parent, ce) {
4185 ce->engine->emit_bb_start =
4187 ce->engine->emit_fini_breadcrumb =
4189 ce->engine->emit_fini_breadcrumb_dw = 16;
4261 struct intel_context *ce = request_to_scheduling_context(rq);
4271 spin_lock(&ce->guc_state.lock);
4274 sub_context_inflight_prio(ce, rq->guc_prio);
4276 add_context_inflight_prio(ce, rq->guc_prio);
4277 update_context_prio(ce);
4279 spin_unlock(&ce->guc_state.lock);
4284 struct intel_context *ce = request_to_scheduling_context(rq);
4286 spin_lock(&ce->guc_state.lock);
4287 guc_prio_fini(rq, ce);
4288 spin_unlock(&ce->guc_state.lock);
4373 struct intel_context *ce)
4384 if (context_guc_id_invalid(ce)) {
4385 ret = pin_guc_id(guc, ce);
4391 if (!test_bit(CONTEXT_GUC_INIT, &ce->flags))
4392 guc_context_init(ce);
4394 ret = try_context_registration(ce, true);
4396 unpin_guc_id(guc, ce);
4425 struct intel_context *ce;
4427 list_for_each_entry(ce, &engine->pinned_contexts_list,
4429 int ret = guc_kernel_context_pin(guc, ce);
4798 struct intel_context *ce;
4805 ce = __get_context(guc, ctx_id);
4806 if (unlikely(!ce)) {
4811 if (unlikely(intel_context_is_child(ce))) {
4816 return ce;
4971 struct intel_context *ce;
4980 ce = g2h_context_lookup(guc, ctx_id);
4981 if (unlikely(!ce))
4984 trace_intel_context_deregister_done(ce);
4987 if (unlikely(ce->drop_deregister)) {
4988 ce->drop_deregister = false;
4993 if (context_wait_for_deregister_to_register(ce)) {
4995 &ce->engine->gt->i915->runtime_pm;
5003 register_context(ce, true);
5004 guc_signal_context_fence(ce);
5005 intel_context_put(ce);
5006 } else if (context_destroyed(ce)) {
5009 release_guc_id(guc, ce);
5010 __guc_context_destroy(ce);
5022 struct intel_context *ce;
5032 ce = g2h_context_lookup(guc, ctx_id);
5033 if (unlikely(!ce))
5036 if (unlikely(context_destroyed(ce) ||
5037 (!context_pending_enable(ce) &&
5038 !context_pending_disable(ce)))) {
5040 ce->guc_state.sched_state, ctx_id);
5044 trace_intel_context_sched_done(ce);
5046 if (context_pending_enable(ce)) {
5048 if (unlikely(ce->drop_schedule_enable)) {
5049 ce->drop_schedule_enable = false;
5054 spin_lock_irqsave(&ce->guc_state.lock, flags);
5055 clr_context_pending_enable(ce);
5056 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
5057 } else if (context_pending_disable(ce)) {
5061 if (unlikely(ce->drop_schedule_disable)) {
5062 ce->drop_schedule_disable = false;
5074 intel_context_sched_disable_unpin(ce);
5076 spin_lock_irqsave(&ce->guc_state.lock, flags);
5077 banned = context_banned(ce);
5078 clr_context_banned(ce);
5079 clr_context_pending_disable(ce);
5080 __guc_signal_context_fence(ce);
5081 guc_blocked_fence_complete(ce);
5082 spin_unlock_irqrestore(&ce->guc_state.lock, flags);
5085 guc_cancel_context_requests(ce);
5086 intel_engine_signal_breadcrumbs(ce->engine);
5091 intel_context_put(ce);
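
The schedule-enable/disable paths and the G2H handler above share one convention: whoever sends the H2G request takes an intel_context reference and sets the matching pending_* flag, and only the completion handler clears the flag and drops that reference. A compact model of the convention (names are illustrative) is:

/* Compact model of the H2G/G2H refcount convention: the sender pins
 * the context and sets a pending flag; the completion handler is the
 * only place that clears the flag and drops that reference.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct ctx {
	int ref;
	bool pending_disable;
};

static void send_sched_disable(struct ctx *c)
{
	c->ref++;			/* keep the context alive for the reply */
	c->pending_disable = true;
	/* ...H2G message goes out here... */
}

static void handle_sched_done(struct ctx *c)
{
	assert(c->pending_disable);	/* unsolicited G2H would be a bug */
	c->pending_disable = false;
	c->ref--;			/* release the reference taken above */
}

int main(void)
{
	struct ctx c = { .ref = 1 };

	send_sched_disable(&c);
	handle_sched_done(&c);
	printf("ref=%d pending=%d\n", c.ref, c.pending_disable);
	return 0;
}
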
5097 struct intel_context *ce)
5104 if (intel_engine_is_virtual(ce->engine)) {
5106 intel_engine_mask_t tmp, virtual_mask = ce->engine->mask;
5109 for_each_engine_masked(e, ce->engine->gt, virtual_mask, tmp) {
5110 bool match = intel_guc_capture_is_matching_engine(gt, ce, e);
5113 intel_engine_set_hung_context(e, ce);
5122 ce->guc_id.id, ce->engine->name);
5126 intel_engine_set_hung_context(ce->engine, ce);
5127 engine_mask = ce->engine->mask;
5128 i915_increase_reset_engine_count(&i915->gpu_error, ce->engine);
5135 static void guc_context_replay(struct intel_context *ce)
5137 struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
5139 __guc_reset_context(ce, ce->engine->mask);
5144 struct intel_context *ce)
5146 bool capture = intel_context_is_schedulable(ce);
5148 trace_intel_context_reset(ce);
5152 ce->guc_id.id, ce->engine->name,
5153 str_yes_no(intel_context_is_exiting(ce)),
5154 str_yes_no(intel_context_is_banned(ce)));
5157 capture_error_state(guc, ce);
5158 guc_context_replay(ce);
5165 struct intel_context *ce;
5183 ce = g2h_context_lookup(guc, ctx_id);
5184 if (ce)
5185 intel_context_get(ce);
5188 if (unlikely(!ce))
5191 guc_handle_context_reset(guc, ce);
5192 intel_context_put(ce);
5307 struct intel_context *ce;
5317 xa_for_each(&guc->context_lookup, index, ce) {
5320 if (!kref_get_unless_zero(&ce->ref))
5325 if (!intel_context_is_pinned(ce))
5328 if (intel_engine_is_virtual(ce->engine)) {
5329 if (!(ce->engine->mask & engine->mask))
5332 if (ce->engine != engine)
5337 spin_lock(&ce->guc_state.lock);
5338 list_for_each_entry(rq, &ce->guc_state.requests, sched.link) {
5345 spin_unlock(&ce->guc_state.lock);
5348 intel_engine_set_hung_context(engine, ce);
5351 intel_context_put(ce);
5357 intel_context_put(ce);
5369 struct intel_context *ce;
5378 xa_for_each(&guc->context_lookup, index, ce) {
5379 if (!kref_get_unless_zero(&ce->ref))
5384 if (!intel_context_is_pinned(ce))
5387 if (intel_engine_is_virtual(ce->engine)) {
5388 if (!(ce->engine->mask & engine->mask))
5391 if (ce->engine != engine)
5395 spin_lock(&ce->guc_state.lock);
5396 intel_engine_dump_active_requests(&ce->guc_state.requests,
5398 spin_unlock(&ce->guc_state.lock);
5401 intel_context_put(ce);
5441 struct intel_context *ce)
5445 drm_printf(p, "\t\tPriority: %d\n", ce->guc_state.prio);
5450 i, ce->guc_state.prio_count[i]);
5456 struct intel_context *ce)
5458 drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id);
5459 drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca);
5461 ce->ring->head,
5462 ce->lrc_reg_state[CTX_RING_HEAD]);
5464 ce->ring->tail,
5465 ce->lrc_reg_state[CTX_RING_TAIL]);
5467 atomic_read(&ce->pin_count));
5469 atomic_read(&ce->guc_id.ref));
5471 ce->guc_state.sched_state);
5477 struct intel_context *ce;
5482 xa_for_each(&guc->context_lookup, index, ce) {
5483 GEM_BUG_ON(intel_context_is_child(ce));
5485 guc_log_context(p, ce);
5486 guc_log_context_priority(p, ce);
5488 if (intel_context_is_parent(ce)) {
5492 ce->parallel.number_children);
5494 if (ce->parallel.guc.wq_status) {
5496 READ_ONCE(*ce->parallel.guc.wq_head));
5498 READ_ONCE(*ce->parallel.guc.wq_tail));
5500 READ_ONCE(*ce->parallel.guc.wq_status));
5503 if (ce->engine->emit_bb_start ==
5508 get_children_go_value(ce));
5509 for (i = 0; i < ce->parallel.number_children; ++i)
5511 get_children_join_value(ce, i));
5514 for_each_child(ce, child)
5521 static inline u32 get_children_go_addr(struct intel_context *ce)
5523 GEM_BUG_ON(!intel_context_is_parent(ce));
5525 return i915_ggtt_offset(ce->state) +
5526 __get_parent_scratch_offset(ce) +
5530 static inline u32 get_children_join_addr(struct intel_context *ce,
5533 GEM_BUG_ON(!intel_context_is_parent(ce));
5535 return i915_ggtt_offset(ce->state) +
5536 __get_parent_scratch_offset(ce) +
5548 struct intel_context *ce = rq->context;
5552 GEM_BUG_ON(!intel_context_is_parent(ce));
5554 cs = intel_ring_begin(rq, 10 + 4 * ce->parallel.number_children);
5559 for (i = 0; i < ce->parallel.number_children; ++i) {
5565 *cs++ = get_children_join_addr(ce, i);
5576 get_children_go_addr(ce),
5595 struct intel_context *ce = rq->context;
5596 struct intel_context *parent = intel_context_to_parent(ce);
5599 GEM_BUG_ON(!intel_context_is_child(ce));
5609 ce->parallel.child_index),
5639 struct intel_context *ce = rq->context;
5642 GEM_BUG_ON(!intel_context_is_parent(ce));
5645 for (i = 0; i < ce->parallel.number_children; ++i) {
5651 *cs++ = get_children_join_addr(ce, i);
5662 get_children_go_addr(ce),
5689 struct intel_context *ce = rq->context;
5693 GEM_BUG_ON(!intel_context_is_parent(ce));
5701 (ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN));
5702 cs += ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN;
5722 ce->engine->emit_fini_breadcrumb_dw != cs);
5733 struct intel_context *ce = rq->context;
5734 struct intel_context *parent = intel_context_to_parent(ce);
5736 GEM_BUG_ON(!intel_context_is_child(ce));
5746 ce->parallel.child_index),
5765 struct intel_context *ce = rq->context;
5769 GEM_BUG_ON(!intel_context_is_child(ce));
5777 (ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN));
5778 cs += ce->engine->emit_fini_breadcrumb_dw - NON_SKIP_LEN;
5798 ce->engine->emit_fini_breadcrumb_dw != cs);
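
The emit_bb_start and fini-breadcrumb helpers at the end of the listing coordinate the parent and child contexts through join/go semaphores in the parent scratch page: each child posts its own join value and then waits on the shared go value, while the parent waits for every join before writing go. A user-space rendition of that barrier using C11 atomics and threads (the layout and values are stand-ins, not the scratch-page ABI; build with -pthread) is:

/* User-space rendition of the join/go handshake: children post a
 * per-child "join" and spin on a shared "go"; the parent waits for all
 * joins, then sets "go". Values and layout are illustrative.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NUM_CHILDREN	3

static atomic_int join_sema[NUM_CHILDREN];
static atomic_int go_sema;

static void *child(void *arg)
{
	int idx = (int)(long)arg;

	atomic_store(&join_sema[idx], 1);	/* "I have joined" */
	while (!atomic_load(&go_sema))		/* wait for the parent */
		;
	printf("child %d released\n", idx);
	return NULL;
}

static void *parent(void *arg)
{
	(void)arg;
	for (int i = 0; i < NUM_CHILDREN; i++)
		while (!atomic_load(&join_sema[i]))	/* wait for each child */
			;
	atomic_store(&go_sema, 1);		/* release everyone */
	return NULL;
}

int main(void)
{
	pthread_t threads[NUM_CHILDREN + 1];

	for (long i = 0; i < NUM_CHILDREN; i++)
		pthread_create(&threads[i], NULL, child, (void *)i);
	pthread_create(&threads[NUM_CHILDREN], NULL, parent, NULL);

	for (int i = 0; i <= NUM_CHILDREN; i++)
		pthread_join(threads[i], NULL);
	return 0;
}
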