Lines Matching refs:ct

63 ct_to_guc(struct xe_guc_ct *ct)
65 return container_of(ct, struct xe_guc, ct);
69 ct_to_gt(struct xe_guc_ct *ct)
71 return container_of(ct, struct xe_gt, uc.guc.ct);
75 ct_to_xe(struct xe_guc_ct *ct)
77 return gt_to_xe(ct_to_gt(ct));
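
The three helpers matched above (ct_to_guc(), ct_to_gt(), ct_to_xe()) recover the enclosing xe_guc / xe_gt / xe_device from an embedded struct xe_guc_ct via container_of(). Below is a minimal, self-contained userspace sketch of the same pattern; the "inner"/"outer" types are hypothetical and only the technique mirrors the matched lines.

/* Illustrative only: the container_of() pattern behind ct_to_guc() and
 * friends, rebuilt with hypothetical "inner"/"outer" types. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int id; };
struct outer { int tag; struct inner in; };	/* outer embeds inner, as xe_guc embeds xe_guc_ct */

static struct outer *inner_to_outer(struct inner *in)
{
	return container_of(in, struct outer, in);	/* mirrors ct_to_guc() */
}

int main(void)
{
	struct outer o = { .tag = 42 };
	printf("%d\n", inner_to_outer(&o.in)->tag);	/* prints 42 */
	return 0;
}
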
121 struct xe_guc_ct *ct = arg;
123 destroy_workqueue(ct->g2h_wq);
124 xa_destroy(&ct->fence_lookup);
129 static void primelockdep(struct xe_guc_ct *ct)
135 might_lock(&ct->lock);
139 int xe_guc_ct_init(struct xe_guc_ct *ct)
141 struct xe_device *xe = ct_to_xe(ct);
142 struct xe_gt *gt = ct_to_gt(ct);
149 ct->g2h_wq = alloc_ordered_workqueue("xe-g2h-wq", 0);
150 if (!ct->g2h_wq)
153 spin_lock_init(&ct->fast_lock);
154 xa_init(&ct->fence_lookup);
155 INIT_WORK(&ct->g2h_worker, g2h_worker_func);
156 init_waitqueue_head(&ct->wq);
157 init_waitqueue_head(&ct->g2h_fence_wq);
159 err = drmm_mutex_init(&xe->drm, &ct->lock);
163 primelockdep(ct);
171 ct->bo = bo;
173 err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct);
177 xe_assert(xe, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED);
178 ct->state = XE_GUC_CT_STATE_DISABLED;
227 static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct)
229 struct xe_guc *guc = ct_to_guc(ct);
233 desc_addr = xe_bo_ggtt_addr(ct->bo);
234 ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2;
235 size = ct->ctbs.h2g.info.size * sizeof(u32);
254 static int guc_ct_ctb_g2h_register(struct xe_guc_ct *ct)
256 struct xe_guc *guc = ct_to_guc(ct);
260 desc_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE;
261 ctb_addr = xe_bo_ggtt_addr(ct->bo) + CTB_DESC_SIZE * 2 +
263 size = ct->ctbs.g2h.info.size * sizeof(u32);
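
Taken together, the address computations matched from guc_ct_ctb_h2g_register() and guc_ct_ctb_g2h_register() imply how ct->bo is laid out in GGTT. A comment-only sketch of that layout, reconstructed from the matches; the term completing line 261 is not among the matched lines, so the exact G2H buffer offset is assumed, not quoted.

/*
 * ct->bo layout implied by the matched address math:
 *
 *   +0                        H2G descriptor                    (line 233)
 *   +CTB_DESC_SIZE            G2H descriptor                    (line 260)
 *   +CTB_DESC_SIZE * 2        H2G buffer, h2g.info.size dwords  (lines 234-235)
 *   +CTB_DESC_SIZE * 2 + ...  G2H buffer, g2h.info.size dwords  (lines 261-263;
 *                             continuation of line 261 not matched, assumed to
 *                             follow the H2G buffer)
 */
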
282 static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable)
293 int ret = xe_guc_mmio_send(ct_to_guc(ct), request, ARRAY_SIZE(request));
298 static void xe_guc_ct_set_state(struct xe_guc_ct *ct,
301 mutex_lock(&ct->lock); /* Serialise dequeue_one_g2h() */
302 spin_lock_irq(&ct->fast_lock); /* Serialise CT fast-path */
304 xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 ||
307 ct->g2h_outstanding = 0;
308 ct->state = state;
310 spin_unlock_irq(&ct->fast_lock);
314 * needs to be serialized with the send path which ct lock provides.
316 xa_destroy(&ct->fence_lookup);
318 mutex_unlock(&ct->lock);
321 int xe_guc_ct_enable(struct xe_guc_ct *ct)
323 struct xe_device *xe = ct_to_xe(ct);
326 xe_assert(xe, !xe_guc_ct_enabled(ct));
328 guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap);
329 guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap);
331 err = guc_ct_ctb_h2g_register(ct);
335 err = guc_ct_ctb_g2h_register(ct);
339 err = guc_ct_control_toggle(ct, true);
343 xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_ENABLED);
346 wake_up_all(&ct->wq);
357 static void stop_g2h_handler(struct xe_guc_ct *ct)
359 cancel_work_sync(&ct->g2h_worker);
364 * @ct: the &xe_guc_ct
369 void xe_guc_ct_disable(struct xe_guc_ct *ct)
371 xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_DISABLED);
372 stop_g2h_handler(ct);
377 * @ct: the &xe_guc_ct
381 void xe_guc_ct_stop(struct xe_guc_ct *ct)
383 xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_STOPPED);
384 stop_g2h_handler(ct);
387 static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
389 struct guc_ctb *h2g = &ct->ctbs.h2g;
391 lockdep_assert_held(&ct->lock);
394 h2g->info.head = desc_read(ct_to_xe(ct), h2g, head);
405 static bool g2h_has_room(struct xe_guc_ct *ct, u32 g2h_len)
410 lockdep_assert_held(&ct->fast_lock);
412 return ct->ctbs.g2h.info.space > g2h_len;
415 static int has_room(struct xe_guc_ct *ct, u32 cmd_len, u32 g2h_len)
417 lockdep_assert_held(&ct->lock);
419 if (!g2h_has_room(ct, g2h_len) || !h2g_has_room(ct, cmd_len))
425 static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
427 lockdep_assert_held(&ct->lock);
428 ct->ctbs.h2g.info.space -= cmd_len;
431 static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
433 xe_assert(ct_to_xe(ct), g2h_len <= ct->ctbs.g2h.info.space);
436 lockdep_assert_held(&ct->fast_lock);
438 ct->ctbs.g2h.info.space -= g2h_len;
439 ct->g2h_outstanding += num_g2h;
443 static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
445 lockdep_assert_held(&ct->fast_lock);
446 xe_assert(ct_to_xe(ct), ct->ctbs.g2h.info.space + g2h_len <=
447 ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space);
449 ct->ctbs.g2h.info.space += g2h_len;
450 --ct->g2h_outstanding;
453 static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
455 spin_lock_irq(&ct->fast_lock);
456 __g2h_release_space(ct, g2h_len);
457 spin_unlock_irq(&ct->fast_lock);
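
The reserve/release pairs above (lines 425-457) implement credit-style accounting: H2G space is debited as commands are written, and G2H space is debited up front for every reply a command is expected to produce, then credited back as each reply is consumed. Below is a toy userspace model of the G2H side only; the names and types are hypothetical, and just the arithmetic and the invariants the asserts check mirror the matched lines.

/* Toy model of the G2H credit accounting visible in lines 431-457. */
#include <assert.h>

struct g2h_credits {
	unsigned int space;		/* free dwords in the G2H ring */
	unsigned int size;		/* total ring size in dwords */
	unsigned int resv_space;	/* portion reserved for unsolicited G2H */
	unsigned int outstanding;	/* replies still expected from the GuC */
};

static void reserve_g2h(struct g2h_credits *c, unsigned int len, unsigned int num)
{
	assert(len <= c->space);	/* mirrors the assert on line 433 */
	c->space -= len;
	c->outstanding += num;
}

static void release_g2h(struct g2h_credits *c, unsigned int len)
{
	/* mirrors the assert on lines 446-447 */
	assert(c->space + len <= c->size - c->resv_space);
	c->space += len;
	c->outstanding--;
}

int main(void)
{
	struct g2h_credits c = { .space = 1024 - 64, .size = 1024, .resv_space = 64 };

	reserve_g2h(&c, 32, 1);		/* send a command expecting one reply */
	release_g2h(&c, 32);		/* reply consumed by the G2H handler */
	return c.outstanding;		/* back to zero */
}
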
462 static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
465 struct xe_device *xe = ct_to_xe(ct);
466 struct guc_ctb *h2g = &ct->ctbs.h2g;
475 lockdep_assert_held(&ct->lock);
483 h2g_reserve_space(ct, (h2g->info.size - tail));
521 h2g_reserve_space(ct, full_len);
526 trace_xe_guc_ctb_h2g(ct_to_gt(ct)->info.id, *(action - 1), full_len,
541 static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
543 u32 seqno = ct->fence_seqno++ & CT_SEQNO_MASK;
551 static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
555 struct xe_device *xe = ct_to_xe(ct);
559 xe_assert(xe, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
564 lockdep_assert_held(&ct->lock);
566 if (unlikely(ct->ctbs.h2g.info.broken)) {
571 if (ct->state == XE_GUC_CT_STATE_DISABLED) {
576 if (ct->state == XE_GUC_CT_STATE_STOPPED) {
581 xe_assert(xe, xe_guc_ct_enabled(ct));
590 g2h_fence->seqno = next_ct_seqno(ct, true);
591 ptr = xa_store(&ct->fence_lookup,
602 seqno = next_ct_seqno(ct, false);
606 spin_lock_irq(&ct->fast_lock);
608 ret = has_room(ct, len + GUC_CTB_HDR_LEN, g2h_len);
612 ret = h2g_write(ct, action, len, seqno, !!g2h_fence);
619 __g2h_reserve_space(ct, g2h_len, num_g2h);
620 xe_guc_notify(ct_to_guc(ct));
623 spin_unlock_irq(&ct->fast_lock);
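
Read in order, the matched lines from __guc_ct_send_locked() (606-623) give the H2G submission sequence. A comment-form summary inferred only from those matches:

/*
 * H2G submit order as the matched lines suggest:
 *
 *   spin_lock_irq(&ct->fast_lock);                  (line 606)
 *   has_room(ct, len + GUC_CTB_HDR_LEN, g2h_len);   both rings checked   (608)
 *   h2g_write(ct, action, len, seqno, ...);         copy into H2G ring   (612)
 *   __g2h_reserve_space(ct, g2h_len, num_g2h);      debit reply credit   (619)
 *   xe_guc_notify(ct_to_guc(ct));                   tell the GuC         (620)
 *   spin_unlock_irq(&ct->fast_lock);                (line 623)
 */
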
628 static void kick_reset(struct xe_guc_ct *ct)
630 xe_gt_reset_async(ct_to_gt(ct));
633 static int dequeue_one_g2h(struct xe_guc_ct *ct);
635 static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
639 struct drm_device *drm = &ct_to_xe(ct)->drm;
644 xe_assert(ct_to_xe(ct), !g2h_len || !g2h_fence);
645 lockdep_assert_held(&ct->lock);
646 xe_device_assert_mem_access(ct_to_xe(ct));
649 ret = __guc_ct_send_locked(ct, action, len, g2h_len, num_g2h,
660 !h2g_has_room(ct, len + GUC_CTB_HDR_LEN))) {
661 struct guc_ctb *h2g = &ct->ctbs.h2g;
675 struct xe_device *xe = ct_to_xe(ct);
676 struct guc_ctb *g2h = &ct->ctbs.g2h;
686 #define g2h_avail(ct) \
687 (desc_read(ct_to_xe(ct), (&ct->ctbs.g2h), tail) != ct->ctbs.g2h.info.head)
688 if (!wait_event_timeout(ct->wq, !ct->g2h_outstanding ||
689 g2h_avail(ct), HZ))
693 if (dequeue_one_g2h(ct) < 0)
703 xe_guc_ct_print(ct, &p, true);
704 ct->ctbs.h2g.info.broken = true;
709 static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
714 xe_assert(ct_to_xe(ct), !g2h_len || !g2h_fence);
716 mutex_lock(&ct->lock);
717 ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, g2h_fence);
718 mutex_unlock(&ct->lock);
723 int xe_guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
728 ret = guc_ct_send(ct, action, len, g2h_len, num_g2h, NULL);
730 kick_reset(ct);
735 int xe_guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
740 ret = guc_ct_send_locked(ct, action, len, g2h_len, num_g2h, NULL);
742 kick_reset(ct);
747 int xe_guc_ct_send_g2h_handler(struct xe_guc_ct *ct, const u32 *action, u32 len)
751 lockdep_assert_held(&ct->lock);
753 ret = guc_ct_send_locked(ct, action, len, 0, 0, NULL);
755 kick_reset(ct);
764 static bool retry_failure(struct xe_guc_ct *ct, int ret)
769 #define ct_alive(ct) \
770 (xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \
771 !ct->ctbs.g2h.info.broken)
772 if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5))
779 static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
782 struct xe_device *xe = ct_to_xe(ct);
797 ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence);
802 ptr = xa_store(&ct->fence_lookup,
811 kick_reset(ct);
813 if (no_fail && retry_failure(ct, ret))
817 xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
822 ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
826 xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
846 * @ct: the &xe_guc_ct
862 int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
865 KUNIT_STATIC_STUB_REDIRECT(xe_guc_ct_send_recv, ct, action, len, response_buffer);
866 return guc_ct_send_recv(ct, action, len, response_buffer, false);
869 int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
872 return guc_ct_send_recv(ct, action, len, response_buffer, true);
885 static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
890 lockdep_assert_held(&ct->lock);
897 g2h_release_space(ct, len);
903 static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
905 struct xe_gt *gt = ct_to_gt(ct);
913 lockdep_assert_held(&ct->lock);
916 * Fences for FAST_REQUEST messages are not tracked in ct->fence_lookup.
936 g2h_fence = xa_erase(&ct->fence_lookup, fence);
940 g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
960 g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
965 wake_up_all(&ct->g2h_fence_wq);
970 static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
972 struct xe_device *xe = ct_to_xe(ct);
977 lockdep_assert_held(&ct->lock);
984 ct->ctbs.g2h.info.broken = true;
992 ret = parse_g2h_event(ct, msg, len);
997 ret = parse_g2h_response(ct, msg, len);
1003 ct->ctbs.g2h.info.broken = true;
1011 static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
1013 struct xe_device *xe = ct_to_xe(ct);
1014 struct xe_guc *guc = ct_to_guc(ct);
1081 static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
1083 struct xe_device *xe = ct_to_xe(ct);
1084 struct guc_ctb *g2h = &ct->ctbs.g2h;
1090 xe_assert(xe, ct->state != XE_GUC_CT_STATE_NOT_INITIALIZED);
1091 lockdep_assert_held(&ct->fast_lock);
1093 if (ct->state == XE_GUC_CT_STATE_DISABLED)
1096 if (ct->state == XE_GUC_CT_STATE_STOPPED)
1102 xe_assert(xe, xe_guc_ct_enabled(ct));
1165 trace_xe_guc_ctb_g2h(ct_to_gt(ct)->info.id, action, len,
1171 static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
1173 struct xe_device *xe = ct_to_xe(ct);
1174 struct xe_guc *guc = ct_to_guc(ct);
1187 __g2h_release_space(ct, len);
1202 * @ct: GuC CT object
1208 void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
1210 struct xe_device *xe = ct_to_xe(ct);
1214 ongoing = xe_device_mem_access_get_if_ongoing(ct_to_xe(ct));
1215 if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
1218 spin_lock(&ct->fast_lock);
1220 len = g2h_read(ct, ct->fast_msg, true);
1222 g2h_fast_path(ct, ct->fast_msg, len);
1224 spin_unlock(&ct->fast_lock);
1231 static int dequeue_one_g2h(struct xe_guc_ct *ct)
1236 lockdep_assert_held(&ct->lock);
1238 spin_lock_irq(&ct->fast_lock);
1239 len = g2h_read(ct, ct->msg, false);
1240 spin_unlock_irq(&ct->fast_lock);
1244 ret = parse_g2h_msg(ct, ct->msg, len);
1248 ret = process_g2h_msg(ct, ct->msg, len);
1257 struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, g2h_worker);
1284 ongoing = xe_device_mem_access_get_if_ongoing(ct_to_xe(ct));
1285 if (!ongoing && xe_pm_read_callback_task(ct_to_xe(ct)) == NULL)
1289 mutex_lock(&ct->lock);
1290 ret = dequeue_one_g2h(ct);
1291 mutex_unlock(&ct->lock);
1294 struct drm_device *drm = &ct_to_xe(ct)->drm;
1297 xe_guc_ct_print(ct, &p, false);
1298 kick_reset(ct);
1303 xe_device_mem_access_put(ct_to_xe(ct));
1381 * @ct: GuC CT object.
1391 struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct,
1394 struct xe_device *xe = ct_to_xe(ct);
1405 if (xe_guc_ct_enabled(ct)) {
1407 snapshot->g2h_outstanding = READ_ONCE(ct->g2h_outstanding);
1408 guc_ctb_snapshot_capture(xe, &ct->ctbs.h2g,
1410 guc_ctb_snapshot_capture(xe, &ct->ctbs.g2h,
1463 * @ct: GuC CT.
1470 void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic)
1474 snapshot = xe_guc_ct_snapshot_capture(ct, atomic);
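
Finally, the exported entry points collected above suggest the call order a user of the CT layer follows. The sketch below is illustrative pseudocode, not driver code: it assumes the declarations from xe_guc_ct.h and the kernel's u32/ARRAY_SIZE, MY_H2G_ACTION is a hypothetical opcode standing in for a real GuC action, and the response buffer size is chosen only for the sketch.

/* Illustrative lifecycle of the entry points matched above. */
static int ct_lifecycle_sketch(struct xe_guc_ct *ct)
{
	u32 action[] = { MY_H2G_ACTION, 0 };	/* hypothetical action + payload */
	u32 response[8];			/* size picked only for the sketch */
	int err;

	err = xe_guc_ct_init(ct);		/* one-time setup (line 139) */
	if (err)
		return err;

	err = xe_guc_ct_enable(ct);		/* register CTBs with the GuC (line 321) */
	if (err)
		return err;

	/* fire-and-forget H2G: no G2H credit reserved, no fence */
	err = xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0);
	if (err)
		return err;

	/* blocking request/response tracked through ct->fence_lookup (line 862) */
	err = xe_guc_ct_send_recv(ct, action, ARRAY_SIZE(action), response);

	xe_guc_ct_disable(ct);			/* see also xe_guc_ct_stop(), line 381 */
	return err;
}
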