/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_EXEC_QUEUE_TYPES_H_
#define _XE_EXEC_QUEUE_TYPES_H_

#include <linux/kref.h>

#include <drm/gpu_scheduler.h>

#include "xe_gpu_scheduler_types.h"
#include "xe_hw_engine_types.h"
#include "xe_hw_fence_types.h"
#include "xe_lrc_types.h"

struct xe_execlist_exec_queue;
struct xe_gt;
struct xe_guc_exec_queue;
struct xe_hw_engine;
struct xe_vm;

enum xe_exec_queue_priority {
	XE_EXEC_QUEUE_PRIORITY_UNSET = -2, /* For execlist usage only */
	XE_EXEC_QUEUE_PRIORITY_LOW = 0,
	XE_EXEC_QUEUE_PRIORITY_NORMAL,
	XE_EXEC_QUEUE_PRIORITY_HIGH,
	XE_EXEC_QUEUE_PRIORITY_KERNEL,

	XE_EXEC_QUEUE_PRIORITY_COUNT
};
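
/*
 * Illustrative note (not part of the original header): the trailing
 * XE_EXEC_QUEUE_PRIORITY_COUNT entry lets per-priority bookkeeping be sized
 * directly from the enum, e.g. (sketch, the array name is hypothetical):
 *
 *	struct list_head queues[XE_EXEC_QUEUE_PRIORITY_COUNT];
 *
 * XE_EXEC_QUEUE_PRIORITY_UNSET is negative so it can never index such an
 * array; as noted above it is only meaningful to the execlist backend.
 */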

/**
 * struct xe_exec_queue - Execution queue
 *
 * Contains all state necessary for submissions. Can either be a user object or
 * a kernel object.
 */
struct xe_exec_queue {
	/** @gt: graphics tile this exec queue can submit to */
	struct xe_gt *gt;
	/**
	 * @hwe: A hardware engine of the same class. May (physical engine) or
	 * may not (virtual engine) be where jobs actually end up running.
	 * Should never really be used for submissions.
	 */
	struct xe_hw_engine *hwe;
	/** @refcount: ref count of this exec queue */
	struct kref refcount;
	/** @vm: VM (address space) for this exec queue */
	struct xe_vm *vm;
	/** @class: class of this exec queue */
	enum xe_engine_class class;
	/**
	 * @logical_mask: logical mask of engines where jobs submitted to this
	 * exec queue can run
	 */
	u32 logical_mask;
	/** @name: name of this exec queue */
	char name[MAX_FENCE_NAME_LEN];
	/** @width: width (number of batch buffers submitted per exec) of this exec queue */
	u16 width;
	/** @fence_irq: fence IRQ used to signal job completion */
	struct xe_hw_fence_irq *fence_irq;

	/**
	 * @last_fence: last fence on exec queue, protected by vm->lock in write
	 * mode if bind exec queue, protected by dma resv lock if non-bind exec
	 * queue
	 */
	struct dma_fence *last_fence;

/* queue no longer allowed to submit */
#define EXEC_QUEUE_FLAG_BANNED			BIT(0)
/* queue used for kernel submission only */
#define EXEC_QUEUE_FLAG_KERNEL			BIT(1)
/* kernel exec_queue only destroyed at driver unload */
#define EXEC_QUEUE_FLAG_PERMANENT		BIT(2)
/* queue keeps running pending jobs after destroy ioctl */
#define EXEC_QUEUE_FLAG_PERSISTENT		BIT(3)
/* for VM jobs. Caller needs to hold rpm ref when creating queue with this flag */
#define EXEC_QUEUE_FLAG_VM			BIT(4)
/* child of VM queue for multi-tile VM jobs */
#define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD	BIT(5)
/* kernel exec_queue only, set priority to highest level */
#define EXEC_QUEUE_FLAG_HIGH_PRIORITY		BIT(6)

	/**
	 * @flags: flags for this exec queue, should be set up statically aside
	 * from the ban bit
	 */
	unsigned long flags;
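
	/*
	 * Illustrative sketch (not part of the original header): the flag bits
	 * above live in the plain @flags bitmask, so checks are simple bitwise
	 * tests, e.g.:
	 *
	 *	if (q->flags & EXEC_QUEUE_FLAG_BANNED)
	 *		return -ECANCELED;
	 *
	 * (the error code above is chosen purely for illustration). Per the
	 * @flags documentation, only the ban bit is expected to change after
	 * the queue has been created.
	 */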

	union {
		/** @multi_gt_list: list head for VM bind engines if multi-GT */
		struct list_head multi_gt_list;
		/** @multi_gt_link: link for VM bind engines if multi-GT */
		struct list_head multi_gt_link;
	};
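
	/*
	 * Illustrative note (not part of the original header): on multi-GT
	 * devices the primary VM bind queue owns @multi_gt_list, while per-GT
	 * child bind queues (see EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD above) hang
	 * off it via @multi_gt_link, e.g. (sketch):
	 *
	 *	list_add_tail(&child->multi_gt_link, &parent->multi_gt_list);
	 */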

	union {
		/** @execlist: execlist backend specific state for exec queue */
		struct xe_execlist_exec_queue *execlist;
		/** @guc: GuC backend specific state for exec queue */
		struct xe_guc_exec_queue *guc;
	};

	/**
	 * @parallel: parallel submission state
	 */
	struct {
		/** @parallel.composite_fence_ctx: composite fence context */
		u64 composite_fence_ctx;
		/** @parallel.composite_fence_seqno: seqno for composite fence */
		u32 composite_fence_seqno;
	} parallel;

	/** @sched_props: scheduling properties */
	struct {
		/** @sched_props.timeslice_us: timeslice period in microseconds */
		u32 timeslice_us;
		/** @sched_props.preempt_timeout_us: preemption timeout in microseconds */
		u32 preempt_timeout_us;
		/** @sched_props.job_timeout_ms: job timeout in milliseconds */
		u32 job_timeout_ms;
		/** @sched_props.priority: priority of this exec queue */
		enum xe_exec_queue_priority priority;
	} sched_props;
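
	/*
	 * Illustrative sketch (not part of the original header): changing a
	 * scheduling property typically means updating @sched_props and asking
	 * the backend to apply it through @ops, e.g. (error handling omitted,
	 * ordering is an assumption):
	 *
	 *	q->sched_props.timeslice_us = timeslice_us;
	 *	q->ops->set_timeslice(q, timeslice_us);
	 */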

	/** @compute: compute exec queue state */
	struct {
		/** @compute.pfence: preemption fence */
		struct dma_fence *pfence;
		/** @compute.context: preemption fence context */
		u64 context;
		/** @compute.seqno: preemption fence seqno */
		u32 seqno;
		/** @compute.link: link into VM's list of exec queues */
		struct list_head link;
		/** @compute.lock: preemption fences lock */
		spinlock_t lock;
	} compute;

	/** @ops: submission backend exec queue operations */
	const struct xe_exec_queue_ops *ops;

	/** @ring_ops: ring operations for this exec queue */
	const struct xe_ring_ops *ring_ops;
	/** @entity: DRM sched entity for this exec queue (1 to 1 relationship) */
	struct drm_sched_entity *entity;
	/**
	 * @tlb_flush_seqno: The seqno of the last rebind tlb flush performed.
	 * Protected by @vm's resv. Unused if @vm == NULL.
	 */
	u64 tlb_flush_seqno;
	/** @lrc: logical ring context for this exec queue */
	struct xe_lrc lrc[];
};
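
/*
 * Illustrative sketch (not part of the original header): @refcount is a plain
 * kref, so queue lifetime follows the usual pattern. The release callback name
 * below is hypothetical; the real driver wraps this in helpers declared
 * elsewhere (xe_exec_queue_get()/xe_exec_queue_put()).
 *
 *	kref_get(&q->refcount);
 *	kref_put(&q->refcount, exec_queue_destroy);
 */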

/**
 * struct xe_exec_queue_ops - Submission backend exec queue operations
 */
struct xe_exec_queue_ops {
	/** @init: Initialize exec queue for submission backend */
	int (*init)(struct xe_exec_queue *q);
	/** @kill: Kill inflight submissions for backend */
	void (*kill)(struct xe_exec_queue *q);
	/** @fini: Fini exec queue for submission backend */
	void (*fini)(struct xe_exec_queue *q);
	/** @set_priority: Set priority for exec queue */
	int (*set_priority)(struct xe_exec_queue *q,
			    enum xe_exec_queue_priority priority);
	/** @set_timeslice: Set timeslice for exec queue */
	int (*set_timeslice)(struct xe_exec_queue *q, u32 timeslice_us);
	/** @set_preempt_timeout: Set preemption timeout for exec queue */
	int (*set_preempt_timeout)(struct xe_exec_queue *q, u32 preempt_timeout_us);
	/**
	 * @suspend: Suspend exec queue from executing. May be called multiple
	 * times in a row before resume, with the caveat that suspend_wait must
	 * return before suspend is called again.
	 */
	int (*suspend)(struct xe_exec_queue *q);
	/**
	 * @suspend_wait: Wait for an exec queue to finish suspending; should be
	 * called after suspend.
	 */
	void (*suspend_wait)(struct xe_exec_queue *q);
	/**
	 * @resume: Resume exec queue execution. The exec queue must be in a
	 * suspended state and the dma fence returned from the most recent
	 * suspend call must be signalled when this function is called.
	 */
	void (*resume)(struct xe_exec_queue *q);
	/** @reset_status: check exec queue reset status */
	bool (*reset_status)(struct xe_exec_queue *q);
};
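
/*
 * Illustrative sketch (not part of the original header): each submission
 * backend provides a static instance of these ops and points q->ops at it
 * when the exec queue is created. The identifiers below are hypothetical,
 * though the GuC backend follows this general shape:
 *
 *	static const struct xe_exec_queue_ops example_exec_queue_ops = {
 *		.init = example_exec_queue_init,
 *		.kill = example_exec_queue_kill,
 *		.fini = example_exec_queue_fini,
 *		...
 *	};
 */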

#endif