/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_VM_TYPES_H_
#define _XE_VM_TYPES_H_

#include <drm/drm_gpuvm.h>

#include <linux/dma-resv.h>
#include <linux/kref.h>
#include <linux/mmu_notifier.h>
#include <linux/scatterlist.h>

#include "xe_device_types.h"
#include "xe_pt_types.h"
#include "xe_range_fence.h"

struct xe_bo;
struct xe_sync_entry;
struct xe_user_fence;
struct xe_vm;

#define XE_VMA_READ_ONLY	DRM_GPUVA_USERBITS
#define XE_VMA_DESTROYED	(DRM_GPUVA_USERBITS << 1)
#define XE_VMA_ATOMIC_PTE_BIT	(DRM_GPUVA_USERBITS << 2)
#define XE_VMA_FIRST_REBIND	(DRM_GPUVA_USERBITS << 3)
#define XE_VMA_LAST_REBIND	(DRM_GPUVA_USERBITS << 4)
#define XE_VMA_PTE_4K		(DRM_GPUVA_USERBITS << 5)
#define XE_VMA_PTE_2M		(DRM_GPUVA_USERBITS << 6)
#define XE_VMA_PTE_1G		(DRM_GPUVA_USERBITS << 7)
#define XE_VMA_PTE_64K		(DRM_GPUVA_USERBITS << 8)
#define XE_VMA_PTE_COMPACT	(DRM_GPUVA_USERBITS << 9)
#define XE_VMA_DUMPABLE		(DRM_GPUVA_USERBITS << 10)
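
/*
 * Illustrative sketch (not part of the driver API): the XE_VMA_* values are
 * driver-private bits layered on top of DRM_GPUVA_USERBITS, so they live in
 * the generic drm_gpuva flags word and are tested or set there. The helper
 * name below is hypothetical.
 *
 *	static bool example_vma_is_read_only(struct xe_vma *vma)
 *	{
 *		return vma->gpuva.flags & XE_VMA_READ_ONLY;
 *	}
 */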

/** struct xe_userptr - User pointer */
struct xe_userptr {
	/** @invalidate_link: Link for the vm::userptr.invalidated list */
	struct list_head invalidate_link;
	/** @repin_link: link into the VM repin list if this is a userptr. */
	struct list_head repin_link;
	/**
	 * @notifier: MMU notifier for user pointer (invalidation callback)
	 */
	struct mmu_interval_notifier notifier;
	/** @sgt: storage for a scatter gather table */
	struct sg_table sgt;
	/** @sg: allocated scatter gather table */
	struct sg_table *sg;
	/** @notifier_seq: notifier sequence number */
	unsigned long notifier_seq;
	/**
	 * @initial_bind: user pointer has been bound at least once.
	 * write: vm->userptr.notifier_lock in read mode and vm->resv held.
	 * read: vm->userptr.notifier_lock in write mode or vm->resv held.
	 */
	bool initial_bind;
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
	/** @divisor: Userptr invalidation-injection divisor (debug only) */
	u32 divisor;
#endif
};
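
/*
 * Illustrative sketch (not part of the driver API): @notifier_seq follows the
 * usual mmu_interval_notifier pattern, where a sequence number sampled with
 * mmu_interval_read_begin() is later checked with mmu_interval_read_retry()
 * to detect an invalidation that raced with pinning the pages. The function
 * name is hypothetical.
 *
 *	static bool example_userptr_pages_still_valid(struct xe_userptr *up)
 *	{
 *		return !mmu_interval_read_retry(&up->notifier,
 *						up->notifier_seq);
 *	}
 */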

/** struct xe_vma - GPU virtual memory area */
struct xe_vma {
	/** @gpuva: Base GPUVA object */
	struct drm_gpuva gpuva;

	/**
	 * @combined_links: links into lists which are mutually exclusive.
	 * Locking: vm lock in write mode OR vm lock in read mode and the vm's
	 * resv.
	 */
	union {
		/** @rebind: link into VM if this VMA needs rebinding. */
		struct list_head rebind;
		/** @destroy: link to contested list when VM is being closed. */
		struct list_head destroy;
	} combined_links;

	union {
		/** @destroy_cb: callback to destroy VMA when unbind job is done */
		struct dma_fence_cb destroy_cb;
		/** @destroy_work: worker to destroy this VMA */
		struct work_struct destroy_work;
	};

	/** @tile_invalidated: Tile mask of tiles on which this VMA has been invalidated */
	u8 tile_invalidated;

	/** @tile_mask: Tile mask of where to create binding for this VMA */
	u8 tile_mask;

	/**
	 * @tile_present: Tile mask of bindings present for this VMA.
	 * Protected by vm->lock, vm->resv and, for userptrs,
	 * vm->userptr.notifier_lock for writing. Any of these is sufficient
	 * for reading, but if reading is done under the vm->lock only, it
	 * needs to be held in write mode.
	 */
	u8 tile_present;

	/**
	 * @pat_index: The PAT index to use when encoding the PTEs for this VMA.
	 */
	u16 pat_index;

	/**
	 * @ufence: The user fence that was provided with MAP.
	 * Needs to be signalled before UNMAP can be processed.
	 */
	struct xe_user_fence *ufence;
};

/**
 * struct xe_userptr_vma - A userptr vma subclass
 * @vma: The vma.
 * @userptr: Additional userptr information.
 */
struct xe_userptr_vma {
	struct xe_vma vma;
	struct xe_userptr userptr;
};
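
/*
 * Illustrative sketch (not part of the driver API): because struct xe_vma is
 * embedded both in struct xe_userptr_vma and (via @gpuva) in the GPUVM
 * machinery, the enclosing objects are recovered with container_of(). The
 * helper names below are hypothetical.
 *
 *	static struct xe_vma *example_gpuva_to_vma(struct drm_gpuva *gpuva)
 *	{
 *		return container_of(gpuva, struct xe_vma, gpuva);
 *	}
 *
 *	static struct xe_userptr_vma *example_to_userptr_vma(struct xe_vma *vma)
 *	{
 *		return container_of(vma, struct xe_userptr_vma, vma);
 *	}
 */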

struct xe_device;

struct xe_vm {
	/** @gpuvm: base GPUVM used to track VMAs */
	struct drm_gpuvm gpuvm;

	/** @xe: xe device */
	struct xe_device *xe;

	/** @q: exec queues used for (un)binding VMAs, one per tile */
	struct xe_exec_queue *q[XE_MAX_TILES_PER_DEVICE];

	/** @lru_bulk_move: Bulk LRU move list for this VM's BOs */
	struct ttm_lru_bulk_move lru_bulk_move;

	/** @size: Size of this VM's address space */
	u64 size;

	/** @pt_root: Page table root, one per tile */
	struct xe_pt *pt_root[XE_MAX_TILES_PER_DEVICE];
	/**
	 * @scratch_pt: Scratch page tables, one per tile and per page table
	 * level, used when XE_VM_FLAG_SCRATCH_PAGE is set
	 */
	struct xe_pt *scratch_pt[XE_MAX_TILES_PER_DEVICE][XE_VM_MAX_LEVEL];

	/**
	 * @flags: flags for this VM, statically set up at creation time aside
	 * from XE_VM_FLAG_BANNED which requires vm->lock to set / read safely.
	 * See the illustrative sketch following this structure for the tile id
	 * encoding.
	 */
#define XE_VM_FLAG_64K			BIT(0)
#define XE_VM_FLAG_LR_MODE		BIT(1)
#define XE_VM_FLAG_MIGRATION		BIT(2)
#define XE_VM_FLAG_SCRATCH_PAGE		BIT(3)
#define XE_VM_FLAG_FAULT_MODE		BIT(4)
#define XE_VM_FLAG_BANNED		BIT(5)
#define XE_VM_FLAG_TILE_ID(flags)	FIELD_GET(GENMASK(7, 6), flags)
#define XE_VM_FLAG_SET_TILE_ID(tile)	FIELD_PREP(GENMASK(7, 6), (tile)->id)
	unsigned long flags;

	/** @composite_fence_ctx: dma fence context for composite fences */
	u64 composite_fence_ctx;
	/** @composite_fence_seqno: seqno for composite fence */
	u32 composite_fence_seqno;

	/**
	 * @lock: outermost lock, protects the state of everything attached to
	 * this VM
	 */
	struct rw_semaphore lock;
	/**
	 * @snap_mutex: Mutex used to guard insertions and removals from gpuva,
	 * so we can take a snapshot safely from devcoredump.
	 */
	struct mutex snap_mutex;

	/**
	 * @rebind_list: list of VMAs that need rebinding. Protected by the
	 * vm->lock in write mode, OR (the vm->lock in read mode and the
	 * vm resv).
	 */
	struct list_head rebind_list;

	/**
	 * @destroy_work: worker to destroy the VM, needed because the last put
	 * can happen from dma-fence signaling in irq context while the destroy
	 * needs to be able to sleep.
	 */
	struct work_struct destroy_work;

	/**
	 * @rftree: range fence tree to track updates to page table structure.
	 * Used to implement conflict tracking between independent bind engines.
	 */
	struct xe_range_fence_tree rftree[XE_MAX_TILES_PER_DEVICE];

	/** @pt_ops: Page table operations for this VM */
	const struct xe_pt_ops *pt_ops;

	/** @userptr: user pointer state */
	struct {
		/**
		 * @userptr.repin_list: list of VMAs which are user pointers,
		 * and need repinning. Protected by @lock.
		 */
		struct list_head repin_list;
		/**
		 * @userptr.notifier_lock: protects notifier in write mode and
		 * submission in read mode.
		 */
		struct rw_semaphore notifier_lock;
		/**
		 * @userptr.invalidated_lock: Protects the
		 * @userptr.invalidated list.
		 */
		spinlock_t invalidated_lock;
		/**
		 * @userptr.invalidated: List of invalidated userptrs, not yet
		 * picked up for revalidation. Protected from access with the
		 * @userptr.invalidated_lock. Removing items from the list
		 * additionally requires @lock in write mode, and adding
		 * items to the list requires the @userptr.notifier_lock in
		 * write mode.
		 */
		struct list_head invalidated;
	} userptr;
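
	/*
	 * Illustrative sketch (not part of the driver API): the invalidation
	 * side adds entries to @userptr.invalidated while holding the notifier
	 * lock in write mode plus the spinlock, matching the locking rules
	 * documented above. The function and parameter names are hypothetical.
	 *
	 *	static void example_queue_for_revalidation(struct xe_vm *vm,
	 *						   struct xe_userptr_vma *uvma)
	 *	{
	 *		lockdep_assert_held_write(&vm->userptr.notifier_lock);
	 *
	 *		spin_lock(&vm->userptr.invalidated_lock);
	 *		list_move_tail(&uvma->userptr.invalidate_link,
	 *			       &vm->userptr.invalidated);
	 *		spin_unlock(&vm->userptr.invalidated_lock);
	 *	}
	 */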

	/** @preempt: preempt state */
	struct {
		/**
		 * @min_run_period_ms: The minimum run period before preempting
		 * an engine again
		 */
		s64 min_run_period_ms;
		/** @exec_queues: list of exec queues attached to this VM */
		struct list_head exec_queues;
		/** @num_exec_queues: number of exec queues attached to this VM */
		int num_exec_queues;
		/**
		 * @rebind_deactivated: Whether rebind has been temporarily deactivated
		 * due to no work available. Protected by the vm resv.
		 */
		bool rebind_deactivated;
		/**
		 * @rebind_work: worker to rebind invalidated userptrs / evicted
		 * BOs
		 */
		struct work_struct rebind_work;
	} preempt;

	/** @usm: unified shared memory state */
	struct {
		/** @asid: address space ID, unique to each VM */
		u32 asid;
		/**
		 * @last_fault_vma: Last fault VMA, used for fast lookup when we
		 * get a flood of faults to the same VMA
		 */
		struct xe_vma *last_fault_vma;
	} usm;

	/** @error_capture: error capture state, allows tracking errors */
	struct {
		/** @capture_once: capture only one error per VM */
		bool capture_once;
	} error_capture;

	/**
	 * @tlb_flush_seqno: Required TLB flush seqno for the next exec.
	 * Protected by the vm resv.
	 */
	u64 tlb_flush_seqno;
	/** @batch_invalidate_tlb: Always invalidate TLB before batch start */
	bool batch_invalidate_tlb;
	/** @xef: XE file handle for tracking this VM's drm client */
	struct xe_file *xef;
};
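
/*
 * Illustrative sketch (not part of the driver API): bits 7:6 of @flags encode
 * a tile id via FIELD_PREP()/FIELD_GET(), which is what XE_VM_FLAG_SET_TILE_ID()
 * and XE_VM_FLAG_TILE_ID() wrap. For example, a hypothetical tile with id 2
 * would be stored and read back as:
 *
 *	unsigned long flags = XE_VM_FLAG_MIGRATION |
 *			      FIELD_PREP(GENMASK(7, 6), 2);
 *	u8 tile_id = XE_VM_FLAG_TILE_ID(flags);	// == 2
 */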

/** struct xe_vma_op_map - VMA map operation */
struct xe_vma_op_map {
	/** @vma: VMA to map */
	struct xe_vma *vma;
	/** @is_null: is NULL binding */
	bool is_null;
	/** @dumpable: whether BO is dumped on GPU hang */
	bool dumpable;
	/** @pat_index: The PAT index to use for this operation. */
	u16 pat_index;
};

/** struct xe_vma_op_remap - VMA remap operation */
struct xe_vma_op_remap {
	/** @prev: VMA preceding part of a split mapping */
	struct xe_vma *prev;
	/** @next: VMA subsequent part of a split mapping */
	struct xe_vma *next;
	/** @start: start of the VMA unmap */
	u64 start;
	/** @range: range of the VMA unmap */
	u64 range;
	/** @skip_prev: skip prev rebind */
	bool skip_prev;
	/** @skip_next: skip next rebind */
	bool skip_next;
	/** @unmap_done: unmap operation is done */
	bool unmap_done;
};

/** struct xe_vma_op_prefetch - VMA prefetch operation */
struct xe_vma_op_prefetch {
	/** @region: memory region to prefetch to */
	u32 region;
};

/** enum xe_vma_op_flags - flags for VMA operation */
enum xe_vma_op_flags {
	/** @XE_VMA_OP_FIRST: first VMA operation for a set of syncs */
	XE_VMA_OP_FIRST			= BIT(0),
	/** @XE_VMA_OP_LAST: last VMA operation for a set of syncs */
	XE_VMA_OP_LAST			= BIT(1),
	/** @XE_VMA_OP_COMMITTED: VMA operation committed */
	XE_VMA_OP_COMMITTED		= BIT(2),
	/** @XE_VMA_OP_PREV_COMMITTED: Previous VMA operation committed */
	XE_VMA_OP_PREV_COMMITTED	= BIT(3),
	/** @XE_VMA_OP_NEXT_COMMITTED: Next VMA operation committed */
	XE_VMA_OP_NEXT_COMMITTED	= BIT(4),
};

/** struct xe_vma_op - VMA operation */
struct xe_vma_op {
	/** @base: GPUVA base operation */
	struct drm_gpuva_op base;
	/**
	 * @ops: GPUVA ops, when set call drm_gpuva_ops_free() after this
	 * operation is processed
	 */
	struct drm_gpuva_ops *ops;
	/** @q: exec queue for this operation */
	struct xe_exec_queue *q;
	/**
	 * @syncs: syncs for this operation, only used on first and last
	 * operation
	 */
	struct xe_sync_entry *syncs;
	/** @num_syncs: number of syncs */
	u32 num_syncs;
	/** @link: async operation link */
	struct list_head link;
	/** @flags: operation flags */
	enum xe_vma_op_flags flags;

	union {
		/** @map: VMA map operation specific data */
		struct xe_vma_op_map map;
		/** @remap: VMA remap operation specific data */
		struct xe_vma_op_remap remap;
		/** @prefetch: VMA prefetch operation specific data */
		struct xe_vma_op_prefetch prefetch;
	};
};
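
/*
 * Illustrative sketch (not part of the driver API): the anonymous union at the
 * end of struct xe_vma_op is selected by the generic operation type stored in
 * @base, so consumers typically dispatch on op->base.op. Syncs only need to be
 * handled on operations flagged XE_VMA_OP_FIRST / XE_VMA_OP_LAST. The function
 * name below is hypothetical.
 *
 *	static void example_process_op(struct xe_vma_op *op)
 *	{
 *		switch (op->base.op) {
 *		case DRM_GPUVA_OP_MAP:
 *			// op->map.vma, op->map.pat_index, ...
 *			break;
 *		case DRM_GPUVA_OP_REMAP:
 *			// op->remap.prev / op->remap.next, ...
 *			break;
 *		case DRM_GPUVA_OP_PREFETCH:
 *			// op->prefetch.region
 *			break;
 *		default:
 *			break;
 *		}
 *	}
 */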
#endif