/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

/**
 * DOC: I915_PARAM_VM_BIND_VERSION
 *
 * VM_BIND feature version supported.
 * See typedef drm_i915_getparam_t param.
 *
 * The following versions of VM_BIND have been defined:
 *
 * 0: No VM_BIND support.
 *
 * 1: In VM_UNBIND calls, the UMD must specify the exact mappings created
 *    previously with VM_BIND; the ioctl will not support unbinding multiple
 *    mappings or splitting them. Similarly, VM_BIND calls will not replace
 *    any existing mappings.
 *
 * 2: The restrictions on unbinding partial or multiple mappings are
 *    lifted. Similarly, binding will replace any mappings in the given range.
 *
 * See struct drm_i915_gem_vm_bind and struct drm_i915_gem_vm_unbind.
 */
#define I915_PARAM_VM_BIND_VERSION	57

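/*
 * Illustrative only, not part of the UAPI: a minimal sketch of querying the
 * VM_BIND version through GETPARAM, assuming an open DRM fd and libdrm's
 * drmIoctl() wrapper (<xf86drm.h>, <errno.h>).
 */
static inline int example_vm_bind_version(int fd)
{
	int value = 0;
	drm_i915_getparam_t gp = {
		.param = I915_PARAM_VM_BIND_VERSION,
		.value = &value,
	};

	/* On success, value is 0 (no support), 1 or 2 as described above. */
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -errno;

	return value;
}
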
/**
 * DOC: I915_VM_CREATE_FLAGS_USE_VM_BIND
 *
 * Flag to opt in to the VM_BIND mode of binding during VM creation.
 * See struct drm_i915_gem_vm_control flags.
 *
 * The older execbuf2 ioctl will not support the VM_BIND mode of operation.
 * For VM_BIND mode, there is a new execbuf3 ioctl which will not accept any
 * execlist (see struct drm_i915_gem_execbuffer3 for more details).
 */
#define I915_VM_CREATE_FLAGS_USE_VM_BIND	(1 << 0)

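/*
 * Illustrative only, not part of the UAPI: a sketch of opting a new address
 * space in to VM_BIND mode, assuming an open DRM fd and libdrm's drmIoctl().
 * The binding mode is selected at VM creation time via this flag.
 */
static inline int example_create_vm_bind_vm(int fd, __u32 *vm_id)
{
	struct drm_i915_gem_vm_control ctl = {
		.flags = I915_VM_CREATE_FLAGS_USE_VM_BIND,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_CREATE, &ctl))
		return -errno;

	*vm_id = ctl.vm_id;
	return 0;
}
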
/* VM_BIND related ioctls */
#define DRM_I915_GEM_VM_BIND		0x3d
#define DRM_I915_GEM_VM_UNBIND		0x3e
#define DRM_I915_GEM_EXECBUFFER3	0x3f

#define DRM_IOCTL_I915_GEM_VM_BIND		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
#define DRM_IOCTL_I915_GEM_VM_UNBIND		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_UNBIND, struct drm_i915_gem_vm_unbind)
#define DRM_IOCTL_I915_GEM_EXECBUFFER3		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER3, struct drm_i915_gem_execbuffer3)

/**
 * struct drm_i915_gem_timeline_fence - An input or output timeline fence.
 *
 * The operation will wait for the input fence to signal.
 *
 * The returned output fence will be signaled after the completion of the
 * operation.
 */
struct drm_i915_gem_timeline_fence {
	/** @handle: User's handle for a drm_syncobj to wait on or signal. */
	__u32 handle;

	/**
	 * @flags: Supported flags are:
	 *
	 * I915_TIMELINE_FENCE_WAIT:
	 * Wait for the input fence before the operation.
	 *
	 * I915_TIMELINE_FENCE_SIGNAL:
	 * Return operation completion fence as output.
	 */
	__u32 flags;
#define I915_TIMELINE_FENCE_WAIT            (1 << 0)
#define I915_TIMELINE_FENCE_SIGNAL          (1 << 1)
#define __I915_TIMELINE_FENCE_UNKNOWN_FLAGS (-(I915_TIMELINE_FENCE_SIGNAL << 1))

	/**
	 * @value: A point in the timeline.
	 * Value must be 0 for a binary drm_syncobj. A value of 0 for a
	 * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
	 * binary one.
	 */
	__u64 value;
};

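/*
 * Illustrative only, not part of the UAPI: filling in an output timeline
 * fence on point 1 of a fresh drm_syncobj, assuming libdrm's
 * drmSyncobjCreate(). The bind/unbind @fence takes an out fence only, so
 * I915_TIMELINE_FENCE_WAIT is deliberately not set here.
 */
static inline int example_out_fence(int fd,
				    struct drm_i915_gem_timeline_fence *fence)
{
	__u32 syncobj;

	if (drmSyncobjCreate(fd, 0, &syncobj))
		return -errno;

	fence->handle = syncobj;
	fence->flags = I915_TIMELINE_FENCE_SIGNAL;	/* request an out fence */
	fence->value = 1;				/* timeline point to signal */
	return 0;
}
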
/**
 * struct drm_i915_gem_vm_bind - VA to object mapping to bind.
 *
 * This structure is passed to the VM_BIND ioctl and specifies the mapping of
 * a GPU virtual address (VA) range to the section of an object that should be
 * bound in the device page table of the specified address space (VM).
 * The VA range specified must be unique (i.e., not currently bound) and can
 * be mapped to the whole object or to a section of the object (partial
 * binding). Multiple VA mappings can be created to the same section of the
 * object (aliasing).
 *
 * The @start, @offset and @length must be 4K page aligned. However, DG2
 * and XEHPSDV have a 64K page size for device local memory and a compact page
 * table. On those platforms, for binding device local-memory objects, the
 * @start, @offset and @length must be 64K aligned. Also, UMDs should not mix
 * the local memory 64K page and the system memory 4K page bindings in the same
 * 2M range.
 *
 * Error code -EINVAL will be returned if @start, @offset and @length are not
 * properly aligned. In version 1 (see I915_PARAM_VM_BIND_VERSION), error code
 * -ENOSPC will be returned if the VA range specified can't be reserved.
 *
 * VM_BIND/UNBIND ioctl calls executed on different CPU threads concurrently
 * are not ordered. Furthermore, parts of the VM_BIND operation can be done
 * asynchronously, if a valid @fence is specified.
 */
struct drm_i915_gem_vm_bind {
	/** @vm_id: VM (address space) id to bind */
	__u32 vm_id;

	/** @handle: Object handle */
	__u32 handle;

	/** @start: Virtual Address start to bind */
	__u64 start;

	/** @offset: Offset in object to bind */
	__u64 offset;

	/** @length: Length of mapping to bind */
	__u64 length;

	/**
	 * @flags: Supported flags are:
	 *
	 * I915_GEM_VM_BIND_CAPTURE:
	 * Capture this mapping in the dump upon GPU error.
	 *
	 * Note that @fence carries its own flags.
	 */
	__u64 flags;
#define I915_GEM_VM_BIND_CAPTURE	(1 << 0)

	/**
	 * @fence: Timeline fence for bind completion signaling.
	 *
	 * Timeline fence is of format struct drm_i915_gem_timeline_fence.
	 *
	 * It is an out fence, hence using the I915_TIMELINE_FENCE_WAIT flag
	 * is invalid, and an error will be returned.
	 *
	 * If the I915_TIMELINE_FENCE_SIGNAL flag is not set, then an out fence
	 * is not requested and the binding is completed synchronously.
	 */
	struct drm_i915_gem_timeline_fence fence;

	/**
	 * @extensions: Zero-terminated chain of extensions.
	 *
	 * For future extensions. See struct i915_user_extension.
	 */
	__u64 extensions;
};

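/*
 * Illustrative only, not part of the UAPI: a sketch of a synchronous bind of
 * a whole 64KiB object at a placeholder VA, assuming an open DRM fd and
 * libdrm's drmIoctl(). I915_TIMELINE_FENCE_SIGNAL is not requested, so the
 * ioctl completes the binding before returning.
 */
static inline int example_vm_bind(int fd, __u32 vm_id, __u32 handle)
{
	struct drm_i915_gem_vm_bind bind = {
		.vm_id = vm_id,
		.handle = handle,
		.start = 0x100000,	/* placeholder, suitably aligned VA */
		.offset = 0,		/* bind from the start of the object */
		.length = 0x10000,	/* whole 64KiB object */
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_BIND, &bind))
		return -errno;

	return 0;
}
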
/**
 * struct drm_i915_gem_vm_unbind - VA to object mapping to unbind.
 *
 * This structure is passed to the VM_UNBIND ioctl and specifies the GPU
 * virtual address (VA) range that should be unbound from the device page
 * table of the specified address space (VM). VM_UNBIND will force unbind the
 * specified range from the device page table without waiting for any GPU job
 * to complete. It is the UMD's responsibility to ensure the mapping is no
 * longer in use before calling VM_UNBIND.
 *
 * If the specified mapping is not found, the ioctl will simply return without
 * any error.
 *
 * VM_BIND/UNBIND ioctl calls executed on different CPU threads concurrently
 * are not ordered. Furthermore, parts of the VM_UNBIND operation can be done
 * asynchronously, if a valid @fence is specified.
 */
struct drm_i915_gem_vm_unbind {
	/** @vm_id: VM (address space) id to unbind from */
	__u32 vm_id;

	/** @rsvd: Reserved, MBZ */
	__u32 rsvd;

	/** @start: Virtual Address start to unbind */
	__u64 start;

	/** @length: Length of mapping to unbind */
	__u64 length;

	/**
	 * @flags: Currently reserved, MBZ.
	 *
	 * Note that @fence carries its own flags.
	 */
	__u64 flags;

	/**
	 * @fence: Timeline fence for unbind completion signaling.
	 *
	 * Timeline fence is of format struct drm_i915_gem_timeline_fence.
	 *
	 * It is an out fence, hence using the I915_TIMELINE_FENCE_WAIT flag
	 * is invalid, and an error will be returned.
	 *
	 * If the I915_TIMELINE_FENCE_SIGNAL flag is not set, then an out fence
	 * is not requested and the unbinding is completed synchronously.
	 */
	struct drm_i915_gem_timeline_fence fence;

	/**
	 * @extensions: Zero-terminated chain of extensions.
	 *
	 * For future extensions. See struct i915_user_extension.
	 */
	__u64 extensions;
};

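/*
 * Illustrative only, not part of the UAPI: a sketch of a synchronous unbind
 * of the placeholder range bound in the example above, assuming libdrm's
 * drmIoctl(). The UMD must ensure the mapping is idle first; the ioctl will
 * not wait for GPU jobs.
 */
static inline int example_vm_unbind(int fd, __u32 vm_id)
{
	struct drm_i915_gem_vm_unbind unbind = {
		.vm_id = vm_id,
		.start = 0x100000,	/* placeholder VA used at bind time */
		.length = 0x10000,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_VM_UNBIND, &unbind))
		return -errno;

	return 0;
}
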
/**
 * struct drm_i915_gem_execbuffer3 - Structure for DRM_I915_GEM_EXECBUFFER3
 * ioctl.
 *
 * The DRM_I915_GEM_EXECBUFFER3 ioctl only works in VM_BIND mode, and VM_BIND
 * mode only works with this ioctl for submission.
 * See I915_VM_CREATE_FLAGS_USE_VM_BIND.
 */
struct drm_i915_gem_execbuffer3 {
	/**
	 * @ctx_id: Context id
	 *
	 * Only contexts with a user engine map are allowed.
	 */
	__u32 ctx_id;

	/**
	 * @engine_idx: Engine index
	 *
	 * An index in the user engine map of the context specified by @ctx_id.
	 */
	__u32 engine_idx;

	/**
	 * @batch_address: Batch gpu virtual address/es.
	 *
	 * For normal submission, it is the gpu virtual address of the batch
	 * buffer. For parallel submission, it is a pointer to an array of
	 * batch buffer gpu virtual addresses, with array size equal to the
	 * number of (parallel) engines involved in that submission (see
	 * struct i915_context_engines_parallel_submit).
	 */
	__u64 batch_address;

	/** @flags: Currently reserved, MBZ */
	__u64 flags;

	/** @rsvd1: Reserved, MBZ */
	__u32 rsvd1;

	/** @fence_count: Number of fences in the @timeline_fences array. */
	__u32 fence_count;

	/**
	 * @timeline_fences: Pointer to an array of timeline fences.
	 *
	 * Timeline fences are of format struct drm_i915_gem_timeline_fence.
	 */
	__u64 timeline_fences;

	/** @rsvd2: Reserved, MBZ */
	__u64 rsvd2;

	/**
	 * @extensions: Zero-terminated chain of extensions.
	 *
	 * For future extensions. See struct i915_user_extension.
	 */
	__u64 extensions;
};

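/*
 * Illustrative only, not part of the UAPI: a minimal execbuf3 submission on
 * engine index 0 of a VM_BIND context, signaling one timeline fence on
 * completion. ctx_id, batch_va and fence are placeholders supplied by the
 * caller; assumes libdrm's drmIoctl() and <stdint.h> for uintptr_t.
 */
static inline int example_execbuf3(int fd, __u32 ctx_id, __u64 batch_va,
				   struct drm_i915_gem_timeline_fence *fence)
{
	struct drm_i915_gem_execbuffer3 exec = {
		.ctx_id = ctx_id,
		.engine_idx = 0,
		.batch_address = batch_va,	/* VA bound via VM_BIND */
		.fence_count = 1,
		.timeline_fences = (__u64)(uintptr_t)fence,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER3, &exec))
		return -errno;

	return 0;
}
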
/**
 * struct drm_i915_gem_create_ext_vm_private - Extension to make the object
 * private to the specified VM.
 *
 * See struct drm_i915_gem_create_ext.
 */
struct drm_i915_gem_create_ext_vm_private {
#define I915_GEM_CREATE_EXT_VM_PRIVATE		2
	/** @base: Extension link. See struct i915_user_extension. */
	struct i915_user_extension base;

	/** @vm_id: Id of the VM to which the object is private */
	__u32 vm_id;
};

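/*
 * Illustrative only, not part of the UAPI: a sketch of creating a 4KiB
 * object private to @vm_id by chaining this extension into the existing
 * GEM_CREATE_EXT ioctl, assuming libdrm's drmIoctl() and <stdint.h>.
 */
static inline int example_create_vm_private(int fd, __u32 vm_id, __u32 *handle)
{
	struct drm_i915_gem_create_ext_vm_private priv = {
		.base.name = I915_GEM_CREATE_EXT_VM_PRIVATE,
		.vm_id = vm_id,
	};
	struct drm_i915_gem_create_ext create = {
		.size = 0x1000,
		.extensions = (__u64)(uintptr_t)&priv,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create))
		return -errno;

	*handle = create.handle;
	return 0;
}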