1/*
2 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the
14 * next paragraph) shall be included in all copies or substantial portions
15 * of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
20 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 *
25 */
26
27#ifndef _UAPI_I915_DRM_H_
28#define _UAPI_I915_DRM_H_
29
30#include "drm.h"
31
32#if defined(__cplusplus)
33extern "C" {
34#endif
35
36/* Please note that modifications to all structs defined here are
37 * subject to backwards-compatibility constraints.
38 */
39
40/**
41 * DOC: uevents generated by i915 on its device node
42 *
43 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
44 *	event from the GPU L3 cache. Additional information supplied is ROW,
45 *	BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
46 *	track of these events, and if a specific cache-line seems to have a
47 *	persistent error, remap it with the L3 remapping tool supplied in
48 *	intel-gpu-tools.  The value supplied with the event is always 1.
49 *
50 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
51 *	hangcheck. The error detection event is a good indicator of when things
52 *	began to go badly. The value supplied with the event is a 1 upon error
53 *	detection, and a 0 upon reset completion, signifying no more error
54 *	exists. NOTE: Disabling hangcheck or reset via module parameter will
55 *	cause the related events to not be seen.
56 *
57 * I915_RESET_UEVENT - Event is generated just before an attempt to reset the
58 *	GPU. The value supplied with the event is always 1. NOTE: Disable
59 *	reset via module parameter will cause this event to not be seen.
60 */
61#define I915_L3_PARITY_UEVENT		"L3_PARITY_ERROR"
62#define I915_ERROR_UEVENT		"ERROR"
63#define I915_RESET_UEVENT		"RESET"
64
65/**
66 * struct i915_user_extension - Base class for defining a chain of extensions
67 *
68 * Many interfaces need to grow over time. In most cases we can simply
69 * extend the struct and have userspace pass in more data. Another option,
70 * as demonstrated by Vulkan's approach to providing extensions for forward
71 * and backward compatibility, is to use a list of optional structs to
72 * provide those extra details.
73 *
74 * The key advantage to using an extension chain is that it allows us to
75 * redefine the interface more easily than an ever growing struct of
76 * increasing complexity, and for large parts of that interface to be
77 * entirely optional. The downside is more pointer chasing; chasing across
78 * the __user boundary with pointers encapsulated inside u64.
79 *
80 * Example chaining:
81 *
82 * .. code-block:: C
83 *
84 *	struct i915_user_extension ext3 {
85 *		.next_extension = 0, // end
86 *		.name = ...,
87 *	};
88 *	struct i915_user_extension ext2 {
89 *		.next_extension = (uintptr_t)&ext3,
90 *		.name = ...,
91 *	};
92 *	struct i915_user_extension ext1 {
93 *		.next_extension = (uintptr_t)&ext2,
94 *		.name = ...,
95 *	};
96 *
97 * Typically the struct i915_user_extension would be embedded in some uAPI
98 * struct, and in this case we would feed it the head of the chain(i.e ext1),
99 * which would then apply all of the above extensions.
100 *
101 */
102struct i915_user_extension {
103	/**
104	 * @next_extension:
105	 *
106	 * Pointer to the next struct i915_user_extension, or zero if the end.
107	 */
108	__u64 next_extension;
109	/**
110	 * @name: Name of the extension.
111	 *
112	 * Note that the name here is just some integer.
113	 *
114	 * Also note that the name space for this is not global for the whole
115	 * driver, but rather its scope/meaning is limited to the specific piece
116	 * of uAPI which has embedded the struct i915_user_extension.
117	 */
118	__u32 name;
119	/**
120	 * @flags: MBZ
121	 *
122	 * All undefined bits must be zero.
123	 */
124	__u32 flags;
125	/**
126	 * @rsvd: MBZ
127	 *
128	 * Reserved for future use; must be zero.
129	 */
130	__u32 rsvd[4];
131};
132
133/*
134 * MOCS indexes used for GPU surfaces, defining the cacheability of the
135 * surface data and the coherency for this data wrt. CPU vs. GPU accesses.
136 */
137enum i915_mocs_table_index {
138	/*
139	 * Not cached anywhere, coherency between CPU and GPU accesses is
140	 * guaranteed.
141	 */
142	I915_MOCS_UNCACHED,
143	/*
144	 * Cacheability and coherency controlled by the kernel automatically
145	 * based on the DRM_I915_GEM_SET_CACHING IOCTL setting and the current
146	 * usage of the surface (used for display scanout or not).
147	 */
148	I915_MOCS_PTE,
149	/*
150	 * Cached in all GPU caches available on the platform.
151	 * Coherency between CPU and GPU accesses to the surface is not
152	 * guaranteed without extra synchronization.
153	 */
154	I915_MOCS_CACHED,
155};
156
157/**
158 * enum drm_i915_gem_engine_class - uapi engine type enumeration
159 *
160 * Different engines serve different roles, and there may be more than one
161 * engine serving each role.  This enum provides a classification of the role
162 * of the engine, which may be used when requesting operations to be performed
163 * on a certain subset of engines, or for providing information about that
164 * group.
165 */
166enum drm_i915_gem_engine_class {
167	/**
168	 * @I915_ENGINE_CLASS_RENDER:
169	 *
170	 * Render engines support instructions used for 3D, Compute (GPGPU),
171	 * and programmable media workloads.  These instructions fetch data and
172	 * dispatch individual work items to threads that operate in parallel.
173	 * The threads run small programs (called "kernels" or "shaders") on
174	 * the GPU's execution units (EUs).
175	 */
176	I915_ENGINE_CLASS_RENDER	= 0,
177
178	/**
179	 * @I915_ENGINE_CLASS_COPY:
180	 *
181	 * Copy engines (also referred to as "blitters") support instructions
182	 * that move blocks of data from one location in memory to another,
183	 * or that fill a specified location of memory with fixed data.
184	 * Copy engines can perform pre-defined logical or bitwise operations
185	 * on the source, destination, or pattern data.
186	 */
187	I915_ENGINE_CLASS_COPY		= 1,
188
189	/**
190	 * @I915_ENGINE_CLASS_VIDEO:
191	 *
192	 * Video engines (also referred to as "bit stream decode" (BSD) or
193	 * "vdbox") support instructions that perform fixed-function media
194	 * decode and encode.
195	 */
196	I915_ENGINE_CLASS_VIDEO		= 2,
197
198	/**
199	 * @I915_ENGINE_CLASS_VIDEO_ENHANCE:
200	 *
201	 * Video enhancement engines (also referred to as "vebox") support
202	 * instructions related to image enhancement.
203	 */
204	I915_ENGINE_CLASS_VIDEO_ENHANCE	= 3,
205
206	/**
207	 * @I915_ENGINE_CLASS_COMPUTE:
208	 *
209	 * Compute engines support a subset of the instructions available
210	 * on render engines:  compute engines support Compute (GPGPU) and
211	 * programmable media workloads, but do not support the 3D pipeline.
212	 */
213	I915_ENGINE_CLASS_COMPUTE	= 4,
214
215	/* Values in this enum should be kept compact. */
216
217	/**
218	 * @I915_ENGINE_CLASS_INVALID:
219	 *
220	 * Placeholder value to represent an invalid engine class assignment.
221	 */
222	I915_ENGINE_CLASS_INVALID	= -1
223};
224
225/**
226 * struct i915_engine_class_instance - Engine class/instance identifier
227 *
228 * There may be more than one engine fulfilling any role within the system.
229 * Each engine of a class is given a unique instance number and therefore
230 * any engine can be specified by its class:instance tuplet. APIs that allow
231 * access to any engine in the system will use struct i915_engine_class_instance
232 * for this identification.
233 */
234struct i915_engine_class_instance {
235	/**
236	 * @engine_class:
237	 *
238	 * Engine class from enum drm_i915_gem_engine_class
239	 */
240	__u16 engine_class;
241#define I915_ENGINE_CLASS_INVALID_NONE -1
242#define I915_ENGINE_CLASS_INVALID_VIRTUAL -2
243
244	/**
245	 * @engine_instance:
246	 *
247	 * Engine instance.
248	 */
249	__u16 engine_instance;
250};
251
252/**
253 * DOC: perf_events exposed by i915 through /sys/bus/event_sources/drivers/i915
254 *
255 */
256
257enum drm_i915_pmu_engine_sample {
258	I915_SAMPLE_BUSY = 0,
259	I915_SAMPLE_WAIT = 1,
260	I915_SAMPLE_SEMA = 2
261};
262
263#define I915_PMU_SAMPLE_BITS (4)
264#define I915_PMU_SAMPLE_MASK (0xf)
265#define I915_PMU_SAMPLE_INSTANCE_BITS (8)
266#define I915_PMU_CLASS_SHIFT \
267	(I915_PMU_SAMPLE_BITS + I915_PMU_SAMPLE_INSTANCE_BITS)
268
269#define __I915_PMU_ENGINE(class, instance, sample) \
270	((class) << I915_PMU_CLASS_SHIFT | \
271	(instance) << I915_PMU_SAMPLE_BITS | \
272	(sample))
273
274#define I915_PMU_ENGINE_BUSY(class, instance) \
275	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_BUSY)
276
277#define I915_PMU_ENGINE_WAIT(class, instance) \
278	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_WAIT)
279
280#define I915_PMU_ENGINE_SEMA(class, instance) \
281	__I915_PMU_ENGINE(class, instance, I915_SAMPLE_SEMA)
282
283/*
284 * Top 4 bits of every non-engine counter are GT id.
285 */
286#define __I915_PMU_GT_SHIFT (60)
287
288#define ___I915_PMU_OTHER(gt, x) \
289	(((__u64)__I915_PMU_ENGINE(0xff, 0xff, 0xf) + 1 + (x)) | \
290	((__u64)(gt) << __I915_PMU_GT_SHIFT))
291
292#define __I915_PMU_OTHER(x) ___I915_PMU_OTHER(0, x)
293
294#define I915_PMU_ACTUAL_FREQUENCY	__I915_PMU_OTHER(0)
295#define I915_PMU_REQUESTED_FREQUENCY	__I915_PMU_OTHER(1)
296#define I915_PMU_INTERRUPTS		__I915_PMU_OTHER(2)
297#define I915_PMU_RC6_RESIDENCY		__I915_PMU_OTHER(3)
298#define I915_PMU_SOFTWARE_GT_AWAKE_TIME	__I915_PMU_OTHER(4)
299
300#define I915_PMU_LAST /* Deprecated - do not use */ I915_PMU_RC6_RESIDENCY
301
302#define __I915_PMU_ACTUAL_FREQUENCY(gt)		___I915_PMU_OTHER(gt, 0)
303#define __I915_PMU_REQUESTED_FREQUENCY(gt)	___I915_PMU_OTHER(gt, 1)
304#define __I915_PMU_INTERRUPTS(gt)		___I915_PMU_OTHER(gt, 2)
305#define __I915_PMU_RC6_RESIDENCY(gt)		___I915_PMU_OTHER(gt, 3)
306#define __I915_PMU_SOFTWARE_GT_AWAKE_TIME(gt)	___I915_PMU_OTHER(gt, 4)
307
308/* Each region is a minimum of 16k, and there are at most 255 of them.
309 */
310#define I915_NR_TEX_REGIONS 255	/* table size 2k - maximum due to use
311				 * of chars for next/prev indices */
312#define I915_LOG_MIN_TEX_REGION_SIZE 14
313
314typedef struct _drm_i915_init {
315	enum {
316		I915_INIT_DMA = 0x01,
317		I915_CLEANUP_DMA = 0x02,
318		I915_RESUME_DMA = 0x03
319	} func;
320	unsigned int mmio_offset;
321	int sarea_priv_offset;
322	unsigned int ring_start;
323	unsigned int ring_end;
324	unsigned int ring_size;
325	unsigned int front_offset;
326	unsigned int back_offset;
327	unsigned int depth_offset;
328	unsigned int w;
329	unsigned int h;
330	unsigned int pitch;
331	unsigned int pitch_bits;
332	unsigned int back_pitch;
333	unsigned int depth_pitch;
334	unsigned int cpp;
335	unsigned int chipset;
336} drm_i915_init_t;
337
338typedef struct _drm_i915_sarea {
339	struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
340	int last_upload;	/* last time texture was uploaded */
341	int last_enqueue;	/* last time a buffer was enqueued */
342	int last_dispatch;	/* age of the most recently dispatched buffer */
343	int ctxOwner;		/* last context to upload state */
344	int texAge;
345	int pf_enabled;		/* is pageflipping allowed? */
346	int pf_active;
347	int pf_current_page;	/* which buffer is being displayed? */
348	int perf_boxes;		/* performance boxes to be displayed */
349	int width, height;      /* screen size in pixels */
350
351	drm_handle_t front_handle;
352	int front_offset;
353	int front_size;
354
355	drm_handle_t back_handle;
356	int back_offset;
357	int back_size;
358
359	drm_handle_t depth_handle;
360	int depth_offset;
361	int depth_size;
362
363	drm_handle_t tex_handle;
364	int tex_offset;
365	int tex_size;
366	int log_tex_granularity;
367	int pitch;
368	int rotation;           /* 0, 90, 180 or 270 */
369	int rotated_offset;
370	int rotated_size;
371	int rotated_pitch;
372	int virtualX, virtualY;
373
374	unsigned int front_tiled;
375	unsigned int back_tiled;
376	unsigned int depth_tiled;
377	unsigned int rotated_tiled;
378	unsigned int rotated2_tiled;
379
380	int pipeA_x;
381	int pipeA_y;
382	int pipeA_w;
383	int pipeA_h;
384	int pipeB_x;
385	int pipeB_y;
386	int pipeB_w;
387	int pipeB_h;
388
389	/* fill out some space for old userspace triple buffer */
390	drm_handle_t unused_handle;
391	__u32 unused1, unused2, unused3;
392
393	/* buffer object handles for static buffers. May change
394	 * over the lifetime of the client.
395	 */
396	__u32 front_bo_handle;
397	__u32 back_bo_handle;
398	__u32 unused_bo_handle;
399	__u32 depth_bo_handle;
400
401} drm_i915_sarea_t;
402
403/* due to userspace building against these headers we need some compat here */
404#define planeA_x pipeA_x
405#define planeA_y pipeA_y
406#define planeA_w pipeA_w
407#define planeA_h pipeA_h
408#define planeB_x pipeB_x
409#define planeB_y pipeB_y
410#define planeB_w pipeB_w
411#define planeB_h pipeB_h
412
413/* Flags for perf_boxes
414 */
415#define I915_BOX_RING_EMPTY    0x1
416#define I915_BOX_FLIP          0x2
417#define I915_BOX_WAIT          0x4
418#define I915_BOX_TEXTURE_LOAD  0x8
419#define I915_BOX_LOST_CONTEXT  0x10
420
421/*
422 * i915 specific ioctls.
423 *
424 * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END) ie
425 * [0x40, 0xa0) (a0 is excluded). The numbers below are defined as offset
426 * against DRM_COMMAND_BASE and should be between [0x0, 0x60).
427 */
428#define DRM_I915_INIT		0x00
429#define DRM_I915_FLUSH		0x01
430#define DRM_I915_FLIP		0x02
431#define DRM_I915_BATCHBUFFER	0x03
432#define DRM_I915_IRQ_EMIT	0x04
433#define DRM_I915_IRQ_WAIT	0x05
434#define DRM_I915_GETPARAM	0x06
435#define DRM_I915_SETPARAM	0x07
436#define DRM_I915_ALLOC		0x08
437#define DRM_I915_FREE		0x09
438#define DRM_I915_INIT_HEAP	0x0a
439#define DRM_I915_CMDBUFFER	0x0b
440#define DRM_I915_DESTROY_HEAP	0x0c
441#define DRM_I915_SET_VBLANK_PIPE	0x0d
442#define DRM_I915_GET_VBLANK_PIPE	0x0e
443#define DRM_I915_VBLANK_SWAP	0x0f
444#define DRM_I915_HWS_ADDR	0x11
445#define DRM_I915_GEM_INIT	0x13
446#define DRM_I915_GEM_EXECBUFFER	0x14
447#define DRM_I915_GEM_PIN	0x15
448#define DRM_I915_GEM_UNPIN	0x16
449#define DRM_I915_GEM_BUSY	0x17
450#define DRM_I915_GEM_THROTTLE	0x18
451#define DRM_I915_GEM_ENTERVT	0x19
452#define DRM_I915_GEM_LEAVEVT	0x1a
453#define DRM_I915_GEM_CREATE	0x1b
454#define DRM_I915_GEM_PREAD	0x1c
455#define DRM_I915_GEM_PWRITE	0x1d
456#define DRM_I915_GEM_MMAP	0x1e
457#define DRM_I915_GEM_SET_DOMAIN	0x1f
458#define DRM_I915_GEM_SW_FINISH	0x20
459#define DRM_I915_GEM_SET_TILING	0x21
460#define DRM_I915_GEM_GET_TILING	0x22
461#define DRM_I915_GEM_GET_APERTURE 0x23
462#define DRM_I915_GEM_MMAP_GTT	0x24
463#define DRM_I915_GET_PIPE_FROM_CRTC_ID	0x25
464#define DRM_I915_GEM_MADVISE	0x26
465#define DRM_I915_OVERLAY_PUT_IMAGE	0x27
466#define DRM_I915_OVERLAY_ATTRS	0x28
467#define DRM_I915_GEM_EXECBUFFER2	0x29
468#define DRM_I915_GEM_EXECBUFFER2_WR	DRM_I915_GEM_EXECBUFFER2
469#define DRM_I915_GET_SPRITE_COLORKEY	0x2a
470#define DRM_I915_SET_SPRITE_COLORKEY	0x2b
471#define DRM_I915_GEM_WAIT	0x2c
472#define DRM_I915_GEM_CONTEXT_CREATE	0x2d
473#define DRM_I915_GEM_CONTEXT_DESTROY	0x2e
474#define DRM_I915_GEM_SET_CACHING	0x2f
475#define DRM_I915_GEM_GET_CACHING	0x30
476#define DRM_I915_REG_READ		0x31
477#define DRM_I915_GET_RESET_STATS	0x32
478#define DRM_I915_GEM_USERPTR		0x33
479#define DRM_I915_GEM_CONTEXT_GETPARAM	0x34
480#define DRM_I915_GEM_CONTEXT_SETPARAM	0x35
481#define DRM_I915_PERF_OPEN		0x36
482#define DRM_I915_PERF_ADD_CONFIG	0x37
483#define DRM_I915_PERF_REMOVE_CONFIG	0x38
484#define DRM_I915_QUERY			0x39
485#define DRM_I915_GEM_VM_CREATE		0x3a
486#define DRM_I915_GEM_VM_DESTROY		0x3b
487#define DRM_I915_GEM_CREATE_EXT		0x3c
488/* Must be kept compact -- no holes */
489
490#define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
491#define DRM_IOCTL_I915_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
492#define DRM_IOCTL_I915_FLIP		DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
493#define DRM_IOCTL_I915_BATCHBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
494#define DRM_IOCTL_I915_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
495#define DRM_IOCTL_I915_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
496#define DRM_IOCTL_I915_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
497#define DRM_IOCTL_I915_SETPARAM         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
498#define DRM_IOCTL_I915_ALLOC            DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
499#define DRM_IOCTL_I915_FREE             DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
500#define DRM_IOCTL_I915_INIT_HEAP        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
501#define DRM_IOCTL_I915_CMDBUFFER	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
502#define DRM_IOCTL_I915_DESTROY_HEAP	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
503#define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
504#define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
505#define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
506#define DRM_IOCTL_I915_HWS_ADDR		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
507#define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
508#define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
509#define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
510#define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
511#define DRM_IOCTL_I915_GEM_PIN		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
512#define DRM_IOCTL_I915_GEM_UNPIN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
513#define DRM_IOCTL_I915_GEM_BUSY		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
514#define DRM_IOCTL_I915_GEM_SET_CACHING		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
515#define DRM_IOCTL_I915_GEM_GET_CACHING		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
516#define DRM_IOCTL_I915_GEM_THROTTLE	DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
517#define DRM_IOCTL_I915_GEM_ENTERVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
518#define DRM_IOCTL_I915_GEM_LEAVEVT	DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
519#define DRM_IOCTL_I915_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
520#define DRM_IOCTL_I915_GEM_CREATE_EXT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE_EXT, struct drm_i915_gem_create_ext)
521#define DRM_IOCTL_I915_GEM_PREAD	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
522#define DRM_IOCTL_I915_GEM_PWRITE	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
523#define DRM_IOCTL_I915_GEM_MMAP		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
524#define DRM_IOCTL_I915_GEM_MMAP_GTT	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
525#define DRM_IOCTL_I915_GEM_MMAP_OFFSET	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_offset)
526#define DRM_IOCTL_I915_GEM_SET_DOMAIN	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
527#define DRM_IOCTL_I915_GEM_SW_FINISH	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
528#define DRM_IOCTL_I915_GEM_SET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
529#define DRM_IOCTL_I915_GEM_GET_TILING	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
530#define DRM_IOCTL_I915_GEM_GET_APERTURE	DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
531#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
532#define DRM_IOCTL_I915_GEM_MADVISE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
533#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
534#define DRM_IOCTL_I915_OVERLAY_ATTRS	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
535#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
536#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
537#define DRM_IOCTL_I915_GEM_WAIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
538#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
539#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext)
540#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
541#define DRM_IOCTL_I915_REG_READ			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
542#define DRM_IOCTL_I915_GET_RESET_STATS		DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
543#define DRM_IOCTL_I915_GEM_USERPTR			DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
544#define DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_GETPARAM, struct drm_i915_gem_context_param)
545#define DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM	DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_SETPARAM, struct drm_i915_gem_context_param)
546#define DRM_IOCTL_I915_PERF_OPEN	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_OPEN, struct drm_i915_perf_open_param)
547#define DRM_IOCTL_I915_PERF_ADD_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_ADD_CONFIG, struct drm_i915_perf_oa_config)
548#define DRM_IOCTL_I915_PERF_REMOVE_CONFIG	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_PERF_REMOVE_CONFIG, __u64)
549#define DRM_IOCTL_I915_QUERY			DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_QUERY, struct drm_i915_query)
550#define DRM_IOCTL_I915_GEM_VM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_CREATE, struct drm_i915_gem_vm_control)
551#define DRM_IOCTL_I915_GEM_VM_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)
552
553/* Allow drivers to submit batchbuffers directly to hardware, relying
554 * on the security mechanisms provided by hardware.
555 */
556typedef struct drm_i915_batchbuffer {
557	int start;		/* agp offset */
558	int used;		/* nr bytes in use */
559	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
560	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
561	int num_cliprects;	/* mulitpass with multiple cliprects? */
562	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
563} drm_i915_batchbuffer_t;
564
565/* As above, but pass a pointer to userspace buffer which can be
566 * validated by the kernel prior to sending to hardware.
567 */
568typedef struct _drm_i915_cmdbuffer {
569	char __user *buf;	/* pointer to userspace command buffer */
570	int sz;			/* nr bytes in buf */
571	int DR1;		/* hw flags for GFX_OP_DRAWRECT_INFO */
572	int DR4;		/* window origin for GFX_OP_DRAWRECT_INFO */
573	int num_cliprects;	/* mulitpass with multiple cliprects? */
574	struct drm_clip_rect __user *cliprects;	/* pointer to userspace cliprects */
575} drm_i915_cmdbuffer_t;
576
577/* Userspace can request & wait on irq's:
578 */
579typedef struct drm_i915_irq_emit {
580	int __user *irq_seq;
581} drm_i915_irq_emit_t;
582
583typedef struct drm_i915_irq_wait {
584	int irq_seq;
585} drm_i915_irq_wait_t;
586
587/*
588 * Different modes of per-process Graphics Translation Table,
589 * see I915_PARAM_HAS_ALIASING_PPGTT
590 */
591#define I915_GEM_PPGTT_NONE	0
592#define I915_GEM_PPGTT_ALIASING	1
593#define I915_GEM_PPGTT_FULL	2
594
595/* Ioctl to query kernel params:
596 */
597#define I915_PARAM_IRQ_ACTIVE            1
598#define I915_PARAM_ALLOW_BATCHBUFFER     2
599#define I915_PARAM_LAST_DISPATCH         3
600#define I915_PARAM_CHIPSET_ID            4
601#define I915_PARAM_HAS_GEM               5
602#define I915_PARAM_NUM_FENCES_AVAIL      6
603#define I915_PARAM_HAS_OVERLAY           7
604#define I915_PARAM_HAS_PAGEFLIPPING	 8
605#define I915_PARAM_HAS_EXECBUF2          9
606#define I915_PARAM_HAS_BSD		 10
607#define I915_PARAM_HAS_BLT		 11
608#define I915_PARAM_HAS_RELAXED_FENCING	 12
609#define I915_PARAM_HAS_COHERENT_RINGS	 13
610#define I915_PARAM_HAS_EXEC_CONSTANTS	 14
611#define I915_PARAM_HAS_RELAXED_DELTA	 15
612#define I915_PARAM_HAS_GEN7_SOL_RESET	 16
613#define I915_PARAM_HAS_LLC     	 	 17
614#define I915_PARAM_HAS_ALIASING_PPGTT	 18
615#define I915_PARAM_HAS_WAIT_TIMEOUT	 19
616#define I915_PARAM_HAS_SEMAPHORES	 20
617#define I915_PARAM_HAS_PRIME_VMAP_FLUSH	 21
618#define I915_PARAM_HAS_VEBOX		 22
619#define I915_PARAM_HAS_SECURE_BATCHES	 23
620#define I915_PARAM_HAS_PINNED_BATCHES	 24
621#define I915_PARAM_HAS_EXEC_NO_RELOC	 25
622#define I915_PARAM_HAS_EXEC_HANDLE_LUT   26
623#define I915_PARAM_HAS_WT     	 	 27
624#define I915_PARAM_CMD_PARSER_VERSION	 28
625#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
626#define I915_PARAM_MMAP_VERSION          30
627#define I915_PARAM_HAS_BSD2		 31
628#define I915_PARAM_REVISION              32
629#define I915_PARAM_SUBSLICE_TOTAL	 33
630#define I915_PARAM_EU_TOTAL		 34
631#define I915_PARAM_HAS_GPU_RESET	 35
632#define I915_PARAM_HAS_RESOURCE_STREAMER 36
633#define I915_PARAM_HAS_EXEC_SOFTPIN	 37
634#define I915_PARAM_HAS_POOLED_EU	 38
635#define I915_PARAM_MIN_EU_IN_POOL	 39
636#define I915_PARAM_MMAP_GTT_VERSION	 40
637
638/*
639 * Query whether DRM_I915_GEM_EXECBUFFER2 supports user defined execution
640 * priorities and the driver will attempt to execute batches in priority order.
641 * The param returns a capability bitmask, nonzero implies that the scheduler
642 * is enabled, with different features present according to the mask.
643 *
644 * The initial priority for each batch is supplied by the context and is
645 * controlled via I915_CONTEXT_PARAM_PRIORITY.
646 */
647#define I915_PARAM_HAS_SCHEDULER	 41
648#define   I915_SCHEDULER_CAP_ENABLED	(1ul << 0)
649#define   I915_SCHEDULER_CAP_PRIORITY	(1ul << 1)
650#define   I915_SCHEDULER_CAP_PREEMPTION	(1ul << 2)
651#define   I915_SCHEDULER_CAP_SEMAPHORES	(1ul << 3)
652#define   I915_SCHEDULER_CAP_ENGINE_BUSY_STATS	(1ul << 4)
653/*
654 * Indicates the 2k user priority levels are statically mapped into 3 buckets as
655 * follows:
656 *
657 * -1k to -1	Low priority
658 * 0		Normal priority
659 * 1 to 1k	Highest priority
660 */
661#define   I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP	(1ul << 5)
662
663/*
664 * Query the status of HuC load.
665 *
666 * The query can fail in the following scenarios with the listed error codes:
667 *  -ENODEV if HuC is not present on this platform,
668 *  -EOPNOTSUPP if HuC firmware usage is disabled,
669 *  -ENOPKG if HuC firmware fetch failed,
670 *  -ENOEXEC if HuC firmware is invalid or mismatched,
671 *  -ENOMEM if i915 failed to prepare the FW objects for transfer to the uC,
672 *  -EIO if the FW transfer or the FW authentication failed.
673 *
674 * If the IOCTL is successful, the returned parameter will be set to one of the
675 * following values:
676 *  * 0 if HuC firmware load is not complete,
677 *  * 1 if HuC firmware is loaded and fully authenticated,
678 *  * 2 if HuC firmware is loaded and authenticated for clear media only
679 */
680#define I915_PARAM_HUC_STATUS		 42
681
682/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
683 * synchronisation with implicit fencing on individual objects.
684 * See EXEC_OBJECT_ASYNC.
685 */
686#define I915_PARAM_HAS_EXEC_ASYNC	 43
687
688/* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -
689 * both being able to pass in a sync_file fd to wait upon before executing,
690 * and being able to return a new sync_file fd that is signaled when the
691 * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
692 */
693#define I915_PARAM_HAS_EXEC_FENCE	 44
694
695/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to capture
696 * user-specified buffers for post-mortem debugging of GPU hangs. See
697 * EXEC_OBJECT_CAPTURE.
698 */
699#define I915_PARAM_HAS_EXEC_CAPTURE	 45
700
701#define I915_PARAM_SLICE_MASK		 46
702
703/* Assuming it's uniform for each slice, this queries the mask of subslices
704 * per-slice for this system.
705 */
706#define I915_PARAM_SUBSLICE_MASK	 47
707
708/*
709 * Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying the batch buffer
710 * as the first execobject as opposed to the last. See I915_EXEC_BATCH_FIRST.
711 */
712#define I915_PARAM_HAS_EXEC_BATCH_FIRST	 48
713
714/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
715 * drm_i915_gem_exec_fence structures.  See I915_EXEC_FENCE_ARRAY.
716 */
717#define I915_PARAM_HAS_EXEC_FENCE_ARRAY  49
718
719/*
720 * Query whether every context (both per-file default and user created) is
721 * isolated (insofar as HW supports). If this parameter is not true, then
722 * freshly created contexts may inherit values from an existing context,
723 * rather than default HW values. If true, it also ensures (insofar as HW
724 * supports) that all state set by this context will not leak to any other
725 * context.
726 *
727 * As not every engine across every gen support contexts, the returned
728 * value reports the support of context isolation for individual engines by
729 * returning a bitmask of each engine class set to true if that class supports
730 * isolation.
731 */
732#define I915_PARAM_HAS_CONTEXT_ISOLATION 50
733
734/* Frequency of the command streamer timestamps given by the *_TIMESTAMP
735 * registers. This used to be fixed per platform but from CNL onwards, this
736 * might vary depending on the parts.
737 */
738#define I915_PARAM_CS_TIMESTAMP_FREQUENCY 51
739
740/*
741 * Once upon a time we supposed that writes through the GGTT would be
742 * immediately in physical memory (once flushed out of the CPU path). However,
743 * on a few different processors and chipsets, this is not necessarily the case
744 * as the writes appear to be buffered internally. Thus a read of the backing
745 * storage (physical memory) via a different path (with different physical tags
746 * to the indirect write via the GGTT) will see stale values from before
747 * the GGTT write. Inside the kernel, we can for the most part keep track of
748 * the different read/write domains in use (e.g. set-domain), but the assumption
749 * of coherency is baked into the ABI, hence reporting its true state in this
750 * parameter.
751 *
752 * Reports true when writes via mmap_gtt are immediately visible following an
753 * lfence to flush the WCB.
754 *
755 * Reports false when writes via mmap_gtt are indeterminately delayed in an in
756 * internal buffer and are _not_ immediately visible to third parties accessing
757 * directly via mmap_cpu/mmap_wc. Use of mmap_gtt as part of an IPC
758 * communications channel when reporting false is strongly disadvised.
759 */
760#define I915_PARAM_MMAP_GTT_COHERENT	52
761
762/*
763 * Query whether DRM_I915_GEM_EXECBUFFER2 supports coordination of parallel
764 * execution through use of explicit fence support.
765 * See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
766 */
767#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53
768
769/*
770 * Revision of the i915-perf uAPI. The value returned helps determine what
771 * i915-perf features are available. See drm_i915_perf_property_id.
772 */
773#define I915_PARAM_PERF_REVISION	54
774
775/* Query whether DRM_I915_GEM_EXECBUFFER2 supports supplying an array of
776 * timeline syncobj through drm_i915_gem_execbuffer_ext_timeline_fences. See
777 * I915_EXEC_USE_EXTENSIONS.
778 */
779#define I915_PARAM_HAS_EXEC_TIMELINE_FENCES 55
780
781/* Query if the kernel supports the I915_USERPTR_PROBE flag. */
782#define I915_PARAM_HAS_USERPTR_PROBE 56
783
784/*
785 * Frequency of the timestamps in OA reports. This used to be the same as the CS
786 * timestamp frequency, but differs on some platforms.
787 */
788#define I915_PARAM_OA_TIMESTAMP_FREQUENCY 57
789
790/*
791 * Query the status of PXP support in i915.
792 *
793 * The query can fail in the following scenarios with the listed error codes:
794 *     -ENODEV = PXP support is not available on the GPU device or in the
795 *               kernel due to missing component drivers or kernel configs.
796 *
797 * If the IOCTL is successful, the returned parameter will be set to one of
798 * the following values:
799 *     1 = PXP feature is supported and is ready for use.
800 *     2 = PXP feature is supported but should be ready soon (pending
801 *         initialization of non-i915 system dependencies).
802 *
803 * NOTE: When param is supported (positive return values), user space should
804 *       still refer to the GEM PXP context-creation UAPI header specs to be
805 *       aware of possible failure due to system state machine at the time.
806 */
807#define I915_PARAM_PXP_STATUS		 58
808
809/* Must be kept compact -- no holes and well documented */
810
811/**
812 * struct drm_i915_getparam - Driver parameter query structure.
813 */
814struct drm_i915_getparam {
815	/** @param: Driver parameter to query. */
816	__s32 param;
817
818	/**
819	 * @value: Address of memory where queried value should be put.
820	 *
821	 * WARNING: Using pointers instead of fixed-size u64 means we need to write
822	 * compat32 code. Don't repeat this mistake.
823	 */
824	int __user *value;
825};
826
827/**
828 * typedef drm_i915_getparam_t - Driver parameter query structure.
829 * See struct drm_i915_getparam.
830 */
831typedef struct drm_i915_getparam drm_i915_getparam_t;
832
833/* Ioctl to set kernel params:
834 */
835#define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
836#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
837#define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
838#define I915_SETPARAM_NUM_USED_FENCES                     4
839/* Must be kept compact -- no holes */
840
841typedef struct drm_i915_setparam {
842	int param;
843	int value;
844} drm_i915_setparam_t;
845
846/* A memory manager for regions of shared memory:
847 */
848#define I915_MEM_REGION_AGP 1
849
850typedef struct drm_i915_mem_alloc {
851	int region;
852	int alignment;
853	int size;
854	int __user *region_offset;	/* offset from start of fb or agp */
855} drm_i915_mem_alloc_t;
856
857typedef struct drm_i915_mem_free {
858	int region;
859	int region_offset;
860} drm_i915_mem_free_t;
861
862typedef struct drm_i915_mem_init_heap {
863	int region;
864	int size;
865	int start;
866} drm_i915_mem_init_heap_t;
867
868/* Allow memory manager to be torn down and re-initialized (eg on
869 * rotate):
870 */
871typedef struct drm_i915_mem_destroy_heap {
872	int region;
873} drm_i915_mem_destroy_heap_t;
874
875/* Allow X server to configure which pipes to monitor for vblank signals
876 */
877#define	DRM_I915_VBLANK_PIPE_A	1
878#define	DRM_I915_VBLANK_PIPE_B	2
879
880typedef struct drm_i915_vblank_pipe {
881	int pipe;
882} drm_i915_vblank_pipe_t;
883
884/* Schedule buffer swap at given vertical blank:
885 */
886typedef struct drm_i915_vblank_swap {
887	drm_drawable_t drawable;
888	enum drm_vblank_seq_type seqtype;
889	unsigned int sequence;
890} drm_i915_vblank_swap_t;
891
892typedef struct drm_i915_hws_addr {
893	__u64 addr;
894} drm_i915_hws_addr_t;
895
896struct drm_i915_gem_init {
897	/**
898	 * Beginning offset in the GTT to be managed by the DRM memory
899	 * manager.
900	 */
901	__u64 gtt_start;
902	/**
903	 * Ending offset in the GTT to be managed by the DRM memory
904	 * manager.
905	 */
906	__u64 gtt_end;
907};
908
909struct drm_i915_gem_create {
910	/**
911	 * Requested size for the object.
912	 *
913	 * The (page-aligned) allocated size for the object will be returned.
914	 */
915	__u64 size;
916	/**
917	 * Returned handle for the object.
918	 *
919	 * Object handles are nonzero.
920	 */
921	__u32 handle;
922	__u32 pad;
923};
924
925struct drm_i915_gem_pread {
926	/** Handle for the object being read. */
927	__u32 handle;
928	__u32 pad;
929	/** Offset into the object to read from */
930	__u64 offset;
931	/** Length of data to read */
932	__u64 size;
933	/**
934	 * Pointer to write the data into.
935	 *
936	 * This is a fixed-size type for 32/64 compatibility.
937	 */
938	__u64 data_ptr;
939};
940
941struct drm_i915_gem_pwrite {
942	/** Handle for the object being written to. */
943	__u32 handle;
944	__u32 pad;
945	/** Offset into the object to write to */
946	__u64 offset;
947	/** Length of data to write */
948	__u64 size;
949	/**
950	 * Pointer to read the data from.
951	 *
952	 * This is a fixed-size type for 32/64 compatibility.
953	 */
954	__u64 data_ptr;
955};
956
957struct drm_i915_gem_mmap {
958	/** Handle for the object being mapped. */
959	__u32 handle;
960	__u32 pad;
961	/** Offset in the object to map. */
962	__u64 offset;
963	/**
964	 * Length of data to map.
965	 *
966	 * The value will be page-aligned.
967	 */
968	__u64 size;
969	/**
970	 * Returned pointer the data was mapped at.
971	 *
972	 * This is a fixed-size type for 32/64 compatibility.
973	 */
974	__u64 addr_ptr;
975
976	/**
977	 * Flags for extended behaviour.
978	 *
979	 * Added in version 2.
980	 */
981	__u64 flags;
982#define I915_MMAP_WC 0x1
983};
984
985struct drm_i915_gem_mmap_gtt {
986	/** Handle for the object being mapped. */
987	__u32 handle;
988	__u32 pad;
989	/**
990	 * Fake offset to use for subsequent mmap call
991	 *
992	 * This is a fixed-size type for 32/64 compatibility.
993	 */
994	__u64 offset;
995};
996
997/**
998 * struct drm_i915_gem_mmap_offset - Retrieve an offset so we can mmap this buffer object.
999 *
1000 * This struct is passed as argument to the `DRM_IOCTL_I915_GEM_MMAP_OFFSET` ioctl,
1001 * and is used to retrieve the fake offset to mmap an object specified by &handle.
1002 *
1003 * The legacy way of using `DRM_IOCTL_I915_GEM_MMAP` is removed on gen12+.
1004 * `DRM_IOCTL_I915_GEM_MMAP_GTT` is an older supported alias to this struct, but will behave
1005 * as setting the &extensions to 0, and &flags to `I915_MMAP_OFFSET_GTT`.
1006 */
1007struct drm_i915_gem_mmap_offset {
1008	/** @handle: Handle for the object being mapped. */
1009	__u32 handle;
1010	/** @pad: Must be zero */
1011	__u32 pad;
1012	/**
1013	 * @offset: The fake offset to use for subsequent mmap call
1014	 *
1015	 * This is a fixed-size type for 32/64 compatibility.
1016	 */
1017	__u64 offset;
1018
1019	/**
1020	 * @flags: Flags for extended behaviour.
1021	 *
1022	 * It is mandatory that one of the `MMAP_OFFSET` types
1023	 * should be included:
1024	 *
1025	 * - `I915_MMAP_OFFSET_GTT`: Use mmap with the object bound to GTT. (Write-Combined)
1026	 * - `I915_MMAP_OFFSET_WC`: Use Write-Combined caching.
1027	 * - `I915_MMAP_OFFSET_WB`: Use Write-Back caching.
1028	 * - `I915_MMAP_OFFSET_FIXED`: Use object placement to determine caching.
1029	 *
1030	 * On devices with local memory `I915_MMAP_OFFSET_FIXED` is the only valid
1031	 * type. On devices without local memory, this caching mode is invalid.
1032	 *
1033	 * As caching mode when specifying `I915_MMAP_OFFSET_FIXED`, WC or WB will
1034	 * be used, depending on the object placement on creation. WB will be used
1035	 * when the object can only exist in system memory, WC otherwise.
1036	 */
1037	__u64 flags;
1038
1039#define I915_MMAP_OFFSET_GTT	0
1040#define I915_MMAP_OFFSET_WC	1
1041#define I915_MMAP_OFFSET_WB	2
1042#define I915_MMAP_OFFSET_UC	3
1043#define I915_MMAP_OFFSET_FIXED	4
1044
1045	/**
1046	 * @extensions: Zero-terminated chain of extensions.
1047	 *
1048	 * No current extensions defined; mbz.
1049	 */
1050	__u64 extensions;
1051};
1052
1053/**
1054 * struct drm_i915_gem_set_domain - Adjust the objects write or read domain, in
1055 * preparation for accessing the pages via some CPU domain.
1056 *
1057 * Specifying a new write or read domain will flush the object out of the
1058 * previous domain(if required), before then updating the objects domain
1059 * tracking with the new domain.
1060 *
1061 * Note this might involve waiting for the object first if it is still active on
1062 * the GPU.
1063 *
1064 * Supported values for @read_domains and @write_domain:
1065 *
1066 *	- I915_GEM_DOMAIN_WC: Uncached write-combined domain
1067 *	- I915_GEM_DOMAIN_CPU: CPU cache domain
1068 *	- I915_GEM_DOMAIN_GTT: Mappable aperture domain
1069 *
1070 * All other domains are rejected.
1071 *
1072 * Note that for discrete, starting from DG1, this is no longer supported, and
1073 * is instead rejected. On such platforms the CPU domain is effectively static,
1074 * where we also only support a single &drm_i915_gem_mmap_offset cache mode,
1075 * which can't be set explicitly and instead depends on the object placements,
1076 * as per the below.
1077 *
1078 * Implicit caching rules, starting from DG1:
1079 *
1080 *	- If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
1081 *	  contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
1082 *	  mapped as write-combined only.
1083 *
1084 *	- Everything else is always allocated and mapped as write-back, with the
1085 *	  guarantee that everything is also coherent with the GPU.
1086 *
1087 * Note that this is likely to change in the future again, where we might need
1088 * more flexibility on future devices, so making this all explicit as part of a
1089 * new &drm_i915_gem_create_ext extension is probable.
1090 */
1091struct drm_i915_gem_set_domain {
1092	/** @handle: Handle for the object. */
1093	__u32 handle;
1094
1095	/** @read_domains: New read domains. */
1096	__u32 read_domains;
1097
1098	/**
1099	 * @write_domain: New write domain.
1100	 *
1101	 * Note that having something in the write domain implies it's in the
1102	 * read domain, and only that read domain.
1103	 */
1104	__u32 write_domain;
1105};
1106
1107struct drm_i915_gem_sw_finish {
1108	/** Handle for the object */
1109	__u32 handle;
1110};
1111
1112struct drm_i915_gem_relocation_entry {
1113	/**
1114	 * Handle of the buffer being pointed to by this relocation entry.
1115	 *
1116	 * It's appealing to make this be an index into the mm_validate_entry
1117	 * list to refer to the buffer, but this allows the driver to create
1118	 * a relocation list for state buffers and not re-write it per
1119	 * exec using the buffer.
1120	 */
1121	__u32 target_handle;
1122
1123	/**
1124	 * Value to be added to the offset of the target buffer to make up
1125	 * the relocation entry.
1126	 */
1127	__u32 delta;
1128
1129	/** Offset in the buffer the relocation entry will be written into */
1130	__u64 offset;
1131
1132	/**
1133	 * Offset value of the target buffer that the relocation entry was last
1134	 * written as.
1135	 *
1136	 * If the buffer has the same offset as last time, we can skip syncing
1137	 * and writing the relocation.  This value is written back out by
1138	 * the execbuffer ioctl when the relocation is written.
1139	 */
1140	__u64 presumed_offset;
1141
1142	/**
1143	 * Target memory domains read by this operation.
1144	 */
1145	__u32 read_domains;
1146
1147	/**
1148	 * Target memory domains written by this operation.
1149	 *
1150	 * Note that only one domain may be written by the whole
1151	 * execbuffer operation, so that where there are conflicts,
1152	 * the application will get -EINVAL back.
1153	 */
1154	__u32 write_domain;
1155};
1156
1157/** @{
1158 * Intel memory domains
1159 *
1160 * Most of these just align with the various caches in
1161 * the system and are used to flush and invalidate as
1162 * objects end up cached in different domains.
1163 */
1164/** CPU cache */
1165#define I915_GEM_DOMAIN_CPU		0x00000001
1166/** Render cache, used by 2D and 3D drawing */
1167#define I915_GEM_DOMAIN_RENDER		0x00000002
1168/** Sampler cache, used by texture engine */
1169#define I915_GEM_DOMAIN_SAMPLER		0x00000004
1170/** Command queue, used to load batch buffers */
1171#define I915_GEM_DOMAIN_COMMAND		0x00000008
1172/** Instruction cache, used by shader programs */
1173#define I915_GEM_DOMAIN_INSTRUCTION	0x00000010
1174/** Vertex address cache */
1175#define I915_GEM_DOMAIN_VERTEX		0x00000020
1176/** GTT domain - aperture and scanout */
1177#define I915_GEM_DOMAIN_GTT		0x00000040
1178/** WC domain - uncached access */
1179#define I915_GEM_DOMAIN_WC		0x00000080
1180/** @} */
1181
1182struct drm_i915_gem_exec_object {
1183	/**
1184	 * User's handle for a buffer to be bound into the GTT for this
1185	 * operation.
1186	 */
1187	__u32 handle;
1188
1189	/** Number of relocations to be performed on this buffer */
1190	__u32 relocation_count;
1191	/**
1192	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
1193	 * the relocations to be performed in this buffer.
1194	 */
1195	__u64 relocs_ptr;
1196
1197	/** Required alignment in graphics aperture */
1198	__u64 alignment;
1199
1200	/**
1201	 * Returned value of the updated offset of the object, for future
1202	 * presumed_offset writes.
1203	 */
1204	__u64 offset;
1205};
1206
1207/* DRM_IOCTL_I915_GEM_EXECBUFFER was removed in Linux 5.13 */
1208struct drm_i915_gem_execbuffer {
1209	/**
1210	 * List of buffers to be validated with their relocations to be
1211	 * performend on them.
1212	 *
1213	 * This is a pointer to an array of struct drm_i915_gem_validate_entry.
1214	 *
1215	 * These buffers must be listed in an order such that all relocations
1216	 * a buffer is performing refer to buffers that have already appeared
1217	 * in the validate list.
1218	 */
1219	__u64 buffers_ptr;
1220	__u32 buffer_count;
1221
1222	/** Offset in the batchbuffer to start execution from. */
1223	__u32 batch_start_offset;
1224	/** Bytes used in batchbuffer from batch_start_offset */
1225	__u32 batch_len;
1226	__u32 DR1;
1227	__u32 DR4;
1228	__u32 num_cliprects;
1229	/** This is a struct drm_clip_rect *cliprects */
1230	__u64 cliprects_ptr;
1231};
1232
1233struct drm_i915_gem_exec_object2 {
1234	/**
1235	 * User's handle for a buffer to be bound into the GTT for this
1236	 * operation.
1237	 */
1238	__u32 handle;
1239
1240	/** Number of relocations to be performed on this buffer */
1241	__u32 relocation_count;
1242	/**
1243	 * Pointer to array of struct drm_i915_gem_relocation_entry containing
1244	 * the relocations to be performed in this buffer.
1245	 */
1246	__u64 relocs_ptr;
1247
1248	/** Required alignment in graphics aperture */
1249	__u64 alignment;
1250
1251	/**
1252	 * When the EXEC_OBJECT_PINNED flag is specified this is populated by
1253	 * the user with the GTT offset at which this object will be pinned.
1254	 *
1255	 * When the I915_EXEC_NO_RELOC flag is specified this must contain the
1256	 * presumed_offset of the object.
1257	 *
1258	 * During execbuffer2 the kernel populates it with the value of the
1259	 * current GTT offset of the object, for future presumed_offset writes.
1260	 *
1261	 * See struct drm_i915_gem_create_ext for the rules when dealing with
1262	 * alignment restrictions with I915_MEMORY_CLASS_DEVICE, on devices with
1263	 * minimum page sizes, like DG2.
1264	 */
1265	__u64 offset;
1266
1267#define EXEC_OBJECT_NEEDS_FENCE		 (1<<0)
1268#define EXEC_OBJECT_NEEDS_GTT		 (1<<1)
1269#define EXEC_OBJECT_WRITE		 (1<<2)
1270#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
1271#define EXEC_OBJECT_PINNED		 (1<<4)
1272#define EXEC_OBJECT_PAD_TO_SIZE		 (1<<5)
1273/* The kernel implicitly tracks GPU activity on all GEM objects, and
1274 * synchronises operations with outstanding rendering. This includes
1275 * rendering on other devices if exported via dma-buf. However, sometimes
1276 * this tracking is too coarse and the user knows better. For example,
1277 * if the object is split into non-overlapping ranges shared between different
1278 * clients or engines (i.e. suballocating objects), the implicit tracking
1279 * by kernel assumes that each operation affects the whole object rather
1280 * than an individual range, causing needless synchronisation between clients.
1281 * The kernel will also forgo any CPU cache flushes prior to rendering from
1282 * the object as the client is expected to be also handling such domain
1283 * tracking.
1284 *
1285 * The kernel maintains the implicit tracking in order to manage resources
1286 * used by the GPU - this flag only disables the synchronisation prior to
1287 * rendering with this object in this execbuf.
1288 *
1289 * Opting out of implicit synhronisation requires the user to do its own
1290 * explicit tracking to avoid rendering corruption. See, for example,
1291 * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
1292 */
1293#define EXEC_OBJECT_ASYNC		(1<<6)
1294/* Request that the contents of this execobject be copied into the error
1295 * state upon a GPU hang involving this batch for post-mortem debugging.
1296 * These buffers are recorded in no particular order as "user" in
1297 * /sys/class/drm/cardN/error. Query I915_PARAM_HAS_EXEC_CAPTURE to see
1298 * if the kernel supports this flag.
1299 */
1300#define EXEC_OBJECT_CAPTURE		(1<<7)
1301/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
1302#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_CAPTURE<<1)
1303	__u64 flags;
1304
1305	union {
1306		__u64 rsvd1;
1307		__u64 pad_to_size;
1308	};
1309	__u64 rsvd2;
1310};
1311
1312/**
1313 * struct drm_i915_gem_exec_fence - An input or output fence for the execbuf
1314 * ioctl.
1315 *
1316 * The request will wait for input fence to signal before submission.
1317 *
1318 * The returned output fence will be signaled after the completion of the
1319 * request.
1320 */
1321struct drm_i915_gem_exec_fence {
1322	/** @handle: User's handle for a drm_syncobj to wait on or signal. */
1323	__u32 handle;
1324
1325	/**
1326	 * @flags: Supported flags are:
1327	 *
1328	 * I915_EXEC_FENCE_WAIT:
1329	 * Wait for the input fence before request submission.
1330	 *
1331	 * I915_EXEC_FENCE_SIGNAL:
1332	 * Return request completion fence as output
1333	 */
1334	__u32 flags;
1335#define I915_EXEC_FENCE_WAIT            (1<<0)
1336#define I915_EXEC_FENCE_SIGNAL          (1<<1)
1337#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
1338};
1339
1340/**
1341 * struct drm_i915_gem_execbuffer_ext_timeline_fences - Timeline fences
1342 * for execbuf ioctl.
1343 *
1344 * This structure describes an array of drm_syncobj and associated points for
1345 * timeline variants of drm_syncobj. It is invalid to append this structure to
1346 * the execbuf if I915_EXEC_FENCE_ARRAY is set.
1347 */
1348struct drm_i915_gem_execbuffer_ext_timeline_fences {
1349#define DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES 0
1350	/** @base: Extension link. See struct i915_user_extension. */
1351	struct i915_user_extension base;
1352
1353	/**
1354	 * @fence_count: Number of elements in the @handles_ptr & @value_ptr
1355	 * arrays.
1356	 */
1357	__u64 fence_count;
1358
1359	/**
1360	 * @handles_ptr: Pointer to an array of struct drm_i915_gem_exec_fence
1361	 * of length @fence_count.
1362	 */
1363	__u64 handles_ptr;
1364
1365	/**
1366	 * @values_ptr: Pointer to an array of u64 values of length
1367	 * @fence_count.
1368	 * Values must be 0 for a binary drm_syncobj. A Value of 0 for a
1369	 * timeline drm_syncobj is invalid as it turns a drm_syncobj into a
1370	 * binary one.
1371	 */
1372	__u64 values_ptr;
1373};
1374
1375/**
1376 * struct drm_i915_gem_execbuffer2 - Structure for DRM_I915_GEM_EXECBUFFER2
1377 * ioctl.
1378 */
1379struct drm_i915_gem_execbuffer2 {
1380	/** @buffers_ptr: Pointer to a list of gem_exec_object2 structs */
1381	__u64 buffers_ptr;
1382
1383	/** @buffer_count: Number of elements in @buffers_ptr array */
1384	__u32 buffer_count;
1385
1386	/**
1387	 * @batch_start_offset: Offset in the batchbuffer to start execution
1388	 * from.
1389	 */
1390	__u32 batch_start_offset;
1391
1392	/**
1393	 * @batch_len: Length in bytes of the batch buffer, starting from the
1394	 * @batch_start_offset. If 0, length is assumed to be the batch buffer
1395	 * object size.
1396	 */
1397	__u32 batch_len;
1398
1399	/** @DR1: deprecated */
1400	__u32 DR1;
1401
1402	/** @DR4: deprecated */
1403	__u32 DR4;
1404
1405	/** @num_cliprects: See @cliprects_ptr */
1406	__u32 num_cliprects;
1407
1408	/**
1409	 * @cliprects_ptr: Kernel clipping was a DRI1 misfeature.
1410	 *
1411	 * It is invalid to use this field if I915_EXEC_FENCE_ARRAY or
1412	 * I915_EXEC_USE_EXTENSIONS flags are not set.
1413	 *
1414	 * If I915_EXEC_FENCE_ARRAY is set, then this is a pointer to an array
1415	 * of &drm_i915_gem_exec_fence and @num_cliprects is the length of the
1416	 * array.
1417	 *
1418	 * If I915_EXEC_USE_EXTENSIONS is set, then this is a pointer to a
1419	 * single &i915_user_extension and num_cliprects is 0.
1420	 */
1421	__u64 cliprects_ptr;
1422
1423	/** @flags: Execbuf flags */
1424	__u64 flags;
1425#define I915_EXEC_RING_MASK              (0x3f)
1426#define I915_EXEC_DEFAULT                (0<<0)
1427#define I915_EXEC_RENDER                 (1<<0)
1428#define I915_EXEC_BSD                    (2<<0)
1429#define I915_EXEC_BLT                    (3<<0)
1430#define I915_EXEC_VEBOX                  (4<<0)
1431
1432/* Used for switching the constants addressing mode on gen4+ RENDER ring.
1433 * Gen6+ only supports relative addressing to dynamic state (default) and
1434 * absolute addressing.
1435 *
1436 * These flags are ignored for the BSD and BLT rings.
1437 */
1438#define I915_EXEC_CONSTANTS_MASK 	(3<<6)
1439#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
1440#define I915_EXEC_CONSTANTS_ABSOLUTE 	(1<<6)
1441#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
1442
1443/** Resets the SO write offset registers for transform feedback on gen7. */
1444#define I915_EXEC_GEN7_SOL_RESET	(1<<8)
1445
1446/** Request a privileged ("secure") batch buffer. Note only available for
1447 * DRM_ROOT_ONLY | DRM_MASTER processes.
1448 */
1449#define I915_EXEC_SECURE		(1<<9)
1450
1451/** Inform the kernel that the batch is and will always be pinned. This
1452 * negates the requirement for a workaround to be performed to avoid
1453 * an incoherent CS (such as can be found on 830/845). If this flag is
1454 * not passed, the kernel will endeavour to make sure the batch is
1455 * coherent with the CS before execution. If this flag is passed,
1456 * userspace assumes the responsibility for ensuring the same.
1457 */
1458#define I915_EXEC_IS_PINNED		(1<<10)
1459
1460/** Provide a hint to the kernel that the command stream and auxiliary
1461 * state buffers already holds the correct presumed addresses and so the
1462 * relocation process may be skipped if no buffers need to be moved in
1463 * preparation for the execbuffer.
1464 */
1465#define I915_EXEC_NO_RELOC		(1<<11)
1466
1467/** Use the reloc.handle as an index into the exec object array rather
1468 * than as the per-file handle.
1469 */
1470#define I915_EXEC_HANDLE_LUT		(1<<12)
1471
1472/** Used for switching BSD rings on the platforms with two BSD rings */
1473#define I915_EXEC_BSD_SHIFT	 (13)
1474#define I915_EXEC_BSD_MASK	 (3 << I915_EXEC_BSD_SHIFT)
1475/* default ping-pong mode */
1476#define I915_EXEC_BSD_DEFAULT	 (0 << I915_EXEC_BSD_SHIFT)
1477#define I915_EXEC_BSD_RING1	 (1 << I915_EXEC_BSD_SHIFT)
1478#define I915_EXEC_BSD_RING2	 (2 << I915_EXEC_BSD_SHIFT)
1479
1480/** Tell the kernel that the batchbuffer is processed by
1481 *  the resource streamer.
1482 */
1483#define I915_EXEC_RESOURCE_STREAMER     (1<<15)
1484
1485/* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent
1486 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
1487 * the batch.
1488 *
1489 * Returns -EINVAL if the sync_file fd cannot be found.
1490 */
1491#define I915_EXEC_FENCE_IN		(1<<16)
1492
1493/* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
1494 * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
1495 * to the caller, and it should be close() after use. (The fd is a regular
1496 * file descriptor and will be cleaned up on process termination. It holds
1497 * a reference to the request, but nothing else.)
1498 *
1499 * The sync_file fd can be combined with other sync_file and passed either
1500 * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
1501 * will only occur after this request completes), or to other devices.
1502 *
1503 * Using I915_EXEC_FENCE_OUT requires use of
1504 * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
1505 * back to userspace. Failure to do so will cause the out-fence to always
1506 * be reported as zero, and the real fence fd to be leaked.
1507 */
1508#define I915_EXEC_FENCE_OUT		(1<<17)
1509
1510/*
1511 * Traditionally the execbuf ioctl has only considered the final element in
1512 * the execobject[] to be the executable batch. Often though, the client
1513 * will known the batch object prior to construction and being able to place
1514 * it into the execobject[] array first can simplify the relocation tracking.
1515 * Setting I915_EXEC_BATCH_FIRST tells execbuf to use element 0 of the
1516 * execobject[] as the * batch instead (the default is to use the last
1517 * element).
1518 */
1519#define I915_EXEC_BATCH_FIRST		(1<<18)
1520
1521/* Setting I915_FENCE_ARRAY implies that num_cliprects and cliprects_ptr
1522 * define an array of i915_gem_exec_fence structures which specify a set of
1523 * dma fences to wait upon or signal.
1524 */
1525#define I915_EXEC_FENCE_ARRAY   (1<<19)
1526
1527/*
1528 * Setting I915_EXEC_FENCE_SUBMIT implies that lower_32_bits(rsvd2) represent
1529 * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
1530 * the batch.
1531 *
1532 * Returns -EINVAL if the sync_file fd cannot be found.
1533 */
1534#define I915_EXEC_FENCE_SUBMIT		(1 << 20)
1535
1536/*
1537 * Setting I915_EXEC_USE_EXTENSIONS implies that
1538 * drm_i915_gem_execbuffer2.cliprects_ptr is treated as a pointer to an linked
1539 * list of i915_user_extension. Each i915_user_extension node is the base of a
1540 * larger structure. The list of supported structures are listed in the
1541 * drm_i915_gem_execbuffer_ext enum.
1542 */
1543#define I915_EXEC_USE_EXTENSIONS	(1 << 21)
1544#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_USE_EXTENSIONS << 1))
1545
1546	/** @rsvd1: Context id */
1547	__u64 rsvd1;
1548
1549	/**
1550	 * @rsvd2: in and out sync_file file descriptors.
1551	 *
1552	 * When I915_EXEC_FENCE_IN or I915_EXEC_FENCE_SUBMIT flag is set, the
1553	 * lower 32 bits of this field will have the in sync_file fd (input).
1554	 *
1555	 * When I915_EXEC_FENCE_OUT flag is set, the upper 32 bits of this
1556	 * field will have the out sync_file fd (output).
1557	 */
1558	__u64 rsvd2;
1559};
1560
1561#define I915_EXEC_CONTEXT_ID_MASK	(0xffffffff)
1562#define i915_execbuffer2_set_context_id(eb2, context) \
1563	(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
1564#define i915_execbuffer2_get_context_id(eb2) \
1565	((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
1566
1567struct drm_i915_gem_pin {
1568	/** Handle of the buffer to be pinned. */
1569	__u32 handle;
1570	__u32 pad;
1571
1572	/** alignment required within the aperture */
1573	__u64 alignment;
1574
1575	/** Returned GTT offset of the buffer. */
1576	__u64 offset;
1577};
1578
1579struct drm_i915_gem_unpin {
1580	/** Handle of the buffer to be unpinned. */
1581	__u32 handle;
1582	__u32 pad;
1583};
1584
1585struct drm_i915_gem_busy {
1586	/** Handle of the buffer to check for busy */
1587	__u32 handle;
1588
1589	/** Return busy status
1590	 *
1591	 * A return of 0 implies that the object is idle (after
1592	 * having flushed any pending activity), and a non-zero return that
1593	 * the object is still in-flight on the GPU. (The GPU has not yet
1594	 * signaled completion for all pending requests that reference the
1595	 * object.) An object is guaranteed to become idle eventually (so
1596	 * long as no new GPU commands are executed upon it). Due to the
1597	 * asynchronous nature of the hardware, an object reported
1598	 * as busy may become idle before the ioctl is completed.
1599	 *
1600	 * Furthermore, if the object is busy, which engine is busy is only
1601	 * provided as a guide and only indirectly by reporting its class
1602	 * (there may be more than one engine in each class). There are race
1603	 * conditions which prevent the report of which engines are busy from
1604	 * being always accurate.  However, the converse is not true. If the
1605	 * object is idle, the result of the ioctl, that all engines are idle,
1606	 * is accurate.
1607	 *
1608	 * The returned dword is split into two fields to indicate both
1609	 * the engine classes on which the object is being read, and the
1610	 * engine class on which it is currently being written (if any).
1611	 *
1612	 * The low word (bits 0:15) indicate if the object is being written
1613	 * to by any engine (there can only be one, as the GEM implicit
1614	 * synchronisation rules force writes to be serialised). Only the
1615	 * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as
1616	 * 1 not 0 etc) for the last write is reported.
1617	 *
1618	 * The high word (bits 16:31) are a bitmask of which engines classes
1619	 * are currently reading from the object. Multiple engines may be
1620	 * reading from the object simultaneously.
1621	 *
1622	 * The value of each engine class is the same as specified in the
1623	 * I915_CONTEXT_PARAM_ENGINES context parameter and via perf, i.e.
1624	 * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc.
1625	 * Some hardware may have parallel execution engines, e.g. multiple
1626	 * media engines, which are mapped to the same class identifier and so
1627	 * are not separately reported for busyness.
1628	 *
1629	 * Caveat emptor:
1630	 * Only the boolean result of this query is reliable; that is whether
1631	 * the object is idle or busy. The report of which engines are busy
1632	 * should be only used as a heuristic.
1633	 */
1634	__u32 busy;
1635};
1636
1637/**
1638 * struct drm_i915_gem_caching - Set or get the caching for given object
1639 * handle.
1640 *
1641 * Allow userspace to control the GTT caching bits for a given object when the
1642 * object is later mapped through the ppGTT(or GGTT on older platforms lacking
1643 * ppGTT support, or if the object is used for scanout). Note that this might
1644 * require unbinding the object from the GTT first, if its current caching value
1645 * doesn't match.
1646 *
1647 * Note that this all changes on discrete platforms, starting from DG1, the
1648 * set/get caching is no longer supported, and is now rejected.  Instead the CPU
1649 * caching attributes(WB vs WC) will become an immutable creation time property
1650 * for the object, along with the GTT caching level. For now we don't expose any
1651 * new uAPI for this, instead on DG1 this is all implicit, although this largely
1652 * shouldn't matter since DG1 is coherent by default(without any way of
1653 * controlling it).
1654 *
1655 * Implicit caching rules, starting from DG1:
1656 *
1657 *     - If any of the object placements (see &drm_i915_gem_create_ext_memory_regions)
1658 *       contain I915_MEMORY_CLASS_DEVICE then the object will be allocated and
1659 *       mapped as write-combined only.
1660 *
1661 *     - Everything else is always allocated and mapped as write-back, with the
1662 *       guarantee that everything is also coherent with the GPU.
1663 *
1664 * Note that this is likely to change in the future again, where we might need
1665 * more flexibility on future devices, so making this all explicit as part of a
1666 * new &drm_i915_gem_create_ext extension is probable.
1667 *
1668 * Side note: Part of the reason for this is that changing the at-allocation-time CPU
1669 * caching attributes for the pages might be required(and is expensive) if we
1670 * need to then CPU map the pages later with different caching attributes. This
1671 * inconsistent caching behaviour, while supported on x86, is not universally
1672 * supported on other architectures. So for simplicity we opt for setting
1673 * everything at creation time, whilst also making it immutable, on discrete
1674 * platforms.
1675 */
1676struct drm_i915_gem_caching {
1677	/**
1678	 * @handle: Handle of the buffer to set/get the caching level.
1679	 */
1680	__u32 handle;
1681
1682	/**
1683	 * @caching: The GTT caching level to apply or possible return value.
1684	 *
1685	 * The supported @caching values:
1686	 *
1687	 * I915_CACHING_NONE:
1688	 *
1689	 * GPU access is not coherent with CPU caches.  Default for machines
1690	 * without an LLC. This means manual flushing might be needed, if we
1691	 * want GPU access to be coherent.
1692	 *
1693	 * I915_CACHING_CACHED:
1694	 *
1695	 * GPU access is coherent with CPU caches and furthermore the data is
1696	 * cached in last-level caches shared between CPU cores and the GPU GT.
1697	 *
1698	 * I915_CACHING_DISPLAY:
1699	 *
1700	 * Special GPU caching mode which is coherent with the scanout engines.
1701	 * Transparently falls back to I915_CACHING_NONE on platforms where no
1702	 * special cache mode (like write-through or gfdt flushing) is
1703	 * available. The kernel automatically sets this mode when using a
1704	 * buffer as a scanout target.  Userspace can manually set this mode to
1705	 * avoid a costly stall and clflush in the hotpath of drawing the first
1706	 * frame.
1707	 */
1708#define I915_CACHING_NONE		0
1709#define I915_CACHING_CACHED		1
1710#define I915_CACHING_DISPLAY		2
1711	__u32 caching;
1712};
1713
1714#define I915_TILING_NONE	0
1715#define I915_TILING_X		1
1716#define I915_TILING_Y		2
1717/*
1718 * Do not add new tiling types here.  The I915_TILING_* values are for
1719 * de-tiling fence registers that no longer exist on modern platforms.  Although
1720 * the hardware may support new types of tiling in general (e.g., Tile4), we
1721 * do not need to add them to the uapi that is specific to now-defunct ioctls.
1722 */
1723#define I915_TILING_LAST	I915_TILING_Y
1724
1725#define I915_BIT_6_SWIZZLE_NONE		0
1726#define I915_BIT_6_SWIZZLE_9		1
1727#define I915_BIT_6_SWIZZLE_9_10		2
1728#define I915_BIT_6_SWIZZLE_9_11		3
1729#define I915_BIT_6_SWIZZLE_9_10_11	4
1730/* Not seen by userland */
1731#define I915_BIT_6_SWIZZLE_UNKNOWN	5
1732/* Seen by userland. */
1733#define I915_BIT_6_SWIZZLE_9_17		6
1734#define I915_BIT_6_SWIZZLE_9_10_17	7
1735
1736struct drm_i915_gem_set_tiling {
1737	/** Handle of the buffer to have its tiling state updated */
1738	__u32 handle;
1739
1740	/**
1741	 * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
1742	 * I915_TILING_Y).
1743	 *
1744	 * This value is to be set on request, and will be updated by the
1745	 * kernel on successful return with the actual chosen tiling layout.
1746	 *
1747	 * The tiling mode may be demoted to I915_TILING_NONE when the system
1748	 * has bit 6 swizzling that can't be managed correctly by GEM.
1749	 *
1750	 * Buffer contents become undefined when changing tiling_mode.
1751	 */
1752	__u32 tiling_mode;
1753
1754	/**
1755	 * Stride in bytes for the object when in I915_TILING_X or
1756	 * I915_TILING_Y.
1757	 */
1758	__u32 stride;
1759
1760	/**
1761	 * Returned address bit 6 swizzling required for CPU access through
1762	 * mmap mapping.
1763	 */
1764	__u32 swizzle_mode;
1765};
1766
1767struct drm_i915_gem_get_tiling {
1768	/** Handle of the buffer to get tiling state for. */
1769	__u32 handle;
1770
1771	/**
1772	 * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
1773	 * I915_TILING_Y).
1774	 */
1775	__u32 tiling_mode;
1776
1777	/**
1778	 * Returned address bit 6 swizzling required for CPU access through
1779	 * mmap mapping.
1780	 */
1781	__u32 swizzle_mode;
1782
1783	/**
1784	 * Returned address bit 6 swizzling required for CPU access through
1785	 * mmap mapping whilst bound.
1786	 */
1787	__u32 phys_swizzle_mode;
1788};
1789
1790struct drm_i915_gem_get_aperture {
1791	/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
1792	__u64 aper_size;
1793
1794	/**
1795	 * Available space in the aperture used by i915_gem_execbuffer, in
1796	 * bytes
1797	 */
1798	__u64 aper_available_size;
1799};
1800
1801struct drm_i915_get_pipe_from_crtc_id {
1802	/** ID of CRTC being requested **/
1803	__u32 crtc_id;
1804
1805	/** pipe of requested CRTC **/
1806	__u32 pipe;
1807};
1808
1809#define I915_MADV_WILLNEED 0
1810#define I915_MADV_DONTNEED 1
1811#define __I915_MADV_PURGED 2 /* internal state */
1812
1813struct drm_i915_gem_madvise {
1814	/** Handle of the buffer to change the backing store advice */
1815	__u32 handle;
1816
1817	/* Advice: either the buffer will be needed again in the near future,
1818	 *         or won't be and could be discarded under memory pressure.
1819	 */
1820	__u32 madv;
1821
1822	/** Whether the backing store still exists. */
1823	__u32 retained;
1824};
1825
1826/* flags */
1827#define I915_OVERLAY_TYPE_MASK 		0xff
1828#define I915_OVERLAY_YUV_PLANAR 	0x01
1829#define I915_OVERLAY_YUV_PACKED 	0x02
1830#define I915_OVERLAY_RGB		0x03
1831
1832#define I915_OVERLAY_DEPTH_MASK		0xff00
1833#define I915_OVERLAY_RGB24		0x1000
1834#define I915_OVERLAY_RGB16		0x2000
1835#define I915_OVERLAY_RGB15		0x3000
1836#define I915_OVERLAY_YUV422		0x0100
1837#define I915_OVERLAY_YUV411		0x0200
1838#define I915_OVERLAY_YUV420		0x0300
1839#define I915_OVERLAY_YUV410		0x0400
1840
1841#define I915_OVERLAY_SWAP_MASK		0xff0000
1842#define I915_OVERLAY_NO_SWAP		0x000000
1843#define I915_OVERLAY_UV_SWAP		0x010000
1844#define I915_OVERLAY_Y_SWAP		0x020000
1845#define I915_OVERLAY_Y_AND_UV_SWAP	0x030000
1846
1847#define I915_OVERLAY_FLAGS_MASK		0xff000000
1848#define I915_OVERLAY_ENABLE		0x01000000
1849
1850struct drm_intel_overlay_put_image {
1851	/* various flags and src format description */
1852	__u32 flags;
1853	/* source picture description */
1854	__u32 bo_handle;
1855	/* stride values and offsets are in bytes, buffer relative */
1856	__u16 stride_Y; /* stride for packed formats */
1857	__u16 stride_UV;
1858	__u32 offset_Y; /* offset for packet formats */
1859	__u32 offset_U;
1860	__u32 offset_V;
1861	/* in pixels */
1862	__u16 src_width;
1863	__u16 src_height;
1864	/* to compensate the scaling factors for partially covered surfaces */
1865	__u16 src_scan_width;
1866	__u16 src_scan_height;
1867	/* output crtc description */
1868	__u32 crtc_id;
1869	__u16 dst_x;
1870	__u16 dst_y;
1871	__u16 dst_width;
1872	__u16 dst_height;
1873};
1874
1875/* flags */
1876#define I915_OVERLAY_UPDATE_ATTRS	(1<<0)
1877#define I915_OVERLAY_UPDATE_GAMMA	(1<<1)
1878#define I915_OVERLAY_DISABLE_DEST_COLORKEY	(1<<2)
1879struct drm_intel_overlay_attrs {
1880	__u32 flags;
1881	__u32 color_key;
1882	__s32 brightness;
1883	__u32 contrast;
1884	__u32 saturation;
1885	__u32 gamma0;
1886	__u32 gamma1;
1887	__u32 gamma2;
1888	__u32 gamma3;
1889	__u32 gamma4;
1890	__u32 gamma5;
1891};
1892
1893/*
1894 * Intel sprite handling
1895 *
1896 * Color keying works with a min/mask/max tuple.  Both source and destination
1897 * color keying is allowed.
1898 *
1899 * Source keying:
1900 * Sprite pixels within the min & max values, masked against the color channels
1901 * specified in the mask field, will be transparent.  All other pixels will
1902 * be displayed on top of the primary plane.  For RGB surfaces, only the min
1903 * and mask fields will be used; ranged compares are not allowed.
1904 *
1905 * Destination keying:
1906 * Primary plane pixels that match the min value, masked against the color
1907 * channels specified in the mask field, will be replaced by corresponding
1908 * pixels from the sprite plane.
1909 *
1910 * Note that source & destination keying are exclusive; only one can be
1911 * active on a given plane.
1912 */
1913
1914#define I915_SET_COLORKEY_NONE		(1<<0) /* Deprecated. Instead set
1915						* flags==0 to disable colorkeying.
1916						*/
1917#define I915_SET_COLORKEY_DESTINATION	(1<<1)
1918#define I915_SET_COLORKEY_SOURCE	(1<<2)
1919struct drm_intel_sprite_colorkey {
1920	__u32 plane_id;
1921	__u32 min_value;
1922	__u32 channel_mask;
1923	__u32 max_value;
1924	__u32 flags;
1925};
1926
1927struct drm_i915_gem_wait {
1928	/** Handle of BO we shall wait on */
1929	__u32 bo_handle;
1930	__u32 flags;
1931	/** Number of nanoseconds to wait, Returns time remaining. */
1932	__s64 timeout_ns;
1933};
1934
1935struct drm_i915_gem_context_create {
1936	__u32 ctx_id; /* output: id of new context*/
1937	__u32 pad;
1938};
1939
1940/**
1941 * struct drm_i915_gem_context_create_ext - Structure for creating contexts.
1942 */
1943struct drm_i915_gem_context_create_ext {
1944	/** @ctx_id: Id of the created context (output) */
1945	__u32 ctx_id;
1946
1947	/**
1948	 * @flags: Supported flags are:
1949	 *
1950	 * I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS:
1951	 *
1952	 * Extensions may be appended to this structure and driver must check
1953	 * for those. See @extensions.
1954	 *
1955	 * I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE
1956	 *
1957	 * Created context will have single timeline.
1958	 */
1959	__u32 flags;
1960#define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS	(1u << 0)
1961#define I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE	(1u << 1)
1962#define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \
1963	(-(I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE << 1))
1964
1965	/**
1966	 * @extensions: Zero-terminated chain of extensions.
1967	 *
1968	 * I915_CONTEXT_CREATE_EXT_SETPARAM:
1969	 * Context parameter to set or query during context creation.
1970	 * See struct drm_i915_gem_context_create_ext_setparam.
1971	 *
1972	 * I915_CONTEXT_CREATE_EXT_CLONE:
1973	 * This extension has been removed. On the off chance someone somewhere
1974	 * has attempted to use it, never re-use this extension number.
1975	 */
1976	__u64 extensions;
1977#define I915_CONTEXT_CREATE_EXT_SETPARAM 0
1978#define I915_CONTEXT_CREATE_EXT_CLONE 1
1979};
1980
1981/**
1982 * struct drm_i915_gem_context_param - Context parameter to set or query.
1983 */
1984struct drm_i915_gem_context_param {
1985	/** @ctx_id: Context id */
1986	__u32 ctx_id;
1987
1988	/** @size: Size of the parameter @value */
1989	__u32 size;
1990
1991	/** @param: Parameter to set or query */
1992	__u64 param;
1993#define I915_CONTEXT_PARAM_BAN_PERIOD	0x1
1994/* I915_CONTEXT_PARAM_NO_ZEROMAP has been removed.  On the off chance
1995 * someone somewhere has attempted to use it, never re-use this context
1996 * param number.
1997 */
1998#define I915_CONTEXT_PARAM_NO_ZEROMAP	0x2
1999#define I915_CONTEXT_PARAM_GTT_SIZE	0x3
2000#define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE	0x4
2001#define I915_CONTEXT_PARAM_BANNABLE	0x5
2002#define I915_CONTEXT_PARAM_PRIORITY	0x6
2003#define   I915_CONTEXT_MAX_USER_PRIORITY	1023 /* inclusive */
2004#define   I915_CONTEXT_DEFAULT_PRIORITY		0
2005#define   I915_CONTEXT_MIN_USER_PRIORITY	-1023 /* inclusive */
2006	/*
2007	 * When using the following param, value should be a pointer to
2008	 * drm_i915_gem_context_param_sseu.
2009	 */
2010#define I915_CONTEXT_PARAM_SSEU		0x7
2011
2012/*
2013 * Not all clients may want to attempt automatic recover of a context after
2014 * a hang (for example, some clients may only submit very small incremental
2015 * batches relying on known logical state of previous batches which will never
2016 * recover correctly and each attempt will hang), and so would prefer that
2017 * the context is forever banned instead.
2018 *
2019 * If set to false (0), after a reset, subsequent (and in flight) rendering
2020 * from this context is discarded, and the client will need to create a new
2021 * context to use instead.
2022 *
2023 * If set to true (1), the kernel will automatically attempt to recover the
2024 * context by skipping the hanging batch and executing the next batch starting
2025 * from the default context state (discarding the incomplete logical context
2026 * state lost due to the reset).
2027 *
2028 * On creation, all new contexts are marked as recoverable.
2029 */
2030#define I915_CONTEXT_PARAM_RECOVERABLE	0x8
2031
2032	/*
2033	 * The id of the associated virtual memory address space (ppGTT) of
2034	 * this context. Can be retrieved and passed to another context
2035	 * (on the same fd) for both to use the same ppGTT and so share
2036	 * address layouts, and avoid reloading the page tables on context
2037	 * switches between themselves.
2038	 *
2039	 * See DRM_I915_GEM_VM_CREATE and DRM_I915_GEM_VM_DESTROY.
2040	 */
2041#define I915_CONTEXT_PARAM_VM		0x9
2042
2043/*
2044 * I915_CONTEXT_PARAM_ENGINES:
2045 *
2046 * Bind this context to operate on this subset of available engines. Henceforth,
2047 * the I915_EXEC_RING selector for DRM_IOCTL_I915_GEM_EXECBUFFER2 operates as
2048 * an index into this array of engines; I915_EXEC_DEFAULT selecting engine[0]
2049 * and upwards. Slots 0...N are filled in using the specified (class, instance).
2050 * Use
2051 *	engine_class: I915_ENGINE_CLASS_INVALID,
2052 *	engine_instance: I915_ENGINE_CLASS_INVALID_NONE
2053 * to specify a gap in the array that can be filled in later, e.g. by a
2054 * virtual engine used for load balancing.
2055 *
2056 * Setting the number of engines bound to the context to 0, by passing a zero
2057 * sized argument, will revert back to default settings.
2058 *
2059 * See struct i915_context_param_engines.
2060 *
2061 * Extensions:
2062 *   i915_context_engines_load_balance (I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE)
2063 *   i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
2064 *   i915_context_engines_parallel_submit (I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT)
2065 */
2066#define I915_CONTEXT_PARAM_ENGINES	0xa
2067
2068/*
2069 * I915_CONTEXT_PARAM_PERSISTENCE:
2070 *
2071 * Allow the context and active rendering to survive the process until
2072 * completion. Persistence allows fire-and-forget clients to queue up a
2073 * bunch of work, hand the output over to a display server and then quit.
2074 * If the context is marked as not persistent, upon closing (either via
2075 * an explicit DRM_I915_GEM_CONTEXT_DESTROY or implicitly from file closure
2076 * or process termination), the context and any outstanding requests will be
2077 * cancelled (and exported fences for cancelled requests marked as -EIO).
2078 *
2079 * By default, new contexts allow persistence.
2080 */
2081#define I915_CONTEXT_PARAM_PERSISTENCE	0xb
2082
2083/* This API has been removed.  On the off chance someone somewhere has
2084 * attempted to use it, never re-use this context param number.
2085 */
2086#define I915_CONTEXT_PARAM_RINGSIZE	0xc
2087
2088/*
2089 * I915_CONTEXT_PARAM_PROTECTED_CONTENT:
2090 *
2091 * Mark that the context makes use of protected content, which will result
2092 * in the context being invalidated when the protected content session is.
2093 * Given that the protected content session is killed on suspend, the device
2094 * is kept awake for the lifetime of a protected context, so the user should
2095 * make sure to dispose of them once done.
2096 * This flag can only be set at context creation time and, when set to true,
2097 * must be preceded by an explicit setting of I915_CONTEXT_PARAM_RECOVERABLE
2098 * to false. This flag can't be set to true in conjunction with setting the
2099 * I915_CONTEXT_PARAM_BANNABLE flag to false. Creation example:
2100 *
2101 * .. code-block:: C
2102 *
2103 *	struct drm_i915_gem_context_create_ext_setparam p_protected = {
2104 *		.base = {
2105 *			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
2106 *		},
2107 *		.param = {
2108 *			.param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
2109 *			.value = 1,
2110 *		}
2111 *	};
2112 *	struct drm_i915_gem_context_create_ext_setparam p_norecover = {
2113 *		.base = {
2114 *			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
2115 *			.next_extension = to_user_pointer(&p_protected),
2116 *		},
2117 *		.param = {
2118 *			.param = I915_CONTEXT_PARAM_RECOVERABLE,
2119 *			.value = 0,
2120 *		}
2121 *	};
2122 *	struct drm_i915_gem_context_create_ext create = {
2123 *		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
2124 *		.extensions = to_user_pointer(&p_norecover);
2125 *	};
2126 *
2127 *	ctx_id = gem_context_create_ext(drm_fd, &create);
2128 *
2129 * In addition to the normal failure cases, setting this flag during context
2130 * creation can result in the following errors:
2131 *
2132 * -ENODEV: feature not available
2133 * -EPERM: trying to mark a recoverable or not bannable context as protected
2134 * -ENXIO: A dependency such as a component driver or firmware is not yet
2135 *         loaded so user space may need to attempt again. Depending on the
2136 *         device, this error may be reported if protected context creation is
2137 *         attempted very early after kernel start because the internal timeout
2138 *         waiting for such dependencies is not guaranteed to be larger than
2139 *         required (numbers differ depending on system and kernel config):
2140 *            - ADL/RPL: dependencies may take up to 3 seconds from kernel start
2141 *                       while context creation internal timeout is 250 milisecs
2142 *            - MTL: dependencies may take up to 8 seconds from kernel start
2143 *                   while context creation internal timeout is 250 milisecs
2144 *         NOTE: such dependencies happen once, so a subsequent call to create a
2145 *         protected context after a prior successful call will not experience
2146 *         such timeouts and will not return -ENXIO (unless the driver is reloaded,
2147 *         or, depending on the device, resumes from a suspended state).
2148 * -EIO: The firmware did not succeed in creating the protected context.
2149 */
2150#define I915_CONTEXT_PARAM_PROTECTED_CONTENT    0xd
2151/* Must be kept compact -- no holes and well documented */
2152
2153	/** @value: Context parameter value to be set or queried */
2154	__u64 value;
2155};
2156
2157/*
2158 * Context SSEU programming
2159 *
2160 * It may be necessary for either functional or performance reason to configure
2161 * a context to run with a reduced number of SSEU (where SSEU stands for Slice/
2162 * Sub-slice/EU).
2163 *
2164 * This is done by configuring SSEU configuration using the below
2165 * @struct drm_i915_gem_context_param_sseu for every supported engine which
2166 * userspace intends to use.
2167 *
2168 * Not all GPUs or engines support this functionality in which case an error
2169 * code -ENODEV will be returned.
2170 *
2171 * Also, flexibility of possible SSEU configuration permutations varies between
2172 * GPU generations and software imposed limitations. Requesting such a
2173 * combination will return an error code of -EINVAL.
2174 *
2175 * NOTE: When perf/OA is active the context's SSEU configuration is ignored in
2176 * favour of a single global setting.
2177 */
2178struct drm_i915_gem_context_param_sseu {
2179	/*
2180	 * Engine class & instance to be configured or queried.
2181	 */
2182	struct i915_engine_class_instance engine;
2183
2184	/*
2185	 * Unknown flags must be cleared to zero.
2186	 */
2187	__u32 flags;
2188#define I915_CONTEXT_SSEU_FLAG_ENGINE_INDEX (1u << 0)
2189
2190	/*
2191	 * Mask of slices to enable for the context. Valid values are a subset
2192	 * of the bitmask value returned for I915_PARAM_SLICE_MASK.
2193	 */
2194	__u64 slice_mask;
2195
2196	/*
2197	 * Mask of subslices to enable for the context. Valid values are a
2198	 * subset of the bitmask value return by I915_PARAM_SUBSLICE_MASK.
2199	 */
2200	__u64 subslice_mask;
2201
2202	/*
2203	 * Minimum/Maximum number of EUs to enable per subslice for the
2204	 * context. min_eus_per_subslice must be inferior or equal to
2205	 * max_eus_per_subslice.
2206	 */
2207	__u16 min_eus_per_subslice;
2208	__u16 max_eus_per_subslice;
2209
2210	/*
2211	 * Unused for now. Must be cleared to zero.
2212	 */
2213	__u32 rsvd;
2214};
2215
2216/**
2217 * DOC: Virtual Engine uAPI
2218 *
2219 * Virtual engine is a concept where userspace is able to configure a set of
2220 * physical engines, submit a batch buffer, and let the driver execute it on any
2221 * engine from the set as it sees fit.
2222 *
2223 * This is primarily useful on parts which have multiple instances of a same
2224 * class engine, like for example GT3+ Skylake parts with their two VCS engines.
2225 *
2226 * For instance userspace can enumerate all engines of a certain class using the
2227 * previously described `Engine Discovery uAPI`_. After that userspace can
2228 * create a GEM context with a placeholder slot for the virtual engine (using
2229 * `I915_ENGINE_CLASS_INVALID` and `I915_ENGINE_CLASS_INVALID_NONE` for class
2230 * and instance respectively) and finally using the
2231 * `I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE` extension place a virtual engine in
2232 * the same reserved slot.
2233 *
2234 * Example of creating a virtual engine and submitting a batch buffer to it:
2235 *
2236 * .. code-block:: C
2237 *
2238 * 	I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(virtual, 2) = {
2239 * 		.base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE,
2240 * 		.engine_index = 0, // Place this virtual engine into engine map slot 0
2241 * 		.num_siblings = 2,
2242 * 		.engines = { { I915_ENGINE_CLASS_VIDEO, 0 },
2243 * 			     { I915_ENGINE_CLASS_VIDEO, 1 }, },
2244 * 	};
2245 * 	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
2246 * 		.engines = { { I915_ENGINE_CLASS_INVALID,
2247 * 			       I915_ENGINE_CLASS_INVALID_NONE } },
2248 * 		.extensions = to_user_pointer(&virtual), // Chains after load_balance extension
2249 * 	};
2250 * 	struct drm_i915_gem_context_create_ext_setparam p_engines = {
2251 * 		.base = {
2252 * 			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
2253 * 		},
2254 * 		.param = {
2255 * 			.param = I915_CONTEXT_PARAM_ENGINES,
2256 * 			.value = to_user_pointer(&engines),
2257 * 			.size = sizeof(engines),
2258 * 		},
2259 * 	};
2260 * 	struct drm_i915_gem_context_create_ext create = {
2261 * 		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
2262 * 		.extensions = to_user_pointer(&p_engines);
2263 * 	};
2264 *
2265 * 	ctx_id = gem_context_create_ext(drm_fd, &create);
2266 *
2267 * 	// Now we have created a GEM context with its engine map containing a
2268 * 	// single virtual engine. Submissions to this slot can go either to
2269 * 	// vcs0 or vcs1, depending on the load balancing algorithm used inside
2270 * 	// the driver. The load balancing is dynamic from one batch buffer to
2271 * 	// another and transparent to userspace.
2272 *
2273 * 	...
2274 * 	execbuf.rsvd1 = ctx_id;
2275 * 	execbuf.flags = 0; // Submits to index 0 which is the virtual engine
2276 * 	gem_execbuf(drm_fd, &execbuf);
2277 */
2278
2279/*
2280 * i915_context_engines_load_balance:
2281 *
2282 * Enable load balancing across this set of engines.
2283 *
2284 * Into the I915_EXEC_DEFAULT slot [0], a virtual engine is created that when
2285 * used will proxy the execbuffer request onto one of the set of engines
2286 * in such a way as to distribute the load evenly across the set.
2287 *
2288 * The set of engines must be compatible (e.g. the same HW class) as they
2289 * will share the same logical GPU context and ring.
2290 *
2291 * To intermix rendering with the virtual engine and direct rendering onto
2292 * the backing engines (bypassing the load balancing proxy), the context must
2293 * be defined to use a single timeline for all engines.
2294 */
2295struct i915_context_engines_load_balance {
2296	struct i915_user_extension base;
2297
2298	__u16 engine_index;
2299	__u16 num_siblings;
2300	__u32 flags; /* all undefined flags must be zero */
2301
2302	__u64 mbz64; /* reserved for future use; must be zero */
2303
2304	struct i915_engine_class_instance engines[];
2305} __attribute__((packed));
2306
2307#define I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(name__, N__) struct { \
2308	struct i915_user_extension base; \
2309	__u16 engine_index; \
2310	__u16 num_siblings; \
2311	__u32 flags; \
2312	__u64 mbz64; \
2313	struct i915_engine_class_instance engines[N__]; \
2314} __attribute__((packed)) name__
2315
2316/*
2317 * i915_context_engines_bond:
2318 *
2319 * Constructed bonded pairs for execution within a virtual engine.
2320 *
2321 * All engines are equal, but some are more equal than others. Given
2322 * the distribution of resources in the HW, it may be preferable to run
2323 * a request on a given subset of engines in parallel to a request on a
2324 * specific engine. We enable this selection of engines within a virtual
2325 * engine by specifying bonding pairs, for any given master engine we will
2326 * only execute on one of the corresponding siblings within the virtual engine.
2327 *
2328 * To execute a request in parallel on the master engine and a sibling requires
2329 * coordination with a I915_EXEC_FENCE_SUBMIT.
2330 */
2331struct i915_context_engines_bond {
2332	struct i915_user_extension base;
2333
2334	struct i915_engine_class_instance master;
2335
2336	__u16 virtual_index; /* index of virtual engine in ctx->engines[] */
2337	__u16 num_bonds;
2338
2339	__u64 flags; /* all undefined flags must be zero */
2340	__u64 mbz64[4]; /* reserved for future use; must be zero */
2341
2342	struct i915_engine_class_instance engines[];
2343} __attribute__((packed));
2344
2345#define I915_DEFINE_CONTEXT_ENGINES_BOND(name__, N__) struct { \
2346	struct i915_user_extension base; \
2347	struct i915_engine_class_instance master; \
2348	__u16 virtual_index; \
2349	__u16 num_bonds; \
2350	__u64 flags; \
2351	__u64 mbz64[4]; \
2352	struct i915_engine_class_instance engines[N__]; \
2353} __attribute__((packed)) name__
2354
2355/**
2356 * struct i915_context_engines_parallel_submit - Configure engine for
2357 * parallel submission.
2358 *
2359 * Setup a slot in the context engine map to allow multiple BBs to be submitted
2360 * in a single execbuf IOCTL. Those BBs will then be scheduled to run on the GPU
2361 * in parallel. Multiple hardware contexts are created internally in the i915 to
2362 * run these BBs. Once a slot is configured for N BBs only N BBs can be
2363 * submitted in each execbuf IOCTL and this is implicit behavior e.g. The user
2364 * doesn't tell the execbuf IOCTL there are N BBs, the execbuf IOCTL knows how
2365 * many BBs there are based on the slot's configuration. The N BBs are the last
2366 * N buffer objects or first N if I915_EXEC_BATCH_FIRST is set.
2367 *
2368 * The default placement behavior is to create implicit bonds between each
2369 * context if each context maps to more than 1 physical engine (e.g. context is
2370 * a virtual engine). Also we only allow contexts of same engine class and these
2371 * contexts must be in logically contiguous order. Examples of the placement
2372 * behavior are described below. Lastly, the default is to not allow BBs to be
2373 * preempted mid-batch. Rather insert coordinated preemption points on all
2374 * hardware contexts between each set of BBs. Flags could be added in the future
2375 * to change both of these default behaviors.
2376 *
2377 * Returns -EINVAL if hardware context placement configuration is invalid or if
2378 * the placement configuration isn't supported on the platform / submission
2379 * interface.
2380 * Returns -ENODEV if extension isn't supported on the platform / submission
2381 * interface.
2382 *
2383 * .. code-block:: none
2384 *
2385 *	Examples syntax:
2386 *	CS[X] = generic engine of same class, logical instance X
2387 *	INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
2388 *
2389 *	Example 1 pseudo code:
2390 *	set_engines(INVALID)
2391 *	set_parallel(engine_index=0, width=2, num_siblings=1,
2392 *		     engines=CS[0],CS[1])
2393 *
2394 *	Results in the following valid placement:
2395 *	CS[0], CS[1]
2396 *
2397 *	Example 2 pseudo code:
2398 *	set_engines(INVALID)
2399 *	set_parallel(engine_index=0, width=2, num_siblings=2,
2400 *		     engines=CS[0],CS[2],CS[1],CS[3])
2401 *
2402 *	Results in the following valid placements:
2403 *	CS[0], CS[1]
2404 *	CS[2], CS[3]
2405 *
2406 *	This can be thought of as two virtual engines, each containing two
2407 *	engines thereby making a 2D array. However, there are bonds tying the
2408 *	entries together and placing restrictions on how they can be scheduled.
2409 *	Specifically, the scheduler can choose only vertical columns from the 2D
2410 *	array. That is, CS[0] is bonded to CS[1] and CS[2] to CS[3]. So if the
2411 *	scheduler wants to submit to CS[0], it must also choose CS[1] and vice
2412 *	versa. Same for CS[2] requires also using CS[3].
2413 *	VE[0] = CS[0], CS[2]
2414 *	VE[1] = CS[1], CS[3]
2415 *
2416 *	Example 3 pseudo code:
2417 *	set_engines(INVALID)
2418 *	set_parallel(engine_index=0, width=2, num_siblings=2,
2419 *		     engines=CS[0],CS[1],CS[1],CS[3])
2420 *
2421 *	Results in the following valid and invalid placements:
2422 *	CS[0], CS[1]
2423 *	CS[1], CS[3] - Not logically contiguous, return -EINVAL
2424 */
2425struct i915_context_engines_parallel_submit {
2426	/**
2427	 * @base: base user extension.
2428	 */
2429	struct i915_user_extension base;
2430
2431	/**
2432	 * @engine_index: slot for parallel engine
2433	 */
2434	__u16 engine_index;
2435
2436	/**
2437	 * @width: number of contexts per parallel engine or in other words the
2438	 * number of batches in each submission
2439	 */
2440	__u16 width;
2441
2442	/**
2443	 * @num_siblings: number of siblings per context or in other words the
2444	 * number of possible placements for each submission
2445	 */
2446	__u16 num_siblings;
2447
2448	/**
2449	 * @mbz16: reserved for future use; must be zero
2450	 */
2451	__u16 mbz16;
2452
2453	/**
2454	 * @flags: all undefined flags must be zero, currently not defined flags
2455	 */
2456	__u64 flags;
2457
2458	/**
2459	 * @mbz64: reserved for future use; must be zero
2460	 */
2461	__u64 mbz64[3];
2462
2463	/**
2464	 * @engines: 2-d array of engine instances to configure parallel engine
2465	 *
2466	 * length = width (i) * num_siblings (j)
2467	 * index = j + i * num_siblings
2468	 */
2469	struct i915_engine_class_instance engines[];
2470
2471} __packed;
2472
2473#define I915_DEFINE_CONTEXT_ENGINES_PARALLEL_SUBMIT(name__, N__) struct { \
2474	struct i915_user_extension base; \
2475	__u16 engine_index; \
2476	__u16 width; \
2477	__u16 num_siblings; \
2478	__u16 mbz16; \
2479	__u64 flags; \
2480	__u64 mbz64[3]; \
2481	struct i915_engine_class_instance engines[N__]; \
2482} __attribute__((packed)) name__
2483
2484/**
2485 * DOC: Context Engine Map uAPI
2486 *
2487 * Context engine map is a new way of addressing engines when submitting batch-
2488 * buffers, replacing the existing way of using identifiers like `I915_EXEC_BLT`
2489 * inside the flags field of `struct drm_i915_gem_execbuffer2`.
2490 *
2491 * To use it created GEM contexts need to be configured with a list of engines
2492 * the user is intending to submit to. This is accomplished using the
2493 * `I915_CONTEXT_PARAM_ENGINES` parameter and `struct
2494 * i915_context_param_engines`.
2495 *
2496 * For such contexts the `I915_EXEC_RING_MASK` field becomes an index into the
2497 * configured map.
2498 *
2499 * Example of creating such context and submitting against it:
2500 *
2501 * .. code-block:: C
2502 *
2503 * 	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
2504 * 		.engines = { { I915_ENGINE_CLASS_RENDER, 0 },
2505 * 			     { I915_ENGINE_CLASS_COPY, 0 } }
2506 * 	};
2507 * 	struct drm_i915_gem_context_create_ext_setparam p_engines = {
2508 * 		.base = {
2509 * 			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
2510 * 		},
2511 * 		.param = {
2512 * 			.param = I915_CONTEXT_PARAM_ENGINES,
2513 * 			.value = to_user_pointer(&engines),
2514 * 			.size = sizeof(engines),
2515 * 		},
2516 * 	};
2517 * 	struct drm_i915_gem_context_create_ext create = {
2518 * 		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
2519 * 		.extensions = to_user_pointer(&p_engines);
2520 * 	};
2521 *
2522 * 	ctx_id = gem_context_create_ext(drm_fd, &create);
2523 *
2524 * 	// We have now created a GEM context with two engines in the map:
2525 * 	// Index 0 points to rcs0 while index 1 points to bcs0. Other engines
2526 * 	// will not be accessible from this context.
2527 *
2528 * 	...
2529 * 	execbuf.rsvd1 = ctx_id;
2530 * 	execbuf.flags = 0; // Submits to index 0, which is rcs0 for this context
2531 * 	gem_execbuf(drm_fd, &execbuf);
2532 *
2533 * 	...
2534 * 	execbuf.rsvd1 = ctx_id;
2535 * 	execbuf.flags = 1; // Submits to index 0, which is bcs0 for this context
2536 * 	gem_execbuf(drm_fd, &execbuf);
2537 */
2538
2539struct i915_context_param_engines {
2540	__u64 extensions; /* linked chain of extension blocks, 0 terminates */
2541#define I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE 0 /* see i915_context_engines_load_balance */
2542#define I915_CONTEXT_ENGINES_EXT_BOND 1 /* see i915_context_engines_bond */
2543#define I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT 2 /* see i915_context_engines_parallel_submit */
2544	struct i915_engine_class_instance engines[];
2545} __attribute__((packed));
2546
2547#define I915_DEFINE_CONTEXT_PARAM_ENGINES(name__, N__) struct { \
2548	__u64 extensions; \
2549	struct i915_engine_class_instance engines[N__]; \
2550} __attribute__((packed)) name__
2551
2552/**
2553 * struct drm_i915_gem_context_create_ext_setparam - Context parameter
2554 * to set or query during context creation.
2555 */
2556struct drm_i915_gem_context_create_ext_setparam {
2557	/** @base: Extension link. See struct i915_user_extension. */
2558	struct i915_user_extension base;
2559
2560	/**
2561	 * @param: Context parameter to set or query.
2562	 * See struct drm_i915_gem_context_param.
2563	 */
2564	struct drm_i915_gem_context_param param;
2565};
2566
2567struct drm_i915_gem_context_destroy {
2568	__u32 ctx_id;
2569	__u32 pad;
2570};
2571
2572/**
2573 * struct drm_i915_gem_vm_control - Structure to create or destroy VM.
2574 *
2575 * DRM_I915_GEM_VM_CREATE -
2576 *
2577 * Create a new virtual memory address space (ppGTT) for use within a context
2578 * on the same file. Extensions can be provided to configure exactly how the
2579 * address space is setup upon creation.
2580 *
2581 * The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is
2582 * returned in the outparam @id.
2583 *
2584 * An extension chain maybe provided, starting with @extensions, and terminated
2585 * by the @next_extension being 0. Currently, no extensions are defined.
2586 *
2587 * DRM_I915_GEM_VM_DESTROY -
2588 *
2589 * Destroys a previously created VM id, specified in @vm_id.
2590 *
2591 * No extensions or flags are allowed currently, and so must be zero.
2592 */
2593struct drm_i915_gem_vm_control {
2594	/** @extensions: Zero-terminated chain of extensions. */
2595	__u64 extensions;
2596
2597	/** @flags: reserved for future usage, currently MBZ */
2598	__u32 flags;
2599
2600	/** @vm_id: Id of the VM created or to be destroyed */
2601	__u32 vm_id;
2602};
2603
2604struct drm_i915_reg_read {
2605	/*
2606	 * Register offset.
2607	 * For 64bit wide registers where the upper 32bits don't immediately
2608	 * follow the lower 32bits, the offset of the lower 32bits must
2609	 * be specified
2610	 */
2611	__u64 offset;
2612#define I915_REG_READ_8B_WA (1ul << 0)
2613
2614	__u64 val; /* Return value */
2615};
2616
2617/* Known registers:
2618 *
2619 * Render engine timestamp - 0x2358 + 64bit - gen7+
2620 * - Note this register returns an invalid value if using the default
2621 *   single instruction 8byte read, in order to workaround that pass
2622 *   flag I915_REG_READ_8B_WA in offset field.
2623 *
2624 */
2625
2626struct drm_i915_reset_stats {
2627	__u32 ctx_id;
2628	__u32 flags;
2629
2630	/* All resets since boot/module reload, for all contexts */
2631	__u32 reset_count;
2632
2633	/* Number of batches lost when active in GPU, for this context */
2634	__u32 batch_active;
2635
2636	/* Number of batches lost pending for execution, for this context */
2637	__u32 batch_pending;
2638
2639	__u32 pad;
2640};
2641
2642/**
2643 * struct drm_i915_gem_userptr - Create GEM object from user allocated memory.
2644 *
2645 * Userptr objects have several restrictions on what ioctls can be used with the
2646 * object handle.
2647 */
2648struct drm_i915_gem_userptr {
2649	/**
2650	 * @user_ptr: The pointer to the allocated memory.
2651	 *
2652	 * Needs to be aligned to PAGE_SIZE.
2653	 */
2654	__u64 user_ptr;
2655
2656	/**
2657	 * @user_size:
2658	 *
2659	 * The size in bytes for the allocated memory. This will also become the
2660	 * object size.
2661	 *
2662	 * Needs to be aligned to PAGE_SIZE, and should be at least PAGE_SIZE,
2663	 * or larger.
2664	 */
2665	__u64 user_size;
2666
2667	/**
2668	 * @flags:
2669	 *
2670	 * Supported flags:
2671	 *
2672	 * I915_USERPTR_READ_ONLY:
2673	 *
2674	 * Mark the object as readonly, this also means GPU access can only be
2675	 * readonly. This is only supported on HW which supports readonly access
2676	 * through the GTT. If the HW can't support readonly access, an error is
2677	 * returned.
2678	 *
2679	 * I915_USERPTR_PROBE:
2680	 *
2681	 * Probe the provided @user_ptr range and validate that the @user_ptr is
2682	 * indeed pointing to normal memory and that the range is also valid.
2683	 * For example if some garbage address is given to the kernel, then this
2684	 * should complain.
2685	 *
2686	 * Returns -EFAULT if the probe failed.
2687	 *
2688	 * Note that this doesn't populate the backing pages, and also doesn't
2689	 * guarantee that the object will remain valid when the object is
2690	 * eventually used.
2691	 *
2692	 * The kernel supports this feature if I915_PARAM_HAS_USERPTR_PROBE
2693	 * returns a non-zero value.
2694	 *
2695	 * I915_USERPTR_UNSYNCHRONIZED:
2696	 *
2697	 * NOT USED. Setting this flag will result in an error.
2698	 */
2699	__u32 flags;
2700#define I915_USERPTR_READ_ONLY 0x1
2701#define I915_USERPTR_PROBE 0x2
2702#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
2703	/**
2704	 * @handle: Returned handle for the object.
2705	 *
2706	 * Object handles are nonzero.
2707	 */
2708	__u32 handle;
2709};
2710
2711enum drm_i915_oa_format {
2712	I915_OA_FORMAT_A13 = 1,	    /* HSW only */
2713	I915_OA_FORMAT_A29,	    /* HSW only */
2714	I915_OA_FORMAT_A13_B8_C8,   /* HSW only */
2715	I915_OA_FORMAT_B4_C8,	    /* HSW only */
2716	I915_OA_FORMAT_A45_B8_C8,   /* HSW only */
2717	I915_OA_FORMAT_B4_C8_A16,   /* HSW only */
2718	I915_OA_FORMAT_C4_B8,	    /* HSW+ */
2719
2720	/* Gen8+ */
2721	I915_OA_FORMAT_A12,
2722	I915_OA_FORMAT_A12_B8_C8,
2723	I915_OA_FORMAT_A32u40_A4u32_B8_C8,
2724
2725	/* DG2 */
2726	I915_OAR_FORMAT_A32u40_A4u32_B8_C8,
2727	I915_OA_FORMAT_A24u40_A14u32_B8_C8,
2728
2729	/* MTL OAM */
2730	I915_OAM_FORMAT_MPEC8u64_B8_C8,
2731	I915_OAM_FORMAT_MPEC8u32_B8_C8,
2732
2733	I915_OA_FORMAT_MAX	    /* non-ABI */
2734};
2735
2736enum drm_i915_perf_property_id {
2737	/**
2738	 * Open the stream for a specific context handle (as used with
2739	 * execbuffer2). A stream opened for a specific context this way
2740	 * won't typically require root privileges.
2741	 *
2742	 * This property is available in perf revision 1.
2743	 */
2744	DRM_I915_PERF_PROP_CTX_HANDLE = 1,
2745
2746	/**
2747	 * A value of 1 requests the inclusion of raw OA unit reports as
2748	 * part of stream samples.
2749	 *
2750	 * This property is available in perf revision 1.
2751	 */
2752	DRM_I915_PERF_PROP_SAMPLE_OA,
2753
2754	/**
2755	 * The value specifies which set of OA unit metrics should be
2756	 * configured, defining the contents of any OA unit reports.
2757	 *
2758	 * This property is available in perf revision 1.
2759	 */
2760	DRM_I915_PERF_PROP_OA_METRICS_SET,
2761
2762	/**
2763	 * The value specifies the size and layout of OA unit reports.
2764	 *
2765	 * This property is available in perf revision 1.
2766	 */
2767	DRM_I915_PERF_PROP_OA_FORMAT,
2768
2769	/**
2770	 * Specifying this property implicitly requests periodic OA unit
2771	 * sampling and (at least on Haswell) the sampling frequency is derived
2772	 * from this exponent as follows:
2773	 *
2774	 *   80ns * 2^(period_exponent + 1)
2775	 *
2776	 * This property is available in perf revision 1.
2777	 */
2778	DRM_I915_PERF_PROP_OA_EXPONENT,
2779
2780	/**
2781	 * Specifying this property is only valid when specify a context to
2782	 * filter with DRM_I915_PERF_PROP_CTX_HANDLE. Specifying this property
2783	 * will hold preemption of the particular context we want to gather
2784	 * performance data about. The execbuf2 submissions must include a
2785	 * drm_i915_gem_execbuffer_ext_perf parameter for this to apply.
2786	 *
2787	 * This property is available in perf revision 3.
2788	 */
2789	DRM_I915_PERF_PROP_HOLD_PREEMPTION,
2790
2791	/**
2792	 * Specifying this pins all contexts to the specified SSEU power
2793	 * configuration for the duration of the recording.
2794	 *
2795	 * This parameter's value is a pointer to a struct
2796	 * drm_i915_gem_context_param_sseu.
2797	 *
2798	 * This property is available in perf revision 4.
2799	 */
2800	DRM_I915_PERF_PROP_GLOBAL_SSEU,
2801
2802	/**
2803	 * This optional parameter specifies the timer interval in nanoseconds
2804	 * at which the i915 driver will check the OA buffer for available data.
2805	 * Minimum allowed value is 100 microseconds. A default value is used by
2806	 * the driver if this parameter is not specified. Note that larger timer
2807	 * values will reduce cpu consumption during OA perf captures. However,
2808	 * excessively large values would potentially result in OA buffer
2809	 * overwrites as captures reach end of the OA buffer.
2810	 *
2811	 * This property is available in perf revision 5.
2812	 */
2813	DRM_I915_PERF_PROP_POLL_OA_PERIOD,
2814
2815	/**
2816	 * Multiple engines may be mapped to the same OA unit. The OA unit is
2817	 * identified by class:instance of any engine mapped to it.
2818	 *
2819	 * This parameter specifies the engine class and must be passed along
2820	 * with DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE.
2821	 *
2822	 * This property is available in perf revision 6.
2823	 */
2824	DRM_I915_PERF_PROP_OA_ENGINE_CLASS,
2825
2826	/**
2827	 * This parameter specifies the engine instance and must be passed along
2828	 * with DRM_I915_PERF_PROP_OA_ENGINE_CLASS.
2829	 *
2830	 * This property is available in perf revision 6.
2831	 */
2832	DRM_I915_PERF_PROP_OA_ENGINE_INSTANCE,
2833
2834	DRM_I915_PERF_PROP_MAX /* non-ABI */
2835};
2836
2837struct drm_i915_perf_open_param {
2838	__u32 flags;
2839#define I915_PERF_FLAG_FD_CLOEXEC	(1<<0)
2840#define I915_PERF_FLAG_FD_NONBLOCK	(1<<1)
2841#define I915_PERF_FLAG_DISABLED		(1<<2)
2842
2843	/** The number of u64 (id, value) pairs */
2844	__u32 num_properties;
2845
2846	/**
2847	 * Pointer to array of u64 (id, value) pairs configuring the stream
2848	 * to open.
2849	 */
2850	__u64 properties_ptr;
2851};
2852
2853/*
2854 * Enable data capture for a stream that was either opened in a disabled state
2855 * via I915_PERF_FLAG_DISABLED or was later disabled via
2856 * I915_PERF_IOCTL_DISABLE.
2857 *
2858 * It is intended to be cheaper to disable and enable a stream than it may be
2859 * to close and re-open a stream with the same configuration.
2860 *
2861 * It's undefined whether any pending data for the stream will be lost.
2862 *
2863 * This ioctl is available in perf revision 1.
2864 */
2865#define I915_PERF_IOCTL_ENABLE	_IO('i', 0x0)
2866
2867/*
2868 * Disable data capture for a stream.
2869 *
2870 * It is an error to try and read a stream that is disabled.
2871 *
2872 * This ioctl is available in perf revision 1.
2873 */
2874#define I915_PERF_IOCTL_DISABLE	_IO('i', 0x1)
2875
2876/*
2877 * Change metrics_set captured by a stream.
2878 *
2879 * If the stream is bound to a specific context, the configuration change
2880 * will performed inline with that context such that it takes effect before
2881 * the next execbuf submission.
2882 *
2883 * Returns the previously bound metrics set id, or a negative error code.
2884 *
2885 * This ioctl is available in perf revision 2.
2886 */
2887#define I915_PERF_IOCTL_CONFIG	_IO('i', 0x2)
2888
2889/*
2890 * Common to all i915 perf records
2891 */
2892struct drm_i915_perf_record_header {
2893	__u32 type;
2894	__u16 pad;
2895	__u16 size;
2896};
2897
2898enum drm_i915_perf_record_type {
2899
2900	/**
2901	 * Samples are the work horse record type whose contents are extensible
2902	 * and defined when opening an i915 perf stream based on the given
2903	 * properties.
2904	 *
2905	 * Boolean properties following the naming convention
2906	 * DRM_I915_PERF_SAMPLE_xyz_PROP request the inclusion of 'xyz' data in
2907	 * every sample.
2908	 *
2909	 * The order of these sample properties given by userspace has no
2910	 * affect on the ordering of data within a sample. The order is
2911	 * documented here.
2912	 *
2913	 * struct {
2914	 *     struct drm_i915_perf_record_header header;
2915	 *
2916	 *     { u32 oa_report[]; } && DRM_I915_PERF_PROP_SAMPLE_OA
2917	 * };
2918	 */
2919	DRM_I915_PERF_RECORD_SAMPLE = 1,
2920
2921	/*
2922	 * Indicates that one or more OA reports were not written by the
2923	 * hardware. This can happen for example if an MI_REPORT_PERF_COUNT
2924	 * command collides with periodic sampling - which would be more likely
2925	 * at higher sampling frequencies.
2926	 */
2927	DRM_I915_PERF_RECORD_OA_REPORT_LOST = 2,
2928
2929	/**
2930	 * An error occurred that resulted in all pending OA reports being lost.
2931	 */
2932	DRM_I915_PERF_RECORD_OA_BUFFER_LOST = 3,
2933
2934	DRM_I915_PERF_RECORD_MAX /* non-ABI */
2935};
2936
2937/**
2938 * struct drm_i915_perf_oa_config
2939 *
2940 * Structure to upload perf dynamic configuration into the kernel.
2941 */
2942struct drm_i915_perf_oa_config {
2943	/**
2944	 * @uuid:
2945	 *
2946	 * String formatted like "%\08x-%\04x-%\04x-%\04x-%\012x"
2947	 */
2948	char uuid[36];
2949
2950	/**
2951	 * @n_mux_regs:
2952	 *
2953	 * Number of mux regs in &mux_regs_ptr.
2954	 */
2955	__u32 n_mux_regs;
2956
2957	/**
2958	 * @n_boolean_regs:
2959	 *
2960	 * Number of boolean regs in &boolean_regs_ptr.
2961	 */
2962	__u32 n_boolean_regs;
2963
2964	/**
2965	 * @n_flex_regs:
2966	 *
2967	 * Number of flex regs in &flex_regs_ptr.
2968	 */
2969	__u32 n_flex_regs;
2970
2971	/**
2972	 * @mux_regs_ptr:
2973	 *
2974	 * Pointer to tuples of u32 values (register address, value) for mux
2975	 * registers.  Expected length of buffer is (2 * sizeof(u32) *
2976	 * &n_mux_regs).
2977	 */
2978	__u64 mux_regs_ptr;
2979
2980	/**
2981	 * @boolean_regs_ptr:
2982	 *
2983	 * Pointer to tuples of u32 values (register address, value) for mux
2984	 * registers.  Expected length of buffer is (2 * sizeof(u32) *
2985	 * &n_boolean_regs).
2986	 */
2987	__u64 boolean_regs_ptr;
2988
2989	/**
2990	 * @flex_regs_ptr:
2991	 *
2992	 * Pointer to tuples of u32 values (register address, value) for mux
2993	 * registers.  Expected length of buffer is (2 * sizeof(u32) *
2994	 * &n_flex_regs).
2995	 */
2996	__u64 flex_regs_ptr;
2997};
2998
2999/**
3000 * struct drm_i915_query_item - An individual query for the kernel to process.
3001 *
3002 * The behaviour is determined by the @query_id. Note that exactly what
3003 * @data_ptr is also depends on the specific @query_id.
3004 */
3005struct drm_i915_query_item {
3006	/**
3007	 * @query_id:
3008	 *
3009	 * The id for this query.  Currently accepted query IDs are:
3010	 *  - %DRM_I915_QUERY_TOPOLOGY_INFO (see struct drm_i915_query_topology_info)
3011	 *  - %DRM_I915_QUERY_ENGINE_INFO (see struct drm_i915_engine_info)
3012	 *  - %DRM_I915_QUERY_PERF_CONFIG (see struct drm_i915_query_perf_config)
3013	 *  - %DRM_I915_QUERY_MEMORY_REGIONS (see struct drm_i915_query_memory_regions)
3014	 *  - %DRM_I915_QUERY_HWCONFIG_BLOB (see `GuC HWCONFIG blob uAPI`)
3015	 *  - %DRM_I915_QUERY_GEOMETRY_SUBSLICES (see struct drm_i915_query_topology_info)
3016	 *  - %DRM_I915_QUERY_GUC_SUBMISSION_VERSION (see struct drm_i915_query_guc_submission_version)
3017	 */
3018	__u64 query_id;
3019#define DRM_I915_QUERY_TOPOLOGY_INFO		1
3020#define DRM_I915_QUERY_ENGINE_INFO		2
3021#define DRM_I915_QUERY_PERF_CONFIG		3
3022#define DRM_I915_QUERY_MEMORY_REGIONS		4
3023#define DRM_I915_QUERY_HWCONFIG_BLOB		5
3024#define DRM_I915_QUERY_GEOMETRY_SUBSLICES	6
3025#define DRM_I915_QUERY_GUC_SUBMISSION_VERSION	7
3026/* Must be kept compact -- no holes and well documented */
3027
3028	/**
3029	 * @length:
3030	 *
3031	 * When set to zero by userspace, this is filled with the size of the
3032	 * data to be written at the @data_ptr pointer. The kernel sets this
3033	 * value to a negative value to signal an error on a particular query
3034	 * item.
3035	 */
3036	__s32 length;
3037
3038	/**
3039	 * @flags:
3040	 *
3041	 * When &query_id == %DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
3042	 *
3043	 * When &query_id == %DRM_I915_QUERY_PERF_CONFIG, must be one of the
3044	 * following:
3045	 *
3046	 *	- %DRM_I915_QUERY_PERF_CONFIG_LIST
3047	 *      - %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
3048	 *      - %DRM_I915_QUERY_PERF_CONFIG_FOR_UUID
3049	 *
3050	 * When &query_id == %DRM_I915_QUERY_GEOMETRY_SUBSLICES must contain
3051	 * a struct i915_engine_class_instance that references a render engine.
3052	 */
3053	__u32 flags;
3054#define DRM_I915_QUERY_PERF_CONFIG_LIST          1
3055#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2
3056#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID   3
3057
3058	/**
3059	 * @data_ptr:
3060	 *
3061	 * Data will be written at the location pointed by @data_ptr when the
3062	 * value of @length matches the length of the data to be written by the
3063	 * kernel.
3064	 */
3065	__u64 data_ptr;
3066};
3067
3068/**
3069 * struct drm_i915_query - Supply an array of struct drm_i915_query_item for the
3070 * kernel to fill out.
3071 *
3072 * Note that this is generally a two step process for each struct
3073 * drm_i915_query_item in the array:
3074 *
3075 * 1. Call the DRM_IOCTL_I915_QUERY, giving it our array of struct
3076 *    drm_i915_query_item, with &drm_i915_query_item.length set to zero. The
3077 *    kernel will then fill in the size, in bytes, which tells userspace how
3078 *    memory it needs to allocate for the blob(say for an array of properties).
3079 *
3080 * 2. Next we call DRM_IOCTL_I915_QUERY again, this time with the
3081 *    &drm_i915_query_item.data_ptr equal to our newly allocated blob. Note that
3082 *    the &drm_i915_query_item.length should still be the same as what the
3083 *    kernel previously set. At this point the kernel can fill in the blob.
3084 *
3085 * Note that for some query items it can make sense for userspace to just pass
3086 * in a buffer/blob equal to or larger than the required size. In this case only
3087 * a single ioctl call is needed. For some smaller query items this can work
3088 * quite well.
3089 *
3090 */
3091struct drm_i915_query {
3092	/** @num_items: The number of elements in the @items_ptr array */
3093	__u32 num_items;
3094
3095	/**
3096	 * @flags: Unused for now. Must be cleared to zero.
3097	 */
3098	__u32 flags;
3099
3100	/**
3101	 * @items_ptr:
3102	 *
3103	 * Pointer to an array of struct drm_i915_query_item. The number of
3104	 * array elements is @num_items.
3105	 */
3106	__u64 items_ptr;
3107};
3108
3109/**
3110 * struct drm_i915_query_topology_info
3111 *
3112 * Describes slice/subslice/EU information queried by
3113 * %DRM_I915_QUERY_TOPOLOGY_INFO
3114 */
3115struct drm_i915_query_topology_info {
3116	/**
3117	 * @flags:
3118	 *
3119	 * Unused for now. Must be cleared to zero.
3120	 */
3121	__u16 flags;
3122
3123	/**
3124	 * @max_slices:
3125	 *
3126	 * The number of bits used to express the slice mask.
3127	 */
3128	__u16 max_slices;
3129
3130	/**
3131	 * @max_subslices:
3132	 *
3133	 * The number of bits used to express the subslice mask.
3134	 */
3135	__u16 max_subslices;
3136
3137	/**
3138	 * @max_eus_per_subslice:
3139	 *
3140	 * The number of bits in the EU mask that correspond to a single
3141	 * subslice's EUs.
3142	 */
3143	__u16 max_eus_per_subslice;
3144
3145	/**
3146	 * @subslice_offset:
3147	 *
3148	 * Offset in data[] at which the subslice masks are stored.
3149	 */
3150	__u16 subslice_offset;
3151
3152	/**
3153	 * @subslice_stride:
3154	 *
3155	 * Stride at which each of the subslice masks for each slice are
3156	 * stored.
3157	 */
3158	__u16 subslice_stride;
3159
3160	/**
3161	 * @eu_offset:
3162	 *
3163	 * Offset in data[] at which the EU masks are stored.
3164	 */
3165	__u16 eu_offset;
3166
3167	/**
3168	 * @eu_stride:
3169	 *
3170	 * Stride at which each of the EU masks for each subslice are stored.
3171	 */
3172	__u16 eu_stride;
3173
3174	/**
3175	 * @data:
3176	 *
3177	 * Contains 3 pieces of information :
3178	 *
3179	 * - The slice mask with one bit per slice telling whether a slice is
3180	 *   available. The availability of slice X can be queried with the
3181	 *   following formula :
3182	 *
3183	 *   .. code:: c
3184	 *
3185	 *      (data[X / 8] >> (X % 8)) & 1
3186	 *
3187	 *   Starting with Xe_HP platforms, Intel hardware no longer has
3188	 *   traditional slices so i915 will always report a single slice
3189	 *   (hardcoded slicemask = 0x1) which contains all of the platform's
3190	 *   subslices.  I.e., the mask here does not reflect any of the newer
3191	 *   hardware concepts such as "gslices" or "cslices" since userspace
3192	 *   is capable of inferring those from the subslice mask.
3193	 *
3194	 * - The subslice mask for each slice with one bit per subslice telling
3195	 *   whether a subslice is available.  Starting with Gen12 we use the
3196	 *   term "subslice" to refer to what the hardware documentation
3197	 *   describes as a "dual-subslices."  The availability of subslice Y
3198	 *   in slice X can be queried with the following formula :
3199	 *
3200	 *   .. code:: c
3201	 *
3202	 *      (data[subslice_offset + X * subslice_stride + Y / 8] >> (Y % 8)) & 1
3203	 *
3204	 * - The EU mask for each subslice in each slice, with one bit per EU
3205	 *   telling whether an EU is available. The availability of EU Z in
3206	 *   subslice Y in slice X can be queried with the following formula :
3207	 *
3208	 *   .. code:: c
3209	 *
3210	 *      (data[eu_offset +
3211	 *            (X * max_subslices + Y) * eu_stride +
3212	 *            Z / 8
3213	 *       ] >> (Z % 8)) & 1
3214	 */
3215	__u8 data[];
3216};
3217
3218/**
3219 * DOC: Engine Discovery uAPI
3220 *
3221 * Engine discovery uAPI is a way of enumerating physical engines present in a
3222 * GPU associated with an open i915 DRM file descriptor. This supersedes the old
3223 * way of using `DRM_IOCTL_I915_GETPARAM` and engine identifiers like
3224 * `I915_PARAM_HAS_BLT`.
3225 *
3226 * The need for this interface came starting with Icelake and newer GPUs, which
3227 * started to establish a pattern of having multiple engines of a same class,
3228 * where not all instances were always completely functionally equivalent.
3229 *
3230 * Entry point for this uapi is `DRM_IOCTL_I915_QUERY` with the
3231 * `DRM_I915_QUERY_ENGINE_INFO` as the queried item id.
3232 *
3233 * Example for getting the list of engines:
3234 *
3235 * .. code-block:: C
3236 *
3237 * 	struct drm_i915_query_engine_info *info;
3238 * 	struct drm_i915_query_item item = {
3239 * 		.query_id = DRM_I915_QUERY_ENGINE_INFO;
3240 * 	};
3241 * 	struct drm_i915_query query = {
3242 * 		.num_items = 1,
3243 * 		.items_ptr = (uintptr_t)&item,
3244 * 	};
3245 * 	int err, i;
3246 *
3247 * 	// First query the size of the blob we need, this needs to be large
3248 * 	// enough to hold our array of engines. The kernel will fill out the
3249 * 	// item.length for us, which is the number of bytes we need.
3250 * 	//
3251 *	// Alternatively a large buffer can be allocated straightaway enabling
3252 * 	// querying in one pass, in which case item.length should contain the
3253 * 	// length of the provided buffer.
3254 * 	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
3255 * 	if (err) ...
3256 *
3257 * 	info = calloc(1, item.length);
3258 * 	// Now that we allocated the required number of bytes, we call the ioctl
3259 * 	// again, this time with the data_ptr pointing to our newly allocated
3260 * 	// blob, which the kernel can then populate with info on all engines.
3261 *	item.data_ptr = (uintptr_t)&info;
3262 *
3263 * 	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
3264 * 	if (err) ...
3265 *
3266 * 	// We can now access each engine in the array
3267 * 	for (i = 0; i < info->num_engines; i++) {
3268 * 		struct drm_i915_engine_info einfo = info->engines[i];
3269 * 		u16 class = einfo.engine.class;
3270 * 		u16 instance = einfo.engine.instance;
3271 * 		....
3272 * 	}
3273 *
3274 * 	free(info);
3275 *
3276 * Each of the enumerated engines, apart from being defined by its class and
3277 * instance (see `struct i915_engine_class_instance`), also can have flags and
3278 * capabilities defined as documented in i915_drm.h.
3279 *
3280 * For instance video engines which support HEVC encoding will have the
3281 * `I915_VIDEO_CLASS_CAPABILITY_HEVC` capability bit set.
3282 *
3283 * Engine discovery only fully comes to its own when combined with the new way
3284 * of addressing engines when submitting batch buffers using contexts with
3285 * engine maps configured.
3286 */
3287
3288/**
3289 * struct drm_i915_engine_info
3290 *
3291 * Describes one engine and its capabilities as known to the driver.
3292 */
3293struct drm_i915_engine_info {
3294	/** @engine: Engine class and instance. */
3295	struct i915_engine_class_instance engine;
3296
3297	/** @rsvd0: Reserved field. */
3298	__u32 rsvd0;
3299
3300	/** @flags: Engine flags. */
3301	__u64 flags;
3302#define I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE		(1 << 0)
3303
3304	/** @capabilities: Capabilities of this engine. */
3305	__u64 capabilities;
3306#define I915_VIDEO_CLASS_CAPABILITY_HEVC		(1 << 0)
3307#define I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC	(1 << 1)
3308
3309	/** @logical_instance: Logical instance of engine */
3310	__u16 logical_instance;
3311
3312	/** @rsvd1: Reserved fields. */
3313	__u16 rsvd1[3];
3314	/** @rsvd2: Reserved fields. */
3315	__u64 rsvd2[3];
3316};
3317
3318/**
3319 * struct drm_i915_query_engine_info
3320 *
3321 * Engine info query enumerates all engines known to the driver by filling in
3322 * an array of struct drm_i915_engine_info structures.
3323 */
3324struct drm_i915_query_engine_info {
3325	/** @num_engines: Number of struct drm_i915_engine_info structs following. */
3326	__u32 num_engines;
3327
3328	/** @rsvd: MBZ */
3329	__u32 rsvd[3];
3330
3331	/** @engines: Marker for drm_i915_engine_info structures. */
3332	struct drm_i915_engine_info engines[];
3333};
3334
3335/**
3336 * struct drm_i915_query_perf_config
3337 *
3338 * Data written by the kernel with query %DRM_I915_QUERY_PERF_CONFIG and
3339 * %DRM_I915_QUERY_GEOMETRY_SUBSLICES.
3340 */
3341struct drm_i915_query_perf_config {
3342	union {
3343		/**
3344		 * @n_configs:
3345		 *
3346		 * When &drm_i915_query_item.flags ==
3347		 * %DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets this fields to
3348		 * the number of configurations available.
3349		 */
3350		__u64 n_configs;
3351
3352		/**
3353		 * @config:
3354		 *
3355		 * When &drm_i915_query_item.flags ==
3356		 * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will use the
3357		 * value in this field as configuration identifier to decide
3358		 * what data to write into config_ptr.
3359		 */
3360		__u64 config;
3361
3362		/**
3363		 * @uuid:
3364		 *
3365		 * When &drm_i915_query_item.flags ==
3366		 * %DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID, i915 will use the
3367		 * value in this field as configuration identifier to decide
3368		 * what data to write into config_ptr.
3369		 *
3370		 * String formatted like "%08x-%04x-%04x-%04x-%012x"
3371		 */
3372		char uuid[36];
3373	};
3374
3375	/**
3376	 * @flags:
3377	 *
3378	 * Unused for now. Must be cleared to zero.
3379	 */
3380	__u32 flags;
3381
3382	/**
3383	 * @data:
3384	 *
3385	 * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_LIST,
3386	 * i915 will write an array of __u64 of configuration identifiers.
3387	 *
3388	 * When &drm_i915_query_item.flags == %DRM_I915_QUERY_PERF_CONFIG_DATA,
3389	 * i915 will write a struct drm_i915_perf_oa_config. If the following
3390	 * fields of struct drm_i915_perf_oa_config are not set to 0, i915 will
3391	 * write into the associated pointers the values of submitted when the
3392	 * configuration was created :
3393	 *
3394	 *  - &drm_i915_perf_oa_config.n_mux_regs
3395	 *  - &drm_i915_perf_oa_config.n_boolean_regs
3396	 *  - &drm_i915_perf_oa_config.n_flex_regs
3397	 */
3398	__u8 data[];
3399};
3400
3401/**
3402 * enum drm_i915_gem_memory_class - Supported memory classes
3403 */
3404enum drm_i915_gem_memory_class {
3405	/** @I915_MEMORY_CLASS_SYSTEM: System memory */
3406	I915_MEMORY_CLASS_SYSTEM = 0,
3407	/** @I915_MEMORY_CLASS_DEVICE: Device local-memory */
3408	I915_MEMORY_CLASS_DEVICE,
3409};
3410
3411/**
3412 * struct drm_i915_gem_memory_class_instance - Identify particular memory region
3413 */
3414struct drm_i915_gem_memory_class_instance {
3415	/** @memory_class: See enum drm_i915_gem_memory_class */
3416	__u16 memory_class;
3417
3418	/** @memory_instance: Which instance */
3419	__u16 memory_instance;
3420};
3421
3422/**
3423 * struct drm_i915_memory_region_info - Describes one region as known to the
3424 * driver.
3425 *
3426 * Note this is using both struct drm_i915_query_item and struct drm_i915_query.
3427 * For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS
3428 * at &drm_i915_query_item.query_id.
3429 */
3430struct drm_i915_memory_region_info {
3431	/** @region: The class:instance pair encoding */
3432	struct drm_i915_gem_memory_class_instance region;
3433
3434	/** @rsvd0: MBZ */
3435	__u32 rsvd0;
3436
3437	/**
3438	 * @probed_size: Memory probed by the driver
3439	 *
3440	 * Note that it should not be possible to ever encounter a zero value
3441	 * here, also note that no current region type will ever return -1 here.
3442	 * Although for future region types, this might be a possibility. The
3443	 * same applies to the other size fields.
3444	 */
3445	__u64 probed_size;
3446
3447	/**
3448	 * @unallocated_size: Estimate of memory remaining
3449	 *
3450	 * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable accounting.
3451	 * Without this (or if this is an older kernel) the value here will
3452	 * always equal the @probed_size. Note this is only currently tracked
3453	 * for I915_MEMORY_CLASS_DEVICE regions (for other types the value here
3454	 * will always equal the @probed_size).
3455	 */
3456	__u64 unallocated_size;
3457
3458	union {
3459		/** @rsvd1: MBZ */
3460		__u64 rsvd1[8];
3461		struct {
3462			/**
3463			 * @probed_cpu_visible_size: Memory probed by the driver
3464			 * that is CPU accessible.
3465			 *
3466			 * This will be always be <= @probed_size, and the
3467			 * remainder (if there is any) will not be CPU
3468			 * accessible.
3469			 *
3470			 * On systems without small BAR, the @probed_size will
3471			 * always equal the @probed_cpu_visible_size, since all
3472			 * of it will be CPU accessible.
3473			 *
3474			 * Note this is only tracked for
3475			 * I915_MEMORY_CLASS_DEVICE regions (for other types the
3476			 * value here will always equal the @probed_size).
3477			 *
3478			 * Note that if the value returned here is zero, then
3479			 * this must be an old kernel which lacks the relevant
3480			 * small-bar uAPI support (including
3481			 * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS), but on
3482			 * such systems we should never actually end up with a
3483			 * small BAR configuration, assuming we are able to load
3484			 * the kernel module. Hence it should be safe to treat
3485			 * this the same as when @probed_cpu_visible_size ==
3486			 * @probed_size.
3487			 */
3488			__u64 probed_cpu_visible_size;
3489
3490			/**
3491			 * @unallocated_cpu_visible_size: Estimate of CPU
3492			 * visible memory remaining.
3493			 *
3494			 * Note this is only tracked for
3495			 * I915_MEMORY_CLASS_DEVICE regions (for other types the
3496			 * value here will always equal the
3497			 * @probed_cpu_visible_size).
3498			 *
3499			 * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
3500			 * accounting.  Without this the value here will always
3501			 * equal the @probed_cpu_visible_size. Note this is only
3502			 * currently tracked for I915_MEMORY_CLASS_DEVICE
3503			 * regions (for other types the value here will also
3504			 * always equal the @probed_cpu_visible_size).
3505			 *
3506			 * If this is an older kernel the value here will be
3507			 * zero, see also @probed_cpu_visible_size.
3508			 */
3509			__u64 unallocated_cpu_visible_size;
3510		};
3511	};
3512};
3513
3514/**
3515 * struct drm_i915_query_memory_regions
3516 *
3517 * The region info query enumerates all regions known to the driver by filling
3518 * in an array of struct drm_i915_memory_region_info structures.
3519 *
3520 * Example for getting the list of supported regions:
3521 *
3522 * .. code-block:: C
3523 *
3524 *	struct drm_i915_query_memory_regions *info;
3525 *	struct drm_i915_query_item item = {
3526 *		.query_id = DRM_I915_QUERY_MEMORY_REGIONS;
3527 *	};
3528 *	struct drm_i915_query query = {
3529 *		.num_items = 1,
3530 *		.items_ptr = (uintptr_t)&item,
3531 *	};
3532 *	int err, i;
3533 *
3534 *	// First query the size of the blob we need, this needs to be large
3535 *	// enough to hold our array of regions. The kernel will fill out the
3536 *	// item.length for us, which is the number of bytes we need.
3537 *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
3538 *	if (err) ...
3539 *
3540 *	info = calloc(1, item.length);
3541 *	// Now that we allocated the required number of bytes, we call the ioctl
3542 *	// again, this time with the data_ptr pointing to our newly allocated
3543 *	// blob, which the kernel can then populate with the all the region info.
3544 *	item.data_ptr = (uintptr_t)&info,
3545 *
3546 *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
3547 *	if (err) ...
3548 *
3549 *	// We can now access each region in the array
3550 *	for (i = 0; i < info->num_regions; i++) {
3551 *		struct drm_i915_memory_region_info mr = info->regions[i];
3552 *		u16 class = mr.region.class;
3553 *		u16 instance = mr.region.instance;
3554 *
3555 *		....
3556 *	}
3557 *
3558 *	free(info);
3559 */
3560struct drm_i915_query_memory_regions {
3561	/** @num_regions: Number of supported regions */
3562	__u32 num_regions;
3563
3564	/** @rsvd: MBZ */
3565	__u32 rsvd[3];
3566
3567	/** @regions: Info about each supported region */
3568	struct drm_i915_memory_region_info regions[];
3569};
3570
3571/**
3572 * struct drm_i915_query_guc_submission_version - query GuC submission interface version
3573 */
3574struct drm_i915_query_guc_submission_version {
3575	/** @branch: Firmware branch version. */
3576	__u32 branch;
3577	/** @major: Firmware major version. */
3578	__u32 major;
3579	/** @minor: Firmware minor version. */
3580	__u32 minor;
3581	/** @patch: Firmware patch version. */
3582	__u32 patch;
3583};
3584
3585/**
3586 * DOC: GuC HWCONFIG blob uAPI
3587 *
3588 * The GuC produces a blob with information about the current device.
3589 * i915 reads this blob from GuC and makes it available via this uAPI.
3590 *
3591 * The format and meaning of the blob content are documented in the
3592 * Programmer's Reference Manual.
3593 */
3594
3595/**
3596 * struct drm_i915_gem_create_ext - Existing gem_create behaviour, with added
3597 * extension support using struct i915_user_extension.
3598 *
3599 * Note that new buffer flags should be added here, at least for the stuff that
3600 * is immutable. Previously we would have two ioctls, one to create the object
3601 * with gem_create, and another to apply various parameters, however this
3602 * creates some ambiguity for the params which are considered immutable. Also in
3603 * general we're phasing out the various SET/GET ioctls.
3604 */
3605struct drm_i915_gem_create_ext {
3606	/**
3607	 * @size: Requested size for the object.
3608	 *
3609	 * The (page-aligned) allocated size for the object will be returned.
3610	 *
3611	 * On platforms like DG2/ATS the kernel will always use 64K or larger
3612	 * pages for I915_MEMORY_CLASS_DEVICE. The kernel also requires a
3613	 * minimum of 64K GTT alignment for such objects.
3614	 *
3615	 * NOTE: Previously the ABI here required a minimum GTT alignment of 2M
3616	 * on DG2/ATS, due to how the hardware implemented 64K GTT page support,
3617	 * where we had the following complications:
3618	 *
3619	 *   1) The entire PDE (which covers a 2MB virtual address range), must
3620	 *   contain only 64K PTEs, i.e mixing 4K and 64K PTEs in the same
3621	 *   PDE is forbidden by the hardware.
3622	 *
3623	 *   2) We still need to support 4K PTEs for I915_MEMORY_CLASS_SYSTEM
3624	 *   objects.
3625	 *
3626	 * However on actual production HW this was completely changed to now
3627	 * allow setting a TLB hint at the PTE level (see PS64), which is a lot
3628	 * more flexible than the above. With this the 2M restriction was
3629	 * dropped where we now only require 64K.
3630	 */
3631	__u64 size;
3632
3633	/**
3634	 * @handle: Returned handle for the object.
3635	 *
3636	 * Object handles are nonzero.
3637	 */
3638	__u32 handle;
3639
3640	/**
3641	 * @flags: Optional flags.
3642	 *
3643	 * Supported values:
3644	 *
3645	 * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the kernel that
3646	 * the object will need to be accessed via the CPU.
3647	 *
3648	 * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, and only
3649	 * strictly required on configurations where some subset of the device
3650	 * memory is directly visible/mappable through the CPU (which we also
3651	 * call small BAR), like on some DG2+ systems. Note that this is quite
3652	 * undesirable, but due to various factors like the client CPU, BIOS etc
3653	 * it's something we can expect to see in the wild. See
3654	 * &drm_i915_memory_region_info.probed_cpu_visible_size for how to
3655	 * determine if this system applies.
3656	 *
3657	 * Note that one of the placements MUST be I915_MEMORY_CLASS_SYSTEM, to
3658	 * ensure the kernel can always spill the allocation to system memory,
3659	 * if the object can't be allocated in the mappable part of
3660	 * I915_MEMORY_CLASS_DEVICE.
3661	 *
3662	 * Also note that since the kernel only supports flat-CCS on objects
3663	 * that can *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore
3664	 * don't support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
3665	 * flat-CCS.
3666	 *
3667	 * Without this hint, the kernel will assume that non-mappable
3668	 * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note that the
3669	 * kernel can still migrate the object to the mappable part, as a last
3670	 * resort, if userspace ever CPU faults this object, but this might be
3671	 * expensive, and so ideally should be avoided.
3672	 *
3673	 * On older kernels which lack the relevant small-bar uAPI support (see
3674	 * also &drm_i915_memory_region_info.probed_cpu_visible_size),
3675	 * usage of the flag will result in an error, but it should NEVER be
3676	 * possible to end up with a small BAR configuration, assuming we can
3677	 * also successfully load the i915 kernel module. In such cases the
3678	 * entire I915_MEMORY_CLASS_DEVICE region will be CPU accessible, and as
3679	 * such there are zero restrictions on where the object can be placed.
3680	 */
3681#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
3682	__u32 flags;
3683
3684	/**
3685	 * @extensions: The chain of extensions to apply to this object.
3686	 *
3687	 * This will be useful in the future when we need to support several
3688	 * different extensions, and we need to apply more than one when
3689	 * creating the object. See struct i915_user_extension.
3690	 *
3691	 * If we don't supply any extensions then we get the same old gem_create
3692	 * behaviour.
3693	 *
3694	 * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
3695	 * struct drm_i915_gem_create_ext_memory_regions.
3696	 *
3697	 * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
3698	 * struct drm_i915_gem_create_ext_protected_content.
3699	 *
3700	 * For I915_GEM_CREATE_EXT_SET_PAT usage see
3701	 * struct drm_i915_gem_create_ext_set_pat.
3702	 */
3703#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
3704#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
3705#define I915_GEM_CREATE_EXT_SET_PAT 2
3706	__u64 extensions;
3707};
3708
3709/**
3710 * struct drm_i915_gem_create_ext_memory_regions - The
3711 * I915_GEM_CREATE_EXT_MEMORY_REGIONS extension.
3712 *
3713 * Set the object with the desired set of placements/regions in priority
3714 * order. Each entry must be unique and supported by the device.
3715 *
3716 * This is provided as an array of struct drm_i915_gem_memory_class_instance, or
3717 * an equivalent layout of class:instance pair encodings. See struct
3718 * drm_i915_query_memory_regions and DRM_I915_QUERY_MEMORY_REGIONS for how to
3719 * query the supported regions for a device.
3720 *
3721 * As an example, on discrete devices, if we wish to set the placement as
3722 * device local-memory we can do something like:
3723 *
3724 * .. code-block:: C
3725 *
3726 *	struct drm_i915_gem_memory_class_instance region_lmem = {
3727 *              .memory_class = I915_MEMORY_CLASS_DEVICE,
3728 *              .memory_instance = 0,
3729 *      };
3730 *      struct drm_i915_gem_create_ext_memory_regions regions = {
3731 *              .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
3732 *              .regions = (uintptr_t)&region_lmem,
3733 *              .num_regions = 1,
3734 *      };
3735 *      struct drm_i915_gem_create_ext create_ext = {
3736 *              .size = 16 * PAGE_SIZE,
3737 *              .extensions = (uintptr_t)&regions,
3738 *      };
3739 *
3740 *      int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
3741 *      if (err) ...
3742 *
3743 * At which point we get the object handle in &drm_i915_gem_create_ext.handle,
3744 * along with the final object size in &drm_i915_gem_create_ext.size, which
3745 * should account for any rounding up, if required.
3746 *
3747 * Note that userspace has no means of knowing the current backing region
3748 * for objects where @num_regions is larger than one. The kernel will only
3749 * ensure that the priority order of the @regions array is honoured, either
3750 * when initially placing the object, or when moving memory around due to
3751 * memory pressure
3752 *
3753 * On Flat-CCS capable HW, compression is supported for the objects residing
3754 * in I915_MEMORY_CLASS_DEVICE. When such objects (compressed) have other
3755 * memory class in @regions and migrated (by i915, due to memory
3756 * constraints) to the non I915_MEMORY_CLASS_DEVICE region, then i915 needs to
3757 * decompress the content. But i915 doesn't have the required information to
3758 * decompress the userspace compressed objects.
3759 *
3760 * So i915 supports Flat-CCS, on the objects which can reside only on
3761 * I915_MEMORY_CLASS_DEVICE regions.
3762 */
3763struct drm_i915_gem_create_ext_memory_regions {
3764	/** @base: Extension link. See struct i915_user_extension. */
3765	struct i915_user_extension base;
3766
3767	/** @pad: MBZ */
3768	__u32 pad;
3769	/** @num_regions: Number of elements in the @regions array. */
3770	__u32 num_regions;
3771	/**
3772	 * @regions: The regions/placements array.
3773	 *
3774	 * An array of struct drm_i915_gem_memory_class_instance.
3775	 */
3776	__u64 regions;
3777};
3778
3779/**
3780 * struct drm_i915_gem_create_ext_protected_content - The
3781 * I915_OBJECT_PARAM_PROTECTED_CONTENT extension.
3782 *
3783 * If this extension is provided, buffer contents are expected to be protected
3784 * by PXP encryption and require decryption for scan out and processing. This
3785 * is only possible on platforms that have PXP enabled, on all other scenarios
3786 * using this extension will cause the ioctl to fail and return -ENODEV. The
3787 * flags parameter is reserved for future expansion and must currently be set
3788 * to zero.
3789 *
3790 * The buffer contents are considered invalid after a PXP session teardown.
3791 *
3792 * The encryption is guaranteed to be processed correctly only if the object
3793 * is submitted with a context created using the
3794 * I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. This will also enable extra checks
3795 * at submission time on the validity of the objects involved.
3796 *
3797 * Below is an example on how to create a protected object:
3798 *
3799 * .. code-block:: C
3800 *
3801 *      struct drm_i915_gem_create_ext_protected_content protected_ext = {
3802 *              .base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
3803 *              .flags = 0,
3804 *      };
3805 *      struct drm_i915_gem_create_ext create_ext = {
3806 *              .size = PAGE_SIZE,
3807 *              .extensions = (uintptr_t)&protected_ext,
3808 *      };
3809 *
3810 *      int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
3811 *      if (err) ...
3812 */
3813struct drm_i915_gem_create_ext_protected_content {
3814	/** @base: Extension link. See struct i915_user_extension. */
3815	struct i915_user_extension base;
3816	/** @flags: reserved for future usage, currently MBZ */
3817	__u32 flags;
3818};
3819
3820/**
3821 * struct drm_i915_gem_create_ext_set_pat - The
3822 * I915_GEM_CREATE_EXT_SET_PAT extension.
3823 *
3824 * If this extension is provided, the specified caching policy (PAT index) is
3825 * applied to the buffer object.
3826 *
3827 * Below is an example on how to create an object with specific caching policy:
3828 *
3829 * .. code-block:: C
3830 *
3831 *      struct drm_i915_gem_create_ext_set_pat set_pat_ext = {
3832 *              .base = { .name = I915_GEM_CREATE_EXT_SET_PAT },
3833 *              .pat_index = 0,
3834 *      };
3835 *      struct drm_i915_gem_create_ext create_ext = {
3836 *              .size = PAGE_SIZE,
3837 *              .extensions = (uintptr_t)&set_pat_ext,
3838 *      };
3839 *
3840 *      int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create_ext);
3841 *      if (err) ...
3842 */
3843struct drm_i915_gem_create_ext_set_pat {
3844	/** @base: Extension link. See struct i915_user_extension. */
3845	struct i915_user_extension base;
3846	/**
3847	 * @pat_index: PAT index to be set
3848	 * PAT index is a bit field in Page Table Entry to control caching
3849	 * behaviors for GPU accesses. The definition of PAT index is
3850	 * platform dependent and can be found in hardware specifications,
3851	 */
3852	__u32 pat_index;
3853	/** @rsvd: reserved for future use */
3854	__u32 rsvd;
3855};
3856
3857/* ID of the protected content session managed by i915 when PXP is active */
3858#define I915_PROTECTED_CONTENT_DEFAULT_SESSION 0xf
3859
3860#if defined(__cplusplus)
3861}
3862#endif
3863
3864#endif /* _UAPI_I915_DRM_H_ */
3865