1/*
2 * Header for the Direct Rendering Manager
3 *
4 * Author: Rickard E. (Rik) Faith <faith@valinux.com>
5 *
6 * Acknowledgments:
7 * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic cmpxchg.
8 */
9
10/*
11 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
12 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
13 * All rights reserved.
14 *
15 * Permission is hereby granted, free of charge, to any person obtaining a
16 * copy of this software and associated documentation files (the "Software"),
17 * to deal in the Software without restriction, including without limitation
18 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
19 * and/or sell copies of the Software, and to permit persons to whom the
20 * Software is furnished to do so, subject to the following conditions:
21 *
22 * The above copyright notice and this permission notice (including the next
23 * paragraph) shall be included in all copies or substantial portions of the
24 * Software.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
29 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
30 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
31 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
32 * OTHER DEALINGS IN THE SOFTWARE.
33 */
34
35#ifndef _DRM_H_
36#define _DRM_H_
37
38#if defined(__KERNEL__)
39
40#include <linux/types.h>
41#include <asm/ioctl.h>
42typedef unsigned int drm_handle_t;
43
44#elif defined(__linux__)
45
46#include <linux/types.h>
47#include <asm/ioctl.h>
48typedef unsigned int drm_handle_t;
49
50#else /* One of the BSDs */
51
52#include <stdint.h>
53#include <sys/ioccom.h>
54#include <sys/types.h>
55typedef int8_t   __s8;
56typedef uint8_t  __u8;
57typedef int16_t  __s16;
58typedef uint16_t __u16;
59typedef int32_t  __s32;
60typedef uint32_t __u32;
61typedef int64_t  __s64;
62typedef uint64_t __u64;
63typedef size_t   __kernel_size_t;
64typedef unsigned long drm_handle_t;
65
66#endif
67
68#if defined(__cplusplus)
69extern "C" {
70#endif
71
72#define DRM_NAME	"drm"	  /**< Name in kernel, /dev, and /proc */
73#define DRM_MIN_ORDER	5	  /**< At least 2^5 bytes = 32 bytes */
74#define DRM_MAX_ORDER	22	  /**< Up to 2^22 bytes = 4MB */
75#define DRM_RAM_PERCENT 10	  /**< How much system ram can we lock? */
76
77#define _DRM_LOCK_HELD	0x80000000U /**< Hardware lock is held */
78#define _DRM_LOCK_CONT	0x40000000U /**< Hardware lock is contended */
79#define _DRM_LOCK_IS_HELD(lock)	   ((lock) & _DRM_LOCK_HELD)
80#define _DRM_LOCK_IS_CONT(lock)	   ((lock) & _DRM_LOCK_CONT)
81#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
82
83typedef unsigned int drm_context_t;
84typedef unsigned int drm_drawable_t;
85typedef unsigned int drm_magic_t;
86
87/*
88 * Cliprect.
89 *
90 * \warning: If you change this structure, make sure you change
91 * XF86DRIClipRectRec in the server as well
92 *
93 * \note KW: Actually it's illegal to change either for
94 * backwards-compatibility reasons.
95 */
96struct drm_clip_rect {
97	unsigned short x1;
98	unsigned short y1;
99	unsigned short x2;
100	unsigned short y2;
101};
102
103/*
104 * Drawable information.
105 */
106struct drm_drawable_info {
107	unsigned int num_rects;
108	struct drm_clip_rect *rects;
109};
110
111/*
 * Texture region.
113 */
114struct drm_tex_region {
115	unsigned char next;
116	unsigned char prev;
117	unsigned char in_use;
118	unsigned char padding;
119	unsigned int age;
120};
121
122/*
123 * Hardware lock.
124 *
125 * The lock structure is a simple cache-line aligned integer.  To avoid
126 * processor bus contention on a multiprocessor system, there should not be any
127 * other data stored in the same cache line.
128 */
129struct drm_hw_lock {
130	__volatile__ unsigned int lock;		/**< lock variable */
131	char padding[60];			/**< Pad to cache line */
132};
133
134/*
135 * DRM_IOCTL_VERSION ioctl argument type.
136 *
137 * \sa drmGetVersion().
138 */
139struct drm_version {
140	int version_major;	  /**< Major version */
141	int version_minor;	  /**< Minor version */
142	int version_patchlevel;	  /**< Patch level */
143	__kernel_size_t name_len;	  /**< Length of name buffer */
144	char __user *name;	  /**< Name of driver */
145	__kernel_size_t date_len;	  /**< Length of date buffer */
146	char __user *date;	  /**< User-space buffer to hold date */
147	__kernel_size_t desc_len;	  /**< Length of desc buffer */
148	char __user *desc;	  /**< User-space buffer to hold desc */
149};
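
/*
 * Usage sketch (illustrative only, not part of the UAPI): the version
 * strings are normally fetched in two passes -- a first DRM_IOCTL_VERSION
 * call with a zeroed struct returns the required lengths, a second call
 * fills caller-allocated buffers.  "fd" is an assumed open DRM device node
 * (e.g. /dev/dri/card0); error handling and the <stdlib.h>/<string.h>/
 * <sys/ioctl.h> includes are omitted.
 *
 *	struct drm_version ver;
 *
 *	memset(&ver, 0, sizeof(ver));
 *	ioctl(fd, DRM_IOCTL_VERSION, &ver);
 *
 *	ver.name = malloc(ver.name_len + 1);
 *	ver.date = malloc(ver.date_len + 1);
 *	ver.desc = malloc(ver.desc_len + 1);
 *	ioctl(fd, DRM_IOCTL_VERSION, &ver);
 *	ver.name[ver.name_len] = '\0';
 */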
150
151/*
152 * DRM_IOCTL_GET_UNIQUE ioctl argument type.
153 *
154 * \sa drmGetBusid() and drmSetBusId().
155 */
156struct drm_unique {
157	__kernel_size_t unique_len;	  /**< Length of unique */
158	char __user *unique;	  /**< Unique name for driver instantiation */
159};
160
161struct drm_list {
162	int count;		  /**< Length of user-space structures */
163	struct drm_version __user *version;
164};
165
166struct drm_block {
167	int unused;
168};
169
170/*
171 * DRM_IOCTL_CONTROL ioctl argument type.
172 *
173 * \sa drmCtlInstHandler() and drmCtlUninstHandler().
174 */
175struct drm_control {
176	enum {
177		DRM_ADD_COMMAND,
178		DRM_RM_COMMAND,
179		DRM_INST_HANDLER,
180		DRM_UNINST_HANDLER
181	} func;
182	int irq;
183};
184
185/*
186 * Type of memory to map.
187 */
188enum drm_map_type {
189	_DRM_FRAME_BUFFER = 0,	  /**< WC (no caching), no core dump */
190	_DRM_REGISTERS = 1,	  /**< no caching, no core dump */
191	_DRM_SHM = 2,		  /**< shared, cached */
192	_DRM_AGP = 3,		  /**< AGP/GART */
193	_DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
194	_DRM_CONSISTENT = 5	  /**< Consistent memory for PCI DMA */
195};
196
197/*
198 * Memory mapping flags.
199 */
200enum drm_map_flags {
201	_DRM_RESTRICTED = 0x01,	     /**< Cannot be mapped to user-virtual */
202	_DRM_READ_ONLY = 0x02,
203	_DRM_LOCKED = 0x04,	     /**< shared, cached, locked */
204	_DRM_KERNEL = 0x08,	     /**< kernel requires access */
205	_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
206	_DRM_CONTAINS_LOCK = 0x20,   /**< SHM page that contains lock */
207	_DRM_REMOVABLE = 0x40,	     /**< Removable mapping */
208	_DRM_DRIVER = 0x80	     /**< Managed by driver */
209};
210
211struct drm_ctx_priv_map {
212	unsigned int ctx_id;	 /**< Context requesting private mapping */
213	void *handle;		 /**< Handle of map */
214};
215
216/*
217 * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
218 * argument type.
219 *
220 * \sa drmAddMap().
221 */
222struct drm_map {
223	unsigned long offset;	 /**< Requested physical address (0 for SAREA)*/
224	unsigned long size;	 /**< Requested physical size (bytes) */
225	enum drm_map_type type;	 /**< Type of memory to map */
226	enum drm_map_flags flags;	 /**< Flags */
227	void *handle;		 /**< User-space: "Handle" to pass to mmap() */
228				 /**< Kernel-space: kernel-virtual address */
229	int mtrr;		 /**< MTRR slot used */
230	/*   Private data */
231};
232
233/*
234 * DRM_IOCTL_GET_CLIENT ioctl argument type.
235 */
236struct drm_client {
237	int idx;		/**< Which client desired? */
238	int auth;		/**< Is client authenticated? */
239	unsigned long pid;	/**< Process ID */
240	unsigned long uid;	/**< User ID */
241	unsigned long magic;	/**< Magic */
242	unsigned long iocs;	/**< Ioctl count */
243};
244
245enum drm_stat_type {
246	_DRM_STAT_LOCK,
247	_DRM_STAT_OPENS,
248	_DRM_STAT_CLOSES,
249	_DRM_STAT_IOCTLS,
250	_DRM_STAT_LOCKS,
251	_DRM_STAT_UNLOCKS,
252	_DRM_STAT_VALUE,	/**< Generic value */
253	_DRM_STAT_BYTE,		/**< Generic byte counter (1024bytes/K) */
254	_DRM_STAT_COUNT,	/**< Generic non-byte counter (1000/k) */
255
256	_DRM_STAT_IRQ,		/**< IRQ */
257	_DRM_STAT_PRIMARY,	/**< Primary DMA bytes */
258	_DRM_STAT_SECONDARY,	/**< Secondary DMA bytes */
259	_DRM_STAT_DMA,		/**< DMA */
260	_DRM_STAT_SPECIAL,	/**< Special DMA (e.g., priority or polled) */
261	_DRM_STAT_MISSED	/**< Missed DMA opportunity */
262	    /* Add to the *END* of the list */
263};
264
265/*
266 * DRM_IOCTL_GET_STATS ioctl argument type.
267 */
268struct drm_stats {
269	unsigned long count;
270	struct {
271		unsigned long value;
272		enum drm_stat_type type;
273	} data[15];
274};
275
276/*
277 * Hardware locking flags.
278 */
279enum drm_lock_flags {
280	_DRM_LOCK_READY = 0x01,	     /**< Wait until hardware is ready for DMA */
281	_DRM_LOCK_QUIESCENT = 0x02,  /**< Wait until hardware quiescent */
282	_DRM_LOCK_FLUSH = 0x04,	     /**< Flush this context's DMA queue first */
283	_DRM_LOCK_FLUSH_ALL = 0x08,  /**< Flush all DMA queues first */
284	/* These *HALT* flags aren't supported yet
285	   -- they will be used to support the
286	   full-screen DGA-like mode. */
287	_DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
288	_DRM_HALT_CUR_QUEUES = 0x20  /**< Halt all current queues */
289};
290
291/*
292 * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
293 *
294 * \sa drmGetLock() and drmUnlock().
295 */
296struct drm_lock {
297	int context;
298	enum drm_lock_flags flags;
299};
300
301/*
302 * DMA flags
303 *
304 * \warning
305 * These values \e must match xf86drm.h.
306 *
307 * \sa drm_dma.
308 */
309enum drm_dma_flags {
310	/* Flags for DMA buffer dispatch */
311	_DRM_DMA_BLOCK = 0x01,	      /**<
312				       * Block until buffer dispatched.
313				       *
314				       * \note The buffer may not yet have
315				       * been processed by the hardware --
316				       * getting a hardware lock with the
317				       * hardware quiescent will ensure
318				       * that the buffer has been
319				       * processed.
320				       */
321	_DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
322	_DRM_DMA_PRIORITY = 0x04,     /**< High priority dispatch */
323
324	/* Flags for DMA buffer request */
325	_DRM_DMA_WAIT = 0x10,	      /**< Wait for free buffers */
326	_DRM_DMA_SMALLER_OK = 0x20,   /**< Smaller-than-requested buffers OK */
327	_DRM_DMA_LARGER_OK = 0x40     /**< Larger-than-requested buffers OK */
328};
329
330/*
331 * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
332 *
333 * \sa drmAddBufs().
334 */
335struct drm_buf_desc {
336	int count;		 /**< Number of buffers of this size */
337	int size;		 /**< Size in bytes */
338	int low_mark;		 /**< Low water mark */
339	int high_mark;		 /**< High water mark */
340	enum {
341		_DRM_PAGE_ALIGN = 0x01,	/**< Align on page boundaries for DMA */
342		_DRM_AGP_BUFFER = 0x02,	/**< Buffer is in AGP space */
343		_DRM_SG_BUFFER = 0x04,	/**< Scatter/gather memory buffer */
344		_DRM_FB_BUFFER = 0x08,	/**< Buffer is in frame buffer */
345		_DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
346	} flags;
347	unsigned long agp_start; /**<
348				  * Start address of where the AGP buffers are
349				  * in the AGP aperture
350				  */
351};
352
353/*
354 * DRM_IOCTL_INFO_BUFS ioctl argument type.
355 */
356struct drm_buf_info {
357	int count;		/**< Entries in list */
358	struct drm_buf_desc __user *list;
359};
360
361/*
362 * DRM_IOCTL_FREE_BUFS ioctl argument type.
363 */
364struct drm_buf_free {
365	int count;
366	int __user *list;
367};
368
369/*
370 * Buffer information
371 *
372 * \sa drm_buf_map.
373 */
374struct drm_buf_pub {
375	int idx;		       /**< Index into the master buffer list */
376	int total;		       /**< Buffer size */
377	int used;		       /**< Amount of buffer in use (for DMA) */
378	void __user *address;	       /**< Address of buffer */
379};
380
381/*
382 * DRM_IOCTL_MAP_BUFS ioctl argument type.
383 */
384struct drm_buf_map {
385	int count;		/**< Length of the buffer list */
386#ifdef __cplusplus
387	void __user *virt;
388#else
389	void __user *virtual;		/**< Mmap'd area in user-virtual */
390#endif
391	struct drm_buf_pub __user *list;	/**< Buffer information */
392};
393
394/*
395 * DRM_IOCTL_DMA ioctl argument type.
396 *
397 * Indices here refer to the offset into the buffer list in drm_buf_get.
398 *
399 * \sa drmDMA().
400 */
401struct drm_dma {
402	int context;			  /**< Context handle */
403	int send_count;			  /**< Number of buffers to send */
404	int __user *send_indices;	  /**< List of handles to buffers */
405	int __user *send_sizes;		  /**< Lengths of data to send */
406	enum drm_dma_flags flags;	  /**< Flags */
407	int request_count;		  /**< Number of buffers requested */
408	int request_size;		  /**< Desired size for buffers */
409	int __user *request_indices;	  /**< Buffer information */
410	int __user *request_sizes;
411	int granted_count;		  /**< Number of buffers granted */
412};
413
414enum drm_ctx_flags {
415	_DRM_CONTEXT_PRESERVED = 0x01,
416	_DRM_CONTEXT_2DONLY = 0x02
417};
418
419/*
420 * DRM_IOCTL_ADD_CTX ioctl argument type.
421 *
422 * \sa drmCreateContext() and drmDestroyContext().
423 */
424struct drm_ctx {
425	drm_context_t handle;
426	enum drm_ctx_flags flags;
427};
428
429/*
430 * DRM_IOCTL_RES_CTX ioctl argument type.
431 */
432struct drm_ctx_res {
433	int count;
434	struct drm_ctx __user *contexts;
435};
436
437/*
438 * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
439 */
440struct drm_draw {
441	drm_drawable_t handle;
442};
443
444/*
445 * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
446 */
447typedef enum {
448	DRM_DRAWABLE_CLIPRECTS
449} drm_drawable_info_type_t;
450
451struct drm_update_draw {
452	drm_drawable_t handle;
453	unsigned int type;
454	unsigned int num;
455	unsigned long long data;
456};
457
458/*
459 * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
460 */
461struct drm_auth {
462	drm_magic_t magic;
463};
464
465/*
466 * DRM_IOCTL_IRQ_BUSID ioctl argument type.
467 *
468 * \sa drmGetInterruptFromBusID().
469 */
470struct drm_irq_busid {
471	int irq;	/**< IRQ number */
472	int busnum;	/**< bus number */
473	int devnum;	/**< device number */
474	int funcnum;	/**< function number */
475};
476
477enum drm_vblank_seq_type {
478	_DRM_VBLANK_ABSOLUTE = 0x0,	/**< Wait for specific vblank sequence number */
479	_DRM_VBLANK_RELATIVE = 0x1,	/**< Wait for given number of vblanks */
480	/* bits 1-6 are reserved for high crtcs */
481	_DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e,
482	_DRM_VBLANK_EVENT = 0x4000000,   /**< Send event instead of blocking */
483	_DRM_VBLANK_FLIP = 0x8000000,   /**< Scheduled buffer swap should flip */
484	_DRM_VBLANK_NEXTONMISS = 0x10000000,	/**< If missed, wait for next vblank */
485	_DRM_VBLANK_SECONDARY = 0x20000000,	/**< Secondary display controller */
486	_DRM_VBLANK_SIGNAL = 0x40000000	/**< Send signal instead of blocking, unsupported */
487};
488#define _DRM_VBLANK_HIGH_CRTC_SHIFT 1
489
490#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
491#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
492				_DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)
493
494struct drm_wait_vblank_request {
495	enum drm_vblank_seq_type type;
496	unsigned int sequence;
497	unsigned long signal;
498};
499
500struct drm_wait_vblank_reply {
501	enum drm_vblank_seq_type type;
502	unsigned int sequence;
503	long tval_sec;
504	long tval_usec;
505};
506
507/*
508 * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
509 *
510 * \sa drmWaitVBlank().
511 */
512union drm_wait_vblank {
513	struct drm_wait_vblank_request request;
514	struct drm_wait_vblank_reply reply;
515};
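
/*
 * Usage sketch (illustrative only, not part of the UAPI): block until the
 * next vertical blank and read back the new sequence number.  Request and
 * reply share the same memory.  For a CRTC index above 0, OR
 * (index << _DRM_VBLANK_HIGH_CRTC_SHIFT) & _DRM_VBLANK_HIGH_CRTC_MASK into
 * the type field (see DRM_CAP_VBLANK_HIGH_CRTC below).  "fd" is an assumed
 * open DRM device node; error handling is omitted.
 *
 *	union drm_wait_vblank vbl;
 *	unsigned int seq = 0;
 *
 *	memset(&vbl, 0, sizeof(vbl));
 *	vbl.request.type = _DRM_VBLANK_RELATIVE;
 *	vbl.request.sequence = 1;
 *	if (ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl) == 0)
 *		seq = vbl.reply.sequence;
 */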
516
517#define _DRM_PRE_MODESET 1
518#define _DRM_POST_MODESET 2
519
520/*
521 * DRM_IOCTL_MODESET_CTL ioctl argument type
522 *
523 * \sa drmModesetCtl().
524 */
525struct drm_modeset_ctl {
526	__u32 crtc;
527	__u32 cmd;
528};
529
530/*
531 * DRM_IOCTL_AGP_ENABLE ioctl argument type.
532 *
533 * \sa drmAgpEnable().
534 */
535struct drm_agp_mode {
536	unsigned long mode;	/**< AGP mode */
537};
538
539/*
540 * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
541 *
542 * \sa drmAgpAlloc() and drmAgpFree().
543 */
544struct drm_agp_buffer {
545	unsigned long size;	/**< In bytes -- will round to page boundary */
546	unsigned long handle;	/**< Used for binding / unbinding */
547	unsigned long type;	/**< Type of memory to allocate */
548	unsigned long physical;	/**< Physical used by i810 */
549};
550
551/*
552 * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
553 *
554 * \sa drmAgpBind() and drmAgpUnbind().
555 */
556struct drm_agp_binding {
557	unsigned long handle;	/**< From drm_agp_buffer */
558	unsigned long offset;	/**< In bytes -- will round to page boundary */
559};
560
561/*
562 * DRM_IOCTL_AGP_INFO ioctl argument type.
563 *
564 * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
565 * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
566 * drmAgpVendorId() and drmAgpDeviceId().
567 */
568struct drm_agp_info {
569	int agp_version_major;
570	int agp_version_minor;
571	unsigned long mode;
572	unsigned long aperture_base;	/* physical address */
573	unsigned long aperture_size;	/* bytes */
574	unsigned long memory_allowed;	/* bytes */
575	unsigned long memory_used;
576
577	/* PCI information */
578	unsigned short id_vendor;
579	unsigned short id_device;
580};
581
582/*
583 * DRM_IOCTL_SG_ALLOC ioctl argument type.
584 */
585struct drm_scatter_gather {
586	unsigned long size;	/**< In bytes -- will round to page boundary */
587	unsigned long handle;	/**< Used for mapping / unmapping */
588};
589
590/*
591 * DRM_IOCTL_SET_VERSION ioctl argument type.
592 */
593struct drm_set_version {
594	int drm_di_major;
595	int drm_di_minor;
596	int drm_dd_major;
597	int drm_dd_minor;
598};
599
600/* DRM_IOCTL_GEM_CLOSE ioctl argument type */
601struct drm_gem_close {
602	/** Handle of the object to be closed. */
603	__u32 handle;
604	__u32 pad;
605};
606
607/* DRM_IOCTL_GEM_FLINK ioctl argument type */
608struct drm_gem_flink {
609	/** Handle for the object being named */
610	__u32 handle;
611
612	/** Returned global name */
613	__u32 name;
614};
615
616/* DRM_IOCTL_GEM_OPEN ioctl argument type */
617struct drm_gem_open {
618	/** Name of object being opened */
619	__u32 name;
620
621	/** Returned handle for the object */
622	__u32 handle;
623
624	/** Returned size of the object */
625	__u64 size;
626};
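
/*
 * Usage sketch (illustrative only, not part of the UAPI): flink names are
 * legacy, system-wide GEM object names; DMA-BUF file descriptors (PRIME,
 * below) are the preferred sharing mechanism.  One process publishes a name
 * for an existing GEM handle, another process turns that name back into a
 * handle.  "fd" and "handle" are assumptions; error handling is omitted.
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
 *
 * flink.name can now be passed to another process, which does:
 *
 *	struct drm_gem_open open_args = { .name = name };
 *
 *	ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_args);
 *
 * after which open_args.handle and open_args.size describe the object.
 */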
627
628/**
629 * DRM_CAP_DUMB_BUFFER
630 *
631 * If set to 1, the driver supports creating dumb buffers via the
632 * &DRM_IOCTL_MODE_CREATE_DUMB ioctl.
633 */
634#define DRM_CAP_DUMB_BUFFER		0x1
635/**
636 * DRM_CAP_VBLANK_HIGH_CRTC
637 *
638 * If set to 1, the kernel supports specifying a :ref:`CRTC index<crtc_index>`
639 * in the high bits of &drm_wait_vblank_request.type.
640 *
641 * Starting kernel version 2.6.39, this capability is always set to 1.
642 */
643#define DRM_CAP_VBLANK_HIGH_CRTC	0x2
644/**
645 * DRM_CAP_DUMB_PREFERRED_DEPTH
646 *
647 * The preferred bit depth for dumb buffers.
648 *
649 * The bit depth is the number of bits used to indicate the color of a single
650 * pixel excluding any padding. This is different from the number of bits per
651 * pixel. For instance, XRGB8888 has a bit depth of 24 but has 32 bits per
652 * pixel.
653 *
654 * Note that this preference only applies to dumb buffers, it's irrelevant for
655 * other types of buffers.
656 */
657#define DRM_CAP_DUMB_PREFERRED_DEPTH	0x3
658/**
659 * DRM_CAP_DUMB_PREFER_SHADOW
660 *
661 * If set to 1, the driver prefers userspace to render to a shadow buffer
662 * instead of directly rendering to a dumb buffer. For best speed, userspace
663 * should do streaming ordered memory copies into the dumb buffer and never
664 * read from it.
665 *
666 * Note that this preference only applies to dumb buffers, it's irrelevant for
667 * other types of buffers.
668 */
669#define DRM_CAP_DUMB_PREFER_SHADOW	0x4
670/**
671 * DRM_CAP_PRIME
672 *
673 * Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
674 * and &DRM_PRIME_CAP_EXPORT.
675 *
676 * Starting from kernel version 6.6, both &DRM_PRIME_CAP_IMPORT and
677 * &DRM_PRIME_CAP_EXPORT are always advertised.
678 *
679 * PRIME buffers are exposed as dma-buf file descriptors.
680 * See :ref:`prime_buffer_sharing`.
681 */
682#define DRM_CAP_PRIME			0x5
683/**
684 * DRM_PRIME_CAP_IMPORT
685 *
686 * If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
687 * buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
688 *
689 * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
690 */
691#define  DRM_PRIME_CAP_IMPORT		0x1
692/**
693 * DRM_PRIME_CAP_EXPORT
694 *
695 * If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
696 * buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
697 *
698 * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
699 */
700#define  DRM_PRIME_CAP_EXPORT		0x2
701/**
702 * DRM_CAP_TIMESTAMP_MONOTONIC
703 *
704 * If set to 0, the kernel will report timestamps with ``CLOCK_REALTIME`` in
705 * struct drm_event_vblank. If set to 1, the kernel will report timestamps with
706 * ``CLOCK_MONOTONIC``. See ``clock_gettime(2)`` for the definition of these
707 * clocks.
708 *
709 * Starting from kernel version 2.6.39, the default value for this capability
710 * is 1. Starting kernel version 4.15, this capability is always set to 1.
711 */
712#define DRM_CAP_TIMESTAMP_MONOTONIC	0x6
713/**
714 * DRM_CAP_ASYNC_PAGE_FLIP
715 *
716 * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for legacy
717 * page-flips.
718 */
719#define DRM_CAP_ASYNC_PAGE_FLIP		0x7
720/**
721 * DRM_CAP_CURSOR_WIDTH
722 *
723 * The ``CURSOR_WIDTH`` and ``CURSOR_HEIGHT`` capabilities return a valid
724 * width x height combination for the hardware cursor. The intention is that a
725 * hardware agnostic userspace can query a cursor plane size to use.
726 *
727 * Note that the cross-driver contract is to merely return a valid size;
 * drivers are free to attach another meaning on top, e.g. i915 returns the
729 * maximum plane size.
730 */
731#define DRM_CAP_CURSOR_WIDTH		0x8
732/**
733 * DRM_CAP_CURSOR_HEIGHT
734 *
735 * See &DRM_CAP_CURSOR_WIDTH.
736 */
737#define DRM_CAP_CURSOR_HEIGHT		0x9
738/**
739 * DRM_CAP_ADDFB2_MODIFIERS
740 *
741 * If set to 1, the driver supports supplying modifiers in the
742 * &DRM_IOCTL_MODE_ADDFB2 ioctl.
743 */
744#define DRM_CAP_ADDFB2_MODIFIERS	0x10
745/**
746 * DRM_CAP_PAGE_FLIP_TARGET
747 *
748 * If set to 1, the driver supports the &DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE and
749 * &DRM_MODE_PAGE_FLIP_TARGET_RELATIVE flags in
750 * &drm_mode_crtc_page_flip_target.flags for the &DRM_IOCTL_MODE_PAGE_FLIP
751 * ioctl.
752 */
753#define DRM_CAP_PAGE_FLIP_TARGET	0x11
754/**
755 * DRM_CAP_CRTC_IN_VBLANK_EVENT
756 *
757 * If set to 1, the kernel supports reporting the CRTC ID in
758 * &drm_event_vblank.crtc_id for the &DRM_EVENT_VBLANK and
759 * &DRM_EVENT_FLIP_COMPLETE events.
760 *
761 * Starting kernel version 4.12, this capability is always set to 1.
762 */
763#define DRM_CAP_CRTC_IN_VBLANK_EVENT	0x12
764/**
765 * DRM_CAP_SYNCOBJ
766 *
767 * If set to 1, the driver supports sync objects. See :ref:`drm_sync_objects`.
768 */
769#define DRM_CAP_SYNCOBJ		0x13
770/**
771 * DRM_CAP_SYNCOBJ_TIMELINE
772 *
773 * If set to 1, the driver supports timeline operations on sync objects. See
774 * :ref:`drm_sync_objects`.
775 */
776#define DRM_CAP_SYNCOBJ_TIMELINE	0x14
777/**
778 * DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP
779 *
780 * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for atomic
781 * commits.
782 */
783#define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP	0x15
784
785/* DRM_IOCTL_GET_CAP ioctl argument type */
786struct drm_get_cap {
787	__u64 capability;
788	__u64 value;
789};
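
/*
 * Usage sketch (illustrative only, not part of the UAPI): query a capability
 * before relying on the corresponding feature.  An error from the ioctl
 * generally means the kernel does not know the capability at all, which
 * should be treated like a value of 0.  "fd" is an assumed open DRM device
 * node.
 *
 *	struct drm_get_cap cap = { .capability = DRM_CAP_DUMB_BUFFER };
 *	int have_dumb = 0;
 *
 *	if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) == 0)
 *		have_dumb = cap.value == 1;
 */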
790
791/**
792 * DRM_CLIENT_CAP_STEREO_3D
793 *
794 * If set to 1, the DRM core will expose the stereo 3D capabilities of the
795 * monitor by advertising the supported 3D layouts in the flags of struct
796 * drm_mode_modeinfo. See ``DRM_MODE_FLAG_3D_*``.
797 *
798 * This capability is always supported for all drivers starting from kernel
799 * version 3.13.
800 */
801#define DRM_CLIENT_CAP_STEREO_3D	1
802
803/**
804 * DRM_CLIENT_CAP_UNIVERSAL_PLANES
805 *
806 * If set to 1, the DRM core will expose all planes (overlay, primary, and
807 * cursor) to userspace.
808 *
809 * This capability has been introduced in kernel version 3.15. Starting from
810 * kernel version 3.17, this capability is always supported for all drivers.
811 */
812#define DRM_CLIENT_CAP_UNIVERSAL_PLANES  2
813
814/**
815 * DRM_CLIENT_CAP_ATOMIC
816 *
817 * If set to 1, the DRM core will expose atomic properties to userspace. This
818 * implicitly enables &DRM_CLIENT_CAP_UNIVERSAL_PLANES and
819 * &DRM_CLIENT_CAP_ASPECT_RATIO.
820 *
821 * If the driver doesn't support atomic mode-setting, enabling this capability
822 * will fail with -EOPNOTSUPP.
823 *
824 * This capability has been introduced in kernel version 4.0. Starting from
825 * kernel version 4.2, this capability is always supported for atomic-capable
826 * drivers.
827 */
828#define DRM_CLIENT_CAP_ATOMIC	3
829
830/**
831 * DRM_CLIENT_CAP_ASPECT_RATIO
832 *
833 * If set to 1, the DRM core will provide aspect ratio information in modes.
834 * See ``DRM_MODE_FLAG_PIC_AR_*``.
835 *
836 * This capability is always supported for all drivers starting from kernel
837 * version 4.18.
838 */
839#define DRM_CLIENT_CAP_ASPECT_RATIO    4
840
841/**
842 * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
843 *
 * If set to 1, the DRM core will expose special connectors to be used for
 * writing the scene set up in the commit back to memory. The client must enable
846 * &DRM_CLIENT_CAP_ATOMIC first.
847 *
848 * This capability is always supported for atomic-capable drivers starting from
849 * kernel version 4.19.
850 */
851#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS	5
852
853/**
854 * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT
855 *
 * Drivers for para-virtualized hardware (e.g. vmwgfx, qxl, virtio and
 * virtualbox) have additional restrictions for cursor planes (thus
 * making cursor planes on those drivers not truly universal), e.g.
 * they need cursor planes to act like one would expect from a mouse
 * cursor and to have correctly set hotspot properties.
 * If this client cap is not set, the DRM core will hide the cursor plane on
 * those virtualized drivers, because not setting it implies that the
 * client is not capable of dealing with those extra restrictions.
 * Clients which do set the cursor hotspot and treat the cursor plane
 * like a mouse cursor should set this property.
 * The client must enable &DRM_CLIENT_CAP_ATOMIC first.
867 *
868 * Setting this property on drivers which do not special case
869 * cursor planes (i.e. non-virtualized drivers) will return
870 * EOPNOTSUPP, which can be used by userspace to gauge
871 * requirements of the hardware/drivers they're running on.
872 *
873 * This capability is always supported for atomic-capable virtualized
874 * drivers starting from kernel version 6.6.
875 */
876#define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT	6
877
878/* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
879struct drm_set_client_cap {
880	__u64 capability;
881	__u64 value;
882};
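
/*
 * Usage sketch (illustrative only, not part of the UAPI): opt in to atomic
 * mode-setting before issuing DRM_IOCTL_MODE_ATOMIC.  On drivers without
 * atomic support the ioctl fails with EOPNOTSUPP.  "fd" is an assumed open
 * DRM device node.
 *
 *	int atomic_ok;
 *	struct drm_set_client_cap cap = {
 *		.capability = DRM_CLIENT_CAP_ATOMIC,
 *		.value = 1,
 *	};
 *
 *	atomic_ok = ioctl(fd, DRM_IOCTL_SET_CLIENT_CAP, &cap) == 0;
 */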
883
884#define DRM_RDWR O_RDWR
885#define DRM_CLOEXEC O_CLOEXEC
886struct drm_prime_handle {
887	__u32 handle;
888
	/** Flags; only applicable for handle->fd */
890	__u32 flags;
891
892	/** Returned dmabuf file descriptor */
893	__s32 fd;
894};
895
896struct drm_syncobj_create {
897	__u32 handle;
898#define DRM_SYNCOBJ_CREATE_SIGNALED (1 << 0)
899	__u32 flags;
900};
901
902struct drm_syncobj_destroy {
903	__u32 handle;
904	__u32 pad;
905};
906
907#define DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_IMPORT_SYNC_FILE (1 << 0)
908#define DRM_SYNCOBJ_HANDLE_TO_FD_FLAGS_EXPORT_SYNC_FILE (1 << 0)
909struct drm_syncobj_handle {
910	__u32 handle;
911	__u32 flags;
912
913	__s32 fd;
914	__u32 pad;
915};
916
917struct drm_syncobj_transfer {
918	__u32 src_handle;
919	__u32 dst_handle;
920	__u64 src_point;
921	__u64 dst_point;
922	__u32 flags;
923	__u32 pad;
924};
925
926#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
927#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
928#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
929#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE (1 << 3) /* set fence deadline to deadline_nsec */
930struct drm_syncobj_wait {
931	__u64 handles;
932	/* absolute timeout */
933	__s64 timeout_nsec;
934	__u32 count_handles;
935	__u32 flags;
936	__u32 first_signaled; /* only valid when not waiting all */
937	__u32 pad;
938	/**
939	 * @deadline_nsec - fence deadline hint
940	 *
941	 * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
942	 * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
943	 * set.
944	 */
945	__u64 deadline_nsec;
946};
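
/*
 * Usage sketch (illustrative only, not part of the UAPI): create a binary
 * syncobj that starts out signalled and wait on it.  The handles field is a
 * pointer to an array of handles cast to __u64, and timeout_nsec is an
 * absolute CLOCK_MONOTONIC time (here: now + 100ms).  The request macros
 * used below are defined further down in this header.  "fd" is an assumed
 * open DRM device node; error handling is omitted.
 *
 *	struct drm_syncobj_create create = {
 *		.flags = DRM_SYNCOBJ_CREATE_SIGNALED,
 *	};
 *	struct drm_syncobj_wait wait;
 *	struct timespec ts;
 *
 *	ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &create);
 *
 *	clock_gettime(CLOCK_MONOTONIC, &ts);
 *	memset(&wait, 0, sizeof(wait));
 *	wait.handles = (__u64)(uintptr_t)&create.handle;
 *	wait.count_handles = 1;
 *	wait.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL;
 *	wait.timeout_nsec = (__s64)ts.tv_sec * 1000000000 + ts.tv_nsec +
 *			    100 * 1000000;
 *	ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
 */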
947
948struct drm_syncobj_timeline_wait {
949	__u64 handles;
	/* wait on a specific timeline point for each handle */
951	__u64 points;
952	/* absolute timeout */
953	__s64 timeout_nsec;
954	__u32 count_handles;
955	__u32 flags;
956	__u32 first_signaled; /* only valid when not waiting all */
957	__u32 pad;
958	/**
959	 * @deadline_nsec - fence deadline hint
960	 *
961	 * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
962	 * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
963	 * set.
964	 */
965	__u64 deadline_nsec;
966};
967
968/**
969 * struct drm_syncobj_eventfd
970 * @handle: syncobj handle.
971 * @flags: Zero to wait for the point to be signalled, or
972 *         &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to wait for a fence to be
973 *         available for the point.
974 * @point: syncobj timeline point (set to zero for binary syncobjs).
 * @fd: Existing eventfd to send events to.
976 * @pad: Must be zero.
977 *
978 * Register an eventfd to be signalled by a syncobj. The eventfd counter will
979 * be incremented by one.
980 */
981struct drm_syncobj_eventfd {
982	__u32 handle;
983	__u32 flags;
984	__u64 point;
985	__s32 fd;
986	__u32 pad;
987};
988
989
990struct drm_syncobj_array {
991	__u64 handles;
992	__u32 count_handles;
993	__u32 pad;
994};
995
996#define DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED (1 << 0) /* last available point on timeline syncobj */
997struct drm_syncobj_timeline_array {
998	__u64 handles;
999	__u64 points;
1000	__u32 count_handles;
1001	__u32 flags;
1002};
1003
1004
1005/* Query current scanout sequence number */
1006struct drm_crtc_get_sequence {
1007	__u32 crtc_id;		/* requested crtc_id */
1008	__u32 active;		/* return: crtc output is active */
1009	__u64 sequence;		/* return: most recent vblank sequence */
1010	__s64 sequence_ns;	/* return: most recent time of first pixel out */
1011};
1012
1013/* Queue event to be delivered at specified sequence. Time stamp marks
1014 * when the first pixel of the refresh cycle leaves the display engine
1015 * for the display
1016 */
1017#define DRM_CRTC_SEQUENCE_RELATIVE		0x00000001	/* sequence is relative to current */
1018#define DRM_CRTC_SEQUENCE_NEXT_ON_MISS		0x00000002	/* Use next sequence if we've missed */
1019
1020struct drm_crtc_queue_sequence {
1021	__u32 crtc_id;
1022	__u32 flags;
1023	__u64 sequence;		/* on input, target sequence. on output, actual sequence */
1024	__u64 user_data;	/* user data passed to event */
1025};
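
/*
 * Usage sketch (illustrative only, not part of the UAPI): sample the current
 * scanout sequence of a CRTC, then ask for a DRM_EVENT_CRTC_SEQUENCE event
 * one refresh later.  The request macros used below are defined further down
 * in this header.  "fd" is an assumed open DRM device node and "crtc_id" an
 * assumed KMS CRTC object ID; error handling is omitted.
 *
 *	struct drm_crtc_get_sequence get = { .crtc_id = crtc_id };
 *	struct drm_crtc_queue_sequence queue;
 *
 *	ioctl(fd, DRM_IOCTL_CRTC_GET_SEQUENCE, &get);
 *
 *	memset(&queue, 0, sizeof(queue));
 *	queue.crtc_id = crtc_id;
 *	queue.flags = DRM_CRTC_SEQUENCE_RELATIVE;
 *	queue.sequence = 1;
 *	ioctl(fd, DRM_IOCTL_CRTC_QUEUE_SEQUENCE, &queue);
 */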
1026
1027#if defined(__cplusplus)
1028}
1029#endif
1030
1031#include "drm_mode.h"
1032
1033#if defined(__cplusplus)
1034extern "C" {
1035#endif
1036
1037#define DRM_IOCTL_BASE			'd'
1038#define DRM_IO(nr)			_IO(DRM_IOCTL_BASE,nr)
1039#define DRM_IOR(nr,type)		_IOR(DRM_IOCTL_BASE,nr,type)
1040#define DRM_IOW(nr,type)		_IOW(DRM_IOCTL_BASE,nr,type)
1041#define DRM_IOWR(nr,type)		_IOWR(DRM_IOCTL_BASE,nr,type)
1042
1043#define DRM_IOCTL_VERSION		DRM_IOWR(0x00, struct drm_version)
1044#define DRM_IOCTL_GET_UNIQUE		DRM_IOWR(0x01, struct drm_unique)
1045#define DRM_IOCTL_GET_MAGIC		DRM_IOR( 0x02, struct drm_auth)
1046#define DRM_IOCTL_IRQ_BUSID		DRM_IOWR(0x03, struct drm_irq_busid)
1047#define DRM_IOCTL_GET_MAP               DRM_IOWR(0x04, struct drm_map)
1048#define DRM_IOCTL_GET_CLIENT            DRM_IOWR(0x05, struct drm_client)
1049#define DRM_IOCTL_GET_STATS             DRM_IOR( 0x06, struct drm_stats)
1050#define DRM_IOCTL_SET_VERSION		DRM_IOWR(0x07, struct drm_set_version)
1051#define DRM_IOCTL_MODESET_CTL           DRM_IOW(0x08, struct drm_modeset_ctl)
1052/**
1053 * DRM_IOCTL_GEM_CLOSE - Close a GEM handle.
1054 *
1055 * GEM handles are not reference-counted by the kernel. User-space is
1056 * responsible for managing their lifetime. For example, if user-space imports
1057 * the same memory object twice on the same DRM file description, the same GEM
1058 * handle is returned by both imports, and user-space needs to ensure
1059 * &DRM_IOCTL_GEM_CLOSE is performed once only. The same situation can happen
1060 * when a memory object is allocated, then exported and imported again on the
1061 * same DRM file description. The &DRM_IOCTL_MODE_GETFB2 IOCTL is an exception
1062 * and always returns fresh new GEM handles even if an existing GEM handle
1063 * already refers to the same memory object before the IOCTL is performed.
1064 */
1065#define DRM_IOCTL_GEM_CLOSE		DRM_IOW (0x09, struct drm_gem_close)
1066#define DRM_IOCTL_GEM_FLINK		DRM_IOWR(0x0a, struct drm_gem_flink)
1067#define DRM_IOCTL_GEM_OPEN		DRM_IOWR(0x0b, struct drm_gem_open)
1068#define DRM_IOCTL_GET_CAP		DRM_IOWR(0x0c, struct drm_get_cap)
1069#define DRM_IOCTL_SET_CLIENT_CAP	DRM_IOW( 0x0d, struct drm_set_client_cap)
1070
1071#define DRM_IOCTL_SET_UNIQUE		DRM_IOW( 0x10, struct drm_unique)
1072#define DRM_IOCTL_AUTH_MAGIC		DRM_IOW( 0x11, struct drm_auth)
1073#define DRM_IOCTL_BLOCK			DRM_IOWR(0x12, struct drm_block)
1074#define DRM_IOCTL_UNBLOCK		DRM_IOWR(0x13, struct drm_block)
1075#define DRM_IOCTL_CONTROL		DRM_IOW( 0x14, struct drm_control)
1076#define DRM_IOCTL_ADD_MAP		DRM_IOWR(0x15, struct drm_map)
1077#define DRM_IOCTL_ADD_BUFS		DRM_IOWR(0x16, struct drm_buf_desc)
1078#define DRM_IOCTL_MARK_BUFS		DRM_IOW( 0x17, struct drm_buf_desc)
1079#define DRM_IOCTL_INFO_BUFS		DRM_IOWR(0x18, struct drm_buf_info)
1080#define DRM_IOCTL_MAP_BUFS		DRM_IOWR(0x19, struct drm_buf_map)
1081#define DRM_IOCTL_FREE_BUFS		DRM_IOW( 0x1a, struct drm_buf_free)
1082
1083#define DRM_IOCTL_RM_MAP		DRM_IOW( 0x1b, struct drm_map)
1084
1085#define DRM_IOCTL_SET_SAREA_CTX		DRM_IOW( 0x1c, struct drm_ctx_priv_map)
1086#define DRM_IOCTL_GET_SAREA_CTX 	DRM_IOWR(0x1d, struct drm_ctx_priv_map)
1087
1088#define DRM_IOCTL_SET_MASTER            DRM_IO(0x1e)
1089#define DRM_IOCTL_DROP_MASTER           DRM_IO(0x1f)
1090
1091#define DRM_IOCTL_ADD_CTX		DRM_IOWR(0x20, struct drm_ctx)
1092#define DRM_IOCTL_RM_CTX		DRM_IOWR(0x21, struct drm_ctx)
1093#define DRM_IOCTL_MOD_CTX		DRM_IOW( 0x22, struct drm_ctx)
1094#define DRM_IOCTL_GET_CTX		DRM_IOWR(0x23, struct drm_ctx)
1095#define DRM_IOCTL_SWITCH_CTX		DRM_IOW( 0x24, struct drm_ctx)
1096#define DRM_IOCTL_NEW_CTX		DRM_IOW( 0x25, struct drm_ctx)
1097#define DRM_IOCTL_RES_CTX		DRM_IOWR(0x26, struct drm_ctx_res)
1098#define DRM_IOCTL_ADD_DRAW		DRM_IOWR(0x27, struct drm_draw)
1099#define DRM_IOCTL_RM_DRAW		DRM_IOWR(0x28, struct drm_draw)
1100#define DRM_IOCTL_DMA			DRM_IOWR(0x29, struct drm_dma)
1101#define DRM_IOCTL_LOCK			DRM_IOW( 0x2a, struct drm_lock)
1102#define DRM_IOCTL_UNLOCK		DRM_IOW( 0x2b, struct drm_lock)
1103#define DRM_IOCTL_FINISH		DRM_IOW( 0x2c, struct drm_lock)
1104
1105/**
1106 * DRM_IOCTL_PRIME_HANDLE_TO_FD - Convert a GEM handle to a DMA-BUF FD.
1107 *
1108 * User-space sets &drm_prime_handle.handle with the GEM handle to export and
1109 * &drm_prime_handle.flags, and gets back a DMA-BUF file descriptor in
1110 * &drm_prime_handle.fd.
1111 *
1112 * The export can fail for any driver-specific reason, e.g. because export is
1113 * not supported for this specific GEM handle (but might be for others).
1114 *
1115 * Support for exporting DMA-BUFs is advertised via &DRM_PRIME_CAP_EXPORT.
1116 */
1117#define DRM_IOCTL_PRIME_HANDLE_TO_FD    DRM_IOWR(0x2d, struct drm_prime_handle)
1118/**
1119 * DRM_IOCTL_PRIME_FD_TO_HANDLE - Convert a DMA-BUF FD to a GEM handle.
1120 *
1121 * User-space sets &drm_prime_handle.fd with a DMA-BUF file descriptor to
1122 * import, and gets back a GEM handle in &drm_prime_handle.handle.
1123 * &drm_prime_handle.flags is unused.
1124 *
1125 * If an existing GEM handle refers to the memory object backing the DMA-BUF,
1126 * that GEM handle is returned. Therefore user-space which needs to handle
1127 * arbitrary DMA-BUFs must have a user-space lookup data structure to manually
1128 * reference-count duplicated GEM handles. For more information see
1129 * &DRM_IOCTL_GEM_CLOSE.
1130 *
1131 * The import can fail for any driver-specific reason, e.g. because import is
1132 * only supported for DMA-BUFs allocated on this DRM device.
1133 *
1134 * Support for importing DMA-BUFs is advertised via &DRM_PRIME_CAP_IMPORT.
1135 */
1136#define DRM_IOCTL_PRIME_FD_TO_HANDLE    DRM_IOWR(0x2e, struct drm_prime_handle)
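
/*
 * Usage sketch (illustrative only, not part of the UAPI): export a GEM handle
 * as a DMA-BUF on one DRM file description and import it on another.  The
 * import may hand back a handle that already existed on the target file
 * description, so user-space has to reference-count handles before calling
 * DRM_IOCTL_GEM_CLOSE.  "export_fd", "import_fd" and "handle" are
 * assumptions; error handling is omitted.
 *
 *	struct drm_prime_handle export_args = {
 *		.handle = handle,
 *		.flags = DRM_CLOEXEC | DRM_RDWR,
 *	};
 *	struct drm_prime_handle import_args = { 0 };
 *
 *	ioctl(export_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &export_args);
 *
 *	import_args.fd = export_args.fd;
 *	ioctl(import_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &import_args);
 *
 * Once imported, export_args.fd can be passed around or closed with close(2);
 * import_args.handle remains valid on import_fd until it is closed.
 */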
1137
1138#define DRM_IOCTL_AGP_ACQUIRE		DRM_IO(  0x30)
1139#define DRM_IOCTL_AGP_RELEASE		DRM_IO(  0x31)
1140#define DRM_IOCTL_AGP_ENABLE		DRM_IOW( 0x32, struct drm_agp_mode)
1141#define DRM_IOCTL_AGP_INFO		DRM_IOR( 0x33, struct drm_agp_info)
1142#define DRM_IOCTL_AGP_ALLOC		DRM_IOWR(0x34, struct drm_agp_buffer)
1143#define DRM_IOCTL_AGP_FREE		DRM_IOW( 0x35, struct drm_agp_buffer)
1144#define DRM_IOCTL_AGP_BIND		DRM_IOW( 0x36, struct drm_agp_binding)
1145#define DRM_IOCTL_AGP_UNBIND		DRM_IOW( 0x37, struct drm_agp_binding)
1146
1147#define DRM_IOCTL_SG_ALLOC		DRM_IOWR(0x38, struct drm_scatter_gather)
1148#define DRM_IOCTL_SG_FREE		DRM_IOW( 0x39, struct drm_scatter_gather)
1149
1150#define DRM_IOCTL_WAIT_VBLANK		DRM_IOWR(0x3a, union drm_wait_vblank)
1151
1152#define DRM_IOCTL_CRTC_GET_SEQUENCE	DRM_IOWR(0x3b, struct drm_crtc_get_sequence)
1153#define DRM_IOCTL_CRTC_QUEUE_SEQUENCE	DRM_IOWR(0x3c, struct drm_crtc_queue_sequence)
1154
1155#define DRM_IOCTL_UPDATE_DRAW		DRM_IOW(0x3f, struct drm_update_draw)
1156
1157#define DRM_IOCTL_MODE_GETRESOURCES	DRM_IOWR(0xA0, struct drm_mode_card_res)
1158#define DRM_IOCTL_MODE_GETCRTC		DRM_IOWR(0xA1, struct drm_mode_crtc)
1159#define DRM_IOCTL_MODE_SETCRTC		DRM_IOWR(0xA2, struct drm_mode_crtc)
1160#define DRM_IOCTL_MODE_CURSOR		DRM_IOWR(0xA3, struct drm_mode_cursor)
1161#define DRM_IOCTL_MODE_GETGAMMA		DRM_IOWR(0xA4, struct drm_mode_crtc_lut)
1162#define DRM_IOCTL_MODE_SETGAMMA		DRM_IOWR(0xA5, struct drm_mode_crtc_lut)
1163#define DRM_IOCTL_MODE_GETENCODER	DRM_IOWR(0xA6, struct drm_mode_get_encoder)
1164#define DRM_IOCTL_MODE_GETCONNECTOR	DRM_IOWR(0xA7, struct drm_mode_get_connector)
1165#define DRM_IOCTL_MODE_ATTACHMODE	DRM_IOWR(0xA8, struct drm_mode_mode_cmd) /* deprecated (never worked) */
1166#define DRM_IOCTL_MODE_DETACHMODE	DRM_IOWR(0xA9, struct drm_mode_mode_cmd) /* deprecated (never worked) */
1167
1168#define DRM_IOCTL_MODE_GETPROPERTY	DRM_IOWR(0xAA, struct drm_mode_get_property)
1169#define DRM_IOCTL_MODE_SETPROPERTY	DRM_IOWR(0xAB, struct drm_mode_connector_set_property)
1170#define DRM_IOCTL_MODE_GETPROPBLOB	DRM_IOWR(0xAC, struct drm_mode_get_blob)
1171#define DRM_IOCTL_MODE_GETFB		DRM_IOWR(0xAD, struct drm_mode_fb_cmd)
1172#define DRM_IOCTL_MODE_ADDFB		DRM_IOWR(0xAE, struct drm_mode_fb_cmd)
1173/**
1174 * DRM_IOCTL_MODE_RMFB - Remove a framebuffer.
1175 *
1176 * This removes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
1177 * argument is a framebuffer object ID.
1178 *
1179 * Warning: removing a framebuffer currently in-use on an enabled plane will
1180 * disable that plane. The CRTC the plane is linked to may also be disabled
1181 * (depending on driver capabilities).
1182 */
1183#define DRM_IOCTL_MODE_RMFB		DRM_IOWR(0xAF, unsigned int)
1184#define DRM_IOCTL_MODE_PAGE_FLIP	DRM_IOWR(0xB0, struct drm_mode_crtc_page_flip)
1185#define DRM_IOCTL_MODE_DIRTYFB		DRM_IOWR(0xB1, struct drm_mode_fb_dirty_cmd)
1186
1187/**
1188 * DRM_IOCTL_MODE_CREATE_DUMB - Create a new dumb buffer object.
1189 *
1190 * KMS dumb buffers provide a very primitive way to allocate a buffer object
1191 * suitable for scanout and map it for software rendering. KMS dumb buffers are
1192 * not suitable for hardware-accelerated rendering nor video decoding. KMS dumb
1193 * buffers are not suitable to be displayed on any other device than the KMS
1194 * device where they were allocated from. Also see
1195 * :ref:`kms_dumb_buffer_objects`.
1196 *
1197 * The IOCTL argument is a struct drm_mode_create_dumb.
1198 *
1199 * User-space is expected to create a KMS dumb buffer via this IOCTL, then add
1200 * it as a KMS framebuffer via &DRM_IOCTL_MODE_ADDFB and map it via
1201 * &DRM_IOCTL_MODE_MAP_DUMB.
1202 *
1203 * &DRM_CAP_DUMB_BUFFER indicates whether this IOCTL is supported.
1204 * &DRM_CAP_DUMB_PREFERRED_DEPTH and &DRM_CAP_DUMB_PREFER_SHADOW indicate
1205 * driver preferences for dumb buffers.
1206 */
1207#define DRM_IOCTL_MODE_CREATE_DUMB DRM_IOWR(0xB2, struct drm_mode_create_dumb)
1208#define DRM_IOCTL_MODE_MAP_DUMB    DRM_IOWR(0xB3, struct drm_mode_map_dumb)
1209#define DRM_IOCTL_MODE_DESTROY_DUMB    DRM_IOWR(0xB4, struct drm_mode_destroy_dumb)
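
/*
 * Usage sketch (illustrative only, not part of the UAPI): allocate a dumb
 * buffer, look up its fake mmap offset and map it for CPU access.  The
 * structs come from drm_mode.h, included above.  "fd" is an assumed open DRM
 * device node; DRM_CAP_DUMB_BUFFER should be checked first, and error
 * handling plus the <sys/mman.h> include are omitted.
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1024,
 *		.height = 768,
 *		.bpp = 32,
 *	};
 *	struct drm_mode_map_dumb map = { 0 };
 *	void *ptr;
 *
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *
 *	map.handle = create.handle;
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *
 *	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, map.offset);
 *
 * The handle can then be registered as a framebuffer with
 * DRM_IOCTL_MODE_ADDFB (using create.pitch) and released again with
 * DRM_IOCTL_MODE_DESTROY_DUMB after munmap().
 */
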
1210#define DRM_IOCTL_MODE_GETPLANERESOURCES DRM_IOWR(0xB5, struct drm_mode_get_plane_res)
1211#define DRM_IOCTL_MODE_GETPLANE	DRM_IOWR(0xB6, struct drm_mode_get_plane)
1212#define DRM_IOCTL_MODE_SETPLANE	DRM_IOWR(0xB7, struct drm_mode_set_plane)
1213#define DRM_IOCTL_MODE_ADDFB2		DRM_IOWR(0xB8, struct drm_mode_fb_cmd2)
1214#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES	DRM_IOWR(0xB9, struct drm_mode_obj_get_properties)
1215#define DRM_IOCTL_MODE_OBJ_SETPROPERTY	DRM_IOWR(0xBA, struct drm_mode_obj_set_property)
1216#define DRM_IOCTL_MODE_CURSOR2		DRM_IOWR(0xBB, struct drm_mode_cursor2)
1217#define DRM_IOCTL_MODE_ATOMIC		DRM_IOWR(0xBC, struct drm_mode_atomic)
1218#define DRM_IOCTL_MODE_CREATEPROPBLOB	DRM_IOWR(0xBD, struct drm_mode_create_blob)
1219#define DRM_IOCTL_MODE_DESTROYPROPBLOB	DRM_IOWR(0xBE, struct drm_mode_destroy_blob)
1220
1221#define DRM_IOCTL_SYNCOBJ_CREATE	DRM_IOWR(0xBF, struct drm_syncobj_create)
1222#define DRM_IOCTL_SYNCOBJ_DESTROY	DRM_IOWR(0xC0, struct drm_syncobj_destroy)
1223#define DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD	DRM_IOWR(0xC1, struct drm_syncobj_handle)
1224#define DRM_IOCTL_SYNCOBJ_FD_TO_HANDLE	DRM_IOWR(0xC2, struct drm_syncobj_handle)
1225#define DRM_IOCTL_SYNCOBJ_WAIT		DRM_IOWR(0xC3, struct drm_syncobj_wait)
1226#define DRM_IOCTL_SYNCOBJ_RESET		DRM_IOWR(0xC4, struct drm_syncobj_array)
1227#define DRM_IOCTL_SYNCOBJ_SIGNAL	DRM_IOWR(0xC5, struct drm_syncobj_array)
1228
1229#define DRM_IOCTL_MODE_CREATE_LEASE	DRM_IOWR(0xC6, struct drm_mode_create_lease)
1230#define DRM_IOCTL_MODE_LIST_LESSEES	DRM_IOWR(0xC7, struct drm_mode_list_lessees)
1231#define DRM_IOCTL_MODE_GET_LEASE	DRM_IOWR(0xC8, struct drm_mode_get_lease)
1232#define DRM_IOCTL_MODE_REVOKE_LEASE	DRM_IOWR(0xC9, struct drm_mode_revoke_lease)
1233
1234#define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT	DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait)
1235#define DRM_IOCTL_SYNCOBJ_QUERY		DRM_IOWR(0xCB, struct drm_syncobj_timeline_array)
1236#define DRM_IOCTL_SYNCOBJ_TRANSFER	DRM_IOWR(0xCC, struct drm_syncobj_transfer)
1237#define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL	DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)
1238
1239/**
1240 * DRM_IOCTL_MODE_GETFB2 - Get framebuffer metadata.
1241 *
1242 * This queries metadata about a framebuffer. User-space fills
 * &drm_mode_fb_cmd2.fb_id as the input, and the kernel fills the rest of the
1244 * struct as the output.
1245 *
1246 * If the client is DRM master or has &CAP_SYS_ADMIN, &drm_mode_fb_cmd2.handles
1247 * will be filled with GEM buffer handles. Fresh new GEM handles are always
1248 * returned, even if another GEM handle referring to the same memory object
1249 * already exists on the DRM file description. The caller is responsible for
1250 * removing the new handles, e.g. via the &DRM_IOCTL_GEM_CLOSE IOCTL. The same
1251 * new handle will be returned for multiple planes in case they use the same
1252 * memory object. Planes are valid until one has a zero handle -- this can be
1253 * used to compute the number of planes.
1254 *
1255 * Otherwise, &drm_mode_fb_cmd2.handles will be zeroed and planes are valid
1256 * until one has a zero &drm_mode_fb_cmd2.pitches.
1257 *
1258 * If the framebuffer has a format modifier, &DRM_MODE_FB_MODIFIERS will be set
1259 * in &drm_mode_fb_cmd2.flags and &drm_mode_fb_cmd2.modifier will contain the
1260 * modifier. Otherwise, user-space must ignore &drm_mode_fb_cmd2.modifier.
1261 *
1262 * To obtain DMA-BUF FDs for each plane without leaking GEM handles, user-space
1263 * can export each handle via &DRM_IOCTL_PRIME_HANDLE_TO_FD, then immediately
1264 * close each unique handle via &DRM_IOCTL_GEM_CLOSE, making sure to not
1265 * double-close handles which are specified multiple times in the array.
1266 */
1267#define DRM_IOCTL_MODE_GETFB2		DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
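
/*
 * Usage sketch (illustrative only, not part of the UAPI): look up a
 * framebuffer and close the returned GEM handles exactly once, even when
 * several planes share the same handle.  struct drm_mode_fb_cmd2 comes from
 * drm_mode.h, included above.  "fd" and "fb_id" are assumptions, the caller
 * is DRM master or has CAP_SYS_ADMIN, and error handling is omitted.
 *
 *	struct drm_mode_fb_cmd2 fb = { .fb_id = fb_id };
 *	int i, j;
 *
 *	ioctl(fd, DRM_IOCTL_MODE_GETFB2, &fb);
 *
 *	for (i = 0; i < 4 && fb.handles[i]; i++) {
 *		struct drm_gem_close close_args = { .handle = fb.handles[i] };
 *
 *		for (j = 0; j < i; j++)
 *			if (fb.handles[j] == fb.handles[i])
 *				break;
 *		if (j == i)
 *			ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_args);
 *	}
 */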
1268
1269#define DRM_IOCTL_SYNCOBJ_EVENTFD	DRM_IOWR(0xCF, struct drm_syncobj_eventfd)
1270
1271/**
1272 * DRM_IOCTL_MODE_CLOSEFB - Close a framebuffer.
1273 *
1274 * This closes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
1275 * argument is a framebuffer object ID.
1276 *
1277 * This IOCTL is similar to &DRM_IOCTL_MODE_RMFB, except it doesn't disable
1278 * planes and CRTCs. As long as the framebuffer is used by a plane, it's kept
1279 * alive. When the plane no longer uses the framebuffer (because the
1280 * framebuffer is replaced with another one, or the plane is disabled), the
1281 * framebuffer is cleaned up.
1282 *
1283 * This is useful to implement flicker-free transitions between two processes.
1284 *
1285 * Depending on the threat model, user-space may want to ensure that the
1286 * framebuffer doesn't expose any sensitive user information: closed
1287 * framebuffers attached to a plane can be read back by the next DRM master.
1288 */
1289#define DRM_IOCTL_MODE_CLOSEFB		DRM_IOWR(0xD0, struct drm_mode_closefb)
1290
1291/*
1292 * Device specific ioctls should only be in their respective headers
1293 * The device specific ioctl range is from 0x40 to 0x9f.
1294 * Generic IOCTLS restart at 0xA0.
1295 *
1296 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
1297 * drmCommandReadWrite().
1298 */
1299#define DRM_COMMAND_BASE                0x40
1300#define DRM_COMMAND_END			0xA0
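
/*
 * A driver's own UAPI header typically builds its ioctl numbers on top of
 * DRM_COMMAND_BASE.  Sketch for a hypothetical driver "foo" (all names below
 * are illustrative only, not real UAPI):
 *
 *	#define DRM_FOO_GETPARAM	0x00
 *	#define DRM_IOCTL_FOO_GETPARAM \
 *		DRM_IOWR(DRM_COMMAND_BASE + DRM_FOO_GETPARAM, \
 *			 struct drm_foo_getparam)
 */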
1301
1302/**
1303 * struct drm_event - Header for DRM events
1304 * @type: event type.
1305 * @length: total number of payload bytes (including header).
1306 *
1307 * This struct is a header for events written back to user-space on the DRM FD.
1308 * A read on the DRM FD will always only return complete events: e.g. if the
1309 * read buffer is 100 bytes large and there are two 64 byte events pending,
1310 * only one will be returned.
1311 *
1312 * Event types 0 - 0x7fffffff are generic DRM events, 0x80000000 and
1313 * up are chipset specific. Generic DRM events include &DRM_EVENT_VBLANK,
1314 * &DRM_EVENT_FLIP_COMPLETE and &DRM_EVENT_CRTC_SEQUENCE.
1315 */
1316struct drm_event {
1317	__u32 type;
1318	__u32 length;
1319};
1320
1321/**
1322 * DRM_EVENT_VBLANK - vertical blanking event
1323 *
1324 * This event is sent in response to &DRM_IOCTL_WAIT_VBLANK with the
1325 * &_DRM_VBLANK_EVENT flag set.
1326 *
1327 * The event payload is a struct drm_event_vblank.
1328 */
1329#define DRM_EVENT_VBLANK 0x01
1330/**
1331 * DRM_EVENT_FLIP_COMPLETE - page-flip completion event
1332 *
1333 * This event is sent in response to an atomic commit or legacy page-flip with
1334 * the &DRM_MODE_PAGE_FLIP_EVENT flag set.
1335 *
1336 * The event payload is a struct drm_event_vblank.
1337 */
1338#define DRM_EVENT_FLIP_COMPLETE 0x02
1339/**
1340 * DRM_EVENT_CRTC_SEQUENCE - CRTC sequence event
1341 *
1342 * This event is sent in response to &DRM_IOCTL_CRTC_QUEUE_SEQUENCE.
1343 *
1344 * The event payload is a struct drm_event_crtc_sequence.
1345 */
1346#define DRM_EVENT_CRTC_SEQUENCE	0x03
1347
1348struct drm_event_vblank {
1349	struct drm_event base;
1350	__u64 user_data;
1351	__u32 tv_sec;
1352	__u32 tv_usec;
1353	__u32 sequence;
1354	__u32 crtc_id; /* 0 on older kernels that do not support this */
1355};
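
/*
 * Usage sketch (illustrative only, not part of the UAPI): drain pending
 * events from the DRM FD.  read() only ever returns complete events, and
 * drm_event.length counts the whole event including the header, so it can be
 * used to step to the next event in the buffer.  "fd" is an assumed open DRM
 * device node and handle_vblank() a hypothetical callback; error and
 * short-read handling are omitted.
 *
 *	char buf[4096];
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *	size_t off = 0;
 *
 *	while (len > 0 && off + sizeof(struct drm_event) <= (size_t)len) {
 *		const struct drm_event *e = (const struct drm_event *)(buf + off);
 *
 *		if (e->type == DRM_EVENT_VBLANK ||
 *		    e->type == DRM_EVENT_FLIP_COMPLETE) {
 *			const struct drm_event_vblank *vb =
 *				(const struct drm_event_vblank *)e;
 *
 *			handle_vblank(vb->crtc_id, vb->sequence, vb->user_data);
 *		}
 *		off += e->length;
 *	}
 */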
1356
1357/* Event delivered at sequence. Time stamp marks when the first pixel
1358 * of the refresh cycle leaves the display engine for the display
1359 */
1360struct drm_event_crtc_sequence {
1361	struct drm_event	base;
1362	__u64			user_data;
1363	__s64			time_ns;
1364	__u64			sequence;
1365};
1366
1367/* typedef area */
1368#ifndef __KERNEL__
1369typedef struct drm_clip_rect drm_clip_rect_t;
1370typedef struct drm_drawable_info drm_drawable_info_t;
1371typedef struct drm_tex_region drm_tex_region_t;
1372typedef struct drm_hw_lock drm_hw_lock_t;
1373typedef struct drm_version drm_version_t;
1374typedef struct drm_unique drm_unique_t;
1375typedef struct drm_list drm_list_t;
1376typedef struct drm_block drm_block_t;
1377typedef struct drm_control drm_control_t;
1378typedef enum drm_map_type drm_map_type_t;
1379typedef enum drm_map_flags drm_map_flags_t;
1380typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
1381typedef struct drm_map drm_map_t;
1382typedef struct drm_client drm_client_t;
1383typedef enum drm_stat_type drm_stat_type_t;
1384typedef struct drm_stats drm_stats_t;
1385typedef enum drm_lock_flags drm_lock_flags_t;
1386typedef struct drm_lock drm_lock_t;
1387typedef enum drm_dma_flags drm_dma_flags_t;
1388typedef struct drm_buf_desc drm_buf_desc_t;
1389typedef struct drm_buf_info drm_buf_info_t;
1390typedef struct drm_buf_free drm_buf_free_t;
1391typedef struct drm_buf_pub drm_buf_pub_t;
1392typedef struct drm_buf_map drm_buf_map_t;
1393typedef struct drm_dma drm_dma_t;
1394typedef union drm_wait_vblank drm_wait_vblank_t;
1395typedef struct drm_agp_mode drm_agp_mode_t;
1396typedef enum drm_ctx_flags drm_ctx_flags_t;
1397typedef struct drm_ctx drm_ctx_t;
1398typedef struct drm_ctx_res drm_ctx_res_t;
1399typedef struct drm_draw drm_draw_t;
1400typedef struct drm_update_draw drm_update_draw_t;
1401typedef struct drm_auth drm_auth_t;
1402typedef struct drm_irq_busid drm_irq_busid_t;
1403typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
1404
1405typedef struct drm_agp_buffer drm_agp_buffer_t;
1406typedef struct drm_agp_binding drm_agp_binding_t;
1407typedef struct drm_agp_info drm_agp_info_t;
1408typedef struct drm_scatter_gather drm_scatter_gather_t;
1409typedef struct drm_set_version drm_set_version_t;
1410#endif
1411
1412#if defined(__cplusplus)
1413}
1414#endif
1415
1416#endif
1417