/**
 * \file drm.h
 * Header for the Direct Rendering Manager
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 *
 * \par Acknowledgments:
 * Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
 */

/*-
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/**
 * \mainpage
 *
 * The Direct Rendering Manager (DRM) is a device-independent kernel-level
 * device driver that provides support for the XFree86 Direct Rendering
 * Infrastructure (DRI).
 *
 * The DRM supports the Direct Rendering Infrastructure (DRI) in four major
 * ways:
 *     -# The DRM provides synchronized access to the graphics hardware via
 *        the use of an optimized two-tiered lock.
 *     -# The DRM enforces the DRI security policy for access to the graphics
 *        hardware by only allowing authenticated X11 clients access to
 *        restricted regions of memory.
 *     -# The DRM provides a generic DMA engine, complete with multiple
 *        queues and the ability to detect the need for an OpenGL context
 *        switch.
 *     -# The DRM is extensible via the use of small device-specific modules
 *        that rely extensively on the API exported by the DRM module.
 *
 */

#ifndef _DRM_H_
#define _DRM_H_

#ifndef __user
#define __user
#endif
#ifndef __iomem
#define __iomem
#endif

#ifdef __GNUC__
# define DEPRECATED  __attribute__ ((deprecated))
#else
# define DEPRECATED
#endif

#if defined(__linux__)
#include <asm/ioctl.h>		/* For _IO* macros */
#define DRM_IOCTL_NR(n)		_IOC_NR(n)
#define DRM_IOC_VOID		_IOC_NONE
#define DRM_IOC_READ		_IOC_READ
#define DRM_IOC_WRITE		_IOC_WRITE
#define DRM_IOC_READWRITE	_IOC_READ|_IOC_WRITE
#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__)
#include <sys/ioccom.h>
#define DRM_IOCTL_NR(n)		((n) & 0xff)
#define DRM_IOC_VOID		IOC_VOID
#define DRM_IOC_READ		IOC_OUT
#define DRM_IOC_WRITE		IOC_IN
#define DRM_IOC_READWRITE	IOC_INOUT
#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
#endif

#ifdef __OpenBSD__
#define DRM_MAJOR       81
#endif
#if defined(__linux__) || defined(__NetBSD__)
#define DRM_MAJOR       226
#endif
#define DRM_MAX_MINOR   15

#define DRM_NAME	"drm"	  /**< Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER	5	  /**< At least 2^5 bytes = 32 bytes */
#define DRM_MAX_ORDER	22	  /**< Up to 2^22 bytes = 4MB */
#define DRM_RAM_PERCENT 10	  /**< How much system ram can we lock? */

#define _DRM_LOCK_HELD	0x80000000U /**< Hardware lock is held */
#define _DRM_LOCK_CONT	0x40000000U /**< Hardware lock is contended */
#define _DRM_LOCK_IS_HELD(lock)	   ((lock) & _DRM_LOCK_HELD)
#define _DRM_LOCK_IS_CONT(lock)	   ((lock) & _DRM_LOCK_CONT)
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
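
/*
 * Illustrative sketch (not part of the original header): decoding a lock
 * word with the macros above.  The value 0x80000005U is an assumed example,
 * not a state captured from real hardware.
 *
 *	unsigned int lock = 0x80000005U;
 *
 *	if (_DRM_LOCK_IS_HELD(lock)) {
 *		unsigned int ctx = _DRM_LOCKING_CONTEXT(lock);	// ctx == 5
 *	}
 *	if (_DRM_LOCK_IS_CONT(lock)) {
 *		// another context is waiting for the hardware lock
 *	}
 */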

#if defined(__linux__)
typedef unsigned int drm_handle_t;
#else
#include <sys/types.h>
typedef unsigned long drm_handle_t;	/**< To mapped regions */
#endif
typedef unsigned int drm_context_t;	/**< GLXContext handle */
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;	/**< Magic for authentication */

/**
 * Cliprect.
 *
 * \warning If you change this structure, make sure you change
 * XF86DRIClipRectRec in the server as well
 *
 * \note KW: Actually it's illegal to change either for
 * backwards-compatibility reasons.
 */
struct drm_clip_rect {
	unsigned short x1;
	unsigned short y1;
	unsigned short x2;
	unsigned short y2;
};

/**
 * Texture region.
 */
struct drm_tex_region {
	unsigned char next;
	unsigned char prev;
	unsigned char in_use;
	unsigned char padding;
	unsigned int age;
};

/**
 * Hardware lock.
 *
 * The lock structure is a simple cache-line aligned integer.  To avoid
 * processor bus contention on a multiprocessor system, there should not be any
 * other data stored in the same cache line.
 */
struct drm_hw_lock {
	__volatile__ unsigned int lock;		/**< lock variable */
	char padding[60];			/**< Pad to cache line */
};

/* This is beyond ugly, and only works on GCC.  However, it allows me to use
 * drm.h in places (i.e., in the X-server) where I can't use size_t.  The real
 * fix is to use uint32_t instead of size_t, but that fix will break existing
 * LP64 (i.e., PowerPC64, SPARC64, IA-64, Alpha, etc.) systems.  That *will*
 * eventually happen, though.  I chose 'unsigned long' to be the fallback type
 * because that works on all the platforms I know about.  Hopefully, the
 * real fix will happen before that bites us.
 */

#ifdef __SIZE_TYPE__
# define DRM_SIZE_T __SIZE_TYPE__
#else
# warning "__SIZE_TYPE__ not defined.  Assuming sizeof(size_t) == sizeof(unsigned long)!"
# define DRM_SIZE_T unsigned long
#endif

/**
 * DRM_IOCTL_VERSION ioctl argument type.
 *
 * \sa drmGetVersion().
 */
struct drm_version {
	int version_major;	  /**< Major version */
	int version_minor;	  /**< Minor version */
	int version_patchlevel;	  /**< Patch level */
	DRM_SIZE_T name_len;	  /**< Length of name buffer */
	char __user *name;		  /**< Name of driver */
	DRM_SIZE_T date_len;	  /**< Length of date buffer */
	char __user *date;		  /**< User-space buffer to hold date */
	DRM_SIZE_T desc_len;	  /**< Length of desc buffer */
	char __user *desc;		  /**< User-space buffer to hold desc */
};
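
/*
 * Illustrative sketch (not part of the original header): the usual two-pass
 * user-space query with DRM_IOCTL_VERSION (defined below).  The first call
 * reports string lengths only; the caller then allocates buffers and calls
 * again.  An already-open DRM file descriptor is assumed and error handling
 * is omitted.
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *
 *	static void print_drm_version(int fd)
 *	{
 *		struct drm_version v;
 *
 *		memset(&v, 0, sizeof(v));
 *		ioctl(fd, DRM_IOCTL_VERSION, &v);	// pass 1: lengths only
 *		v.name = calloc(1, v.name_len + 1);
 *		v.date = calloc(1, v.date_len + 1);
 *		v.desc = calloc(1, v.desc_len + 1);
 *		ioctl(fd, DRM_IOCTL_VERSION, &v);	// pass 2: fill strings
 *		printf("%s %d.%d.%d (%s)\n", v.name, v.version_major,
 *		    v.version_minor, v.version_patchlevel, v.date);
 *		free(v.name); free(v.date); free(v.desc);
 *	}
 */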

/**
 * DRM_IOCTL_GET_UNIQUE ioctl argument type.
 *
 * \sa drmGetBusid() and drmSetBusId().
 */
struct drm_unique {
	DRM_SIZE_T unique_len;	  /**< Length of unique */
	char __user *unique;		  /**< Unique name for driver instantiation */
};

#undef DRM_SIZE_T

struct drm_list {
	int count;		  /**< Length of user-space structures */
	struct drm_version __user *version;
};

struct drm_block {
	int unused;
};

/**
 * DRM_IOCTL_CONTROL ioctl argument type.
 *
 * \sa drmCtlInstHandler() and drmCtlUninstHandler().
 */
struct drm_control {
	enum {
		DRM_ADD_COMMAND,
		DRM_RM_COMMAND,
		DRM_INST_HANDLER,
		DRM_UNINST_HANDLER
	} func;
	int irq;
};

/**
 * Type of memory to map.
 */
enum drm_map_type {
	_DRM_FRAME_BUFFER = 0,	  /**< WC (no caching), no core dump */
	_DRM_REGISTERS = 1,	  /**< no caching, no core dump */
	_DRM_SHM = 2,		  /**< shared, cached */
	_DRM_AGP = 3,		  /**< AGP/GART */
	_DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
	_DRM_CONSISTENT = 5,	  /**< Consistent memory for PCI DMA */
	_DRM_TTM = 6
};

/**
 * Memory mapping flags.
 */
enum drm_map_flags {
	_DRM_RESTRICTED = 0x01,	     /**< Cannot be mapped to user-virtual */
	_DRM_READ_ONLY = 0x02,
	_DRM_LOCKED = 0x04,	     /**< shared, cached, locked */
	_DRM_KERNEL = 0x08,	     /**< kernel requires access */
	_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
	_DRM_CONTAINS_LOCK = 0x20,   /**< SHM page that contains lock */
	_DRM_REMOVABLE = 0x40,	     /**< Removable mapping */
	_DRM_DRIVER = 0x80	     /**< Managed by driver */
};

struct drm_ctx_priv_map {
	unsigned int ctx_id;	 /**< Context requesting private mapping */
	void *handle;		 /**< Handle of map */
};

/**
 * DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
 * argument type.
 *
 * \sa drmAddMap().
 */
struct drm_map {
	unsigned long offset;	 /**< Requested physical address (0 for SAREA)*/
	unsigned long size;	 /**< Requested physical size (bytes) */
	enum drm_map_type type;	 /**< Type of memory to map */
	enum drm_map_flags flags;	 /**< Flags */
	void *handle;		 /**< User-space: "Handle" to pass to mmap() */
				 /**< Kernel-space: kernel-virtual address */
	int mtrr;		 /**< MTRR slot used */
	/*   Private data */
};

/**
 * DRM_IOCTL_GET_CLIENT ioctl argument type.
 */
struct drm_client {
	int idx;		/**< Which client desired? */
	int auth;		/**< Is client authenticated? */
	unsigned long pid;	/**< Process ID */
	unsigned long uid;	/**< User ID */
	unsigned long magic;	/**< Magic */
	unsigned long iocs;	/**< Ioctl count */
};

enum drm_stat_type {
	_DRM_STAT_LOCK,
	_DRM_STAT_OPENS,
	_DRM_STAT_CLOSES,
	_DRM_STAT_IOCTLS,
	_DRM_STAT_LOCKS,
	_DRM_STAT_UNLOCKS,
	_DRM_STAT_VALUE,	/**< Generic value */
	_DRM_STAT_BYTE,		/**< Generic byte counter (1024bytes/K) */
	_DRM_STAT_COUNT,	/**< Generic non-byte counter (1000/k) */

	_DRM_STAT_IRQ,		/**< IRQ */
	_DRM_STAT_PRIMARY,	/**< Primary DMA bytes */
	_DRM_STAT_SECONDARY,	/**< Secondary DMA bytes */
	_DRM_STAT_DMA,		/**< DMA */
	_DRM_STAT_SPECIAL,	/**< Special DMA (e.g., priority or polled) */
	_DRM_STAT_MISSED	/**< Missed DMA opportunity */
	    /* Add to the *END* of the list */
};

/**
 * DRM_IOCTL_GET_STATS ioctl argument type.
 */
struct drm_stats {
	unsigned long count;
	struct {
		unsigned long value;
		enum drm_stat_type type;
	} data[15];
};

/**
 * Hardware locking flags.
 */
enum drm_lock_flags {
	_DRM_LOCK_READY = 0x01,	     /**< Wait until hardware is ready for DMA */
	_DRM_LOCK_QUIESCENT = 0x02,  /**< Wait until hardware quiescent */
	_DRM_LOCK_FLUSH = 0x04,	     /**< Flush this context's DMA queue first */
	_DRM_LOCK_FLUSH_ALL = 0x08,  /**< Flush all DMA queues first */
	/* These *HALT* flags aren't supported yet
	   -- they will be used to support the
	   full-screen DGA-like mode. */
	_DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
	_DRM_HALT_CUR_QUEUES = 0x20  /**< Halt all current queues */
};

/**
 * DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
 *
 * \sa drmGetLock() and drmUnlock().
 */
struct drm_lock {
	int context;
	enum drm_lock_flags flags;
};

/**
 * DMA flags
 *
 * \warning
 * These values \e must match xf86drm.h.
 *
 * \sa drm_dma.
 */
enum drm_dma_flags {
	/* Flags for DMA buffer dispatch */
	_DRM_DMA_BLOCK = 0x01,	      /**<
				       * Block until buffer dispatched.
				       *
				       * \note The buffer may not yet have
				       * been processed by the hardware --
				       * getting a hardware lock with the
				       * hardware quiescent will ensure
				       * that the buffer has been
				       * processed.
				       */
	_DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
	_DRM_DMA_PRIORITY = 0x04,     /**< High priority dispatch */

	/* Flags for DMA buffer request */
	_DRM_DMA_WAIT = 0x10,	      /**< Wait for free buffers */
	_DRM_DMA_SMALLER_OK = 0x20,   /**< Smaller-than-requested buffers OK */
	_DRM_DMA_LARGER_OK = 0x40     /**< Larger-than-requested buffers OK */
};

/**
 * DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
 *
 * \sa drmAddBufs().
 */
struct drm_buf_desc {
	int count;		 /**< Number of buffers of this size */
	int size;		 /**< Size in bytes */
	int low_mark;		 /**< Low water mark */
	int high_mark;		 /**< High water mark */
	enum {
		_DRM_PAGE_ALIGN = 0x01,	/**< Align on page boundaries for DMA */
		_DRM_AGP_BUFFER = 0x02,	/**< Buffer is in AGP space */
		_DRM_SG_BUFFER  = 0x04,	/**< Scatter/gather memory buffer */
		_DRM_FB_BUFFER  = 0x08, /**< Buffer is in frame buffer */
		_DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
	} flags;
	unsigned long agp_start; /**<
				  * Start address of where the AGP buffers are
				  * in the AGP aperture
				  */
};

/**
 * DRM_IOCTL_INFO_BUFS ioctl argument type.
 */
struct drm_buf_info {
	int count;		  /**< Number of buffers described in list */
	struct drm_buf_desc __user *list; /**< List of buffer descriptions */
};

/**
 * DRM_IOCTL_FREE_BUFS ioctl argument type.
 */
struct drm_buf_free {
	int count;
	int __user *list;
};

/**
 * Buffer information
 *
 * \sa drm_buf_map.
 */
struct drm_buf_pub {
	int idx;		       /**< Index into the master buffer list */
	int total;		       /**< Buffer size */
	int used;		       /**< Amount of buffer in use (for DMA) */
	void __user *address;	       /**< Address of buffer */
};

/**
 * DRM_IOCTL_MAP_BUFS ioctl argument type.
 */
struct drm_buf_map {
	int count;		/**< Length of the buffer list */
#if defined(__cplusplus)
	void __user *c_virtual;
#else
	void __user *virtual;		/**< Mmap'd area in user-virtual */
#endif
	struct drm_buf_pub __user *list;	/**< Buffer information */
};

/**
 * DRM_IOCTL_DMA ioctl argument type.
 *
 * Indices here refer to the offset into the buffer list in drm_buf_get.
 *
 * \sa drmDMA().
 */
struct drm_dma {
	int context;			  /**< Context handle */
	int send_count;			  /**< Number of buffers to send */
	int __user *send_indices;	  /**< List of handles to buffers */
	int __user *send_sizes;		  /**< Lengths of data to send */
	enum drm_dma_flags flags;	  /**< Flags */
	int request_count;		  /**< Number of buffers requested */
	int request_size;		  /**< Desired size for buffers */
	int __user *request_indices;	 /**< Buffer information */
	int __user *request_sizes;
	int granted_count;		  /**< Number of buffers granted */
};

enum drm_ctx_flags {
	_DRM_CONTEXT_PRESERVED = 0x01,
	_DRM_CONTEXT_2DONLY = 0x02
};

/**
 * DRM_IOCTL_ADD_CTX ioctl argument type.
 *
 * \sa drmCreateContext() and drmDestroyContext().
 */
struct drm_ctx {
	drm_context_t handle;
	enum drm_ctx_flags flags;
};

/**
 * DRM_IOCTL_RES_CTX ioctl argument type.
 */
struct drm_ctx_res {
	int count;
	struct drm_ctx __user *contexts;
};

/**
 * DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
 */
struct drm_draw {
	drm_drawable_t handle;
};

/**
 * DRM_IOCTL_UPDATE_DRAW ioctl argument type.
 */
typedef enum {
	DRM_DRAWABLE_CLIPRECTS,
} drm_drawable_info_type_t;

struct drm_update_draw {
	drm_drawable_t handle;
	unsigned int type;
	unsigned int num;
	unsigned long long data;
};

/**
 * DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
 */
struct drm_auth {
	drm_magic_t magic;
};
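
/*
 * Illustrative sketch (not part of the original header): the DRI
 * authentication handshake.  A direct-rendering client obtains a magic with
 * DRM_IOCTL_GET_MAGIC and hands it (through the X protocol) to the X server,
 * which validates it with DRM_IOCTL_AUTH_MAGIC (both defined below).  Error
 * handling is omitted and open file descriptors are assumed.
 *
 *	// client side
 *	struct drm_auth auth;
 *	ioctl(client_fd, DRM_IOCTL_GET_MAGIC, &auth);
 *	// ... send auth.magic to the X server ...
 *
 *	// X server side, using the same magic value it received
 *	ioctl(server_fd, DRM_IOCTL_AUTH_MAGIC, &auth);
 */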

/**
 * DRM_IOCTL_IRQ_BUSID ioctl argument type.
 *
 * \sa drmGetInterruptFromBusID().
 */
struct drm_irq_busid {
	int irq;	/**< IRQ number */
	int busnum;	/**< bus number */
	int devnum;	/**< device number */
	int funcnum;	/**< function number */
};

enum drm_vblank_seq_type {
	_DRM_VBLANK_ABSOLUTE = 0x0,	/**< Wait for specific vblank sequence number */
	_DRM_VBLANK_RELATIVE = 0x1,	/**< Wait for given number of vblanks */
	_DRM_VBLANK_FLIP = 0x8000000,	/**< Scheduled buffer swap should flip */
	_DRM_VBLANK_NEXTONMISS = 0x10000000,	/**< If missed, wait for next vblank */
	_DRM_VBLANK_SECONDARY = 0x20000000,	/**< Secondary display controller */
	_DRM_VBLANK_SIGNAL = 0x40000000	/**< Send signal instead of blocking */
};

#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
				_DRM_VBLANK_NEXTONMISS)

struct drm_wait_vblank_request {
	enum drm_vblank_seq_type type;
	unsigned int sequence;
	unsigned long signal;
};

struct drm_wait_vblank_reply {
	enum drm_vblank_seq_type type;
	unsigned int sequence;
	long tval_sec;
	long tval_usec;
};

/**
 * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
 *
 * \sa drmWaitVBlank().
 */
union drm_wait_vblank {
	struct drm_wait_vblank_request request;
	struct drm_wait_vblank_reply reply;
};
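
/*
 * Illustrative sketch (not part of the original header): waiting for the
 * next vertical blank with DRM_IOCTL_WAIT_VBLANK (defined below).  With
 * _DRM_VBLANK_RELATIVE and sequence == 1 the call blocks until one more
 * vblank has occurred; the reply carries the new sequence number and a
 * timestamp.  An open DRM file descriptor fd is assumed and error handling
 * is omitted.
 *
 *	union drm_wait_vblank vbl;
 *
 *	memset(&vbl, 0, sizeof(vbl));
 *	vbl.request.type = _DRM_VBLANK_RELATIVE;
 *	vbl.request.sequence = 1;
 *	ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
 *	// vbl.reply.sequence and vbl.reply.tval_sec/tval_usec are now valid
 */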


#define _DRM_PRE_MODESET 1
#define _DRM_POST_MODESET 2

/**
 * DRM_IOCTL_MODESET_CTL ioctl argument type
 *
 * \sa drmModesetCtl().
 */
struct drm_modeset_ctl {
	uint32_t crtc;
	uint32_t cmd;
};
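
/*
 * Illustrative sketch (not part of the original header): bracketing a mode
 * set so the kernel can keep vblank counts consistent while the interrupt
 * may be disabled.  The crtc index 0 is an assumed example.
 *
 *	struct drm_modeset_ctl ctl = { .crtc = 0, .cmd = _DRM_PRE_MODESET };
 *	ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);
 *	// ... program the new mode ...
 *	ctl.cmd = _DRM_POST_MODESET;
 *	ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);
 */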

/**
 * DRM_IOCTL_AGP_ENABLE ioctl argument type.
 *
 * \sa drmAgpEnable().
 */
struct drm_agp_mode {
	unsigned long mode;	/**< AGP mode */
};

/**
 * DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
 *
 * \sa drmAgpAlloc() and drmAgpFree().
 */
struct drm_agp_buffer {
	unsigned long size;	/**< In bytes -- will round to page boundary */
	unsigned long handle;	/**< Used for binding / unbinding */
	unsigned long type;	/**< Type of memory to allocate */
	unsigned long physical;	/**< Physical used by i810 */
};

/**
 * DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
 *
 * \sa drmAgpBind() and drmAgpUnbind().
 */
struct drm_agp_binding {
	unsigned long handle;	/**< From drm_agp_buffer */
	unsigned long offset;	/**< In bytes -- will round to page boundary */
};

/**
 * DRM_IOCTL_AGP_INFO ioctl argument type.
 *
 * \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
 * drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
 * drmAgpVendorId() and drmAgpDeviceId().
 */
struct drm_agp_info {
	int agp_version_major;
	int agp_version_minor;
	unsigned long mode;
	unsigned long aperture_base;   /**< physical address */
	unsigned long aperture_size;   /**< bytes */
	unsigned long memory_allowed;  /**< bytes */
	unsigned long memory_used;

	/** \name PCI information */
	/*@{ */
	unsigned short id_vendor;
	unsigned short id_device;
	/*@} */
};

/**
 * DRM_IOCTL_SG_ALLOC ioctl argument type.
 */
struct drm_scatter_gather {
	unsigned long size;	/**< In bytes -- will round to page boundary */
	unsigned long handle;	/**< Used for mapping / unmapping */
};

/**
 * DRM_IOCTL_SET_VERSION ioctl argument type.
 */
struct drm_set_version {
	int drm_di_major;
	int drm_di_minor;
	int drm_dd_major;
	int drm_dd_minor;
};


#define DRM_FENCE_FLAG_EMIT                0x00000001
#define DRM_FENCE_FLAG_SHAREABLE           0x00000002
/**
 * On hardware with no interrupt events for operation completion,
 * indicates that the kernel should sleep while waiting for any blocking
 * operation to complete rather than spinning.
 *
 * Has no effect otherwise.
 */
#define DRM_FENCE_FLAG_WAIT_LAZY           0x00000004
#define DRM_FENCE_FLAG_NO_USER             0x00000010

/* Reserved for driver use */
#define DRM_FENCE_MASK_DRIVER              0xFF000000

#define DRM_FENCE_TYPE_EXE                 0x00000001

struct drm_fence_arg {
	unsigned int handle;
	unsigned int fence_class;
	unsigned int type;
	unsigned int flags;
	unsigned int signaled;
	unsigned int error;
	unsigned int sequence;
	unsigned int pad64;
	uint64_t expand_pad[2]; /*Future expansion */
};

/* Buffer permissions, referring to how the GPU uses the buffers.
 * These translate to fence types used for the buffers.
 * Typically a texture buffer is read, a destination buffer is write, and
 * a command (batch) buffer is exe. The flags can be or'ed together.
 */

#define DRM_BO_FLAG_READ        (1ULL << 0)
#define DRM_BO_FLAG_WRITE       (1ULL << 1)
#define DRM_BO_FLAG_EXE         (1ULL << 2)

/*
 * All of the bits related to access mode
 */
#define DRM_BO_MASK_ACCESS	(DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE)
/*
 * Status flags. Can be read to determine the actual state of a buffer.
 * Can also be set in the buffer mask before validation.
 */

/*
 * Mask: Never evict this buffer. Not even with force. This type of buffer is only
 * available to root and must be manually removed before buffer manager shutdown
 * or lock.
 * Flags: Acknowledge
 */
#define DRM_BO_FLAG_NO_EVICT    (1ULL << 4)

/*
 * Mask: Require that the buffer is placed in mappable memory when validated.
 *       If not set the buffer may or may not be in mappable memory when validated.
 * Flags: If set, the buffer is in mappable memory.
 */
#define DRM_BO_FLAG_MAPPABLE    (1ULL << 5)

/* Mask: The buffer should be shareable with other processes.
 * Flags: The buffer is shareable with other processes.
 */
#define DRM_BO_FLAG_SHAREABLE   (1ULL << 6)

/* Mask: If set, place the buffer in cache-coherent memory if available.
 *       If clear, never place the buffer in cache coherent memory if validated.
 * Flags: The buffer is currently in cache-coherent memory.
 */
#define DRM_BO_FLAG_CACHED      (1ULL << 7)

/* Mask: Make sure that every time this buffer is validated,
 *       it ends up on the same location provided that the memory mask is the same.
 *       The buffer will also not be evicted when claiming space for
 *       other buffers. Basically a pinned buffer but it may be thrown out as
 *       part of buffer manager shutdown or locking.
 * Flags: Acknowledge.
 */
#define DRM_BO_FLAG_NO_MOVE     (1ULL << 8)

/* Mask: Make sure the buffer is in cached memory when mapped.  In conjunction
 * with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART
 * with unsnooped PTEs instead of snooped, by using chipset-specific cache
 * flushing at bind time.  A better name might be DRM_BO_FLAG_TT_UNSNOOPED,
 * as the eviction to local memory (TTM unbind) on map is just a side effect
 * to prevent aggressive cache prefetch from the GPU disturbing the cache
 * management that the DRM is doing.
 *
 * Flags: Acknowledge.
 * Buffers allocated with this flag should not be used for suballocators
 * This type may have issues on CPUs with over-aggressive caching
 * http://marc.info/?l=linux-kernel&m=102376926732464&w=2
 */
#define DRM_BO_FLAG_CACHED_MAPPED    (1ULL << 19)


/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
 * Flags: Acknowledge.
 */
#define DRM_BO_FLAG_FORCE_CACHING  (1ULL << 13)

/*
 * Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
 * Flags: Acknowledge.
 */
#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
#define DRM_BO_FLAG_TILE           (1ULL << 15)

/*
 * Memory type flags that can be or'ed together in the mask, but only
 * one appears in flags.
 */

/* System memory */
#define DRM_BO_FLAG_MEM_LOCAL  (1ULL << 24)
/* Translation table memory */
#define DRM_BO_FLAG_MEM_TT     (1ULL << 25)
/* Vram memory */
#define DRM_BO_FLAG_MEM_VRAM   (1ULL << 26)
/* Up to the driver to define. */
#define DRM_BO_FLAG_MEM_PRIV0  (1ULL << 27)
#define DRM_BO_FLAG_MEM_PRIV1  (1ULL << 28)
#define DRM_BO_FLAG_MEM_PRIV2  (1ULL << 29)
#define DRM_BO_FLAG_MEM_PRIV3  (1ULL << 30)
#define DRM_BO_FLAG_MEM_PRIV4  (1ULL << 31)
/* We can add more of these now with a 64-bit flag type */

/*
 * This is a mask covering all of the memory type flags; easier to just
 * use a single constant than a bunch of | values. It covers
 * DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4
 */
#define DRM_BO_MASK_MEM         0x00000000FF000000ULL
/*
 * This adds all of the CPU-mapping options in with the memory
 * type to label all bits which change how the page gets mapped
 */
#define DRM_BO_MASK_MEMTYPE     (DRM_BO_MASK_MEM | \
				 DRM_BO_FLAG_CACHED_MAPPED | \
				 DRM_BO_FLAG_CACHED | \
				 DRM_BO_FLAG_MAPPABLE)

/* Driver-private flags */
#define DRM_BO_MASK_DRIVER      0xFFFF000000000000ULL

/*
 * Don't block on validate and map. Instead, return EBUSY.
 */
#define DRM_BO_HINT_DONT_BLOCK  0x00000002
/*
 * Don't place this buffer on the unfenced list. This means
 * that the buffer will not end up having a fence associated
 * with it as a result of this operation
 */
#define DRM_BO_HINT_DONT_FENCE  0x00000004
/**
 * On hardware with no interrupt events for operation completion,
 * indicates that the kernel should sleep while waiting for any blocking
 * operation to complete rather than spinning.
 *
 * Has no effect otherwise.
 */
#define DRM_BO_HINT_WAIT_LAZY   0x00000008
/*
 * The client has computed relocations referring to this buffer using the
 * offset in the presumed_offset field. If that offset ends up matching
 * where this buffer lands, the kernel is free to skip executing those
 * relocations.
 */
#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010

#define DRM_BO_INIT_MAGIC 0xfe769812
#define DRM_BO_INIT_MAJOR 1
#define DRM_BO_INIT_MINOR 0
#define DRM_BO_INIT_PATCH 0


struct drm_bo_info_req {
	uint64_t mask;
	uint64_t flags;
	unsigned int handle;
	unsigned int hint;
	unsigned int fence_class;
	unsigned int desired_tile_stride;
	unsigned int tile_info;
	unsigned int pad64;
	uint64_t presumed_offset;
};

struct drm_bo_create_req {
	uint64_t flags;
	uint64_t size;
	uint64_t buffer_start;
	unsigned int hint;
	unsigned int page_alignment;
};
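
/*
 * Illustrative sketch (not part of the original header): creating a buffer
 * object through the TTM interface with DRM_IOCTL_BO_CREATE and struct
 * drm_bo_create_arg (both defined below).  The request and reply share a
 * union, so the handle is read back from d.rep after the ioctl.  The size
 * and flag choices are assumed examples; error handling is omitted.
 *
 *	struct drm_bo_create_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.d.req.size = 1024 * 1024;
 *	arg.d.req.flags = DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
 *	    DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MAPPABLE;
 *	ioctl(fd, DRM_IOCTL_BO_CREATE, &arg);
 *	// arg.d.rep.handle now names the new buffer object
 */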


/*
 * Reply flags
 */

#define DRM_BO_REP_BUSY 0x00000001

struct drm_bo_info_rep {
	uint64_t flags;
	uint64_t proposed_flags;
	uint64_t size;
	uint64_t offset;
	uint64_t arg_handle;
	uint64_t buffer_start;
	unsigned int handle;
	unsigned int fence_flags;
	unsigned int rep_flags;
	unsigned int page_alignment;
	unsigned int desired_tile_stride;
	unsigned int hw_tile_stride;
	unsigned int tile_info;
	unsigned int pad64;
	uint64_t expand_pad[4]; /*Future expansion */
};

struct drm_bo_arg_rep {
	struct drm_bo_info_rep bo_info;
	int ret;
	unsigned int pad64;
};

struct drm_bo_create_arg {
	union {
		struct drm_bo_create_req req;
		struct drm_bo_info_rep rep;
	} d;
};

struct drm_bo_handle_arg {
	unsigned int handle;
};

struct drm_bo_reference_info_arg {
	union {
		struct drm_bo_handle_arg req;
		struct drm_bo_info_rep rep;
	} d;
};

struct drm_bo_map_wait_idle_arg {
	union {
		struct drm_bo_info_req req;
		struct drm_bo_info_rep rep;
	} d;
};

struct drm_bo_op_req {
	enum {
		drm_bo_validate,
		drm_bo_fence,
		drm_bo_ref_fence,
	} op;
	unsigned int arg_handle;
	struct drm_bo_info_req bo_req;
};


struct drm_bo_op_arg {
	uint64_t next;
	union {
		struct drm_bo_op_req req;
		struct drm_bo_arg_rep rep;
	} d;
	int handled;
	unsigned int pad64;
};


#define DRM_BO_MEM_LOCAL 0
#define DRM_BO_MEM_TT 1
#define DRM_BO_MEM_VRAM 2
#define DRM_BO_MEM_PRIV0 3
#define DRM_BO_MEM_PRIV1 4
#define DRM_BO_MEM_PRIV2 5
#define DRM_BO_MEM_PRIV3 6
#define DRM_BO_MEM_PRIV4 7

#define DRM_BO_MEM_TYPES 8 /* For now. */

#define DRM_BO_LOCK_UNLOCK_BM       (1 << 0)
#define DRM_BO_LOCK_IGNORE_NO_EVICT (1 << 1)

struct drm_bo_version_arg {
	uint32_t major;
	uint32_t minor;
	uint32_t patchlevel;
};

struct drm_mm_type_arg {
	unsigned int mem_type;
	unsigned int lock_flags;
};

struct drm_mm_init_arg {
	unsigned int magic;
	unsigned int major;
	unsigned int minor;
	unsigned int mem_type;
	uint64_t p_offset;
	uint64_t p_size;
};

struct drm_mm_info_arg {
	unsigned int mem_type;
	uint64_t p_size;
};

struct drm_gem_close {
	/** Handle of the object to be closed. */
	uint32_t handle;
	uint32_t pad;
};

struct drm_gem_flink {
	/** Handle for the object being named */
	uint32_t handle;

	/** Returned global name */
	uint32_t name;
};

struct drm_gem_open {
	/** Name of object being opened */
	uint32_t name;

	/** Returned handle for the object */
	uint32_t handle;

	/** Returned size of the object */
	uint64_t size;
};
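
/*
 * Illustrative sketch (not part of the original header): sharing a GEM
 * object between processes.  The owner turns a local handle into a global
 * name with DRM_IOCTL_GEM_FLINK; another process turns that name back into
 * its own handle with DRM_IOCTL_GEM_OPEN (both defined below).  How the name
 * travels between the processes is up to the application; local_handle and
 * received_name are assumed variables and error handling is omitted.
 *
 *	// exporting process
 *	struct drm_gem_flink flink = { .handle = local_handle };
 *	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
 *	// ... pass flink.name to the other process ...
 *
 *	// importing process
 *	struct drm_gem_open open_arg = { .name = received_name };
 *	ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_arg);
 *	// open_arg.handle and open_arg.size are now valid
 */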

/**
 * \name Ioctls Definitions
 */
/*@{*/

#define DRM_IOCTL_BASE			'd'
#define DRM_IO(nr)			_IO(DRM_IOCTL_BASE,nr)
#define DRM_IOR(nr,type)		_IOR(DRM_IOCTL_BASE,nr,type)
#define DRM_IOW(nr,type)		_IOW(DRM_IOCTL_BASE,nr,type)
#define DRM_IOWR(nr,type)		_IOWR(DRM_IOCTL_BASE,nr,type)

#define DRM_IOCTL_VERSION		DRM_IOWR(0x00, struct drm_version)
#define DRM_IOCTL_GET_UNIQUE		DRM_IOWR(0x01, struct drm_unique)
#define DRM_IOCTL_GET_MAGIC		DRM_IOR( 0x02, struct drm_auth)
#define DRM_IOCTL_IRQ_BUSID		DRM_IOWR(0x03, struct drm_irq_busid)
#define DRM_IOCTL_GET_MAP               DRM_IOWR(0x04, struct drm_map)
#define DRM_IOCTL_GET_CLIENT            DRM_IOWR(0x05, struct drm_client)
#define DRM_IOCTL_GET_STATS             DRM_IOR( 0x06, struct drm_stats)
#define DRM_IOCTL_SET_VERSION		DRM_IOWR(0x07, struct drm_set_version)
#define DRM_IOCTL_MODESET_CTL           DRM_IOW(0x08,  struct drm_modeset_ctl)

#define DRM_IOCTL_GEM_CLOSE		DRM_IOW (0x09, struct drm_gem_close)
#define DRM_IOCTL_GEM_FLINK		DRM_IOWR(0x0a, struct drm_gem_flink)
#define DRM_IOCTL_GEM_OPEN		DRM_IOWR(0x0b, struct drm_gem_open)

#define DRM_IOCTL_SET_UNIQUE		DRM_IOW( 0x10, struct drm_unique)
#define DRM_IOCTL_AUTH_MAGIC		DRM_IOW( 0x11, struct drm_auth)
#define DRM_IOCTL_BLOCK			DRM_IOWR(0x12, struct drm_block)
#define DRM_IOCTL_UNBLOCK		DRM_IOWR(0x13, struct drm_block)
#define DRM_IOCTL_CONTROL		DRM_IOW( 0x14, struct drm_control)
#define DRM_IOCTL_ADD_MAP		DRM_IOWR(0x15, struct drm_map)
#define DRM_IOCTL_ADD_BUFS		DRM_IOWR(0x16, struct drm_buf_desc)
#define DRM_IOCTL_MARK_BUFS		DRM_IOW( 0x17, struct drm_buf_desc)
#define DRM_IOCTL_INFO_BUFS		DRM_IOWR(0x18, struct drm_buf_info)
#define DRM_IOCTL_MAP_BUFS		DRM_IOWR(0x19, struct drm_buf_map)
#define DRM_IOCTL_FREE_BUFS		DRM_IOW( 0x1a, struct drm_buf_free)

#define DRM_IOCTL_RM_MAP		DRM_IOW( 0x1b, struct drm_map)

#define DRM_IOCTL_SET_SAREA_CTX		DRM_IOW( 0x1c, struct drm_ctx_priv_map)
#define DRM_IOCTL_GET_SAREA_CTX		DRM_IOWR(0x1d, struct drm_ctx_priv_map)

#define DRM_IOCTL_ADD_CTX		DRM_IOWR(0x20, struct drm_ctx)
#define DRM_IOCTL_RM_CTX		DRM_IOWR(0x21, struct drm_ctx)
#define DRM_IOCTL_MOD_CTX		DRM_IOW( 0x22, struct drm_ctx)
#define DRM_IOCTL_GET_CTX		DRM_IOWR(0x23, struct drm_ctx)
#define DRM_IOCTL_SWITCH_CTX		DRM_IOW( 0x24, struct drm_ctx)
#define DRM_IOCTL_NEW_CTX		DRM_IOW( 0x25, struct drm_ctx)
#define DRM_IOCTL_RES_CTX		DRM_IOWR(0x26, struct drm_ctx_res)
#define DRM_IOCTL_ADD_DRAW		DRM_IOWR(0x27, struct drm_draw)
#define DRM_IOCTL_RM_DRAW		DRM_IOWR(0x28, struct drm_draw)
#define DRM_IOCTL_DMA			DRM_IOWR(0x29, struct drm_dma)
#define DRM_IOCTL_LOCK			DRM_IOW( 0x2a, struct drm_lock)
#define DRM_IOCTL_UNLOCK		DRM_IOW( 0x2b, struct drm_lock)
#define DRM_IOCTL_FINISH		DRM_IOW( 0x2c, struct drm_lock)

#define DRM_IOCTL_AGP_ACQUIRE		DRM_IO(  0x30)
#define DRM_IOCTL_AGP_RELEASE		DRM_IO(  0x31)
#define DRM_IOCTL_AGP_ENABLE		DRM_IOW( 0x32, struct drm_agp_mode)
#define DRM_IOCTL_AGP_INFO		DRM_IOR( 0x33, struct drm_agp_info)
#define DRM_IOCTL_AGP_ALLOC		DRM_IOWR(0x34, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_FREE		DRM_IOW( 0x35, struct drm_agp_buffer)
#define DRM_IOCTL_AGP_BIND		DRM_IOW( 0x36, struct drm_agp_binding)
#define DRM_IOCTL_AGP_UNBIND		DRM_IOW( 0x37, struct drm_agp_binding)

#define DRM_IOCTL_SG_ALLOC		DRM_IOWR(0x38, struct drm_scatter_gather)
#define DRM_IOCTL_SG_FREE		DRM_IOW( 0x39, struct drm_scatter_gather)

#define DRM_IOCTL_WAIT_VBLANK		DRM_IOWR(0x3a, union drm_wait_vblank)

#define DRM_IOCTL_UPDATE_DRAW           DRM_IOW(0x3f, struct drm_update_draw)

#define DRM_IOCTL_MM_INIT               DRM_IOWR(0xc0, struct drm_mm_init_arg)
#define DRM_IOCTL_MM_TAKEDOWN           DRM_IOWR(0xc1, struct drm_mm_type_arg)
#define DRM_IOCTL_MM_LOCK               DRM_IOWR(0xc2, struct drm_mm_type_arg)
#define DRM_IOCTL_MM_UNLOCK             DRM_IOWR(0xc3, struct drm_mm_type_arg)

#define DRM_IOCTL_FENCE_CREATE          DRM_IOWR(0xc4, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_REFERENCE       DRM_IOWR(0xc6, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_UNREFERENCE     DRM_IOWR(0xc7, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_SIGNALED        DRM_IOWR(0xc8, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_FLUSH           DRM_IOWR(0xc9, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_WAIT            DRM_IOWR(0xca, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_EMIT            DRM_IOWR(0xcb, struct drm_fence_arg)
#define DRM_IOCTL_FENCE_BUFFERS         DRM_IOWR(0xcc, struct drm_fence_arg)

#define DRM_IOCTL_BO_CREATE             DRM_IOWR(0xcd, struct drm_bo_create_arg)
#define DRM_IOCTL_BO_MAP                DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg)
#define DRM_IOCTL_BO_UNMAP              DRM_IOWR(0xd0, struct drm_bo_handle_arg)
#define DRM_IOCTL_BO_REFERENCE          DRM_IOWR(0xd1, struct drm_bo_reference_info_arg)
#define DRM_IOCTL_BO_UNREFERENCE        DRM_IOWR(0xd2, struct drm_bo_handle_arg)
#define DRM_IOCTL_BO_SETSTATUS          DRM_IOWR(0xd3, struct drm_bo_map_wait_idle_arg)
#define DRM_IOCTL_BO_INFO               DRM_IOWR(0xd4, struct drm_bo_reference_info_arg)
#define DRM_IOCTL_BO_WAIT_IDLE          DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)
#define DRM_IOCTL_BO_VERSION          DRM_IOR(0xd6, struct drm_bo_version_arg)
#define DRM_IOCTL_MM_INFO               DRM_IOWR(0xd7, struct drm_mm_info_arg)

/*@}*/

/**
 * Device specific ioctls should only be in their respective headers
 * The device specific ioctl range is from 0x40 to 0x99.
 * Generic IOCTLS restart at 0xA0.
 *
 * \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
 * drmCommandReadWrite().
 */
#define DRM_COMMAND_BASE                0x40
#define DRM_COMMAND_END                 0xA0
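
/*
 * Illustrative sketch (not part of the original header): how a hypothetical
 * device-specific driver ("foo") would place its own ioctls in the
 * 0x40-0x99 range reserved above.  The command numbers and argument
 * structure are assumed examples, not taken from any real driver.
 *
 *	#define DRM_FOO_INIT		0x00
 *	#define DRM_FOO_FLUSH		0x01
 *
 *	#define DRM_IOCTL_FOO_INIT	DRM_IOW(DRM_COMMAND_BASE + DRM_FOO_INIT, \
 *						struct drm_foo_init)
 *	#define DRM_IOCTL_FOO_FLUSH	DRM_IO(DRM_COMMAND_BASE + DRM_FOO_FLUSH)
 */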

/* typedef area */
#ifndef __KERNEL__
typedef struct drm_clip_rect drm_clip_rect_t;
typedef struct drm_tex_region drm_tex_region_t;
typedef struct drm_hw_lock drm_hw_lock_t;
typedef struct drm_version drm_version_t;
typedef struct drm_unique drm_unique_t;
typedef struct drm_list drm_list_t;
typedef struct drm_block drm_block_t;
typedef struct drm_control drm_control_t;
typedef enum drm_map_type drm_map_type_t;
typedef enum drm_map_flags drm_map_flags_t;
typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
typedef struct drm_map drm_map_t;
typedef struct drm_client drm_client_t;
typedef enum drm_stat_type drm_stat_type_t;
typedef struct drm_stats drm_stats_t;
typedef enum drm_lock_flags drm_lock_flags_t;
typedef struct drm_lock drm_lock_t;
typedef enum drm_dma_flags drm_dma_flags_t;
typedef struct drm_buf_desc drm_buf_desc_t;
typedef struct drm_buf_info drm_buf_info_t;
typedef struct drm_buf_free drm_buf_free_t;
typedef struct drm_buf_pub drm_buf_pub_t;
typedef struct drm_buf_map drm_buf_map_t;
typedef struct drm_dma drm_dma_t;
typedef union drm_wait_vblank drm_wait_vblank_t;
typedef struct drm_agp_mode drm_agp_mode_t;
typedef enum drm_ctx_flags drm_ctx_flags_t;
typedef struct drm_ctx drm_ctx_t;
typedef struct drm_ctx_res drm_ctx_res_t;
typedef struct drm_draw drm_draw_t;
typedef struct drm_update_draw drm_update_draw_t;
typedef struct drm_auth drm_auth_t;
typedef struct drm_irq_busid drm_irq_busid_t;
typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
typedef struct drm_agp_buffer drm_agp_buffer_t;
typedef struct drm_agp_binding drm_agp_binding_t;
typedef struct drm_agp_info drm_agp_info_t;
typedef struct drm_scatter_gather drm_scatter_gather_t;
typedef struct drm_set_version drm_set_version_t;

typedef struct drm_fence_arg drm_fence_arg_t;
typedef struct drm_mm_type_arg drm_mm_type_arg_t;
typedef struct drm_mm_init_arg drm_mm_init_arg_t;
typedef enum drm_bo_type drm_bo_type_t;
#endif

#endif