drm_drv.c (242467, deleted) vs. drm_drv.c (247835, added)
1/*-
2 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
3 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 *
25 * Authors:
26 * Rickard E. (Rik) Faith <faith@valinux.com>
27 * Gareth Hughes <gareth@valinux.com>
28 *
29 */
30
31#include <sys/cdefs.h>
1/*-
2 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
3 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 *
25 * Authors:
26 * Rickard E. (Rik) Faith <faith@valinux.com>
27 * Gareth Hughes <gareth@valinux.com>
28 *
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: head/sys/dev/drm2/drm_drv.c 242467 2012-11-02 05:26:33Z glebius $");
32__FBSDID("$FreeBSD: head/sys/dev/drm2/drm_drv.c 247835 2013-03-05 09:49:34Z kib $");
33
34/** @file drm_drv.c
35 * The catch-all file for DRM device support, including module setup/teardown,
36 * open/close, and ioctl dispatch.
37 */
38
39#include <sys/limits.h>
40#include <sys/sysent.h>
41#include <dev/drm2/drmP.h>
42#include <dev/drm2/drm.h>
43#include <dev/drm2/drm_sarea.h>
44#include <dev/drm2/drm_mode.h>
45
46#ifdef DRM_DEBUG_DEFAULT_ON
47int drm_debug_flag = (DRM_DEBUGBITS_DEBUG | DRM_DEBUGBITS_KMS |
48 DRM_DEBUGBITS_FAILED_IOCTL);
49#else
50int drm_debug_flag = 0;
51#endif
52int drm_notyet_flag = 0;
53
54unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
55unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
56
57static int drm_load(struct drm_device *dev);
58static void drm_unload(struct drm_device *dev);
59static drm_pci_id_list_t *drm_find_description(int vendor, int device,
60 drm_pci_id_list_t *idlist);
33
34/** @file drm_drv.c
35 * The catch-all file for DRM device support, including module setup/teardown,
36 * open/close, and ioctl dispatch.
37 */
38
39#include <sys/limits.h>
40#include <sys/sysent.h>
41#include <dev/drm2/drmP.h>
42#include <dev/drm2/drm.h>
43#include <dev/drm2/drm_sarea.h>
44#include <dev/drm2/drm_mode.h>
45
46#ifdef DRM_DEBUG_DEFAULT_ON
47int drm_debug_flag = (DRM_DEBUGBITS_DEBUG | DRM_DEBUGBITS_KMS |
48 DRM_DEBUGBITS_FAILED_IOCTL);
49#else
50int drm_debug_flag = 0;
51#endif
52int drm_notyet_flag = 0;
53
54unsigned int drm_vblank_offdelay = 5000; /* Default to 5000 msecs. */
55unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
56
57static int drm_load(struct drm_device *dev);
58static void drm_unload(struct drm_device *dev);
59static drm_pci_id_list_t *drm_find_description(int vendor, int device,
60 drm_pci_id_list_t *idlist);
61static int drm_mmap_single(struct cdev *kdev, vm_ooffset_t *offset,
62 vm_size_t size, struct vm_object **obj_res, int nprot);
61
62static int
63drm_modevent(module_t mod, int type, void *data)
64{
65
66 switch (type) {
67 case MOD_LOAD:
68 TUNABLE_INT_FETCH("drm.debug", &drm_debug_flag);
69 TUNABLE_INT_FETCH("drm.notyet", &drm_notyet_flag);
70 break;
71 }
72 return (0);
73}
74
75static moduledata_t drm_mod = {
76 "drmn",
77 drm_modevent,
78 0
79};
80DECLARE_MODULE(drmn, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
81MODULE_VERSION(drmn, 1);
82MODULE_DEPEND(drmn, agp, 1, 1, 1);
83MODULE_DEPEND(drmn, pci, 1, 1, 1);
84MODULE_DEPEND(drmn, mem, 1, 1, 1);
85MODULE_DEPEND(drmn, iicbus, 1, 1, 1);
86
87static drm_ioctl_desc_t drm_ioctls[256] = {
88 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
89 DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
90 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
91 DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
92 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
93 DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
94 DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
95 DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED),
96 DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),
97
98 DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
99 DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
100 DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
101 DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
102
103 DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
104 DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
105
106 DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
107 DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
108
109 DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
110 DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
111
112 DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
113 DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
114 DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
115 DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
116 DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
117 DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
118 DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
119
120 DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
121 DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
122
123 DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
124 DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
125
126 DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
127
128 DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
129 DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
130 DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
131 DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
132 DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
133 DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma, DRM_AUTH),
134
135 DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
136
137 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
138 DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
139 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
140 DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
141 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
142 DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
143 DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
144 DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
145
146 DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
147 DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
148 DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
149 DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
150 DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
151
152 DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
153 DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
154 DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
155
156 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
157 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
158 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
159 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
160 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
161 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
162 DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
163 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
164 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
165 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
166 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
167 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
168 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
169 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED),
170 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
171 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
172 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
173 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
174 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
175 DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
176 DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
177 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
178 DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
179 DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
180 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
181};
182
183static struct cdevsw drm_cdevsw = {
184 .d_version = D_VERSION,
185 .d_open = drm_open,
186 .d_read = drm_read,
187 .d_ioctl = drm_ioctl,
188 .d_poll = drm_poll,
189 .d_mmap = drm_mmap,
63
64static int
65drm_modevent(module_t mod, int type, void *data)
66{
67
68 switch (type) {
69 case MOD_LOAD:
70 TUNABLE_INT_FETCH("drm.debug", &drm_debug_flag);
71 TUNABLE_INT_FETCH("drm.notyet", &drm_notyet_flag);
72 break;
73 }
74 return (0);
75}
76
77static moduledata_t drm_mod = {
78 "drmn",
79 drm_modevent,
80 0
81};
82DECLARE_MODULE(drmn, drm_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
83MODULE_VERSION(drmn, 1);
84MODULE_DEPEND(drmn, agp, 1, 1, 1);
85MODULE_DEPEND(drmn, pci, 1, 1, 1);
86MODULE_DEPEND(drmn, mem, 1, 1, 1);
87MODULE_DEPEND(drmn, iicbus, 1, 1, 1);
88
89static drm_ioctl_desc_t drm_ioctls[256] = {
90 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
91 DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
92 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
93 DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
94 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
95 DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
96 DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
97 DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED),
98 DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),
99
100 DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
101 DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
102 DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
103 DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
104
105 DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
106 DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
107
108 DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
109 DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
110
111 DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY),
112 DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY),
113
114 DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
115 DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
116 DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
117 DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
118 DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
119 DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
120 DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
121
122 DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
123 DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
124
125 DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
126 DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
127
128 DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
129
130 DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
131 DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
132 DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
133 DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
134 DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
135 DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma, DRM_AUTH),
136
137 DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
138
139 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
140 DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
141 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
142 DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
143 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
144 DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
145 DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
146 DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
147
148 DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
149 DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
150 DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),
151 DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
152 DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
153
154 DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
155 DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
156 DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
157
158 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
159 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
160 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
161 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
162 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
163 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
164 DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
165 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
166 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
167 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
168 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
169 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
170 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
171 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED),
172 DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
173 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
174 DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
175 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
176 DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
177 DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
178 DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
179 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
180 DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
181 DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
182 DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
183};
184
185static struct cdevsw drm_cdevsw = {
186 .d_version = D_VERSION,
187 .d_open = drm_open,
188 .d_read = drm_read,
189 .d_ioctl = drm_ioctl,
190 .d_poll = drm_poll,
191 .d_mmap = drm_mmap,
190 .d_mmap_single = drm_gem_mmap_single,
192 .d_mmap_single = drm_mmap_single,
191 .d_name = "drm",
192 .d_flags = D_TRACKCLOSE
193};
194
195static int drm_msi = 1; /* Enable by default. */
196TUNABLE_INT("hw.drm.msi", &drm_msi);
197SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
198SYSCTL_INT(_hw_drm, OID_AUTO, msi, CTLFLAG_RDTUN, &drm_msi, 1,
199 "Enable MSI interrupts for drm devices");
200
201static struct drm_msi_blacklist_entry drm_msi_blacklist[] = {
 202	{0x8086, 0x2772}, /* Intel i945G */
 203	{0x8086, 0x27A2}, /* Intel i945GM */
 204	{0x8086, 0x27AE}, /* Intel i945GME */
205 {0, 0}
206};
207
208static int drm_msi_is_blacklisted(int vendor, int device)
209{
210 int i = 0;
211
212 for (i = 0; drm_msi_blacklist[i].vendor != 0; i++) {
213 if ((drm_msi_blacklist[i].vendor == vendor) &&
214 (drm_msi_blacklist[i].device == device)) {
215 return 1;
216 }
217 }
218
219 return 0;
220}
221
222int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
223{
224 drm_pci_id_list_t *id_entry;
225 int vendor, device;
226
227 vendor = pci_get_vendor(kdev);
228 device = pci_get_device(kdev);
229
230 if (pci_get_class(kdev) != PCIC_DISPLAY
231 || pci_get_subclass(kdev) != PCIS_DISPLAY_VGA)
232 return ENXIO;
233
234 id_entry = drm_find_description(vendor, device, idlist);
235 if (id_entry != NULL) {
236 if (!device_get_desc(kdev)) {
237 DRM_DEBUG("desc : %s\n", device_get_desc(kdev));
238 device_set_desc(kdev, id_entry->name);
239 }
240 return 0;
241 }
242
243 return ENXIO;
244}
245
246int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
247{
248 struct drm_device *dev;
249 drm_pci_id_list_t *id_entry;
250 int error, msicount;
251
252 dev = device_get_softc(kdev);
253
254 dev->device = kdev;
255
256 dev->pci_domain = pci_get_domain(dev->device);
257 dev->pci_bus = pci_get_bus(dev->device);
258 dev->pci_slot = pci_get_slot(dev->device);
259 dev->pci_func = pci_get_function(dev->device);
260
261 dev->pci_vendor = pci_get_vendor(dev->device);
262 dev->pci_device = pci_get_device(dev->device);
263
264 if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) {
265 if (drm_msi &&
266 !drm_msi_is_blacklisted(dev->pci_vendor, dev->pci_device)) {
267 msicount = pci_msi_count(dev->device);
268 DRM_DEBUG("MSI count = %d\n", msicount);
269 if (msicount > 1)
270 msicount = 1;
271
272 if (pci_alloc_msi(dev->device, &msicount) == 0) {
273 DRM_INFO("MSI enabled %d message(s)\n",
274 msicount);
275 dev->msi_enabled = 1;
276 dev->irqrid = 1;
277 }
278 }
279
280 dev->irqr = bus_alloc_resource_any(dev->device, SYS_RES_IRQ,
281 &dev->irqrid, RF_SHAREABLE);
282 if (!dev->irqr) {
283 return (ENOENT);
284 }
285
286 dev->irq = (int) rman_get_start(dev->irqr);
287 }
288
289 mtx_init(&dev->dev_lock, "drmdev", NULL, MTX_DEF);
290 mtx_init(&dev->irq_lock, "drmirq", NULL, MTX_DEF);
291 mtx_init(&dev->vbl_lock, "drmvbl", NULL, MTX_DEF);
292 mtx_init(&dev->drw_lock, "drmdrw", NULL, MTX_DEF);
293 mtx_init(&dev->event_lock, "drmev", NULL, MTX_DEF);
294 sx_init(&dev->dev_struct_lock, "drmslk");
295
296 id_entry = drm_find_description(dev->pci_vendor,
297 dev->pci_device, idlist);
298 dev->id_entry = id_entry;
299
300 error = drm_load(dev);
301 if (error == 0)
302 error = drm_create_cdevs(kdev);
303 return (error);
304}
305
306int
307drm_create_cdevs(device_t kdev)
308{
309 struct drm_device *dev;
310 int error, unit;
311
312 unit = device_get_unit(kdev);
313 dev = device_get_softc(kdev);
314
315 error = make_dev_p(MAKEDEV_WAITOK | MAKEDEV_CHECKNAME, &dev->devnode,
316 &drm_cdevsw, 0, DRM_DEV_UID, DRM_DEV_GID,
317 DRM_DEV_MODE, "dri/card%d", unit);
318 if (error == 0)
319 dev->devnode->si_drv1 = dev;
320 return (error);
321}
322
323int drm_detach(device_t kdev)
324{
325 struct drm_device *dev;
326
327 dev = device_get_softc(kdev);
328 drm_unload(dev);
329 if (dev->irqr) {
330 bus_release_resource(dev->device, SYS_RES_IRQ, dev->irqrid,
331 dev->irqr);
332 if (dev->msi_enabled) {
333 pci_release_msi(dev->device);
334 DRM_INFO("MSI released\n");
335 }
336 }
337 return (0);
338}
339
340#ifndef DRM_DEV_NAME
341#define DRM_DEV_NAME "drm"
342#endif
343
344devclass_t drm_devclass;
345
346drm_pci_id_list_t *drm_find_description(int vendor, int device,
347 drm_pci_id_list_t *idlist)
348{
349 int i = 0;
350
351 for (i = 0; idlist[i].vendor != 0; i++) {
352 if ((idlist[i].vendor == vendor) &&
353 ((idlist[i].device == device) ||
354 (idlist[i].device == 0))) {
355 return &idlist[i];
356 }
357 }
358 return NULL;
359}
360
361static int drm_firstopen(struct drm_device *dev)
362{
363 drm_local_map_t *map;
364 int i;
365
366 DRM_LOCK_ASSERT(dev);
367
368 /* prebuild the SAREA */
369 i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
370 _DRM_CONTAINS_LOCK, &map);
371 if (i != 0)
372 return i;
373
374 if (dev->driver->firstopen)
375 dev->driver->firstopen(dev);
376
377 dev->buf_use = 0;
378
379 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
380 i = drm_dma_setup(dev);
381 if (i != 0)
382 return i;
383 }
384
385 for (i = 0; i < DRM_HASH_SIZE; i++) {
386 dev->magiclist[i].head = NULL;
387 dev->magiclist[i].tail = NULL;
388 }
389
390 dev->lock.lock_queue = 0;
391 if (!drm_core_check_feature(dev, DRIVER_MODESET))
392 dev->irq_enabled = 0;
393 dev->context_flag = 0;
394 dev->last_context = 0;
395 dev->if_version = 0;
396
397 dev->buf_sigio = NULL;
398
399 DRM_DEBUG("\n");
400
401 return 0;
402}
403
404static int drm_lastclose(struct drm_device *dev)
405{
406 drm_magic_entry_t *pt, *next;
407 drm_local_map_t *map, *mapsave;
408 int i;
409
410 DRM_LOCK_ASSERT(dev);
411
412 DRM_DEBUG("\n");
413
414 if (dev->driver->lastclose != NULL)
415 dev->driver->lastclose(dev);
416
417 if (!drm_core_check_feature(dev, DRIVER_MODESET) && dev->irq_enabled)
418 drm_irq_uninstall(dev);
419
420 if (dev->unique) {
421 free(dev->unique, DRM_MEM_DRIVER);
422 dev->unique = NULL;
423 dev->unique_len = 0;
424 }
425 /* Clear pid list */
426 for (i = 0; i < DRM_HASH_SIZE; i++) {
427 for (pt = dev->magiclist[i].head; pt; pt = next) {
428 next = pt->next;
429 free(pt, DRM_MEM_MAGIC);
430 }
431 dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
432 }
433
434 DRM_UNLOCK(dev);
435 drm_drawable_free_all(dev);
436 DRM_LOCK(dev);
437
438 /* Clear AGP information */
439 if (dev->agp) {
440 drm_agp_mem_t *entry;
441 drm_agp_mem_t *nexte;
442
443 /* Remove AGP resources, but leave dev->agp intact until
444 * drm_unload is called.
445 */
446 for (entry = dev->agp->memory; entry; entry = nexte) {
447 nexte = entry->next;
448 if (entry->bound)
449 drm_agp_unbind_memory(entry->handle);
450 drm_agp_free_memory(entry->handle);
451 free(entry, DRM_MEM_AGPLISTS);
452 }
453 dev->agp->memory = NULL;
454
455 if (dev->agp->acquired)
456 drm_agp_release(dev);
457
458 dev->agp->acquired = 0;
459 dev->agp->enabled = 0;
460 }
461 if (dev->sg != NULL) {
462 drm_sg_cleanup(dev->sg);
463 dev->sg = NULL;
464 }
465
466 TAILQ_FOREACH_SAFE(map, &dev->maplist, link, mapsave) {
467 if (!(map->flags & _DRM_DRIVER))
468 drm_rmmap(dev, map);
469 }
470
471 drm_dma_takedown(dev);
472 if (dev->lock.hw_lock) {
473 dev->lock.hw_lock = NULL; /* SHM removed */
474 dev->lock.file_priv = NULL;
475 DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
476 }
477
478 return 0;
479}
480
481static int drm_load(struct drm_device *dev)
482{
483 int i, retcode;
484
485 DRM_DEBUG("\n");
486
487 TAILQ_INIT(&dev->maplist);
488 dev->map_unrhdr = new_unrhdr(1, ((1 << DRM_MAP_HANDLE_BITS) - 1), NULL);
489 if (dev->map_unrhdr == NULL) {
490 DRM_ERROR("Couldn't allocate map number allocator\n");
491 return EINVAL;
492 }
493
494
495 drm_mem_init();
496 drm_sysctl_init(dev);
497 TAILQ_INIT(&dev->files);
498
499 dev->counters = 6;
500 dev->types[0] = _DRM_STAT_LOCK;
501 dev->types[1] = _DRM_STAT_OPENS;
502 dev->types[2] = _DRM_STAT_CLOSES;
503 dev->types[3] = _DRM_STAT_IOCTLS;
504 dev->types[4] = _DRM_STAT_LOCKS;
505 dev->types[5] = _DRM_STAT_UNLOCKS;
506
507 for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
508 atomic_set(&dev->counts[i], 0);
509
510 INIT_LIST_HEAD(&dev->vblank_event_list);
511
512 if (drm_core_has_AGP(dev)) {
513 if (drm_device_is_agp(dev))
514 dev->agp = drm_agp_init();
515 if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) &&
516 dev->agp == NULL) {
517 DRM_ERROR("Card isn't AGP, or couldn't initialize "
518 "AGP.\n");
519 retcode = ENOMEM;
520 goto error;
521 }
522 if (dev->agp != NULL && dev->agp->info.ai_aperture_base != 0) {
523 if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
524 dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
525 dev->agp->mtrr = 1;
526 }
527 }
528
529 retcode = drm_ctxbitmap_init(dev);
530 if (retcode != 0) {
531 DRM_ERROR("Cannot allocate memory for context bitmap.\n");
532 goto error;
533 }
534
535 dev->drw_unrhdr = new_unrhdr(1, INT_MAX, NULL);
536 if (dev->drw_unrhdr == NULL) {
537 DRM_ERROR("Couldn't allocate drawable number allocator\n");
538 retcode = ENOMEM;
539 goto error;
540 }
541
542 if (dev->driver->driver_features & DRIVER_GEM) {
543 retcode = drm_gem_init(dev);
544 if (retcode != 0) {
545 DRM_ERROR("Cannot initialize graphics execution "
546 "manager (GEM)\n");
547 goto error1;
548 }
549 }
550
551 if (dev->driver->load != NULL) {
552 DRM_LOCK(dev);
553 /* Shared code returns -errno. */
554 retcode = -dev->driver->load(dev,
555 dev->id_entry->driver_private);
556 if (pci_enable_busmaster(dev->device))
557 DRM_ERROR("Request to enable bus-master failed.\n");
558 DRM_UNLOCK(dev);
559 if (retcode != 0)
560 goto error;
561 }
562
563 DRM_INFO("Initialized %s %d.%d.%d %s\n",
564 dev->driver->name,
565 dev->driver->major,
566 dev->driver->minor,
567 dev->driver->patchlevel,
568 dev->driver->date);
569
570 return 0;
571
572error1:
573 delete_unrhdr(dev->drw_unrhdr);
574error:
575 drm_sysctl_cleanup(dev);
576 DRM_LOCK(dev);
577 drm_lastclose(dev);
578 DRM_UNLOCK(dev);
579 if (dev->devnode != NULL)
580 destroy_dev(dev->devnode);
581
582 mtx_destroy(&dev->drw_lock);
583 mtx_destroy(&dev->vbl_lock);
584 mtx_destroy(&dev->irq_lock);
585 mtx_destroy(&dev->dev_lock);
586 mtx_destroy(&dev->event_lock);
587 sx_destroy(&dev->dev_struct_lock);
588
589 return retcode;
590}
591
592static void drm_unload(struct drm_device *dev)
593{
594 int i;
595
596 DRM_DEBUG("\n");
597
598 drm_sysctl_cleanup(dev);
599 if (dev->devnode != NULL)
600 destroy_dev(dev->devnode);
601
602 drm_ctxbitmap_cleanup(dev);
603
604 if (dev->driver->driver_features & DRIVER_GEM)
605 drm_gem_destroy(dev);
606
607 if (dev->agp && dev->agp->mtrr) {
608 int __unused retcode;
609
610 retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
611 dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
612 DRM_DEBUG("mtrr_del = %d", retcode);
613 }
614
615 drm_vblank_cleanup(dev);
616
617 DRM_LOCK(dev);
618 drm_lastclose(dev);
619 DRM_UNLOCK(dev);
620
621 /* Clean up PCI resources allocated by drm_bufs.c. We're not really
622 * worried about resource consumption while the DRM is inactive (between
623 * lastclose and firstopen or unload) because these aren't actually
624 * taking up KVA, just keeping the PCI resource allocated.
625 */
626 for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) {
627 if (dev->pcir[i] == NULL)
628 continue;
629 bus_release_resource(dev->device, SYS_RES_MEMORY,
630 dev->pcirid[i], dev->pcir[i]);
631 dev->pcir[i] = NULL;
632 }
633
634 if (dev->agp) {
635 free(dev->agp, DRM_MEM_AGPLISTS);
636 dev->agp = NULL;
637 }
638
639 if (dev->driver->unload != NULL) {
640 DRM_LOCK(dev);
641 dev->driver->unload(dev);
642 DRM_UNLOCK(dev);
643 }
644
645 delete_unrhdr(dev->drw_unrhdr);
646 delete_unrhdr(dev->map_unrhdr);
647
648 drm_mem_uninit();
649
650 if (pci_disable_busmaster(dev->device))
651 DRM_ERROR("Request to disable bus-master failed.\n");
652
653 mtx_destroy(&dev->drw_lock);
654 mtx_destroy(&dev->vbl_lock);
655 mtx_destroy(&dev->irq_lock);
656 mtx_destroy(&dev->dev_lock);
657 mtx_destroy(&dev->event_lock);
658 sx_destroy(&dev->dev_struct_lock);
659}
660
661int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
662{
663 struct drm_version *version = data;
664 int len;
665
666#define DRM_COPY( name, value ) \
667 len = strlen( value ); \
668 if ( len > name##_len ) len = name##_len; \
669 name##_len = strlen( value ); \
670 if ( len && name ) { \
671 if ( DRM_COPY_TO_USER( name, value, len ) ) \
672 return EFAULT; \
673 }
674
675 version->version_major = dev->driver->major;
676 version->version_minor = dev->driver->minor;
677 version->version_patchlevel = dev->driver->patchlevel;
678
679 DRM_COPY(version->name, dev->driver->name);
680 DRM_COPY(version->date, dev->driver->date);
681 DRM_COPY(version->desc, dev->driver->desc);
682
683 return 0;
684}
685
686int
687drm_open(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
688{
689 struct drm_device *dev;
690 int retcode;
691
692 dev = kdev->si_drv1;
693 if (dev == NULL)
694 return (ENXIO);
695
696 DRM_DEBUG("open_count = %d\n", dev->open_count);
697
698 retcode = drm_open_helper(kdev, flags, fmt, p, dev);
699
700 if (retcode == 0) {
701 atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
702 DRM_LOCK(dev);
703 mtx_lock(&Giant);
704 device_busy(dev->device);
705 mtx_unlock(&Giant);
706 if (!dev->open_count++)
707 retcode = drm_firstopen(dev);
708 DRM_UNLOCK(dev);
709 }
710
711 return (retcode);
712}
713
714void drm_close(void *data)
715{
716 struct drm_file *file_priv = data;
717 struct drm_device *dev = file_priv->dev;
718 int retcode = 0;
719
720 DRM_DEBUG("open_count = %d\n", dev->open_count);
721
722 DRM_LOCK(dev);
723
724 if (dev->driver->preclose != NULL)
725 dev->driver->preclose(dev, file_priv);
726
727 /* ========================================================
728 * Begin inline drm_release
729 */
730
731 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
732 DRM_CURRENTPID, (long)dev->device, dev->open_count);
733
734 if (dev->driver->driver_features & DRIVER_GEM)
735 drm_gem_release(dev, file_priv);
736
737 if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
738 && dev->lock.file_priv == file_priv) {
739 DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
740 DRM_CURRENTPID,
741 _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
742 if (dev->driver->reclaim_buffers_locked != NULL)
743 dev->driver->reclaim_buffers_locked(dev, file_priv);
744
745 drm_lock_free(&dev->lock,
746 _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
747
748 /* FIXME: may require heavy-handed reset of
749 hardware at this point, possibly
750 processed via a callback to the X
751 server. */
752 } else if (dev->driver->reclaim_buffers_locked != NULL &&
753 dev->lock.hw_lock != NULL) {
754 /* The lock is required to reclaim buffers */
755 for (;;) {
756 if (!dev->lock.hw_lock) {
757 /* Device has been unregistered */
758 retcode = EINTR;
759 break;
760 }
761 if (drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) {
762 dev->lock.file_priv = file_priv;
763 dev->lock.lock_time = jiffies;
764 atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
765 break; /* Got lock */
766 }
767 /* Contention */
768 retcode = DRM_LOCK_SLEEP(dev, &dev->lock.lock_queue,
769 PCATCH, "drmlk2", 0);
770 if (retcode)
771 break;
772 }
773 if (retcode == 0) {
774 dev->driver->reclaim_buffers_locked(dev, file_priv);
775 drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
776 }
777 }
778
779 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
780 !dev->driver->reclaim_buffers_locked)
781 drm_reclaim_buffers(dev, file_priv);
782
783 funsetown(&dev->buf_sigio);
784 seldrain(&file_priv->event_poll);
785
786 if (dev->driver->postclose != NULL)
787 dev->driver->postclose(dev, file_priv);
788 TAILQ_REMOVE(&dev->files, file_priv, link);
789 free(file_priv, DRM_MEM_FILES);
790
791 /* ========================================================
792 * End inline drm_release
793 */
794
795 atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
796 mtx_lock(&Giant);
797 device_unbusy(dev->device);
798 mtx_unlock(&Giant);
799 if (--dev->open_count == 0) {
800 retcode = drm_lastclose(dev);
801 }
802
803 DRM_UNLOCK(dev);
804}
805
806extern drm_ioctl_desc_t drm_compat_ioctls[];
807
808/* drm_ioctl is called whenever a process performs an ioctl on /dev/drm.
809 */
810int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
811 DRM_STRUCTPROC *p)
812{
813 struct drm_device *dev = drm_get_device_from_kdev(kdev);
814 int retcode = 0;
815 drm_ioctl_desc_t *ioctl;
816 int (*func)(struct drm_device *dev, void *data, struct drm_file *file_priv);
817 int nr = DRM_IOCTL_NR(cmd);
818 int is_driver_ioctl = 0;
819 struct drm_file *file_priv;
820
821 retcode = devfs_get_cdevpriv((void **)&file_priv);
822 if (retcode != 0) {
823 DRM_ERROR("can't find authenticator\n");
824 return EINVAL;
825 }
826
827 atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
828 ++file_priv->ioctl_count;
829
830 DRM_DEBUG("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
831 DRM_CURRENTPID, cmd, nr, (long)dev->device,
832 file_priv->authenticated);
833
834 switch (cmd) {
835 case FIONBIO:
836 case FIOASYNC:
837 return 0;
838
839 case FIOSETOWN:
840 return fsetown(*(int *)data, &dev->buf_sigio);
841
842 case FIOGETOWN:
843 *(int *) data = fgetown(&dev->buf_sigio);
844 return 0;
845 }
846
847 if (IOCGROUP(cmd) != DRM_IOCTL_BASE) {
848 DRM_DEBUG("Bad ioctl group 0x%x\n", (int)IOCGROUP(cmd));
849 return EINVAL;
850 }
851
852#ifdef COMPAT_FREEBSD32
853 /*
854 * Called whenever a 32-bit process running under a 64-bit
855 * kernel performs an ioctl on /dev/drm.
856 */
857 if (SV_CURPROC_FLAG(SV_ILP32) && drm_compat_ioctls[nr].func != NULL)
858 /*
859 * Assume that ioctls without an explicit compat
860 * routine will just work. This may not always be a
861 * good assumption, but it's better than always
862 * failing.
863 */
864 ioctl = &drm_compat_ioctls[nr];
865 else
866#endif
867 ioctl = &drm_ioctls[nr];
868 /* It's not a core DRM ioctl, try driver-specific. */
869 if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) {
870 /* The array entries begin at DRM_COMMAND_BASE ioctl nr */
871 nr -= DRM_COMMAND_BASE;
872 if (nr > dev->driver->max_ioctl) {
873 DRM_DEBUG("Bad driver ioctl number, 0x%x (of 0x%x)\n",
874 nr, dev->driver->max_ioctl);
875 return EINVAL;
876 }
877#ifdef COMPAT_FREEBSD32
878 if (SV_CURPROC_FLAG(SV_ILP32) &&
879 nr < *dev->driver->compat_ioctls_nr &&
880 dev->driver->compat_ioctls[nr].func != NULL)
881 ioctl = &dev->driver->compat_ioctls[nr];
882 else
883#endif
884 ioctl = &dev->driver->ioctls[nr];
885 is_driver_ioctl = 1;
886 }
887 func = ioctl->func;
888
889 if (func == NULL) {
890 DRM_DEBUG("no function\n");
891 return EINVAL;
892 }
893
894 if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(p)) ||
895 ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
896 ((ioctl->flags & DRM_MASTER) && !file_priv->master))
897 return EACCES;
898
899 if (is_driver_ioctl) {
900 if ((ioctl->flags & DRM_UNLOCKED) == 0)
901 DRM_LOCK(dev);
902 /* shared code returns -errno */
903 retcode = -func(dev, data, file_priv);
904 if ((ioctl->flags & DRM_UNLOCKED) == 0)
905 DRM_UNLOCK(dev);
906 } else {
907 retcode = func(dev, data, file_priv);
908 }
909
910 if (retcode != 0)
911 DRM_DEBUG(" returning %d\n", retcode);
912 if (retcode != 0 &&
913 (drm_debug_flag & DRM_DEBUGBITS_FAILED_IOCTL) != 0) {
914 printf(
915"pid %d, cmd 0x%02lx, nr 0x%02x/%1d, dev 0x%lx, auth %d, res %d\n",
916 DRM_CURRENTPID, cmd, nr, is_driver_ioctl, (long)dev->device,
917 file_priv->authenticated, retcode);
918 }
919
920 return retcode;
921}
922
923drm_local_map_t *drm_getsarea(struct drm_device *dev)
924{
925 drm_local_map_t *map;
926
927 DRM_LOCK_ASSERT(dev);
928 TAILQ_FOREACH(map, &dev->maplist, link) {
929 if (map->type == _DRM_SHM && (map->flags & _DRM_CONTAINS_LOCK))
930 return map;
931 }
932
933 return NULL;
934}
935
936int
937drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
938 struct sysctl_oid *top)
939{
940 struct sysctl_oid *oid;
941
942 snprintf(dev->busid_str, sizeof(dev->busid_str),
943 "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
944 dev->pci_slot, dev->pci_func);
945 oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
946 CTLFLAG_RD, dev->busid_str, 0, NULL);
947 if (oid == NULL)
948 return (ENOMEM);
949 dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
950 oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
951 "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
952 if (oid == NULL)
953 return (ENOMEM);
954
955 return (0);
956}
957
193 .d_name = "drm",
194 .d_flags = D_TRACKCLOSE
195};
196
197static int drm_msi = 1; /* Enable by default. */
198TUNABLE_INT("hw.drm.msi", &drm_msi);
199SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
200SYSCTL_INT(_hw_drm, OID_AUTO, msi, CTLFLAG_RDTUN, &drm_msi, 1,
201 "Enable MSI interrupts for drm devices");
202
203static struct drm_msi_blacklist_entry drm_msi_blacklist[] = {
 204	{0x8086, 0x2772}, /* Intel i945G */
 205	{0x8086, 0x27A2}, /* Intel i945GM */
 206	{0x8086, 0x27AE}, /* Intel i945GME */
207 {0, 0}
208};
209
210static int drm_msi_is_blacklisted(int vendor, int device)
211{
212 int i = 0;
213
214 for (i = 0; drm_msi_blacklist[i].vendor != 0; i++) {
215 if ((drm_msi_blacklist[i].vendor == vendor) &&
216 (drm_msi_blacklist[i].device == device)) {
217 return 1;
218 }
219 }
220
221 return 0;
222}
223
224int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
225{
226 drm_pci_id_list_t *id_entry;
227 int vendor, device;
228
229 vendor = pci_get_vendor(kdev);
230 device = pci_get_device(kdev);
231
232 if (pci_get_class(kdev) != PCIC_DISPLAY
233 || pci_get_subclass(kdev) != PCIS_DISPLAY_VGA)
234 return ENXIO;
235
236 id_entry = drm_find_description(vendor, device, idlist);
237 if (id_entry != NULL) {
238 if (!device_get_desc(kdev)) {
239 DRM_DEBUG("desc : %s\n", device_get_desc(kdev));
240 device_set_desc(kdev, id_entry->name);
241 }
242 return 0;
243 }
244
245 return ENXIO;
246}
247
248int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
249{
250 struct drm_device *dev;
251 drm_pci_id_list_t *id_entry;
252 int error, msicount;
253
254 dev = device_get_softc(kdev);
255
256 dev->device = kdev;
257
258 dev->pci_domain = pci_get_domain(dev->device);
259 dev->pci_bus = pci_get_bus(dev->device);
260 dev->pci_slot = pci_get_slot(dev->device);
261 dev->pci_func = pci_get_function(dev->device);
262
263 dev->pci_vendor = pci_get_vendor(dev->device);
264 dev->pci_device = pci_get_device(dev->device);
265
266 if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) {
267 if (drm_msi &&
268 !drm_msi_is_blacklisted(dev->pci_vendor, dev->pci_device)) {
269 msicount = pci_msi_count(dev->device);
270 DRM_DEBUG("MSI count = %d\n", msicount);
271 if (msicount > 1)
272 msicount = 1;
273
274 if (pci_alloc_msi(dev->device, &msicount) == 0) {
275 DRM_INFO("MSI enabled %d message(s)\n",
276 msicount);
277 dev->msi_enabled = 1;
278 dev->irqrid = 1;
279 }
280 }
281
282 dev->irqr = bus_alloc_resource_any(dev->device, SYS_RES_IRQ,
283 &dev->irqrid, RF_SHAREABLE);
284 if (!dev->irqr) {
285 return (ENOENT);
286 }
287
288 dev->irq = (int) rman_get_start(dev->irqr);
289 }
290
291 mtx_init(&dev->dev_lock, "drmdev", NULL, MTX_DEF);
292 mtx_init(&dev->irq_lock, "drmirq", NULL, MTX_DEF);
293 mtx_init(&dev->vbl_lock, "drmvbl", NULL, MTX_DEF);
294 mtx_init(&dev->drw_lock, "drmdrw", NULL, MTX_DEF);
295 mtx_init(&dev->event_lock, "drmev", NULL, MTX_DEF);
296 sx_init(&dev->dev_struct_lock, "drmslk");
297
298 id_entry = drm_find_description(dev->pci_vendor,
299 dev->pci_device, idlist);
300 dev->id_entry = id_entry;
301
302 error = drm_load(dev);
303 if (error == 0)
304 error = drm_create_cdevs(kdev);
305 return (error);
306}
307
308int
309drm_create_cdevs(device_t kdev)
310{
311 struct drm_device *dev;
312 int error, unit;
313
314 unit = device_get_unit(kdev);
315 dev = device_get_softc(kdev);
316
317 error = make_dev_p(MAKEDEV_WAITOK | MAKEDEV_CHECKNAME, &dev->devnode,
318 &drm_cdevsw, 0, DRM_DEV_UID, DRM_DEV_GID,
319 DRM_DEV_MODE, "dri/card%d", unit);
320 if (error == 0)
321 dev->devnode->si_drv1 = dev;
322 return (error);
323}
324
325int drm_detach(device_t kdev)
326{
327 struct drm_device *dev;
328
329 dev = device_get_softc(kdev);
330 drm_unload(dev);
331 if (dev->irqr) {
332 bus_release_resource(dev->device, SYS_RES_IRQ, dev->irqrid,
333 dev->irqr);
334 if (dev->msi_enabled) {
335 pci_release_msi(dev->device);
336 DRM_INFO("MSI released\n");
337 }
338 }
339 return (0);
340}
341
342#ifndef DRM_DEV_NAME
343#define DRM_DEV_NAME "drm"
344#endif
345
346devclass_t drm_devclass;
347
348drm_pci_id_list_t *drm_find_description(int vendor, int device,
349 drm_pci_id_list_t *idlist)
350{
351 int i = 0;
352
353 for (i = 0; idlist[i].vendor != 0; i++) {
354 if ((idlist[i].vendor == vendor) &&
355 ((idlist[i].device == device) ||
356 (idlist[i].device == 0))) {
357 return &idlist[i];
358 }
359 }
360 return NULL;
361}
362
363static int drm_firstopen(struct drm_device *dev)
364{
365 drm_local_map_t *map;
366 int i;
367
368 DRM_LOCK_ASSERT(dev);
369
370 /* prebuild the SAREA */
371 i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
372 _DRM_CONTAINS_LOCK, &map);
373 if (i != 0)
374 return i;
375
376 if (dev->driver->firstopen)
377 dev->driver->firstopen(dev);
378
379 dev->buf_use = 0;
380
381 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
382 i = drm_dma_setup(dev);
383 if (i != 0)
384 return i;
385 }
386
387 for (i = 0; i < DRM_HASH_SIZE; i++) {
388 dev->magiclist[i].head = NULL;
389 dev->magiclist[i].tail = NULL;
390 }
391
392 dev->lock.lock_queue = 0;
393 if (!drm_core_check_feature(dev, DRIVER_MODESET))
394 dev->irq_enabled = 0;
395 dev->context_flag = 0;
396 dev->last_context = 0;
397 dev->if_version = 0;
398
399 dev->buf_sigio = NULL;
400
401 DRM_DEBUG("\n");
402
403 return 0;
404}
405
406static int drm_lastclose(struct drm_device *dev)
407{
408 drm_magic_entry_t *pt, *next;
409 drm_local_map_t *map, *mapsave;
410 int i;
411
412 DRM_LOCK_ASSERT(dev);
413
414 DRM_DEBUG("\n");
415
416 if (dev->driver->lastclose != NULL)
417 dev->driver->lastclose(dev);
418
419 if (!drm_core_check_feature(dev, DRIVER_MODESET) && dev->irq_enabled)
420 drm_irq_uninstall(dev);
421
422 if (dev->unique) {
423 free(dev->unique, DRM_MEM_DRIVER);
424 dev->unique = NULL;
425 dev->unique_len = 0;
426 }
427 /* Clear pid list */
428 for (i = 0; i < DRM_HASH_SIZE; i++) {
429 for (pt = dev->magiclist[i].head; pt; pt = next) {
430 next = pt->next;
431 free(pt, DRM_MEM_MAGIC);
432 }
433 dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
434 }
435
436 DRM_UNLOCK(dev);
437 drm_drawable_free_all(dev);
438 DRM_LOCK(dev);
439
440 /* Clear AGP information */
441 if (dev->agp) {
442 drm_agp_mem_t *entry;
443 drm_agp_mem_t *nexte;
444
445 /* Remove AGP resources, but leave dev->agp intact until
446 * drm_unload is called.
447 */
448 for (entry = dev->agp->memory; entry; entry = nexte) {
449 nexte = entry->next;
450 if (entry->bound)
451 drm_agp_unbind_memory(entry->handle);
452 drm_agp_free_memory(entry->handle);
453 free(entry, DRM_MEM_AGPLISTS);
454 }
455 dev->agp->memory = NULL;
456
457 if (dev->agp->acquired)
458 drm_agp_release(dev);
459
460 dev->agp->acquired = 0;
461 dev->agp->enabled = 0;
462 }
463 if (dev->sg != NULL) {
464 drm_sg_cleanup(dev->sg);
465 dev->sg = NULL;
466 }
467
468 TAILQ_FOREACH_SAFE(map, &dev->maplist, link, mapsave) {
469 if (!(map->flags & _DRM_DRIVER))
470 drm_rmmap(dev, map);
471 }
472
473 drm_dma_takedown(dev);
474 if (dev->lock.hw_lock) {
475 dev->lock.hw_lock = NULL; /* SHM removed */
476 dev->lock.file_priv = NULL;
477 DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
478 }
479
480 return 0;
481}
482
483static int drm_load(struct drm_device *dev)
484{
485 int i, retcode;
486
487 DRM_DEBUG("\n");
488
489 TAILQ_INIT(&dev->maplist);
490 dev->map_unrhdr = new_unrhdr(1, ((1 << DRM_MAP_HANDLE_BITS) - 1), NULL);
491 if (dev->map_unrhdr == NULL) {
492 DRM_ERROR("Couldn't allocate map number allocator\n");
493 return EINVAL;
494 }
495
496
497 drm_mem_init();
498 drm_sysctl_init(dev);
499 TAILQ_INIT(&dev->files);
500
501 dev->counters = 6;
502 dev->types[0] = _DRM_STAT_LOCK;
503 dev->types[1] = _DRM_STAT_OPENS;
504 dev->types[2] = _DRM_STAT_CLOSES;
505 dev->types[3] = _DRM_STAT_IOCTLS;
506 dev->types[4] = _DRM_STAT_LOCKS;
507 dev->types[5] = _DRM_STAT_UNLOCKS;
508
509 for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
510 atomic_set(&dev->counts[i], 0);
511
512 INIT_LIST_HEAD(&dev->vblank_event_list);
513
514 if (drm_core_has_AGP(dev)) {
515 if (drm_device_is_agp(dev))
516 dev->agp = drm_agp_init();
517 if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) &&
518 dev->agp == NULL) {
519 DRM_ERROR("Card isn't AGP, or couldn't initialize "
520 "AGP.\n");
521 retcode = ENOMEM;
522 goto error;
523 }
524 if (dev->agp != NULL && dev->agp->info.ai_aperture_base != 0) {
525 if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
526 dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
527 dev->agp->mtrr = 1;
528 }
529 }
530
531 retcode = drm_ctxbitmap_init(dev);
532 if (retcode != 0) {
533 DRM_ERROR("Cannot allocate memory for context bitmap.\n");
534 goto error;
535 }
536
537 dev->drw_unrhdr = new_unrhdr(1, INT_MAX, NULL);
538 if (dev->drw_unrhdr == NULL) {
539 DRM_ERROR("Couldn't allocate drawable number allocator\n");
540 retcode = ENOMEM;
541 goto error;
542 }
543
544 if (dev->driver->driver_features & DRIVER_GEM) {
545 retcode = drm_gem_init(dev);
546 if (retcode != 0) {
547 DRM_ERROR("Cannot initialize graphics execution "
548 "manager (GEM)\n");
549 goto error1;
550 }
551 }
552
553 if (dev->driver->load != NULL) {
554 DRM_LOCK(dev);
555 /* Shared code returns -errno. */
556 retcode = -dev->driver->load(dev,
557 dev->id_entry->driver_private);
558 if (pci_enable_busmaster(dev->device))
559 DRM_ERROR("Request to enable bus-master failed.\n");
560 DRM_UNLOCK(dev);
561 if (retcode != 0)
562 goto error;
563 }
564
565 DRM_INFO("Initialized %s %d.%d.%d %s\n",
566 dev->driver->name,
567 dev->driver->major,
568 dev->driver->minor,
569 dev->driver->patchlevel,
570 dev->driver->date);
571
572 return 0;
573
574error1:
575 delete_unrhdr(dev->drw_unrhdr);
576error:
577 drm_sysctl_cleanup(dev);
578 DRM_LOCK(dev);
579 drm_lastclose(dev);
580 DRM_UNLOCK(dev);
581 if (dev->devnode != NULL)
582 destroy_dev(dev->devnode);
583
584 mtx_destroy(&dev->drw_lock);
585 mtx_destroy(&dev->vbl_lock);
586 mtx_destroy(&dev->irq_lock);
587 mtx_destroy(&dev->dev_lock);
588 mtx_destroy(&dev->event_lock);
589 sx_destroy(&dev->dev_struct_lock);
590
591 return retcode;
592}
593
594static void drm_unload(struct drm_device *dev)
595{
596 int i;
597
598 DRM_DEBUG("\n");
599
600 drm_sysctl_cleanup(dev);
601 if (dev->devnode != NULL)
602 destroy_dev(dev->devnode);
603
604 drm_ctxbitmap_cleanup(dev);
605
606 if (dev->driver->driver_features & DRIVER_GEM)
607 drm_gem_destroy(dev);
608
609 if (dev->agp && dev->agp->mtrr) {
610 int __unused retcode;
611
612 retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
613 dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
614 DRM_DEBUG("mtrr_del = %d", retcode);
615 }
616
617 drm_vblank_cleanup(dev);
618
619 DRM_LOCK(dev);
620 drm_lastclose(dev);
621 DRM_UNLOCK(dev);
622
623 /* Clean up PCI resources allocated by drm_bufs.c. We're not really
624 * worried about resource consumption while the DRM is inactive (between
625 * lastclose and firstopen or unload) because these aren't actually
626 * taking up KVA, just keeping the PCI resource allocated.
627 */
628 for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) {
629 if (dev->pcir[i] == NULL)
630 continue;
631 bus_release_resource(dev->device, SYS_RES_MEMORY,
632 dev->pcirid[i], dev->pcir[i]);
633 dev->pcir[i] = NULL;
634 }
635
636 if (dev->agp) {
637 free(dev->agp, DRM_MEM_AGPLISTS);
638 dev->agp = NULL;
639 }
640
641 if (dev->driver->unload != NULL) {
642 DRM_LOCK(dev);
643 dev->driver->unload(dev);
644 DRM_UNLOCK(dev);
645 }
646
647 delete_unrhdr(dev->drw_unrhdr);
648 delete_unrhdr(dev->map_unrhdr);
649
650 drm_mem_uninit();
651
652 if (pci_disable_busmaster(dev->device))
653 DRM_ERROR("Request to disable bus-master failed.\n");
654
655 mtx_destroy(&dev->drw_lock);
656 mtx_destroy(&dev->vbl_lock);
657 mtx_destroy(&dev->irq_lock);
658 mtx_destroy(&dev->dev_lock);
659 mtx_destroy(&dev->event_lock);
660 sx_destroy(&dev->dev_struct_lock);
661}
662
663int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
664{
665 struct drm_version *version = data;
666 int len;
667
668#define DRM_COPY( name, value ) \
669 len = strlen( value ); \
670 if ( len > name##_len ) len = name##_len; \
671 name##_len = strlen( value ); \
672 if ( len && name ) { \
673 if ( DRM_COPY_TO_USER( name, value, len ) ) \
674 return EFAULT; \
675 }
676
677 version->version_major = dev->driver->major;
678 version->version_minor = dev->driver->minor;
679 version->version_patchlevel = dev->driver->patchlevel;
680
681 DRM_COPY(version->name, dev->driver->name);
682 DRM_COPY(version->date, dev->driver->date);
683 DRM_COPY(version->desc, dev->driver->desc);
684
685 return 0;
686}
687
688int
689drm_open(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
690{
691 struct drm_device *dev;
692 int retcode;
693
694 dev = kdev->si_drv1;
695 if (dev == NULL)
696 return (ENXIO);
697
698 DRM_DEBUG("open_count = %d\n", dev->open_count);
699
700 retcode = drm_open_helper(kdev, flags, fmt, p, dev);
701
702 if (retcode == 0) {
703 atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
704 DRM_LOCK(dev);
705 mtx_lock(&Giant);
706 device_busy(dev->device);
707 mtx_unlock(&Giant);
708 if (!dev->open_count++)
709 retcode = drm_firstopen(dev);
710 DRM_UNLOCK(dev);
711 }
712
713 return (retcode);
714}
715
716void drm_close(void *data)
717{
718 struct drm_file *file_priv = data;
719 struct drm_device *dev = file_priv->dev;
720 int retcode = 0;
721
722 DRM_DEBUG("open_count = %d\n", dev->open_count);
723
724 DRM_LOCK(dev);
725
726 if (dev->driver->preclose != NULL)
727 dev->driver->preclose(dev, file_priv);
728
729 /* ========================================================
730 * Begin inline drm_release
731 */
732
733 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
734 DRM_CURRENTPID, (long)dev->device, dev->open_count);
735
736 if (dev->driver->driver_features & DRIVER_GEM)
737 drm_gem_release(dev, file_priv);
738
739 if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
740 && dev->lock.file_priv == file_priv) {
741 DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
742 DRM_CURRENTPID,
743 _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
744 if (dev->driver->reclaim_buffers_locked != NULL)
745 dev->driver->reclaim_buffers_locked(dev, file_priv);
746
747 drm_lock_free(&dev->lock,
748 _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
749
750 /* FIXME: may require heavy-handed reset of
751 hardware at this point, possibly
752 processed via a callback to the X
753 server. */
754 } else if (dev->driver->reclaim_buffers_locked != NULL &&
755 dev->lock.hw_lock != NULL) {
756 /* The lock is required to reclaim buffers */
757 for (;;) {
758 if (!dev->lock.hw_lock) {
759 /* Device has been unregistered */
760 retcode = EINTR;
761 break;
762 }
763 if (drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) {
764 dev->lock.file_priv = file_priv;
765 dev->lock.lock_time = jiffies;
766 atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
767 break; /* Got lock */
768 }
769 /* Contention */
770 retcode = DRM_LOCK_SLEEP(dev, &dev->lock.lock_queue,
771 PCATCH, "drmlk2", 0);
772 if (retcode)
773 break;
774 }
775 if (retcode == 0) {
776 dev->driver->reclaim_buffers_locked(dev, file_priv);
777 drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
778 }
779 }
780
781 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
782 !dev->driver->reclaim_buffers_locked)
783 drm_reclaim_buffers(dev, file_priv);
784
785 funsetown(&dev->buf_sigio);
786 seldrain(&file_priv->event_poll);
787
788 if (dev->driver->postclose != NULL)
789 dev->driver->postclose(dev, file_priv);
790 TAILQ_REMOVE(&dev->files, file_priv, link);
791 free(file_priv, DRM_MEM_FILES);
792
793 /* ========================================================
794 * End inline drm_release
795 */
796
797 atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
798 mtx_lock(&Giant);
799 device_unbusy(dev->device);
800 mtx_unlock(&Giant);
801 if (--dev->open_count == 0) {
802 retcode = drm_lastclose(dev);
803 }
804
805 DRM_UNLOCK(dev);
806}
807
808extern drm_ioctl_desc_t drm_compat_ioctls[];
809
810/* drm_ioctl is called whenever a process performs an ioctl on /dev/drm.
811 */
812int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
813 DRM_STRUCTPROC *p)
814{
815 struct drm_device *dev = drm_get_device_from_kdev(kdev);
816 int retcode = 0;
817 drm_ioctl_desc_t *ioctl;
818 int (*func)(struct drm_device *dev, void *data, struct drm_file *file_priv);
819 int nr = DRM_IOCTL_NR(cmd);
820 int is_driver_ioctl = 0;
821 struct drm_file *file_priv;
822
823 retcode = devfs_get_cdevpriv((void **)&file_priv);
824 if (retcode != 0) {
825 DRM_ERROR("can't find authenticator\n");
826 return EINVAL;
827 }
828
829 atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
830 ++file_priv->ioctl_count;
831
832 DRM_DEBUG("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
833 DRM_CURRENTPID, cmd, nr, (long)dev->device,
834 file_priv->authenticated);
835
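	/*
	 * Generic file ioctls are handled up front: FIONBIO and FIOASYNC
	 * are accepted as no-ops, and SIGIO ownership is tracked in
	 * dev->buf_sigio.  None of these reach the DRM dispatch tables.
	 */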
836 switch (cmd) {
837 case FIONBIO:
838 case FIOASYNC:
839 return 0;
840
841 case FIOSETOWN:
842 return fsetown(*(int *)data, &dev->buf_sigio);
843
844 case FIOGETOWN:
845 *(int *) data = fgetown(&dev->buf_sigio);
846 return 0;
847 }
848
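	/*
	 * All DRM ioctls share the DRM_IOCTL_BASE ('d') group; the command
	 * number selects an entry in the core drm_ioctls[] table or, for
	 * numbers at or above DRM_COMMAND_BASE, in the driver's own table.
	 * For example, DRM_IOCTL_VERSION is _IOWR('d', 0x00, ...), so nr is
	 * 0 and it is served by drm_ioctls[0].
	 */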
849 if (IOCGROUP(cmd) != DRM_IOCTL_BASE) {
850 DRM_DEBUG("Bad ioctl group 0x%x\n", (int)IOCGROUP(cmd));
851 return EINVAL;
852 }
853
854#ifdef COMPAT_FREEBSD32
855 /*
856 * Called whenever a 32-bit process running under a 64-bit
857 * kernel performs an ioctl on /dev/drm.
858 */
859 if (SV_CURPROC_FLAG(SV_ILP32) && drm_compat_ioctls[nr].func != NULL)
860 /*
861 * Assume that ioctls without an explicit compat
862 * routine will just work. This may not always be a
863 * good assumption, but it's better than always
864 * failing.
865 */
866 ioctl = &drm_compat_ioctls[nr];
867 else
868#endif
869 ioctl = &drm_ioctls[nr];
870 /* It's not a core DRM ioctl, try driver-specific. */
871 if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) {
872 /* The array entries begin at DRM_COMMAND_BASE ioctl nr */
873 nr -= DRM_COMMAND_BASE;
874			if (nr >= dev->driver->max_ioctl) {
875 DRM_DEBUG("Bad driver ioctl number, 0x%x (of 0x%x)\n",
876 nr, dev->driver->max_ioctl);
877 return EINVAL;
878 }
879#ifdef COMPAT_FREEBSD32
880 if (SV_CURPROC_FLAG(SV_ILP32) &&
881 nr < *dev->driver->compat_ioctls_nr &&
882 dev->driver->compat_ioctls[nr].func != NULL)
883 ioctl = &dev->driver->compat_ioctls[nr];
884 else
885#endif
886 ioctl = &dev->driver->ioctls[nr];
887 is_driver_ioctl = 1;
888 }
889 func = ioctl->func;
890
891 if (func == NULL) {
892 DRM_DEBUG("no function\n");
893 return EINVAL;
894 }
895
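	/*
	 * Enforce the per-ioctl permission flags: DRM_ROOT_ONLY requires
	 * superuser credentials, DRM_AUTH an authenticated client, and
	 * DRM_MASTER the master file itself.
	 */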
896 if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(p)) ||
897 ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
898 ((ioctl->flags & DRM_MASTER) && !file_priv->master))
899 return EACCES;
900
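	/*
	 * Driver ioctls follow the Linux convention and return -errno, so
	 * the result is negated; core ioctls already return positive errno
	 * values.  Unless DRM_UNLOCKED is set, the per-device lock is held
	 * across the driver call.
	 */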
901 if (is_driver_ioctl) {
902 if ((ioctl->flags & DRM_UNLOCKED) == 0)
903 DRM_LOCK(dev);
904 /* shared code returns -errno */
905 retcode = -func(dev, data, file_priv);
906 if ((ioctl->flags & DRM_UNLOCKED) == 0)
907 DRM_UNLOCK(dev);
908 } else {
909 retcode = func(dev, data, file_priv);
910 }
911
912 if (retcode != 0)
913 DRM_DEBUG(" returning %d\n", retcode);
914 if (retcode != 0 &&
915 (drm_debug_flag & DRM_DEBUGBITS_FAILED_IOCTL) != 0) {
916 printf(
917"pid %d, cmd 0x%02lx, nr 0x%02x/%1d, dev 0x%lx, auth %d, res %d\n",
918 DRM_CURRENTPID, cmd, nr, is_driver_ioctl, (long)dev->device,
919 file_priv->authenticated, retcode);
920 }
921
922 return retcode;
923}
924
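/*
 * drm_getsarea() returns the SAREA mapping: the shared-memory map
 * (_DRM_SHM) created with _DRM_CONTAINS_LOCK, which holds the hardware
 * lock used by legacy userspace.
 */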
925drm_local_map_t *drm_getsarea(struct drm_device *dev)
926{
927 drm_local_map_t *map;
928
929 DRM_LOCK_ASSERT(dev);
930 TAILQ_FOREACH(map, &dev->maplist, link) {
931 if (map->type == _DRM_SHM && (map->flags & _DRM_CONTAINS_LOCK))
932 return map;
933 }
934
935 return NULL;
936}
937
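/*
 * Publish the device's PCI bus id string and whether it runs with kernel
 * modesetting as read-only sysctl leaves under the per-device sysctl node
 * passed in by the caller.
 */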
938int
939drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx,
940 struct sysctl_oid *top)
941{
942 struct sysctl_oid *oid;
943
944 snprintf(dev->busid_str, sizeof(dev->busid_str),
945 "pci:%04x:%02x:%02x.%d", dev->pci_domain, dev->pci_bus,
946 dev->pci_slot, dev->pci_func);
947 oid = SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(top), OID_AUTO, "busid",
948 CTLFLAG_RD, dev->busid_str, 0, NULL);
949 if (oid == NULL)
950 return (ENOMEM);
951 dev->modesetting = (dev->driver->driver_features & DRIVER_MODESET) != 0;
952 oid = SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(top), OID_AUTO,
953 "modesetting", CTLFLAG_RD, &dev->modesetting, 0, NULL);
954 if (oid == NULL)
955 return (ENOMEM);
956
957 return (0);
958}
959
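/*
 * d_mmap_single handler: GEM-capable drivers route the request through the
 * GEM object machinery, TTM-based drivers through their buffer-object
 * device; anything else gets ENODEV.
 */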
960static int
961drm_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size,
962 struct vm_object **obj_res, int nprot)
963{
964 struct drm_device *dev;
965
966 dev = drm_get_device_from_kdev(kdev);
967 if ((dev->driver->driver_features & DRIVER_GEM) != 0) {
968 return (drm_gem_mmap_single(dev, offset, size, obj_res, nprot));
969 } else if (dev->drm_ttm_bo != NULL) {
970 return (ttm_bo_mmap_single(dev->drm_ttm_bo, offset, size,
971 obj_res, nprot));
972 } else {
973 return (ENODEV);
974 }
975}
976
977#if DRM_LINUX
978
979#include <sys/sysproto.h>
980
981MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);
982
983#define LINUX_IOCTL_DRM_MIN 0x6400
984#define LINUX_IOCTL_DRM_MAX 0x64ff
985
986static linux_ioctl_function_t drm_linux_ioctl;
987static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
988 LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};
989
990SYSINIT(drm_register, SI_SUB_KLD, SI_ORDER_MIDDLE,
991 linux_ioctl_register_handler, &drm_handler);
992SYSUNINIT(drm_unregister, SI_SUB_KLD, SI_ORDER_MIDDLE,
993 linux_ioctl_unregister_handler, &drm_handler);
994
995/* The bits for in/out are switched on Linux */
996#define LINUX_IOC_IN IOC_OUT
997#define LINUX_IOC_OUT IOC_IN
998
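/*
 * Re-dispatch a Linux-encoded DRM ioctl through the native ioctl() path
 * after flipping the direction bits, which Linux encodes in the opposite
 * positions from the native IOC_IN/IOC_OUT.
 */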
999static int
1000drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args* args)
1001{
1002 int error;
1003 int cmd = args->cmd;
1004
1005 args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
1006 if (cmd & LINUX_IOC_IN)
1007 args->cmd |= IOC_IN;
1008 if (cmd & LINUX_IOC_OUT)
1009 args->cmd |= IOC_OUT;
1010
1011 error = ioctl(p, (struct ioctl_args *)args);
1012
1013 return error;
1014}
1015#endif /* DRM_LINUX */
1016
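/*
 * DMI/SMBIOS matching is not implemented in this port, so every quirk
 * table consulted through dmi_check_system() is treated as not matching.
 */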
1017bool
1018dmi_check_system(const struct dmi_system_id *sysid)
1019{
1020
1021 /* XXXKIB */
1022 return (false);
1023}
1024