--- drm_drv.c	(revision 189099)
+++ drm_drv.c	(revision 189128)
1/*-
2 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
3 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 *
25 * Authors:
26 * Rickard E. (Rik) Faith <faith@valinux.com>
27 * Gareth Hughes <gareth@valinux.com>
28 *
29 */
30
31#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/dev/drm/drm_drv.c 189099 2009-02-27 06:01:42Z rnoland $");
+__FBSDID("$FreeBSD: head/sys/dev/drm/drm_drv.c 189128 2009-02-27 23:50:55Z rnoland $");
33
34/** @file drm_drv.c
35 * The catch-all file for DRM device support, including module setup/teardown,
36 * open/close, and ioctl dispatch.
37 */
38
39
40#include <sys/limits.h>
41#include "dev/drm/drmP.h"
42#include "dev/drm/drm.h"
43#include "dev/drm/drm_sarea.h"
44
45#ifdef DRM_DEBUG_DEFAULT_ON
46int drm_debug_flag = 1;
47#else
48int drm_debug_flag = 0;
49#endif
50
51static int drm_load(struct drm_device *dev);
52static void drm_unload(struct drm_device *dev);
53static drm_pci_id_list_t *drm_find_description(int vendor, int device,
54 drm_pci_id_list_t *idlist);
55
56#define DRIVER_SOFTC(unit) \
57 ((struct drm_device *)devclass_get_softc(drm_devclass, unit))
58
59MODULE_VERSION(drm, 1);
60MODULE_DEPEND(drm, agp, 1, 1, 1);
61MODULE_DEPEND(drm, pci, 1, 1, 1);
62MODULE_DEPEND(drm, mem, 1, 1, 1);
63
64static drm_ioctl_desc_t drm_ioctls[256] = {
65 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
66 DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
67 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
68 DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
69 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
70 DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
71 DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
72 DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),
73
74 DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
75 DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
76 DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
77 DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
78
79 DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
80 DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
81
82 DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
83 DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
84
85 DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
86 DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
87 DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
88 DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
89 DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
90 DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
91 DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
92
93 DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
94 DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
95
96 DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
97 DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
98
99 DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
100
101 DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
102 DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
103 DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
104 DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
105 DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
106 DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma, DRM_AUTH),
107
108 DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
109
110 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
111 DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
112 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
113 DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
114 DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
115 DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
116 DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
117 DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
118
119 DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
120 DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
121 DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
122 DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
123 DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
124};
125
126static struct cdevsw drm_cdevsw = {
127 .d_version = D_VERSION,
128 .d_open = drm_open,
129 .d_read = drm_read,
130 .d_ioctl = drm_ioctl,
131 .d_poll = drm_poll,
132 .d_mmap = drm_mmap,
133 .d_name = "drm",
134 .d_flags = D_TRACKCLOSE
135};
136
+int drm_msi = 1;	/* Enable by default. */
+TUNABLE_INT("hw.drm.msi", &drm_msi);
+
140static struct drm_msi_blacklist_entry drm_msi_blacklist[] = {
141 {0x8086, 0x2772}, /* Intel i945G */ \
142 {0x8086, 0x27A2}, /* Intel i945GM */ \
143 {0x8086, 0x27AE}, /* Intel i945GME */ \
144 {0, 0}
145};
146
147static int drm_msi_is_blacklisted(int vendor, int device)
148{
149 int i = 0;
150
151 for (i = 0; drm_msi_blacklist[i].vendor != 0; i++) {
152 if ((drm_msi_blacklist[i].vendor == vendor) &&
153 (drm_msi_blacklist[i].device == device)) {
154 return 1;
155 }
156 }
157
158 return 0;
159}
160
161int drm_probe(device_t dev, drm_pci_id_list_t *idlist)
162{
163 drm_pci_id_list_t *id_entry;
164 int vendor, device;
165#if __FreeBSD_version < 700010
166 device_t realdev;
167
168 if (!strcmp(device_get_name(dev), "drmsub"))
169 realdev = device_get_parent(dev);
170 else
171 realdev = dev;
172 vendor = pci_get_vendor(realdev);
173 device = pci_get_device(realdev);
174#else
175 vendor = pci_get_vendor(dev);
176 device = pci_get_device(dev);
177#endif
178
179 if (pci_get_class(dev) != PCIC_DISPLAY
180 || pci_get_subclass(dev) != PCIS_DISPLAY_VGA)
181 return ENXIO;
182
183 id_entry = drm_find_description(vendor, device, idlist);
184 if (id_entry != NULL) {
185 device_set_desc(dev, id_entry->name);
186 return 0;
187 }
188
189 return ENXIO;
190}
191
192int drm_attach(device_t nbdev, drm_pci_id_list_t *idlist)
193{
194 struct drm_device *dev;
195 drm_pci_id_list_t *id_entry;
196 int unit, msicount;
197
198 unit = device_get_unit(nbdev);
199 dev = device_get_softc(nbdev);
200
201#if __FreeBSD_version < 700010
202 if (!strcmp(device_get_name(nbdev), "drmsub"))
203 dev->device = device_get_parent(nbdev);
204 else
205 dev->device = nbdev;
206#else
207 dev->device = nbdev;
208#endif
209 dev->devnode = make_dev(&drm_cdevsw,
210 unit,
211 DRM_DEV_UID,
212 DRM_DEV_GID,
213 DRM_DEV_MODE,
214 "dri/card%d", unit);
215
216#if __FreeBSD_version >= 700053
217 dev->pci_domain = pci_get_domain(dev->device);
218#else
219 dev->pci_domain = 0;
220#endif
221 dev->pci_bus = pci_get_bus(dev->device);
222 dev->pci_slot = pci_get_slot(dev->device);
223 dev->pci_func = pci_get_function(dev->device);
224
225 dev->pci_vendor = pci_get_vendor(dev->device);
226 dev->pci_device = pci_get_device(dev->device);
227
-	if (!drm_msi_is_blacklisted(dev->pci_vendor, dev->pci_device)) {
+	if (drm_msi &&
+	    !drm_msi_is_blacklisted(dev->pci_vendor, dev->pci_device)) {
230 msicount = pci_msi_count(dev->device);
231 DRM_DEBUG("MSI count = %d\n", msicount);
232 if (msicount > 1)
233 msicount = 1;
234
235 if (pci_alloc_msi(dev->device, &msicount) == 0) {
236 DRM_INFO("MSI enabled %d message(s)\n", msicount);
237 dev->msi_enabled = 1;
238 dev->irqrid = 1;
239 }
240 }
241
242 dev->irqr = bus_alloc_resource_any(dev->device, SYS_RES_IRQ,
243 &dev->irqrid, RF_SHAREABLE);
244 if (!dev->irqr) {
245 return ENOENT;
246 }
247
248 dev->irq = (int) rman_get_start(dev->irqr);
249
250 mtx_init(&dev->dev_lock, "drmdev", NULL, MTX_DEF);
251 mtx_init(&dev->irq_lock, "drmirq", NULL, MTX_DEF);
252 mtx_init(&dev->vbl_lock, "drmvbl", NULL, MTX_DEF);
253 mtx_init(&dev->drw_lock, "drmdrw", NULL, MTX_DEF);
254
255 id_entry = drm_find_description(dev->pci_vendor,
256 dev->pci_device, idlist);
257 dev->id_entry = id_entry;
258
259 return drm_load(dev);
260}
261
262int drm_detach(device_t nbdev)
263{
264 struct drm_device *dev;
265
266 dev = device_get_softc(nbdev);
267
268 drm_unload(dev);
269
270 bus_release_resource(dev->device, SYS_RES_IRQ, dev->irqrid, dev->irqr);
271
272 if (dev->msi_enabled) {
273 pci_release_msi(dev->device);
274 DRM_INFO("MSI released\n");
275 }
276
277 return 0;
278}
279
280#ifndef DRM_DEV_NAME
281#define DRM_DEV_NAME "drm"
282#endif
283
284devclass_t drm_devclass;
285
286drm_pci_id_list_t *drm_find_description(int vendor, int device,
287 drm_pci_id_list_t *idlist)
288{
289 int i = 0;
290
291 for (i = 0; idlist[i].vendor != 0; i++) {
292 if ((idlist[i].vendor == vendor) &&
293 (idlist[i].device == device)) {
294 return &idlist[i];
295 }
296 }
297 return NULL;
298}
299
300static int drm_firstopen(struct drm_device *dev)
301{
302 drm_local_map_t *map;
303 int i;
304
305 DRM_SPINLOCK_ASSERT(&dev->dev_lock);
306
307 /* prebuild the SAREA */
308 i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
309 _DRM_CONTAINS_LOCK, &map);
310 if (i != 0)
311 return i;
312
313 if (dev->driver->firstopen)
314 dev->driver->firstopen(dev);
315
316 dev->buf_use = 0;
317
318 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
319 i = drm_dma_setup(dev);
320 if (i != 0)
321 return i;
322 }
323
324 for (i = 0; i < DRM_HASH_SIZE; i++) {
325 dev->magiclist[i].head = NULL;
326 dev->magiclist[i].tail = NULL;
327 }
328
329 dev->lock.lock_queue = 0;
330 dev->irq_enabled = 0;
331 dev->context_flag = 0;
332 dev->last_context = 0;
333 dev->if_version = 0;
334
335 dev->buf_sigio = NULL;
336
337 DRM_DEBUG("\n");
338
339 return 0;
340}
341
342static int drm_lastclose(struct drm_device *dev)
343{
344 drm_magic_entry_t *pt, *next;
345 drm_local_map_t *map, *mapsave;
346 int i;
347
348 DRM_SPINLOCK_ASSERT(&dev->dev_lock);
349
350 DRM_DEBUG("\n");
351
352 if (dev->driver->lastclose != NULL)
353 dev->driver->lastclose(dev);
354
355 if (dev->irq_enabled)
356 drm_irq_uninstall(dev);
357
358 if (dev->unique) {
359 free(dev->unique, DRM_MEM_DRIVER);
360 dev->unique = NULL;
361 dev->unique_len = 0;
362 }
363 /* Clear pid list */
364 for (i = 0; i < DRM_HASH_SIZE; i++) {
365 for (pt = dev->magiclist[i].head; pt; pt = next) {
366 next = pt->next;
367 free(pt, DRM_MEM_MAGIC);
368 }
369 dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
370 }
371
372 DRM_UNLOCK();
373 drm_drawable_free_all(dev);
374 DRM_LOCK();
375
376 /* Clear AGP information */
377 if (dev->agp) {
378 drm_agp_mem_t *entry;
379 drm_agp_mem_t *nexte;
380
381 /* Remove AGP resources, but leave dev->agp intact until
382 * drm_unload is called.
383 */
384 for (entry = dev->agp->memory; entry; entry = nexte) {
385 nexte = entry->next;
386 if (entry->bound)
387 drm_agp_unbind_memory(entry->handle);
388 drm_agp_free_memory(entry->handle);
389 free(entry, DRM_MEM_AGPLISTS);
390 }
391 dev->agp->memory = NULL;
392
393 if (dev->agp->acquired)
394 drm_agp_release(dev);
395
396 dev->agp->acquired = 0;
397 dev->agp->enabled = 0;
398 }
399 if (dev->sg != NULL) {
400 drm_sg_cleanup(dev->sg);
401 dev->sg = NULL;
402 }
403
404 TAILQ_FOREACH_SAFE(map, &dev->maplist, link, mapsave) {
405 if (!(map->flags & _DRM_DRIVER))
406 drm_rmmap(dev, map);
407 }
408
409 drm_dma_takedown(dev);
410 if (dev->lock.hw_lock) {
411 dev->lock.hw_lock = NULL; /* SHM removed */
412 dev->lock.file_priv = NULL;
413 DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
414 }
415
416 return 0;
417}
418
419static int drm_load(struct drm_device *dev)
420{
421 int i, retcode;
422
423 DRM_DEBUG("\n");
424
425 TAILQ_INIT(&dev->maplist);
426
427 drm_mem_init();
428 drm_sysctl_init(dev);
429 TAILQ_INIT(&dev->files);
430
431 dev->counters = 6;
432 dev->types[0] = _DRM_STAT_LOCK;
433 dev->types[1] = _DRM_STAT_OPENS;
434 dev->types[2] = _DRM_STAT_CLOSES;
435 dev->types[3] = _DRM_STAT_IOCTLS;
436 dev->types[4] = _DRM_STAT_LOCKS;
437 dev->types[5] = _DRM_STAT_UNLOCKS;
438
439 for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
440 atomic_set(&dev->counts[i], 0);
441
442 if (dev->driver->load != NULL) {
443 DRM_LOCK();
444 /* Shared code returns -errno. */
445 retcode = -dev->driver->load(dev,
446 dev->id_entry->driver_private);
447 if (pci_enable_busmaster(dev->device))
448 DRM_ERROR("Request to enable bus-master failed.\n");
449 DRM_UNLOCK();
450 if (retcode != 0)
451 goto error;
452 }
453
454 if (drm_core_has_AGP(dev)) {
455 if (drm_device_is_agp(dev))
456 dev->agp = drm_agp_init();
457 if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) &&
458 dev->agp == NULL) {
459 DRM_ERROR("Card isn't AGP, or couldn't initialize "
460 "AGP.\n");
461 retcode = ENOMEM;
462 goto error;
463 }
464 if (dev->agp != NULL) {
465 if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
466 dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
467 dev->agp->mtrr = 1;
468 }
469 }
470
471 retcode = drm_ctxbitmap_init(dev);
472 if (retcode != 0) {
473 DRM_ERROR("Cannot allocate memory for context bitmap.\n");
474 goto error;
475 }
476
477 dev->drw_unrhdr = new_unrhdr(1, INT_MAX, NULL);
478 if (dev->drw_unrhdr == NULL) {
479 DRM_ERROR("Couldn't allocate drawable number allocator\n");
480 goto error;
481 }
482
483 DRM_INFO("Initialized %s %d.%d.%d %s\n",
484 dev->driver->name,
485 dev->driver->major,
486 dev->driver->minor,
487 dev->driver->patchlevel,
488 dev->driver->date);
489
490 return 0;
491
492error:
493 drm_sysctl_cleanup(dev);
494 DRM_LOCK();
495 drm_lastclose(dev);
496 DRM_UNLOCK();
497 destroy_dev(dev->devnode);
498
499 mtx_destroy(&dev->drw_lock);
500 mtx_destroy(&dev->vbl_lock);
501 mtx_destroy(&dev->irq_lock);
502 mtx_destroy(&dev->dev_lock);
503
504 return retcode;
505}
506
507static void drm_unload(struct drm_device *dev)
508{
509 int i;
510
511 DRM_DEBUG("\n");
512
513 drm_sysctl_cleanup(dev);
514 destroy_dev(dev->devnode);
515
516 drm_ctxbitmap_cleanup(dev);
517
518 if (dev->agp && dev->agp->mtrr) {
519 int __unused retcode;
520
521 retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
522 dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
523 DRM_DEBUG("mtrr_del = %d", retcode);
524 }
525
526 DRM_LOCK();
527 drm_lastclose(dev);
528 DRM_UNLOCK();
529
530 /* Clean up PCI resources allocated by drm_bufs.c. We're not really
531 * worried about resource consumption while the DRM is inactive (between
532 * lastclose and firstopen or unload) because these aren't actually
533 * taking up KVA, just keeping the PCI resource allocated.
534 */
535 for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) {
536 if (dev->pcir[i] == NULL)
537 continue;
538 bus_release_resource(dev->device, SYS_RES_MEMORY,
539 dev->pcirid[i], dev->pcir[i]);
540 dev->pcir[i] = NULL;
541 }
542
543 if (dev->agp) {
544 free(dev->agp, DRM_MEM_AGPLISTS);
545 dev->agp = NULL;
546 }
547
548 if (dev->driver->unload != NULL) {
549 DRM_LOCK();
550 dev->driver->unload(dev);
551 DRM_UNLOCK();
552 }
553
554 delete_unrhdr(dev->drw_unrhdr);
555
556 drm_mem_uninit();
557
558 if (pci_disable_busmaster(dev->device))
559 DRM_ERROR("Request to disable bus-master failed.\n");
560
561 mtx_destroy(&dev->drw_lock);
562 mtx_destroy(&dev->vbl_lock);
563 mtx_destroy(&dev->irq_lock);
564 mtx_destroy(&dev->dev_lock);
565}
566
567int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
568{
569 struct drm_version *version = data;
570 int len;
571
572#define DRM_COPY( name, value ) \
573 len = strlen( value ); \
574 if ( len > name##_len ) len = name##_len; \
575 name##_len = strlen( value ); \
576 if ( len && name ) { \
577 if ( DRM_COPY_TO_USER( name, value, len ) ) \
578 return EFAULT; \
579 }
580
581 version->version_major = dev->driver->major;
582 version->version_minor = dev->driver->minor;
583 version->version_patchlevel = dev->driver->patchlevel;
584
585 DRM_COPY(version->name, dev->driver->name);
586 DRM_COPY(version->date, dev->driver->date);
587 DRM_COPY(version->desc, dev->driver->desc);
588
589 return 0;
590}
591
592int drm_open(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
593{
594 struct drm_device *dev = NULL;
595 int retcode = 0;
596
597 dev = DRIVER_SOFTC(dev2unit(kdev));
598
599 DRM_DEBUG("open_count = %d\n", dev->open_count);
600
601 retcode = drm_open_helper(kdev, flags, fmt, p, dev);
602
603 if (!retcode) {
604 atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
605 DRM_LOCK();
606 device_busy(dev->device);
607 if (!dev->open_count++)
608 retcode = drm_firstopen(dev);
609 DRM_UNLOCK();
610 }
611
612 return retcode;
613}
614
615void drm_close(void *data)
616{
617 struct drm_file *file_priv = data;
618 struct drm_device *dev = file_priv->dev;
619 int retcode = 0;
620
621 DRM_DEBUG("open_count = %d\n", dev->open_count);
622
623 DRM_LOCK();
624
625 if (dev->driver->preclose != NULL)
626 dev->driver->preclose(dev, file_priv);
627
628 /* ========================================================
629 * Begin inline drm_release
630 */
631
632 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
633 DRM_CURRENTPID, (long)dev->device, dev->open_count);
634
635 if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
636 && dev->lock.file_priv == file_priv) {
637 DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
638 DRM_CURRENTPID,
639 _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
640 if (dev->driver->reclaim_buffers_locked != NULL)
641 dev->driver->reclaim_buffers_locked(dev, file_priv);
642
643 drm_lock_free(&dev->lock,
644 _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
645
646 /* FIXME: may require heavy-handed reset of
647 hardware at this point, possibly
648 processed via a callback to the X
649 server. */
650 } else if (dev->driver->reclaim_buffers_locked != NULL &&
651 dev->lock.hw_lock != NULL) {
652 /* The lock is required to reclaim buffers */
653 for (;;) {
654 if (!dev->lock.hw_lock) {
655 /* Device has been unregistered */
656 retcode = EINTR;
657 break;
658 }
659 if (drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) {
660 dev->lock.file_priv = file_priv;
661 dev->lock.lock_time = jiffies;
662 atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
663 break; /* Got lock */
664 }
665 /* Contention */
666 retcode = mtx_sleep((void *)&dev->lock.lock_queue,
667 &dev->dev_lock, PZERO | PCATCH, "drmlk2", 0);
668 if (retcode)
669 break;
670 }
671 if (retcode == 0) {
672 dev->driver->reclaim_buffers_locked(dev, file_priv);
673 drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
674 }
675 }
676
677 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
678 !dev->driver->reclaim_buffers_locked)
679 drm_reclaim_buffers(dev, file_priv);
680
681 funsetown(&dev->buf_sigio);
682
683 if (dev->driver->postclose != NULL)
684 dev->driver->postclose(dev, file_priv);
685 TAILQ_REMOVE(&dev->files, file_priv, link);
686 free(file_priv, DRM_MEM_FILES);
687
688 /* ========================================================
689 * End inline drm_release
690 */
691
692 atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
693 device_unbusy(dev->device);
694 if (--dev->open_count == 0) {
695 retcode = drm_lastclose(dev);
696 }
697
698 DRM_UNLOCK();
699}
700
701/* drm_ioctl is called whenever a process performs an ioctl on /dev/drm.
702 */
703int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
704 DRM_STRUCTPROC *p)
705{
706 struct drm_device *dev = drm_get_device_from_kdev(kdev);
707 int retcode = 0;
708 drm_ioctl_desc_t *ioctl;
709 int (*func)(struct drm_device *dev, void *data, struct drm_file *file_priv);
710 int nr = DRM_IOCTL_NR(cmd);
711 int is_driver_ioctl = 0;
712 struct drm_file *file_priv;
713
714 retcode = devfs_get_cdevpriv((void **)&file_priv);
715 if (retcode != 0) {
716 DRM_ERROR("can't find authenticator\n");
717 return EINVAL;
718 }
719
720 atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
721 ++file_priv->ioctl_count;
722
723 DRM_DEBUG("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
724 DRM_CURRENTPID, cmd, nr, (long)dev->device,
725 file_priv->authenticated);
726
727 switch (cmd) {
728 case FIONBIO:
729 case FIOASYNC:
730 return 0;
731
732 case FIOSETOWN:
733 return fsetown(*(int *)data, &dev->buf_sigio);
734
735 case FIOGETOWN:
736 *(int *) data = fgetown(&dev->buf_sigio);
737 return 0;
738 }
739
740 if (IOCGROUP(cmd) != DRM_IOCTL_BASE) {
741 DRM_DEBUG("Bad ioctl group 0x%x\n", (int)IOCGROUP(cmd));
742 return EINVAL;
743 }
744
745 ioctl = &drm_ioctls[nr];
746 /* It's not a core DRM ioctl, try driver-specific. */
747 if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) {
748 /* The array entries begin at DRM_COMMAND_BASE ioctl nr */
749 nr -= DRM_COMMAND_BASE;
750 if (nr > dev->driver->max_ioctl) {
751 DRM_DEBUG("Bad driver ioctl number, 0x%x (of 0x%x)\n",
752 nr, dev->driver->max_ioctl);
753 return EINVAL;
754 }
755 ioctl = &dev->driver->ioctls[nr];
756 is_driver_ioctl = 1;
757 }
758 func = ioctl->func;
759
760 if (func == NULL) {
761 DRM_DEBUG("no function\n");
762 return EINVAL;
763 }
764
765 if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(p)) ||
766 ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
767 ((ioctl->flags & DRM_MASTER) && !file_priv->master))
768 return EACCES;
769
770 if (is_driver_ioctl) {
771 DRM_LOCK();
772 /* shared code returns -errno */
773 retcode = -func(dev, data, file_priv);
774 DRM_UNLOCK();
775 } else {
776 retcode = func(dev, data, file_priv);
777 }
778
779 if (retcode != 0)
780 DRM_DEBUG(" returning %d\n", retcode);
781
782 return retcode;
783}
784
785drm_local_map_t *drm_getsarea(struct drm_device *dev)
786{
787 drm_local_map_t *map;
788
789 DRM_SPINLOCK_ASSERT(&dev->dev_lock);
790 TAILQ_FOREACH(map, &dev->maplist, link) {
791 if (map->type == _DRM_SHM && (map->flags & _DRM_CONTAINS_LOCK))
792 return map;
793 }
794
795 return NULL;
796}
797
798#if DRM_LINUX
799
800#include <sys/sysproto.h>
801
802MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);
803
804#define LINUX_IOCTL_DRM_MIN 0x6400
805#define LINUX_IOCTL_DRM_MAX 0x64ff
806
807static linux_ioctl_function_t drm_linux_ioctl;
808static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
809 LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};
810
811SYSINIT(drm_register, SI_SUB_KLD, SI_ORDER_MIDDLE,
812 linux_ioctl_register_handler, &drm_handler);
813SYSUNINIT(drm_unregister, SI_SUB_KLD, SI_ORDER_MIDDLE,
814 linux_ioctl_unregister_handler, &drm_handler);
815
816/* The bits for in/out are switched on Linux */
817#define LINUX_IOC_IN IOC_OUT
818#define LINUX_IOC_OUT IOC_IN
819
820static int
821drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args* args)
822{
823 int error;
824 int cmd = args->cmd;
825
826 args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
827 if (cmd & LINUX_IOC_IN)
828 args->cmd |= IOC_IN;
829 if (cmd & LINUX_IOC_OUT)
830 args->cmd |= IOC_OUT;
831
832 error = ioctl(p, (struct ioctl_args *)args);
833
834 return error;
835}
836#endif /* DRM_LINUX */
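
A minimal usage sketch, not part of the revision itself: the new hw.drm.msi knob is registered with TUNABLE_INT, so it is read from the kernel environment when the driver initializes. With r189128 applied, MSI can be disabled for drm(4) before the driver attaches, for example from /boot/loader.conf:

    # /boot/loader.conf
    # hw.drm.msi defaults to 1 (MSI used when the device is not blacklisted)
    hw.drm.msi="0"    # skip pci_alloc_msi() and use legacy shared interrupts

drm_attach() only attempts pci_alloc_msi() when drm_msi is non-zero and the device is absent from drm_msi_blacklist[], so setting the tunable to 0 forces the shared legacy-IRQ path regardless of the device.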