/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.3/sys/dev/drm/drm_drv.c 215367 2010-11-16 03:43:06Z nwhitehorn $");

/** @file drm_drv.c
 * The catch-all file for DRM device support, including module setup/teardown,
 * open/close, and ioctl dispatch.
 */

#include <sys/limits.h>
#include "dev/drm/drmP.h"
#include "dev/drm/drm.h"
#include "dev/drm/drm_sarea.h"

#ifdef DRM_DEBUG_DEFAULT_ON
int drm_debug_flag = 1;
#else
int drm_debug_flag = 0;
#endif

static int drm_load(struct drm_device *dev);
static void drm_unload(struct drm_device *dev);
static drm_pci_id_list_t *drm_find_description(int vendor, int device,
    drm_pci_id_list_t *idlist);

MODULE_VERSION(drm, 1);
MODULE_DEPEND(drm, agp, 1, 1, 1);
MODULE_DEPEND(drm, pci, 1, 1, 1);
MODULE_DEPEND(drm, mem, 1, 1, 1);

static drm_ioctl_desc_t		  drm_ioctls[256] = {
	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma, DRM_AUTH),

	DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

	DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
};

static struct cdevsw drm_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	drm_open,
	.d_read =	drm_read,
	.d_ioctl =	drm_ioctl,
	.d_poll =	drm_poll,
	.d_mmap =	drm_mmap,
	.d_name =	"drm",
	.d_flags =	D_TRACKCLOSE
};

static int drm_msi = 1;	/* Enable by default. */
TUNABLE_INT("hw.drm.msi", &drm_msi);
SYSCTL_NODE(_hw, OID_AUTO, drm, CTLFLAG_RW, NULL, "DRM device");
SYSCTL_INT(_hw_drm, OID_AUTO, msi, CTLFLAG_RDTUN, &drm_msi, 1,
    "Enable MSI interrupts for drm devices");

static struct drm_msi_blacklist_entry drm_msi_blacklist[] = {
	{0x8086, 0x2772}, /* Intel i945G	*/
	{0x8086, 0x27A2}, /* Intel i945GM	*/
	{0x8086, 0x27AE}, /* Intel i945GME	*/
	{0, 0}
};

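/*
 * Return 1 if the given PCI vendor/device pair appears in the MSI
 * blacklist above, 0 otherwise.
 */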
static int drm_msi_is_blacklisted(int vendor, int device)
{
	int i = 0;

	for (i = 0; drm_msi_blacklist[i].vendor != 0; i++) {
		if ((drm_msi_blacklist[i].vendor == vendor) &&
		    (drm_msi_blacklist[i].device == device)) {
			return 1;
		}
	}

	return 0;
}

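/*
 * Generic PCI probe: match the device's vendor/device ID against the
 * driver-supplied ID list and, if it is a VGA display-class device,
 * claim it and set its description.
 */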
int drm_probe(device_t kdev, drm_pci_id_list_t *idlist)
{
	drm_pci_id_list_t *id_entry;
	int vendor, device;
#if __FreeBSD_version < 700010
	device_t realdev;

	if (!strcmp(device_get_name(kdev), "drmsub"))
		realdev = device_get_parent(kdev);
	else
		realdev = kdev;
	vendor = pci_get_vendor(realdev);
	device = pci_get_device(realdev);
#else
	vendor = pci_get_vendor(kdev);
	device = pci_get_device(kdev);
#endif

	if (pci_get_class(kdev) != PCIC_DISPLAY
	    || pci_get_subclass(kdev) != PCIS_DISPLAY_VGA)
		return ENXIO;

	id_entry = drm_find_description(vendor, device, idlist);
	if (id_entry != NULL) {
		if (!device_get_desc(kdev)) {
			DRM_DEBUG("desc: %s\n", id_entry->name);
			device_set_desc(kdev, id_entry->name);
		}
		return 0;
	}

	return ENXIO;
}

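/*
 * Generic PCI attach: create the /dev/dri/card%d node, record the
 * device's PCI location and IDs, optionally enable MSI and allocate
 * the IRQ resource, initialize the per-device locks, and finish with
 * drm_load().
 */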
int drm_attach(device_t kdev, drm_pci_id_list_t *idlist)
{
	struct drm_device *dev;
	drm_pci_id_list_t *id_entry;
	int unit, msicount;

	unit = device_get_unit(kdev);
	dev = device_get_softc(kdev);

#if __FreeBSD_version < 700010
	if (!strcmp(device_get_name(kdev), "drmsub"))
		dev->device = device_get_parent(kdev);
	else
		dev->device = kdev;
#else
	dev->device = kdev;
#endif
	dev->devnode = make_dev(&drm_cdevsw,
			0,
			DRM_DEV_UID,
			DRM_DEV_GID,
			DRM_DEV_MODE,
			"dri/card%d", unit);
	dev->devnode->si_drv1 = dev;

#if __FreeBSD_version >= 700053
	dev->pci_domain = pci_get_domain(dev->device);
#else
	dev->pci_domain = 0;
#endif
	dev->pci_bus = pci_get_bus(dev->device);
	dev->pci_slot = pci_get_slot(dev->device);
	dev->pci_func = pci_get_function(dev->device);

	dev->pci_vendor = pci_get_vendor(dev->device);
	dev->pci_device = pci_get_device(dev->device);

	if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) {
		if (drm_msi &&
		    !drm_msi_is_blacklisted(dev->pci_vendor, dev->pci_device)) {
			msicount = pci_msi_count(dev->device);
			DRM_DEBUG("MSI count = %d\n", msicount);
			if (msicount > 1)
				msicount = 1;

			if (pci_alloc_msi(dev->device, &msicount) == 0) {
				DRM_INFO("MSI enabled %d message(s)\n",
				    msicount);
				dev->msi_enabled = 1;
				dev->irqrid = 1;
			}
		}

		dev->irqr = bus_alloc_resource_any(dev->device, SYS_RES_IRQ,
		    &dev->irqrid, RF_SHAREABLE);
		if (!dev->irqr) {
			return ENOENT;
		}

		dev->irq = (int) rman_get_start(dev->irqr);
	}

	mtx_init(&dev->dev_lock, "drmdev", NULL, MTX_DEF);
	mtx_init(&dev->irq_lock, "drmirq", NULL, MTX_DEF);
	mtx_init(&dev->vbl_lock, "drmvbl", NULL, MTX_DEF);
	mtx_init(&dev->drw_lock, "drmdrw", NULL, MTX_DEF);

	id_entry = drm_find_description(dev->pci_vendor,
	    dev->pci_device, idlist);
	dev->id_entry = id_entry;

	return drm_load(dev);
}

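/*
 * Generic PCI detach: unload the device and release the IRQ resource
 * and any MSI allocation made in drm_attach().
 */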
int drm_detach(device_t kdev)
{
	struct drm_device *dev;

	dev = device_get_softc(kdev);

	drm_unload(dev);

	if (dev->irqr) {
		bus_release_resource(dev->device, SYS_RES_IRQ, dev->irqrid,
		    dev->irqr);

		if (dev->msi_enabled) {
			pci_release_msi(dev->device);
			DRM_INFO("MSI released\n");
		}
	}

	return 0;
}

#ifndef DRM_DEV_NAME
#define DRM_DEV_NAME "drm"
#endif

devclass_t drm_devclass;

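/*
 * Look up the given vendor/device pair in a driver's PCI ID list.  An
 * entry with a device ID of 0 matches any device from that vendor.
 * Returns the matching entry, or NULL if there is none.
 */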
drm_pci_id_list_t *drm_find_description(int vendor, int device,
    drm_pci_id_list_t *idlist)
{
	int i = 0;

	for (i = 0; idlist[i].vendor != 0; i++) {
		if ((idlist[i].vendor == vendor) &&
		    ((idlist[i].device == device) ||
		    (idlist[i].device == 0))) {
			return &idlist[i];
		}
	}
	return NULL;
}

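/*
 * Per-device setup run when the first file handle is opened: create
 * the SAREA map, call the driver's firstopen hook, set up DMA and
 * reset the authentication and lock bookkeeping.
 */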
static int drm_firstopen(struct drm_device *dev)
{
	drm_local_map_t *map;
	int i;

	DRM_SPINLOCK_ASSERT(&dev->dev_lock);

	/* prebuild the SAREA */
	i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM,
	    _DRM_CONTAINS_LOCK, &map);
	if (i != 0)
		return i;

	if (dev->driver->firstopen)
		dev->driver->firstopen(dev);

	dev->buf_use = 0;

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) {
		i = drm_dma_setup(dev);
		if (i != 0)
			return i;
	}

	for (i = 0; i < DRM_HASH_SIZE; i++) {
		dev->magiclist[i].head = NULL;
		dev->magiclist[i].tail = NULL;
	}

	dev->lock.lock_queue = 0;
	dev->irq_enabled = 0;
	dev->context_flag = 0;
	dev->last_context = 0;
	dev->if_version = 0;

	dev->buf_sigio = NULL;

	DRM_DEBUG("\n");

	return 0;
}

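/*
 * Per-device teardown run when the last file handle is closed: call
 * the driver's lastclose hook, uninstall the interrupt handler, and
 * free the magic list, AGP and scatter/gather memory, non-driver maps,
 * DMA buffers and the hardware lock.
 */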
static int drm_lastclose(struct drm_device *dev)
{
	drm_magic_entry_t *pt, *next;
	drm_local_map_t *map, *mapsave;
	int i;

	DRM_SPINLOCK_ASSERT(&dev->dev_lock);

	DRM_DEBUG("\n");

	if (dev->driver->lastclose != NULL)
		dev->driver->lastclose(dev);

	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	if (dev->unique) {
		free(dev->unique, DRM_MEM_DRIVER);
		dev->unique = NULL;
		dev->unique_len = 0;
	}
	/* Clear pid list */
	for (i = 0; i < DRM_HASH_SIZE; i++) {
		for (pt = dev->magiclist[i].head; pt; pt = next) {
			next = pt->next;
			free(pt, DRM_MEM_MAGIC);
		}
		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
	}

	DRM_UNLOCK();
	drm_drawable_free_all(dev);
	DRM_LOCK();

	/* Clear AGP information */
	if (dev->agp) {
		drm_agp_mem_t *entry;
		drm_agp_mem_t *nexte;

		/* Remove AGP resources, but leave dev->agp intact until
		 * drm_unload is called.
		 */
		for (entry = dev->agp->memory; entry; entry = nexte) {
			nexte = entry->next;
			if (entry->bound)
				drm_agp_unbind_memory(entry->handle);
			drm_agp_free_memory(entry->handle);
			free(entry, DRM_MEM_AGPLISTS);
		}
		dev->agp->memory = NULL;

		if (dev->agp->acquired)
			drm_agp_release(dev);

		dev->agp->acquired = 0;
		dev->agp->enabled  = 0;
	}
	if (dev->sg != NULL) {
		drm_sg_cleanup(dev->sg);
		dev->sg = NULL;
	}

	TAILQ_FOREACH_SAFE(map, &dev->maplist, link, mapsave) {
		if (!(map->flags & _DRM_DRIVER))
			drm_rmmap(dev, map);
	}

	drm_dma_takedown(dev);
	if (dev->lock.hw_lock) {
		dev->lock.hw_lock = NULL; /* SHM removed */
		dev->lock.file_priv = NULL;
		DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
	}

	return 0;
}

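/*
 * One-time device initialization called from drm_attach(): set up the
 * map list, sysctl nodes, statistics counters, the driver's load hook,
 * AGP, the context bitmap and the drawable number allocator.
 */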
static int drm_load(struct drm_device *dev)
{
	int i, retcode;

	DRM_DEBUG("\n");

	TAILQ_INIT(&dev->maplist);
	dev->map_unrhdr = new_unrhdr(1, ((1 << DRM_MAP_HANDLE_BITS) - 1), NULL);
	if (dev->map_unrhdr == NULL) {
		DRM_ERROR("Couldn't allocate map number allocator\n");
		return EINVAL;
	}

	drm_mem_init();
	drm_sysctl_init(dev);
	TAILQ_INIT(&dev->files);

	dev->counters  = 6;
	dev->types[0]  = _DRM_STAT_LOCK;
	dev->types[1]  = _DRM_STAT_OPENS;
	dev->types[2]  = _DRM_STAT_CLOSES;
	dev->types[3]  = _DRM_STAT_IOCTLS;
	dev->types[4]  = _DRM_STAT_LOCKS;
	dev->types[5]  = _DRM_STAT_UNLOCKS;

	for (i = 0; i < DRM_ARRAY_SIZE(dev->counts); i++)
		atomic_set(&dev->counts[i], 0);

	if (dev->driver->load != NULL) {
		DRM_LOCK();
		/* Shared code returns -errno. */
		retcode = -dev->driver->load(dev,
		    dev->id_entry->driver_private);
		if (pci_enable_busmaster(dev->device))
			DRM_ERROR("Request to enable bus-master failed.\n");
		DRM_UNLOCK();
		if (retcode != 0)
			goto error;
	}

	if (drm_core_has_AGP(dev)) {
		if (drm_device_is_agp(dev))
			dev->agp = drm_agp_init();
		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) &&
		    dev->agp == NULL) {
			DRM_ERROR("Card isn't AGP, or couldn't initialize "
			    "AGP.\n");
			retcode = ENOMEM;
			goto error;
		}
		if (dev->agp != NULL && dev->agp->info.ai_aperture_base != 0) {
			if (drm_mtrr_add(dev->agp->info.ai_aperture_base,
			    dev->agp->info.ai_aperture_size, DRM_MTRR_WC) == 0)
				dev->agp->mtrr = 1;
		}
	}

	retcode = drm_ctxbitmap_init(dev);
	if (retcode != 0) {
		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
		goto error;
	}

	dev->drw_unrhdr = new_unrhdr(1, INT_MAX, NULL);
	if (dev->drw_unrhdr == NULL) {
		DRM_ERROR("Couldn't allocate drawable number allocator\n");
		retcode = ENOMEM;
		goto error;
	}

	DRM_INFO("Initialized %s %d.%d.%d %s\n",
	    dev->driver->name,
	    dev->driver->major,
	    dev->driver->minor,
	    dev->driver->patchlevel,
	    dev->driver->date);

	return 0;

error:
	drm_sysctl_cleanup(dev);
	DRM_LOCK();
	drm_lastclose(dev);
	DRM_UNLOCK();
	destroy_dev(dev->devnode);

	mtx_destroy(&dev->drw_lock);
	mtx_destroy(&dev->vbl_lock);
	mtx_destroy(&dev->irq_lock);
	mtx_destroy(&dev->dev_lock);

	return retcode;
}

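/*
 * Reverse of drm_load(): tear down sysctls, the device node, the
 * context bitmap, the MTRR, vblank state, PCI memory resources, AGP
 * data, the driver's unload hook and the unit number allocators.
 */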
static void drm_unload(struct drm_device *dev)
{
	int i;

	DRM_DEBUG("\n");

	drm_sysctl_cleanup(dev);
	destroy_dev(dev->devnode);

	drm_ctxbitmap_cleanup(dev);

	if (dev->agp && dev->agp->mtrr) {
		int __unused retcode;

		retcode = drm_mtrr_del(0, dev->agp->info.ai_aperture_base,
		    dev->agp->info.ai_aperture_size, DRM_MTRR_WC);
		DRM_DEBUG("mtrr_del = %d\n", retcode);
	}

	drm_vblank_cleanup(dev);

	DRM_LOCK();
	drm_lastclose(dev);
	DRM_UNLOCK();

	/* Clean up PCI resources allocated by drm_bufs.c.  We're not really
	 * worried about resource consumption while the DRM is inactive (between
	 * lastclose and firstopen or unload) because these aren't actually
	 * taking up KVA, just keeping the PCI resource allocated.
	 */
	for (i = 0; i < DRM_MAX_PCI_RESOURCE; i++) {
		if (dev->pcir[i] == NULL)
			continue;
		bus_release_resource(dev->device, SYS_RES_MEMORY,
		    dev->pcirid[i], dev->pcir[i]);
		dev->pcir[i] = NULL;
	}

	if (dev->agp) {
		free(dev->agp, DRM_MEM_AGPLISTS);
		dev->agp = NULL;
	}

	if (dev->driver->unload != NULL) {
		DRM_LOCK();
		dev->driver->unload(dev);
		DRM_UNLOCK();
	}

	delete_unrhdr(dev->drw_unrhdr);
	delete_unrhdr(dev->map_unrhdr);

	drm_mem_uninit();

	if (pci_disable_busmaster(dev->device))
		DRM_ERROR("Request to disable bus-master failed.\n");

	mtx_destroy(&dev->drw_lock);
	mtx_destroy(&dev->vbl_lock);
	mtx_destroy(&dev->irq_lock);
	mtx_destroy(&dev->dev_lock);
}

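/*
 * DRM_IOCTL_VERSION handler: report the driver's version numbers and
 * copy its name, date and description strings out to user space.
 */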
int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_version *version = data;
	int len;

#define DRM_COPY( name, value )						\
	len = strlen( value );						\
	if ( len > name##_len ) len = name##_len;			\
	name##_len = strlen( value );					\
	if ( len && name ) {						\
		if ( DRM_COPY_TO_USER( name, value, len ) )		\
			return EFAULT;				\
	}

	version->version_major		= dev->driver->major;
	version->version_minor		= dev->driver->minor;
	version->version_patchlevel	= dev->driver->patchlevel;

	DRM_COPY(version->name, dev->driver->name);
	DRM_COPY(version->date, dev->driver->date);
	DRM_COPY(version->desc, dev->driver->desc);

	return 0;
}

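/*
 * Character-device open entry point: set up per-file state via
 * drm_open_helper() and run drm_firstopen() when this is the first
 * handle on the device.
 */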
int drm_open(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
{
	struct drm_device *dev = NULL;
	int retcode = 0;

	dev = kdev->si_drv1;

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	retcode = drm_open_helper(kdev, flags, fmt, p, dev);

	if (!retcode) {
		atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
		DRM_LOCK();
		device_busy(dev->device);
		if (!dev->open_count++)
			retcode = drm_firstopen(dev);
		DRM_UNLOCK();
	}

	return retcode;
}

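/*
 * Per-file-handle teardown: run the driver's preclose/postclose hooks,
 * release any hardware lock and DMA buffers held by the closing file,
 * and run drm_lastclose() once the last open handle goes away.
 */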
void drm_close(void *data)
{
	struct drm_file *file_priv = data;
	struct drm_device *dev = file_priv->dev;
	int retcode = 0;

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	DRM_LOCK();

	if (dev->driver->preclose != NULL)
		dev->driver->preclose(dev, file_priv);

	/* ========================================================
	 * Begin inline drm_release
	 */

	DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
	    DRM_CURRENTPID, (long)dev->device, dev->open_count);

	if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
	    && dev->lock.file_priv == file_priv) {
		DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
			  DRM_CURRENTPID,
			  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
		if (dev->driver->reclaim_buffers_locked != NULL)
			dev->driver->reclaim_buffers_locked(dev, file_priv);

		drm_lock_free(&dev->lock,
		    _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));

				/* FIXME: may require heavy-handed reset of
                                   hardware at this point, possibly
                                   processed via a callback to the X
                                   server. */
	} else if (dev->driver->reclaim_buffers_locked != NULL &&
	    dev->lock.hw_lock != NULL) {
		/* The lock is required to reclaim buffers */
		for (;;) {
			if (!dev->lock.hw_lock) {
				/* Device has been unregistered */
				retcode = EINTR;
				break;
			}
			if (drm_lock_take(&dev->lock, DRM_KERNEL_CONTEXT)) {
				dev->lock.file_priv = file_priv;
				dev->lock.lock_time = jiffies;
				atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
				break;	/* Got lock */
			}
			/* Contention */
			retcode = mtx_sleep((void *)&dev->lock.lock_queue,
			    &dev->dev_lock, PCATCH, "drmlk2", 0);
			if (retcode)
				break;
		}
		if (retcode == 0) {
			dev->driver->reclaim_buffers_locked(dev, file_priv);
			drm_lock_free(&dev->lock, DRM_KERNEL_CONTEXT);
		}
	}

	if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
	    !dev->driver->reclaim_buffers_locked)
		drm_reclaim_buffers(dev, file_priv);

	funsetown(&dev->buf_sigio);

	if (dev->driver->postclose != NULL)
		dev->driver->postclose(dev, file_priv);
	TAILQ_REMOVE(&dev->files, file_priv, link);
	free(file_priv, DRM_MEM_FILES);

	/* ========================================================
	 * End inline drm_release
	 */

	atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
	device_unbusy(dev->device);
	if (--dev->open_count == 0) {
		retcode = drm_lastclose(dev);
	}

	DRM_UNLOCK();
}

/* drm_ioctl is called whenever a process performs an ioctl on /dev/drm.
 */
int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
    DRM_STRUCTPROC *p)
{
	struct drm_device *dev = drm_get_device_from_kdev(kdev);
	int retcode = 0;
	drm_ioctl_desc_t *ioctl;
	int (*func)(struct drm_device *dev, void *data, struct drm_file *file_priv);
	int nr = DRM_IOCTL_NR(cmd);
	int is_driver_ioctl = 0;
	struct drm_file *file_priv;

	retcode = devfs_get_cdevpriv((void **)&file_priv);
	if (retcode != 0) {
		DRM_ERROR("can't find authenticator\n");
		return EINVAL;
	}

	atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
	++file_priv->ioctl_count;

	DRM_DEBUG("pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
	    DRM_CURRENTPID, cmd, nr, (long)dev->device,
	    file_priv->authenticated);

	switch (cmd) {
	case FIONBIO:
	case FIOASYNC:
		return 0;

	case FIOSETOWN:
		return fsetown(*(int *)data, &dev->buf_sigio);

	case FIOGETOWN:
		*(int *) data = fgetown(&dev->buf_sigio);
		return 0;
	}

	if (IOCGROUP(cmd) != DRM_IOCTL_BASE) {
		DRM_DEBUG("Bad ioctl group 0x%x\n", (int)IOCGROUP(cmd));
		return EINVAL;
	}

	ioctl = &drm_ioctls[nr];
	/* It's not a core DRM ioctl, try driver-specific. */
	if (ioctl->func == NULL && nr >= DRM_COMMAND_BASE) {
		/* The array entries begin at DRM_COMMAND_BASE ioctl nr */
		nr -= DRM_COMMAND_BASE;
		if (nr > dev->driver->max_ioctl) {
			DRM_DEBUG("Bad driver ioctl number, 0x%x (of 0x%x)\n",
			    nr, dev->driver->max_ioctl);
			return EINVAL;
		}
		ioctl = &dev->driver->ioctls[nr];
		is_driver_ioctl = 1;
	}
	func = ioctl->func;

	if (func == NULL) {
		DRM_DEBUG("no function\n");
		return EINVAL;
	}

	if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(p)) ||
	    ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
	    ((ioctl->flags & DRM_MASTER) && !file_priv->master))
		return EACCES;

	if (is_driver_ioctl) {
		DRM_LOCK();
		/* shared code returns -errno */
		retcode = -func(dev, data, file_priv);
		DRM_UNLOCK();
	} else {
		retcode = func(dev, data, file_priv);
	}

	if (retcode != 0)
		DRM_DEBUG("    returning %d\n", retcode);

	return retcode;
}

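/*
 * Return the shared-memory map that contains the hardware lock (the
 * SAREA), or NULL if the device has none.
 */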
drm_local_map_t *drm_getsarea(struct drm_device *dev)
{
	drm_local_map_t *map;

	DRM_SPINLOCK_ASSERT(&dev->dev_lock);
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (map->type == _DRM_SHM && (map->flags & _DRM_CONTAINS_LOCK))
			return map;
	}

	return NULL;
}

#if DRM_LINUX

#include <sys/sysproto.h>

MODULE_DEPEND(DRIVER_NAME, linux, 1, 1, 1);

#define LINUX_IOCTL_DRM_MIN		0x6400
#define LINUX_IOCTL_DRM_MAX		0x64ff

static linux_ioctl_function_t drm_linux_ioctl;
static struct linux_ioctl_handler drm_handler = {drm_linux_ioctl,
    LINUX_IOCTL_DRM_MIN, LINUX_IOCTL_DRM_MAX};

SYSINIT(drm_register, SI_SUB_KLD, SI_ORDER_MIDDLE,
    linux_ioctl_register_handler, &drm_handler);
SYSUNINIT(drm_unregister, SI_SUB_KLD, SI_ORDER_MIDDLE,
    linux_ioctl_unregister_handler, &drm_handler);

/* The bits for in/out are switched on Linux */
#define LINUX_IOC_IN	IOC_OUT
#define LINUX_IOC_OUT	IOC_IN

static int
drm_linux_ioctl(DRM_STRUCTPROC *p, struct linux_ioctl_args* args)
{
	int error;
	int cmd = args->cmd;

	args->cmd &= ~(LINUX_IOC_IN | LINUX_IOC_OUT);
	if (cmd & LINUX_IOC_IN)
		args->cmd |= IOC_IN;
	if (cmd & LINUX_IOC_OUT)
		args->cmd |= IOC_OUT;

	error = ioctl(p, (struct ioctl_args *)args);

	return error;
}
#endif /* DRM_LINUX */