/* radeon_drv.c -- ATI Radeon driver -*- linux-c -*-
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Kevin E. Martin <martin@valinux.com>
 *          Rickard E. (Rik) Faith <faith@valinux.com>
 *
 */

#include <linux/config.h>
#include "drmP.h"
#include "radeon_drv.h"

#define RADEON_NAME		"radeon"
#define RADEON_DESC		"ATI Radeon"
#define RADEON_DATE		"20010105"
#define RADEON_MAJOR		1
#define RADEON_MINOR		0
#define RADEON_PATCHLEVEL	0

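/* There is a single, statically allocated Radeon device.  radeon_res_ctx
 * holds the driver's reserved-context bookkeeping; its handle is set to -1
 * in radeon_setup() and consulted by radeon_lock()/radeon_unlock() below.
 */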
static drm_device_t	      radeon_device;
drm_ctx_t		      radeon_res_ctx;

static struct file_operations radeon_fops = {
#if LINUX_VERSION_CODE >= 0x020400
				/* This started being used during 2.4.0-test */
	owner:   THIS_MODULE,
#endif
	open:	 radeon_open,
	flush:	 drm_flush,
	release: radeon_release,
	ioctl:	 radeon_ioctl,
	mmap:	 drm_mmap,
	read:	 drm_read,
	fasync:	 drm_fasync,
	poll:	 drm_poll,
};

static struct miscdevice      radeon_misc = {
	minor: MISC_DYNAMIC_MINOR,
	name:  RADEON_NAME,
	fops:  &radeon_fops,
};

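/* Ioctl dispatch table, indexed by ioctl number.  Each entry holds the
 * handler together with the auth_needed and root_only flags that
 * radeon_ioctl() checks before calling the handler.
 */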
static drm_ioctl_desc_t	      radeon_ioctls[] = {
	[DRM_IOCTL_NR(DRM_IOCTL_VERSION)]       = { radeon_version,	0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)]    = { drm_getunique,	0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)]     = { drm_getmagic,	0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)]     = { drm_irq_busid,	0, 1 },

	[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)]    = { drm_setunique,	1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)]	        = { drm_block,		1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)]       = { drm_unblock,	1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)]    = { drm_authmagic,	1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]       = { drm_addmap,	1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)]      = { radeon_addbufs,	1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)]     = { drm_markbufs,	1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)]     = { drm_infobufs,	1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)]      = { radeon_mapbufs,	1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)]     = { drm_freebufs,	1, 0 },

	[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)]       = { radeon_addctx,	1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)]        = { radeon_rmctx,	1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)]       = { radeon_modctx,	1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)]       = { radeon_getctx,	1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)]    = { radeon_switchctx,	1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)]       = { radeon_newctx,	1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)]       = { radeon_resctx,	1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)]      = { drm_adddraw,	1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)]       = { drm_rmdraw,	1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_DMA)]	        = { radeon_cp_buffers,	1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_LOCK)]	        = { radeon_lock,	1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)]        = { radeon_unlock,	1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_FINISH)]        = { drm_finish,	1, 0 },

#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)]   = { drm_agp_acquire,	1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)]   = { drm_agp_release,	1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)]    = { drm_agp_enable,	1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)]      = { drm_agp_info,	1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)]     = { drm_agp_alloc,	1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)]      = { drm_agp_free,	1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)]      = { drm_agp_bind,	1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)]    = { drm_agp_unbind,	1, 1 },
#endif

	[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_INIT)]  = { radeon_cp_init,   1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_START)] = { radeon_cp_start,  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_STOP)]  = { radeon_cp_stop,   1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_RESET)] = { radeon_cp_reset,  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CP_IDLE)]  = { radeon_cp_idle,   1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_RADEON_RESET)] = { radeon_engine_reset, 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_RADEON_FULLSCREEN)] = { radeon_fullscreen, 1, 0 },

	[DRM_IOCTL_NR(DRM_IOCTL_RADEON_SWAP)]    = { radeon_cp_swap,    1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_RADEON_CLEAR)]   = { radeon_cp_clear,   1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_RADEON_VERTEX)]  = { radeon_cp_vertex,  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDICES)] = { radeon_cp_indices, 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_RADEON_BLIT)]    = { radeon_cp_blit,    1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_RADEON_STIPPLE)] = { radeon_cp_stipple, 1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_RADEON_INDIRECT)]= { radeon_cp_indirect,1, 1 },
};
#define RADEON_IOCTL_COUNT DRM_ARRAY_SIZE(radeon_ioctls)

#ifdef MODULE
static char		      *radeon = NULL;
#endif

MODULE_AUTHOR("VA Linux Systems, Inc.");
MODULE_DESCRIPTION("radeon");
MODULE_LICENSE("GPL and additional rights");
MODULE_PARM(radeon, "s");

#ifndef MODULE
/* radeon_options is called by the kernel to parse command-line options
 * passed via the boot loader (e.g., LILO).  It calls the same option-parsing
 * routine that insmod uses, drm_parse_options.
 */

static int __init radeon_options(char *str)
{
	drm_parse_options(str);
	return 1;
}

__setup("radeon=", radeon_options);
#endif

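/* radeon_setup initializes the per-device state.  It is called from
 * radeon_open() when the device goes from zero to one openers.
 */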
static int radeon_setup(drm_device_t *dev)
{
	int i;

	atomic_set(&dev->ioctl_count, 0);
	atomic_set(&dev->vma_count, 0);
	dev->buf_use	  = 0;
	atomic_set(&dev->buf_alloc, 0);

	drm_dma_setup(dev);

	atomic_set(&dev->total_open, 0);
	atomic_set(&dev->total_close, 0);
	atomic_set(&dev->total_ioctl, 0);
	atomic_set(&dev->total_irq, 0);
	atomic_set(&dev->total_ctx, 0);
	atomic_set(&dev->total_locks, 0);
	atomic_set(&dev->total_unlocks, 0);
	atomic_set(&dev->total_contends, 0);
	atomic_set(&dev->total_sleeps, 0);

	for (i = 0; i < DRM_HASH_SIZE; i++) {
		dev->magiclist[i].head = NULL;
		dev->magiclist[i].tail = NULL;
	}
	dev->maplist	    = NULL;
	dev->map_count	    = 0;
	dev->vmalist	    = NULL;
	dev->lock.hw_lock   = NULL;
	init_waitqueue_head(&dev->lock.lock_queue);
	dev->queue_count    = 0;
	dev->queue_reserved = 0;
	dev->queue_slots    = 0;
	dev->queuelist	    = NULL;
	dev->irq	    = 0;
	dev->context_flag   = 0;
	dev->interrupt_flag = 0;
	dev->dma_flag	    = 0;
	dev->last_context   = 0;
	dev->last_switch    = 0;
	dev->last_checked   = 0;
	init_timer(&dev->timer);
	init_waitqueue_head(&dev->context_wait);

	dev->ctx_start	    = 0;
	dev->lck_start	    = 0;

	dev->buf_rp	    = dev->buf;
	dev->buf_wp	    = dev->buf;
	dev->buf_end	    = dev->buf + DRM_BSZ;
	dev->buf_async	    = NULL;
	init_waitqueue_head(&dev->buf_readers);
	init_waitqueue_head(&dev->buf_writers);

	radeon_res_ctx.handle = -1;

	DRM_DEBUG("\n");

	/* The kernel's context could be created here, but is now created
	   in drm_dma_enqueue.	This is more resource-efficient for
	   hardware that does not do DMA, but may mean that
	   drm_select_queue fails between the time the interrupt is
	   initialized and the time the queues are initialized. */

	return 0;
}

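/* radeon_takedown frees the per-device state built up while the device was
 * open.  It is called from radeon_release() when the last opener closes the
 * device, and from the radeon_init()/radeon_cleanup() error and unload paths.
 */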
static int radeon_takedown(drm_device_t *dev)
{
	int		  i;
	drm_magic_entry_t *pt, *next;
	drm_map_t	  *map;
	drm_vma_entry_t	  *vma, *vma_next;

	DRM_DEBUG("\n");

	down(&dev->struct_sem);
	del_timer(&dev->timer);

	if (dev->devname) {
		drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
		dev->devname = NULL;
	}

	if (dev->unique) {
		drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
		dev->unique = NULL;
		dev->unique_len = 0;
	}
				/* Clear pid list */
	for (i = 0; i < DRM_HASH_SIZE; i++) {
		for (pt = dev->magiclist[i].head; pt; pt = next) {
			next = pt->next;
			drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
		}
		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
	}

#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
				/* Clear AGP information */
	if (dev->agp) {
		drm_agp_mem_t *entry;
		drm_agp_mem_t *nexte;

				/* Remove AGP resources, but leave dev->agp
				   intact until radeon_cleanup is called. */
		for (entry = dev->agp->memory; entry; entry = nexte) {
			nexte = entry->next;
			if (entry->bound) drm_unbind_agp(entry->memory);
			drm_free_agp(entry->memory, entry->pages);
			drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
		}
		dev->agp->memory = NULL;

		if (dev->agp->acquired) _drm_agp_release();

		dev->agp->acquired = 0;
		dev->agp->enabled  = 0;
	}
#endif

				/* Clear vma list (only built for debugging) */
	if (dev->vmalist) {
		for (vma = dev->vmalist; vma; vma = vma_next) {
			vma_next = vma->next;
			drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
		}
		dev->vmalist = NULL;
	}

				/* Clear map area and mtrr information */
	if (dev->maplist) {
		for (i = 0; i < dev->map_count; i++) {
			map = dev->maplist[i];
			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
#ifdef CONFIG_MTRR
				if (map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
#endif
				drm_ioremapfree(map->handle, map->size);
				break;
			case _DRM_SHM:
				drm_free_pages((unsigned long)map->handle,
					       drm_order(map->size)
					       - PAGE_SHIFT,
					       DRM_MEM_SAREA);
				break;
			case _DRM_AGP:
				/* Do nothing here, because this is all
				   handled in the AGP/GART driver. */
				break;
			}
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}
		drm_free(dev->maplist,
			 dev->map_count * sizeof(*dev->maplist),
			 DRM_MEM_MAPS);
		dev->maplist   = NULL;
		dev->map_count = 0;
	}

	drm_dma_takedown(dev);

	dev->queue_count     = 0;
	if (dev->lock.hw_lock) {
		dev->lock.hw_lock    = NULL; /* SHM removed */
		dev->lock.pid	     = 0;
		wake_up_interruptible(&dev->lock.lock_queue);
	}
	up(&dev->struct_sem);

	return 0;
}

/* radeon_init is called via init_module at module load time, or via
 * linux/init/main.c (this is not currently supported). */

static int __init radeon_init(void)
{
	int		      retcode;
	drm_device_t	      *dev = &radeon_device;

	DRM_DEBUG("\n");

	memset((void *)dev, 0, sizeof(*dev));
	dev->count_lock	  = SPIN_LOCK_UNLOCKED;
	sema_init(&dev->struct_sem, 1);

#ifdef MODULE
	drm_parse_options(radeon);
#endif

	if ((retcode = misc_register(&radeon_misc))) {
		DRM_ERROR("Cannot register \"%s\"\n", RADEON_NAME);
		return retcode;
	}
	dev->device = MKDEV(MISC_MAJOR, radeon_misc.minor);
	dev->name   = RADEON_NAME;

	drm_mem_init();
	drm_proc_init(dev);

#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
	dev->agp    = drm_agp_init();
	if (dev->agp == NULL) {
		DRM_ERROR("Cannot initialize agpgart module.\n");
		drm_proc_cleanup();
		misc_deregister(&radeon_misc);
		radeon_takedown(dev);
		return -ENOMEM;
	}

#ifdef CONFIG_MTRR
	dev->agp->agp_mtrr = mtrr_add(dev->agp->agp_info.aper_base,
				      dev->agp->agp_info.aper_size*1024*1024,
				      MTRR_TYPE_WRCOMB,
				      1);
#endif
#endif

	if ((retcode = drm_ctxbitmap_init(dev))) {
		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
		drm_proc_cleanup();
		misc_deregister(&radeon_misc);
		radeon_takedown(dev);
		return retcode;
	}

	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
		 RADEON_NAME,
		 RADEON_MAJOR,
		 RADEON_MINOR,
		 RADEON_PATCHLEVEL,
		 RADEON_DATE,
		 radeon_misc.minor);

	return 0;
}

/* radeon_cleanup is called via cleanup_module at module unload time. */

static void __exit radeon_cleanup(void)
{
	drm_device_t	      *dev = &radeon_device;

	DRM_DEBUG("\n");

	drm_proc_cleanup();
	if (misc_deregister(&radeon_misc)) {
		DRM_ERROR("Cannot unload module\n");
	} else {
		DRM_INFO("Module unloaded\n");
	}
	drm_ctxbitmap_cleanup(dev);
	radeon_takedown(dev);
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
	if (dev->agp) {
		drm_agp_uninit();
		drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
		dev->agp = NULL;
	}
#endif
}

module_init(radeon_init);
module_exit(radeon_cleanup);

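/* radeon_version is the DRM_IOCTL_VERSION handler: it reports the driver's
 * version numbers and copies the name, date, and description strings into
 * the buffers supplied by user space.
 */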
int radeon_version(struct inode *inode, struct file *filp, unsigned int cmd,
		   unsigned long arg)
{
	drm_version_t version;
	int	      len;

	if (copy_from_user(&version,
			   (drm_version_t *)arg,
			   sizeof(version)))
		return -EFAULT;

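/* Copy a version string to user space: at most name##_len bytes are copied,
 * but the full string length is reported back so the caller can detect
 * truncation and retry with a larger buffer.
 */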
#define DRM_COPY(name,value)				     \
	len = strlen(value);				     \
	if (len > name##_len) len = name##_len;		     \
	name##_len = strlen(value);			     \
	if (len && name) {				     \
		if (copy_to_user(name, value, len))	     \
			return -EFAULT;			     \
	}

	version.version_major	   = RADEON_MAJOR;
	version.version_minor	   = RADEON_MINOR;
	version.version_patchlevel = RADEON_PATCHLEVEL;

	DRM_COPY(version.name, RADEON_NAME);
	DRM_COPY(version.date, RADEON_DATE);
	DRM_COPY(version.desc, RADEON_DESC);

	if (copy_to_user((drm_version_t *)arg,
			 &version,
			 sizeof(version)))
		return -EFAULT;
	return 0;
}

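/* radeon_open is the file-operations open handler.  The first successful
 * open of the device (open_count 0 -> 1) also runs radeon_setup().
 */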
int radeon_open(struct inode *inode, struct file *filp)
{
	drm_device_t  *dev    = &radeon_device;
	int	      retcode = 0;

	DRM_DEBUG("open_count = %d\n", dev->open_count);
	if (!(retcode = drm_open_helper(inode, filp, dev))) {
#if LINUX_VERSION_CODE < 0x020333
		MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
		atomic_inc(&dev->total_open);
		spin_lock(&dev->count_lock);
		if (!dev->open_count++) {
			spin_unlock(&dev->count_lock);
			return radeon_setup(dev);
		}
		spin_unlock(&dev->count_lock);
	}

	return retcode;
}

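/* radeon_release is the file-operations release handler.  Any page flipping
 * left enabled by the client is cleaned up first; when the last opener
 * closes the device (open_count drops to zero), radeon_takedown() is run.
 */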
int radeon_release(struct inode *inode, struct file *filp)
{
	drm_file_t    *priv   = filp->private_data;
	drm_device_t  *dev;
	int	      retcode = 0;

	lock_kernel();
	dev = priv->dev;

	DRM_DEBUG("open_count = %d\n", dev->open_count);

	/* Force the cleanup of page flipping when required */
	if ( dev->dev_private ) {
		drm_radeon_private_t *dev_priv = dev->dev_private;
		if ( dev_priv->page_flipping ) {
			radeon_do_cleanup_pageflip( dev );
		}
	}

	if (!(retcode = drm_release(inode, filp))) {
#if LINUX_VERSION_CODE < 0x020333
		MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
		atomic_inc(&dev->total_close);
		spin_lock(&dev->count_lock);
		if (!--dev->open_count) {
			if (atomic_read(&dev->ioctl_count) || dev->blocked) {
				DRM_ERROR("Device busy: %d %d\n",
					  atomic_read(&dev->ioctl_count),
					  dev->blocked);
				spin_unlock(&dev->count_lock);
				unlock_kernel();
				return -EBUSY;
			}
			spin_unlock(&dev->count_lock);
			unlock_kernel();
			return radeon_takedown(dev);
		}
		spin_unlock(&dev->count_lock);
	}

	unlock_kernel();
	return retcode;
}

/* radeon_ioctl is called whenever a process performs an ioctl on /dev/drm. */

int radeon_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
		 unsigned long arg)
{
	int		 nr	 = DRM_IOCTL_NR(cmd);
	drm_file_t	 *priv	 = filp->private_data;
	drm_device_t	 *dev	 = priv->dev;
	int		 retcode = 0;
	drm_ioctl_desc_t *ioctl;
	drm_ioctl_t	 *func;

	atomic_inc(&dev->ioctl_count);
	atomic_inc(&dev->total_ioctl);
	++priv->ioctl_count;

	DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
		  current->pid, cmd, nr, dev->device, priv->authenticated);

	if (nr >= RADEON_IOCTL_COUNT) {
		retcode = -EINVAL;
	} else {
		ioctl	  = &radeon_ioctls[nr];
		func	  = ioctl->func;

		if (!func) {
			DRM_DEBUG("no function\n");
			retcode = -EINVAL;
		} else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
			   || (ioctl->auth_needed && !priv->authenticated)) {
			retcode = -EACCES;
		} else {
			retcode = (func)(inode, filp, cmd, arg);
		}
	}

	atomic_dec(&dev->ioctl_count);
	return retcode;
}

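/* radeon_lock is the DRM_IOCTL_LOCK handler: acquire the hardware lock for
 * the given context, sleeping interruptibly on the lock queue while another
 * context holds it.  While the lock is held, the job-control signals
 * (SIGSTOP, SIGTSTP, SIGTTIN, SIGTTOU) are blocked; they are re-enabled by
 * radeon_unlock().
 */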
int radeon_lock(struct inode *inode, struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	drm_file_t	  *priv   = filp->private_data;
	drm_device_t	  *dev	  = priv->dev;
	DECLARE_WAITQUEUE(entry, current);
	int		  ret	  = 0;
	drm_lock_t	  lock;
#if DRM_DMA_HISTOGRAM
	cycles_t	  start;

	dev->lck_start = start = get_cycles();
#endif

	if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
		return -EFAULT;

	if (lock.context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  current->pid, lock.context);
		return -EINVAL;
	}

	DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
		  lock.context, current->pid, dev->lock.hw_lock->lock,
		  lock.flags);

	if (lock.context < 0 /* || lock.context >= dev->queue_count */)
		return -EINVAL;

	if (!ret) {
		add_wait_queue(&dev->lock.lock_queue, &entry);
		for (;;) {
			current->state = TASK_INTERRUPTIBLE;
			if (!dev->lock.hw_lock) {
				/* Device has been unregistered */
				ret = -EINTR;
				break;
			}
			if (drm_lock_take(&dev->lock.hw_lock->lock,
					  lock.context)) {
				dev->lock.pid	    = current->pid;
				dev->lock.lock_time = jiffies;
				atomic_inc(&dev->total_locks);
				break;	/* Got lock */
			}

				/* Contention */
			atomic_inc(&dev->total_sleeps);
			schedule();
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
		}
		current->state = TASK_RUNNING;
		remove_wait_queue(&dev->lock.lock_queue, &entry);
	}

	if (!ret) {
		sigemptyset(&dev->sigmask);
		sigaddset(&dev->sigmask, SIGSTOP);
		sigaddset(&dev->sigmask, SIGTSTP);
		sigaddset(&dev->sigmask, SIGTTIN);
		sigaddset(&dev->sigmask, SIGTTOU);
		dev->sigdata.context = lock.context;
		dev->sigdata.lock    = dev->lock.hw_lock;
		block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
		if (lock.flags & _DRM_LOCK_READY) {
				/* Wait for space in DMA/FIFO */
		}
		if (lock.flags & _DRM_LOCK_QUIESCENT) {
				/* Make hardware quiescent */
			DRM_DEBUG("not quiescent!\n");
		}
	}

#if LINUX_VERSION_CODE < 0x020400
	if (lock.context != radeon_res_ctx.handle) {
		current->counter  = 5;
		current->priority = DEF_PRIORITY/4;
	}
#endif
	DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");

#if DRM_DMA_HISTOGRAM
	atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]);
#endif

	return ret;
}

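/* radeon_unlock is the DRM_IOCTL_UNLOCK handler: release the hardware lock
 * (by transferring it to the kernel context and freeing it) and unblock the
 * signals that radeon_lock() blocked.
 */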
int radeon_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
		  unsigned long arg)
{
	drm_file_t	  *priv	  = filp->private_data;
	drm_device_t	  *dev	  = priv->dev;
	drm_lock_t	  lock;

	if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
		return -EFAULT;

	if (lock.context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  current->pid, lock.context);
		return -EINVAL;
	}

	DRM_DEBUG("%d frees lock (%d holds)\n",
		  lock.context,
		  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
	atomic_inc(&dev->total_unlocks);
	if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
		atomic_inc(&dev->total_contends);
	drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
	if (!dev->context_flag) {
		if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
				  DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("\n");
		}
	}

#if LINUX_VERSION_CODE < 0x020400
	if (lock.context != radeon_res_ctx.handle) {
		current->counter  = 5;
		current->priority = DEF_PRIORITY;
	}
#endif
	unblock_all_signals();
	return 0;
}
