/* tdfx_drv.c -- tdfx driver -*- linux-c -*-
 * Created: Thu Oct  7 10:38:32 1999 by faith@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Daryll Strauss <daryll@valinux.com>
 *
 */

#include <linux/config.h>
#include "drmP.h"
#include "tdfx_drv.h"

#define TDFX_NAME	 "tdfx"
#define TDFX_DESC	 "3dfx Banshee/Voodoo3+"
#define TDFX_DATE	 "20000928"
#define TDFX_MAJOR	 1
#define TDFX_MINOR	 0
#define TDFX_PATCHLEVEL  0

static drm_device_t	      tdfx_device;
drm_ctx_t	              tdfx_res_ctx;

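/* File operations for the /dev/drm minor owned by this driver: open,
 * release, and ioctl go through driver-specific wrappers below, while the
 * remaining entry points are serviced directly by the DRM core. */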
static struct file_operations tdfx_fops = {
#if LINUX_VERSION_CODE >= 0x020400
				/* This started being used during 2.4.0-test */
	owner:   THIS_MODULE,
#endif
	open:	 tdfx_open,
	flush:	 drm_flush,
	release: tdfx_release,
	ioctl:	 tdfx_ioctl,
	mmap:	 drm_mmap,
	read:	 drm_read,
	fasync:	 drm_fasync,
	poll:	 drm_poll,
};

static struct miscdevice      tdfx_misc = {
	minor: MISC_DYNAMIC_MINOR,
	name:  TDFX_NAME,
	fops:  &tdfx_fops,
};

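/* Ioctl dispatch table, indexed by ioctl number.  The two flags after each
 * handler (in drmP.h these are auth_needed and root_only, in that order)
 * are checked by tdfx_ioctl(): the first requires an authenticated client
 * (priv->authenticated), the second requires CAP_SYS_ADMIN. */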
static drm_ioctl_desc_t	      tdfx_ioctls[] = {
	[DRM_IOCTL_NR(DRM_IOCTL_VERSION)]    = { tdfx_version,	  0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique,	  0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)]  = { drm_getmagic,	  0, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)]  = { drm_irq_busid,	  0, 1 },

	[DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_BLOCK)]	     = { drm_block,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)]    = { drm_unblock,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]    = { drm_addmap,	  1, 1 },

	[DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)]    = { tdfx_addctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)]     = { tdfx_rmctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)]    = { tdfx_modctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)]    = { tdfx_getctx,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { tdfx_switchctx,  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)]    = { tdfx_newctx,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)]    = { tdfx_resctx,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)]   = { drm_adddraw,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)]    = { drm_rmdraw,	  1, 1 },
	[DRM_IOCTL_NR(DRM_IOCTL_LOCK)]	     = { tdfx_lock,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)]     = { tdfx_unlock,	  1, 0 },
	[DRM_IOCTL_NR(DRM_IOCTL_FINISH)]     = { drm_finish,	  1, 0 },
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)]   = {drm_agp_acquire, 1, 1},
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)]   = {drm_agp_release, 1, 1},
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)]    = {drm_agp_enable,  1, 1},
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)]      = {drm_agp_info,    1, 1},
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)]     = {drm_agp_alloc,   1, 1},
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)]      = {drm_agp_free,    1, 1},
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)]      = {drm_agp_bind,    1, 1},
	[DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)]    = {drm_agp_unbind,  1, 1},
#endif
};
#define TDFX_IOCTL_COUNT DRM_ARRAY_SIZE(tdfx_ioctls)

#ifdef MODULE
static char		      *tdfx = NULL;
#endif

MODULE_AUTHOR("VA Linux Systems, Inc.");
MODULE_LICENSE("GPL and additional rights");
MODULE_DESCRIPTION("tdfx");
MODULE_PARM(tdfx, "s");

#ifndef MODULE
/* tdfx_options is called by the kernel to parse command-line options
 * passed via the boot-loader (e.g., LILO).  It calls the insmod option
 * routine, drm_parse_options.
 */

static int __init tdfx_options(char *str)
{
	drm_parse_options(str);
	return 1;
}

__setup("tdfx=", tdfx_options);
#endif

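/* tdfx_setup initializes the per-device bookkeeping (counters, hash lists,
 * lock state, buffer pointers).  It is called from tdfx_open() on the
 * 0 -> 1 open-count transition. */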
static int tdfx_setup(drm_device_t *dev)
{
	int i;

	atomic_set(&dev->ioctl_count, 0);
	atomic_set(&dev->vma_count, 0);
	dev->buf_use	  = 0;
	atomic_set(&dev->buf_alloc, 0);

	atomic_set(&dev->total_open, 0);
	atomic_set(&dev->total_close, 0);
	atomic_set(&dev->total_ioctl, 0);
	atomic_set(&dev->total_irq, 0);
	atomic_set(&dev->total_ctx, 0);
	atomic_set(&dev->total_locks, 0);
	atomic_set(&dev->total_unlocks, 0);
	atomic_set(&dev->total_contends, 0);
	atomic_set(&dev->total_sleeps, 0);

	for (i = 0; i < DRM_HASH_SIZE; i++) {
		dev->magiclist[i].head = NULL;
		dev->magiclist[i].tail = NULL;
	}
	dev->maplist	    = NULL;
	dev->map_count	    = 0;
	dev->vmalist	    = NULL;
	dev->lock.hw_lock   = NULL;
	init_waitqueue_head(&dev->lock.lock_queue);
	dev->queue_count    = 0;
	dev->queue_reserved = 0;
	dev->queue_slots    = 0;
	dev->queuelist	    = NULL;
	dev->irq	    = 0;
	dev->context_flag   = 0;
	dev->interrupt_flag = 0;
	dev->dma            = 0;
	dev->dma_flag	    = 0;
	dev->last_context   = 0;
	dev->last_switch    = 0;
	dev->last_checked   = 0;
	init_timer(&dev->timer);
	init_waitqueue_head(&dev->context_wait);

	dev->ctx_start	    = 0;
	dev->lck_start	    = 0;

	dev->buf_rp	  = dev->buf;
	dev->buf_wp	  = dev->buf;
	dev->buf_end	  = dev->buf + DRM_BSZ;
	dev->buf_async	  = NULL;
	init_waitqueue_head(&dev->buf_readers);
	init_waitqueue_head(&dev->buf_writers);

	tdfx_res_ctx.handle=-1;

	DRM_DEBUG("\n");

	/* The kernel's context could be created here, but is now created
	   in drm_dma_enqueue.	This is more resource-efficient for
	   hardware that does not do DMA, but may mean that
	   drm_select_queue fails between the time the interrupt is
	   initialized and the time the queues are initialized. */

	return 0;
}

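/* tdfx_takedown is the counterpart of tdfx_setup: it frees the magic, vma,
 * and map lists and releases AGP and MTRR resources.  It is called from
 * tdfx_release() when the last file handle is closed, and from
 * tdfx_init()/tdfx_cleanup() on error and at unload. */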
static int tdfx_takedown(drm_device_t *dev)
{
	int		  i;
	drm_magic_entry_t *pt, *next;
	drm_map_t	  *map;
	drm_vma_entry_t	  *vma, *vma_next;

	DRM_DEBUG("\n");

	down(&dev->struct_sem);
	del_timer(&dev->timer);

	if (dev->devname) {
		drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
		dev->devname = NULL;
	}

	if (dev->unique) {
		drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
		dev->unique = NULL;
		dev->unique_len = 0;
	}
				/* Clear pid list */
	for (i = 0; i < DRM_HASH_SIZE; i++) {
		for (pt = dev->magiclist[i].head; pt; pt = next) {
			next = pt->next;
			drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
		}
		dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
	}
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
				/* Clear AGP information */
	if (dev->agp) {
		drm_agp_mem_t *temp;
		drm_agp_mem_t *temp_next;

		temp = dev->agp->memory;
		while(temp != NULL) {
			temp_next = temp->next;
			drm_free_agp(temp->memory, temp->pages);
			drm_free(temp, sizeof(*temp), DRM_MEM_AGPLISTS);
			temp = temp_next;
		}
		if (dev->agp->acquired) _drm_agp_release();
	}
#endif
				/* Clear vma list (only built for debugging) */
	if (dev->vmalist) {
		for (vma = dev->vmalist; vma; vma = vma_next) {
			vma_next = vma->next;
			drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
		}
		dev->vmalist = NULL;
	}

				/* Clear map area and mtrr information */
	if (dev->maplist) {
		for (i = 0; i < dev->map_count; i++) {
			map = dev->maplist[i];
			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
#ifdef CONFIG_MTRR
				if (map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
#endif
				drm_ioremapfree(map->handle, map->size);
				break;
			case _DRM_SHM:
				drm_free_pages((unsigned long)map->handle,
					       drm_order(map->size)
					       - PAGE_SHIFT,
					       DRM_MEM_SAREA);
				break;
			case _DRM_AGP:
				/* Do nothing here, because this is all
				   handled in the AGP/GART driver. */
				break;
			}
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}
		drm_free(dev->maplist,
			 dev->map_count * sizeof(*dev->maplist),
			 DRM_MEM_MAPS);
		dev->maplist   = NULL;
		dev->map_count = 0;
	}

	if (dev->lock.hw_lock) {
		dev->lock.hw_lock    = NULL; /* SHM removed */
		dev->lock.pid	     = 0;
		wake_up_interruptible(&dev->lock.lock_queue);
	}
	up(&dev->struct_sem);

	return 0;
}

/* tdfx_init is called via init_module at module load time, or via
 * linux/init/main.c (this is not currently supported). */

static int __init tdfx_init(void)
{
	int		      retcode;
	drm_device_t	      *dev = &tdfx_device;

	DRM_DEBUG("\n");

	memset((void *)dev, 0, sizeof(*dev));
	dev->count_lock	  = SPIN_LOCK_UNLOCKED;
	sema_init(&dev->struct_sem, 1);

#ifdef MODULE
	drm_parse_options(tdfx);
#endif

	if ((retcode = misc_register(&tdfx_misc))) {
		DRM_ERROR("Cannot register \"%s\"\n", TDFX_NAME);
		return retcode;
	}
	dev->device = MKDEV(MISC_MAJOR, tdfx_misc.minor);
	dev->name   = TDFX_NAME;

	drm_mem_init();
	drm_proc_init(dev);
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
	dev->agp    = drm_agp_init();
#endif
	if((retcode = drm_ctxbitmap_init(dev))) {
		DRM_ERROR("Cannot allocate memory for context bitmap.\n");
		drm_proc_cleanup();
		misc_deregister(&tdfx_misc);
		tdfx_takedown(dev);
		return retcode;
	}

	DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
		 TDFX_NAME,
		 TDFX_MAJOR,
		 TDFX_MINOR,
		 TDFX_PATCHLEVEL,
		 TDFX_DATE,
		 tdfx_misc.minor);

	return 0;
}

/* tdfx_cleanup is called via cleanup_module at module unload time. */

static void __exit tdfx_cleanup(void)
{
	drm_device_t	      *dev = &tdfx_device;

	DRM_DEBUG("\n");

	drm_proc_cleanup();
	if (misc_deregister(&tdfx_misc)) {
		DRM_ERROR("Cannot unload module\n");
	} else {
		DRM_INFO("Module unloaded\n");
	}
	drm_ctxbitmap_cleanup(dev);
	tdfx_takedown(dev);
#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
	if (dev->agp) {
		drm_agp_uninit();
		drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
		dev->agp = NULL;
	}
#endif
}

module_init(tdfx_init);
module_exit(tdfx_cleanup);

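/* tdfx_version returns the driver name, date, description, and version
 * numbers defined at the top of this file.  A rough sketch (not part of
 * this driver) of how a userspace client might call it through the
 * DRM_IOCTL_VERSION ioctl on an open /dev/drm file descriptor fd:
 *
 *	drm_version_t v;
 *	char name[64], date[64], desc[64];
 *	memset(&v, 0, sizeof(v));
 *	v.name_len = sizeof(name); v.name = name;
 *	v.date_len = sizeof(date); v.date = date;
 *	v.desc_len = sizeof(desc); v.desc = desc;
 *	if (!ioctl(fd, DRM_IOCTL_VERSION, &v))
 *		printf("%d.%d.%d\n", v.version_major,
 *		       v.version_minor, v.version_patchlevel);
 */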
int tdfx_version(struct inode *inode, struct file *filp, unsigned int cmd,
		  unsigned long arg)
{
	drm_version_t version;
	int	      len;

	if (copy_from_user(&version,
			   (drm_version_t *)arg,
			   sizeof(version)))
		return -EFAULT;

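	/* DRM_COPY copies at most name##_len bytes of the string into the
	 * user buffer, then stores the full string length back in
	 * name##_len so the caller can detect truncation. */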
#define DRM_COPY(name,value)				     \
	len = strlen(value);				     \
	if (len > name##_len) len = name##_len;		     \
	name##_len = strlen(value);			     \
	if (len && name) {				     \
		if (copy_to_user(name, value, len))	     \
			return -EFAULT;			     \
	}

	version.version_major	   = TDFX_MAJOR;
	version.version_minor	   = TDFX_MINOR;
	version.version_patchlevel = TDFX_PATCHLEVEL;

	DRM_COPY(version.name, TDFX_NAME);
	DRM_COPY(version.date, TDFX_DATE);
	DRM_COPY(version.desc, TDFX_DESC);

	if (copy_to_user((drm_version_t *)arg,
			 &version,
			 sizeof(version)))
		return -EFAULT;
	return 0;
}

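/* tdfx_open hands the open off to the DRM core and, on the 0 -> 1
 * open-count transition, initializes the device via tdfx_setup(). */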
int tdfx_open(struct inode *inode, struct file *filp)
{
	drm_device_t  *dev    = &tdfx_device;
	int	      retcode = 0;

	DRM_DEBUG("open_count = %d\n", dev->open_count);
	if (!(retcode = drm_open_helper(inode, filp, dev))) {
#if LINUX_VERSION_CODE < 0x020333
		MOD_INC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
		atomic_inc(&dev->total_open);
		spin_lock(&dev->count_lock);
		if (!dev->open_count++) {
			spin_unlock(&dev->count_lock);
			return tdfx_setup(dev);
		}
		spin_unlock(&dev->count_lock);
	}
	return retcode;
}

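/* tdfx_release lets the DRM core close the file handle and, when the last
 * reference goes away, tears the device down via tdfx_takedown(); it
 * refuses the teardown with -EBUSY while ioctls are still in flight. */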
int tdfx_release(struct inode *inode, struct file *filp)
{
	drm_file_t    *priv   = filp->private_data;
	drm_device_t  *dev;
	int	      retcode = 0;

	lock_kernel();
	dev = priv->dev;

	DRM_DEBUG("open_count = %d\n", dev->open_count);
	if (!(retcode = drm_release(inode, filp))) {
#if LINUX_VERSION_CODE < 0x020333
		MOD_DEC_USE_COUNT; /* Needed before Linux 2.3.51 */
#endif
		atomic_inc(&dev->total_close);
		spin_lock(&dev->count_lock);
		if (!--dev->open_count) {
			if (atomic_read(&dev->ioctl_count) || dev->blocked) {
				DRM_ERROR("Device busy: %d %d\n",
					  atomic_read(&dev->ioctl_count),
					  dev->blocked);
				spin_unlock(&dev->count_lock);
				unlock_kernel();
				return -EBUSY;
			}
			spin_unlock(&dev->count_lock);
			unlock_kernel();
			return tdfx_takedown(dev);
		}
		spin_unlock(&dev->count_lock);
	}

	unlock_kernel();
	return retcode;
}

/* tdfx_ioctl is called whenever a process performs an ioctl on /dev/drm. */

int tdfx_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	int		 nr	 = DRM_IOCTL_NR(cmd);
	drm_file_t	 *priv	 = filp->private_data;
	drm_device_t	 *dev	 = priv->dev;
	int		 retcode = 0;
	drm_ioctl_desc_t *ioctl;
	drm_ioctl_t	 *func;

	atomic_inc(&dev->ioctl_count);
	atomic_inc(&dev->total_ioctl);
	++priv->ioctl_count;

	DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n",
		  current->pid, cmd, nr, dev->device, priv->authenticated);

	if (nr >= TDFX_IOCTL_COUNT) {
		retcode = -EINVAL;
	} else {
		ioctl	  = &tdfx_ioctls[nr];
		func	  = ioctl->func;

		if (!func) {
			DRM_DEBUG("no function\n");
			retcode = -EINVAL;
		} else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN))
			    || (ioctl->auth_needed && !priv->authenticated)) {
			retcode = -EACCES;
		} else {
			retcode = (func)(inode, filp, cmd, arg);
		}
	}

	atomic_dec(&dev->ioctl_count);
	return retcode;
}

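/* tdfx_lock implements DRM_IOCTL_LOCK: it sleeps on lock_queue until the
 * hardware lock can be taken for the requesting context, then blocks the
 * job-control signals (SIGSTOP, SIGTSTP, SIGTTIN, SIGTTOU) so the process
 * cannot be stopped while it holds the lock. */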
int tdfx_lock(struct inode *inode, struct file *filp, unsigned int cmd,
	      unsigned long arg)
{
        drm_file_t        *priv   = filp->private_data;
        drm_device_t      *dev    = priv->dev;
        DECLARE_WAITQUEUE(entry, current);
        int               ret   = 0;
        drm_lock_t        lock;
#if DRM_DMA_HISTOGRAM
        cycles_t          start;

        dev->lck_start = start = get_cycles();
#endif

        if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
		return -EFAULT;

        if (lock.context == DRM_KERNEL_CONTEXT) {
                DRM_ERROR("Process %d using kernel context %d\n",
                          current->pid, lock.context);
                return -EINVAL;
        }

        DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
                  lock.context, current->pid, dev->lock.hw_lock->lock,
                  lock.flags);


        if (!ret) {
                add_wait_queue(&dev->lock.lock_queue, &entry);
                for (;;) {
                        current->state = TASK_INTERRUPTIBLE;
                        if (!dev->lock.hw_lock) {
                                /* Device has been unregistered */
                                ret = -EINTR;
                                break;
                        }
                        if (drm_lock_take(&dev->lock.hw_lock->lock,
                                          lock.context)) {
                                dev->lock.pid       = current->pid;
                                dev->lock.lock_time = jiffies;
                                atomic_inc(&dev->total_locks);
                                break;  /* Got lock */
                        }

                                /* Contention */
                        atomic_inc(&dev->total_sleeps);
			yield();
                        if (signal_pending(current)) {
                                ret = -ERESTARTSYS;
                                break;
                        }
                }
                current->state = TASK_RUNNING;
                remove_wait_queue(&dev->lock.lock_queue, &entry);
        }


        if (!ret) {
		sigemptyset(&dev->sigmask);
		sigaddset(&dev->sigmask, SIGSTOP);
		sigaddset(&dev->sigmask, SIGTSTP);
		sigaddset(&dev->sigmask, SIGTTIN);
		sigaddset(&dev->sigmask, SIGTTOU);
		dev->sigdata.context = lock.context;
		dev->sigdata.lock    = dev->lock.hw_lock;
		block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);

                if (lock.flags & _DRM_LOCK_READY) {
				/* Wait for space in DMA/FIFO */
		}
                if (lock.flags & _DRM_LOCK_QUIESCENT) {
				/* Make hardware quiescent */
		}
        }

#if LINUX_VERSION_CODE < 0x020400
	if (lock.context != tdfx_res_ctx.handle) {
		current->counter = 5;
		current->priority = DEF_PRIORITY/4;
	}
#endif
        DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");

#if DRM_DMA_HISTOGRAM
        atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]);
#endif

        return ret;
}

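/* tdfx_unlock implements DRM_IOCTL_UNLOCK: it transfers the hardware lock
 * back to the kernel context, frees it if no context switch is pending,
 * and re-enables the signals blocked in tdfx_lock(). */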
int tdfx_unlock(struct inode *inode, struct file *filp, unsigned int cmd,
		 unsigned long arg)
{
	drm_file_t	  *priv	  = filp->private_data;
	drm_device_t	  *dev	  = priv->dev;
	drm_lock_t	  lock;

	if (copy_from_user(&lock, (drm_lock_t *)arg, sizeof(lock)))
		return -EFAULT;

	if (lock.context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  current->pid, lock.context);
		return -EINVAL;
	}

	DRM_DEBUG("%d frees lock (%d holds)\n",
		  lock.context,
		  _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
	atomic_inc(&dev->total_unlocks);
	if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
		atomic_inc(&dev->total_contends);
	drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
	if (!dev->context_flag) {
		if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
				  DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("\n");
		}
	}

#if LINUX_VERSION_CODE < 0x020400
	if (lock.context != tdfx_res_ctx.handle) {
		current->counter = 5;
		current->priority = DEF_PRIORITY;
	}
#endif

	unblock_all_signals();
	return 0;
}