1/**
2 * \file drm_bufs.c
3 * Generic buffer template
4 *
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
7 */
8
9/*
10 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
11 *
12 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
22 *
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
25 * Software.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
34 */
35
36#include <linux/vmalloc.h>
37#include "drmP.h"
38
39unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
40{
41	return pci_resource_start(dev->pdev, resource);
42}
43EXPORT_SYMBOL(drm_get_resource_start);
44
45unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
46{
47	return pci_resource_len(dev->pdev, resource);
48}
49
50EXPORT_SYMBOL(drm_get_resource_len);
51
52static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
53					     drm_local_map_t *map)
54{
55	struct list_head *list;
56
57	list_for_each(list, &dev->maplist->head) {
58		drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
59		if (entry->map && map->type == entry->map->type &&
60		    ((entry->map->offset == map->offset) ||
		     (map->type == _DRM_SHM && map->flags == _DRM_CONTAINS_LOCK))) {
62			return entry;
63		}
64	}
65
66	return NULL;
67}
68
69static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
70			  unsigned long user_token, int hashed_handle)
71{
72	int use_hashed_handle;
73#if (BITS_PER_LONG == 64)
74	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
75#elif (BITS_PER_LONG == 32)
76	use_hashed_handle = hashed_handle;
77#else
78#error Unsupported long size. Neither 64 nor 32 bits.
79#endif
80
81	if (!use_hashed_handle) {
82		int ret;
83		hash->key = user_token >> PAGE_SHIFT;
84		ret = drm_ht_insert_item(&dev->map_hash, hash);
85		if (ret != -EINVAL)
86			return ret;
87	}
88	return drm_ht_just_insert_please(&dev->map_hash, hash,
89					 user_token, 32 - PAGE_SHIFT - 3,
90					 0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
91}
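
/*
 * Illustration (not compiled) of how a map handle ends up in the hash
 * above.  On a 32-bit kernel, or for a 64-bit token whose high bits are
 * clear, the handle is simply the page-aligned user_token itself:
 *
 *	user_token = 0x00d30000;		(assumed example value)
 *	hash.key   = user_token >> PAGE_SHIFT;	(0xd30 with 4K pages)
 *	handle     = hash.key << PAGE_SHIFT;	(0x00d30000 again)
 *
 * Only when the token has bits above the low 32, or the plain key is
 * already taken, does drm_ht_just_insert_please() pick a free key in the
 * DRM_MAP_HASH_OFFSET range instead.
 */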
92
93/**
 * Core function to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param dev DRM device.
 * \param offset offset of the memory range.
 * \param size size of the memory range.
 * \param type type of the mapping.
 * \param flags mapping flags.
 * \param maplist on success, set to the new map list entry.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist. Adds MTRRs where
 * applicable and if supported by the kernel.
105 */
106static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
107			   unsigned int size, drm_map_type_t type,
108			   drm_map_flags_t flags, drm_map_list_t ** maplist)
109{
110	drm_map_t *map;
111	drm_map_list_t *list;
112	drm_dma_handle_t *dmah;
113	unsigned long user_token;
114	int ret;
115
116	map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
117	if (!map)
118		return -ENOMEM;
119
120	map->offset = offset;
121	map->size = size;
122	map->flags = flags;
123	map->type = type;
124
	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
129	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
130		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
131		return -EINVAL;
132	}
133	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
134		  map->offset, map->size, map->type);
135	if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
136		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
137		return -EINVAL;
138	}
139	map->mtrr = -1;
140	map->handle = NULL;
141
142	switch (map->type) {
143	case _DRM_REGISTERS:
144	case _DRM_FRAME_BUFFER:
145#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && \
146	!defined(__powerpc64__) && !defined(__x86_64__)
147		if (map->offset + (map->size-1) < map->offset ||
148		    map->offset < virt_to_phys(high_memory)) {
149			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
150			return -EINVAL;
151		}
152#endif
153#ifdef __alpha__
154		map->offset += dev->hose->mem_space->start;
155#endif
156		/* Some drivers preinitialize some maps, without the X Server
157		 * needing to be aware of it.  Therefore, we just return success
158		 * when the server tries to create a duplicate map.
159		 */
160		list = drm_find_matching_map(dev, map);
161		if (list != NULL) {
162			if (list->map->size != map->size) {
163				DRM_DEBUG("Matching maps of type %d with "
164					  "mismatched sizes, (%ld vs %ld)\n",
165					  map->type, map->size,
166					  list->map->size);
167				list->map->size = map->size;
168			}
169
170			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
171			*maplist = list;
172			return 0;
173		}
174
175		if (drm_core_has_MTRR(dev)) {
176			if (map->type == _DRM_FRAME_BUFFER ||
177			    (map->flags & _DRM_WRITE_COMBINING)) {
178				map->mtrr = mtrr_add(map->offset, map->size,
179						     MTRR_TYPE_WRCOMB, 1);
180			}
181		}
182		if (map->type == _DRM_REGISTERS)
183			map->handle = ioremap(map->offset, map->size);
184		break;
185	case _DRM_SHM:
186		list = drm_find_matching_map(dev, map);
187		if (list != NULL) {
			if (list->map->size != map->size) {
189				DRM_DEBUG("Matching maps of type %d with "
190					  "mismatched sizes, (%ld vs %ld)\n",
191					  map->type, map->size, list->map->size);
192				list->map->size = map->size;
193			}
194
195			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
196			*maplist = list;
197			return 0;
198		}
199		map->handle = vmalloc_user(map->size);
200		DRM_DEBUG("%lu %d %p\n",
201			  map->size, drm_order(map->size), map->handle);
202		if (!map->handle) {
203			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
204			return -ENOMEM;
205		}
206		map->offset = (unsigned long)map->handle;
207		if (map->flags & _DRM_CONTAINS_LOCK) {
208			/* Prevent a 2nd X Server from creating a 2nd lock */
209			if (dev->lock.hw_lock != NULL) {
210				vfree(map->handle);
211				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
212				return -EBUSY;
213			}
214			dev->sigdata.lock = dev->lock.hw_lock = map->handle;	/* Pointer to lock */
215		}
216		break;
217	case _DRM_AGP: {
218		drm_agp_mem_t *entry;
219		int valid = 0;
220
221		if (!drm_core_has_AGP(dev)) {
222			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
223			return -EINVAL;
224		}
225#ifdef __alpha__
226		map->offset += dev->hose->mem_space->start;
227#endif
		/* Note: dev->agp->base may actually be 0 when the DRM
		 * is not in control of AGP space.  But if user space is,
		 * it should already have added the AGP base itself.
		 */
232		map->offset += dev->agp->base;
233		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */
234
		/* This assumes the DRM is in total control of AGP space.
		 * That's not always the case, as AGP can be under the control
		 * of user space (e.g. the i810 driver).  In that case this
		 * loop is skipped, so we also check that dev->agp->memory is
		 * actually set, as well as that the range is invalid, before
		 * returning -EPERM.
		 */
241		for (entry = dev->agp->memory; entry; entry = entry->next) {
242			if ((map->offset >= entry->bound) &&
243			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
244				valid = 1;
245				break;
246			}
247		}
248		if (dev->agp->memory && !valid) {
249			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
250			return -EPERM;
251		}
252		DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);
253
254		break;
255	}
256	case _DRM_SCATTER_GATHER:
257		if (!dev->sg) {
258			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
259			return -EINVAL;
260		}
261		map->offset += (unsigned long)dev->sg->virtual;
262		break;
263	case _DRM_CONSISTENT:
		/* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64-bit variable first. */
268		dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
269		if (!dmah) {
270			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
271			return -ENOMEM;
272		}
273		map->handle = dmah->vaddr;
274		map->offset = (unsigned long)dmah->busaddr;
275		kfree(dmah);
276		break;
277	default:
278		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
279		return -EINVAL;
280	}
281
282	list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
283	if (!list) {
284		if (map->type == _DRM_REGISTERS)
285			iounmap(map->handle);
286		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
287		return -EINVAL;
288	}
289	memset(list, 0, sizeof(*list));
290	list->map = map;
291
292	mutex_lock(&dev->struct_mutex);
293	list_add(&list->head, &dev->maplist->head);
294
295	/* Assign a 32-bit handle */
296	/* We do it here so that dev->struct_mutex protects the increment */
297	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
298		map->offset;
299	ret = drm_map_handle(dev, &list->hash, user_token, 0);
300	if (ret) {
301		if (map->type == _DRM_REGISTERS)
302			iounmap(map->handle);
303		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
304		drm_free(list, sizeof(*list), DRM_MEM_MAPS);
305		mutex_unlock(&dev->struct_mutex);
306		return ret;
307	}
308
309	list->user_token = list->hash.key << PAGE_SHIFT;
310	mutex_unlock(&dev->struct_mutex);
311
312	*maplist = list;
313	return 0;
}
315
316int drm_addmap(drm_device_t * dev, unsigned int offset,
317	       unsigned int size, drm_map_type_t type,
318	       drm_map_flags_t flags, drm_local_map_t ** map_ptr)
319{
320	drm_map_list_t *list;
321	int rc;
322
323	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
324	if (!rc)
325		*map_ptr = list->map;
326	return rc;
327}
328
329EXPORT_SYMBOL(drm_addmap);
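
/*
 * Typical (hypothetical) driver-side use of drm_addmap(): map the MMIO
 * register BAR during driver initialization.  The resource index and the
 * dev_priv layout below are illustrative assumptions, not part of this
 * file:
 *
 *	drm_local_map_t *mmio;
 *	int ret;
 *
 *	ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
 *			 drm_get_resource_len(dev, 0),
 *			 _DRM_REGISTERS, _DRM_READ_ONLY, &mmio);
 *	if (ret)
 *		return ret;
 *	dev_priv->mmio = mmio;
 */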
330
331int drm_addmap_ioctl(struct inode *inode, struct file *filp,
332		     unsigned int cmd, unsigned long arg)
333{
334	drm_file_t *priv = filp->private_data;
335	drm_device_t *dev = priv->head->dev;
336	drm_map_t map;
337	drm_map_list_t *maplist;
338	drm_map_t __user *argp = (void __user *)arg;
339	int err;
340
341	if (!(filp->f_mode & 3))
342		return -EACCES;	/* Require read/write */
343
344	if (copy_from_user(&map, argp, sizeof(map))) {
345		return -EFAULT;
346	}
347
348	if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP))
349		return -EPERM;
350
351	err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
352			      &maplist);
353
354	if (err)
355		return err;
356
357	if (copy_to_user(argp, maplist->map, sizeof(drm_map_t)))
358		return -EFAULT;
359
	/* Avoid a warning on 64-bit; the cast isn't pretty, but the ioctl API is already set in stone. */
361	if (put_user((void *)(unsigned long)maplist->user_token, &argp->handle))
362		return -EFAULT;
363	return 0;
364}
365
366/**
 * Remove a map from the list and deallocate its resources if the mapping
 * isn't in use.
 *
 * \param dev DRM device.
 * \param map map to remove.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is still in use, and frees any associated resources
 * (such as MTRRs) if it is not.
379 *
380 * \sa drm_addmap
381 */
382int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
383{
384	struct list_head *list;
385	drm_map_list_t *r_list = NULL;
386	drm_dma_handle_t dmah;
387
388	/* Find the list entry for the map and remove it */
389	list_for_each(list, &dev->maplist->head) {
390		r_list = list_entry(list, drm_map_list_t, head);
391
392		if (r_list->map == map) {
393			list_del(list);
394			drm_ht_remove_key(&dev->map_hash,
395					  r_list->user_token >> PAGE_SHIFT);
396			drm_free(list, sizeof(*list), DRM_MEM_MAPS);
397			break;
398		}
399	}
400
401	/* List has wrapped around to the head pointer, or it's empty and we
402	 * didn't find anything.
403	 */
404	if (list == (&dev->maplist->head)) {
405		return -EINVAL;
406	}
407
408	switch (map->type) {
409	case _DRM_REGISTERS:
410		iounmap(map->handle);
411		/* FALLTHROUGH */
412	case _DRM_FRAME_BUFFER:
413		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
414			int retcode;
415			retcode = mtrr_del(map->mtrr, map->offset, map->size);
416			DRM_DEBUG("mtrr_del=%d\n", retcode);
417		}
418		break;
419	case _DRM_SHM:
420		vfree(map->handle);
421		break;
422	case _DRM_AGP:
423	case _DRM_SCATTER_GATHER:
424		break;
425	case _DRM_CONSISTENT:
426		dmah.vaddr = map->handle;
427		dmah.busaddr = map->offset;
428		dmah.size = map->size;
429		__drm_pci_free(dev, &dmah);
430		break;
431	}
432	drm_free(map, sizeof(*map), DRM_MEM_MAPS);
433
434	return 0;
435}
436
437int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
438{
439	int ret;
440
441	mutex_lock(&dev->struct_mutex);
442	ret = drm_rmmap_locked(dev, map);
443	mutex_unlock(&dev->struct_mutex);
444
445	return ret;
446}
447
448/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
449 * the last close of the device, and this is necessary for cleanup when things
450 * exit uncleanly.  Therefore, having userland manually remove mappings seems
451 * like a pointless exercise since they're going away anyway.
452 *
453 * One use case might be after addmap is allowed for normal users for SHM and
454 * gets used by drivers that the server doesn't need to care about.  This seems
455 * unlikely.
456 */
457int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
458		    unsigned int cmd, unsigned long arg)
459{
460	drm_file_t *priv = filp->private_data;
461	drm_device_t *dev = priv->head->dev;
462	drm_map_t request;
463	drm_local_map_t *map = NULL;
464	struct list_head *list;
465	int ret;
466
467	if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) {
468		return -EFAULT;
469	}
470
471	mutex_lock(&dev->struct_mutex);
472	list_for_each(list, &dev->maplist->head) {
473		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
474
475		if (r_list->map &&
476		    r_list->user_token == (unsigned long)request.handle &&
477		    r_list->map->flags & _DRM_REMOVABLE) {
478			map = r_list->map;
479			break;
480		}
481	}
482
	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
486	if (list == (&dev->maplist->head)) {
487		mutex_unlock(&dev->struct_mutex);
488		return -EINVAL;
489	}
490
491	if (!map) {
492		mutex_unlock(&dev->struct_mutex);
493		return -EINVAL;
494	}
495
496	/* Register and framebuffer maps are permanent */
497	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
498		mutex_unlock(&dev->struct_mutex);
499		return 0;
500	}
501
502	ret = drm_rmmap_locked(dev, map);
503
504	mutex_unlock(&dev->struct_mutex);
505
506	return ret;
507}
508
509/**
510 * Cleanup after an error on one of the addbufs() functions.
511 *
512 * \param dev DRM device.
513 * \param entry buffer entry where the error occurred.
514 *
515 * Frees any pages and buffers associated with the given entry.
516 */
517static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry)
518{
519	int i;
520
521	if (entry->seg_count) {
522		for (i = 0; i < entry->seg_count; i++) {
523			if (entry->seglist[i]) {
524				drm_pci_free(dev, entry->seglist[i]);
525			}
526		}
527		drm_free(entry->seglist,
528			 entry->seg_count *
529			 sizeof(*entry->seglist), DRM_MEM_SEGS);
530
531		entry->seg_count = 0;
532	}
533
534	if (entry->buf_count) {
535		for (i = 0; i < entry->buf_count; i++) {
536			if (entry->buflist[i].dev_private) {
537				drm_free(entry->buflist[i].dev_private,
538					 entry->buflist[i].dev_priv_size,
539					 DRM_MEM_BUFS);
540			}
541		}
542		drm_free(entry->buflist,
543			 entry->buf_count *
544			 sizeof(*entry->buflist), DRM_MEM_BUFS);
545
546		entry->buf_count = 0;
547	}
548}
549
550#if __OS_HAS_AGP
551/**
552 * Add AGP buffers for DMA transfers.
553 *
554 * \param dev drm_device_t to which the buffers are to be added.
555 * \param request pointer to a drm_buf_desc_t describing the request.
556 * \return zero on success or a negative number on failure.
557 *
558 * After some sanity checks creates a drm_buf structure for each buffer and
559 * reallocates the buffer list of the same size order to accommodate the new
560 * buffers.
561 */
562int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
563{
564	drm_device_dma_t *dma = dev->dma;
565	drm_buf_entry_t *entry;
566	drm_agp_mem_t *agp_entry;
567	drm_buf_t *buf;
568	unsigned long offset;
569	unsigned long agp_offset;
570	int count;
571	int order;
572	int size;
573	int alignment;
574	int page_order;
575	int total;
576	int byte_count;
577	int i, valid;
578	drm_buf_t **temp_buflist;
579
580	if (!dma)
581		return -EINVAL;
582
583	count = request->count;
584	order = drm_order(request->size);
585	size = 1 << order;
586
587	alignment = (request->flags & _DRM_PAGE_ALIGN)
588	    ? PAGE_ALIGN(size) : size;
589	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
590	total = PAGE_SIZE << page_order;
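
	/*
	 * Worked example of the sizing above (assuming 4K pages): a request
	 * for 16KB buffers gives order = 14 and size = 16384; with
	 * _DRM_PAGE_ALIGN set, alignment is also 16384, page_order = 2 and
	 * total = PAGE_SIZE << 2 = 16384, i.e. each buffer occupies a whole
	 * four-page allocation.
	 */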
591
592	byte_count = 0;
593	agp_offset = dev->agp->base + request->agp_start;
594
595	DRM_DEBUG("count:      %d\n", count);
596	DRM_DEBUG("order:      %d\n", order);
597	DRM_DEBUG("size:       %d\n", size);
598	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
599	DRM_DEBUG("alignment:  %d\n", alignment);
600	DRM_DEBUG("page_order: %d\n", page_order);
601	DRM_DEBUG("total:      %d\n", total);
602
603	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
604		return -EINVAL;
605	if (dev->queue_count)
606		return -EBUSY;	/* Not while in use */
607
608	/* Make sure buffers are located in AGP memory that we own */
609	valid = 0;
610	for (agp_entry = dev->agp->memory; agp_entry; agp_entry = agp_entry->next) {
611		if ((agp_offset >= agp_entry->bound) &&
612		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
613			valid = 1;
614			break;
615		}
616	}
617	if (dev->agp->memory && !valid) {
618		DRM_DEBUG("zone invalid\n");
619		return -EINVAL;
620	}
621	spin_lock(&dev->count_lock);
622	if (dev->buf_use) {
623		spin_unlock(&dev->count_lock);
624		return -EBUSY;
625	}
626	atomic_inc(&dev->buf_alloc);
627	spin_unlock(&dev->count_lock);
628
629	mutex_lock(&dev->struct_mutex);
630	entry = &dma->bufs[order];
631	if (entry->buf_count) {
632		mutex_unlock(&dev->struct_mutex);
633		atomic_dec(&dev->buf_alloc);
634		return -ENOMEM;	/* May only call once for each order */
635	}
636
637	if (count < 0 || count > 4096) {
638		mutex_unlock(&dev->struct_mutex);
639		atomic_dec(&dev->buf_alloc);
640		return -EINVAL;
641	}
642
643	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
644				   DRM_MEM_BUFS);
645	if (!entry->buflist) {
646		mutex_unlock(&dev->struct_mutex);
647		atomic_dec(&dev->buf_alloc);
648		return -ENOMEM;
649	}
650	memset(entry->buflist, 0, count * sizeof(*entry->buflist));
651
652	entry->buf_size = size;
653	entry->page_order = page_order;
654
655	offset = 0;
656
657	while (entry->buf_count < count) {
658		buf = &entry->buflist[entry->buf_count];
659		buf->idx = dma->buf_count + entry->buf_count;
660		buf->total = alignment;
661		buf->order = order;
662		buf->used = 0;
663
664		buf->offset = (dma->byte_count + offset);
665		buf->bus_address = agp_offset + offset;
666		buf->address = (void *)(agp_offset + offset);
667		buf->next = NULL;
668		buf->waiting = 0;
669		buf->pending = 0;
670		init_waitqueue_head(&buf->dma_wait);
671		buf->filp = NULL;
672
673		buf->dev_priv_size = dev->driver->dev_priv_size;
674		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
675		if (!buf->dev_private) {
676			/* Set count correctly so we free the proper amount. */
677			entry->buf_count = count;
678			drm_cleanup_buf_error(dev, entry);
679			mutex_unlock(&dev->struct_mutex);
680			atomic_dec(&dev->buf_alloc);
681			return -ENOMEM;
682		}
683		memset(buf->dev_private, 0, buf->dev_priv_size);
684
685		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
686
687		offset += alignment;
688		entry->buf_count++;
689		byte_count += PAGE_SIZE << page_order;
690	}
691
692	DRM_DEBUG("byte_count: %d\n", byte_count);
693
694	temp_buflist = drm_realloc(dma->buflist,
695				   dma->buf_count * sizeof(*dma->buflist),
696				   (dma->buf_count + entry->buf_count)
697				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
698	if (!temp_buflist) {
699		/* Free the entry because it isn't valid */
700		drm_cleanup_buf_error(dev, entry);
701		mutex_unlock(&dev->struct_mutex);
702		atomic_dec(&dev->buf_alloc);
703		return -ENOMEM;
704	}
705	dma->buflist = temp_buflist;
706
707	for (i = 0; i < entry->buf_count; i++) {
708		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
709	}
710
711	dma->buf_count += entry->buf_count;
712	dma->seg_count += entry->seg_count;
713	dma->page_count += byte_count >> PAGE_SHIFT;
714	dma->byte_count += byte_count;
715
716	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
717	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
718
719	mutex_unlock(&dev->struct_mutex);
720
721	request->count = entry->buf_count;
722	request->size = size;
723
724	dma->flags = _DRM_DMA_USE_AGP;
725
726	atomic_dec(&dev->buf_alloc);
727	return 0;
728}
729EXPORT_SYMBOL(drm_addbufs_agp);
730#endif				/* __OS_HAS_AGP */
731
732int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
733{
734	drm_device_dma_t *dma = dev->dma;
735	int count;
736	int order;
737	int size;
738	int total;
739	int page_order;
740	drm_buf_entry_t *entry;
741	drm_dma_handle_t *dmah;
742	drm_buf_t *buf;
743	int alignment;
744	unsigned long offset;
745	int i;
746	int byte_count;
747	int page_count;
748	unsigned long *temp_pagelist;
749	drm_buf_t **temp_buflist;
750
751	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
752		return -EINVAL;
753
754	if (!dma)
755		return -EINVAL;
756
757	if (!capable(CAP_SYS_ADMIN))
758		return -EPERM;
759
760	count = request->count;
761	order = drm_order(request->size);
762	size = 1 << order;
763
764	DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
765		  request->count, request->size, size, order, dev->queue_count);
766
767	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
768		return -EINVAL;
769	if (dev->queue_count)
770		return -EBUSY;	/* Not while in use */
771
772	alignment = (request->flags & _DRM_PAGE_ALIGN)
773	    ? PAGE_ALIGN(size) : size;
774	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
775	total = PAGE_SIZE << page_order;
776
777	spin_lock(&dev->count_lock);
778	if (dev->buf_use) {
779		spin_unlock(&dev->count_lock);
780		return -EBUSY;
781	}
782	atomic_inc(&dev->buf_alloc);
783	spin_unlock(&dev->count_lock);
784
785	mutex_lock(&dev->struct_mutex);
786	entry = &dma->bufs[order];
787	if (entry->buf_count) {
788		mutex_unlock(&dev->struct_mutex);
789		atomic_dec(&dev->buf_alloc);
790		return -ENOMEM;	/* May only call once for each order */
791	}
792
793	if (count < 0 || count > 4096) {
794		mutex_unlock(&dev->struct_mutex);
795		atomic_dec(&dev->buf_alloc);
796		return -EINVAL;
797	}
798
799	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
800				   DRM_MEM_BUFS);
801	if (!entry->buflist) {
802		mutex_unlock(&dev->struct_mutex);
803		atomic_dec(&dev->buf_alloc);
804		return -ENOMEM;
805	}
806	memset(entry->buflist, 0, count * sizeof(*entry->buflist));
807
808	entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
809				   DRM_MEM_SEGS);
810	if (!entry->seglist) {
811		drm_free(entry->buflist,
812			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
813		mutex_unlock(&dev->struct_mutex);
814		atomic_dec(&dev->buf_alloc);
815		return -ENOMEM;
816	}
817	memset(entry->seglist, 0, count * sizeof(*entry->seglist));
818
819	/* Keep the original pagelist until we know all the allocations
820	 * have succeeded
821	 */
822	temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
823				  * sizeof(*dma->pagelist), DRM_MEM_PAGES);
824	if (!temp_pagelist) {
825		drm_free(entry->buflist,
826			 count * sizeof(*entry->buflist), DRM_MEM_BUFS);
827		drm_free(entry->seglist,
828			 count * sizeof(*entry->seglist), DRM_MEM_SEGS);
829		mutex_unlock(&dev->struct_mutex);
830		atomic_dec(&dev->buf_alloc);
831		return -ENOMEM;
832	}
833	memcpy(temp_pagelist,
834	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
835	DRM_DEBUG("pagelist: %d entries\n",
836		  dma->page_count + (count << page_order));
837
838	entry->buf_size = size;
839	entry->page_order = page_order;
840	byte_count = 0;
841	page_count = 0;
842
843	while (entry->buf_count < count) {
844
845		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
846
847		if (!dmah) {
848			/* Set count correctly so we free the proper amount. */
849			entry->buf_count = count;
850			entry->seg_count = count;
851			drm_cleanup_buf_error(dev, entry);
852			drm_free(temp_pagelist,
853				 (dma->page_count + (count << page_order))
854				 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
855			mutex_unlock(&dev->struct_mutex);
856			atomic_dec(&dev->buf_alloc);
857			return -ENOMEM;
858		}
859		entry->seglist[entry->seg_count++] = dmah;
860		for (i = 0; i < (1 << page_order); i++) {
861			DRM_DEBUG("page %d @ 0x%08lx\n",
862				  dma->page_count + page_count,
863				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
864			temp_pagelist[dma->page_count + page_count++]
865				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
866		}
867		for (offset = 0;
868		     offset + size <= total && entry->buf_count < count;
869		     offset += alignment, ++entry->buf_count) {
870			buf = &entry->buflist[entry->buf_count];
871			buf->idx = dma->buf_count + entry->buf_count;
872			buf->total = alignment;
873			buf->order = order;
874			buf->used = 0;
875			buf->offset = (dma->byte_count + byte_count + offset);
876			buf->address = (void *)(dmah->vaddr + offset);
877			buf->bus_address = dmah->busaddr + offset;
878			buf->next = NULL;
879			buf->waiting = 0;
880			buf->pending = 0;
881			init_waitqueue_head(&buf->dma_wait);
882			buf->filp = NULL;
883
884			buf->dev_priv_size = dev->driver->dev_priv_size;
885			buf->dev_private = drm_alloc(buf->dev_priv_size,
886						     DRM_MEM_BUFS);
887			if (!buf->dev_private) {
888				/* Set count correctly so we free the proper amount. */
889				entry->buf_count = count;
890				entry->seg_count = count;
891				drm_cleanup_buf_error(dev, entry);
892				drm_free(temp_pagelist,
893					 (dma->page_count +
894					  (count << page_order))
895					 * sizeof(*dma->pagelist),
896					 DRM_MEM_PAGES);
897				mutex_unlock(&dev->struct_mutex);
898				atomic_dec(&dev->buf_alloc);
899				return -ENOMEM;
900			}
901			memset(buf->dev_private, 0, buf->dev_priv_size);
902
903			DRM_DEBUG("buffer %d @ %p\n",
904				  entry->buf_count, buf->address);
905		}
906		byte_count += PAGE_SIZE << page_order;
907	}
908
909	temp_buflist = drm_realloc(dma->buflist,
910				   dma->buf_count * sizeof(*dma->buflist),
911				   (dma->buf_count + entry->buf_count)
912				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
913	if (!temp_buflist) {
914		/* Free the entry because it isn't valid */
915		drm_cleanup_buf_error(dev, entry);
916		drm_free(temp_pagelist,
917			 (dma->page_count + (count << page_order))
918			 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
919		mutex_unlock(&dev->struct_mutex);
920		atomic_dec(&dev->buf_alloc);
921		return -ENOMEM;
922	}
923	dma->buflist = temp_buflist;
924
925	for (i = 0; i < entry->buf_count; i++) {
926		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
927	}
928
	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
932	if (dma->page_count) {
933		drm_free(dma->pagelist,
934			 dma->page_count * sizeof(*dma->pagelist),
935			 DRM_MEM_PAGES);
936	}
937	dma->pagelist = temp_pagelist;
938
939	dma->buf_count += entry->buf_count;
940	dma->seg_count += entry->seg_count;
941	dma->page_count += entry->seg_count << page_order;
942	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
943
944	mutex_unlock(&dev->struct_mutex);
945
946	request->count = entry->buf_count;
947	request->size = size;
948
949	if (request->flags & _DRM_PCI_BUFFER_RO)
950		dma->flags = _DRM_DMA_USE_PCI_RO;
951
952	atomic_dec(&dev->buf_alloc);
	return 0;
}
956EXPORT_SYMBOL(drm_addbufs_pci);
957
958static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
959{
960	drm_device_dma_t *dma = dev->dma;
961	drm_buf_entry_t *entry;
962	drm_buf_t *buf;
963	unsigned long offset;
964	unsigned long agp_offset;
965	int count;
966	int order;
967	int size;
968	int alignment;
969	int page_order;
970	int total;
971	int byte_count;
972	int i;
973	drm_buf_t **temp_buflist;
974
975	if (!drm_core_check_feature(dev, DRIVER_SG))
976		return -EINVAL;
977
978	if (!dma)
979		return -EINVAL;
980
981	if (!capable(CAP_SYS_ADMIN))
982		return -EPERM;
983
984	count = request->count;
985	order = drm_order(request->size);
986	size = 1 << order;
987
988	alignment = (request->flags & _DRM_PAGE_ALIGN)
989	    ? PAGE_ALIGN(size) : size;
990	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
991	total = PAGE_SIZE << page_order;
992
993	byte_count = 0;
994	agp_offset = request->agp_start;
995
996	DRM_DEBUG("count:      %d\n", count);
997	DRM_DEBUG("order:      %d\n", order);
998	DRM_DEBUG("size:       %d\n", size);
999	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1000	DRM_DEBUG("alignment:  %d\n", alignment);
1001	DRM_DEBUG("page_order: %d\n", page_order);
1002	DRM_DEBUG("total:      %d\n", total);
1003
1004	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1005		return -EINVAL;
1006	if (dev->queue_count)
1007		return -EBUSY;	/* Not while in use */
1008
1009	spin_lock(&dev->count_lock);
1010	if (dev->buf_use) {
1011		spin_unlock(&dev->count_lock);
1012		return -EBUSY;
1013	}
1014	atomic_inc(&dev->buf_alloc);
1015	spin_unlock(&dev->count_lock);
1016
1017	mutex_lock(&dev->struct_mutex);
1018	entry = &dma->bufs[order];
1019	if (entry->buf_count) {
1020		mutex_unlock(&dev->struct_mutex);
1021		atomic_dec(&dev->buf_alloc);
1022		return -ENOMEM;	/* May only call once for each order */
1023	}
1024
1025	if (count < 0 || count > 4096) {
1026		mutex_unlock(&dev->struct_mutex);
1027		atomic_dec(&dev->buf_alloc);
1028		return -EINVAL;
1029	}
1030
1031	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
1032				   DRM_MEM_BUFS);
1033	if (!entry->buflist) {
1034		mutex_unlock(&dev->struct_mutex);
1035		atomic_dec(&dev->buf_alloc);
1036		return -ENOMEM;
1037	}
1038	memset(entry->buflist, 0, count * sizeof(*entry->buflist));
1039
1040	entry->buf_size = size;
1041	entry->page_order = page_order;
1042
1043	offset = 0;
1044
1045	while (entry->buf_count < count) {
1046		buf = &entry->buflist[entry->buf_count];
1047		buf->idx = dma->buf_count + entry->buf_count;
1048		buf->total = alignment;
1049		buf->order = order;
1050		buf->used = 0;
1051
1052		buf->offset = (dma->byte_count + offset);
1053		buf->bus_address = agp_offset + offset;
1054		buf->address = (void *)(agp_offset + offset
1055					+ (unsigned long)dev->sg->virtual);
1056		buf->next = NULL;
1057		buf->waiting = 0;
1058		buf->pending = 0;
1059		init_waitqueue_head(&buf->dma_wait);
1060		buf->filp = NULL;
1061
1062		buf->dev_priv_size = dev->driver->dev_priv_size;
1063		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
1064		if (!buf->dev_private) {
1065			/* Set count correctly so we free the proper amount. */
1066			entry->buf_count = count;
1067			drm_cleanup_buf_error(dev, entry);
1068			mutex_unlock(&dev->struct_mutex);
1069			atomic_dec(&dev->buf_alloc);
1070			return -ENOMEM;
1071		}
1072
1073		memset(buf->dev_private, 0, buf->dev_priv_size);
1074
1075		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1076
1077		offset += alignment;
1078		entry->buf_count++;
1079		byte_count += PAGE_SIZE << page_order;
1080	}
1081
1082	DRM_DEBUG("byte_count: %d\n", byte_count);
1083
1084	temp_buflist = drm_realloc(dma->buflist,
1085				   dma->buf_count * sizeof(*dma->buflist),
1086				   (dma->buf_count + entry->buf_count)
1087				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
1088	if (!temp_buflist) {
1089		/* Free the entry because it isn't valid */
1090		drm_cleanup_buf_error(dev, entry);
1091		mutex_unlock(&dev->struct_mutex);
1092		atomic_dec(&dev->buf_alloc);
1093		return -ENOMEM;
1094	}
1095	dma->buflist = temp_buflist;
1096
1097	for (i = 0; i < entry->buf_count; i++) {
1098		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1099	}
1100
1101	dma->buf_count += entry->buf_count;
1102	dma->seg_count += entry->seg_count;
1103	dma->page_count += byte_count >> PAGE_SHIFT;
1104	dma->byte_count += byte_count;
1105
1106	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1107	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1108
1109	mutex_unlock(&dev->struct_mutex);
1110
1111	request->count = entry->buf_count;
1112	request->size = size;
1113
1114	dma->flags = _DRM_DMA_USE_SG;
1115
1116	atomic_dec(&dev->buf_alloc);
1117	return 0;
1118}
1119
1120static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
1121{
1122	drm_device_dma_t *dma = dev->dma;
1123	drm_buf_entry_t *entry;
1124	drm_buf_t *buf;
1125	unsigned long offset;
1126	unsigned long agp_offset;
1127	int count;
1128	int order;
1129	int size;
1130	int alignment;
1131	int page_order;
1132	int total;
1133	int byte_count;
1134	int i;
1135	drm_buf_t **temp_buflist;
1136
1137	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
1138		return -EINVAL;
1139
1140	if (!dma)
1141		return -EINVAL;
1142
1143	if (!capable(CAP_SYS_ADMIN))
1144		return -EPERM;
1145
1146	count = request->count;
1147	order = drm_order(request->size);
1148	size = 1 << order;
1149
1150	alignment = (request->flags & _DRM_PAGE_ALIGN)
1151	    ? PAGE_ALIGN(size) : size;
1152	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1153	total = PAGE_SIZE << page_order;
1154
1155	byte_count = 0;
1156	agp_offset = request->agp_start;
1157
1158	DRM_DEBUG("count:      %d\n", count);
1159	DRM_DEBUG("order:      %d\n", order);
1160	DRM_DEBUG("size:       %d\n", size);
1161	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1162	DRM_DEBUG("alignment:  %d\n", alignment);
1163	DRM_DEBUG("page_order: %d\n", page_order);
1164	DRM_DEBUG("total:      %d\n", total);
1165
1166	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1167		return -EINVAL;
1168	if (dev->queue_count)
1169		return -EBUSY;	/* Not while in use */
1170
1171	spin_lock(&dev->count_lock);
1172	if (dev->buf_use) {
1173		spin_unlock(&dev->count_lock);
1174		return -EBUSY;
1175	}
1176	atomic_inc(&dev->buf_alloc);
1177	spin_unlock(&dev->count_lock);
1178
1179	mutex_lock(&dev->struct_mutex);
1180	entry = &dma->bufs[order];
1181	if (entry->buf_count) {
1182		mutex_unlock(&dev->struct_mutex);
1183		atomic_dec(&dev->buf_alloc);
1184		return -ENOMEM;	/* May only call once for each order */
1185	}
1186
1187	if (count < 0 || count > 4096) {
1188		mutex_unlock(&dev->struct_mutex);
1189		atomic_dec(&dev->buf_alloc);
1190		return -EINVAL;
1191	}
1192
1193	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
1194				   DRM_MEM_BUFS);
1195	if (!entry->buflist) {
1196		mutex_unlock(&dev->struct_mutex);
1197		atomic_dec(&dev->buf_alloc);
1198		return -ENOMEM;
1199	}
1200	memset(entry->buflist, 0, count * sizeof(*entry->buflist));
1201
1202	entry->buf_size = size;
1203	entry->page_order = page_order;
1204
1205	offset = 0;
1206
1207	while (entry->buf_count < count) {
1208		buf = &entry->buflist[entry->buf_count];
1209		buf->idx = dma->buf_count + entry->buf_count;
1210		buf->total = alignment;
1211		buf->order = order;
1212		buf->used = 0;
1213
1214		buf->offset = (dma->byte_count + offset);
1215		buf->bus_address = agp_offset + offset;
1216		buf->address = (void *)(agp_offset + offset);
1217		buf->next = NULL;
1218		buf->waiting = 0;
1219		buf->pending = 0;
1220		init_waitqueue_head(&buf->dma_wait);
1221		buf->filp = NULL;
1222
1223		buf->dev_priv_size = dev->driver->dev_priv_size;
1224		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
1225		if (!buf->dev_private) {
1226			/* Set count correctly so we free the proper amount. */
1227			entry->buf_count = count;
1228			drm_cleanup_buf_error(dev, entry);
1229			mutex_unlock(&dev->struct_mutex);
1230			atomic_dec(&dev->buf_alloc);
1231			return -ENOMEM;
1232		}
1233		memset(buf->dev_private, 0, buf->dev_priv_size);
1234
1235		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1236
1237		offset += alignment;
1238		entry->buf_count++;
1239		byte_count += PAGE_SIZE << page_order;
1240	}
1241
1242	DRM_DEBUG("byte_count: %d\n", byte_count);
1243
1244	temp_buflist = drm_realloc(dma->buflist,
1245				   dma->buf_count * sizeof(*dma->buflist),
1246				   (dma->buf_count + entry->buf_count)
1247				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
1248	if (!temp_buflist) {
1249		/* Free the entry because it isn't valid */
1250		drm_cleanup_buf_error(dev, entry);
1251		mutex_unlock(&dev->struct_mutex);
1252		atomic_dec(&dev->buf_alloc);
1253		return -ENOMEM;
1254	}
1255	dma->buflist = temp_buflist;
1256
1257	for (i = 0; i < entry->buf_count; i++) {
1258		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1259	}
1260
1261	dma->buf_count += entry->buf_count;
1262	dma->seg_count += entry->seg_count;
1263	dma->page_count += byte_count >> PAGE_SHIFT;
1264	dma->byte_count += byte_count;
1265
1266	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1267	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1268
1269	mutex_unlock(&dev->struct_mutex);
1270
1271	request->count = entry->buf_count;
1272	request->size = size;
1273
1274	dma->flags = _DRM_DMA_USE_FB;
1275
1276	atomic_dec(&dev->buf_alloc);
1277	return 0;
1278}
1279
1280
1281/**
1282 * Add buffers for DMA transfers (ioctl).
1283 *
1284 * \param inode device inode.
1285 * \param filp file pointer.
1286 * \param cmd command.
1287 * \param arg pointer to a drm_buf_desc_t request.
1288 * \return zero on success or a negative number on failure.
1289 *
 * Depending on the memory type specified in drm_buf_desc::flags and the
 * build options, dispatches the call to drm_addbufs_agp(), drm_addbufs_sg(),
 * drm_addbufs_fb() or drm_addbufs_pci() for AGP, scatter-gather, framebuffer
 * or consistent PCI memory respectively.
1294 */
1295int drm_addbufs(struct inode *inode, struct file *filp,
1296		unsigned int cmd, unsigned long arg)
1297{
1298	drm_buf_desc_t request;
1299	drm_file_t *priv = filp->private_data;
1300	drm_device_t *dev = priv->head->dev;
1301	int ret;
1302
1303	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1304		return -EINVAL;
1305
1306	if (copy_from_user(&request, (drm_buf_desc_t __user *) arg,
1307			   sizeof(request)))
1308		return -EFAULT;
1309
1310#if __OS_HAS_AGP
1311	if (request.flags & _DRM_AGP_BUFFER)
1312		ret = drm_addbufs_agp(dev, &request);
1313	else
1314#endif
1315	if (request.flags & _DRM_SG_BUFFER)
1316		ret = drm_addbufs_sg(dev, &request);
1317	else if (request.flags & _DRM_FB_BUFFER)
1318		ret = drm_addbufs_fb(dev, &request);
1319	else
1320		ret = drm_addbufs_pci(dev, &request);
1321
1322	if (ret == 0) {
1323		if (copy_to_user((void __user *)arg, &request, sizeof(request))) {
1324			ret = -EFAULT;
1325		}
1326	}
1327	return ret;
1328}
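
/*
 * Sketch of the user-space side of this ioctl, for illustration only (in
 * practice libdrm's drmAddBufs() wraps it).  Requesting 32 page-aligned AGP
 * buffers of 64KB each at an already-bound offset into the AGP aperture:
 *
 *	drm_buf_desc_t req;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.count     = 32;
 *	req.size      = 65536;
 *	req.flags     = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
 *	req.agp_start = buffer_offset;	(assumed to have been set up earlier)
 *	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &req) < 0)
 *		return -errno;
 *	(on return, req.count and req.size hold what was actually allocated)
 */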
1329
1330/**
1331 * Get information about the buffer mappings.
1332 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
1336 *
1337 * \param inode device inode.
1338 * \param filp file pointer.
1339 * \param cmd command.
1340 * \param arg pointer to a drm_buf_info structure.
1341 * \return zero on success or a negative number on failure.
1342 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing further buffer allocations after this call. Information
 * about each requested buffer is then copied into user space.
1346 */
1347int drm_infobufs(struct inode *inode, struct file *filp,
1348		 unsigned int cmd, unsigned long arg)
1349{
1350	drm_file_t *priv = filp->private_data;
1351	drm_device_t *dev = priv->head->dev;
1352	drm_device_dma_t *dma = dev->dma;
1353	drm_buf_info_t request;
1354	drm_buf_info_t __user *argp = (void __user *)arg;
1355	int i;
1356	int count;
1357
1358	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1359		return -EINVAL;
1360
1361	if (!dma)
1362		return -EINVAL;
1363
1364	spin_lock(&dev->count_lock);
1365	if (atomic_read(&dev->buf_alloc)) {
1366		spin_unlock(&dev->count_lock);
1367		return -EBUSY;
1368	}
1369	++dev->buf_use;		/* Can't allocate more after this call */
1370	spin_unlock(&dev->count_lock);
1371
1372	if (copy_from_user(&request, argp, sizeof(request)))
1373		return -EFAULT;
1374
1375	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1376		if (dma->bufs[i].buf_count)
1377			++count;
1378	}
1379
1380	DRM_DEBUG("count = %d\n", count);
1381
1382	if (request.count >= count) {
1383		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1384			if (dma->bufs[i].buf_count) {
1385				drm_buf_desc_t __user *to =
1386				    &request.list[count];
1387				drm_buf_entry_t *from = &dma->bufs[i];
1388				drm_freelist_t *list = &dma->bufs[i].freelist;
1389				if (copy_to_user(&to->count,
1390						 &from->buf_count,
1391						 sizeof(from->buf_count)) ||
1392				    copy_to_user(&to->size,
1393						 &from->buf_size,
1394						 sizeof(from->buf_size)) ||
1395				    copy_to_user(&to->low_mark,
1396						 &list->low_mark,
1397						 sizeof(list->low_mark)) ||
1398				    copy_to_user(&to->high_mark,
1399						 &list->high_mark,
1400						 sizeof(list->high_mark)))
1401					return -EFAULT;
1402
1403				DRM_DEBUG("%d %d %d %d %d\n",
1404					  i,
1405					  dma->bufs[i].buf_count,
1406					  dma->bufs[i].buf_size,
1407					  dma->bufs[i].freelist.low_mark,
1408					  dma->bufs[i].freelist.high_mark);
1409				++count;
1410			}
1411		}
1412	}
1413	request.count = count;
1414
1415	if (copy_to_user(argp, &request, sizeof(request)))
1416		return -EFAULT;
1417
1418	return 0;
1419}
1420
1421/**
1422 * Specifies a low and high water mark for buffer allocation
1423 *
1424 * \param inode device inode.
1425 * \param filp file pointer.
1426 * \param cmd command.
1427 * \param arg a pointer to a drm_buf_desc structure.
1428 * \return zero on success or a negative number on failure.
1429 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry's low and high water marks.
 *
 * \note This ioctl is deprecated and rarely, if ever, used.
1434 */
1435int drm_markbufs(struct inode *inode, struct file *filp,
1436		 unsigned int cmd, unsigned long arg)
1437{
1438	drm_file_t *priv = filp->private_data;
1439	drm_device_t *dev = priv->head->dev;
1440	drm_device_dma_t *dma = dev->dma;
1441	drm_buf_desc_t request;
1442	int order;
1443	drm_buf_entry_t *entry;
1444
1445	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1446		return -EINVAL;
1447
1448	if (!dma)
1449		return -EINVAL;
1450
1451	if (copy_from_user(&request,
1452			   (drm_buf_desc_t __user *) arg, sizeof(request)))
1453		return -EFAULT;
1454
1455	DRM_DEBUG("%d, %d, %d\n",
1456		  request.size, request.low_mark, request.high_mark);
1457	order = drm_order(request.size);
1458	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1459		return -EINVAL;
1460	entry = &dma->bufs[order];
1461
1462	if (request.low_mark < 0 || request.low_mark > entry->buf_count)
1463		return -EINVAL;
1464	if (request.high_mark < 0 || request.high_mark > entry->buf_count)
1465		return -EINVAL;
1466
1467	entry->freelist.low_mark = request.low_mark;
1468	entry->freelist.high_mark = request.high_mark;
1469
1470	return 0;
1471}
1472
1473/**
 * Unreserve the buffers in the list, previously reserved using drmDMA.
1475 *
1476 * \param inode device inode.
1477 * \param filp file pointer.
1478 * \param cmd command.
1479 * \param arg pointer to a drm_buf_free structure.
1480 * \return zero on success or a negative number on failure.
1481 *
 * Calls drm_free_buffer() for each used buffer.
 * This function is primarily used for debugging.
1484 */
1485int drm_freebufs(struct inode *inode, struct file *filp,
1486		 unsigned int cmd, unsigned long arg)
1487{
1488	drm_file_t *priv = filp->private_data;
1489	drm_device_t *dev = priv->head->dev;
1490	drm_device_dma_t *dma = dev->dma;
1491	drm_buf_free_t request;
1492	int i;
1493	int idx;
1494	drm_buf_t *buf;
1495
1496	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1497		return -EINVAL;
1498
1499	if (!dma)
1500		return -EINVAL;
1501
1502	if (copy_from_user(&request,
1503			   (drm_buf_free_t __user *) arg, sizeof(request)))
1504		return -EFAULT;
1505
1506	DRM_DEBUG("%d\n", request.count);
1507	for (i = 0; i < request.count; i++) {
1508		if (copy_from_user(&idx, &request.list[i], sizeof(idx)))
1509			return -EFAULT;
1510		if (idx < 0 || idx >= dma->buf_count) {
1511			DRM_ERROR("Index %d (of %d max)\n",
1512				  idx, dma->buf_count - 1);
1513			return -EINVAL;
1514		}
1515		buf = dma->buflist[idx];
1516		if (buf->filp != filp) {
1517			DRM_ERROR("Process %d freeing buffer not owned\n",
1518				  current->pid);
1519			return -EINVAL;
1520		}
1521		drm_free_buffer(dev, buf);
1522	}
1523
1524	return 0;
1525}
1526
1527/**
1528 * Maps all of the DMA buffers into client-virtual space (ioctl).
1529 *
1530 * \param inode device inode.
1531 * \param filp file pointer.
1532 * \param cmd command.
1533 * \param arg pointer to a drm_buf_map structure.
1534 * \return zero on success or a negative number on failure.
1535 *
1536 * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
1537 * about each buffer into user space. For PCI buffers, it calls do_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
1539 * drm_mmap_dma().
1540 */
1541int drm_mapbufs(struct inode *inode, struct file *filp,
1542		unsigned int cmd, unsigned long arg)
1543{
1544	drm_file_t *priv = filp->private_data;
1545	drm_device_t *dev = priv->head->dev;
1546	drm_device_dma_t *dma = dev->dma;
1547	drm_buf_map_t __user *argp = (void __user *)arg;
1548	int retcode = 0;
1549	const int zero = 0;
1550	unsigned long virtual;
1551	unsigned long address;
1552	drm_buf_map_t request;
1553	int i;
1554
1555	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1556		return -EINVAL;
1557
1558	if (!dma)
1559		return -EINVAL;
1560
1561	spin_lock(&dev->count_lock);
1562	if (atomic_read(&dev->buf_alloc)) {
1563		spin_unlock(&dev->count_lock);
1564		return -EBUSY;
1565	}
1566	dev->buf_use++;		/* Can't allocate more after this call */
1567	spin_unlock(&dev->count_lock);
1568
1569	if (copy_from_user(&request, argp, sizeof(request)))
1570		return -EFAULT;
1571
1572	if (request.count >= dma->buf_count) {
1573		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
1574		    || (drm_core_check_feature(dev, DRIVER_SG)
1575			&& (dma->flags & _DRM_DMA_USE_SG))
1576		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
1577			&& (dma->flags & _DRM_DMA_USE_FB))) {
1578			drm_map_t *map = dev->agp_buffer_map;
1579			unsigned long token = dev->agp_buffer_token;
1580
1581			if (!map) {
1582				retcode = -EINVAL;
1583				goto done;
1584			}
1585
1586			down_write(&current->mm->mmap_sem);
1587			virtual = do_mmap(filp, 0, map->size,
1588					  PROT_READ | PROT_WRITE,
1589					  MAP_SHARED, token);
1590			up_write(&current->mm->mmap_sem);
1591		} else {
1592			down_write(&current->mm->mmap_sem);
1593			virtual = do_mmap(filp, 0, dma->byte_count,
1594					  PROT_READ | PROT_WRITE,
1595					  MAP_SHARED, 0);
1596			up_write(&current->mm->mmap_sem);
1597		}
1598		if (virtual > -1024UL) {
1599			/* Real error */
1600			retcode = (signed long)virtual;
1601			goto done;
1602		}
1603		request.virtual = (void __user *)virtual;
1604
1605		for (i = 0; i < dma->buf_count; i++) {
1606			if (copy_to_user(&request.list[i].idx,
1607					 &dma->buflist[i]->idx,
1608					 sizeof(request.list[0].idx))) {
1609				retcode = -EFAULT;
1610				goto done;
1611			}
1612			if (copy_to_user(&request.list[i].total,
1613					 &dma->buflist[i]->total,
1614					 sizeof(request.list[0].total))) {
1615				retcode = -EFAULT;
1616				goto done;
1617			}
1618			if (copy_to_user(&request.list[i].used,
1619					 &zero, sizeof(zero))) {
1620				retcode = -EFAULT;
1621				goto done;
1622			}
1623			address = virtual + dma->buflist[i]->offset;	/* *** */
1624			if (copy_to_user(&request.list[i].address,
1625					 &address, sizeof(address))) {
1626				retcode = -EFAULT;
1627				goto done;
1628			}
1629		}
1630	}
1631      done:
1632	request.count = dma->buf_count;
1633	DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
1634
1635	if (copy_to_user(argp, &request, sizeof(request)))
1636		return -EFAULT;
1637
1638	return retcode;
1639}
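
/*
 * Rough user-space counterpart, for illustration only (libdrm's drmMapBufs()
 * does this for real): the caller passes a list large enough for every
 * buffer and gets back the client-virtual address of each one.  MAX_BUFS is
 * a hypothetical bound at least as large as the allocated buffer count.
 *
 *	drm_buf_map_t map;
 *	drm_buf_pub_t list[MAX_BUFS];
 *	void *buf0;
 *
 *	memset(&map, 0, sizeof(map));
 *	map.count = MAX_BUFS;
 *	map.list  = list;
 *	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &map) < 0)
 *		return -errno;
 *	buf0 = list[0].address;		(client virtual address of buffer 0)
 */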
1640
1641/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
1644 *
1645 * \param size size.
1646 * \return order.
1647 *
1648 * \todo Can be made faster.
1649 */
1650int drm_order(unsigned long size)
1651{
1652	int order;
1653	unsigned long tmp;
1654
1655	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
1656
1657	if (size & (size - 1))
1658		++order;
1659
1660	return order;
1661}
1662EXPORT_SYMBOL(drm_order);
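
/*
 * A few worked examples of drm_order(), for reference:
 *
 *	drm_order(1)    == 0	(2^0  = 1)
 *	drm_order(4096) == 12	(2^12 = 4096, already a power of two)
 *	drm_order(4097) == 13	(rounded up to 2^13 = 8192)
 *
 * so, for instance, a request->size of 4097 bytes lands in dma->bufs[13]
 * and each buffer actually spans 8192 bytes.
 */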
1663