1280183Sdumbbell/**
2280183Sdumbbell * \file drm_bufs.c
3280183Sdumbbell * Generic buffer template
4280183Sdumbbell *
5280183Sdumbbell * \author Rickard E. (Rik) Faith <faith@valinux.com>
6280183Sdumbbell * \author Gareth Hughes <gareth@valinux.com>
7280183Sdumbbell */
8280183Sdumbbell
9280183Sdumbbell/*
10280183Sdumbbell * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
11280183Sdumbbell *
12235783Skib * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
13235783Skib * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14235783Skib * All Rights Reserved.
15235783Skib *
16235783Skib * Permission is hereby granted, free of charge, to any person obtaining a
17235783Skib * copy of this software and associated documentation files (the "Software"),
18235783Skib * to deal in the Software without restriction, including without limitation
19235783Skib * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20235783Skib * and/or sell copies of the Software, and to permit persons to whom the
21235783Skib * Software is furnished to do so, subject to the following conditions:
22235783Skib *
23235783Skib * The above copyright notice and this permission notice (including the next
24235783Skib * paragraph) shall be included in all copies or substantial portions of the
25235783Skib * Software.
26235783Skib *
27235783Skib * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28235783Skib * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29235783Skib * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
30235783Skib * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31235783Skib * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32235783Skib * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33235783Skib * OTHER DEALINGS IN THE SOFTWARE.
34235783Skib */
35235783Skib
36235783Skib#include <sys/cdefs.h>
37235783Skib__FBSDID("$FreeBSD$");
38235783Skib
39280183Sdumbbell#include <sys/param.h>
40280183Sdumbbell#include <sys/shm.h>
41235783Skib
42235783Skib#include <dev/pci/pcireg.h>
43235783Skib
44235783Skib#include <dev/drm2/drmP.h>
45235783Skib
46235783Skib/* Allocation of PCI memory resources (framebuffer, registers, etc.) for
47235783Skib * drm_get_resource_*.  Note that they are not RF_ACTIVE, so there's no virtual
48235783Skib * address for accessing them.  Cleaned up at unload.
49235783Skib */
/*
 * Lazily allocate the struct resource backing PCI BAR `resource` and cache
 * it in dev->pcir[]/dev->pcirid[].  The resource is allocated RF_SHAREABLE
 * but not RF_ACTIVE, so it has no mapped virtual address (see the comment
 * above); it only serves rman_get_start()/rman_get_size() queries.
 *
 * Returns 0 on success (or if already cached), 1 on failure.
 * NOTE(review): callers appear to serialize on dev->pcir_lock — confirm;
 * the re-check of dev->pcir[resource] after allocation suggests a defensive
 * race guard.
 */
static int drm_alloc_resource(struct drm_device *dev, int resource)
{
	struct resource *res;
	int rid;

	/* Reject BAR indices beyond the per-device cache array. */
	if (resource >= DRM_MAX_PCI_RESOURCE) {
		DRM_ERROR("Resource %d too large\n", resource);
		return 1;
	}

	/* Already allocated and cached: nothing to do. */
	if (dev->pcir[resource] != NULL) {
		return 0;
	}

	rid = PCIR_BAR(resource);
	res = bus_alloc_resource_any(dev->dev, SYS_RES_MEMORY, &rid,
	    RF_SHAREABLE);
	if (res == NULL) {
		DRM_ERROR("Couldn't find resource 0x%x\n", resource);
		return 1;
	}

	/* Cache the handle; skipped if another path filled it meanwhile. */
	if (dev->pcir[resource] == NULL) {
		dev->pcirid[resource] = rid;
		dev->pcir[resource] = res;
	}

	return 0;
}
79235783Skib
80235783Skibunsigned long drm_get_resource_start(struct drm_device *dev,
81235783Skib				     unsigned int resource)
82235783Skib{
83280183Sdumbbell	unsigned long start;
84280183Sdumbbell
85280183Sdumbbell	mtx_lock(&dev->pcir_lock);
86280183Sdumbbell
87235783Skib	if (drm_alloc_resource(dev, resource) != 0)
88235783Skib		return 0;
89235783Skib
90280183Sdumbbell	start = rman_get_start(dev->pcir[resource]);
91280183Sdumbbell
92280183Sdumbbell	mtx_unlock(&dev->pcir_lock);
93280183Sdumbbell
94280183Sdumbbell	return (start);
95235783Skib}
96235783Skib
97235783Skibunsigned long drm_get_resource_len(struct drm_device *dev,
98235783Skib				   unsigned int resource)
99235783Skib{
100280183Sdumbbell	unsigned long len;
101280183Sdumbbell
102280183Sdumbbell	mtx_lock(&dev->pcir_lock);
103280183Sdumbbell
104235783Skib	if (drm_alloc_resource(dev, resource) != 0)
105235783Skib		return 0;
106235783Skib
107280183Sdumbbell	len = rman_get_size(dev->pcir[resource]);
108280183Sdumbbell
109280183Sdumbbell	mtx_unlock(&dev->pcir_lock);
110280183Sdumbbell
111280183Sdumbbell	return (len);
112235783Skib}
113235783Skib
/*
 * Scan dev->maplist for an existing entry equivalent to `map`, so that a
 * duplicate addmap request can reuse it.  Only entries of the same type
 * owned by the current primary master are considered.  Returns the matching
 * list entry, or NULL if none exists.
 */
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;
	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we only compare the
		 * lower 32 bits of the map offset for maps of type
		 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
		 * It is assumed that if a driver have more than one resource
		 * of each type, the lower 32 bits are different.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->primary->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			/* Any SHM map carrying the lock matches regardless of
			 * offset; a lock-less SHM map falls through to the
			 * exact-offset comparison below. */
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
			return entry;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			/* Low 32 bits match: treat as the same resource.
			 * Otherwise fall through to the full-offset check. */
			if ((entry->map->offset & 0xffffffff) ==
			    (map->offset & 0xffffffff))
				return entry;
		default: /* Make gcc happy */
			;
		}
		/* Generic case: full-width offset equality. */
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}
150280183Sdumbbell
/*
 * Insert `hash` into dev->map_hash, producing the 32-bit user-visible map
 * handle (hash->key, in page units).  When the user token itself fits in
 * 32 bits and no hashed handle was requested, the token's page number is
 * used directly; otherwise a free key is picked by
 * drm_ht_just_insert_please() above DRM_MAP_HASH_OFFSET.
 *
 * Returns 0 on success or a negative errno from the hash-table insert.
 */
static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

#if (BITS_PER_LONG == 64)
	/* A token with bits above 2^32 cannot be used verbatim. */
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;
		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		/* -EINVAL means the key is already taken; fall back to a
		 * hashed handle below.  Any other result is final. */
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/* For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap().  Otherwise we run into cache aliasing problems
		 * on some platforms.  On these platforms, the pgoff of
		 * a mmap() request is used to pick a suitable virtual
		 * address for the mmap() region such that it will not
		 * cause cache aliasing problems.
		 *
		 * Therefore, make sure the SHMLBA relevant bits of the
		 * hash value we use are equal to those in the original
		 * kernel virtual address.
		 */
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}
198280183Sdumbbell
199280183Sdumbbell/**
200280183Sdumbbell * Core function to create a range of memory available for mapping by a
201280183Sdumbbell * non-root process.
202280183Sdumbbell *
203280183Sdumbbell * Adjusts the memory offset to its absolute value according to the mapping
204280183Sdumbbell * type.  Adds the map to the map list drm_device::maplist. Adds MTRR's where
205280183Sdumbbell * applicable and if supported by the kernel.
206280183Sdumbbell */
static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list ** maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;
	int align;

	map = malloc(sizeof(*map), DRM_MEM_MAPS, M_NOWAIT);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		free(map, DRM_MEM_MAPS);
		return -EINVAL;
	}
	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
		  (unsigned long long)map->offset, map->size, map->type);

	/* page-align _DRM_SHM maps. They are allocated here so there is no security
	 * hole created by that and it works around various broken drivers that use
	 * a non-aligned quantity to map the SAREA. --BenH
	 */
	if (map->type == _DRM_SHM)
		map->size = PAGE_ALIGN(map->size);

	/*
	 * FreeBSD port note: FreeBSD's PAGE_MASK is the inverse of
	 * Linux's one. That's why the test below doesn't inverse the
	 * constant.
	 */
	if ((map->offset & ((resource_size_t)PAGE_MASK)) || (map->size & (PAGE_MASK))) {
		free(map, DRM_MEM_MAPS);
		return -EINVAL;
	}
	map->mtrr = -1;
	map->handle = NULL;

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#ifdef __linux__
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
		if (map->offset + (map->size-1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			kfree(map);
			return -EINVAL;
		}
#endif
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				/* Keep the larger/newer size on the shared
				 * entry rather than failing. */
				list->map->size = map->size;
			}

			free(map, DRM_MEM_MAPS);
			*maplist = list;
			return 0;
		}

		/* Framebuffers (and explicitly write-combined maps) get a
		 * write-combining MTRR when the platform supports it. */
		if (drm_core_has_MTRR(dev)) {
			if (map->type == _DRM_FRAME_BUFFER ||
			    (map->flags & _DRM_WRITE_COMBINING)) {
				if (drm_mtrr_add(
				    map->offset, map->size,
				    DRM_MTRR_WC) == 0)
					map->mtrr = 1;
			}
		}
		/* Register maps need a kernel virtual mapping for CPU
		 * access; framebuffers are only mapped by userspace. */
		if (map->type == _DRM_REGISTERS) {
			drm_core_ioremap(map, dev);
			if (!map->handle) {
				free(map, DRM_MEM_MAPS);
				return -ENOMEM;
			}
		}

		break;
	case _DRM_SHM:
		/* As above: reuse a matching pre-existing SHM map. */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if(list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			free(map, DRM_MEM_MAPS);
			*maplist = list;
			return 0;
		}
		/* SHM maps are plain kernel memory; the kernel virtual
		 * address doubles as the map offset. */
		map->handle = malloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			free(map, DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->primary->master->lock.hw_lock != NULL) {
				free(map->handle, DRM_MEM_MAPS);
				free(map, DRM_MEM_MAPS);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!drm_core_has_AGP(dev)) {
			free(map, DRM_MEM_MAPS);
			return -EINVAL;
		}
#ifdef __linux__
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.ai_aperture_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. i810 driver). So this loop will get
		 * skipped and we double check that dev->agp->memory is
		 * actually set as well as being invalid before EPERM'ing
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			free(map, DRM_MEM_MAPS);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);

		break;
	}
	case _DRM_GEM:
		/* GEM objects are not managed through the legacy map list. */
		DRM_ERROR("tried to addmap GEM object\n");
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			free(map, DRM_MEM_MAPS);
			return -EINVAL;
		}
		/* SG maps are expressed relative to the SG area's kernel
		 * virtual base. */
		map->handle = (void *)(dev->sg->vaddr + offset);
		map->offset += dev->sg->vaddr;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		/* Alignment: use the size itself when it is a power of two,
		 * otherwise fall back to page alignment. */
		align = map->size;
		if ((align & (align - 1)) != 0)
			align = PAGE_SIZE;
		dmah = drm_pci_alloc(dev, map->size, align, BUS_SPACE_MAXADDR);
		if (!dmah) {
			free(map, DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = dmah->busaddr;
		map->dmah = dmah;
		break;
	default:
		free(map, DRM_MEM_MAPS);
		return -EINVAL;
	}

	list = malloc(sizeof(*list), DRM_MEM_MAPS, M_ZERO | M_NOWAIT);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			drm_core_ioremapfree(map, dev);
		/* NOTE(review): allocation failure returns -EINVAL rather
		 * than -ENOMEM, and the _DRM_SHM handle / _DRM_CONSISTENT
		 * dmah are not released here — confirm whether this leaks. */
		free(map, DRM_MEM_MAPS);
		return -EINVAL;
	}
	list->map = map;

	DRM_LOCK(dev);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0,
			     (map->type == _DRM_SHM));
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			drm_core_ioremapfree(map, dev);
		/* NOTE(review): `list` is still on dev->maplist when freed
		 * here, and SHM/consistent backing stores are not released —
		 * verify against upstream drm_bufs.c. */
		free(map, DRM_MEM_MAPS);
		free(list, DRM_MEM_MAPS);
		DRM_UNLOCK(dev);
		return ret;
	}

	/* The user-visible token is the hash key scaled back to bytes. */
	list->user_token = list->hash.key << PAGE_SHIFT;
	DRM_UNLOCK(dev);

	/* Driver-private maps are not owned by any master. */
	if (!(map->flags & _DRM_DRIVER))
		list->master = dev->primary->master;
	*maplist = list;
	return 0;
	}
455235783Skib
456280183Sdumbbellint drm_addmap(struct drm_device * dev, resource_size_t offset,
457280183Sdumbbell	       unsigned int size, enum drm_map_type type,
458280183Sdumbbell	       enum drm_map_flags flags, struct drm_local_map ** map_ptr)
459280183Sdumbbell{
460280183Sdumbbell	struct drm_map_list *list;
461280183Sdumbbell	int rc;
462280183Sdumbbell
463280183Sdumbbell	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
464280183Sdumbbell	if (!rc)
465280183Sdumbbell		*map_ptr = list->map;
466280183Sdumbbell	return rc;
467235783Skib}
468235783Skib
469280183SdumbbellEXPORT_SYMBOL(drm_addmap);
470280183Sdumbbell
471280183Sdumbbell/**
472280183Sdumbbell * Ioctl to specify a range of memory that is available for mapping by a
473280183Sdumbbell * non-root process.
474280183Sdumbbell *
475280183Sdumbbell * \param inode device inode.
476280183Sdumbbell * \param file_priv DRM file private.
477280183Sdumbbell * \param cmd command.
478280183Sdumbbell * \param arg pointer to a drm_map structure.
479280183Sdumbbell * \return zero on success or a negative value on error.
480280183Sdumbbell *
481280183Sdumbbell */
482235783Skibint drm_addmap_ioctl(struct drm_device *dev, void *data,
483235783Skib		     struct drm_file *file_priv)
484235783Skib{
485280183Sdumbbell	struct drm_map *map = data;
486280183Sdumbbell	struct drm_map_list *maplist;
487235783Skib	int err;
488235783Skib
489280183Sdumbbell	if (!(DRM_SUSER(DRM_CURPROC) || map->type == _DRM_AGP || map->type == _DRM_SHM))
490280183Sdumbbell		return -EPERM;
491235783Skib
492280183Sdumbbell	err = drm_addmap_core(dev, map->offset, map->size, map->type,
493280183Sdumbbell			      map->flags, &maplist);
494235783Skib
495280183Sdumbbell	if (err)
496235783Skib		return err;
497235783Skib
498280183Sdumbbell	/* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */
499280183Sdumbbell	map->handle = (void *)(unsigned long)maplist->user_token;
500235783Skib	return 0;
501235783Skib}
502235783Skib
503280183Sdumbbell/**
504280183Sdumbbell * Remove a map private from list and deallocate resources if the mapping
505280183Sdumbbell * isn't in use.
506280183Sdumbbell *
507280183Sdumbbell * Searches the map on drm_device::maplist, removes it from the list, see if
508280183Sdumbbell * its being used, and free any associate resource (such as MTRR's) if it's not
509280183Sdumbbell * being on use.
510280183Sdumbbell *
511280183Sdumbbell * \sa drm_addmap
512280183Sdumbbell */
int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	int found = 0;
	struct drm_master *master;	/* set iff found; valid below only then */

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			/* Drop the user-visible handle before freeing the
			 * list entry so no new lookup can reach it. */
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			free(r_list, DRM_MEM_MAPS);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	/* Release per-type backing resources. */
	switch (map->type) {
	case _DRM_REGISTERS:
		drm_core_ioremapfree(map, dev);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
			int retcode;
			retcode = drm_mtrr_del(map->mtrr, map->offset,
			    map->size, DRM_MTRR_WC);
			DRM_DEBUG("mtrr_del=%d\n", retcode);
		}
		break;
	case _DRM_SHM:
		free(map->handle, DRM_MEM_MAPS);
		/* If this SHM map carried the hardware lock, detach it from
		 * the owning master and wake any waiters. */
		if (master) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;   /* SHM removed */
			master->lock.file_priv = NULL;
			DRM_WAKEUP_INT((void *)&master->lock.lock_queue);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		/* Nothing extra to free: backing memory is owned elsewhere. */
		break;
	case _DRM_CONSISTENT:
		drm_pci_free(dev, map->dmah);
		break;
	case _DRM_GEM:
		DRM_ERROR("tried to rmmap GEM object\n");
		break;
	}
	free(map, DRM_MEM_MAPS);

	return 0;
}
571280183SdumbbellEXPORT_SYMBOL(drm_rmmap_locked);
572235783Skib
/*
 * Locked wrapper: remove `map` while holding the device lock.
 * See drm_rmmap_locked() for the actual teardown.
 */
int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	int err;

	DRM_LOCK(dev);
	err = drm_rmmap_locked(dev, map);
	DRM_UNLOCK(dev);

	return (err);
}
583280183SdumbbellEXPORT_SYMBOL(drm_rmmap);
584235783Skib
585280183Sdumbbell/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
586280183Sdumbbell * the last close of the device, and this is necessary for cleanup when things
587280183Sdumbbell * exit uncleanly.  Therefore, having userland manually remove mappings seems
588280183Sdumbbell * like a pointless exercise since they're going away anyway.
589280183Sdumbbell *
590280183Sdumbbell * One use case might be after addmap is allowed for normal users for SHM and
591280183Sdumbbell * gets used by drivers that the server doesn't need to care about.  This seems
592280183Sdumbbell * unlikely.
593280183Sdumbbell *
594280183Sdumbbell * \param inode device inode.
595280183Sdumbbell * \param file_priv DRM file private.
596280183Sdumbbell * \param cmd command.
597280183Sdumbbell * \param arg pointer to a struct drm_map structure.
598280183Sdumbbell * \return zero on success or a negative value on error.
599235783Skib */
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	DRM_LOCK(dev);
	/* Look the map up by the user-visible token; only maps flagged
	 * _DRM_REMOVABLE may be removed through this ioctl. */
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or its empty we didn't
	 * find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		DRM_UNLOCK(dev);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		DRM_UNLOCK(dev);
		return 0;	/* deliberate no-op, not an error */
	}

	ret = drm_rmmap_locked(dev, map);

	DRM_UNLOCK(dev);

	return ret;
}
638235783Skib
639280183Sdumbbell/**
640280183Sdumbbell * Cleanup after an error on one of the addbufs() functions.
641280183Sdumbbell *
642280183Sdumbbell * \param dev DRM device.
643280183Sdumbbell * \param entry buffer entry where the error occurred.
644280183Sdumbbell *
645280183Sdumbbell * Frees any pages and buffers associated with the given entry.
646280183Sdumbbell */
647280183Sdumbbellstatic void drm_cleanup_buf_error(struct drm_device * dev,
648280183Sdumbbell				  struct drm_buf_entry * entry)
649235783Skib{
650235783Skib	int i;
651235783Skib
652235783Skib	if (entry->seg_count) {
653235783Skib		for (i = 0; i < entry->seg_count; i++) {
654280183Sdumbbell			if (entry->seglist[i]) {
655280183Sdumbbell				drm_pci_free(dev, entry->seglist[i]);
656280183Sdumbbell			}
657235783Skib		}
658235783Skib		free(entry->seglist, DRM_MEM_SEGS);
659235783Skib
660235783Skib		entry->seg_count = 0;
661235783Skib	}
662235783Skib
663280183Sdumbbell	if (entry->buf_count) {
664280183Sdumbbell		for (i = 0; i < entry->buf_count; i++) {
665235783Skib			free(entry->buflist[i].dev_private, DRM_MEM_BUFS);
666235783Skib		}
667235783Skib		free(entry->buflist, DRM_MEM_BUFS);
668235783Skib
669235783Skib		entry->buf_count = 0;
670235783Skib	}
671235783Skib}
672235783Skib
673280183Sdumbbell#if __OS_HAS_AGP
674280183Sdumbbell/**
675280183Sdumbbell * Add AGP buffers for DMA transfers.
676280183Sdumbbell *
677280183Sdumbbell * \param dev struct drm_device to which the buffers are to be added.
678280183Sdumbbell * \param request pointer to a struct drm_buf_desc describing the request.
679280183Sdumbbell * \return zero on success or a negative number on failure.
680280183Sdumbbell *
681280183Sdumbbell * After some sanity checks creates a drm_buf structure for each buffer and
682280183Sdumbbell * reallocates the buffer list of the same size order to accommodate the new
683280183Sdumbbell * buffers.
684280183Sdumbbell */
685280183Sdumbbellint drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
686235783Skib{
687280183Sdumbbell	struct drm_device_dma *dma = dev->dma;
688280183Sdumbbell	struct drm_buf_entry *entry;
689280183Sdumbbell	struct drm_agp_mem *agp_entry;
690280183Sdumbbell	struct drm_buf *buf;
691235783Skib	unsigned long offset;
692235783Skib	unsigned long agp_offset;
693235783Skib	int count;
694235783Skib	int order;
695235783Skib	int size;
696235783Skib	int alignment;
697235783Skib	int page_order;
698235783Skib	int total;
699235783Skib	int byte_count;
700280183Sdumbbell	int i, valid;
701280183Sdumbbell	struct drm_buf **temp_buflist;
702235783Skib
703280183Sdumbbell	if (!dma)
704280183Sdumbbell		return -EINVAL;
705280183Sdumbbell
706235783Skib	count = request->count;
707235783Skib	order = drm_order(request->size);
708235783Skib	size = 1 << order;
709235783Skib
710280183Sdumbbell	alignment = (request->flags & _DRM_PAGE_ALIGN)
711280183Sdumbbell	    ? PAGE_ALIGN(size) : size;
712235783Skib	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
713235783Skib	total = PAGE_SIZE << page_order;
714235783Skib
715235783Skib	byte_count = 0;
716235783Skib	agp_offset = dev->agp->base + request->agp_start;
717235783Skib
718280183Sdumbbell	DRM_DEBUG("count:      %d\n", count);
719280183Sdumbbell	DRM_DEBUG("order:      %d\n", order);
720280183Sdumbbell	DRM_DEBUG("size:       %d\n", size);
721280183Sdumbbell	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
722280183Sdumbbell	DRM_DEBUG("alignment:  %d\n", alignment);
723280183Sdumbbell	DRM_DEBUG("page_order: %d\n", page_order);
724280183Sdumbbell	DRM_DEBUG("total:      %d\n", total);
725235783Skib
726280183Sdumbbell	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
727280183Sdumbbell		return -EINVAL;
728280183Sdumbbell
729235783Skib	/* Make sure buffers are located in AGP memory that we own */
730280183Sdumbbell	valid = 0;
731280183Sdumbbell	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
732235783Skib		if ((agp_offset >= agp_entry->bound) &&
733280183Sdumbbell		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
734235783Skib			valid = 1;
735235783Skib			break;
736235783Skib		}
737235783Skib	}
738280183Sdumbbell	if (!list_empty(&dev->agp->memory) && !valid) {
739235783Skib		DRM_DEBUG("zone invalid\n");
740280183Sdumbbell		return -EINVAL;
741280183Sdumbbell	}
742280183Sdumbbell	mtx_lock(&dev->count_lock);
743280183Sdumbbell	if (dev->buf_use) {
744280183Sdumbbell		mtx_unlock(&dev->count_lock);
745280183Sdumbbell		return -EBUSY;
746280183Sdumbbell	}
747280183Sdumbbell	atomic_inc(&dev->buf_alloc);
748280183Sdumbbell	mtx_unlock(&dev->count_lock);
749235783Skib
750280183Sdumbbell	DRM_LOCK(dev);
751235783Skib	entry = &dma->bufs[order];
752280183Sdumbbell	if (entry->buf_count) {
753280183Sdumbbell		DRM_UNLOCK(dev);
754280183Sdumbbell		atomic_dec(&dev->buf_alloc);
755280183Sdumbbell		return -ENOMEM;	/* May only call once for each order */
756280183Sdumbbell	}
757235783Skib
758280183Sdumbbell	if (count < 0 || count > 4096) {
759280183Sdumbbell		DRM_UNLOCK(dev);
760280183Sdumbbell		atomic_dec(&dev->buf_alloc);
761280183Sdumbbell		return -EINVAL;
762280183Sdumbbell	}
763280183Sdumbbell
764235783Skib	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
765235783Skib	    M_NOWAIT | M_ZERO);
766235783Skib	if (!entry->buflist) {
767280183Sdumbbell		DRM_UNLOCK(dev);
768280183Sdumbbell		atomic_dec(&dev->buf_alloc);
769280183Sdumbbell		return -ENOMEM;
770235783Skib	}
771235783Skib
772235783Skib	entry->buf_size = size;
773235783Skib	entry->page_order = page_order;
774235783Skib
775235783Skib	offset = 0;
776235783Skib
777235783Skib	while (entry->buf_count < count) {
778280183Sdumbbell		buf = &entry->buflist[entry->buf_count];
779280183Sdumbbell		buf->idx = dma->buf_count + entry->buf_count;
780280183Sdumbbell		buf->total = alignment;
781280183Sdumbbell		buf->order = order;
782280183Sdumbbell		buf->used = 0;
783235783Skib
784280183Sdumbbell		buf->offset = (dma->byte_count + offset);
785235783Skib		buf->bus_address = agp_offset + offset;
786235783Skib		buf->address = (void *)(agp_offset + offset);
787280183Sdumbbell		buf->next = NULL;
788280183Sdumbbell		buf->waiting = 0;
789235783Skib		buf->pending = 0;
790235783Skib		buf->file_priv = NULL;
791235783Skib
792280183Sdumbbell		buf->dev_priv_size = dev->driver->dev_priv_size;
793235783Skib		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
794235783Skib		    M_NOWAIT | M_ZERO);
795280183Sdumbbell		if (!buf->dev_private) {
796235783Skib			/* Set count correctly so we free the proper amount. */
797235783Skib			entry->buf_count = count;
798235783Skib			drm_cleanup_buf_error(dev, entry);
799280183Sdumbbell			DRM_UNLOCK(dev);
800280183Sdumbbell			atomic_dec(&dev->buf_alloc);
801280183Sdumbbell			return -ENOMEM;
802235783Skib		}
803235783Skib
804280183Sdumbbell		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
805280183Sdumbbell
806235783Skib		offset += alignment;
807235783Skib		entry->buf_count++;
808235783Skib		byte_count += PAGE_SIZE << page_order;
809235783Skib	}
810235783Skib
811235783Skib	DRM_DEBUG("byte_count: %d\n", byte_count);
812235783Skib
813235783Skib	temp_buflist = realloc(dma->buflist,
814235783Skib	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
815235783Skib	    DRM_MEM_BUFS, M_NOWAIT);
816280183Sdumbbell	if (!temp_buflist) {
817235783Skib		/* Free the entry because it isn't valid */
818235783Skib		drm_cleanup_buf_error(dev, entry);
819280183Sdumbbell		DRM_UNLOCK(dev);
820280183Sdumbbell		atomic_dec(&dev->buf_alloc);
821280183Sdumbbell		return -ENOMEM;
822235783Skib	}
823235783Skib	dma->buflist = temp_buflist;
824235783Skib
825235783Skib	for (i = 0; i < entry->buf_count; i++) {
826235783Skib		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
827235783Skib	}
828235783Skib
829235783Skib	dma->buf_count += entry->buf_count;
830280183Sdumbbell	dma->seg_count += entry->seg_count;
831280183Sdumbbell	dma->page_count += byte_count >> PAGE_SHIFT;
832235783Skib	dma->byte_count += byte_count;
833235783Skib
834235783Skib	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
835235783Skib	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
836235783Skib
837280183Sdumbbell	DRM_UNLOCK(dev);
838280183Sdumbbell
839235783Skib	request->count = entry->buf_count;
840235783Skib	request->size = size;
841235783Skib
842235783Skib	dma->flags = _DRM_DMA_USE_AGP;
843235783Skib
844280183Sdumbbell	atomic_dec(&dev->buf_alloc);
845235783Skib	return 0;
846235783Skib}
847280183SdumbbellEXPORT_SYMBOL(drm_addbufs_agp);
848280183Sdumbbell#endif				/* __OS_HAS_AGP */
849235783Skib
850280183Sdumbbellint drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
851235783Skib{
852280183Sdumbbell	struct drm_device_dma *dma = dev->dma;
853235783Skib	int count;
854235783Skib	int order;
855235783Skib	int size;
856235783Skib	int total;
857235783Skib	int page_order;
858280183Sdumbbell	struct drm_buf_entry *entry;
859280183Sdumbbell	drm_dma_handle_t *dmah;
860280183Sdumbbell	struct drm_buf *buf;
861235783Skib	int alignment;
862235783Skib	unsigned long offset;
863235783Skib	int i;
864235783Skib	int byte_count;
865235783Skib	int page_count;
866235783Skib	unsigned long *temp_pagelist;
867280183Sdumbbell	struct drm_buf **temp_buflist;
868235783Skib
869280183Sdumbbell	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
870280183Sdumbbell		return -EINVAL;
871280183Sdumbbell
872280183Sdumbbell	if (!dma)
873280183Sdumbbell		return -EINVAL;
874280183Sdumbbell
875280183Sdumbbell	if (!DRM_SUSER(DRM_CURPROC))
876280183Sdumbbell		return -EPERM;
877280183Sdumbbell
878235783Skib	count = request->count;
879235783Skib	order = drm_order(request->size);
880235783Skib	size = 1 << order;
881235783Skib
882235783Skib	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
883280183Sdumbbell		  request->count, request->size, size, order);
884235783Skib
885280183Sdumbbell	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
886280183Sdumbbell		return -EINVAL;
887280183Sdumbbell
888235783Skib	alignment = (request->flags & _DRM_PAGE_ALIGN)
889280183Sdumbbell	    ? PAGE_ALIGN(size) : size;
890235783Skib	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
891235783Skib	total = PAGE_SIZE << page_order;
892235783Skib
893280183Sdumbbell	mtx_lock(&dev->count_lock);
894280183Sdumbbell	if (dev->buf_use) {
895280183Sdumbbell		mtx_unlock(&dev->count_lock);
896280183Sdumbbell		return -EBUSY;
897280183Sdumbbell	}
898280183Sdumbbell	atomic_inc(&dev->buf_alloc);
899280183Sdumbbell	mtx_unlock(&dev->count_lock);
900280183Sdumbbell
901280183Sdumbbell	DRM_LOCK(dev);
902235783Skib	entry = &dma->bufs[order];
903280183Sdumbbell	if (entry->buf_count) {
904280183Sdumbbell		DRM_UNLOCK(dev);
905280183Sdumbbell		atomic_dec(&dev->buf_alloc);
906280183Sdumbbell		return -ENOMEM;	/* May only call once for each order */
907280183Sdumbbell	}
908235783Skib
909280183Sdumbbell	if (count < 0 || count > 4096) {
910280183Sdumbbell		DRM_UNLOCK(dev);
911280183Sdumbbell		atomic_dec(&dev->buf_alloc);
912280183Sdumbbell		return -EINVAL;
913280183Sdumbbell	}
914280183Sdumbbell
915235783Skib	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
916235783Skib	    M_NOWAIT | M_ZERO);
917280183Sdumbbell	if (!entry->buflist) {
918280183Sdumbbell		DRM_UNLOCK(dev);
919280183Sdumbbell		atomic_dec(&dev->buf_alloc);
920280183Sdumbbell		return -ENOMEM;
921280183Sdumbbell	}
922280183Sdumbbell
923235783Skib	entry->seglist = malloc(count * sizeof(*entry->seglist), DRM_MEM_SEGS,
924235783Skib	    M_NOWAIT | M_ZERO);
925280183Sdumbbell	if (!entry->seglist) {
926280183Sdumbbell		free(entry->buflist, DRM_MEM_BUFS);
927280183Sdumbbell		DRM_UNLOCK(dev);
928280183Sdumbbell		atomic_dec(&dev->buf_alloc);
929280183Sdumbbell		return -ENOMEM;
930280183Sdumbbell	}
931235783Skib
932235783Skib	/* Keep the original pagelist until we know all the allocations
933235783Skib	 * have succeeded
934235783Skib	 */
935235783Skib	temp_pagelist = malloc((dma->page_count + (count << page_order)) *
936235783Skib	    sizeof(*dma->pagelist), DRM_MEM_PAGES, M_NOWAIT);
937280183Sdumbbell	if (!temp_pagelist) {
938280183Sdumbbell		free(entry->buflist, DRM_MEM_BUFS);
939235783Skib		free(entry->seglist, DRM_MEM_SEGS);
940280183Sdumbbell		DRM_UNLOCK(dev);
941280183Sdumbbell		atomic_dec(&dev->buf_alloc);
942280183Sdumbbell		return -ENOMEM;
943235783Skib	}
944280183Sdumbbell	memcpy(temp_pagelist,
945280183Sdumbbell	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
946235783Skib	DRM_DEBUG("pagelist: %d entries\n",
947280183Sdumbbell		  dma->page_count + (count << page_order));
948235783Skib
949280183Sdumbbell	entry->buf_size = size;
950235783Skib	entry->page_order = page_order;
951235783Skib	byte_count = 0;
952235783Skib	page_count = 0;
953235783Skib
954235783Skib	while (entry->buf_count < count) {
955280183Sdumbbell
956280183Sdumbbell		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, BUS_SPACE_MAXADDR);
957280183Sdumbbell
958280183Sdumbbell		if (!dmah) {
959235783Skib			/* Set count correctly so we free the proper amount. */
960235783Skib			entry->buf_count = count;
961235783Skib			entry->seg_count = count;
962235783Skib			drm_cleanup_buf_error(dev, entry);
963235783Skib			free(temp_pagelist, DRM_MEM_PAGES);
964280183Sdumbbell			DRM_UNLOCK(dev);
965280183Sdumbbell			atomic_dec(&dev->buf_alloc);
966280183Sdumbbell			return -ENOMEM;
967235783Skib		}
968235783Skib		entry->seglist[entry->seg_count++] = dmah;
969235783Skib		for (i = 0; i < (1 << page_order); i++) {
970280183Sdumbbell			DRM_DEBUG("page %d @ 0x%08lx\n",
971280183Sdumbbell				  dma->page_count + page_count,
972280183Sdumbbell				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
973280183Sdumbbell			temp_pagelist[dma->page_count + page_count++]
974280183Sdumbbell				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
975235783Skib		}
976235783Skib		for (offset = 0;
977280183Sdumbbell		     offset + size <= total && entry->buf_count < count;
978280183Sdumbbell		     offset += alignment, ++entry->buf_count) {
979280183Sdumbbell			buf = &entry->buflist[entry->buf_count];
980280183Sdumbbell			buf->idx = dma->buf_count + entry->buf_count;
981280183Sdumbbell			buf->total = alignment;
982280183Sdumbbell			buf->order = order;
983280183Sdumbbell			buf->used = 0;
984280183Sdumbbell			buf->offset = (dma->byte_count + byte_count + offset);
985280183Sdumbbell			buf->address = (void *)((char *)dmah->vaddr + offset);
986235783Skib			buf->bus_address = dmah->busaddr + offset;
987280183Sdumbbell			buf->next = NULL;
988280183Sdumbbell			buf->waiting = 0;
989235783Skib			buf->pending = 0;
990235783Skib			buf->file_priv = NULL;
991235783Skib
992280183Sdumbbell			buf->dev_priv_size = dev->driver->dev_priv_size;
993235783Skib			buf->dev_private = malloc(buf->dev_priv_size,
994235783Skib			    DRM_MEM_BUFS, M_NOWAIT | M_ZERO);
995280183Sdumbbell			if (!buf->dev_private) {
996235783Skib				/* Set count correctly so we free the proper amount. */
997235783Skib				entry->buf_count = count;
998235783Skib				entry->seg_count = count;
999235783Skib				drm_cleanup_buf_error(dev, entry);
1000235783Skib				free(temp_pagelist, DRM_MEM_PAGES);
1001280183Sdumbbell				DRM_UNLOCK(dev);
1002280183Sdumbbell				atomic_dec(&dev->buf_alloc);
1003280183Sdumbbell				return -ENOMEM;
1004235783Skib			}
1005235783Skib
1006235783Skib			DRM_DEBUG("buffer %d @ %p\n",
1007280183Sdumbbell				  entry->buf_count, buf->address);
1008235783Skib		}
1009235783Skib		byte_count += PAGE_SIZE << page_order;
1010235783Skib	}
1011235783Skib
1012235783Skib	temp_buflist = realloc(dma->buflist,
1013235783Skib	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
1014235783Skib	    DRM_MEM_BUFS, M_NOWAIT);
1015280183Sdumbbell	if (!temp_buflist) {
1016235783Skib		/* Free the entry because it isn't valid */
1017235783Skib		drm_cleanup_buf_error(dev, entry);
1018235783Skib		free(temp_pagelist, DRM_MEM_PAGES);
1019280183Sdumbbell		DRM_UNLOCK(dev);
1020280183Sdumbbell		atomic_dec(&dev->buf_alloc);
1021280183Sdumbbell		return -ENOMEM;
1022235783Skib	}
1023235783Skib	dma->buflist = temp_buflist;
1024235783Skib
1025235783Skib	for (i = 0; i < entry->buf_count; i++) {
1026235783Skib		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1027235783Skib	}
1028235783Skib
1029280183Sdumbbell	/* No allocations failed, so now we can replace the original pagelist
1030235783Skib	 * with the new one.
1031235783Skib	 */
1032280183Sdumbbell	if (dma->page_count) {
1033280183Sdumbbell		free(dma->pagelist, DRM_MEM_PAGES);
1034280183Sdumbbell	}
1035235783Skib	dma->pagelist = temp_pagelist;
1036235783Skib
1037235783Skib	dma->buf_count += entry->buf_count;
1038235783Skib	dma->seg_count += entry->seg_count;
1039235783Skib	dma->page_count += entry->seg_count << page_order;
1040235783Skib	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
1041235783Skib
1042280183Sdumbbell	DRM_UNLOCK(dev);
1043280183Sdumbbell
1044235783Skib	request->count = entry->buf_count;
1045235783Skib	request->size = size;
1046235783Skib
1047280183Sdumbbell	if (request->flags & _DRM_PCI_BUFFER_RO)
1048280183Sdumbbell		dma->flags = _DRM_DMA_USE_PCI_RO;
1049280183Sdumbbell
1050280183Sdumbbell	atomic_dec(&dev->buf_alloc);
1051235783Skib	return 0;
1052235783Skib
1053235783Skib}
1054280183SdumbbellEXPORT_SYMBOL(drm_addbufs_pci);
1055235783Skib
1056280183Sdumbbellstatic int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
1057235783Skib{
1058280183Sdumbbell	struct drm_device_dma *dma = dev->dma;
1059280183Sdumbbell	struct drm_buf_entry *entry;
1060280183Sdumbbell	struct drm_buf *buf;
1061235783Skib	unsigned long offset;
1062235783Skib	unsigned long agp_offset;
1063235783Skib	int count;
1064235783Skib	int order;
1065235783Skib	int size;
1066235783Skib	int alignment;
1067235783Skib	int page_order;
1068235783Skib	int total;
1069235783Skib	int byte_count;
1070235783Skib	int i;
1071280183Sdumbbell	struct drm_buf **temp_buflist;
1072235783Skib
1073280183Sdumbbell	if (!drm_core_check_feature(dev, DRIVER_SG))
1074280183Sdumbbell		return -EINVAL;
1075280183Sdumbbell
1076280183Sdumbbell	if (!dma)
1077280183Sdumbbell		return -EINVAL;
1078280183Sdumbbell
1079280183Sdumbbell	if (!DRM_SUSER(DRM_CURPROC))
1080280183Sdumbbell		return -EPERM;
1081280183Sdumbbell
1082235783Skib	count = request->count;
1083235783Skib	order = drm_order(request->size);
1084235783Skib	size = 1 << order;
1085235783Skib
1086280183Sdumbbell	alignment = (request->flags & _DRM_PAGE_ALIGN)
1087280183Sdumbbell	    ? PAGE_ALIGN(size) : size;
1088235783Skib	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1089235783Skib	total = PAGE_SIZE << page_order;
1090235783Skib
1091235783Skib	byte_count = 0;
1092235783Skib	agp_offset = request->agp_start;
1093235783Skib
1094280183Sdumbbell	DRM_DEBUG("count:      %d\n", count);
1095280183Sdumbbell	DRM_DEBUG("order:      %d\n", order);
1096280183Sdumbbell	DRM_DEBUG("size:       %d\n", size);
1097280183Sdumbbell	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1098280183Sdumbbell	DRM_DEBUG("alignment:  %d\n", alignment);
1099280183Sdumbbell	DRM_DEBUG("page_order: %d\n", page_order);
1100280183Sdumbbell	DRM_DEBUG("total:      %d\n", total);
1101235783Skib
1102280183Sdumbbell	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1103280183Sdumbbell		return -EINVAL;
1104280183Sdumbbell
1105280183Sdumbbell	mtx_lock(&dev->count_lock);
1106280183Sdumbbell	if (dev->buf_use) {
1107280183Sdumbbell		mtx_unlock(&dev->count_lock);
1108280183Sdumbbell		return -EBUSY;
1109280183Sdumbbell	}
1110280183Sdumbbell	atomic_inc(&dev->buf_alloc);
1111280183Sdumbbell	mtx_unlock(&dev->count_lock);
1112280183Sdumbbell
1113280183Sdumbbell	DRM_LOCK(dev);
1114235783Skib	entry = &dma->bufs[order];
1115280183Sdumbbell	if (entry->buf_count) {
1116280183Sdumbbell		DRM_UNLOCK(dev);
1117280183Sdumbbell		atomic_dec(&dev->buf_alloc);
1118280183Sdumbbell		return -ENOMEM;	/* May only call once for each order */
1119280183Sdumbbell	}
1120235783Skib
1121280183Sdumbbell	if (count < 0 || count > 4096) {
1122280183Sdumbbell		DRM_UNLOCK(dev);
1123280183Sdumbbell		atomic_dec(&dev->buf_alloc);
1124280183Sdumbbell		return -EINVAL;
1125280183Sdumbbell	}
1126280183Sdumbbell
1127235783Skib	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
1128235783Skib	    M_NOWAIT | M_ZERO);
1129280183Sdumbbell	if (!entry->buflist) {
1130280183Sdumbbell		DRM_UNLOCK(dev);
1131280183Sdumbbell		atomic_dec(&dev->buf_alloc);
1132280183Sdumbbell		return -ENOMEM;
1133280183Sdumbbell	}
1134235783Skib
1135235783Skib	entry->buf_size = size;
1136235783Skib	entry->page_order = page_order;
1137235783Skib
1138235783Skib	offset = 0;
1139235783Skib
1140235783Skib	while (entry->buf_count < count) {
1141280183Sdumbbell		buf = &entry->buflist[entry->buf_count];
1142280183Sdumbbell		buf->idx = dma->buf_count + entry->buf_count;
1143280183Sdumbbell		buf->total = alignment;
1144280183Sdumbbell		buf->order = order;
1145280183Sdumbbell		buf->used = 0;
1146235783Skib
1147280183Sdumbbell		buf->offset = (dma->byte_count + offset);
1148235783Skib		buf->bus_address = agp_offset + offset;
1149280183Sdumbbell		buf->address = (void *)(agp_offset + offset
1150280183Sdumbbell					+ (unsigned long)dev->sg->vaddr);
1151280183Sdumbbell		buf->next = NULL;
1152280183Sdumbbell		buf->waiting = 0;
1153235783Skib		buf->pending = 0;
1154235783Skib		buf->file_priv = NULL;
1155235783Skib
1156280183Sdumbbell		buf->dev_priv_size = dev->driver->dev_priv_size;
1157235783Skib		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
1158235783Skib		    M_NOWAIT | M_ZERO);
1159280183Sdumbbell		if (!buf->dev_private) {
1160235783Skib			/* Set count correctly so we free the proper amount. */
1161235783Skib			entry->buf_count = count;
1162235783Skib			drm_cleanup_buf_error(dev, entry);
1163280183Sdumbbell			DRM_UNLOCK(dev);
1164280183Sdumbbell			atomic_dec(&dev->buf_alloc);
1165280183Sdumbbell			return -ENOMEM;
1166235783Skib		}
1167235783Skib
1168280183Sdumbbell		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1169235783Skib
1170235783Skib		offset += alignment;
1171235783Skib		entry->buf_count++;
1172235783Skib		byte_count += PAGE_SIZE << page_order;
1173235783Skib	}
1174235783Skib
1175235783Skib	DRM_DEBUG("byte_count: %d\n", byte_count);
1176235783Skib
1177235783Skib	temp_buflist = realloc(dma->buflist,
1178235783Skib	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
1179235783Skib	    DRM_MEM_BUFS, M_NOWAIT);
1180280183Sdumbbell	if (!temp_buflist) {
1181235783Skib		/* Free the entry because it isn't valid */
1182235783Skib		drm_cleanup_buf_error(dev, entry);
1183280183Sdumbbell		DRM_UNLOCK(dev);
1184280183Sdumbbell		atomic_dec(&dev->buf_alloc);
1185280183Sdumbbell		return -ENOMEM;
1186235783Skib	}
1187235783Skib	dma->buflist = temp_buflist;
1188235783Skib
1189235783Skib	for (i = 0; i < entry->buf_count; i++) {
1190235783Skib		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1191235783Skib	}
1192235783Skib
1193235783Skib	dma->buf_count += entry->buf_count;
1194280183Sdumbbell	dma->seg_count += entry->seg_count;
1195280183Sdumbbell	dma->page_count += byte_count >> PAGE_SHIFT;
1196235783Skib	dma->byte_count += byte_count;
1197235783Skib
1198235783Skib	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1199235783Skib	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1200235783Skib
1201280183Sdumbbell	DRM_UNLOCK(dev);
1202280183Sdumbbell
1203235783Skib	request->count = entry->buf_count;
1204235783Skib	request->size = size;
1205235783Skib
1206235783Skib	dma->flags = _DRM_DMA_USE_SG;
1207235783Skib
1208280183Sdumbbell	atomic_dec(&dev->buf_alloc);
1209235783Skib	return 0;
1210235783Skib}
1211235783Skib
1212280183Sdumbbellstatic int drm_addbufs_fb(struct drm_device * dev, struct drm_buf_desc * request)
1213235783Skib{
1214280183Sdumbbell	struct drm_device_dma *dma = dev->dma;
1215280183Sdumbbell	struct drm_buf_entry *entry;
1216280183Sdumbbell	struct drm_buf *buf;
1217280183Sdumbbell	unsigned long offset;
1218280183Sdumbbell	unsigned long agp_offset;
1219280183Sdumbbell	int count;
1220280183Sdumbbell	int order;
1221280183Sdumbbell	int size;
1222280183Sdumbbell	int alignment;
1223280183Sdumbbell	int page_order;
1224280183Sdumbbell	int total;
1225280183Sdumbbell	int byte_count;
1226280183Sdumbbell	int i;
1227280183Sdumbbell	struct drm_buf **temp_buflist;
1228235783Skib
1229280183Sdumbbell	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
1230280183Sdumbbell		return -EINVAL;
1231235783Skib
1232280183Sdumbbell	if (!dma)
1233280183Sdumbbell		return -EINVAL;
1234235783Skib
1235280183Sdumbbell	if (!DRM_SUSER(DRM_CURPROC))
1236280183Sdumbbell		return -EPERM;
1237235783Skib
1238280183Sdumbbell	count = request->count;
1239280183Sdumbbell	order = drm_order(request->size);
1240280183Sdumbbell	size = 1 << order;
1241235783Skib
1242280183Sdumbbell	alignment = (request->flags & _DRM_PAGE_ALIGN)
1243280183Sdumbbell	    ? PAGE_ALIGN(size) : size;
1244280183Sdumbbell	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
1245280183Sdumbbell	total = PAGE_SIZE << page_order;
1246235783Skib
1247280183Sdumbbell	byte_count = 0;
1248280183Sdumbbell	agp_offset = request->agp_start;
1249235783Skib
1250280183Sdumbbell	DRM_DEBUG("count:      %d\n", count);
1251280183Sdumbbell	DRM_DEBUG("order:      %d\n", order);
1252280183Sdumbbell	DRM_DEBUG("size:       %d\n", size);
1253280183Sdumbbell	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
1254280183Sdumbbell	DRM_DEBUG("alignment:  %d\n", alignment);
1255280183Sdumbbell	DRM_DEBUG("page_order: %d\n", page_order);
1256280183Sdumbbell	DRM_DEBUG("total:      %d\n", total);
1257235783Skib
1258235783Skib	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1259280183Sdumbbell		return -EINVAL;
1260235783Skib
1261280183Sdumbbell	mtx_lock(&dev->count_lock);
1262280183Sdumbbell	if (dev->buf_use) {
1263280183Sdumbbell		mtx_unlock(&dev->count_lock);
1264280183Sdumbbell		return -EBUSY;
1265280183Sdumbbell	}
1266280183Sdumbbell	atomic_inc(&dev->buf_alloc);
1267280183Sdumbbell	mtx_unlock(&dev->count_lock);
1268235783Skib
1269280183Sdumbbell	DRM_LOCK(dev);
1270280183Sdumbbell	entry = &dma->bufs[order];
1271280183Sdumbbell	if (entry->buf_count) {
1272280183Sdumbbell		DRM_UNLOCK(dev);
1273280183Sdumbbell		atomic_dec(&dev->buf_alloc);
1274280183Sdumbbell		return -ENOMEM;	/* May only call once for each order */
1275235783Skib	}
1276280183Sdumbbell
1277280183Sdumbbell	if (count < 0 || count > 4096) {
1278280183Sdumbbell		DRM_UNLOCK(dev);
1279280183Sdumbbell		atomic_dec(&dev->buf_alloc);
1280280183Sdumbbell		return -EINVAL;
1281235783Skib	}
1282235783Skib
1283280183Sdumbbell	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
1284280183Sdumbbell	    M_NOWAIT | M_ZERO);
1285280183Sdumbbell	if (!entry->buflist) {
1286280183Sdumbbell		DRM_UNLOCK(dev);
1287280183Sdumbbell		atomic_dec(&dev->buf_alloc);
1288280183Sdumbbell		return -ENOMEM;
1289280183Sdumbbell	}
1290235783Skib
1291280183Sdumbbell	entry->buf_size = size;
1292280183Sdumbbell	entry->page_order = page_order;
1293235783Skib
1294280183Sdumbbell	offset = 0;
1295235783Skib
1296280183Sdumbbell	while (entry->buf_count < count) {
1297280183Sdumbbell		buf = &entry->buflist[entry->buf_count];
1298280183Sdumbbell		buf->idx = dma->buf_count + entry->buf_count;
1299280183Sdumbbell		buf->total = alignment;
1300280183Sdumbbell		buf->order = order;
1301280183Sdumbbell		buf->used = 0;
1302235783Skib
1303280183Sdumbbell		buf->offset = (dma->byte_count + offset);
1304280183Sdumbbell		buf->bus_address = agp_offset + offset;
1305280183Sdumbbell		buf->address = (void *)(agp_offset + offset);
1306280183Sdumbbell		buf->next = NULL;
1307280183Sdumbbell		buf->waiting = 0;
1308280183Sdumbbell		buf->pending = 0;
1309280183Sdumbbell		buf->file_priv = NULL;
1310235783Skib
1311280183Sdumbbell		buf->dev_priv_size = dev->driver->dev_priv_size;
1312280183Sdumbbell		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
1313280183Sdumbbell		    M_NOWAIT | M_ZERO);
1314280183Sdumbbell		if (!buf->dev_private) {
1315280183Sdumbbell			/* Set count correctly so we free the proper amount. */
1316280183Sdumbbell			entry->buf_count = count;
1317280183Sdumbbell			drm_cleanup_buf_error(dev, entry);
1318280183Sdumbbell			DRM_UNLOCK(dev);
1319280183Sdumbbell			atomic_dec(&dev->buf_alloc);
1320280183Sdumbbell			return -ENOMEM;
1321280183Sdumbbell		}
1322235783Skib
1323280183Sdumbbell		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
1324235783Skib
1325280183Sdumbbell		offset += alignment;
1326280183Sdumbbell		entry->buf_count++;
1327280183Sdumbbell		byte_count += PAGE_SIZE << page_order;
1328280183Sdumbbell	}
1329235783Skib
1330280183Sdumbbell	DRM_DEBUG("byte_count: %d\n", byte_count);
1331280183Sdumbbell
1332280183Sdumbbell	temp_buflist = realloc(dma->buflist,
1333280183Sdumbbell	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
1334280183Sdumbbell	    DRM_MEM_BUFS, M_NOWAIT);
1335280183Sdumbbell	if (!temp_buflist) {
1336280183Sdumbbell		/* Free the entry because it isn't valid */
1337280183Sdumbbell		drm_cleanup_buf_error(dev, entry);
1338280183Sdumbbell		DRM_UNLOCK(dev);
1339280183Sdumbbell		atomic_dec(&dev->buf_alloc);
1340280183Sdumbbell		return -ENOMEM;
1341235783Skib	}
1342280183Sdumbbell	dma->buflist = temp_buflist;
1343280183Sdumbbell
1344280183Sdumbbell	for (i = 0; i < entry->buf_count; i++) {
1345280183Sdumbbell		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
1346235783Skib	}
1347235783Skib
1348280183Sdumbbell	dma->buf_count += entry->buf_count;
1349280183Sdumbbell	dma->seg_count += entry->seg_count;
1350280183Sdumbbell	dma->page_count += byte_count >> PAGE_SHIFT;
1351280183Sdumbbell	dma->byte_count += byte_count;
1352235783Skib
1353280183Sdumbbell	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
1354280183Sdumbbell	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
1355235783Skib
1356280183Sdumbbell	DRM_UNLOCK(dev);
1357280183Sdumbbell
1358280183Sdumbbell	request->count = entry->buf_count;
1359280183Sdumbbell	request->size = size;
1360280183Sdumbbell
1361280183Sdumbbell	dma->flags = _DRM_DMA_USE_FB;
1362280183Sdumbbell
1363280183Sdumbbell	atomic_dec(&dev->buf_alloc);
1364280183Sdumbbell	return 0;
1365235783Skib}
1366235783Skib
1367280183Sdumbbell
1368280183Sdumbbell/**
1369280183Sdumbbell * Add buffers for DMA transfers (ioctl).
1370280183Sdumbbell *
1371280183Sdumbbell * \param inode device inode.
1372280183Sdumbbell * \param file_priv DRM file private.
1373280183Sdumbbell * \param cmd command.
1374280183Sdumbbell * \param arg pointer to a struct drm_buf_desc request.
1375280183Sdumbbell * \return zero on success or a negative number on failure.
1376280183Sdumbbell *
1377280183Sdumbbell * According with the memory type specified in drm_buf_desc::flags and the
1378280183Sdumbbell * build options, it dispatches the call either to addbufs_agp(),
1379280183Sdumbbell * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
1380280183Sdumbbell * PCI memory respectively.
1381280183Sdumbbell */
1382280183Sdumbbellint drm_addbufs(struct drm_device *dev, void *data,
1383280183Sdumbbell		struct drm_file *file_priv)
1384235783Skib{
1385235783Skib	struct drm_buf_desc *request = data;
1386280183Sdumbbell	int ret;
1387235783Skib
1388280183Sdumbbell	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1389280183Sdumbbell		return -EINVAL;
1390280183Sdumbbell
1391280183Sdumbbell#if __OS_HAS_AGP
1392235783Skib	if (request->flags & _DRM_AGP_BUFFER)
1393280183Sdumbbell		ret = drm_addbufs_agp(dev, request);
1394235783Skib	else
1395280183Sdumbbell#endif
1396280183Sdumbbell	if (request->flags & _DRM_SG_BUFFER)
1397280183Sdumbbell		ret = drm_addbufs_sg(dev, request);
1398280183Sdumbbell	else if (request->flags & _DRM_FB_BUFFER)
1399280183Sdumbbell		ret = drm_addbufs_fb(dev, request);
1400280183Sdumbbell	else
1401280183Sdumbbell		ret = drm_addbufs_pci(dev, request);
1402235783Skib
1403280183Sdumbbell	return ret;
1404235783Skib}
1405235783Skib
1406280183Sdumbbell/**
1407280183Sdumbbell * Get information about the buffer mappings.
1408280183Sdumbbell *
1409280183Sdumbbell * This was originally mean for debugging purposes, or by a sophisticated
1410280183Sdumbbell * client library to determine how best to use the available buffers (e.g.,
1411280183Sdumbbell * large buffers can be used for image transfer).
1412280183Sdumbbell *
1413280183Sdumbbell * \param inode device inode.
1414280183Sdumbbell * \param file_priv DRM file private.
1415280183Sdumbbell * \param cmd command.
1416280183Sdumbbell * \param arg pointer to a drm_buf_info structure.
1417280183Sdumbbell * \return zero on success or a negative number on failure.
1418280183Sdumbbell *
1419280183Sdumbbell * Increments drm_device::buf_use while holding the drm_device::count_lock
1420280183Sdumbbell * lock, preventing of allocating more buffers after this call. Information
1421280183Sdumbbell * about each requested buffer is then copied into user space.
1422280183Sdumbbell */
1423280183Sdumbbellint drm_infobufs(struct drm_device *dev, void *data,
1424280183Sdumbbell		 struct drm_file *file_priv)
1425235783Skib{
1426280183Sdumbbell	struct drm_device_dma *dma = dev->dma;
1427235783Skib	struct drm_buf_info *request = data;
1428235783Skib	int i;
1429235783Skib	int count;
1430235783Skib
1431280183Sdumbbell	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1432280183Sdumbbell		return -EINVAL;
1433280183Sdumbbell
1434280183Sdumbbell	if (!dma)
1435280183Sdumbbell		return -EINVAL;
1436280183Sdumbbell
1437280183Sdumbbell	mtx_lock(&dev->count_lock);
1438280183Sdumbbell	if (atomic_read(&dev->buf_alloc)) {
1439280183Sdumbbell		mtx_unlock(&dev->count_lock);
1440280183Sdumbbell		return -EBUSY;
1441280183Sdumbbell	}
1442235783Skib	++dev->buf_use;		/* Can't allocate more after this call */
1443280183Sdumbbell	mtx_unlock(&dev->count_lock);
1444235783Skib
1445235783Skib	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1446235783Skib		if (dma->bufs[i].buf_count)
1447235783Skib			++count;
1448235783Skib	}
1449235783Skib
1450235783Skib	DRM_DEBUG("count = %d\n", count);
1451235783Skib
1452235783Skib	if (request->count >= count) {
1453235783Skib		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1454235783Skib			if (dma->bufs[i].buf_count) {
1455280183Sdumbbell				struct drm_buf_desc __user *to =
1456280183Sdumbbell				    &request->list[count];
1457280183Sdumbbell				struct drm_buf_entry *from = &dma->bufs[i];
1458280183Sdumbbell				struct drm_freelist *list = &dma->bufs[i].freelist;
1459280183Sdumbbell				if (copy_to_user(&to->count,
1460280183Sdumbbell						 &from->buf_count,
1461280183Sdumbbell						 sizeof(from->buf_count)) ||
1462280183Sdumbbell				    copy_to_user(&to->size,
1463280183Sdumbbell						 &from->buf_size,
1464280183Sdumbbell						 sizeof(from->buf_size)) ||
1465280183Sdumbbell				    copy_to_user(&to->low_mark,
1466280183Sdumbbell						 &list->low_mark,
1467280183Sdumbbell						 sizeof(list->low_mark)) ||
1468280183Sdumbbell				    copy_to_user(&to->high_mark,
1469280183Sdumbbell						 &list->high_mark,
1470280183Sdumbbell						 sizeof(list->high_mark)))
1471280183Sdumbbell					return -EFAULT;
1472235783Skib
1473235783Skib				DRM_DEBUG("%d %d %d %d %d\n",
1474280183Sdumbbell					  i,
1475280183Sdumbbell					  dma->bufs[i].buf_count,
1476280183Sdumbbell					  dma->bufs[i].buf_size,
1477280183Sdumbbell					  dma->bufs[i].freelist.low_mark,
1478280183Sdumbbell					  dma->bufs[i].freelist.high_mark);
1479235783Skib				++count;
1480235783Skib			}
1481235783Skib		}
1482235783Skib	}
1483235783Skib	request->count = count;
1484235783Skib
1485280183Sdumbbell	return 0;
1486235783Skib}
1487235783Skib
1488280183Sdumbbell/**
1489280183Sdumbbell * Specifies a low and high water mark for buffer allocation
1490280183Sdumbbell *
1491280183Sdumbbell * \param inode device inode.
1492280183Sdumbbell * \param file_priv DRM file private.
1493280183Sdumbbell * \param cmd command.
1494280183Sdumbbell * \param arg a pointer to a drm_buf_desc structure.
1495280183Sdumbbell * \return zero on success or a negative number on failure.
1496280183Sdumbbell *
1497280183Sdumbbell * Verifies that the size order is bounded between the admissible orders and
1498280183Sdumbbell * updates the respective drm_device_dma::bufs entry low and high water mark.
1499280183Sdumbbell *
1500280183Sdumbbell * \note This ioctl is deprecated and mostly never used.
1501280183Sdumbbell */
1502280183Sdumbbellint drm_markbufs(struct drm_device *dev, void *data,
1503280183Sdumbbell		 struct drm_file *file_priv)
1504235783Skib{
1505280183Sdumbbell	struct drm_device_dma *dma = dev->dma;
1506235783Skib	struct drm_buf_desc *request = data;
1507235783Skib	int order;
1508280183Sdumbbell	struct drm_buf_entry *entry;
1509235783Skib
1510280183Sdumbbell	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1511280183Sdumbbell		return -EINVAL;
1512280183Sdumbbell
1513280183Sdumbbell	if (!dma)
1514280183Sdumbbell		return -EINVAL;
1515280183Sdumbbell
1516235783Skib	DRM_DEBUG("%d, %d, %d\n",
1517235783Skib		  request->size, request->low_mark, request->high_mark);
1518280183Sdumbbell	order = drm_order(request->size);
1519280183Sdumbbell	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1520280183Sdumbbell		return -EINVAL;
1521280183Sdumbbell	entry = &dma->bufs[order];
1522235783Skib
1523280183Sdumbbell	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
1524280183Sdumbbell		return -EINVAL;
1525280183Sdumbbell	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
1526280183Sdumbbell		return -EINVAL;
1527235783Skib
1528280183Sdumbbell	entry->freelist.low_mark = request->low_mark;
1529280183Sdumbbell	entry->freelist.high_mark = request->high_mark;
1530235783Skib
1531235783Skib	return 0;
1532235783Skib}
1533235783Skib
1534280183Sdumbbell/**
1535280183Sdumbbell * Unreserve the buffers in list, previously reserved using drmDMA.
1536280183Sdumbbell *
1537280183Sdumbbell * \param inode device inode.
1538280183Sdumbbell * \param file_priv DRM file private.
1539280183Sdumbbell * \param cmd command.
1540280183Sdumbbell * \param arg pointer to a drm_buf_free structure.
1541280183Sdumbbell * \return zero on success or a negative number on failure.
1542280183Sdumbbell *
1543280183Sdumbbell * Calls free_buffer() for each used buffer.
1544280183Sdumbbell * This function is primarily used for debugging.
1545280183Sdumbbell */
1546280183Sdumbbellint drm_freebufs(struct drm_device *dev, void *data,
1547280183Sdumbbell		 struct drm_file *file_priv)
1548235783Skib{
1549280183Sdumbbell	struct drm_device_dma *dma = dev->dma;
1550235783Skib	struct drm_buf_free *request = data;
1551235783Skib	int i;
1552235783Skib	int idx;
1553280183Sdumbbell	struct drm_buf *buf;
1554235783Skib
1555280183Sdumbbell	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1556280183Sdumbbell		return -EINVAL;
1557280183Sdumbbell
1558280183Sdumbbell	if (!dma)
1559280183Sdumbbell		return -EINVAL;
1560280183Sdumbbell
1561235783Skib	DRM_DEBUG("%d\n", request->count);
1562235783Skib	for (i = 0; i < request->count; i++) {
1563280183Sdumbbell		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
1564280183Sdumbbell			return -EFAULT;
1565235783Skib		if (idx < 0 || idx >= dma->buf_count) {
1566235783Skib			DRM_ERROR("Index %d (of %d max)\n",
1567280183Sdumbbell				  idx, dma->buf_count - 1);
1568280183Sdumbbell			return -EINVAL;
1569235783Skib		}
1570235783Skib		buf = dma->buflist[idx];
1571235783Skib		if (buf->file_priv != file_priv) {
1572235783Skib			DRM_ERROR("Process %d freeing buffer not owned\n",
1573280183Sdumbbell				  DRM_CURRENTPID);
1574280183Sdumbbell			return -EINVAL;
1575235783Skib		}
1576235783Skib		drm_free_buffer(dev, buf);
1577235783Skib	}
1578235783Skib
1579280183Sdumbbell	return 0;
1580235783Skib}
1581235783Skib
/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param dev DRM device.
 * \param data pointer to a drm_buf_map structure (in/out).
 * \param file_priv DRM file private.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
 * about each buffer into user space. For PCI buffers, it calls vm_mmap() with
 * offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls
 * drm_mmap_dma().
 */
int drm_mapbufs(struct drm_device *dev, void *data,
	        struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;	/* copied out to clear each buffer's "used" field */
	vm_offset_t virtual;	/* user-space base of the new mapping */
	vm_offset_t address;
	struct vmspace *vms;
	struct drm_buf_map *request = data;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	/*
	 * Refuse to map while an allocation is in flight, and bump buf_use
	 * under count_lock so that no further allocation can begin once the
	 * buffers have been handed out to user space.
	 */
	mtx_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		mtx_unlock(&dev->count_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	mtx_unlock(&dev->count_lock);

	vms = DRM_CURPROC->td_proc->p_vmspace;

	/* Only map when the caller asked for at least all existing buffers. */
	if (request->count >= dma->buf_count) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			/* AGP/SG/FB: map the shared region at its token offset. */
			struct drm_local_map *map = dev->agp_buffer_map;
			vm_ooffset_t token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			retcode = vm_mmap(&vms->vm_map, &virtual, map->size,
			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL,
			    MAP_SHARED | MAP_NOSYNC, OBJT_DEVICE,
			    file_priv->minor->device, token);
		} else {
			/* PCI: offset 0 makes drm_mmap() map the DMA buffers. */
			retcode = vm_mmap(&vms->vm_map, &virtual, dma->byte_count,
			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL,
			    MAP_SHARED | MAP_NOSYNC, OBJT_DEVICE,
			    file_priv->minor->device, 0);
		}
		if (retcode) {
			/* Real error */
			retcode = -retcode;	/* convert positive errno to negative convention */
			goto done;
		}
		request->virtual = (void __user *)virtual;

		/* Describe every buffer (index, size, used=0, address) to user space. */
		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request->list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request->list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request->list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			/* User-visible address = mapping base + buffer offset. */
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request->list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	/* Always report the actual buffer count, even on failure. */
	request->count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}
1686235783Skib
/**
 * Compute size order.  Returns ceil(log2(size)): the exponent of the
 * smallest power of two which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
	int log2 = 0;
	unsigned long bits;

	/* Count how often size can be halved: floor(log2(size)). */
	for (bits = size >> 1; bits != 0; bits >>= 1)
		++log2;

	/* Round up unless size is an exact power of two (or zero). */
	if ((size & (size - 1)) != 0)
		++log2;

	return log2;
}
1708280183SdumbbellEXPORT_SYMBOL(drm_order);
1709