/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/drm/drm_bufs.c 189561 2009-03-09 07:49:13Z rnoland $");

/** @file drm_bufs.c
 * Implementation of the ioctls for setup of DRM mappings and DMA buffers.
 */

#include "dev/pci/pcireg.h"

#include "dev/drm/drmP.h"

/* Allocation of PCI memory resources (framebuffer, registers, etc.) for
 * drm_get_resource_*.  Note that they are not RF_ACTIVE, so there's no virtual
 * address for accessing them.  Cleaned up at unload.
 */
static int drm_alloc_resource(struct drm_device *dev, int resource)
{
	if (resource >= DRM_MAX_PCI_RESOURCE) {
		DRM_ERROR("Resource %d too large\n", resource);
		return 1;
	}

	DRM_UNLOCK();
	if (dev->pcir[resource] != NULL) {
		DRM_LOCK();
		return 0;
	}

	dev->pcirid[resource] = PCIR_BAR(resource);
	dev->pcir[resource] = bus_alloc_resource_any(dev->device,
	    SYS_RES_MEMORY, &dev->pcirid[resource], RF_SHAREABLE);
	DRM_LOCK();

	if (dev->pcir[resource] == NULL) {
		DRM_ERROR("Couldn't find resource 0x%x\n", resource);
		return 1;
	}

	return 0;
}

unsigned long drm_get_resource_start(struct drm_device *dev,
				     unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

	return rman_get_start(dev->pcir[resource]);
}

unsigned long drm_get_resource_len(struct drm_device *dev,
				   unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

	return rman_get_size(dev->pcir[resource]);
}
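
/*
 * Usage sketch for drm_get_resource_start()/drm_get_resource_len()
 * (illustrative, not part of the upstream code): a driver attach routine
 * might locate its register BAR with something like
 *
 *	regs_base = drm_get_resource_start(dev, 0);
 *	regs_len  = drm_get_resource_len(dev, 0);
 *
 * where the BAR index 0 is only an example; both helpers return 0 if the
 * resource cannot be allocated.
 */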

int drm_addmap(struct drm_device * dev, unsigned long offset,
	       unsigned long size,
    enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t **map_ptr)
{
	drm_local_map_t *map;
	int align;
	/*drm_agp_mem_t *entry;
	int valid;*/

	/* Only allow shared memory to be removable, since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
		DRM_ERROR("Requested removable map for non-DRM_SHM\n");
		return EINVAL;
	}
	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
		DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}
	if (offset + size < offset) {
		DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}

	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset,
	    size, type);

	/* Check if this is just another version of a kernel-allocated map, and
	 * just hand that back if so.
	 */
	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
	    type == _DRM_SHM) {
		TAILQ_FOREACH(map, &dev->maplist, link) {
			if (map->type == type && (map->offset == offset ||
			    (map->type == _DRM_SHM &&
			    map->flags == _DRM_CONTAINS_LOCK))) {
				map->size = size;
				DRM_DEBUG("Found kernel map %d\n", type);
				goto done;
			}
		}
	}
	DRM_UNLOCK();

	/* Allocate a new map structure, fill it in, and do any type-specific
	 * initialization necessary.
	 */
	map = malloc(sizeof(*map), DRM_MEM_MAPS, M_ZERO | M_NOWAIT);
	if (!map) {
		DRM_LOCK();
		return ENOMEM;
	}

	map->offset = offset;
	map->size = size;
	map->type = type;
	map->flags = flags;

	switch (map->type) {
	case _DRM_REGISTERS:
		map->handle = drm_ioremap(dev, map);
		if (!(map->flags & _DRM_WRITE_COMBINING))
			break;
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
			map->mtrr = 1;
		break;
	case _DRM_SHM:
		map->handle = malloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
		DRM_DEBUG("%lu %d %p\n",
		    map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			DRM_LOCK();
			if (dev->lock.hw_lock != NULL) {
				DRM_UNLOCK();
				free(map->handle, DRM_MEM_MAPS);
				free(map, DRM_MEM_MAPS);
				return EBUSY;
			}
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
			DRM_UNLOCK();
		}
		break;
	case _DRM_AGP:
		/*valid = 0;*/
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->info.ai_aperture_size - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr   = dev->agp->mtrr; /* for getmap */
		/*for (entry = dev->agp->memory; entry; entry = entry->next) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <=
			    entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!valid) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return EACCES;
		}*/
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return EINVAL;
		}
		map->offset = map->offset + dev->sg->handle;
		break;
	case _DRM_CONSISTENT:
		/* Unfortunately, we don't get any alignment specification from
		 * the caller, so we have to guess.  drm_pci_alloc requires
		 * a power-of-two alignment, so try to align the bus address of
		 * the map to its size if possible, otherwise just assume
		 * PAGE_SIZE alignment.
		 */
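		/*
		 * For example (illustrative values only): a 64KB map is a
		 * power of two and keeps its 64KB alignment, while a 96KB map
		 * is not and falls back to PAGE_SIZE alignment below.
		 */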
		align = map->size;
		if ((align & (align - 1)) != 0)
			align = PAGE_SIZE;
		map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
		if (map->dmah == NULL) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return ENOMEM;
		}
		map->handle = map->dmah->vaddr;
		map->offset = map->dmah->busaddr;
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		free(map, DRM_MEM_MAPS);
		DRM_LOCK();
		return EINVAL;
	}

	DRM_LOCK();
	TAILQ_INSERT_TAIL(&dev->maplist, map, link);

done:
	/* Jumped to, with lock held, when a kernel map is found. */

	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
	    map->size);

	*map_ptr = map;

	return 0;
}
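
/*
 * Usage sketch (illustrative, not taken from this file): a driver load
 * routine typically creates its register and shared-memory mappings with
 * drm_addmap() while holding the DRM lock, much as drm_addmap_ioctl()
 * below does, e.g.
 *
 *	drm_local_map_t *regs, *sarea;
 *
 *	ret = drm_addmap(dev, base, size, _DRM_REGISTERS, 0, &regs);
 *	if (ret == 0)
 *		ret = drm_addmap(dev, 0, sarea_size, _DRM_SHM,
 *		    _DRM_CONTAINS_LOCK, &sarea);
 *
 * The names, base address and sizes above are hypothetical.
 */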

int drm_addmap_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_map *request = data;
	drm_local_map_t *map;
	int err;

	if (!(dev->flags & (FREAD|FWRITE)))
		return EACCES; /* Require read/write */

	if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP)
		return EACCES;

	DRM_LOCK();
	err = drm_addmap(dev, request->offset, request->size, request->type,
	    request->flags, &map);
	DRM_UNLOCK();
	if (err != 0)
		return err;

	request->offset = map->offset;
	request->size = map->size;
	request->type = map->type;
	request->flags = map->flags;
	request->mtrr   = map->mtrr;
	request->handle = map->handle;

	if (request->type != _DRM_SHM) {
		request->handle = (void *)request->offset;
	}

	return 0;
}

void drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
{
	DRM_SPINLOCK_ASSERT(&dev->dev_lock);

	TAILQ_REMOVE(&dev->maplist, map, link);

	switch (map->type) {
	case _DRM_REGISTERS:
		if (map->bsr == NULL)
			drm_ioremapfree(map);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (map->mtrr) {
			int __unused retcode;

			retcode = drm_mtrr_del(0, map->offset, map->size,
			    DRM_MTRR_WC);
			DRM_DEBUG("mtrr_del = %d\n", retcode);
		}
		break;
	case _DRM_SHM:
		free(map->handle, DRM_MEM_MAPS);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		drm_pci_free(dev, map->dmah);
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		break;
	}

	if (map->bsr != NULL) {
		bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid,
		    map->bsr);
	}

	free(map, DRM_MEM_MAPS);
}

/* Remove a map from the maplist and deallocate its resources if the mapping
 * isn't in use.
 */

int drm_rmmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_local_map_t *map;
	struct drm_map *request = data;

	DRM_LOCK();
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (map->handle == request->handle &&
		    map->flags & _DRM_REMOVABLE)
			break;
	}

	/* No match found. */
	if (map == NULL) {
		DRM_UNLOCK();
		return EINVAL;
	}

	drm_rmmap(dev, map);

	DRM_UNLOCK();

	return 0;
}


static void drm_cleanup_buf_error(struct drm_device *dev,
				  drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			drm_pci_free(dev, entry->seglist[i]);
		}
		free(entry->seglist, DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			free(entry->buflist[i].dev_private, DRM_MEM_BUFS);
		}
		free(entry->buflist, DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}

static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	/*drm_agp_mem_t *agp_entry;
	int valid*/
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment  = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;
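	/*
	 * Note: unlike the PCI path in drm_do_addbufs_pci() below, no DMA
	 * memory is allocated here; the buffers simply carve up the
	 * already-bound AGP region starting at request->agp_start.
	 */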

	DRM_DEBUG("count:      %d\n",  count);
	DRM_DEBUG("order:      %d\n",  order);
	DRM_DEBUG("size:       %d\n",  size);
	DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n",  alignment);
	DRM_DEBUG("page_order: %d\n",  page_order);
	DRM_DEBUG("total:      %d\n",  total);

	/* Make sure buffers are located in AGP memory that we own */
	/* Breaks MGA due to drm_alloc_agp not setting up entries for the
	 * memory.  Safe to ignore for now because these ioctls are still
	 * root-only.
	 */
	/*valid = 0;
	for (agp_entry = dev->agp->memory; agp_entry;
	    agp_entry = agp_entry->next) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <=
		    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!valid) {
		DRM_DEBUG("zone invalid\n");
		return EINVAL;
	}*/

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	if (!entry->buflist) {
		return ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf          = &entry->buflist[entry->buf_count];
		buf->idx     = dma->buf_count + entry->buf_count;
		buf->total   = alignment;
		buf->order   = order;
		buf->used    = 0;

		buf->offset  = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next    = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return ENOMEM;
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	return 0;
}

static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
	    request->count, request->size, size, order);

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
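
	/*
	 * Worked example (assuming 4KB pages, PAGE_SHIFT == 12): a request
	 * size of 0x10000 yields order 16, size 64KB, page_order 4 and
	 * total 64KB; with _DRM_PAGE_ALIGN set the per-buffer alignment is
	 * round_page(size), otherwise it is size itself.
	 */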

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	entry->seglist = malloc(count * sizeof(*entry->seglist), DRM_MEM_SEGS,
	    M_NOWAIT | M_ZERO);

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = malloc((dma->page_count + (count << page_order)) *
	    sizeof(*dma->pagelist), DRM_MEM_PAGES, M_NOWAIT);

	if (entry->buflist == NULL || entry->seglist == NULL ||
	    temp_pagelist == NULL) {
		free(temp_pagelist, DRM_MEM_PAGES);
		free(entry->seglist, DRM_MEM_SEGS);
		free(entry->buflist, DRM_MEM_BUFS);
		return ENOMEM;
	}

	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
	    sizeof(*dma->pagelist));

	DRM_DEBUG("pagelist: %d entries\n",
	    dma->page_count + (count << page_order));

	entry->buf_size	= size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment,
		    0xfffffffful);
		DRM_SPINLOCK(&dev->dma_lock);
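		/*
		 * The DMA spinlock is dropped around drm_pci_alloc() above,
		 * presumably so that the potentially blocking DMA allocation
		 * is not performed while the lock is held; the handle is
		 * checked before use below.
		 */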
		if (dmah == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			free(temp_pagelist, DRM_MEM_PAGES);
			return ENOMEM;
		}

		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ %p\n",
			    dma->page_count + page_count,
			    (char *)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++] =
			    (long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		    offset + size <= total && entry->buf_count < count;
		    offset += alignment, ++entry->buf_count) {
			buf	     = &entry->buflist[entry->buf_count];
			buf->idx     = dma->buf_count + entry->buf_count;
			buf->total   = alignment;
			buf->order   = order;
			buf->used    = 0;
			buf->offset  = (dma->byte_count + byte_count + offset);
			buf->address = ((char *)dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next    = NULL;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->buf_priv_size;
			buf->dev_private = malloc(buf->dev_priv_size,
			    DRM_MEM_BUFS, M_NOWAIT | M_ZERO);
			if (buf->dev_private == NULL) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				free(temp_pagelist, DRM_MEM_PAGES);
				return ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
			    entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		free(temp_pagelist, DRM_MEM_PAGES);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	free(dma->pagelist, DRM_MEM_PAGES);
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	request->count = entry->buf_count;
	request->size = size;

	return 0;

}

static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment  = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n",  count);
	DRM_DEBUG("order:      %d\n",  order);
	DRM_DEBUG("size:       %d\n",  size);
	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n",  alignment);
	DRM_DEBUG("page_order: %d\n",  page_order);
	DRM_DEBUG("total:      %d\n",  total);

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	if (entry->buflist == NULL)
		return ENOMEM;

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf          = &entry->buflist[entry->buf_count];
		buf->idx     = dma->buf_count + entry->buf_count;
		buf->total   = alignment;
		buf->order   = order;
		buf->used    = 0;

		buf->offset  = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		buf->next    = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n",
		    entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	return 0;
}

int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_agp(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}

int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (!DRM_SUSER(DRM_CURPROC))
		return EACCES;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_sg(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}

int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (!DRM_SUSER(DRM_CURPROC))
		return EACCES;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_pci(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}

int drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int err;

	if (request->flags & _DRM_AGP_BUFFER)
		err = drm_addbufs_agp(dev, request);
	else if (request->flags & _DRM_SG_BUFFER)
		err = drm_addbufs_sg(dev, request);
	else
		err = drm_addbufs_pci(dev, request);

	return err;
}
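
/*
 * From user space these requests normally arrive via the DRM_IOCTL_ADD_BUFS
 * ioctl; with libdrm the call looks roughly like (sketch, values are
 * illustrative):
 *
 *	count = drmAddBufs(fd, 64, 0x10000, DRM_AGP_BUFFER, agp_start);
 *
 * which reaches drm_addbufs() above with the corresponding _DRM_AGP_BUFFER
 * flag set.
 */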

int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;
	int retcode = 0;

	DRM_SPINLOCK(&dev->dma_lock);
	++dev->buf_use;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK(&dev->dma_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc from;

				from.count = dma->bufs[i].buf_count;
				from.size = dma->bufs[i].buf_size;
				from.low_mark = dma->bufs[i].freelist.low_mark;
				from.high_mark = dma->bufs[i].freelist.high_mark;

				if (DRM_COPY_TO_USER(&request->list[count], &from,
				    sizeof(struct drm_buf_desc)) != 0) {
					retcode = EFAULT;
					break;
				}

				DRM_DEBUG("%d %d %d %d %d\n",
				    i, dma->bufs[i].buf_count,
				    dma->bufs[i].buf_size,
				    dma->bufs[i].freelist.low_mark,
				    dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return retcode;
}

int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);


	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
	    request->low_mark < 0 || request->high_mark < 0) {
		return EINVAL;
	}

	DRM_SPINLOCK(&dev->dma_lock);
	if (request->low_mark > dma->bufs[order].buf_count ||
	    request->high_mark > dma->bufs[order].buf_count) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EINVAL;
	}

	dma->bufs[order].freelist.low_mark  = request->low_mark;
	dma->bufs[order].freelist.high_mark = request->high_mark;
	DRM_SPINUNLOCK(&dev->dma_lock);

	return 0;
}

int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	drm_buf_t *buf;
	int retcode = 0;

	DRM_DEBUG("%d\n", request->count);

	DRM_SPINLOCK(&dev->dma_lock);
	for (i = 0; i < request->count; i++) {
		if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
			retcode = EFAULT;
			break;
		}
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
			    idx, dma->buf_count - 1);
			retcode = EINVAL;
			break;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
			    DRM_CURRENTPID);
			retcode = EINVAL;
			break;
		}
		drm_free_buffer(dev, buf);
	}
	DRM_SPINUNLOCK(&dev->dma_lock);

	return retcode;
}

int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	vm_offset_t address;
	struct vmspace *vms;
	vm_ooffset_t foff;
	vm_size_t size;
	vm_offset_t vaddr;
	struct drm_buf_map *request = data;
	int i;

	vms = DRM_CURPROC->td_proc->p_vmspace;

	DRM_SPINLOCK(&dev->dma_lock);
	dev->buf_use++;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK(&dev->dma_lock);

	if (request->count < dma->buf_count)
		goto done;

	if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
	    (drm_core_check_feature(dev, DRIVER_SG) &&
	    (dma->flags & _DRM_DMA_USE_SG))) {
		drm_local_map_t *map = dev->agp_buffer_map;

		if (map == NULL) {
			retcode = EINVAL;
			goto done;
		}
		size = round_page(map->size);
		foff = map->offset;
	} else {
		size = round_page(dma->byte_count);
		foff = 0;
	}

	vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
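	/*
	 * The hint address computed above lies just past the largest possible
	 * data segment of the process, so the buffer mapping should not
	 * collide with the heap.
	 */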
#if __FreeBSD_version >= 600023
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC, OBJT_DEVICE,
	    dev->devnode, foff);
#else
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
	    SLIST_FIRST(&dev->devnode->si_hlist), foff);
#endif
	if (retcode)
		goto done;

	request->virtual = (void *)vaddr;

	for (i = 0; i < dma->buf_count; i++) {
		if (DRM_COPY_TO_USER(&request->list[i].idx,
		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].total,
		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
		    sizeof(zero))) {
			retcode = EFAULT;
			goto done;
		}
		address = vaddr + dma->buflist[i]->offset; /* *** */
		if (DRM_COPY_TO_USER(&request->list[i].address, &address,
		    sizeof(address))) {
			retcode = EFAULT;
			goto done;
		}
	}

 done:
	request->count = dma->buf_count;

	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}

/*
 * Compute the binary order (rounded-up log2) of the supplied size.  Can be
 * made faster.
 */
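/*
 * Worked examples: drm_order(1) == 0, drm_order(4096) == 12 and
 * drm_order(4097) == 13; in other words, for non-zero sizes the function
 * returns the smallest order such that (1 << order) >= size.
 */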
int drm_order(unsigned long size)
{
	int order;

	if (size == 0)
		return 0;

	order = flsl(size) - 1;
	if (size & ~(1ul << order))
		++order;

	return order;
}