drm_bufs.c revision 145132
/* drm_bufs.c -- Generic buffer template -*- linux-c -*-
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 */
/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 * $FreeBSD: head/sys/dev/drm/drm_bufs.c 145132 2005-04-16 03:44:47Z anholt $
 */

#include "dev/drm/drmP.h"

/*
 * Compute the order (log2, rounded up) of a size.  Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );

	/* Round up unless size is an exact power of two; size & (size - 1)
	 * is nonzero exactly when a bit below the top bit is set, and it
	 * stays well-defined for orders past the width of an int shift.
	 */
	if ( size & (size - 1) )
		++order;

	return order;
}
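
/*
 * Worked example (illustrative): drm_order(4096) returns 12, since
 * 4096 == 1 << 12 exactly; drm_order(4097) rounds up and returns 13.
 * Callers therefore always get an order whose buffer size covers the
 * requested size.
 */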

unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
{
	struct resource *bsr;
	unsigned long offset;

	resource = resource * 4 + 0x10;

	bsr = bus_alloc_resource_any(dev->device, SYS_RES_MEMORY, &resource,
	    RF_ACTIVE | RF_SHAREABLE);
	if (bsr == NULL) {
		DRM_ERROR("Couldn't find resource 0x%x\n", resource);
		return 0;
	}

	offset = rman_get_start(bsr);

	bus_release_resource(dev->device, SYS_RES_MEMORY, resource, bsr);

	return offset;
}
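
/*
 * Note on the rid arithmetic above (also used by drm_get_resource_len()
 * and drm_initmap() below): it maps a zero-based PCI BAR index to the
 * BAR's config-space register offset, since 32-bit BARs start at 0x10
 * and are four bytes apart (BAR 0 -> rid 0x10, BAR 2 -> rid 0x18).
 * This assumes 32-bit BARs; a 64-bit BAR occupies two slots.
 */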

unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
{
	struct resource *bsr;
	unsigned long len;

	resource = resource * 4 + 0x10;

	bsr = bus_alloc_resource_any(dev->device, SYS_RES_MEMORY, &resource,
	    RF_ACTIVE | RF_SHAREABLE);
	if (bsr == NULL) {
		DRM_ERROR("Couldn't find resource 0x%x\n", resource);
		/* Return a zero length rather than an errno, matching
		 * drm_get_resource_start()'s failure convention.
		 */
		return 0;
	}

	len = rman_get_size(bsr);

	bus_release_resource(dev->device, SYS_RES_MEMORY, resource, bsr);

	return len;
}

int drm_initmap(drm_device_t *dev, unsigned long start, unsigned long len,
		unsigned int resource, int type, int flags)
{
	drm_local_map_t *map;
	struct resource *bsr;

	if (type != _DRM_REGISTERS && type != _DRM_FRAME_BUFFER)
		return EINVAL;
	if (len == 0)
		return EINVAL;

	map = malloc(sizeof(*map), M_DRM, M_ZERO | M_NOWAIT);
	if (map == NULL)
		return ENOMEM;

	map->rid = resource * 4 + 0x10;
	bsr = bus_alloc_resource_any(dev->device, SYS_RES_MEMORY, &map->rid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (bsr == NULL) {
		DRM_ERROR("Couldn't allocate %s resource\n",
		    ((type == _DRM_REGISTERS) ? "mmio" : "framebuffer"));
		free(map, M_DRM);
		return ENOMEM;
	}

	map->kernel_owned = 1;
	map->type = type;
	map->flags = flags;
	map->bsr = bsr;
	map->bst = rman_get_bustag(bsr);
	map->bsh = rman_get_bushandle(bsr);
	map->offset = start;
	map->size = len;

	if (type == _DRM_REGISTERS)
		map->handle = rman_get_virtual(bsr);

	DRM_DEBUG("initmap %d,0x%x@0x%lx/0x%lx\n", map->type, map->flags,
	    map->offset, map->size);

	if (map->flags & _DRM_WRITE_COMBINING) {
		int err;

		err = drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC);
		if (err == 0)
			map->mtrr = 1;
	}

	DRM_LOCK();
	TAILQ_INSERT_TAIL(&dev->maplist, map, link);
	DRM_UNLOCK();

	return 0;
}
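
/*
 * Usage sketch (hypothetical, not from this tree): a driver's attach
 * routine would pre-register its register BAR so that a later
 * DRM_IOCTL_ADD_MAP from the X server is handed the kernel-owned map
 * in drm_addmap() below instead of creating a duplicate:
 *
 *	if (drm_initmap(dev, drm_get_resource_start(dev, 0),
 *	    drm_get_resource_len(dev, 0), 0, _DRM_REGISTERS, 0) != 0)
 *		return ENOMEM;
 */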

int drm_addmap(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_map_t request;
	drm_local_map_t *map;
	dma_addr_t bus_addr;

	if (!(dev->flags & (FREAD|FWRITE)))
		return DRM_ERR(EACCES); /* Require read/write */

	DRM_COPY_FROM_USER_IOCTL( request, (drm_map_t *)data, sizeof(drm_map_t) );

	/* Only allow shared memory to be removable, since we only keep
	 * enough bookkeeping information about shared memory to allow for
	 * removal when processes fork.
	 */
	if ((request.flags & _DRM_REMOVABLE) && request.type != _DRM_SHM)
		return DRM_ERR(EINVAL);
	if ((request.offset & PAGE_MASK) || (request.size & PAGE_MASK))
		return DRM_ERR(EINVAL);
	if (request.offset + request.size < request.offset)
		return DRM_ERR(EINVAL);

	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
	    request.offset, request.size, request.type);

	/* Check if this is just another version of a kernel-allocated map, and
	 * just hand that back if so.
	 */
	if (request.type == _DRM_REGISTERS || request.type == _DRM_FRAME_BUFFER) {
		DRM_LOCK();
		TAILQ_FOREACH(map, &dev->maplist, link) {
			if (map->kernel_owned && map->type == request.type &&
			    map->offset == request.offset) {
				/* XXX: this size setting is questionable. */
				map->size = request.size;
				DRM_DEBUG("Found kernel map %d\n", request.type);
				goto done;
			}
		}
		DRM_UNLOCK();
	}

	/* Allocate a new map structure, fill it in, and do any type-specific
	 * initialization necessary.
	 */
	map = malloc(sizeof(*map), M_DRM, M_ZERO | M_NOWAIT);
	if ( !map )
		return DRM_ERR(ENOMEM);

	map->offset = request.offset;
	map->size = request.size;
	map->type = request.type;
	map->flags = request.flags;

	switch ( map->type ) {
	case _DRM_REGISTERS:
		drm_ioremap(dev, map);
		if (!(map->flags & _DRM_WRITE_COMBINING))
			break;
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
			map->mtrr = 1;
		break;
	case _DRM_SHM:
		map->handle = malloc(map->size, M_DRM, M_NOWAIT);
		DRM_DEBUG( "%lu %d %p\n",
			   map->size, drm_order(map->size), map->handle );
		if ( !map->handle ) {
			free(map, M_DRM);
			return DRM_ERR(ENOMEM);
		}
		map->offset = (unsigned long)map->handle;
		if ( map->flags & _DRM_CONTAINS_LOCK ) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			DRM_LOCK();
			if (dev->lock.hw_lock != NULL) {
				DRM_UNLOCK();
				free(map->handle, M_DRM);
				free(map, M_DRM);
				return DRM_ERR(EBUSY);
			}
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
			DRM_UNLOCK();
		}
		break;
	case _DRM_AGP:
		map->offset += dev->agp->base;
		map->mtrr   = dev->agp->mtrr; /* for getmap */
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			free(map, M_DRM);
			return DRM_ERR(EINVAL);
		}
		map->offset = map->offset + dev->sg->handle;
		break;
	case _DRM_CONSISTENT:
		map->handle = drm_pci_alloc(dev, map->size, map->size,
		    0xfffffffful, &bus_addr);
		if (map->handle == NULL) {
			free(map, M_DRM);
			return DRM_ERR(ENOMEM);
		}
		map->offset = (unsigned long)bus_addr;
		break;
	default:
		free(map, M_DRM);
		return DRM_ERR(EINVAL);
	}

	DRM_LOCK();
	TAILQ_INSERT_TAIL(&dev->maplist, map, link);

done:
	/* Jumped to, with the lock held, when a kernel-owned map is found. */
	request.offset = map->offset;
	request.size = map->size;
	request.type = map->type;
	request.flags = map->flags;
	request.mtrr   = map->mtrr;
	request.handle = map->handle;
	DRM_UNLOCK();

	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", request.type, request.offset,
	    request.size);

	if ( request.type != _DRM_SHM ) {
		request.handle = (void *)request.offset;
	}

	DRM_COPY_TO_USER_IOCTL( (drm_map_t *)data, request, sizeof(drm_map_t) );

	return 0;
}
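
/*
 * Userland reaches drm_addmap() through DRM_IOCTL_ADD_MAP.  A minimal
 * sketch of the calling side (illustrative; fb_base and fb_size are
 * hypothetical values the caller already knows):
 *
 *	drm_map_t map;
 *	memset(&map, 0, sizeof(map));
 *	map.offset = fb_base;
 *	map.size   = fb_size;
 *	map.type   = _DRM_FRAME_BUFFER;
 *	if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map) == 0)
 *		... map.handle now identifies the mapping for mmap() ...
 */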

void drm_remove_map(drm_device_t *dev, drm_local_map_t *map)
{
	DRM_SPINLOCK_ASSERT(&dev->dev_lock);

	TAILQ_REMOVE(&dev->maplist, map, link);

	switch (map->type) {
	case _DRM_REGISTERS:
		if (map->bsr == NULL)
			drm_ioremapfree(map);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (map->mtrr) {
			int __unused retcode;

			retcode = drm_mtrr_del(map->offset, map->size,
			    DRM_MTRR_WC);
			DRM_DEBUG("mtrr_del = %d\n", retcode);
		}
		break;
	case _DRM_SHM:
		free(map->handle, M_DRM);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		drm_pci_free(dev, map->size, map->handle, map->offset);
		break;
	}

	if (map->bsr != NULL) {
		bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid,
		    map->bsr);
	}

	free(map, M_DRM);
}

/* Remove a map from the maplist and deallocate its resources.  Only maps
 * marked _DRM_REMOVABLE may be removed through this ioctl.
 */

int drm_rmmap(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_local_map_t *map;
	drm_map_t request;

	DRM_COPY_FROM_USER_IOCTL( request, (drm_map_t *)data, sizeof(request) );

	DRM_LOCK();
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (map->handle == request.handle &&
		    (map->flags & _DRM_REMOVABLE))
			break;
	}

	/* No match found. */
	if (map == NULL) {
		DRM_UNLOCK();
		return DRM_ERR(EINVAL);
	}

	drm_remove_map(dev, map);

	DRM_UNLOCK();

	return 0;
}

static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			drm_pci_free(dev, entry->buf_size,
			    (void *)entry->seglist[i],
			    entry->seglist_bus[i]);
		}
		free(entry->seglist, M_DRM);
		free(entry->seglist_bus, M_DRM);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			free(entry->buflist[i].dev_private, M_DRM);
		}
		free(entry->buflist, M_DRM);

		entry->buf_count = 0;
	}
}
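
/*
 * Note on the "Set count correctly" pattern in the callers below: on a
 * partial failure they bump buf_count/seg_count up to the full request
 * before calling drm_cleanup_buf_error(), so the loops above also walk
 * the zeroed tail entries.  That relies on the underlying free paths
 * tolerating NULL pointers, as free(9) does.
 */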

static int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment  = (request->flags & _DRM_PAGE_ALIGN)
		? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
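
	/*
	 * Worked example (illustrative): a request for 64KB buffers on a
	 * machine with 4KB pages gives order = 16, size = 65536,
	 * page_order = 16 - 12 = 4, and total = PAGE_SIZE << 4 = 64KB,
	 * i.e. each buffer spans sixteen pages.  Sub-page sizes clamp
	 * page_order to 0.
	 */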

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG( "count:      %d\n",  count );
	DRM_DEBUG( "order:      %d\n",  order );
	DRM_DEBUG( "size:       %d\n",  size );
	DRM_DEBUG( "agp_offset: 0x%lx\n", agp_offset );
	DRM_DEBUG( "alignment:  %d\n",  alignment );
	DRM_DEBUG( "page_order: %d\n",  page_order );
	DRM_DEBUG( "total:      %d\n",  total );

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
	    M_NOWAIT | M_ZERO);
	if ( !entry->buflist ) {
		return DRM_ERR(ENOMEM);
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while ( entry->buf_count < count ) {
		buf          = &entry->buflist[entry->buf_count];
		buf->idx     = dma->buf_count + entry->buf_count;
		buf->total   = alignment;
		buf->order   = order;
		buf->used    = 0;

		buf->offset  = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next    = NULL;
		buf->pending = 0;
		buf->filp    = NULL;

		buf->dev_priv_size = dev->dev_priv_size;
		buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return DRM_ERR(ENOMEM);
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG( "byte_count: %d\n", byte_count );

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM,
	    M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return DRM_ERR(ENOMEM);
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	return 0;
}

static int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	vm_offset_t vaddr;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;
	dma_addr_t bus_addr;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG( "count=%d, size=%d (%d), order=%d\n",
		   request->count, request->size, size, order );

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
	    M_NOWAIT | M_ZERO);
	entry->seglist = malloc(count * sizeof(*entry->seglist), M_DRM,
	    M_NOWAIT | M_ZERO);
	entry->seglist_bus = malloc(count * sizeof(*entry->seglist_bus), M_DRM,
	    M_NOWAIT | M_ZERO);

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = malloc((dma->page_count + (count << page_order)) *
	    sizeof(*dma->pagelist), M_DRM, M_NOWAIT);

	if (entry->buflist == NULL || entry->seglist == NULL ||
	    entry->seglist_bus == NULL || temp_pagelist == NULL) {
		/* free(9) tolerates NULL, so release whichever of the
		 * allocations succeeded, including the temporary pagelist.
		 */
		free(entry->buflist, M_DRM);
		free(entry->seglist, M_DRM);
		free(entry->seglist_bus, M_DRM);
		free(temp_pagelist, M_DRM);
		return DRM_ERR(ENOMEM);
	}

	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
	    sizeof(*dma->pagelist));

	DRM_DEBUG( "pagelist: %d entries\n",
		   dma->page_count + (count << page_order) );

	entry->buf_size	= size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while ( entry->buf_count < count ) {
		vaddr = (vm_offset_t)drm_pci_alloc(dev, size, alignment,
		    0xfffffffful, &bus_addr);
		if (vaddr == 0) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			free(temp_pagelist, M_DRM);
			return DRM_ERR(ENOMEM);
		}

		entry->seglist_bus[entry->seg_count] = bus_addr;
		entry->seglist[entry->seg_count++] = vaddr;
		for ( i = 0 ; i < (1 << page_order) ; i++ ) {
			DRM_DEBUG( "page %d @ 0x%08lx\n",
				   dma->page_count + page_count,
				   (long)vaddr + PAGE_SIZE * i );
			temp_pagelist[dma->page_count + page_count++] =
			    vaddr + PAGE_SIZE * i;
		}
		for ( offset = 0 ;
		      offset + size <= total && entry->buf_count < count ;
		      offset += alignment, ++entry->buf_count ) {
			buf	     = &entry->buflist[entry->buf_count];
			buf->idx     = dma->buf_count + entry->buf_count;
			buf->total   = alignment;
			buf->order   = order;
			buf->used    = 0;
			buf->offset  = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(vaddr + offset);
			buf->bus_address = bus_addr + offset;
			buf->next    = NULL;
			buf->pending = 0;
			buf->filp    = NULL;

			buf->dev_priv_size = dev->dev_priv_size;
			buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
			    M_NOWAIT | M_ZERO);
			if (buf->dev_private == NULL) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				free(temp_pagelist, M_DRM);
				return DRM_ERR(ENOMEM);
			}

			DRM_DEBUG( "buffer %d @ %p\n",
				   entry->buf_count, buf->address );
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM,
	    M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		free(temp_pagelist, M_DRM);
		return DRM_ERR(ENOMEM);
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	free(dma->pagelist, M_DRM);
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	request->count = entry->buf_count;
	request->size = size;

	return 0;
}

static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment  = (request->flags & _DRM_PAGE_ALIGN)
		? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG( "count:      %d\n",  count );
	DRM_DEBUG( "order:      %d\n",  order );
	DRM_DEBUG( "size:       %d\n",  size );
	DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
	DRM_DEBUG( "alignment:  %d\n",  alignment );
	DRM_DEBUG( "page_order: %d\n",  page_order );
	DRM_DEBUG( "total:      %d\n",  total );

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
	    M_NOWAIT | M_ZERO);
	if (entry->buflist == NULL)
		return DRM_ERR(ENOMEM);

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while ( entry->buf_count < count ) {
		buf          = &entry->buflist[entry->buf_count];
		buf->idx     = dma->buf_count + entry->buf_count;
		buf->total   = alignment;
		buf->order   = order;
		buf->used    = 0;

		buf->offset  = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		buf->next    = NULL;
		buf->pending = 0;
		buf->filp    = NULL;

		buf->dev_priv_size = dev->dev_priv_size;
		buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return DRM_ERR(ENOMEM);
		}

		DRM_DEBUG( "buffer %d @ %p\n",
			   entry->buf_count, buf->address );

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG( "byte_count: %d\n", byte_count );

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM,
	    M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return DRM_ERR(ENOMEM);
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	return 0;
}

int drm_addbufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_buf_desc_t request;
	int err;
	int order;

	DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );

	if (request.count < 0 || request.count > 4096)
		return DRM_ERR(EINVAL);

	order = drm_order(request.size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return DRM_ERR(EINVAL);

	DRM_SPINLOCK(&dev->dma_lock);
	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return DRM_ERR(EBUSY);
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return DRM_ERR(ENOMEM);
	}

	if ( request.flags & _DRM_AGP_BUFFER )
		err = drm_addbufs_agp(dev, &request);
	else if ( request.flags & _DRM_SG_BUFFER )
		err = drm_addbufs_sg(dev, &request);
	else
		err = drm_addbufs_pci(dev, &request);
	DRM_SPINUNLOCK(&dev->dma_lock);

	DRM_COPY_TO_USER_IOCTL((drm_buf_desc_t *)data, request, sizeof(request));

	return err;
}
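
/*
 * Illustrative caller (not from this tree): a DRI driver asking for 32
 * AGP buffers of 64KB each through DRM_IOCTL_ADD_BUFS; agp_bufs_offset
 * is a hypothetical offset into the AGP aperture set aside for DMA
 * buffers:
 *
 *	drm_buf_desc_t desc;
 *	memset(&desc, 0, sizeof(desc));
 *	desc.count = 32;
 *	desc.size  = 65536;
 *	desc.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
 *	desc.agp_start = agp_bufs_offset;
 *	ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc);
 */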

int drm_infobufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_info_t request;
	int i;
	int count;
	int retcode = 0;

	DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_info_t *)data, sizeof(request) );

	DRM_SPINLOCK(&dev->dma_lock);
	++dev->buf_use;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK(&dev->dma_lock);

	for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
		if ( dma->bufs[i].buf_count ) ++count;
	}

	DRM_DEBUG( "count = %d\n", count );

	if ( request.count >= count ) {
		for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
			if ( dma->bufs[i].buf_count ) {
				drm_buf_desc_t from;

				from.count = dma->bufs[i].buf_count;
				from.size = dma->bufs[i].buf_size;
				from.low_mark = dma->bufs[i].freelist.low_mark;
				from.high_mark = dma->bufs[i].freelist.high_mark;

				if (DRM_COPY_TO_USER(&request.list[count], &from,
				    sizeof(drm_buf_desc_t)) != 0) {
					retcode = DRM_ERR(EFAULT);
					break;
				}

				DRM_DEBUG( "%d %d %d %d %d\n",
					   i,
					   dma->bufs[i].buf_count,
					   dma->bufs[i].buf_size,
					   dma->bufs[i].freelist.low_mark,
					   dma->bufs[i].freelist.high_mark );
				++count;
			}
		}
	}
	request.count = count;

	DRM_COPY_TO_USER_IOCTL( (drm_buf_info_t *)data, request, sizeof(request) );

	return retcode;
}

int drm_markbufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int order;

	DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );

	DRM_DEBUG( "%d, %d, %d\n",
		   request.size, request.low_mark, request.high_mark );

	order = drm_order(request.size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
	    request.low_mark < 0 || request.high_mark < 0) {
		return DRM_ERR(EINVAL);
	}

	DRM_SPINLOCK(&dev->dma_lock);
	if (request.low_mark > dma->bufs[order].buf_count ||
	    request.high_mark > dma->bufs[order].buf_count) {
		/* Don't leak dma_lock on the error path. */
		DRM_SPINUNLOCK(&dev->dma_lock);
		return DRM_ERR(EINVAL);
	}

	dma->bufs[order].freelist.low_mark  = request.low_mark;
	dma->bufs[order].freelist.high_mark = request.high_mark;
	DRM_SPINUNLOCK(&dev->dma_lock);

	return 0;
}

int drm_freebufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;
	int retcode = 0;

	DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_free_t *)data, sizeof(request) );

	DRM_DEBUG( "%d\n", request.count );

	DRM_SPINLOCK(&dev->dma_lock);
	for ( i = 0 ; i < request.count ; i++ ) {
		if (DRM_COPY_FROM_USER(&idx, &request.list[i], sizeof(idx))) {
			retcode = DRM_ERR(EFAULT);
			break;
		}
		if ( idx < 0 || idx >= dma->buf_count ) {
			DRM_ERROR( "Index %d (of %d max)\n",
				   idx, dma->buf_count - 1 );
			retcode = DRM_ERR(EINVAL);
			break;
		}
		buf = dma->buflist[idx];
		if ( buf->filp != filp ) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				   DRM_CURRENTPID);
			retcode = DRM_ERR(EINVAL);
			break;
		}
		drm_free_buffer(dev, buf);
	}
	DRM_SPINUNLOCK(&dev->dma_lock);

	return retcode;
}

int drm_mapbufs(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	vm_offset_t address;
	struct vmspace *vms;
#ifdef __FreeBSD__
	vm_ooffset_t foff;
	vm_size_t size;
	vm_offset_t vaddr;
#elif defined(__NetBSD__) || defined(__OpenBSD__)
	struct vnode *vn;
	vm_size_t size;
	vaddr_t vaddr;
#endif /* __NetBSD__ || __OpenBSD__ */

	drm_buf_map_t request;
	int i;

	DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_map_t *)data, sizeof(request) );

#if defined(__NetBSD__) || defined(__OpenBSD__)
	if (!vfinddev(kdev, VCHR, &vn))
		return 0;	/* FIXME: Shouldn't this be EINVAL or something? */
#endif /* __NetBSD__ || __OpenBSD__ */

#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
	vms = p->td_proc->p_vmspace;
#else
	vms = p->p_vmspace;
#endif

	DRM_SPINLOCK(&dev->dma_lock);
	dev->buf_use++;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK(&dev->dma_lock);

	if (request.count < dma->buf_count)
		goto done;

	if ((dev->use_agp && (dma->flags & _DRM_DMA_USE_AGP)) ||
	    (dev->use_sg && (dma->flags & _DRM_DMA_USE_SG))) {
		drm_local_map_t *map = dev->agp_buffer_map;

		if (map == NULL) {
			retcode = EINVAL;
			goto done;
		}
		size = round_page(map->size);
		foff = map->offset;
	} else {
		size = round_page(dma->byte_count);
		foff = 0;
	}

#ifdef __FreeBSD__
	vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
#if __FreeBSD_version >= 600023
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED, OBJT_DEVICE, kdev, foff );
#else
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED, SLIST_FIRST(&kdev->si_hlist), foff );
#endif
#elif defined(__NetBSD__) || defined(__OpenBSD__)
	vaddr = round_page((vaddr_t)vms->vm_daddr + MAXDSIZ);
	retcode = uvm_mmap(&vms->vm_map, &vaddr, size,
	    UVM_PROT_READ | UVM_PROT_WRITE, UVM_PROT_ALL, MAP_SHARED,
	    &vn->v_uobj, foff, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
#endif /* __NetBSD__ || __OpenBSD__ */
	if (retcode)
		goto done;

	request.virtual = (void *)vaddr;

	for ( i = 0 ; i < dma->buf_count ; i++ ) {
		if (DRM_COPY_TO_USER(&request.list[i].idx,
		    &dma->buflist[i]->idx, sizeof(request.list[0].idx))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request.list[i].total,
		    &dma->buflist[i]->total, sizeof(request.list[0].total))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request.list[i].used, &zero,
		    sizeof(zero))) {
			retcode = EFAULT;
			goto done;
		}
		address = vaddr + dma->buflist[i]->offset; /* *** */
		if (DRM_COPY_TO_USER(&request.list[i].address, &address,
		    sizeof(address))) {
			retcode = EFAULT;
			goto done;
		}
	}

 done:
	request.count = dma->buf_count;

	DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

	DRM_COPY_TO_USER_IOCTL((drm_buf_map_t *)data, request, sizeof(request));

	return DRM_ERR(retcode);
}

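/*
 * Illustrative caller (not from this tree): mapping every DMA buffer
 * into the client with DRM_IOCTL_MAP_BUFS.  buf_count would come from
 * an earlier DRM_IOCTL_INFO_BUFS or DRM_IOCTL_ADD_BUFS result:
 *
 *	drm_buf_map_t bm;
 *	memset(&bm, 0, sizeof(bm));
 *	bm.count = buf_count;
 *	bm.list  = calloc(buf_count, sizeof(drm_buf_pub_t));
 *	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bm) == 0)
 *		... bm.virtual is the base of the new shared mapping,
 *		    and bm.list[i].address points at buffer i ...
 */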