/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

/** @file drm_bufs.c
 * Implementation of the ioctls for setup of DRM mappings and DMA buffers.
 */

#if defined(__FreeBSD__)
#include "dev/pci/pcireg.h"
#endif

#include "drmP.h"

/* Allocation of PCI memory resources (framebuffer, registers, etc.) for
 * drm_get_resource_*.  Note that they are not RF_ACTIVE, so there's no virtual
 * address for accessing them.  Cleaned up at unload.
 */
static int drm_alloc_resource(struct drm_device *dev, int resource)
{
#if defined(__FreeBSD__)
	if (resource >= DRM_MAX_PCI_RESOURCE) {
		DRM_ERROR("Resource %d too large\n", resource);
		return 1;
	}

	DRM_UNLOCK();
	if (dev->pcir[resource] != NULL) {
		DRM_LOCK();
		return 0;
	}

	dev->pcirid[resource] = PCIR_BAR(resource);
	dev->pcir[resource] = bus_alloc_resource_any(dev->device,
	    SYS_RES_MEMORY, &dev->pcirid[resource], RF_SHAREABLE);
	DRM_LOCK();

	if (dev->pcir[resource] == NULL) {
		DRM_ERROR("Couldn't find resource 0x%x\n", resource);
		return 1;
	}
#elif defined(__NetBSD__)
	/* XXX This space _not_ intentionally left blank! */
#endif

	return 0;
}

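/*
 * Return the bus address at which PCI BAR "resource" starts, or 0 if the
 * resource could not be allocated.
 */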
unsigned long drm_get_resource_start(struct drm_device *dev,
				     unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

#if defined(__FreeBSD__)
	return rman_get_start(dev->pcir[resource]);
#elif defined(__NetBSD__)
	return dev->pci_map_data[resource].base;
#endif
}

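/*
 * Return the size in bytes of PCI BAR "resource", or 0 if the resource
 * could not be allocated.
 */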
unsigned long drm_get_resource_len(struct drm_device *dev,
				   unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

#if defined(__FreeBSD__)
	return rman_get_size(dev->pcir[resource]);
#elif defined(__NetBSD__)
	return dev->pci_map_data[resource].size;
#endif
}

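/*
 * Create a mapping of device memory of the given type, or return the
 * existing kernel map that already covers this offset.  Called with the
 * DRM lock held; the lock is dropped and reacquired around the
 * type-specific setup.
 *
 * A minimal usage sketch (hypothetical; real call sites live in the
 * per-driver code), mapping a device's register BAR with the DRM lock
 * held:
 *
 *	drm_local_map_t *map;
 *	int err = drm_addmap(dev, drm_get_resource_start(dev, 0),
 *	    drm_get_resource_len(dev, 0), _DRM_REGISTERS, 0, &map);
 */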
int drm_addmap(struct drm_device *dev, unsigned long offset,
	       unsigned long size, enum drm_map_type type,
	       enum drm_map_flags flags, drm_local_map_t **map_ptr)
{
	drm_local_map_t *map;
	int align;
	/*drm_agp_mem_t *entry;
	int valid;*/

	/* Only allow shared memory to be removable, since we only keep
	 * enough bookkeeping information about shared memory to allow for
	 * removal when processes fork.
	 */
	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
		DRM_ERROR("Requested removable map for non-DRM_SHM\n");
		return EINVAL;
	}
	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
		DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}
	if (offset + size < offset) {
		DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}

	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset,
	    size, type);

	/* If this is just another version of a kernel-allocated map, hand
	 * the existing map back instead.
	 */
	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
	    type == _DRM_SHM) {
		TAILQ_FOREACH(map, &dev->maplist, link) {
			if (map->type == type && (map->offset == offset ||
			    (map->type == _DRM_SHM &&
			    map->flags == _DRM_CONTAINS_LOCK))) {
				map->size = size;
				DRM_DEBUG("Found kernel map %d\n", type);
				goto done;
			}
		}
	}
	DRM_UNLOCK();

	/* Allocate a new map structure, fill it in, and do any type-specific
	 * initialization necessary.
	 */
	map = malloc(sizeof(*map), DRM_MEM_MAPS, M_ZERO | M_NOWAIT);
	if (!map) {
		DRM_LOCK();
		return ENOMEM;
	}

	map->offset = offset;
	map->size = size;
	map->type = type;
	map->flags = flags;
#if defined(__NetBSD__)
	map->fullmap = NULL;
	map->mapsize = 0;
#endif

	switch (map->type) {
	case _DRM_REGISTERS:
		map->handle = drm_ioremap(dev, map);
		if (map->handle == NULL) {
			DRM_ERROR("drm_addmap couldn't ioremap registers with "
			    "base %lX, size %lX\n",
			    (long)offset, (long)size);
			DRM_LOCK();
			return EINVAL;
		}

		if (!(map->flags & _DRM_WRITE_COMBINING))
			break;

		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
			map->mtrr = 1;
		break;
	case _DRM_SHM:
		map->handle = malloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
		DRM_DEBUG("%lu %d %p\n",
		    map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			DRM_LOCK();
			if (dev->lock.hw_lock != NULL) {
				DRM_UNLOCK();
				free(map->handle, DRM_MEM_MAPS);
				free(map, DRM_MEM_MAPS);
				return EBUSY;
			}
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
			DRM_UNLOCK();
		}
		break;
	case _DRM_AGP:
		/*valid = 0;*/
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->info.ai_aperture_size - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->mtrr; /* for getmap */
		/*for (entry = dev->agp->memory; entry; entry = entry->next) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <=
			    entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!valid) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return EACCES;
		}*/
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return EINVAL;
		}
		map->offset += dev->sg->handle;
		break;
	case _DRM_CONSISTENT:
		/* Unfortunately, we don't get any alignment specification from
		 * the caller, so we have to guess.  drm_pci_alloc requires a
		 * power-of-two alignment, so try to align the bus address of
		 * the map to its size if possible; otherwise just assume
		 * PAGE_SIZE alignment.
		 */
		align = map->size;
		if ((align & (align - 1)) != 0)
			align = PAGE_SIZE;
		map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
		if (map->dmah == NULL) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return ENOMEM;
		}
		map->handle = map->dmah->vaddr;
		map->offset = map->dmah->busaddr;
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		free(map, DRM_MEM_MAPS);
		DRM_LOCK();
		return EINVAL;
	}

	DRM_LOCK();
	TAILQ_INSERT_TAIL(&dev->maplist, map, link);

done:
	/* Jumped to, with lock held, when a kernel map is found. */

	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
	    map->size);

	*map_ptr = map;

	return 0;
}

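/*
 * Ioctl handler wrapping drm_addmap: checks permissions, creates the map,
 * and copies its final parameters back out to user space.
 */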
int drm_addmap_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_map *request = data;
	drm_local_map_t *map;
	int err;

	if (!(dev->flags & (FREAD|FWRITE)))
		return EACCES; /* Require read/write */

	if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP)
		return EACCES;

	DRM_LOCK();
	err = drm_addmap(dev, request->offset, request->size, request->type,
	    request->flags, &map);
	DRM_UNLOCK();
	if (err != 0)
		return err;

	request->offset = map->offset;
	request->size = map->size;
	request->type = map->type;
	request->flags = map->flags;
	request->mtrr = map->mtrr;
	request->handle = map->handle;

	if (request->type != _DRM_SHM) {
		request->handle = (void *)request->offset;
	}

	return 0;
}

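/*
 * Revoke any user-space mappings of a range of kernel memory before it is
 * freed out from under them.
 */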
static void
drm_rmmap_user(void *addr, size_t size)
{
	vaddr_t va, eva;
	paddr_t pa;
	struct vm_page *pg;

	va = (vaddr_t)addr;
	eva = va + size;
	for (; va < eva; va += PAGE_SIZE) {
		pmap_extract(pmap_kernel(), va, &pa);
		pg = PHYS_TO_VM_PAGE(pa);
		pmap_page_protect(pg, VM_PROT_NONE);
	}
}

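/*
 * Remove a map from the map list and release the resources backing it.
 * Called with the DRM lock held.
 */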
void drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
{
	DRM_SPINLOCK_ASSERT(&dev->dev_lock);

	TAILQ_REMOVE(&dev->maplist, map, link);

	switch (map->type) {
	case _DRM_REGISTERS:
#if defined(__FreeBSD__)
		if (map->bsr == NULL)
#endif
			drm_ioremapfree(map);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (map->mtrr) {
			int __unused retcode;

			retcode = drm_mtrr_del(0, map->offset, map->size,
			    DRM_MTRR_WC);
			DRM_DEBUG("mtrr_del = %d\n", retcode);
		}
		break;
	case _DRM_SHM:
		/*
		 * Remove any user mappings before we free the kernel memory.
		 */
		drm_rmmap_user(map->handle, map->size);
		free(map->handle, DRM_MEM_MAPS);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		drm_pci_free(dev, map->dmah);
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		break;
	}

#if defined(__FreeBSD__)
	if (map->bsr != NULL) {
		bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid,
		    map->bsr);
	}
#endif

	free(map, DRM_MEM_MAPS);
}

/* Remove a map from the map list and deallocate its resources if the
 * mapping isn't in use.
 */

int drm_rmmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_local_map_t *map;
	struct drm_map *request = data;

	DRM_LOCK();
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (map->handle == request->handle &&
		    map->flags & _DRM_REMOVABLE)
			break;
	}

	/* No match found. */
	if (map == NULL) {
		DRM_UNLOCK();
		return EINVAL;
	}

	drm_rmmap(dev, map);

	DRM_UNLOCK();

	return 0;
}

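/*
 * Unwind a partially completed buffer allocation: release any DMA segments
 * and per-buffer private data allocated before the failure.
 */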
static void drm_cleanup_buf_error(struct drm_device *dev,
				  drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			drm_pci_free(dev, entry->seglist[i]);
		}
		free(entry->seglist, DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			free(entry->buflist[i].dev_private, DRM_MEM_BUFS);
		}
		free(entry->buflist, DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}

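/*
 * Allocate DMA buffers out of the AGP aperture, starting at
 * request->agp_start.  Called with the DMA lock held.
 */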
static int drm_do_addbufs_agp(struct drm_device *dev,
			      struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	/*drm_agp_mem_t *agp_entry;
	int valid*/
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment  = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n",  count);
	DRM_DEBUG("order:      %d\n",  order);
	DRM_DEBUG("size:       %d\n",  size);
	DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n",  alignment);
	DRM_DEBUG("page_order: %d\n",  page_order);
	DRM_DEBUG("total:      %d\n",  total);

	/* Make sure buffers are located in AGP memory that we own */
	/* Breaks MGA due to drm_alloc_agp not setting up entries for the
	 * memory.  Safe to ignore for now because these ioctls are still
	 * root-only.
	 */
	/*valid = 0;
	for (agp_entry = dev->agp->memory; agp_entry;
	    agp_entry = agp_entry->next) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <=
		    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!valid) {
		DRM_DEBUG("zone invalid\n");
		return EINVAL;
	}*/

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	if (!entry->buflist) {
		return ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf          = &entry->buflist[entry->buf_count];
		buf->idx     = dma->buf_count + entry->buf_count;
		buf->total   = alignment;
		buf->order   = order;
		buf->used    = 0;

		buf->offset  = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next    = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return ENOMEM;
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	return 0;
}

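/*
 * Allocate DMA buffers from system memory, one power-of-two-sized segment
 * at a time, and append their pages to the global pagelist.  Called with
 * the DMA lock held; the lock is dropped around each drm_pci_alloc call.
 */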
static int drm_do_addbufs_pci(struct drm_device *dev,
			      struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
	    request->count, request->size, size, order);

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	entry->seglist = malloc(count * sizeof(*entry->seglist), DRM_MEM_SEGS,
	    M_NOWAIT | M_ZERO);

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded.
	 */
	temp_pagelist = malloc((dma->page_count + (count << page_order)) *
	    sizeof(*dma->pagelist), DRM_MEM_PAGES, M_NOWAIT);

	if (entry->buflist == NULL || entry->seglist == NULL ||
	    temp_pagelist == NULL) {
		if (temp_pagelist)
			free(temp_pagelist, DRM_MEM_PAGES);
		if (entry->seglist)
			free(entry->seglist, DRM_MEM_SEGS);
		if (entry->buflist)
			free(entry->buflist, DRM_MEM_BUFS);
		return ENOMEM;
	}

	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
	    sizeof(*dma->pagelist));

	DRM_DEBUG("pagelist: %d entries\n",
	    dma->page_count + (count << page_order));

	entry->buf_size	= size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		drm_dma_handle_t *dmah;

		DRM_SPINUNLOCK(&dev->dma_lock);
		dmah = drm_pci_alloc(dev, size, alignment, 0xfffffffful);
		DRM_SPINLOCK(&dev->dma_lock);
		if (dmah == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			free(temp_pagelist, DRM_MEM_PAGES);
			return ENOMEM;
		}

		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ %p\n",
			    dma->page_count + page_count,
			    (char *)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++] =
			    (long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		    offset + size <= total && entry->buf_count < count;
		    offset += alignment, ++entry->buf_count) {
			buf	     = &entry->buflist[entry->buf_count];
			buf->idx     = dma->buf_count + entry->buf_count;
			buf->total   = alignment;
			buf->order   = order;
			buf->used    = 0;
			buf->offset  = (dma->byte_count + byte_count + offset);
			buf->address = ((char *)dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next    = NULL;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->buf_priv_size;
			buf->dev_private = malloc(buf->dev_priv_size,
			    DRM_MEM_BUFS, M_NOWAIT | M_ZERO);
			if (buf->dev_private == NULL) {
				/* Set count correctly so we free the proper
				 * amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				free(temp_pagelist, DRM_MEM_PAGES);
				return ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
			    entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		free(temp_pagelist, DRM_MEM_PAGES);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	free(dma->pagelist, DRM_MEM_PAGES);
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	request->count = entry->buf_count;
	request->size = size;

	return 0;
}

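/*
 * Allocate DMA buffers out of the scatter-gather area, starting at
 * request->agp_start.  Called with the DMA lock held.
 */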
static int drm_do_addbufs_sg(struct drm_device *dev,
			     struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment  = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n",  count);
	DRM_DEBUG("order:      %d\n",  order);
	DRM_DEBUG("size:       %d\n",  size);
	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n",  alignment);
	DRM_DEBUG("page_order: %d\n",  page_order);
	DRM_DEBUG("total:      %d\n",  total);

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	if (entry->buflist == NULL)
		return ENOMEM;

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf          = &entry->buflist[entry->buf_count];
		buf->idx     = dma->buf_count + entry->buf_count;
		buf->total   = alignment;
		buf->order   = order;
		buf->used    = 0;

		buf->offset  = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		buf->next    = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n",
		    entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	return 0;
}

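/*
 * Validate an AGP buffer allocation request and run drm_do_addbufs_agp
 * under the DMA lock.
 */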
int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_agp(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}

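/*
 * Validate a scatter-gather buffer allocation request and run
 * drm_do_addbufs_sg under the DMA lock.
 */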
int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (!DRM_SUSER(DRM_CURPROC))
		return EACCES;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_sg(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}

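/*
 * Validate a PCI buffer allocation request and run drm_do_addbufs_pci
 * under the DMA lock.
 */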
int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (!DRM_SUSER(DRM_CURPROC))
		return EACCES;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_pci(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}

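/*
 * Ioctl entry point for buffer allocation: dispatch to the AGP,
 * scatter-gather, or PCI allocator according to the request flags.
 */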
int drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int err;

	if (request->flags & _DRM_AGP_BUFFER)
		err = drm_addbufs_agp(dev, request);
	else if (request->flags & _DRM_SG_BUFFER)
		err = drm_addbufs_sg(dev, request);
	else
		err = drm_addbufs_pci(dev, request);

	return err;
}

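/*
 * Copy a description of the buffers allocated at each order out to user
 * space, and bump dev->buf_use so no further allocations are allowed.
 */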
int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;
	int retcode = 0;

	DRM_SPINLOCK(&dev->dma_lock);
	++dev->buf_use;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK(&dev->dma_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc from;

				from.count = dma->bufs[i].buf_count;
				from.size = dma->bufs[i].buf_size;
				from.low_mark = dma->bufs[i].freelist.low_mark;
				from.high_mark =
				    dma->bufs[i].freelist.high_mark;

				if (DRM_COPY_TO_USER(&request->list[count],
				    &from, sizeof(struct drm_buf_desc)) != 0) {
					retcode = EFAULT;
					break;
				}

				DRM_DEBUG("%d %d %d %d %d\n",
				    i, dma->bufs[i].buf_count,
				    dma->bufs[i].buf_size,
				    dma->bufs[i].freelist.low_mark,
				    dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return retcode;
}

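/*
 * Set the free-list low and high water marks for the buffer size given in
 * the request.
 */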
int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;

	DRM_DEBUG("%d, %d, %d\n",
	    request->size, request->low_mark, request->high_mark);

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
	    request->low_mark < 0 || request->high_mark < 0) {
		return EINVAL;
	}

	DRM_SPINLOCK(&dev->dma_lock);
	if (request->low_mark > dma->bufs[order].buf_count ||
	    request->high_mark > dma->bufs[order].buf_count) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EINVAL;
	}

	dma->bufs[order].freelist.low_mark  = request->low_mark;
	dma->bufs[order].freelist.high_mark = request->high_mark;
	DRM_SPINUNLOCK(&dev->dma_lock);

	return 0;
}

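/*
 * Return the buffers named by index in the request list to the free list.
 * A buffer may only be freed by the file that owns it.
 */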
int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	drm_buf_t *buf;
	int retcode = 0;

	DRM_DEBUG("%d\n", request->count);

	DRM_SPINLOCK(&dev->dma_lock);
	for (i = 0; i < request->count; i++) {
		if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
			retcode = EFAULT;
			break;
		}
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
			    idx, dma->buf_count - 1);
			retcode = EINVAL;
			break;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
			    DRM_CURRENTPID);
			retcode = EINVAL;
			break;
		}
		drm_free_buffer(dev, buf);
	}
	DRM_SPINUNLOCK(&dev->dma_lock);

	return retcode;
}

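/*
 * Map the entire pool of DMA buffers into the calling process's address
 * space with a single mmap, then copy each buffer's index, size, and
 * resulting user address out to the request list.
 */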
int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	vm_offset_t address;
	struct vmspace *vms;
#if defined(__FreeBSD__)
	vm_ooffset_t foff;
	vm_size_t size;
	vm_offset_t vaddr;
#elif defined(__NetBSD__)
	struct vnode *vn;
	voff_t foff;
	vsize_t size, rsize;
	vaddr_t vaddr;
#endif
	struct drm_buf_map *request = data;
	int i;

#if defined(__NetBSD__)
	if (!vfinddev(dev->kdev, VCHR, &vn))
		return 0;	/* FIXME: Shouldn't this be EINVAL or something? */
#endif /* __NetBSD__ */

#if defined(__FreeBSD__)
	vms = DRM_CURPROC->td_proc->p_vmspace;
#elif defined(__NetBSD__)
	vms = DRM_CURPROC->p_vmspace;
#endif

	DRM_SPINLOCK(&dev->dma_lock);
	dev->buf_use++;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK(&dev->dma_lock);

	if (request->count < dma->buf_count)
		goto done;

	if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
	    (drm_core_check_feature(dev, DRIVER_SG) &&
	    (dma->flags & _DRM_DMA_USE_SG))) {
		drm_local_map_t *map = dev->agp_buffer_map;

		if (map == NULL) {
			retcode = EINVAL;
			goto done;
		}
		size = round_page(map->size);
		foff = map->offset;
	} else {
		size = round_page(dma->byte_count);
		foff = 0;
	}

#if defined(__FreeBSD__)
	vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
#if __FreeBSD_version >= 600023
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC, OBJT_DEVICE, dev->devnode,
	    foff);
#else
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
	    SLIST_FIRST(&dev->devnode->si_hlist), foff);
#endif
#elif defined(__NetBSD__)
	/* XXXNETBSD */
	vaddr = curlwp->l_proc->p_emul->e_vm_default_addr(curlwp->l_proc,
	    (vaddr_t)vms->vm_daddr, size);
	rsize = round_page(size);
	DRM_DEBUG("mmap %#lx/%#lx foff %#llx\n", vaddr, rsize, (long long)foff);
	retcode = uvm_mmap(&vms->vm_map, &vaddr, rsize,
	    UVM_PROT_READ | UVM_PROT_WRITE, UVM_PROT_ALL, MAP_SHARED,
	    &vn->v_uobj, foff, curproc->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
#endif
	if (retcode)
		goto done;

	request->virtual = (void *)vaddr;

	for (i = 0; i < dma->buf_count; i++) {
		if (DRM_COPY_TO_USER(&request->list[i].idx,
		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].total,
		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
		    sizeof(zero))) {
			retcode = EFAULT;
			goto done;
		}
		address = vaddr + dma->buflist[i]->offset; /* *** */
		if (DRM_COPY_TO_USER(&request->list[i].address, &address,
		    sizeof(address))) {
			retcode = EFAULT;
			goto done;
		}
	}

done:
	request->count = dma->buf_count;
#if defined(__NetBSD__)
	vrele(vn);
#endif

	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}

/*
 * Compute the binary order of size: the smallest order such that
 * (1 << order) >= size.  For example, drm_order(PAGE_SIZE) == PAGE_SHIFT
 * and drm_order(PAGE_SIZE + 1) == PAGE_SHIFT + 1.  Can be made faster.
 */
int drm_order(unsigned long size)
{
#if defined(__FreeBSD__)
	int order;

	if (size == 0)
		return 0;

	order = flsl(size) - 1;
	if (size & ~(1ul << order))
		++order;

	return order;
#elif defined(__NetBSD__)
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size; tmp >>= 1; ++order)
		;

	/* Use 1ul so the shift is done at the width of size, matching the
	 * FreeBSD branch above. */
	if (size & ~(1ul << order))
		++order;

	return order;
#endif
}