/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/drm/drm_bufs.c 196464 2009-08-23 14:27:46Z rnoland $");

/** @file drm_bufs.c
 * Implementation of the ioctls for setup of DRM mappings and DMA buffers.
 */

#include "dev/pci/pcireg.h"

#include "dev/drm/drmP.h"

/* Allocation of PCI memory resources (framebuffer, registers, etc.) for
 * drm_get_resource_*.  Note that they are not RF_ACTIVE, so there's no virtual
 * address for accessing them.  Cleaned up at unload.
 */
static int drm_alloc_resource(struct drm_device *dev, int resource)
{
	struct resource *res;
	int rid;

	DRM_SPINLOCK_ASSERT(&dev->dev_lock);

	if (resource >= DRM_MAX_PCI_RESOURCE) {
		DRM_ERROR("Resource %d too large\n", resource);
		return 1;
	}

	if (dev->pcir[resource] != NULL) {
		return 0;
	}

	DRM_UNLOCK();
	rid = PCIR_BAR(resource);
	res = bus_alloc_resource_any(dev->device, SYS_RES_MEMORY, &rid,
	    RF_SHAREABLE);
	DRM_LOCK();
	if (res == NULL) {
		DRM_ERROR("Couldn't find resource 0x%x\n", resource);
		return 1;
	}

	if (dev->pcir[resource] == NULL) {
		dev->pcirid[resource] = rid;
		dev->pcir[resource] = res;
	}

	return 0;
}

unsigned long drm_get_resource_start(struct drm_device *dev,
				     unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

	return rman_get_start(dev->pcir[resource]);
}

unsigned long drm_get_resource_len(struct drm_device *dev,
				   unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

	return rman_get_size(dev->pcir[resource]);
}
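
/*
 * Example (editor's sketch, not part of the original source): a driver's
 * setup path might locate its register BAR with the helpers above.  The
 * BAR index 0 and the error handling are hypothetical; note that
 * drm_alloc_resource() asserts that dev_lock is held, so the caller must
 * hold DRM_LOCK() around these calls.
 *
 *	unsigned long base, len;
 *
 *	DRM_LOCK();
 *	base = drm_get_resource_start(dev, 0);
 *	len  = drm_get_resource_len(dev, 0);
 *	DRM_UNLOCK();
 *	if (base == 0 || len == 0)
 *		return ENXIO;
 */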

int drm_addmap(struct drm_device *dev, unsigned long offset,
    unsigned long size, enum drm_map_type type, enum drm_map_flags flags,
    drm_local_map_t **map_ptr)
{
	drm_local_map_t *map;
	int align;
	/*drm_agp_mem_t *entry;
	int valid;*/

	/* Only allow shared memory to be removable, since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
		DRM_ERROR("Requested removable map for non-DRM_SHM\n");
		return EINVAL;
	}
	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
		DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}
	if (offset + size < offset) {
		DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}

	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset,
	    size, type);

	/* Check if this is just another version of a kernel-allocated map, and
	 * just hand that back if so.
	 */
	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
	    type == _DRM_SHM) {
		TAILQ_FOREACH(map, &dev->maplist, link) {
			if (map->type == type && (map->offset == offset ||
			    (map->type == _DRM_SHM &&
			    map->flags == _DRM_CONTAINS_LOCK))) {
				map->size = size;
				DRM_DEBUG("Found kernel map %d\n", type);
				goto done;
			}
		}
	}
	DRM_UNLOCK();

	/* Allocate a new map structure, fill it in, and do any type-specific
	 * initialization necessary.
	 */
	map = malloc(sizeof(*map), DRM_MEM_MAPS, M_ZERO | M_NOWAIT);
	if (!map) {
		DRM_LOCK();
		return ENOMEM;
	}

	map->offset = offset;
	map->size = size;
	map->type = type;
	map->flags = flags;

	switch (map->type) {
	case _DRM_REGISTERS:
		map->handle = drm_ioremap(dev, map);
		if (!(map->flags & _DRM_WRITE_COMBINING))
			break;
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
			map->mtrr = 1;
		break;
	case _DRM_SHM:
		map->handle = malloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
		DRM_DEBUG("%lu %d %p\n",
		    map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			DRM_LOCK();
			if (dev->lock.hw_lock != NULL) {
				DRM_UNLOCK();
				free(map->handle, DRM_MEM_MAPS);
				free(map, DRM_MEM_MAPS);
				return EBUSY;
			}
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
			DRM_UNLOCK();
		}
		break;
	case _DRM_AGP:
		/*valid = 0;*/
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->info.ai_aperture_size - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr   = dev->agp->mtrr; /* for getmap */
		/*for (entry = dev->agp->memory; entry; entry = entry->next) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <=
			    entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!valid) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return EACCES;
		}*/
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return EINVAL;
		}
		map->offset += dev->sg->handle;
		break;
	case _DRM_CONSISTENT:
		/* Unfortunately, we don't get any alignment specification from
		 * the caller, so we have to guess.  drm_pci_alloc requires
		 * a power-of-two alignment, so try to align the bus address of
		 * the map to its size if possible, otherwise just assume
		 * PAGE_SIZE alignment.
		 */
		align = map->size;
		if ((align & (align - 1)) != 0)
			align = PAGE_SIZE;
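		/* Worked example (editor's note): with 4KB pages, a 64KB map
		 * is a power of two, so align stays at 64KB and the bus
		 * address comes back 64KB-aligned; a 12KB map fails the
		 * power-of-two test (0x3000 & 0x2fff != 0), so align falls
		 * back to PAGE_SIZE.
		 */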
		map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
		if (map->dmah == NULL) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return ENOMEM;
		}
		map->handle = map->dmah->vaddr;
		map->offset = map->dmah->busaddr;
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		free(map, DRM_MEM_MAPS);
		DRM_LOCK();
		return EINVAL;
	}

	DRM_LOCK();
	TAILQ_INSERT_TAIL(&dev->maplist, map, link);

done:
	/* Jumped to, with lock held, when a kernel map is found. */

	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
	    map->size);

	*map_ptr = map;

	return 0;
}
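
/*
 * Example (editor's sketch, not part of the original source): a driver
 * might register its MMIO BAR during setup roughly like this.  The
 * variable names and BAR index are hypothetical; drm_addmap() must be
 * entered with DRM_LOCK() held, as drm_addmap_ioctl() below demonstrates.
 *
 *	drm_local_map_t *regs;
 *	int err;
 *
 *	DRM_LOCK();
 *	err = drm_addmap(dev, drm_get_resource_start(dev, 0),
 *	    drm_get_resource_len(dev, 0), _DRM_REGISTERS,
 *	    _DRM_READ_ONLY, &regs);
 *	DRM_UNLOCK();
 */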

int drm_addmap_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_map *request = data;
	drm_local_map_t *map;
	int err;

	if (!(dev->flags & (FREAD|FWRITE)))
		return EACCES; /* Require read/write */

	if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP)
		return EACCES;

	DRM_LOCK();
	err = drm_addmap(dev, request->offset, request->size, request->type,
	    request->flags, &map);
	DRM_UNLOCK();
	if (err != 0)
		return err;

	request->offset = map->offset;
	request->size = map->size;
	request->type = map->type;
	request->flags = map->flags;
	request->mtrr   = map->mtrr;
	request->handle = map->handle;

	if (request->type != _DRM_SHM) {
		request->handle = (void *)request->offset;
	}

	return 0;
}
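
/*
 * Userspace view (editor's sketch, not part of the original source): the
 * X server or a libdrm client reaches this handler through the ADD_MAP
 * ioctl.  The fd, regs_base, and regs_len names are hypothetical.
 *
 *	struct drm_map m = {
 *		.offset = regs_base,
 *		.size   = regs_len,
 *		.type   = _DRM_REGISTERS,
 *		.flags  = 0,
 *	};
 *	if (ioctl(fd, DRM_IOCTL_ADD_MAP, &m) == 0)
 *		map_handle = m.handle;
 */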

void drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
{
	DRM_SPINLOCK_ASSERT(&dev->dev_lock);

	if (map == NULL)
		return;

	TAILQ_REMOVE(&dev->maplist, map, link);

	switch (map->type) {
	case _DRM_REGISTERS:
		if (map->bsr == NULL)
			drm_ioremapfree(map);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (map->mtrr) {
			int __unused retcode;

			retcode = drm_mtrr_del(0, map->offset, map->size,
			    DRM_MTRR_WC);
			DRM_DEBUG("mtrr_del = %d\n", retcode);
		}
		break;
	case _DRM_SHM:
		free(map->handle, DRM_MEM_MAPS);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		drm_pci_free(dev, map->dmah);
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		break;
	}

	if (map->bsr != NULL) {
		bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid,
		    map->bsr);
	}

	free(map, DRM_MEM_MAPS);
}

/* Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 */

int drm_rmmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_local_map_t *map;
	struct drm_map *request = data;

	DRM_LOCK();
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (map->handle == request->handle &&
		    map->flags & _DRM_REMOVABLE)
			break;
	}

	/* No match found. */
	if (map == NULL) {
		DRM_UNLOCK();
		return EINVAL;
	}

	drm_rmmap(dev, map);

	DRM_UNLOCK();

	return 0;
}

static void drm_cleanup_buf_error(struct drm_device *dev,
				  drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			drm_pci_free(dev, entry->seglist[i]);
		}
		free(entry->seglist, DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			free(entry->buflist[i].dev_private, DRM_MEM_BUFS);
		}
		free(entry->buflist, DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}
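
/*
 * Editor's note: the addbufs functions below that fail partway through an
 * allocation loop first set entry->buf_count (and, for PCI buffers,
 * entry->seg_count) to the full requested count before calling
 * drm_cleanup_buf_error(), as their "Set count correctly" comments say.
 * The cleanup loops then walk every slot; the never-initialized tail
 * slots are still zeroed from M_ZERO, so (assuming the free routines
 * tolerate NULL, as free(9) does) one cleanup path covers every
 * partial-failure point.
 */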

static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	/*drm_agp_mem_t *agp_entry;
	int valid*/
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment  = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
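
	/* Worked example (editor's note, assuming 4KB pages, so PAGE_SHIFT
	 * is 12): request->size = 65536 gives order = 16 and size = 64KB;
	 * page_order = 16 - 12 = 4, so total = PAGE_SIZE << 4 = 64KB per
	 * allocation chunk.  A 2KB request gives order = 11, page_order = 0,
	 * and total = one page.
	 */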

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n",  count);
	DRM_DEBUG("order:      %d\n",  order);
	DRM_DEBUG("size:       %d\n",  size);
	DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n",  alignment);
	DRM_DEBUG("page_order: %d\n",  page_order);
	DRM_DEBUG("total:      %d\n",  total);

	/* Make sure buffers are located in AGP memory that we own */
	/* Breaks MGA due to drm_alloc_agp not setting up entries for the
	 * memory.  Safe to ignore for now because these ioctls are still
	 * root-only.
	 */
	/*valid = 0;
	for (agp_entry = dev->agp->memory; agp_entry;
	    agp_entry = agp_entry->next) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <=
		    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!valid) {
		DRM_DEBUG("zone invalid\n");
		return EINVAL;
	}*/

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	if (!entry->buflist) {
		return ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf          = &entry->buflist[entry->buf_count];
		buf->idx     = dma->buf_count + entry->buf_count;
		buf->total   = alignment;
		buf->order   = order;
		buf->used    = 0;

		buf->offset  = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next    = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return ENOMEM;
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	return 0;
}

static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
	    request->count, request->size, size, order);

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	entry->seglist = malloc(count * sizeof(*entry->seglist), DRM_MEM_SEGS,
	    M_NOWAIT | M_ZERO);

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = malloc((dma->page_count + (count << page_order)) *
	    sizeof(*dma->pagelist), DRM_MEM_PAGES, M_NOWAIT);

	if (entry->buflist == NULL || entry->seglist == NULL ||
	    temp_pagelist == NULL) {
		free(temp_pagelist, DRM_MEM_PAGES);
		free(entry->seglist, DRM_MEM_SEGS);
		free(entry->buflist, DRM_MEM_BUFS);
		return ENOMEM;
	}

	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
	    sizeof(*dma->pagelist));

	DRM_DEBUG("pagelist: %d entries\n",
	    dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment,
		    0xfffffffful);
		DRM_SPINLOCK(&dev->dma_lock);
		if (dmah == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			free(temp_pagelist, DRM_MEM_PAGES);
			return ENOMEM;
		}

		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ %p\n",
			    dma->page_count + page_count,
			    (char *)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++] =
			    (long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		    offset + size <= total && entry->buf_count < count;
		    offset += alignment, ++entry->buf_count) {
			buf          = &entry->buflist[entry->buf_count];
			buf->idx     = dma->buf_count + entry->buf_count;
			buf->total   = alignment;
			buf->order   = order;
			buf->used    = 0;
			buf->offset  = (dma->byte_count + byte_count + offset);
			buf->address = ((char *)dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next    = NULL;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->buf_priv_size;
			buf->dev_private = malloc(buf->dev_priv_size,
			    DRM_MEM_BUFS, M_NOWAIT | M_ZERO);
			if (buf->dev_private == NULL) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				free(temp_pagelist, DRM_MEM_PAGES);
				return ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
			    entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		free(temp_pagelist, DRM_MEM_PAGES);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	free(dma->pagelist, DRM_MEM_PAGES);
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	request->count = entry->buf_count;
	request->size = size;

	return 0;
}

static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment  = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n",  count);
	DRM_DEBUG("order:      %d\n",  order);
	DRM_DEBUG("size:       %d\n",  size);
	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n",  alignment);
	DRM_DEBUG("page_order: %d\n",  page_order);
	DRM_DEBUG("total:      %d\n",  total);

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	if (entry->buflist == NULL)
		return ENOMEM;

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf          = &entry->buflist[entry->buf_count];
		buf->idx     = dma->buf_count + entry->buf_count;
		buf->total   = alignment;
		buf->order   = order;
		buf->used    = 0;

		buf->offset  = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		buf->next    = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n",
		    entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	return 0;
}

int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_agp(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}

int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (!DRM_SUSER(DRM_CURPROC))
		return EACCES;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_sg(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}

int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (!DRM_SUSER(DRM_CURPROC))
		return EACCES;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_pci(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}

int drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int err;

	if (request->flags & _DRM_AGP_BUFFER)
		err = drm_addbufs_agp(dev, request);
	else if (request->flags & _DRM_SG_BUFFER)
		err = drm_addbufs_sg(dev, request);
	else
		err = drm_addbufs_pci(dev, request);

	return err;
}
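
/*
 * Example (editor's sketch, not part of the original source): userspace
 * asks for DMA buffers through the ADD_BUFS ioctl, which lands in
 * drm_addbufs() above.  The counts and fd are hypothetical.
 *
 *	struct drm_buf_desc desc = {
 *		.count = 32,
 *		.size  = 65536,
 *		.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN,
 *		.agp_start = 0,
 *	};
 *	ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc);
 *
 * On success the kernel writes the actual allocation back into
 * desc.count and desc.size, which may differ from what was requested.
 */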

int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;
	int retcode = 0;

	DRM_SPINLOCK(&dev->dma_lock);
	++dev->buf_use;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK(&dev->dma_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc from;

				from.count = dma->bufs[i].buf_count;
				from.size = dma->bufs[i].buf_size;
				from.low_mark = dma->bufs[i].freelist.low_mark;
				from.high_mark = dma->bufs[i].freelist.high_mark;

				if (DRM_COPY_TO_USER(&request->list[count], &from,
				    sizeof(struct drm_buf_desc)) != 0) {
					retcode = EFAULT;
					break;
				}

				DRM_DEBUG("%d %d %d %d %d\n",
				    i, dma->bufs[i].buf_count,
				    dma->bufs[i].buf_size,
				    dma->bufs[i].freelist.low_mark,
				    dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return retcode;
}
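
/*
 * Editor's note: this implements the usual two-call ioctl pattern.  A
 * client first calls DRM_IOCTL_INFO_BUFS with request->count = 0 to
 * learn how many size pools exist (returned in request->count), then
 * allocates a drm_buf_desc array that large and calls again to have the
 * per-pool descriptions copied out.
 */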

int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
	    request->low_mark < 0 || request->high_mark < 0) {
		return EINVAL;
	}

	DRM_SPINLOCK(&dev->dma_lock);
	if (request->low_mark > dma->bufs[order].buf_count ||
	    request->high_mark > dma->bufs[order].buf_count) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EINVAL;
	}

	dma->bufs[order].freelist.low_mark  = request->low_mark;
	dma->bufs[order].freelist.high_mark = request->high_mark;
	DRM_SPINUNLOCK(&dev->dma_lock);

	return 0;
}

int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	drm_buf_t *buf;
	int retcode = 0;

	DRM_DEBUG("%d\n", request->count);

	DRM_SPINLOCK(&dev->dma_lock);
	for (i = 0; i < request->count; i++) {
		if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
			retcode = EFAULT;
			break;
		}
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
			    idx, dma->buf_count - 1);
			retcode = EINVAL;
			break;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
			    DRM_CURRENTPID);
			retcode = EINVAL;
			break;
		}
		drm_free_buffer(dev, buf);
	}
	DRM_SPINUNLOCK(&dev->dma_lock);

	return retcode;
}
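
/*
 * Example (editor's sketch, not part of the original source): a client
 * returns buffers it no longer needs by index.  The indices and fd are
 * hypothetical.
 *
 *	int idx[2] = { 3, 7 };
 *	struct drm_buf_free req = { .count = 2, .list = idx };
 *
 *	ioctl(fd, DRM_IOCTL_FREE_BUFS, &req);
 */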

int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	vm_offset_t address;
	struct vmspace *vms;
	vm_ooffset_t foff;
	vm_size_t size;
	vm_offset_t vaddr;
	struct drm_buf_map *request = data;
	int i;

	vms = DRM_CURPROC->td_proc->p_vmspace;

	DRM_SPINLOCK(&dev->dma_lock);
	dev->buf_use++;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK(&dev->dma_lock);

	if (request->count < dma->buf_count)
		goto done;

	if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
	    (drm_core_check_feature(dev, DRIVER_SG) &&
	    (dma->flags & _DRM_DMA_USE_SG))) {
		drm_local_map_t *map = dev->agp_buffer_map;

		if (map == NULL) {
			retcode = EINVAL;
			goto done;
		}
		size = round_page(map->size);
		foff = map->offset;
	} else {
		size = round_page(dma->byte_count);
		foff = 0;
	}

	vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
#if __FreeBSD_version >= 600023
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC, OBJT_DEVICE,
	    dev->devnode, foff);
#else
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
	    SLIST_FIRST(&dev->devnode->si_hlist), foff);
#endif
	if (retcode)
		goto done;

	request->virtual = (void *)vaddr;

	for (i = 0; i < dma->buf_count; i++) {
		if (DRM_COPY_TO_USER(&request->list[i].idx,
		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].total,
		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
		    sizeof(zero))) {
			retcode = EFAULT;
			goto done;
		}
		address = vaddr + dma->buflist[i]->offset; /* *** */
		if (DRM_COPY_TO_USER(&request->list[i].address, &address,
		    sizeof(address))) {
			retcode = EFAULT;
			goto done;
		}
	}

done:
	request->count = dma->buf_count;

	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}
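
/*
 * Example (editor's sketch, not part of the original source): userspace
 * maps every DMA buffer in one shot.  MAX_BUFS and fd are hypothetical;
 * request->count must be at least the kernel's buffer count, otherwise
 * the call returns without mapping anything, per the check above.
 *
 *	struct drm_buf_pub bufs[MAX_BUFS];
 *	struct drm_buf_map bm = { .count = MAX_BUFS, .list = bufs };
 *
 *	if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bm) == 0)
 *		first_buf = bufs[0].address;
 */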

/*
 * Compute order.  Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;

	if (size == 0)
		return 0;

	order = flsl(size) - 1;
	if (size & ~(1ul << order))
		++order;

	return order;
}

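/*
 * Worked examples (editor's note): drm_order() returns the log2 of the
 * smallest power of two that covers size.  drm_order(4096) == 12 and
 * drm_order(65536) == 16 (exact powers of two), while drm_order(4097)
 * == 13 because the stray low bit forces a round-up.  drm_order(0)
 * returns 0 by convention.
 */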