drm_bufs.c -- FreeBSD SVN r182080 -> r182883 (head/sys/dev/drm/drm_bufs.c)

The only change in r182883 is in drm_do_addbufs_pci(): the drm_pci_alloc()
call (new lines 601-604) is now bracketed by DRM_SPINUNLOCK()/DRM_SPINLOCK()
on dev->dma_lock, so the DMA spinlock is no longer held across the
allocation. The resulting file, as of r182883, follows.
1/*-
2 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
3 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
15 * Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
21 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
22 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23 * OTHER DEALINGS IN THE SOFTWARE.
24 *
25 * Authors:
26 * Rickard E. (Rik) Faith <faith@valinux.com>
27 * Gareth Hughes <gareth@valinux.com>
28 *
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD: head/sys/dev/drm/drm_bufs.c 182080 2008-08-23 20:59:12Z rnoland $");
32__FBSDID("$FreeBSD: head/sys/dev/drm/drm_bufs.c 182883 2008-09-09 02:05:03Z rnoland $");
33
34/** @file drm_bufs.c
35 * Implementation of the ioctls for setup of DRM mappings and DMA buffers.
36 */
37
38#include "dev/pci/pcireg.h"
39
40#include "dev/drm/drmP.h"
41
42/*
 43 * Compute order -- the smallest n such that (1 << n) >= size. Can be made faster.
44 */
45int drm_order(unsigned long size)
46{
47 int order;
48 unsigned long tmp;
49
50 for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );
51
52 if ( size & ~(1 << order) )
53 ++order;
54
55 return order;
56}
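/* Editor's note: e.g. drm_order(4096) == 12 and drm_order(4097) == 13.
 * Beware that "1 << order" is a plain int, so sizes of 2GB and up would
 * need 1UL << order on LP64 to avoid overflow; the addbufs callers below
 * stay well under that via the DRM_MAX_ORDER check.
 */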
57
58/* Allocation of PCI memory resources (framebuffer, registers, etc.) for
59 * drm_get_resource_*. Note that they are not RF_ACTIVE, so there's no virtual
60 * address for accessing them. Cleaned up at unload.
61 */
62static int drm_alloc_resource(struct drm_device *dev, int resource)
63{
64 if (resource >= DRM_MAX_PCI_RESOURCE) {
65 DRM_ERROR("Resource %d too large\n", resource);
66 return 1;
67 }
68
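	/* Editor's note: the DRM lock is dropped across the (possibly
	 * sleeping) resource allocation -- the same pattern r182883 applies
	 * around drm_pci_alloc() in drm_do_addbufs_pci() below. */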
69 DRM_UNLOCK();
70 if (dev->pcir[resource] != NULL) {
71 DRM_LOCK();
72 return 0;
73 }
74
75 dev->pcirid[resource] = PCIR_BAR(resource);
76 dev->pcir[resource] = bus_alloc_resource_any(dev->device,
77 SYS_RES_MEMORY, &dev->pcirid[resource], RF_SHAREABLE);
78 DRM_LOCK();
79
80 if (dev->pcir[resource] == NULL) {
81 DRM_ERROR("Couldn't find resource 0x%x\n", resource);
82 return 1;
83 }
84
85 return 0;
86}
87
88unsigned long drm_get_resource_start(struct drm_device *dev,
89 unsigned int resource)
90{
91 if (drm_alloc_resource(dev, resource) != 0)
92 return 0;
93
94 return rman_get_start(dev->pcir[resource]);
95}
96
97unsigned long drm_get_resource_len(struct drm_device *dev,
98 unsigned int resource)
99{
100 if (drm_alloc_resource(dev, resource) != 0)
101 return 0;
102
103 return rman_get_size(dev->pcir[resource]);
104}
105
106int drm_addmap(struct drm_device * dev, unsigned long offset,
107 unsigned long size,
108 drm_map_type_t type, drm_map_flags_t flags, drm_local_map_t **map_ptr)
109{
110 drm_local_map_t *map;
111 int align;
112 /*drm_agp_mem_t *entry;
113 int valid;*/
114
115 /* Only allow shared memory to be removable since we only keep enough
 116	 * bookkeeping information about shared memory to allow for removal
117 * when processes fork.
118 */
119 if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
120 DRM_ERROR("Requested removable map for non-DRM_SHM\n");
121 return EINVAL;
122 }
123 if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
124 DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n",
125 offset, size);
126 return EINVAL;
127 }
128 if (offset + size < offset) {
129 DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n",
130 offset, size);
131 return EINVAL;
132 }
133
134 DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset,
135 size, type);
136
137 /* Check if this is just another version of a kernel-allocated map, and
138 * just hand that back if so.
139 */
140 if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
141 type == _DRM_SHM) {
142 TAILQ_FOREACH(map, &dev->maplist, link) {
143 if (map->type == type && (map->offset == offset ||
144 (map->type == _DRM_SHM &&
145 map->flags == _DRM_CONTAINS_LOCK))) {
146 map->size = size;
147 DRM_DEBUG("Found kernel map %d\n", type);
148 goto done;
149 }
150 }
151 }
152 DRM_UNLOCK();
153
154 /* Allocate a new map structure, fill it in, and do any type-specific
155 * initialization necessary.
156 */
157 map = malloc(sizeof(*map), M_DRM, M_ZERO | M_NOWAIT);
158 if ( !map ) {
159 DRM_LOCK();
160 return ENOMEM;
161 }
162
163 map->offset = offset;
164 map->size = size;
165 map->type = type;
166 map->flags = flags;
167
168 switch ( map->type ) {
169 case _DRM_REGISTERS:
170 map->handle = drm_ioremap(dev, map);
171 if (!(map->flags & _DRM_WRITE_COMBINING))
172 break;
173 /* FALLTHROUGH */
174 case _DRM_FRAME_BUFFER:
175 if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
176 map->mtrr = 1;
177 break;
178 case _DRM_SHM:
179 map->handle = malloc(map->size, M_DRM, M_NOWAIT);
180 DRM_DEBUG( "%lu %d %p\n",
181 map->size, drm_order(map->size), map->handle );
182 if ( !map->handle ) {
183 free(map, M_DRM);
184 DRM_LOCK();
185 return ENOMEM;
186 }
187 map->offset = (unsigned long)map->handle;
188 if ( map->flags & _DRM_CONTAINS_LOCK ) {
189 /* Prevent a 2nd X Server from creating a 2nd lock */
190 DRM_LOCK();
191 if (dev->lock.hw_lock != NULL) {
192 DRM_UNLOCK();
193 free(map->handle, M_DRM);
194 free(map, M_DRM);
195 return EBUSY;
196 }
197 dev->lock.hw_lock = map->handle; /* Pointer to lock */
198 DRM_UNLOCK();
199 }
200 break;
201 case _DRM_AGP:
202 /*valid = 0;*/
203 /* In some cases (i810 driver), user space may have already
204 * added the AGP base itself, because dev->agp->base previously
205 * only got set during AGP enable. So, only add the base
206 * address if the map's offset isn't already within the
207 * aperture.
208 */
209 if (map->offset < dev->agp->base ||
210 map->offset > dev->agp->base +
211 dev->agp->info.ai_aperture_size - 1) {
212 map->offset += dev->agp->base;
213 }
214 map->mtrr = dev->agp->mtrr; /* for getmap */
215 /*for (entry = dev->agp->memory; entry; entry = entry->next) {
216 if ((map->offset >= entry->bound) &&
217 (map->offset + map->size <=
218 entry->bound + entry->pages * PAGE_SIZE)) {
219 valid = 1;
220 break;
221 }
222 }
223 if (!valid) {
224 free(map, M_DRM);
225 DRM_LOCK();
226 return EACCES;
227 }*/
228 break;
229 case _DRM_SCATTER_GATHER:
230 if (!dev->sg) {
231 free(map, M_DRM);
232 DRM_LOCK();
233 return EINVAL;
234 }
235 map->offset = map->offset + dev->sg->handle;
236 break;
237 case _DRM_CONSISTENT:
238 /* Unfortunately, we don't get any alignment specification from
239 * the caller, so we have to guess. drm_pci_alloc requires
240 * a power-of-two alignment, so try to align the bus address of
 241	 * the map to its size if possible, otherwise just assume
242 * PAGE_SIZE alignment.
243 */
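	/* Editor's note: (align & (align - 1)) != 0 detects a non-power-of-two
	 * size, e.g. a 64KB map keeps 64KB alignment while a 96KB map falls
	 * back to PAGE_SIZE. */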
244 align = map->size;
245 if ((align & (align - 1)) != 0)
246 align = PAGE_SIZE;
247 map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
248 if (map->dmah == NULL) {
249 free(map, M_DRM);
250 DRM_LOCK();
251 return ENOMEM;
252 }
253 map->handle = map->dmah->vaddr;
254 map->offset = map->dmah->busaddr;
255 break;
256 default:
257 DRM_ERROR("Bad map type %d\n", map->type);
258 free(map, M_DRM);
259 DRM_LOCK();
260 return EINVAL;
261 }
262
263 DRM_LOCK();
264 TAILQ_INSERT_TAIL(&dev->maplist, map, link);
265
266done:
267 /* Jumped to, with lock held, when a kernel map is found. */
268
269 DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
270 map->size);
271
272 *map_ptr = map;
273
274 return 0;
275}
276
277int drm_addmap_ioctl(struct drm_device *dev, void *data,
278 struct drm_file *file_priv)
279{
280 drm_map_t *request = data;
281 drm_local_map_t *map;
282 int err;
283
284 if (!(dev->flags & (FREAD|FWRITE)))
285 return EACCES; /* Require read/write */
286
287 if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP)
288 return EACCES;
289
290 DRM_LOCK();
291 err = drm_addmap(dev, request->offset, request->size, request->type,
292 request->flags, &map);
293 DRM_UNLOCK();
294 if (err != 0)
295 return err;
296
297 request->offset = map->offset;
298 request->size = map->size;
299 request->type = map->type;
300 request->flags = map->flags;
301 request->mtrr = map->mtrr;
302 request->handle = map->handle;
303
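	/* Editor's note: for non-SHM maps userspace identifies the map by its
	 * offset when it mmap()s the device, so hand the offset back as the
	 * handle. */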
304 if (request->type != _DRM_SHM) {
305 request->handle = (void *)request->offset;
306 }
307
308 return 0;
309}
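/* Editor's note: a minimal userspace sketch of this ioctl (assumes libdrm's
 * drm.h is available; fb_base/fb_size are placeholders, error handling
 * omitted):
 *
 *	struct drm_map map = { .offset = fb_base, .size = fb_size,
 *	    .type = _DRM_FRAME_BUFFER, .flags = 0 };
 *	ioctl(fd, DRM_IOCTL_ADD_MAP, &map);
 *	(on success, map.offset/map.handle identify the map for mmap(2))
 */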
310
311void drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
312{
313 DRM_SPINLOCK_ASSERT(&dev->dev_lock);
314
315 TAILQ_REMOVE(&dev->maplist, map, link);
316
317 switch (map->type) {
318 case _DRM_REGISTERS:
319 if (map->bsr == NULL)
320 drm_ioremapfree(map);
321 /* FALLTHROUGH */
322 case _DRM_FRAME_BUFFER:
323 if (map->mtrr) {
324 int __unused retcode;
325
326 retcode = drm_mtrr_del(0, map->offset, map->size,
327 DRM_MTRR_WC);
328 DRM_DEBUG("mtrr_del = %d\n", retcode);
329 }
330 break;
331 case _DRM_SHM:
332 free(map->handle, M_DRM);
333 break;
334 case _DRM_AGP:
335 case _DRM_SCATTER_GATHER:
336 break;
337 case _DRM_CONSISTENT:
338 drm_pci_free(dev, map->dmah);
339 break;
340 default:
341 DRM_ERROR("Bad map type %d\n", map->type);
342 break;
343 }
344
345 if (map->bsr != NULL) {
346 bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid,
347 map->bsr);
348 }
349
350 free(map, M_DRM);
351}
352
 353/* Remove a map's private structure from the list and deallocate its
 354 * resources if the mapping isn't in use.
355 */
356
357int drm_rmmap_ioctl(struct drm_device *dev, void *data,
358 struct drm_file *file_priv)
359{
360 drm_local_map_t *map;
361 drm_map_t *request = data;
362
363 DRM_LOCK();
364 TAILQ_FOREACH(map, &dev->maplist, link) {
365 if (map->handle == request->handle &&
366 map->flags & _DRM_REMOVABLE)
367 break;
368 }
369
370 /* No match found. */
371 if (map == NULL) {
372 DRM_UNLOCK();
373 return EINVAL;
374 }
375
376 drm_rmmap(dev, map);
377
378 DRM_UNLOCK();
379
380 return 0;
381}
382
383
384static void drm_cleanup_buf_error(struct drm_device *dev,
385 drm_buf_entry_t *entry)
386{
387 int i;
388
389 if (entry->seg_count) {
390 for (i = 0; i < entry->seg_count; i++) {
391 drm_pci_free(dev, entry->seglist[i]);
392 }
393 free(entry->seglist, M_DRM);
394
395 entry->seg_count = 0;
396 }
397
398 if (entry->buf_count) {
399 for (i = 0; i < entry->buf_count; i++) {
400 free(entry->buflist[i].dev_private, M_DRM);
401 }
402 free(entry->buflist, M_DRM);
403
404 entry->buf_count = 0;
405 }
406}
407
408static int drm_do_addbufs_agp(struct drm_device *dev, drm_buf_desc_t *request)
409{
410 drm_device_dma_t *dma = dev->dma;
411 drm_buf_entry_t *entry;
412 /*drm_agp_mem_t *agp_entry;
413 int valid*/
414 drm_buf_t *buf;
415 unsigned long offset;
416 unsigned long agp_offset;
417 int count;
418 int order;
419 int size;
420 int alignment;
421 int page_order;
422 int total;
423 int byte_count;
424 int i;
425 drm_buf_t **temp_buflist;
426
427 count = request->count;
428 order = drm_order(request->size);
429 size = 1 << order;
430
431 alignment = (request->flags & _DRM_PAGE_ALIGN)
432 ? round_page(size) : size;
433 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
434 total = PAGE_SIZE << page_order;
435
436 byte_count = 0;
437 agp_offset = dev->agp->base + request->agp_start;
438
439 DRM_DEBUG( "count: %d\n", count );
440 DRM_DEBUG( "order: %d\n", order );
441 DRM_DEBUG( "size: %d\n", size );
442 DRM_DEBUG( "agp_offset: 0x%lx\n", agp_offset );
443 DRM_DEBUG( "alignment: %d\n", alignment );
444 DRM_DEBUG( "page_order: %d\n", page_order );
445 DRM_DEBUG( "total: %d\n", total );
446
447 /* Make sure buffers are located in AGP memory that we own */
448 /* Breaks MGA due to drm_alloc_agp not setting up entries for the
449 * memory. Safe to ignore for now because these ioctls are still
450 * root-only.
451 */
452 /*valid = 0;
453 for (agp_entry = dev->agp->memory; agp_entry;
454 agp_entry = agp_entry->next) {
455 if ((agp_offset >= agp_entry->bound) &&
456 (agp_offset + total * count <=
457 agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
458 valid = 1;
459 break;
460 }
461 }
462 if (!valid) {
463 DRM_DEBUG("zone invalid\n");
464 return EINVAL;
465 }*/
466
467 entry = &dma->bufs[order];
468
469 entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
470 M_NOWAIT | M_ZERO);
471 if ( !entry->buflist ) {
472 return ENOMEM;
473 }
474
475 entry->buf_size = size;
476 entry->page_order = page_order;
477
478 offset = 0;
479
480 while ( entry->buf_count < count ) {
481 buf = &entry->buflist[entry->buf_count];
482 buf->idx = dma->buf_count + entry->buf_count;
483 buf->total = alignment;
484 buf->order = order;
485 buf->used = 0;
486
487 buf->offset = (dma->byte_count + offset);
488 buf->bus_address = agp_offset + offset;
489 buf->address = (void *)(agp_offset + offset);
490 buf->next = NULL;
491 buf->pending = 0;
492 buf->file_priv = NULL;
493
494 buf->dev_priv_size = dev->driver.buf_priv_size;
495 buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
496 M_NOWAIT | M_ZERO);
497 if (buf->dev_private == NULL) {
498 /* Set count correctly so we free the proper amount. */
499 entry->buf_count = count;
500 drm_cleanup_buf_error(dev, entry);
501 return ENOMEM;
502 }
503
504 offset += alignment;
505 entry->buf_count++;
506 byte_count += PAGE_SIZE << page_order;
507 }
508
509 DRM_DEBUG( "byte_count: %d\n", byte_count );
510
511 temp_buflist = realloc(dma->buflist,
512 (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM,
513 M_NOWAIT);
514 if (temp_buflist == NULL) {
515 /* Free the entry because it isn't valid */
516 drm_cleanup_buf_error(dev, entry);
517 return ENOMEM;
518 }
519 dma->buflist = temp_buflist;
520
521 for ( i = 0 ; i < entry->buf_count ; i++ ) {
522 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
523 }
524
525 dma->buf_count += entry->buf_count;
526 dma->byte_count += byte_count;
527
528 DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
529 DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
530
531 request->count = entry->buf_count;
532 request->size = size;
533
534 dma->flags = _DRM_DMA_USE_AGP;
535
536 return 0;
537}
538
539static int drm_do_addbufs_pci(struct drm_device *dev, drm_buf_desc_t *request)
540{
541 drm_device_dma_t *dma = dev->dma;
542 int count;
543 int order;
544 int size;
545 int total;
546 int page_order;
547 drm_buf_entry_t *entry;
548 drm_buf_t *buf;
549 int alignment;
550 unsigned long offset;
551 int i;
552 int byte_count;
553 int page_count;
554 unsigned long *temp_pagelist;
555 drm_buf_t **temp_buflist;
556
557 count = request->count;
558 order = drm_order(request->size);
559 size = 1 << order;
560
561 DRM_DEBUG( "count=%d, size=%d (%d), order=%d\n",
562 request->count, request->size, size, order );
563
564 alignment = (request->flags & _DRM_PAGE_ALIGN)
565 ? round_page(size) : size;
566 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
567 total = PAGE_SIZE << page_order;
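	/* Editor's note: each drm_pci_alloc() segment below is "total" bytes
	 * (1 << page_order pages) and is carved into total / alignment
	 * buffers of "size" bytes each. */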
568
569 entry = &dma->bufs[order];
570
571 entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
572 M_NOWAIT | M_ZERO);
573 entry->seglist = malloc(count * sizeof(*entry->seglist), M_DRM,
574 M_NOWAIT | M_ZERO);
575
576 /* Keep the original pagelist until we know all the allocations
577 * have succeeded
578 */
579 temp_pagelist = malloc((dma->page_count + (count << page_order)) *
580 sizeof(*dma->pagelist), M_DRM, M_NOWAIT);
581
582 if (entry->buflist == NULL || entry->seglist == NULL ||
583 temp_pagelist == NULL) {
584 free(entry->buflist, M_DRM);
585 free(entry->seglist, M_DRM);
586 return ENOMEM;
587 }
588
589 memcpy(temp_pagelist, dma->pagelist, dma->page_count *
590 sizeof(*dma->pagelist));
591
592 DRM_DEBUG( "pagelist: %d entries\n",
593 dma->page_count + (count << page_order) );
594
595 entry->buf_size = size;
596 entry->page_order = page_order;
597 byte_count = 0;
598 page_count = 0;
599
600 while ( entry->buf_count < count ) {
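		/* Editor's note: the DRM_SPINUNLOCK()/DRM_SPINLOCK() pair
		 * below is the r182883 change -- dev->dma_lock is not held
		 * across drm_pci_alloc(), presumably because the allocation
		 * can sleep. */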
601 DRM_SPINUNLOCK(&dev->dma_lock);
602 drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment,
603 0xfffffffful);
604 DRM_SPINLOCK(&dev->dma_lock);
605 if (dmah == NULL) {
606 /* Set count correctly so we free the proper amount. */
607 entry->buf_count = count;
608 entry->seg_count = count;
609 drm_cleanup_buf_error(dev, entry);
610 free(temp_pagelist, M_DRM);
611 return ENOMEM;
612 }
613
614 entry->seglist[entry->seg_count++] = dmah;
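		/* Record this segment's 1 << page_order pages in the still
		 * tentative pagelist; dma->pagelist is only replaced once
		 * every allocation has succeeded. */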
615 for ( i = 0 ; i < (1 << page_order) ; i++ ) {
616 DRM_DEBUG( "page %d @ %p\n",
617 dma->page_count + page_count,
618 (char *)dmah->vaddr + PAGE_SIZE * i );
619 temp_pagelist[dma->page_count + page_count++] =
620 (long)dmah->vaddr + PAGE_SIZE * i;
621 }
622 for ( offset = 0 ;
623 offset + size <= total && entry->buf_count < count ;
624 offset += alignment, ++entry->buf_count ) {
625 buf = &entry->buflist[entry->buf_count];
626 buf->idx = dma->buf_count + entry->buf_count;
627 buf->total = alignment;
628 buf->order = order;
629 buf->used = 0;
630 buf->offset = (dma->byte_count + byte_count + offset);
631 buf->address = ((char *)dmah->vaddr + offset);
632 buf->bus_address = dmah->busaddr + offset;
633 buf->next = NULL;
634 buf->pending = 0;
635 buf->file_priv = NULL;
636
637 buf->dev_priv_size = dev->driver.buf_priv_size;
638 buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
639 M_NOWAIT | M_ZERO);
640 if (buf->dev_private == NULL) {
641 /* Set count correctly so we free the proper amount. */
642 entry->buf_count = count;
643 entry->seg_count = count;
644 drm_cleanup_buf_error(dev, entry);
645 free(temp_pagelist, M_DRM);
646 return ENOMEM;
647 }
648
649 DRM_DEBUG( "buffer %d @ %p\n",
650 entry->buf_count, buf->address );
651 }
652 byte_count += PAGE_SIZE << page_order;
653 }
654
655 temp_buflist = realloc(dma->buflist,
656 (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM,
657 M_NOWAIT);
658 if (temp_buflist == NULL) {
659 /* Free the entry because it isn't valid */
660 drm_cleanup_buf_error(dev, entry);
661 free(temp_pagelist, M_DRM);
662 return ENOMEM;
663 }
664 dma->buflist = temp_buflist;
665
666 for ( i = 0 ; i < entry->buf_count ; i++ ) {
667 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
668 }
669
 670	/* No allocations failed, so now we can replace the original pagelist
671 * with the new one.
672 */
673 free(dma->pagelist, M_DRM);
674 dma->pagelist = temp_pagelist;
675
676 dma->buf_count += entry->buf_count;
677 dma->seg_count += entry->seg_count;
678 dma->page_count += entry->seg_count << page_order;
679 dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
680
681 request->count = entry->buf_count;
682 request->size = size;
683
684 return 0;
685
686}
687
688static int drm_do_addbufs_sg(struct drm_device *dev, drm_buf_desc_t *request)
689{
690 drm_device_dma_t *dma = dev->dma;
691 drm_buf_entry_t *entry;
692 drm_buf_t *buf;
693 unsigned long offset;
694 unsigned long agp_offset;
695 int count;
696 int order;
697 int size;
698 int alignment;
699 int page_order;
700 int total;
701 int byte_count;
702 int i;
703 drm_buf_t **temp_buflist;
704
705 count = request->count;
706 order = drm_order(request->size);
707 size = 1 << order;
708
709 alignment = (request->flags & _DRM_PAGE_ALIGN)
710 ? round_page(size) : size;
711 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
712 total = PAGE_SIZE << page_order;
713
714 byte_count = 0;
715 agp_offset = request->agp_start;
716
717 DRM_DEBUG( "count: %d\n", count );
718 DRM_DEBUG( "order: %d\n", order );
719 DRM_DEBUG( "size: %d\n", size );
720 DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
721 DRM_DEBUG( "alignment: %d\n", alignment );
722 DRM_DEBUG( "page_order: %d\n", page_order );
723 DRM_DEBUG( "total: %d\n", total );
724
725 entry = &dma->bufs[order];
726
727 entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
728 M_NOWAIT | M_ZERO);
729 if (entry->buflist == NULL)
730 return ENOMEM;
731
732 entry->buf_size = size;
733 entry->page_order = page_order;
734
735 offset = 0;
736
737 while ( entry->buf_count < count ) {
738 buf = &entry->buflist[entry->buf_count];
739 buf->idx = dma->buf_count + entry->buf_count;
740 buf->total = alignment;
741 buf->order = order;
742 buf->used = 0;
743
744 buf->offset = (dma->byte_count + offset);
745 buf->bus_address = agp_offset + offset;
746 buf->address = (void *)(agp_offset + offset + dev->sg->handle);
747 buf->next = NULL;
748 buf->pending = 0;
749 buf->file_priv = NULL;
750
751 buf->dev_priv_size = dev->driver.buf_priv_size;
752 buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
753 M_NOWAIT | M_ZERO);
754 if (buf->dev_private == NULL) {
755 /* Set count correctly so we free the proper amount. */
756 entry->buf_count = count;
757 drm_cleanup_buf_error(dev, entry);
758 return ENOMEM;
759 }
760
761 DRM_DEBUG( "buffer %d @ %p\n",
762 entry->buf_count, buf->address );
763
764 offset += alignment;
765 entry->buf_count++;
766 byte_count += PAGE_SIZE << page_order;
767 }
768
769 DRM_DEBUG( "byte_count: %d\n", byte_count );
770
771 temp_buflist = realloc(dma->buflist,
772 (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist), M_DRM,
773 M_NOWAIT);
774 if (temp_buflist == NULL) {
775 /* Free the entry because it isn't valid */
776 drm_cleanup_buf_error(dev, entry);
777 return ENOMEM;
778 }
779 dma->buflist = temp_buflist;
780
781 for ( i = 0 ; i < entry->buf_count ; i++ ) {
782 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
783 }
784
785 dma->buf_count += entry->buf_count;
786 dma->byte_count += byte_count;
787
788 DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
789 DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
790
791 request->count = entry->buf_count;
792 request->size = size;
793
794 dma->flags = _DRM_DMA_USE_SG;
795
796 return 0;
797}
798
799int drm_addbufs_agp(struct drm_device *dev, drm_buf_desc_t *request)
800{
801 int order, ret;
802
803 if (request->count < 0 || request->count > 4096)
804 return EINVAL;
805
806 order = drm_order(request->size);
807 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
808 return EINVAL;
809
810 DRM_SPINLOCK(&dev->dma_lock);
811
812 /* No more allocations after first buffer-using ioctl. */
813 if (dev->buf_use != 0) {
814 DRM_SPINUNLOCK(&dev->dma_lock);
815 return EBUSY;
816 }
817 /* No more than one allocation per order */
818 if (dev->dma->bufs[order].buf_count != 0) {
819 DRM_SPINUNLOCK(&dev->dma_lock);
820 return ENOMEM;
821 }
822
823 ret = drm_do_addbufs_agp(dev, request);
824
825 DRM_SPINUNLOCK(&dev->dma_lock);
826
827 return ret;
828}
829
830int drm_addbufs_sg(struct drm_device *dev, drm_buf_desc_t *request)
831{
832 int order, ret;
833
834 if (!DRM_SUSER(DRM_CURPROC))
835 return EACCES;
836
837 if (request->count < 0 || request->count > 4096)
838 return EINVAL;
839
840 order = drm_order(request->size);
841 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
842 return EINVAL;
843
844 DRM_SPINLOCK(&dev->dma_lock);
845
846 /* No more allocations after first buffer-using ioctl. */
847 if (dev->buf_use != 0) {
848 DRM_SPINUNLOCK(&dev->dma_lock);
849 return EBUSY;
850 }
851 /* No more than one allocation per order */
852 if (dev->dma->bufs[order].buf_count != 0) {
853 DRM_SPINUNLOCK(&dev->dma_lock);
854 return ENOMEM;
855 }
856
857 ret = drm_do_addbufs_sg(dev, request);
858
859 DRM_SPINUNLOCK(&dev->dma_lock);
860
861 return ret;
862}
863
864int drm_addbufs_pci(struct drm_device *dev, drm_buf_desc_t *request)
865{
866 int order, ret;
867
868 if (!DRM_SUSER(DRM_CURPROC))
869 return EACCES;
870
871 if (request->count < 0 || request->count > 4096)
872 return EINVAL;
873
874 order = drm_order(request->size);
875 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
876 return EINVAL;
877
878 DRM_SPINLOCK(&dev->dma_lock);
879
880 /* No more allocations after first buffer-using ioctl. */
881 if (dev->buf_use != 0) {
882 DRM_SPINUNLOCK(&dev->dma_lock);
883 return EBUSY;
884 }
885 /* No more than one allocation per order */
886 if (dev->dma->bufs[order].buf_count != 0) {
887 DRM_SPINUNLOCK(&dev->dma_lock);
888 return ENOMEM;
889 }
890
891 ret = drm_do_addbufs_pci(dev, request);
892
893 DRM_SPINUNLOCK(&dev->dma_lock);
894
895 return ret;
896}
897
898int drm_addbufs_ioctl(struct drm_device *dev, void *data,
899 struct drm_file *file_priv)
900{
901 drm_buf_desc_t *request = data;
902 int err;
903
904 if (request->flags & _DRM_AGP_BUFFER)
905 err = drm_addbufs_agp(dev, request);
906 else if (request->flags & _DRM_SG_BUFFER)
907 err = drm_addbufs_sg(dev, request);
908 else
909 err = drm_addbufs_pci(dev, request);
910
911 return err;
912}
913
914int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
915{
916 drm_device_dma_t *dma = dev->dma;
917 drm_buf_info_t *request = data;
918 int i;
919 int count;
920 int retcode = 0;
921
922 DRM_SPINLOCK(&dev->dma_lock);
923 ++dev->buf_use; /* Can't allocate more after this call */
924 DRM_SPINUNLOCK(&dev->dma_lock);
925
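	/* First pass: count the orders that actually hold buffers, so we can
	 * tell whether the caller's list has room for the descriptors. */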
926 for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
927 if ( dma->bufs[i].buf_count ) ++count;
928 }
929
930 DRM_DEBUG( "count = %d\n", count );
931
932 if ( request->count >= count ) {
933 for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
934 if ( dma->bufs[i].buf_count ) {
935 drm_buf_desc_t from;
936
937 from.count = dma->bufs[i].buf_count;
938 from.size = dma->bufs[i].buf_size;
939 from.low_mark = dma->bufs[i].freelist.low_mark;
940 from.high_mark = dma->bufs[i].freelist.high_mark;
941
942 if (DRM_COPY_TO_USER(&request->list[count], &from,
943 sizeof(drm_buf_desc_t)) != 0) {
944 retcode = EFAULT;
945 break;
946 }
947
948 DRM_DEBUG( "%d %d %d %d %d\n",
949 i,
950 dma->bufs[i].buf_count,
951 dma->bufs[i].buf_size,
952 dma->bufs[i].freelist.low_mark,
953 dma->bufs[i].freelist.high_mark );
954 ++count;
955 }
956 }
957 }
958 request->count = count;
959
960 return retcode;
961}
962
963int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
964{
965 drm_device_dma_t *dma = dev->dma;
966 drm_buf_desc_t *request = data;
967 int order;
968
969 DRM_DEBUG( "%d, %d, %d\n",
970 request->size, request->low_mark, request->high_mark );
971
972
973 order = drm_order(request->size);
974 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
975 request->low_mark < 0 || request->high_mark < 0) {
976 return EINVAL;
977 }
978
979 DRM_SPINLOCK(&dev->dma_lock);
980 if (request->low_mark > dma->bufs[order].buf_count ||
981 request->high_mark > dma->bufs[order].buf_count) {
982 DRM_SPINUNLOCK(&dev->dma_lock);
983 return EINVAL;
984 }
985
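	/* Record the new freelist watermarks for this order; they were
	 * range-checked against buf_count above. */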
986 dma->bufs[order].freelist.low_mark = request->low_mark;
987 dma->bufs[order].freelist.high_mark = request->high_mark;
988 DRM_SPINUNLOCK(&dev->dma_lock);
989
990 return 0;
991}
992
993int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
994{
995 drm_device_dma_t *dma = dev->dma;
996 drm_buf_free_t *request = data;
997 int i;
998 int idx;
999 drm_buf_t *buf;
1000 int retcode = 0;
1001
1002 DRM_DEBUG( "%d\n", request->count );
1003
1004 DRM_SPINLOCK(&dev->dma_lock);
1005 for ( i = 0 ; i < request->count ; i++ ) {
1006 if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
1007 retcode = EFAULT;
1008 break;
1009 }
1010 if ( idx < 0 || idx >= dma->buf_count ) {
1011 DRM_ERROR( "Index %d (of %d max)\n",
1012 idx, dma->buf_count - 1 );
1013 retcode = EINVAL;
1014 break;
1015 }
1016 buf = dma->buflist[idx];
1017 if ( buf->file_priv != file_priv ) {
1018 DRM_ERROR("Process %d freeing buffer not owned\n",
1019 DRM_CURRENTPID);
1020 retcode = EINVAL;
1021 break;
1022 }
1023 drm_free_buffer(dev, buf);
1024 }
1025 DRM_SPINUNLOCK(&dev->dma_lock);
1026
1027 return retcode;
1028}
1029
1030int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
1031{
1032 drm_device_dma_t *dma = dev->dma;
1033 int retcode = 0;
1034 const int zero = 0;
1035 vm_offset_t address;
1036 struct vmspace *vms;
1037#ifdef __FreeBSD__
1038 vm_ooffset_t foff;
1039 vm_size_t size;
1040 vm_offset_t vaddr;
1041#elif defined(__NetBSD__) || defined(__OpenBSD__)
1042 struct vnode *vn;
1043 voff_t foff;
1044 vsize_t size;
1045 vaddr_t vaddr;
1046#endif /* __NetBSD__ || __OpenBSD__ */
1047
1048 drm_buf_map_t *request = data;
1049 int i;
1050
1051#if defined(__NetBSD__) || defined(__OpenBSD__)
1052 if (!vfinddev(kdev, VCHR, &vn))
1053 return 0; /* FIXME: Shouldn't this be EINVAL or something? */
1054#endif /* __NetBSD__ || __OpenBSD */
1055
1056#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
1057 vms = DRM_CURPROC->td_proc->p_vmspace;
1058#else
1059 vms = DRM_CURPROC->p_vmspace;
1060#endif
1061
1062 DRM_SPINLOCK(&dev->dma_lock);
1063 dev->buf_use++; /* Can't allocate more after this call */
1064 DRM_SPINUNLOCK(&dev->dma_lock);
1065
1066 if (request->count < dma->buf_count)
1067 goto done;
1068
1069 if ((dev->driver.use_agp && (dma->flags & _DRM_DMA_USE_AGP)) ||
1070 (dev->driver.use_sg && (dma->flags & _DRM_DMA_USE_SG))) {
1071 drm_local_map_t *map = dev->agp_buffer_map;
1072
1073 if (map == NULL) {
1074 retcode = EINVAL;
1075 goto done;
1076 }
1077 size = round_page(map->size);
1078 foff = map->offset;
1079 } else {
 1080		size = round_page(dma->byte_count);
 1081		foff = 0;
1082 }
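	/* Editor's note: AGP/SG buffers are mapped through agp_buffer_map,
	 * while plain PCI DMA buffers map the whole dma->byte_count run at
	 * file offset 0. */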
1083
1084#ifdef __FreeBSD__
1085 vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
1086#if __FreeBSD_version >= 600023
1087 retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
1088 VM_PROT_ALL, MAP_SHARED, OBJT_DEVICE, dev->devnode, foff);
1089#else
1090 retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
1091 VM_PROT_ALL, MAP_SHARED, SLIST_FIRST(&dev->devnode->si_hlist),
1092 foff);
1093#endif
1094#elif defined(__NetBSD__) || defined(__OpenBSD__)
1095 vaddr = round_page((vaddr_t)vms->vm_daddr + MAXDSIZ);
1096 retcode = uvm_mmap(&vms->vm_map, &vaddr, size,
1097 UVM_PROT_READ | UVM_PROT_WRITE, UVM_PROT_ALL, MAP_SHARED,
1098 &vn->v_uobj, foff, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
1099#endif /* __NetBSD__ || __OpenBSD */
1100 if (retcode)
1101 goto done;
1102
1103 request->virtual = (void *)vaddr;
1104
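	/* Copy the per-buffer bookkeeping out to the caller; each user
	 * address is the mapping base plus the buffer's offset into the
	 * DMA area. */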
1105 for ( i = 0 ; i < dma->buf_count ; i++ ) {
1106 if (DRM_COPY_TO_USER(&request->list[i].idx,
1107 &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
1108 retcode = EFAULT;
1109 goto done;
1110 }
1111 if (DRM_COPY_TO_USER(&request->list[i].total,
1112 &dma->buflist[i]->total, sizeof(request->list[0].total))) {
1113 retcode = EFAULT;
1114 goto done;
1115 }
1116 if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
1117 sizeof(zero))) {
1118 retcode = EFAULT;
1119 goto done;
1120 }
1121 address = vaddr + dma->buflist[i]->offset; /* *** */
1122 if (DRM_COPY_TO_USER(&request->list[i].address, &address,
1123 sizeof(address))) {
1124 retcode = EFAULT;
1125 goto done;
1126 }
1127 }
1128
1129 done:
1130 request->count = dma->buf_count;
1131
1132 DRM_DEBUG( "%d buffers, retcode = %d\n", request->count, retcode );
1133
1134 return retcode;
1135}