/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/10.3/sys/dev/drm/drm_bufs.c 331987 2018-04-04 05:43:03Z gordon $");

/** @file drm_bufs.c
 * Implementation of the ioctls for setup of DRM mappings and DMA buffers.
 */

#include "dev/pci/pcireg.h"

#include "dev/drm/drmP.h"

/* Allocation of PCI memory resources (framebuffer, registers, etc.) for
 * drm_get_resource_*. Note that they are not RF_ACTIVE, so there's no virtual
 * address for accessing them. Cleaned up at unload.
 */
static int drm_alloc_resource(struct drm_device *dev, int resource)
{
	struct resource *res;
	int rid;

	DRM_SPINLOCK_ASSERT(&dev->dev_lock);

	if (resource >= DRM_MAX_PCI_RESOURCE) {
		DRM_ERROR("Resource %d too large\n", resource);
		return 1;
	}

	if (dev->pcir[resource] != NULL) {
		return 0;
	}

	DRM_UNLOCK();
	rid = PCIR_BAR(resource);
	res = bus_alloc_resource_any(dev->device, SYS_RES_MEMORY, &rid,
	    RF_SHAREABLE);
	DRM_LOCK();
	if (res == NULL) {
		DRM_ERROR("Couldn't find resource 0x%x\n", resource);
		return 1;
	}

	if (dev->pcir[resource] == NULL) {
		dev->pcirid[resource] = rid;
		dev->pcir[resource] = res;
	}

	return 0;
}

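/*
 * Return the bus start address / length of the given PCI BAR, allocating
 * the underlying resource on first use via drm_alloc_resource().  Both
 * helpers return 0 on failure, so callers must treat a zero start or
 * length as "no such BAR".
 */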
unsigned long drm_get_resource_start(struct drm_device *dev,
				     unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

	return rman_get_start(dev->pcir[resource]);
}

unsigned long drm_get_resource_len(struct drm_device *dev,
				   unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

	return rman_get_size(dev->pcir[resource]);
}

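/*
 * Create (or look up) a mapping of registers, framebuffer, shared memory,
 * AGP, scatter/gather or consistent DMA memory and link it into
 * dev->maplist.  Called with dev_lock held; the lock is dropped and
 * retaken around the allocations.  Returns a positive errno value on
 * failure.
 *
 * A minimal caller sketch (mmio_base/mmio_size are hypothetical values,
 * not from this file):
 *
 *	drm_local_map_t *map;
 *	int ret;
 *
 *	DRM_LOCK();
 *	ret = drm_addmap(dev, mmio_base, mmio_size, _DRM_REGISTERS,
 *	    _DRM_READ_ONLY, &map);
 *	DRM_UNLOCK();
 */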
int drm_addmap(struct drm_device *dev, unsigned long offset,
    unsigned long size, enum drm_map_type type, enum drm_map_flags flags,
    drm_local_map_t **map_ptr)
{
	drm_local_map_t *map;
	int align;
	/*drm_agp_mem_t *entry;
	int valid;*/

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
		DRM_ERROR("Requested removable map for non-DRM_SHM\n");
		return EINVAL;
	}
	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
		DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}
	if (offset + size < offset) {
		DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}

	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset,
	    size, type);

	/* Check if this is just another version of a kernel-allocated map, and
	 * just hand that back if so.
	 */
	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
	    type == _DRM_SHM) {
		TAILQ_FOREACH(map, &dev->maplist, link) {
			if (map->type == type && (map->offset == offset ||
			    (map->type == _DRM_SHM &&
			    map->flags == _DRM_CONTAINS_LOCK))) {
				map->size = size;
				DRM_DEBUG("Found kernel map %d\n", type);
				goto done;
			}
		}
	}
	DRM_UNLOCK();

	/* Allocate a new map structure, fill it in, and do any type-specific
	 * initialization necessary.
	 */
	map = malloc(sizeof(*map), DRM_MEM_MAPS, M_ZERO | M_NOWAIT);
	if (!map) {
		DRM_LOCK();
		return ENOMEM;
	}

	map->offset = offset;
	map->size = size;
	map->type = type;
	map->flags = flags;
	map->handle = (void *)((unsigned long)alloc_unr(dev->map_unrhdr) <<
	    DRM_MAP_HANDLE_SHIFT);

	switch (map->type) {
	case _DRM_REGISTERS:
		map->virtual = drm_ioremap(dev, map);
		if (!(map->flags & _DRM_WRITE_COMBINING))
			break;
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
			map->mtrr = 1;
		break;
	case _DRM_SHM:
		map->virtual = malloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
		DRM_DEBUG("%lu %d %p\n",
		    map->size, drm_order(map->size), map->virtual);
		if (!map->virtual) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return ENOMEM;
		}
		map->offset = (unsigned long)map->virtual;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			DRM_LOCK();
			if (dev->lock.hw_lock != NULL) {
				DRM_UNLOCK();
				free(map->virtual, DRM_MEM_MAPS);
				free(map, DRM_MEM_MAPS);
				return EBUSY;
			}
			dev->lock.hw_lock = map->virtual; /* Pointer to lock */
			DRM_UNLOCK();
		}
		break;
	case _DRM_AGP:
		/*valid = 0;*/
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable. So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->info.ai_aperture_size - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->mtrr; /* for getmap */
		/*for (entry = dev->agp->memory; entry; entry = entry->next) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <=
			    entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!valid) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return EACCES;
		}*/
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return EINVAL;
		}
		map->virtual = (void *)(dev->sg->vaddr + offset);
		map->offset = dev->sg->vaddr + offset;
		break;
	case _DRM_CONSISTENT:
		/* Unfortunately, we don't get any alignment specification from
		 * the caller, so we have to guess. drm_pci_alloc requires
		 * a power-of-two alignment, so try to align the bus address of
		 * the map to its size if possible, otherwise just assume
		 * PAGE_SIZE alignment.
		 */
		align = map->size;
		if ((align & (align - 1)) != 0)
			align = PAGE_SIZE;
		map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
		if (map->dmah == NULL) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return ENOMEM;
		}
		map->virtual = map->dmah->vaddr;
		map->offset = map->dmah->busaddr;
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		free(map, DRM_MEM_MAPS);
		DRM_LOCK();
		return EINVAL;
	}

	DRM_LOCK();
	TAILQ_INSERT_TAIL(&dev->maplist, map, link);

done:
	/* Jumped to, with lock held, when a kernel map is found. */

	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
	    map->size);

	*map_ptr = map;

	return 0;
}

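/*
 * DRM_IOCTL_ADD_MAP entry point: wraps drm_addmap() and copies the
 * resulting offset/size/type/flags/mtrr/handle back to user space.
 * Only root may create maps of types other than _DRM_AGP.
 */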
int drm_addmap_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_map *request = data;
	drm_local_map_t *map;
	int err;

	if (!(dev->flags & (FREAD|FWRITE)))
		return EACCES; /* Require read/write */

	if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP)
		return EACCES;

	DRM_LOCK();
	err = drm_addmap(dev, request->offset, request->size, request->type,
	    request->flags, &map);
	DRM_UNLOCK();
	if (err != 0)
		return err;

	request->offset = map->offset;
	request->size = map->size;
	request->type = map->type;
	request->flags = map->flags;
	request->mtrr = map->mtrr;
	request->handle = (void *)map->handle;

	return 0;
}

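/*
 * Tear down a single map: undo the type-specific setup done by
 * drm_addmap() (ioremap, MTRR, malloc'd SHM, consistent DMA), release
 * any backing bus resource, return the handle to the unit-number
 * allocator, and free the map itself.  Called with dev_lock held.
 */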
void drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
{
	DRM_SPINLOCK_ASSERT(&dev->dev_lock);

	if (map == NULL)
		return;

	TAILQ_REMOVE(&dev->maplist, map, link);

	switch (map->type) {
	case _DRM_REGISTERS:
		if (map->bsr == NULL)
			drm_ioremapfree(map);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (map->mtrr) {
			int __unused retcode;

			retcode = drm_mtrr_del(0, map->offset, map->size,
			    DRM_MTRR_WC);
			DRM_DEBUG("mtrr_del = %d\n", retcode);
		}
		break;
	case _DRM_SHM:
		free(map->virtual, DRM_MEM_MAPS);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		drm_pci_free(dev, map->dmah);
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		break;
	}

	if (map->bsr != NULL) {
		bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid,
		    map->bsr);
	}

	DRM_UNLOCK();
	if (map->handle)
		free_unr(dev->map_unrhdr, (unsigned long)map->handle >>
		    DRM_MAP_HANDLE_SHIFT);
	DRM_LOCK();

	free(map, DRM_MEM_MAPS);
}

/* Remove a map from the maplist and deallocate its resources.  Only maps
 * created with the _DRM_REMOVABLE flag may be removed through this ioctl.
 */

int drm_rmmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_local_map_t *map;
	struct drm_map *request = data;

	DRM_LOCK();
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (map->handle == request->handle &&
		    map->flags & _DRM_REMOVABLE)
			break;
	}

	/* No match found. */
	if (map == NULL) {
		DRM_UNLOCK();
		return EINVAL;
	}

	drm_rmmap(dev, map);

	DRM_UNLOCK();

	return 0;
}

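/*
 * Undo a partially completed addbufs operation: free every DMA segment in
 * entry->seglist and every per-buffer dev_private in entry->buflist, then
 * zero the counts.  Callers set buf_count/seg_count to the full request
 * size first so everything allocated so far is released.
 */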
static void drm_cleanup_buf_error(struct drm_device *dev,
				  drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			drm_pci_free(dev, entry->seglist[i]);
		}
		free(entry->seglist, DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			free(entry->buflist[i].dev_private, DRM_MEM_BUFS);
		}
		free(entry->buflist, DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}

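/*
 * Populate dma->bufs[order] with `count' buffers of 2^order bytes backed
 * by a previously bound AGP region at request->agp_start.  No memory is
 * allocated for the buffers themselves, only for the bookkeeping
 * structures; buf->address points straight into the AGP aperture.
 * Called with dma_lock held by the drm_addbufs_agp() wrapper.
 */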
static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	/*drm_agp_mem_t *agp_entry;
	int valid*/
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	/* Make sure buffers are located in AGP memory that we own */
	/* Breaks MGA due to drm_alloc_agp not setting up entries for the
	 * memory. Safe to ignore for now because these ioctls are still
	 * root-only.
	 */
	/*valid = 0;
	for (agp_entry = dev->agp->memory; agp_entry;
	    agp_entry = agp_entry->next) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <=
		    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!valid) {
		DRM_DEBUG("zone invalid\n");
		return EINVAL;
	}*/

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	if (!entry->buflist) {
		return ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return ENOMEM;
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	return 0;
}

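/*
 * Populate dma->bufs[order] with buffers carved out of freshly allocated
 * consistent DMA segments (one drm_pci_alloc() of 2^page_order pages per
 * segment, several buffers per segment when the buffer size is smaller
 * than a page).  The existing dma->pagelist is copied and only swapped in
 * once every allocation has succeeded.  dma_lock is held on entry and is
 * released around each drm_pci_alloc() call.
 */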
static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
	    request->count, request->size, size, order);

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	entry->seglist = malloc(count * sizeof(*entry->seglist), DRM_MEM_SEGS,
	    M_NOWAIT | M_ZERO);

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = malloc((dma->page_count + (count << page_order)) *
	    sizeof(*dma->pagelist), DRM_MEM_PAGES, M_NOWAIT);

	if (entry->buflist == NULL || entry->seglist == NULL ||
	    temp_pagelist == NULL) {
		free(temp_pagelist, DRM_MEM_PAGES);
		free(entry->seglist, DRM_MEM_SEGS);
		free(entry->buflist, DRM_MEM_BUFS);
		return ENOMEM;
	}

	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
	    sizeof(*dma->pagelist));

	DRM_DEBUG("pagelist: %d entries\n",
	    dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment,
		    0xfffffffful);
		DRM_SPINLOCK(&dev->dma_lock);
		if (dmah == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			free(temp_pagelist, DRM_MEM_PAGES);
			return ENOMEM;
		}

		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ %p\n",
			    dma->page_count + page_count,
			    (char *)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++] =
			    (long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		    offset + size <= total && entry->buf_count < count;
		    offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = ((char *)dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->buf_priv_size;
			buf->dev_private = malloc(buf->dev_priv_size,
			    DRM_MEM_BUFS, M_NOWAIT | M_ZERO);
			if (buf->dev_private == NULL) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				free(temp_pagelist, DRM_MEM_PAGES);
				return ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
			    entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		free(temp_pagelist, DRM_MEM_PAGES);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	free(dma->pagelist, DRM_MEM_PAGES);
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	request->count = entry->buf_count;
	request->size = size;

	return 0;
}

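/*
 * Populate dma->bufs[order] with buffers that live in the device's
 * scatter/gather area: buf->bus_address is an offset from
 * request->agp_start, while buf->address adds dev->sg->vaddr so the
 * kernel can reach the same memory.  Bookkeeping mirrors
 * drm_do_addbufs_agp(); dma_lock is held by the caller.
 */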
static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	if (entry->buflist == NULL)
		return ENOMEM;

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->vaddr);
		buf->next = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n",
		    entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	return 0;
}

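/*
 * The drm_addbufs_{agp,sg,pci}() wrappers validate the request (count in
 * [0, 4096], order within [DRM_MIN_ORDER, DRM_MAX_ORDER]), take dma_lock,
 * and enforce two global rules: no new allocations once any buffer-using
 * ioctl has run (dev->buf_use != 0), and at most one allocation per
 * order.  The SG and PCI variants additionally require root.
 */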
int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_agp(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}

int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (!DRM_SUSER(DRM_CURPROC))
		return EACCES;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_sg(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}

int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (!DRM_SUSER(DRM_CURPROC))
		return EACCES;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_pci(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}

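/*
 * DRM_IOCTL_ADD_BUFS entry point: dispatch on the request flags to the
 * AGP, scatter/gather or PCI path.
 */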
int drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int err;

	if (request->flags & _DRM_AGP_BUFFER)
		err = drm_addbufs_agp(dev, request);
	else if (request->flags & _DRM_SG_BUFFER)
		err = drm_addbufs_sg(dev, request);
	else
		err = drm_addbufs_pci(dev, request);

	return err;
}

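/*
 * DRM_IOCTL_INFO_BUFS entry point: report the per-order buffer pools
 * (count, size, free-list watermarks) to user space.  Also bumps
 * dev->buf_use, which freezes further addbufs calls.  If the caller's
 * list is too small, only the number of pools is reported back in
 * request->count.
 */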
int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;
	int retcode = 0;

	DRM_SPINLOCK(&dev->dma_lock);
	++dev->buf_use;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK(&dev->dma_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc from;

				memset(&from, 0, sizeof(from));
				from.count = dma->bufs[i].buf_count;
				from.size = dma->bufs[i].buf_size;
				from.low_mark = dma->bufs[i].freelist.low_mark;
				from.high_mark = dma->bufs[i].freelist.high_mark;

				if (DRM_COPY_TO_USER(&request->list[count], &from,
				    sizeof(struct drm_buf_desc)) != 0) {
					retcode = EFAULT;
					break;
				}

				DRM_DEBUG("%d %d %d %d %d\n",
				    i, dma->bufs[i].buf_count,
				    dma->bufs[i].buf_size,
				    dma->bufs[i].freelist.low_mark,
				    dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return retcode;
}

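/*
 * DRM_IOCTL_MARK_BUFS entry point: set the free-list low/high watermarks
 * for the pool matching request->size, after bounds-checking them against
 * that pool's buffer count.
 */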
int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;

	DRM_DEBUG("%d, %d, %d\n",
	    request->size, request->low_mark, request->high_mark);

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
	    request->low_mark < 0 || request->high_mark < 0) {
		return EINVAL;
	}

	DRM_SPINLOCK(&dev->dma_lock);
	if (request->low_mark > dma->bufs[order].buf_count ||
	    request->high_mark > dma->bufs[order].buf_count) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EINVAL;
	}

	dma->bufs[order].freelist.low_mark = request->low_mark;
	dma->bufs[order].freelist.high_mark = request->high_mark;
	DRM_SPINUNLOCK(&dev->dma_lock);

	return 0;
}

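/*
 * DRM_IOCTL_FREE_BUFS entry point: return a list of buffers to the free
 * list.  Each index is validated, and a buffer must be owned by the
 * calling file before drm_free_buffer() releases it.
 */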
int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	drm_buf_t *buf;
	int retcode = 0;

	DRM_DEBUG("%d\n", request->count);

	DRM_SPINLOCK(&dev->dma_lock);
	for (i = 0; i < request->count; i++) {
		if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
			retcode = EFAULT;
			break;
		}
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
			    idx, dma->buf_count - 1);
			retcode = EINVAL;
			break;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
			    DRM_CURRENTPID);
			retcode = EINVAL;
			break;
		}
		drm_free_buffer(dev, buf);
	}
	DRM_SPINUNLOCK(&dev->dma_lock);

	return retcode;
}

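/*
 * DRM_IOCTL_MAP_BUFS entry point: map the entire buffer pool into the
 * caller's address space with a single vm_mmap() (either the AGP/SG
 * buffer map or the linear PCI DMA range) and copy out per-buffer
 * idx/total/used/address records.  Also bumps dev->buf_use to freeze
 * further allocations.
 */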
int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	vm_offset_t address;
	struct vmspace *vms;
	vm_ooffset_t foff;
	vm_size_t size;
	vm_offset_t vaddr;
	struct drm_buf_map *request = data;
	int i;

	vms = DRM_CURPROC->td_proc->p_vmspace;

	DRM_SPINLOCK(&dev->dma_lock);
	dev->buf_use++;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK(&dev->dma_lock);

	if (request->count < dma->buf_count)
		goto done;

	if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
	    (drm_core_check_feature(dev, DRIVER_SG) &&
	    (dma->flags & _DRM_DMA_USE_SG))) {
		drm_local_map_t *map = dev->agp_buffer_map;

		if (map == NULL) {
			retcode = EINVAL;
			goto done;
		}
		size = round_page(map->size);
		foff = (unsigned long)map->handle;
	} else {
		size = round_page(dma->byte_count);
		foff = 0;
	}

	vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
#if __FreeBSD_version >= 600023
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC, OBJT_DEVICE,
	    dev->devnode, foff);
#else
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
	    SLIST_FIRST(&dev->devnode->si_hlist), foff);
#endif
	if (retcode)
		goto done;

	request->virtual = (void *)vaddr;

	for (i = 0; i < dma->buf_count; i++) {
		if (DRM_COPY_TO_USER(&request->list[i].idx,
		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].total,
		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
		    sizeof(zero))) {
			retcode = EFAULT;
			goto done;
		}
		address = vaddr + dma->buflist[i]->offset; /* *** */
		if (DRM_COPY_TO_USER(&request->list[i].address, &address,
		    sizeof(address))) {
			retcode = EFAULT;
			goto done;
		}
	}

 done:
	request->count = dma->buf_count;

	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}

/*
 * Compute the smallest order such that (1 << order) >= size.
 * Can be made faster.
 */
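/*
 * Example: drm_order(PAGE_SIZE) is PAGE_SHIFT, drm_order(PAGE_SIZE + 1)
 * is PAGE_SHIFT + 1, and drm_order(0) is defined as 0.
 */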
int drm_order(unsigned long size)
{
	int order;

	if (size == 0)
		return 0;

	order = flsl(size) - 1;
	if (size & ~(1ul << order))
		++order;

	return order;
}