/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/drm/drm_bufs.c 194537 2009-06-20 16:37:24Z rnoland $");

/** @file drm_bufs.c
 * Implementation of the ioctls for setup of DRM mappings and DMA buffers.
 */

#include "dev/pci/pcireg.h"

#include "dev/drm/drmP.h"

/* Allocation of PCI memory resources (framebuffer, registers, etc.) for
 * drm_get_resource_*. Note that they are not RF_ACTIVE, so there's no virtual
 * address for accessing them. Cleaned up at unload.
 */
static int drm_alloc_resource(struct drm_device *dev, int resource)
{
	if (resource >= DRM_MAX_PCI_RESOURCE) {
		DRM_ERROR("Resource %d too large\n", resource);
		return 1;
	}

	DRM_UNLOCK();
	if (dev->pcir[resource] != NULL) {
		DRM_LOCK();
		return 0;
	}

	dev->pcirid[resource] = PCIR_BAR(resource);
	dev->pcir[resource] = bus_alloc_resource_any(dev->device,
	    SYS_RES_MEMORY, &dev->pcirid[resource], RF_SHAREABLE);
	DRM_LOCK();

	if (dev->pcir[resource] == NULL) {
		DRM_ERROR("Couldn't find resource 0x%x\n", resource);
		return 1;
	}

	return 0;
}

unsigned long drm_get_resource_start(struct drm_device *dev,
				     unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

	return rman_get_start(dev->pcir[resource]);
}

unsigned long drm_get_resource_len(struct drm_device *dev,
				   unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

	return rman_get_size(dev->pcir[resource]);
}

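/* Create a core mapping of the given type and hand it back through
 * map_ptr.  Called with the DRM lock held; the lock is dropped around
 * the allocations below.
 */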
int drm_addmap(struct drm_device * dev, unsigned long offset,
	       unsigned long size,
    enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t **map_ptr)
{
	drm_local_map_t *map;
	int align;
	/*drm_agp_mem_t *entry;
	int valid;*/

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
		DRM_ERROR("Requested removable map for non-DRM_SHM\n");
		return EINVAL;
	}
	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
		DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}
	if (offset + size < offset) {
		DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}

	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset,
	    size, type);

	/* Check if this is just another version of a kernel-allocated map, and
	 * just hand that back if so.
	 */
	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
	    type == _DRM_SHM) {
		TAILQ_FOREACH(map, &dev->maplist, link) {
			if (map->type == type && (map->offset == offset ||
			    (map->type == _DRM_SHM &&
			    map->flags == _DRM_CONTAINS_LOCK))) {
				map->size = size;
				DRM_DEBUG("Found kernel map %d\n", type);
				goto done;
			}
		}
	}
	DRM_UNLOCK();

	/* Allocate a new map structure, fill it in, and do any type-specific
	 * initialization necessary.
	 */
	map = malloc(sizeof(*map), DRM_MEM_MAPS, M_ZERO | M_NOWAIT);
	if (!map) {
		DRM_LOCK();
		return ENOMEM;
	}

	map->offset = offset;
	map->size = size;
	map->type = type;
	map->flags = flags;

	switch (map->type) {
	case _DRM_REGISTERS:
		map->handle = drm_ioremap(dev, map);
		if (!(map->flags & _DRM_WRITE_COMBINING))
			break;
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
			map->mtrr = 1;
		break;
	case _DRM_SHM:
		map->handle = malloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
		DRM_DEBUG("%lu %d %p\n",
		    map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			DRM_LOCK();
			if (dev->lock.hw_lock != NULL) {
				DRM_UNLOCK();
				free(map->handle, DRM_MEM_MAPS);
				free(map, DRM_MEM_MAPS);
				return EBUSY;
			}
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
			DRM_UNLOCK();
		}
		break;
	case _DRM_AGP:
		/*valid = 0;*/
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable. So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->info.ai_aperture_size - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->mtrr; /* for getmap */
		/*for (entry = dev->agp->memory; entry; entry = entry->next) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <=
			    entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!valid) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return EACCES;
		}*/
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return EINVAL;
		}
		map->offset += dev->sg->handle;
		break;
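	/* Note: (align & (align - 1)) == 0 exactly when align is a power
	 * of two, so the check below falls back to PAGE_SIZE alignment
	 * for non-power-of-two sizes.
	 */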
	case _DRM_CONSISTENT:
		/* Unfortunately, we don't get any alignment specification from
		 * the caller, so we have to guess. drm_pci_alloc requires
		 * a power-of-two alignment, so try to align the bus address of
		 * the map to its size if possible, otherwise just assume
		 * PAGE_SIZE alignment.
		 */
		align = map->size;
		if ((align & (align - 1)) != 0)
			align = PAGE_SIZE;
		map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
		if (map->dmah == NULL) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return ENOMEM;
		}
		map->handle = map->dmah->vaddr;
		map->offset = map->dmah->busaddr;
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		free(map, DRM_MEM_MAPS);
		DRM_LOCK();
		return EINVAL;
	}

	DRM_LOCK();
	TAILQ_INSERT_TAIL(&dev->maplist, map, link);

done:
	/* Jumped to, with lock held, when a kernel map is found. */

	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
	    map->size);

	*map_ptr = map;

	return 0;
}

int drm_addmap_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_map *request = data;
	drm_local_map_t *map;
	int err;

	if (!(dev->flags & (FREAD|FWRITE)))
		return EACCES; /* Require read/write */

	if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP)
		return EACCES;

	DRM_LOCK();
	err = drm_addmap(dev, request->offset, request->size, request->type,
	    request->flags, &map);
	DRM_UNLOCK();
	if (err != 0)
		return err;

	request->offset = map->offset;
	request->size = map->size;
	request->type = map->type;
	request->flags = map->flags;
	request->mtrr = map->mtrr;
	request->handle = map->handle;

	if (request->type != _DRM_SHM) {
		request->handle = (void *)request->offset;
	}

	return 0;
}

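/* Remove the given map from dev->maplist and release its backing
 * resources.  The DRM lock (dev_lock) must be held by the caller;
 * the ioctl wrapper below acquires it itself.
 */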
void drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
{
	DRM_SPINLOCK_ASSERT(&dev->dev_lock);

	if (map == NULL)
		return;

	TAILQ_REMOVE(&dev->maplist, map, link);

	switch (map->type) {
	case _DRM_REGISTERS:
		if (map->bsr == NULL)
			drm_ioremapfree(map);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (map->mtrr) {
			int __unused retcode;

			retcode = drm_mtrr_del(0, map->offset, map->size,
			    DRM_MTRR_WC);
			DRM_DEBUG("mtrr_del = %d\n", retcode);
		}
		break;
	case _DRM_SHM:
		free(map->handle, DRM_MEM_MAPS);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		drm_pci_free(dev, map->dmah);
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		break;
	}

	if (map->bsr != NULL) {
		bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid,
		    map->bsr);
	}

	free(map, DRM_MEM_MAPS);
}

/* Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 */

int drm_rmmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	drm_local_map_t *map;
	struct drm_map *request = data;

	DRM_LOCK();
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (map->handle == request->handle &&
		    map->flags & _DRM_REMOVABLE)
			break;
	}

	/* No match found. */
	if (map == NULL) {
		DRM_UNLOCK();
		return EINVAL;
	}

	drm_rmmap(dev, map);

	DRM_UNLOCK();

	return 0;
}

static void drm_cleanup_buf_error(struct drm_device *dev,
				  drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			drm_pci_free(dev, entry->seglist[i]);
		}
		free(entry->seglist, DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			free(entry->buflist[i].dev_private, DRM_MEM_BUFS);
		}
		free(entry->buflist, DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}

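/* Carve count buffers of 1 << order bytes out of the AGP aperture,
 * starting at request->agp_start, and append them to dma->bufs[order].
 */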
static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	/*drm_agp_mem_t *agp_entry;
	int valid*/
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	/* Make sure buffers are located in AGP memory that we own */
	/* Breaks MGA due to drm_alloc_agp not setting up entries for the
	 * memory. Safe to ignore for now because these ioctls are still
	 * root-only.
	 */
	/*valid = 0;
	for (agp_entry = dev->agp->memory; agp_entry;
	    agp_entry = agp_entry->next) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <=
		    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!valid) {
		DRM_DEBUG("zone invalid\n");
		return EINVAL;
	}*/

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	if (!entry->buflist) {
		return ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return ENOMEM;
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	return 0;
}

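/* PCI variant: back each buffer with pages from drm_pci_alloc() and
 * record every page in dma->pagelist for later lookup.
 */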
static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
	    request->count, request->size, size, order);

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	entry->seglist = malloc(count * sizeof(*entry->seglist), DRM_MEM_SEGS,
	    M_NOWAIT | M_ZERO);

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = malloc((dma->page_count + (count << page_order)) *
	    sizeof(*dma->pagelist), DRM_MEM_PAGES, M_NOWAIT);

	if (entry->buflist == NULL || entry->seglist == NULL ||
	    temp_pagelist == NULL) {
		free(temp_pagelist, DRM_MEM_PAGES);
		free(entry->seglist, DRM_MEM_SEGS);
		free(entry->buflist, DRM_MEM_BUFS);
		return ENOMEM;
	}

	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
	    sizeof(*dma->pagelist));

	DRM_DEBUG("pagelist: %d entries\n",
	    dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment,
		    0xfffffffful);
		DRM_SPINLOCK(&dev->dma_lock);
		if (dmah == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			free(temp_pagelist, DRM_MEM_PAGES);
			return ENOMEM;
		}

		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ %p\n",
			    dma->page_count + page_count,
			    (char *)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++] =
			    (long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		    offset + size <= total && entry->buf_count < count;
		    offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = ((char *)dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->buf_priv_size;
			buf->dev_private = malloc(buf->dev_priv_size,
			    DRM_MEM_BUFS, M_NOWAIT | M_ZERO);
			if (buf->dev_private == NULL) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				free(temp_pagelist, DRM_MEM_PAGES);
				return ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
			    entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		free(temp_pagelist, DRM_MEM_PAGES);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	free(dma->pagelist, DRM_MEM_PAGES);
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	request->count = entry->buf_count;
	request->size = size;

	return 0;
}

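/* Scatter/gather variant: buffers alias the previously allocated SG
 * area at dev->sg->handle rather than AGP or fresh PCI memory.
 */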
static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count: %d\n", count);
	DRM_DEBUG("order: %d\n", order);
	DRM_DEBUG("size: %d\n", size);
	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
	DRM_DEBUG("alignment: %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total: %d\n", total);

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	if (entry->buflist == NULL)
		return ENOMEM;

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		buf->next = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n",
		    entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	return 0;
}

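/* The drm_addbufs_* entry points validate the request and serialize on
 * dma_lock before calling the matching drm_do_addbufs_* worker above.
 */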
int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_agp(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}

int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (!DRM_SUSER(DRM_CURPROC))
		return EACCES;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_sg(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}

int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (!DRM_SUSER(DRM_CURPROC))
		return EACCES;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_pci(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}

int drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int err;

	if (request->flags & _DRM_AGP_BUFFER)
		err = drm_addbufs_agp(dev, request);
	else if (request->flags & _DRM_SG_BUFFER)
		err = drm_addbufs_sg(dev, request);
	else
		err = drm_addbufs_pci(dev, request);

	return err;
}

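/* Report the per-order buffer pools to user space, one drm_buf_desc
 * per order that has buffers allocated.
 */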
int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;
	int retcode = 0;

	DRM_SPINLOCK(&dev->dma_lock);
	++dev->buf_use;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK(&dev->dma_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc from;

				from.count = dma->bufs[i].buf_count;
				from.size = dma->bufs[i].buf_size;
				from.low_mark = dma->bufs[i].freelist.low_mark;
				from.high_mark = dma->bufs[i].freelist.high_mark;

				if (DRM_COPY_TO_USER(&request->list[count], &from,
				    sizeof(struct drm_buf_desc)) != 0) {
					retcode = EFAULT;
					break;
				}

				DRM_DEBUG("%d %d %d %d %d\n",
				    i, dma->bufs[i].buf_count,
				    dma->bufs[i].buf_size,
				    dma->bufs[i].freelist.low_mark,
				    dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return retcode;
}

int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;

	DRM_DEBUG("%d, %d, %d\n",
	    request->size, request->low_mark, request->high_mark);

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
	    request->low_mark < 0 || request->high_mark < 0) {
		return EINVAL;
	}

	DRM_SPINLOCK(&dev->dma_lock);
	if (request->low_mark > dma->bufs[order].buf_count ||
	    request->high_mark > dma->bufs[order].buf_count) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EINVAL;
	}

	dma->bufs[order].freelist.low_mark = request->low_mark;
	dma->bufs[order].freelist.high_mark = request->high_mark;
	DRM_SPINUNLOCK(&dev->dma_lock);

	return 0;
}

int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	drm_buf_t *buf;
	int retcode = 0;

	DRM_DEBUG("%d\n", request->count);

	DRM_SPINLOCK(&dev->dma_lock);
	for (i = 0; i < request->count; i++) {
		if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
			retcode = EFAULT;
			break;
		}
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
			    idx, dma->buf_count - 1);
			retcode = EINVAL;
			break;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
			    DRM_CURRENTPID);
			retcode = EINVAL;
			break;
		}
		drm_free_buffer(dev, buf);
	}
	DRM_SPINUNLOCK(&dev->dma_lock);

	return retcode;
}

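/* Map the entire buffer pool into the calling process in one go and
 * copy the index, size, and user address of every buffer back out.
 */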
int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	vm_offset_t address;
	struct vmspace *vms;
	vm_ooffset_t foff;
	vm_size_t size;
	vm_offset_t vaddr;
	struct drm_buf_map *request = data;
	int i;

	vms = DRM_CURPROC->td_proc->p_vmspace;

	DRM_SPINLOCK(&dev->dma_lock);
	dev->buf_use++;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK(&dev->dma_lock);

	if (request->count < dma->buf_count)
		goto done;

	if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
	    (drm_core_check_feature(dev, DRIVER_SG) &&
	    (dma->flags & _DRM_DMA_USE_SG))) {
		drm_local_map_t *map = dev->agp_buffer_map;

		if (map == NULL) {
			retcode = EINVAL;
			goto done;
		}
		size = round_page(map->size);
		foff = map->offset;
	} else {
		size = round_page(dma->byte_count);
		foff = 0;
	}

	vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
#if __FreeBSD_version >= 600023
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC, OBJT_DEVICE,
	    dev->devnode, foff);
#else
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC,
	    SLIST_FIRST(&dev->devnode->si_hlist), foff);
#endif
	if (retcode)
		goto done;

	request->virtual = (void *)vaddr;

	for (i = 0; i < dma->buf_count; i++) {
		if (DRM_COPY_TO_USER(&request->list[i].idx,
		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].total,
		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
		    sizeof(zero))) {
			retcode = EFAULT;
			goto done;
		}
		address = vaddr + dma->buflist[i]->offset; /* *** */
		if (DRM_COPY_TO_USER(&request->list[i].address, &address,
		    sizeof(address))) {
			retcode = EFAULT;
			goto done;
		}
	}

done:
	request->count = dma->buf_count;

	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}

/*
 * Compute order.  Can be made faster.  Returns the base-2 logarithm of
 * size, rounded up to the nearest integer: e.g. drm_order(4096) == 12
 * and drm_order(4097) == 13.
 */
int drm_order(unsigned long size)
{
	int order;

	if (size == 0)
		return 0;

	order = flsl(size) - 1;
	if (size & ~(1ul << order))
		++order;

	return order;
}