/* drm_gem.c revision 1.2 */
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <dev/pci/drm/drmP.h>
#include <dev/pci/drm/drm_vma_manager.h>

#include <uvm/uvm.h>

void	drm_unref(struct uvm_object *);
void	drm_ref(struct uvm_object *);
boolean_t	drm_flush(struct uvm_object *, voff_t, voff_t, int);
int	drm_fault(struct uvm_faultinfo *, vaddr_t, vm_page_t *, int, int,
	    vm_fault_t, vm_prot_t, int);

struct uvm_pagerops drm_pgops = {
	NULL,
	drm_ref,
	drm_unref,
	drm_fault,
	drm_flush,
};

void
drm_ref(struct uvm_object *uobj)
{
	struct drm_gem_object *obj =
	    container_of(uobj, struct drm_gem_object, uobj);

	drm_gem_object_reference(obj);
}

void
drm_unref(struct uvm_object *uobj)
{
	struct drm_gem_object *obj =
	    container_of(uobj, struct drm_gem_object, uobj);

	drm_gem_object_unreference_unlocked(obj);
}

int
drm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
    int npages, int centeridx, vm_fault_t fault_type,
    vm_prot_t access_type, int flags)
{
	struct vm_map_entry *entry = ufi->entry;
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct drm_gem_object *obj =
	    container_of(uobj, struct drm_gem_object, uobj);
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * we do not allow device mappings to be mapped copy-on-write
	 * so we kill any attempt to do so here.
	 */

	if (UVM_ET_ISCOPYONWRITE(entry)) {
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
		return(VM_PAGER_ERROR);
	}

	/*
	 * We could end up here as the result of a copyin(9) or
	 * copyout(9) while handling an ioctl. So we must be careful
	 * not to deadlock. Therefore we only block if the quiesce
	 * count is zero, which guarantees we didn't enter from within
	 * an ioctl code path.
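	 *
	 * If we do block, VM_PAGER_REFAULT is returned once the quiesce
	 * period ends, so that uvm_fault() simply retries the lookup
	 * with all locks reacquired.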
	 */
	mtx_enter(&dev->quiesce_mtx);
	if (dev->quiesce && dev->quiesce_count == 0) {
		mtx_leave(&dev->quiesce_mtx);
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj, NULL);
		mtx_enter(&dev->quiesce_mtx);
		while (dev->quiesce) {
			msleep(&dev->quiesce, &dev->quiesce_mtx,
			    PZERO, "drmflt", 0);
		}
		mtx_leave(&dev->quiesce_mtx);
		return(VM_PAGER_REFAULT);
	}
	dev->quiesce_count++;
	mtx_leave(&dev->quiesce_mtx);

	/* Call down into driver to do the magic */
	ret = dev->driver->gem_fault(obj, ufi, entry->offset + (vaddr -
	    entry->start), vaddr, pps, npages, centeridx,
	    access_type, flags);

	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce_count--;
	if (dev->quiesce)
		wakeup(&dev->quiesce_count);
	mtx_leave(&dev->quiesce_mtx);

	return (ret);
}

boolean_t
drm_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	return (TRUE);
}

struct uvm_object *
udv_attach_drm(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size)
{
	struct drm_device *dev = drm_get_device_from_kdev(device);
	struct drm_gem_object *obj;
	struct drm_vma_offset_node *node;
	struct drm_file *priv;
	struct file *filp;

	if (cdevsw[major(device)].d_mmap != drmmmap)
		return NULL;

	if (dev == NULL)
		return NULL;

	if (dev->driver->mmap)
		return dev->driver->mmap(dev, off, size);

	mutex_lock(&dev->struct_mutex);

	priv = drm_find_file_by_minor(dev, minor(device));
	if (priv == NULL) {
		mutex_unlock(&dev->struct_mutex);
		return NULL;
	}
	filp = priv->filp;

	node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
	    off >> PAGE_SHIFT,
	    atop(round_page(size)));
	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return NULL;
	} else if (!drm_vma_node_is_allowed(node, filp)) {
		mutex_unlock(&dev->struct_mutex);
		return NULL;
	}

	obj = container_of(node, struct drm_gem_object, vma_node);
	drm_gem_object_reference(obj);

	mutex_unlock(&dev->struct_mutex);
	return &obj->uobj;
}

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.
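 * (drm_gem_flink_ioctl(), drm_gem_open_ioctl() and drm_gem_close_ioctl()
 * below are those substitutes: naming an object, opening it by name, and
 * closing a handle.)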
 * The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit
 */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif

/**
 * Initialize the GEM device fields
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	rw_init(&dev->object_name_lock, "drmonl");
	idr_init(&dev->object_name_idr);

	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
	    DRM_FILE_PAGE_OFFSET_START,
	    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{

	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
	kfree(dev->vma_offset_manager);
	dev->vma_offset_manager = NULL;
}

#ifdef __linux__

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
    struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

#else

int drm_gem_object_init(struct drm_device *dev,
    struct drm_gem_object *obj, size_t size)
{
	drm_gem_private_object_init(dev, obj, size);

	obj->uao = uao_create(size, 0);
	uvm_objinit(&obj->uobj, &drm_pgops, 1);

	atomic_inc(&dev->obj_count);
	atomic_add(obj->size, &dev->obj_memory);

	return 0;
}

#endif

/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
    struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
#ifdef __linux__
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
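	 * The last handle reference is dropped only afterwards, in
	 * drm_gem_object_handle_unreference_unlocked(), which our
	 * callers invoke once this function has returned.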
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
		    obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
#endif
}

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
#ifdef __linux__
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
#endif
}

static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */

	mutex_lock(&obj->dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
	}
	mutex_unlock(&obj->dev->object_name_lock);

	drm_gem_object_unreference_unlocked(obj);
}

/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code. It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later. Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, filp);
	drm_vma_node_revoke(&obj->vma_node, filp->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
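 * It simply calls drm_gem_handle_delete() on the passed-in handle.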
 */
int drm_gem_dumb_destroy(struct drm_file *file,
    struct drm_device *dev,
    uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning. Used to avoid races in establishing new handles when
 * importing an object from either a flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
    struct drm_gem_object *obj,
    u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
	drm_gem_object_reference(obj);
	obj->handle_count++;
	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	if (ret < 0) {
		drm_gem_object_handle_unreference_unlocked(obj);
		return ret;
	}
	*handlep = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
	if (ret) {
		drm_gem_handle_delete(file_priv, *handlep);
		return ret;
	}

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
    struct drm_gem_object *obj,
    u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);


/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. obj->size). Otherwise
 * just use drm_gem_create_mmap_offset().
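 *
 * An illustrative driver-side sketch (the surrounding ioctl code and the
 * args struct are hypothetical; drm_gem_create_mmap_offset() is defined
 * below and drm_vma_node_offset_addr() comes from drm_vma_manager.h):
 *
 *	ret = drm_gem_create_mmap_offset(obj);
 *	if (ret)
 *		return ret;
 *	args->offset = drm_vma_node_offset_addr(&obj->vma_node);
 *
 * Userspace then passes that offset to mmap(2) on the DRM fd, and the
 * lookup side (drm_gem_mmap(), or udv_attach_drm() above) resolves it
 * back to this object.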
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
	    size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);

#ifdef __linux__

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 * @gfpmask: gfp mask of requested pages
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
{
	struct inode *inode;
	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	inode = file_inode(obj->filp);
	mapping = inode->i_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* There is a hypothetical issue w/ drivers that require
		 * buffer memory in the low 4GB.. if the pages are un-
		 * pinned, and swapped out, they can end up swapped back
		 * in above 4GB. If pages are already in memory, then
		 * shmem_read_mapping_page_gfp will ignore the gfpmask,
		 * even if the already in-memory page disobeys the mask.
		 *
		 * It is only a theoretical issue today, because none of
		 * the devices with this limitation can be populated with
		 * enough memory to trigger the issue. But this BUG_ON()
		 * is here as a reminder in case the problem with
		 * shmem_read_mapping_page_gfp() isn't solved by the time
		 * it does become a real issue.
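		 * (With 4kB pages the pfn limit below, 0x00100000UL, is
		 * exactly the 4GB boundary: 0x100000 * 4096 = 2^32.)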
		 *
		 * See this thread: http://lkml.org/lkml/2011/7/11/238
		 */
		BUG_ON((gfpmask & __GFP_DMA32) &&
		    (page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	while (i--)
		page_cache_release(pages[i]);

	drm_free_large(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
    bool dirty, bool accessed)
{
	int i, npages;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		page_cache_release(pages[i]);
	}

	drm_free_large(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);

#endif

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
    u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	idr_preload(GFP_KERNEL);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
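 * (The name is one previously returned by drm_gem_flink_ioctl() above,
 * possibly to a different process.)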
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_reference(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	mtx_init(&file_private->table_lock, IPL_NONE);
}

/**
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
	    &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

#ifdef __linux__

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);

#else

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	if (obj->uao)
		uao_detach(obj->uao);

	atomic_dec(&dev->obj_count);
	atomic_sub(obj->size, &dev->obj_memory);
}

#endif

/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

#ifdef __linux__

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(obj->dev, vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(obj->dev, vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * NOTE: This function has to be protected with dev->struct_mutex
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
    struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	lockdep_assert_held(&dev->struct_mutex);

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
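	 * drm_gem_vm_open() and drm_gem_vm_close() above are the usual
	 * vm_open/vm_close pair drivers wire into their gem_vm_ops for
	 * this purpose.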
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);
	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj;
	struct drm_vma_offset_node *node;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
	    vma->vm_pgoff,
	    vma_pages(vma));
	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	} else if (!drm_vma_node_is_allowed(node, filp)) {
		mutex_unlock(&dev->struct_mutex);
		return -EACCES;
	}

	obj = container_of(node, struct drm_gem_object, vma_node);
	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);

#endif