drm_gem.c revision 1.16
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

#include <sys/conf.h>
#include <uvm/uvm.h>

void	drm_unref(struct uvm_object *);
void	drm_ref(struct uvm_object *);
boolean_t	drm_flush(struct uvm_object *, voff_t, voff_t, int);
int	drm_fault(struct uvm_faultinfo *, vaddr_t, vm_page_t *, int, int,
	    vm_fault_t, vm_prot_t, int);

const struct uvm_pagerops drm_pgops = {
	.pgo_reference = drm_ref,
	.pgo_detach = drm_unref,
	.pgo_fault = drm_fault,
	.pgo_flush = drm_flush,
};

void
drm_ref(struct uvm_object *uobj)
{
	struct drm_gem_object *obj =
	    container_of(uobj, struct drm_gem_object, uobj);

	drm_gem_object_get(obj);
}

void
drm_unref(struct uvm_object *uobj)
{
	struct drm_gem_object *obj =
	    container_of(uobj, struct drm_gem_object, uobj);

	drm_gem_object_put(obj);
}

int
drm_fault(struct uvm_faultinfo *ufi, vaddr_t vaddr, vm_page_t *pps,
    int npages, int centeridx, vm_fault_t fault_type,
    vm_prot_t access_type, int flags)
{
	struct vm_map_entry *entry = ufi->entry;
	struct uvm_object *uobj = entry->object.uvm_obj;
	struct drm_gem_object *obj =
	    container_of(uobj, struct drm_gem_object, uobj);
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * we do not allow device mappings to be mapped copy-on-write
	 * so we kill any attempt to do so here.
	 */

	if (UVM_ET_ISCOPYONWRITE(entry)) {
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
		return(VM_PAGER_ERROR);
	}

	/*
	 * We could end up here as the result of a copyin(9) or
	 * copyout(9) while handling an ioctl. So we must be careful
	 * not to deadlock. Therefore we only block if the quiesce
	 * count is zero, which guarantees we didn't enter from within
	 * an ioctl code path.
	 */
	mtx_enter(&dev->quiesce_mtx);
	if (dev->quiesce && dev->quiesce_count == 0) {
		mtx_leave(&dev->quiesce_mtx);
		uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
		mtx_enter(&dev->quiesce_mtx);
		while (dev->quiesce) {
			msleep_nsec(&dev->quiesce, &dev->quiesce_mtx,
			    PZERO, "drmflt", INFSLP);
		}
		mtx_leave(&dev->quiesce_mtx);
		return(VM_PAGER_REFAULT);
	}
	dev->quiesce_count++;
	mtx_leave(&dev->quiesce_mtx);

	/* Call down into driver to do the magic */
	ret = dev->driver->gem_fault(obj, ufi, entry->offset + (vaddr -
	    entry->start), vaddr, pps, npages, centeridx,
	    access_type, flags);

	mtx_enter(&dev->quiesce_mtx);
	dev->quiesce_count--;
	if (dev->quiesce)
		wakeup(&dev->quiesce_count);
	mtx_leave(&dev->quiesce_mtx);

	return (ret);
}

boolean_t
drm_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	return (TRUE);
}
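
/*
 * Illustrative sketch (not part of this file): the quiesce handshake in
 * drm_fault() above pairs with suspend-side code that sets dev->quiesce
 * and then waits for dev->quiesce_count to drain. Derived only from the
 * wakeup/sleep channels used above; the real suspend path lives in the
 * DRM glue code, so treat this as an assumption:
 *
 *	mtx_enter(&dev->quiesce_mtx);
 *	dev->quiesce = 1;
 *	while (dev->quiesce_count > 0) {
 *		msleep_nsec(&dev->quiesce_count, &dev->quiesce_mtx,
 *		    PZERO, "drmqui", INFSLP);
 *	}
 *	mtx_leave(&dev->quiesce_mtx);
 *
 * drm_fault() wakes &dev->quiesce_count when it drops the last in-flight
 * reference, and faulting threads that arrive while quiesce is set sleep
 * on &dev->quiesce until resume clears the flag and wakes them.
 */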

struct uvm_object *
udv_attach_drm(dev_t device, vm_prot_t accessprot, voff_t off, vsize_t size)
{
	struct drm_device *dev = drm_get_device_from_kdev(device);
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	struct drm_file *priv;
	struct file *filp;

	if (cdevsw[major(device)].d_mmap != drmmmap)
		return NULL;

	if (dev == NULL)
		return NULL;

	mutex_lock(&dev->filelist_mutex);
	priv = drm_find_file_by_minor(dev, minor(device));
	if (priv == NULL) {
		mutex_unlock(&dev->filelist_mutex);
		return NULL;
	}
	filp = priv->filp;
	mutex_unlock(&dev->filelist_mutex);

	if (dev->driver->mmap)
		return dev->driver->mmap(filp, accessprot, off, size);

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  off >> PAGE_SHIFT,
						  atop(round_page(size)));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return NULL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return NULL;
	}

	if (node->readonly) {
		if (accessprot & PROT_WRITE) {
			drm_gem_object_put(obj);
			return NULL;
		}
	}

	return &obj->uobj;
}

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	rw_init(&dev->object_name_lock, "drmonl");
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
					  GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return drmm_add_action(dev, drm_gem_init_release, NULL);
}

#ifdef __linux__

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);

#else

int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	drm_gem_private_object_init(dev, obj, size);

	if (size > (512 * 1024 * 1024)) {
		printf("%s size too big %lu\n", __func__, size);
		return -ENOMEM;
	}

	obj->uao = uao_create(size, 0);
	uvm_obj_init(&obj->uobj, &drm_pgops, 1);

	return 0;
}

#endif
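
/*
 * Illustrative sketch (not part of this file): a minimal driver path
 * that allocates a GEM object and publishes it to userspace. The "foo"
 * driver and its object type are hypothetical; the drm_gem_* calls are
 * the ones defined in this file. drm_gem_handle_create() is called
 * last, once the object is fully set up, and the local reference is
 * dropped afterwards so the handle alone keeps the object alive.
 *
 *	struct foo_obj {
 *		struct drm_gem_object base;
 *	};
 *
 *	int
 *	foo_create(struct drm_device *dev, struct drm_file *file_priv,
 *	    size_t size, u32 *handlep)
 *	{
 *		struct foo_obj *fobj;
 *		int ret;
 *
 *		fobj = kzalloc(sizeof(*fobj), GFP_KERNEL);
 *		if (fobj == NULL)
 *			return -ENOMEM;
 *
 *		ret = drm_gem_object_init(dev, &fobj->base, round_page(size));
 *		if (ret) {
 *			kfree(fobj);
 *			return ret;
 *		}
 *
 *		ret = drm_gem_handle_create(file_priv, &fobj->base, handlep);
 *		drm_gem_object_put(&fobj->base);
 *		return ret;
 *	}
 */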

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
#ifdef __linux__
	obj->filp = NULL;
#else
	obj->uao = NULL;
#endif

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (obj->funcs && obj->funcs->close)
		obj->funcs->close(obj, file_priv);
	else if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 * @file: drm file-private structure to remove the dumb handle from
 * @dev: corresponding drm_device
 * @handle: the dumb handle to remove
 *
 * This implements the &drm_driver.dumb_destroy kms driver callback for drivers
 * which use gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);
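
/*
 * Illustrative sketch (not part of this file): drivers that use GEM for
 * their dumb buffers typically wire the two helpers above straight into
 * their &drm_driver, keeping only the create callback driver-specific.
 * The "foo" names are hypothetical.
 *
 *	static struct drm_driver foo_driver = {
 *		...
 *		.dumb_create = foo_dumb_create,
 *		.dumb_map_offset = drm_gem_dumb_map_offset,
 *		.dumb_destroy = drm_gem_dumb_destroy,
 *		...
 *	};
 */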

/**
 * drm_gem_handle_create_tail - internal functions to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs && obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	} else if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);


/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
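
/*
 * Illustrative sketch (not part of this file): the fake offset round
 * trip as seen from userspace, using the generic dumb-buffer UAPI. A
 * map ioctl hands back the offset produced above, which the client then
 * feeds to mmap(2) on the DRM fd. Error handling is abbreviated.
 *
 *	struct drm_mode_map_dumb arg = { .handle = handle };
 *
 *	if (ioctl(drm_fd, DRM_IOCTL_MODE_MAP_DUMB, &arg) == -1)
 *		err(1, "map_dumb");
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    drm_fd, arg.offset);
 *
 * The offset is only a lookup key into the vma offset manager; it does
 * not correspond to any real file position.
 */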

#ifdef notyet
/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}
#endif

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid on objects initialized with
 * drm_gem_object_init(), but not for those initialized with
 * drm_gem_private_object_init() only.
 */
struct vm_page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	STUB();
	return ERR_PTR(-ENOSYS);
#ifdef notyet
	struct address_space *mapping;
	struct vm_page *p, **pages;
	struct pagevec pvec;
	int i, npages;

	if (WARN_ON(!obj->filp))
		return ERR_PTR(-EINVAL);

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct vm_page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	pagevec_init(&pvec);
	while (i--) {
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
	return ERR_CAST(p);
#endif
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct vm_page **pages,
		       bool dirty, bool accessed)
{
	STUB();
#ifdef notyet
	int i, npages;
	struct address_space *mapping;
	struct pagevec pvec;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++) {
		if (!pages[i])
			continue;

		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
#endif
}
EXPORT_SYMBOL(drm_gem_put_pages);
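
/*
 * Illustrative sketch (not part of this file): the intended pairing of
 * the two helpers above on the Linux side, once the notyet code is
 * ported. The "foo_bind" function and its device-mapping step are
 * hypothetical.
 *
 *	int foo_bind(struct drm_gem_object *obj)
 *	{
 *		struct vm_page **pages;
 *
 *		pages = drm_gem_get_pages(obj);
 *		if (IS_ERR(pages))
 *			return PTR_ERR(pages);
 *
 *		... map the pages for device access ...
 *
 *		drm_gem_put_pages(obj, pages, false, false);
 *		return 0;
 *	}
 */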

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 *
 * @objs filled in with GEM object pointers. Returned GEM objects need to be
 * released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 *
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			      GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	*objs_out = objs;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
out:
	kvfree(handles);
	return ret;

}
EXPORT_SYMBOL(drm_gem_objects_lookup);
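
/*
 * Illustrative sketch (not part of this file): a submit-style ioctl
 * pulling in its buffer list with the helper above. The argument layout
 * (bo_handles, bo_count) is hypothetical; u64_to_user_ptr() is the
 * usual conversion helper.
 *
 *	struct drm_gem_object **objs = NULL;
 *	int i, ret;
 *
 *	ret = drm_gem_objects_lookup(file_priv,
 *	    u64_to_user_ptr(args->bo_handles), args->bo_count, &objs);
 *	if (ret)
 *		goto out;
 *
 *	... queue the job referencing objs[0..bo_count-1] ...
 *
 * out:
 *	for (i = 0; i < args->bo_count; i++)
 *		if (objs && objs[i])
 *			drm_gem_object_put(objs[i]);
 *	kvfree(objs);
 */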

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
 * shared and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * Returns -ERESTARTSYS if interrupted, -ETIME if the wait timed out, or
 * 0 on success.
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout_rcu(obj->resv, wait_all,
					true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
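
/*
 * Illustrative sketch (not part of this file): a driver wait ioctl built
 * on the helper above. The "foo_wait" ioctl and its argument struct are
 * hypothetical; drm_timeout_abs_to_jiffies() is the usual conversion
 * helper for absolute nanosecond deadlines.
 *
 *	int foo_wait_ioctl(struct drm_device *dev, void *data,
 *	    struct drm_file *file_priv)
 *	{
 *		struct foo_wait_args *args = data;
 *		unsigned long timeout =
 *		    drm_timeout_abs_to_jiffies(args->timeout_ns);
 *
 *		return drm_gem_dma_resv_wait(file_priv, args->handle,
 *		    true, timeout);
 *	}
 */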

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put(obj);
	return ret;
}
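
/*
 * Illustrative sketch (not part of this file): how the flink/open pair
 * is used across processes. Process A publishes a global name for one
 * of its handles; process B turns that name back into a handle of its
 * own. Error handling is omitted.
 *
 *	struct drm_gem_flink flink = { .handle = handle };
 *	ioctl(fd_a, DRM_IOCTL_GEM_FLINK, &flink);
 *	... communicate flink.name to process B ...
 *
 *	struct drm_gem_open open = { .name = name };
 *	ioctl(fd_b, DRM_IOCTL_GEM_OPEN, &open);
 *	... open.handle and open.size are now valid in process B ...
 */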

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;
	args->size = obj->size;

err:
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	mtx_init(&file_private->table_lock, IPL_NONE);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse
 * of drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

#ifdef __linux__
	if (obj->filp)
		fput(obj->filp);
#else
	if (obj->uao)
		uao_detach(obj->uao);
#endif

	dma_resv_fini(&obj->_resv);
	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
	    container_of(kref, struct drm_gem_object, refcount);
	struct drm_device *dev = obj->dev;

	if (obj->funcs)
		obj->funcs->free(obj);
	else if (dev->driver->gem_free_object_unlocked)
		dev->driver->gem_free_object_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
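
/*
 * Illustrative sketch (not part of this file): a typical
 * &drm_gem_object_funcs.free implementation, the callback invoked by
 * drm_gem_object_free() above. It undoes drm_gem_object_init() via
 * drm_gem_object_release() and then frees the containing driver object.
 * The "foo_obj" type is hypothetical.
 *
 *	static void foo_gem_free(struct drm_gem_object *obj)
 *	{
 *		struct foo_obj *fobj =
 *		    container_of(obj, struct foo_obj, base);
 *
 *		drm_gem_object_release(obj);
 *		kfree(fobj);
 *	}
 */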

/**
 * drm_gem_object_put_locked - release a GEM buffer object reference
 * @obj: GEM buffer object
 *
 * This releases a reference to @obj. Callers must hold the
 * &drm_device.struct_mutex lock when calling this function, even when the
 * driver doesn't use &drm_device.struct_mutex for anything.
 *
 * For drivers not encumbered with legacy locking use
 * drm_gem_object_put() instead.
 */
void
drm_gem_object_put_locked(struct drm_gem_object *obj)
{
	if (obj) {
		WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));

		kref_put(&obj->refcount, drm_gem_object_free);
	}
}
EXPORT_SYMBOL(drm_gem_object_put_locked);

#ifdef __linux__

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
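
/*
 * Illustrative sketch (not part of this file): the usual way the two
 * callbacks above are wired into a driver's vm_operations_struct on
 * Linux, alongside a driver-specific fault handler ("foo_gem_fault" is
 * hypothetical):
 *
 *	static const struct vm_operations_struct foo_gem_vm_ops = {
 *		.fault = foo_gem_fault,
 *		.open = drm_gem_vm_open,
 *		.close = drm_gem_vm_close,
 *	};
 */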

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	vma->vm_private_data = obj;

	if (obj->funcs && obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret) {
			drm_gem_object_put(obj);
			return ret;
		}
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
	} else {
		if (obj->funcs && obj->funcs->vm_ops)
			vma->vm_ops = obj->funcs->vm_ops;
		else if (dev->driver->gem_vm_ops)
			vma->vm_ops = dev->driver->gem_vm_ops;
		else {
			drm_gem_object_put(obj);
			return -EINVAL;
		}

		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	}

	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	if (node->readonly) {
		if (vma->vm_flags & VM_WRITE) {
			drm_gem_object_put(obj);
			return -EINVAL;
		}

		vma->vm_flags &= ~VM_MAYWRITE;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);

#endif /* __linux__ */
"yes" : "no"); 1408 1409 if (obj->funcs && obj->funcs->print_info) 1410 obj->funcs->print_info(p, indent, obj); 1411} 1412 1413int drm_gem_pin(struct drm_gem_object *obj) 1414{ 1415 if (obj->funcs && obj->funcs->pin) 1416 return obj->funcs->pin(obj); 1417 else if (obj->dev->driver->gem_prime_pin) 1418 return obj->dev->driver->gem_prime_pin(obj); 1419 else 1420 return 0; 1421} 1422 1423void drm_gem_unpin(struct drm_gem_object *obj) 1424{ 1425 if (obj->funcs && obj->funcs->unpin) 1426 obj->funcs->unpin(obj); 1427 else if (obj->dev->driver->gem_prime_unpin) 1428 obj->dev->driver->gem_prime_unpin(obj); 1429} 1430 1431void *drm_gem_vmap(struct drm_gem_object *obj) 1432{ 1433 void *vaddr; 1434 1435 if (obj->funcs && obj->funcs->vmap) 1436 vaddr = obj->funcs->vmap(obj); 1437 else if (obj->dev->driver->gem_prime_vmap) 1438 vaddr = obj->dev->driver->gem_prime_vmap(obj); 1439 else 1440 vaddr = ERR_PTR(-EOPNOTSUPP); 1441 1442 if (!vaddr) 1443 vaddr = ERR_PTR(-ENOMEM); 1444 1445 return vaddr; 1446} 1447 1448void drm_gem_vunmap(struct drm_gem_object *obj, void *vaddr) 1449{ 1450 if (!vaddr) 1451 return; 1452 1453 if (obj->funcs && obj->funcs->vunmap) 1454 obj->funcs->vunmap(obj, vaddr); 1455 else if (obj->dev->driver->gem_prime_vunmap) 1456 obj->dev->driver->gem_prime_vunmap(obj, vaddr); 1457} 1458 1459/** 1460 * drm_gem_lock_reservations - Sets up the ww context and acquires 1461 * the lock on an array of GEM objects. 1462 * 1463 * Once you've locked your reservations, you'll want to set up space 1464 * for your shared fences (if applicable), submit your job, then 1465 * drm_gem_unlock_reservations(). 1466 * 1467 * @objs: drm_gem_objects to lock 1468 * @count: Number of objects in @objs 1469 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as 1470 * part of tracking this set of locked reservations. 1471 */ 1472int 1473drm_gem_lock_reservations(struct drm_gem_object **objs, int count, 1474 struct ww_acquire_ctx *acquire_ctx) 1475{ 1476 int contended = -1; 1477 int i, ret; 1478 1479 ww_acquire_init(acquire_ctx, &reservation_ww_class); 1480 1481retry: 1482 if (contended != -1) { 1483 struct drm_gem_object *obj = objs[contended]; 1484 1485 ret = dma_resv_lock_slow_interruptible(obj->resv, 1486 acquire_ctx); 1487 if (ret) { 1488 ww_acquire_done(acquire_ctx); 1489 return ret; 1490 } 1491 } 1492 1493 for (i = 0; i < count; i++) { 1494 if (i == contended) 1495 continue; 1496 1497 ret = dma_resv_lock_interruptible(objs[i]->resv, 1498 acquire_ctx); 1499 if (ret) { 1500 int j; 1501 1502 for (j = 0; j < i; j++) 1503 dma_resv_unlock(objs[j]->resv); 1504 1505 if (contended != -1 && contended >= i) 1506 dma_resv_unlock(objs[contended]->resv); 1507 1508 if (ret == -EDEADLK) { 1509 contended = i; 1510 goto retry; 1511 } 1512 1513 ww_acquire_done(acquire_ctx); 1514 return ret; 1515 } 1516 } 1517 1518 ww_acquire_done(acquire_ctx); 1519 1520 return 0; 1521} 1522EXPORT_SYMBOL(drm_gem_lock_reservations); 1523 1524void 1525drm_gem_unlock_reservations(struct drm_gem_object **objs, int count, 1526 struct ww_acquire_ctx *acquire_ctx) 1527{ 1528 int i; 1529 1530 for (i = 0; i < count; i++) 1531 dma_resv_unlock(objs[i]->resv); 1532 1533 ww_acquire_fini(acquire_ctx); 1534} 1535EXPORT_SYMBOL(drm_gem_unlock_reservations); 1536 1537#ifdef notyet 1538/** 1539 * drm_gem_fence_array_add - Adds the fence to an array of fences to be 1540 * waited on, deduplicating fences from the same context. 1541 * 1542 * @fence_array: array of dma_fence * for the job to block on. 

#ifdef notyet
/**
 * drm_gem_fence_array_add - Adds the fence to an array of fences to be
 * waited on, deduplicating fences from the same context.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_gem_fence_array_add(struct xarray *fence_array,
			    struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(fence_array, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(fence_array, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add);

/**
 * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
 * in the GEM object's reservation object to an array of dma_fences for use in
 * scheduling a rendering job.
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 */
int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
				     struct drm_gem_object *obj,
				     bool write)
{
	int ret;
	struct dma_fence **fences;
	unsigned int i, fence_count;

	if (!write) {
		struct dma_fence *fence =
			dma_resv_get_excl_rcu(obj->resv);

		return drm_gem_fence_array_add(fence_array, fence);
	}

	ret = dma_resv_get_fences_rcu(obj->resv, NULL,
				      &fence_count, &fences);
	if (ret || !fence_count)
		return ret;

	for (i = 0; i < fence_count; i++) {
		ret = drm_gem_fence_array_add(fence_array, fences[i]);
		if (ret)
			break;
	}

	for (; i < fence_count; i++)
		dma_fence_put(fences[i]);
	kfree(fences);
	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);

#endif /* notyet */