/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

/**
 * Compute the page protection for an I/O mapping (registers, frame buffer,
 * or the AGP aperture), applying architecture-specific caching attributes.
 *
 * \param map_type map type (_DRM_REGISTERS, _DRM_FRAME_BUFFER, _DRM_AGP, ...).
 * \param vma virtual memory area.
 * \return the protection bits for \p vma with uncached, write-combined or
 * guarded attributes applied as the architecture requires.
 */
static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
		pgprot_val(tmp) |= _PAGE_PCD;
		pgprot_val(tmp) &= ~_PAGE_PWT;
	}
#elif defined(__powerpc__)
	pgprot_val(tmp) |= _PAGE_NO_CACHE;
	if (map_type == _DRM_REGISTERS)
		pgprot_val(tmp) |= _PAGE_GUARDED;
#endif
#if defined(__ia64__)
	if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}

/**
 * \c nopage method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
						unsigned long address)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t *map = NULL;
	drm_map_list_t *r_list;
	drm_hash_item_t *hash;

	/*
	 * Find the right map
	 */
	if (!drm_core_has_AGP(dev))
		goto vm_nopage_error;

	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_nopage_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
		goto vm_nopage_error;

	r_list = drm_hash_entry(hash, drm_map_list_t, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		unsigned long offset = address - vma->vm_start;
		unsigned long baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (!agpmem)
			goto vm_nopage_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = virt_to_page(__va(agpmem->memory->memory[offset]));
		get_page(page);

		DRM_DEBUG
		    ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
		     baddr, __va(agpmem->memory->memory[offset]), offset,
		     page_count(page));

		return page;
	}
      vm_nopage_error:
	return NOPAGE_SIGBUS;	/* Disallow mremap */
}
#else				/* __OS_HAS_AGP */
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
						unsigned long address)
{
	return NOPAGE_SIGBUS;
}
#endif				/* __OS_HAS_AGP */

/**
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
						    unsigned long address)
{
	drm_map_t *map = (drm_map_t *) vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;	/* Disallow mremap */
	if (!map)
		return NOPAGE_SIGBUS;	/* Nothing allocated */

	offset = address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = vmalloc_to_page((void *)i);
	if (!page)
		return NOPAGE_SIGBUS;
	get_page(page);

	DRM_DEBUG("shm_nopage 0x%lx\n", address);
	return page;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_vma_entry_t *pt, *prev, *next;
	drm_map_t *map;
	drm_map_list_t *r_list;
	struct list_head *list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
		next = pt->next;
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			if (prev) {
				prev->next = pt->next;
			} else {
				dev->vmalist = pt->next;
			}
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
		} else {
			prev = pt;
		}
	}
	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist; if we are not, then
		 * we delete this mapping's information.
		 */
		found_maps = 0;
		list = &dev->maplist->head;
		list_for_each(list, &dev->maplist->head) {
			r_list = list_entry(list, drm_map_list_t, head);
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
				iounmap(map->handle);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_pci_free(dev, &dmah);
				break;
			}
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/**
 * \c nopage method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
						    unsigned long address)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return NOPAGE_SIGBUS;	/* Error */
	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;	/* Disallow mremap */
	if (!dma->pagelist)
		return NOPAGE_SIGBUS;	/* Nothing allocated */

	offset = address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT;
	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

	get_page(page);

	DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
	return page;
}

/**
 * \c nopage method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
						   unsigned long address)
{
	drm_map_t *map = (drm_map_t *) vma->vm_private_data;
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_sg_mem_t *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return NOPAGE_SIGBUS;	/* Error */
	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;	/* Disallow mremap */
	if (!entry->pagelist)
		return NOPAGE_SIGBUS;	/* Nothing allocated */

	offset = address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);

	return page;
}

static struct page *drm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_nopage(vma, address);
}

static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
				      unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_shm_nopage(vma, address);
}

static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
				      unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_dma_nopage(vma, address);
}

static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
				     unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_sg_nopage(vma, address);
}

/** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = {
	.nopage = drm_vm_nopage,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static struct vm_operations_struct drm_vm_shm_ops = {
	.nopage = drm_vm_shm_nopage,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static struct vm_operations_struct drm_vm_dma_ops = {
	.nopage = drm_vm_dma_nopage,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct drm_vm_sg_ops = {
	.nopage = drm_vm_sg_nopage,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

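/*
 * Summary added for reference (the switch in drm_mmap_locked() below is
 * authoritative): which operations table a mapping ends up with.
 *
 *	page offset 0 (no AGP mapped at 0)	-> drm_vm_dma_ops (via drm_mmap_dma())
 *	_DRM_AGP with cant_use_aperture		-> drm_vm_ops (pages faulted in by nopage)
 *	_DRM_AGP otherwise, _DRM_FRAME_BUFFER,
 *	_DRM_REGISTERS				-> drm_vm_ops (premapped by io_remap_pfn_range())
 *	_DRM_CONSISTENT, _DRM_SHM		-> drm_vm_shm_ops
 *	_DRM_SCATTER_GATHER			-> drm_vm_sg_ops
 */
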
/**
 * \c open method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
static void drm_vm_open_locked(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_vma_entry_t *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);

	vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
	if (vma_entry) {
		vma_entry->vma = vma;
		vma_entry->next = dev->vmalist;
		vma_entry->pid = current->pid;
		dev->vmalist = vma_entry;
	}
}

static void drm_vm_open(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_open_locked(vma);
	mutex_unlock(&dev->struct_mutex);
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_vma_entry_t *pt, *prev;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	mutex_lock(&dev->struct_mutex);
	for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
		if (pt->vma == vma) {
			if (prev) {
				prev->next = pt->next;
			} else {
				dev->vmalist = pt->next;
			}
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
			break;
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

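/*
 * Illustrative user-space sketch (not part of this file; "fd" and "dma_size"
 * are hypothetical): the DMA buffer area handled by drm_mmap_dma() below is
 * reached by mapping an open, authenticated DRM file descriptor at page
 * offset 0, with a length that covers the DMA page count exactly:
 *
 *	void *buf = mmap(NULL, dma_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *
 * A length that does not match dma->page_count pages fails the check in
 * drm_mmap_dma() with -EINVAL.
 */
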
/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops, sets
 * the file pointer, and calls drm_vm_open_locked().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev;
	drm_device_dma_t *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	dev = priv->head->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		return -EINVAL;
	}

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_RESERVED;	/* Don't swap */

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);
	return 0;
}

unsigned long drm_core_get_map_ofs(drm_map_t * map)
{
	return map->offset;
}

EXPORT_SYMBOL(drm_core_get_map_ofs);

unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
	return 0;
#endif
}

EXPORT_SYMBOL(drm_core_get_reg_ofs);

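/*
 * Illustrative user-space sketch (not part of this file; "fd", "map_handle"
 * and "map_size" are hypothetical): every non-DMA mapping handled by
 * drm_mmap_locked() below is selected by the mmap offset, typically the
 * opaque handle handed back when the map was created; it arrives here as
 * vma->vm_pgoff and is looked up in dev->map_hash:
 *
 *	void *ptr = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map_handle);
 *
 * The offset is a lookup token, not a physical address.
 */
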
/**
 * mmap DRM memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so this calls drm_mmap_dma(). Otherwise it searches for the map in
 * drm_device::maplist, checks that the restricted flag is not set, sets the
 * virtual memory operations according to the map type, and remaps the pages.
 * Finally it sets the file pointer and calls drm_vm_open_locked(). Called by
 * drm_mmap() with drm_device::struct_mutex held.
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t *map = NULL;
	unsigned long offset = 0;
	drm_hash_item_t *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!vma->vm_pgoff
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, drm_map_list_t, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
	case _DRM_AGP:
		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't reach the bus DMA address from
			 * the CPU, so for memory of type _DRM_AGP we sort out the
			 * real physical pages and mappings in nopage()
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
		offset = dev->driver->get_reg_ofs(dev);
		vma->vm_flags |= VM_IO;	/* not in core dump */
		vma->vm_page_prot = drm_io_prot(map->type, vma);
#ifdef __sparc__
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%lx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, map->offset + offset);
		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. But
		 * it's allocated in a different way, so avoid nopage */
		if (remap_pfn_range(vma, vma->vm_start,
				    page_to_pfn(virt_to_page(map->handle)),
				    vma->vm_end - vma->vm_start,
				    vma->vm_page_prot))
			return -EAGAIN;
		/* fall through to _DRM_SHM */
	case _DRM_SHM:
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		/* Don't let this area swap.  Change when
		   DRM_KERNEL advisory is supported. */
		vma->vm_flags |= VM_RESERVED;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_flags |= VM_RESERVED;
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_RESERVED;	/* Don't swap */

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open_locked(vma);
	return 0;
}

int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_mmap_locked(filp, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_mmap);
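
/*
 * Illustrative driver-side sketch (not part of this file; "foo" is a
 * hypothetical driver): drm_mmap() is exported so a driver can use it
 * directly as its file_operations mmap handler, and drm_core_get_map_ofs()
 * and drm_core_get_reg_ofs() above are the usual implementations a driver
 * might plug into the callbacks referenced here (for example
 * dev->driver->get_reg_ofs() in drm_mmap_locked()):
 *
 *	static struct file_operations foo_fops = {
 *		.owner = THIS_MODULE,
 *		.open = drm_open,
 *		.release = drm_release,
 *		.mmap = drm_mmap,
 *	};
 *
 *	static struct drm_driver foo_driver = {
 *		...
 *		.get_map_ofs = drm_core_get_map_ofs,
 *		.get_reg_ofs = drm_core_get_reg_ofs,
 *	};
 */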