ttm_bo_vm.c revision 262988
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/*
 * Copyright (c) 2013 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Konstantin Belousov
 * <kib@FreeBSD.org> under sponsorship from the FreeBSD Foundation.
 */

/*
 * TTM buffer-object VM glue for FreeBSD: maps TTM buffer objects into
 * user address space via the cdev pager (OBJT_MGTDEVICE).  The Linux
 * vm_operations-based code this was ported from is kept, disabled, in
 * the #if 0 section at the bottom of the file.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/9/sys/dev/drm2/ttm/ttm_bo_vm.c 262988 2014-03-10 23:16:19Z dumbbell $");

#include "opt_vm.h"

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_module.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#define TTM_BO_VM_NUM_PREFAULT 16

/*
 * Generate the red-black tree functions for the per-device address-space
 * tree (bdev->addr_space_rb), keyed on each object's vm_node start.
 */
RB_GENERATE(ttm_bo_device_buffer_objects, ttm_buffer_object, vm_rb,
    ttm_bo_cmp_rb_tree_items);

/*
 * RB-tree comparator: orders buffer objects by the start page offset of
 * their VM address-space node (a->vm_node->start).
 */
int
ttm_bo_cmp_rb_tree_items(struct ttm_buffer_object *a,
    struct ttm_buffer_object *b)
{

	if (a->vm_node->start < b->vm_node->start) {
		return (-1);
	} else if (a->vm_node->start > b->vm_node->start) {
		return (1);
	} else {
		return (0);
	}
}

/*
 * Look up the buffer object whose address-space node covers the page
 * range [page_start, page_start + num_pages).
 *
 * Walks the RB tree manually, remembering the last node whose start is
 * <= page_start (the candidate "floor" node), then verifies that the
 * candidate is large enough to contain the whole requested range.
 *
 * Returns NULL if no object covers the range.  Caller is expected to
 * hold bdev->vm_lock (both callers in this file do).
 */
static struct ttm_buffer_object *ttm_bo_vm_lookup_rb(struct ttm_bo_device *bdev,
    unsigned long page_start,
    unsigned long num_pages)
{
	unsigned long cur_offset;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *best_bo = NULL;

	bo = RB_ROOT(&bdev->addr_space_rb);
	while (bo != NULL) {
		cur_offset = bo->vm_node->start;
		if (page_start >= cur_offset) {
			best_bo = bo;
			if (page_start == cur_offset)
				break;
			bo = RB_RIGHT(bo, vm_rb);
		} else
			bo = RB_LEFT(bo, vm_rb);
	}

	if (unlikely(best_bo == NULL))
		return NULL;

	/* Reject a floor node that ends before the requested range does. */
	if (unlikely((best_bo->vm_node->start + best_bo->num_pages) <
	    (page_start + num_pages)))
		return NULL;

	return best_bo;
}

/*
 * cdev pager fault handler (cdev_pg_fault).
 *
 * Resolves a page fault at byte offset 'offset' into the VM object
 * backing a buffer object: reserves the bo, waits out any pipelined
 * move, then hands back either a fictitious page (iomem) or the
 * ttm_tt-backed page, with the memory attribute matching the bo's
 * placement.  Returns VM_PAGER_OK or VM_PAGER_ERROR, with the resulting
 * page busied in *mres.
 *
 * Locking: entered and exited with the VM object lock held; the lock is
 * dropped while reserving/waiting and retaken before page insertion.
 * Cleanup is via the goto labels at the bottom (out_io_unlock/out_unlock
 * retake the object lock first, then fall into the *1 variants).
 */
static int
ttm_bo_vm_fault(vm_object_t vm_obj, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
{

	struct ttm_buffer_object *bo = vm_obj->handle;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_tt *ttm = NULL;
	vm_page_t m, m1, oldm;
	int ret;
	int retval = VM_PAGER_OK;
	struct ttm_mem_type_manager *man =
		&bdev->man[bo->mem.mem_type];

	vm_object_pip_add(vm_obj, 1);
	/*
	 * If the pager passed in a placeholder page, detach it from the
	 * object now; it is freed below once the real page is installed.
	 */
	oldm = *mres;
	if (oldm != NULL) {
		vm_page_lock(oldm);
		vm_page_remove(oldm);
		vm_page_unlock(oldm);
		*mres = NULL;
	} else
		oldm = NULL;
retry:
	VM_OBJECT_UNLOCK(vm_obj);
	m = NULL;

reserve:
	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (unlikely(ret != 0)) {
		if (ret == -EBUSY) {
			kern_yield(0);
			goto reserve;
		}
		/*
		 * NOTE(review): a non-EBUSY failure falls through here
		 * with the bo unreserved; only -EBUSY is retried.  This
		 * matches the committed revision — confirm against
		 * upstream before changing.
		 */
	}

	if (bdev->driver->fault_reserve_notify) {
		ret = bdev->driver->fault_reserve_notify(bo);
		switch (ret) {
		case 0:
			break;
		case -EBUSY:
		case -ERESTART:
		case -EINTR:
			kern_yield(0);
			goto reserve;
		default:
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	}

	/*
	 * Wait for buffer data in transit, due to a pipelined
	 * move.
	 */

	mtx_lock(&bdev->fence_lock);
	if ((atomic_load_acq_long(&bo->priv_flags) &
	    (1UL << TTM_BO_PRIV_FLAG_MOVING)) != 0) {
		/*
		 * Here, the behavior differs between Linux and FreeBSD.
		 *
		 * On Linux, the wait is interruptible (3rd argument to
		 * ttm_bo_wait). There must be some mechanism to resume
		 * page fault handling, once the signal is processed.
		 *
		 * On FreeBSD, the wait is uninteruptible. This is not a
		 * problem as we can't end up with an unkillable process
		 * here, because the wait will eventually time out.
		 *
		 * An example of this situation is the Xorg process
		 * which uses SIGALRM internally. The signal could
		 * interrupt the wait, causing the page fault to fail
		 * and the process to receive SIGSEGV.
		 */
		ret = ttm_bo_wait(bo, false, false, false);
		mtx_unlock(&bdev->fence_lock);
		if (unlikely(ret != 0)) {
			retval = VM_PAGER_ERROR;
			goto out_unlock;
		}
	} else
		mtx_unlock(&bdev->fence_lock);

	ret = ttm_mem_io_lock(man, true);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_unlock;
	}
	ret = ttm_mem_io_reserve_vm(bo);
	if (unlikely(ret != 0)) {
		retval = VM_PAGER_ERROR;
		goto out_io_unlock;
	}

	/*
	 * Strictly, we're not allowed to modify vma->vm_page_prot here,
	 * since the mmap_sem is only held in read mode. However, we
	 * modify only the caching bits of vma->vm_page_prot and
	 * consider those bits protected by
	 * the bo->mutex, as we should be the only writers.
	 * There shouldn't really be any readers of these bits except
	 * within vm_insert_mixed()? fork?
	 *
	 * TODO: Add a list of vmas to the bo, and change the
	 * vma->vm_page_prot when the object changes caching policy, with
	 * the correct locks held.
	 */
	if (!bo->mem.bus.is_iomem) {
		/* Allocate all page at once, most common usage */
		ttm = bo->ttm;
		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
	}

	if (bo->mem.bus.is_iomem) {
		/* Device memory: use a fictitious page for the phys addr. */
		m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
		    bo->mem.bus.offset + offset);
		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
	} else {
		/* System memory: take the already-populated ttm_tt page. */
		ttm = bo->ttm;
		m = ttm->pages[OFF_TO_IDX(offset)];
		if (unlikely(!m)) {
			retval = VM_PAGER_ERROR;
			goto out_io_unlock;
		}
		pmap_page_set_memattr(m,
		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
	}

	VM_OBJECT_LOCK(vm_obj);
	if ((m->flags & VPO_BUSY) != 0) {
		/*
		 * Page is busy elsewhere: sleep on it, drop our locks and
		 * reservation, and restart the whole fault.
		 */
		vm_page_sleep(m, "ttmpbs");
		ttm_mem_io_unlock(man);
		ttm_bo_unreserve(bo);
		goto retry;
	}
	m->valid = VM_PAGE_BITS_ALL;
	*mres = m;
	/*
	 * Insert the page unless a previous fault already installed the
	 * same page at this pindex (in which case assert consistency).
	 */
	m1 = vm_page_lookup(vm_obj, OFF_TO_IDX(offset));
	if (m1 == NULL) {
		vm_page_insert(m, vm_obj, OFF_TO_IDX(offset));
	} else {
		KASSERT(m == m1,
		    ("inconsistent insert bo %p m %p m1 %p offset %jx",
		    bo, m, m1, (uintmax_t)offset));
	}
	vm_page_busy(m);

	/* The placeholder page removed above can be freed now. */
	if (oldm != NULL) {
		vm_page_lock(oldm);
		vm_page_free(oldm);
		vm_page_unlock(oldm);
	}

out_io_unlock1:
	ttm_mem_io_unlock(man);
out_unlock1:
	ttm_bo_unreserve(bo);
	vm_object_pip_wakeup(vm_obj);
	return (retval);

out_io_unlock:
	VM_OBJECT_LOCK(vm_obj);
	goto out_io_unlock1;

out_unlock:
	VM_OBJECT_LOCK(vm_obj);
	goto out_unlock1;
}

/*
 * cdev pager constructor (cdev_pg_ctor).  Nothing to allocate here;
 * see the comment below for why no bo reference is taken.
 */
static int
ttm_bo_vm_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

	/*
	 * On Linux, a reference to the buffer object is acquired here.
	 * The reason is that this function is not called when the
	 * mmap() is initialized, but only when a process forks for
	 * instance. Therefore on Linux, the reference on the bo is
	 * acquired either in ttm_bo_mmap() or ttm_bo_vm_open(). It's
	 * then released in ttm_bo_vm_close().
	 *
	 * Here, this function is called during mmap() intialization.
	 * Thus, the reference acquired in ttm_bo_mmap_single() is
	 * sufficient.
	 */

	*color = 0;
	return (0);
}

/*
 * cdev pager destructor (cdev_pg_dtor): drops the bo reference that
 * ttm_bo_mmap_single() transferred to the VM object handle.
 */
static void
ttm_bo_vm_dtor(void *handle)
{
	struct ttm_buffer_object *bo = handle;

	ttm_bo_unref(&bo);
}

static struct cdev_pager_ops ttm_pager_ops = {
	.cdev_pg_fault = ttm_bo_vm_fault,
	.cdev_pg_ctor = ttm_bo_vm_ctor,
	.cdev_pg_dtor = ttm_bo_vm_dtor
};

/*
 * Back an mmap() of a drm device by a TTM buffer object.
 *
 * Looks up the bo covering [*offset, *offset + size) in the device's
 * address-space tree, verifies access via the driver hook, and wraps
 * the bo in an OBJT_MGTDEVICE pager object.  On success *offset is
 * reset to 0 and *obj_res receives the VM object; the bo reference
 * taken here is owned by the object (released in ttm_bo_vm_dtor()).
 * Returns 0 or a positive errno.
 */
int
ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **obj_res, int nprot)
{
	struct ttm_bo_driver *driver;
	struct ttm_buffer_object *bo;
	struct vm_object *vm_obj;
	int ret;

	rw_wlock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size));
	if (likely(bo != NULL))
		refcount_acquire(&bo->kref);
	rw_wunlock(&bdev->vm_lock);

	if (unlikely(bo == NULL)) {
		printf("[TTM] Could not find buffer object to map\n");
		return (EINVAL);
	}

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = EPERM;
		goto out_unref;
	}
	/* Driver hook returns a negative errno; flip the sign for FreeBSD. */
	ret = -driver->verify_access(bo);
	if (unlikely(ret != 0))
		goto out_unref;

	vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
	    size, nprot, 0, curthread->td_ucred);
	if (vm_obj == NULL) {
		ret = EINVAL;
		goto out_unref;
	}
	/*
	 * Note: We're transferring the bo reference to vm_obj->handle here.
	 */
	*offset = 0;
	*obj_res = vm_obj;
	return 0;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}

/*
 * Remove all pages of a buffer object from its pager VM object, so
 * that stale user mappings are torn down.  No-op if the bo has no
 * pager object.  Sleeps and restarts the scan whenever a page is busy.
 */
void
ttm_bo_release_mmap(struct ttm_buffer_object *bo)
{
	vm_object_t vm_obj;
	vm_page_t m;
	int i;

	vm_obj = cdev_pager_lookup(bo);
	if (vm_obj == NULL)
		return;

	VM_OBJECT_LOCK(vm_obj);
retry:
	for (i = 0; i < bo->num_pages; i++) {
		m = vm_page_lookup(vm_obj, i);
		if (m == NULL)
			continue;
		if (vm_page_sleep_if_busy(m, true, "ttm_unm"))
			goto retry;
		cdev_pager_free_page(vm_obj, m);
	}
	VM_OBJECT_UNLOCK(vm_obj);

	/* Drop the reference cdev_pager_lookup() acquired. */
	vm_object_deallocate(vm_obj);
}

/*
 * Disabled Linux-origin code kept for reference from the port; not
 * compiled on FreeBSD.
 */
#if 0
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}

ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf, size_t count,
		  loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;

	return io_size;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}

ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
			char __user *rbuf, size_t count, loff_t *f_pos,
			bool write)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	kmap_offset = (*f_pos >> PAGE_SHIFT);
	if (unlikely(kmap_offset >= bo->num_pages))
		return -EFBIG;

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);

	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		return -EAGAIN;
	default:
		return ret;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return ret;

	*f_pos += io_size;

	return io_size;
}
#endif