Lines Matching defs:bo

75 struct ttm_buffer_object *bo;
78 bo = RB_ROOT(&bdev->addr_space_rb);
79 while (bo != NULL) {
80 cur_offset = bo->vm_node->start;
82 best_bo = bo;
85 bo = RB_RIGHT(bo, vm_rb);
87 bo = RB_LEFT(bo, vm_rb);
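The hits at lines 75-87 all fall inside one RB-tree lookup; judging by the identifiers (addr_space_rb, vm_rb, vm_node) this listing is from the FreeBSD port of TTM's buffer-object VM code, ttm_bo_vm.c. A minimal reassembly of the walk follows; the sketch's function name and the page_start comparisons are assumptions inferred from the best-match descent pattern visible in the hits.

static struct ttm_buffer_object *
lookup_rb_sketch(struct ttm_bo_device *bdev, unsigned long page_start)
{
	struct ttm_buffer_object *bo, *best_bo = NULL;
	unsigned long cur_offset;

	/* Descend the address-space tree, remembering the closest
	 * preceding node as the best match so far. */
	bo = RB_ROOT(&bdev->addr_space_rb);
	while (bo != NULL) {
		cur_offset = bo->vm_node->start;
		if (page_start >= cur_offset) {		/* assumed test */
			best_bo = bo;
			if (page_start == cur_offset)
				break;
			bo = RB_RIGHT(bo, vm_rb);
		} else
			bo = RB_LEFT(bo, vm_rb);
	}
	return (best_bo);
}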
105 struct ttm_buffer_object *bo = vm_obj->handle;
106 struct ttm_bo_device *bdev = bo->bdev;
112 &bdev->man[bo->mem.mem_type];
128 ret = ttm_bo_reserve(bo, false, false, false, 0);
137 ret = bdev->driver->fault_reserve_notify(bo);
158 if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags)) {
175 ret = ttm_bo_wait(bo, false, false, false);
189 ret = ttm_mem_io_reserve_vm(bo);
200 * the bo->mutex, as we should be the only writers.
204 * TODO: Add a list of vmas to the bo, and change the
208 if (!bo->mem.bus.is_iomem) {
210 ttm = bo->ttm;
217 if (bo->mem.bus.is_iomem) {
218 m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
219 bo->mem.bus.offset + offset);
220 pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
222 ttm = bo->ttm;
229 (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
230 VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
237 ttm_bo_unreserve(bo);
247 ("inconsistent insert bo %p m %p m1 %p offset %jx",
248 bo, m, m1, (uintmax_t)offset));
261 ttm_bo_unreserve(bo);
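The hits between lines 105 and 261 belong to the pager fault handler (the comment fragments at 200-204 about bo->mutex and a per-bo vma list sit inside the same function). A condensed sketch of the sequence they trace, with the retry loop, error unwinding, and the final page insertion paraphrased away; the NULL checks and the OFF_TO_IDX page indexing are assumptions.

static int
fault_sketch(vm_object_t vm_obj, vm_ooffset_t offset)
{
	struct ttm_buffer_object *bo = vm_obj->handle;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_tt *ttm;
	vm_page_t m;
	int ret;

	/* Serialize against concurrent eviction and moves. */
	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret != 0)
		return (ret);

	/* Give the driver a chance to swap in or unbind first. */
	if (bdev->driver->fault_reserve_notify != NULL)
		ret = bdev->driver->fault_reserve_notify(bo);

	/* If the bo is mid-move, wait for the move to finish. */
	if (test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags))
		ret = ttm_bo_wait(bo, false, false, false);

	/* Make the aperture/bus window usable for this mapping. */
	ret = ttm_mem_io_reserve_vm(bo);

	if (bo->mem.bus.is_iomem) {
		/* Device memory: map a fictitious page over the bus
		 * address and give it the bo's caching attribute. */
		m = vm_phys_fictitious_to_vm_page(bo->mem.bus.base +
		    bo->mem.bus.offset + offset);
		pmap_page_set_memattr(m, ttm_io_prot(bo->mem.placement));
	} else {
		/* System memory: hand out the page backing the ttm. */
		ttm = bo->ttm;
		m = ttm->pages[OFF_TO_IDX(offset)];	/* indexing assumed */
		pmap_page_set_memattr(m,
		    (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
		    VM_MEMATTR_WRITE_BACK : ttm_io_prot(bo->mem.placement));
	}

	/* ... insert m into vm_obj; the KASSERT at line 247 guards
	 * against a racing insert of a different page ... */

	ttm_bo_unreserve(bo);
	return (ret);
}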
283 * instance. Therefore on Linux, the reference on the bo is
299 struct ttm_buffer_object *bo = handle;
301 ttm_bo_unref(&bo);
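Lines 283-301 cover the teardown side: the comment fragment contrasts the Linux lifetime rules, and the destructor hits show where the FreeBSD pager drops its reference. Reassembled below; only the wrapper's name is assumed.

static void
dtor_sketch(void *handle)
{
	/* The handle is the reference-holding pointer stored by the
	 * mmap path; releasing it may free the bo. */
	struct ttm_buffer_object *bo = handle;

	ttm_bo_unref(&bo);
}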
315 struct ttm_buffer_object *bo;
320 bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size));
321 if (likely(bo != NULL))
322 refcount_acquire(&bo->kref);
325 if (unlikely(bo == NULL)) {
330 driver = bo->bdev->driver;
335 ret = -driver->verify_access(bo);
339 vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
346 * Note: We're transferring the bo reference to vm_obj->handle here.
352 ttm_bo_unref(&bo);
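The hits from 315 to 352 outline the mmap entry point: look the bo up by device offset, pin it, let the driver veto access, then wrap it in a managed device pager. A sketch with locking and most error unwinding trimmed; the foff argument to cdev_pager_allocate and the positive errno values are assumptions.

static int
mmap_single_sketch(struct ttm_bo_device *bdev, vm_ooffset_t *offset,
    vm_size_t size, vm_prot_t nprot, vm_object_t *obj_res)
{
	struct ttm_buffer_object *bo;
	vm_object_t vm_obj;
	int ret;

	bo = ttm_bo_vm_lookup_rb(bdev, OFF_TO_IDX(*offset), OFF_TO_IDX(size));
	if (bo == NULL)
		return (EINVAL);
	refcount_acquire(&bo->kref);

	/* verify_access reports a negative errno, Linux-style, hence
	 * the negation in the hit at line 335. */
	ret = -bo->bdev->driver->verify_access(bo);
	if (ret != 0) {
		ttm_bo_unref(&bo);
		return (ret);
	}

	vm_obj = cdev_pager_allocate(bo, OBJT_MGTDEVICE, &ttm_pager_ops,
	    size, nprot, *offset, curthread->td_ucred);
	if (vm_obj == NULL) {
		ttm_bo_unref(&bo);
		return (EINVAL);
	}
	/* The bo reference taken above now lives in vm_obj->handle
	 * (the note at line 346); it is dropped by the pager dtor. */
	*obj_res = vm_obj;
	return (0);
}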
357 ttm_bo_release_mmap(struct ttm_buffer_object *bo)
363 vm_obj = cdev_pager_lookup(bo);
369 for (i = 0; i < bo->num_pages; i++) {
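Lines 357-369 are the unmap helper: before a bo is moved or destroyed, every page resident in its pager object must be shot down so no stale mapping survives. A sketch; the busy-page retry and the exact object-lock macros (whose spelling differs across FreeBSD versions) are omitted.

static void
release_mmap_sketch(struct ttm_buffer_object *bo)
{
	vm_object_t vm_obj;
	vm_page_t m;
	vm_pindex_t i;

	vm_obj = cdev_pager_lookup(bo);
	if (vm_obj == NULL)
		return;

	VM_OBJECT_WLOCK(vm_obj);
	/* Evict every resident page so stale PTEs cannot outlive
	 * the bo's current backing store. */
	for (i = 0; i < bo->num_pages; i++) {
		m = vm_page_lookup(vm_obj, i);
		if (m == NULL)
			continue;
		cdev_pager_free_page(vm_obj, m);
	}
	VM_OBJECT_WUNLOCK(vm_obj);

	vm_object_deallocate(vm_obj);
}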
383 int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
389 vma->vm_private_data = ttm_bo_reference(bo);
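Lines 383-389 come from the Linux-flavored fbdev helper carried along with the port; the whole trick is to take a reference and stash it in the vma. A sketch; the vm_pgoff guard and the omitted vm_ops/vm_flags setup are assumptions from the usual Linux pattern.

int
fbdev_mmap_sketch(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)		/* assumed guard */
		return -EACCES;

	/* The reference travels in vm_private_data and is dropped
	 * when the vma is torn down. */
	vma->vm_private_data = ttm_bo_reference(bo);
	return 0;
}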
398 struct ttm_buffer_object *bo;
413 bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
414 if (likely(bo != NULL))
415 ttm_bo_reference(bo);
418 if (unlikely(bo == NULL))
421 driver = bo->bdev->driver;
427 ret = driver->verify_access(bo, filp);
431 kmap_offset = dev_offset - bo->vm_node->start;
432 if (unlikely(kmap_offset >= bo->num_pages)) {
438 io_size = bo->num_pages - kmap_offset;
446 ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
458 ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
460 ttm_bo_unreserve(bo);
473 ttm_bo_unreserve(bo);
474 ttm_bo_unref(&bo);
483 ttm_bo_unref(&bo);
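The hits from 398 to 483 trace the Linux-style read/write path: resolve the bo from the file position, check access, clamp the transfer to the bo's pages, then reserve + kmap + copy + unreserve. A condensed sketch; the intra-page offset handling is paraphrased and the error values are assumptions.

static ssize_t
bo_io_sketch(struct ttm_bo_device *bdev, struct file *filp,
    const char __user *wbuf, char __user *rbuf, size_t count,
    loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = *f_pos >> PAGE_SHIFT;
	unsigned long kmap_offset, kmap_num;
	size_t io_size;
	char *virtual;
	bool dummy;
	int ret;

	/* Resolve the bo from the file position, as the fault path does. */
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (bo == NULL)
		return -EFAULT;
	ttm_bo_reference(bo);

	ret = bo->bdev->driver->verify_access(bo, filp);
	if (ret != 0)
		goto out_unref;

	/* Clamp the transfer to the pages the bo actually owns. */
	kmap_offset = dev_offset - bo->vm_node->start;
	if (kmap_offset >= bo->num_pages) {
		ret = -EFBIG;
		goto out_unref;
	}
	io_size = (bo->num_pages - kmap_offset) << PAGE_SHIFT;
	if (count < io_size)
		io_size = count;
	/* Paraphrased: the real code also folds in *f_pos's intra-page
	 * offset when sizing the kmap. */
	kmap_num = (io_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (ret != 0)
		goto out_unref;
	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (ret != 0) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);
	if (ret != 0)
		ret = -EFBIG;	/* bytes left uncopied */

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
out_unref:
	ttm_bo_unref(&bo);
	if (ret == 0)
		*f_pos += io_size;
	return (ret == 0) ? (ssize_t)io_size : ret;
}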
487 ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
503 if (unlikely(kmap_offset >= bo->num_pages))
507 io_size = bo->num_pages - kmap_offset;
515 ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
526 ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
528 ttm_bo_unreserve(bo);
541 ttm_bo_unreserve(bo);
542 ttm_bo_unref(&bo);
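The ttm_bo_fbdev_io hits (487-542) repeat the same reserve/kmap/copy/unreserve core; the structural difference, visible in the clamp at lines 503-507, is that the caller already holds the bo, so the lookup, reference, and verify_access steps disappear and the kmap offset comes straight from the file position.

	/* fbdev variant of the clamp above; no lookup is needed
	 * because the caller passes the bo in directly. */
	kmap_offset = *f_pos >> PAGE_SHIFT;
	if (kmap_offset >= bo->num_pages)
		return -EFBIG;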