Lines matching defs:bdev (each entry is the source line number followed by the matching code):

60 static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
62 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
90 ttm_mem_type_debug(bo->bdev, mem_type);
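The first cluster (source lines 60-90) shows the per-device array of memory-type managers being indexed by mem_type. A minimal compilable model of that lookup; TTM_NUM_MEM_TYPES matches the TTM default of 8, but the struct fields here are reduced to an illustrative subset, not the real TTM declarations:

#include <stdbool.h>
#include <stdio.h>

#define TTM_NUM_MEM_TYPES 8	/* TTM's default; an assumption in this sketch */

struct ttm_mem_type_manager {
	bool has_type;	/* manager initialized for this type */
	bool use_type;	/* allocations currently allowed */
};

struct ttm_bo_device {
	struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
};

/* Model of ttm_mem_type_debug(): fetch the manager, dump its state. */
static void mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	printf("type %d: has_type %d use_type %d\n",
	    mem_type, man->has_type, man->use_type);
}

int main(void)
{
	struct ttm_bo_device bdev = { .man[0] = { true, true } };

	mem_type_debug(&bdev, 0);
	return 0;
}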
111 struct ttm_bo_device *bdev = bo->bdev;
130 ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
159 struct ttm_bo_device *bdev = bo->bdev;
168 man = &bdev->man[bo->mem.mem_type];
370 struct ttm_bo_device *bdev = bo->bdev;
378 if (bdev->need_dma32)
386 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
392 bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
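Source lines 370-392 create the backing ttm_tt through the driver hook, folding in a DMA32 page flag when bdev->need_dma32 is set. A sketch of that flag plumbing; the flag value and the hook signature are simplified stand-ins for the real TTM definitions:

#include <stdint.h>
#include <stddef.h>

#define PAGE_SHIFT		12
#define TTM_PAGE_FLAG_DMA32	(1 << 0)	/* illustrative value only */

struct ttm_tt { uint32_t page_flags; };

struct ttm_bo_device;

struct ttm_bo_driver {
	/* Device-specific allocator, analogous to bdev->driver->ttm_tt_create. */
	struct ttm_tt *(*ttm_tt_create)(struct ttm_bo_device *bdev,
	    size_t size, uint32_t page_flags);
};

struct ttm_bo_device {
	struct ttm_bo_driver *driver;
	int need_dma32;
};

struct ttm_tt *
bo_add_ttm(struct ttm_bo_device *bdev, unsigned long num_pages)
{
	uint32_t page_flags = 0;

	/* Devices limited to 32-bit DMA get pages from the low range. */
	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	return bdev->driver->ttm_tt_create(bdev, num_pages << PAGE_SHIFT,
	    page_flags);
}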
415 struct ttm_bo_device *bdev = bo->bdev;
416 bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
417 bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
418 struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
419 struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
454 if (bdev->driver->move_notify)
455 bdev->driver->move_notify(bo, mem);
462 if (bdev->driver->move_notify)
463 bdev->driver->move_notify(bo, mem);
468 else if (bdev->driver->move)
469 ret = bdev->driver->move(bo, evict, interruptible,
475 if (bdev->driver->move_notify) {
479 bdev->driver->move_notify(bo, mem);
489 ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
497 bdev->man[bo->mem.mem_type].gpu_offset;
505 new_man = &bdev->man[bo->mem.mem_type];
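Lines 415-505 are the move path: notify the driver, dispatch to a driver move hook when one exists, fall back otherwise, then refresh the cached GPU offset from the target manager. A condensed model of that dispatch; the hook names mirror the fragments above, while the bodies and the second move_notify on error paths are omitted for brevity:

#include <stddef.h>

struct ttm_buffer_object;
struct ttm_mem_reg { int mem_type; unsigned long start; };

struct ttm_bo_driver {
	void (*move_notify)(struct ttm_buffer_object *bo,
	    struct ttm_mem_reg *mem);
	int (*move)(struct ttm_buffer_object *bo, int evict,
	    int interruptible, struct ttm_mem_reg *mem);
};

struct ttm_mem_type_manager { unsigned long gpu_offset; };

struct ttm_bo_device {
	struct ttm_bo_driver *driver;
	struct ttm_mem_type_manager man[8];	/* 8 models TTM_NUM_MEM_TYPES */
};

struct ttm_buffer_object {
	struct ttm_bo_device *bdev;
	struct ttm_mem_reg mem;
	unsigned long offset;
};

/* Fallback when no accelerated move hook is provided. */
static int move_memcpy(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	bo->mem = *mem;		/* a real implementation copies page contents */
	return 0;
}

int handle_move_mem(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem,
    int evict, int interruptible)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible, mem);
	else
		ret = move_memcpy(bo, mem);

	/* On success, recompute the offset against the new manager. */
	if (ret == 0)
		bo->offset = bo->mem.start +
		    bdev->man[bo->mem.mem_type].gpu_offset;
	return ret;
}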
525 if (bo->bdev->driver->move_notify)
526 bo->bdev->driver->move_notify(bo, NULL);
550 struct ttm_bo_device *bdev = bo->bdev;
552 struct ttm_bo_driver *driver = bdev->driver;
560 mtx_lock(&bdev->fence_lock);
563 mtx_unlock(&bdev->fence_lock);
575 mtx_unlock(&bdev->fence_lock);
583 list_add_tail(&bo->ddestroy, &bdev->ddestroy);
590 taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq,
610 struct ttm_bo_device *bdev = bo->bdev;
611 struct ttm_bo_driver *driver = bdev->driver;
616 mtx_lock(&bdev->fence_lock);
628 mtx_unlock(&bdev->fence_lock);
643 mtx_lock(&bdev->fence_lock);
645 mtx_unlock(&bdev->fence_lock);
665 mtx_unlock(&bdev->fence_lock);
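The fragments from lines 550-665 pair the ddestroy list with bdev->fence_lock: a buffer still referenced by the GPU is queued on bdev->ddestroy rather than freed, and the delayed worker is scheduled to retry. A small userspace model of that pattern, with a pthread mutex and a singly linked list standing in for the kernel mutex and list_head:

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct bo {
	bool fenced;		/* still referenced by the GPU */
	struct bo *ddestroy;	/* next entry on the deferred-destroy list */
};

struct bo_device {
	pthread_mutex_t fence_lock;
	struct bo *ddestroy_head;
};

/* Destroy now if idle; otherwise park the object for the delayed worker. */
void bo_cleanup_refs(struct bo_device *bdev, struct bo *bo)
{
	pthread_mutex_lock(&bdev->fence_lock);
	if (!bo->fenced) {
		pthread_mutex_unlock(&bdev->fence_lock);
		free(bo);
		return;
	}
	bo->ddestroy = bdev->ddestroy_head;
	bdev->ddestroy_head = bo;
	pthread_mutex_unlock(&bdev->fence_lock);
	/* the kernel code enqueues the delayed-delete timeout task here */
}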
691 static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
693 struct ttm_bo_global *glob = bdev->glob;
698 if (list_empty(&bdev->ddestroy))
701 entry = list_first_entry(&bdev->ddestroy,
708 if (entry->ddestroy.next != &bdev->ddestroy) {
748 struct ttm_bo_device *bdev = arg;
750 if (ttm_bo_delayed_delete(bdev, false)) {
751 taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq,
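Lines 691-751 show the worker draining bdev->ddestroy and re-arming itself whenever ttm_bo_delayed_delete() reports leftovers. The same control flow in miniature, with the taskqueue replaced by a boolean return for illustration:

#include <stdbool.h>
#include <stdlib.h>

struct bo { bool fenced; struct bo *next; };
struct bo_device { struct bo *ddestroy_head; };

/* Free idle entries; report true when fenced entries remain queued. */
bool delayed_delete(struct bo_device *bdev, bool remove_all)
{
	struct bo **pp = &bdev->ddestroy_head;

	while (*pp != NULL) {
		struct bo *entry = *pp;

		if (!entry->fenced || remove_all) {
			*pp = entry->next;	/* unlink and destroy */
			free(entry);
		} else
			pp = &entry->next;	/* still busy; skip */
	}
	return bdev->ddestroy_head != NULL;
}

void delayed_workqueue(void *arg)
{
	struct bo_device *bdev = arg;

	if (delayed_delete(bdev, false)) {
		/* the kernel version re-enqueues itself on taskqueue_thread
		 * with a fresh timeout, as at source line 751 */
	}
}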
758 struct ttm_bo_device *bdev = bo->bdev;
759 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
761 rw_wlock(&bdev->vm_lock);
764 &bdev->addr_space_rb, bo);
768 rw_wunlock(&bdev->vm_lock);
786 int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
790 if (taskqueue_cancel_timeout(taskqueue_thread, &bdev->wq, &pending))
791 taskqueue_drain_timeout(taskqueue_thread, &bdev->wq);
795 void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
798 taskqueue_enqueue_timeout(taskqueue_thread, &bdev->wq,
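Lines 786-798 bracket operations that must not race the delayed worker: the lock side cancels and drains the task, remembering whether it was pending, and the unlock side re-arms it. A sketch of that pause/resume contract, with the taskqueue calls modeled by a flag:

#include <stdbool.h>

struct bo_device { bool wq_pending; };

/* Pause the delayed worker; the return value feeds the matching unlock. */
int lock_delayed_workqueue(struct bo_device *bdev)
{
	int pending = bdev->wq_pending;

	bdev->wq_pending = false;	/* models cancel_timeout + drain */
	return pending;
}

void unlock_delayed_workqueue(struct bo_device *bdev, int resched)
{
	if (resched)
		bdev->wq_pending = true;	/* models re-enqueue_timeout */
}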
806 struct ttm_bo_device *bdev = bo->bdev;
811 mtx_lock(&bdev->fence_lock);
813 mtx_unlock(&bdev->fence_lock);
833 bdev->driver->evict_flags(bo, &placement);
858 static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
863 struct ttm_bo_global *glob = bdev->glob;
864 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
907 struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];
924 struct ttm_bo_device *bdev = bo->bdev;
925 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
934 ret = ttm_mem_evict_first(bdev, mem_type,
1003 struct ttm_bo_device *bdev = bo->bdev;
1018 man = &bdev->man[mem_type];
1064 man = &bdev->man[mem_type];
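Lines 858-1064 implement eviction-driven allocation: when a manager cannot satisfy a request, ttm_mem_evict_first() is called repeatedly to retire LRU entries until space appears or the LRU runs dry. A minimal model of that retry loop; the space check and the per-eviction reclaim size are stubs standing in for the manager and LRU logic:

#include <stdbool.h>
#include <errno.h>

struct mem_type_manager {
	unsigned long free_pages;
	unsigned long lru_count;
};

/* Stub for ttm_mem_evict_first(): retire one LRU entry, reclaiming pages. */
static int evict_first(struct mem_type_manager *man)
{
	if (man->lru_count == 0)
		return -EBUSY;		/* nothing left to evict */
	man->lru_count--;
	man->free_pages += 16;		/* arbitrary reclaim size */
	return 0;
}

/* Model of the alloc-or-evict loop around source line 934. */
int mem_force_space(struct mem_type_manager *man, unsigned long need)
{
	while (man->free_pages < need) {
		int ret = evict_first(man);

		if (ret != 0)
			return ret;	/* out of evictable objects */
	}
	man->free_pages -= need;
	return 0;
}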
1111 struct ttm_bo_device *bdev = bo->bdev;
1120 mtx_lock(&bdev->fence_lock);
1122 mtx_unlock(&bdev->fence_lock);
1215 int ttm_bo_init(struct ttm_bo_device *bdev,
1229 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1261 bo->bdev = bdev;
1262 bo->glob = bdev->glob;
1309 size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
1322 size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
1336 int ttm_bo_create(struct ttm_bo_device *bdev,
1350 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
1351 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
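Lines 1215-1351 show the creation path: ttm_bo_acc_size() computes the kernel-memory accounting charge for the object, and ttm_bo_init() charges it against the global memory accounting before initialization proceeds (with the rollback visible at source line 130). A compilable model of that charge/rollback pattern; the limit check is a simplification of the real accounting:

#include <stddef.h>
#include <errno.h>

struct mem_global { size_t used, limit; };

/* Models ttm_mem_global_alloc()/ttm_mem_global_free() accounting. */
static int mem_global_alloc(struct mem_global *glob, size_t acc_size)
{
	if (glob->used + acc_size > glob->limit)
		return -ENOMEM;
	glob->used += acc_size;
	return 0;
}

static void mem_global_free(struct mem_global *glob, size_t acc_size)
{
	glob->used -= acc_size;
}

/* Sketch of ttm_bo_init()'s accounting step: charge up front, roll the
 * charge back if a later initialization step fails. */
int bo_init(struct mem_global *glob, size_t acc_size, int init_ok)
{
	int ret = mem_global_alloc(glob, acc_size);

	if (ret != 0)
		return ret;
	if (!init_ok) {			/* stands in for any later failure */
		mem_global_free(glob, acc_size);
		return -EINVAL;
	}
	return 0;
}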
1360 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1363 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1364 struct ttm_bo_global *glob = bdev->glob;
1374 ret = ttm_mem_evict_first(bdev, mem_type, false, false);
1388 int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1397 man = &bdev->man[mem_type];
1410 ttm_bo_force_list_clean(bdev, mem_type, false);
1418 int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1420 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1432 return ttm_bo_force_list_clean(bdev, mem_type, true);
1435 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1442 man = &bdev->man[type];
1449 ret = bdev->driver->init_mem_type(bdev, type, man);
1452 man->bdev = bdev;
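Lines 1388-1452 pair the manager's lifecycle entry points: ttm_bo_init_mm() runs the driver hook and marks the type usable, while ttm_bo_clean_mm() clears use_type, force-cleans the lists, then drops has_type. A model of that state machine, reduced to the two flags and the hook:

#include <stdbool.h>
#include <errno.h>

struct mem_type_manager {
	bool has_type;	/* driver hook ran successfully */
	bool use_type;	/* new allocations permitted */
};

int init_mm(struct mem_type_manager *man,
    int (*driver_init)(struct mem_type_manager *))
{
	int ret = driver_init(man);	/* bdev->driver->init_mem_type */

	if (ret != 0)
		return ret;
	man->has_type = true;
	man->use_type = true;
	return 0;
}

int clean_mm(struct mem_type_manager *man)
{
	if (!man->has_type)
		return -EINVAL;		/* memory type never initialized */
	man->use_type = false;		/* stop new allocations first */
	/* the real code force-cleans the LRU list here */
	man->has_type = false;
	return 0;
}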
1532 int ttm_bo_device_release(struct ttm_bo_device *bdev)
1537 struct ttm_bo_global *glob = bdev->glob;
1540 man = &bdev->man[i];
1543 if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
1553 list_del(&bdev->device_list);
1556 if (taskqueue_cancel_timeout(taskqueue_thread, &bdev->wq, NULL))
1557 taskqueue_drain_timeout(taskqueue_thread, &bdev->wq);
1559 while (ttm_bo_delayed_delete(bdev, true))
1563 if (list_empty(&bdev->ddestroy))
1566 if (list_empty(&bdev->man[0].lru))
1570 MPASS(drm_mm_clean(&bdev->addr_space_mm));
1571 rw_wlock(&bdev->vm_lock);
1572 drm_mm_takedown(&bdev->addr_space_mm);
1573 rw_wunlock(&bdev->vm_lock);
1578 int ttm_bo_device_init(struct ttm_bo_device *bdev,
1586 rw_init(&bdev->vm_lock, "ttmvml");
1587 bdev->driver = driver;
1589 memset(bdev->man, 0, sizeof(bdev->man));
1595 ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
1599 RB_INIT(&bdev->addr_space_rb);
1600 ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
1604 TIMEOUT_TASK_INIT(taskqueue_thread, &bdev->wq, 0,
1605 ttm_bo_delayed_workqueue, bdev);
1606 INIT_LIST_HEAD(&bdev->ddestroy);
1607 bdev->dev_mapping = NULL;
1608 bdev->glob = glob;
1609 bdev->need_dma32 = need_dma32;
1610 bdev->val_seq = 0;
1611 mtx_init(&bdev->fence_lock, "ttmfence", NULL, MTX_DEF);
1613 list_add_tail(&bdev->device_list, &glob->device_list);
1618 ttm_bo_clean_mm(bdev, 0);
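Lines 1578-1618 initialize the device in a fixed order (locks, the mandatory TTM_PL_SYSTEM manager, the address-space allocator, the delayed-delete task, global list linkage), and lines 1532-1573 tear it down in reverse. A sketch of that init-with-unwind ordering; the step bodies are placeholders, and the goto label mirrors the error-unwind style implied by the ttm_bo_clean_mm(bdev, 0) call at line 1618:

#include <errno.h>

struct bo_device { int sys_man_ready, mm_ready; };

static int init_system_manager(struct bo_device *b) { b->sys_man_ready = 1; return 0; }
static int init_addr_space(struct bo_device *b) { b->mm_ready = 1; return 0; }
static void clean_system_manager(struct bo_device *b) { b->sys_man_ready = 0; }

/* Mirrors ttm_bo_device_init(): a failure unwinds the earlier steps. */
int bo_device_init(struct bo_device *bdev)
{
	int ret;

	ret = init_system_manager(bdev);	/* TTM_PL_SYSTEM comes first */
	if (ret != 0)
		return ret;

	ret = init_addr_space(bdev);		/* drm_mm_init analogue */
	if (ret != 0)
		goto out_no_addr_mm;

	/* the real code also arms the delayed-delete task and links the
	 * device onto glob->device_list before returning */
	return 0;

out_no_addr_mm:
	clean_system_manager(bdev);		/* ttm_bo_clean_mm(bdev, 0) */
	return ret;
}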
1627 bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
1629 struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
1653 struct ttm_bo_device *bdev = bo->bdev;
1654 struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
1663 struct ttm_bo_device *bdev = bo->bdev;
1665 /* The caller acquired bdev->vm_lock. */
1666 RB_INSERT(ttm_bo_device_buffer_objects, &bdev->addr_space_rb, bo);
1682 struct ttm_bo_device *bdev = bo->bdev;
1686 ret = drm_mm_pre_get(&bdev->addr_space_mm);
1690 rw_wlock(&bdev->vm_lock);
1691 bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
1703 rw_wunlock(&bdev->vm_lock);
1708 rw_wunlock(&bdev->vm_lock);
1713 rw_wunlock(&bdev->vm_lock);
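Lines 1682-1713 allocate the buffer's mmap offset: drm_mm node memory is pre-allocated outside the lock, then the free-space search and the addr_space_rb insertion happen under the bdev->vm_lock write lock. A userspace model of that shape using a pthread rwlock; the bump allocator is a stub for drm_mm_search_free(), and the RB-tree insertion is noted in a comment:

#include <pthread.h>
#include <stdbool.h>

struct bo_device {
	pthread_rwlock_t vm_lock;
	unsigned long next_offset;	/* stub for drm_mm free-space state */
};

struct bo { unsigned long vm_offset; bool has_vm_node; };

int bo_setup_vm(struct bo_device *bdev, struct bo *bo,
    unsigned long num_pages)
{
	/* a drm_mm_pre_get() analogue would preallocate node memory here,
	 * outside the write lock, so the locked section cannot fail */
	pthread_rwlock_wrlock(&bdev->vm_lock);
	bo->vm_offset = bdev->next_offset;	/* drm_mm_search_free analogue */
	bdev->next_offset += num_pages;
	bo->has_vm_node = true;
	/* the original also does RB_INSERT into bdev->addr_space_rb here */
	pthread_rwlock_unlock(&bdev->vm_lock);
	return 0;
}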
1720 struct ttm_bo_driver *driver = bo->bdev->driver;
1721 struct ttm_bo_device *bdev = bo->bdev;
1734 mtx_unlock(&bdev->fence_lock);
1736 mtx_lock(&bdev->fence_lock);
1744 mtx_unlock(&bdev->fence_lock);
1749 mtx_lock(&bdev->fence_lock);
1752 mtx_lock(&bdev->fence_lock);
1758 mtx_unlock(&bdev->fence_lock);
1761 mtx_lock(&bdev->fence_lock);
1763 mtx_unlock(&bdev->fence_lock);
1765 mtx_lock(&bdev->fence_lock);
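The dense lock traffic at lines 1720-1765 is the wait path: bdev->fence_lock is dropped around the driver's potentially sleeping fence wait and retaken to re-examine the object. A pthread model of that drop/reacquire discipline; the wait stub stands in for the driver's sync-object wait hook:

#include <pthread.h>
#include <stddef.h>

struct bo_device { pthread_mutex_t fence_lock; };
struct bo { struct bo_device *bdev; void *sync_obj; };

/* Stub for the driver's fence wait; may block for a long time. */
static int sync_obj_wait(void *sync_obj) { (void)sync_obj; return 0; }

int bo_wait(struct bo *bo)
{
	struct bo_device *bdev = bo->bdev;
	int ret = 0;

	pthread_mutex_lock(&bdev->fence_lock);
	while (bo->sync_obj != NULL) {
		void *obj = bo->sync_obj;

		/* never sleep in the driver wait while holding fence_lock */
		pthread_mutex_unlock(&bdev->fence_lock);
		ret = sync_obj_wait(obj);
		pthread_mutex_lock(&bdev->fence_lock);
		if (ret != 0)
			break;
		if (bo->sync_obj == obj)
			bo->sync_obj = NULL;	/* fence signaled; clear it */
	}
	pthread_mutex_unlock(&bdev->fence_lock);
	return ret;
}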
1773 struct ttm_bo_device *bdev = bo->bdev;
1783 mtx_lock(&bdev->fence_lock);
1785 mtx_unlock(&bdev->fence_lock);
1841 mtx_lock(&bo->bdev->fence_lock);
1843 mtx_unlock(&bo->bdev->fence_lock);
1869 if (bo->bdev->driver->swap_notify)
1870 bo->bdev->driver->swap_notify(bo);
1888 void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
1890 while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
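Finally, lines 1888-1890 drain the swap path by invoking the global shrinker until it reports no more work. The same drive-to-exhaustion loop, modeled with a stub shrinker:

#include <errno.h>

struct shrink { unsigned long swappable; };

/* Stub for ttm_bo_swapout(): push one object out; 0 on success. */
static int bo_swapout(struct shrink *s)
{
	if (s->swappable == 0)
		return -EBUSY;	/* nothing left to swap */
	s->swappable--;
	return 0;
}

/* Mirrors ttm_bo_swapout_all(): loop until the shrinker fails. */
void bo_swapout_all(struct shrink *s)
{
	while (bo_swapout(s) == 0)
		;
}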