Lines matching refs: ttm_bo

313 static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
316 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
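The hits at 313/316 are the driver's ->ttm_tt_create() hook recovering the xe_bo that embeds the TTM object. A minimal sketch of that ttm_to_xe_bo() conversion, assuming only the embedding the listing itself shows (line 575 takes ttm_bo = &bo->ttm, so struct xe_bo holds its ttm_buffer_object in a field named ttm):

/* Sketch: invert the embedding visible at line 575 (ttm_bo = &bo->ttm). */
static inline struct xe_bo *ttm_to_xe_bo(struct ttm_buffer_object *ttm_bo)
{
    return container_of(ttm_bo, struct xe_bo, ttm);
}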
521 static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo,
524 struct dma_buf_attachment *attach = ttm_bo->base.import_attach;
525 struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm, struct xe_ttm_tt,
527 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
531 xe_assert(xe, ttm_bo->ttm);
536 if (ttm_bo->sg) {
537 dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL);
538 ttm_bo->sg = NULL;
545 ttm_bo->sg = sg;
549 ttm_bo_move_null(ttm_bo, new_res);
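Lines 521-549 outline the move path for imported dma-bufs: drop the sg table that backed the old placement, map the attachment again, cache the new table on the BO, and complete the move with ttm_bo_move_null() since no data copy is needed. A hedged sketch of that sequence; the error handling and the xe_ttm_tt bookkeeping from line 525 are omitted, and only the dma_buf_*/ttm_bo_move_null() calls are taken from the listing:

/*
 * Sketch of the dma-buf move sequence at lines 521-549; details not
 * visible in the listing (error paths, xe_tt state) are assumptions.
 */
static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo,
                             struct ttm_resource *new_res)
{
    struct dma_buf_attachment *attach = ttm_bo->base.import_attach;
    struct sg_table *sg;

    /* Release the mapping that backed the old placement. */
    if (ttm_bo->sg) {
        dma_buf_unmap_attachment(attach, ttm_bo->sg, DMA_BIDIRECTIONAL);
        ttm_bo->sg = NULL;
    }

    /* Map the attachment again for the new placement. */
    sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
    if (IS_ERR(sg))
        return PTR_ERR(sg);

    ttm_bo->sg = sg;

    /* Imported buffers carry no local data to copy; just adopt new_res. */
    ttm_bo_move_null(ttm_bo, new_res);
    return 0;
}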
575 struct ttm_buffer_object *ttm_bo = &bo->ttm;
576 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
577 struct ttm_resource *old_mem = ttm_bo->resource;
595 if (ttm_bo->base.dma_buf && !ttm_bo->base.import_attach)
596 dma_buf_move_notify(ttm_bo->base.dma_buf);
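The hits at 575-596 fall in a move-notification helper: it resolves the device and the old resource from the embedded ttm_buffer_object, and at 595-596 tells dma-buf importers when an exported (but not itself imported) BO is about to move. The notification idiom, with a hypothetical wrapper name:

/*
 * Sketch of the exporter notification at lines 595-596; the wrapper
 * name is hypothetical.  dma_buf_move_notify() is typically called
 * with the BO's reservation lock held, as it is during a TTM move.
 */
static void xe_bo_notify_importers(struct xe_bo *bo)
{
    struct ttm_buffer_object *ttm_bo = &bo->ttm;

    if (ttm_bo->base.dma_buf && !ttm_bo->base.import_attach)
        dma_buf_move_notify(ttm_bo->base.dma_buf);
}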
613 static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
618 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
619 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
620 struct ttm_resource *old_mem = ttm_bo->resource;
622 struct ttm_tt *ttm = ttm_bo->ttm;
633 ttm_bo_move_null(ttm_bo, new_mem);
637 if (ttm_bo->type == ttm_bo_type_sg) {
640 ret = xe_bo_move_dmabuf(ttm_bo, new_mem);
651 (!ttm && ttm_bo->type == ttm_bo_type_device);
654 ttm_bo_move_null(ttm_bo, new_mem);
659 ttm_bo_move_null(ttm_bo, new_mem);
669 ttm_bo_move_null(ttm_bo, new_mem);
681 long timeout = dma_resv_wait_timeout(ttm_bo->base.resv,
691 ttm_bo_move_null(ttm_bo, new_mem);
733 ret = ttm_bo_move_memcpy(ttm_bo, ctx, new_mem);
765 ret = ttm_bo_move_accel_cleanup(ttm_bo, fence, evict,
769 ttm_bo_move_null(ttm_bo, new_mem);
778 dma_resv_add_fence(ttm_bo->base.resv, fence,
780 ttm_bo_move_null(ttm_bo, new_mem);
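The xe_bo_move() hits at 613-780 show the shape of the main move callback: imported BOs are redirected to xe_bo_move_dmabuf() (637-640), several no-op cases complete via ttm_bo_move_null() (633, 654, 659, 669, 691), one branch waits on the reservation object with dma_resv_wait_timeout() (681), a CPU fallback uses ttm_bo_move_memcpy() (733), and accelerated moves finish either through ttm_bo_move_accel_cleanup() (765) or by publishing the fence with dma_resv_add_fence() and nulling the move (778-780). A sketch of the two completion idioms only; the helper name, the needs_copy split and the DMA_RESV_USAGE_KERNEL value are assumptions, the TTM calls come from the listing:

/*
 * Sketch of the completion idioms at lines 765-780.  'fence' stands for
 * the fence of a GPU copy/clear job the real function has already
 * submitted; the helper name and the usage value are assumptions.
 */
static int xe_bo_finish_accel_move(struct ttm_buffer_object *ttm_bo,
                                   struct dma_fence *fence, bool evict,
                                   struct ttm_resource *new_mem,
                                   bool needs_copy)
{
    if (needs_copy) {
        /* Let TTM pipeline the move: it attaches the fence and frees
         * the old resource once the copy has completed. */
        return ttm_bo_move_accel_cleanup(ttm_bo, fence, evict,
                                         true /* pipeline */, new_mem);
    }

    /* Nothing had to be copied (e.g. a clear): publish the fence on the
     * reservation object and adopt the new resource directly. */
    dma_resv_add_fence(ttm_bo->base.resv, fence, DMA_RESV_USAGE_KERNEL);
    ttm_bo_move_null(ttm_bo, new_mem);
    return 0;
}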
914 static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *ttm_bo,
917 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
921 if (ttm_bo->resource->mem_type == XE_PL_STOLEN)
924 vram = res_to_mem_region(ttm_bo->resource);
925 xe_res_first(ttm_bo->resource, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
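Lines 914-925 are the io_mem_pfn callback: stolen memory takes a separate path (921), otherwise the faulting page offset is resolved through an xe_res_cursor into the VRAM region (924-925). A sketch of the PFN lookup; the region type, its io_start field and the final arithmetic are assumptions, only res_to_mem_region()/xe_res_first() come from the listing:

/*
 * Sketch of the VRAM PFN lookup at lines 924-925.  The xe_mem_region
 * type, its io_start field and the exact arithmetic are assumptions.
 */
static unsigned long xe_ttm_io_mem_pfn_sketch(struct ttm_buffer_object *ttm_bo,
                                              unsigned long page_offset)
{
    struct xe_mem_region *vram = res_to_mem_region(ttm_bo->resource);
    struct xe_res_cursor cursor;

    /* Walk the (possibly fragmented) resource to the faulting page. */
    xe_res_first(ttm_bo->resource, (u64)page_offset << PAGE_SHIFT, 0, &cursor);

    /* PFN = base of the VRAM I/O aperture + offset of this chunk. */
    return (vram->io_start + cursor.start) >> PAGE_SHIFT;
}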
935 static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo)
937 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
940 xe_assert(xe, !kref_read(&ttm_bo->kref));
945 * the ttm_bo refcount is zero at this point. So trylocking *should*
948 spin_lock(&ttm_bo->bdev->lru_lock);
949 locked = dma_resv_trylock(ttm_bo->base.resv);
950 spin_unlock(&ttm_bo->bdev->lru_lock);
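Lines 935-950 show the destructor trylock helper almost completely. A sketch assembled from those hits; only the return statement and anything after line 950 are assumed:

/*
 * With the BO refcount already zero (asserted at line 940) no new
 * references can appear, so a trylock taken under the device LRU lock
 * is expected to succeed; it only has to beat a racing eviction that
 * still holds the reservation lock.
 */
static bool xe_ttm_bo_lock_in_destructor(struct ttm_buffer_object *ttm_bo)
{
    struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
    bool locked;

    xe_assert(xe, !kref_read(&ttm_bo->kref));

    spin_lock(&ttm_bo->bdev->lru_lock);
    locked = dma_resv_trylock(ttm_bo->base.resv);
    spin_unlock(&ttm_bo->bdev->lru_lock);

    return locked;
}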
956 static void xe_ttm_bo_release_notify(struct ttm_buffer_object *ttm_bo)
963 if (!xe_bo_is_xe_bo(ttm_bo))
966 bo = ttm_to_xe_bo(ttm_bo);
967 xe_assert(xe_bo_device(bo), !(bo->created && kref_read(&ttm_bo->base.refcount)));
973 if (ttm_bo->base.resv != &ttm_bo->base._resv)
976 if (!xe_ttm_bo_lock_in_destructor(ttm_bo))
985 dma_resv_for_each_fence(&cursor, ttm_bo->base.resv,
992 dma_resv_replace_fences(ttm_bo->base.resv,
1000 dma_resv_unlock(ttm_bo->base.resv);
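The release_notify hits at 956-1000 show a fence scrub on the dying BO: skip foreign BOs, bail if the BO does not own a private reservation object (973), lock via the destructor trylock, then walk the fences and replace some of them before unlocking. A heavily hedged sketch of that dma_resv pattern; which fences are replaced, the usage values and the stub replacement are assumptions, only the dma_resv_* calls appear in the listing:

/*
 * Sketch of the fence-replacement pattern at lines 976-1000.  The
 * predicate below is hypothetical and the replacement fence/usage are
 * assumptions; the iterator restarts safely if the resv is modified.
 */
static void xe_ttm_bo_release_notify_sketch(struct ttm_buffer_object *ttm_bo)
{
    struct dma_resv_iter cursor;
    struct dma_fence *fence;

    if (!xe_ttm_bo_lock_in_destructor(ttm_bo))
        return;

    dma_resv_for_each_fence(&cursor, ttm_bo->base.resv,
                            DMA_RESV_USAGE_BOOKKEEP, fence) {
        if (fence_needs_scrub(fence))   /* hypothetical predicate */
            dma_resv_replace_fences(ttm_bo->base.resv, fence->context,
                                    dma_fence_get_stub(),
                                    DMA_RESV_USAGE_BOOKKEEP);
    }

    dma_resv_unlock(ttm_bo->base.resv);
}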
1003 static void xe_ttm_bo_delete_mem_notify(struct ttm_buffer_object *ttm_bo)
1005 if (!xe_bo_is_xe_bo(ttm_bo))
1012 if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) {
1013 struct xe_ttm_tt *xe_tt = container_of(ttm_bo->ttm,
1016 dma_buf_unmap_attachment(ttm_bo->base.import_attach, ttm_bo->sg,
1018 ttm_bo->sg = NULL;
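Lines 1003-1018 are the teardown counterpart of the dma-buf move path: when the backing store of an imported BO is deleted, the attachment mapping is released and the cached sg pointer cleared. A sketch that stays close to the hits; the DMA direction is taken from the matching unmap at line 537:

/*
 * Condensed sketch of lines 1012-1018: release the dma-buf mapping of
 * an imported BO when its backing store goes away.
 */
static void xe_ttm_bo_delete_mem_notify_sketch(struct ttm_buffer_object *ttm_bo)
{
    if (ttm_bo->type == ttm_bo_type_sg && ttm_bo->sg) {
        dma_buf_unmap_attachment(ttm_bo->base.import_attach, ttm_bo->sg,
                                 DMA_BIDIRECTIONAL);
        ttm_bo->sg = NULL;
    }
}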
1037 static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo)
1039 struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
1040 struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
1046 xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list));