Searched refs:bo (Results 51 - 75 of 374) sorted by relevance

/linux-master/drivers/media/platform/nvidia/tegra-vde/
vde.c
61 struct tegra_vde_bo *bo; local
64 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
65 if (!bo)
68 bo->vde = vde;
69 bo->size = size;
70 bo->dma_dir = dma_dir;
71 bo->dma_attrs = DMA_ATTR_WRITE_COMBINE |
75 bo->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;
77 bo
128 tegra_vde_free_bo(struct tegra_vde_bo *bo) argument
[all...]
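
The vde.c hit above shows a common pattern for backing a driver-private bo with DMA memory: kzalloc the tracking struct, record size, direction and attributes, then hand the attrs to dma_alloc_attrs(). Below is a minimal sketch of that shape; my_bo and my_alloc_bo are hypothetical names, and the DMA_ATTR_NO_KERNEL_MAPPING bit is an assumption, since the snippet's attribute line is truncated.

    #include <linux/dma-mapping.h>
    #include <linux/slab.h>

    struct my_bo {
        struct device *dev;
        size_t size;
        enum dma_data_direction dma_dir;
        unsigned long dma_attrs;
        void *dma_cookie;       /* opaque with NO_KERNEL_MAPPING */
        dma_addr_t dma_addr;
    };

    /* Hypothetical helper mirroring the bo allocation shown above. */
    static struct my_bo *my_alloc_bo(struct device *dev, size_t size,
                                     enum dma_data_direction dma_dir)
    {
        struct my_bo *bo;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
        if (!bo)
            return NULL;

        bo->dev = dev;
        bo->size = size;
        bo->dma_dir = dma_dir;
        /* Write-combined; NO_KERNEL_MAPPING is assumed here. FORCE_CONTIGUOUS
         * matters when the hardware cannot scatter-gather. */
        bo->dma_attrs = DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_KERNEL_MAPPING;
        bo->dma_attrs |= DMA_ATTR_FORCE_CONTIGUOUS;

        bo->dma_cookie = dma_alloc_attrs(dev, size, &bo->dma_addr,
                                         GFP_KERNEL, bo->dma_attrs);
        if (!bo->dma_cookie) {
            kfree(bo);
            return NULL;
        }

        return bo;
    }

Freeing reverses the steps: dma_free_attrs() with the same size and attrs, then kfree() of the tracking struct, which is presumably what tegra_vde_free_bo() at line 128 undoes.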
/linux-master/drivers/gpu/drm/radeon/
H A Dradeon_sa.c32 * We store the last allocated bo in "hole", we always try to allocate
33 * after the last allocated bo. Principle is that in a linear GPU ring
34 * progression was is after last is the oldest bo we allocated and thus
37 * If it's not the case we skip over the bo after last to the closest
38 * done bo if such one exist. If none exist and we are not asked to
55 domain, flags, NULL, NULL, &sa_manager->bo);
57 dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
72 radeon_bo_unref(&sa_manager->bo);
80 if (sa_manager->bo == NULL) {
81 dev_err(rdev->dev, "no bo fo
[all...]
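
The comment above captures the suballocator's core invariant: remember where the last allocation ended (the "hole"), always allocate after it, and treat whatever follows the hole as the oldest in-flight work, since a GPU ring retires in submission order. Here is a minimal sketch of that ring arithmetic, under the simplifying assumption that a single 'oldest' offset stands in for the fence-tracked allocation list the real radeon_sa.c keeps:

    /* Offsets wrap inside one backing bo: live data occupies
     * [oldest, hole), free space is the rest. Not the kernel code. */
    struct sa_ring {
        unsigned int size;   /* total bytes in the backing bo */
        unsigned int hole;   /* end of the newest allocation */
        unsigned int oldest; /* start of the oldest live allocation */
    };

    /* Carve 'len' bytes after the hole, wrapping past an unusable tail.
     * Returns the offset, or -1 when the caller must wait for fences so
     * that 'oldest' can advance (allocations retire in ring order). */
    static int sa_ring_alloc(struct sa_ring *sa, unsigned int len)
    {
        unsigned int start = sa->hole;

        if (start >= sa->oldest) {          /* live span doesn't wrap */
            if (start + len <= sa->size) {
                sa->hole = start + len;
                return start;
            }
            start = 0;                      /* skip over the tail */
        }
        if (start + len < sa->oldest) {     /* fits before oldest */
            sa->hole = start + len;
            return start;
        }
        return -1;
    }

The strict '<' sidesteps the classic full-versus-empty ambiguity when the hole would catch up to oldest; the real implementation avoids it altogether by keeping allocations on a list and consulting fences rather than bare offsets.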
/linux-master/drivers/gpu/drm/virtio/
virtgpu_prime.c
34 struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); local
37 wait_event(vgdev->resp_wq, bo->uuid_state != STATE_INITIALIZING);
38 if (bo->uuid_state != STATE_OK)
41 uuid_copy(uuid, &bo->uuid);
51 struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); local
53 if (virtio_gpu_is_vram(bo))
54 return virtio_gpu_vram_map_dma_buf(bo, attach->dev, dir);
64 struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); local
66 if (virtio_gpu_is_vram(bo)) {
91 struct virtio_gpu_object *bo)
90 virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev, struct virtio_gpu_object *bo) argument
110 struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); local
[all...]
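
The virtgpu_prime.c hits show the asynchronous-resource idiom used for dma-buf UUID export: the driver queues a command to the host, and the exporting thread blocks on a wait queue until the response handler moves the object out of its initializing state. A condensed sketch of the waiting side, with hypothetical my_dev/my_obj types standing in for virtio_gpu_device and virtio_gpu_object:

    #include <linux/errno.h>
    #include <linux/wait.h>

    enum uuid_state { STATE_INITIALIZING, STATE_OK, STATE_ERR };

    struct my_obj {
        enum uuid_state uuid_state;
    };

    struct my_dev {
        wait_queue_head_t resp_wq;  /* init_waitqueue_head() at probe */
    };

    /* Block until the response handler has set uuid_state and woken us. */
    static int wait_for_uuid(struct my_dev *vgdev, struct my_obj *bo)
    {
        wait_event(vgdev->resp_wq, bo->uuid_state != STATE_INITIALIZING);

        return bo->uuid_state == STATE_OK ? 0 : -ENODEV;
    }

The handler side sets bo->uuid_state and calls wake_up_all(&vgdev->resp_wq); wait_event() re-checks the condition after every wakeup, so spurious wakeups are harmless.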
/linux-master/drivers/gpu/drm/vmwgfx/
vmwgfx_bo.c
46 * @bo: Pointer to the embedded struct ttm_buffer_object
48 static void vmw_bo_free(struct ttm_buffer_object *bo) argument
50 struct vmw_bo *vbo = to_vmw_bo(&bo->base);
74 struct ttm_buffer_object *bo = &buf->tbo; local
79 ret = ttm_bo_reserve(bo, interruptible, false, NULL);
83 ret = ttm_bo_validate(bo, placement, &ctx);
87 ttm_bo_unreserve(bo);
97 * Flushes and unpins the query bo to avoid failures.
110 struct ttm_buffer_object *bo = &buf->tbo; local
115 ret = ttm_bo_reserve(bo, interruptibl
179 struct ttm_buffer_object *bo = &buf->tbo; local
237 struct ttm_buffer_object *bo = &buf->tbo; local
259 vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo, SVGAGuestPtr *ptr) argument
284 struct ttm_buffer_object *bo = &vbo->tbo; local
329 struct ttm_buffer_object *bo = &vbo->tbo; local
454 struct ttm_buffer_object *bo = &vmw_bo->tbo; local
635 vmw_bo_fence_single(struct ttm_buffer_object *bo, struct vmw_fence_obj *fence) argument
708 vmw_bo_swap_notify(struct ttm_buffer_object *bo) argument
725 vmw_bo_move_notify(struct ttm_buffer_object *bo, struct ttm_resource *mem) argument
819 vmw_bo_placement_set(struct vmw_bo *bo, u32 domain, u32 busy_domain) argument
845 vmw_bo_placement_set_default_accelerated(struct vmw_bo *bo) argument
[all...]
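
Lines 79-87 of the vmwgfx_bo.c hit show the canonical TTM sequence for forcing a bo into a required placement: reserve (lock) the bo, validate it against the placement, unreserve. A hedged sketch using the public TTM entry points; building the ttm_placement is driver-specific and left to the caller, and the header paths assume a recent kernel:

    #include <drm/ttm/ttm_bo.h>
    #include <drm/ttm/ttm_placement.h>

    /* Reserve/validate/unreserve, mirroring the calls shown above. */
    static int validate_in_placement(struct ttm_buffer_object *bo,
                                     struct ttm_placement *placement,
                                     bool interruptible)
    {
        struct ttm_operation_ctx ctx = {
            .interruptible = interruptible,
            .no_wait_gpu = false,
        };
        int ret;

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (ret)
            return ret;

        /* May migrate the bo; blocks (or not) according to ctx. */
        ret = ttm_bo_validate(bo, placement, &ctx);

        ttm_bo_unreserve(bo);
        return ret;
    }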
vmwgfx_ttm_buffer.c
257 * @bo: Pointer to a struct ttm_buffer_object
264 const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo) argument
267 container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm);
400 static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo, argument
405 bool external = bo->type == ttm_bo_type_sg;
411 vmw_be->dev_priv = vmw_priv_from_ttm(bo->bdev);
418 ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags,
421 ret = ttm_tt_init(&vmw_be->dma_ttm, bo, page_flags,
432 static void vmw_evict_flags(struct ttm_buffer_object *bo, argument
463 * @bo
471 vmw_move_notify(struct ttm_buffer_object *bo, struct ttm_resource *old_mem, struct ttm_resource *new_mem) argument
485 vmw_swap_notify(struct ttm_buffer_object *bo) argument
496 vmw_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, struct ttm_resource *new_mem, struct ttm_place *hop) argument
[all...]
/linux-master/drivers/gpu/drm/qxl/
qxl_release.c
40 /* put an alloc/dealloc surface cmd into one bo and round up to 128 */
124 struct qxl_bo *bo; local
128 bo = to_qxl_bo(entry->tv.bo);
129 qxl_bo_unref(&bo);
163 struct qxl_bo **bo,
166 /* pin release bo's; they are too messy to evict */
168 QXL_GEM_DOMAIN_VRAM, priority, NULL, bo);
171 int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo) argument
176 if (entry->tv.bo
162 qxl_release_bo_alloc(struct qxl_device *qdev, struct qxl_bo **bo, u32 priority) argument
191 qxl_release_validate_bo(struct qxl_bo *bo) argument
230 struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); local
258 struct qxl_bo *bo; local
286 struct qxl_bo *bo, *free_bo = NULL; local
385 struct qxl_bo *bo = release->release_bo; local
398 struct qxl_bo *bo = release->release_bo; local
407 struct ttm_buffer_object *bo; local
[all...]
/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_amdkfd_gpuvm.c
291 void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo) argument
293 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
294 u32 alloc_flags = bo->kfd_bo->alloc_flags;
295 u64 size = amdgpu_bo_size(bo);
298 bo->xcp_id);
300 kfree(bo->kfd_bo);
320 ret = amdgpu_bo_reserve(mem->bo, false);
325 flags |= mem->bo->flags & (AMDGPU_GEM_CREATE_COHERENT |
328 ret = amdgpu_gem_object_create(adev, mem->bo->tbo.base.size, 1,
330 ttm_bo_type_sg, mem->bo
353 amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo, struct amdgpu_amdkfd_fence *ef) argument
371 amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo) argument
407 amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain, bool wait) argument
429 amdgpu_amdkfd_bo_validate_and_fence(struct amdgpu_bo *bo, uint32_t domain, struct dma_fence *fence) argument
455 amdgpu_amdkfd_validate_vm_bo(void *_unused, struct amdgpu_bo *bo) argument
552 struct amdgpu_bo *bo = attachment->bo_va->base.bo; local
599 struct amdgpu_bo *bo = attachment->bo_va->base.bo; local
642 struct amdgpu_bo *bo = attachment->bo_va->base.bo; local
719 struct amdgpu_bo *bo = attachment->bo_va->base.bo; local
765 struct amdgpu_bo *bo = attachment->bo_va->base.bo; local
834 kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem, struct amdgpu_bo **bo) argument
874 struct amdgpu_bo *bo[2] = {NULL, NULL}; local
1006 struct amdgpu_bo *bo = attachment->bo_va->base.bo; local
1054 struct amdgpu_bo *bo = mem->bo; local
1145 struct amdgpu_bo *bo = mem->bo; local
1187 struct amdgpu_bo *bo = mem->bo; local
1465 amdgpu_amdkfd_gpuvm_pin_bo(struct amdgpu_bo *bo, u32 domain) argument
1491 amdgpu_amdkfd_gpuvm_unpin_bo(struct amdgpu_bo *bo) argument
1693 struct amdgpu_bo *bo; local
1992 struct amdgpu_bo *bo; local
2198 amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_bo *bo) argument
2255 struct amdgpu_bo *bo = mem->bo; local
2313 struct amdgpu_bo *bo = mem->bo; local
2340 struct amdgpu_bo *bo; local
2507 struct amdgpu_bo *bo; local
2610 struct amdgpu_bo *bo; local
2908 struct amdgpu_bo *bo = mem->bo; local
3043 struct amdgpu_bo *bo = peer_vm->root.bo; local
[all...]
amdgpu_hmm.c
70 struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier); local
71 struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
81 r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
85 DRM_ERROR("(%ld) failed to wait for user bo\n", r);
107 struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier); local
112 amdgpu_amdkfd_evict_userptr(mni, cur_seq, bo->kfd_bo);
124 * @bo: amdgpu buffer object
130 int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr) argument
134 if (bo->kfd_bo)
135 r = mmu_interval_notifier_insert(&bo
160 amdgpu_hmm_unregister(struct amdgpu_bo *bo) argument
[all...]
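
The amdgpu_hmm.c hits sketch the userptr lifecycle: an mmu_interval_notifier embedded in the bo fires when the CPU page tables change, and the invalidate callback waits out the reservation object's bookkeeping fences before letting the unmap proceed (the dma_resv_wait_timeout call at line 81 above). A condensed sketch of that callback, assuming a hypothetical my_userptr_bo wrapper; the real amdgpu version also takes driver locks and distinguishes graphics from KFD buffers:

    #include <linux/dma-resv.h>
    #include <linux/mmu_notifier.h>
    #include <linux/printk.h>
    #include <linux/sched.h>

    struct my_userptr_bo {
        struct mmu_interval_notifier notifier;
        struct dma_resv *resv;
    };

    static bool my_invalidate(struct mmu_interval_notifier *mni,
                              const struct mmu_notifier_range *range,
                              unsigned long cur_seq)
    {
        struct my_userptr_bo *bo =
            container_of(mni, struct my_userptr_bo, notifier);
        long r;

        if (!mmu_notifier_range_blockable(range))
            return false;

        /* Publish the new sequence so racing page-table walks retry. */
        mmu_interval_set_seq(mni, cur_seq);

        /* The GPU must be done with the pages before they go away. */
        r = dma_resv_wait_timeout(bo->resv, DMA_RESV_USAGE_BOOKKEEP,
                                  false, MAX_SCHEDULE_TIMEOUT);
        if (r <= 0)
            pr_err("(%ld) failed to wait for user bo\n", r);

        return true;
    }

    static const struct mmu_interval_notifier_ops my_hmm_ops = {
        .invalidate = my_invalidate,
    };

Registration pairs the notifier with the user VA range, as at line 135 above: mmu_interval_notifier_insert(&bo->notifier, current->mm, addr, size, &my_hmm_ops).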
amdgpu_csa.c
38 int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo, argument
44 domain, bo,
46 if (!*bo)
54 void amdgpu_free_static_csa(struct amdgpu_bo **bo) argument
56 amdgpu_bo_free_kernel(bo, NULL, NULL);
66 struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
76 r = drm_exec_lock_obj(&exec, &bo->tbo.base);
84 *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
106 struct amdgpu_bo *bo, struct amdgpu_bo_va *bo_va,
116 r = drm_exec_lock_obj(&exec, &bo
65 amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va, uint64_t csa_addr, uint32_t size) argument
105 amdgpu_unmap_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct amdgpu_bo *bo, struct amdgpu_bo_va *bo_va, uint64_t csa_addr) argument
[all...]
/linux-master/drivers/gpu/drm/nouveau/
nv10_fence.h
15 struct nouveau_bo *bo; member in struct:nv10_fence_priv
nouveau_bo.c
137 nouveau_bo_del_ttm(struct ttm_buffer_object *bo) argument
139 struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
141 struct nouveau_bo *nvbo = nouveau_bo(bo);
143 WARN_ON(nvbo->bo.pin_count > 0);
144 nouveau_bo_del_io_reserve_lru(bo);
151 if (bo->base.dev) {
158 drm_gem_object_release(&bo->base);
160 dma_resv_fini(&bo->base._resv);
177 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
229 nvbo->bo
474 struct ttm_buffer_object *bo = &nvbo->bo; local
551 struct ttm_buffer_object *bo = &nvbo->bo; local
672 nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo) argument
682 nouveau_bo_del_io_reserve_lru(struct ttm_buffer_object *bo) argument
751 nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) argument
795 nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) argument
813 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo, struct ttm_resource *reg) argument
845 nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, struct ttm_operation_ctx *ctx, struct ttm_resource *new_reg) argument
972 nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_resource *new_reg) argument
1009 nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg, struct nouveau_drm_tile **new_tile) argument
1030 nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, struct nouveau_drm_tile *new_tile, struct nouveau_drm_tile **old_tile) argument
1050 nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, struct ttm_resource *new_reg, struct ttm_place *hop) argument
1279 nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) argument
1390 nouveau_bo_delete_mem_notify(struct ttm_buffer_object *bo) argument
[all...]
/linux-master/drivers/gpu/drm/xe/
xe_drm_client.h
58 struct xe_bo *bo);
59 void xe_drm_client_remove_bo(struct xe_bo *bo);
62 struct xe_bo *bo)
66 static inline void xe_drm_client_remove_bo(struct xe_bo *bo) argument
61 xe_drm_client_add_bo(struct xe_drm_client *client, struct xe_bo *bo) argument
xe_bo_types.h
45 /** @ttm_kmap: TTM bo kmap object for internal use only. Keep off. */
51 * @client: @xe_drm_client which created the bo
61 /** @created: Whether the bo has passed initial creation */
77 #define intel_bo_to_drm_bo(bo) (&(bo)->ttm.base)
78 #define intel_bo_to_i915(bo) to_i915(intel_bo_to_drm_bo(bo)->dev)
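
The two macros above are an instance of the embedded-base-object idiom used throughout these drivers: struct xe_bo embeds a TTM bo, which embeds a drm_gem_object, so converting toward the base is a field access and converting back is container_of(). A toy illustration with stand-in types (the real chain is xe_bo -> ttm_buffer_object -> drm_gem_object):

    #include <linux/container_of.h>

    struct base_obj  { unsigned long size; };   /* think drm_gem_object */
    struct ttm_obj   { struct base_obj base; }; /* think ttm_buffer_object */
    struct driver_bo { struct ttm_obj ttm; };   /* think xe_bo */

    /* Down: plain member access. Up: container_of() recovers the outer
     * struct from a pointer to its embedded member, at zero runtime cost. */
    #define bo_to_base(bo)  (&(bo)->ttm.base)
    #define base_to_bo(obj) container_of(obj, struct driver_bo, ttm.base)

Because the base object is embedded rather than pointed to, one allocation covers all three layers and the conversions cannot fail.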
xe_bb.c
47 bb->bo = xe_sa_bo_new(!usm ? tile->mem.kernel_bb_pool : gt->usm.bb_pool,
49 if (IS_ERR(bb->bo)) {
50 err = PTR_ERR(bb->bo);
54 bb->cs = xe_sa_bo_cpu_addr(bb->bo);
66 u32 size = drm_suballoc_size(bb->bo);
72 xe_sa_bo_flush_write(bb->bo);
83 batch_base_ofs + drm_suballoc_soffset(bb->bo),
84 batch_base_ofs + drm_suballoc_soffset(bb->bo) +
97 u64 addr = xe_sa_bo_gpu_addr(bb->bo);
108 xe_sa_bo_free(bb->bo, fenc
[all...]
/linux-master/drivers/gpu/drm/i915/gem/
i915_gem_ttm.c
266 static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo, argument
269 struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
271 struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
277 if (i915_ttm_is_ghost_object(bo))
284 if (obj->flags & I915_BO_ALLOC_CPU_CLEAR && (!bo->resource ||
285 ttm_manager_type(bo->bdev, bo->resource->mem_type)->use_tt))
296 ccs_pages = DIV_ROUND_UP(DIV_ROUND_UP(bo->base.size,
300 ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags, caching, ccs_pages);
304 __i915_refct_sgt_init(&i915_tt->cached_rsgt, bo
355 i915_ttm_eviction_valuable(struct ttm_buffer_object *bo, const struct ttm_place *place) argument
378 i915_ttm_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *placement) argument
419 struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); local
459 struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); local
504 i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo) argument
566 struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); local
602 struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); local
621 i915_ttm_swap_notify(struct ttm_buffer_object *bo) argument
684 i915_ttm_io_mem_pfn(struct ttm_buffer_object *bo, unsigned long page_offset) argument
701 i915_ttm_access_memory(struct ttm_buffer_object *bo, unsigned long offset, void *buf, int len, int write) argument
777 struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); local
924 struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); local
1037 struct ttm_buffer_object *bo = area->vm_private_data; local
1196 struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); local
1240 i915_ttm_bo_destroy(struct ttm_buffer_object *bo) argument
[all...]
i915_gem_ttm_move.c
84 struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); local
86 if (i915_ttm_cpu_maps_iomem(bo->resource) || bo->ttm->caching != ttm_cached) {
104 struct ttm_buffer_object *bo = i915_gem_to_ttm(obj); local
116 if (!bo->resource) {
121 mem_flags = i915_ttm_cpu_maps_iomem(bo->resource) ? I915_BO_FLAG_IOMEM :
123 mem_type = bo->resource->mem_type;
124 cache_level = i915_ttm_cache_level(to_i915(bo->base.dev), bo->resource,
125 bo
162 i915_ttm_move_notify(struct ttm_buffer_object *bo) argument
187 i915_ttm_accel_move(struct ttm_buffer_object *bo, bool clear, struct ttm_resource *dst_mem, struct ttm_tt *dst_ttm, struct sg_table *dst_st, const struct i915_deps *deps) argument
311 i915_ttm_memcpy_init(struct i915_ttm_memcpy_arg *arg, struct ttm_buffer_object *bo, bool clear, struct ttm_resource *dst_mem, struct ttm_tt *dst_ttm, struct i915_refct_sgt *dst_rsgt) argument
454 i915_ttm_memcpy_allowed(struct ttm_buffer_object *bo, struct ttm_resource *dst_mem) argument
468 __i915_ttm_move(struct ttm_buffer_object *bo, const struct ttm_operation_ctx *ctx, bool clear, struct ttm_resource *dst_mem, struct ttm_tt *dst_ttm, struct i915_refct_sgt *dst_rsgt, bool allow_accel, const struct i915_deps *move_deps) argument
570 i915_ttm_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, struct ttm_resource *dst_mem, struct ttm_place *hop) argument
[all...]
/linux-master/drivers/gpu/drm/xe/display/
xe_fb_pin.c
17 write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_ofs, argument
20 struct xe_device *xe = xe_bo_device(bo);
24 /* TODO: Maybe rewrite so we can traverse the bo addresses sequentially,
32 u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
49 write_dpt_remapped(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, argument
53 struct xe_device *xe = xe_bo_device(bo);
55 u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset, u16 pat_index)
64 pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
86 struct xe_bo *bo = intel_fb_obj(&fb->base), *dpt; local
87 u32 dpt_size, size = bo
157 write_ggtt_rotated(struct xe_bo *bo, struct xe_ggtt *ggtt, u32 *ggtt_ofs, u32 bo_ofs, u32 width, u32 height, u32 src_stride, u32 dst_stride) argument
184 struct xe_bo *bo = intel_fb_obj(&fb->base); local
255 struct xe_bo *bo = intel_fb_obj(&fb->base); local
352 struct xe_bo *bo = intel_fb_obj(fb); local
[all...]
xe_dsb_buffer.c
15 return xe_bo_ggtt_addr(dsb_buf->vma->bo);
20 iosys_map_wr(&dsb_buf->vma->bo->vmap, idx * 4, u32, val);
25 return iosys_map_rd(&dsb_buf->vma->bo->vmap, idx * 4, u32);
32 iosys_map_memset(&dsb_buf->vma->bo->vmap, idx * 4, val, size);
55 vma->bo = obj;
64 xe_bo_unpin_map_no_vm(dsb_buf->vma->bo);
/linux-master/drivers/gpu/drm/xe/compat-i915-headers/
i915_vma.h
21 struct xe_bo *bo, *dpt; member in struct:i915_vma
25 #define i915_ggtt_clear_scanout(bo) do { } while (0)
/linux-master/drivers/gpu/drm/tegra/
fbdev.c
28 struct tegra_bo *bo; local
31 bo = tegra_fb_get_plane(helper->fb, 0);
33 err = drm_gem_mmap_obj(&bo->gem, bo->gem.size, vma);
37 return __tegra_gem_mmap(&bo->gem, vma);
44 struct tegra_bo *bo = tegra_fb_get_plane(fb, 0); local
49 if (bo->pages) {
50 vunmap(bo->vaddr);
51 bo->vaddr = NULL;
79 struct tegra_bo *bo; local
[all...]
submit.c
44 struct gather_bo *bo = container_of(host_bo, struct gather_bo, base); local
46 kref_get(&bo->ref);
53 struct gather_bo *bo = container_of(ref, struct gather_bo, ref); local
55 dma_free_attrs(bo->dev, bo->gather_data_words * 4, bo->gather_data, bo->gather_data_dma,
57 kfree(bo);
62 struct gather_bo *bo = container_of(host_bo, struct gather_bo, base); local
64 kref_put(&bo
68 gather_bo_pin(struct device *dev, struct host1x_bo *bo, enum dma_data_direction direction) argument
127 struct gather_bo *bo = container_of(host_bo, struct gather_bo, base); local
183 struct gather_bo *bo; local
228 submit_write_reloc(struct tegra_drm_context *context, struct gather_bo *bo, struct drm_tegra_submit_buf *buf, struct tegra_drm_mapping *mapping) argument
257 submit_process_bufs(struct tegra_drm_context *context, struct gather_bo *bo, struct drm_tegra_channel_submit *args, struct tegra_drm_submit_data *job_data) argument
350 submit_job_add_gather(struct host1x_job *job, struct tegra_drm_context *context, struct drm_tegra_submit_cmd_gather_uptr *cmd, struct gather_bo *bo, u32 *offset, struct tegra_drm_submit_data *job_data, u32 *class) argument
393 submit_create_job(struct tegra_drm_context *context, struct gather_bo *bo, struct drm_tegra_channel_submit *args, struct tegra_drm_submit_data *job_data, struct xarray *syncpoints) argument
518 struct gather_bo *bo; local
[all...]
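
The gather_bo hits above are a textbook kref lifecycle: get/put wrappers around a reference count, with a release function that frees the buffer once the last reference drops. A self-contained sketch of the same shape (plain kfree stands in for the dma_free_attrs call at line 55 above):

    #include <linux/container_of.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    struct gather_buf {
        struct kref ref;    /* kref_init(&buf->ref) on creation */
        void *data;
    };

    /* Called by kref_put() exactly once, when the count hits zero. */
    static void gather_buf_release(struct kref *ref)
    {
        struct gather_buf *buf = container_of(ref, struct gather_buf, ref);

        kfree(buf->data);
        kfree(buf);
    }

    static void gather_buf_get(struct gather_buf *buf)
    {
        kref_get(&buf->ref);
    }

    static void gather_buf_put(struct gather_buf *buf)
    {
        kref_put(&buf->ref, gather_buf_release);
    }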
/linux-master/drivers/gpu/drm/ttm/tests/
ttm_resource_test.c
20 struct ttm_buffer_object *bo; member in struct:ttm_resource_test_priv
57 priv->bo = ttm_bo_kunit_init(test, priv->devs, size);
112 struct ttm_buffer_object *bo; local
118 bo = priv->bo;
130 KUNIT_ASSERT_TRUE(test, list_empty(&man->lru[bo->priority]));
132 ttm_resource_init(bo, place, res);
138 KUNIT_ASSERT_PTR_EQ(test, res->bo, bo);
146 KUNIT_ASSERT_TRUE(test, list_is_singular(&man->lru[bo
155 struct ttm_buffer_object *bo; local
185 struct ttm_buffer_object *bo; local
230 struct ttm_buffer_object *bo; local
268 struct ttm_buffer_object *bo; local
293 struct ttm_buffer_object *bo; local
[all...]
ttm_kunit_helpers.c
9 static struct ttm_tt *ttm_tt_simple_create(struct ttm_buffer_object *bo, argument
15 ttm_tt_init(tt, bo, page_flags, ttm_cached, 0);
25 static void dummy_ttm_bo_destroy(struct ttm_buffer_object *bo) argument
57 struct ttm_buffer_object *bo; local
60 bo = kunit_kzalloc(test, sizeof(*bo), GFP_KERNEL);
61 KUNIT_ASSERT_NOT_NULL(test, bo);
63 bo->base = gem_obj;
64 err = drm_gem_object_init(devs->drm, &bo->base, size);
67 bo
[all...]
/linux-master/drivers/gpu/drm/lima/
lima_vm.c
77 lima_vm_bo_find(struct lima_vm *vm, struct lima_bo *bo) argument
81 list_for_each_entry(bo_va, &bo->va, list) {
91 int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create) argument
97 mutex_lock(&bo->lock);
99 bo_va = lima_vm_bo_find(vm, bo);
102 mutex_unlock(&bo->lock);
108 mutex_unlock(&bo->lock);
123 err = drm_mm_insert_node(&vm->mm, &bo_va->node, lima_bo_size(bo));
127 for_each_sgtable_dma_page(bo->base.sgt, &sg_iter, 0) {
138 list_add_tail(&bo_va->list, &bo
155 lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo) argument
185 lima_vm_get_va(struct lima_vm *vm, struct lima_bo *bo) argument
283 lima_vm_map_bo(struct lima_vm *vm, struct lima_bo *bo, int pageoff) argument
[all...]
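
lima_vm.c demonstrates per-vm VA bookkeeping: each bo keeps a list of refcounted bo_va mappings, and adding a bo to a vm either bumps an existing mapping's refcount or carves a fresh VA range out of a drm_mm allocator. A sketch of that find-or-insert pattern under simplified, hypothetical types; locking and the page-table writes that follow drm_mm_insert_node() are elided:

    #include <drm/drm_mm.h>
    #include <linux/list.h>
    #include <linux/slab.h>

    struct my_bo_va {
        struct list_head list;    /* linked into the bo's va list */
        struct drm_mm_node node;  /* VA range owned by this mapping */
        void *vm;                 /* vm this mapping belongs to */
        int ref_count;
    };

    static struct my_bo_va *my_vm_bo_find(void *vm, struct list_head *va_list)
    {
        struct my_bo_va *bo_va;

        list_for_each_entry(bo_va, va_list, list)
            if (bo_va->vm == vm)
                return bo_va;

        return NULL;
    }

    static int my_vm_bo_add(void *vm, struct drm_mm *va_space,
                            struct list_head *va_list, u64 size)
    {
        struct my_bo_va *bo_va = my_vm_bo_find(vm, va_list);
        int err;

        if (bo_va) {            /* already mapped in this vm */
            bo_va->ref_count++;
            return 0;
        }

        bo_va = kzalloc(sizeof(*bo_va), GFP_KERNEL);
        if (!bo_va)
            return -ENOMEM;

        bo_va->vm = vm;
        bo_va->ref_count = 1;

        /* Reserve a free [start, start+size) hole in the GPU VA space. */
        err = drm_mm_insert_node(va_space, &bo_va->node, size);
        if (err) {
            kfree(bo_va);
            return err;
        }

        list_add_tail(&bo_va->list, va_list);
        return 0;
    }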
/linux-master/include/drm/ttm/
ttm_device.h
65 * @bo: The buffer object to create the ttm for.
73 struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
113 * @bo: the buffer object to be evicted
119 bool (*eviction_valuable)(struct ttm_buffer_object *bo,
124 * @bo: the buffer object to be evicted
126 * Return the bo flags for a buffer which is not mapped to the hardware.
128 * finished, they'll end up in bo->mem.flags
133 void (*evict_flags)(struct ttm_buffer_object *bo,
139 * @bo: the buffer to move
149 int (*move)(struct ttm_buffer_object *bo, boo
[all...]
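
The ttm_device.h hits document the driver hook table TTM dispatches through. Below is a skeleton of a minimal ttm_device_funcs for a system-memory-only device, assuming the recent two-field ttm_placement layout; real drivers implement considerably more (eviction_valuable, I/O-memory and notify hooks) and do actual copies in move():

    #include <drm/ttm/ttm_bo.h>
    #include <drm/ttm/ttm_device.h>
    #include <drm/ttm/ttm_placement.h>
    #include <drm/ttm/ttm_tt.h>
    #include <linux/slab.h>

    static struct ttm_tt *my_ttm_tt_create(struct ttm_buffer_object *bo,
                                           uint32_t page_flags)
    {
        struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

        if (!tt)
            return NULL;

        if (ttm_tt_init(tt, bo, page_flags, ttm_cached, 0)) {
            kfree(tt);
            return NULL;
        }

        return tt;
    }

    static void my_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
    {
        ttm_tt_fini(tt);
        kfree(tt);
    }

    static const struct ttm_place sys_place = { .mem_type = TTM_PL_SYSTEM };

    /* Where may an evicted bo go? Here: only plain system memory. */
    static void my_evict_flags(struct ttm_buffer_object *bo,
                               struct ttm_placement *placement)
    {
        placement->num_placement = 1;
        placement->placement = &sys_place;
    }

    /* No VRAM, so a "move" only retargets the resource. */
    static int my_move(struct ttm_buffer_object *bo, bool evict,
                       struct ttm_operation_ctx *ctx,
                       struct ttm_resource *new_mem,
                       struct ttm_place *hop)
    {
        ttm_bo_move_null(bo, new_mem);
        return 0;
    }

    static struct ttm_device_funcs my_funcs = {
        .ttm_tt_create = my_ttm_tt_create,
        .ttm_tt_destroy = my_ttm_tt_destroy,
        .evict_flags = my_evict_flags,
        .move = my_move,
    };

This mirrors what the ttm_kunit_helpers.c hit above does for its test device: ttm_tt_init()-backed tt creation plus the smallest set of hooks TTM needs to operate.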
