Searched refs:dma_resv (Results 1 - 25 of 76) sorted by last modified time

/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_ttm.c
    291   struct dma_resv *resv,
    2112  struct dma_resv *resv,
    2144  struct dma_resv *resv,
    2198  struct dma_resv *resv,
    2235  struct dma_resv *resv,
amdgpu_object.h
    57    struct dma_resv *resv;
    338   int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
amdgpu_object.c
    1440  struct dma_resv *resv = bo->tbo.base.resv;
    1468  int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
amdgpu_vm.c
    914   struct dma_resv *resv, uint64_t start, uint64_t last,
    1128  struct dma_resv *resv;
    1353  struct dma_resv *resv = vm->root.bo->tbo.base.resv;
    1383  struct dma_resv *resv = vm->root.bo->tbo.base.resv;
    1435  struct dma_resv *resv;
amdgpu_ttm.h
    152   struct dma_resv *resv,
    159   struct dma_resv *resv,
    163   struct dma_resv *resv,
amdgpu_cs.c
    1196  struct dma_resv *resv = bo->tbo.base.resv;
/linux-master/drivers/gpu/drm/vmwgfx/
vmwgfx_bo.h
    58    struct dma_resv *resv;
/linux-master/drivers/gpu/drm/xe/
xe_migrate.c
    619   static int job_add_deps(struct xe_sched_job *job, struct dma_resv *resv,
xe_vm.h
    256   static inline struct dma_resv *xe_vm_resv(struct xe_vm *vm)
xe_bo.c
    561   * to the buffer object's dma_resv object, that signals when access is
    562   * stopped. The caller will wait on all dma_resv fences before
    1204  struct xe_tile *tile, struct dma_resv *resv,
    1779  * @vm: Pointer to a the vm the bo shares a locked dma_resv object with, or
    2040  * xe_bo_lock() - Lock the buffer object's dma_resv object
    2044  * Locks the buffer object's dma_resv object. If the buffer object is
    2045  * pointing to a shared dma_resv object, that shared lock is locked.
    2062  * xe_bo_unlock() - Unlock the buffer object's dma_resv object
xe_bo.h
    99    struct xe_tile *tile, struct dma_resv *resv,
    285   * memory allocation in the dma_resv individualization, it's not ok
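The xe_bo.c hits at lines 2040-2045 and 2062 document xe_bo_lock()/xe_bo_unlock(), which take and release the buffer object's (possibly shared) dma_resv lock. Below is a minimal sketch of the same locking pattern using only the generic <linux/dma-resv.h> API; lock_bo_and_add_fence() and its fence argument are illustrative, not xe code.

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

/* Illustrative helper, not the xe_bo_lock() implementation. */
static int lock_bo_and_add_fence(struct dma_resv *resv,
                                 struct dma_fence *fence)
{
        int ret;

        /*
         * Takes the reservation lock; if the BO points at a shared
         * dma_resv (e.g. its VM's or an imported dma-buf's), that
         * shared lock is what gets taken.  A ww_acquire_ctx goes in
         * place of NULL when several objects must be locked together.
         */
        ret = dma_resv_lock(resv, NULL);
        if (ret)
                return ret;

        /* Fence slots must be reserved before a fence can be added. */
        ret = dma_resv_reserve_fences(resv, 1);
        if (!ret)
                dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_WRITE);

        dma_resv_unlock(resv);
        return ret;
}
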
/linux-master/drivers/gpu/drm/nouveau/
nouveau_bo.c
    153   * dma_resv from a root GEM object.
    349   struct sg_table *sg, struct dma_resv *robj)
    379   struct sg_table *sg, struct dma_resv *robj,
    1380  struct dma_resv *resv = nvbo->bo.base.resv;
nouveau_bo.h
    29    /* Root GEM object we derive the dma_resv of in case this BO is not
    83    struct sg_table *sg, struct dma_resv *robj);
    86    struct dma_resv *robj,
/linux-master/include/drm/
drm_gem.h
    382   struct dma_resv *resv;
    391   struct dma_resv _resv;
    398   * Drivers should lock list accesses with the GEMs &dma_resv lock
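The drm_gem.h hits at lines 382 and 391 show that struct drm_gem_object carries both a resv pointer and an embedded _resv: the pointer normally refers to the embedded reservation object and is repointed when a shared one is supplied, for example by an imported dma-buf (the nouveau_bo.h "root GEM object" comment above is the same idea). A hedged sketch of telling the two cases apart follows; gem_uses_embedded_resv() is a hypothetical helper, not a DRM core function.

#include <linux/types.h>
#include <drm/drm_gem.h>

/*
 * Hypothetical helper: true when the object still uses its own embedded
 * reservation object rather than one shared with another object.
 */
static bool gem_uses_embedded_resv(const struct drm_gem_object *obj)
{
        return obj->resv == &obj->_resv;
}
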
/linux-master/drivers/gpu/drm/ttm/
ttm_bo.c
    215   struct dma_resv *resv = &bo->base._resv;
    245   struct dma_resv *resv = &bo->base._resv;
    302   * Block for the dma_resv object to become idle, lock the buffer and clean up
    952   * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
    978   struct sg_table *sg, struct dma_resv *resv,
    1047  * @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
    1071  struct sg_table *sg, struct dma_resv *resv,
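The ttm_bo.c hit at line 302 describes blocking until the dma_resv object becomes idle before the buffer is cleaned up, and the hits at 952 and 1047 note that a BO may either bring its own dma_resv or let TTM allocate one. A minimal sketch of the "wait until idle" part, assuming only the generic dma_resv API; wait_bo_idle() and its one-second timeout are illustrative, not TTM's cleanup path.

#include <linux/dma-resv.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

/*
 * Illustrative helper: block until every fence in the reservation object
 * has signalled (DMA_RESV_USAGE_BOOKKEEP covers fences of all usages).
 */
static int wait_bo_idle(struct dma_resv *resv)
{
        long ret;

        ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
                                    true, msecs_to_jiffies(1000));
        if (ret == 0)
                return -ETIMEDOUT;

        return ret < 0 ? ret : 0;
}
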
/linux-master/drivers/gpu/drm/ttm/tests/
ttm_bo_test.c
    129   * dma_resv lock of bo2 so the other context is "wounded" and has to back off
    416   struct dma_resv *external_resv;
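The ttm_bo_test.c hit at line 129 refers to the ww_mutex "wound/wait" semantics behind dma_resv: when two contexts try to lock the same pair of reservation objects in opposite order, the younger one gets -EDEADLK and has to back off. A hedged sketch of that backoff for two objects; lock_two_bos() is illustrative, and the caller is assumed to have set up ctx with ww_acquire_init(&ctx, &reservation_ww_class).

#include <linux/dma-resv.h>
#include <linux/ww_mutex.h>
#include <linux/errno.h>

/*
 * Illustrative helper: lock two reservation objects, backing off on
 * -EDEADLK as the wound/wait protocol requires.  On success both locks
 * are held; on error neither is.
 */
static int lock_two_bos(struct dma_resv *a, struct dma_resv *b,
                        struct ww_acquire_ctx *ctx)
{
        int ret;

        ret = dma_resv_lock(a, ctx);
        if (ret)
                return ret;

        ret = dma_resv_lock(b, ctx);
        if (ret == -EDEADLK) {
                /*
                 * Wounded by an older context: drop what we hold, take
                 * the contended lock in the slow (cannot-fail) path and
                 * retry the other one.
                 */
                dma_resv_unlock(a);
                dma_resv_lock_slow(b, ctx);
                ret = dma_resv_lock(a, ctx);
                if (ret) {
                        /* A further -EDEADLK means the caller retries. */
                        dma_resv_unlock(b);
                        return ret;
                }
        } else if (ret) {
                dma_resv_unlock(a);
                return ret;
        }

        return 0;
}
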
/linux-master/drivers/gpu/drm/scheduler/
sched_main.c
    828   * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
    930   * @resv: the dma_resv object to get the fences from
    940   struct dma_resv *resv,
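The sched_main.c hits around lines 930 and 940 are the kernel-doc and parameter list of drm_sched_job_add_resv_dependencies(), which turns the fences tracked in a dma_resv into scheduling dependencies of a job. A hedged sketch of calling it; add_bo_deps() is an illustrative wrapper, and the caller is expected to hold the dma_resv lock.

#include <drm/gpu_scheduler.h>
#include <linux/dma-resv.h>

/*
 * Illustrative wrapper: make @job wait for the kernel and write fences
 * of @resv, i.e. what a job that only reads the buffer has to respect.
 */
static int add_bo_deps(struct drm_sched_job *job, struct dma_resv *resv)
{
        return drm_sched_job_add_resv_dependencies(job, resv,
                                                   DMA_RESV_USAGE_WRITE);
}
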
/linux-master/drivers/gpu/drm/radeon/
radeon_object.c
    131   struct dma_resv *resv,
    787   struct dma_resv *resv = bo->tbo.base.resv;
radeon.h
    582   struct dma_resv *resv,
    1908  struct dma_resv *resv);
    1914  struct dma_resv *resv);
    1921  struct dma_resv *resv);
r600.c
    2966  struct dma_resv *resv)
r100.c
    904   struct dma_resv *resv)
cik.c
    3648  struct dma_resv *resv)
/linux-master/drivers/gpu/drm/loongson/
lsdc_ttm.c
    438   struct dma_resv *resv)
/linux-master/drivers/gpu/drm/i915/display/
intel_atomic_plane.c
    1018  static int add_dma_resv_fences(struct dma_resv *resv,
/linux-master/drivers/gpu/drm/etnaviv/
etnaviv_gem.c
    434   struct dma_resv *robj = obj->resv;

Completed in 501 milliseconds
