Searched refs:resv (Results 1 - 25 of 267) sorted by last modified time


/linux-master/mm/
hugetlb.c
438 get_file_region_entry_from_cache(struct resv_map *resv, long from, long to) argument
442 VM_BUG_ON(resv->region_cache_count <= 0);
444 resv->region_cache_count--;
445 nrg = list_first_entry(&resv->region_cache, struct file_region, link);
468 struct resv_map *resv,
487 if (!resv->pages_per_hpage)
488 resv->pages_per_hpage = pages_per_huge_page(h);
492 VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
520 static void coalesce_file_region(struct resv_map *resv, struct file_region *rg) argument
525 if (&prg->link != &resv
466 record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg, struct hstate *h, struct resv_map *resv, struct file_region *nrg) argument
573 add_reservation_in_range(struct resv_map *resv, long f, long t, struct hugetlb_cgroup *h_cg, struct hstate *h, long *regions_needed) argument
706 region_add(struct resv_map *resv, long f, long t, long in_regions_needed, struct hstate *h, struct hugetlb_cgroup *h_cg) argument
773 region_chg(struct resv_map *resv, long f, long t, long *out_regions_needed) argument
809 region_abort(struct resv_map *resv, long f, long t, long regions_needed) argument
832 region_del(struct resv_map *resv, long f, long t) argument
959 region_count(struct resv_map *resv, long f, long t) argument
2806 struct resv_map *resv; local
5175 struct resv_map *resv = vma_resv_map(vma); local
5214 struct resv_map *resv; local
[all...]
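The hugetlb.c hits above outline a two-phase reservation protocol: region_chg() counts how many file_region entries a range needs and pre-populates the region cache, region_add() commits the reservation, and region_abort() rolls it back if the caller bails out between the two. A minimal sketch of that call sequence, using the signatures shown in the hits (these helpers are static to mm/hugetlb.c, so this would have to live there; the accounting step is a hypothetical placeholder):

    /* Sketch: reserve huge pages for file offsets [f, t) against a resv_map. */
    static long reserve_range_sketch(struct resv_map *resv, long f, long t,
                                     struct hstate *h,
                                     struct hugetlb_cgroup *h_cg)
    {
        long regions_needed, chg;

        /* Phase 1: count needed entries and preallocate cache nodes. */
        chg = region_chg(resv, f, t, &regions_needed);
        if (chg < 0)
            return chg;

        if (some_accounting_step_fails()) {     /* hypothetical placeholder */
            /* Undo phase 1 if we cannot follow through. */
            region_abort(resv, f, t, regions_needed);
            return -ENOMEM;
        }

        /* Phase 2: commit the range into the reservation map. */
        return region_add(resv, f, t, regions_needed, h, h_cg);
    }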
/linux-master/drivers/gpu/drm/
drm_gem_atomic_helper.c
3 #include <linux/dma-resv.h>
122 * This function extracts the exclusive fence from &drm_gem_object.resv and
170 ret = dma_resv_get_singleton(obj->resv, usage, &new);
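dma_resv_get_singleton() folds every fence on a reservation that matches the given usage into a single fence the caller can store or wait on; the helper above uses it to derive a plane's input fence from &drm_gem_object.resv. A hedged sketch of the call (usage choice illustrative):

    #include <drm/drm_gem.h>
    #include <linux/dma-resv.h>

    /* Sketch: get one fence summarizing all writers on a GEM object, e.g.
     * to gate scanout on pending rendering. *out may come back NULL when
     * there is nothing to wait for; a fence array may be allocated. */
    static int write_fence_sketch(struct drm_gem_object *obj,
                                  struct dma_fence **out)
    {
        return dma_resv_get_singleton(obj->resv, DMA_RESV_USAGE_WRITE, out);
    }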
/linux-master/drivers/gpu/drm/amd/amdkfd/
kfd_svm.c
576 bp.resv = NULL;
606 r = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
/linux-master/drivers/gpu/drm/amd/amdgpu/
amdgpu_object.c
266 bp.resv = NULL;
545 .resv = bp->resv
617 bp->resv, bp->destroy);
632 r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence, true);
636 dma_resv_add_fence(bo->tbo.base.resv, fence,
640 if (!bp->resv)
653 if (!bp->resv)
654 dma_resv_unlock(bo->tbo.base.resv);
784 r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNE
1436 struct dma_resv *resv = bo->tbo.base.resv; local
1464 amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv, enum amdgpu_sync_mode sync_mode, void *owner, bool intr) argument
[all...]
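The amdgpu_object.c hits show the standard publish sequence: with the reservation lock held, first make room with dma_resv_reserve_fences() (which can fail with -ENOMEM), then attach the operation's fence via dma_resv_add_fence(), and only unlock if the caller did not pass in its own resv (the !bp->resv checks above). A hedged sketch of the core of that pattern on a TTM-backed BO:

    #include <drm/ttm/ttm_bo.h>
    #include <linux/dma-resv.h>

    /* Sketch: publish a clear/fill fence through a BO's reservation.
     * KERNEL usage marks a kernel-internal op that all users must wait on. */
    static int publish_fence_sketch(struct ttm_buffer_object *tbo,
                                    struct dma_fence *fence)
    {
        int r;

        r = dma_resv_lock(tbo->base.resv, NULL);
        if (r)
            return r;

        r = dma_resv_reserve_fences(tbo->base.resv, 1); /* reserve a slot first */
        if (!r)
            dma_resv_add_fence(tbo->base.resv, fence, DMA_RESV_USAGE_KERNEL);

        dma_resv_unlock(tbo->base.resv);
        return r;
    }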
amdgpu_mes.c
1279 r = dma_resv_get_singleton(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
amdgpu_amdkfd_gpuvm.c
330 ttm_bo_type_sg, mem->bo->tbo.base.resv, &gem_obj, 0);
351 * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
365 dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
399 BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
401 dma_resv_unlock(bo->tbo.base.resv);
442 ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
446 dma_resv_add_fence(bo->tbo.base.resv, fence,
1341 ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
1417 ret = dma_resv_reserve_fences(vm->root.bo->tbo.base.resv, 1);
1420 dma_resv_add_fence(vm->root.bo->tbo.base.resv,
[all...]
amdgpu_vm.c
336 if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv)
339 dma_resv_assert_held(vm->root.bo->tbo.base.resv);
515 if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) {
897 * @resv: fences we need to sync to
914 struct dma_resv *resv, uint64_t start, uint64_t last,
975 r = vm->update_funcs->prepare(&params, resv, sync_mode);
1070 if (bo->tbo.base.resv != vm->root.bo->tbo.base.resv &&
1071 !dma_resv_trylock(bo->tbo.base.resv))
912 amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, bool immediate, bool unlocked, bool flush_tlb, bool allow_override, struct dma_resv *resv, uint64_t start, uint64_t last, uint64_t flags, uint64_t offset, uint64_t vram_base, struct ttm_resource *res, dma_addr_t *pages_addr, struct dma_fence **fence) argument
1128 struct dma_resv *resv; local
1353 struct dma_resv *resv = vm->root.bo->tbo.base.resv; local
1383 struct dma_resv *resv = vm->root.bo->tbo.base.resv; local
1435 struct dma_resv *resv; local
[all...]
amdgpu_ttm.h
152 struct dma_resv *resv,
159 struct dma_resv *resv,
163 struct dma_resv *resv,
amdgpu_ttm.c
279 * @resv: resv object to sync to
291 struct dma_resv *resv,
327 resv, &next, false, true, tmz);
372 bo->base.resv, &fence);
1396 dma_resv_for_each_fence(&resv_cursor, bo->base.resv,
2112 struct dma_resv *resv,
2135 if (!resv)
2138 return drm_sched_job_add_resv_dependencies(&(*job)->base, resv,
2144 struct dma_resv *resv,
287 amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, const struct amdgpu_copy_mem *src, const struct amdgpu_copy_mem *dst, uint64_t size, bool tmz, struct dma_resv *resv, struct dma_fence **f) argument
2105 amdgpu_ttm_prepare_job(struct amdgpu_device *adev, bool direct_submit, unsigned int num_dw, struct dma_resv *resv, bool vm_needs_flush, struct amdgpu_job **job, bool delayed) argument
2192 amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data, uint64_t dst_addr, uint32_t byte_count, struct dma_resv *resv, struct dma_fence **fence, bool vm_needs_flush, bool delayed) argument
2229 amdgpu_fill_buffer(struct amdgpu_bo *bo, uint32_t src_data, struct dma_resv *resv, struct dma_fence **f, bool delayed) argument
[all...]
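amdgpu_ttm.c (line 2138 above) and xe's job_add_deps() further down both feed resv fences into the GPU scheduler through drm_sched_job_add_resv_dependencies(), so the job is held back until those fences signal. A hedged sketch of the call (usage choice illustrative; BOOKKEEP is the weakest usage and so matches every fence on the object):

    #include <drm/gpu_scheduler.h>
    #include <linux/dma-resv.h>

    /* Sketch: make a scheduler job depend on the fences already attached
     * to a BO's reservation. The resv lock must be held while iterating. */
    static int add_bo_deps_sketch(struct drm_sched_job *job,
                                  struct dma_resv *resv)
    {
        dma_resv_assert_held(resv);
        return drm_sched_job_add_resv_dependencies(job, resv,
                                                   DMA_RESV_USAGE_BOOKKEEP);
    }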
amdgpu_object.h
57 struct dma_resv *resv; member in struct:amdgpu_bo_param
338 int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
amdgpu_cs.c
785 .resv = bo->tbo.base.resv
1196 struct dma_resv *resv = bo->tbo.base.resv; local
1201 r = amdgpu_sync_resv(p->adev, &p->sync, resv, sync_mode,
1319 dma_resv_add_fence(gobj->resv,
1325 dma_resv_add_fence(gobj->resv, p->fence, DMA_RESV_USAGE_WRITE);
1778 if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->exec.ticket)
/linux-master/drivers/usb/gadget/function/
f_fs.c
20 #include <linux/dma-resv.h>
1312 dma_resv_lock(dmabuf->resv, NULL);
1314 dma_resv_unlock(dmabuf->resv);
1390 * the resv object, which would deadlock.
1431 return dma_resv_lock_interruptible(dmabuf->resv, NULL);
1433 if (!dma_resv_trylock(dmabuf->resv))
1502 dma_resv_unlock(dmabuf->resv);
1624 retl = dma_resv_wait_timeout(dmabuf->resv,
1634 ret = dma_resv_reserve_fences(dmabuf->resv, 1);
1672 dma_resv_add_fence(dmabuf->resv,
[all...]
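The f_fs.c hits walk through the importer-side sequence on a dma-buf: lock the resv (interruptibly, or trylock for nonblocking I/O to avoid sleeping), wait out prior fences, reserve a slot, attach your own fence, unlock. A condensed, hedged sketch of that sequence (timeout and usage choices illustrative):

    #include <linux/dma-buf.h>
    #include <linux/dma-resv.h>
    #include <linux/jiffies.h>

    /* Sketch: wait for prior writers on a dma-buf, then attach a read fence. */
    static int attach_read_fence_sketch(struct dma_buf *dmabuf,
                                        struct dma_fence *fence)
    {
        long t;
        int ret;

        ret = dma_resv_lock_interruptible(dmabuf->resv, NULL);
        if (ret)
            return ret;

        /* Returns remaining jiffies, 0 on timeout, or a negative error. */
        t = dma_resv_wait_timeout(dmabuf->resv, DMA_RESV_USAGE_WRITE,
                                  true, msecs_to_jiffies(100));
        if (t <= 0) {
            ret = t ? (int)t : -ETIMEDOUT;
            goto out_unlock;
        }

        ret = dma_resv_reserve_fences(dmabuf->resv, 1);
        if (!ret)
            dma_resv_add_fence(dmabuf->resv, fence, DMA_RESV_USAGE_READ);

    out_unlock:
        dma_resv_unlock(dmabuf->resv);
        return ret;
    }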
/linux-master/drivers/gpu/drm/xe/
xe_vm.c
288 err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues);
294 dma_resv_add_fence(bo->ttm.base.resv,
518 ret = dma_resv_reserve_fences(obj->resv, num_fences);
1043 * @vma: The vma for which we want to lock the vm resv and any attached
1044 * object's resv.
1546 * All vm operations will add shared fences to resv.
1549 * install a fence to resv. Hence it's safe to
3439 dma_resv_lock(bo->ttm.base.resv, NULL);
3448 dma_resv_unlock(bo->ttm.base.resv);
xe_migrate.c
619 static int job_add_deps(struct xe_sched_job *job, struct dma_resv *resv, argument
622 return drm_sched_job_add_resv_dependencies(&job->drm, resv, usage);
822 err = job_add_deps(job, src_bo->ttm.base.resv,
825 err = job_add_deps(job, dst_bo->ttm.base.resv,
1068 err = job_add_deps(job, bo->ttm.base.resv,
1185 if (bo && !dma_resv_test_signaled(bo->ttm.base.resv,
1245 * @bo: The bo whose dma-resv we will await before updating, or NULL if userptr.
1397 err = job_add_deps(job, bo->ttm.base.resv,
/linux-master/drivers/gpu/drm/vmwgfx/
vmwgfx_gem.c
166 .resv = attach->dmabuf->resv,
171 dma_resv_lock(params.resv, NULL);
181 dma_resv_unlock(params.resv);
vmwgfx_bo.h
58 struct dma_resv *resv; member in struct:vmw_bo_params
vmwgfx_bo.c
288 dma_resv_assert_held(bo->base.resv);
382 .resv = params->resv,
400 params->sg, params->resv, destroy);
460 lret = dma_resv_wait_timeout(bo->base.resv, DMA_RESV_USAGE_READ,
647 ret = dma_resv_reserve_fences(bo->base.resv, 1);
649 dma_resv_add_fence(bo->base.resv, &fence->base,
vmwgfx_blit.c
466 dma_resv_assert_held(dst->base.resv);
468 dma_resv_assert_held(src->base.resv);
/linux-master/io_uring/
io_uring.c
4052 for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
4053 if (p.resv[i])
4160 BUILD_BUG_ON(offsetof(struct io_uring_buf, resv) !=
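Not every hit is a dma_resv: io_uring's p.resv above is the plain reserved-field idiom, where unused uAPI struct space must be zero today so the kernel can assign it meaning tomorrow without breaking old binaries. A hedged sketch of the idiom (struct name and layout illustrative, not a real uAPI struct):

    #include <linux/types.h>
    #include <linux/kernel.h>
    #include <linux/errno.h>

    struct example_uapi_params {        /* illustrative layout */
        __u32 flags;
        __u32 resv[3];                  /* must be zero for now */
    };

    static int check_resv_sketch(const struct example_uapi_params *p)
    {
        int i;

        for (i = 0; i < ARRAY_SIZE(p->resv); i++)
            if (p->resv[i])
                return -EINVAL;         /* same policy as io_uring above */
        return 0;
    }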
/linux-master/drivers/iommu/
mtk_iommu.c
988 const struct mtk_iommu_iova_region *resv, *curdom; local
996 resv = data->plat_data->iova_region + i;
999 if (resv->iova_base <= curdom->iova_base ||
1000 resv->iova_base + resv->size >= curdom->iova_base + curdom->size)
1003 region = iommu_alloc_resv_region(resv->iova_base, resv->size,
/linux-master/drivers/iommu/intel/
iommu.c
4374 struct iommu_resv_region *resv; local
4387 resv = iommu_alloc_resv_region(rmrr->base_address,
4390 if (!resv)
4393 list_add_tail(&resv->list, head);
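In the IOMMU drivers, resv names reserved IOVA ranges: mtk_iommu.c and intel/iommu.c above both allocate descriptors with iommu_alloc_resv_region() and chain them onto the list handed to the driver's get_resv_regions() callback. A hedged sketch of that callback shape (the address range is made up; real drivers pull it from firmware tables such as the RMRRs above):

    #include <linux/iommu.h>
    #include <linux/sizes.h>

    /* Sketch: report one direct-mapped range the IOMMU core must keep
     * out of the IOVA allocator. */
    static void get_resv_regions_sketch(struct device *dev,
                                        struct list_head *head)
    {
        struct iommu_resv_region *resv;

        resv = iommu_alloc_resv_region(0x80000000, SZ_16M, /* illustrative range */
                                       IOMMU_READ | IOMMU_WRITE,
                                       IOMMU_RESV_DIRECT, GFP_KERNEL);
        if (!resv)
            return;                     /* allocation failure: report nothing */

        list_add_tail(&resv->list, head);
    }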
/linux-master/drivers/iommu/amd/
init.c
146 u8 resv[6]; member in struct:ivmd_header
/linux-master/drivers/gpu/drm/qxl/
qxl_release.c
243 ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1);
472 dma_resv_add_fence(bo->base.resv, &release->base,
475 dma_resv_unlock(bo->base.resv);
/linux-master/drivers/gpu/drm/panfrost/
panfrost_mmu.c
473 dma_resv_lock(obj->resv, NULL);
539 dma_resv_unlock(obj->resv);
548 dma_resv_unlock(obj->resv);
/linux-master/drivers/gpu/drm/amd/display/amdgpu_dm/
amdgpu_dm_wb.c
109 r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);

Completed in 372 milliseconds
