Searched refs:iova (Results 76 - 100 of 257) sorted by relevance


/linux-master/drivers/infiniband/core/
uverbs_std_types_mr.c
181 &mr->iova, sizeof(mr->iova));
195 u64 offset, length, iova; local
213 ret = uverbs_copy_from(&iova, attrs,
218 if ((offset & ~PAGE_MASK) != (iova & ~PAGE_MASK))
240 mr = pd->device->ops.reg_user_mr_dmabuf(pd, offset, length, iova, fd,
/linux-master/drivers/gpu/drm/etnaviv/
etnaviv_iommu_v2.c
93 unsigned long iova, phys_addr_t paddr,
109 mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
110 stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
122 unsigned long iova, size_t size)
130 mtlb_entry = (iova & MMUv2_MTLB_MASK) >> MMUv2_MTLB_SHIFT;
131 stlb_entry = (iova & MMUv2_STLB_MASK) >> MMUv2_STLB_SHIFT;
92 etnaviv_iommuv2_map(struct etnaviv_iommu_context *context, unsigned long iova, phys_addr_t paddr, size_t size, int prot) argument
121 etnaviv_iommuv2_unmap(struct etnaviv_iommu_context *context, unsigned long iova, size_t size) argument
etnaviv_mmu.h
25 int (*map)(struct etnaviv_iommu_context *context, unsigned long iova,
27 size_t (*unmap)(struct etnaviv_iommu_context *context, unsigned long iova,
/linux-master/drivers/iommu/iommufd/
iommufd_private.h
60 int iopt_get_pages(struct io_pagetable *iopt, unsigned long iova,
67 unsigned long *iova, void __user *uptr,
73 int iopt_unmap_iova(struct io_pagetable *iopt, unsigned long iova,
84 void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova,
434 unsigned int ioas_id, u64 *iova, u32 *flags);
442 u64 *iova, u32 *flags)
440 iommufd_test_syz_conv_iova_id(struct iommufd_ucmd *ucmd, unsigned int ioas_id, u64 *iova, u32 *flags) argument
/linux-master/drivers/vfio/
vfio.h
248 dma_addr_t iova, int npage,
251 dma_addr_t iova, int npage);
253 dma_addr_t iova, void *data,
293 dma_addr_t iova, int npage,
300 dma_addr_t iova, int npage)
305 dma_addr_t iova, void *data,
292 vfio_device_container_pin_pages(struct vfio_device *device, dma_addr_t iova, int npage, int prot, struct page **pages) argument
299 vfio_device_container_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage) argument
304 vfio_device_container_dma_rw(struct vfio_device *device, dma_addr_t iova, void *data, size_t len, bool write) argument
vfio_main.c
1048 if (!IS_ALIGNED(range.iova, control.page_size) ||
1054 if (check_add_overflow(range.iova, range.length, &iova_end) ||
1060 nodes[i].start = range.iova;
1061 nodes[i].last = range.iova + range.length - 1;
1105 unsigned long iova, size_t length,
1110 return device->log_ops->log_read_and_clear(device, iova, length, iter);
1141 if (check_add_overflow(report.iova, report.length, &iova_end) ||
1145 iter = iova_bitmap_alloc(report.iova, report.length,
1535 * @iova [in] : starting IOVA of user pages to be pinned.
1545 int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova, argument
1104 vfio_device_log_read_and_clear(struct iova_bitmap *iter, unsigned long iova, size_t length, void *opaque) argument
1586 vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage) argument
1625 vfio_dma_rw(struct vfio_device *device, dma_addr_t iova, void *data, size_t len, bool write) argument
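
The vfio.h and vfio_main.c hits above cover the exported pinning and DMA helpers (vfio_pin_pages, vfio_unpin_pages, vfio_dma_rw). A minimal sketch of how an emulated-device driver might use them, assuming "device" is an opened struct vfio_device and "guest_iova" is a page-aligned IOVA the guest programmed into the device; the function name is illustrative, not one of the matches above.

#include <linux/iommu.h>
#include <linux/vfio.h>

/* Sketch only: pin one guest page at guest_iova, copy a descriptor out of
 * the same IOVA through the container, then drop the pin. */
static int demo_read_descriptor(struct vfio_device *device, dma_addr_t guest_iova)
{
	struct page *page;
	u64 desc;
	int ret;

	/* Pin a single page backing guest_iova for read/write access. */
	ret = vfio_pin_pages(device, guest_iova, 1, IOMMU_READ | IOMMU_WRITE, &page);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	/* Copy 8 bytes out of guest memory at guest_iova (write=false reads). */
	ret = vfio_dma_rw(device, guest_iova, &desc, sizeof(desc), false);

	vfio_unpin_pages(device, guest_iova, 1);
	return ret;
}
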
/linux-master/include/linux/soc/qcom/
geni-se.h
503 void geni_se_tx_init_dma(struct geni_se *se, dma_addr_t iova, size_t len);
506 dma_addr_t *iova);
508 void geni_se_rx_init_dma(struct geni_se *se, dma_addr_t iova, size_t len);
511 dma_addr_t *iova);
513 void geni_se_tx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len);
515 void geni_se_rx_dma_unprep(struct geni_se *se, dma_addr_t iova, size_t len);
/linux-master/drivers/gpu/drm/msm/
msm_fb.c
27 uint64_t iova[DRM_FORMAT_MAX_PLANES]; member in struct:msm_framebuffer
91 ret = msm_gem_get_and_pin_iova(fb->obj[i], aspace, &msm_fb->iova[i]);
92 drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)\n",
93 fb->base.id, i, msm_fb->iova[i], ret);
115 memset(msm_fb->iova, 0, sizeof(msm_fb->iova));
122 return msm_fb->iova[plane] + fb->offsets[plane];
msm_gem.c
409 * iova range) in addition to removing the iommu mapping. In the eviction
410 * case (!close), we keep the iova allocated, but only remove the iommu
468 GEM_WARN_ON(vma->iova < range_start);
469 GEM_WARN_ON((vma->iova + obj->size) > range_end);
535 struct msm_gem_address_space *aspace, uint64_t *iova,
549 *iova = vma->iova;
557 * get iova and pin it. Should have a matching put
558 * limits iova to specified range (in pages)
561 struct msm_gem_address_space *aspace, uint64_t *iova,
534 get_and_pin_iova_range_locked(struct drm_gem_object *obj, struct msm_gem_address_space *aspace, uint64_t *iova, u64 range_start, u64 range_end) argument
560 msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj, struct msm_gem_address_space *aspace, uint64_t *iova, u64 range_start, u64 range_end) argument
574 msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct msm_gem_address_space *aspace, uint64_t *iova) argument
584 msm_gem_get_iova(struct drm_gem_object *obj, struct msm_gem_address_space *aspace, uint64_t *iova) argument
624 msm_gem_set_iova(struct drm_gem_object *obj, struct msm_gem_address_space *aspace, uint64_t iova) argument
1314 msm_gem_kernel_new(struct drm_device *dev, uint32_t size, uint32_t flags, struct msm_gem_address_space *aspace, struct drm_gem_object **bo, uint64_t *iova) argument
msm_ringbuffer.h
68 uint64_t iova; member in struct:msm_ringbuffer
msm_gem_submit.c
319 submit->bos[i].iova = vma->iova;
371 struct drm_gem_object **obj, uint64_t *iova)
381 if (iova)
382 *iova = submit->bos[idx].iova;
414 uint64_t iova; local
433 ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova);
437 iova += submit_reloc.reloc_offset;
440 iova >>
370 submit_bo(struct msm_gem_submit *submit, uint32_t idx, struct drm_gem_object **obj, uint64_t *iova) argument
783 uint64_t iova; local
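
The msm_gem.c and msm_fb.c hits show the driver's per-address-space GPU VA helpers: msm_gem_get_and_pin_iova() resolves an object's iova and pins it, and the comment at msm_gem.c:557 notes it needs a matching put. A rough sketch of that get/use/put flow, assuming the driver-internal msm_gem.h header and a matching msm_gem_unpin_iova() put helper (the latter is an assumption based on that comment, not one of the matches listed above):

#include "msm_gem.h"	/* driver-internal header, for illustration only */

/* Sketch: resolve and pin a GEM object's GPU VA in an address space,
 * hand it to hardware, then release the pin. */
static int demo_pin_and_use(struct drm_gem_object *obj,
			    struct msm_gem_address_space *aspace)
{
	uint64_t iova;
	int ret;

	ret = msm_gem_get_and_pin_iova(obj, aspace, &iova);
	if (ret)
		return ret;

	/* ... program "iova" into the GPU, e.g. a ring base or scanout address ... */

	msm_gem_unpin_iova(obj, aspace);	/* assumed matching put */
	return 0;
}
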
/linux-master/drivers/net/ipa/
ipa_mem.c
456 unsigned long iova; local
472 iova = phys; /* We just want a direct mapping */
474 ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE,
479 ipa->imem_iova = iova;
530 unsigned long iova; local
580 iova = phys; /* We just want a direct mapping */
582 ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE,
587 ipa->smem_iova = iova;
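
The ipa_mem.c hits show the common identity-mapping pattern for carving a device-visible window out of an IOMMU domain: the IOVA is simply set equal to the physical address and handed to iommu_map(). A minimal sketch of that pattern against the iommu_map()/iommu_unmap() prototypes visible further down in the iommu.c results; the function name is illustrative.

#include <linux/gfp.h>
#include <linux/iommu.h>

/* Sketch: map a physical region 1:1 into a domain, as ipa_mem.c does for
 * its IMEM and SMEM regions, then tear the mapping down again. */
static int demo_map_identity(struct iommu_domain *domain,
			     phys_addr_t phys, size_t size)
{
	unsigned long iova = phys;	/* direct mapping: IOVA == physical */
	int ret;

	ret = iommu_map(domain, iova, phys, size,
			IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
	if (ret)
		return ret;

	/* ... the device can now DMA through "iova" ... */

	iommu_unmap(domain, iova, size);
	return 0;
}
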
/linux-master/drivers/net/ethernet/marvell/octeontx2/nic/
otx2_txrx.c
195 u64 iova, int len, struct nix_rx_parse_s *parse,
202 va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));
227 pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL);
621 u64 dma_addr, *iova = NULL; local
634 iova = (void *)sg + sizeof(*sg);
649 *iova++ = dma_addr;
731 int alg, u64 iova, int ptp_offset,
740 mem->addr = iova;
857 u64 *iova = NULL; local
868 iova
194 otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb, u64 iova, int len, struct nix_rx_parse_s *parse, int qidx) argument
730 otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset, int alg, u64 iova, int ptp_offset, u64 base_ns, bool udp_csum_crt) argument
1091 u64 iova; local
1219 u64 iova; local
1354 u64 *iova = NULL; local
1370 otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx) argument
1413 u64 iova, pa; local
/linux-master/drivers/vhost/
vdpa.c
984 u64 iova, u64 size, u64 pa, u32 perm, void *opaque)
992 r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
998 r = ops->dma_map(vdpa, asid, iova, size, pa, perm, opaque);
1003 r = iommu_map(v->domain, iova, pa, size,
1008 vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
1020 u64 iova, u64 size)
1026 vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova
983 vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb, u64 iova, u64 size, u64 pa, u32 perm, void *opaque) argument
1018 vhost_vdpa_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb, u64 iova, u64 size) argument
1035 vhost_vdpa_va_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb, u64 iova, u64 size, u64 uaddr, u32 perm) argument
1086 vhost_vdpa_pa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb, u64 iova, u64 size, u64 uaddr, u32 perm) argument
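
The vdpa.c hits above follow a bookkeeping-then-mapping pattern: the translation is recorded in the software IOTLB first, the real mapping is established second (via the parent device's dma_map op or iommu_map()), and the IOTLB entry is rolled back if that fails. A hedged sketch of that pattern using only the iommu_map() path; the function name is illustrative and the prot bits are hard-coded where vdpa.c derives them from perm.

#include <linux/gfp.h>
#include <linux/iommu.h>
#include <linux/vhost_iotlb.h>

/* Sketch: software IOTLB entry first, hardware mapping second, roll back
 * the IOTLB entry if the hardware mapping fails. */
static int demo_vdpa_style_map(struct vhost_iotlb *iotlb,
			       struct iommu_domain *domain,
			       u64 iova, u64 size, u64 pa, u32 perm)
{
	int r;

	r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
				      pa, perm, NULL);
	if (r)
		return r;

	r = iommu_map(domain, iova, pa, size,
		      IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
	if (r)
		vhost_iotlb_del_range(iotlb, iova, iova + size - 1);

	return r;
}
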
/linux-master/drivers/iommu/arm/arm-smmu/
arm-smmu.c
278 static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size, argument
290 iova = (iova >> 12) << 12;
291 iova |= cfg->asid;
293 arm_smmu_cb_write(smmu, idx, reg, iova);
294 iova += granule;
297 iova >>= 12;
298 iova |= (u64)cfg->asid << 48;
300 arm_smmu_cb_writeq(smmu, idx, reg, iova);
301 iova
306 arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size, size_t granule, void *cookie, int reg) argument
326 arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size, size_t granule, void *cookie) argument
341 arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie) argument
349 arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size, size_t granule, void *cookie) argument
357 arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie) argument
365 arm_smmu_tlb_inv_walk_s2_v1(unsigned long iova, size_t size, size_t granule, void *cookie) argument
377 arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie) argument
411 unsigned long iova; local
1216 arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t pgsize, size_t pgcount, int prot, gfp_t gfp, size_t *mapped) argument
1234 arm_smmu_unmap_pages(struct iommu_domain *domain, unsigned long iova, size_t pgsize, size_t pgcount, struct iommu_iotlb_gather *iotlb_gather) argument
1282 arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, dma_addr_t iova) argument
1334 arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) argument
/linux-master/drivers/gpu/drm/tegra/
gem.h
40 dma_addr_t iova; member in struct:tegra_bo
uapi.h
33 dma_addr_t iova; member in struct:tegra_drm_mapping
plane.h
47 dma_addr_t iova[3]; member in struct:tegra_plane_state
nvdec.c
229 nvdec->falcon.firmware.iova);
233 nvdec->falcon.firmware.iova);
248 dma_addr_t iova; local
263 virt = dma_alloc_coherent(nvdec->dev, size, &iova, GFP_KERNEL);
265 err = dma_mapping_error(nvdec->dev, iova);
269 virt = tegra_drm_alloc(tegra, size, &iova);
275 nvdec->falcon.firmware.iova = iova;
302 dma_free_coherent(nvdec->dev, size, virt, iova);
304 tegra_drm_free(tegra, size, virt, iova);
/linux-master/drivers/gpu/drm/msm/adreno/
a5xx_preempt.c
209 a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
228 u64 iova = 0, counters_iova = 0; local
232 MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova);
251 a5xx_gpu->preempt_iova[ring->id] = iova;
/linux-master/include/linux/soc/apple/
rtkit.h
23 * @iova: Device VA of shared memory buffer.
32 dma_addr_t iova; member in struct:apple_rtkit_shmem
/linux-master/drivers/gpu/drm/msm/dsi/
dsi_cfg.h
57 int (*dma_base_get)(struct msm_dsi_host *msm_host, uint64_t *iova);
/linux-master/drivers/iommu/
of_iommu.c
248 phys_addr_t iova; local
254 maps = of_translate_dma_region(np, maps, &iova, &length);
259 type = iommu_resv_region_get_type(dev, &phys, iova, length);
261 region = iommu_alloc_resv_region(iova, length, prot, type,
iommu.c
2362 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) argument
2365 return iova;
2370 return domain->ops->iova_to_phys(domain, iova);
2374 static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, argument
2380 unsigned long addr_merge = paddr | iova;
2410 if ((iova ^ paddr) & (pgsize_next - 1))
2428 static int __iommu_map(struct iommu_domain *domain, unsigned long iova, argument
2432 unsigned long orig_iova = iova;
2452 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
2453 pr_err("unaligned: iova
2491 iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot, gfp_t gfp) argument
2521 __iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather) argument
2575 iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) argument
2589 iommu_unmap_fast(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather) argument
2597 iommu_map_sg(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, unsigned int nents, int prot, gfp_t gfp) argument
2682 report_iommu_fault(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags) argument
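
Besides iommu_map()/iommu_unmap(), the iommu.c hits include iommu_iova_to_phys(), the reverse lookup that drivers such as otx2_txrx.c wrap to turn a hardware-reported IOVA back into a CPU pointer. A hedged sketch of that lookup; the phys_to_virt() step is only valid when the backing memory sits in the kernel's linear map, as it does in the otx2 receive path, and the function name is illustrative.

#include <linux/io.h>
#include <linux/iommu.h>

/* Sketch: translate a device-visible IOVA back to a kernel virtual address. */
static void *demo_iova_to_virt(struct iommu_domain *domain, dma_addr_t iova)
{
	phys_addr_t phys = iommu_iova_to_phys(domain, iova);

	if (!phys)
		return NULL;	/* no mapping at this IOVA */

	return phys_to_virt(phys);
}
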
/linux-master/drivers/infiniband/hw/mana/
mr.c
98 u64 iova, int access_flags,
112 "start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x",
113 start, iova, length, access_flags);
130 err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova);
144 mr_params.gva.virtual_address = iova;
97 mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length, u64 iova, int access_flags, struct ib_udata *udata) argument

