Searched refs:iova (Results 76 - 100 of 249) sorted by relevance


/linux-master/drivers/gpu/drm/msm/
msm_fb.c
27 uint64_t iova[DRM_FORMAT_MAX_PLANES]; member in struct:msm_framebuffer
91 ret = msm_gem_get_and_pin_iova(fb->obj[i], aspace, &msm_fb->iova[i]);
92 drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)\n",
93 fb->base.id, i, msm_fb->iova[i], ret);
115 memset(msm_fb->iova, 0, sizeof(msm_fb->iova));
122 return msm_fb->iova[plane] + fb->offsets[plane];
msm_gem.c
409 * iova range) in addition to removing the iommu mapping. In the eviction
410 * case (!close), we keep the iova allocated, but only remove the iommu
468 GEM_WARN_ON(vma->iova < range_start);
469 GEM_WARN_ON((vma->iova + obj->size) > range_end);
535 struct msm_gem_address_space *aspace, uint64_t *iova,
549 *iova = vma->iova;
557 * get iova and pin it. Should have a matching put
558 * limits iova to specified range (in pages)
561 struct msm_gem_address_space *aspace, uint64_t *iova,
534 get_and_pin_iova_range_locked(struct drm_gem_object *obj, struct msm_gem_address_space *aspace, uint64_t *iova, u64 range_start, u64 range_end) argument
560 msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj, struct msm_gem_address_space *aspace, uint64_t *iova, u64 range_start, u64 range_end) argument
574 msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct msm_gem_address_space *aspace, uint64_t *iova) argument
584 msm_gem_get_iova(struct drm_gem_object *obj, struct msm_gem_address_space *aspace, uint64_t *iova) argument
624 msm_gem_set_iova(struct drm_gem_object *obj, struct msm_gem_address_space *aspace, uint64_t iova) argument
1314 msm_gem_kernel_new(struct drm_device *dev, uint32_t size, uint32_t flags, struct msm_gem_address_space *aspace, struct drm_gem_object **bo, uint64_t *iova) argument
[all...]
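
Note: the msm_gem.c hits above include the full signatures of the iova helpers (get_and_pin, get, set, and msm_gem_kernel_new at 1314). As a minimal sketch of the convenience path, assuming a struct msm_gpu *gpu with the usual dev/aspace members (not shown in these hits), allocating a kernel-owned BO and receiving its GPU iova looks roughly like this; msm_gem_kernel_new() returns the kernel vaddr or an ERR_PTR:

/* Sketch only, not driver code as-is: allocate a kernel BO and
 * learn its GPU iova, per the msm_gem_kernel_new() signature in
 * the hit at 1314 above. */
static int example_alloc_bo(struct msm_gpu *gpu)
{
        struct drm_gem_object *bo;
        uint64_t iova;
        void *vaddr;

        vaddr = msm_gem_kernel_new(gpu->dev, SZ_4K, MSM_BO_WC,
                                   gpu->aspace, &bo, &iova);
        if (IS_ERR(vaddr))
                return PTR_ERR(vaddr);

        /* the BO is pinned; iova is valid in gpu->aspace */
        return 0;
}
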
msm_ringbuffer.h
68 uint64_t iova; member in struct:msm_ringbuffer
msm_gem_submit.c
319 submit->bos[i].iova = vma->iova;
371 struct drm_gem_object **obj, uint64_t *iova)
381 if (iova)
382 *iova = submit->bos[idx].iova;
414 uint64_t iova; local
433 ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova);
437 iova += submit_reloc.reloc_offset;
440 iova >>
370 submit_bo(struct msm_gem_submit *submit, uint32_t idx, struct drm_gem_object **obj, uint64_t *iova) argument
783 uint64_t iova; local
[all...]
/linux-master/drivers/gpu/drm/etnaviv/
etnaviv_mmu.h
25 int (*map)(struct etnaviv_iommu_context *context, unsigned long iova,
27 size_t (*unmap)(struct etnaviv_iommu_context *context, unsigned long iova,
/linux-master/drivers/net/ipa/
ipa_mem.c
457 unsigned long iova; local
473 iova = phys; /* We just want a direct mapping */
475 ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE,
480 ipa->imem_iova = iova;
531 unsigned long iova; local
581 iova = phys; /* We just want a direct mapping */
583 ret = iommu_map(domain, iova, phys, size, IOMMU_READ | IOMMU_WRITE,
588 ipa->smem_iova = iova;
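
Note: both ipa_mem.c hits use the same idiom: the iova is set equal to the physical address so the device gets a 1:1 ("direct") mapping, and iommu_map() then installs it. A minimal sketch of the pattern, with the six-argument iommu_map() signature that the iommu.c hits further down confirm:

/* Sketch of the direct-mapping idiom from ipa_mem.c: map a
 * physical region at an identical iova. */
static int example_direct_map(struct iommu_domain *domain,
                              phys_addr_t phys, size_t size)
{
        unsigned long iova = phys;      /* 1:1: iova == phys */

        return iommu_map(domain, iova, phys, size,
                         IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
}
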
/linux-master/drivers/net/ethernet/marvell/octeontx2/nic/
otx2_txrx.c
195 u64 iova, int len, struct nix_rx_parse_s *parse,
202 va = phys_to_virt(otx2_iova_to_phys(pfvf->iommu_domain, iova));
227 pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL);
618 u64 dma_addr, *iova = NULL; local
631 iova = (void *)sg + sizeof(*sg);
646 *iova++ = dma_addr;
728 int alg, u64 iova, int ptp_offset,
737 mem->addr = iova;
854 u64 *iova = NULL; local
865 iova
194 otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb, u64 iova, int len, struct nix_rx_parse_s *parse, int qidx) argument
727 otx2_sqe_add_mem(struct otx2_snd_queue *sq, int *offset, int alg, u64 iova, int ptp_offset, u64 base_ns, bool udp_csum_crt) argument
1088 u64 iova; local
1216 u64 iova; local
1351 u64 *iova = NULL; local
1367 otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx) argument
1410 u64 iova, pa; local
[all...]
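
Note: the otx2_txrx.c hit at 202 recovers a CPU pointer from a hardware-provided iova via phys_to_virt(otx2_iova_to_phys(...)). The helper's body is not shown in these results; a plausible sketch (an assumption, not the driver's code) is that with no IOMMU domain attached the iova already is the physical address, and otherwise iommu_iova_to_phys() does the lookup:

/* Assumed behaviour of an iova-to-phys helper (sketch only):
 * without an IOMMU domain, the DMA address is the PA. */
static phys_addr_t example_iova_to_phys(struct iommu_domain *domain,
                                        dma_addr_t iova)
{
        if (!domain)
                return iova;

        return iommu_iova_to_phys(domain, iova);
}
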
/linux-master/drivers/vhost/
vdpa.c
984 u64 iova, u64 size, u64 pa, u32 perm, void *opaque)
992 r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
998 r = ops->dma_map(vdpa, asid, iova, size, pa, perm, opaque);
1003 r = iommu_map(v->domain, iova, pa, size,
1008 vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
1020 u64 iova, u64 size)
1026 vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova
983 vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb, u64 iova, u64 size, u64 pa, u32 perm, void *opaque) argument
1018 vhost_vdpa_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb, u64 iova, u64 size) argument
1035 vhost_vdpa_va_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb, u64 iova, u64 size, u64 uaddr, u32 perm) argument
1086 vhost_vdpa_pa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb, u64 iova, u64 size, u64 uaddr, u32 perm) argument
[all...]
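
Note: the vdpa.c hits show the ordering inside vhost_vdpa_map(): the range is recorded in the vhost IOTLB first, the real mapping is installed second (ops->dma_map() or iommu_map()), and on failure the IOTLB entry is rolled back with vhost_iotlb_del_range(). A condensed sketch of that insert-then-rollback pattern (details elided; the helper name is made up):

/* Sketch of the pattern from vhost_vdpa_map(): the IOTLB entry
 * spans [iova, iova + size - 1]. */
static int example_vdpa_map(struct vhost_iotlb *iotlb,
                            struct iommu_domain *domain,
                            u64 iova, u64 size, u64 pa, u32 perm)
{
        int r;

        r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
                                      pa, perm, NULL);
        if (r)
                return r;

        r = iommu_map(domain, iova, pa, size,
                      IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
        if (r)
                vhost_iotlb_del_range(iotlb, iova, iova + size - 1);

        return r;
}
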
/linux-master/drivers/iommu/arm/arm-smmu/
arm-smmu.c
278 static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size, argument
290 iova = (iova >> 12) << 12;
291 iova |= cfg->asid;
293 arm_smmu_cb_write(smmu, idx, reg, iova);
294 iova += granule;
297 iova >>= 12;
298 iova |= (u64)cfg->asid << 48;
300 arm_smmu_cb_writeq(smmu, idx, reg, iova);
301 iova
306 arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size, size_t granule, void *cookie, int reg) argument
326 arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size, size_t granule, void *cookie) argument
341 arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie) argument
349 arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size, size_t granule, void *cookie) argument
357 arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie) argument
365 arm_smmu_tlb_inv_walk_s2_v1(unsigned long iova, size_t size, size_t granule, void *cookie) argument
377 arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie) argument
411 unsigned long iova; local
1212 arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t pgsize, size_t pgcount, int prot, gfp_t gfp, size_t *mapped) argument
1230 arm_smmu_unmap_pages(struct iommu_domain *domain, unsigned long iova, size_t pgsize, size_t pgcount, struct iommu_iotlb_gather *iotlb_gather) argument
1278 arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, dma_addr_t iova) argument
1330 arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) argument
[all...]
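
Note: arm_smmu_tlb_inv_range_s1() above shows two encodings of the stage-1 invalidation address: the 32-bit register form keeps the 4K page address and packs the ASID into the low bits, while the 64-bit form shifts the iova right by 12 and places the ASID in bits [63:48]. A standalone model of those two encodings (plain C, not driver code):

#include <stdint.h>

/* Model of the s1 TLBI address encodings visible in the hits. */
static uint32_t tlbi_va_s1_short(uint32_t iova, uint8_t asid)
{
        iova = (iova >> 12) << 12;      /* keep the 4K page address */
        return iova | asid;             /* ASID in the low bits */
}

static uint64_t tlbi_va_s1_long(uint64_t iova, uint16_t asid)
{
        iova >>= 12;                    /* page number */
        return iova | ((uint64_t)asid << 48);
}
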
/linux-master/drivers/gpu/drm/tegra/
gem.h
40 dma_addr_t iova; member in struct:tegra_bo
uapi.h
33 dma_addr_t iova; member in struct:tegra_drm_mapping
plane.h
47 dma_addr_t iova[3]; member in struct:tegra_plane_state
nvdec.c
229 nvdec->falcon.firmware.iova);
233 nvdec->falcon.firmware.iova);
248 dma_addr_t iova; local
263 virt = dma_alloc_coherent(nvdec->dev, size, &iova, GFP_KERNEL);
265 err = dma_mapping_error(nvdec->dev, iova);
269 virt = tegra_drm_alloc(tegra, size, &iova);
275 nvdec->falcon.firmware.iova = iova;
302 dma_free_coherent(nvdec->dev, size, virt, iova);
304 tegra_drm_free(tegra, size, virt, iova);
[all...]
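
Note: the nvdec.c hits show the two firmware-buffer paths: behind an IOMMU the buffer comes from tegra_drm_alloc(), otherwise dma_alloc_coherent() hands back both the CPU virtual address and the device-visible iova (a dma_addr_t) in one call, and dma_free_coherent() takes the same pair back. The non-IOMMU side in isolation:

/* Sketch: a coherent allocation returns the kernel vaddr and
 * writes the device-visible address through the third argument. */
static void *example_fw_alloc(struct device *dev, size_t size,
                              dma_addr_t *iova)
{
        return dma_alloc_coherent(dev, size, iova, GFP_KERNEL);
}

static void example_fw_free(struct device *dev, size_t size,
                            void *virt, dma_addr_t iova)
{
        dma_free_coherent(dev, size, virt, iova);
}
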
/linux-master/drivers/gpu/drm/msm/adreno/
H A Da5xx_preempt.c209 a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
228 u64 iova = 0, counters_iova = 0; local
232 MSM_BO_WC | MSM_BO_MAP_PRIV, gpu->aspace, &bo, &iova);
251 a5xx_gpu->preempt_iova[ring->id] = iova;
/linux-master/include/linux/soc/apple/
rtkit.h
23 * @iova: Device VA of shared memory buffer.
32 dma_addr_t iova; member in struct:apple_rtkit_shmem
/linux-master/drivers/gpu/drm/msm/dsi/
dsi_cfg.h
57 int (*dma_base_get)(struct msm_dsi_host *msm_host, uint64_t *iova);
/linux-master/drivers/iommu/
of_iommu.c
248 phys_addr_t iova; local
254 maps = of_translate_dma_region(np, maps, &iova, &length);
259 type = iommu_resv_region_get_type(dev, &phys, iova, length);
261 region = iommu_alloc_resv_region(iova, length, prot, type,
iommu.c
2359 phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) argument
2362 return iova;
2367 return domain->ops->iova_to_phys(domain, iova);
2371 static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova, argument
2377 unsigned long addr_merge = paddr | iova;
2407 if ((iova ^ paddr) & (pgsize_next - 1))
2425 static int __iommu_map(struct iommu_domain *domain, unsigned long iova, argument
2429 unsigned long orig_iova = iova;
2449 if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
2450 pr_err("unaligned: iova
2488 iommu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot, gfp_t gfp) argument
2518 __iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather) argument
2572 iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) argument
2586 iommu_unmap_fast(struct iommu_domain *domain, unsigned long iova, size_t size, struct iommu_iotlb_gather *iotlb_gather) argument
2594 iommu_map_sg(struct iommu_domain *domain, unsigned long iova, struct scatterlist *sg, unsigned int nents, int prot, gfp_t gfp) argument
2679 report_iommu_fault(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags) argument
[all...]
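
Note: the iommu.c hits give the core entry points and their exact signatures (map, unmap, unmap_fast, map_sg, iova_to_phys). A round-trip sketch using only those signatures: map one page, verify the translation, unmap it.

/* Sketch: map one page, check the translation, unmap.
 * iommu_unmap() returns the number of bytes unmapped. */
static int example_roundtrip(struct iommu_domain *domain,
                             unsigned long iova, phys_addr_t paddr)
{
        int ret;

        ret = iommu_map(domain, iova, paddr, PAGE_SIZE,
                        IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
        if (ret)
                return ret;

        WARN_ON(iommu_iova_to_phys(domain, iova) != paddr);

        if (iommu_unmap(domain, iova, PAGE_SIZE) != PAGE_SIZE)
                return -EIO;

        return 0;
}
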
msm_iommu.c
138 static void __flush_iotlb_range(unsigned long iova, size_t size, argument
155 iova &= TLBIVA_VA;
156 iova |= GET_CONTEXTIDR_ASID(iommu->base,
158 SET_TLBIVA(iommu->base, master->num, iova);
159 iova += granule;
170 static void __flush_iotlb_walk(unsigned long iova, size_t size, argument
173 __flush_iotlb_range(iova, size, granule, false, cookie);
177 unsigned long iova, size_t granule, void *cookie)
179 __flush_iotlb_range(iova, granule, granule, true, cookie);
485 static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova, argument
176 __flush_iotlb_page(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie) argument
501 msm_iommu_sync_map(struct iommu_domain *domain, unsigned long iova, size_t size) argument
510 msm_iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t pgsize, size_t pgcount, struct iommu_iotlb_gather *gather) argument
[all...]
ipmmu-vmsa.c
316 static void ipmmu_tlb_flush(unsigned long iova, size_t size, argument
491 unsigned long iova; local
498 iova = ipmmu_ctx_read_root(domain, IMELAR);
500 iova |= (u64)ipmmu_ctx_read_root(domain, IMEUAR) << 32;
513 iova);
516 iova);
527 if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
531 "Unhandled fault: status 0x%08x iova 0x%lx\n",
532 status, iova);
668 static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova, argument
678 ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova, size_t pgsize, size_t pgcount, struct iommu_iotlb_gather *gather) argument
701 ipmmu_iova_to_phys(struct iommu_domain *io_domain, dma_addr_t iova) argument
[all...]
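
Note: the ipmmu-vmsa.c hits sketch the fault path: the faulting iova is read from the IMELAR/IMEUAR registers, offered upstream via report_iommu_fault() (signature in the iommu.c hit above), and logged only if nothing handles it. Condensed:

/* Sketch of the fault-reporting pattern from the hits above;
 * reading the hw registers is elided. */
static irqreturn_t example_fault(struct iommu_domain *io_domain,
                                 struct device *dev,
                                 unsigned long iova, u32 status)
{
        if (!report_iommu_fault(io_domain, dev, iova, 0))
                return IRQ_HANDLED;

        dev_err_ratelimited(dev,
                            "Unhandled fault: status 0x%08x iova 0x%lx\n",
                            status, iova);
        return IRQ_HANDLED;
}
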
/linux-master/drivers/infiniband/hw/mana/
mr.c
98 u64 iova, int access_flags,
112 "start 0x%llx, iova 0x%llx length 0x%llx access_flags 0x%x",
113 start, iova, length, access_flags);
130 err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova);
144 mr_params.gva.virtual_address = iova;
97 mana_ib_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 length, u64 iova, int access_flags, struct ib_udata *udata) argument
/linux-master/drivers/vfio/
vfio_main.c
1048 if (!IS_ALIGNED(range.iova, control.page_size) ||
1054 if (check_add_overflow(range.iova, range.length, &iova_end) ||
1060 nodes[i].start = range.iova;
1061 nodes[i].last = range.iova + range.length - 1;
1105 unsigned long iova, size_t length,
1110 return device->log_ops->log_read_and_clear(device, iova, length, iter);
1141 if (check_add_overflow(report.iova, report.length, &iova_end) ||
1145 iter = iova_bitmap_alloc(report.iova, report.length,
1535 * @iova [in] : starting IOVA of user pages to be pinned.
1545 int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova, argument
1104 vfio_device_log_read_and_clear(struct iova_bitmap *iter, unsigned long iova, size_t length, void *opaque) argument
1586 vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage) argument
1625 vfio_dma_rw(struct vfio_device *device, dma_addr_t iova, void *data, size_t len, bool write) argument
[all...]
/linux-master/include/linux/
vfio.h
128 void (*dma_unmap)(struct vfio_device *vdev, u64 iova, u64 length);
231 unsigned long iova, unsigned long length,
321 int vfio_pin_pages(struct vfio_device *device, dma_addr_t iova,
323 void vfio_unpin_pages(struct vfio_device *device, dma_addr_t iova, int npage);
324 int vfio_dma_rw(struct vfio_device *device, dma_addr_t iova,
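
Note: the vfio.h hits list the helpers a mediated driver uses to touch guest memory by iova; both declarations are truncated at the match, so the full upstream signature of vfio_pin_pages() (device, iova, npage, prot, pages) is assumed here. A minimal pin/unpin sketch:

/* Sketch: pin one page of guest memory at `iova', then unpin.
 * vfio_pin_pages() returns the number of pages pinned or -errno. */
static int example_pin_one(struct vfio_device *device, dma_addr_t iova)
{
        struct page *page;
        int ret;

        ret = vfio_pin_pages(device, iova, 1, IOMMU_READ, &page);
        if (ret != 1)
                return ret < 0 ? ret : -EFAULT;

        /* ... access the page ... */

        vfio_unpin_pages(device, iova, 1);
        return 0;
}
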
/linux-master/drivers/iommu/iommufd/
pages.c
194 * iopt_pages. If the iova is sub page-size then the area has an iova that
221 static void iommu_unmap_nofail(struct iommu_domain *domain, unsigned long iova, argument
226 ret = iommu_unmap(domain, iova, size);
381 unsigned long iova; local
384 iova = iopt_area_index_to_iova(area, start_index);
393 phys = iommu_iova_to_phys(domain, iova) - page_offset;
396 iova += PAGE_SIZE - page_offset;
409 unsigned long iova; local
412 iova
447 batch_iommu_map_small(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot) argument
484 unsigned long iova; local
[all...]
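
Note: the pages.c hits include batch_iommu_map_small(), whose signature suggests mapping a range one page at a time, plus iommu_unmap_nofail() for unwinding. The body is not shown; a sketch of what a mapper of that shape could look like (an illustration, not iommufd's code):

/* Hypothetical page-at-a-time mapper; on failure, unwind the
 * pages already mapped. */
static int example_map_small(struct iommu_domain *domain,
                             unsigned long iova, phys_addr_t paddr,
                             size_t size, int prot)
{
        unsigned long start = iova;
        int rc;

        while (size) {
                rc = iommu_map(domain, iova, paddr, PAGE_SIZE, prot,
                               GFP_KERNEL);
                if (rc) {
                        if (iova != start)
                                iommu_unmap(domain, start, iova - start);
                        return rc;
                }
                iova += PAGE_SIZE;
                paddr += PAGE_SIZE;
                size -= PAGE_SIZE;
        }
        return 0;
}
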
/linux-master/drivers/gpu/host1x/
job.c
217 struct iova *alloc;
241 gather_size = iova_align(&host->iova, gather_size);
243 shift = iova_shift(&host->iova);
244 alloc = alloc_iova(&host->iova, gather_size >> shift,
251 err = iommu_map_sgtable(host->domain, iova_dma_addr(&host->iova, alloc),
254 __free_iova(&host->iova, alloc);
259 map->phys = iova_dma_addr(&host->iova, alloc);
664 free_iova(&host->iova, iova_pfn(&host->iova, job->addr_phys[i]));
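
Note: the job.c hits walk the classic <linux/iova.h> allocator flow: round the size with iova_align(), carve out a range with alloc_iova() (sized in pages, hence the iova_shift()), turn it into a dma address with iova_dma_addr(), map with iommu_map_sgtable(), and return the range with __free_iova() (or free_iova() by pfn, as in the hit at 664). The sequence in one place:

/* Sketch of the alloc_iova() + iommu_map_sgtable() sequence from
 * host1x job.c; returns 0 on failure. */
static dma_addr_t example_map_gather(struct iova_domain *iovad,
                                     struct iommu_domain *domain,
                                     struct sg_table *sgt, size_t size,
                                     unsigned long limit_pfn)
{
        unsigned long shift = iova_shift(iovad);
        struct iova *alloc;

        size = iova_align(iovad, size);

        alloc = alloc_iova(iovad, size >> shift, limit_pfn, true);
        if (!alloc)
                return 0;

        if (iommu_map_sgtable(domain, iova_dma_addr(iovad, alloc),
                              sgt, IOMMU_READ) <= 0) {
                __free_iova(iovad, alloc);
                return 0;
        }

        return iova_dma_addr(iovad, alloc);
}
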
