Searched refs:iova (Results 1 - 25 of 249) sorted by relevance


/linux-master/drivers/media/platform/nvidia/tegra-vde/
iommu.c
9 #include <linux/iova.h>
21 struct iova **iovap,
24 struct iova *iova; local
30 size = iova_align(&vde->iova, size);
31 shift = iova_shift(&vde->iova);
33 iova = alloc_iova(&vde->iova, size >> shift, end >> shift, true);
34 if (!iova)
37 addr = iova_dma_addr(&vde->iova, iov
51 tegra_vde_iommu_unmap(struct tegra_vde *vde, struct iova *iova) argument
64 struct iova *iova; local
[all...]
dmabuf-cache.c
9 #include <linux/iova.h>
28 struct iova *iova; member in struct:tegra_vde_cache_entry
39 tegra_vde_iommu_unmap(entry->vde, entry->iova);
73 struct iova *iova; local
91 *addrp = iova_dma_addr(&vde->iova, entry->iova);
125 err = tegra_vde_iommu_map(vde, sgt, &iova, dmabuf->size);
129 *addrp = iova_dma_addr(&vde->iova, iov
[all...]
/linux-master/include/linux/
iova.h
17 /* iova structure */
18 struct iova { struct
27 /* holds all the iova translations for a domain */
30 struct rb_root rbroot; /* iova domain rbtree root */
37 struct iova anchor; /* rbtree lookup anchor */
43 static inline unsigned long iova_size(struct iova *iova) argument
45 return iova->pfn_hi - iova->pfn_lo + 1;
58 static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova) argument
68 iova_dma_addr(struct iova_domain *iovad, struct iova *iova) argument
73 iova_pfn(struct iova_domain *iovad, dma_addr_t iova) argument
114 __free_iova(struct iova_domain *iovad, struct iova *iova) argument
[all...]
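
The declarations above are the generic IOVA allocator API that drivers such as tegra-vde (first hit) build on. A minimal sketch of the usual allocate / translate / free cycle, assuming a driver-private iova_domain that was already set up with init_iova_domain(); the struct name my_dev and both helpers are illustrative only:

#include <linux/iova.h>
#include <linux/dma-mapping.h>

/* Hypothetical driver context; only the iova_domain member matters here. */
struct my_dev {
    struct iova_domain iovad;   /* initialised with init_iova_domain() */
};

static dma_addr_t my_alloc_range(struct my_dev *dev, size_t size,
                                 dma_addr_t limit)
{
    unsigned long shift = iova_shift(&dev->iovad);
    struct iova *iova;

    size = iova_align(&dev->iovad, size);

    /* Allocate a size-aligned run of IOVA page frames below @limit. */
    iova = alloc_iova(&dev->iovad, size >> shift, limit >> shift, true);
    if (!iova)
        return DMA_MAPPING_ERROR;

    /* Convert the allocated pfn range back into a DMA address. */
    return iova_dma_addr(&dev->iovad, iova);
}

static void my_free_range(struct my_dev *dev, dma_addr_t addr)
{
    struct iova *iova = find_iova(&dev->iovad, iova_pfn(&dev->iovad, addr));

    if (!WARN_ON(!iova))
        __free_iova(&dev->iovad, iova);
}
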
iova_bitmap.h
15 unsigned long iova, size_t length,
19 struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length,
26 unsigned long iova, size_t length);
28 static inline struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, argument
47 unsigned long iova, size_t length)
46 iova_bitmap_set(struct iova_bitmap *bitmap, unsigned long iova, size_t length) argument
iommufd.h
34 void (*unmap)(void *data, unsigned long iova, unsigned long length);
63 int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
67 unsigned long iova, unsigned long length);
68 int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
84 unsigned long iova,
93 unsigned long iova,
98 static inline int iommufd_access_rw(struct iommufd_access *access, unsigned long iova, argument
83 iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova, unsigned long length, struct page **out_pages, unsigned int flags) argument
92 iommufd_access_unpin_pages(struct iommufd_access *access, unsigned long iova, unsigned long length) argument
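
The pin/unpin/rw prototypes above are the kernel-internal interface that emulated-device drivers use against an iommufd address space. A hedged sketch of pinning one page of IOVA, peeking at it, and unpinning: the access handle is assumed to come from iommufd_access_create() during binding, @iova is assumed page-aligned, and flags are left at 0 for a plain read-only pin.

#include <linux/iommufd.h>
#include <linux/highmem.h>
#include <linux/mm.h>

static int demo_peek_iova(struct iommufd_access *access, unsigned long iova)
{
    struct page *page;
    void *va;
    u8 first_byte;
    int rc;

    /* Pin the single page backing [iova, iova + PAGE_SIZE). */
    rc = iommufd_access_pin_pages(access, iova, PAGE_SIZE, &page, 0);
    if (rc)
        return rc;

    va = kmap_local_page(page);
    first_byte = *(u8 *)va;     /* or copy without pinning via iommufd_access_rw() */
    kunmap_local(va);

    iommufd_access_unpin_pages(access, iova, PAGE_SIZE);
    return first_byte;
}
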
io-pgtable.h
43 void (*tlb_flush_walk)(unsigned long iova, size_t size, size_t granule,
46 unsigned long iova, size_t granule, void *cookie);
56 * @ias: Input address (iova) size, in bits.
179 * @iova_to_phys: Translate iova to physical address.
185 int (*map_pages)(struct io_pgtable_ops *ops, unsigned long iova,
188 size_t (*unmap_pages)(struct io_pgtable_ops *ops, unsigned long iova,
192 unsigned long iova);
194 unsigned long iova, size_t size,
252 io_pgtable_tlb_flush_walk(struct io_pgtable *iop, unsigned long iova, argument
256 iop->cfg.tlb->tlb_flush_walk(iova, siz
260 io_pgtable_tlb_add_page(struct io_pgtable *iop, struct iommu_iotlb_gather * gather, unsigned long iova, size_t granule) argument
[all...]
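
struct io_pgtable_ops above is what IOMMU drivers get back from alloc_io_pgtable_ops(). A minimal sketch of mapping and unmapping a single 4 KiB page through it; allocating the table, filling io_pgtable_cfg and wiring the TLB callbacks are the caller's job and are omitted, and passing a NULL gather to unmap_pages (as msm_iommu.c does further down) means the caller flushes the IOTLB itself:

#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/sizes.h>

static int demo_map_one(struct io_pgtable_ops *ops, unsigned long iova,
                        phys_addr_t paddr)
{
    size_t mapped = 0;
    int ret;

    ret = ops->map_pages(ops, iova, paddr, SZ_4K, 1,
                         IOMMU_READ | IOMMU_WRITE, GFP_KERNEL, &mapped);
    if (ret)
        return ret;

    /* iova_to_phys() walks the table we just populated. */
    WARN_ON(ops->iova_to_phys(ops, iova) != paddr);

    ops->unmap_pages(ops, iova, SZ_4K, 1, NULL);
    return 0;
}
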
/linux-master/include/trace/events/
iommu.h
81 TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
83 TP_ARGS(iova, paddr, size),
86 __field(u64, iova)
92 __entry->iova = iova;
97 TP_printk("IOMMU: iova=0x%016llx - 0x%016llx paddr=0x%016llx size=%zu",
98 __entry->iova, __entry->iova + __entry->size, __entry->paddr,
105 TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size),
107 TP_ARGS(iova, siz
[all...]
/linux-master/drivers/fpga/
dfl-afu-dma-region.c
118 * @iova: address of the dma memory area
121 * Compare the dma memory area defined by @iova and @size with given dma region.
125 u64 iova, u64 size)
127 if (!size && region->iova != iova)
130 return (region->iova <= iova) &&
131 (region->length + region->iova >= iova + size);
149 dev_dbg(&pdata->dev->dev, "add region (iova
124 dma_region_check_iova(struct dfl_afu_dma_region *region, u64 iova, u64 size) argument
245 afu_dma_region_find(struct dfl_feature_platform_data *pdata, u64 iova, u64 size) argument
285 afu_dma_region_find_iova(struct dfl_feature_platform_data *pdata, u64 iova) argument
301 afu_dma_map_region(struct dfl_feature_platform_data *pdata, u64 user_addr, u64 length, u64 *iova) argument
380 afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova) argument
[all...]
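
dma_region_check_iova() above is a containment test: a region matches when it fully covers [iova, iova + size), and a zero size degenerates to an exact match on the start address. Restated as a standalone sketch with plain u64 parameters instead of the struct fields declared in dfl-afu.h below:

#include <linux/types.h>

static bool region_contains(u64 region_iova, u64 region_length,
                            u64 iova, u64 size)
{
    /* size == 0: the caller wants the region that starts exactly at iova. */
    if (!size && region_iova != iova)
        return false;

    return region_iova <= iova &&
           region_iova + region_length >= iova + size;
}
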
dfl-afu.h
48 * @iova: region IO virtual address.
56 u64 iova; member in struct:dfl_afu_dma_region
99 u64 user_addr, u64 length, u64 *iova);
100 int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova);
103 u64 iova, u64 size);
/linux-master/drivers/staging/media/ipu3/
ipu3-mmu.h
30 int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,
32 size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova,
34 size_t imgu_mmu_map_sg(struct imgu_mmu_info *info, unsigned long iova,
ipu3-dmamap.c
102 struct iova *iova; local
107 iova = alloc_iova(&imgu->iova_domain, size >> shift,
109 if (!iova)
117 iovaddr = iova_dma_addr(&imgu->iova_domain, iova);
133 map->daddr = iova_dma_addr(&imgu->iova_domain, iova);
142 imgu_mmu_unmap(imgu->mmu, iova_dma_addr(&imgu->iova_domain, iova),
146 __free_iova(&imgu->iova_domain, iova);
153 struct iova *iova; local
189 struct iova *iova; local
[all...]
ipu3-mmu.c
150 * @iova: IOVA to split.
154 static inline void address_to_pte_idx(unsigned long iova, u32 *l1pt_idx, argument
157 iova >>= IPU3_PAGE_SHIFT;
160 *l2pt_idx = iova & IPU3_L2PT_MASK;
162 iova >>= IPU3_L2PT_SHIFT;
165 *l1pt_idx = iova & IPU3_L1PT_MASK;
210 static int __imgu_mmu_map(struct imgu_mmu *mmu, unsigned long iova, argument
220 address_to_pte_idx(iova, &l1pt_idx, &l2pt_idx);
244 * @iova: the virtual address
251 int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova, argument
299 imgu_mmu_map_sg(struct imgu_mmu_info *info, unsigned long iova, struct scatterlist *sg, unsigned int nents) argument
338 __imgu_mmu_unmap(struct imgu_mmu *mmu, unsigned long iova, size_t size) argument
379 imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova, size_t size) argument
[all...]
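
address_to_pte_idx() above splits an IOVA into level-1 and level-2 page-table indices. The same split written out with made-up DEMO_* geometry constants (the real values are the IPU3_* macros in the imgu driver):

#include <linux/types.h>

/* Illustrative geometry: 4 KiB pages, 1024-entry L1 and L2 tables. */
#define DEMO_PAGE_SHIFT 12
#define DEMO_L2PT_SHIFT 10
#define DEMO_L2PT_MASK  ((1U << DEMO_L2PT_SHIFT) - 1)
#define DEMO_L1PT_SHIFT 10
#define DEMO_L1PT_MASK  ((1U << DEMO_L1PT_SHIFT) - 1)

static void demo_split_iova(unsigned long iova, u32 *l1pt_idx, u32 *l2pt_idx)
{
    iova >>= DEMO_PAGE_SHIFT;              /* drop the in-page offset */
    *l2pt_idx = iova & DEMO_L2PT_MASK;     /* index into the L2 table */

    iova >>= DEMO_L2PT_SHIFT;              /* consume the L2 index bits */
    *l1pt_idx = iova & DEMO_L1PT_MASK;     /* index into the L1 table */
}
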
/linux-master/drivers/iommu/iommufd/
iova_bitmap.c
32 * data[(iova / page_size) / 64] & (1ULL << (iova % 64))
36 unsigned long iova; member in struct:iova_bitmap_map
74 * bitmap = iova_bitmap_alloc(iova, length, page_size, data);
82 * Each iteration of the @dirty_reporter_fn is called with a unique @iova
87 * iova_bitmap_set(bitmap, iova, iova_length);
112 unsigned long iova; member in struct:iova_bitmap
126 * (stored in mapped::iova). All computations in this file are done using
127 * relative IOVAs and thus avoid an extra subtraction against mapped::iova.
131 unsigned long iova)
130 iova_bitmap_offset_to_index(struct iova_bitmap *bitmap, unsigned long iova) argument
241 iova_bitmap_alloc(unsigned long iova, size_t length, unsigned long page_size, u64 __user *data) argument
322 unsigned long iova = iova_bitmap_mapped_iova(bitmap); local
355 unsigned long iova = iova_bitmap_mapped_iova(bitmap); local
381 unsigned long iova = iova_bitmap_mapped_length(bitmap) - 1; local
443 iova_bitmap_set(struct iova_bitmap *bitmap, unsigned long iova, size_t length) argument
[all...]
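
The block comment in iova_bitmap.c (partly visible above) already outlines the intended flow: allocate a bitmap over an IOVA range, let iova_bitmap_for_each() hand the reporter the chunks it can map, call iova_bitmap_set() for dirty ranges, then free. A sketch of that flow with signatures as given in iova_bitmap.h above; the assumption that iova_bitmap_alloc() returns an ERR_PTR on failure should be checked against the header:

#include <linux/iova_bitmap.h>
#include <linux/err.h>

/* Reporter callback: mark the whole mapped chunk dirty, for illustration. */
static int demo_dirty_reporter(struct iova_bitmap *bitmap, unsigned long iova,
                               size_t length, void *opaque)
{
    iova_bitmap_set(bitmap, iova, length);
    return 0;
}

/* Fill a user-supplied dirty bitmap covering [iova, iova + length). */
static int demo_report_dirty(unsigned long iova, size_t length,
                             unsigned long page_size, u64 __user *data)
{
    struct iova_bitmap *bitmap;
    int ret;

    bitmap = iova_bitmap_alloc(iova, length, page_size, data);
    if (IS_ERR(bitmap))
        return PTR_ERR(bitmap);

    ret = iova_bitmap_for_each(bitmap, NULL, demo_dirty_reporter);

    iova_bitmap_free(bitmap);
    return ret;
}
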
io_pagetable.h
19 * the iova that are backed by something. iova not covered by areas is not
97 * Number of bytes from the start of the iopt_pages that the iova begins.
102 unsigned long iova)
105 WARN_ON(iova < iopt_area_iova(area) ||
106 iova > iopt_area_last_iova(area));
107 return (iova - iopt_area_iova(area)) + area->page_offset +
112 unsigned long iova)
114 return iopt_area_start_byte(area, iova) / PAGE_SIZE;
154 unsigned long iova,
101 iopt_area_start_byte(struct iopt_area *area, unsigned long iova) argument
111 iopt_area_iova_to_index(struct iopt_area *area, unsigned long iova) argument
[all...]
/linux-master/drivers/iommu/
iova.c
8 #include <linux/iova.h>
31 static struct iova *to_iova(struct rb_node *node)
33 return rb_entry(node, struct iova, node);
71 __cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)
80 __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
82 struct iova *cached_iova;
135 /* Insert the iova into domain rbtree by holding writer lock */
137 iova_insert_rbtree(struct rb_root *root, struct iova *iova, argument
145 struct iova *thi
233 free_iova_mem(struct iova *iova) argument
282 struct iova *iova = to_iova(node); local
295 remove_iova(struct iova_domain *iovad, struct iova *iova) argument
312 struct iova *iova; local
329 __free_iova(struct iova_domain *iovad, struct iova *iova) argument
351 struct iova *iova; local
447 struct iova *iova, *tmp; local
461 struct iova *iova = to_iova(node); local
471 struct iova *iova; local
486 struct iova *iova; local
496 __adjust_overlap_range(struct iova *iova, unsigned long *pfn_lo, unsigned long *pfn_hi) argument
519 struct iova *iova; local
625 struct iova *iova = private_find_iova(iovad, mag->pfns[i]); local
[all...]
io-pgtable-dart.c
124 unsigned long iova, phys_addr_t paddr,
173 static int dart_get_table(struct dart_io_pgtable *data, unsigned long iova) argument
175 return (iova >> (3 * data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
179 static int dart_get_l1_index(struct dart_io_pgtable *data, unsigned long iova) argument
182 return (iova >> (2 * data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
186 static int dart_get_l2_index(struct dart_io_pgtable *data, unsigned long iova) argument
189 return (iova >> (data->bits_per_level + ilog2(sizeof(dart_iopte)))) &
193 static dart_iopte *dart_get_l2(struct dart_io_pgtable *data, unsigned long iova) argument
196 int tbl = dart_get_table(data, iova);
202 ptep += dart_get_l1_index(data, iova);
123 dart_init_pte(struct dart_io_pgtable *data, unsigned long iova, phys_addr_t paddr, dart_iopte prot, int num_entries, dart_iopte *ptep) argument
236 dart_map_pages(struct io_pgtable_ops *ops, unsigned long iova, phys_addr_t paddr, size_t pgsize, size_t pgcount, int iommu_prot, gfp_t gfp, size_t *mapped) argument
298 dart_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova, size_t pgsize, size_t pgcount, struct iommu_iotlb_gather *gather) argument
341 dart_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova) argument
[all...]
io-pgtable-arm-v7s.c
432 unsigned long iova, phys_addr_t paddr, int prot,
448 tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl, cfg);
449 if (WARN_ON(__arm_v7s_unmap(data, NULL, iova + i * sz,
497 static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova, argument
506 ptep += ARM_V7S_LVL_IDX(iova, lvl, cfg);
510 return arm_v7s_init_pte(data, iova, paddr, prot,
541 return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);
544 static int arm_v7s_map_pages(struct io_pgtable_ops *ops, unsigned long iova, argument
551 if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
560 ret = __arm_v7s_map(data, iova, padd
431 arm_v7s_init_pte(struct arm_v7s_io_pgtable *data, unsigned long iova, phys_addr_t paddr, int prot, int lvl, int num_entries, arm_v7s_iopte *ptep) argument
595 arm_v7s_split_cont(struct arm_v7s_io_pgtable *data, unsigned long iova, int idx, int lvl, arm_v7s_iopte *ptep) argument
621 arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data, struct iommu_iotlb_gather *gather, unsigned long iova, size_t size, arm_v7s_iopte blk_pte, arm_v7s_iopte *ptep) argument
666 __arm_v7s_unmap(struct arm_v7s_io_pgtable *data, struct iommu_iotlb_gather *gather, unsigned long iova, size_t size, int lvl, arm_v7s_iopte *ptep) argument
738 arm_v7s_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova, size_t pgsize, size_t pgcount, struct iommu_iotlb_gather *gather) argument
760 arm_v7s_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova) argument
903 dummy_tlb_flush(unsigned long iova, size_t size, size_t granule, void *cookie) argument
910 dummy_tlb_add_page(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie) argument
940 unsigned int iova, size, iova_start; local
[all...]
/linux-master/drivers/vfio/
vfio_iommu_type1.c
90 dma_addr_t iova; /* Device address */ member in struct:vfio_dma
129 dma_addr_t iova; /* Device address */ member in struct:vfio_pfn
136 dma_addr_t iova; member in struct:vfio_regions
173 if (start + size <= dma->iova)
175 else if (start >= dma->iova + dma->size)
194 if (start < dma->iova + dma->size) {
197 if (start >= dma->iova)
204 if (res && size && dma_res->iova >= start + size)
218 if (new->iova + new->size <= dma->iova)
323 vfio_find_vpfn(struct vfio_dma *dma, dma_addr_t iova) argument
367 vfio_add_to_pfn_list(struct vfio_dma *dma, dma_addr_t iova, unsigned long pfn) argument
390 vfio_iova_get_vfio_pfn(struct vfio_dma *dma, unsigned long iova) argument
609 dma_addr_t iova = vaddr - dma->vaddr + dma->iova; local
712 vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova, unsigned long pfn, long npage, bool do_accounting) argument
767 vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova, bool do_accounting) argument
827 dma_addr_t iova; local
892 dma_addr_t iova; local
919 dma_addr_t iova = user_iova + PAGE_SIZE * i; local
967 unmap_unpin_fast(struct vfio_domain *domain, struct vfio_dma *dma, dma_addr_t *iova, size_t len, phys_addr_t phys, long *unlocked, struct list_head *unmapped_list, int *unmapped_cnt, struct iommu_iotlb_gather *iotlb_gather) argument
1007 unmap_unpin_slow(struct vfio_domain *domain, struct vfio_dma *dma, dma_addr_t *iova, size_t len, phys_addr_t phys, long *unlocked) argument
1028 dma_addr_t iova = dma->iova, end = dma->iova + dma->size; local
1183 vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu, dma_addr_t iova, size_t size, size_t pgsize) argument
1275 dma_addr_t iova = unmap->iova; local
1418 vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova, unsigned long pfn, long npage, int prot) argument
1448 dma_addr_t iova = dma->iova; local
1498 struct list_head *iova = &iommu->iova_list; local
1549 dma_addr_t iova = map->iova; local
1685 dma_addr_t iova; local
1771 dma_addr_t iova; local
1945 struct list_head *iova = &iommu->iova_list; local
1975 vfio_iommu_aper_resize(struct list_head *iova, dma_addr_t start, dma_addr_t end) argument
2036 vfio_iommu_resv_exclude(struct list_head *iova, struct list_head *resv_regions) argument
2094 vfio_iommu_iova_free(struct list_head *iova) argument
2107 struct list_head *iova = &iommu->iova_list; local
2127 struct list_head *iova = &iommu->iova_list; local
2687 struct vfio_iova *iova; local
[all...]
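
The comparisons in the vfio_iommu_type1.c hits (around lines 173-218) are an interval lookup over an rb-tree of vfio_dma entries keyed by iova. Condensed into a standalone sketch with abbreviated names:

#include <linux/rbtree.h>
#include <linux/types.h>

/* Condensed stand-in for vfio's per-container DMA mapping record. */
struct demo_dma {
    struct rb_node node;
    dma_addr_t iova;    /* device address */
    size_t size;
};

/* Find any mapping overlapping [start, start + size) in an iova-keyed tree. */
static struct demo_dma *demo_find_dma(struct rb_root *root,
                                      dma_addr_t start, size_t size)
{
    struct rb_node *node = root->rb_node;

    while (node) {
        struct demo_dma *dma = rb_entry(node, struct demo_dma, node);

        if (start + size <= dma->iova)
            node = node->rb_left;          /* range is entirely below */
        else if (start >= dma->iova + dma->size)
            node = node->rb_right;         /* range is entirely above */
        else
            return dma;                    /* ranges overlap */
    }
    return NULL;
}
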
/linux-master/drivers/iommu/amd/
io_pgtable_v2.c
135 static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova, argument
143 pte = &pgd[PM_LEVEL_INDEX(level, iova)];
144 iova = PAGE_SIZE_ALIGN(iova, PAGE_SIZE);
175 pte = &pte[PM_LEVEL_INDEX(level, iova)];
199 unsigned long iova, unsigned long *page_size)
205 pte = &pgtable->pgd[PM_LEVEL_INDEX(level, iova)];
216 pte = &pte[PM_LEVEL_INDEX(level - 1, iova)];
236 static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova, argument
245 unsigned long o_iova = iova;
198 fetch_pte(struct amd_io_pgtable *pgtable, unsigned long iova, unsigned long *page_size) argument
282 iommu_v2_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova, size_t pgsize, size_t pgcount, struct iommu_iotlb_gather *gather) argument
311 iommu_v2_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova) argument
334 v2_tlb_flush_walk(unsigned long iova, size_t size, size_t granule, void *cookie) argument
339 v2_tlb_add_page(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie) argument
[all...]
/linux-master/drivers/infiniband/sw/rxe/
rxe_mr.c
27 int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length) argument
35 if (iova < mr->ibmr.iova ||
36 iova + length > mr->ibmr.iova + mr->ibmr.length) {
37 rxe_dbg_mr(mr, "iova/length out of range\n");
75 static unsigned long rxe_mr_iova_to_index(struct rxe_mr *mr, u64 iova) argument
77 return (iova >> mr->page_shift) - (mr->ibmr.iova >> mr->page_shift);
80 static unsigned long rxe_mr_iova_to_page_offset(struct rxe_mr *mr, u64 iova) argument
245 rxe_mr_copy_xarray(struct rxe_mr *mr, u64 iova, void *addr, unsigned int length, enum rxe_mr_copy_dir dir) argument
304 rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, unsigned int length, enum rxe_mr_copy_dir dir) argument
345 u64 iova; local
424 rxe_flush_pmem_iova(struct rxe_mr *mr, u64 iova, unsigned int length) argument
471 rxe_mr_do_atomic_op(struct rxe_mr *mr, u64 iova, int opcode, u64 compare, u64 swap_add, u64 *orig_val) argument
529 rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value) argument
577 rxe_mr_do_atomic_write(struct rxe_mr *mr, u64 iova, u64 value) argument
[all...]
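
mr_check_range() and rxe_mr_iova_to_index() above are small pieces of address arithmetic over a memory region's [iova, iova + length) window. Restated generically, with mr_iova, mr_length and page_shift standing in for the mr->ibmr fields:

#include <linux/types.h>
#include <linux/errno.h>

/* Reject accesses outside the registered [mr_iova, mr_iova + mr_length). */
static int demo_mr_check_range(u64 mr_iova, size_t mr_length,
                               u64 iova, size_t length)
{
    if (iova < mr_iova || iova + length > mr_iova + mr_length)
        return -EINVAL;
    return 0;
}

/* Index of the page backing @iova within the MR's page array. */
static unsigned long demo_mr_iova_to_index(u64 mr_iova, unsigned int page_shift,
                                           u64 iova)
{
    return (iova >> page_shift) - (mr_iova >> page_shift);
}
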
/linux-master/drivers/gpu/drm/msm/
msm_mmu.h
14 int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
16 int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
30 int (*handler)(void *arg, unsigned long iova, int flags, void *data);
48 int (*handler)(void *arg, unsigned long iova, int flags, void *data))
47 msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg, int (*handler)(void *arg, unsigned long iova, int flags, void *data)) argument
msm_iommu.c
37 unsigned long iova, phys_addr_t paddr,
43 unsigned long addr_merge = paddr | iova;
73 if ((iova ^ paddr) & (pgsize_next - 1))
91 static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova, argument
100 pgsize = calc_pgsize(pagetable, iova, iova, size, &count);
102 unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);
106 iova += unmapped;
115 static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova, argument
121 u64 addr = iova;
36 calc_pgsize(struct msm_iommu_pagetable *pagetable, unsigned long iova, phys_addr_t paddr, size_t size, size_t *count) argument
219 msm_iommu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule, void *cookie) argument
235 msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather, unsigned long iova, size_t granule, void *cookie) argument
322 msm_fault_handler(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags, void *arg) argument
361 msm_iommu_map(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt, size_t len, int prot) argument
377 msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len) argument
[all...]
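
calc_pgsize() above folds the two addresses together (addr_merge = paddr | iova) so a single alignment test covers both, then keeps the largest supported page size that fits. A simplified sketch of that selection with the supported-size bitmap passed in explicitly; the in-tree helper additionally returns how many such pages fit via a count pointer:

#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/types.h>

static size_t demo_calc_pgsize(unsigned long pgsize_bitmap, unsigned long iova,
                               phys_addr_t paddr, size_t size)
{
    unsigned long addr_merge = paddr | iova;
    unsigned long pgsizes;

    /* Supported page sizes that are no larger than the request. */
    pgsizes = pgsize_bitmap & GENMASK(__fls(size), 0);

    /* Keep only sizes whose alignment both addresses already satisfy. */
    if (addr_merge)
        pgsizes &= GENMASK(__ffs(addr_merge), 0);

    /* Largest remaining size, or 0 if nothing fits. */
    return pgsizes ? BIT(__fls(pgsizes)) : 0;
}
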
/linux-master/drivers/vdpa/vdpa_user/
iova_domain.c
104 u64 iova, u64 size, u64 paddr)
107 u64 last = iova + size - 1;
109 while (iova <= last) {
110 map = &domain->bounce_maps[iova >> PAGE_SHIFT];
118 iova += PAGE_SIZE;
124 u64 iova, u64 size)
127 u64 last = iova + size - 1;
129 while (iova <= last) {
130 map = &domain->bounce_maps[iova >> PAGE_SHIFT];
132 iova
103 vduse_domain_map_bounce_page(struct vduse_iova_domain *domain, u64 iova, u64 size, u64 paddr) argument
123 vduse_domain_unmap_bounce_page(struct vduse_iova_domain *domain, u64 iova, u64 size) argument
160 vduse_domain_bounce(struct vduse_iova_domain *domain, dma_addr_t iova, size_t size, enum dma_data_direction dir) argument
190 vduse_domain_get_coherent_page(struct vduse_iova_domain *domain, u64 iova) argument
211 vduse_domain_get_bounce_page(struct vduse_iova_domain *domain, u64 iova) argument
367 vduse_domain_free_iova(struct iova_domain *iovad, dma_addr_t iova, size_t size) argument
404 dma_addr_t iova = vduse_domain_alloc_iova(iovad, size, limit); local
451 dma_addr_t iova = vduse_domain_alloc_iova(iovad, size, limit); local
509 unsigned long iova = vmf->pgoff << PAGE_SHIFT; local
[all...]
/linux-master/tools/testing/selftests/iommu/
iommufd_fail_nth.c
234 __u64 iova; local
270 if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, BUFFER_SIZE, &iova,
282 .src_iova = iova,
290 if (_test_ioctl_ioas_unmap(self->fd, ioas_id, iova, BUFFER_SIZE,
294 _test_ioctl_ioas_unmap(self->fd, ioas_id, iova, BUFFER_SIZE, NULL);
304 __u64 iova; local
321 if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
341 __u64 iova; local
362 if (_test_ioctl_ioas_map(self->fd, ioas_id, buffer, 262144, &iova,
386 __u64 iova; local
465 __u64 iova; local
521 __u64 iova; local
585 __u64 iova; local
[all...]
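
The _test_ioctl_ioas_map()/_test_ioctl_ioas_unmap() helpers above are thin wrappers over the iommufd uAPI. A hedged userspace sketch of mapping a buffer into an IOAS and letting the kernel choose the IOVA; the struct layout and flag names are quoted from memory of include/uapi/linux/iommufd.h and should be verified against the header before relying on this:

#include <stdint.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

/* Map @buf into IOAS @ioas_id on /dev/iommu fd @iommufd; kernel picks the IOVA. */
static int demo_ioas_map(int iommufd, uint32_t ioas_id, void *buf,
                         size_t length, uint64_t *iova_out)
{
    struct iommu_ioas_map cmd = {
        .size = sizeof(cmd),
        .flags = IOMMU_IOAS_MAP_READABLE | IOMMU_IOAS_MAP_WRITEABLE,
        .ioas_id = ioas_id,
        .user_va = (uintptr_t)buf,
        .length = length,
        /* .iova left 0: without IOMMU_IOAS_MAP_FIXED_IOVA the kernel chooses */
    };

    if (ioctl(iommufd, IOMMU_IOAS_MAP, &cmd))
        return -1;

    *iova_out = cmd.iova;   /* the chosen IOVA is written back */
    return 0;
}
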
/linux-master/drivers/gpu/drm/etnaviv/
etnaviv_dump.h
27 __le64 iova; member in struct:etnaviv_dump_object_header

