Lines Matching defs:iova

90 	dma_addr_t		iova;		/* Device address */
129 dma_addr_t iova; /* Device address */
136 dma_addr_t iova;
173 if (start + size <= dma->iova)
175 else if (start >= dma->iova + dma->size)
194 if (start < dma->iova + dma->size) {
197 if (start >= dma->iova)
204 if (res && size && dma_res->iova >= start + size)
218 if (new->iova + new->size <= dma->iova)
268 bitmap_set(dma->bitmap, (vpfn->iova - dma->iova) >> pgshift, 1);
321 * Helper Functions for host iova-pfn list
323 static struct vfio_pfn *vfio_find_vpfn(struct vfio_dma *dma, dma_addr_t iova)
331 if (iova < vpfn->iova)
333 else if (iova > vpfn->iova)
352 if (new->iova < vpfn->iova)
367 static int vfio_add_to_pfn_list(struct vfio_dma *dma, dma_addr_t iova,
376 vpfn->iova = iova;
391 unsigned long iova)
393 struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);
609 dma_addr_t iova = vaddr - dma->vaddr + dma->iova;
661 if (!rsvd && !vfio_find_vpfn(dma, iova)) {
675 iova += PAGE_SIZE;
712 static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
719 for (i = 0; i < npage; i++, iova += PAGE_SIZE) {
722 if (vfio_find_vpfn(dma, iova))
767 static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova,
771 struct vfio_pfn *vpfn = vfio_find_vpfn(dma, iova);
827 dma_addr_t iova;
830 iova = user_iova + PAGE_SIZE * i;
831 dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
842 vpfn = vfio_iova_get_vfio_pfn(dma, iova);
848 remote_vaddr = dma->vaddr + (iova - dma->iova);
859 ret = vfio_add_to_pfn_list(dma, iova, phys_pfn);
876 (iova - dma->iova) >> pgshift, 1);
892 dma_addr_t iova;
894 iova = user_iova + PAGE_SIZE * j;
895 dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
896 vfio_unpin_page_external(dma, iova, do_accounting);
919 dma_addr_t iova = user_iova + PAGE_SIZE * i;
922 dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
926 vfio_unpin_page_external(dma, iova, do_accounting);
945 entry->iova,
968 struct vfio_dma *dma, dma_addr_t *iova,
978 unmapped = iommu_unmap_fast(domain->domain, *iova, len,
984 entry->iova = *iova;
989 *iova += unmapped;
1008 struct vfio_dma *dma, dma_addr_t *iova,
1012 size_t unmapped = iommu_unmap(domain->domain, *iova, len);
1015 *unlocked += vfio_unpin_pages_remote(dma, *iova,
1019 *iova += unmapped;
1028 dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
1052 iommu_unmap(d->domain, dma->iova, dma->size);
1057 while (iova < end) {
1061 phys = iommu_iova_to_phys(domain->domain, iova);
1063 iova += PAGE_SIZE;
1073 !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
1074 next = iommu_iova_to_phys(domain->domain, iova + len);
1083 unmapped = unmap_unpin_fast(domain, dma, &iova, len, phys,
1088 unmapped = unmap_unpin_slow(domain, dma, &iova, len,
1152 unsigned long bit_offset = (dma->iova - base_iova) >> pgshift;
1184 dma_addr_t iova, size_t size, size_t pgsize)
1197 dma = vfio_find_dma(iommu, iova, 1);
1198 if (dma && dma->iova != iova)
1201 dma = vfio_find_dma(iommu, iova + size - 1, 0);
1202 if (dma && dma->iova + dma->size != iova + size)
1208 if (dma->iova < iova)
1211 if (dma->iova > iova + size - 1)
1214 ret = update_user_bitmap(bitmap, iommu, dma, iova, pgsize);
1261 device->ops->dma_unmap(device, dma->iova, dma->size);
1275 dma_addr_t iova = unmap->iova;
1292 if (iova & (pgsize - 1))
1296 if (iova || size)
1300 iova + size - 1 < iova || size > SIZE_MAX) {
1333 * the first iova of mapping will unmap the entire range.
1344 dma = vfio_find_dma(iommu, iova, 1);
1345 if (dma && dma->iova != iova)
1348 dma = vfio_find_dma(iommu, iova + size - 1, 0);
1349 if (dma && dma->iova + dma->size != iova + size)
1354 n = first_n = vfio_find_dma_first_node(iommu, iova, size);
1358 if (dma->iova >= iova + size)
1361 if (!iommu->v2 && iova > dma->iova)
1399 iova, pgsize);
1418 static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
1425 ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
1438 iommu_unmap(d->domain, iova, npage << PAGE_SHIFT);
1448 dma_addr_t iova = dma->iova;
1470 ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage,
1473 vfio_unpin_pages_remote(dma, iova + dma->size, pfn,
1493 * Check dma map request is within a valid iova range
1498 struct list_head *iova = &iommu->iova_list;
1501 list_for_each_entry(node, iova, list) {
1510 return list_empty(iova);
1549 dma_addr_t iova = map->iova;
1557 if (map->size != size || map->vaddr != vaddr || map->iova != iova)
1575 if (!size || (size | iova | vaddr) & (pgsize - 1)) {
1581 if (iova + size - 1 < iova || vaddr + size - 1 < vaddr) {
1586 dma = vfio_find_dma(iommu, iova, size);
1590 } else if (!dma->vaddr_invalid || dma->iova != iova ||
1612 if (!vfio_iommu_iova_dma_valid(iommu, iova, iova + size - 1)) {
1624 dma->iova = iova;
1685 dma_addr_t iova;
1688 iova = dma->iova;
1690 while (iova < dma->iova + dma->size) {
1703 phys = iommu_iova_to_phys(d->domain, iova);
1706 iova += PAGE_SIZE;
1712 i = iova + size;
1713 while (i < dma->iova + dma->size &&
1722 (iova - dma->iova);
1723 size_t n = dma->iova + dma->size - iova;
1740 ret = iommu_map(domain->domain, iova, phys, size,
1745 vfio_unpin_pages_remote(dma, iova,
1754 iova += size;
1771 dma_addr_t iova;
1774 iommu_unmap(domain->domain, dma->iova, dma->size);
1778 iova = dma->iova;
1779 while (iova < dma->iova + dma->size) {
1784 phys = iommu_iova_to_phys(domain->domain, iova);
1786 iova += PAGE_SIZE;
1792 i = iova + size;
1793 while (i < dma->iova + dma->size &&
1800 iommu_unmap(domain->domain, iova, size);
1801 vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT,
1912 * This is a helper function to insert an address range to iova list.
1945 struct list_head *iova = &iommu->iova_list;
1947 if (list_empty(iova))
1951 first = list_first_entry(iova, struct vfio_iova, list);
1952 last = list_last_entry(iova, struct vfio_iova, list);
1972 * Resize iommu iova aperture window. This is called only if the new
1975 static int vfio_iommu_aper_resize(struct list_head *iova,
1980 if (list_empty(iova))
1981 return vfio_iommu_iova_insert(iova, start, end);
1983 /* Adjust iova list start */
1984 list_for_each_entry_safe(node, next, iova, list) {
1996 /* Adjust iova list end */
1997 list_for_each_entry_safe(node, next, iova, list) {
2033 * Check iova region overlap with reserved regions and
2034 * exclude them from the iommu iova range
2036 static int vfio_iommu_resv_exclude(struct list_head *iova,
2051 list_for_each_entry_safe(n, next, iova, list) {
2059 * reserve region to exclude that from valid iova range.
2078 if (list_empty(iova))
2094 static void vfio_iommu_iova_free(struct list_head *iova)
2098 list_for_each_entry_safe(n, next, iova, list) {
2107 struct list_head *iova = &iommu->iova_list;
2111 list_for_each_entry(n, iova, list) {
2127 struct list_head *iova = &iommu->iova_list;
2129 vfio_iommu_iova_free(iova);
2131 list_splice_tail(iova_copy, iova);
2229 * We don't want to work on the original iova list as the list
2312 /* Delete the old one and insert new iova list */
2374 * the removed domain decided the iova aperture window. Modify the
2375 * iova aperture with the smallest window among existing domains.
2406 * group can be part of valid iova now. But since reserved regions
2407 * may be duplicated among groups, populate the iova valid regions
2437 /* purge the iova list and create new one */
2444 /* Exclude current reserved regions from iova ranges */
2477 * Get a copy of iova list. This will be used to update
2687 struct vfio_iova *iova;
2691 list_for_each_entry(iova, &iommu->iova_list, list)
2710 list_for_each_entry(iova, &iommu->iova_list, list) {
2711 cap_iovas->iova_ranges[i].start = iova->start;
2712 cap_iovas->iova_ranges[i].end = iova->end;
2941 if (range.iova + range.size < range.iova)
2962 if (range.iova & (iommu_pgsize - 1)) {
2973 iommu, range.iova,
3074 offset = user_iova - dma->iova;
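
The matches above appear to come from the Linux VFIO type1 IOMMU driver (drivers/vfio/vfio_iommu_type1.c). A few patterns recur often enough in the raw hits that short, hedged sketches may help when skimming them. The first is the lookup logic behind vfio_find_dma() and vfio_find_vpfn(): the former tests whether a requested [start, start + size) range lies below, above, or overlapping a mapping's [dma->iova, dma->iova + dma->size), while the latter does the analogous exact-point comparison against vpfn->iova. In the driver these comparisons steer a red-black tree walk; the standalone user-space C sketch below flattens that to a linear scan, which is enough to show the interval test. All names here (dma_sketch, range_cmp, find_dma) are illustrative stand-ins, not the kernel's.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

struct dma_sketch {
	dma_addr_t iova;  /* device address of the mapping */
	uint64_t   size;  /* mapping length in bytes */
};

/* <0: look below, >0: look above, 0: [start, start + size) overlaps *dma */
static int range_cmp(const struct dma_sketch *dma, dma_addr_t start, uint64_t size)
{
	if (start + size <= dma->iova)
		return -1;
	if (start >= dma->iova + dma->size)
		return 1;
	return 0;
}

/* Linear stand-in for the driver's tree search: first mapping that overlaps. */
static const struct dma_sketch *
find_dma(const struct dma_sketch *maps, size_t nr, dma_addr_t start, uint64_t size)
{
	for (size_t i = 0; i < nr; i++)
		if (range_cmp(&maps[i], start, size) == 0)
			return &maps[i];
	return NULL;
}

int main(void)
{
	const struct dma_sketch maps[] = {
		{ .iova = 0x10000, .size = 0x4000 },
		{ .iova = 0x20000, .size = 0x1000 },
	};

	/* 0x13000 falls inside the first mapping, so this reports a hit. */
	printf("hit: %s\n", find_dma(maps, 2, 0x13000, 0x1000) ? "yes" : "no");
	return 0;
}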
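
A second recurring pattern is the offset arithmetic tying a device address inside a mapping back to the user virtual address it was pinned from and to a bit in the per-mapping dirty bitmap: "iova = vaddr - dma->vaddr + dma->iova", "remote_vaddr = dma->vaddr + (iova - dma->iova)", and the bit index "(iova - dma->iova) >> pgshift" all appear in the hits above. The sketch below is plain user-space C with a reduced stand-in struct (vfio_dma_sketch), not the driver's vfio_dma.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

/* Reduced stand-in for the mapping bookkeeping seen above; not the real struct. */
struct vfio_dma_sketch {
	dma_addr_t iova;    /* device address of the mapping */
	uint64_t   vaddr;   /* user virtual address backing it */
	uint64_t   size;    /* mapping length in bytes */
};

/* vaddr -> iova, as in "iova = vaddr - dma->vaddr + dma->iova" */
static dma_addr_t vaddr_to_iova(const struct vfio_dma_sketch *dma, uint64_t vaddr)
{
	return vaddr - dma->vaddr + dma->iova;
}

/* iova -> vaddr, as in "remote_vaddr = dma->vaddr + (iova - dma->iova)" */
static uint64_t iova_to_vaddr(const struct vfio_dma_sketch *dma, dma_addr_t iova)
{
	return dma->vaddr + (iova - dma->iova);
}

/* iova -> dirty-bitmap bit, as in "(iova - dma->iova) >> pgshift" */
static uint64_t iova_to_bitmap_bit(const struct vfio_dma_sketch *dma,
				   dma_addr_t iova, unsigned int pgshift)
{
	return (iova - dma->iova) >> pgshift;
}

int main(void)
{
	struct vfio_dma_sketch dma = {
		.iova = 0x100000, .vaddr = 0x7f0000000000ULL, .size = 1 << 20,
	};
	dma_addr_t iova = vaddr_to_iova(&dma, dma.vaddr + 0x3000);

	/* Prints iova 0x103000, the matching vaddr, and bitmap bit 3 for 4K pages. */
	printf("iova=0x%llx vaddr=0x%llx bit=%llu\n",
	       (unsigned long long)iova,
	       (unsigned long long)iova_to_vaddr(&dma, iova),
	       (unsigned long long)iova_to_bitmap_bit(&dma, iova, 12));
	return 0;
}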
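
Finally, the hits around vfio_iommu_iova_dma_valid() and the iova_list handling show how a map request is checked against the allowed iova windows: the driver walks a list of {start, end} ranges and, per the "return list_empty(iova)" line above, treats an empty list as unrestricted. The sketch below models the list as a plain array and treats end as inclusive, matching the "iova + size - 1" form at the call site; the window values in main() are made up for illustration.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

/* Stand-in for one allowed iova window; end is treated as inclusive here. */
struct iova_window {
	dma_addr_t start;
	dma_addr_t end;
};

/* true if [start, end] fits entirely inside one window, or if there are none. */
static bool iova_dma_valid(const struct iova_window *win, size_t nr,
			   dma_addr_t start, dma_addr_t end)
{
	for (size_t i = 0; i < nr; i++)
		if (start >= win[i].start && end <= win[i].end)
			return true;

	/* Mirrors the "return list_empty(iova)" fallback seen above. */
	return nr == 0;
}

int main(void)
{
	/* Made-up windows with a reserved hole between them, for illustration only. */
	const struct iova_window windows[] = {
		{ .start = 0x00000000, .end = 0xfedfffff },
		{ .start = 0xfef00000, .end = 0xffffffff },
	};
	dma_addr_t iova = 0x100000, size = 0x10000;

	/* Checked as [iova, iova + size - 1], matching the call site above. */
	printf("valid: %s\n",
	       iova_dma_valid(windows, 2, iova, iova + size - 1) ? "yes" : "no");
	return 0;
}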