Lines matching defs:iommu (VFIO type1 IOMMU backend, drivers/vfio/vfio_iommu_type1.c)

28 #include <linux/iommu.h>
157 vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
165 static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
168 struct rb_node *node = iommu->dma_list.rb_node;
184 static struct rb_node *vfio_find_dma_first_node(struct vfio_iommu *iommu,
188 struct rb_node *node = iommu->dma_list.rb_node;
209 static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
211 struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;
225 rb_insert_color(&new->node, &iommu->dma_list);
228 static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
230 rb_erase(&old->node, &iommu->dma_list);
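
The helpers above keep every DMA mapping in a red-black tree (iommu->dma_list) ordered by IOVA, so lookups, insertions and removals stay O(log n). A minimal sketch of the same lookup and insert pattern, assuming a stripped-down node type (demo_dma and the demo_* helpers are illustrative names, not the kernel's full struct vfio_dma):

	#include <linux/rbtree.h>
	#include <linux/types.h>	/* dma_addr_t */

	struct demo_dma {			/* illustrative, not the kernel struct */
		struct rb_node node;
		dma_addr_t iova;
		size_t size;
	};

	/* Return any mapping that overlaps [start, start + size). */
	static struct demo_dma *demo_find_dma(struct rb_root *root,
					      dma_addr_t start, size_t size)
	{
		struct rb_node *node = root->rb_node;

		while (node) {
			struct demo_dma *dma = rb_entry(node, struct demo_dma, node);

			if (start + size <= dma->iova)
				node = node->rb_left;		/* wholly below this entry */
			else if (start >= dma->iova + dma->size)
				node = node->rb_right;		/* wholly above this entry */
			else
				return dma;			/* ranges overlap */
		}
		return NULL;
	}

	/* Insert a new, non-overlapping mapping (mirrors the vfio_link_dma pattern). */
	static void demo_link_dma(struct rb_root *root, struct demo_dma *new)
	{
		struct rb_node **link = &root->rb_node, *parent = NULL;
		struct demo_dma *dma;

		while (*link) {
			parent = *link;
			dma = rb_entry(parent, struct demo_dma, node);

			if (new->iova + new->size <= dma->iova)
				link = &(*link)->rb_left;
			else
				link = &(*link)->rb_right;
		}

		rb_link_node(&new->node, parent, link);
		rb_insert_color(&new->node, root);
	}

Because overlapping ranges are never inserted, the "wholly below / wholly above" comparisons are enough to find any entry that intersects the queried range.
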
272 static void vfio_iommu_populate_bitmap_full(struct vfio_iommu *iommu)
275 unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
277 for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
284 static int vfio_dma_bitmap_alloc_all(struct vfio_iommu *iommu, size_t pgsize)
288 for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
309 static void vfio_dma_bitmap_free_all(struct vfio_iommu *iommu)
313 for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
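
vfio_dma_bitmap_alloc_all() and its free/populate counterparts walk the whole dma_list with rb_first()/rb_next() and keep one dirty bit per page of the smallest supported IOMMU page size for each mapping. A simplified sketch of that walk (the real code sizes the bitmap a little more generously for word-aligned manipulation; demo_dma again carries only what the sketch needs):

	#include <linux/rbtree.h>
	#include <linux/bitops.h>	/* __ffs(), BITS_PER_BYTE */
	#include <linux/kernel.h>	/* DIV_ROUND_UP() */
	#include <linux/mm.h>
	#include <linux/slab.h>		/* kvzalloc()/kvfree() */

	struct demo_dma {			/* illustrative, not the kernel struct */
		struct rb_node node;
		dma_addr_t iova;
		size_t size;
		unsigned long *bitmap;		/* one dirty bit per tracked page */
	};

	/* Allocate a dirty bitmap for every mapping; one bit per 'pgsize' page. */
	static int demo_bitmap_alloc_all(struct rb_root *dma_list, size_t pgsize)
	{
		unsigned long pgshift = __ffs(pgsize);
		struct rb_node *n;

		for (n = rb_first(dma_list); n; n = rb_next(n)) {
			struct demo_dma *dma = rb_entry(n, struct demo_dma, node);
			size_t nbits = dma->size >> pgshift;

			dma->bitmap = kvzalloc(DIV_ROUND_UP(nbits, BITS_PER_BYTE),
					       GFP_KERNEL);
			if (!dma->bitmap)
				return -ENOMEM;	/* caller unwinds what was allocated */
		}
		return 0;
	}
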
598 * the iommu can only map chunks of consecutive pfns anyway, so get the
790 struct vfio_iommu *iommu = iommu_data;
797 if (!iommu || !pages)
801 if (!iommu->v2)
804 mutex_lock(&iommu->lock);
806 if (WARN_ONCE(iommu->vaddr_invalid_count,
813 if (list_empty(&iommu->device_list)) {
819 * If iommu capable domain exist in the container then all pages are
821 * iommu capable domain in the container.
823 do_accounting = list_empty(&iommu->domain_list);
831 dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
868 if (iommu->dirty_page_tracking) {
869 unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
881 group = vfio_iommu_find_iommu_group(iommu, iommu_group);
884 iommu->num_non_pinned_groups--;
895 dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
900 mutex_unlock(&iommu->lock);
907 struct vfio_iommu *iommu = iommu_data;
912 if (WARN_ON(!iommu->v2))
915 mutex_lock(&iommu->lock);
917 do_accounting = list_empty(&iommu->domain_list);
922 dma = vfio_find_dma(iommu, iova, PAGE_SIZE);
929 mutex_unlock(&iommu->lock);
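
These two callbacks sit behind the exported vfio_pin_pages()/vfio_unpin_pages() interface that emulated (mdev-style) drivers use to pin guest pages through the container before doing DMA on their behalf. A hedged sketch of a caller; the signature shown matches recent kernels but has changed across releases, so treat it as an assumption rather than a stable contract:

	#include <linux/vfio.h>
	#include <linux/iommu.h>	/* IOMMU_READ / IOMMU_WRITE */

	/*
	 * Hypothetical helper inside an emulated vfio driver: pin one page at
	 * 'iova' for read/write DMA, then release it.  'vdev' is the driver's
	 * struct vfio_device; the signature is as found in recent kernels.
	 */
	static int demo_pin_one_page(struct vfio_device *vdev, dma_addr_t iova)
	{
		struct page *page;
		int ret;

		ret = vfio_pin_pages(vdev, iova, 1, IOMMU_READ | IOMMU_WRITE, &page);
		if (ret != 1)
			return ret < 0 ? ret : -EFAULT;

		/* ... DMA or CPU access via 'page' ... */

		vfio_unpin_pages(vdev, iova, 1);
		return 0;
	}

When no IOMMU-backed domain exists in the container (the do_accounting case above), the pinned pages are also charged against the task's locked-memory limit on this path, since they were not accounted at map time.
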
1025 static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
1038 if (list_empty(&iommu->domain_list))
1044 * means we need to use one of the iommu domains to figure out the
1046 * no iommu translations remaining when the pages are unpinned.
1048 domain = d = list_first_entry(&iommu->domain_list,
1051 list_for_each_entry_continue(d, &iommu->domain_list, next) {
1109 static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
1112 vfio_unmap_unpin(iommu, dma, true);
1113 vfio_unlink_dma(iommu, dma);
1118 iommu->vaddr_invalid_count--;
1120 iommu->dma_avail++;
1123 static void vfio_update_pgsize_bitmap(struct vfio_iommu *iommu)
1127 iommu->pgsize_bitmap = ULONG_MAX;
1129 list_for_each_entry(domain, &iommu->domain_list, next)
1130 iommu->pgsize_bitmap &= domain->domain->pgsize_bitmap;
1137 * granularity while iommu driver can use the sub-PAGE_SIZE size
1140 if (iommu->pgsize_bitmap & ~PAGE_MASK) {
1141 iommu->pgsize_bitmap &= PAGE_MASK;
1142 iommu->pgsize_bitmap |= PAGE_SIZE;
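
Pieced together from the fragments above, vfio_update_pgsize_bitmap() intersects the page-size capabilities of every attached IOMMU domain and then guarantees PAGE_SIZE is usable even when a domain advertises smaller pages, because type1 user mappings are always PAGE_SIZE-granular. A sketch of that logic, assuming a stand-in domain type that only carries the iommu_domain pointer and its list linkage:

	#include <linux/list.h>
	#include <linux/iommu.h>
	#include <linux/mm.h>		/* PAGE_SIZE / PAGE_MASK */

	struct demo_domain {			/* stand-in for struct vfio_domain */
		struct iommu_domain *domain;
		struct list_head next;
	};

	static unsigned long demo_update_pgsize_bitmap(struct list_head *domain_list)
	{
		unsigned long pgsize_bitmap = ULONG_MAX;
		struct demo_domain *d;

		/* Keep only page sizes that every attached domain can map. */
		list_for_each_entry(d, domain_list, next)
			pgsize_bitmap &= d->domain->pgsize_bitmap;

		/*
		 * User mappings are PAGE_SIZE granular, so drop any sub-PAGE_SIZE
		 * sizes and advertise PAGE_SIZE itself instead.
		 */
		if (pgsize_bitmap & ~PAGE_MASK) {
			pgsize_bitmap &= PAGE_MASK;
			pgsize_bitmap |= PAGE_SIZE;
		}
		return pgsize_bitmap;
	}

The result feeds both the iova_pgsizes value reported by VFIO_IOMMU_GET_INFO and the granularity used for dirty-bitmap tracking.
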
1146 static int update_user_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
1161 if (iommu->num_non_pinned_groups && dma->iommu_mapped)
1183 static int vfio_iova_dirty_bitmap(u64 __user *bitmap, struct vfio_iommu *iommu,
1197 dma = vfio_find_dma(iommu, iova, 1);
1201 dma = vfio_find_dma(iommu, iova + size - 1, 0);
1205 for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
1214 ret = update_user_bitmap(bitmap, iommu, dma, iova, pgsize);
1243 static void vfio_notify_dma_unmap(struct vfio_iommu *iommu,
1248 if (list_empty(&iommu->device_list))
1254 * call back down to this code and try to obtain the iommu->lock we must
1257 mutex_lock(&iommu->device_list_lock);
1258 mutex_unlock(&iommu->lock);
1260 list_for_each_entry(device, &iommu->device_list, iommu_entry)
1263 mutex_unlock(&iommu->device_list_lock);
1264 mutex_lock(&iommu->lock);
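
The device_list lines above encode a deliberate lock-ordering dance: a driver's ->dma_unmap() callback may call back into this code and take iommu->lock, so the notification is issued with only device_list_lock held. A hedged reconstruction of that pattern from the fragments, using the file's internal types (vfio_iommu, vfio_dma) and the dma_unmap callback of struct vfio_device_ops as found in recent kernels:

	/*
	 * Called with iommu->lock held.  Drop it around the driver callbacks so a
	 * callback that re-enters vfio (and takes iommu->lock) cannot deadlock;
	 * device_list_lock keeps the list stable meanwhile.
	 */
	static void notify_dma_unmap(struct vfio_iommu *iommu, struct vfio_dma *dma)
	{
		struct vfio_device *device;

		if (list_empty(&iommu->device_list))
			return;

		mutex_lock(&iommu->device_list_lock);
		mutex_unlock(&iommu->lock);

		list_for_each_entry(device, &iommu->device_list, iommu_entry)
			device->ops->dma_unmap(device, dma->iova, dma->size);

		mutex_unlock(&iommu->device_list_lock);
		mutex_lock(&iommu->lock);
	}

Callers must therefore expect iommu->lock to be dropped and retaken across the call.
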
1267 static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
1281 mutex_lock(&iommu->lock);
1284 if (invalidate_vaddr && !list_empty(&iommu->emulated_iommu_groups)) {
1289 pgshift = __ffs(iommu->pgsize_bitmap);
1306 (!iommu->dirty_page_tracking || (bitmap->pgsize != pgsize))) {
1313 * vfio-iommu-type1 (v1) - User mappings were coalesced together to
1343 if (iommu->v2 && !unmap_all) {
1344 dma = vfio_find_dma(iommu, iova, 1);
1348 dma = vfio_find_dma(iommu, iova + size - 1, 0);
1354 n = first_n = vfio_find_dma_first_node(iommu, iova, size);
1361 if (!iommu->v2 && iova > dma->iova)
1372 iommu->vaddr_invalid_count--;
1379 iommu->vaddr_invalid_count++;
1393 vfio_notify_dma_unmap(iommu, dma);
1398 ret = update_user_bitmap(bitmap->data, iommu, dma,
1406 vfio_remove_dma(iommu, dma);
1410 mutex_unlock(&iommu->lock);
1418 static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
1424 list_for_each_entry(d, &iommu->domain_list, next) {
1437 list_for_each_entry_continue_reverse(d, &iommu->domain_list, next) {
1445 static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma,
1470 ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage,
1487 vfio_remove_dma(iommu, dma);
1495 static bool vfio_iommu_iova_dma_valid(struct vfio_iommu *iommu,
1498 struct list_head *iova = &iommu->iova_list;
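
vfio_iommu_iova_dma_valid() walks iommu->iova_list, the set of allowed aperture ranges left after reserved regions have been punched out, and accepts a mapping only if it fits entirely inside one range. A sketch of that check, assuming a node type with just start/end (the file's struct vfio_iova has the same shape):

	#include <linux/list.h>
	#include <linux/types.h>	/* bool, dma_addr_t */

	struct demo_iova {			/* stand-in for struct vfio_iova */
		struct list_head list;
		dma_addr_t start;
		dma_addr_t end;			/* inclusive */
	};

	/* True if [start, end] sits wholly inside one allowed IOVA range. */
	static bool demo_iova_dma_valid(struct list_head *iova_list,
					dma_addr_t start, dma_addr_t end)
	{
		struct demo_iova *node;

		list_for_each_entry(node, iova_list, list)
			if (start >= node->start && end <= node->end)
				return true;

		/* An empty list means no IOMMU-backed group has set an aperture yet. */
		return list_empty(iova_list);
	}
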
1545 static int vfio_dma_do_map(struct vfio_iommu *iommu,
1569 mutex_lock(&iommu->lock);
1571 pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);
1586 dma = vfio_find_dma(iommu, iova, size);
1599 iommu->vaddr_invalid_count--;
1607 if (!iommu->dma_avail) {
1612 if (!vfio_iommu_iova_dma_valid(iommu, iova, iova + size - 1)) {
1623 iommu->dma_avail--;
1646 vfio_link_dma(iommu, dma);
1649 if (list_empty(&iommu->domain_list))
1652 ret = vfio_pin_map_dma(iommu, dma, size);
1654 if (!ret && iommu->dirty_page_tracking) {
1657 vfio_remove_dma(iommu, dma);
1661 mutex_unlock(&iommu->lock);
1665 static int vfio_iommu_replay(struct vfio_iommu *iommu,
1675 if (!list_empty(&iommu->domain_list))
1676 d = list_first_entry(&iommu->domain_list,
1681 n = rb_first(&iommu->dma_list);
1759 for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
1867 vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
1873 list_for_each_entry(domain, &iommu->domain_list, next) {
1879 list_for_each_entry(group, &iommu->emulated_iommu_groups, next)
1938 * Check the new iommu aperture conflicts with existing aper or with any
1941 static bool vfio_iommu_aper_conflict(struct vfio_iommu *iommu,
1945 struct list_head *iova = &iommu->iova_list;
1958 if (vfio_find_dma(iommu, first->start, start - first->start))
1964 if (vfio_find_dma(iommu, end + 1, last->end - end))
1972 * Resize iommu iova aperture window. This is called only if the new
2015 static bool vfio_iommu_resv_conflict(struct vfio_iommu *iommu,
2025 if (vfio_find_dma(iommu, region->start, region->length))
2034 * exclude them from the iommu iova range
2104 static int vfio_iommu_iova_get_copy(struct vfio_iommu *iommu,
2107 struct list_head *iova = &iommu->iova_list;
2124 static void vfio_iommu_iova_insert_copy(struct vfio_iommu *iommu,
2127 struct list_head *iova = &iommu->iova_list;
2145 struct vfio_iommu *iommu = iommu_data;
2155 mutex_lock(&iommu->lock);
2158 if (iommu->vaddr_invalid_count)
2163 if (vfio_iommu_find_iommu_group(iommu, iommu_group))
2173 list_add(&group->next, &iommu->emulated_iommu_groups);
2177 * The iommu scope can only be promoted with the addition of a
2201 if (iommu->nesting) {
2213 if (vfio_iommu_aper_conflict(iommu, geo->aperture_start,
2223 if (vfio_iommu_resv_conflict(iommu, &group_resv_regions)) {
2233 ret = vfio_iommu_iova_get_copy(iommu, &iova_copy);
2276 list_for_each_entry(d, &iommu->domain_list, next) {
2299 ret = vfio_iommu_replay(iommu, domain);
2309 list_add(&domain->next, &iommu->domain_list);
2310 vfio_update_pgsize_bitmap(iommu);
2313 vfio_iommu_iova_insert_copy(iommu, &iova_copy);
2316 * An iommu backed group can dirty memory directly and therefore
2317 * demotes the iommu scope until it declares itself dirty tracking
2320 iommu->num_non_pinned_groups++;
2321 mutex_unlock(&iommu->lock);
2337 mutex_unlock(&iommu->lock);
2341 static void vfio_iommu_unmap_unpin_all(struct vfio_iommu *iommu)
2345 while ((node = rb_first(&iommu->dma_list)))
2346 vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node));
2349 static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
2353 n = rb_first(&iommu->dma_list);
2359 unlocked += vfio_unmap_unpin(iommu, dma, false);
2377 static void vfio_iommu_aper_expand(struct vfio_iommu *iommu,
2388 list_for_each_entry(domain, &iommu->domain_list, next) {
2410 static int vfio_iommu_resv_refresh(struct vfio_iommu *iommu,
2423 list_for_each_entry(d, &iommu->domain_list, next) {
2454 struct vfio_iommu *iommu = iommu_data;
2460 mutex_lock(&iommu->lock);
2461 list_for_each_entry(group, &iommu->emulated_iommu_groups, next) {
2468 if (list_empty(&iommu->emulated_iommu_groups) &&
2469 list_empty(&iommu->domain_list)) {
2470 WARN_ON(!list_empty(&iommu->device_list));
2471 vfio_iommu_unmap_unpin_all(iommu);
2481 vfio_iommu_iova_get_copy(iommu, &iova_copy);
2483 list_for_each_entry(domain, &iommu->domain_list, next) {
2495 * iommu and external domain doesn't exist, then all the
2496 * mappings go away too. If it's the last domain with iommu and
2500 if (list_is_singular(&iommu->domain_list)) {
2501 if (list_empty(&iommu->emulated_iommu_groups)) {
2503 &iommu->device_list));
2504 vfio_iommu_unmap_unpin_all(iommu);
2506 vfio_iommu_unmap_unpin_reaccount(iommu);
2512 vfio_iommu_aper_expand(iommu, &iova_copy);
2513 vfio_update_pgsize_bitmap(iommu);
2518 if (!vfio_iommu_resv_refresh(iommu, &iova_copy))
2519 vfio_iommu_iova_insert_copy(iommu, &iova_copy);
2525 * Removal of a group without dirty tracking may allow the iommu scope
2529 iommu->num_non_pinned_groups--;
2530 if (iommu->dirty_page_tracking)
2531 vfio_iommu_populate_bitmap_full(iommu);
2533 mutex_unlock(&iommu->lock);
2538 struct vfio_iommu *iommu;
2540 iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
2541 if (!iommu)
2548 iommu->nesting = true;
2551 iommu->v2 = true;
2554 kfree(iommu);
2558 INIT_LIST_HEAD(&iommu->domain_list);
2559 INIT_LIST_HEAD(&iommu->iova_list);
2560 iommu->dma_list = RB_ROOT;
2561 iommu->dma_avail = dma_entry_limit;
2562 mutex_init(&iommu->lock);
2563 mutex_init(&iommu->device_list_lock);
2564 INIT_LIST_HEAD(&iommu->device_list);
2565 iommu->pgsize_bitmap = PAGE_MASK;
2566 INIT_LIST_HEAD(&iommu->emulated_iommu_groups);
2568 return iommu;
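
The open path above is the usual allocate-then-initialize pattern: every list head, the dma_list rb_root, both mutexes and the DMA entry budget are set up before the container data is handed back. Reassembled as a sketch from the fragments (the VFIO_TYPE1v2_IOMMU / nesting flag handling in between is elided; dma_entry_limit is the module parameter shown above):

	static void *vfio_iommu_type1_open_sketch(void)
	{
		struct vfio_iommu *iommu;

		iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
		if (!iommu)
			return ERR_PTR(-ENOMEM);

		/* ... VFIO_TYPE1_IOMMU / v2 / nesting selection elided ... */

		INIT_LIST_HEAD(&iommu->domain_list);
		INIT_LIST_HEAD(&iommu->iova_list);
		iommu->dma_list = RB_ROOT;
		iommu->dma_avail = dma_entry_limit;	/* module parameter */
		mutex_init(&iommu->lock);
		mutex_init(&iommu->device_list_lock);
		INIT_LIST_HEAD(&iommu->device_list);
		iommu->pgsize_bitmap = PAGE_MASK;
		INIT_LIST_HEAD(&iommu->emulated_iommu_groups);

		return iommu;
	}
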
2587 struct vfio_iommu *iommu = iommu_data;
2592 &iommu->emulated_iommu_groups, next) {
2597 vfio_iommu_unmap_unpin_all(iommu);
2600 &iommu->domain_list, next) {
2606 vfio_iommu_iova_free(&iommu->iova_list);
2608 kfree(iommu);
2611 static int vfio_domains_have_enforce_cache_coherency(struct vfio_iommu *iommu)
2616 mutex_lock(&iommu->lock);
2617 list_for_each_entry(domain, &iommu->domain_list, next) {
2623 mutex_unlock(&iommu->lock);
2628 static bool vfio_iommu_has_emulated(struct vfio_iommu *iommu)
2632 mutex_lock(&iommu->lock);
2633 ret = !list_empty(&iommu->emulated_iommu_groups);
2634 mutex_unlock(&iommu->lock);
2638 static int vfio_iommu_type1_check_extension(struct vfio_iommu *iommu,
2652 return iommu && !vfio_iommu_has_emulated(iommu);
2654 if (!iommu)
2656 return vfio_domains_have_enforce_cache_coherency(iommu);
2683 static int vfio_iommu_iova_build_caps(struct vfio_iommu *iommu,
2691 list_for_each_entry(iova, &iommu->iova_list, list)
2710 list_for_each_entry(iova, &iommu->iova_list, list) {
2722 static int vfio_iommu_migration_build_caps(struct vfio_iommu *iommu,
2732 cap_mig.pgsize_bitmap = (size_t)1 << __ffs(iommu->pgsize_bitmap);
2738 static int vfio_iommu_dma_avail_build_caps(struct vfio_iommu *iommu,
2746 cap_dma_avail.avail = iommu->dma_avail;
2752 static int vfio_iommu_type1_get_info(struct vfio_iommu *iommu,
2770 mutex_lock(&iommu->lock);
2773 info.iova_pgsizes = iommu->pgsize_bitmap;
2775 ret = vfio_iommu_migration_build_caps(iommu, &caps);
2778 ret = vfio_iommu_dma_avail_build_caps(iommu, &caps);
2781 ret = vfio_iommu_iova_build_caps(iommu, &caps);
2783 mutex_unlock(&iommu->lock);
2811 static int vfio_iommu_type1_map_dma(struct vfio_iommu *iommu,
2827 return vfio_dma_do_map(iommu, &map);
2830 static int vfio_iommu_type1_unmap_dma(struct vfio_iommu *iommu,
2875 ret = vfio_dma_do_unmap(iommu, &unmap, &bitmap);
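
These wrappers implement the VFIO_IOMMU_MAP_DMA and VFIO_IOMMU_UNMAP_DMA ioctls of the type1 uAPI. A minimal userspace sketch against a container fd that already has a group attached and VFIO_TYPE1v2_IOMMU selected (error handling trimmed; the IOVA and size values are illustrative):

	#include <linux/vfio.h>
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>

	static int demo_map_and_unmap(int container_fd, size_t size)
	{
		void *buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (buf == MAP_FAILED)
			return -1;

		/* Map [buf, buf + size) at IOVA 0x100000 for read/write DMA. */
		struct vfio_iommu_type1_dma_map map = {
			.argsz = sizeof(map),
			.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
			.vaddr = (uintptr_t)buf,
			.iova  = 0x100000,		/* illustrative IOVA */
			.size  = size,
		};
		if (ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map))
			return -1;

		/* Tear the mapping down again. */
		struct vfio_iommu_type1_dma_unmap unmap = {
			.argsz = sizeof(unmap),
			.iova  = 0x100000,
			.size  = size,
		};
		int ret = ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap);

		munmap(buf, size);
		return ret;
	}

With the v2 interface an unmap range may span several mappings but cannot split one, which is what the bisect checks in vfio_dma_do_unmap() above enforce.
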
2883 static int vfio_iommu_type1_dirty_pages(struct vfio_iommu *iommu,
2893 if (!iommu->v2)
2911 mutex_lock(&iommu->lock);
2912 pgsize = 1 << __ffs(iommu->pgsize_bitmap);
2913 if (!iommu->dirty_page_tracking) {
2914 ret = vfio_dma_bitmap_alloc_all(iommu, pgsize);
2916 iommu->dirty_page_tracking = true;
2918 mutex_unlock(&iommu->lock);
2921 mutex_lock(&iommu->lock);
2922 if (iommu->dirty_page_tracking) {
2923 iommu->dirty_page_tracking = false;
2924 vfio_dma_bitmap_free_all(iommu);
2926 mutex_unlock(&iommu->lock);
2953 mutex_lock(&iommu->lock);
2955 iommu_pgsize = (size_t)1 << __ffs(iommu->pgsize_bitmap);
2971 if (iommu->dirty_page_tracking)
2973 iommu, range.iova,
2979 mutex_unlock(&iommu->lock);
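
vfio_iommu_type1_dirty_pages() services VFIO_IOMMU_DIRTY_PAGES with three flags: START, STOP, and GET_BITMAP over an IOVA range. A userspace sketch of starting tracking and fetching one range's bitmap; the range must line up with existing VFIO_IOMMU_MAP_DMA mappings, and pgsize would normally be taken from the migration capability of VFIO_IOMMU_GET_INFO rather than assumed:

	#include <linux/vfio.h>
	#include <stdint.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>

	static int demo_fetch_dirty_bitmap(int container_fd, uint64_t iova,
					   uint64_t size, uint64_t pgsize)
	{
		/* 1. Start dirty page tracking for the whole container. */
		struct vfio_iommu_type1_dirty_bitmap start = {
			.argsz = sizeof(start),
			.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START,
		};
		if (ioctl(container_fd, VFIO_IOMMU_DIRTY_PAGES, &start))
			return -1;

		/* 2. Read back the bitmap for [iova, iova + size). */
		size_t argsz = sizeof(struct vfio_iommu_type1_dirty_bitmap) +
			       sizeof(struct vfio_iommu_type1_dirty_bitmap_get);
		uint64_t bitmap_bytes = ((size / pgsize) + 7) / 8;
		struct vfio_iommu_type1_dirty_bitmap *get = calloc(1, argsz);
		if (!get)
			return -1;

		struct vfio_iommu_type1_dirty_bitmap_get *range =
			(void *)get->data;	/* payload follows the header */
		get->argsz = argsz;
		get->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
		range->iova = iova;
		range->size = size;
		range->bitmap.pgsize = pgsize;
		range->bitmap.size = bitmap_bytes;
		range->bitmap.data = calloc(1, bitmap_bytes);

		int ret = ioctl(container_fd, VFIO_IOMMU_DIRTY_PAGES, get);
		/* ... consume range->bitmap.data: one bit per pgsize page ... */

		free(range->bitmap.data);
		free(get);
		return ret;
	}

Stopping is symmetric: issuing VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP frees the per-mapping bitmaps, as the flag handling above shows.
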
2990 struct vfio_iommu *iommu = iommu_data;
2994 return vfio_iommu_type1_check_extension(iommu, arg);
2996 return vfio_iommu_type1_get_info(iommu, arg);
2998 return vfio_iommu_type1_map_dma(iommu, arg);
3000 return vfio_iommu_type1_unmap_dma(iommu, arg);
3002 return vfio_iommu_type1_dirty_pages(iommu, arg);
3011 struct vfio_iommu *iommu = iommu_data;
3017 * list_empty(&iommu->device_list) is tested under the iommu->lock while
3022 mutex_lock(&iommu->lock);
3023 mutex_lock(&iommu->device_list_lock);
3024 list_add(&vdev->iommu_entry, &iommu->device_list);
3025 mutex_unlock(&iommu->device_list_lock);
3026 mutex_unlock(&iommu->lock);
3032 struct vfio_iommu *iommu = iommu_data;
3037 mutex_lock(&iommu->lock);
3038 mutex_lock(&iommu->device_list_lock);
3040 mutex_unlock(&iommu->device_list_lock);
3041 mutex_unlock(&iommu->lock);
3044 static int vfio_iommu_type1_dma_rw_chunk(struct vfio_iommu *iommu,
3057 dma = vfio_find_dma(iommu, user_iova, 1);
3084 if (*copied && iommu->dirty_page_tracking) {
3085 unsigned long pgshift = __ffs(iommu->pgsize_bitmap);
3107 struct vfio_iommu *iommu = iommu_data;
3111 mutex_lock(&iommu->lock);
3113 if (WARN_ONCE(iommu->vaddr_invalid_count,
3120 ret = vfio_iommu_type1_dma_rw_chunk(iommu, user_iova, data,
3131 mutex_unlock(&iommu->lock);
3140 struct vfio_iommu *iommu = iommu_data;
3143 if (!iommu || !iommu_group)
3146 mutex_lock(&iommu->lock);
3147 list_for_each_entry(d, &iommu->domain_list, next) {
3153 mutex_unlock(&iommu->lock);
3159 .name = "vfio-iommu-type1",