// SPDX-License-Identifier: GPL-2.0-only
/*
 * MMU-based software IOTLB.
 *
 * Copyright (C) 2020-2021 Bytedance Inc. and/or its affiliates. All rights reserved.
 *
 * Author: Xie Yongji <xieyongji@bytedance.com>
 *
 */

#include <linux/slab.h>
#include <linux/file.h>
#include <linux/anon_inodes.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/vdpa.h>

#include "iova_domain.h"

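/*
 * Insert a [start, last] -> addr mapping into the domain's IOTLB.
 * The backing file and offset are stored as the entry's opaque data,
 * and a reference to the file is held until the range is removed.
 */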
static int vduse_iotlb_add_range(struct vduse_iova_domain *domain,
				 u64 start, u64 last,
				 u64 addr, unsigned int perm,
				 struct file *file, u64 offset)
{
	struct vdpa_map_file *map_file;
	int ret;

	map_file = kmalloc(sizeof(*map_file), GFP_ATOMIC);
	if (!map_file)
		return -ENOMEM;

	map_file->file = get_file(file);
	map_file->offset = offset;

	ret = vhost_iotlb_add_range_ctx(domain->iotlb, start, last,
					addr, perm, map_file);
	if (ret) {
		fput(map_file->file);
		kfree(map_file);
		return ret;
	}
	return 0;
}

static void vduse_iotlb_del_range(struct vduse_iova_domain *domain,
				  u64 start, u64 last)
{
	struct vdpa_map_file *map_file;
	struct vhost_iotlb_map *map;

	while ((map = vhost_iotlb_itree_first(domain->iotlb, start, last))) {
		map_file = (struct vdpa_map_file *)map->opaque;
		fput(map_file->file);
		kfree(map_file);
		vhost_iotlb_map_free(domain->iotlb, map);
	}
}

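/*
 * Replace the domain's IOTLB contents with the entries of @iotlb.
 * On failure, all entries added so far are removed again so the
 * domain is never left partially populated.
 */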
int vduse_domain_set_map(struct vduse_iova_domain *domain,
			 struct vhost_iotlb *iotlb)
{
	struct vdpa_map_file *map_file;
	struct vhost_iotlb_map *map;
	u64 start = 0ULL, last = ULLONG_MAX;
	int ret;

	spin_lock(&domain->iotlb_lock);
	vduse_iotlb_del_range(domain, start, last);

	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		map_file = (struct vdpa_map_file *)map->opaque;
		ret = vduse_iotlb_add_range(domain, map->start, map->last,
					    map->addr, map->perm,
					    map_file->file,
					    map_file->offset);
		if (ret)
			goto err;
	}
	spin_unlock(&domain->iotlb_lock);

	return 0;
err:
	vduse_iotlb_del_range(domain, start, last);
	spin_unlock(&domain->iotlb_lock);
	return ret;
}

void vduse_domain_clear_map(struct vduse_iova_domain *domain,
			    struct vhost_iotlb *iotlb)
{
	struct vhost_iotlb_map *map;
	u64 start = 0ULL, last = ULLONG_MAX;

	spin_lock(&domain->iotlb_lock);
	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		vduse_iotlb_del_range(domain, map->start, map->last);
	}
	spin_unlock(&domain->iotlb_lock);
}

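/*
 * Record the original physical address shadowed by each bounce page
 * covering [iova, iova + size), allocating bounce pages on demand.
 */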
static int vduse_domain_map_bounce_page(struct vduse_iova_domain *domain,
					 u64 iova, u64 size, u64 paddr)
{
	struct vduse_bounce_map *map;
	u64 last = iova + size - 1;

	while (iova <= last) {
		map = &domain->bounce_maps[iova >> PAGE_SHIFT];
		if (!map->bounce_page) {
			map->bounce_page = alloc_page(GFP_ATOMIC);
			if (!map->bounce_page)
				return -ENOMEM;
		}
		map->orig_phys = paddr;
		paddr += PAGE_SIZE;
		iova += PAGE_SIZE;
	}
	return 0;
}

static void vduse_domain_unmap_bounce_page(struct vduse_iova_domain *domain,
					   u64 iova, u64 size)
{
	struct vduse_bounce_map *map;
	u64 last = iova + size - 1;

	while (iova <= last) {
		map = &domain->bounce_maps[iova >> PAGE_SHIFT];
		map->orig_phys = INVALID_PHYS_ADDR;
		iova += PAGE_SIZE;
	}
}

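/*
 * Copy one buffer between its original physical pages and a mapped
 * bounce buffer, page by page, in the requested DMA direction.
 */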
static void do_bounce(phys_addr_t orig, void *addr, size_t size,
		      enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(orig);
	unsigned int offset = offset_in_page(orig);
	struct page *page;
	unsigned int sz = 0;

	while (size) {
		sz = min_t(size_t, PAGE_SIZE - offset, size);

		page = pfn_to_page(pfn);
		if (dir == DMA_TO_DEVICE)
			memcpy_from_page(addr, page, offset, sz);
		else
			memcpy_to_page(page, offset, addr, sz);

		size -= sz;
		pfn++;
		addr += sz;
		offset = 0;
	}
}

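/*
 * Sync the bounce pages backing [iova, iova + size) with the original
 * pages they shadow. Called with bounce_lock held.
 */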
static void vduse_domain_bounce(struct vduse_iova_domain *domain,
				dma_addr_t iova, size_t size,
				enum dma_data_direction dir)
{
	struct vduse_bounce_map *map;
	unsigned int offset;
	void *addr;
	size_t sz;

	if (iova >= domain->bounce_size)
		return;

	while (size) {
		map = &domain->bounce_maps[iova >> PAGE_SHIFT];
		offset = offset_in_page(iova);
		sz = min_t(size_t, PAGE_SIZE - offset, size);

		if (WARN_ON(!map->bounce_page ||
			    map->orig_phys == INVALID_PHYS_ADDR))
			return;

		addr = kmap_local_page(map->bounce_page);
		do_bounce(map->orig_phys + offset, addr + offset, sz, dir);
		kunmap_local(addr);
		size -= sz;
		iova += sz;
	}
}

static struct page *
vduse_domain_get_coherent_page(struct vduse_iova_domain *domain, u64 iova)
{
	u64 start = iova & PAGE_MASK;
	u64 last = start + PAGE_SIZE - 1;
	struct vhost_iotlb_map *map;
	struct page *page = NULL;

	spin_lock(&domain->iotlb_lock);
	map = vhost_iotlb_itree_first(domain->iotlb, start, last);
	if (!map)
		goto out;

	page = pfn_to_page((map->addr + iova - map->start) >> PAGE_SHIFT);
	get_page(page);
out:
	spin_unlock(&domain->iotlb_lock);

	return page;
}

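/*
 * Look up the kernel bounce page backing @iova for the mmap fault path.
 * Returns NULL if userspace bounce pages are registered or no bounce
 * page has been allocated yet.
 */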
static struct page *
vduse_domain_get_bounce_page(struct vduse_iova_domain *domain, u64 iova)
{
	struct vduse_bounce_map *map;
	struct page *page = NULL;

	read_lock(&domain->bounce_lock);
	map = &domain->bounce_maps[iova >> PAGE_SHIFT];
	if (domain->user_bounce_pages || !map->bounce_page)
		goto out;

	page = map->bounce_page;
	get_page(page);
out:
	read_unlock(&domain->bounce_lock);

	return page;
}

static void
vduse_domain_free_kernel_bounce_pages(struct vduse_iova_domain *domain)
{
	struct vduse_bounce_map *map;
	unsigned long pfn, bounce_pfns;

	bounce_pfns = domain->bounce_size >> PAGE_SHIFT;

	for (pfn = 0; pfn < bounce_pfns; pfn++) {
		map = &domain->bounce_maps[pfn];
		if (WARN_ON(map->orig_phys != INVALID_PHYS_ADDR))
			continue;

		if (!map->bounce_page)
			continue;

		__free_page(map->bounce_page);
		map->bounce_page = NULL;
	}
}

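/*
 * Switch to userspace-provided bounce pages. Contents of kernel bounce
 * pages that are still in use are copied over before those pages are
 * freed. Only a full-range registration is accepted.
 */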
int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
				       struct page **pages, int count)
{
	struct vduse_bounce_map *map;
	int i, ret;

	/* Partial mapping is not supported for now */
	if (count != (domain->bounce_size >> PAGE_SHIFT))
		return -EINVAL;

	write_lock(&domain->bounce_lock);
	ret = -EEXIST;
	if (domain->user_bounce_pages)
		goto out;

	for (i = 0; i < count; i++) {
		map = &domain->bounce_maps[i];
		if (map->bounce_page) {
			/* Copy kernel page to user page if it's in use */
			if (map->orig_phys != INVALID_PHYS_ADDR)
				memcpy_to_page(pages[i], 0,
					       page_address(map->bounce_page),
					       PAGE_SIZE);
			__free_page(map->bounce_page);
		}
		map->bounce_page = pages[i];
		get_page(pages[i]);
	}
	domain->user_bounce_pages = true;
	ret = 0;
out:
	write_unlock(&domain->bounce_lock);

	return ret;
}

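/*
 * Switch back to kernel-managed bounce pages, copying data that is
 * still in use out of the userspace pages before dropping their
 * references.
 */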
void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
{
	struct vduse_bounce_map *map;
	unsigned long i, count;

	write_lock(&domain->bounce_lock);
	if (!domain->user_bounce_pages)
		goto out;

	count = domain->bounce_size >> PAGE_SHIFT;
	for (i = 0; i < count; i++) {
		struct page *page = NULL;

		map = &domain->bounce_maps[i];
		if (WARN_ON(!map->bounce_page))
			continue;

		/* Copy user page to kernel page if it's in use */
		if (map->orig_phys != INVALID_PHYS_ADDR) {
			page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
			memcpy_from_page(page_address(page),
					 map->bounce_page, 0, PAGE_SIZE);
		}
		put_page(map->bounce_page);
		map->bounce_page = page;
	}
	domain->user_bounce_pages = false;
out:
	write_unlock(&domain->bounce_lock);
}

void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain)
{
	if (!domain->bounce_map)
		return;

	spin_lock(&domain->iotlb_lock);
	if (!domain->bounce_map)
		goto unlock;

	vduse_iotlb_del_range(domain, 0, domain->bounce_size - 1);
	domain->bounce_map = 0;
unlock:
	spin_unlock(&domain->iotlb_lock);
}

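/*
 * Lazily install a single IOTLB entry covering the whole bounce range,
 * backed by the domain's own file at offset 0.
 */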
static int vduse_domain_init_bounce_map(struct vduse_iova_domain *domain)
{
	int ret = 0;

	if (domain->bounce_map)
		return 0;

	spin_lock(&domain->iotlb_lock);
	if (domain->bounce_map)
		goto unlock;

	ret = vduse_iotlb_add_range(domain, 0, domain->bounce_size - 1,
				    0, VHOST_MAP_RW, domain->file, 0);
	if (ret)
		goto unlock;

	domain->bounce_map = 1;
unlock:
	spin_unlock(&domain->iotlb_lock);
	return ret;
}

static dma_addr_t
vduse_domain_alloc_iova(struct iova_domain *iovad,
			unsigned long size, unsigned long limit)
{
	unsigned long shift = iova_shift(iovad);
	unsigned long iova_len = iova_align(iovad, size) >> shift;
	unsigned long iova_pfn;

	iova_pfn = alloc_iova_fast(iovad, iova_len, limit >> shift, true);

	return (dma_addr_t)iova_pfn << shift;
}

static void vduse_domain_free_iova(struct iova_domain *iovad,
				   dma_addr_t iova, size_t size)
{
	unsigned long shift = iova_shift(iovad);
	unsigned long iova_len = iova_align(iovad, size) >> shift;

	free_iova_fast(iovad, iova >> shift, iova_len);
}

void vduse_domain_sync_single_for_device(struct vduse_iova_domain *domain,
				      dma_addr_t dma_addr, size_t size,
				      enum dma_data_direction dir)
{
	read_lock(&domain->bounce_lock);
	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
		vduse_domain_bounce(domain, dma_addr, size, DMA_TO_DEVICE);
	read_unlock(&domain->bounce_lock);
}

void vduse_domain_sync_single_for_cpu(struct vduse_iova_domain *domain,
				      dma_addr_t dma_addr, size_t size,
				      enum dma_data_direction dir)
{
	read_lock(&domain->bounce_lock);
	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
		vduse_domain_bounce(domain, dma_addr, size, DMA_FROM_DEVICE);
	read_unlock(&domain->bounce_lock);
}

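/*
 * Streaming DMA map: allocate an IOVA from the bounce region, attach
 * the page's physical address to the bounce maps and, unless
 * DMA_ATTR_SKIP_CPU_SYNC is set, bounce data towards the device for
 * DMA_TO_DEVICE and DMA_BIDIRECTIONAL transfers.
 */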
dma_addr_t vduse_domain_map_page(struct vduse_iova_domain *domain,
				 struct page *page, unsigned long offset,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	struct iova_domain *iovad = &domain->stream_iovad;
	unsigned long limit = domain->bounce_size - 1;
	phys_addr_t pa = page_to_phys(page) + offset;
	dma_addr_t iova = vduse_domain_alloc_iova(iovad, size, limit);

	if (!iova)
		return DMA_MAPPING_ERROR;

	if (vduse_domain_init_bounce_map(domain))
		goto err;

	read_lock(&domain->bounce_lock);
	if (vduse_domain_map_bounce_page(domain, (u64)iova, (u64)size, pa))
		goto err_unlock;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
		vduse_domain_bounce(domain, iova, size, DMA_TO_DEVICE);

	read_unlock(&domain->bounce_lock);

	return iova;
err_unlock:
	read_unlock(&domain->bounce_lock);
err:
	vduse_domain_free_iova(iovad, iova, size);
	return DMA_MAPPING_ERROR;
}

void vduse_domain_unmap_page(struct vduse_iova_domain *domain,
			     dma_addr_t dma_addr, size_t size,
			     enum dma_data_direction dir, unsigned long attrs)
{
	struct iova_domain *iovad = &domain->stream_iovad;

	read_lock(&domain->bounce_lock);
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
	    (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
		vduse_domain_bounce(domain, dma_addr, size, DMA_FROM_DEVICE);

	vduse_domain_unmap_bounce_page(domain, (u64)dma_addr, (u64)size);
	read_unlock(&domain->bounce_lock);
	vduse_domain_free_iova(iovad, dma_addr, size);
}

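/*
 * Allocate a coherent buffer: an IOVA from the consistent range plus
 * physically contiguous pages, exposed to userspace via an IOTLB entry
 * that refers back to the domain's file at the IOVA offset.
 */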
void *vduse_domain_alloc_coherent(struct vduse_iova_domain *domain,
				  size_t size, dma_addr_t *dma_addr,
				  gfp_t flag, unsigned long attrs)
{
	struct iova_domain *iovad = &domain->consistent_iovad;
	unsigned long limit = domain->iova_limit;
	dma_addr_t iova = vduse_domain_alloc_iova(iovad, size, limit);
	void *orig = alloc_pages_exact(size, flag);

	if (!iova || !orig)
		goto err;

	spin_lock(&domain->iotlb_lock);
	if (vduse_iotlb_add_range(domain, (u64)iova, (u64)iova + size - 1,
				  virt_to_phys(orig), VHOST_MAP_RW,
				  domain->file, (u64)iova)) {
		spin_unlock(&domain->iotlb_lock);
		goto err;
	}
	spin_unlock(&domain->iotlb_lock);

	*dma_addr = iova;

	return orig;
err:
	*dma_addr = DMA_MAPPING_ERROR;
	if (orig)
		free_pages_exact(orig, size);
	if (iova)
		vduse_domain_free_iova(iovad, iova, size);

	return NULL;
}

void vduse_domain_free_coherent(struct vduse_iova_domain *domain, size_t size,
				void *vaddr, dma_addr_t dma_addr,
				unsigned long attrs)
{
	struct iova_domain *iovad = &domain->consistent_iovad;
	struct vhost_iotlb_map *map;
	struct vdpa_map_file *map_file;
	phys_addr_t pa;

	spin_lock(&domain->iotlb_lock);
	map = vhost_iotlb_itree_first(domain->iotlb, (u64)dma_addr,
				      (u64)dma_addr + size - 1);
	if (WARN_ON(!map)) {
		spin_unlock(&domain->iotlb_lock);
		return;
	}
	map_file = (struct vdpa_map_file *)map->opaque;
	fput(map_file->file);
	kfree(map_file);
	pa = map->addr;
	vhost_iotlb_map_free(domain->iotlb, map);
	spin_unlock(&domain->iotlb_lock);

	vduse_domain_free_iova(iovad, dma_addr, size);
	free_pages_exact(phys_to_virt(pa), size);
}

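/*
 * Fault handler for userspace mappings of the domain file: offsets
 * below bounce_size resolve to bounce pages, higher offsets to
 * coherent pages.
 */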
static vm_fault_t vduse_domain_mmap_fault(struct vm_fault *vmf)
{
	struct vduse_iova_domain *domain = vmf->vma->vm_private_data;
	unsigned long iova = vmf->pgoff << PAGE_SHIFT;
	struct page *page;

	if (!domain)
		return VM_FAULT_SIGBUS;

	if (iova < domain->bounce_size)
		page = vduse_domain_get_bounce_page(domain, iova);
	else
		page = vduse_domain_get_coherent_page(domain, iova);

	if (!page)
		return VM_FAULT_SIGBUS;

	vmf->page = page;

	return 0;
}

static const struct vm_operations_struct vduse_domain_mmap_ops = {
	.fault = vduse_domain_mmap_fault,
};

static int vduse_domain_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vduse_iova_domain *domain = file->private_data;

	vm_flags_set(vma, VM_DONTDUMP | VM_DONTEXPAND);
	vma->vm_private_data = domain;
	vma->vm_ops = &vduse_domain_mmap_ops;

	return 0;
}

static int vduse_domain_release(struct inode *inode, struct file *file)
{
	struct vduse_iova_domain *domain = file->private_data;

	spin_lock(&domain->iotlb_lock);
	vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
	vduse_domain_remove_user_bounce_pages(domain);
	vduse_domain_free_kernel_bounce_pages(domain);
	spin_unlock(&domain->iotlb_lock);
	put_iova_domain(&domain->stream_iovad);
	put_iova_domain(&domain->consistent_iovad);
	vhost_iotlb_free(domain->iotlb);
	vfree(domain->bounce_maps);
	kfree(domain);

	return 0;
}

static const struct file_operations vduse_domain_fops = {
	.owner = THIS_MODULE,
	.mmap = vduse_domain_mmap,
	.release = vduse_domain_release,
};

void vduse_domain_destroy(struct vduse_iova_domain *domain)
{
	fput(domain->file);
}

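/*
 * Create an IOVA domain with a bounce region of @bounce_size bytes for
 * streaming DMA and a consistent region above it, both exposed to
 * userspace through an anonymous file that supports mmap().
 */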
struct vduse_iova_domain *
vduse_domain_create(unsigned long iova_limit, size_t bounce_size)
{
	struct vduse_iova_domain *domain;
	struct file *file;
	struct vduse_bounce_map *map;
	unsigned long pfn, bounce_pfns;
	int ret;

	bounce_pfns = PAGE_ALIGN(bounce_size) >> PAGE_SHIFT;
	if (iova_limit <= bounce_size)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->iotlb = vhost_iotlb_alloc(0, 0);
	if (!domain->iotlb)
		goto err_iotlb;

	domain->iova_limit = iova_limit;
	domain->bounce_size = PAGE_ALIGN(bounce_size);
	domain->bounce_maps = vzalloc(bounce_pfns *
				sizeof(struct vduse_bounce_map));
	if (!domain->bounce_maps)
		goto err_map;

	for (pfn = 0; pfn < bounce_pfns; pfn++) {
		map = &domain->bounce_maps[pfn];
		map->orig_phys = INVALID_PHYS_ADDR;
	}
	file = anon_inode_getfile("[vduse-domain]", &vduse_domain_fops,
				domain, O_RDWR);
	if (IS_ERR(file))
		goto err_file;

	domain->file = file;
	rwlock_init(&domain->bounce_lock);
	spin_lock_init(&domain->iotlb_lock);
	init_iova_domain(&domain->stream_iovad,
			PAGE_SIZE, IOVA_START_PFN);
	ret = iova_domain_init_rcaches(&domain->stream_iovad);
	if (ret)
		goto err_iovad_stream;
	init_iova_domain(&domain->consistent_iovad,
			PAGE_SIZE, bounce_pfns);
	ret = iova_domain_init_rcaches(&domain->consistent_iovad);
	if (ret)
		goto err_iovad_consistent;

	return domain;
err_iovad_consistent:
	put_iova_domain(&domain->stream_iovad);
err_iovad_stream:
	fput(file);
err_file:
	vfree(domain->bounce_maps);
err_map:
	vhost_iotlb_free(domain->iotlb);
err_iotlb:
	kfree(domain);
	return NULL;
}

int vduse_domain_init(void)
{
	return iova_cache_get();
}

void vduse_domain_exit(void)
{
	iova_cache_put();
}