/*-
 * Copyright (c) 2016 Akshay Jaggi <jaggi@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * gntdev.c
 *
 * Interface to /dev/xen/gntdev.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/selinfo.h>
#include <sys/poll.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/rman.h>
#include <sys/tree.h>
#include <sys/module.h>
#include <sys/proc.h>
#include <sys/bitset.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/syslog.h>
#include <sys/taskqueue.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

#include <machine/md_var.h>

#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#include <xen/error.h>
#include <xen/xen_intr.h>
#include <xen/gnttab.h>
#include <xen/gntdev.h>

MALLOC_DEFINE(M_GNTDEV, "gntdev", "Xen grant-table user-space device");

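/*
 * The number of pages addressable by a 64-bit file offset; the free-offset
 * allocator below hands out page-aligned offsets from this range.
 */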
#define MAX_OFFSET_COUNT ((0xffffffffffffffffull >> PAGE_SHIFT) + 1)

static d_open_t gntdev_open;
static d_ioctl_t gntdev_ioctl;
static d_mmap_single_t gntdev_mmap_single;

static struct cdevsw gntdev_devsw = {
	.d_version = D_VERSION,
	.d_open = gntdev_open,
	.d_ioctl = gntdev_ioctl,
	.d_mmap_single = gntdev_mmap_single,
	.d_name = "gntdev",
};

static device_t gntdev_dev = NULL;

struct gntdev_gref;
struct gntdev_gmap;
STAILQ_HEAD(gref_list_head, gntdev_gref);
STAILQ_HEAD(gmap_list_head, gntdev_gmap);
RB_HEAD(gref_tree_head, gntdev_gref);
RB_HEAD(gmap_tree_head, gntdev_gmap);

struct file_offset_struct {
	RB_ENTRY(file_offset_struct)	next;
	uint64_t			file_offset;
	uint64_t			count;
};

static int
offset_cmp(struct file_offset_struct *f1, struct file_offset_struct *f2)
{
	/*
	 * Use an explicit three-way comparison: truncating the uint64_t
	 * difference to int misorders (or falsely equates) offsets that
	 * differ by 2^32 or more.
	 */
	if (f1->file_offset < f2->file_offset)
		return (-1);
	if (f1->file_offset > f2->file_offset)
		return (1);
	return (0);
}

RB_HEAD(file_offset_head, file_offset_struct);
RB_GENERATE_STATIC(file_offset_head, file_offset_struct, next, offset_cmp);

struct per_user_data {
	struct mtx		user_data_lock;
	struct gref_tree_head	gref_tree;
	struct gmap_tree_head	gmap_tree;
	struct file_offset_head	file_offset;
};
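
/*
 * Lock order note: where both are needed, user_data_lock is taken before
 * the global cleanup_data mutexes (see gntdev_dealloc_gref() and
 * gntdev_unmap_grant_ref() below).
 */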

/*
 * Find a range of file offsets at which the userspace program can later
 * mmap the corresponding pages.
 */
static int
get_file_offset(struct per_user_data *priv_user, uint32_t count,
    uint64_t *file_offset)
{
	struct file_offset_struct *offset, *offset_tmp;

	if (count == 0)
		return (EINVAL);
	mtx_lock(&priv_user->user_data_lock);
	RB_FOREACH_SAFE(offset, file_offset_head, &priv_user->file_offset,
	    offset_tmp) {
		if (offset->count >= count) {
			offset->count -= count;
			*file_offset = offset->file_offset + offset->count *
			    PAGE_SIZE;
			if (offset->count == 0) {
				RB_REMOVE(file_offset_head,
				    &priv_user->file_offset, offset);
				free(offset, M_GNTDEV);
			}
			mtx_unlock(&priv_user->user_data_lock);
			return (0);
		}
	}
	mtx_unlock(&priv_user->user_data_lock);

	return (ENOSPC);
}

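/*
 * Return a range of file offsets to the free pool, merging it with any
 * free ranges that immediately precede or follow it, so that the tree
 * keeps one node per maximal free range.  For example, freeing pages just
 * before an existing free range coalesces both into a single node.
 */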
static void
put_file_offset(struct per_user_data *priv_user, uint32_t count,
    uint64_t file_offset)
{
	struct file_offset_struct *offset, *offset_nxt, *offset_prv;

	offset = malloc(sizeof(*offset), M_GNTDEV, M_WAITOK | M_ZERO);
	offset->file_offset = file_offset;
	offset->count = count;

	mtx_lock(&priv_user->user_data_lock);
	RB_INSERT(file_offset_head, &priv_user->file_offset, offset);
	offset_nxt = RB_NEXT(file_offset_head, &priv_user->file_offset, offset);
	offset_prv = RB_PREV(file_offset_head, &priv_user->file_offset, offset);
	if (offset_nxt != NULL &&
	    offset_nxt->file_offset == offset->file_offset + offset->count *
	    PAGE_SIZE) {
		offset->count += offset_nxt->count;
		RB_REMOVE(file_offset_head, &priv_user->file_offset,
		    offset_nxt);
		free(offset_nxt, M_GNTDEV);
	}
	if (offset_prv != NULL &&
	    offset->file_offset == offset_prv->file_offset + offset_prv->count *
	    PAGE_SIZE) {
		offset_prv->count += offset->count;
		RB_REMOVE(file_offset_head, &priv_user->file_offset, offset);
		free(offset, M_GNTDEV);
	}
	mtx_unlock(&priv_user->user_data_lock);
}

static int	gntdev_gmap_pg_ctor(void *handle, vm_ooffset_t size,
    vm_prot_t prot, vm_ooffset_t foff, struct ucred *cred, u_short *color);
static void	gntdev_gmap_pg_dtor(void *handle);
static int	gntdev_gmap_pg_fault(vm_object_t object, vm_ooffset_t offset,
    int prot, vm_page_t *mres);

static struct cdev_pager_ops gntdev_gmap_pg_ops = {
	.cdev_pg_fault = gntdev_gmap_pg_fault,
	.cdev_pg_ctor =	gntdev_gmap_pg_ctor,
	.cdev_pg_dtor =	gntdev_gmap_pg_dtor,
};

struct cleanup_data_struct {
	struct mtx to_kill_grefs_mtx;
	struct mtx to_kill_gmaps_mtx;
	struct gref_list_head to_kill_grefs;
	struct gmap_list_head to_kill_gmaps;
};

static struct cleanup_data_struct cleanup_data = {
	.to_kill_grefs = STAILQ_HEAD_INITIALIZER(cleanup_data.to_kill_grefs),
	.to_kill_gmaps = STAILQ_HEAD_INITIALIZER(cleanup_data.to_kill_gmaps),
};
MTX_SYSINIT(to_kill_grefs_mtx, &cleanup_data.to_kill_grefs_mtx,
    "gntdev to_kill_grefs mutex", MTX_DEF);
MTX_SYSINIT(to_kill_gmaps_mtx, &cleanup_data.to_kill_gmaps_mtx,
    "gntdev to_kill_gmaps mutex", MTX_DEF);

static void	cleanup_function(void *arg, __unused int pending);
static struct task cleanup_task = TASK_INITIALIZER(0, cleanup_function,
    &cleanup_data);

struct notify_data {
	uint64_t		index;
	uint32_t		action;
	uint32_t		event_channel_port;
	xen_intr_handle_t	notify_evtchn_handle;
};

static void	notify(struct notify_data *notify, vm_page_t page);

/*-------------------- Grant Allocation Methods  -----------------------------*/

struct gntdev_gref {
	union gref_next_union {
		STAILQ_ENTRY(gntdev_gref)	list;
		RB_ENTRY(gntdev_gref)		tree;
	}			gref_next;
	uint64_t		file_index;
	grant_ref_t		gref_id;
	vm_page_t		page;
	struct notify_data	*notify;
};

static int
gref_cmp(struct gntdev_gref *g1, struct gntdev_gref *g2)
{
	/* Three-way comparison; see the comment in offset_cmp(). */
	if (g1->file_index < g2->file_index)
		return (-1);
	if (g1->file_index > g2->file_index)
		return (1);
	return (0);
}

RB_GENERATE_STATIC(gref_tree_head, gntdev_gref, gref_next.tree, gref_cmp);

/*
 * Traverse the device-global list of to-be-deleted grant allocations and
 * destroy each entry once all accesses to it, both local mmaps and foreign
 * maps, have ended.
 */
static void
gref_list_dtor(struct cleanup_data_struct *cleanup_data)
{
	struct gref_list_head tmp_grefs;
	struct gntdev_gref *gref, *gref_tmp, *gref_previous;

	STAILQ_INIT(&tmp_grefs);
	mtx_lock(&cleanup_data->to_kill_grefs_mtx);
	STAILQ_SWAP(&cleanup_data->to_kill_grefs, &tmp_grefs, gntdev_gref);
	mtx_unlock(&cleanup_data->to_kill_grefs_mtx);

	gref_previous = NULL;
	STAILQ_FOREACH_SAFE(gref, &tmp_grefs, gref_next.list, gref_tmp) {
		if (gref->page && gref->page->object == NULL) {
			if (gref->notify) {
				notify(gref->notify, gref->page);
			}
			if (gref->gref_id != GRANT_REF_INVALID) {
				if (gnttab_query_foreign_access(gref->gref_id))
					continue;
				if (gnttab_end_foreign_access_ref(gref->gref_id)
				    == 0)
					continue;
				gnttab_free_grant_reference(gref->gref_id);
			}
			vm_page_unwire_noq(gref->page);
			vm_page_free(gref->page);
			gref->page = NULL;
		}
		if (gref->page == NULL) {
			if (gref_previous == NULL)
				STAILQ_REMOVE_HEAD(&tmp_grefs, gref_next.list);
			else
				STAILQ_REMOVE_AFTER(&tmp_grefs, gref_previous,
				    gref_next.list);
			if (gref->notify)
				free(gref->notify, M_GNTDEV);
			free(gref, M_GNTDEV);
		} else
			gref_previous = gref;
	}

	if (!STAILQ_EMPTY(&tmp_grefs)) {
		mtx_lock(&cleanup_data->to_kill_grefs_mtx);
		STAILQ_CONCAT(&cleanup_data->to_kill_grefs, &tmp_grefs);
		mtx_unlock(&cleanup_data->to_kill_grefs_mtx);
	}
}

/*
 * Find "count" contiguous allocated grants for a given userspace program,
 * starting at the given file offset (index).
 */
static struct gntdev_gref*
gntdev_find_grefs(struct per_user_data *priv_user,
	uint64_t index, uint32_t count)
{
	struct gntdev_gref find_gref, *gref, *gref_start = NULL;

	find_gref.file_index = index;

	mtx_lock(&priv_user->user_data_lock);
	gref_start = RB_FIND(gref_tree_head, &priv_user->gref_tree, &find_gref);
	for (gref = gref_start; gref != NULL && count > 0; gref =
	    RB_NEXT(gref_tree_head, &priv_user->gref_tree, gref)) {
		if (index != gref->file_index)
			break;
		index += PAGE_SIZE;
		count--;
	}
	mtx_unlock(&priv_user->user_data_lock);

	if (count)
		return (NULL);
	return (gref_start);
}

/*
 * IOCTL_GNTDEV_ALLOC_GREF
 * Allocate the requested number of wired pages, grant foreign access to
 * their physical frames, and record the allocation in the per-user private
 * data so that the pages can be mmapped by the userspace program.
 */
static int
gntdev_alloc_gref(struct ioctl_gntdev_alloc_gref *arg)
{
	uint32_t i;
	int error, readonly;
	uint64_t file_offset;
	struct gntdev_gref *grefs;
	struct per_user_data *priv_user;

	readonly = !(arg->flags & GNTDEV_ALLOC_FLAG_WRITABLE);

	error = devfs_get_cdevpriv((void**) &priv_user);
	if (error != 0)
		return (EINVAL);

	/* Cleanup grefs and free pages. */
	taskqueue_enqueue(taskqueue_thread, &cleanup_task);

	/* Get file offset for this request. */
	error = get_file_offset(priv_user, arg->count, &file_offset);
	if (error != 0)
		return (error);

	/* Allocate grefs. */
	grefs = malloc(sizeof(*grefs) * arg->count, M_GNTDEV, M_WAITOK);

	for (i = 0; i < arg->count; i++) {
		grefs[i].file_index = file_offset + i * PAGE_SIZE;
		grefs[i].gref_id = GRANT_REF_INVALID;
		grefs[i].notify = NULL;
		grefs[i].page = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL
			| VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
		if (grefs[i].page == NULL) {
			log(LOG_ERR, "Page allocation failed.");
			error = ENOMEM;
			break;
		}
		if ((grefs[i].page->flags & PG_ZERO) == 0) {
			/*
			 * Zero the allocated page, as we don't want to
			 * leak our memory to other domains.
			 */
			pmap_zero_page(grefs[i].page);
		}
		grefs[i].page->valid = VM_PAGE_BITS_ALL;

		error = gnttab_grant_foreign_access(arg->domid,
			(VM_PAGE_TO_PHYS(grefs[i].page) >> PAGE_SHIFT),
			readonly, &grefs[i].gref_id);
		if (error != 0) {
			log(LOG_ERR, "Grant Table Hypercall failed.");
			break;
		}
	}

	if (error != 0) {
		/*
		 * If the target domain maps the gref (by guessing the
		 * gref-id), then we can't clean it up yet and we have to
		 * leave the page in place so as to not leak our memory
		 * to that domain.  Add it to a global list to be cleaned
		 * up later.
		 */
		mtx_lock(&cleanup_data.to_kill_grefs_mtx);
		for (i = 0; i < arg->count; i++)
			STAILQ_INSERT_TAIL(&cleanup_data.to_kill_grefs,
			    &grefs[i], gref_next.list);
		mtx_unlock(&cleanup_data.to_kill_grefs_mtx);

		taskqueue_enqueue(taskqueue_thread, &cleanup_task);

		return (error);
	}

	/* Copy the output values. */
	arg->index = file_offset;
	for (i = 0; i < arg->count; i++)
		suword32(&arg->gref_ids[i], grefs[i].gref_id);

	/* Modify the per user private data. */
	mtx_lock(&priv_user->user_data_lock);
	for (i = 0; i < arg->count; i++)
		RB_INSERT(gref_tree_head, &priv_user->gref_tree, &grefs[i]);
	mtx_unlock(&priv_user->user_data_lock);

	return (error);
}
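
/*
 * Example (userspace sketch, not compiled into the kernel): allocating a
 * single writable grant for a peer domain, mapping it, and releasing it
 * again.  Field names follow the ioctl structures handled above;
 * "peer_domid" is a hypothetical value and error handling is elided.
 *
 *	uint32_t gref_id;
 *	struct ioctl_gntdev_alloc_gref alloc = {
 *		.domid = peer_domid,
 *		.flags = GNTDEV_ALLOC_FLAG_WRITABLE,
 *		.count = 1,
 *		.gref_ids = &gref_id,
 *	};
 *	int fd = open("/dev/xen/gntdev", O_RDWR);
 *	ioctl(fd, IOCTL_GNTDEV_ALLOC_GREF, &alloc);
 *	void *page = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, alloc.index);
 *	// ... publish gref_id to the peer domain out of band ...
 *	munmap(page, PAGE_SIZE);
 *	struct ioctl_gntdev_dealloc_gref dealloc = {
 *		.index = alloc.index,
 *		.count = 1,
 *	};
 *	ioctl(fd, IOCTL_GNTDEV_DEALLOC_GREF, &dealloc);
 */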

/*
 * IOCTL_GNTDEV_DEALLOC_GREF
 * Remove the grant allocation information from the per-user private data,
 * so that the grants can no longer be mmapped by the userspace program,
 * and add them to the global device list of to-be-deleted grants.
 */
static int
gntdev_dealloc_gref(struct ioctl_gntdev_dealloc_gref *arg)
{
	int error;
	uint32_t count;
	struct gntdev_gref *gref, *gref_tmp;
	struct per_user_data *priv_user;

	error = devfs_get_cdevpriv((void**) &priv_user);
	if (error != 0)
		return (EINVAL);

	gref = gntdev_find_grefs(priv_user, arg->index, arg->count);
	if (gref == NULL) {
		log(LOG_ERR, "Can't find requested grant-refs.");
		return (EINVAL);
	}

	/* Remove the grefs from user private data. */
	count = arg->count;
	mtx_lock(&priv_user->user_data_lock);
	mtx_lock(&cleanup_data.to_kill_grefs_mtx);
	for (; gref != NULL && count > 0; gref = gref_tmp) {
		gref_tmp = RB_NEXT(gref_tree_head, &priv_user->gref_tree, gref);
		RB_REMOVE(gref_tree_head, &priv_user->gref_tree, gref);
		STAILQ_INSERT_TAIL(&cleanup_data.to_kill_grefs, gref,
		    gref_next.list);
		count--;
	}
	mtx_unlock(&cleanup_data.to_kill_grefs_mtx);
	mtx_unlock(&priv_user->user_data_lock);

	taskqueue_enqueue(taskqueue_thread, &cleanup_task);
	put_file_offset(priv_user, arg->count, arg->index);

	return (0);
}

/*-------------------- Grant Mapping Methods  --------------------------------*/

struct gntdev_gmap_map {
	vm_object_t	mem;
	struct resource	*pseudo_phys_res;
	int		pseudo_phys_res_id;
	vm_paddr_t	phys_base_addr;
};

struct gntdev_gmap {
	union gmap_next_union {
		STAILQ_ENTRY(gntdev_gmap)	list;
		RB_ENTRY(gntdev_gmap)		tree;
	}				gmap_next;
	uint64_t			file_index;
	uint32_t			count;
	struct gnttab_map_grant_ref	*grant_map_ops;
	struct gntdev_gmap_map		*map;
	struct notify_data		*notify;
};

static int
gmap_cmp(struct gntdev_gmap *g1, struct gntdev_gmap *g2)
{
	/* Three-way comparison; see the comment in offset_cmp(). */
	if (g1->file_index < g2->file_index)
		return (-1);
	if (g1->file_index > g2->file_index)
		return (1);
	return (0);
}

RB_GENERATE_STATIC(gmap_tree_head, gntdev_gmap, gmap_next.tree, gmap_cmp);

/*
 * Traverse the device-global list of to-be-deleted grant mappings, and if
 * a region is no longer mmapped by anyone, free the memory used to store
 * information about the mapping.
 */
static void
gmap_list_dtor(struct cleanup_data_struct *cleanup_data)
{
	struct gmap_list_head tmp_gmaps;
	struct gntdev_gmap *gmap, *gmap_tmp, *gmap_previous;

	STAILQ_INIT(&tmp_gmaps);
	mtx_lock(&cleanup_data->to_kill_gmaps_mtx);
	STAILQ_SWAP(&cleanup_data->to_kill_gmaps, &tmp_gmaps, gntdev_gmap);
	mtx_unlock(&cleanup_data->to_kill_gmaps_mtx);

	gmap_previous = NULL;
	STAILQ_FOREACH_SAFE(gmap, &tmp_gmaps, gmap_next.list, gmap_tmp) {
		if (gmap->map == NULL) {
			if (gmap_previous == NULL)
				STAILQ_REMOVE_HEAD(&tmp_gmaps, gmap_next.list);
			else
				STAILQ_REMOVE_AFTER(&tmp_gmaps, gmap_previous,
				    gmap_next.list);

			if (gmap->notify)
				free(gmap->notify, M_GNTDEV);
			free(gmap->grant_map_ops, M_GNTDEV);
			free(gmap, M_GNTDEV);
		} else
			gmap_previous = gmap;
	}

	if (!STAILQ_EMPTY(&tmp_gmaps)) {
		mtx_lock(&cleanup_data->to_kill_gmaps_mtx);
		STAILQ_CONCAT(&cleanup_data->to_kill_gmaps, &tmp_gmaps);
		mtx_unlock(&cleanup_data->to_kill_gmaps_mtx);
	}
}

/*
 * Find mapped grants for a given userspace program, by file-offset (index)
 * and count, as supplied during the map-ioctl.
 */
static struct gntdev_gmap*
gntdev_find_gmap(struct per_user_data *priv_user,
	uint64_t index, uint32_t count)
{
	struct gntdev_gmap find_gmap, *gmap;

	find_gmap.file_index = index;

	mtx_lock(&priv_user->user_data_lock);
	gmap = RB_FIND(gmap_tree_head, &priv_user->gmap_tree, &find_gmap);
	mtx_unlock(&priv_user->user_data_lock);

	if (gmap != NULL && gmap->count == count)
		return (gmap);
	return (NULL);
}

/*
 * Remove the pages from the mgtdevice pager, perform the unmap hypercall,
 * and free the xenmem resource.  This function is called during destruction
 * of the mgtdevice pager, which happens when all mmaps to it have been
 * removed and the unmap ioctl has been performed.
 */
static int
notify_unmap_cleanup(struct gntdev_gmap *gmap)
{
	uint32_t i;
	int error, count;
	vm_page_t m;
	struct gnttab_unmap_grant_ref *unmap_ops;

	unmap_ops = malloc(sizeof(struct gnttab_unmap_grant_ref) * gmap->count,
			M_GNTDEV, M_WAITOK);

	/* Enumerate freeable maps. */
	count = 0;
	for (i = 0; i < gmap->count; i++) {
		if (gmap->grant_map_ops[i].handle != -1) {
			unmap_ops[count].handle = gmap->grant_map_ops[i].handle;
			unmap_ops[count].host_addr =
				gmap->grant_map_ops[i].host_addr;
			unmap_ops[count].dev_bus_addr = 0;
			count++;
		}
	}

	/* Perform notification. */
	if (count > 0 && gmap->notify) {
		vm_page_t page;
		uint64_t page_offset;

		page_offset = gmap->notify->index - gmap->file_index;
		page = PHYS_TO_VM_PAGE(gmap->map->phys_base_addr + page_offset);
		notify(gmap->notify, page);
	}

	/* Free the pages. */
	VM_OBJECT_WLOCK(gmap->map->mem);
retry:
	for (i = 0; i < gmap->count; i++) {
		m = vm_page_lookup(gmap->map->mem, i);
		if (m == NULL)
			continue;
		if (vm_page_busy_acquire(m, VM_ALLOC_WAITFAIL) == 0)
			goto retry;
		cdev_pager_free_page(gmap->map->mem, m);
	}
	VM_OBJECT_WUNLOCK(gmap->map->mem);

	/* Perform unmap hypercall. */
	error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
	    unmap_ops, count);

	for (i = 0; i < gmap->count; i++) {
		gmap->grant_map_ops[i].handle = -1;
		gmap->grant_map_ops[i].host_addr = 0;
	}

	if (gmap->map) {
		error = xenmem_free(gntdev_dev, gmap->map->pseudo_phys_res_id,
		    gmap->map->pseudo_phys_res);
		KASSERT(error == 0,
		    ("Unable to release memory resource: %d", error));

		free(gmap->map, M_GNTDEV);
		gmap->map = NULL;
	}

	free(unmap_ops, M_GNTDEV);

	return (error);
}

/*
 * IOCTL_GNTDEV_MAP_GRANT_REF
 * Populate the structures for mapping the grant references in the per-user
 * private data.  The actual resource allocation and map hypercall are
 * performed during mmap.
 */
static int
gntdev_map_grant_ref(struct ioctl_gntdev_map_grant_ref *arg)
{
	uint32_t i;
	int error;
	struct gntdev_gmap *gmap;
	struct per_user_data *priv_user;

	error = devfs_get_cdevpriv((void**) &priv_user);
	if (error != 0)
		return (EINVAL);

	gmap = malloc(sizeof(*gmap), M_GNTDEV, M_WAITOK | M_ZERO);
	gmap->count = arg->count;
	gmap->grant_map_ops =
	    malloc(sizeof(struct gnttab_map_grant_ref) * arg->count,
	        M_GNTDEV, M_WAITOK | M_ZERO);

	for (i = 0; i < arg->count; i++) {
		struct ioctl_gntdev_grant_ref ref;

		error = copyin(&arg->refs[i], &ref, sizeof(ref));
		if (error != 0) {
			free(gmap->grant_map_ops, M_GNTDEV);
			free(gmap, M_GNTDEV);
			return (error);
		}
		gmap->grant_map_ops[i].dom = ref.domid;
		gmap->grant_map_ops[i].ref = ref.ref;
		gmap->grant_map_ops[i].handle = -1;
		gmap->grant_map_ops[i].flags = GNTMAP_host_map;
	}

	error = get_file_offset(priv_user, arg->count, &gmap->file_index);
	if (error != 0) {
		free(gmap->grant_map_ops, M_GNTDEV);
		free(gmap, M_GNTDEV);
		return (error);
	}

	mtx_lock(&priv_user->user_data_lock);
	RB_INSERT(gmap_tree_head, &priv_user->gmap_tree, gmap);
	mtx_unlock(&priv_user->user_data_lock);

	arg->index = gmap->file_index;

	return (error);
}
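
/*
 * Example (userspace sketch, not compiled into the kernel): mapping one
 * grant reference published by a peer domain, then tearing the mapping
 * down again.  "peer_domid" and "peer_gref" are hypothetical values
 * obtained out of band, "fd" is the gntdev descriptor from the earlier
 * example, and error handling is elided.
 *
 *	struct ioctl_gntdev_grant_ref ref = {
 *		.domid = peer_domid,
 *		.ref = peer_gref,
 *	};
 *	struct ioctl_gntdev_map_grant_ref map = {
 *		.count = 1,
 *		.refs = &ref,
 *	};
 *	ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &map);
 *	void *page = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, map.index);	// map hypercall happens here
 *	// ... use the shared page ...
 *	munmap(page, PAGE_SIZE);
 *	struct ioctl_gntdev_unmap_grant_ref unmap = {
 *		.index = map.index,
 *		.count = 1,
 *	};
 *	ioctl(fd, IOCTL_GNTDEV_UNMAP_GRANT_REF, &unmap);
 */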

/*
 * IOCTL_GNTDEV_UNMAP_GRANT_REF
 * Remove the map information from the per-user private data and add it
 * to the global device list of mappings to be deleted.  A reference to
 * the mgtdevice pager is also dropped; the reason is explained in
 * mmap_gmap().
 */
static int
gntdev_unmap_grant_ref(struct ioctl_gntdev_unmap_grant_ref *arg)
{
	int error;
	struct gntdev_gmap *gmap;
	struct per_user_data *priv_user;

	error = devfs_get_cdevpriv((void**) &priv_user);
	if (error != 0)
		return (EINVAL);

	gmap = gntdev_find_gmap(priv_user, arg->index, arg->count);
	if (gmap == NULL) {
		log(LOG_ERR, "Can't find requested grant-map.");
		return (EINVAL);
	}

	mtx_lock(&priv_user->user_data_lock);
	mtx_lock(&cleanup_data.to_kill_gmaps_mtx);
	RB_REMOVE(gmap_tree_head, &priv_user->gmap_tree, gmap);
	STAILQ_INSERT_TAIL(&cleanup_data.to_kill_gmaps, gmap, gmap_next.list);
	mtx_unlock(&cleanup_data.to_kill_gmaps_mtx);
	mtx_unlock(&priv_user->user_data_lock);

	if (gmap->map)
		vm_object_deallocate(gmap->map->mem);

	taskqueue_enqueue(taskqueue_thread, &cleanup_task);
	put_file_offset(priv_user, arg->count, arg->index);

	return (0);
}

/*
 * IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR
 * Get the file offset and count for a given mapping from the virtual
 * address at which the mapping is mmapped.
 * Note that this works only for grants mapped by this domain, not for
 * grants allocated here; count is not meaningful for allocated grants.
 * Also, because this ioctl is present in the Linux gntdev device but not
 * in the Linux gntalloc one, most userspace code uses it only for mapped
 * grants.
 */
static int
gntdev_get_offset_for_vaddr(struct ioctl_gntdev_get_offset_for_vaddr *arg,
	struct thread *td)
{
	int error;
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t mem;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	struct gntdev_gmap *gmap;
	int rc;

	map = &td->td_proc->p_vmspace->vm_map;
	error = vm_map_lookup(&map, arg->vaddr, VM_PROT_NONE, &entry,
		    &mem, &pindex, &prot, &wired);
	if (error != KERN_SUCCESS)
		return (EINVAL);

	if ((mem->type != OBJT_MGTDEVICE) ||
	    (mem->un_pager.devp.ops != &gntdev_gmap_pg_ops)) {
		rc = EINVAL;
		goto out;
	}

	gmap = mem->handle;
	if (gmap == NULL ||
	    (entry->end - entry->start) != (gmap->count * PAGE_SIZE)) {
		rc = EINVAL;
		goto out;
	}

	arg->count = gmap->count;
	arg->offset = gmap->file_index;
	rc = 0;

out:
	vm_map_lookup_done(map, entry);
	return (rc);
}
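
/*
 * Example (userspace sketch, not compiled into the kernel): recovering the
 * file offset of an existing grant mapping from its virtual address, e.g.
 * so that it can be unmapped without tracking the offset separately.
 * "page" is a hypothetical address returned by a previous mmap of a
 * mapped grant; error handling is elided.
 *
 *	struct ioctl_gntdev_get_offset_for_vaddr get = {
 *		.vaddr = (uint64_t)page,
 *	};
 *	ioctl(fd, IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR, &get);
 *	// get.offset and get.count now identify the mapping
 */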

/*-------------------- Grant Mapping Pager  ----------------------------------*/

static int
gntdev_gmap_pg_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{

	return (0);
}

static void
gntdev_gmap_pg_dtor(void *handle)
{

	notify_unmap_cleanup((struct gntdev_gmap *)handle);
}

static int
gntdev_gmap_pg_fault(vm_object_t object, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
	struct gntdev_gmap *gmap = object->handle;
	vm_pindex_t pidx, ridx;
	vm_page_t page;
	vm_ooffset_t relative_offset;

	if (gmap->map == NULL)
		return (VM_PAGER_FAIL);

	relative_offset = offset - gmap->file_index;

	pidx = OFF_TO_IDX(offset);
	ridx = OFF_TO_IDX(relative_offset);
	if (ridx >= gmap->count ||
	    gmap->grant_map_ops[ridx].status != GNTST_okay)
		return (VM_PAGER_FAIL);

	page = PHYS_TO_VM_PAGE(gmap->map->phys_base_addr + relative_offset);
	if (page == NULL)
		return (VM_PAGER_FAIL);

	KASSERT((page->flags & PG_FICTITIOUS) != 0,
	    ("not fictitious %p", page));
	KASSERT(vm_page_wired(page), ("page %p is not wired", page));
	KASSERT(!vm_page_busied(page), ("page %p is busy", page));

	vm_page_busy_acquire(page, 0);
	vm_page_valid(page);
	if (*mres != NULL)
		vm_page_replace(page, object, pidx, *mres);
	else
		vm_page_insert(page, object, pidx);
	*mres = page;
	return (VM_PAGER_OK);
}

/*------------------ Grant Table Methods  ------------------------------------*/

static void
notify(struct notify_data *notify, vm_page_t page)
{
	if (notify->action & UNMAP_NOTIFY_CLEAR_BYTE) {
		uint8_t *mem;
		uint64_t offset;

		offset = notify->index & PAGE_MASK;
		mem = (uint8_t *)pmap_quick_enter_page(page);
		mem[offset] = 0;
		pmap_quick_remove_page((vm_offset_t)mem);
	}
	if (notify->action & UNMAP_NOTIFY_SEND_EVENT) {
		xen_intr_signal(notify->notify_evtchn_handle);
		xen_intr_unbind(&notify->notify_evtchn_handle);
	}
	notify->action = 0;
}

/*
 * Helper to copy new arguments from the notify ioctl into
 * the existing notify data.
 */
static int
copy_notify_helper(struct notify_data *destination,
    struct ioctl_gntdev_unmap_notify *source)
{
	xen_intr_handle_t handlep = NULL;

	/*
	 * "Get" before "Put"ting previous reference, as we might be
	 * holding the last reference to the event channel port.
	 */
	if (source->action & UNMAP_NOTIFY_SEND_EVENT)
		if (xen_intr_get_evtchn_from_port(source->event_channel_port,
		    &handlep) != 0)
			return (EINVAL);

	if (destination->action & UNMAP_NOTIFY_SEND_EVENT)
		xen_intr_unbind(&destination->notify_evtchn_handle);

	destination->action = source->action;
	destination->event_channel_port = source->event_channel_port;
	destination->index = source->index;
	destination->notify_evtchn_handle = handlep;

	return (0);
}

/*
 * IOCTL_GNTDEV_SET_UNMAP_NOTIFY
 * Set an unmap notification on the appropriate grant.  The notification
 * is sent once the grant has been completely munmapped by this domain
 * and is ready for destruction.
 */
static int
gntdev_set_unmap_notify(struct ioctl_gntdev_unmap_notify *arg)
{
	int error;
	uint64_t index;
	struct per_user_data *priv_user;
	struct gntdev_gref *gref = NULL;
	struct gntdev_gmap *gmap;

	error = devfs_get_cdevpriv((void**) &priv_user);
	if (error != 0)
		return (EINVAL);

	if (arg->action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
		return (EINVAL);

	index = arg->index & ~PAGE_MASK;
	gref = gntdev_find_grefs(priv_user, index, 1);
	if (gref) {
		/*
		 * Allocate the full notify_data, not the smaller ioctl
		 * argument structure, which lacks the event channel handle.
		 */
		if (gref->notify == NULL)
			gref->notify = malloc(sizeof(*gref->notify),
			    M_GNTDEV, M_WAITOK | M_ZERO);
		return (copy_notify_helper(gref->notify, arg));
	}

	error = EINVAL;
	mtx_lock(&priv_user->user_data_lock);
	RB_FOREACH(gmap, gmap_tree_head, &priv_user->gmap_tree) {
		if (arg->index >= gmap->file_index &&
		    arg->index < gmap->file_index + gmap->count * PAGE_SIZE) {
			if (gmap->notify == NULL)
				gmap->notify = malloc(sizeof(*gmap->notify),
				    M_GNTDEV, M_WAITOK | M_ZERO);
			error = copy_notify_helper(gmap->notify, arg);
			break;
		}
	}
	mtx_unlock(&priv_user->user_data_lock);

	return (error);
}
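
/*
 * Example (userspace sketch, not compiled into the kernel): requesting
 * that the last byte of the first page of a mapping be cleared, and an
 * event sent on a previously bound channel, once this domain has fully
 * unmapped the grant.  "map.index" comes from a prior map ioctl and
 * "local_port" is a hypothetical event channel port.
 *
 *	struct ioctl_gntdev_unmap_notify notify = {
 *		.index = map.index + PAGE_SIZE - 1,
 *		.action = UNMAP_NOTIFY_CLEAR_BYTE | UNMAP_NOTIFY_SEND_EVENT,
 *		.event_channel_port = local_port,
 *	};
 *	ioctl(fd, IOCTL_GNTDEV_SET_UNMAP_NOTIFY, &notify);
 */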

/*------------------ Gntdev Char Device Methods  -----------------------------*/

static void
cleanup_function(void *arg, __unused int pending)
{

	gref_list_dtor((struct cleanup_data_struct *) arg);
	gmap_list_dtor((struct cleanup_data_struct *) arg);
}

static void
per_user_data_dtor(void *arg)
{
	struct gntdev_gref *gref, *gref_tmp;
	struct gntdev_gmap *gmap, *gmap_tmp;
	struct file_offset_struct *offset, *offset_tmp;
	struct per_user_data *priv_user;

	priv_user = (struct per_user_data *) arg;

	mtx_lock(&priv_user->user_data_lock);

	mtx_lock(&cleanup_data.to_kill_grefs_mtx);
	RB_FOREACH_SAFE(gref, gref_tree_head, &priv_user->gref_tree, gref_tmp) {
		RB_REMOVE(gref_tree_head, &priv_user->gref_tree, gref);
		STAILQ_INSERT_TAIL(&cleanup_data.to_kill_grefs, gref,
		    gref_next.list);
	}
	mtx_unlock(&cleanup_data.to_kill_grefs_mtx);

	mtx_lock(&cleanup_data.to_kill_gmaps_mtx);
	RB_FOREACH_SAFE(gmap, gmap_tree_head, &priv_user->gmap_tree, gmap_tmp) {
		RB_REMOVE(gmap_tree_head, &priv_user->gmap_tree, gmap);
		STAILQ_INSERT_TAIL(&cleanup_data.to_kill_gmaps, gmap,
		    gmap_next.list);
		if (gmap->map)
			vm_object_deallocate(gmap->map->mem);
	}
	mtx_unlock(&cleanup_data.to_kill_gmaps_mtx);

	RB_FOREACH_SAFE(offset, file_offset_head, &priv_user->file_offset,
	    offset_tmp) {
		RB_REMOVE(file_offset_head, &priv_user->file_offset, offset);
		free(offset, M_GNTDEV);
	}

	mtx_unlock(&priv_user->user_data_lock);

	taskqueue_enqueue(taskqueue_thread, &cleanup_task);

	mtx_destroy(&priv_user->user_data_lock);
	free(priv_user, M_GNTDEV);
}

static int
gntdev_open(struct cdev *dev, int flag, int otyp, struct thread *td)
{
	int error;
	struct per_user_data *priv_user;
	struct file_offset_struct *offset;

	priv_user = malloc(sizeof(*priv_user), M_GNTDEV, M_WAITOK | M_ZERO);
	RB_INIT(&priv_user->gref_tree);
	RB_INIT(&priv_user->gmap_tree);
	RB_INIT(&priv_user->file_offset);
	offset = malloc(sizeof(*offset), M_GNTDEV, M_WAITOK | M_ZERO);
	offset->file_offset = 0;
	offset->count = MAX_OFFSET_COUNT;
	RB_INSERT(file_offset_head, &priv_user->file_offset, offset);
	mtx_init(&priv_user->user_data_lock,
	    "per user data mutex", NULL, MTX_DEF);

	error = devfs_set_cdevpriv(priv_user, per_user_data_dtor);
	if (error != 0)
		per_user_data_dtor(priv_user);

	return (error);
}

static int
gntdev_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
	int fflag, struct thread *td)
{
	int error;

	switch (cmd) {
	case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
		error = gntdev_set_unmap_notify(
		    (struct ioctl_gntdev_unmap_notify*) data);
		break;
	case IOCTL_GNTDEV_ALLOC_GREF:
		error = gntdev_alloc_gref(
		    (struct ioctl_gntdev_alloc_gref*) data);
		break;
	case IOCTL_GNTDEV_DEALLOC_GREF:
		error = gntdev_dealloc_gref(
		    (struct ioctl_gntdev_dealloc_gref*) data);
		break;
	case IOCTL_GNTDEV_MAP_GRANT_REF:
		error = gntdev_map_grant_ref(
		    (struct ioctl_gntdev_map_grant_ref*) data);
		break;
	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
		error = gntdev_unmap_grant_ref(
		    (struct ioctl_gntdev_unmap_grant_ref*) data);
		break;
	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
		error = gntdev_get_offset_for_vaddr(
		    (struct ioctl_gntdev_get_offset_for_vaddr*) data, td);
		break;
	default:
		error = ENOSYS;
		break;
	}

	return (error);
}

/*
 * MMAP an allocated grant into user memory.
 * Note that the grants must not already be mmapped; otherwise this
 * function will fail.
 */
static int
mmap_gref(struct per_user_data *priv_user, struct gntdev_gref *gref_start,
    uint32_t count, vm_size_t size, struct vm_object **object)
{
	vm_object_t mem_obj;
	struct gntdev_gref *gref;

	mem_obj = vm_pager_allocate(OBJT_PHYS, NULL, size, VM_PROT_ALL, 0,
	    curthread->td_ucred);
	if (mem_obj == NULL)
		return (ENOMEM);

	mtx_lock(&priv_user->user_data_lock);
	VM_OBJECT_WLOCK(mem_obj);
	for (gref = gref_start; gref != NULL && count > 0; gref =
	    RB_NEXT(gref_tree_head, &priv_user->gref_tree, gref)) {
		if (gref->page->object)
			break;

		vm_page_insert(gref->page, mem_obj,
		    OFF_TO_IDX(gref->file_index));

		count--;
	}
	VM_OBJECT_WUNLOCK(mem_obj);
	mtx_unlock(&priv_user->user_data_lock);

	if (count) {
		vm_object_deallocate(mem_obj);
		return (EINVAL);
	}

	*object = mem_obj;

	return (0);
}

/*
 * MMAP a mapped grant into user memory.
 */
static int
mmap_gmap(struct per_user_data *priv_user, struct gntdev_gmap *gmap_start,
    vm_ooffset_t *offset, vm_size_t size, struct vm_object **object, int nprot)
{
	uint32_t i;
	int error;

	/*
	 * The grant map hypercall might already be done.
	 * If that is the case, increase a reference to the
	 * vm object and return the already allocated object.
	 */
	if (gmap_start->map) {
		vm_object_reference(gmap_start->map->mem);
		*object = gmap_start->map->mem;
		return (0);
	}

	gmap_start->map = malloc(sizeof(*(gmap_start->map)), M_GNTDEV,
	    M_WAITOK | M_ZERO);

	/* Allocate the xen pseudo physical memory resource. */
	gmap_start->map->pseudo_phys_res_id = 0;
	gmap_start->map->pseudo_phys_res = xenmem_alloc(gntdev_dev,
	    &gmap_start->map->pseudo_phys_res_id, size);
	if (gmap_start->map->pseudo_phys_res == NULL) {
		free(gmap_start->map, M_GNTDEV);
		gmap_start->map = NULL;
		return (ENOMEM);
	}
	gmap_start->map->phys_base_addr =
	    rman_get_start(gmap_start->map->pseudo_phys_res);

	/* Allocate the mgtdevice pager. */
	gmap_start->map->mem = cdev_pager_allocate(gmap_start, OBJT_MGTDEVICE,
	    &gntdev_gmap_pg_ops, size, nprot, *offset, NULL);
	if (gmap_start->map->mem == NULL) {
		xenmem_free(gntdev_dev, gmap_start->map->pseudo_phys_res_id,
		    gmap_start->map->pseudo_phys_res);
		free(gmap_start->map, M_GNTDEV);
		gmap_start->map = NULL;
		return (ENOMEM);
	}

	for (i = 0; i < gmap_start->count; i++) {
		gmap_start->grant_map_ops[i].host_addr =
		    gmap_start->map->phys_base_addr + i * PAGE_SIZE;

		if ((nprot & PROT_WRITE) == 0)
			gmap_start->grant_map_ops[i].flags |= GNTMAP_readonly;
	}
	/* Make the MAP hypercall. */
	error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
	    gmap_start->grant_map_ops, gmap_start->count);
	if (error != 0) {
		/*
		 * Deallocate pager.
		 * Pager deallocation will automatically take care of
		 * xenmem deallocation, etc.
		 */
		vm_object_deallocate(gmap_start->map->mem);

		return (EINVAL);
	}

	/* Retry EAGAIN maps. */
	for (i = 0; i < gmap_start->count; i++) {
		int delay = 1;
		while (delay < 256 &&
		    gmap_start->grant_map_ops[i].status == GNTST_eagain) {
			HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
			    &gmap_start->grant_map_ops[i], 1);
			/* pause() expects ticks; use the sbintime_t API. */
			pause_sbt("gntmap", delay * SBT_1MS, 0, C_HARDCLOCK);
			delay++;
		}
		if (gmap_start->grant_map_ops[i].status == GNTST_eagain)
			gmap_start->grant_map_ops[i].status = GNTST_bad_page;

		if (gmap_start->grant_map_ops[i].status != GNTST_okay) {
			/*
			 * Deallocate pager.
			 * Pager deallocation will automatically take care of
			 * xenmem deallocation, notification, unmap hypercall,
			 * etc.
			 */
			vm_object_deallocate(gmap_start->map->mem);

			return (EINVAL);
		}
	}

	/*
	 * Take an extra reference to the vm object.  We do not want the
	 * object to be deleted once all the mmaps are unmapped, because
	 * it may be re-mmapped; instead, it should be deleted only once
	 * the unmap ioctl has been processed in addition to the munmaps.
	 */
	vm_object_reference(gmap_start->map->mem);

	*object = gmap_start->map->mem;

	return (0);
}

static int
gntdev_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
    struct vm_object **object, int nprot)
{
	int error;
	uint32_t count;
	struct gntdev_gref *gref_start;
	struct gntdev_gmap *gmap_start;
	struct per_user_data *priv_user;

	error = devfs_get_cdevpriv((void**) &priv_user);
	if (error != 0)
		return (EINVAL);

	count = OFF_TO_IDX(size);

	gref_start = gntdev_find_grefs(priv_user, *offset, count);
	if (gref_start) {
		error = mmap_gref(priv_user, gref_start, count, size, object);
		return (error);
	}

	gmap_start = gntdev_find_gmap(priv_user, *offset, count);
	if (gmap_start) {
		error = mmap_gmap(priv_user, gmap_start, offset, size, object,
		    nprot);
		return (error);
	}

	return (EINVAL);
}

/*------------------ Private Device Attachment Functions  --------------------*/
static void
gntdev_identify(driver_t *driver, device_t parent)
{

	KASSERT((xen_domain()),
	    ("Trying to attach gntdev device on non Xen domain"));

	if (BUS_ADD_CHILD(parent, 0, "gntdev", 0) == NULL)
		panic("unable to attach gntdev user-space device");
}

static int
gntdev_probe(device_t dev)
{

	gntdev_dev = dev;
	device_set_desc(dev, "Xen grant-table user-space device");
	return (BUS_PROBE_NOWILDCARD);
}

static int
gntdev_attach(device_t dev)
{

	make_dev_credf(MAKEDEV_ETERNAL, &gntdev_devsw, 0, NULL, UID_ROOT,
	    GID_WHEEL, 0600, "xen/gntdev");
	return (0);
}

/*-------------------- Private Device Attachment Data  -----------------------*/
static device_method_t gntdev_methods[] = {
	DEVMETHOD(device_identify, gntdev_identify),
	DEVMETHOD(device_probe, gntdev_probe),
	DEVMETHOD(device_attach, gntdev_attach),
	DEVMETHOD_END
};

static driver_t gntdev_driver = {
	"gntdev",
	gntdev_methods,
	0,
};

devclass_t gntdev_devclass;

DRIVER_MODULE(gntdev, xenpv, gntdev_driver, gntdev_devclass, 0, 0);
MODULE_DEPEND(gntdev, xenpv, 1, 1, 1);