linux_compat.c revision 342804
1/*-
2 * Copyright (c) 2010 Isilon Systems, Inc.
3 * Copyright (c) 2010 iX Systems, Inc.
4 * Copyright (c) 2010 Panasas, Inc.
5 * Copyright (c) 2013-2018 Mellanox Technologies, Ltd.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice unmodified, this list of conditions, and the following
13 *    disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30#include <sys/cdefs.h>
31__FBSDID("$FreeBSD: stable/11/sys/compat/linuxkpi/common/src/linux_compat.c 342804 2019-01-06 00:59:55Z kib $");
32
33#include "opt_stack.h"
34
35#include <sys/param.h>
36#include <sys/systm.h>
37#include <sys/malloc.h>
38#include <sys/kernel.h>
39#include <sys/sysctl.h>
40#include <sys/proc.h>
41#include <sys/sglist.h>
42#include <sys/sleepqueue.h>
43#include <sys/refcount.h>
44#include <sys/lock.h>
45#include <sys/mutex.h>
46#include <sys/bus.h>
47#include <sys/fcntl.h>
48#include <sys/file.h>
49#include <sys/filio.h>
50#include <sys/rwlock.h>
51#include <sys/mman.h>
52#include <sys/stack.h>
53#include <sys/user.h>
54
55#include <vm/vm.h>
56#include <vm/pmap.h>
57#include <vm/vm_object.h>
58#include <vm/vm_page.h>
59#include <vm/vm_pager.h>
60
61#include <machine/stdarg.h>
62
63#if defined(__i386__) || defined(__amd64__)
64#include <machine/md_var.h>
65#endif
66
67#include <linux/kobject.h>
68#include <linux/device.h>
69#include <linux/slab.h>
70#include <linux/module.h>
71#include <linux/moduleparam.h>
72#include <linux/cdev.h>
73#include <linux/file.h>
74#include <linux/sysfs.h>
75#include <linux/mm.h>
76#include <linux/io.h>
77#include <linux/vmalloc.h>
78#include <linux/netdevice.h>
79#include <linux/timer.h>
80#include <linux/interrupt.h>
81#include <linux/uaccess.h>
82#include <linux/list.h>
83#include <linux/kthread.h>
84#include <linux/kernel.h>
85#include <linux/compat.h>
86#include <linux/poll.h>
87#include <linux/smp.h>
88
89#if defined(__i386__) || defined(__amd64__)
90#include <asm/smp.h>
91#endif
92
93SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW, 0, "LinuxKPI parameters");
94
95MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat");
96
97#include <linux/rbtree.h>
98/* Undo Linux compat changes. */
99#undef RB_ROOT
100#undef file
101#undef cdev
102#define	RB_ROOT(head)	(head)->rbh_root
103
104static void linux_cdev_deref(struct linux_cdev *ldev);
105static struct vm_area_struct *linux_cdev_handle_find(void *handle);
106
107struct kobject linux_class_root;
108struct device linux_root_device;
109struct class linux_class_misc;
110struct list_head pci_drivers;
111struct list_head pci_devices;
112spinlock_t pci_lock;
113
114unsigned long linux_timer_hz_mask;
115
116int
117panic_cmp(struct rb_node *one, struct rb_node *two)
118{
119	panic("no cmp");
120}
121
122RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);
123
124int
125kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args)
126{
127	va_list tmp_va;
128	int len;
129	char *old;
130	char *name;
131	char dummy;
132
133	old = kobj->name;
134
135	if (old && fmt == NULL)
136		return (0);
137
138	/* compute length of string */
139	va_copy(tmp_va, args);
140	len = vsnprintf(&dummy, 0, fmt, tmp_va);
141	va_end(tmp_va);
142
143	/* account for zero termination */
144	len++;
145
146	/* check for error */
147	if (len < 1)
148		return (-EINVAL);
149
150	/* allocate memory for string */
151	name = kzalloc(len, GFP_KERNEL);
152	if (name == NULL)
153		return (-ENOMEM);
154	vsnprintf(name, len, fmt, args);
155	kobj->name = name;
156
157	/* free old string */
158	kfree(old);
159
160	/* filter new string */
161	for (; *name != '\0'; name++)
162		if (*name == '/')
163			*name = '!';
164	return (0);
165}
166
167int
168kobject_set_name(struct kobject *kobj, const char *fmt, ...)
169{
170	va_list args;
171	int error;
172
173	va_start(args, fmt);
174	error = kobject_set_name_vargs(kobj, fmt, args);
175	va_end(args);
176
177	return (error);
178}
179
180static int
181kobject_add_complete(struct kobject *kobj, struct kobject *parent)
182{
183	const struct kobj_type *t;
184	int error;
185
186	kobj->parent = parent;
187	error = sysfs_create_dir(kobj);
188	if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) {
189		struct attribute **attr;
190		t = kobj->ktype;
191
192		for (attr = t->default_attrs; *attr != NULL; attr++) {
193			error = sysfs_create_file(kobj, *attr);
194			if (error)
195				break;
196		}
197		if (error)
198			sysfs_remove_dir(kobj);
199
200	}
201	return (error);
202}
203
204int
205kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...)
206{
207	va_list args;
208	int error;
209
210	va_start(args, fmt);
211	error = kobject_set_name_vargs(kobj, fmt, args);
212	va_end(args);
213	if (error)
214		return (error);
215
216	return kobject_add_complete(kobj, parent);
217}
218
219void
220linux_kobject_release(struct kref *kref)
221{
222	struct kobject *kobj;
223	char *name;
224
225	kobj = container_of(kref, struct kobject, kref);
226	sysfs_remove_dir(kobj);
227	name = kobj->name;
228	if (kobj->ktype && kobj->ktype->release)
229		kobj->ktype->release(kobj);
230	kfree(name);
231}
232
233static void
234linux_kobject_kfree(struct kobject *kobj)
235{
236	kfree(kobj);
237}
238
239static void
240linux_kobject_kfree_name(struct kobject *kobj)
241{
242	if (kobj) {
243		kfree(kobj->name);
244	}
245}
246
247const struct kobj_type linux_kfree_type = {
248	.release = linux_kobject_kfree
249};
250
251static void
252linux_device_release(struct device *dev)
253{
254	pr_debug("linux_device_release: %s\n", dev_name(dev));
255	kfree(dev);
256}
257
258static ssize_t
259linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf)
260{
261	struct class_attribute *dattr;
262	ssize_t error;
263
264	dattr = container_of(attr, struct class_attribute, attr);
265	error = -EIO;
266	if (dattr->show)
267		error = dattr->show(container_of(kobj, struct class, kobj),
268		    dattr, buf);
269	return (error);
270}
271
272static ssize_t
273linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf,
274    size_t count)
275{
276	struct class_attribute *dattr;
277	ssize_t error;
278
279	dattr = container_of(attr, struct class_attribute, attr);
280	error = -EIO;
281	if (dattr->store)
282		error = dattr->store(container_of(kobj, struct class, kobj),
283		    dattr, buf, count);
284	return (error);
285}
286
287static void
288linux_class_release(struct kobject *kobj)
289{
290	struct class *class;
291
292	class = container_of(kobj, struct class, kobj);
293	if (class->class_release)
294		class->class_release(class);
295}
296
297static const struct sysfs_ops linux_class_sysfs = {
298	.show  = linux_class_show,
299	.store = linux_class_store,
300};
301
302const struct kobj_type linux_class_ktype = {
303	.release = linux_class_release,
304	.sysfs_ops = &linux_class_sysfs
305};
306
307static void
308linux_dev_release(struct kobject *kobj)
309{
310	struct device *dev;
311
312	dev = container_of(kobj, struct device, kobj);
313	/* This is the precedence defined by Linux. */
314	if (dev->release)
315		dev->release(dev);
316	else if (dev->class && dev->class->dev_release)
317		dev->class->dev_release(dev);
318}
319
320static ssize_t
321linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf)
322{
323	struct device_attribute *dattr;
324	ssize_t error;
325
326	dattr = container_of(attr, struct device_attribute, attr);
327	error = -EIO;
328	if (dattr->show)
329		error = dattr->show(container_of(kobj, struct device, kobj),
330		    dattr, buf);
331	return (error);
332}
333
334static ssize_t
335linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf,
336    size_t count)
337{
338	struct device_attribute *dattr;
339	ssize_t error;
340
341	dattr = container_of(attr, struct device_attribute, attr);
342	error = -EIO;
343	if (dattr->store)
344		error = dattr->store(container_of(kobj, struct device, kobj),
345		    dattr, buf, count);
346	return (error);
347}
348
349static const struct sysfs_ops linux_dev_sysfs = {
350	.show  = linux_dev_show,
351	.store = linux_dev_store,
352};
353
354const struct kobj_type linux_dev_ktype = {
355	.release = linux_dev_release,
356	.sysfs_ops = &linux_dev_sysfs
357};
358
359struct device *
360device_create(struct class *class, struct device *parent, dev_t devt,
361    void *drvdata, const char *fmt, ...)
362{
363	struct device *dev;
364	va_list args;
365
366	dev = kzalloc(sizeof(*dev), M_WAITOK);
367	dev->parent = parent;
368	dev->class = class;
369	dev->devt = devt;
370	dev->driver_data = drvdata;
371	dev->release = linux_device_release;
372	va_start(args, fmt);
373	kobject_set_name_vargs(&dev->kobj, fmt, args);
374	va_end(args);
375	device_register(dev);
376
377	return (dev);
378}
379
380int
381kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype,
382    struct kobject *parent, const char *fmt, ...)
383{
384	va_list args;
385	int error;
386
387	kobject_init(kobj, ktype);
388	kobj->ktype = ktype;
389	kobj->parent = parent;
390	kobj->name = NULL;
391
392	va_start(args, fmt);
393	error = kobject_set_name_vargs(kobj, fmt, args);
394	va_end(args);
395	if (error)
396		return (error);
397	return kobject_add_complete(kobj, parent);
398}
399
400static void
401linux_kq_lock(void *arg)
402{
403	spinlock_t *s = arg;
404
405	spin_lock(s);
406}
407static void
408linux_kq_unlock(void *arg)
409{
410	spinlock_t *s = arg;
411
412	spin_unlock(s);
413}
414
415static void
416linux_kq_lock_owned(void *arg)
417{
418#ifdef INVARIANTS
419	spinlock_t *s = arg;
420
421	mtx_assert(&s->m, MA_OWNED);
422#endif
423}
424
425static void
426linux_kq_lock_unowned(void *arg)
427{
428#ifdef INVARIANTS
429	spinlock_t *s = arg;
430
431	mtx_assert(&s->m, MA_NOTOWNED);
432#endif
433}
434
435static void
436linux_file_kqfilter_poll(struct linux_file *, int);
437
438struct linux_file *
439linux_file_alloc(void)
440{
441	struct linux_file *filp;
442
443	filp = kzalloc(sizeof(*filp), GFP_KERNEL);
444
445	/* set initial refcount */
446	filp->f_count = 1;
447
448	/* setup fields needed by kqueue support */
449	spin_lock_init(&filp->f_kqlock);
450	knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock,
451	    linux_kq_lock, linux_kq_unlock,
452	    linux_kq_lock_owned, linux_kq_lock_unowned);
453
454	return (filp);
455}
456
457void
458linux_file_free(struct linux_file *filp)
459{
460	if (filp->_file == NULL) {
461		if (filp->f_shmem != NULL)
462			vm_object_deallocate(filp->f_shmem);
463		kfree(filp);
464	} else {
465		/*
466		 * The close method of the character device or file
467		 * will free the linux_file structure:
468		 */
469		_fdrop(filp->_file, curthread);
470	}
471}
472
473static int
474linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
475    vm_page_t *mres)
476{
477	struct vm_area_struct *vmap;
478
479	vmap = linux_cdev_handle_find(vm_obj->handle);
480
481	MPASS(vmap != NULL);
482	MPASS(vmap->vm_private_data == vm_obj->handle);
483
484	if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) {
485		vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset;
486		vm_page_t page;
487
488		if (((*mres)->flags & PG_FICTITIOUS) != 0) {
489			/*
490			 * If the passed in result page is a fake
491			 * page, update it with the new physical
492			 * address.
493			 */
494			page = *mres;
495			vm_page_updatefake(page, paddr, vm_obj->memattr);
496		} else {
497			/*
498			 * Replace the passed in "mres" page with our
499			 * own fake page and free up all of the
500			 * original pages.
501			 */
502			VM_OBJECT_WUNLOCK(vm_obj);
503			page = vm_page_getfake(paddr, vm_obj->memattr);
504			VM_OBJECT_WLOCK(vm_obj);
505
506			vm_page_replace_checked(page, vm_obj,
507			    (*mres)->pindex, *mres);
508
509			vm_page_lock(*mres);
510			vm_page_free(*mres);
511			vm_page_unlock(*mres);
512			*mres = page;
513		}
514		page->valid = VM_PAGE_BITS_ALL;
515		return (VM_PAGER_OK);
516	}
517	return (VM_PAGER_FAIL);
518}
519
520static int
521linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type,
522    vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
523{
524	struct vm_area_struct *vmap;
525	int err;
526
527	linux_set_current(curthread);
528
529	/* get VM area structure */
530	vmap = linux_cdev_handle_find(vm_obj->handle);
531	MPASS(vmap != NULL);
532	MPASS(vmap->vm_private_data == vm_obj->handle);
533
534	VM_OBJECT_WUNLOCK(vm_obj);
535
536	down_write(&vmap->vm_mm->mmap_sem);
537	if (unlikely(vmap->vm_ops == NULL)) {
538		err = VM_FAULT_SIGBUS;
539	} else {
540		struct vm_fault vmf;
541
542		/* fill out VM fault structure */
543		vmf.virtual_address = (void *)(uintptr_t)IDX_TO_OFF(pidx);
544		vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
545		vmf.pgoff = 0;
546		vmf.page = NULL;
547		vmf.vma = vmap;
548
549		vmap->vm_pfn_count = 0;
550		vmap->vm_pfn_pcount = &vmap->vm_pfn_count;
551		vmap->vm_obj = vm_obj;
552
553		err = vmap->vm_ops->fault(vmap, &vmf);
554
555		while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) {
556			kern_yield(PRI_USER);
557			err = vmap->vm_ops->fault(vmap, &vmf);
558		}
559	}
560
561	/* translate return code */
562	switch (err) {
563	case VM_FAULT_OOM:
564		err = VM_PAGER_AGAIN;
565		break;
566	case VM_FAULT_SIGBUS:
567		err = VM_PAGER_BAD;
568		break;
569	case VM_FAULT_NOPAGE:
570		/*
571		 * By contract the fault handler will return having
572		 * busied all the pages itself. If pidx is already
573		 * found in the object, it will simply xbusy the first
574		 * page and return with vm_pfn_count set to 1.
575		 */
576		*first = vmap->vm_pfn_first;
577		*last = *first + vmap->vm_pfn_count - 1;
578		err = VM_PAGER_OK;
579		break;
580	default:
581		err = VM_PAGER_ERROR;
582		break;
583	}
584	up_write(&vmap->vm_mm->mmap_sem);
585	VM_OBJECT_WLOCK(vm_obj);
586	return (err);
587}
588
589static struct rwlock linux_vma_lock;
590static TAILQ_HEAD(, vm_area_struct) linux_vma_head =
591    TAILQ_HEAD_INITIALIZER(linux_vma_head);
592
593static void
594linux_cdev_handle_free(struct vm_area_struct *vmap)
595{
596	/* Drop reference on vm_file */
597	if (vmap->vm_file != NULL)
598		fput(vmap->vm_file);
599
600	/* Drop reference on mm_struct */
601	mmput(vmap->vm_mm);
602
603	kfree(vmap);
604}
605
606static void
607linux_cdev_handle_remove(struct vm_area_struct *vmap)
608{
609	rw_wlock(&linux_vma_lock);
610	TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry);
611	rw_wunlock(&linux_vma_lock);
612}
613
614static struct vm_area_struct *
615linux_cdev_handle_find(void *handle)
616{
617	struct vm_area_struct *vmap;
618
619	rw_rlock(&linux_vma_lock);
620	TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) {
621		if (vmap->vm_private_data == handle)
622			break;
623	}
624	rw_runlock(&linux_vma_lock);
625	return (vmap);
626}
627
628static int
629linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
630		      vm_ooffset_t foff, struct ucred *cred, u_short *color)
631{
632
633	MPASS(linux_cdev_handle_find(handle) != NULL);
634	*color = 0;
635	return (0);
636}
637
638static void
639linux_cdev_pager_dtor(void *handle)
640{
641	const struct vm_operations_struct *vm_ops;
642	struct vm_area_struct *vmap;
643
644	vmap = linux_cdev_handle_find(handle);
645	MPASS(vmap != NULL);
646
647	/*
648	 * Remove handle before calling close operation to prevent
649	 * other threads from reusing the handle pointer.
650	 */
651	linux_cdev_handle_remove(vmap);
652
653	down_write(&vmap->vm_mm->mmap_sem);
654	vm_ops = vmap->vm_ops;
655	if (likely(vm_ops != NULL))
656		vm_ops->close(vmap);
657	up_write(&vmap->vm_mm->mmap_sem);
658
659	linux_cdev_handle_free(vmap);
660}
661
662static struct cdev_pager_ops linux_cdev_pager_ops[2] = {
663  {
664	/* OBJT_MGTDEVICE */
665	.cdev_pg_populate	= linux_cdev_pager_populate,
666	.cdev_pg_ctor	= linux_cdev_pager_ctor,
667	.cdev_pg_dtor	= linux_cdev_pager_dtor
668  },
669  {
670	/* OBJT_DEVICE */
671	.cdev_pg_fault	= linux_cdev_pager_fault,
672	.cdev_pg_ctor	= linux_cdev_pager_ctor,
673	.cdev_pg_dtor	= linux_cdev_pager_dtor
674  },
675};
676
677int
678zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
679    unsigned long size)
680{
681	vm_object_t obj;
682	vm_page_t m;
683
684	obj = vma->vm_obj;
685	if (obj == NULL || (obj->flags & OBJ_UNMANAGED) != 0)
686		return (-ENOTSUP);
687	VM_OBJECT_RLOCK(obj);
688	for (m = vm_page_find_least(obj, OFF_TO_IDX(address));
689	    m != NULL && m->pindex < OFF_TO_IDX(address + size);
690	    m = TAILQ_NEXT(m, listq))
691		pmap_remove_all(m);
692	VM_OBJECT_RUNLOCK(obj);
693	return (0);
694}
695
696static struct file_operations dummy_ldev_ops = {
697	/* XXXKIB */
698};
699
700static struct linux_cdev dummy_ldev = {
701	.ops = &dummy_ldev_ops,
702};
703
704#define	LDEV_SI_DTR	0x0001
705#define	LDEV_SI_REF	0x0002
706
707static void
708linux_get_fop(struct linux_file *filp, const struct file_operations **fop,
709    struct linux_cdev **dev)
710{
711	struct linux_cdev *ldev;
712	u_int siref;
713
714	ldev = filp->f_cdev;
715	*fop = filp->f_op;
716	if (ldev != NULL) {
717		for (siref = ldev->siref;;) {
718			if ((siref & LDEV_SI_DTR) != 0) {
719				ldev = &dummy_ldev;
720				siref = ldev->siref;
721				*fop = ldev->ops;
722				MPASS((ldev->siref & LDEV_SI_DTR) == 0);
723			} else if (atomic_fcmpset_int(&ldev->siref, &siref,
724			    siref + LDEV_SI_REF)) {
725				break;
726			}
727		}
728	}
729	*dev = ldev;
730}
731
732static void
733linux_drop_fop(struct linux_cdev *ldev)
734{
735
736	if (ldev == NULL)
737		return;
738	MPASS((ldev->siref & ~LDEV_SI_DTR) != 0);
739	atomic_subtract_int(&ldev->siref, LDEV_SI_REF);
740}
741
742#define	OPW(fp,td,code) ({			\
743	struct file *__fpop;			\
744	__typeof(code) __retval;		\
745						\
746	__fpop = (td)->td_fpop;			\
747	(td)->td_fpop = (fp);			\
748	__retval = (code);			\
749	(td)->td_fpop = __fpop;			\
750	__retval;				\
751})
752
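/*
 * Illustrative sketch (not part of this file): OPW() runs a Linux
 * file-operation callback while the FreeBSD file is temporarily hooked
 * into td_fpop, e.g. (names are hypothetical):
 *
 *	ssize_t bytes;
 *
 *	bytes = OPW(file, td, fop->read(filp, base, len, &offset));
 *
 * The previous td_fpop value is restored after the callback returns.
 */
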
753static int
754linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td,
755    struct file *file)
756{
757	struct linux_cdev *ldev;
758	struct linux_file *filp;
759	const struct file_operations *fop;
760	int error;
761
762	ldev = dev->si_drv1;
763
764	filp = linux_file_alloc();
765	filp->f_dentry = &filp->f_dentry_store;
766	filp->f_op = ldev->ops;
767	filp->f_mode = file->f_flag;
768	filp->f_flags = file->f_flag;
769	filp->f_vnode = file->f_vnode;
770	filp->_file = file;
771	refcount_acquire(&ldev->refs);
772	filp->f_cdev = ldev;
773
774	linux_set_current(td);
775	linux_get_fop(filp, &fop, &ldev);
776
777	if (fop->open != NULL) {
778		error = -fop->open(file->f_vnode, filp);
779		if (error != 0) {
780			linux_drop_fop(ldev);
781			linux_cdev_deref(filp->f_cdev);
782			kfree(filp);
783			return (error);
784		}
785	}
786
787	/* hold on to the vnode - used for fstat() */
788	vhold(filp->f_vnode);
789
790	/* release the file from devfs */
791	finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops);
792	linux_drop_fop(ldev);
793	return (ENXIO);
794}
795
796#define	LINUX_IOCTL_MIN_PTR 0x10000UL
797#define	LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX)
798
799static inline int
800linux_remap_address(void **uaddr, size_t len)
801{
802	uintptr_t uaddr_val = (uintptr_t)(*uaddr);
803
804	if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR &&
805	    uaddr_val < LINUX_IOCTL_MAX_PTR)) {
806		struct task_struct *pts = current;
807		if (pts == NULL) {
808			*uaddr = NULL;
809			return (1);
810		}
811
812		/* compute data offset */
813		uaddr_val -= LINUX_IOCTL_MIN_PTR;
814
815		/* check that length is within bounds */
816		if ((len > IOCPARM_MAX) ||
817		    (uaddr_val + len) > pts->bsd_ioctl_len) {
818			*uaddr = NULL;
819			return (1);
820		}
821
822		/* re-add kernel buffer address */
823		uaddr_val += (uintptr_t)pts->bsd_ioctl_data;
824
825		/* update address location */
826		*uaddr = (void *)uaddr_val;
827		return (1);
828	}
829	return (0);
830}
831
832int
833linux_copyin(const void *uaddr, void *kaddr, size_t len)
834{
835	if (linux_remap_address(__DECONST(void **, &uaddr), len)) {
836		if (uaddr == NULL)
837			return (-EFAULT);
838		memcpy(kaddr, uaddr, len);
839		return (0);
840	}
841	return (-copyin(uaddr, kaddr, len));
842}
843
844int
845linux_copyout(const void *kaddr, void *uaddr, size_t len)
846{
847	if (linux_remap_address(&uaddr, len)) {
848		if (uaddr == NULL)
849			return (-EFAULT);
850		memcpy(uaddr, kaddr, len);
851		return (0);
852	}
853	return (-copyout(kaddr, uaddr, len));
854}
855
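/*
 * Worked example (illustrative, not part of this file): for an ioctl with
 * IOCPARM_LEN(cmd) > 0, linux_file_ioctl_sub() below points
 * task->bsd_ioctl_data at the kernel buffer already filled in by the
 * FreeBSD ioctl path and hands the Linux handler the fake pointer
 * LINUX_IOCTL_MIN_PTR as "arg".  A driver doing
 *
 *	struct my_args args;			(hypothetical structure)
 *
 *	if (copy_from_user(&args, (void *)arg, sizeof(args)))
 *		return (-EFAULT);
 *
 * is expected to reach linux_copyin(), which notices that the address
 * falls inside [LINUX_IOCTL_MIN_PTR, LINUX_IOCTL_MAX_PTR) and redirects
 * the copy to bsd_ioctl_data plus the offset instead of doing a real
 * copyin().
 */
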
856size_t
857linux_clear_user(void *_uaddr, size_t _len)
858{
859	uint8_t *uaddr = _uaddr;
860	size_t len = _len;
861
862	/* make sure uaddr is aligned before going into the fast loop */
863	while (((uintptr_t)uaddr & 7) != 0 && len > 7) {
864		if (subyte(uaddr, 0))
865			return (_len);
866		uaddr++;
867		len--;
868	}
869
870	/* zero 8 bytes at a time */
871	while (len > 7) {
872#ifdef __LP64__
873		if (suword64(uaddr, 0))
874			return (_len);
875#else
876		if (suword32(uaddr, 0))
877			return (_len);
878		if (suword32(uaddr + 4, 0))
879			return (_len);
880#endif
881		uaddr += 8;
882		len -= 8;
883	}
884
885	/* zero fill end, if any */
886	while (len > 0) {
887		if (subyte(uaddr, 0))
888			return (_len);
889		uaddr++;
890		len--;
891	}
892	return (0);
893}
894
895int
896linux_access_ok(int rw, const void *uaddr, size_t len)
897{
898	uintptr_t saddr;
899	uintptr_t eaddr;
900
901	/* get start and end address */
902	saddr = (uintptr_t)uaddr;
903	eaddr = (uintptr_t)uaddr + len;
904
905	/* verify addresses are valid for userspace */
906	return ((saddr == eaddr) ||
907	    (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS));
908}
909
910/*
911 * This function should return either EINTR or ERESTART depending on
912 * the signal type sent to this thread:
913 */
914static int
915linux_get_error(struct task_struct *task, int error)
916{
917	/* check for signal type interrupt code */
918	if (error == EINTR || error == ERESTARTSYS || error == ERESTART) {
919		error = -linux_schedule_get_interrupt_value(task);
920		if (error == 0)
921			error = EINTR;
922	}
923	return (error);
924}
925
926static int
927linux_file_ioctl_sub(struct file *fp, struct linux_file *filp,
928    const struct file_operations *fop, u_long cmd, caddr_t data,
929    struct thread *td)
930{
931	struct task_struct *task = current;
932	unsigned size;
933	int error;
934
935	size = IOCPARM_LEN(cmd);
936	/* refer to logic in sys_ioctl() */
937	if (size > 0) {
938		/*
939		 * Setup hint for linux_copyin() and linux_copyout().
940		 *
941		 * Background: Linux code expects a user-space address
942		 * while FreeBSD supplies a kernel-space address.
943		 */
944		task->bsd_ioctl_data = data;
945		task->bsd_ioctl_len = size;
946		data = (void *)LINUX_IOCTL_MIN_PTR;
947	} else {
948		/* fetch user-space pointer */
949		data = *(void **)data;
950	}
951#if defined(__amd64__)
952	if (td->td_proc->p_elf_machine == EM_386) {
953		/* try the compat IOCTL handler first */
954		if (fop->compat_ioctl != NULL) {
955			error = -OPW(fp, td, fop->compat_ioctl(filp,
956			    cmd, (u_long)data));
957		} else {
958			error = ENOTTY;
959		}
960
961		/* fallback to the regular IOCTL handler, if any */
962		if (error == ENOTTY && fop->unlocked_ioctl != NULL) {
963			error = -OPW(fp, td, fop->unlocked_ioctl(filp,
964			    cmd, (u_long)data));
965		}
966	} else
967#endif
968	{
969		if (fop->unlocked_ioctl != NULL) {
970			error = -OPW(fp, td, fop->unlocked_ioctl(filp,
971			    cmd, (u_long)data));
972		} else {
973			error = ENOTTY;
974		}
975	}
976	if (size > 0) {
977		task->bsd_ioctl_data = NULL;
978		task->bsd_ioctl_len = 0;
979	}
980
981	if (error == EWOULDBLOCK) {
982		/* update kqfilter status, if any */
983		linux_file_kqfilter_poll(filp,
984		    LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
985	} else {
986		error = linux_get_error(task, error);
987	}
988	return (error);
989}
990
991#define	LINUX_POLL_TABLE_NORMAL ((poll_table *)1)
992
993/*
994 * This function atomically updates the poll wakeup state and returns
995 * the previous state at the time of update.
996 */
997static uint8_t
998linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate)
999{
1000	int c, old;
1001
1002	c = v->counter;
1003
1004	while ((old = atomic_cmpxchg(v, c, pstate[c])) != c)
1005		c = old;
1006
1007	return (c);
1008}
1009
1010
1011static int
1012linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key)
1013{
1014	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
1015		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */
1016		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */
1017		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY,
1018		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */
1019	};
1020	struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq);
1021
1022	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
1023	case LINUX_FWQ_STATE_QUEUED:
1024		linux_poll_wakeup(filp);
1025		return (1);
1026	default:
1027		return (0);
1028	}
1029}
1030
1031void
1032linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p)
1033{
1034	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
1035		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY,
1036		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */
1037		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */
1038		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED,
1039	};
1040
1041	/* check if we are called inside the select system call */
1042	if (p == LINUX_POLL_TABLE_NORMAL)
1043		selrecord(curthread, &filp->f_selinfo);
1044
1045	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
1046	case LINUX_FWQ_STATE_INIT:
1047		/* NOTE: file handles can only belong to one wait-queue */
1048		filp->f_wait_queue.wqh = wqh;
1049		filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback;
1050		add_wait_queue(wqh, &filp->f_wait_queue.wq);
1051		atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED);
1052		break;
1053	default:
1054		break;
1055	}
1056}
1057
1058static void
1059linux_poll_wait_dequeue(struct linux_file *filp)
1060{
1061	static const uint8_t state[LINUX_FWQ_STATE_MAX] = {
1062		[LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT,	/* NOP */
1063		[LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT,
1064		[LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT,
1065		[LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT,
1066	};
1067
1068	seldrain(&filp->f_selinfo);
1069
1070	switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) {
1071	case LINUX_FWQ_STATE_NOT_READY:
1072	case LINUX_FWQ_STATE_QUEUED:
1073	case LINUX_FWQ_STATE_READY:
1074		remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq);
1075		break;
1076	default:
1077		break;
1078	}
1079}
1080
1081void
1082linux_poll_wakeup(struct linux_file *filp)
1083{
1084	/* this function should be NULL-safe */
1085	if (filp == NULL)
1086		return;
1087
1088	selwakeup(&filp->f_selinfo);
1089
1090	spin_lock(&filp->f_kqlock);
1091	filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ |
1092	    LINUX_KQ_FLAG_NEED_WRITE;
1093
1094	/* make sure the "knote" gets woken up */
1095	KNOTE_LOCKED(&filp->f_selinfo.si_note, 1);
1096	spin_unlock(&filp->f_kqlock);
1097}
1098
1099static void
1100linux_file_kqfilter_detach(struct knote *kn)
1101{
1102	struct linux_file *filp = kn->kn_hook;
1103
1104	spin_lock(&filp->f_kqlock);
1105	knlist_remove(&filp->f_selinfo.si_note, kn, 1);
1106	spin_unlock(&filp->f_kqlock);
1107}
1108
1109static int
1110linux_file_kqfilter_read_event(struct knote *kn, long hint)
1111{
1112	struct linux_file *filp = kn->kn_hook;
1113
1114	mtx_assert(&filp->f_kqlock.m, MA_OWNED);
1115
1116	return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 1 : 0);
1117}
1118
1119static int
1120linux_file_kqfilter_write_event(struct knote *kn, long hint)
1121{
1122	struct linux_file *filp = kn->kn_hook;
1123
1124	mtx_assert(&filp->f_kqlock.m, MA_OWNED);
1125
1126	return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0);
1127}
1128
1129static struct filterops linux_dev_kqfiltops_read = {
1130	.f_isfd = 1,
1131	.f_detach = linux_file_kqfilter_detach,
1132	.f_event = linux_file_kqfilter_read_event,
1133};
1134
1135static struct filterops linux_dev_kqfiltops_write = {
1136	.f_isfd = 1,
1137	.f_detach = linux_file_kqfilter_detach,
1138	.f_event = linux_file_kqfilter_write_event,
1139};
1140
1141static void
1142linux_file_kqfilter_poll(struct linux_file *filp, int kqflags)
1143{
1144	struct thread *td;
1145	const struct file_operations *fop;
1146	struct linux_cdev *ldev;
1147	int temp;
1148
1149	if ((filp->f_kqflags & kqflags) == 0)
1150		return;
1151
1152	td = curthread;
1153
1154	linux_get_fop(filp, &fop, &ldev);
1155	/* get the latest polling state */
1156	temp = OPW(filp->_file, td, fop->poll(filp, NULL));
1157	linux_drop_fop(ldev);
1158
1159	spin_lock(&filp->f_kqlock);
1160	/* clear kqflags */
1161	filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ |
1162	    LINUX_KQ_FLAG_NEED_WRITE);
1163	/* update kqflags */
1164	if ((temp & (POLLIN | POLLOUT)) != 0) {
1165		if ((temp & POLLIN) != 0)
1166			filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ;
1167		if ((temp & POLLOUT) != 0)
1168			filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE;
1169
1170		/* make sure the "knote" gets woken up */
1171		KNOTE_LOCKED(&filp->f_selinfo.si_note, 0);
1172	}
1173	spin_unlock(&filp->f_kqlock);
1174}
1175
1176static int
1177linux_file_kqfilter(struct file *file, struct knote *kn)
1178{
1179	struct linux_file *filp;
1180	struct thread *td;
1181	int error;
1182
1183	td = curthread;
1184	filp = (struct linux_file *)file->f_data;
1185	filp->f_flags = file->f_flag;
1186	if (filp->f_op->poll == NULL)
1187		return (EINVAL);
1188
1189	spin_lock(&filp->f_kqlock);
1190	switch (kn->kn_filter) {
1191	case EVFILT_READ:
1192		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ;
1193		kn->kn_fop = &linux_dev_kqfiltops_read;
1194		kn->kn_hook = filp;
1195		knlist_add(&filp->f_selinfo.si_note, kn, 1);
1196		error = 0;
1197		break;
1198	case EVFILT_WRITE:
1199		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE;
1200		kn->kn_fop = &linux_dev_kqfiltops_write;
1201		kn->kn_hook = filp;
1202		knlist_add(&filp->f_selinfo.si_note, kn, 1);
1203		error = 0;
1204		break;
1205	default:
1206		error = EINVAL;
1207		break;
1208	}
1209	spin_unlock(&filp->f_kqlock);
1210
1211	if (error == 0) {
1212		linux_set_current(td);
1213
1214		/* update kqfilter status, if any */
1215		linux_file_kqfilter_poll(filp,
1216		    LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
1217	}
1218	return (error);
1219}
1220
1221static int
1222linux_file_mmap_single(struct file *fp, const struct file_operations *fop,
1223    vm_ooffset_t *offset, vm_size_t size, struct vm_object **object,
1224    int nprot, struct thread *td)
1225{
1226	struct task_struct *task;
1227	struct vm_area_struct *vmap;
1228	struct mm_struct *mm;
1229	struct linux_file *filp;
1230	vm_memattr_t attr;
1231	int error;
1232
1233	filp = (struct linux_file *)fp->f_data;
1234	filp->f_flags = fp->f_flag;
1235
1236	if (fop->mmap == NULL)
1237		return (EOPNOTSUPP);
1238
1239	linux_set_current(td);
1240
1241	/*
1242	 * The same VM object might be shared by multiple processes
1243	 * and the mm_struct is usually freed when a process exits.
1244	 *
1245	 * The atomic reference below makes sure the mm_struct is
1246	 * available as long as the vmap is in the linux_vma_head.
1247	 */
1248	task = current;
1249	mm = task->mm;
1250	if (atomic_inc_not_zero(&mm->mm_users) == 0)
1251		return (EINVAL);
1252
1253	vmap = kzalloc(sizeof(*vmap), GFP_KERNEL);
1254	vmap->vm_start = 0;
1255	vmap->vm_end = size;
1256	vmap->vm_pgoff = *offset / PAGE_SIZE;
1257	vmap->vm_pfn = 0;
1258	vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL);
1259	vmap->vm_ops = NULL;
1260	vmap->vm_file = get_file(filp);
1261	vmap->vm_mm = mm;
1262
1263	if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) {
1264		error = linux_get_error(task, EINTR);
1265	} else {
1266		error = -OPW(fp, td, fop->mmap(filp, vmap));
1267		error = linux_get_error(task, error);
1268		up_write(&vmap->vm_mm->mmap_sem);
1269	}
1270
1271	if (error != 0) {
1272		linux_cdev_handle_free(vmap);
1273		return (error);
1274	}
1275
1276	attr = pgprot2cachemode(vmap->vm_page_prot);
1277
1278	if (vmap->vm_ops != NULL) {
1279		struct vm_area_struct *ptr;
1280		void *vm_private_data;
1281		bool vm_no_fault;
1282
1283		if (vmap->vm_ops->open == NULL ||
1284		    vmap->vm_ops->close == NULL ||
1285		    vmap->vm_private_data == NULL) {
1286			/* free allocated VM area struct */
1287			linux_cdev_handle_free(vmap);
1288			return (EINVAL);
1289		}
1290
1291		vm_private_data = vmap->vm_private_data;
1292
1293		rw_wlock(&linux_vma_lock);
1294		TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) {
1295			if (ptr->vm_private_data == vm_private_data)
1296				break;
1297		}
1298		/* check if there is an existing VM area struct */
1299		if (ptr != NULL) {
1300			/* check if the VM area structure is invalid */
1301			if (ptr->vm_ops == NULL ||
1302			    ptr->vm_ops->open == NULL ||
1303			    ptr->vm_ops->close == NULL) {
1304				error = ESTALE;
1305				vm_no_fault = 1;
1306			} else {
1307				error = EEXIST;
1308				vm_no_fault = (ptr->vm_ops->fault == NULL);
1309			}
1310		} else {
1311			/* insert VM area structure into list */
1312			TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry);
1313			error = 0;
1314			vm_no_fault = (vmap->vm_ops->fault == NULL);
1315		}
1316		rw_wunlock(&linux_vma_lock);
1317
1318		if (error != 0) {
1319			/* free allocated VM area struct */
1320			linux_cdev_handle_free(vmap);
1321			/* check for stale VM area struct */
1322			if (error != EEXIST)
1323				return (error);
1324		}
1325
1326		/* check if there is no fault handler */
1327		if (vm_no_fault) {
1328			*object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE,
1329			    &linux_cdev_pager_ops[1], size, nprot, *offset,
1330			    td->td_ucred);
1331		} else {
1332			*object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE,
1333			    &linux_cdev_pager_ops[0], size, nprot, *offset,
1334			    td->td_ucred);
1335		}
1336
1337		/* check if allocating the VM object failed */
1338		if (*object == NULL) {
1339			if (error == 0) {
1340				/* remove VM area struct from list */
1341				linux_cdev_handle_remove(vmap);
1342				/* free allocated VM area struct */
1343				linux_cdev_handle_free(vmap);
1344			}
1345			return (EINVAL);
1346		}
1347	} else {
1348		struct sglist *sg;
1349
1350		sg = sglist_alloc(1, M_WAITOK);
1351		sglist_append_phys(sg,
1352		    (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len);
1353
1354		*object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len,
1355		    nprot, 0, td->td_ucred);
1356
1357		linux_cdev_handle_free(vmap);
1358
1359		if (*object == NULL) {
1360			sglist_free(sg);
1361			return (EINVAL);
1362		}
1363	}
1364
1365	if (attr != VM_MEMATTR_DEFAULT) {
1366		VM_OBJECT_WLOCK(*object);
1367		vm_object_set_memattr(*object, attr);
1368		VM_OBJECT_WUNLOCK(*object);
1369	}
1370	*offset = 0;
1371	return (0);
1372}
1373
1374struct cdevsw linuxcdevsw = {
1375	.d_version = D_VERSION,
1376	.d_fdopen = linux_dev_fdopen,
1377	.d_name = "lkpidev",
1378};
1379
1380static int
1381linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred,
1382    int flags, struct thread *td)
1383{
1384	struct linux_file *filp;
1385	const struct file_operations *fop;
1386	struct linux_cdev *ldev;
1387	ssize_t bytes;
1388	int error;
1389
1390	error = 0;
1391	filp = (struct linux_file *)file->f_data;
1392	filp->f_flags = file->f_flag;
1393	/* XXX no support for I/O vectors currently */
1394	if (uio->uio_iovcnt != 1)
1395		return (EOPNOTSUPP);
1396	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
1397		return (EINVAL);
1398	linux_set_current(td);
1399	linux_get_fop(filp, &fop, &ldev);
1400	if (fop->read != NULL) {
1401		bytes = OPW(file, td, fop->read(filp,
1402		    uio->uio_iov->iov_base,
1403		    uio->uio_iov->iov_len, &uio->uio_offset));
1404		if (bytes >= 0) {
1405			uio->uio_iov->iov_base =
1406			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
1407			uio->uio_iov->iov_len -= bytes;
1408			uio->uio_resid -= bytes;
1409		} else {
1410			error = linux_get_error(current, -bytes);
1411		}
1412	} else
1413		error = ENXIO;
1414
1415	/* update kqfilter status, if any */
1416	linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ);
1417	linux_drop_fop(ldev);
1418
1419	return (error);
1420}
1421
1422static int
1423linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred,
1424    int flags, struct thread *td)
1425{
1426	struct linux_file *filp;
1427	const struct file_operations *fop;
1428	struct linux_cdev *ldev;
1429	ssize_t bytes;
1430	int error;
1431
1432	filp = (struct linux_file *)file->f_data;
1433	filp->f_flags = file->f_flag;
1434	/* XXX no support for I/O vectors currently */
1435	if (uio->uio_iovcnt != 1)
1436		return (EOPNOTSUPP);
1437	if (uio->uio_resid > DEVFS_IOSIZE_MAX)
1438		return (EINVAL);
1439	linux_set_current(td);
1440	linux_get_fop(filp, &fop, &ldev);
1441	if (fop->write != NULL) {
1442		bytes = OPW(file, td, fop->write(filp,
1443		    uio->uio_iov->iov_base,
1444		    uio->uio_iov->iov_len, &uio->uio_offset));
1445		if (bytes >= 0) {
1446			uio->uio_iov->iov_base =
1447			    ((uint8_t *)uio->uio_iov->iov_base) + bytes;
1448			uio->uio_iov->iov_len -= bytes;
1449			uio->uio_resid -= bytes;
1450			error = 0;
1451		} else {
1452			error = linux_get_error(current, -bytes);
1453		}
1454	} else
1455		error = ENXIO;
1456
1457	/* update kqfilter status, if any */
1458	linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE);
1459
1460	linux_drop_fop(ldev);
1461
1462	return (error);
1463}
1464
1465static int
1466linux_file_poll(struct file *file, int events, struct ucred *active_cred,
1467    struct thread *td)
1468{
1469	struct linux_file *filp;
1470	const struct file_operations *fop;
1471	struct linux_cdev *ldev;
1472	int revents;
1473
1474	filp = (struct linux_file *)file->f_data;
1475	filp->f_flags = file->f_flag;
1476	linux_set_current(td);
1477	linux_get_fop(filp, &fop, &ldev);
1478	if (fop->poll != NULL) {
1479		revents = OPW(file, td, fop->poll(filp,
1480		    LINUX_POLL_TABLE_NORMAL)) & events;
1481	} else {
1482		revents = 0;
1483	}
1484	linux_drop_fop(ldev);
1485	return (revents);
1486}
1487
1488static int
1489linux_file_close(struct file *file, struct thread *td)
1490{
1491	struct linux_file *filp;
1492	const struct file_operations *fop;
1493	struct linux_cdev *ldev;
1494	int error;
1495
1496	filp = (struct linux_file *)file->f_data;
1497
1498	KASSERT(file_count(filp) == 0,
1499	    ("File refcount(%d) is not zero", file_count(filp)));
1500
1501	error = 0;
1502	filp->f_flags = file->f_flag;
1503	linux_set_current(td);
1504	linux_poll_wait_dequeue(filp);
1505	linux_get_fop(filp, &fop, &ldev);
1506	if (fop->release != NULL)
1507		error = -OPW(file, td, fop->release(filp->f_vnode, filp));
1508	funsetown(&filp->f_sigio);
1509	if (filp->f_vnode != NULL)
1510		vdrop(filp->f_vnode);
1511	linux_drop_fop(ldev);
1512	if (filp->f_cdev != NULL)
1513		linux_cdev_deref(filp->f_cdev);
1514	kfree(filp);
1515
1516	return (error);
1517}
1518
1519static int
1520linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred,
1521    struct thread *td)
1522{
1523	struct linux_file *filp;
1524	const struct file_operations *fop;
1525	struct linux_cdev *ldev;
1526	int error;
1527
1528	error = 0;
1529	filp = (struct linux_file *)fp->f_data;
1530	filp->f_flags = fp->f_flag;
1531	linux_get_fop(filp, &fop, &ldev);
1532
1533	linux_set_current(td);
1534	switch (cmd) {
1535	case FIONBIO:
1536		break;
1537	case FIOASYNC:
1538		if (fop->fasync == NULL)
1539			break;
1540		error = -OPW(fp, td, fop->fasync(0, filp, fp->f_flag & FASYNC));
1541		break;
1542	case FIOSETOWN:
1543		error = fsetown(*(int *)data, &filp->f_sigio);
1544		if (error == 0) {
1545			if (fop->fasync == NULL)
1546				break;
1547			error = -OPW(fp, td, fop->fasync(0, filp,
1548			    fp->f_flag & FASYNC));
1549		}
1550		break;
1551	case FIOGETOWN:
1552		*(int *)data = fgetown(&filp->f_sigio);
1553		break;
1554	default:
1555		error = linux_file_ioctl_sub(fp, filp, fop, cmd, data, td);
1556		break;
1557	}
1558	linux_drop_fop(ldev);
1559	return (error);
1560}
1561
1562static int
1563linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot,
1564    vm_prot_t *maxprotp, int *flagsp, struct file *fp,
1565    vm_ooffset_t *foff, const struct file_operations *fop, vm_object_t *objp)
1566{
1567	/*
1568	 * Character devices do not provide private mappings
1569	 * of any kind:
1570	 */
1571	if ((*maxprotp & VM_PROT_WRITE) == 0 &&
1572	    (prot & VM_PROT_WRITE) != 0)
1573		return (EACCES);
1574	if ((*flagsp & (MAP_PRIVATE | MAP_COPY)) != 0)
1575		return (EINVAL);
1576
1577	return (linux_file_mmap_single(fp, fop, foff, objsize, objp,
1578	    (int)prot, td));
1579}
1580
1581static int
1582linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size,
1583    vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff,
1584    struct thread *td)
1585{
1586	struct linux_file *filp;
1587	const struct file_operations *fop;
1588	struct linux_cdev *ldev;
1589	struct mount *mp;
1590	struct vnode *vp;
1591	vm_object_t object;
1592	vm_prot_t maxprot;
1593	int error;
1594
1595	filp = (struct linux_file *)fp->f_data;
1596
1597	vp = filp->f_vnode;
1598	if (vp == NULL)
1599		return (EOPNOTSUPP);
1600
1601	/*
1602	 * Ensure that file and memory protections are
1603	 * compatible.
1604	 */
1605	mp = vp->v_mount;
1606	if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) {
1607		maxprot = VM_PROT_NONE;
1608		if ((prot & VM_PROT_EXECUTE) != 0)
1609			return (EACCES);
1610	} else
1611		maxprot = VM_PROT_EXECUTE;
1612	if ((fp->f_flag & FREAD) != 0)
1613		maxprot |= VM_PROT_READ;
1614	else if ((prot & VM_PROT_READ) != 0)
1615		return (EACCES);
1616
1617	/*
1618	 * If we are sharing potential changes via MAP_SHARED and we
1619	 * are trying to get write permission although we opened it
1620	 * without asking for it, bail out.
1621	 *
1622	 * Note that most character devices always share mappings.
1623	 *
1624	 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE
1625	 * requests rather than doing it here.
1626	 */
1627	if ((flags & MAP_SHARED) != 0) {
1628		if ((fp->f_flag & FWRITE) != 0)
1629			maxprot |= VM_PROT_WRITE;
1630		else if ((prot & VM_PROT_WRITE) != 0)
1631			return (EACCES);
1632	}
1633	maxprot &= cap_maxprot;
1634
1635	linux_get_fop(filp, &fop, &ldev);
1636	error = linux_file_mmap_sub(td, size, prot, &maxprot, &flags, fp,
1637	    &foff, fop, &object);
1638	if (error != 0)
1639		goto out;
1640
1641	error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object,
1642	    foff, FALSE, td);
1643	if (error != 0)
1644		vm_object_deallocate(object);
1645out:
1646	linux_drop_fop(ldev);
1647	return (error);
1648}
1649
1650static int
1651linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred,
1652    struct thread *td)
1653{
1654	struct linux_file *filp;
1655	struct vnode *vp;
1656	int error;
1657
1658	filp = (struct linux_file *)fp->f_data;
1659	if (filp->f_vnode == NULL)
1660		return (EOPNOTSUPP);
1661
1662	vp = filp->f_vnode;
1663
1664	vn_lock(vp, LK_SHARED | LK_RETRY);
1665	error = vn_stat(vp, sb, td->td_ucred, NOCRED, td);
1666	VOP_UNLOCK(vp, 0);
1667
1668	return (error);
1669}
1670
1671static int
1672linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif,
1673    struct filedesc *fdp)
1674{
1675	struct linux_file *filp;
1676	struct vnode *vp;
1677	int error;
1678
1679	filp = fp->f_data;
1680	vp = filp->f_vnode;
1681	if (vp == NULL) {
1682		error = 0;
1683		kif->kf_type = KF_TYPE_DEV;
1684	} else {
1685		vref(vp);
1686		FILEDESC_SUNLOCK(fdp);
1687		error = vn_fill_kinfo_vnode(vp, kif);
1688		vrele(vp);
1689		kif->kf_type = KF_TYPE_VNODE;
1690		FILEDESC_SLOCK(fdp);
1691	}
1692	return (error);
1693}
1694
1695unsigned int
1696linux_iminor(struct inode *inode)
1697{
1698	struct linux_cdev *ldev;
1699
1700	if (inode == NULL || inode->v_rdev == NULL ||
1701	    inode->v_rdev->si_devsw != &linuxcdevsw)
1702		return (-1U);
1703	ldev = inode->v_rdev->si_drv1;
1704	if (ldev == NULL)
1705		return (-1U);
1706
1707	return (minor(ldev->dev));
1708}
1709
1710struct fileops linuxfileops = {
1711	.fo_read = linux_file_read,
1712	.fo_write = linux_file_write,
1713	.fo_truncate = invfo_truncate,
1714	.fo_kqfilter = linux_file_kqfilter,
1715	.fo_stat = linux_file_stat,
1716	.fo_fill_kinfo = linux_file_fill_kinfo,
1717	.fo_poll = linux_file_poll,
1718	.fo_close = linux_file_close,
1719	.fo_ioctl = linux_file_ioctl,
1720	.fo_mmap = linux_file_mmap,
1721	.fo_chmod = invfo_chmod,
1722	.fo_chown = invfo_chown,
1723	.fo_sendfile = invfo_sendfile,
1724	.fo_flags = DFLAG_PASSABLE,
1725};
1726
1727/*
1728 * Hash of vmmap addresses.  This is infrequently accessed and does not
1729 * need to be particularly large.  The hash is needed because we must
1730 * remember the caller's idea of the map size in order to unmap properly.
1731 */
1732struct vmmap {
1733	LIST_ENTRY(vmmap)	vm_next;
1734	void 			*vm_addr;
1735	unsigned long		vm_size;
1736};
1737
1738struct vmmaphd {
1739	struct vmmap *lh_first;
1740};
1741#define	VMMAP_HASH_SIZE	64
1742#define	VMMAP_HASH_MASK	(VMMAP_HASH_SIZE - 1)
1743#define	VM_HASH(addr)	((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK
1744static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE];
1745static struct mtx vmmaplock;
1746
1747static void
1748vmmap_add(void *addr, unsigned long size)
1749{
1750	struct vmmap *vmmap;
1751
1752	vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL);
1753	mtx_lock(&vmmaplock);
1754	vmmap->vm_size = size;
1755	vmmap->vm_addr = addr;
1756	LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next);
1757	mtx_unlock(&vmmaplock);
1758}
1759
1760static struct vmmap *
1761vmmap_remove(void *addr)
1762{
1763	struct vmmap *vmmap;
1764
1765	mtx_lock(&vmmaplock);
1766	LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next)
1767		if (vmmap->vm_addr == addr)
1768			break;
1769	if (vmmap)
1770		LIST_REMOVE(vmmap, vm_next);
1771	mtx_unlock(&vmmaplock);
1772
1773	return (vmmap);
1774}
1775
1776#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
1777void *
1778_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr)
1779{
1780	void *addr;
1781
1782	addr = pmap_mapdev_attr(phys_addr, size, attr);
1783	if (addr == NULL)
1784		return (NULL);
1785	vmmap_add(addr, size);
1786
1787	return (addr);
1788}
1789#endif
1790
1791void
1792iounmap(void *addr)
1793{
1794	struct vmmap *vmmap;
1795
1796	vmmap = vmmap_remove(addr);
1797	if (vmmap == NULL)
1798		return;
1799#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
1800	pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size);
1801#endif
1802	kfree(vmmap);
1803}
1804
1805
1806void *
1807vmap(struct page **pages, unsigned int count, unsigned long flags, int prot)
1808{
1809	vm_offset_t off;
1810	size_t size;
1811
1812	size = count * PAGE_SIZE;
1813	off = kva_alloc(size);
1814	if (off == 0)
1815		return (NULL);
1816	vmmap_add((void *)off, size);
1817	pmap_qenter(off, pages, count);
1818
1819	return ((void *)off);
1820}
1821
1822void
1823vunmap(void *addr)
1824{
1825	struct vmmap *vmmap;
1826
1827	vmmap = vmmap_remove(addr);
1828	if (vmmap == NULL)
1829		return;
1830	pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE);
1831	kva_free((vm_offset_t)addr, vmmap->vm_size);
1832	kfree(vmmap);
1833}
1834
1835char *
1836kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
1837{
1838	unsigned int len;
1839	char *p;
1840	va_list aq;
1841
1842	va_copy(aq, ap);
1843	len = vsnprintf(NULL, 0, fmt, aq);
1844	va_end(aq);
1845
1846	p = kmalloc(len + 1, gfp);
1847	if (p != NULL)
1848		vsnprintf(p, len + 1, fmt, ap);
1849
1850	return (p);
1851}
1852
1853char *
1854kasprintf(gfp_t gfp, const char *fmt, ...)
1855{
1856	va_list ap;
1857	char *p;
1858
1859	va_start(ap, fmt);
1860	p = kvasprintf(gfp, fmt, ap);
1861	va_end(ap);
1862
1863	return (p);
1864}
1865
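/*
 * Usage sketch (illustrative only): kasprintf() allocates and formats in
 * one step, so the caller only has to free the result, e.g. (the format
 * string and value are hypothetical):
 *
 *	char *name;
 *
 *	name = kasprintf(GFP_KERNEL, "queue-%d", 7);
 *	...
 *	kfree(name);
 */
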
1866static void
1867linux_timer_callback_wrapper(void *context)
1868{
1869	struct timer_list *timer;
1870
1871	linux_set_current(curthread);
1872
1873	timer = context;
1874	timer->function(timer->data);
1875}
1876
1877void
1878mod_timer(struct timer_list *timer, int expires)
1879{
1880
1881	timer->expires = expires;
1882	callout_reset(&timer->callout,
1883	    linux_timer_jiffies_until(expires),
1884	    &linux_timer_callback_wrapper, timer);
1885}
1886
1887void
1888add_timer(struct timer_list *timer)
1889{
1890
1891	callout_reset(&timer->callout,
1892	    linux_timer_jiffies_until(timer->expires),
1893	    &linux_timer_callback_wrapper, timer);
1894}
1895
1896void
1897add_timer_on(struct timer_list *timer, int cpu)
1898{
1899
1900	callout_reset_on(&timer->callout,
1901	    linux_timer_jiffies_until(timer->expires),
1902	    &linux_timer_callback_wrapper, timer, cpu);
1903}
1904
1905static void
1906linux_timer_init(void *arg)
1907{
1908
1909	/*
1910	 * Compute an internal HZ value which can divide 2**32 to
1911	 * avoid timer rounding problems when the tick value wraps
1912	 * around 2**32:
1913	 */
1914	linux_timer_hz_mask = 1;
1915	while (linux_timer_hz_mask < (unsigned long)hz)
1916		linux_timer_hz_mask *= 2;
1917	linux_timer_hz_mask--;
1918}
1919SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL);
1920
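/*
 * Worked example (illustrative): with hz = 1000 the loop above stops at
 * 1024, so linux_timer_hz_mask becomes 1023 (0x3ff).  1024 divides 2**32
 * evenly, which keeps the jiffies arithmetic consistent when the tick
 * value wraps around 2**32.
 */
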
1921void
1922linux_complete_common(struct completion *c, int all)
1923{
1924	int wakeup_swapper;
1925
1926	sleepq_lock(c);
1927	if (all) {
1928		c->done = UINT_MAX;
1929		wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0);
1930	} else {
1931		if (c->done != UINT_MAX)
1932			c->done++;
1933		wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0);
1934	}
1935	sleepq_release(c);
1936	if (wakeup_swapper)
1937		kick_proc0();
1938}
1939
1940/*
1941 * Indefinite wait for done != 0 with or without signals.
1942 */
1943int
1944linux_wait_for_common(struct completion *c, int flags)
1945{
1946	struct task_struct *task;
1947	int error;
1948
1949	if (SCHEDULER_STOPPED())
1950		return (0);
1951
1952	task = current;
1953
1954	if (flags != 0)
1955		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
1956	else
1957		flags = SLEEPQ_SLEEP;
1958	error = 0;
1959	for (;;) {
1960		sleepq_lock(c);
1961		if (c->done)
1962			break;
1963		sleepq_add(c, NULL, "completion", flags, 0);
1964		if (flags & SLEEPQ_INTERRUPTIBLE) {
1965			DROP_GIANT();
1966			error = -sleepq_wait_sig(c, 0);
1967			PICKUP_GIANT();
1968			if (error != 0) {
1969				linux_schedule_save_interrupt_value(task, error);
1970				error = -ERESTARTSYS;
1971				goto intr;
1972			}
1973		} else {
1974			DROP_GIANT();
1975			sleepq_wait(c, 0);
1976			PICKUP_GIANT();
1977		}
1978	}
1979	if (c->done != UINT_MAX)
1980		c->done--;
1981	sleepq_release(c);
1982
1983intr:
1984	return (error);
1985}
1986
1987/*
1988 * Time limited wait for done != 0 with or without signals.
1989 */
1990int
1991linux_wait_for_timeout_common(struct completion *c, int timeout, int flags)
1992{
1993	struct task_struct *task;
1994	int end = jiffies + timeout;
1995	int error;
1996
1997	if (SCHEDULER_STOPPED())
1998		return (0);
1999
2000	task = current;
2001
2002	if (flags != 0)
2003		flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP;
2004	else
2005		flags = SLEEPQ_SLEEP;
2006
2007	for (;;) {
2008		sleepq_lock(c);
2009		if (c->done)
2010			break;
2011		sleepq_add(c, NULL, "completion", flags, 0);
2012		sleepq_set_timeout(c, linux_timer_jiffies_until(end));
2013
2014		DROP_GIANT();
2015		if (flags & SLEEPQ_INTERRUPTIBLE)
2016			error = -sleepq_timedwait_sig(c, 0);
2017		else
2018			error = -sleepq_timedwait(c, 0);
2019		PICKUP_GIANT();
2020
2021		if (error != 0) {
2022			/* check for timeout */
2023			if (error == -EWOULDBLOCK) {
2024				error = 0;	/* timeout */
2025			} else {
2026				/* signal happened */
2027				linux_schedule_save_interrupt_value(task, error);
2028				error = -ERESTARTSYS;
2029			}
2030			goto done;
2031		}
2032	}
2033	if (c->done != UINT_MAX)
2034		c->done--;
2035	sleepq_release(c);
2036
2037	/* return how many jiffies are left */
2038	error = linux_timer_jiffies_until(end);
2039done:
2040	return (error);
2041}
2042
2043int
2044linux_try_wait_for_completion(struct completion *c)
2045{
2046	int isdone;
2047
2048	sleepq_lock(c);
2049	isdone = (c->done != 0);
2050	if (c->done != 0 && c->done != UINT_MAX)
2051		c->done--;
2052	sleepq_release(c);
2053	return (isdone);
2054}
2055
2056int
2057linux_completion_done(struct completion *c)
2058{
2059	int isdone;
2060
2061	sleepq_lock(c);
2062	isdone = (c->done != 0);
2063	sleepq_release(c);
2064	return (isdone);
2065}
2066
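/*
 * Usage sketch (illustrative, not part of this file): these routines are
 * the backends of the usual Linux completion API, which is assumed to be
 * provided by <linux/completion.h> in terms of them, e.g.:
 *
 *	struct completion done;
 *
 *	init_completion(&done);
 *	...				(another thread calls complete(&done))
 *	wait_for_completion(&done);	(maps to linux_wait_for_common(&done, 0))
 */
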
2067static void
2068linux_cdev_deref(struct linux_cdev *ldev)
2069{
2070
2071	if (refcount_release(&ldev->refs))
2072		kfree(ldev);
2073}
2074
2075static void
2076linux_cdev_release(struct kobject *kobj)
2077{
2078	struct linux_cdev *cdev;
2079	struct kobject *parent;
2080
2081	cdev = container_of(kobj, struct linux_cdev, kobj);
2082	parent = kobj->parent;
2083	linux_destroy_dev(cdev);
2084	linux_cdev_deref(cdev);
2085	kobject_put(parent);
2086}
2087
2088static void
2089linux_cdev_static_release(struct kobject *kobj)
2090{
2091	struct linux_cdev *cdev;
2092	struct kobject *parent;
2093
2094	cdev = container_of(kobj, struct linux_cdev, kobj);
2095	parent = kobj->parent;
2096	linux_destroy_dev(cdev);
2097	kobject_put(parent);
2098}
2099
2100void
2101linux_destroy_dev(struct linux_cdev *ldev)
2102{
2103
2104	if (ldev->cdev == NULL)
2105		return;
2106
2107	MPASS((ldev->siref & LDEV_SI_DTR) == 0);
2108	atomic_set_int(&ldev->siref, LDEV_SI_DTR);
2109	while ((atomic_load_int(&ldev->siref) & ~LDEV_SI_DTR) != 0)
2110		pause("ldevdtr", hz / 4);
2111
2112	destroy_dev(ldev->cdev);
2113	ldev->cdev = NULL;
2114}
2115
2116const struct kobj_type linux_cdev_ktype = {
2117	.release = linux_cdev_release,
2118};
2119
2120const struct kobj_type linux_cdev_static_ktype = {
2121	.release = linux_cdev_static_release,
2122};
2123
2124static void
2125linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate)
2126{
2127	struct notifier_block *nb;
2128
2129	nb = arg;
2130	if (linkstate == LINK_STATE_UP)
2131		nb->notifier_call(nb, NETDEV_UP, ifp);
2132	else
2133		nb->notifier_call(nb, NETDEV_DOWN, ifp);
2134}
2135
2136static void
2137linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp)
2138{
2139	struct notifier_block *nb;
2140
2141	nb = arg;
2142	nb->notifier_call(nb, NETDEV_REGISTER, ifp);
2143}
2144
2145static void
2146linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp)
2147{
2148	struct notifier_block *nb;
2149
2150	nb = arg;
2151	nb->notifier_call(nb, NETDEV_UNREGISTER, ifp);
2152}
2153
2154static void
2155linux_handle_iflladdr_event(void *arg, struct ifnet *ifp)
2156{
2157	struct notifier_block *nb;
2158
2159	nb = arg;
2160	nb->notifier_call(nb, NETDEV_CHANGEADDR, ifp);
2161}
2162
2163static void
2164linux_handle_ifaddr_event(void *arg, struct ifnet *ifp)
2165{
2166	struct notifier_block *nb;
2167
2168	nb = arg;
2169	nb->notifier_call(nb, NETDEV_CHANGEIFADDR, ifp);
2170}
2171
2172int
2173register_netdevice_notifier(struct notifier_block *nb)
2174{
2175
2176	nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER(
2177	    ifnet_link_event, linux_handle_ifnet_link_event, nb, 0);
2178	nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER(
2179	    ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0);
2180	nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER(
2181	    ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0);
2182	nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER(
2183	    iflladdr_event, linux_handle_iflladdr_event, nb, 0);
2184
2185	return (0);
2186}
2187
2188int
2189register_inetaddr_notifier(struct notifier_block *nb)
2190{
2191
2192	nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER(
2193	    ifaddr_event, linux_handle_ifaddr_event, nb, 0);
2194	return (0);
2195}
2196
2197int
2198unregister_netdevice_notifier(struct notifier_block *nb)
2199{
2200
2201	EVENTHANDLER_DEREGISTER(ifnet_link_event,
2202	    nb->tags[NETDEV_UP]);
2203	EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
2204	    nb->tags[NETDEV_REGISTER]);
2205	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
2206	    nb->tags[NETDEV_UNREGISTER]);
2207	EVENTHANDLER_DEREGISTER(iflladdr_event,
2208	    nb->tags[NETDEV_CHANGEADDR]);
2209
2210	return (0);
2211}
2212
2213int
2214unregister_inetaddr_notifier(struct notifier_block *nb)
2215{
2216
2217	EVENTHANDLER_DEREGISTER(ifaddr_event,
2218	    nb->tags[NETDEV_CHANGEIFADDR]);
2219
2220	return (0);
2221}
2222
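/*
 * Usage sketch (illustrative, not part of this file): a consumer fills in
 * a notifier_block whose notifier_call receives the NETDEV_* events that
 * the handlers above translate from FreeBSD eventhandler(9) callbacks:
 *
 *	static int
 *	my_netdev_event(struct notifier_block *nb, unsigned long event,
 *	    void *ptr)			(hypothetical callback)
 *	{
 *		if (event == NETDEV_UP)
 *			...
 *		return (0);
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&my_nb);
 */
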
2223struct list_sort_thunk {
2224	int (*cmp)(void *, struct list_head *, struct list_head *);
2225	void *priv;
2226};
2227
2228static inline int
2229linux_le_cmp(void *priv, const void *d1, const void *d2)
2230{
2231	struct list_head *le1, *le2;
2232	struct list_sort_thunk *thunk;
2233
2234	thunk = priv;
2235	le1 = *(__DECONST(struct list_head **, d1));
2236	le2 = *(__DECONST(struct list_head **, d2));
2237	return ((thunk->cmp)(thunk->priv, le1, le2));
2238}
2239
2240void
2241list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv,
2242    struct list_head *a, struct list_head *b))
2243{
2244	struct list_sort_thunk thunk;
2245	struct list_head **ar, *le;
2246	size_t count, i;
2247
2248	count = 0;
2249	list_for_each(le, head)
2250		count++;
2251	ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK);
2252	i = 0;
2253	list_for_each(le, head)
2254		ar[i++] = le;
2255	thunk.cmp = cmp;
2256	thunk.priv = priv;
2257	qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp);
2258	INIT_LIST_HEAD(head);
2259	for (i = 0; i < count; i++)
2260		list_add_tail(ar[i], head);
2261	free(ar, M_KMALLOC);
2262}
2263
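/*
 * Usage sketch (illustrative, not part of this file): list_sort() takes a
 * comparison callback returning <0, 0 or >0, with "priv" passed through
 * unchanged, e.g. sorting entries that embed a list_head:
 *
 *	struct my_entry {		(hypothetical element type)
 *		struct list_head node;
 *		int key;
 *	};
 *
 *	static int
 *	my_cmp(void *priv, struct list_head *a, struct list_head *b)
 *	{
 *		struct my_entry *ea = container_of(a, struct my_entry, node);
 *		struct my_entry *eb = container_of(b, struct my_entry, node);
 *
 *		return (ea->key - eb->key);
 *	}
 *
 *	list_sort(NULL, &my_list, &my_cmp);
 */
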
2264void
2265linux_irq_handler(void *ent)
2266{
2267	struct irq_ent *irqe;
2268
2269	linux_set_current(curthread);
2270
2271	irqe = ent;
2272	irqe->handler(irqe->irq, irqe->arg);
2273}
2274
2275#if defined(__i386__) || defined(__amd64__)
2276int
2277linux_wbinvd_on_all_cpus(void)
2278{
2279
2280	pmap_invalidate_cache();
2281	return (0);
2282}
2283#endif
2284
2285int
2286linux_on_each_cpu(void callback(void *), void *data)
2287{
2288
2289	smp_rendezvous(smp_no_rendezvous_barrier, callback,
2290	    smp_no_rendezvous_barrier, data);
2291	return (0);
2292}
2293
2294int
2295linux_in_atomic(void)
2296{
2297
2298	return ((curthread->td_pflags & TDP_NOFAULTING) != 0);
2299}
2300
2301struct linux_cdev *
2302linux_find_cdev(const char *name, unsigned major, unsigned minor)
2303{
2304	dev_t dev = MKDEV(major, minor);
2305	struct cdev *cdev;
2306
2307	dev_lock();
2308	LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) {
2309		struct linux_cdev *ldev = cdev->si_drv1;
2310		if (ldev->dev == dev &&
2311		    strcmp(kobject_name(&ldev->kobj), name) == 0) {
2312			break;
2313		}
2314	}
2315	dev_unlock();
2316
2317	return (cdev != NULL ? cdev->si_drv1 : NULL);
2318}
2319
2320int
2321__register_chrdev(unsigned int major, unsigned int baseminor,
2322    unsigned int count, const char *name,
2323    const struct file_operations *fops)
2324{
2325	struct linux_cdev *cdev;
2326	int ret = 0;
2327	int i;
2328
2329	for (i = baseminor; i < baseminor + count; i++) {
2330		cdev = cdev_alloc();
2331		cdev_init(cdev, fops);
2332		kobject_set_name(&cdev->kobj, name);
2333
2334		ret = cdev_add(cdev, makedev(major, i), 1);
2335		if (ret != 0)
2336			break;
2337	}
2338	return (ret);
2339}
2340
2341int
2342__register_chrdev_p(unsigned int major, unsigned int baseminor,
2343    unsigned int count, const char *name,
2344    const struct file_operations *fops, uid_t uid,
2345    gid_t gid, int mode)
2346{
2347	struct linux_cdev *cdev;
2348	int ret = 0;
2349	int i;
2350
2351	for (i = baseminor; i < baseminor + count; i++) {
2352		cdev = cdev_alloc();
2353		cdev_init(cdev, fops);
2354		kobject_set_name(&cdev->kobj, name);
2355
2356		ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode);
2357		if (ret != 0)
2358			break;
2359	}
2360	return (ret);
2361}
2362
2363void
2364__unregister_chrdev(unsigned int major, unsigned int baseminor,
2365    unsigned int count, const char *name)
2366{
2367	struct linux_cdev *cdevp;
2368	int i;
2369
2370	for (i = baseminor; i < baseminor + count; i++) {
2371		cdevp = linux_find_cdev(name, major, i);
2372		if (cdevp != NULL)
2373			cdev_del(cdevp);
2374	}
2375}
2376
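/*
 * Usage sketch (illustrative, not part of this file): the chrdev helpers
 * above create one linux_cdev per minor and look them up again by name
 * and number when unregistering, e.g. (MY_MAJOR, "mydrv" and my_fops are
 * hypothetical):
 *
 *	error = __register_chrdev(MY_MAJOR, 0, 4, "mydrv", &my_fops);
 *	...
 *	__unregister_chrdev(MY_MAJOR, 0, 4, "mydrv");
 */
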
2377void
2378linux_dump_stack(void)
2379{
2380#ifdef STACK
2381	struct stack st;
2382
2383	stack_zero(&st);
2384	stack_save(&st);
2385	stack_print(&st);
2386#endif
2387}
2388
2389#if defined(__i386__) || defined(__amd64__)
2390bool linux_cpu_has_clflush;
2391#endif
2392
2393static void
2394linux_compat_init(void *arg)
2395{
2396	struct sysctl_oid *rootoid;
2397	int i;
2398
2399#if defined(__i386__) || defined(__amd64__)
2400	linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH);
2401#endif
2402	rw_init(&linux_vma_lock, "lkpi-vma-lock");
2403
2404	rootoid = SYSCTL_ADD_ROOT_NODE(NULL,
2405	    OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys");
2406	kobject_init(&linux_class_root, &linux_class_ktype);
2407	kobject_set_name(&linux_class_root, "class");
2408	linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid),
2409	    OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class");
2410	kobject_init(&linux_root_device.kobj, &linux_dev_ktype);
2411	kobject_set_name(&linux_root_device.kobj, "device");
2412	linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL,
2413	    SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD, NULL,
2414	    "device");
2415	linux_root_device.bsddev = root_bus;
2416	linux_class_misc.name = "misc";
2417	class_register(&linux_class_misc);
2418	INIT_LIST_HEAD(&pci_drivers);
2419	INIT_LIST_HEAD(&pci_devices);
2420	spin_lock_init(&pci_lock);
2421	mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF);
2422	for (i = 0; i < VMMAP_HASH_SIZE; i++)
2423		LIST_INIT(&vmmaphead[i]);
2424}
2425SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL);
2426
2427static void
2428linux_compat_uninit(void *arg)
2429{
2430	linux_kobject_kfree_name(&linux_class_root);
2431	linux_kobject_kfree_name(&linux_root_device.kobj);
2432	linux_kobject_kfree_name(&linux_class_misc.kobj);
2433
2434	mtx_destroy(&vmmaplock);
2435	spin_lock_destroy(&pci_lock);
2436	rw_destroy(&linux_vma_lock);
2437}
2438SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL);
2439
2440/*
2441 * NOTE: Linux frequently uses "unsigned long" for pointer to integer
2442 * conversion and vice versa, where in FreeBSD "uintptr_t" would be
2443 * used. Assert these types have the same size, else some parts of the
2444 * LinuxKPI may not work as expected:
2445 */
2446CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t));
2447