linux_compat.c revision 329971
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/compat/linuxkpi/common/src/linux_compat.c 329971 2018-02-25 10:34:47Z hselasky $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/sglist.h>
#include <sys/sleepqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/rwlock.h>
#include <sys/mman.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <machine/stdarg.h>

#if defined(__i386__) || defined(__amd64__)
#include <machine/md_var.h>
#endif

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/netdevice.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/compat.h>
#include <linux/poll.h>
#include <linux/smp.h>

#if defined(__i386__) || defined(__amd64__)
#include <asm/smp.h>
#endif

SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW, 0, "LinuxKPI parameters");

MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat");

#include <linux/rbtree.h>
/* Undo Linux compat changes. */
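/* The LinuxKPI headers remap these identifiers; restore the native FreeBSD definitions here. */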
#undef RB_ROOT
#undef file
#undef cdev
#define	RB_ROOT(head)	(head)->rbh_root

static struct vm_area_struct *linux_cdev_handle_find(void *handle);

struct kobject linux_class_root;
struct device linux_root_device;
struct class linux_class_misc;
struct list_head pci_drivers;
struct list_head pci_devices;
spinlock_t pci_lock;

unsigned long linux_timer_hz_mask;

int
panic_cmp(struct rb_node *one, struct rb_node *two)
{
	panic("no cmp");
}

RB_GENERATE(linux_root, rb_node, __entry, panic_cmp);

int
kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args)
{
	va_list tmp_va;
	int len;
	char *old;
	char *name;
	char dummy;

	old = kobj->name;

	if (old && fmt == NULL)
		return (0);

	/* compute length of string */
	va_copy(tmp_va, args);
	len = vsnprintf(&dummy, 0, fmt, tmp_va);
	va_end(tmp_va);

	/* account for zero termination */
	len++;

	/* check for error */
	if (len < 1)
		return (-EINVAL);

	/* allocate memory for string */
	name = kzalloc(len, GFP_KERNEL);
	if (name == NULL)
		return (-ENOMEM);
	vsnprintf(name, len, fmt, args);
	kobj->name = name;

	/* free old string */
	kfree(old);

	/* filter new string */
	for (; *name != '\0'; name++)
		if (*name == '/')
			*name = '!';
	return (0);
}

int
kobject_set_name(struct kobject *kobj, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);

	return (error);
}

static int
kobject_add_complete(struct kobject *kobj, struct kobject *parent)
{
	const struct kobj_type *t;
	int error;

	kobj->parent = parent;
	error = sysfs_create_dir(kobj);
	if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) {
		struct attribute **attr;
		t = kobj->ktype;

		for (attr = t->default_attrs; *attr != NULL; attr++) {
			error = sysfs_create_file(kobj, *attr);
			if (error)
				break;
		}
		if (error)
			sysfs_remove_dir(kobj);

	}
	return (error);
}
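/* Set the kobject's name from the format arguments and attach it below the given parent. */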
int
kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);

	return kobject_add_complete(kobj, parent);
}

void
linux_kobject_release(struct kref *kref)
{
	struct kobject *kobj;
	char *name;

	kobj = container_of(kref, struct kobject, kref);
	sysfs_remove_dir(kobj);
	name = kobj->name;
	if (kobj->ktype && kobj->ktype->release)
		kobj->ktype->release(kobj);
	kfree(name);
}

static void
linux_kobject_kfree(struct kobject *kobj)
{
	kfree(kobj);
}

static void
linux_kobject_kfree_name(struct kobject *kobj)
{
	if (kobj) {
		kfree(kobj->name);
	}
}

const struct kobj_type linux_kfree_type = {
	.release = linux_kobject_kfree
};

static void
linux_device_release(struct device *dev)
{
	pr_debug("linux_device_release: %s\n", dev_name(dev));
	kfree(dev);
}

static ssize_t
linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct class, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct class_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct class_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct class, kobj),
		    dattr, buf, count);
	return (error);
}

static void
linux_class_release(struct kobject *kobj)
{
	struct class *class;

	class = container_of(kobj, struct class, kobj);
	if (class->class_release)
		class->class_release(class);
}

static const struct sysfs_ops linux_class_sysfs = {
	.show  = linux_class_show,
	.store = linux_class_store,
};

const struct kobj_type linux_class_ktype = {
	.release = linux_class_release,
	.sysfs_ops = &linux_class_sysfs
};

static void
linux_dev_release(struct kobject *kobj)
{
	struct device *dev;

	dev = container_of(kobj, struct device, kobj);
	/* This is the precedence defined by linux. */
	if (dev->release)
		dev->release(dev);
	else if (dev->class && dev->class->dev_release)
		dev->class->dev_release(dev);
}

static ssize_t
linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->show)
		error = dattr->show(container_of(kobj, struct device, kobj),
		    dattr, buf);
	return (error);
}

static ssize_t
linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf,
    size_t count)
{
	struct device_attribute *dattr;
	ssize_t error;

	dattr = container_of(attr, struct device_attribute, attr);
	error = -EIO;
	if (dattr->store)
		error = dattr->store(container_of(kobj, struct device, kobj),
		    dattr, buf, count);
	return (error);
}

static const struct sysfs_ops linux_dev_sysfs = {
	.show  = linux_dev_show,
	.store = linux_dev_store,
};

const struct kobj_type linux_dev_ktype = {
	.release = linux_dev_release,
	.sysfs_ops = &linux_dev_sysfs
};

struct device *
device_create(struct class *class, struct device *parent, dev_t devt,
    void *drvdata, const char *fmt, ...)
{
	struct device *dev;
	va_list args;

	dev = kzalloc(sizeof(*dev), M_WAITOK);
	dev->parent = parent;
	dev->class = class;
	dev->devt = devt;
	dev->driver_data = drvdata;
	dev->release = linux_device_release;
	va_start(args, fmt);
	kobject_set_name_vargs(&dev->kobj, fmt, args);
	va_end(args);
	device_register(dev);

	return (dev);
}
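/* Initialize a kobject, set its formatted name and add it below the given parent in one call. */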
int
kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype,
    struct kobject *parent, const char *fmt, ...)
{
	va_list args;
	int error;

	kobject_init(kobj, ktype);
	kobj->ktype = ktype;
	kobj->parent = parent;
	kobj->name = NULL;

	va_start(args, fmt);
	error = kobject_set_name_vargs(kobj, fmt, args);
	va_end(args);
	if (error)
		return (error);
	return kobject_add_complete(kobj, parent);
}

static void
linux_kq_lock(void *arg)
{
	spinlock_t *s = arg;

	spin_lock(s);
}
static void
linux_kq_unlock(void *arg)
{
	spinlock_t *s = arg;

	spin_unlock(s);
}

static void
linux_kq_lock_owned(void *arg)
{
#ifdef INVARIANTS
	spinlock_t *s = arg;

	mtx_assert(&s->m, MA_OWNED);
#endif
}

static void
linux_kq_lock_unowned(void *arg)
{
#ifdef INVARIANTS
	spinlock_t *s = arg;

	mtx_assert(&s->m, MA_NOTOWNED);
#endif
}

static void
linux_file_kqfilter_poll(struct linux_file *, int);

struct linux_file *
linux_file_alloc(void)
{
	struct linux_file *filp;

	filp = kzalloc(sizeof(*filp), GFP_KERNEL);

	/* set initial refcount */
	filp->f_count = 1;

	/* setup fields needed by kqueue support */
	spin_lock_init(&filp->f_kqlock);
	knlist_init(&filp->f_selinfo.si_note, &filp->f_kqlock,
	    linux_kq_lock, linux_kq_unlock,
	    linux_kq_lock_owned, linux_kq_lock_unowned);

	return (filp);
}

void
linux_file_free(struct linux_file *filp)
{
	if (filp->_file == NULL) {
		if (filp->f_shmem != NULL)
			vm_object_deallocate(filp->f_shmem);
		kfree(filp);
	} else {
		/*
		 * The close method of the character device or file
		 * will free the linux_file structure:
		 */
		_fdrop(filp->_file, curthread);
	}
}

static int
linux_cdev_pager_fault(vm_object_t vm_obj, vm_ooffset_t offset, int prot,
    vm_page_t *mres)
{
	struct vm_area_struct *vmap;

	vmap = linux_cdev_handle_find(vm_obj->handle);

	MPASS(vmap != NULL);
	MPASS(vmap->vm_private_data == vm_obj->handle);

	if (likely(vmap->vm_ops != NULL && offset < vmap->vm_len)) {
		vm_paddr_t paddr = IDX_TO_OFF(vmap->vm_pfn) + offset;
		vm_page_t page;

		if (((*mres)->flags & PG_FICTITIOUS) != 0) {
			/*
			 * If the passed in result page is a fake
			 * page, update it with the new physical
			 * address.
			 */
			page = *mres;
			vm_page_updatefake(page, paddr, vm_obj->memattr);
		} else {
			/*
			 * Replace the passed in "mres" page with our
			 * own fake page and free up all of the
			 * original pages.
			 */
495 */ 496 VM_OBJECT_WUNLOCK(vm_obj); 497 page = vm_page_getfake(paddr, vm_obj->memattr); 498 VM_OBJECT_WLOCK(vm_obj); 499 500 vm_page_replace_checked(page, vm_obj, 501 (*mres)->pindex, *mres); 502 503 vm_page_lock(*mres); 504 vm_page_free(*mres); 505 vm_page_unlock(*mres); 506 *mres = page; 507 } 508 page->valid = VM_PAGE_BITS_ALL; 509 return (VM_PAGER_OK); 510 } 511 return (VM_PAGER_FAIL); 512} 513 514static int 515linux_cdev_pager_populate(vm_object_t vm_obj, vm_pindex_t pidx, int fault_type, 516 vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last) 517{ 518 struct vm_area_struct *vmap; 519 int err; 520 521 linux_set_current(curthread); 522 523 /* get VM area structure */ 524 vmap = linux_cdev_handle_find(vm_obj->handle); 525 MPASS(vmap != NULL); 526 MPASS(vmap->vm_private_data == vm_obj->handle); 527 528 VM_OBJECT_WUNLOCK(vm_obj); 529 530 down_write(&vmap->vm_mm->mmap_sem); 531 if (unlikely(vmap->vm_ops == NULL)) { 532 err = VM_FAULT_SIGBUS; 533 } else { 534 struct vm_fault vmf; 535 536 /* fill out VM fault structure */ 537 vmf.virtual_address = (void *)((uintptr_t)pidx << PAGE_SHIFT); 538 vmf.flags = (fault_type & VM_PROT_WRITE) ? FAULT_FLAG_WRITE : 0; 539 vmf.pgoff = 0; 540 vmf.page = NULL; 541 vmf.vma = vmap; 542 543 vmap->vm_pfn_count = 0; 544 vmap->vm_pfn_pcount = &vmap->vm_pfn_count; 545 vmap->vm_obj = vm_obj; 546 547 err = vmap->vm_ops->fault(vmap, &vmf); 548 549 while (vmap->vm_pfn_count == 0 && err == VM_FAULT_NOPAGE) { 550 kern_yield(PRI_USER); 551 err = vmap->vm_ops->fault(vmap, &vmf); 552 } 553 } 554 555 /* translate return code */ 556 switch (err) { 557 case VM_FAULT_OOM: 558 err = VM_PAGER_AGAIN; 559 break; 560 case VM_FAULT_SIGBUS: 561 err = VM_PAGER_BAD; 562 break; 563 case VM_FAULT_NOPAGE: 564 /* 565 * By contract the fault handler will return having 566 * busied all the pages itself. If pidx is already 567 * found in the object, it will simply xbusy the first 568 * page and return with vm_pfn_count set to 1. 
569 */ 570 *first = vmap->vm_pfn_first; 571 *last = *first + vmap->vm_pfn_count - 1; 572 err = VM_PAGER_OK; 573 break; 574 default: 575 err = VM_PAGER_ERROR; 576 break; 577 } 578 up_write(&vmap->vm_mm->mmap_sem); 579 VM_OBJECT_WLOCK(vm_obj); 580 return (err); 581} 582 583static struct rwlock linux_vma_lock; 584static TAILQ_HEAD(, vm_area_struct) linux_vma_head = 585 TAILQ_HEAD_INITIALIZER(linux_vma_head); 586 587static void 588linux_cdev_handle_free(struct vm_area_struct *vmap) 589{ 590 /* Drop reference on vm_file */ 591 if (vmap->vm_file != NULL) 592 fput(vmap->vm_file); 593 594 /* Drop reference on mm_struct */ 595 mmput(vmap->vm_mm); 596 597 kfree(vmap); 598} 599 600static void 601linux_cdev_handle_remove(struct vm_area_struct *vmap) 602{ 603 rw_wlock(&linux_vma_lock); 604 TAILQ_REMOVE(&linux_vma_head, vmap, vm_entry); 605 rw_wunlock(&linux_vma_lock); 606} 607 608static struct vm_area_struct * 609linux_cdev_handle_find(void *handle) 610{ 611 struct vm_area_struct *vmap; 612 613 rw_rlock(&linux_vma_lock); 614 TAILQ_FOREACH(vmap, &linux_vma_head, vm_entry) { 615 if (vmap->vm_private_data == handle) 616 break; 617 } 618 rw_runlock(&linux_vma_lock); 619 return (vmap); 620} 621 622static int 623linux_cdev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot, 624 vm_ooffset_t foff, struct ucred *cred, u_short *color) 625{ 626 627 MPASS(linux_cdev_handle_find(handle) != NULL); 628 *color = 0; 629 return (0); 630} 631 632static void 633linux_cdev_pager_dtor(void *handle) 634{ 635 const struct vm_operations_struct *vm_ops; 636 struct vm_area_struct *vmap; 637 638 vmap = linux_cdev_handle_find(handle); 639 MPASS(vmap != NULL); 640 641 /* 642 * Remove handle before calling close operation to prevent 643 * other threads from reusing the handle pointer. 
644 */ 645 linux_cdev_handle_remove(vmap); 646 647 down_write(&vmap->vm_mm->mmap_sem); 648 vm_ops = vmap->vm_ops; 649 if (likely(vm_ops != NULL)) 650 vm_ops->close(vmap); 651 up_write(&vmap->vm_mm->mmap_sem); 652 653 linux_cdev_handle_free(vmap); 654} 655 656static struct cdev_pager_ops linux_cdev_pager_ops[2] = { 657 { 658 /* OBJT_MGTDEVICE */ 659 .cdev_pg_populate = linux_cdev_pager_populate, 660 .cdev_pg_ctor = linux_cdev_pager_ctor, 661 .cdev_pg_dtor = linux_cdev_pager_dtor 662 }, 663 { 664 /* OBJT_DEVICE */ 665 .cdev_pg_fault = linux_cdev_pager_fault, 666 .cdev_pg_ctor = linux_cdev_pager_ctor, 667 .cdev_pg_dtor = linux_cdev_pager_dtor 668 }, 669}; 670 671#define OPW(fp,td,code) ({ \ 672 struct file *__fpop; \ 673 __typeof(code) __retval; \ 674 \ 675 __fpop = (td)->td_fpop; \ 676 (td)->td_fpop = (fp); \ 677 __retval = (code); \ 678 (td)->td_fpop = __fpop; \ 679 __retval; \ 680}) 681 682static int 683linux_dev_fdopen(struct cdev *dev, int fflags, struct thread *td, struct file *file) 684{ 685 struct linux_cdev *ldev; 686 struct linux_file *filp; 687 int error; 688 689 ldev = dev->si_drv1; 690 691 filp = linux_file_alloc(); 692 filp->f_dentry = &filp->f_dentry_store; 693 filp->f_op = ldev->ops; 694 filp->f_mode = file->f_flag; 695 filp->f_flags = file->f_flag; 696 filp->f_vnode = file->f_vnode; 697 filp->_file = file; 698 699 linux_set_current(td); 700 701 if (filp->f_op->open) { 702 error = -filp->f_op->open(file->f_vnode, filp); 703 if (error) { 704 kfree(filp); 705 return (error); 706 } 707 } 708 709 /* hold on to the vnode - used for fstat() */ 710 vhold(filp->f_vnode); 711 712 /* release the file from devfs */ 713 finit(file, filp->f_mode, DTYPE_DEV, filp, &linuxfileops); 714 return (ENXIO); 715} 716 717#define LINUX_IOCTL_MIN_PTR 0x10000UL 718#define LINUX_IOCTL_MAX_PTR (LINUX_IOCTL_MIN_PTR + IOCPARM_MAX) 719 720static inline int 721linux_remap_address(void **uaddr, size_t len) 722{ 723 uintptr_t uaddr_val = (uintptr_t)(*uaddr); 724 725 if (unlikely(uaddr_val >= LINUX_IOCTL_MIN_PTR && 726 uaddr_val < LINUX_IOCTL_MAX_PTR)) { 727 struct task_struct *pts = current; 728 if (pts == NULL) { 729 *uaddr = NULL; 730 return (1); 731 } 732 733 /* compute data offset */ 734 uaddr_val -= LINUX_IOCTL_MIN_PTR; 735 736 /* check that length is within bounds */ 737 if ((len > IOCPARM_MAX) || 738 (uaddr_val + len) > pts->bsd_ioctl_len) { 739 *uaddr = NULL; 740 return (1); 741 } 742 743 /* re-add kernel buffer address */ 744 uaddr_val += (uintptr_t)pts->bsd_ioctl_data; 745 746 /* update address location */ 747 *uaddr = (void *)uaddr_val; 748 return (1); 749 } 750 return (0); 751} 752 753int 754linux_copyin(const void *uaddr, void *kaddr, size_t len) 755{ 756 if (linux_remap_address(__DECONST(void **, &uaddr), len)) { 757 if (uaddr == NULL) 758 return (-EFAULT); 759 memcpy(kaddr, uaddr, len); 760 return (0); 761 } 762 return (-copyin(uaddr, kaddr, len)); 763} 764 765int 766linux_copyout(const void *kaddr, void *uaddr, size_t len) 767{ 768 if (linux_remap_address(&uaddr, len)) { 769 if (uaddr == NULL) 770 return (-EFAULT); 771 memcpy(uaddr, kaddr, len); 772 return (0); 773 } 774 return (-copyout(kaddr, uaddr, len)); 775} 776 777size_t 778linux_clear_user(void *_uaddr, size_t _len) 779{ 780 uint8_t *uaddr = _uaddr; 781 size_t len = _len; 782 783 /* make sure uaddr is aligned before going into the fast loop */ 784 while (((uintptr_t)uaddr & 7) != 0 && len > 7) { 785 if (subyte(uaddr, 0)) 786 return (_len); 787 uaddr++; 788 len--; 789 } 790 791 /* zero 8 bytes at a time */ 792 while (len > 7) { 793#ifdef 
#ifdef __LP64__
		if (suword64(uaddr, 0))
			return (_len);
#else
		if (suword32(uaddr, 0))
			return (_len);
		if (suword32(uaddr + 4, 0))
			return (_len);
#endif
		uaddr += 8;
		len -= 8;
	}

	/* zero fill end, if any */
	while (len > 0) {
		if (subyte(uaddr, 0))
			return (_len);
		uaddr++;
		len--;
	}
	return (0);
}

int
linux_access_ok(int rw, const void *uaddr, size_t len)
{
	uintptr_t saddr;
	uintptr_t eaddr;

	/* get start and end address */
	saddr = (uintptr_t)uaddr;
	eaddr = (uintptr_t)uaddr + len;

	/* verify addresses are valid for userspace */
	return ((saddr == eaddr) ||
	    (eaddr > saddr && eaddr <= VM_MAXUSER_ADDRESS));
}

static int
linux_file_ioctl_sub(struct file *fp, struct linux_file *filp,
    u_long cmd, caddr_t data, struct thread *td)
{
	unsigned size;
	int error;

	size = IOCPARM_LEN(cmd);
	/* refer to logic in sys_ioctl() */
	if (size > 0) {
		/*
		 * Setup hint for linux_copyin() and linux_copyout().
		 *
		 * Background: Linux code expects a user-space address
		 * while FreeBSD supplies a kernel-space address.
		 */
		current->bsd_ioctl_data = data;
		current->bsd_ioctl_len = size;
		data = (void *)LINUX_IOCTL_MIN_PTR;
	} else {
		/* fetch user-space pointer */
		data = *(void **)data;
	}
#if defined(__amd64__)
	if (td->td_proc->p_elf_machine == EM_386) {
		/* try the compat IOCTL handler first */
		if (filp->f_op->compat_ioctl != NULL)
			error = -OPW(fp, td, filp->f_op->compat_ioctl(filp, cmd, (u_long)data));
		else
			error = ENOTTY;

		/* fallback to the regular IOCTL handler, if any */
		if (error == ENOTTY && filp->f_op->unlocked_ioctl != NULL)
			error = -OPW(fp, td, filp->f_op->unlocked_ioctl(filp, cmd, (u_long)data));
	} else
#endif
	if (filp->f_op->unlocked_ioctl != NULL)
		error = -OPW(fp, td, filp->f_op->unlocked_ioctl(filp, cmd, (u_long)data));
	else
		error = ENOTTY;
	if (size > 0) {
		current->bsd_ioctl_data = NULL;
		current->bsd_ioctl_len = 0;
	}

	if (error == EWOULDBLOCK) {
		/* update kqfilter status, if any */
		linux_file_kqfilter_poll(filp,
		    LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
	} else if (error == ERESTARTSYS)
		error = ERESTART;
	return (error);
}

#define	LINUX_POLL_TABLE_NORMAL ((poll_table *)1)

/*
 * This function atomically updates the poll wakeup state and returns
 * the previous state at the time of update.
 */
890 */ 891static uint8_t 892linux_poll_wakeup_state(atomic_t *v, const uint8_t *pstate) 893{ 894 int c, old; 895 896 c = v->counter; 897 898 while ((old = atomic_cmpxchg(v, c, pstate[c])) != c) 899 c = old; 900 901 return (c); 902} 903 904 905static int 906linux_poll_wakeup_callback(wait_queue_t *wq, unsigned int wq_state, int flags, void *key) 907{ 908 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 909 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 910 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 911 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_READY, 912 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_READY, /* NOP */ 913 }; 914 struct linux_file *filp = container_of(wq, struct linux_file, f_wait_queue.wq); 915 916 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 917 case LINUX_FWQ_STATE_QUEUED: 918 linux_poll_wakeup(filp); 919 return (1); 920 default: 921 return (0); 922 } 923} 924 925void 926linux_poll_wait(struct linux_file *filp, wait_queue_head_t *wqh, poll_table *p) 927{ 928 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 929 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_NOT_READY, 930 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_NOT_READY, /* NOP */ 931 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_QUEUED, /* NOP */ 932 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_QUEUED, 933 }; 934 935 /* check if we are called inside the select system call */ 936 if (p == LINUX_POLL_TABLE_NORMAL) 937 selrecord(curthread, &filp->f_selinfo); 938 939 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 940 case LINUX_FWQ_STATE_INIT: 941 /* NOTE: file handles can only belong to one wait-queue */ 942 filp->f_wait_queue.wqh = wqh; 943 filp->f_wait_queue.wq.func = &linux_poll_wakeup_callback; 944 add_wait_queue(wqh, &filp->f_wait_queue.wq); 945 atomic_set(&filp->f_wait_queue.state, LINUX_FWQ_STATE_QUEUED); 946 break; 947 default: 948 break; 949 } 950} 951 952static void 953linux_poll_wait_dequeue(struct linux_file *filp) 954{ 955 static const uint8_t state[LINUX_FWQ_STATE_MAX] = { 956 [LINUX_FWQ_STATE_INIT] = LINUX_FWQ_STATE_INIT, /* NOP */ 957 [LINUX_FWQ_STATE_NOT_READY] = LINUX_FWQ_STATE_INIT, 958 [LINUX_FWQ_STATE_QUEUED] = LINUX_FWQ_STATE_INIT, 959 [LINUX_FWQ_STATE_READY] = LINUX_FWQ_STATE_INIT, 960 }; 961 962 seldrain(&filp->f_selinfo); 963 964 switch (linux_poll_wakeup_state(&filp->f_wait_queue.state, state)) { 965 case LINUX_FWQ_STATE_NOT_READY: 966 case LINUX_FWQ_STATE_QUEUED: 967 case LINUX_FWQ_STATE_READY: 968 remove_wait_queue(filp->f_wait_queue.wqh, &filp->f_wait_queue.wq); 969 break; 970 default: 971 break; 972 } 973} 974 975void 976linux_poll_wakeup(struct linux_file *filp) 977{ 978 /* this function should be NULL-safe */ 979 if (filp == NULL) 980 return; 981 982 selwakeup(&filp->f_selinfo); 983 984 spin_lock(&filp->f_kqlock); 985 filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ | 986 LINUX_KQ_FLAG_NEED_WRITE; 987 988 /* make sure the "knote" gets woken up */ 989 KNOTE_LOCKED(&filp->f_selinfo.si_note, 1); 990 spin_unlock(&filp->f_kqlock); 991} 992 993static void 994linux_file_kqfilter_detach(struct knote *kn) 995{ 996 struct linux_file *filp = kn->kn_hook; 997 998 spin_lock(&filp->f_kqlock); 999 knlist_remove(&filp->f_selinfo.si_note, kn, 1); 1000 spin_unlock(&filp->f_kqlock); 1001} 1002 1003static int 1004linux_file_kqfilter_read_event(struct knote *kn, long hint) 1005{ 1006 struct linux_file *filp = kn->kn_hook; 1007 1008 mtx_assert(&filp->f_kqlock.m, MA_OWNED); 1009 1010 return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_READ) ? 
}

static int
linux_file_kqfilter_write_event(struct knote *kn, long hint)
{
	struct linux_file *filp = kn->kn_hook;

	mtx_assert(&filp->f_kqlock.m, MA_OWNED);

	return ((filp->f_kqflags & LINUX_KQ_FLAG_NEED_WRITE) ? 1 : 0);
}

static struct filterops linux_dev_kqfiltops_read = {
	.f_isfd = 1,
	.f_detach = linux_file_kqfilter_detach,
	.f_event = linux_file_kqfilter_read_event,
};

static struct filterops linux_dev_kqfiltops_write = {
	.f_isfd = 1,
	.f_detach = linux_file_kqfilter_detach,
	.f_event = linux_file_kqfilter_write_event,
};

static void
linux_file_kqfilter_poll(struct linux_file *filp, int kqflags)
{
	int temp;

	if (filp->f_kqflags & kqflags) {
		struct thread *td = curthread;

		/* get the latest polling state */
		temp = OPW(filp->_file, td, filp->f_op->poll(filp, NULL));

		spin_lock(&filp->f_kqlock);
		/* clear kqflags */
		filp->f_kqflags &= ~(LINUX_KQ_FLAG_NEED_READ |
		    LINUX_KQ_FLAG_NEED_WRITE);
		/* update kqflags */
		if (temp & (POLLIN | POLLOUT)) {
			if (temp & POLLIN)
				filp->f_kqflags |= LINUX_KQ_FLAG_NEED_READ;
			if (temp & POLLOUT)
				filp->f_kqflags |= LINUX_KQ_FLAG_NEED_WRITE;

			/* make sure the "knote" gets woken up */
			KNOTE_LOCKED(&filp->f_selinfo.si_note, 0);
		}
		spin_unlock(&filp->f_kqlock);
	}
}

static int
linux_file_kqfilter(struct file *file, struct knote *kn)
{
	struct linux_file *filp;
	struct thread *td;
	int error;

	td = curthread;
	filp = (struct linux_file *)file->f_data;
	filp->f_flags = file->f_flag;
	if (filp->f_op->poll == NULL)
		return (EINVAL);

	spin_lock(&filp->f_kqlock);
	switch (kn->kn_filter) {
	case EVFILT_READ:
		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_READ;
		kn->kn_fop = &linux_dev_kqfiltops_read;
		kn->kn_hook = filp;
		knlist_add(&filp->f_selinfo.si_note, kn, 1);
		error = 0;
		break;
	case EVFILT_WRITE:
		filp->f_kqflags |= LINUX_KQ_FLAG_HAS_WRITE;
		kn->kn_fop = &linux_dev_kqfiltops_write;
		kn->kn_hook = filp;
		knlist_add(&filp->f_selinfo.si_note, kn, 1);
		error = 0;
		break;
	default:
		error = EINVAL;
		break;
	}
	spin_unlock(&filp->f_kqlock);

	if (error == 0) {
		linux_set_current(td);

		/* update kqfilter status, if any */
		linux_file_kqfilter_poll(filp,
		    LINUX_KQ_FLAG_HAS_READ | LINUX_KQ_FLAG_HAS_WRITE);
	}
	return (error);
}

static int
linux_file_mmap_single(struct file *fp, vm_ooffset_t *offset,
    vm_size_t size, struct vm_object **object, int nprot,
    struct thread *td)
{
	struct vm_area_struct *vmap;
	struct mm_struct *mm;
	struct linux_file *filp;
	vm_memattr_t attr;
	int error;

	filp = (struct linux_file *)fp->f_data;
	filp->f_flags = fp->f_flag;

	if (filp->f_op->mmap == NULL)
		return (EOPNOTSUPP);

	linux_set_current(td);

	/*
	 * The same VM object might be shared by multiple processes
	 * and the mm_struct is usually freed when a process exits.
	 *
	 * The atomic reference below makes sure the mm_struct is
	 * available as long as the vmap is in the linux_vma_head.
	 */
1134 */ 1135 mm = current->mm; 1136 if (atomic_inc_not_zero(&mm->mm_users) == 0) 1137 return (EINVAL); 1138 1139 vmap = kzalloc(sizeof(*vmap), GFP_KERNEL); 1140 vmap->vm_start = 0; 1141 vmap->vm_end = size; 1142 vmap->vm_pgoff = *offset / PAGE_SIZE; 1143 vmap->vm_pfn = 0; 1144 vmap->vm_flags = vmap->vm_page_prot = (nprot & VM_PROT_ALL); 1145 vmap->vm_ops = NULL; 1146 vmap->vm_file = get_file(filp); 1147 vmap->vm_mm = mm; 1148 1149 if (unlikely(down_write_killable(&vmap->vm_mm->mmap_sem))) { 1150 error = EINTR; 1151 } else { 1152 error = -OPW(fp, td, filp->f_op->mmap(filp, vmap)); 1153 if (error == ERESTARTSYS) 1154 error = ERESTART; 1155 up_write(&vmap->vm_mm->mmap_sem); 1156 } 1157 1158 if (error != 0) { 1159 linux_cdev_handle_free(vmap); 1160 return (error); 1161 } 1162 1163 attr = pgprot2cachemode(vmap->vm_page_prot); 1164 1165 if (vmap->vm_ops != NULL) { 1166 struct vm_area_struct *ptr; 1167 void *vm_private_data; 1168 bool vm_no_fault; 1169 1170 if (vmap->vm_ops->open == NULL || 1171 vmap->vm_ops->close == NULL || 1172 vmap->vm_private_data == NULL) { 1173 /* free allocated VM area struct */ 1174 linux_cdev_handle_free(vmap); 1175 return (EINVAL); 1176 } 1177 1178 vm_private_data = vmap->vm_private_data; 1179 1180 rw_wlock(&linux_vma_lock); 1181 TAILQ_FOREACH(ptr, &linux_vma_head, vm_entry) { 1182 if (ptr->vm_private_data == vm_private_data) 1183 break; 1184 } 1185 /* check if there is an existing VM area struct */ 1186 if (ptr != NULL) { 1187 /* check if the VM area structure is invalid */ 1188 if (ptr->vm_ops == NULL || 1189 ptr->vm_ops->open == NULL || 1190 ptr->vm_ops->close == NULL) { 1191 error = ESTALE; 1192 vm_no_fault = 1; 1193 } else { 1194 error = EEXIST; 1195 vm_no_fault = (ptr->vm_ops->fault == NULL); 1196 } 1197 } else { 1198 /* insert VM area structure into list */ 1199 TAILQ_INSERT_TAIL(&linux_vma_head, vmap, vm_entry); 1200 error = 0; 1201 vm_no_fault = (vmap->vm_ops->fault == NULL); 1202 } 1203 rw_wunlock(&linux_vma_lock); 1204 1205 if (error != 0) { 1206 /* free allocated VM area struct */ 1207 linux_cdev_handle_free(vmap); 1208 /* check for stale VM area struct */ 1209 if (error != EEXIST) 1210 return (error); 1211 } 1212 1213 /* check if there is no fault handler */ 1214 if (vm_no_fault) { 1215 *object = cdev_pager_allocate(vm_private_data, OBJT_DEVICE, 1216 &linux_cdev_pager_ops[1], size, nprot, *offset, 1217 td->td_ucred); 1218 } else { 1219 *object = cdev_pager_allocate(vm_private_data, OBJT_MGTDEVICE, 1220 &linux_cdev_pager_ops[0], size, nprot, *offset, 1221 td->td_ucred); 1222 } 1223 1224 /* check if allocating the VM object failed */ 1225 if (*object == NULL) { 1226 if (error == 0) { 1227 /* remove VM area struct from list */ 1228 linux_cdev_handle_remove(vmap); 1229 /* free allocated VM area struct */ 1230 linux_cdev_handle_free(vmap); 1231 } 1232 return (EINVAL); 1233 } 1234 } else { 1235 struct sglist *sg; 1236 1237 sg = sglist_alloc(1, M_WAITOK); 1238 sglist_append_phys(sg, 1239 (vm_paddr_t)vmap->vm_pfn << PAGE_SHIFT, vmap->vm_len); 1240 1241 *object = vm_pager_allocate(OBJT_SG, sg, vmap->vm_len, 1242 nprot, 0, td->td_ucred); 1243 1244 linux_cdev_handle_free(vmap); 1245 1246 if (*object == NULL) { 1247 sglist_free(sg); 1248 return (EINVAL); 1249 } 1250 } 1251 1252 if (attr != VM_MEMATTR_DEFAULT) { 1253 VM_OBJECT_WLOCK(*object); 1254 vm_object_set_memattr(*object, attr); 1255 VM_OBJECT_WUNLOCK(*object); 1256 } 1257 *offset = 0; 1258 return (0); 1259} 1260 1261struct cdevsw linuxcdevsw = { 1262 .d_version = D_VERSION, 1263 .d_fdopen = linux_dev_fdopen, 
1264 .d_name = "lkpidev", 1265}; 1266 1267static int 1268linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred, 1269 int flags, struct thread *td) 1270{ 1271 struct linux_file *filp; 1272 ssize_t bytes; 1273 int error; 1274 1275 error = 0; 1276 filp = (struct linux_file *)file->f_data; 1277 filp->f_flags = file->f_flag; 1278 /* XXX no support for I/O vectors currently */ 1279 if (uio->uio_iovcnt != 1) 1280 return (EOPNOTSUPP); 1281 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1282 return (EINVAL); 1283 linux_set_current(td); 1284 if (filp->f_op->read) { 1285 bytes = OPW(file, td, filp->f_op->read(filp, uio->uio_iov->iov_base, 1286 uio->uio_iov->iov_len, &uio->uio_offset)); 1287 if (bytes >= 0) { 1288 uio->uio_iov->iov_base = 1289 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1290 uio->uio_iov->iov_len -= bytes; 1291 uio->uio_resid -= bytes; 1292 } else { 1293 error = -bytes; 1294 if (error == ERESTARTSYS) 1295 error = ERESTART; 1296 } 1297 } else 1298 error = ENXIO; 1299 1300 /* update kqfilter status, if any */ 1301 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_READ); 1302 1303 return (error); 1304} 1305 1306static int 1307linux_file_write(struct file *file, struct uio *uio, struct ucred *active_cred, 1308 int flags, struct thread *td) 1309{ 1310 struct linux_file *filp; 1311 ssize_t bytes; 1312 int error; 1313 1314 error = 0; 1315 filp = (struct linux_file *)file->f_data; 1316 filp->f_flags = file->f_flag; 1317 /* XXX no support for I/O vectors currently */ 1318 if (uio->uio_iovcnt != 1) 1319 return (EOPNOTSUPP); 1320 if (uio->uio_resid > DEVFS_IOSIZE_MAX) 1321 return (EINVAL); 1322 linux_set_current(td); 1323 if (filp->f_op->write) { 1324 bytes = OPW(file, td, filp->f_op->write(filp, uio->uio_iov->iov_base, 1325 uio->uio_iov->iov_len, &uio->uio_offset)); 1326 if (bytes >= 0) { 1327 uio->uio_iov->iov_base = 1328 ((uint8_t *)uio->uio_iov->iov_base) + bytes; 1329 uio->uio_iov->iov_len -= bytes; 1330 uio->uio_resid -= bytes; 1331 } else { 1332 error = -bytes; 1333 if (error == ERESTARTSYS) 1334 error = ERESTART; 1335 } 1336 } else 1337 error = ENXIO; 1338 1339 /* update kqfilter status, if any */ 1340 linux_file_kqfilter_poll(filp, LINUX_KQ_FLAG_HAS_WRITE); 1341 1342 return (error); 1343} 1344 1345static int 1346linux_file_poll(struct file *file, int events, struct ucred *active_cred, 1347 struct thread *td) 1348{ 1349 struct linux_file *filp; 1350 int revents; 1351 1352 filp = (struct linux_file *)file->f_data; 1353 filp->f_flags = file->f_flag; 1354 linux_set_current(td); 1355 if (filp->f_op->poll != NULL) 1356 revents = OPW(file, td, filp->f_op->poll(filp, LINUX_POLL_TABLE_NORMAL)) & events; 1357 else 1358 revents = 0; 1359 1360 return (revents); 1361} 1362 1363static int 1364linux_file_close(struct file *file, struct thread *td) 1365{ 1366 struct linux_file *filp; 1367 int error; 1368 1369 filp = (struct linux_file *)file->f_data; 1370 1371 KASSERT(file_count(filp) == 0, ("File refcount(%d) is not zero", file_count(filp))); 1372 1373 filp->f_flags = file->f_flag; 1374 linux_set_current(td); 1375 linux_poll_wait_dequeue(filp); 1376 error = -OPW(file, td, filp->f_op->release(filp->f_vnode, filp)); 1377 funsetown(&filp->f_sigio); 1378 if (filp->f_vnode != NULL) 1379 vdrop(filp->f_vnode); 1380 kfree(filp); 1381 1382 return (error); 1383} 1384 1385static int 1386linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, 1387 struct thread *td) 1388{ 1389 struct linux_file *filp; 1390 int error; 1391 1392 filp = (struct linux_file *)fp->f_data; 1393 
	filp->f_flags = fp->f_flag;
	error = 0;

	linux_set_current(td);
	switch (cmd) {
	case FIONBIO:
		break;
	case FIOASYNC:
		if (filp->f_op->fasync == NULL)
			break;
		error = -OPW(fp, td, filp->f_op->fasync(0, filp, fp->f_flag & FASYNC));
		break;
	case FIOSETOWN:
		error = fsetown(*(int *)data, &filp->f_sigio);
		if (error == 0) {
			if (filp->f_op->fasync == NULL)
				break;
			error = -OPW(fp, td, filp->f_op->fasync(0, filp,
			    fp->f_flag & FASYNC));
		}
		break;
	case FIOGETOWN:
		*(int *)data = fgetown(&filp->f_sigio);
		break;
	default:
		error = linux_file_ioctl_sub(fp, filp, cmd, data, td);
		break;
	}
	return (error);
}

static int
linux_file_mmap_sub(struct thread *td, vm_size_t objsize, vm_prot_t prot,
    vm_prot_t *maxprotp, int *flagsp, struct file *fp,
    vm_ooffset_t *foff, vm_object_t *objp)
{
	/*
	 * Character devices do not provide private mappings
	 * of any kind:
	 */
	if ((*maxprotp & VM_PROT_WRITE) == 0 &&
	    (prot & VM_PROT_WRITE) != 0)
		return (EACCES);
	if ((*flagsp & (MAP_PRIVATE | MAP_COPY)) != 0)
		return (EINVAL);

	return (linux_file_mmap_single(fp, foff, objsize, objp, (int)prot, td));
}

static int
linux_file_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t size,
    vm_prot_t prot, vm_prot_t cap_maxprot, int flags, vm_ooffset_t foff,
    struct thread *td)
{
	struct linux_file *filp;
	struct mount *mp;
	struct vnode *vp;
	vm_object_t object;
	vm_prot_t maxprot;
	int error;

	filp = (struct linux_file *)fp->f_data;

	vp = filp->f_vnode;
	if (vp == NULL)
		return (EOPNOTSUPP);

	/*
	 * Ensure that file and memory protections are
	 * compatible.
	 */
	mp = vp->v_mount;
	if (mp != NULL && (mp->mnt_flag & MNT_NOEXEC) != 0) {
		maxprot = VM_PROT_NONE;
		if ((prot & VM_PROT_EXECUTE) != 0)
			return (EACCES);
	} else
		maxprot = VM_PROT_EXECUTE;
	if ((fp->f_flag & FREAD) != 0)
		maxprot |= VM_PROT_READ;
	else if ((prot & VM_PROT_READ) != 0)
		return (EACCES);

	/*
	 * If we are sharing potential changes via MAP_SHARED and we
	 * are trying to get write permission although we opened it
	 * without asking for it, bail out.
	 *
	 * Note that most character devices always share mappings.
	 *
	 * Rely on linux_file_mmap_sub() to fail invalid MAP_PRIVATE
	 * requests rather than doing it here.
	 */
1485 */ 1486 if ((flags & MAP_SHARED) != 0) { 1487 if ((fp->f_flag & FWRITE) != 0) 1488 maxprot |= VM_PROT_WRITE; 1489 else if ((prot & VM_PROT_WRITE) != 0) 1490 return (EACCES); 1491 } 1492 maxprot &= cap_maxprot; 1493 1494 error = linux_file_mmap_sub(td, size, prot, &maxprot, &flags, fp, &foff, 1495 &object); 1496 if (error != 0) 1497 return (error); 1498 1499 error = vm_mmap_object(map, addr, size, prot, maxprot, flags, object, 1500 foff, FALSE, td); 1501 if (error != 0) 1502 vm_object_deallocate(object); 1503 return (error); 1504} 1505 1506static int 1507linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred, 1508 struct thread *td) 1509{ 1510 struct linux_file *filp; 1511 struct vnode *vp; 1512 int error; 1513 1514 filp = (struct linux_file *)fp->f_data; 1515 if (filp->f_vnode == NULL) 1516 return (EOPNOTSUPP); 1517 1518 vp = filp->f_vnode; 1519 1520 vn_lock(vp, LK_SHARED | LK_RETRY); 1521 error = vn_stat(vp, sb, td->td_ucred, NOCRED, td); 1522 VOP_UNLOCK(vp, 0); 1523 1524 return (error); 1525} 1526 1527static int 1528linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif, 1529 struct filedesc *fdp) 1530{ 1531 1532 return (0); 1533} 1534 1535unsigned int 1536linux_iminor(struct inode *inode) 1537{ 1538 struct linux_cdev *ldev; 1539 1540 if (inode == NULL || inode->v_rdev == NULL || 1541 inode->v_rdev->si_devsw != &linuxcdevsw) 1542 return (-1U); 1543 ldev = inode->v_rdev->si_drv1; 1544 if (ldev == NULL) 1545 return (-1U); 1546 1547 return (minor(ldev->dev)); 1548} 1549 1550struct fileops linuxfileops = { 1551 .fo_read = linux_file_read, 1552 .fo_write = linux_file_write, 1553 .fo_truncate = invfo_truncate, 1554 .fo_kqfilter = linux_file_kqfilter, 1555 .fo_stat = linux_file_stat, 1556 .fo_fill_kinfo = linux_file_fill_kinfo, 1557 .fo_poll = linux_file_poll, 1558 .fo_close = linux_file_close, 1559 .fo_ioctl = linux_file_ioctl, 1560 .fo_mmap = linux_file_mmap, 1561 .fo_chmod = invfo_chmod, 1562 .fo_chown = invfo_chown, 1563 .fo_sendfile = invfo_sendfile, 1564 .fo_flags = DFLAG_PASSABLE, 1565}; 1566 1567/* 1568 * Hash of vmmap addresses. This is infrequently accessed and does not 1569 * need to be particularly large. This is done because we must store the 1570 * caller's idea of the map size to properly unmap. 
1571 */ 1572struct vmmap { 1573 LIST_ENTRY(vmmap) vm_next; 1574 void *vm_addr; 1575 unsigned long vm_size; 1576}; 1577 1578struct vmmaphd { 1579 struct vmmap *lh_first; 1580}; 1581#define VMMAP_HASH_SIZE 64 1582#define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) 1583#define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK 1584static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; 1585static struct mtx vmmaplock; 1586 1587static void 1588vmmap_add(void *addr, unsigned long size) 1589{ 1590 struct vmmap *vmmap; 1591 1592 vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); 1593 mtx_lock(&vmmaplock); 1594 vmmap->vm_size = size; 1595 vmmap->vm_addr = addr; 1596 LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); 1597 mtx_unlock(&vmmaplock); 1598} 1599 1600static struct vmmap * 1601vmmap_remove(void *addr) 1602{ 1603 struct vmmap *vmmap; 1604 1605 mtx_lock(&vmmaplock); 1606 LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) 1607 if (vmmap->vm_addr == addr) 1608 break; 1609 if (vmmap) 1610 LIST_REMOVE(vmmap, vm_next); 1611 mtx_unlock(&vmmaplock); 1612 1613 return (vmmap); 1614} 1615 1616#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) 1617void * 1618_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) 1619{ 1620 void *addr; 1621 1622 addr = pmap_mapdev_attr(phys_addr, size, attr); 1623 if (addr == NULL) 1624 return (NULL); 1625 vmmap_add(addr, size); 1626 1627 return (addr); 1628} 1629#endif 1630 1631void 1632iounmap(void *addr) 1633{ 1634 struct vmmap *vmmap; 1635 1636 vmmap = vmmap_remove(addr); 1637 if (vmmap == NULL) 1638 return; 1639#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) 1640 pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size); 1641#endif 1642 kfree(vmmap); 1643} 1644 1645 1646void * 1647vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) 1648{ 1649 vm_offset_t off; 1650 size_t size; 1651 1652 size = count * PAGE_SIZE; 1653 off = kva_alloc(size); 1654 if (off == 0) 1655 return (NULL); 1656 vmmap_add((void *)off, size); 1657 pmap_qenter(off, pages, count); 1658 1659 return ((void *)off); 1660} 1661 1662void 1663vunmap(void *addr) 1664{ 1665 struct vmmap *vmmap; 1666 1667 vmmap = vmmap_remove(addr); 1668 if (vmmap == NULL) 1669 return; 1670 pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE); 1671 kva_free((vm_offset_t)addr, vmmap->vm_size); 1672 kfree(vmmap); 1673} 1674 1675char * 1676kvasprintf(gfp_t gfp, const char *fmt, va_list ap) 1677{ 1678 unsigned int len; 1679 char *p; 1680 va_list aq; 1681 1682 va_copy(aq, ap); 1683 len = vsnprintf(NULL, 0, fmt, aq); 1684 va_end(aq); 1685 1686 p = kmalloc(len + 1, gfp); 1687 if (p != NULL) 1688 vsnprintf(p, len + 1, fmt, ap); 1689 1690 return (p); 1691} 1692 1693char * 1694kasprintf(gfp_t gfp, const char *fmt, ...) 
1695{ 1696 va_list ap; 1697 char *p; 1698 1699 va_start(ap, fmt); 1700 p = kvasprintf(gfp, fmt, ap); 1701 va_end(ap); 1702 1703 return (p); 1704} 1705 1706static void 1707linux_timer_callback_wrapper(void *context) 1708{ 1709 struct timer_list *timer; 1710 1711 linux_set_current(curthread); 1712 1713 timer = context; 1714 timer->function(timer->data); 1715} 1716 1717void 1718mod_timer(struct timer_list *timer, int expires) 1719{ 1720 1721 timer->expires = expires; 1722 callout_reset(&timer->timer_callout, 1723 linux_timer_jiffies_until(expires), 1724 &linux_timer_callback_wrapper, timer); 1725} 1726 1727void 1728add_timer(struct timer_list *timer) 1729{ 1730 1731 callout_reset(&timer->timer_callout, 1732 linux_timer_jiffies_until(timer->expires), 1733 &linux_timer_callback_wrapper, timer); 1734} 1735 1736void 1737add_timer_on(struct timer_list *timer, int cpu) 1738{ 1739 1740 callout_reset_on(&timer->timer_callout, 1741 linux_timer_jiffies_until(timer->expires), 1742 &linux_timer_callback_wrapper, timer, cpu); 1743} 1744 1745static void 1746linux_timer_init(void *arg) 1747{ 1748 1749 /* 1750 * Compute an internal HZ value which can divide 2**32 to 1751 * avoid timer rounding problems when the tick value wraps 1752 * around 2**32: 1753 */ 1754 linux_timer_hz_mask = 1; 1755 while (linux_timer_hz_mask < (unsigned long)hz) 1756 linux_timer_hz_mask *= 2; 1757 linux_timer_hz_mask--; 1758} 1759SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); 1760 1761void 1762linux_complete_common(struct completion *c, int all) 1763{ 1764 int wakeup_swapper; 1765 1766 sleepq_lock(c); 1767 c->done++; 1768 if (all) 1769 wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); 1770 else 1771 wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); 1772 sleepq_release(c); 1773 if (wakeup_swapper) 1774 kick_proc0(); 1775} 1776 1777/* 1778 * Indefinite wait for done != 0 with or without signals. 1779 */ 1780int 1781linux_wait_for_common(struct completion *c, int flags) 1782{ 1783 int error; 1784 1785 if (SCHEDULER_STOPPED()) 1786 return (0); 1787 1788 DROP_GIANT(); 1789 1790 if (flags != 0) 1791 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 1792 else 1793 flags = SLEEPQ_SLEEP; 1794 error = 0; 1795 for (;;) { 1796 sleepq_lock(c); 1797 if (c->done) 1798 break; 1799 sleepq_add(c, NULL, "completion", flags, 0); 1800 if (flags & SLEEPQ_INTERRUPTIBLE) { 1801 if (sleepq_wait_sig(c, 0) != 0) { 1802 error = -ERESTARTSYS; 1803 goto intr; 1804 } 1805 } else 1806 sleepq_wait(c, 0); 1807 } 1808 c->done--; 1809 sleepq_release(c); 1810 1811intr: 1812 PICKUP_GIANT(); 1813 1814 return (error); 1815} 1816 1817/* 1818 * Time limited wait for done != 0 with or without signals. 
1819 */ 1820int 1821linux_wait_for_timeout_common(struct completion *c, int timeout, int flags) 1822{ 1823 int end = jiffies + timeout; 1824 int error; 1825 int ret; 1826 1827 if (SCHEDULER_STOPPED()) 1828 return (0); 1829 1830 DROP_GIANT(); 1831 1832 if (flags != 0) 1833 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 1834 else 1835 flags = SLEEPQ_SLEEP; 1836 1837 error = 0; 1838 ret = 0; 1839 for (;;) { 1840 sleepq_lock(c); 1841 if (c->done) 1842 break; 1843 sleepq_add(c, NULL, "completion", flags, 0); 1844 sleepq_set_timeout(c, linux_timer_jiffies_until(end)); 1845 if (flags & SLEEPQ_INTERRUPTIBLE) 1846 ret = sleepq_timedwait_sig(c, 0); 1847 else 1848 ret = sleepq_timedwait(c, 0); 1849 if (ret != 0) { 1850 /* check for timeout or signal */ 1851 if (ret == EWOULDBLOCK) 1852 error = 0; 1853 else 1854 error = -ERESTARTSYS; 1855 goto intr; 1856 } 1857 } 1858 c->done--; 1859 sleepq_release(c); 1860 1861intr: 1862 PICKUP_GIANT(); 1863 1864 /* return how many jiffies are left */ 1865 return (ret != 0 ? error : linux_timer_jiffies_until(end)); 1866} 1867 1868int 1869linux_try_wait_for_completion(struct completion *c) 1870{ 1871 int isdone; 1872 1873 isdone = 1; 1874 sleepq_lock(c); 1875 if (c->done) 1876 c->done--; 1877 else 1878 isdone = 0; 1879 sleepq_release(c); 1880 return (isdone); 1881} 1882 1883int 1884linux_completion_done(struct completion *c) 1885{ 1886 int isdone; 1887 1888 isdone = 1; 1889 sleepq_lock(c); 1890 if (c->done == 0) 1891 isdone = 0; 1892 sleepq_release(c); 1893 return (isdone); 1894} 1895 1896static void 1897linux_cdev_release(struct kobject *kobj) 1898{ 1899 struct linux_cdev *cdev; 1900 struct kobject *parent; 1901 1902 cdev = container_of(kobj, struct linux_cdev, kobj); 1903 parent = kobj->parent; 1904 if (cdev->cdev) 1905 destroy_dev(cdev->cdev); 1906 kfree(cdev); 1907 kobject_put(parent); 1908} 1909 1910static void 1911linux_cdev_static_release(struct kobject *kobj) 1912{ 1913 struct linux_cdev *cdev; 1914 struct kobject *parent; 1915 1916 cdev = container_of(kobj, struct linux_cdev, kobj); 1917 parent = kobj->parent; 1918 if (cdev->cdev) 1919 destroy_dev(cdev->cdev); 1920 kobject_put(parent); 1921} 1922 1923const struct kobj_type linux_cdev_ktype = { 1924 .release = linux_cdev_release, 1925}; 1926 1927const struct kobj_type linux_cdev_static_ktype = { 1928 .release = linux_cdev_static_release, 1929}; 1930 1931static void 1932linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate) 1933{ 1934 struct notifier_block *nb; 1935 1936 nb = arg; 1937 if (linkstate == LINK_STATE_UP) 1938 nb->notifier_call(nb, NETDEV_UP, ifp); 1939 else 1940 nb->notifier_call(nb, NETDEV_DOWN, ifp); 1941} 1942 1943static void 1944linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp) 1945{ 1946 struct notifier_block *nb; 1947 1948 nb = arg; 1949 nb->notifier_call(nb, NETDEV_REGISTER, ifp); 1950} 1951 1952static void 1953linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp) 1954{ 1955 struct notifier_block *nb; 1956 1957 nb = arg; 1958 nb->notifier_call(nb, NETDEV_UNREGISTER, ifp); 1959} 1960 1961static void 1962linux_handle_iflladdr_event(void *arg, struct ifnet *ifp) 1963{ 1964 struct notifier_block *nb; 1965 1966 nb = arg; 1967 nb->notifier_call(nb, NETDEV_CHANGEADDR, ifp); 1968} 1969 1970static void 1971linux_handle_ifaddr_event(void *arg, struct ifnet *ifp) 1972{ 1973 struct notifier_block *nb; 1974 1975 nb = arg; 1976 nb->notifier_call(nb, NETDEV_CHANGEIFADDR, ifp); 1977} 1978 1979int 1980register_netdevice_notifier(struct notifier_block *nb) 1981{ 1982 
1983 nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER( 1984 ifnet_link_event, linux_handle_ifnet_link_event, nb, 0); 1985 nb->tags[NETDEV_REGISTER] = EVENTHANDLER_REGISTER( 1986 ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0); 1987 nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER( 1988 ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0); 1989 nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER( 1990 iflladdr_event, linux_handle_iflladdr_event, nb, 0); 1991 1992 return (0); 1993} 1994 1995int 1996register_inetaddr_notifier(struct notifier_block *nb) 1997{ 1998 1999 nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER( 2000 ifaddr_event, linux_handle_ifaddr_event, nb, 0); 2001 return (0); 2002} 2003 2004int 2005unregister_netdevice_notifier(struct notifier_block *nb) 2006{ 2007 2008 EVENTHANDLER_DEREGISTER(ifnet_link_event, 2009 nb->tags[NETDEV_UP]); 2010 EVENTHANDLER_DEREGISTER(ifnet_arrival_event, 2011 nb->tags[NETDEV_REGISTER]); 2012 EVENTHANDLER_DEREGISTER(ifnet_departure_event, 2013 nb->tags[NETDEV_UNREGISTER]); 2014 EVENTHANDLER_DEREGISTER(iflladdr_event, 2015 nb->tags[NETDEV_CHANGEADDR]); 2016 2017 return (0); 2018} 2019 2020int 2021unregister_inetaddr_notifier(struct notifier_block *nb) 2022{ 2023 2024 EVENTHANDLER_DEREGISTER(ifaddr_event, 2025 nb->tags[NETDEV_CHANGEIFADDR]); 2026 2027 return (0); 2028} 2029 2030struct list_sort_thunk { 2031 int (*cmp)(void *, struct list_head *, struct list_head *); 2032 void *priv; 2033}; 2034 2035static inline int 2036linux_le_cmp(void *priv, const void *d1, const void *d2) 2037{ 2038 struct list_head *le1, *le2; 2039 struct list_sort_thunk *thunk; 2040 2041 thunk = priv; 2042 le1 = *(__DECONST(struct list_head **, d1)); 2043 le2 = *(__DECONST(struct list_head **, d2)); 2044 return ((thunk->cmp)(thunk->priv, le1, le2)); 2045} 2046 2047void 2048list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv, 2049 struct list_head *a, struct list_head *b)) 2050{ 2051 struct list_sort_thunk thunk; 2052 struct list_head **ar, *le; 2053 size_t count, i; 2054 2055 count = 0; 2056 list_for_each(le, head) 2057 count++; 2058 ar = malloc(sizeof(struct list_head *) * count, M_KMALLOC, M_WAITOK); 2059 i = 0; 2060 list_for_each(le, head) 2061 ar[i++] = le; 2062 thunk.cmp = cmp; 2063 thunk.priv = priv; 2064 qsort_r(ar, count, sizeof(struct list_head *), &thunk, linux_le_cmp); 2065 INIT_LIST_HEAD(head); 2066 for (i = 0; i < count; i++) 2067 list_add_tail(ar[i], head); 2068 free(ar, M_KMALLOC); 2069} 2070 2071void 2072linux_irq_handler(void *ent) 2073{ 2074 struct irq_ent *irqe; 2075 2076 linux_set_current(curthread); 2077 2078 irqe = ent; 2079 irqe->handler(irqe->irq, irqe->arg); 2080} 2081 2082#if defined(__i386__) || defined(__amd64__) 2083int 2084linux_wbinvd_on_all_cpus(void) 2085{ 2086 2087 pmap_invalidate_cache(); 2088 return (0); 2089} 2090#endif 2091 2092int 2093linux_on_each_cpu(void callback(void *), void *data) 2094{ 2095 2096 smp_rendezvous(smp_no_rendezvous_barrier, callback, 2097 smp_no_rendezvous_barrier, data); 2098 return (0); 2099} 2100 2101int 2102linux_in_atomic(void) 2103{ 2104 2105 return ((curthread->td_pflags & TDP_NOFAULTING) != 0); 2106} 2107 2108struct linux_cdev * 2109linux_find_cdev(const char *name, unsigned major, unsigned minor) 2110{ 2111 dev_t dev = MKDEV(major, minor); 2112 struct cdev *cdev; 2113 2114 dev_lock(); 2115 LIST_FOREACH(cdev, &linuxcdevsw.d_devs, si_list) { 2116 struct linux_cdev *ldev = cdev->si_drv1; 2117 if (ldev->dev == dev && 2118 strcmp(kobject_name(&ldev->kobj), name) == 0) { 
2119 break; 2120 } 2121 } 2122 dev_unlock(); 2123 2124 return (cdev != NULL ? cdev->si_drv1 : NULL); 2125} 2126 2127int 2128__register_chrdev(unsigned int major, unsigned int baseminor, 2129 unsigned int count, const char *name, 2130 const struct file_operations *fops) 2131{ 2132 struct linux_cdev *cdev; 2133 int ret = 0; 2134 int i; 2135 2136 for (i = baseminor; i < baseminor + count; i++) { 2137 cdev = cdev_alloc(); 2138 cdev_init(cdev, fops); 2139 kobject_set_name(&cdev->kobj, name); 2140 2141 ret = cdev_add(cdev, makedev(major, i), 1); 2142 if (ret != 0) 2143 break; 2144 } 2145 return (ret); 2146} 2147 2148int 2149__register_chrdev_p(unsigned int major, unsigned int baseminor, 2150 unsigned int count, const char *name, 2151 const struct file_operations *fops, uid_t uid, 2152 gid_t gid, int mode) 2153{ 2154 struct linux_cdev *cdev; 2155 int ret = 0; 2156 int i; 2157 2158 for (i = baseminor; i < baseminor + count; i++) { 2159 cdev = cdev_alloc(); 2160 cdev_init(cdev, fops); 2161 kobject_set_name(&cdev->kobj, name); 2162 2163 ret = cdev_add_ext(cdev, makedev(major, i), uid, gid, mode); 2164 if (ret != 0) 2165 break; 2166 } 2167 return (ret); 2168} 2169 2170void 2171__unregister_chrdev(unsigned int major, unsigned int baseminor, 2172 unsigned int count, const char *name) 2173{ 2174 struct linux_cdev *cdevp; 2175 int i; 2176 2177 for (i = baseminor; i < baseminor + count; i++) { 2178 cdevp = linux_find_cdev(name, major, i); 2179 if (cdevp != NULL) 2180 cdev_del(cdevp); 2181 } 2182} 2183 2184#if defined(__i386__) || defined(__amd64__) 2185bool linux_cpu_has_clflush; 2186#endif 2187 2188static void 2189linux_compat_init(void *arg) 2190{ 2191 struct sysctl_oid *rootoid; 2192 int i; 2193 2194#if defined(__i386__) || defined(__amd64__) 2195 linux_cpu_has_clflush = (cpu_feature & CPUID_CLFSH); 2196#endif 2197 rw_init(&linux_vma_lock, "lkpi-vma-lock"); 2198 2199 rootoid = SYSCTL_ADD_ROOT_NODE(NULL, 2200 OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys"); 2201 kobject_init(&linux_class_root, &linux_class_ktype); 2202 kobject_set_name(&linux_class_root, "class"); 2203 linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid), 2204 OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class"); 2205 kobject_init(&linux_root_device.kobj, &linux_dev_ktype); 2206 kobject_set_name(&linux_root_device.kobj, "device"); 2207 linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL, 2208 SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD, NULL, 2209 "device"); 2210 linux_root_device.bsddev = root_bus; 2211 linux_class_misc.name = "misc"; 2212 class_register(&linux_class_misc); 2213 INIT_LIST_HEAD(&pci_drivers); 2214 INIT_LIST_HEAD(&pci_devices); 2215 spin_lock_init(&pci_lock); 2216 mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF); 2217 for (i = 0; i < VMMAP_HASH_SIZE; i++) 2218 LIST_INIT(&vmmaphead[i]); 2219} 2220SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL); 2221 2222static void 2223linux_compat_uninit(void *arg) 2224{ 2225 linux_kobject_kfree_name(&linux_class_root); 2226 linux_kobject_kfree_name(&linux_root_device.kobj); 2227 linux_kobject_kfree_name(&linux_class_misc.kobj); 2228 2229 mtx_destroy(&vmmaplock); 2230 spin_lock_destroy(&pci_lock); 2231 rw_destroy(&linux_vma_lock); 2232} 2233SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL); 2234 2235/* 2236 * NOTE: Linux frequently uses "unsigned long" for pointer to integer 2237 * conversion and vice versa, where in FreeBSD "uintptr_t" would be 2238 * used. 
 * Assert these types have the same size, else some parts of the
 * LinuxKPI may not work as expected:
 */
CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t));