/* linux_compat.c — FreeBSD LinuxKPI compatibility layer, SVN revision 324685 */
1/*- 2 * Copyright (c) 2010 Isilon Systems, Inc. 3 * Copyright (c) 2010 iX Systems, Inc. 4 * Copyright (c) 2010 Panasas, Inc. 5 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice unmodified, this list of conditions, and the following 13 * disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 21 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 22 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 23 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
28 */ 29 30#include <sys/param.h> 31#include <sys/systm.h> 32#include <sys/malloc.h> 33#include <sys/kernel.h> 34#include <sys/sysctl.h> 35#include <sys/proc.h> 36#include <sys/sglist.h> 37#include <sys/sleepqueue.h> 38#include <sys/lock.h> 39#include <sys/mutex.h> 40#include <sys/bus.h> 41#include <sys/fcntl.h> 42#include <sys/file.h> 43#include <sys/filio.h> 44#include <sys/rwlock.h> 45 46#include <vm/vm.h> 47#include <vm/pmap.h> 48 49#include <machine/stdarg.h> 50#include <machine/pmap.h> 51 52#include <linux/kobject.h> 53#include <linux/device.h> 54#include <linux/slab.h> 55#include <linux/module.h> 56#include <linux/moduleparam.h> 57#include <linux/cdev.h> 58#include <linux/file.h> 59#include <linux/sysfs.h> 60#include <linux/mm.h> 61#include <linux/io.h> 62#include <linux/vmalloc.h> 63#include <linux/timer.h> 64#include <linux/netdevice.h> 65 66#include <vm/vm_pager.h> 67 68#include <linux/workqueue.h> 69 70SYSCTL_NODE(_compat, OID_AUTO, linuxkpi, CTLFLAG_RW, 0, "LinuxKPI parameters"); 71 72MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat"); 73 74#include <linux/rbtree.h> 75/* Undo Linux compat changes. 
*/ 76#undef RB_ROOT 77#undef file 78#undef cdev 79#define RB_ROOT(head) (head)->rbh_root 80 81struct kobject class_root; 82struct device linux_rootdev; 83struct class miscclass; 84struct list_head pci_drivers; 85struct list_head pci_devices; 86spinlock_t pci_lock; 87 88unsigned long linux_timer_hz_mask; 89 90int 91panic_cmp(struct rb_node *one, struct rb_node *two) 92{ 93 panic("no cmp"); 94} 95 96RB_GENERATE(linux_root, rb_node, __entry, panic_cmp); 97 98int 99kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args) 100{ 101 va_list tmp_va; 102 int len; 103 char *old; 104 char *name; 105 char dummy; 106 107 old = kobj->name; 108 109 if (old && fmt == NULL) 110 return (0); 111 112 /* compute length of string */ 113 va_copy(tmp_va, args); 114 len = vsnprintf(&dummy, 0, fmt, tmp_va); 115 va_end(tmp_va); 116 117 /* account for zero termination */ 118 len++; 119 120 /* check for error */ 121 if (len < 1) 122 return (-EINVAL); 123 124 /* allocate memory for string */ 125 name = kzalloc(len, GFP_KERNEL); 126 if (name == NULL) 127 return (-ENOMEM); 128 vsnprintf(name, len, fmt, args); 129 kobj->name = name; 130 131 /* free old string */ 132 kfree(old); 133 134 /* filter new string */ 135 for (; *name != '\0'; name++) 136 if (*name == '/') 137 *name = '!'; 138 return (0); 139} 140 141int 142kobject_set_name(struct kobject *kobj, const char *fmt, ...) 
143{ 144 va_list args; 145 int error; 146 147 va_start(args, fmt); 148 error = kobject_set_name_vargs(kobj, fmt, args); 149 va_end(args); 150 151 return (error); 152} 153 154static inline int 155kobject_add_complete(struct kobject *kobj, struct kobject *parent) 156{ 157 struct kobj_type *t; 158 int error; 159 160 kobj->parent = kobject_get(parent); 161 error = sysfs_create_dir(kobj); 162 if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) { 163 struct attribute **attr; 164 t = kobj->ktype; 165 166 for (attr = t->default_attrs; *attr != NULL; attr++) { 167 error = sysfs_create_file(kobj, *attr); 168 if (error) 169 break; 170 } 171 if (error) 172 sysfs_remove_dir(kobj); 173 174 } 175 return (error); 176} 177 178int 179kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...) 180{ 181 va_list args; 182 int error; 183 184 va_start(args, fmt); 185 error = kobject_set_name_vargs(kobj, fmt, args); 186 va_end(args); 187 if (error) 188 return (error); 189 190 return kobject_add_complete(kobj, parent); 191} 192 193void 194kobject_release(struct kref *kref) 195{ 196 struct kobject *kobj; 197 char *name; 198 199 kobj = container_of(kref, struct kobject, kref); 200 sysfs_remove_dir(kobj); 201 if (kobj->parent) 202 kobject_put(kobj->parent); 203 kobj->parent = NULL; 204 name = kobj->name; 205 if (kobj->ktype && kobj->ktype->release) 206 kobj->ktype->release(kobj); 207 kfree(name); 208} 209 210static void 211kobject_kfree(struct kobject *kobj) 212{ 213 kfree(kobj); 214} 215 216static void 217kobject_kfree_name(struct kobject *kobj) 218{ 219 if (kobj) { 220 kfree(kobj->name); 221 } 222} 223 224struct kobj_type kfree_type = { .release = kobject_kfree }; 225 226static void 227dev_release(struct device *dev) 228{ 229 pr_debug("dev_release: %s\n", dev_name(dev)); 230 kfree(dev); 231} 232 233struct device * 234device_create(struct class *class, struct device *parent, dev_t devt, 235 void *drvdata, const char *fmt, ...) 
236{ 237 struct device *dev; 238 va_list args; 239 240 dev = kzalloc(sizeof(*dev), M_WAITOK); 241 dev->parent = parent; 242 dev->class = class; 243 dev->devt = devt; 244 dev->driver_data = drvdata; 245 dev->release = dev_release; 246 va_start(args, fmt); 247 kobject_set_name_vargs(&dev->kobj, fmt, args); 248 va_end(args); 249 device_register(dev); 250 251 return (dev); 252} 253 254int 255kobject_init_and_add(struct kobject *kobj, struct kobj_type *ktype, 256 struct kobject *parent, const char *fmt, ...) 257{ 258 va_list args; 259 int error; 260 261 kobject_init(kobj, ktype); 262 kobj->ktype = ktype; 263 kobj->parent = parent; 264 kobj->name = NULL; 265 266 va_start(args, fmt); 267 error = kobject_set_name_vargs(kobj, fmt, args); 268 va_end(args); 269 if (error) 270 return (error); 271 return kobject_add_complete(kobj, parent); 272} 273 274static void 275linux_file_dtor(void *cdp) 276{ 277 struct linux_file *filp; 278 279 filp = cdp; 280 filp->f_op->release(filp->f_vnode, filp); 281 vdrop(filp->f_vnode); 282 kfree(filp); 283} 284 285static int 286linux_dev_open(struct cdev *dev, int oflags, int devtype, struct thread *td) 287{ 288 struct linux_cdev *ldev; 289 struct linux_file *filp; 290 struct file *file; 291 int error; 292 293 file = curthread->td_fpop; 294 ldev = dev->si_drv1; 295 if (ldev == NULL) 296 return (ENODEV); 297 filp = kzalloc(sizeof(*filp), GFP_KERNEL); 298 filp->f_dentry = &filp->f_dentry_store; 299 filp->f_op = ldev->ops; 300 filp->f_flags = file->f_flag; 301 vhold(file->f_vnode); 302 filp->f_vnode = file->f_vnode; 303 if (filp->f_op->open) { 304 error = -filp->f_op->open(file->f_vnode, filp); 305 if (error) { 306 kfree(filp); 307 return (error); 308 } 309 } 310 error = devfs_set_cdevpriv(filp, linux_file_dtor); 311 if (error) { 312 filp->f_op->release(file->f_vnode, filp); 313 kfree(filp); 314 return (error); 315 } 316 317 return 0; 318} 319 320static int 321linux_dev_close(struct cdev *dev, int fflag, int devtype, struct thread *td) 322{ 323 
struct linux_cdev *ldev; 324 struct linux_file *filp; 325 struct file *file; 326 int error; 327 328 file = curthread->td_fpop; 329 ldev = dev->si_drv1; 330 if (ldev == NULL) 331 return (0); 332 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) 333 return (error); 334 filp->f_flags = file->f_flag; 335 devfs_clear_cdevpriv(); 336 337 338 return (0); 339} 340 341static int 342linux_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, 343 struct thread *td) 344{ 345 struct linux_cdev *ldev; 346 struct linux_file *filp; 347 struct file *file; 348 int error; 349 350 file = curthread->td_fpop; 351 ldev = dev->si_drv1; 352 if (ldev == NULL) 353 return (0); 354 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) 355 return (error); 356 filp->f_flags = file->f_flag; 357 /* 358 * Linux does not have a generic ioctl copyin/copyout layer. All 359 * linux ioctls must be converted to void ioctls which pass a 360 * pointer to the address of the data. We want the actual user 361 * address so we dereference here. 
362 */ 363 data = *(void **)data; 364 if (filp->f_op->unlocked_ioctl) 365 error = -filp->f_op->unlocked_ioctl(filp, cmd, (u_long)data); 366 else 367 error = ENOTTY; 368 369 return (error); 370} 371 372static int 373linux_dev_read(struct cdev *dev, struct uio *uio, int ioflag) 374{ 375 struct linux_cdev *ldev; 376 struct linux_file *filp; 377 struct file *file; 378 ssize_t bytes; 379 int error; 380 381 file = curthread->td_fpop; 382 ldev = dev->si_drv1; 383 if (ldev == NULL) 384 return (0); 385 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) 386 return (error); 387 filp->f_flags = file->f_flag; 388 if (uio->uio_iovcnt != 1) 389 panic("linux_dev_read: uio %p iovcnt %d", 390 uio, uio->uio_iovcnt); 391 if (filp->f_op->read) { 392 bytes = filp->f_op->read(filp, uio->uio_iov->iov_base, 393 uio->uio_iov->iov_len, &uio->uio_offset); 394 if (bytes >= 0) { 395 uio->uio_iov->iov_base += bytes; 396 uio->uio_iov->iov_len -= bytes; 397 uio->uio_resid -= bytes; 398 } else 399 error = -bytes; 400 } else 401 error = ENXIO; 402 403 return (error); 404} 405 406static int 407linux_dev_write(struct cdev *dev, struct uio *uio, int ioflag) 408{ 409 struct linux_cdev *ldev; 410 struct linux_file *filp; 411 struct file *file; 412 ssize_t bytes; 413 int error; 414 415 file = curthread->td_fpop; 416 ldev = dev->si_drv1; 417 if (ldev == NULL) 418 return (0); 419 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) 420 return (error); 421 filp->f_flags = file->f_flag; 422 if (uio->uio_iovcnt != 1) 423 panic("linux_dev_write: uio %p iovcnt %d", 424 uio, uio->uio_iovcnt); 425 if (filp->f_op->write) { 426 bytes = filp->f_op->write(filp, uio->uio_iov->iov_base, 427 uio->uio_iov->iov_len, &uio->uio_offset); 428 if (bytes >= 0) { 429 uio->uio_iov->iov_base += bytes; 430 uio->uio_iov->iov_len -= bytes; 431 uio->uio_resid -= bytes; 432 } else 433 error = -bytes; 434 } else 435 error = ENXIO; 436 437 return (error); 438} 439 440static int 441linux_dev_poll(struct cdev *dev, int events, struct 
thread *td) 442{ 443 struct linux_cdev *ldev; 444 struct linux_file *filp; 445 struct file *file; 446 int revents; 447 int error; 448 449 file = curthread->td_fpop; 450 ldev = dev->si_drv1; 451 if (ldev == NULL) 452 return (0); 453 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) 454 return (error); 455 filp->f_flags = file->f_flag; 456 if (filp->f_op->poll) 457 revents = filp->f_op->poll(filp, NULL) & events; 458 else 459 revents = 0; 460 461 return (revents); 462} 463 464static int 465linux_dev_mmap_single(struct cdev *dev, vm_ooffset_t *offset, 466 vm_size_t size, struct vm_object **object, int nprot) 467{ 468 struct linux_cdev *ldev; 469 struct linux_file *filp; 470 struct file *file; 471 struct vm_area_struct vma; 472 int error; 473 474 file = curthread->td_fpop; 475 ldev = dev->si_drv1; 476 if (ldev == NULL) 477 return (ENODEV); 478 if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) 479 return (error); 480 filp->f_flags = file->f_flag; 481 vma.vm_start = 0; 482 vma.vm_end = size; 483 vma.vm_pgoff = *offset / PAGE_SIZE; 484 vma.vm_pfn = 0; 485 vma.vm_page_prot = VM_MEMATTR_DEFAULT; 486 if (filp->f_op->mmap) { 487 error = -filp->f_op->mmap(filp, &vma); 488 if (error == 0) { 489 struct sglist *sg; 490 491 sg = sglist_alloc(1, M_WAITOK); 492 sglist_append_phys(sg, 493 (vm_paddr_t)vma.vm_pfn << PAGE_SHIFT, vma.vm_len); 494 *object = vm_pager_allocate(OBJT_SG, sg, vma.vm_len, 495 nprot, 0, curthread->td_ucred); 496 if (*object == NULL) { 497 sglist_free(sg); 498 return (EINVAL); 499 } 500 *offset = 0; 501 if (vma.vm_page_prot != VM_MEMATTR_DEFAULT) { 502 VM_OBJECT_WLOCK(*object); 503 vm_object_set_memattr(*object, 504 vma.vm_page_prot); 505 VM_OBJECT_WUNLOCK(*object); 506 } 507 } 508 } else 509 error = ENODEV; 510 511 return (error); 512} 513 514struct cdevsw linuxcdevsw = { 515 .d_version = D_VERSION, 516 .d_flags = D_TRACKCLOSE, 517 .d_open = linux_dev_open, 518 .d_close = linux_dev_close, 519 .d_read = linux_dev_read, 520 .d_write = linux_dev_write, 
521 .d_ioctl = linux_dev_ioctl, 522 .d_mmap_single = linux_dev_mmap_single, 523 .d_poll = linux_dev_poll, 524}; 525 526static int 527linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred, 528 int flags, struct thread *td) 529{ 530 struct linux_file *filp; 531 ssize_t bytes; 532 int error; 533 534 error = 0; 535 filp = (struct linux_file *)file->f_data; 536 filp->f_flags = file->f_flag; 537 if (uio->uio_iovcnt != 1) 538 panic("linux_file_read: uio %p iovcnt %d", 539 uio, uio->uio_iovcnt); 540 if (filp->f_op->read) { 541 bytes = filp->f_op->read(filp, uio->uio_iov->iov_base, 542 uio->uio_iov->iov_len, &uio->uio_offset); 543 if (bytes >= 0) { 544 uio->uio_iov->iov_base += bytes; 545 uio->uio_iov->iov_len -= bytes; 546 uio->uio_resid -= bytes; 547 } else 548 error = -bytes; 549 } else 550 error = ENXIO; 551 552 return (error); 553} 554 555static int 556linux_file_poll(struct file *file, int events, struct ucred *active_cred, 557 struct thread *td) 558{ 559 struct linux_file *filp; 560 int revents; 561 562 filp = (struct linux_file *)file->f_data; 563 filp->f_flags = file->f_flag; 564 if (filp->f_op->poll) 565 revents = filp->f_op->poll(filp, NULL) & events; 566 else 567 revents = 0; 568 569 return (0); 570} 571 572static int 573linux_file_close(struct file *file, struct thread *td) 574{ 575 struct linux_file *filp; 576 int error; 577 578 filp = (struct linux_file *)file->f_data; 579 filp->f_flags = file->f_flag; 580 error = -filp->f_op->release(NULL, filp); 581 funsetown(&filp->f_sigio); 582 kfree(filp); 583 584 return (error); 585} 586 587static int 588linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, 589 struct thread *td) 590{ 591 struct linux_file *filp; 592 int error; 593 594 filp = (struct linux_file *)fp->f_data; 595 filp->f_flags = fp->f_flag; 596 error = 0; 597 598 switch (cmd) { 599 case FIONBIO: 600 break; 601 case FIOASYNC: 602 if (filp->f_op->fasync == NULL) 603 break; 604 error = filp->f_op->fasync(0, 
filp, fp->f_flag & FASYNC); 605 break; 606 case FIOSETOWN: 607 error = fsetown(*(int *)data, &filp->f_sigio); 608 if (error == 0) 609 error = filp->f_op->fasync(0, filp, 610 fp->f_flag & FASYNC); 611 break; 612 case FIOGETOWN: 613 *(int *)data = fgetown(&filp->f_sigio); 614 break; 615 default: 616 error = ENOTTY; 617 break; 618 } 619 return (error); 620} 621 622struct fileops linuxfileops = { 623 .fo_read = linux_file_read, 624 .fo_poll = linux_file_poll, 625 .fo_close = linux_file_close, 626 .fo_ioctl = linux_file_ioctl, 627 .fo_chmod = invfo_chmod, 628 .fo_chown = invfo_chown, 629 .fo_sendfile = invfo_sendfile, 630}; 631 632/* 633 * Hash of vmmap addresses. This is infrequently accessed and does not 634 * need to be particularly large. This is done because we must store the 635 * caller's idea of the map size to properly unmap. 636 */ 637struct vmmap { 638 LIST_ENTRY(vmmap) vm_next; 639 void *vm_addr; 640 unsigned long vm_size; 641}; 642 643struct vmmaphd { 644 struct vmmap *lh_first; 645}; 646#define VMMAP_HASH_SIZE 64 647#define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) 648#define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK 649static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; 650static struct mtx vmmaplock; 651 652static void 653vmmap_add(void *addr, unsigned long size) 654{ 655 struct vmmap *vmmap; 656 657 vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); 658 mtx_lock(&vmmaplock); 659 vmmap->vm_size = size; 660 vmmap->vm_addr = addr; 661 LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); 662 mtx_unlock(&vmmaplock); 663} 664 665static struct vmmap * 666vmmap_remove(void *addr) 667{ 668 struct vmmap *vmmap; 669 670 mtx_lock(&vmmaplock); 671 LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) 672 if (vmmap->vm_addr == addr) 673 break; 674 if (vmmap) 675 LIST_REMOVE(vmmap, vm_next); 676 mtx_unlock(&vmmaplock); 677 678 return (vmmap); 679} 680 681void * 682_ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) 683{ 684 void 
*addr; 685 686 addr = pmap_mapdev_attr(phys_addr, size, attr); 687 if (addr == NULL) 688 return (NULL); 689 vmmap_add(addr, size); 690 691 return (addr); 692} 693 694void 695iounmap(void *addr) 696{ 697 struct vmmap *vmmap; 698 699 vmmap = vmmap_remove(addr); 700 if (vmmap == NULL) 701 return; 702 pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size); 703 kfree(vmmap); 704} 705 706 707void * 708vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) 709{ 710 vm_offset_t off; 711 size_t size; 712 713 size = count * PAGE_SIZE; 714 off = kva_alloc(size); 715 if (off == 0) 716 return (NULL); 717 vmmap_add((void *)off, size); 718 pmap_qenter(off, pages, count); 719 720 return ((void *)off); 721} 722 723void 724vunmap(void *addr) 725{ 726 struct vmmap *vmmap; 727 728 vmmap = vmmap_remove(addr); 729 if (vmmap == NULL) 730 return; 731 pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE); 732 kva_free((vm_offset_t)addr, vmmap->vm_size); 733 kfree(vmmap); 734} 735 736char * 737kvasprintf(gfp_t gfp, const char *fmt, va_list ap) 738{ 739 unsigned int len; 740 char *p; 741 va_list aq; 742 743 va_copy(aq, ap); 744 len = vsnprintf(NULL, 0, fmt, aq); 745 va_end(aq); 746 747 p = kmalloc(len + 1, gfp); 748 if (p != NULL) 749 vsnprintf(p, len + 1, fmt, ap); 750 751 return (p); 752} 753 754char * 755kasprintf(gfp_t gfp, const char *fmt, ...) 
756{ 757 va_list ap; 758 char *p; 759 760 va_start(ap, fmt); 761 p = kvasprintf(gfp, fmt, ap); 762 va_end(ap); 763 764 return (p); 765} 766 767static int 768linux_timer_jiffies_until(unsigned long expires) 769{ 770 int delta = expires - jiffies; 771 /* guard against already expired values */ 772 if (delta < 1) 773 delta = 1; 774 return (delta); 775} 776 777static void 778linux_timer_callback_wrapper(void *context) 779{ 780 struct timer_list *timer; 781 782 timer = context; 783 timer->function(timer->data); 784} 785 786void 787mod_timer(struct timer_list *timer, unsigned long expires) 788{ 789 790 timer->expires = expires; 791 callout_reset(&timer->timer_callout, 792 linux_timer_jiffies_until(expires), 793 &linux_timer_callback_wrapper, timer); 794} 795 796void 797add_timer(struct timer_list *timer) 798{ 799 800 callout_reset(&timer->timer_callout, 801 linux_timer_jiffies_until(timer->expires), 802 &linux_timer_callback_wrapper, timer); 803} 804 805static void 806linux_timer_init(void *arg) 807{ 808 809 /* 810 * Compute an internal HZ value which can divide 2**32 to 811 * avoid timer rounding problems when the tick value wraps 812 * around 2**32: 813 */ 814 linux_timer_hz_mask = 1; 815 while (linux_timer_hz_mask < (unsigned long)hz) 816 linux_timer_hz_mask *= 2; 817 linux_timer_hz_mask--; 818} 819SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); 820 821void 822linux_complete_common(struct completion *c, int all) 823{ 824 int wakeup_swapper; 825 826 sleepq_lock(c); 827 c->done++; 828 if (all) 829 wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); 830 else 831 wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); 832 sleepq_release(c); 833 if (wakeup_swapper) 834 kick_proc0(); 835} 836 837/* 838 * Indefinite wait for done != 0 with or without signals. 
839 */ 840long 841linux_wait_for_common(struct completion *c, int flags) 842{ 843 844 if (flags != 0) 845 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 846 else 847 flags = SLEEPQ_SLEEP; 848 for (;;) { 849 sleepq_lock(c); 850 if (c->done) 851 break; 852 sleepq_add(c, NULL, "completion", flags, 0); 853 if (flags & SLEEPQ_INTERRUPTIBLE) { 854 if (sleepq_wait_sig(c, 0) != 0) 855 return (-ERESTARTSYS); 856 } else 857 sleepq_wait(c, 0); 858 } 859 c->done--; 860 sleepq_release(c); 861 862 return (0); 863} 864 865/* 866 * Time limited wait for done != 0 with or without signals. 867 */ 868long 869linux_wait_for_timeout_common(struct completion *c, long timeout, int flags) 870{ 871 long end = jiffies + timeout; 872 873 if (flags != 0) 874 flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; 875 else 876 flags = SLEEPQ_SLEEP; 877 for (;;) { 878 int ret; 879 880 sleepq_lock(c); 881 if (c->done) 882 break; 883 sleepq_add(c, NULL, "completion", flags, 0); 884 sleepq_set_timeout(c, linux_timer_jiffies_until(end)); 885 if (flags & SLEEPQ_INTERRUPTIBLE) 886 ret = sleepq_timedwait_sig(c, 0); 887 else 888 ret = sleepq_timedwait(c, 0); 889 if (ret != 0) { 890 /* check for timeout or signal */ 891 if (ret == EWOULDBLOCK) 892 return (0); 893 else 894 return (-ERESTARTSYS); 895 } 896 } 897 c->done--; 898 sleepq_release(c); 899 900 /* return how many jiffies are left */ 901 return (linux_timer_jiffies_until(end)); 902} 903 904int 905linux_try_wait_for_completion(struct completion *c) 906{ 907 int isdone; 908 909 isdone = 1; 910 sleepq_lock(c); 911 if (c->done) 912 c->done--; 913 else 914 isdone = 0; 915 sleepq_release(c); 916 return (isdone); 917} 918 919int 920linux_completion_done(struct completion *c) 921{ 922 int isdone; 923 924 isdone = 1; 925 sleepq_lock(c); 926 if (c->done == 0) 927 isdone = 0; 928 sleepq_release(c); 929 return (isdone); 930} 931 932void 933linux_delayed_work_fn(void *arg) 934{ 935 struct delayed_work *work; 936 937 work = arg; 938 taskqueue_enqueue(work->work.taskqueue, 
&work->work.work_task); 939} 940 941void 942linux_work_fn(void *context, int pending) 943{ 944 struct work_struct *work; 945 946 work = context; 947 work->fn(work); 948} 949 950void 951linux_flush_fn(void *context, int pending) 952{ 953} 954 955struct workqueue_struct * 956linux_create_workqueue_common(const char *name, int cpus) 957{ 958 struct workqueue_struct *wq; 959 960 wq = kmalloc(sizeof(*wq), M_WAITOK); 961 wq->taskqueue = taskqueue_create(name, M_WAITOK, 962 taskqueue_thread_enqueue, &wq->taskqueue); 963 atomic_set(&wq->draining, 0); 964 taskqueue_start_threads(&wq->taskqueue, cpus, PWAIT, "%s", name); 965 966 return (wq); 967} 968 969void 970destroy_workqueue(struct workqueue_struct *wq) 971{ 972 taskqueue_free(wq->taskqueue); 973 kfree(wq); 974} 975 976static void 977linux_compat_init(void *arg) 978{ 979 struct sysctl_oid *rootoid; 980 int i; 981 982 rootoid = SYSCTL_ADD_NODE(NULL, SYSCTL_STATIC_CHILDREN(), 983 OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys"); 984 kobject_init(&class_root, &class_ktype); 985 kobject_set_name(&class_root, "class"); 986 class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid), 987 OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class"); 988 kobject_init(&linux_rootdev.kobj, &dev_ktype); 989 kobject_set_name(&linux_rootdev.kobj, "device"); 990 linux_rootdev.kobj.oidp = SYSCTL_ADD_NODE(NULL, 991 SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD, NULL, 992 "device"); 993 linux_rootdev.bsddev = root_bus; 994 miscclass.name = "misc"; 995 class_register(&miscclass); 996 INIT_LIST_HEAD(&pci_drivers); 997 INIT_LIST_HEAD(&pci_devices); 998 spin_lock_init(&pci_lock); 999 mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF); 1000 for (i = 0; i < VMMAP_HASH_SIZE; i++) 1001 LIST_INIT(&vmmaphead[i]); 1002} 1003 1004SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL); 1005 1006static void 1007linux_compat_uninit(void *arg) 1008{ 1009 kobject_kfree_name(&class_root); 1010 
kobject_kfree_name(&linux_rootdev.kobj); 1011 kobject_kfree_name(&miscclass.kobj); 1012} 1013SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL); 1014