1/* 2 * Copyright (c) 2000-2013 Apple Inc. All rights reserved. 3 * 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ 5 * 6 * This file contains Original Code and/or Modifications of Original Code 7 * as defined in and that are subject to the Apple Public Source License 8 * Version 2.0 (the 'License'). You may not use this file except in 9 * compliance with the License. The rights granted to you under the License 10 * may not be used to create, or enable the creation or redistribution of, 11 * unlawful or unlicensed copies of an Apple operating system, or to 12 * circumvent, violate, or enable the circumvention or violation of, any 13 * terms of an Apple operating system software license agreement. 14 * 15 * Please obtain a copy of the License at 16 * http://www.opensource.apple.com/apsl/ and read it before using this file. 17 * 18 * The Original Code and all software distributed under the License are 19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 23 * Please see the License for the specific language governing rights and 24 * limitations under the License. 25 * 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ 27 * 28 */ 29/*- 30 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org> 31 * All rights reserved. 32 * 33 * Redistribution and use in source and binary forms, with or without 34 * modification, are permitted provided that the following conditions 35 * are met: 36 * 1. Redistributions of source code must retain the above copyright 37 * notice, this list of conditions and the following disclaimer. 38 * 2. Redistributions in binary form must reproduce the above copyright 39 * notice, this list of conditions and the following disclaimer in the 40 * documentation and/or other materials provided with the distribution. 41 * 42 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 45 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 46 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 47 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 48 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 49 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 52 * SUCH DAMAGE. 53 */ 54/* 55 * @(#)kern_event.c 1.0 (3/31/2000) 56 */ 57#include <stdint.h> 58 59#include <sys/param.h> 60#include <sys/systm.h> 61#include <sys/filedesc.h> 62#include <sys/kernel.h> 63#include <sys/proc_internal.h> 64#include <sys/kauth.h> 65#include <sys/malloc.h> 66#include <sys/unistd.h> 67#include <sys/file_internal.h> 68#include <sys/fcntl.h> 69#include <sys/select.h> 70#include <sys/queue.h> 71#include <sys/event.h> 72#include <sys/eventvar.h> 73#include <sys/protosw.h> 74#include <sys/socket.h> 75#include <sys/socketvar.h> 76#include <sys/stat.h> 77#include <sys/sysctl.h> 78#include <sys/uio.h> 79#include <sys/sysproto.h> 80#include <sys/user.h> 81#include <sys/vnode_internal.h> 82#include <string.h> 83#include <sys/proc_info.h> 84#include <sys/codesign.h> 85 86#include <kern/lock.h> 87#include <kern/clock.h> 88#include <kern/thread_call.h> 89#include <kern/sched_prim.h> 90#include <kern/zalloc.h> 91#include <kern/assert.h> 92 93#include <libkern/libkern.h> 94#include "net/net_str_id.h" 95 96#include <mach/task.h> 97 98#if VM_PRESSURE_EVENTS 99#include <kern/vm_pressure.h> 100#endif 101 102#if CONFIG_MEMORYSTATUS 103#include <sys/kern_memorystatus.h> 104#endif 105 106MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system"); 107 108#define KQ_EVENT NULL 109 110static inline void kqlock(struct kqueue *kq); 111static inline void kqunlock(struct kqueue *kq); 112 113static int kqlock2knoteuse(struct kqueue *kq, struct knote *kn); 114static int kqlock2knoteusewait(struct kqueue *kq, struct knote *kn); 115static int kqlock2knotedrop(struct kqueue *kq, struct knote *kn); 116static int knoteuse2kqlock(struct kqueue *kq, struct knote *kn); 117 118static void kqueue_wakeup(struct kqueue *kq, int closed); 119static int kqueue_read(struct fileproc *fp, struct uio *uio, 120 int flags, vfs_context_t ctx); 121static int kqueue_write(struct fileproc *fp, struct uio *uio, 122 int flags, vfs_context_t ctx); 123static int kqueue_ioctl(struct fileproc *fp, u_long com, caddr_t data, 124 vfs_context_t ctx); 125static int kqueue_select(struct fileproc *fp, int which, void *wql, 126 vfs_context_t ctx); 127static int kqueue_close(struct fileglob *fg, vfs_context_t ctx); 128static int kqueue_kqfilter(struct fileproc *fp, struct knote *kn, 129 vfs_context_t ctx); 130static int kqueue_drain(struct fileproc *fp, vfs_context_t ctx); 131extern int kqueue_stat(struct fileproc *fp, void *ub, int isstat64, 132 vfs_context_t ctx); 133 134static const struct fileops kqueueops = { 135 .fo_type = DTYPE_KQUEUE, 136 .fo_read = kqueue_read, 137 .fo_write = kqueue_write, 138 .fo_ioctl = kqueue_ioctl, 139 .fo_select = kqueue_select, 140 .fo_close = kqueue_close, 141 .fo_kqfilter = kqueue_kqfilter, 142 .fo_drain = kqueue_drain, 143}; 144 145static int kevent_internal(struct proc *p, int iskev64, user_addr_t changelist, 146 int nchanges, user_addr_t eventlist, int nevents, int fd, 147 user_addr_t utimeout, unsigned int flags, int32_t *retval); 148static int kevent_copyin(user_addr_t *addrp, struct kevent64_s *kevp, 149 struct proc *p, int iskev64); 150static int kevent_copyout(struct kevent64_s *kevp, user_addr_t *addrp, 151 struct proc *p, int iskev64); 152char * kevent_description(struct kevent64_s *kevp, char *s, size_t n); 153 154static int kevent_callback(struct kqueue *kq, struct kevent64_s *kevp, 155 void *data); 156static void kevent_continue(struct kqueue *kq, void *data, int error); 157static void kqueue_scan_continue(void *contp, wait_result_t wait_result); 158static int kqueue_process(struct kqueue *kq, kevent_callback_t callback, 159 void *data, int *countp, struct proc *p); 160static int kqueue_begin_processing(struct kqueue *kq); 161static void kqueue_end_processing(struct kqueue *kq); 162static int knote_process(struct knote *kn, kevent_callback_t callback, 163 void *data, struct kqtailq *inprocessp, struct proc *p); 164static void knote_put(struct knote *kn); 165static int knote_fdpattach(struct knote *kn, struct filedesc *fdp, 166 struct proc *p); 167static void knote_drop(struct knote *kn, struct proc *p); 168static void knote_activate(struct knote *kn, int); 169static void knote_deactivate(struct knote *kn); 170static void knote_enqueue(struct knote *kn); 171static void knote_dequeue(struct knote *kn); 172static struct knote *knote_alloc(void); 173static void knote_free(struct knote *kn); 174 175static int filt_fileattach(struct knote *kn); 176static struct filterops file_filtops = { 177 .f_isfd = 1, 178 .f_attach = filt_fileattach, 179}; 180 181static void filt_kqdetach(struct knote *kn); 182static int filt_kqueue(struct knote *kn, long hint); 183static struct filterops kqread_filtops = { 184 .f_isfd = 1, 185 .f_detach = filt_kqdetach, 186 .f_event = filt_kqueue, 187}; 188 189/* placeholder for not-yet-implemented filters */ 190static int filt_badattach(struct knote *kn); 191static struct filterops bad_filtops = { 192 .f_attach = filt_badattach, 193}; 194 195static int filt_procattach(struct knote *kn); 196static void filt_procdetach(struct knote *kn); 197static int filt_proc(struct knote *kn, long hint); 198static struct filterops proc_filtops = { 199 .f_attach = filt_procattach, 200 .f_detach = filt_procdetach, 201 .f_event = filt_proc, 202}; 203 204#if VM_PRESSURE_EVENTS 205static int filt_vmattach(struct knote *kn); 206static void filt_vmdetach(struct knote *kn); 207static int filt_vm(struct knote *kn, long hint); 208static struct filterops vm_filtops = { 209 .f_attach = filt_vmattach, 210 .f_detach = filt_vmdetach, 211 .f_event = filt_vm, 212}; 213#endif /* VM_PRESSURE_EVENTS */ 214 215#if CONFIG_MEMORYSTATUS 216extern struct filterops memorystatus_filtops; 217#endif /* CONFIG_MEMORYSTATUS */ 218 219extern struct filterops fs_filtops; 220 221extern struct filterops sig_filtops; 222 223/* Timer filter */ 224static int filt_timerattach(struct knote *kn); 225static void filt_timerdetach(struct knote *kn); 226static int filt_timer(struct knote *kn, long hint); 227static void filt_timertouch(struct knote *kn, struct kevent64_s *kev, 228 long type); 229static struct filterops timer_filtops = { 230 .f_attach = filt_timerattach, 231 .f_detach = filt_timerdetach, 232 .f_event = filt_timer, 233 .f_touch = filt_timertouch, 234}; 235 236/* Helpers */ 237static void filt_timerexpire(void *knx, void *param1); 238static int filt_timervalidate(struct knote *kn); 239static void filt_timerupdate(struct knote *kn); 240static void filt_timercancel(struct knote *kn); 241 242#define TIMER_RUNNING 0x1 243#define TIMER_CANCELWAIT 0x2 244 245static lck_mtx_t _filt_timerlock; 246static void filt_timerlock(void); 247static void filt_timerunlock(void); 248 249static zone_t knote_zone; 250 251#define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask)) 252 253#if 0 254extern struct filterops aio_filtops; 255#endif 256 257/* Mach portset filter */ 258extern struct filterops machport_filtops; 259 260/* User filter */ 261static int filt_userattach(struct knote *kn); 262static void filt_userdetach(struct knote *kn); 263static int filt_user(struct knote *kn, long hint); 264static void filt_usertouch(struct knote *kn, struct kevent64_s *kev, 265 long type); 266static struct filterops user_filtops = { 267 .f_attach = filt_userattach, 268 .f_detach = filt_userdetach, 269 .f_event = filt_user, 270 .f_touch = filt_usertouch, 271}; 272 273/* 274 * Table for all system-defined filters. 275 */ 276static struct filterops *sysfilt_ops[] = { 277 &file_filtops, /* EVFILT_READ */ 278 &file_filtops, /* EVFILT_WRITE */ 279#if 0 280 &aio_filtops, /* EVFILT_AIO */ 281#else 282 &bad_filtops, /* EVFILT_AIO */ 283#endif 284 &file_filtops, /* EVFILT_VNODE */ 285 &proc_filtops, /* EVFILT_PROC */ 286 &sig_filtops, /* EVFILT_SIGNAL */ 287 &timer_filtops, /* EVFILT_TIMER */ 288 &machport_filtops, /* EVFILT_MACHPORT */ 289 &fs_filtops, /* EVFILT_FS */ 290 &user_filtops, /* EVFILT_USER */ 291 &bad_filtops, /* unused */ 292#if VM_PRESSURE_EVENTS 293 &vm_filtops, /* EVFILT_VM */ 294#else 295 &bad_filtops, /* EVFILT_VM */ 296#endif 297 &file_filtops, /* EVFILT_SOCK */ 298#if CONFIG_MEMORYSTATUS 299 &memorystatus_filtops, /* EVFILT_MEMORYSTATUS */ 300#else 301 &bad_filtops, /* EVFILT_MEMORYSTATUS */ 302#endif 303}; 304 305/* 306 * kqueue/note lock attributes and implementations 307 * 308 * kqueues have locks, while knotes have use counts 309 * Most of the knote state is guarded by the object lock. 310 * the knote "inuse" count and status use the kqueue lock. 311 */ 312lck_grp_attr_t * kq_lck_grp_attr; 313lck_grp_t * kq_lck_grp; 314lck_attr_t * kq_lck_attr; 315 316static inline void 317kqlock(struct kqueue *kq) 318{ 319 lck_spin_lock(&kq->kq_lock); 320} 321 322static inline void 323kqunlock(struct kqueue *kq) 324{ 325 lck_spin_unlock(&kq->kq_lock); 326} 327 328/* 329 * Convert a kq lock to a knote use referece. 330 * 331 * If the knote is being dropped, we can't get 332 * a use reference, so just return with it 333 * still locked. 334 * - kq locked at entry 335 * - unlock on exit if we get the use reference 336 */ 337static int 338kqlock2knoteuse(struct kqueue *kq, struct knote *kn) 339{ 340 if (kn->kn_status & KN_DROPPING) 341 return (0); 342 kn->kn_inuse++; 343 kqunlock(kq); 344 return (1); 345} 346 347/* 348 * Convert a kq lock to a knote use referece, 349 * but wait for attach and drop events to complete. 350 * 351 * If the knote is being dropped, we can't get 352 * a use reference, so just return with it 353 * still locked. 354 * - kq locked at entry 355 * - kq always unlocked on exit 356 */ 357static int 358kqlock2knoteusewait(struct kqueue *kq, struct knote *kn) 359{ 360 if ((kn->kn_status & (KN_DROPPING | KN_ATTACHING)) != 0) { 361 kn->kn_status |= KN_USEWAIT; 362 wait_queue_assert_wait((wait_queue_t)kq->kq_wqs, 363 &kn->kn_status, THREAD_UNINT, 0); 364 kqunlock(kq); 365 thread_block(THREAD_CONTINUE_NULL); 366 return (0); 367 } 368 kn->kn_inuse++; 369 kqunlock(kq); 370 return (1); 371} 372 373/* 374 * Convert from a knote use reference back to kq lock. 375 * 376 * Drop a use reference and wake any waiters if 377 * this is the last one. 378 * 379 * The exit return indicates if the knote is 380 * still alive - but the kqueue lock is taken 381 * unconditionally. 382 */ 383static int 384knoteuse2kqlock(struct kqueue *kq, struct knote *kn) 385{ 386 kqlock(kq); 387 if (--kn->kn_inuse == 0) { 388 if ((kn->kn_status & KN_ATTACHING) != 0) { 389 kn->kn_status &= ~KN_ATTACHING; 390 } 391 if ((kn->kn_status & KN_USEWAIT) != 0) { 392 kn->kn_status &= ~KN_USEWAIT; 393 wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs, 394 &kn->kn_status, THREAD_AWAKENED); 395 } 396 } 397 return ((kn->kn_status & KN_DROPPING) == 0); 398} 399 400/* 401 * Convert a kq lock to a knote drop reference. 402 * 403 * If the knote is in use, wait for the use count 404 * to subside. We first mark our intention to drop 405 * it - keeping other users from "piling on." 406 * If we are too late, we have to wait for the 407 * other drop to complete. 408 * 409 * - kq locked at entry 410 * - always unlocked on exit. 411 * - caller can't hold any locks that would prevent 412 * the other dropper from completing. 413 */ 414static int 415kqlock2knotedrop(struct kqueue *kq, struct knote *kn) 416{ 417 int oktodrop; 418 419 oktodrop = ((kn->kn_status & (KN_DROPPING | KN_ATTACHING)) == 0); 420 kn->kn_status |= KN_DROPPING; 421 if (oktodrop) { 422 if (kn->kn_inuse == 0) { 423 kqunlock(kq); 424 return (oktodrop); 425 } 426 } 427 kn->kn_status |= KN_USEWAIT; 428 wait_queue_assert_wait((wait_queue_t)kq->kq_wqs, &kn->kn_status, 429 THREAD_UNINT, 0); 430 kqunlock(kq); 431 thread_block(THREAD_CONTINUE_NULL); 432 return (oktodrop); 433} 434 435/* 436 * Release a knote use count reference. 437 */ 438static void 439knote_put(struct knote *kn) 440{ 441 struct kqueue *kq = kn->kn_kq; 442 443 kqlock(kq); 444 if (--kn->kn_inuse == 0) { 445 if ((kn->kn_status & KN_USEWAIT) != 0) { 446 kn->kn_status &= ~KN_USEWAIT; 447 wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs, 448 &kn->kn_status, THREAD_AWAKENED); 449 } 450 } 451 kqunlock(kq); 452} 453 454static int 455filt_fileattach(struct knote *kn) 456{ 457 return (fo_kqfilter(kn->kn_fp, kn, vfs_context_current())); 458} 459 460#define f_flag f_fglob->fg_flag 461#define f_msgcount f_fglob->fg_msgcount 462#define f_cred f_fglob->fg_cred 463#define f_ops f_fglob->fg_ops 464#define f_offset f_fglob->fg_offset 465#define f_data f_fglob->fg_data 466 467static void 468filt_kqdetach(struct knote *kn) 469{ 470 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data; 471 472 kqlock(kq); 473 KNOTE_DETACH(&kq->kq_sel.si_note, kn); 474 kqunlock(kq); 475} 476 477/*ARGSUSED*/ 478static int 479filt_kqueue(struct knote *kn, __unused long hint) 480{ 481 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data; 482 483 kn->kn_data = kq->kq_count; 484 return (kn->kn_data > 0); 485} 486 487static int 488filt_procattach(struct knote *kn) 489{ 490 struct proc *p; 491 492 assert(PID_MAX < NOTE_PDATAMASK); 493 494 if ((kn->kn_sfflags & (NOTE_TRACK | NOTE_TRACKERR | NOTE_CHILD)) != 0) 495 return (ENOTSUP); 496 497 p = proc_find(kn->kn_id); 498 if (p == NULL) { 499 return (ESRCH); 500 } 501 502 const int NoteExitStatusBits = NOTE_EXIT | NOTE_EXITSTATUS; 503 504 if ((kn->kn_sfflags & NoteExitStatusBits) == NoteExitStatusBits) 505 do { 506 pid_t selfpid = proc_selfpid(); 507 508 if (p->p_ppid == selfpid) 509 break; /* parent => ok */ 510 511 if ((p->p_lflag & P_LTRACED) != 0 && 512 (p->p_oppid == selfpid)) 513 break; /* parent-in-waiting => ok */ 514 515 proc_rele(p); 516 return (EACCES); 517 } while (0); 518 519 proc_klist_lock(); 520 521 kn->kn_flags |= EV_CLEAR; /* automatically set */ 522 kn->kn_ptr.p_proc = p; /* store the proc handle */ 523 524 KNOTE_ATTACH(&p->p_klist, kn); 525 526 proc_klist_unlock(); 527 528 proc_rele(p); 529 530 return (0); 531} 532 533/* 534 * The knote may be attached to a different process, which may exit, 535 * leaving nothing for the knote to be attached to. In that case, 536 * the pointer to the process will have already been nulled out. 537 */ 538static void 539filt_procdetach(struct knote *kn) 540{ 541 struct proc *p; 542 543 proc_klist_lock(); 544 545 p = kn->kn_ptr.p_proc; 546 if (p != PROC_NULL) { 547 kn->kn_ptr.p_proc = PROC_NULL; 548 KNOTE_DETACH(&p->p_klist, kn); 549 } 550 551 proc_klist_unlock(); 552} 553 554static int 555filt_proc(struct knote *kn, long hint) 556{ 557 /* 558 * Note: a lot of bits in hint may be obtained from the knote 559 * To free some of those bits, see <rdar://problem/12592988> Freeing up 560 * bits in hint for filt_proc 561 */ 562 /* hint is 0 when called from above */ 563 if (hint != 0) { 564 u_int event; 565 566 /* ALWAYS CALLED WITH proc_klist_lock when (hint != 0) */ 567 568 /* 569 * mask off extra data 570 */ 571 event = (u_int)hint & NOTE_PCTRLMASK; 572 573 /* 574 * termination lifecycle events can happen while a debugger 575 * has reparented a process, in which case notifications 576 * should be quashed except to the tracing parent. When 577 * the debugger reaps the child (either via wait4(2) or 578 * process exit), the child will be reparented to the original 579 * parent and these knotes re-fired. 580 */ 581 if (event & NOTE_EXIT) { 582 if ((kn->kn_ptr.p_proc->p_oppid != 0) 583 && (kn->kn_kq->kq_p->p_pid != kn->kn_ptr.p_proc->p_ppid)) { 584 /* 585 * This knote is not for the current ptrace(2) parent, ignore. 586 */ 587 return 0; 588 } 589 } 590 591 /* 592 * if the user is interested in this event, record it. 593 */ 594 if (kn->kn_sfflags & event) 595 kn->kn_fflags |= event; 596 597#pragma clang diagnostic push 598#pragma clang diagnostic ignored "-Wdeprecated-declarations" 599 if ((event == NOTE_REAP) || ((event == NOTE_EXIT) && !(kn->kn_sfflags & NOTE_REAP))) { 600 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 601 } 602#pragma clang diagnostic pop 603 604 if (event == NOTE_EXIT) { 605 kn->kn_data = 0; 606 if ((kn->kn_sfflags & NOTE_EXITSTATUS) != 0) { 607 kn->kn_fflags |= NOTE_EXITSTATUS; 608 kn->kn_data |= (hint & NOTE_PDATAMASK); 609 } 610 if ((kn->kn_sfflags & NOTE_EXIT_DETAIL) != 0) { 611 kn->kn_fflags |= NOTE_EXIT_DETAIL; 612 if ((kn->kn_ptr.p_proc->p_lflag & 613 P_LTERM_DECRYPTFAIL) != 0) { 614 kn->kn_data |= NOTE_EXIT_DECRYPTFAIL; 615 } 616 if ((kn->kn_ptr.p_proc->p_lflag & 617 P_LTERM_JETSAM) != 0) { 618 kn->kn_data |= NOTE_EXIT_MEMORY; 619 switch (kn->kn_ptr.p_proc->p_lflag & 620 P_JETSAM_MASK) { 621 case P_JETSAM_VMPAGESHORTAGE: 622 kn->kn_data |= NOTE_EXIT_MEMORY_VMPAGESHORTAGE; 623 break; 624 case P_JETSAM_VMTHRASHING: 625 kn->kn_data |= NOTE_EXIT_MEMORY_VMTHRASHING; 626 break; 627 case P_JETSAM_VNODE: 628 kn->kn_data |= NOTE_EXIT_MEMORY_VNODE; 629 break; 630 case P_JETSAM_HIWAT: 631 kn->kn_data |= NOTE_EXIT_MEMORY_HIWAT; 632 break; 633 case P_JETSAM_PID: 634 kn->kn_data |= NOTE_EXIT_MEMORY_PID; 635 break; 636 case P_JETSAM_IDLEEXIT: 637 kn->kn_data |= NOTE_EXIT_MEMORY_IDLE; 638 break; 639 } 640 } 641 if ((kn->kn_ptr.p_proc->p_csflags & 642 CS_KILLED) != 0) { 643 kn->kn_data |= NOTE_EXIT_CSERROR; 644 } 645 } 646 } 647 648 } 649 650 /* atomic check, no locking need when called from above */ 651 return (kn->kn_fflags != 0); 652} 653 654#if VM_PRESSURE_EVENTS 655/* 656 * Virtual memory kevents 657 * 658 * author: Matt Jacobson [matthew_jacobson@apple.com] 659 */ 660 661static int 662filt_vmattach(struct knote *kn) 663{ 664 /* 665 * The note will be cleared once the information has been flushed to 666 * the client. If there is still pressure, we will be re-alerted. 667 */ 668 kn->kn_flags |= EV_CLEAR; 669 return (vm_knote_register(kn)); 670} 671 672static void 673filt_vmdetach(struct knote *kn) 674{ 675 vm_knote_unregister(kn); 676} 677 678static int 679filt_vm(struct knote *kn, long hint) 680{ 681 /* hint == 0 means this is just an alive? check (always true) */ 682 if (hint != 0) { 683 const pid_t pid = (pid_t)hint; 684 if ((kn->kn_sfflags & NOTE_VM_PRESSURE) && 685 (kn->kn_kq->kq_p->p_pid == pid)) { 686 kn->kn_fflags |= NOTE_VM_PRESSURE; 687 } 688 } 689 690 return (kn->kn_fflags != 0); 691} 692#endif /* VM_PRESSURE_EVENTS */ 693 694/* 695 * filt_timervalidate - process data from user 696 * 697 * Converts to either interval or deadline format. 698 * 699 * The saved-data field in the knote contains the 700 * time value. The saved filter-flags indicates 701 * the unit of measurement. 702 * 703 * After validation, either the saved-data field 704 * contains the interval in absolute time, or ext[0] 705 * contains the expected deadline. If that deadline 706 * is in the past, ext[0] is 0. 707 * 708 * Returns EINVAL for unrecognized units of time. 709 * 710 * Timer filter lock is held. 711 * 712 */ 713static int 714filt_timervalidate(struct knote *kn) 715{ 716 uint64_t multiplier; 717 uint64_t raw = 0; 718 719 switch (kn->kn_sfflags & (NOTE_SECONDS|NOTE_USECONDS|NOTE_NSECONDS)) { 720 case NOTE_SECONDS: 721 multiplier = NSEC_PER_SEC; 722 break; 723 case NOTE_USECONDS: 724 multiplier = NSEC_PER_USEC; 725 break; 726 case NOTE_NSECONDS: 727 multiplier = 1; 728 break; 729 case 0: /* milliseconds (default) */ 730 multiplier = NSEC_PER_SEC / 1000; 731 break; 732 default: 733 return (EINVAL); 734 } 735 736 /* transform the slop delta(leeway) in kn_ext[1] if passed to same time scale */ 737 if(kn->kn_sfflags & NOTE_LEEWAY){ 738 nanoseconds_to_absolutetime((uint64_t)kn->kn_ext[1] * multiplier, &raw); 739 kn->kn_ext[1] = raw; 740 } 741 742 nanoseconds_to_absolutetime((uint64_t)kn->kn_sdata * multiplier, &raw); 743 744 kn->kn_ext[0] = 0; 745 kn->kn_sdata = 0; 746 747 if (kn->kn_sfflags & NOTE_ABSOLUTE) { 748 clock_sec_t seconds; 749 clock_nsec_t nanoseconds; 750 uint64_t now; 751 752 clock_get_calendar_nanotime(&seconds, &nanoseconds); 753 nanoseconds_to_absolutetime((uint64_t)seconds * NSEC_PER_SEC + 754 nanoseconds, &now); 755 756 if (raw < now) { 757 /* time has already passed */ 758 kn->kn_ext[0] = 0; 759 } else { 760 raw -= now; 761 clock_absolutetime_interval_to_deadline(raw, 762 &kn->kn_ext[0]); 763 } 764 } else { 765 kn->kn_sdata = raw; 766 } 767 768 return (0); 769} 770 771/* 772 * filt_timerupdate - compute the next deadline 773 * 774 * Repeating timers store their interval in kn_sdata. Absolute 775 * timers have already calculated the deadline, stored in ext[0]. 776 * 777 * On return, the next deadline (or zero if no deadline is needed) 778 * is stored in kn_ext[0]. 779 * 780 * Timer filter lock is held. 781 */ 782static void 783filt_timerupdate(struct knote *kn) 784{ 785 /* if there's no interval, deadline is just in kn_ext[0] */ 786 if (kn->kn_sdata == 0) 787 return; 788 789 /* if timer hasn't fired before, fire in interval nsecs */ 790 if (kn->kn_ext[0] == 0) { 791 clock_absolutetime_interval_to_deadline(kn->kn_sdata, 792 &kn->kn_ext[0]); 793 } else { 794 /* 795 * If timer has fired before, schedule the next pop 796 * relative to the last intended deadline. 797 * 798 * We could check for whether the deadline has expired, 799 * but the thread call layer can handle that. 800 */ 801 kn->kn_ext[0] += kn->kn_sdata; 802 } 803} 804 805/* 806 * filt_timerexpire - the timer callout routine 807 * 808 * Just propagate the timer event into the knote 809 * filter routine (by going through the knote 810 * synchronization point). Pass a hint to 811 * indicate this is a real event, not just a 812 * query from above. 813 */ 814static void 815filt_timerexpire(void *knx, __unused void *spare) 816{ 817 struct klist timer_list; 818 struct knote *kn = knx; 819 820 filt_timerlock(); 821 822 kn->kn_hookid &= ~TIMER_RUNNING; 823 824 /* no "object" for timers, so fake a list */ 825 SLIST_INIT(&timer_list); 826 SLIST_INSERT_HEAD(&timer_list, kn, kn_selnext); 827 KNOTE(&timer_list, 1); 828 829 /* if someone is waiting for timer to pop */ 830 if (kn->kn_hookid & TIMER_CANCELWAIT) { 831 struct kqueue *kq = kn->kn_kq; 832 wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs, &kn->kn_hook, 833 THREAD_AWAKENED); 834 } 835 836 filt_timerunlock(); 837} 838 839/* 840 * Cancel a running timer (or wait for the pop). 841 * Timer filter lock is held. 842 */ 843static void 844filt_timercancel(struct knote *kn) 845{ 846 struct kqueue *kq = kn->kn_kq; 847 thread_call_t callout = kn->kn_hook; 848 boolean_t cancelled; 849 850 if (kn->kn_hookid & TIMER_RUNNING) { 851 /* cancel the callout if we can */ 852 cancelled = thread_call_cancel(callout); 853 if (cancelled) { 854 kn->kn_hookid &= ~TIMER_RUNNING; 855 } else { 856 /* we have to wait for the expire routine. */ 857 kn->kn_hookid |= TIMER_CANCELWAIT; 858 wait_queue_assert_wait((wait_queue_t)kq->kq_wqs, 859 &kn->kn_hook, THREAD_UNINT, 0); 860 filt_timerunlock(); 861 thread_block(THREAD_CONTINUE_NULL); 862 filt_timerlock(); 863 assert((kn->kn_hookid & TIMER_RUNNING) == 0); 864 } 865 } 866} 867 868/* 869 * Allocate a thread call for the knote's lifetime, and kick off the timer. 870 */ 871static int 872filt_timerattach(struct knote *kn) 873{ 874 thread_call_t callout; 875 int error; 876 877 callout = thread_call_allocate(filt_timerexpire, kn); 878 if (NULL == callout) 879 return (ENOMEM); 880 881 filt_timerlock(); 882 error = filt_timervalidate(kn); 883 if (error != 0) { 884 filt_timerunlock(); 885 return (error); 886 } 887 888 kn->kn_hook = (void*)callout; 889 kn->kn_hookid = 0; 890 891 /* absolute=EV_ONESHOT */ 892 if (kn->kn_sfflags & NOTE_ABSOLUTE) 893 kn->kn_flags |= EV_ONESHOT; 894 895 filt_timerupdate(kn); 896 if (kn->kn_ext[0]) { 897 kn->kn_flags |= EV_CLEAR; 898 unsigned int timer_flags = 0; 899 if (kn->kn_sfflags & NOTE_CRITICAL) 900 timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL; 901 else if (kn->kn_sfflags & NOTE_BACKGROUND) 902 timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND; 903 else 904 timer_flags |= THREAD_CALL_DELAY_USER_NORMAL; 905 906 if (kn->kn_sfflags & NOTE_LEEWAY) 907 timer_flags |= THREAD_CALL_DELAY_LEEWAY; 908 909 thread_call_enter_delayed_with_leeway(callout, NULL, 910 kn->kn_ext[0], kn->kn_ext[1], timer_flags); 911 912 kn->kn_hookid |= TIMER_RUNNING; 913 } else { 914 /* fake immediate */ 915 kn->kn_data = 1; 916 } 917 918 filt_timerunlock(); 919 return (0); 920} 921 922/* 923 * Shut down the timer if it's running, and free the callout. 924 */ 925static void 926filt_timerdetach(struct knote *kn) 927{ 928 thread_call_t callout; 929 930 filt_timerlock(); 931 932 callout = (thread_call_t)kn->kn_hook; 933 filt_timercancel(kn); 934 935 filt_timerunlock(); 936 937 thread_call_free(callout); 938} 939 940 941 942static int 943filt_timer(struct knote *kn, long hint) 944{ 945 int result; 946 947 if (hint) { 948 /* real timer pop -- timer lock held by filt_timerexpire */ 949 kn->kn_data++; 950 951 if (((kn->kn_hookid & TIMER_CANCELWAIT) == 0) && 952 ((kn->kn_flags & EV_ONESHOT) == 0)) { 953 954 /* evaluate next time to fire */ 955 filt_timerupdate(kn); 956 957 if (kn->kn_ext[0]) { 958 unsigned int timer_flags = 0; 959 960 /* keep the callout and re-arm */ 961 if (kn->kn_sfflags & NOTE_CRITICAL) 962 timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL; 963 else if (kn->kn_sfflags & NOTE_BACKGROUND) 964 timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND; 965 else 966 timer_flags |= THREAD_CALL_DELAY_USER_NORMAL; 967 968 if (kn->kn_sfflags & NOTE_LEEWAY) 969 timer_flags |= THREAD_CALL_DELAY_LEEWAY; 970 971 thread_call_enter_delayed_with_leeway(kn->kn_hook, NULL, 972 kn->kn_ext[0], kn->kn_ext[1], timer_flags); 973 974 kn->kn_hookid |= TIMER_RUNNING; 975 } 976 } 977 978 return (1); 979 } 980 981 /* user-query */ 982 filt_timerlock(); 983 984 result = (kn->kn_data != 0); 985 986 filt_timerunlock(); 987 988 return (result); 989} 990 991 992/* 993 * filt_timertouch - update knote with new user input 994 * 995 * Cancel and restart the timer based on new user data. When 996 * the user picks up a knote, clear the count of how many timer 997 * pops have gone off (in kn_data). 998 */ 999static void 1000filt_timertouch(struct knote *kn, struct kevent64_s *kev, long type) 1001{ 1002 int error; 1003 filt_timerlock(); 1004 1005 switch (type) { 1006 case EVENT_REGISTER: 1007 /* cancel current call */ 1008 filt_timercancel(kn); 1009 1010 /* recalculate deadline */ 1011 kn->kn_sdata = kev->data; 1012 kn->kn_sfflags = kev->fflags; 1013 kn->kn_ext[0] = kev->ext[0]; 1014 kn->kn_ext[1] = kev->ext[1]; 1015 1016 error = filt_timervalidate(kn); 1017 if (error) { 1018 /* no way to report error, so mark it in the knote */ 1019 kn->kn_flags |= EV_ERROR; 1020 kn->kn_data = error; 1021 break; 1022 } 1023 1024 /* start timer if necessary */ 1025 filt_timerupdate(kn); 1026 1027 if (kn->kn_ext[0]) { 1028 unsigned int timer_flags = 0; 1029 if (kn->kn_sfflags & NOTE_CRITICAL) 1030 timer_flags |= THREAD_CALL_DELAY_USER_CRITICAL; 1031 else if (kn->kn_sfflags & NOTE_BACKGROUND) 1032 timer_flags |= THREAD_CALL_DELAY_USER_BACKGROUND; 1033 else 1034 timer_flags |= THREAD_CALL_DELAY_USER_NORMAL; 1035 1036 if (kn->kn_sfflags & NOTE_LEEWAY) 1037 timer_flags |= THREAD_CALL_DELAY_LEEWAY; 1038 1039 thread_call_enter_delayed_with_leeway(kn->kn_hook, NULL, 1040 kn->kn_ext[0], kn->kn_ext[1], timer_flags); 1041 1042 kn->kn_hookid |= TIMER_RUNNING; 1043 } else { 1044 /* pretend the timer has fired */ 1045 kn->kn_data = 1; 1046 } 1047 1048 break; 1049 1050 case EVENT_PROCESS: 1051 /* reset the timer pop count in kn_data */ 1052 *kev = kn->kn_kevent; 1053 kev->ext[0] = 0; 1054 kn->kn_data = 0; 1055 if (kn->kn_flags & EV_CLEAR) 1056 kn->kn_fflags = 0; 1057 break; 1058 default: 1059 panic("%s: - invalid type (%ld)", __func__, type); 1060 break; 1061 } 1062 1063 filt_timerunlock(); 1064} 1065 1066static void 1067filt_timerlock(void) 1068{ 1069 lck_mtx_lock(&_filt_timerlock); 1070} 1071 1072static void 1073filt_timerunlock(void) 1074{ 1075 lck_mtx_unlock(&_filt_timerlock); 1076} 1077 1078static int 1079filt_userattach(struct knote *kn) 1080{ 1081 /* EVFILT_USER knotes are not attached to anything in the kernel */ 1082 kn->kn_hook = NULL; 1083 if (kn->kn_fflags & NOTE_TRIGGER) { 1084 kn->kn_hookid = 1; 1085 } else { 1086 kn->kn_hookid = 0; 1087 } 1088 return (0); 1089} 1090 1091static void 1092filt_userdetach(__unused struct knote *kn) 1093{ 1094 /* EVFILT_USER knotes are not attached to anything in the kernel */ 1095} 1096 1097static int 1098filt_user(struct knote *kn, __unused long hint) 1099{ 1100 return (kn->kn_hookid); 1101} 1102 1103static void 1104filt_usertouch(struct knote *kn, struct kevent64_s *kev, long type) 1105{ 1106 uint32_t ffctrl; 1107 switch (type) { 1108 case EVENT_REGISTER: 1109 if (kev->fflags & NOTE_TRIGGER) { 1110 kn->kn_hookid = 1; 1111 } 1112 1113 ffctrl = kev->fflags & NOTE_FFCTRLMASK; 1114 kev->fflags &= NOTE_FFLAGSMASK; 1115 switch (ffctrl) { 1116 case NOTE_FFNOP: 1117 break; 1118 case NOTE_FFAND: 1119 OSBitAndAtomic(kev->fflags, &kn->kn_sfflags); 1120 break; 1121 case NOTE_FFOR: 1122 OSBitOrAtomic(kev->fflags, &kn->kn_sfflags); 1123 break; 1124 case NOTE_FFCOPY: 1125 kn->kn_sfflags = kev->fflags; 1126 break; 1127 } 1128 kn->kn_sdata = kev->data; 1129 break; 1130 case EVENT_PROCESS: 1131 *kev = kn->kn_kevent; 1132 kev->fflags = (volatile UInt32)kn->kn_sfflags; 1133 kev->data = kn->kn_sdata; 1134 if (kn->kn_flags & EV_CLEAR) { 1135 kn->kn_hookid = 0; 1136 kn->kn_data = 0; 1137 kn->kn_fflags = 0; 1138 } 1139 break; 1140 default: 1141 panic("%s: - invalid type (%ld)", __func__, type); 1142 break; 1143 } 1144} 1145 1146/* 1147 * JMM - placeholder for not-yet-implemented filters 1148 */ 1149static int 1150filt_badattach(__unused struct knote *kn) 1151{ 1152 return (ENOTSUP); 1153} 1154 1155struct kqueue * 1156kqueue_alloc(struct proc *p) 1157{ 1158 struct filedesc *fdp = p->p_fd; 1159 struct kqueue *kq; 1160 1161 MALLOC_ZONE(kq, struct kqueue *, sizeof (struct kqueue), M_KQUEUE, 1162 M_WAITOK); 1163 if (kq != NULL) { 1164 wait_queue_set_t wqs; 1165 1166 wqs = wait_queue_set_alloc(SYNC_POLICY_FIFO | 1167 SYNC_POLICY_PREPOST); 1168 if (wqs != NULL) { 1169 bzero(kq, sizeof (struct kqueue)); 1170 lck_spin_init(&kq->kq_lock, kq_lck_grp, kq_lck_attr); 1171 TAILQ_INIT(&kq->kq_head); 1172 kq->kq_wqs = wqs; 1173 kq->kq_p = p; 1174 } else { 1175 FREE_ZONE(kq, sizeof (struct kqueue), M_KQUEUE); 1176 } 1177 } 1178 1179 if (fdp->fd_knlistsize < 0) { 1180 proc_fdlock(p); 1181 if (fdp->fd_knlistsize < 0) 1182 fdp->fd_knlistsize = 0; /* this process has had a kq */ 1183 proc_fdunlock(p); 1184 } 1185 1186 return (kq); 1187} 1188 1189/* 1190 * kqueue_dealloc - detach all knotes from a kqueue and free it 1191 * 1192 * We walk each list looking for knotes referencing this 1193 * this kqueue. If we find one, we try to drop it. But 1194 * if we fail to get a drop reference, that will wait 1195 * until it is dropped. So, we can just restart again 1196 * safe in the assumption that the list will eventually 1197 * not contain any more references to this kqueue (either 1198 * we dropped them all, or someone else did). 1199 * 1200 * Assumes no new events are being added to the kqueue. 1201 * Nothing locked on entry or exit. 1202 */ 1203void 1204kqueue_dealloc(struct kqueue *kq) 1205{ 1206 struct proc *p = kq->kq_p; 1207 struct filedesc *fdp = p->p_fd; 1208 struct knote *kn; 1209 int i; 1210 1211 proc_fdlock(p); 1212 for (i = 0; i < fdp->fd_knlistsize; i++) { 1213 kn = SLIST_FIRST(&fdp->fd_knlist[i]); 1214 while (kn != NULL) { 1215 if (kq == kn->kn_kq) { 1216 kqlock(kq); 1217 proc_fdunlock(p); 1218 /* drop it ourselves or wait */ 1219 if (kqlock2knotedrop(kq, kn)) { 1220 kn->kn_fop->f_detach(kn); 1221 knote_drop(kn, p); 1222 } 1223 proc_fdlock(p); 1224 /* start over at beginning of list */ 1225 kn = SLIST_FIRST(&fdp->fd_knlist[i]); 1226 continue; 1227 } 1228 kn = SLIST_NEXT(kn, kn_link); 1229 } 1230 } 1231 if (fdp->fd_knhashmask != 0) { 1232 for (i = 0; i < (int)fdp->fd_knhashmask + 1; i++) { 1233 kn = SLIST_FIRST(&fdp->fd_knhash[i]); 1234 while (kn != NULL) { 1235 if (kq == kn->kn_kq) { 1236 kqlock(kq); 1237 proc_fdunlock(p); 1238 /* drop it ourselves or wait */ 1239 if (kqlock2knotedrop(kq, kn)) { 1240 kn->kn_fop->f_detach(kn); 1241 knote_drop(kn, p); 1242 } 1243 proc_fdlock(p); 1244 /* start over at beginning of list */ 1245 kn = SLIST_FIRST(&fdp->fd_knhash[i]); 1246 continue; 1247 } 1248 kn = SLIST_NEXT(kn, kn_link); 1249 } 1250 } 1251 } 1252 proc_fdunlock(p); 1253 1254 /* 1255 * before freeing the wait queue set for this kqueue, 1256 * make sure it is unlinked from all its containing (select) sets. 1257 */ 1258 wait_queue_unlink_all((wait_queue_t)kq->kq_wqs); 1259 wait_queue_set_free(kq->kq_wqs); 1260 lck_spin_destroy(&kq->kq_lock, kq_lck_grp); 1261 FREE_ZONE(kq, sizeof (struct kqueue), M_KQUEUE); 1262} 1263 1264int 1265kqueue_body(struct proc *p, fp_allocfn_t fp_zalloc, void *cra, int32_t *retval) 1266{ 1267 struct kqueue *kq; 1268 struct fileproc *fp; 1269 int fd, error; 1270 1271 error = falloc_withalloc(p, 1272 &fp, &fd, vfs_context_current(), fp_zalloc, cra); 1273 if (error) { 1274 return (error); 1275 } 1276 1277 kq = kqueue_alloc(p); 1278 if (kq == NULL) { 1279 fp_free(p, fd, fp); 1280 return (ENOMEM); 1281 } 1282 1283 fp->f_flag = FREAD | FWRITE; 1284 fp->f_ops = &kqueueops; 1285 fp->f_data = kq; 1286 1287 proc_fdlock(p); 1288 *fdflags(p, fd) |= UF_EXCLOSE; 1289 procfdtbl_releasefd(p, fd, NULL); 1290 fp_drop(p, fd, fp, 1); 1291 proc_fdunlock(p); 1292 1293 *retval = fd; 1294 return (error); 1295} 1296 1297int 1298kqueue(struct proc *p, __unused struct kqueue_args *uap, int32_t *retval) 1299{ 1300 return (kqueue_body(p, fileproc_alloc_init, NULL, retval)); 1301} 1302 1303static int 1304kevent_copyin(user_addr_t *addrp, struct kevent64_s *kevp, struct proc *p, 1305 int iskev64) 1306{ 1307 int advance; 1308 int error; 1309 1310 if (iskev64) { 1311 advance = sizeof (struct kevent64_s); 1312 error = copyin(*addrp, (caddr_t)kevp, advance); 1313 } else if (IS_64BIT_PROCESS(p)) { 1314 struct user64_kevent kev64; 1315 bzero(kevp, sizeof (struct kevent64_s)); 1316 1317 advance = sizeof (kev64); 1318 error = copyin(*addrp, (caddr_t)&kev64, advance); 1319 if (error) 1320 return (error); 1321 kevp->ident = kev64.ident; 1322 kevp->filter = kev64.filter; 1323 kevp->flags = kev64.flags; 1324 kevp->fflags = kev64.fflags; 1325 kevp->data = kev64.data; 1326 kevp->udata = kev64.udata; 1327 } else { 1328 struct user32_kevent kev32; 1329 bzero(kevp, sizeof (struct kevent64_s)); 1330 1331 advance = sizeof (kev32); 1332 error = copyin(*addrp, (caddr_t)&kev32, advance); 1333 if (error) 1334 return (error); 1335 kevp->ident = (uintptr_t)kev32.ident; 1336 kevp->filter = kev32.filter; 1337 kevp->flags = kev32.flags; 1338 kevp->fflags = kev32.fflags; 1339 kevp->data = (intptr_t)kev32.data; 1340 kevp->udata = CAST_USER_ADDR_T(kev32.udata); 1341 } 1342 if (!error) 1343 *addrp += advance; 1344 return (error); 1345} 1346 1347static int 1348kevent_copyout(struct kevent64_s *kevp, user_addr_t *addrp, struct proc *p, 1349 int iskev64) 1350{ 1351 int advance; 1352 int error; 1353 1354 if (iskev64) { 1355 advance = sizeof (struct kevent64_s); 1356 error = copyout((caddr_t)kevp, *addrp, advance); 1357 } else if (IS_64BIT_PROCESS(p)) { 1358 struct user64_kevent kev64; 1359 1360 /* 1361 * deal with the special case of a user-supplied 1362 * value of (uintptr_t)-1. 1363 */ 1364 kev64.ident = (kevp->ident == (uintptr_t)-1) ? 1365 (uint64_t)-1LL : (uint64_t)kevp->ident; 1366 1367 kev64.filter = kevp->filter; 1368 kev64.flags = kevp->flags; 1369 kev64.fflags = kevp->fflags; 1370 kev64.data = (int64_t) kevp->data; 1371 kev64.udata = kevp->udata; 1372 advance = sizeof (kev64); 1373 error = copyout((caddr_t)&kev64, *addrp, advance); 1374 } else { 1375 struct user32_kevent kev32; 1376 1377 kev32.ident = (uint32_t)kevp->ident; 1378 kev32.filter = kevp->filter; 1379 kev32.flags = kevp->flags; 1380 kev32.fflags = kevp->fflags; 1381 kev32.data = (int32_t)kevp->data; 1382 kev32.udata = kevp->udata; 1383 advance = sizeof (kev32); 1384 error = copyout((caddr_t)&kev32, *addrp, advance); 1385 } 1386 if (!error) 1387 *addrp += advance; 1388 return (error); 1389} 1390 1391/* 1392 * kevent_continue - continue a kevent syscall after blocking 1393 * 1394 * assume we inherit a use count on the kq fileglob. 1395 */ 1396 1397static void 1398kevent_continue(__unused struct kqueue *kq, void *data, int error) 1399{ 1400 struct _kevent *cont_args; 1401 struct fileproc *fp; 1402 int32_t *retval; 1403 int noutputs; 1404 int fd; 1405 struct proc *p = current_proc(); 1406 1407 cont_args = (struct _kevent *)data; 1408 noutputs = cont_args->eventout; 1409 retval = cont_args->retval; 1410 fd = cont_args->fd; 1411 fp = cont_args->fp; 1412 1413 fp_drop(p, fd, fp, 0); 1414 1415 /* don't restart after signals... */ 1416 if (error == ERESTART) 1417 error = EINTR; 1418 else if (error == EWOULDBLOCK) 1419 error = 0; 1420 if (error == 0) 1421 *retval = noutputs; 1422 unix_syscall_return(error); 1423} 1424 1425/* 1426 * kevent - [syscall] register and wait for kernel events 1427 * 1428 */ 1429int 1430kevent(struct proc *p, struct kevent_args *uap, int32_t *retval) 1431{ 1432 return (kevent_internal(p, 1433 0, 1434 uap->changelist, 1435 uap->nchanges, 1436 uap->eventlist, 1437 uap->nevents, 1438 uap->fd, 1439 uap->timeout, 1440 0, /* no flags from old kevent() call */ 1441 retval)); 1442} 1443 1444int 1445kevent64(struct proc *p, struct kevent64_args *uap, int32_t *retval) 1446{ 1447 return (kevent_internal(p, 1448 1, 1449 uap->changelist, 1450 uap->nchanges, 1451 uap->eventlist, 1452 uap->nevents, 1453 uap->fd, 1454 uap->timeout, 1455 uap->flags, 1456 retval)); 1457} 1458 1459static int 1460kevent_internal(struct proc *p, int iskev64, user_addr_t changelist, 1461 int nchanges, user_addr_t ueventlist, int nevents, int fd, 1462 user_addr_t utimeout, __unused unsigned int flags, 1463 int32_t *retval) 1464{ 1465 struct _kevent *cont_args; 1466 uthread_t ut; 1467 struct kqueue *kq; 1468 struct fileproc *fp; 1469 struct kevent64_s kev; 1470 int error, noutputs; 1471 struct timeval atv; 1472 1473 /* convert timeout to absolute - if we have one */ 1474 if (utimeout != USER_ADDR_NULL) { 1475 struct timeval rtv; 1476 if (IS_64BIT_PROCESS(p)) { 1477 struct user64_timespec ts; 1478 error = copyin(utimeout, &ts, sizeof(ts)); 1479 if ((ts.tv_sec & 0xFFFFFFFF00000000ull) != 0) 1480 error = EINVAL; 1481 else 1482 TIMESPEC_TO_TIMEVAL(&rtv, &ts); 1483 } else { 1484 struct user32_timespec ts; 1485 error = copyin(utimeout, &ts, sizeof(ts)); 1486 TIMESPEC_TO_TIMEVAL(&rtv, &ts); 1487 } 1488 if (error) 1489 return (error); 1490 if (itimerfix(&rtv)) 1491 return (EINVAL); 1492 getmicrouptime(&atv); 1493 timevaladd(&atv, &rtv); 1494 } else { 1495 atv.tv_sec = 0; 1496 atv.tv_usec = 0; 1497 } 1498 1499 /* get a usecount for the kq itself */ 1500 if ((error = fp_getfkq(p, fd, &fp, &kq)) != 0) 1501 return (error); 1502 1503 /* each kq should only be used for events of one type */ 1504 kqlock(kq); 1505 if (kq->kq_state & (KQ_KEV32 | KQ_KEV64)) { 1506 if (((iskev64 && (kq->kq_state & KQ_KEV32)) || 1507 (!iskev64 && (kq->kq_state & KQ_KEV64)))) { 1508 error = EINVAL; 1509 kqunlock(kq); 1510 goto errorout; 1511 } 1512 } else { 1513 kq->kq_state |= (iskev64 ? KQ_KEV64 : KQ_KEV32); 1514 } 1515 kqunlock(kq); 1516 1517 /* register all the change requests the user provided... */ 1518 noutputs = 0; 1519 while (nchanges > 0 && error == 0) { 1520 error = kevent_copyin(&changelist, &kev, p, iskev64); 1521 if (error) 1522 break; 1523 1524 kev.flags &= ~EV_SYSFLAGS; 1525 error = kevent_register(kq, &kev, p); 1526 if ((error || (kev.flags & EV_RECEIPT)) && nevents > 0) { 1527 kev.flags = EV_ERROR; 1528 kev.data = error; 1529 error = kevent_copyout(&kev, &ueventlist, p, iskev64); 1530 if (error == 0) { 1531 nevents--; 1532 noutputs++; 1533 } 1534 } 1535 nchanges--; 1536 } 1537 1538 /* store the continuation/completion data in the uthread */ 1539 ut = (uthread_t)get_bsdthread_info(current_thread()); 1540 cont_args = &ut->uu_kevent.ss_kevent; 1541 cont_args->fp = fp; 1542 cont_args->fd = fd; 1543 cont_args->retval = retval; 1544 cont_args->eventlist = ueventlist; 1545 cont_args->eventcount = nevents; 1546 cont_args->eventout = noutputs; 1547 cont_args->eventsize = iskev64; 1548 1549 if (nevents > 0 && noutputs == 0 && error == 0) 1550 error = kqueue_scan(kq, kevent_callback, 1551 kevent_continue, cont_args, 1552 &atv, p); 1553 kevent_continue(kq, cont_args, error); 1554 1555errorout: 1556 fp_drop(p, fd, fp, 0); 1557 return (error); 1558} 1559 1560 1561/* 1562 * kevent_callback - callback for each individual event 1563 * 1564 * called with nothing locked 1565 * caller holds a reference on the kqueue 1566 */ 1567static int 1568kevent_callback(__unused struct kqueue *kq, struct kevent64_s *kevp, 1569 void *data) 1570{ 1571 struct _kevent *cont_args; 1572 int error; 1573 int iskev64; 1574 1575 cont_args = (struct _kevent *)data; 1576 assert(cont_args->eventout < cont_args->eventcount); 1577 1578 iskev64 = cont_args->eventsize; 1579 1580 /* 1581 * Copy out the appropriate amount of event data for this user. 1582 */ 1583 error = kevent_copyout(kevp, &cont_args->eventlist, current_proc(), 1584 iskev64); 1585 1586 /* 1587 * If there isn't space for additional events, return 1588 * a harmless error to stop the processing here 1589 */ 1590 if (error == 0 && ++cont_args->eventout == cont_args->eventcount) 1591 error = EWOULDBLOCK; 1592 return (error); 1593} 1594 1595/* 1596 * kevent_description - format a description of a kevent for diagnostic output 1597 * 1598 * called with a 128-byte string buffer 1599 */ 1600 1601char * 1602kevent_description(struct kevent64_s *kevp, char *s, size_t n) 1603{ 1604 snprintf(s, n, 1605 "kevent=" 1606 "{.ident=%#llx, .filter=%d, .flags=%#x, .fflags=%#x, .data=%#llx, .udata=%#llx, .ext[0]=%#llx, .ext[1]=%#llx}", 1607 kevp->ident, 1608 kevp->filter, 1609 kevp->flags, 1610 kevp->fflags, 1611 kevp->data, 1612 kevp->udata, 1613 kevp->ext[0], 1614 kevp->ext[1]); 1615 1616 return (s); 1617} 1618 1619/* 1620 * kevent_register - add a new event to a kqueue 1621 * 1622 * Creates a mapping between the event source and 1623 * the kqueue via a knote data structure. 1624 * 1625 * Because many/most the event sources are file 1626 * descriptor related, the knote is linked off 1627 * the filedescriptor table for quick access. 1628 * 1629 * called with nothing locked 1630 * caller holds a reference on the kqueue 1631 */ 1632 1633int 1634kevent_register(struct kqueue *kq, struct kevent64_s *kev, 1635 __unused struct proc *ctxp) 1636{ 1637 struct proc *p = kq->kq_p; 1638 struct filedesc *fdp = p->p_fd; 1639 struct filterops *fops; 1640 struct fileproc *fp = NULL; 1641 struct knote *kn = NULL; 1642 int error = 0; 1643 1644 if (kev->filter < 0) { 1645 if (kev->filter + EVFILT_SYSCOUNT < 0) 1646 return (EINVAL); 1647 fops = sysfilt_ops[~kev->filter]; /* to 0-base index */ 1648 } else { 1649 /* 1650 * XXX 1651 * filter attach routine is responsible for insuring that 1652 * the identifier can be attached to it. 1653 */ 1654 printf("unknown filter: %d\n", kev->filter); 1655 return (EINVAL); 1656 } 1657 1658restart: 1659 /* this iocount needs to be dropped if it is not registered */ 1660 proc_fdlock(p); 1661 if (fops->f_isfd && (error = fp_lookup(p, kev->ident, &fp, 1)) != 0) { 1662 proc_fdunlock(p); 1663 return (error); 1664 } 1665 1666 if (fops->f_isfd) { 1667 /* fd-based knotes are linked off the fd table */ 1668 if (kev->ident < (u_int)fdp->fd_knlistsize) { 1669 SLIST_FOREACH(kn, &fdp->fd_knlist[kev->ident], kn_link) 1670 if (kq == kn->kn_kq && 1671 kev->filter == kn->kn_filter) 1672 break; 1673 } 1674 } else { 1675 /* hash non-fd knotes here too */ 1676 if (fdp->fd_knhashmask != 0) { 1677 struct klist *list; 1678 1679 list = &fdp->fd_knhash[ 1680 KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)]; 1681 SLIST_FOREACH(kn, list, kn_link) 1682 if (kev->ident == kn->kn_id && 1683 kq == kn->kn_kq && 1684 kev->filter == kn->kn_filter) 1685 break; 1686 } 1687 } 1688 1689 /* 1690 * kn now contains the matching knote, or NULL if no match 1691 */ 1692 if (kn == NULL) { 1693 if ((kev->flags & (EV_ADD|EV_DELETE)) == EV_ADD) { 1694 kn = knote_alloc(); 1695 if (kn == NULL) { 1696 proc_fdunlock(p); 1697 error = ENOMEM; 1698 goto done; 1699 } 1700 kn->kn_fp = fp; 1701 kn->kn_kq = kq; 1702 kn->kn_tq = &kq->kq_head; 1703 kn->kn_fop = fops; 1704 kn->kn_sfflags = kev->fflags; 1705 kn->kn_sdata = kev->data; 1706 kev->fflags = 0; 1707 kev->data = 0; 1708 kn->kn_kevent = *kev; 1709 kn->kn_inuse = 1; /* for f_attach() */ 1710 kn->kn_status = KN_ATTACHING; 1711 1712 /* before anyone can find it */ 1713 if (kev->flags & EV_DISABLE) 1714 kn->kn_status |= KN_DISABLED; 1715 1716 error = knote_fdpattach(kn, fdp, p); 1717 proc_fdunlock(p); 1718 1719 if (error) { 1720 knote_free(kn); 1721 goto done; 1722 } 1723 1724 /* 1725 * apply reference count to knote structure, and 1726 * do not release it at the end of this routine. 1727 */ 1728 fp = NULL; 1729 1730 error = fops->f_attach(kn); 1731 1732 kqlock(kq); 1733 1734 if (error != 0) { 1735 /* 1736 * Failed to attach correctly, so drop. 1737 * All other possible users/droppers 1738 * have deferred to us. 1739 */ 1740 kn->kn_status |= KN_DROPPING; 1741 kqunlock(kq); 1742 knote_drop(kn, p); 1743 goto done; 1744 } else if (kn->kn_status & KN_DROPPING) { 1745 /* 1746 * Attach succeeded, but someone else 1747 * deferred their drop - now we have 1748 * to do it for them (after detaching). 1749 */ 1750 kqunlock(kq); 1751 kn->kn_fop->f_detach(kn); 1752 knote_drop(kn, p); 1753 goto done; 1754 } 1755 kn->kn_status &= ~KN_ATTACHING; 1756 kqunlock(kq); 1757 } else { 1758 proc_fdunlock(p); 1759 error = ENOENT; 1760 goto done; 1761 } 1762 } else { 1763 /* existing knote - get kqueue lock */ 1764 kqlock(kq); 1765 proc_fdunlock(p); 1766 1767 if (kev->flags & EV_DELETE) { 1768 knote_dequeue(kn); 1769 kn->kn_status |= KN_DISABLED; 1770 if (kqlock2knotedrop(kq, kn)) { 1771 kn->kn_fop->f_detach(kn); 1772 knote_drop(kn, p); 1773 } 1774 goto done; 1775 } 1776 1777 /* update status flags for existing knote */ 1778 if (kev->flags & EV_DISABLE) { 1779 knote_dequeue(kn); 1780 kn->kn_status |= KN_DISABLED; 1781 } else if (kev->flags & EV_ENABLE) { 1782 kn->kn_status &= ~KN_DISABLED; 1783 if (kn->kn_status & KN_ACTIVE) 1784 knote_enqueue(kn); 1785 } 1786 1787 /* 1788 * The user may change some filter values after the 1789 * initial EV_ADD, but doing so will not reset any 1790 * filter which have already been triggered. 1791 */ 1792 kn->kn_kevent.udata = kev->udata; 1793 if (fops->f_isfd || fops->f_touch == NULL) { 1794 kn->kn_sfflags = kev->fflags; 1795 kn->kn_sdata = kev->data; 1796 } 1797 1798 /* 1799 * If somebody is in the middle of dropping this 1800 * knote - go find/insert a new one. But we have 1801 * wait for this one to go away first. Attaches 1802 * running in parallel may also drop/modify the 1803 * knote. Wait for those to complete as well and 1804 * then start over if we encounter one. 1805 */ 1806 if (!kqlock2knoteusewait(kq, kn)) { 1807 /* kqueue, proc_fdlock both unlocked */ 1808 goto restart; 1809 } 1810 1811 /* 1812 * Call touch routine to notify filter of changes 1813 * in filter values. 1814 */ 1815 if (!fops->f_isfd && fops->f_touch != NULL) 1816 fops->f_touch(kn, kev, EVENT_REGISTER); 1817 } 1818 /* still have use ref on knote */ 1819 1820 /* 1821 * If the knote is not marked to always stay enqueued, 1822 * invoke the filter routine to see if it should be 1823 * enqueued now. 1824 */ 1825 if ((kn->kn_status & KN_STAYQUEUED) == 0 && kn->kn_fop->f_event(kn, 0)) { 1826 if (knoteuse2kqlock(kq, kn)) 1827 knote_activate(kn, 1); 1828 kqunlock(kq); 1829 } else { 1830 knote_put(kn); 1831 } 1832 1833done: 1834 if (fp != NULL) 1835 fp_drop(p, kev->ident, fp, 0); 1836 return (error); 1837} 1838 1839 1840/* 1841 * knote_process - process a triggered event 1842 * 1843 * Validate that it is really still a triggered event 1844 * by calling the filter routines (if necessary). Hold 1845 * a use reference on the knote to avoid it being detached. 1846 * If it is still considered triggered, invoke the callback 1847 * routine provided and move it to the provided inprocess 1848 * queue. 1849 * 1850 * caller holds a reference on the kqueue. 1851 * kqueue locked on entry and exit - but may be dropped 1852 */ 1853static int 1854knote_process(struct knote *kn, 1855 kevent_callback_t callback, 1856 void *data, 1857 struct kqtailq *inprocessp, 1858 struct proc *p) 1859{ 1860 struct kqueue *kq = kn->kn_kq; 1861 struct kevent64_s kev; 1862 int touch; 1863 int result; 1864 int error; 1865 1866 /* 1867 * Determine the kevent state we want to return. 1868 * 1869 * Some event states need to be revalidated before returning 1870 * them, others we take the snapshot at the time the event 1871 * was enqueued. 1872 * 1873 * Events with non-NULL f_touch operations must be touched. 1874 * Triggered events must fill in kev for the callback. 1875 * 1876 * Convert our lock to a use-count and call the event's 1877 * filter routine(s) to update. 1878 */ 1879 if ((kn->kn_status & KN_DISABLED) != 0) { 1880 result = 0; 1881 touch = 0; 1882 } else { 1883 int revalidate; 1884 1885 result = 1; 1886 revalidate = ((kn->kn_status & KN_STAYQUEUED) != 0 || 1887 (kn->kn_flags & EV_ONESHOT) == 0); 1888 touch = (!kn->kn_fop->f_isfd && kn->kn_fop->f_touch != NULL); 1889 1890 if (revalidate || touch) { 1891 if (revalidate) 1892 knote_deactivate(kn); 1893 1894 /* call the filter/touch routines with just a ref */ 1895 if (kqlock2knoteuse(kq, kn)) { 1896 /* if we have to revalidate, call the filter */ 1897 if (revalidate) { 1898 result = kn->kn_fop->f_event(kn, 0); 1899 } 1900 1901 /* 1902 * capture the kevent data - using touch if 1903 * specified 1904 */ 1905 if (result && touch) { 1906 kn->kn_fop->f_touch(kn, &kev, 1907 EVENT_PROCESS); 1908 } 1909 1910 /* 1911 * convert back to a kqlock - bail if the knote 1912 * went away 1913 */ 1914 if (!knoteuse2kqlock(kq, kn)) { 1915 return (EJUSTRETURN); 1916 } else if (result) { 1917 /* 1918 * if revalidated as alive, make sure 1919 * it's active 1920 */ 1921 if (!(kn->kn_status & KN_ACTIVE)) { 1922 knote_activate(kn, 0); 1923 } 1924 1925 /* 1926 * capture all events that occurred 1927 * during filter 1928 */ 1929 if (!touch) { 1930 kev = kn->kn_kevent; 1931 } 1932 1933 } else if ((kn->kn_status & KN_STAYQUEUED) == 0) { 1934 /* 1935 * was already dequeued, so just bail on 1936 * this one 1937 */ 1938 return (EJUSTRETURN); 1939 } 1940 } else { 1941 return (EJUSTRETURN); 1942 } 1943 } else { 1944 kev = kn->kn_kevent; 1945 } 1946 } 1947 1948 /* move knote onto inprocess queue */ 1949 assert(kn->kn_tq == &kq->kq_head); 1950 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 1951 kn->kn_tq = inprocessp; 1952 TAILQ_INSERT_TAIL(inprocessp, kn, kn_tqe); 1953 1954 /* 1955 * Determine how to dispatch the knote for future event handling. 1956 * not-fired: just return (do not callout). 1957 * One-shot: deactivate it. 1958 * Clear: deactivate and clear the state. 1959 * Dispatch: don't clear state, just deactivate it and mark it disabled. 1960 * All others: just leave where they are. 1961 */ 1962 1963 if (result == 0) { 1964 return (EJUSTRETURN); 1965 } else if ((kn->kn_flags & EV_ONESHOT) != 0) { 1966 knote_deactivate(kn); 1967 if (kqlock2knotedrop(kq, kn)) { 1968 kn->kn_fop->f_detach(kn); 1969 knote_drop(kn, p); 1970 } 1971 } else if ((kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) != 0) { 1972 if ((kn->kn_flags & EV_DISPATCH) != 0) { 1973 /* deactivate and disable all dispatch knotes */ 1974 knote_deactivate(kn); 1975 kn->kn_status |= KN_DISABLED; 1976 } else if (!touch || kn->kn_fflags == 0) { 1977 /* only deactivate if nothing since the touch */ 1978 knote_deactivate(kn); 1979 } 1980 if (!touch && (kn->kn_flags & EV_CLEAR) != 0) { 1981 /* manually clear non-touch knotes */ 1982 kn->kn_data = 0; 1983 kn->kn_fflags = 0; 1984 } 1985 kqunlock(kq); 1986 } else { 1987 /* 1988 * leave on inprocess queue. We'll 1989 * move all the remaining ones back 1990 * the kq queue and wakeup any 1991 * waiters when we are done. 1992 */ 1993 kqunlock(kq); 1994 } 1995 1996 /* callback to handle each event as we find it */ 1997 error = (callback)(kq, &kev, data); 1998 1999 kqlock(kq); 2000 return (error); 2001} 2002 2003/* 2004 * Return 0 to indicate that processing should proceed, 2005 * -1 if there is nothing to process. 2006 * 2007 * Called with kqueue locked and returns the same way, 2008 * but may drop lock temporarily. 2009 */ 2010static int 2011kqueue_begin_processing(struct kqueue *kq) 2012{ 2013 for (;;) { 2014 if (kq->kq_count == 0) { 2015 return (-1); 2016 } 2017 2018 /* if someone else is processing the queue, wait */ 2019 if (kq->kq_nprocess != 0) { 2020 wait_queue_assert_wait((wait_queue_t)kq->kq_wqs, 2021 &kq->kq_nprocess, THREAD_UNINT, 0); 2022 kq->kq_state |= KQ_PROCWAIT; 2023 kqunlock(kq); 2024 thread_block(THREAD_CONTINUE_NULL); 2025 kqlock(kq); 2026 } else { 2027 kq->kq_nprocess = 1; 2028 return (0); 2029 } 2030 } 2031} 2032 2033/* 2034 * Called with kqueue lock held. 2035 */ 2036static void 2037kqueue_end_processing(struct kqueue *kq) 2038{ 2039 kq->kq_nprocess = 0; 2040 if (kq->kq_state & KQ_PROCWAIT) { 2041 kq->kq_state &= ~KQ_PROCWAIT; 2042 wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs, 2043 &kq->kq_nprocess, THREAD_AWAKENED); 2044 } 2045} 2046 2047/* 2048 * kqueue_process - process the triggered events in a kqueue 2049 * 2050 * Walk the queued knotes and validate that they are 2051 * really still triggered events by calling the filter 2052 * routines (if necessary). Hold a use reference on 2053 * the knote to avoid it being detached. For each event 2054 * that is still considered triggered, invoke the 2055 * callback routine provided. 2056 * 2057 * caller holds a reference on the kqueue. 2058 * kqueue locked on entry and exit - but may be dropped 2059 * kqueue list locked (held for duration of call) 2060 */ 2061 2062static int 2063kqueue_process(struct kqueue *kq, 2064 kevent_callback_t callback, 2065 void *data, 2066 int *countp, 2067 struct proc *p) 2068{ 2069 struct kqtailq inprocess; 2070 struct knote *kn; 2071 int nevents; 2072 int error; 2073 2074 TAILQ_INIT(&inprocess); 2075 2076 if (kqueue_begin_processing(kq) == -1) { 2077 *countp = 0; 2078 /* Nothing to process */ 2079 return (0); 2080 } 2081 2082 /* 2083 * Clear any pre-posted status from previous runs, so we 2084 * only detect events that occur during this run. 2085 */ 2086 wait_queue_sub_clearrefs(kq->kq_wqs); 2087 2088 /* 2089 * loop through the enqueued knotes, processing each one and 2090 * revalidating those that need it. As they are processed, 2091 * they get moved to the inprocess queue (so the loop can end). 2092 */ 2093 error = 0; 2094 nevents = 0; 2095 2096 while (error == 0 && 2097 (kn = TAILQ_FIRST(&kq->kq_head)) != NULL) { 2098 error = knote_process(kn, callback, data, &inprocess, p); 2099 if (error == EJUSTRETURN) 2100 error = 0; 2101 else 2102 nevents++; 2103 } 2104 2105 /* 2106 * With the kqueue still locked, move any knotes 2107 * remaining on the inprocess queue back to the 2108 * kq's queue and wake up any waiters. 2109 */ 2110 while ((kn = TAILQ_FIRST(&inprocess)) != NULL) { 2111 assert(kn->kn_tq == &inprocess); 2112 TAILQ_REMOVE(&inprocess, kn, kn_tqe); 2113 kn->kn_tq = &kq->kq_head; 2114 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); 2115 } 2116 2117 kqueue_end_processing(kq); 2118 2119 *countp = nevents; 2120 return (error); 2121} 2122 2123 2124static void 2125kqueue_scan_continue(void *data, wait_result_t wait_result) 2126{ 2127 thread_t self = current_thread(); 2128 uthread_t ut = (uthread_t)get_bsdthread_info(self); 2129 struct _kqueue_scan * cont_args = &ut->uu_kevent.ss_kqueue_scan; 2130 struct kqueue *kq = (struct kqueue *)data; 2131 int error; 2132 int count; 2133 2134 /* convert the (previous) wait_result to a proper error */ 2135 switch (wait_result) { 2136 case THREAD_AWAKENED: 2137 kqlock(kq); 2138 error = kqueue_process(kq, cont_args->call, cont_args, &count, 2139 current_proc()); 2140 if (error == 0 && count == 0) { 2141 wait_queue_assert_wait((wait_queue_t)kq->kq_wqs, 2142 KQ_EVENT, THREAD_ABORTSAFE, cont_args->deadline); 2143 kq->kq_state |= KQ_SLEEP; 2144 kqunlock(kq); 2145 thread_block_parameter(kqueue_scan_continue, kq); 2146 /* NOTREACHED */ 2147 } 2148 kqunlock(kq); 2149 break; 2150 case THREAD_TIMED_OUT: 2151 error = EWOULDBLOCK; 2152 break; 2153 case THREAD_INTERRUPTED: 2154 error = EINTR; 2155 break; 2156 default: 2157 panic("%s: - invalid wait_result (%d)", __func__, 2158 wait_result); 2159 error = 0; 2160 } 2161 2162 /* call the continuation with the results */ 2163 assert(cont_args->cont != NULL); 2164 (cont_args->cont)(kq, cont_args->data, error); 2165} 2166 2167 2168/* 2169 * kqueue_scan - scan and wait for events in a kqueue 2170 * 2171 * Process the triggered events in a kqueue. 2172 * 2173 * If there are no events triggered arrange to 2174 * wait for them. If the caller provided a 2175 * continuation routine, then kevent_scan will 2176 * also. 2177 * 2178 * The callback routine must be valid. 2179 * The caller must hold a use-count reference on the kq. 2180 */ 2181 2182int 2183kqueue_scan(struct kqueue *kq, 2184 kevent_callback_t callback, 2185 kqueue_continue_t continuation, 2186 void *data, 2187 struct timeval *atvp, 2188 struct proc *p) 2189{ 2190 thread_continue_t cont = THREAD_CONTINUE_NULL; 2191 uint64_t deadline; 2192 int error; 2193 int first; 2194 2195 assert(callback != NULL); 2196 2197 first = 1; 2198 for (;;) { 2199 wait_result_t wait_result; 2200 int count; 2201 2202 /* 2203 * Make a pass through the kq to find events already 2204 * triggered. 2205 */ 2206 kqlock(kq); 2207 error = kqueue_process(kq, callback, data, &count, p); 2208 if (error || count) 2209 break; /* lock still held */ 2210 2211 /* looks like we have to consider blocking */ 2212 if (first) { 2213 first = 0; 2214 /* convert the timeout to a deadline once */ 2215 if (atvp->tv_sec || atvp->tv_usec) { 2216 uint64_t now; 2217 2218 clock_get_uptime(&now); 2219 nanoseconds_to_absolutetime((uint64_t)atvp->tv_sec * NSEC_PER_SEC + 2220 atvp->tv_usec * (long)NSEC_PER_USEC, 2221 &deadline); 2222 if (now >= deadline) { 2223 /* non-blocking call */ 2224 error = EWOULDBLOCK; 2225 break; /* lock still held */ 2226 } 2227 deadline -= now; 2228 clock_absolutetime_interval_to_deadline(deadline, &deadline); 2229 } else { 2230 deadline = 0; /* block forever */ 2231 } 2232 2233 if (continuation) { 2234 uthread_t ut = (uthread_t)get_bsdthread_info(current_thread()); 2235 struct _kqueue_scan *cont_args = &ut->uu_kevent.ss_kqueue_scan; 2236 2237 cont_args->call = callback; 2238 cont_args->cont = continuation; 2239 cont_args->deadline = deadline; 2240 cont_args->data = data; 2241 cont = kqueue_scan_continue; 2242 } 2243 } 2244 2245 /* go ahead and wait */ 2246 wait_queue_assert_wait_with_leeway((wait_queue_t)kq->kq_wqs, 2247 KQ_EVENT, THREAD_ABORTSAFE, TIMEOUT_URGENCY_USER_NORMAL, 2248 deadline, 0); 2249 kq->kq_state |= KQ_SLEEP; 2250 kqunlock(kq); 2251 wait_result = thread_block_parameter(cont, kq); 2252 /* NOTREACHED if (continuation != NULL) */ 2253 2254 switch (wait_result) { 2255 case THREAD_AWAKENED: 2256 continue; 2257 case THREAD_TIMED_OUT: 2258 return (EWOULDBLOCK); 2259 case THREAD_INTERRUPTED: 2260 return (EINTR); 2261 default: 2262 panic("%s: - bad wait_result (%d)", __func__, 2263 wait_result); 2264 error = 0; 2265 } 2266 } 2267 kqunlock(kq); 2268 return (error); 2269} 2270 2271 2272/* 2273 * XXX 2274 * This could be expanded to call kqueue_scan, if desired. 2275 */ 2276/*ARGSUSED*/ 2277static int 2278kqueue_read(__unused struct fileproc *fp, 2279 __unused struct uio *uio, 2280 __unused int flags, 2281 __unused vfs_context_t ctx) 2282{ 2283 return (ENXIO); 2284} 2285 2286/*ARGSUSED*/ 2287static int 2288kqueue_write(__unused struct fileproc *fp, 2289 __unused struct uio *uio, 2290 __unused int flags, 2291 __unused vfs_context_t ctx) 2292{ 2293 return (ENXIO); 2294} 2295 2296/*ARGSUSED*/ 2297static int 2298kqueue_ioctl(__unused struct fileproc *fp, 2299 __unused u_long com, 2300 __unused caddr_t data, 2301 __unused vfs_context_t ctx) 2302{ 2303 return (ENOTTY); 2304} 2305 2306/*ARGSUSED*/ 2307static int 2308kqueue_select(struct fileproc *fp, int which, void *wql, 2309 __unused vfs_context_t ctx) 2310{ 2311 struct kqueue *kq = (struct kqueue *)fp->f_data; 2312 struct knote *kn; 2313 struct kqtailq inprocessq; 2314 int retnum = 0; 2315 2316 if (which != FREAD) 2317 return (0); 2318 2319 TAILQ_INIT(&inprocessq); 2320 2321 kqlock(kq); 2322 /* 2323 * If this is the first pass, link the wait queue associated with the 2324 * the kqueue onto the wait queue set for the select(). Normally we 2325 * use selrecord() for this, but it uses the wait queue within the 2326 * selinfo structure and we need to use the main one for the kqueue to 2327 * catch events from KN_STAYQUEUED sources. So we do the linkage manually. 2328 * (The select() call will unlink them when it ends). 2329 */ 2330 if (wql != NULL) { 2331 thread_t cur_act = current_thread(); 2332 struct uthread * ut = get_bsdthread_info(cur_act); 2333 2334 kq->kq_state |= KQ_SEL; 2335 wait_queue_link_noalloc((wait_queue_t)kq->kq_wqs, ut->uu_wqset, 2336 (wait_queue_link_t)wql); 2337 } 2338 2339 if (kqueue_begin_processing(kq) == -1) { 2340 kqunlock(kq); 2341 return (0); 2342 } 2343 2344 if (kq->kq_count != 0) { 2345 /* 2346 * there is something queued - but it might be a 2347 * KN_STAYQUEUED knote, which may or may not have 2348 * any events pending. So, we have to walk the 2349 * list of knotes to see, and peek at the stay- 2350 * queued ones to be really sure. 2351 */ 2352 while ((kn = (struct knote *)TAILQ_FIRST(&kq->kq_head)) != NULL) { 2353 if ((kn->kn_status & KN_STAYQUEUED) == 0) { 2354 retnum = 1; 2355 goto out; 2356 } 2357 2358 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 2359 TAILQ_INSERT_TAIL(&inprocessq, kn, kn_tqe); 2360 2361 if (kqlock2knoteuse(kq, kn)) { 2362 unsigned peek; 2363 2364 peek = kn->kn_fop->f_peek(kn); 2365 if (knoteuse2kqlock(kq, kn)) { 2366 if (peek > 0) { 2367 retnum = 1; 2368 goto out; 2369 } 2370 } else { 2371 retnum = 0; 2372 } 2373 } 2374 } 2375 } 2376 2377out: 2378 /* Return knotes to active queue */ 2379 while ((kn = TAILQ_FIRST(&inprocessq)) != NULL) { 2380 TAILQ_REMOVE(&inprocessq, kn, kn_tqe); 2381 kn->kn_tq = &kq->kq_head; 2382 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); 2383 } 2384 2385 kqueue_end_processing(kq); 2386 kqunlock(kq); 2387 return (retnum); 2388} 2389 2390/* 2391 * kqueue_close - 2392 */ 2393/*ARGSUSED*/ 2394static int 2395kqueue_close(struct fileglob *fg, __unused vfs_context_t ctx) 2396{ 2397 struct kqueue *kq = (struct kqueue *)fg->fg_data; 2398 2399 kqueue_dealloc(kq); 2400 fg->fg_data = NULL; 2401 return (0); 2402} 2403 2404/*ARGSUSED*/ 2405/* 2406 * The callers has taken a use-count reference on this kqueue and will donate it 2407 * to the kqueue we are being added to. This keeps the kqueue from closing until 2408 * that relationship is torn down. 2409 */ 2410static int 2411kqueue_kqfilter(__unused struct fileproc *fp, struct knote *kn, __unused vfs_context_t ctx) 2412{ 2413 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data; 2414 struct kqueue *parentkq = kn->kn_kq; 2415 2416 if (parentkq == kq || 2417 kn->kn_filter != EVFILT_READ) 2418 return (1); 2419 2420 /* 2421 * We have to avoid creating a cycle when nesting kqueues 2422 * inside another. Rather than trying to walk the whole 2423 * potential DAG of nested kqueues, we just use a simple 2424 * ceiling protocol. When a kqueue is inserted into another, 2425 * we check that the (future) parent is not already nested 2426 * into another kqueue at a lower level than the potenial 2427 * child (because it could indicate a cycle). If that test 2428 * passes, we just mark the nesting levels accordingly. 2429 */ 2430 2431 kqlock(parentkq); 2432 if (parentkq->kq_level > 0 && 2433 parentkq->kq_level < kq->kq_level) 2434 { 2435 kqunlock(parentkq); 2436 return (1); 2437 } else { 2438 /* set parent level appropriately */ 2439 if (parentkq->kq_level == 0) 2440 parentkq->kq_level = 2; 2441 if (parentkq->kq_level < kq->kq_level + 1) 2442 parentkq->kq_level = kq->kq_level + 1; 2443 kqunlock(parentkq); 2444 2445 kn->kn_fop = &kqread_filtops; 2446 kqlock(kq); 2447 KNOTE_ATTACH(&kq->kq_sel.si_note, kn); 2448 /* indicate nesting in child, if needed */ 2449 if (kq->kq_level == 0) 2450 kq->kq_level = 1; 2451 kqunlock(kq); 2452 return (0); 2453 } 2454} 2455 2456/* 2457 * kqueue_drain - called when kq is closed 2458 */ 2459/*ARGSUSED*/ 2460static int 2461kqueue_drain(struct fileproc *fp, __unused vfs_context_t ctx) 2462{ 2463 struct kqueue *kq = (struct kqueue *)fp->f_fglob->fg_data; 2464 kqlock(kq); 2465 kqueue_wakeup(kq, 1); 2466 kqunlock(kq); 2467 return (0); 2468} 2469 2470/*ARGSUSED*/ 2471int 2472kqueue_stat(struct fileproc *fp, void *ub, int isstat64, __unused vfs_context_t ctx) 2473{ 2474 2475 struct kqueue *kq = (struct kqueue *)fp->f_data; 2476 if (isstat64 != 0) { 2477 struct stat64 *sb64 = (struct stat64 *)ub; 2478 2479 bzero((void *)sb64, sizeof(*sb64)); 2480 sb64->st_size = kq->kq_count; 2481 if (kq->kq_state & KQ_KEV64) 2482 sb64->st_blksize = sizeof(struct kevent64_s); 2483 else 2484 sb64->st_blksize = sizeof(struct kevent); 2485 sb64->st_mode = S_IFIFO; 2486 } else { 2487 struct stat *sb = (struct stat *)ub; 2488 2489 bzero((void *)sb, sizeof(*sb)); 2490 sb->st_size = kq->kq_count; 2491 if (kq->kq_state & KQ_KEV64) 2492 sb->st_blksize = sizeof(struct kevent64_s); 2493 else 2494 sb->st_blksize = sizeof(struct kevent); 2495 sb->st_mode = S_IFIFO; 2496 } 2497 2498 return (0); 2499} 2500 2501/* 2502 * Called with the kqueue locked 2503 */ 2504static void 2505kqueue_wakeup(struct kqueue *kq, int closed) 2506{ 2507 if ((kq->kq_state & (KQ_SLEEP | KQ_SEL)) != 0 || kq->kq_nprocess > 0) { 2508 kq->kq_state &= ~(KQ_SLEEP | KQ_SEL); 2509 wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs, KQ_EVENT, 2510 (closed) ? THREAD_INTERRUPTED : THREAD_AWAKENED); 2511 } 2512} 2513 2514void 2515klist_init(struct klist *list) 2516{ 2517 SLIST_INIT(list); 2518} 2519 2520 2521/* 2522 * Query/Post each knote in the object's list 2523 * 2524 * The object lock protects the list. It is assumed 2525 * that the filter/event routine for the object can 2526 * determine that the object is already locked (via 2527 * the hint) and not deadlock itself. 2528 * 2529 * The object lock should also hold off pending 2530 * detach/drop operations. But we'll prevent it here 2531 * too - just in case. 2532 */ 2533void 2534knote(struct klist *list, long hint) 2535{ 2536 struct knote *kn; 2537 2538 SLIST_FOREACH(kn, list, kn_selnext) { 2539 struct kqueue *kq = kn->kn_kq; 2540 2541 kqlock(kq); 2542 if (kqlock2knoteuse(kq, kn)) { 2543 int result; 2544 2545 /* call the event with only a use count */ 2546 result = kn->kn_fop->f_event(kn, hint); 2547 2548 /* if its not going away and triggered */ 2549 if (knoteuse2kqlock(kq, kn) && result) 2550 knote_activate(kn, 1); 2551 /* lock held again */ 2552 } 2553 kqunlock(kq); 2554 } 2555} 2556 2557/* 2558 * attach a knote to the specified list. Return true if this is the first entry. 2559 * The list is protected by whatever lock the object it is associated with uses. 2560 */ 2561int 2562knote_attach(struct klist *list, struct knote *kn) 2563{ 2564 int ret = SLIST_EMPTY(list); 2565 SLIST_INSERT_HEAD(list, kn, kn_selnext); 2566 return (ret); 2567} 2568 2569/* 2570 * detach a knote from the specified list. Return true if that was the last entry. 2571 * The list is protected by whatever lock the object it is associated with uses. 2572 */ 2573int 2574knote_detach(struct klist *list, struct knote *kn) 2575{ 2576 SLIST_REMOVE(list, kn, knote, kn_selnext); 2577 return (SLIST_EMPTY(list)); 2578} 2579 2580/* 2581 * For a given knote, link a provided wait queue directly with the kqueue. 2582 * Wakeups will happen via recursive wait queue support. But nothing will move 2583 * the knote to the active list at wakeup (nothing calls knote()). Instead, 2584 * we permanently enqueue them here. 2585 * 2586 * kqueue and knote references are held by caller. 2587 * 2588 * caller provides the wait queue link structure. 2589 */ 2590int 2591knote_link_wait_queue(struct knote *kn, struct wait_queue *wq, wait_queue_link_t wql) 2592{ 2593 struct kqueue *kq = kn->kn_kq; 2594 kern_return_t kr; 2595 2596 kr = wait_queue_link_noalloc(wq, kq->kq_wqs, wql); 2597 if (kr == KERN_SUCCESS) { 2598 knote_markstayqueued(kn); 2599 return (0); 2600 } else { 2601 return (EINVAL); 2602 } 2603} 2604 2605/* 2606 * Unlink the provided wait queue from the kqueue associated with a knote. 2607 * Also remove it from the magic list of directly attached knotes. 2608 * 2609 * Note that the unlink may have already happened from the other side, so 2610 * ignore any failures to unlink and just remove it from the kqueue list. 2611 * 2612 * On success, caller is responsible for the link structure 2613 */ 2614int 2615knote_unlink_wait_queue(struct knote *kn, struct wait_queue *wq, wait_queue_link_t *wqlp) 2616{ 2617 struct kqueue *kq = kn->kn_kq; 2618 kern_return_t kr; 2619 2620 kr = wait_queue_unlink_nofree(wq, kq->kq_wqs, wqlp); 2621 kqlock(kq); 2622 kn->kn_status &= ~KN_STAYQUEUED; 2623 knote_dequeue(kn); 2624 kqunlock(kq); 2625 return ((kr != KERN_SUCCESS) ? EINVAL : 0); 2626} 2627 2628/* 2629 * remove all knotes referencing a specified fd 2630 * 2631 * Essentially an inlined knote_remove & knote_drop 2632 * when we know for sure that the thing is a file 2633 * 2634 * Entered with the proc_fd lock already held. 2635 * It returns the same way, but may drop it temporarily. 2636 */ 2637void 2638knote_fdclose(struct proc *p, int fd) 2639{ 2640 struct filedesc *fdp = p->p_fd; 2641 struct klist *list; 2642 struct knote *kn; 2643 2644 list = &fdp->fd_knlist[fd]; 2645 while ((kn = SLIST_FIRST(list)) != NULL) { 2646 struct kqueue *kq = kn->kn_kq; 2647 2648 if (kq->kq_p != p) 2649 panic("%s: proc mismatch (kq->kq_p=%p != p=%p)", 2650 __func__, kq->kq_p, p); 2651 2652 kqlock(kq); 2653 proc_fdunlock(p); 2654 2655 /* 2656 * Convert the lock to a drop ref. 2657 * If we get it, go ahead and drop it. 2658 * Otherwise, we waited for it to 2659 * be dropped by the other guy, so 2660 * it is safe to move on in the list. 2661 */ 2662 if (kqlock2knotedrop(kq, kn)) { 2663 kn->kn_fop->f_detach(kn); 2664 knote_drop(kn, p); 2665 } 2666 2667 proc_fdlock(p); 2668 2669 /* the fd tables may have changed - start over */ 2670 list = &fdp->fd_knlist[fd]; 2671 } 2672} 2673 2674/* proc_fdlock held on entry (and exit) */ 2675static int 2676knote_fdpattach(struct knote *kn, struct filedesc *fdp, struct proc *p) 2677{ 2678 struct klist *list = NULL; 2679 2680 if (! kn->kn_fop->f_isfd) { 2681 if (fdp->fd_knhashmask == 0) 2682 fdp->fd_knhash = hashinit(CONFIG_KN_HASHSIZE, M_KQUEUE, 2683 &fdp->fd_knhashmask); 2684 list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)]; 2685 } else { 2686 if ((u_int)fdp->fd_knlistsize <= kn->kn_id) { 2687 u_int size = 0; 2688 2689 if (kn->kn_id >= (uint64_t)p->p_rlimit[RLIMIT_NOFILE].rlim_cur 2690 || kn->kn_id >= (uint64_t)maxfiles) 2691 return (EINVAL); 2692 2693 /* have to grow the fd_knlist */ 2694 size = fdp->fd_knlistsize; 2695 while (size <= kn->kn_id) 2696 size += KQEXTENT; 2697 2698 if (size >= (UINT_MAX/sizeof(struct klist *))) 2699 return (EINVAL); 2700 2701 MALLOC(list, struct klist *, 2702 size * sizeof(struct klist *), M_KQUEUE, M_WAITOK); 2703 if (list == NULL) 2704 return (ENOMEM); 2705 2706 bcopy((caddr_t)fdp->fd_knlist, (caddr_t)list, 2707 fdp->fd_knlistsize * sizeof(struct klist *)); 2708 bzero((caddr_t)list + 2709 fdp->fd_knlistsize * sizeof(struct klist *), 2710 (size - fdp->fd_knlistsize) * sizeof(struct klist *)); 2711 FREE(fdp->fd_knlist, M_KQUEUE); 2712 fdp->fd_knlist = list; 2713 fdp->fd_knlistsize = size; 2714 } 2715 list = &fdp->fd_knlist[kn->kn_id]; 2716 } 2717 SLIST_INSERT_HEAD(list, kn, kn_link); 2718 return (0); 2719} 2720 2721 2722 2723/* 2724 * should be called at spl == 0, since we don't want to hold spl 2725 * while calling fdrop and free. 2726 */ 2727static void 2728knote_drop(struct knote *kn, __unused struct proc *ctxp) 2729{ 2730 struct kqueue *kq = kn->kn_kq; 2731 struct proc *p = kq->kq_p; 2732 struct filedesc *fdp = p->p_fd; 2733 struct klist *list; 2734 int needswakeup; 2735 2736 proc_fdlock(p); 2737 if (kn->kn_fop->f_isfd) 2738 list = &fdp->fd_knlist[kn->kn_id]; 2739 else 2740 list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)]; 2741 2742 SLIST_REMOVE(list, kn, knote, kn_link); 2743 kqlock(kq); 2744 knote_dequeue(kn); 2745 needswakeup = (kn->kn_status & KN_USEWAIT); 2746 kqunlock(kq); 2747 proc_fdunlock(p); 2748 2749 if (needswakeup) 2750 wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs, &kn->kn_status, 2751 THREAD_AWAKENED); 2752 2753 if (kn->kn_fop->f_isfd) 2754 fp_drop(p, kn->kn_id, kn->kn_fp, 0); 2755 2756 knote_free(kn); 2757} 2758 2759/* called with kqueue lock held */ 2760static void 2761knote_activate(struct knote *kn, int propagate) 2762{ 2763 struct kqueue *kq = kn->kn_kq; 2764 2765 kn->kn_status |= KN_ACTIVE; 2766 knote_enqueue(kn); 2767 kqueue_wakeup(kq, 0); 2768 2769 /* this is a real event: wake up the parent kq, too */ 2770 if (propagate) 2771 KNOTE(&kq->kq_sel.si_note, 0); 2772} 2773 2774/* called with kqueue lock held */ 2775static void 2776knote_deactivate(struct knote *kn) 2777{ 2778 kn->kn_status &= ~KN_ACTIVE; 2779 knote_dequeue(kn); 2780} 2781 2782/* called with kqueue lock held */ 2783static void 2784knote_enqueue(struct knote *kn) 2785{ 2786 if ((kn->kn_status & (KN_QUEUED | KN_STAYQUEUED)) == KN_STAYQUEUED || 2787 (kn->kn_status & (KN_QUEUED | KN_STAYQUEUED | KN_DISABLED)) == 0) { 2788 struct kqtailq *tq = kn->kn_tq; 2789 struct kqueue *kq = kn->kn_kq; 2790 2791 TAILQ_INSERT_TAIL(tq, kn, kn_tqe); 2792 kn->kn_status |= KN_QUEUED; 2793 kq->kq_count++; 2794 } 2795} 2796 2797/* called with kqueue lock held */ 2798static void 2799knote_dequeue(struct knote *kn) 2800{ 2801 struct kqueue *kq = kn->kn_kq; 2802 2803 if ((kn->kn_status & (KN_QUEUED | KN_STAYQUEUED)) == KN_QUEUED) { 2804 struct kqtailq *tq = kn->kn_tq; 2805 2806 TAILQ_REMOVE(tq, kn, kn_tqe); 2807 kn->kn_tq = &kq->kq_head; 2808 kn->kn_status &= ~KN_QUEUED; 2809 kq->kq_count--; 2810 } 2811} 2812 2813void 2814knote_init(void) 2815{ 2816 knote_zone = zinit(sizeof(struct knote), 8192*sizeof(struct knote), 2817 8192, "knote zone"); 2818 2819 /* allocate kq lock group attribute and group */ 2820 kq_lck_grp_attr = lck_grp_attr_alloc_init(); 2821 2822 kq_lck_grp = lck_grp_alloc_init("kqueue", kq_lck_grp_attr); 2823 2824 /* Allocate kq lock attribute */ 2825 kq_lck_attr = lck_attr_alloc_init(); 2826 2827 /* Initialize the timer filter lock */ 2828 lck_mtx_init(&_filt_timerlock, kq_lck_grp, kq_lck_attr); 2829 2830#if VM_PRESSURE_EVENTS 2831 /* Initialize the vm pressure list lock */ 2832 vm_pressure_init(kq_lck_grp, kq_lck_attr); 2833#endif 2834 2835#if CONFIG_MEMORYSTATUS 2836 /* Initialize the memorystatus list lock */ 2837 memorystatus_kevent_init(kq_lck_grp, kq_lck_attr); 2838#endif 2839} 2840SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL) 2841 2842static struct knote * 2843knote_alloc(void) 2844{ 2845 return ((struct knote *)zalloc(knote_zone)); 2846} 2847 2848static void 2849knote_free(struct knote *kn) 2850{ 2851 zfree(knote_zone, kn); 2852} 2853 2854#if SOCKETS 2855#include <sys/param.h> 2856#include <sys/socket.h> 2857#include <sys/protosw.h> 2858#include <sys/domain.h> 2859#include <sys/mbuf.h> 2860#include <sys/kern_event.h> 2861#include <sys/malloc.h> 2862#include <sys/sys_domain.h> 2863#include <sys/syslog.h> 2864 2865static lck_grp_attr_t *kev_lck_grp_attr; 2866static lck_attr_t *kev_lck_attr; 2867static lck_grp_t *kev_lck_grp; 2868static decl_lck_rw_data(,kev_lck_data); 2869static lck_rw_t *kev_rwlock = &kev_lck_data; 2870 2871static int kev_attach(struct socket *so, int proto, struct proc *p); 2872static int kev_detach(struct socket *so); 2873static int kev_control(struct socket *so, u_long cmd, caddr_t data, 2874 struct ifnet *ifp, struct proc *p); 2875static lck_mtx_t * event_getlock(struct socket *, int); 2876static int event_lock(struct socket *, int, void *); 2877static int event_unlock(struct socket *, int, void *); 2878 2879static int event_sofreelastref(struct socket *); 2880static void kev_delete(struct kern_event_pcb *); 2881 2882static struct pr_usrreqs event_usrreqs = { 2883 .pru_attach = kev_attach, 2884 .pru_control = kev_control, 2885 .pru_detach = kev_detach, 2886 .pru_soreceive = soreceive, 2887}; 2888 2889static struct protosw eventsw[] = { 2890{ 2891 .pr_type = SOCK_RAW, 2892 .pr_protocol = SYSPROTO_EVENT, 2893 .pr_flags = PR_ATOMIC, 2894 .pr_usrreqs = &event_usrreqs, 2895 .pr_lock = event_lock, 2896 .pr_unlock = event_unlock, 2897 .pr_getlock = event_getlock, 2898} 2899}; 2900 2901static lck_mtx_t * 2902event_getlock(struct socket *so, int locktype) 2903{ 2904#pragma unused(locktype) 2905 struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb; 2906 2907 if (so->so_pcb != NULL) { 2908 if (so->so_usecount < 0) 2909 panic("%s: so=%p usecount=%d lrh= %s\n", __func__, 2910 so, so->so_usecount, solockhistory_nr(so)); 2911 /* NOTREACHED */ 2912 } else { 2913 panic("%s: so=%p NULL NO so_pcb %s\n", __func__, 2914 so, solockhistory_nr(so)); 2915 /* NOTREACHED */ 2916 } 2917 return (&ev_pcb->evp_mtx); 2918} 2919 2920static int 2921event_lock(struct socket *so, int refcount, void *lr) 2922{ 2923 void *lr_saved; 2924 2925 if (lr == NULL) 2926 lr_saved = __builtin_return_address(0); 2927 else 2928 lr_saved = lr; 2929 2930 if (so->so_pcb != NULL) { 2931 lck_mtx_lock(&((struct kern_event_pcb *)so->so_pcb)->evp_mtx); 2932 } else { 2933 panic("%s: so=%p NO PCB! lr=%p lrh= %s\n", __func__, 2934 so, lr_saved, solockhistory_nr(so)); 2935 /* NOTREACHED */ 2936 } 2937 2938 if (so->so_usecount < 0) { 2939 panic("%s: so=%p so_pcb=%p lr=%p ref=%d lrh= %s\n", __func__, 2940 so, so->so_pcb, lr_saved, so->so_usecount, 2941 solockhistory_nr(so)); 2942 /* NOTREACHED */ 2943 } 2944 2945 if (refcount) 2946 so->so_usecount++; 2947 2948 so->lock_lr[so->next_lock_lr] = lr_saved; 2949 so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX; 2950 return (0); 2951} 2952 2953static int 2954event_unlock(struct socket *so, int refcount, void *lr) 2955{ 2956 void *lr_saved; 2957 lck_mtx_t *mutex_held; 2958 2959 if (lr == NULL) 2960 lr_saved = __builtin_return_address(0); 2961 else 2962 lr_saved = lr; 2963 2964 if (refcount) 2965 so->so_usecount--; 2966 2967 if (so->so_usecount < 0) { 2968 panic("%s: so=%p usecount=%d lrh= %s\n", __func__, 2969 so, so->so_usecount, solockhistory_nr(so)); 2970 /* NOTREACHED */ 2971 } 2972 if (so->so_pcb == NULL) { 2973 panic("%s: so=%p NO PCB usecount=%d lr=%p lrh= %s\n", __func__, 2974 so, so->so_usecount, (void *)lr_saved, 2975 solockhistory_nr(so)); 2976 /* NOTREACHED */ 2977 } 2978 mutex_held = (&((struct kern_event_pcb *)so->so_pcb)->evp_mtx); 2979 2980 lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED); 2981 so->unlock_lr[so->next_unlock_lr] = lr_saved; 2982 so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX; 2983 2984 if (so->so_usecount == 0) { 2985 VERIFY(so->so_flags & SOF_PCBCLEARING); 2986 event_sofreelastref(so); 2987 } else { 2988 lck_mtx_unlock(mutex_held); 2989 } 2990 2991 return (0); 2992} 2993 2994static int 2995event_sofreelastref(struct socket *so) 2996{ 2997 struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *)so->so_pcb; 2998 2999 lck_mtx_assert(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_OWNED); 3000 3001 so->so_pcb = NULL; 3002 3003 /* 3004 * Disable upcall in the event another thread is in kev_post_msg() 3005 * appending record to the receive socket buffer, since sbwakeup() 3006 * may release the socket lock otherwise. 3007 */ 3008 so->so_rcv.sb_flags &= ~SB_UPCALL; 3009 so->so_snd.sb_flags &= ~SB_UPCALL; 3010 so->so_event = NULL; 3011 lck_mtx_unlock(&(ev_pcb->evp_mtx)); 3012 3013 lck_mtx_assert(&(ev_pcb->evp_mtx), LCK_MTX_ASSERT_NOTOWNED); 3014 lck_rw_lock_exclusive(kev_rwlock); 3015 LIST_REMOVE(ev_pcb, evp_link); 3016 lck_rw_done(kev_rwlock); 3017 kev_delete(ev_pcb); 3018 3019 sofreelastref(so, 1); 3020 return (0); 3021} 3022 3023static int event_proto_count = (sizeof (eventsw) / sizeof (struct protosw)); 3024 3025static 3026struct kern_event_head kern_event_head; 3027 3028static u_int32_t static_event_id = 0; 3029 3030#define EVPCB_ZONE_MAX 65536 3031#define EVPCB_ZONE_NAME "kerneventpcb" 3032static struct zone *ev_pcb_zone; 3033 3034/* 3035 * Install the protosw's for the NKE manager. Invoked at extension load time 3036 */ 3037void 3038kern_event_init(struct domain *dp) 3039{ 3040 struct protosw *pr; 3041 int i; 3042 3043 VERIFY(!(dp->dom_flags & DOM_INITIALIZED)); 3044 VERIFY(dp == systemdomain); 3045 3046 kev_lck_grp_attr = lck_grp_attr_alloc_init(); 3047 if (kev_lck_grp_attr == NULL) { 3048 panic("%s: lck_grp_attr_alloc_init failed\n", __func__); 3049 /* NOTREACHED */ 3050 } 3051 3052 kev_lck_grp = lck_grp_alloc_init("Kernel Event Protocol", 3053 kev_lck_grp_attr); 3054 if (kev_lck_grp == NULL) { 3055 panic("%s: lck_grp_alloc_init failed\n", __func__); 3056 /* NOTREACHED */ 3057 } 3058 3059 kev_lck_attr = lck_attr_alloc_init(); 3060 if (kev_lck_attr == NULL) { 3061 panic("%s: lck_attr_alloc_init failed\n", __func__); 3062 /* NOTREACHED */ 3063 } 3064 3065 lck_rw_init(kev_rwlock, kev_lck_grp, kev_lck_attr); 3066 if (kev_rwlock == NULL) { 3067 panic("%s: lck_mtx_alloc_init failed\n", __func__); 3068 /* NOTREACHED */ 3069 } 3070 3071 for (i = 0, pr = &eventsw[0]; i < event_proto_count; i++, pr++) 3072 net_add_proto(pr, dp, 1); 3073 3074 ev_pcb_zone = zinit(sizeof(struct kern_event_pcb), 3075 EVPCB_ZONE_MAX * sizeof(struct kern_event_pcb), 0, EVPCB_ZONE_NAME); 3076 if (ev_pcb_zone == NULL) { 3077 panic("%s: failed allocating ev_pcb_zone", __func__); 3078 /* NOTREACHED */ 3079 } 3080 zone_change(ev_pcb_zone, Z_EXPAND, TRUE); 3081 zone_change(ev_pcb_zone, Z_CALLERACCT, TRUE); 3082} 3083 3084static int 3085kev_attach(struct socket *so, __unused int proto, __unused struct proc *p) 3086{ 3087 int error = 0; 3088 struct kern_event_pcb *ev_pcb; 3089 3090 error = soreserve(so, KEV_SNDSPACE, KEV_RECVSPACE); 3091 if (error != 0) 3092 return (error); 3093 3094 if ((ev_pcb = (struct kern_event_pcb *)zalloc(ev_pcb_zone)) == NULL) { 3095 return (ENOBUFS); 3096 } 3097 bzero(ev_pcb, sizeof(struct kern_event_pcb)); 3098 lck_mtx_init(&ev_pcb->evp_mtx, kev_lck_grp, kev_lck_attr); 3099 3100 ev_pcb->evp_socket = so; 3101 ev_pcb->evp_vendor_code_filter = 0xffffffff; 3102 3103 so->so_pcb = (caddr_t) ev_pcb; 3104 lck_rw_lock_exclusive(kev_rwlock); 3105 LIST_INSERT_HEAD(&kern_event_head, ev_pcb, evp_link); 3106 lck_rw_done(kev_rwlock); 3107 3108 return (error); 3109} 3110 3111static void 3112kev_delete(struct kern_event_pcb *ev_pcb) 3113{ 3114 VERIFY(ev_pcb != NULL); 3115 lck_mtx_destroy(&ev_pcb->evp_mtx, kev_lck_grp); 3116 zfree(ev_pcb_zone, ev_pcb); 3117} 3118 3119static int 3120kev_detach(struct socket *so) 3121{ 3122 struct kern_event_pcb *ev_pcb = (struct kern_event_pcb *) so->so_pcb; 3123 3124 if (ev_pcb != NULL) { 3125 soisdisconnected(so); 3126 so->so_flags |= SOF_PCBCLEARING; 3127 } 3128 3129 return (0); 3130} 3131 3132/* 3133 * For now, kev_vendor_code and mbuf_tags use the same 3134 * mechanism. 3135 */ 3136errno_t kev_vendor_code_find( 3137 const char *string, 3138 u_int32_t *out_vendor_code) 3139{ 3140 if (strlen(string) >= KEV_VENDOR_CODE_MAX_STR_LEN) { 3141 return (EINVAL); 3142 } 3143 return (net_str_id_find_internal(string, out_vendor_code, 3144 NSI_VENDOR_CODE, 1)); 3145} 3146 3147errno_t 3148kev_msg_post(struct kev_msg *event_msg) 3149{ 3150 mbuf_tag_id_t min_vendor, max_vendor; 3151 3152 net_str_id_first_last(&min_vendor, &max_vendor, NSI_VENDOR_CODE); 3153 3154 if (event_msg == NULL) 3155 return (EINVAL); 3156 3157 /* 3158 * Limit third parties to posting events for registered vendor codes 3159 * only 3160 */ 3161 if (event_msg->vendor_code < min_vendor || 3162 event_msg->vendor_code > max_vendor) 3163 return (EINVAL); 3164 3165 return (kev_post_msg(event_msg)); 3166} 3167 3168int 3169kev_post_msg(struct kev_msg *event_msg) 3170{ 3171 struct mbuf *m, *m2; 3172 struct kern_event_pcb *ev_pcb; 3173 struct kern_event_msg *ev; 3174 char *tmp; 3175 u_int32_t total_size; 3176 int i; 3177 3178 /* Verify the message is small enough to fit in one mbuf w/o cluster */ 3179 total_size = KEV_MSG_HEADER_SIZE; 3180 3181 for (i = 0; i < 5; i++) { 3182 if (event_msg->dv[i].data_length == 0) 3183 break; 3184 total_size += event_msg->dv[i].data_length; 3185 } 3186 3187 if (total_size > MLEN) { 3188 return (EMSGSIZE); 3189 } 3190 3191 m = m_get(M_DONTWAIT, MT_DATA); 3192 if (m == 0) 3193 return (ENOBUFS); 3194 3195 ev = mtod(m, struct kern_event_msg *); 3196 total_size = KEV_MSG_HEADER_SIZE; 3197 3198 tmp = (char *) &ev->event_data[0]; 3199 for (i = 0; i < 5; i++) { 3200 if (event_msg->dv[i].data_length == 0) 3201 break; 3202 3203 total_size += event_msg->dv[i].data_length; 3204 bcopy(event_msg->dv[i].data_ptr, tmp, 3205 event_msg->dv[i].data_length); 3206 tmp += event_msg->dv[i].data_length; 3207 } 3208 3209 ev->id = ++static_event_id; 3210 ev->total_size = total_size; 3211 ev->vendor_code = event_msg->vendor_code; 3212 ev->kev_class = event_msg->kev_class; 3213 ev->kev_subclass = event_msg->kev_subclass; 3214 ev->event_code = event_msg->event_code; 3215 3216 m->m_len = total_size; 3217 lck_rw_lock_shared(kev_rwlock); 3218 for (ev_pcb = LIST_FIRST(&kern_event_head); 3219 ev_pcb; 3220 ev_pcb = LIST_NEXT(ev_pcb, evp_link)) { 3221 lck_mtx_lock(&ev_pcb->evp_mtx); 3222 if (ev_pcb->evp_socket->so_pcb == NULL) { 3223 lck_mtx_unlock(&ev_pcb->evp_mtx); 3224 continue; 3225 } 3226 if (ev_pcb->evp_vendor_code_filter != KEV_ANY_VENDOR) { 3227 if (ev_pcb->evp_vendor_code_filter != ev->vendor_code) { 3228 lck_mtx_unlock(&ev_pcb->evp_mtx); 3229 continue; 3230 } 3231 3232 if (ev_pcb->evp_class_filter != KEV_ANY_CLASS) { 3233 if (ev_pcb->evp_class_filter != ev->kev_class) { 3234 lck_mtx_unlock(&ev_pcb->evp_mtx); 3235 continue; 3236 } 3237 3238 if ((ev_pcb->evp_subclass_filter != KEV_ANY_SUBCLASS) && 3239 (ev_pcb->evp_subclass_filter != ev->kev_subclass)) { 3240 lck_mtx_unlock(&ev_pcb->evp_mtx); 3241 continue; 3242 } 3243 } 3244 } 3245 3246 m2 = m_copym(m, 0, m->m_len, M_NOWAIT); 3247 if (m2 == 0) { 3248 m_free(m); 3249 lck_mtx_unlock(&ev_pcb->evp_mtx); 3250 lck_rw_done(kev_rwlock); 3251 return (ENOBUFS); 3252 } 3253 if (sbappendrecord(&ev_pcb->evp_socket->so_rcv, m2)) 3254 sorwakeup(ev_pcb->evp_socket); 3255 lck_mtx_unlock(&ev_pcb->evp_mtx); 3256 } 3257 m_free(m); 3258 lck_rw_done(kev_rwlock); 3259 3260 return (0); 3261} 3262 3263static int 3264kev_control(struct socket *so, 3265 u_long cmd, 3266 caddr_t data, 3267 __unused struct ifnet *ifp, 3268 __unused struct proc *p) 3269{ 3270 struct kev_request *kev_req = (struct kev_request *) data; 3271 struct kern_event_pcb *ev_pcb; 3272 struct kev_vendor_code *kev_vendor; 3273 u_int32_t *id_value = (u_int32_t *) data; 3274 3275 switch (cmd) { 3276 case SIOCGKEVID: 3277 *id_value = static_event_id; 3278 break; 3279 case SIOCSKEVFILT: 3280 ev_pcb = (struct kern_event_pcb *) so->so_pcb; 3281 ev_pcb->evp_vendor_code_filter = kev_req->vendor_code; 3282 ev_pcb->evp_class_filter = kev_req->kev_class; 3283 ev_pcb->evp_subclass_filter = kev_req->kev_subclass; 3284 break; 3285 case SIOCGKEVFILT: 3286 ev_pcb = (struct kern_event_pcb *) so->so_pcb; 3287 kev_req->vendor_code = ev_pcb->evp_vendor_code_filter; 3288 kev_req->kev_class = ev_pcb->evp_class_filter; 3289 kev_req->kev_subclass = ev_pcb->evp_subclass_filter; 3290 break; 3291 case SIOCGKEVVENDOR: 3292 kev_vendor = (struct kev_vendor_code *)data; 3293 /* Make sure string is NULL terminated */ 3294 kev_vendor->vendor_string[KEV_VENDOR_CODE_MAX_STR_LEN-1] = 0; 3295 return (net_str_id_find_internal(kev_vendor->vendor_string, 3296 &kev_vendor->vendor_code, NSI_VENDOR_CODE, 0)); 3297 default: 3298 return (ENOTSUP); 3299 } 3300 3301 return (0); 3302} 3303 3304#endif /* SOCKETS */ 3305 3306 3307int 3308fill_kqueueinfo(struct kqueue *kq, struct kqueue_info * kinfo) 3309{ 3310 struct vinfo_stat * st; 3311 3312 /* No need for the funnel as fd is kept alive */ 3313 st = &kinfo->kq_stat; 3314 3315 st->vst_size = kq->kq_count; 3316 if (kq->kq_state & KQ_KEV64) 3317 st->vst_blksize = sizeof(struct kevent64_s); 3318 else 3319 st->vst_blksize = sizeof(struct kevent); 3320 st->vst_mode = S_IFIFO; 3321 if (kq->kq_state & KQ_SEL) 3322 kinfo->kq_state |= PROC_KQUEUE_SELECT; 3323 if (kq->kq_state & KQ_SLEEP) 3324 kinfo->kq_state |= PROC_KQUEUE_SLEEP; 3325 3326 return (0); 3327} 3328 3329 3330void 3331knote_markstayqueued(struct knote *kn) 3332{ 3333 kqlock(kn->kn_kq); 3334 kn->kn_status |= KN_STAYQUEUED; 3335 knote_enqueue(kn); 3336 kqunlock(kn->kn_kq); 3337} 3338