kern_event.c revision 297977
1/*- 2 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org> 3 * Copyright 2004 John-Mark Gurney <jmg@FreeBSD.org> 4 * Copyright (c) 2009 Apple, Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29#include <sys/cdefs.h> 30__FBSDID("$FreeBSD: stable/10/sys/kern/kern_event.c 297977 2016-04-14 17:14:11Z vangyzen $"); 31 32#include "opt_ktrace.h" 33 34#include <sys/param.h> 35#include <sys/systm.h> 36#include <sys/capsicum.h> 37#include <sys/kernel.h> 38#include <sys/lock.h> 39#include <sys/mutex.h> 40#include <sys/rwlock.h> 41#include <sys/proc.h> 42#include <sys/malloc.h> 43#include <sys/unistd.h> 44#include <sys/file.h> 45#include <sys/filedesc.h> 46#include <sys/filio.h> 47#include <sys/fcntl.h> 48#include <sys/kthread.h> 49#include <sys/selinfo.h> 50#include <sys/stdatomic.h> 51#include <sys/queue.h> 52#include <sys/event.h> 53#include <sys/eventvar.h> 54#include <sys/poll.h> 55#include <sys/protosw.h> 56#include <sys/sigio.h> 57#include <sys/signalvar.h> 58#include <sys/socket.h> 59#include <sys/socketvar.h> 60#include <sys/stat.h> 61#include <sys/sysctl.h> 62#include <sys/sysproto.h> 63#include <sys/syscallsubr.h> 64#include <sys/taskqueue.h> 65#include <sys/uio.h> 66#ifdef KTRACE 67#include <sys/ktrace.h> 68#endif 69 70#include <vm/uma.h> 71 72static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system"); 73 74/* 75 * This lock is used if multiple kq locks are required. This possibly 76 * should be made into a per proc lock. 77 */ 78static struct mtx kq_global; 79MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF); 80#define KQ_GLOBAL_LOCK(lck, haslck) do { \ 81 if (!haslck) \ 82 mtx_lock(lck); \ 83 haslck = 1; \ 84} while (0) 85#define KQ_GLOBAL_UNLOCK(lck, haslck) do { \ 86 if (haslck) \ 87 mtx_unlock(lck); \ 88 haslck = 0; \ 89} while (0) 90 91TASKQUEUE_DEFINE_THREAD(kqueue); 92 93static int kevent_copyout(void *arg, struct kevent *kevp, int count); 94static int kevent_copyin(void *arg, struct kevent *kevp, int count); 95static int kqueue_register(struct kqueue *kq, struct kevent *kev, 96 struct thread *td, int waitok); 97static int kqueue_acquire(struct file *fp, struct kqueue **kqp); 98static void kqueue_release(struct kqueue *kq, int locked); 99static int kqueue_expand(struct kqueue *kq, struct filterops *fops, 100 uintptr_t ident, int waitok); 101static void kqueue_task(void *arg, int pending); 102static int kqueue_scan(struct kqueue *kq, int maxevents, 103 struct kevent_copyops *k_ops, 104 const struct timespec *timeout, 105 struct kevent *keva, struct thread *td); 106static void kqueue_wakeup(struct kqueue *kq); 107static struct filterops *kqueue_fo_find(int filt); 108static void kqueue_fo_release(int filt); 109 110static fo_rdwr_t kqueue_read; 111static fo_rdwr_t kqueue_write; 112static fo_truncate_t kqueue_truncate; 113static fo_ioctl_t kqueue_ioctl; 114static fo_poll_t kqueue_poll; 115static fo_kqfilter_t kqueue_kqfilter; 116static fo_stat_t kqueue_stat; 117static fo_close_t kqueue_close; 118 119static struct fileops kqueueops = { 120 .fo_read = kqueue_read, 121 .fo_write = kqueue_write, 122 .fo_truncate = kqueue_truncate, 123 .fo_ioctl = kqueue_ioctl, 124 .fo_poll = kqueue_poll, 125 .fo_kqfilter = kqueue_kqfilter, 126 .fo_stat = kqueue_stat, 127 .fo_close = kqueue_close, 128 .fo_chmod = invfo_chmod, 129 .fo_chown = invfo_chown, 130 .fo_sendfile = invfo_sendfile, 131}; 132 133static int knote_attach(struct knote *kn, struct kqueue *kq); 134static void knote_drop(struct knote *kn, struct thread *td); 135static void knote_enqueue(struct knote *kn); 136static void knote_dequeue(struct knote *kn); 137static void knote_init(void); 138static struct knote *knote_alloc(int waitok); 139static void knote_free(struct knote *kn); 140 141static void filt_kqdetach(struct knote *kn); 142static int filt_kqueue(struct knote *kn, long hint); 143static int filt_procattach(struct knote *kn); 144static void filt_procdetach(struct knote *kn); 145static int filt_proc(struct knote *kn, long hint); 146static int filt_fileattach(struct knote *kn); 147static void filt_timerexpire(void *knx); 148static int filt_timerattach(struct knote *kn); 149static void filt_timerdetach(struct knote *kn); 150static int filt_timer(struct knote *kn, long hint); 151static int filt_userattach(struct knote *kn); 152static void filt_userdetach(struct knote *kn); 153static int filt_user(struct knote *kn, long hint); 154static void filt_usertouch(struct knote *kn, struct kevent *kev, 155 u_long type); 156 157static struct filterops file_filtops = { 158 .f_isfd = 1, 159 .f_attach = filt_fileattach, 160}; 161static struct filterops kqread_filtops = { 162 .f_isfd = 1, 163 .f_detach = filt_kqdetach, 164 .f_event = filt_kqueue, 165}; 166/* XXX - move to kern_proc.c? */ 167static struct filterops proc_filtops = { 168 .f_isfd = 0, 169 .f_attach = filt_procattach, 170 .f_detach = filt_procdetach, 171 .f_event = filt_proc, 172}; 173static struct filterops timer_filtops = { 174 .f_isfd = 0, 175 .f_attach = filt_timerattach, 176 .f_detach = filt_timerdetach, 177 .f_event = filt_timer, 178}; 179static struct filterops user_filtops = { 180 .f_attach = filt_userattach, 181 .f_detach = filt_userdetach, 182 .f_event = filt_user, 183 .f_touch = filt_usertouch, 184}; 185 186static uma_zone_t knote_zone; 187static atomic_uint kq_ncallouts = ATOMIC_VAR_INIT(0); 188static unsigned int kq_calloutmax = 4 * 1024; 189SYSCTL_UINT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW, 190 &kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue"); 191 192/* XXX - ensure not KN_INFLUX?? */ 193#define KNOTE_ACTIVATE(kn, islock) do { \ 194 if ((islock)) \ 195 mtx_assert(&(kn)->kn_kq->kq_lock, MA_OWNED); \ 196 else \ 197 KQ_LOCK((kn)->kn_kq); \ 198 (kn)->kn_status |= KN_ACTIVE; \ 199 if (((kn)->kn_status & (KN_QUEUED | KN_DISABLED)) == 0) \ 200 knote_enqueue((kn)); \ 201 if (!(islock)) \ 202 KQ_UNLOCK((kn)->kn_kq); \ 203} while(0) 204#define KQ_LOCK(kq) do { \ 205 mtx_lock(&(kq)->kq_lock); \ 206} while (0) 207#define KQ_FLUX_WAKEUP(kq) do { \ 208 if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) { \ 209 (kq)->kq_state &= ~KQ_FLUXWAIT; \ 210 wakeup((kq)); \ 211 } \ 212} while (0) 213#define KQ_UNLOCK_FLUX(kq) do { \ 214 KQ_FLUX_WAKEUP(kq); \ 215 mtx_unlock(&(kq)->kq_lock); \ 216} while (0) 217#define KQ_UNLOCK(kq) do { \ 218 mtx_unlock(&(kq)->kq_lock); \ 219} while (0) 220#define KQ_OWNED(kq) do { \ 221 mtx_assert(&(kq)->kq_lock, MA_OWNED); \ 222} while (0) 223#define KQ_NOTOWNED(kq) do { \ 224 mtx_assert(&(kq)->kq_lock, MA_NOTOWNED); \ 225} while (0) 226#define KN_LIST_LOCK(kn) do { \ 227 if (kn->kn_knlist != NULL) \ 228 kn->kn_knlist->kl_lock(kn->kn_knlist->kl_lockarg); \ 229} while (0) 230#define KN_LIST_UNLOCK(kn) do { \ 231 if (kn->kn_knlist != NULL) \ 232 kn->kn_knlist->kl_unlock(kn->kn_knlist->kl_lockarg); \ 233} while (0) 234#define KNL_ASSERT_LOCK(knl, islocked) do { \ 235 if (islocked) \ 236 KNL_ASSERT_LOCKED(knl); \ 237 else \ 238 KNL_ASSERT_UNLOCKED(knl); \ 239} while (0) 240#ifdef INVARIANTS 241#define KNL_ASSERT_LOCKED(knl) do { \ 242 knl->kl_assert_locked((knl)->kl_lockarg); \ 243} while (0) 244#define KNL_ASSERT_UNLOCKED(knl) do { \ 245 knl->kl_assert_unlocked((knl)->kl_lockarg); \ 246} while (0) 247#else /* !INVARIANTS */ 248#define KNL_ASSERT_LOCKED(knl) do {} while(0) 249#define KNL_ASSERT_UNLOCKED(knl) do {} while (0) 250#endif /* INVARIANTS */ 251 252#define KN_HASHSIZE 64 /* XXX should be tunable */ 253#define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask)) 254 255static int 256filt_nullattach(struct knote *kn) 257{ 258 259 return (ENXIO); 260}; 261 262struct filterops null_filtops = { 263 .f_isfd = 0, 264 .f_attach = filt_nullattach, 265}; 266 267/* XXX - make SYSINIT to add these, and move into respective modules. */ 268extern struct filterops sig_filtops; 269extern struct filterops fs_filtops; 270 271/* 272 * Table for for all system-defined filters. 273 */ 274static struct mtx filterops_lock; 275MTX_SYSINIT(kqueue_filterops, &filterops_lock, "protect sysfilt_ops", 276 MTX_DEF); 277static struct { 278 struct filterops *for_fop; 279 int for_refcnt; 280} sysfilt_ops[EVFILT_SYSCOUNT] = { 281 { &file_filtops }, /* EVFILT_READ */ 282 { &file_filtops }, /* EVFILT_WRITE */ 283 { &null_filtops }, /* EVFILT_AIO */ 284 { &file_filtops }, /* EVFILT_VNODE */ 285 { &proc_filtops }, /* EVFILT_PROC */ 286 { &sig_filtops }, /* EVFILT_SIGNAL */ 287 { &timer_filtops }, /* EVFILT_TIMER */ 288 { &null_filtops }, /* former EVFILT_NETDEV */ 289 { &fs_filtops }, /* EVFILT_FS */ 290 { &null_filtops }, /* EVFILT_LIO */ 291 { &user_filtops }, /* EVFILT_USER */ 292}; 293 294/* 295 * Simple redirection for all cdevsw style objects to call their fo_kqfilter 296 * method. 297 */ 298static int 299filt_fileattach(struct knote *kn) 300{ 301 302 return (fo_kqfilter(kn->kn_fp, kn)); 303} 304 305/*ARGSUSED*/ 306static int 307kqueue_kqfilter(struct file *fp, struct knote *kn) 308{ 309 struct kqueue *kq = kn->kn_fp->f_data; 310 311 if (kn->kn_filter != EVFILT_READ) 312 return (EINVAL); 313 314 kn->kn_status |= KN_KQUEUE; 315 kn->kn_fop = &kqread_filtops; 316 knlist_add(&kq->kq_sel.si_note, kn, 0); 317 318 return (0); 319} 320 321static void 322filt_kqdetach(struct knote *kn) 323{ 324 struct kqueue *kq = kn->kn_fp->f_data; 325 326 knlist_remove(&kq->kq_sel.si_note, kn, 0); 327} 328 329/*ARGSUSED*/ 330static int 331filt_kqueue(struct knote *kn, long hint) 332{ 333 struct kqueue *kq = kn->kn_fp->f_data; 334 335 kn->kn_data = kq->kq_count; 336 return (kn->kn_data > 0); 337} 338 339/* XXX - move to kern_proc.c? */ 340static int 341filt_procattach(struct knote *kn) 342{ 343 struct proc *p; 344 int immediate; 345 int error; 346 347 immediate = 0; 348 p = pfind(kn->kn_id); 349 if (p == NULL && (kn->kn_sfflags & NOTE_EXIT)) { 350 p = zpfind(kn->kn_id); 351 immediate = 1; 352 } else if (p != NULL && (p->p_flag & P_WEXIT)) { 353 immediate = 1; 354 } 355 356 if (p == NULL) 357 return (ESRCH); 358 if ((error = p_cansee(curthread, p))) { 359 PROC_UNLOCK(p); 360 return (error); 361 } 362 363 kn->kn_ptr.p_proc = p; 364 kn->kn_flags |= EV_CLEAR; /* automatically set */ 365 366 /* 367 * Internal flag indicating registration done by kernel for the 368 * purposes of getting a NOTE_CHILD notification. 369 */ 370 if (kn->kn_flags & EV_FLAG2) { 371 kn->kn_flags &= ~EV_FLAG2; 372 kn->kn_data = kn->kn_sdata; /* ppid */ 373 kn->kn_fflags = NOTE_CHILD; 374 kn->kn_sfflags &= ~NOTE_EXIT; 375 immediate = 1; /* Force immediate activation of child note. */ 376 } 377 /* 378 * Internal flag indicating registration done by kernel (for other than 379 * NOTE_CHILD). 380 */ 381 if (kn->kn_flags & EV_FLAG1) { 382 kn->kn_flags &= ~EV_FLAG1; 383 } 384 385 if (immediate == 0) 386 knlist_add(&p->p_klist, kn, 1); 387 388 /* 389 * Immediately activate any child notes or, in the case of a zombie 390 * target process, exit notes. The latter is necessary to handle the 391 * case where the target process, e.g. a child, dies before the kevent 392 * is registered. 393 */ 394 if (immediate && filt_proc(kn, NOTE_EXIT)) 395 KNOTE_ACTIVATE(kn, 0); 396 397 PROC_UNLOCK(p); 398 399 return (0); 400} 401 402/* 403 * The knote may be attached to a different process, which may exit, 404 * leaving nothing for the knote to be attached to. So when the process 405 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so 406 * it will be deleted when read out. However, as part of the knote deletion, 407 * this routine is called, so a check is needed to avoid actually performing 408 * a detach, because the original process does not exist any more. 409 */ 410/* XXX - move to kern_proc.c? */ 411static void 412filt_procdetach(struct knote *kn) 413{ 414 struct proc *p; 415 416 p = kn->kn_ptr.p_proc; 417 knlist_remove(&p->p_klist, kn, 0); 418 kn->kn_ptr.p_proc = NULL; 419} 420 421/* XXX - move to kern_proc.c? */ 422static int 423filt_proc(struct knote *kn, long hint) 424{ 425 struct proc *p = kn->kn_ptr.p_proc; 426 u_int event; 427 428 /* 429 * mask off extra data 430 */ 431 event = (u_int)hint & NOTE_PCTRLMASK; 432 433 /* 434 * if the user is interested in this event, record it. 435 */ 436 if (kn->kn_sfflags & event) 437 kn->kn_fflags |= event; 438 439 /* 440 * process is gone, so flag the event as finished. 441 */ 442 if (event == NOTE_EXIT) { 443 if (!(kn->kn_status & KN_DETACHED)) 444 knlist_remove_inevent(&p->p_klist, kn); 445 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 446 kn->kn_ptr.p_proc = NULL; 447 if (kn->kn_fflags & NOTE_EXIT) 448 kn->kn_data = p->p_xstat; 449 if (kn->kn_fflags == 0) 450 kn->kn_flags |= EV_DROP; 451 return (1); 452 } 453 454 return (kn->kn_fflags != 0); 455} 456 457/* 458 * Called when the process forked. It mostly does the same as the 459 * knote(), activating all knotes registered to be activated when the 460 * process forked. Additionally, for each knote attached to the 461 * parent, check whether user wants to track the new process. If so 462 * attach a new knote to it, and immediately report an event with the 463 * child's pid. 464 */ 465void 466knote_fork(struct knlist *list, int pid) 467{ 468 struct kqueue *kq; 469 struct knote *kn; 470 struct kevent kev; 471 int error; 472 473 if (list == NULL) 474 return; 475 list->kl_lock(list->kl_lockarg); 476 477 SLIST_FOREACH(kn, &list->kl_list, kn_selnext) { 478 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) 479 continue; 480 kq = kn->kn_kq; 481 KQ_LOCK(kq); 482 if ((kn->kn_status & (KN_INFLUX | KN_SCAN)) == KN_INFLUX) { 483 KQ_UNLOCK(kq); 484 continue; 485 } 486 487 /* 488 * The same as knote(), activate the event. 489 */ 490 if ((kn->kn_sfflags & NOTE_TRACK) == 0) { 491 kn->kn_status |= KN_HASKQLOCK; 492 if (kn->kn_fop->f_event(kn, NOTE_FORK)) 493 KNOTE_ACTIVATE(kn, 1); 494 kn->kn_status &= ~KN_HASKQLOCK; 495 KQ_UNLOCK(kq); 496 continue; 497 } 498 499 /* 500 * The NOTE_TRACK case. In addition to the activation 501 * of the event, we need to register new events to 502 * track the child. Drop the locks in preparation for 503 * the call to kqueue_register(). 504 */ 505 kn->kn_status |= KN_INFLUX; 506 KQ_UNLOCK(kq); 507 list->kl_unlock(list->kl_lockarg); 508 509 /* 510 * Activate existing knote and register tracking knotes with 511 * new process. 512 * 513 * First register a knote to get just the child notice. This 514 * must be a separate note from a potential NOTE_EXIT 515 * notification since both NOTE_CHILD and NOTE_EXIT are defined 516 * to use the data field (in conflicting ways). 517 */ 518 kev.ident = pid; 519 kev.filter = kn->kn_filter; 520 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_ONESHOT | EV_FLAG2; 521 kev.fflags = kn->kn_sfflags; 522 kev.data = kn->kn_id; /* parent */ 523 kev.udata = kn->kn_kevent.udata;/* preserve udata */ 524 error = kqueue_register(kq, &kev, NULL, 0); 525 if (error) 526 kn->kn_fflags |= NOTE_TRACKERR; 527 528 /* 529 * Then register another knote to track other potential events 530 * from the new process. 531 */ 532 kev.ident = pid; 533 kev.filter = kn->kn_filter; 534 kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1; 535 kev.fflags = kn->kn_sfflags; 536 kev.data = kn->kn_id; /* parent */ 537 kev.udata = kn->kn_kevent.udata;/* preserve udata */ 538 error = kqueue_register(kq, &kev, NULL, 0); 539 if (error) 540 kn->kn_fflags |= NOTE_TRACKERR; 541 if (kn->kn_fop->f_event(kn, NOTE_FORK)) 542 KNOTE_ACTIVATE(kn, 0); 543 KQ_LOCK(kq); 544 kn->kn_status &= ~KN_INFLUX; 545 KQ_UNLOCK_FLUX(kq); 546 list->kl_lock(list->kl_lockarg); 547 } 548 list->kl_unlock(list->kl_lockarg); 549} 550 551/* 552 * XXX: EVFILT_TIMER should perhaps live in kern_time.c beside the 553 * interval timer support code. 554 */ 555 556#define NOTE_TIMER_PRECMASK (NOTE_SECONDS|NOTE_MSECONDS|NOTE_USECONDS| \ 557 NOTE_NSECONDS) 558 559static __inline sbintime_t 560timer2sbintime(intptr_t data, int flags) 561{ 562 sbintime_t modifier; 563 564 switch (flags & NOTE_TIMER_PRECMASK) { 565 case NOTE_SECONDS: 566 modifier = SBT_1S; 567 break; 568 case NOTE_MSECONDS: /* FALLTHROUGH */ 569 case 0: 570 modifier = SBT_1MS; 571 break; 572 case NOTE_USECONDS: 573 modifier = SBT_1US; 574 break; 575 case NOTE_NSECONDS: 576 modifier = SBT_1NS; 577 break; 578 default: 579 return (-1); 580 } 581 582#ifdef __LP64__ 583 if (data > SBT_MAX / modifier) 584 return (SBT_MAX); 585#endif 586 return (modifier * data); 587} 588 589static void 590filt_timerexpire(void *knx) 591{ 592 struct callout *calloutp; 593 struct knote *kn; 594 595 kn = knx; 596 kn->kn_data++; 597 KNOTE_ACTIVATE(kn, 0); /* XXX - handle locking */ 598 599 if ((kn->kn_flags & EV_ONESHOT) != EV_ONESHOT) { 600 calloutp = (struct callout *)kn->kn_hook; 601 *kn->kn_ptr.p_nexttime += timer2sbintime(kn->kn_sdata, 602 kn->kn_sfflags); 603 callout_reset_sbt_on(calloutp, *kn->kn_ptr.p_nexttime, 0, 604 filt_timerexpire, kn, PCPU_GET(cpuid), C_ABSOLUTE); 605 } 606} 607 608/* 609 * data contains amount of time to sleep 610 */ 611static int 612filt_timerattach(struct knote *kn) 613{ 614 struct callout *calloutp; 615 sbintime_t to; 616 unsigned int ncallouts; 617 618 if ((intptr_t)kn->kn_sdata < 0) 619 return (EINVAL); 620 if ((intptr_t)kn->kn_sdata == 0 && (kn->kn_flags & EV_ONESHOT) == 0) 621 kn->kn_sdata = 1; 622 /* Only precision unit are supported in flags so far */ 623 if (kn->kn_sfflags & ~NOTE_TIMER_PRECMASK) 624 return (EINVAL); 625 626 to = timer2sbintime(kn->kn_sdata, kn->kn_sfflags); 627 if (to < 0) 628 return (EINVAL); 629 630 ncallouts = atomic_load_explicit(&kq_ncallouts, memory_order_relaxed); 631 do { 632 if (ncallouts >= kq_calloutmax) 633 return (ENOMEM); 634 } while (!atomic_compare_exchange_weak_explicit(&kq_ncallouts, 635 &ncallouts, ncallouts + 1, memory_order_relaxed, 636 memory_order_relaxed)); 637 638 kn->kn_flags |= EV_CLEAR; /* automatically set */ 639 kn->kn_status &= ~KN_DETACHED; /* knlist_add clears it */ 640 kn->kn_ptr.p_nexttime = malloc(sizeof(sbintime_t), M_KQUEUE, M_WAITOK); 641 calloutp = malloc(sizeof(*calloutp), M_KQUEUE, M_WAITOK); 642 callout_init(calloutp, CALLOUT_MPSAFE); 643 kn->kn_hook = calloutp; 644 *kn->kn_ptr.p_nexttime = to + sbinuptime(); 645 callout_reset_sbt_on(calloutp, *kn->kn_ptr.p_nexttime, 0, 646 filt_timerexpire, kn, PCPU_GET(cpuid), C_ABSOLUTE); 647 648 return (0); 649} 650 651static void 652filt_timerdetach(struct knote *kn) 653{ 654 struct callout *calloutp; 655 unsigned int old; 656 657 calloutp = (struct callout *)kn->kn_hook; 658 callout_drain(calloutp); 659 free(calloutp, M_KQUEUE); 660 free(kn->kn_ptr.p_nexttime, M_KQUEUE); 661 old = atomic_fetch_sub_explicit(&kq_ncallouts, 1, memory_order_relaxed); 662 KASSERT(old > 0, ("Number of callouts cannot become negative")); 663 kn->kn_status |= KN_DETACHED; /* knlist_remove sets it */ 664} 665 666static int 667filt_timer(struct knote *kn, long hint) 668{ 669 670 return (kn->kn_data != 0); 671} 672 673static int 674filt_userattach(struct knote *kn) 675{ 676 677 /* 678 * EVFILT_USER knotes are not attached to anything in the kernel. 679 */ 680 kn->kn_hook = NULL; 681 if (kn->kn_fflags & NOTE_TRIGGER) 682 kn->kn_hookid = 1; 683 else 684 kn->kn_hookid = 0; 685 return (0); 686} 687 688static void 689filt_userdetach(__unused struct knote *kn) 690{ 691 692 /* 693 * EVFILT_USER knotes are not attached to anything in the kernel. 694 */ 695} 696 697static int 698filt_user(struct knote *kn, __unused long hint) 699{ 700 701 return (kn->kn_hookid); 702} 703 704static void 705filt_usertouch(struct knote *kn, struct kevent *kev, u_long type) 706{ 707 u_int ffctrl; 708 709 switch (type) { 710 case EVENT_REGISTER: 711 if (kev->fflags & NOTE_TRIGGER) 712 kn->kn_hookid = 1; 713 714 ffctrl = kev->fflags & NOTE_FFCTRLMASK; 715 kev->fflags &= NOTE_FFLAGSMASK; 716 switch (ffctrl) { 717 case NOTE_FFNOP: 718 break; 719 720 case NOTE_FFAND: 721 kn->kn_sfflags &= kev->fflags; 722 break; 723 724 case NOTE_FFOR: 725 kn->kn_sfflags |= kev->fflags; 726 break; 727 728 case NOTE_FFCOPY: 729 kn->kn_sfflags = kev->fflags; 730 break; 731 732 default: 733 /* XXX Return error? */ 734 break; 735 } 736 kn->kn_sdata = kev->data; 737 if (kev->flags & EV_CLEAR) { 738 kn->kn_hookid = 0; 739 kn->kn_data = 0; 740 kn->kn_fflags = 0; 741 } 742 break; 743 744 case EVENT_PROCESS: 745 *kev = kn->kn_kevent; 746 kev->fflags = kn->kn_sfflags; 747 kev->data = kn->kn_sdata; 748 if (kn->kn_flags & EV_CLEAR) { 749 kn->kn_hookid = 0; 750 kn->kn_data = 0; 751 kn->kn_fflags = 0; 752 } 753 break; 754 755 default: 756 panic("filt_usertouch() - invalid type (%ld)", type); 757 break; 758 } 759} 760 761int 762sys_kqueue(struct thread *td, struct kqueue_args *uap) 763{ 764 765 return (kern_kqueue(td, 0)); 766} 767 768int 769kern_kqueue(struct thread *td, int flags) 770{ 771 struct filedesc *fdp; 772 struct kqueue *kq; 773 struct file *fp; 774 int fd, error; 775 776 fdp = td->td_proc->p_fd; 777 error = falloc(td, &fp, &fd, flags); 778 if (error) 779 goto done2; 780 781 /* An extra reference on `fp' has been held for us by falloc(). */ 782 kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO); 783 mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF|MTX_DUPOK); 784 TAILQ_INIT(&kq->kq_head); 785 kq->kq_fdp = fdp; 786 knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock); 787 TASK_INIT(&kq->kq_task, 0, kqueue_task, kq); 788 789 FILEDESC_XLOCK(fdp); 790 TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list); 791 FILEDESC_XUNLOCK(fdp); 792 793 finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops); 794 fdrop(fp, td); 795 796 td->td_retval[0] = fd; 797done2: 798 return (error); 799} 800 801#ifndef _SYS_SYSPROTO_H_ 802struct kevent_args { 803 int fd; 804 const struct kevent *changelist; 805 int nchanges; 806 struct kevent *eventlist; 807 int nevents; 808 const struct timespec *timeout; 809}; 810#endif 811int 812sys_kevent(struct thread *td, struct kevent_args *uap) 813{ 814 struct timespec ts, *tsp; 815 struct kevent_copyops k_ops = { uap, 816 kevent_copyout, 817 kevent_copyin}; 818 int error; 819#ifdef KTRACE 820 struct uio ktruio; 821 struct iovec ktriov; 822 struct uio *ktruioin = NULL; 823 struct uio *ktruioout = NULL; 824#endif 825 826 if (uap->timeout != NULL) { 827 error = copyin(uap->timeout, &ts, sizeof(ts)); 828 if (error) 829 return (error); 830 tsp = &ts; 831 } else 832 tsp = NULL; 833 834#ifdef KTRACE 835 if (KTRPOINT(td, KTR_GENIO)) { 836 ktriov.iov_base = uap->changelist; 837 ktriov.iov_len = uap->nchanges * sizeof(struct kevent); 838 ktruio = (struct uio){ .uio_iov = &ktriov, .uio_iovcnt = 1, 839 .uio_segflg = UIO_USERSPACE, .uio_rw = UIO_READ, 840 .uio_td = td }; 841 ktruioin = cloneuio(&ktruio); 842 ktriov.iov_base = uap->eventlist; 843 ktriov.iov_len = uap->nevents * sizeof(struct kevent); 844 ktruioout = cloneuio(&ktruio); 845 } 846#endif 847 848 error = kern_kevent(td, uap->fd, uap->nchanges, uap->nevents, 849 &k_ops, tsp); 850 851#ifdef KTRACE 852 if (ktruioin != NULL) { 853 ktruioin->uio_resid = uap->nchanges * sizeof(struct kevent); 854 ktrgenio(uap->fd, UIO_WRITE, ktruioin, 0); 855 ktruioout->uio_resid = td->td_retval[0] * sizeof(struct kevent); 856 ktrgenio(uap->fd, UIO_READ, ktruioout, error); 857 } 858#endif 859 860 return (error); 861} 862 863/* 864 * Copy 'count' items into the destination list pointed to by uap->eventlist. 865 */ 866static int 867kevent_copyout(void *arg, struct kevent *kevp, int count) 868{ 869 struct kevent_args *uap; 870 int error; 871 872 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 873 uap = (struct kevent_args *)arg; 874 875 error = copyout(kevp, uap->eventlist, count * sizeof *kevp); 876 if (error == 0) 877 uap->eventlist += count; 878 return (error); 879} 880 881/* 882 * Copy 'count' items from the list pointed to by uap->changelist. 883 */ 884static int 885kevent_copyin(void *arg, struct kevent *kevp, int count) 886{ 887 struct kevent_args *uap; 888 int error; 889 890 KASSERT(count <= KQ_NEVENTS, ("count (%d) > KQ_NEVENTS", count)); 891 uap = (struct kevent_args *)arg; 892 893 error = copyin(uap->changelist, kevp, count * sizeof *kevp); 894 if (error == 0) 895 uap->changelist += count; 896 return (error); 897} 898 899int 900kern_kevent(struct thread *td, int fd, int nchanges, int nevents, 901 struct kevent_copyops *k_ops, const struct timespec *timeout) 902{ 903 cap_rights_t rights; 904 struct file *fp; 905 int error; 906 907 cap_rights_init(&rights); 908 if (nchanges > 0) 909 cap_rights_set(&rights, CAP_KQUEUE_CHANGE); 910 if (nevents > 0) 911 cap_rights_set(&rights, CAP_KQUEUE_EVENT); 912 error = fget(td, fd, &rights, &fp); 913 if (error != 0) 914 return (error); 915 916 error = kern_kevent_fp(td, fp, nchanges, nevents, k_ops, timeout); 917 fdrop(fp, td); 918 919 return (error); 920} 921 922int 923kern_kevent_fp(struct thread *td, struct file *fp, int nchanges, int nevents, 924 struct kevent_copyops *k_ops, const struct timespec *timeout) 925{ 926 struct kevent keva[KQ_NEVENTS]; 927 struct kevent *kevp, *changes; 928 struct kqueue *kq; 929 int i, n, nerrors, error; 930 931 error = kqueue_acquire(fp, &kq); 932 if (error != 0) 933 return (error); 934 935 nerrors = 0; 936 937 while (nchanges > 0) { 938 n = nchanges > KQ_NEVENTS ? KQ_NEVENTS : nchanges; 939 error = k_ops->k_copyin(k_ops->arg, keva, n); 940 if (error) 941 goto done; 942 changes = keva; 943 for (i = 0; i < n; i++) { 944 kevp = &changes[i]; 945 if (!kevp->filter) 946 continue; 947 kevp->flags &= ~EV_SYSFLAGS; 948 error = kqueue_register(kq, kevp, td, 1); 949 if (error || (kevp->flags & EV_RECEIPT)) { 950 if (nevents != 0) { 951 kevp->flags = EV_ERROR; 952 kevp->data = error; 953 (void) k_ops->k_copyout(k_ops->arg, 954 kevp, 1); 955 nevents--; 956 nerrors++; 957 } else { 958 goto done; 959 } 960 } 961 } 962 nchanges -= n; 963 } 964 if (nerrors) { 965 td->td_retval[0] = nerrors; 966 error = 0; 967 goto done; 968 } 969 970 error = kqueue_scan(kq, nevents, k_ops, timeout, keva, td); 971done: 972 kqueue_release(kq, 0); 973 return (error); 974} 975 976int 977kqueue_add_filteropts(int filt, struct filterops *filtops) 978{ 979 int error; 980 981 error = 0; 982 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) { 983 printf( 984"trying to add a filterop that is out of range: %d is beyond %d\n", 985 ~filt, EVFILT_SYSCOUNT); 986 return EINVAL; 987 } 988 mtx_lock(&filterops_lock); 989 if (sysfilt_ops[~filt].for_fop != &null_filtops && 990 sysfilt_ops[~filt].for_fop != NULL) 991 error = EEXIST; 992 else { 993 sysfilt_ops[~filt].for_fop = filtops; 994 sysfilt_ops[~filt].for_refcnt = 0; 995 } 996 mtx_unlock(&filterops_lock); 997 998 return (error); 999} 1000 1001int 1002kqueue_del_filteropts(int filt) 1003{ 1004 int error; 1005 1006 error = 0; 1007 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) 1008 return EINVAL; 1009 1010 mtx_lock(&filterops_lock); 1011 if (sysfilt_ops[~filt].for_fop == &null_filtops || 1012 sysfilt_ops[~filt].for_fop == NULL) 1013 error = EINVAL; 1014 else if (sysfilt_ops[~filt].for_refcnt != 0) 1015 error = EBUSY; 1016 else { 1017 sysfilt_ops[~filt].for_fop = &null_filtops; 1018 sysfilt_ops[~filt].for_refcnt = 0; 1019 } 1020 mtx_unlock(&filterops_lock); 1021 1022 return error; 1023} 1024 1025static struct filterops * 1026kqueue_fo_find(int filt) 1027{ 1028 1029 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) 1030 return NULL; 1031 1032 mtx_lock(&filterops_lock); 1033 sysfilt_ops[~filt].for_refcnt++; 1034 if (sysfilt_ops[~filt].for_fop == NULL) 1035 sysfilt_ops[~filt].for_fop = &null_filtops; 1036 mtx_unlock(&filterops_lock); 1037 1038 return sysfilt_ops[~filt].for_fop; 1039} 1040 1041static void 1042kqueue_fo_release(int filt) 1043{ 1044 1045 if (filt > 0 || filt + EVFILT_SYSCOUNT < 0) 1046 return; 1047 1048 mtx_lock(&filterops_lock); 1049 KASSERT(sysfilt_ops[~filt].for_refcnt > 0, 1050 ("filter object refcount not valid on release")); 1051 sysfilt_ops[~filt].for_refcnt--; 1052 mtx_unlock(&filterops_lock); 1053} 1054 1055/* 1056 * A ref to kq (obtained via kqueue_acquire) must be held. waitok will 1057 * influence if memory allocation should wait. Make sure it is 0 if you 1058 * hold any mutexes. 1059 */ 1060static int 1061kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok) 1062{ 1063 struct filterops *fops; 1064 struct file *fp; 1065 struct knote *kn, *tkn; 1066 cap_rights_t rights; 1067 int error, filt, event; 1068 int haskqglobal, filedesc_unlock; 1069 1070 fp = NULL; 1071 kn = NULL; 1072 error = 0; 1073 haskqglobal = 0; 1074 filedesc_unlock = 0; 1075 1076 filt = kev->filter; 1077 fops = kqueue_fo_find(filt); 1078 if (fops == NULL) 1079 return EINVAL; 1080 1081 tkn = knote_alloc(waitok); /* prevent waiting with locks */ 1082 1083findkn: 1084 if (fops->f_isfd) { 1085 KASSERT(td != NULL, ("td is NULL")); 1086 error = fget(td, kev->ident, 1087 cap_rights_init(&rights, CAP_EVENT), &fp); 1088 if (error) 1089 goto done; 1090 1091 if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops, 1092 kev->ident, 0) != 0) { 1093 /* try again */ 1094 fdrop(fp, td); 1095 fp = NULL; 1096 error = kqueue_expand(kq, fops, kev->ident, waitok); 1097 if (error) 1098 goto done; 1099 goto findkn; 1100 } 1101 1102 if (fp->f_type == DTYPE_KQUEUE) { 1103 /* 1104 * If we add some intelligence about what we are doing, 1105 * we should be able to support events on ourselves. 1106 * We need to know when we are doing this to prevent 1107 * getting both the knlist lock and the kq lock since 1108 * they are the same thing. 1109 */ 1110 if (fp->f_data == kq) { 1111 error = EINVAL; 1112 goto done; 1113 } 1114 1115 /* 1116 * Pre-lock the filedesc before the global 1117 * lock mutex, see the comment in 1118 * kqueue_close(). 1119 */ 1120 FILEDESC_XLOCK(td->td_proc->p_fd); 1121 filedesc_unlock = 1; 1122 KQ_GLOBAL_LOCK(&kq_global, haskqglobal); 1123 } 1124 1125 KQ_LOCK(kq); 1126 if (kev->ident < kq->kq_knlistsize) { 1127 SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link) 1128 if (kev->filter == kn->kn_filter) 1129 break; 1130 } 1131 } else { 1132 if ((kev->flags & EV_ADD) == EV_ADD) 1133 kqueue_expand(kq, fops, kev->ident, waitok); 1134 1135 KQ_LOCK(kq); 1136 1137 /* 1138 * If possible, find an existing knote to use for this kevent. 1139 */ 1140 if (kev->filter == EVFILT_PROC && 1141 (kev->flags & (EV_FLAG1 | EV_FLAG2)) != 0) { 1142 /* This is an internal creation of a process tracking 1143 * note. Don't attempt to coalesce this with an 1144 * existing note. 1145 */ 1146 ; 1147 } else if (kq->kq_knhashmask != 0) { 1148 struct klist *list; 1149 1150 list = &kq->kq_knhash[ 1151 KN_HASH((u_long)kev->ident, kq->kq_knhashmask)]; 1152 SLIST_FOREACH(kn, list, kn_link) 1153 if (kev->ident == kn->kn_id && 1154 kev->filter == kn->kn_filter) 1155 break; 1156 } 1157 } 1158 1159 /* knote is in the process of changing, wait for it to stabilize. */ 1160 if (kn != NULL && (kn->kn_status & KN_INFLUX) == KN_INFLUX) { 1161 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1162 if (filedesc_unlock) { 1163 FILEDESC_XUNLOCK(td->td_proc->p_fd); 1164 filedesc_unlock = 0; 1165 } 1166 kq->kq_state |= KQ_FLUXWAIT; 1167 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0); 1168 if (fp != NULL) { 1169 fdrop(fp, td); 1170 fp = NULL; 1171 } 1172 goto findkn; 1173 } 1174 1175 /* 1176 * kn now contains the matching knote, or NULL if no match 1177 */ 1178 if (kn == NULL) { 1179 if (kev->flags & EV_ADD) { 1180 kn = tkn; 1181 tkn = NULL; 1182 if (kn == NULL) { 1183 KQ_UNLOCK(kq); 1184 error = ENOMEM; 1185 goto done; 1186 } 1187 kn->kn_fp = fp; 1188 kn->kn_kq = kq; 1189 kn->kn_fop = fops; 1190 /* 1191 * apply reference counts to knote structure, and 1192 * do not release it at the end of this routine. 1193 */ 1194 fops = NULL; 1195 fp = NULL; 1196 1197 kn->kn_sfflags = kev->fflags; 1198 kn->kn_sdata = kev->data; 1199 kev->fflags = 0; 1200 kev->data = 0; 1201 kn->kn_kevent = *kev; 1202 kn->kn_kevent.flags &= ~(EV_ADD | EV_DELETE | 1203 EV_ENABLE | EV_DISABLE); 1204 kn->kn_status = KN_INFLUX|KN_DETACHED; 1205 1206 error = knote_attach(kn, kq); 1207 KQ_UNLOCK(kq); 1208 if (error != 0) { 1209 tkn = kn; 1210 goto done; 1211 } 1212 1213 if ((error = kn->kn_fop->f_attach(kn)) != 0) { 1214 knote_drop(kn, td); 1215 goto done; 1216 } 1217 KN_LIST_LOCK(kn); 1218 goto done_ev_add; 1219 } else { 1220 /* No matching knote and the EV_ADD flag is not set. */ 1221 KQ_UNLOCK(kq); 1222 error = ENOENT; 1223 goto done; 1224 } 1225 } 1226 1227 if (kev->flags & EV_DELETE) { 1228 kn->kn_status |= KN_INFLUX; 1229 KQ_UNLOCK(kq); 1230 if (!(kn->kn_status & KN_DETACHED)) 1231 kn->kn_fop->f_detach(kn); 1232 knote_drop(kn, td); 1233 goto done; 1234 } 1235 1236 /* 1237 * The user may change some filter values after the initial EV_ADD, 1238 * but doing so will not reset any filter which has already been 1239 * triggered. 1240 */ 1241 kn->kn_status |= KN_INFLUX | KN_SCAN; 1242 KQ_UNLOCK(kq); 1243 KN_LIST_LOCK(kn); 1244 kn->kn_kevent.udata = kev->udata; 1245 if (!fops->f_isfd && fops->f_touch != NULL) { 1246 fops->f_touch(kn, kev, EVENT_REGISTER); 1247 } else { 1248 kn->kn_sfflags = kev->fflags; 1249 kn->kn_sdata = kev->data; 1250 } 1251 1252 /* 1253 * We can get here with kn->kn_knlist == NULL. This can happen when 1254 * the initial attach event decides that the event is "completed" 1255 * already. i.e. filt_procattach is called on a zombie process. It 1256 * will call filt_proc which will remove it from the list, and NULL 1257 * kn_knlist. 1258 */ 1259done_ev_add: 1260 event = kn->kn_fop->f_event(kn, 0); 1261 KQ_LOCK(kq); 1262 if (event) 1263 KNOTE_ACTIVATE(kn, 1); 1264 kn->kn_status &= ~(KN_INFLUX | KN_SCAN); 1265 KN_LIST_UNLOCK(kn); 1266 1267 if ((kev->flags & EV_DISABLE) && 1268 ((kn->kn_status & KN_DISABLED) == 0)) { 1269 kn->kn_status |= KN_DISABLED; 1270 } 1271 1272 if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) { 1273 kn->kn_status &= ~KN_DISABLED; 1274 if ((kn->kn_status & KN_ACTIVE) && 1275 ((kn->kn_status & KN_QUEUED) == 0)) 1276 knote_enqueue(kn); 1277 } 1278 KQ_UNLOCK_FLUX(kq); 1279 1280done: 1281 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1282 if (filedesc_unlock) 1283 FILEDESC_XUNLOCK(td->td_proc->p_fd); 1284 if (fp != NULL) 1285 fdrop(fp, td); 1286 if (tkn != NULL) 1287 knote_free(tkn); 1288 if (fops != NULL) 1289 kqueue_fo_release(filt); 1290 return (error); 1291} 1292 1293static int 1294kqueue_acquire(struct file *fp, struct kqueue **kqp) 1295{ 1296 int error; 1297 struct kqueue *kq; 1298 1299 error = 0; 1300 1301 kq = fp->f_data; 1302 if (fp->f_type != DTYPE_KQUEUE || kq == NULL) 1303 return (EBADF); 1304 *kqp = kq; 1305 KQ_LOCK(kq); 1306 if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) { 1307 KQ_UNLOCK(kq); 1308 return (EBADF); 1309 } 1310 kq->kq_refcnt++; 1311 KQ_UNLOCK(kq); 1312 1313 return error; 1314} 1315 1316static void 1317kqueue_release(struct kqueue *kq, int locked) 1318{ 1319 if (locked) 1320 KQ_OWNED(kq); 1321 else 1322 KQ_LOCK(kq); 1323 kq->kq_refcnt--; 1324 if (kq->kq_refcnt == 1) 1325 wakeup(&kq->kq_refcnt); 1326 if (!locked) 1327 KQ_UNLOCK(kq); 1328} 1329 1330static void 1331kqueue_schedtask(struct kqueue *kq) 1332{ 1333 1334 KQ_OWNED(kq); 1335 KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN), 1336 ("scheduling kqueue task while draining")); 1337 1338 if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) { 1339 taskqueue_enqueue(taskqueue_kqueue, &kq->kq_task); 1340 kq->kq_state |= KQ_TASKSCHED; 1341 } 1342} 1343 1344/* 1345 * Expand the kq to make sure we have storage for fops/ident pair. 1346 * 1347 * Return 0 on success (or no work necessary), return errno on failure. 1348 * 1349 * Not calling hashinit w/ waitok (proper malloc flag) should be safe. 1350 * If kqueue_register is called from a non-fd context, there usually/should 1351 * be no locks held. 1352 */ 1353static int 1354kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident, 1355 int waitok) 1356{ 1357 struct klist *list, *tmp_knhash, *to_free; 1358 u_long tmp_knhashmask; 1359 int size; 1360 int fd; 1361 int mflag = waitok ? M_WAITOK : M_NOWAIT; 1362 1363 KQ_NOTOWNED(kq); 1364 1365 to_free = NULL; 1366 if (fops->f_isfd) { 1367 fd = ident; 1368 if (kq->kq_knlistsize <= fd) { 1369 size = kq->kq_knlistsize; 1370 while (size <= fd) 1371 size += KQEXTENT; 1372 list = malloc(size * sizeof(*list), M_KQUEUE, mflag); 1373 if (list == NULL) 1374 return ENOMEM; 1375 KQ_LOCK(kq); 1376 if (kq->kq_knlistsize > fd) { 1377 to_free = list; 1378 list = NULL; 1379 } else { 1380 if (kq->kq_knlist != NULL) { 1381 bcopy(kq->kq_knlist, list, 1382 kq->kq_knlistsize * sizeof(*list)); 1383 to_free = kq->kq_knlist; 1384 kq->kq_knlist = NULL; 1385 } 1386 bzero((caddr_t)list + 1387 kq->kq_knlistsize * sizeof(*list), 1388 (size - kq->kq_knlistsize) * sizeof(*list)); 1389 kq->kq_knlistsize = size; 1390 kq->kq_knlist = list; 1391 } 1392 KQ_UNLOCK(kq); 1393 } 1394 } else { 1395 if (kq->kq_knhashmask == 0) { 1396 tmp_knhash = hashinit(KN_HASHSIZE, M_KQUEUE, 1397 &tmp_knhashmask); 1398 if (tmp_knhash == NULL) 1399 return ENOMEM; 1400 KQ_LOCK(kq); 1401 if (kq->kq_knhashmask == 0) { 1402 kq->kq_knhash = tmp_knhash; 1403 kq->kq_knhashmask = tmp_knhashmask; 1404 } else { 1405 to_free = tmp_knhash; 1406 } 1407 KQ_UNLOCK(kq); 1408 } 1409 } 1410 free(to_free, M_KQUEUE); 1411 1412 KQ_NOTOWNED(kq); 1413 return 0; 1414} 1415 1416static void 1417kqueue_task(void *arg, int pending) 1418{ 1419 struct kqueue *kq; 1420 int haskqglobal; 1421 1422 haskqglobal = 0; 1423 kq = arg; 1424 1425 KQ_GLOBAL_LOCK(&kq_global, haskqglobal); 1426 KQ_LOCK(kq); 1427 1428 KNOTE_LOCKED(&kq->kq_sel.si_note, 0); 1429 1430 kq->kq_state &= ~KQ_TASKSCHED; 1431 if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) { 1432 wakeup(&kq->kq_state); 1433 } 1434 KQ_UNLOCK(kq); 1435 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1436} 1437 1438/* 1439 * Scan, update kn_data (if not ONESHOT), and copyout triggered events. 1440 * We treat KN_MARKER knotes as if they are INFLUX. 1441 */ 1442static int 1443kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops, 1444 const struct timespec *tsp, struct kevent *keva, struct thread *td) 1445{ 1446 struct kevent *kevp; 1447 struct knote *kn, *marker; 1448 sbintime_t asbt, rsbt; 1449 int count, error, haskqglobal, influx, nkev, touch; 1450 1451 count = maxevents; 1452 nkev = 0; 1453 error = 0; 1454 haskqglobal = 0; 1455 1456 if (maxevents == 0) 1457 goto done_nl; 1458 1459 rsbt = 0; 1460 if (tsp != NULL) { 1461 if (tsp->tv_sec < 0 || tsp->tv_nsec < 0 || 1462 tsp->tv_nsec >= 1000000000) { 1463 error = EINVAL; 1464 goto done_nl; 1465 } 1466 if (timespecisset(tsp)) { 1467 if (tsp->tv_sec <= INT32_MAX) { 1468 rsbt = tstosbt(*tsp); 1469 if (TIMESEL(&asbt, rsbt)) 1470 asbt += tc_tick_sbt; 1471 if (asbt <= INT64_MAX - rsbt) 1472 asbt += rsbt; 1473 else 1474 asbt = 0; 1475 rsbt >>= tc_precexp; 1476 } else 1477 asbt = 0; 1478 } else 1479 asbt = -1; 1480 } else 1481 asbt = 0; 1482 marker = knote_alloc(1); 1483 if (marker == NULL) { 1484 error = ENOMEM; 1485 goto done_nl; 1486 } 1487 marker->kn_status = KN_MARKER; 1488 KQ_LOCK(kq); 1489 1490retry: 1491 kevp = keva; 1492 if (kq->kq_count == 0) { 1493 if (asbt == -1) { 1494 error = EWOULDBLOCK; 1495 } else { 1496 kq->kq_state |= KQ_SLEEP; 1497 error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH, 1498 "kqread", asbt, rsbt, C_ABSOLUTE); 1499 } 1500 if (error == 0) 1501 goto retry; 1502 /* don't restart after signals... */ 1503 if (error == ERESTART) 1504 error = EINTR; 1505 else if (error == EWOULDBLOCK) 1506 error = 0; 1507 goto done; 1508 } 1509 1510 TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe); 1511 influx = 0; 1512 while (count) { 1513 KQ_OWNED(kq); 1514 kn = TAILQ_FIRST(&kq->kq_head); 1515 1516 if ((kn->kn_status == KN_MARKER && kn != marker) || 1517 (kn->kn_status & KN_INFLUX) == KN_INFLUX) { 1518 if (influx) { 1519 influx = 0; 1520 KQ_FLUX_WAKEUP(kq); 1521 } 1522 kq->kq_state |= KQ_FLUXWAIT; 1523 error = msleep(kq, &kq->kq_lock, PSOCK, 1524 "kqflxwt", 0); 1525 continue; 1526 } 1527 1528 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 1529 if ((kn->kn_status & KN_DISABLED) == KN_DISABLED) { 1530 kn->kn_status &= ~KN_QUEUED; 1531 kq->kq_count--; 1532 continue; 1533 } 1534 if (kn == marker) { 1535 KQ_FLUX_WAKEUP(kq); 1536 if (count == maxevents) 1537 goto retry; 1538 goto done; 1539 } 1540 KASSERT((kn->kn_status & KN_INFLUX) == 0, 1541 ("KN_INFLUX set when not suppose to be")); 1542 1543 if ((kn->kn_flags & EV_DROP) == EV_DROP) { 1544 kn->kn_status &= ~KN_QUEUED; 1545 kn->kn_status |= KN_INFLUX; 1546 kq->kq_count--; 1547 KQ_UNLOCK(kq); 1548 /* 1549 * We don't need to lock the list since we've marked 1550 * it _INFLUX. 1551 */ 1552 if (!(kn->kn_status & KN_DETACHED)) 1553 kn->kn_fop->f_detach(kn); 1554 knote_drop(kn, td); 1555 KQ_LOCK(kq); 1556 continue; 1557 } else if ((kn->kn_flags & EV_ONESHOT) == EV_ONESHOT) { 1558 kn->kn_status &= ~KN_QUEUED; 1559 kn->kn_status |= KN_INFLUX; 1560 kq->kq_count--; 1561 KQ_UNLOCK(kq); 1562 /* 1563 * We don't need to lock the list since we've marked 1564 * it _INFLUX. 1565 */ 1566 *kevp = kn->kn_kevent; 1567 if (!(kn->kn_status & KN_DETACHED)) 1568 kn->kn_fop->f_detach(kn); 1569 knote_drop(kn, td); 1570 KQ_LOCK(kq); 1571 kn = NULL; 1572 } else { 1573 kn->kn_status |= KN_INFLUX | KN_SCAN; 1574 KQ_UNLOCK(kq); 1575 if ((kn->kn_status & KN_KQUEUE) == KN_KQUEUE) 1576 KQ_GLOBAL_LOCK(&kq_global, haskqglobal); 1577 KN_LIST_LOCK(kn); 1578 if (kn->kn_fop->f_event(kn, 0) == 0) { 1579 KQ_LOCK(kq); 1580 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1581 kn->kn_status &= 1582 ~(KN_QUEUED | KN_ACTIVE | KN_INFLUX | 1583 KN_SCAN); 1584 kq->kq_count--; 1585 KN_LIST_UNLOCK(kn); 1586 influx = 1; 1587 continue; 1588 } 1589 touch = (!kn->kn_fop->f_isfd && 1590 kn->kn_fop->f_touch != NULL); 1591 if (touch) 1592 kn->kn_fop->f_touch(kn, kevp, EVENT_PROCESS); 1593 else 1594 *kevp = kn->kn_kevent; 1595 KQ_LOCK(kq); 1596 KQ_GLOBAL_UNLOCK(&kq_global, haskqglobal); 1597 if (kn->kn_flags & (EV_CLEAR | EV_DISPATCH)) { 1598 /* 1599 * Manually clear knotes who weren't 1600 * 'touch'ed. 1601 */ 1602 if (touch == 0 && kn->kn_flags & EV_CLEAR) { 1603 kn->kn_data = 0; 1604 kn->kn_fflags = 0; 1605 } 1606 if (kn->kn_flags & EV_DISPATCH) 1607 kn->kn_status |= KN_DISABLED; 1608 kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE); 1609 kq->kq_count--; 1610 } else 1611 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); 1612 1613 kn->kn_status &= ~(KN_INFLUX | KN_SCAN); 1614 KN_LIST_UNLOCK(kn); 1615 influx = 1; 1616 } 1617 1618 /* we are returning a copy to the user */ 1619 kevp++; 1620 nkev++; 1621 count--; 1622 1623 if (nkev == KQ_NEVENTS) { 1624 influx = 0; 1625 KQ_UNLOCK_FLUX(kq); 1626 error = k_ops->k_copyout(k_ops->arg, keva, nkev); 1627 nkev = 0; 1628 kevp = keva; 1629 KQ_LOCK(kq); 1630 if (error) 1631 break; 1632 } 1633 } 1634 TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe); 1635done: 1636 KQ_OWNED(kq); 1637 KQ_UNLOCK_FLUX(kq); 1638 knote_free(marker); 1639done_nl: 1640 KQ_NOTOWNED(kq); 1641 if (nkev != 0) 1642 error = k_ops->k_copyout(k_ops->arg, keva, nkev); 1643 td->td_retval[0] = maxevents - count; 1644 return (error); 1645} 1646 1647/* 1648 * XXX 1649 * This could be expanded to call kqueue_scan, if desired. 1650 */ 1651/*ARGSUSED*/ 1652static int 1653kqueue_read(struct file *fp, struct uio *uio, struct ucred *active_cred, 1654 int flags, struct thread *td) 1655{ 1656 return (ENXIO); 1657} 1658 1659/*ARGSUSED*/ 1660static int 1661kqueue_write(struct file *fp, struct uio *uio, struct ucred *active_cred, 1662 int flags, struct thread *td) 1663{ 1664 return (ENXIO); 1665} 1666 1667/*ARGSUSED*/ 1668static int 1669kqueue_truncate(struct file *fp, off_t length, struct ucred *active_cred, 1670 struct thread *td) 1671{ 1672 1673 return (EINVAL); 1674} 1675 1676/*ARGSUSED*/ 1677static int 1678kqueue_ioctl(struct file *fp, u_long cmd, void *data, 1679 struct ucred *active_cred, struct thread *td) 1680{ 1681 /* 1682 * Enabling sigio causes two major problems: 1683 * 1) infinite recursion: 1684 * Synopsys: kevent is being used to track signals and have FIOASYNC 1685 * set. On receipt of a signal this will cause a kqueue to recurse 1686 * into itself over and over. Sending the sigio causes the kqueue 1687 * to become ready, which in turn posts sigio again, forever. 1688 * Solution: this can be solved by setting a flag in the kqueue that 1689 * we have a SIGIO in progress. 1690 * 2) locking problems: 1691 * Synopsys: Kqueue is a leaf subsystem, but adding signalling puts 1692 * us above the proc and pgrp locks. 1693 * Solution: Post a signal using an async mechanism, being sure to 1694 * record a generation count in the delivery so that we do not deliver 1695 * a signal to the wrong process. 1696 * 1697 * Note, these two mechanisms are somewhat mutually exclusive! 1698 */ 1699#if 0 1700 struct kqueue *kq; 1701 1702 kq = fp->f_data; 1703 switch (cmd) { 1704 case FIOASYNC: 1705 if (*(int *)data) { 1706 kq->kq_state |= KQ_ASYNC; 1707 } else { 1708 kq->kq_state &= ~KQ_ASYNC; 1709 } 1710 return (0); 1711 1712 case FIOSETOWN: 1713 return (fsetown(*(int *)data, &kq->kq_sigio)); 1714 1715 case FIOGETOWN: 1716 *(int *)data = fgetown(&kq->kq_sigio); 1717 return (0); 1718 } 1719#endif 1720 1721 return (ENOTTY); 1722} 1723 1724/*ARGSUSED*/ 1725static int 1726kqueue_poll(struct file *fp, int events, struct ucred *active_cred, 1727 struct thread *td) 1728{ 1729 struct kqueue *kq; 1730 int revents = 0; 1731 int error; 1732 1733 if ((error = kqueue_acquire(fp, &kq))) 1734 return POLLERR; 1735 1736 KQ_LOCK(kq); 1737 if (events & (POLLIN | POLLRDNORM)) { 1738 if (kq->kq_count) { 1739 revents |= events & (POLLIN | POLLRDNORM); 1740 } else { 1741 selrecord(td, &kq->kq_sel); 1742 if (SEL_WAITING(&kq->kq_sel)) 1743 kq->kq_state |= KQ_SEL; 1744 } 1745 } 1746 kqueue_release(kq, 1); 1747 KQ_UNLOCK(kq); 1748 return (revents); 1749} 1750 1751/*ARGSUSED*/ 1752static int 1753kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred, 1754 struct thread *td) 1755{ 1756 1757 bzero((void *)st, sizeof *st); 1758 /* 1759 * We no longer return kq_count because the unlocked value is useless. 1760 * If you spent all this time getting the count, why not spend your 1761 * syscall better by calling kevent? 1762 * 1763 * XXX - This is needed for libc_r. 1764 */ 1765 st->st_mode = S_IFIFO; 1766 return (0); 1767} 1768 1769/*ARGSUSED*/ 1770static int 1771kqueue_close(struct file *fp, struct thread *td) 1772{ 1773 struct kqueue *kq = fp->f_data; 1774 struct filedesc *fdp; 1775 struct knote *kn; 1776 int i; 1777 int error; 1778 int filedesc_unlock; 1779 1780 if ((error = kqueue_acquire(fp, &kq))) 1781 return error; 1782 1783 filedesc_unlock = 0; 1784 KQ_LOCK(kq); 1785 1786 KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING, 1787 ("kqueue already closing")); 1788 kq->kq_state |= KQ_CLOSING; 1789 if (kq->kq_refcnt > 1) 1790 msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0); 1791 1792 KASSERT(kq->kq_refcnt == 1, ("other refs are out there!")); 1793 fdp = kq->kq_fdp; 1794 1795 KASSERT(knlist_empty(&kq->kq_sel.si_note), 1796 ("kqueue's knlist not empty")); 1797 1798 for (i = 0; i < kq->kq_knlistsize; i++) { 1799 while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) { 1800 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) { 1801 kq->kq_state |= KQ_FLUXWAIT; 1802 msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0); 1803 continue; 1804 } 1805 kn->kn_status |= KN_INFLUX; 1806 KQ_UNLOCK(kq); 1807 if (!(kn->kn_status & KN_DETACHED)) 1808 kn->kn_fop->f_detach(kn); 1809 knote_drop(kn, td); 1810 KQ_LOCK(kq); 1811 } 1812 } 1813 if (kq->kq_knhashmask != 0) { 1814 for (i = 0; i <= kq->kq_knhashmask; i++) { 1815 while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) { 1816 if ((kn->kn_status & KN_INFLUX) == KN_INFLUX) { 1817 kq->kq_state |= KQ_FLUXWAIT; 1818 msleep(kq, &kq->kq_lock, PSOCK, 1819 "kqclo2", 0); 1820 continue; 1821 } 1822 kn->kn_status |= KN_INFLUX; 1823 KQ_UNLOCK(kq); 1824 if (!(kn->kn_status & KN_DETACHED)) 1825 kn->kn_fop->f_detach(kn); 1826 knote_drop(kn, td); 1827 KQ_LOCK(kq); 1828 } 1829 } 1830 } 1831 1832 if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) { 1833 kq->kq_state |= KQ_TASKDRAIN; 1834 msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0); 1835 } 1836 1837 if ((kq->kq_state & KQ_SEL) == KQ_SEL) { 1838 selwakeuppri(&kq->kq_sel, PSOCK); 1839 if (!SEL_WAITING(&kq->kq_sel)) 1840 kq->kq_state &= ~KQ_SEL; 1841 } 1842 1843 KQ_UNLOCK(kq); 1844 1845 /* 1846 * We could be called due to the knote_drop() doing fdrop(), 1847 * called from kqueue_register(). In this case the global 1848 * lock is owned, and filedesc sx is locked before, to not 1849 * take the sleepable lock after non-sleepable. 1850 */ 1851 if (!sx_xlocked(FILEDESC_LOCK(fdp))) { 1852 FILEDESC_XLOCK(fdp); 1853 filedesc_unlock = 1; 1854 } else 1855 filedesc_unlock = 0; 1856 TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list); 1857 if (filedesc_unlock) 1858 FILEDESC_XUNLOCK(fdp); 1859 1860 seldrain(&kq->kq_sel); 1861 knlist_destroy(&kq->kq_sel.si_note); 1862 mtx_destroy(&kq->kq_lock); 1863 kq->kq_fdp = NULL; 1864 1865 if (kq->kq_knhash != NULL) 1866 free(kq->kq_knhash, M_KQUEUE); 1867 if (kq->kq_knlist != NULL) 1868 free(kq->kq_knlist, M_KQUEUE); 1869 1870 funsetown(&kq->kq_sigio); 1871 free(kq, M_KQUEUE); 1872 fp->f_data = NULL; 1873 1874 return (0); 1875} 1876 1877static void 1878kqueue_wakeup(struct kqueue *kq) 1879{ 1880 KQ_OWNED(kq); 1881 1882 if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) { 1883 kq->kq_state &= ~KQ_SLEEP; 1884 wakeup(kq); 1885 } 1886 if ((kq->kq_state & KQ_SEL) == KQ_SEL) { 1887 selwakeuppri(&kq->kq_sel, PSOCK); 1888 if (!SEL_WAITING(&kq->kq_sel)) 1889 kq->kq_state &= ~KQ_SEL; 1890 } 1891 if (!knlist_empty(&kq->kq_sel.si_note)) 1892 kqueue_schedtask(kq); 1893 if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) { 1894 pgsigio(&kq->kq_sigio, SIGIO, 0); 1895 } 1896} 1897 1898/* 1899 * Walk down a list of knotes, activating them if their event has triggered. 1900 * 1901 * There is a possibility to optimize in the case of one kq watching another. 1902 * Instead of scheduling a task to wake it up, you could pass enough state 1903 * down the chain to make up the parent kqueue. Make this code functional 1904 * first. 1905 */ 1906void 1907knote(struct knlist *list, long hint, int lockflags) 1908{ 1909 struct kqueue *kq; 1910 struct knote *kn, *tkn; 1911 int error; 1912 1913 if (list == NULL) 1914 return; 1915 1916 KNL_ASSERT_LOCK(list, lockflags & KNF_LISTLOCKED); 1917 1918 if ((lockflags & KNF_LISTLOCKED) == 0) 1919 list->kl_lock(list->kl_lockarg); 1920 1921 /* 1922 * If we unlock the list lock (and set KN_INFLUX), we can 1923 * eliminate the kqueue scheduling, but this will introduce 1924 * four lock/unlock's for each knote to test. Also, marker 1925 * would be needed to keep iteration position, since filters 1926 * or other threads could remove events. 1927 */ 1928 SLIST_FOREACH_SAFE(kn, &list->kl_list, kn_selnext, tkn) { 1929 kq = kn->kn_kq; 1930 KQ_LOCK(kq); 1931 if ((kn->kn_status & (KN_INFLUX | KN_SCAN)) == KN_INFLUX) { 1932 /* 1933 * Do not process the influx notes, except for 1934 * the influx coming from the kq unlock in the 1935 * kqueue_scan(). In the later case, we do 1936 * not interfere with the scan, since the code 1937 * fragment in kqueue_scan() locks the knlist, 1938 * and cannot proceed until we finished. 1939 */ 1940 KQ_UNLOCK(kq); 1941 } else if ((lockflags & KNF_NOKQLOCK) != 0) { 1942 kn->kn_status |= KN_INFLUX; 1943 KQ_UNLOCK(kq); 1944 error = kn->kn_fop->f_event(kn, hint); 1945 KQ_LOCK(kq); 1946 kn->kn_status &= ~KN_INFLUX; 1947 if (error) 1948 KNOTE_ACTIVATE(kn, 1); 1949 KQ_UNLOCK_FLUX(kq); 1950 } else { 1951 kn->kn_status |= KN_HASKQLOCK; 1952 if (kn->kn_fop->f_event(kn, hint)) 1953 KNOTE_ACTIVATE(kn, 1); 1954 kn->kn_status &= ~KN_HASKQLOCK; 1955 KQ_UNLOCK(kq); 1956 } 1957 } 1958 if ((lockflags & KNF_LISTLOCKED) == 0) 1959 list->kl_unlock(list->kl_lockarg); 1960} 1961 1962/* 1963 * add a knote to a knlist 1964 */ 1965void 1966knlist_add(struct knlist *knl, struct knote *kn, int islocked) 1967{ 1968 KNL_ASSERT_LOCK(knl, islocked); 1969 KQ_NOTOWNED(kn->kn_kq); 1970 KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) == 1971 (KN_INFLUX|KN_DETACHED), ("knote not KN_INFLUX and KN_DETACHED")); 1972 if (!islocked) 1973 knl->kl_lock(knl->kl_lockarg); 1974 SLIST_INSERT_HEAD(&knl->kl_list, kn, kn_selnext); 1975 if (!islocked) 1976 knl->kl_unlock(knl->kl_lockarg); 1977 KQ_LOCK(kn->kn_kq); 1978 kn->kn_knlist = knl; 1979 kn->kn_status &= ~KN_DETACHED; 1980 KQ_UNLOCK(kn->kn_kq); 1981} 1982 1983static void 1984knlist_remove_kq(struct knlist *knl, struct knote *kn, int knlislocked, int kqislocked) 1985{ 1986 KASSERT(!(!!kqislocked && !knlislocked), ("kq locked w/o knl locked")); 1987 KNL_ASSERT_LOCK(knl, knlislocked); 1988 mtx_assert(&kn->kn_kq->kq_lock, kqislocked ? MA_OWNED : MA_NOTOWNED); 1989 if (!kqislocked) 1990 KASSERT((kn->kn_status & (KN_INFLUX|KN_DETACHED)) == KN_INFLUX, 1991 ("knlist_remove called w/o knote being KN_INFLUX or already removed")); 1992 if (!knlislocked) 1993 knl->kl_lock(knl->kl_lockarg); 1994 SLIST_REMOVE(&knl->kl_list, kn, knote, kn_selnext); 1995 kn->kn_knlist = NULL; 1996 if (!knlislocked) 1997 knl->kl_unlock(knl->kl_lockarg); 1998 if (!kqislocked) 1999 KQ_LOCK(kn->kn_kq); 2000 kn->kn_status |= KN_DETACHED; 2001 if (!kqislocked) 2002 KQ_UNLOCK(kn->kn_kq); 2003} 2004 2005/* 2006 * remove knote from the specified knlist 2007 */ 2008void 2009knlist_remove(struct knlist *knl, struct knote *kn, int islocked) 2010{ 2011 2012 knlist_remove_kq(knl, kn, islocked, 0); 2013} 2014 2015/* 2016 * remove knote from the specified knlist while in f_event handler. 2017 */ 2018void 2019knlist_remove_inevent(struct knlist *knl, struct knote *kn) 2020{ 2021 2022 knlist_remove_kq(knl, kn, 1, 2023 (kn->kn_status & KN_HASKQLOCK) == KN_HASKQLOCK); 2024} 2025 2026int 2027knlist_empty(struct knlist *knl) 2028{ 2029 2030 KNL_ASSERT_LOCKED(knl); 2031 return SLIST_EMPTY(&knl->kl_list); 2032} 2033 2034static struct mtx knlist_lock; 2035MTX_SYSINIT(knlist_lock, &knlist_lock, "knlist lock for lockless objects", 2036 MTX_DEF); 2037static void knlist_mtx_lock(void *arg); 2038static void knlist_mtx_unlock(void *arg); 2039 2040static void 2041knlist_mtx_lock(void *arg) 2042{ 2043 2044 mtx_lock((struct mtx *)arg); 2045} 2046 2047static void 2048knlist_mtx_unlock(void *arg) 2049{ 2050 2051 mtx_unlock((struct mtx *)arg); 2052} 2053 2054static void 2055knlist_mtx_assert_locked(void *arg) 2056{ 2057 2058 mtx_assert((struct mtx *)arg, MA_OWNED); 2059} 2060 2061static void 2062knlist_mtx_assert_unlocked(void *arg) 2063{ 2064 2065 mtx_assert((struct mtx *)arg, MA_NOTOWNED); 2066} 2067 2068static void 2069knlist_rw_rlock(void *arg) 2070{ 2071 2072 rw_rlock((struct rwlock *)arg); 2073} 2074 2075static void 2076knlist_rw_runlock(void *arg) 2077{ 2078 2079 rw_runlock((struct rwlock *)arg); 2080} 2081 2082static void 2083knlist_rw_assert_locked(void *arg) 2084{ 2085 2086 rw_assert((struct rwlock *)arg, RA_LOCKED); 2087} 2088 2089static void 2090knlist_rw_assert_unlocked(void *arg) 2091{ 2092 2093 rw_assert((struct rwlock *)arg, RA_UNLOCKED); 2094} 2095 2096void 2097knlist_init(struct knlist *knl, void *lock, void (*kl_lock)(void *), 2098 void (*kl_unlock)(void *), 2099 void (*kl_assert_locked)(void *), void (*kl_assert_unlocked)(void *)) 2100{ 2101 2102 if (lock == NULL) 2103 knl->kl_lockarg = &knlist_lock; 2104 else 2105 knl->kl_lockarg = lock; 2106 2107 if (kl_lock == NULL) 2108 knl->kl_lock = knlist_mtx_lock; 2109 else 2110 knl->kl_lock = kl_lock; 2111 if (kl_unlock == NULL) 2112 knl->kl_unlock = knlist_mtx_unlock; 2113 else 2114 knl->kl_unlock = kl_unlock; 2115 if (kl_assert_locked == NULL) 2116 knl->kl_assert_locked = knlist_mtx_assert_locked; 2117 else 2118 knl->kl_assert_locked = kl_assert_locked; 2119 if (kl_assert_unlocked == NULL) 2120 knl->kl_assert_unlocked = knlist_mtx_assert_unlocked; 2121 else 2122 knl->kl_assert_unlocked = kl_assert_unlocked; 2123 2124 SLIST_INIT(&knl->kl_list); 2125} 2126 2127void 2128knlist_init_mtx(struct knlist *knl, struct mtx *lock) 2129{ 2130 2131 knlist_init(knl, lock, NULL, NULL, NULL, NULL); 2132} 2133 2134void 2135knlist_init_rw_reader(struct knlist *knl, struct rwlock *lock) 2136{ 2137 2138 knlist_init(knl, lock, knlist_rw_rlock, knlist_rw_runlock, 2139 knlist_rw_assert_locked, knlist_rw_assert_unlocked); 2140} 2141 2142void 2143knlist_destroy(struct knlist *knl) 2144{ 2145 2146#ifdef INVARIANTS 2147 /* 2148 * if we run across this error, we need to find the offending 2149 * driver and have it call knlist_clear or knlist_delete. 2150 */ 2151 if (!SLIST_EMPTY(&knl->kl_list)) 2152 printf("WARNING: destroying knlist w/ knotes on it!\n"); 2153#endif 2154 2155 knl->kl_lockarg = knl->kl_lock = knl->kl_unlock = NULL; 2156 SLIST_INIT(&knl->kl_list); 2157} 2158 2159/* 2160 * Even if we are locked, we may need to drop the lock to allow any influx 2161 * knotes time to "settle". 2162 */ 2163void 2164knlist_cleardel(struct knlist *knl, struct thread *td, int islocked, int killkn) 2165{ 2166 struct knote *kn, *kn2; 2167 struct kqueue *kq; 2168 2169 if (islocked) 2170 KNL_ASSERT_LOCKED(knl); 2171 else { 2172 KNL_ASSERT_UNLOCKED(knl); 2173again: /* need to reacquire lock since we have dropped it */ 2174 knl->kl_lock(knl->kl_lockarg); 2175 } 2176 2177 SLIST_FOREACH_SAFE(kn, &knl->kl_list, kn_selnext, kn2) { 2178 kq = kn->kn_kq; 2179 KQ_LOCK(kq); 2180 if ((kn->kn_status & KN_INFLUX)) { 2181 KQ_UNLOCK(kq); 2182 continue; 2183 } 2184 knlist_remove_kq(knl, kn, 1, 1); 2185 if (killkn) { 2186 kn->kn_status |= KN_INFLUX | KN_DETACHED; 2187 KQ_UNLOCK(kq); 2188 knote_drop(kn, td); 2189 } else { 2190 /* Make sure cleared knotes disappear soon */ 2191 kn->kn_flags |= (EV_EOF | EV_ONESHOT); 2192 KQ_UNLOCK(kq); 2193 } 2194 kq = NULL; 2195 } 2196 2197 if (!SLIST_EMPTY(&knl->kl_list)) { 2198 /* there are still KN_INFLUX remaining */ 2199 kn = SLIST_FIRST(&knl->kl_list); 2200 kq = kn->kn_kq; 2201 KQ_LOCK(kq); 2202 KASSERT(kn->kn_status & KN_INFLUX, 2203 ("knote removed w/o list lock")); 2204 knl->kl_unlock(knl->kl_lockarg); 2205 kq->kq_state |= KQ_FLUXWAIT; 2206 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0); 2207 kq = NULL; 2208 goto again; 2209 } 2210 2211 if (islocked) 2212 KNL_ASSERT_LOCKED(knl); 2213 else { 2214 knl->kl_unlock(knl->kl_lockarg); 2215 KNL_ASSERT_UNLOCKED(knl); 2216 } 2217} 2218 2219/* 2220 * Remove all knotes referencing a specified fd must be called with FILEDESC 2221 * lock. This prevents a race where a new fd comes along and occupies the 2222 * entry and we attach a knote to the fd. 2223 */ 2224void 2225knote_fdclose(struct thread *td, int fd) 2226{ 2227 struct filedesc *fdp = td->td_proc->p_fd; 2228 struct kqueue *kq; 2229 struct knote *kn; 2230 int influx; 2231 2232 FILEDESC_XLOCK_ASSERT(fdp); 2233 2234 /* 2235 * We shouldn't have to worry about new kevents appearing on fd 2236 * since filedesc is locked. 2237 */ 2238 TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) { 2239 KQ_LOCK(kq); 2240 2241again: 2242 influx = 0; 2243 while (kq->kq_knlistsize > fd && 2244 (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) { 2245 if (kn->kn_status & KN_INFLUX) { 2246 /* someone else might be waiting on our knote */ 2247 if (influx) 2248 wakeup(kq); 2249 kq->kq_state |= KQ_FLUXWAIT; 2250 msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0); 2251 goto again; 2252 } 2253 kn->kn_status |= KN_INFLUX; 2254 KQ_UNLOCK(kq); 2255 if (!(kn->kn_status & KN_DETACHED)) 2256 kn->kn_fop->f_detach(kn); 2257 knote_drop(kn, td); 2258 influx = 1; 2259 KQ_LOCK(kq); 2260 } 2261 KQ_UNLOCK_FLUX(kq); 2262 } 2263} 2264 2265static int 2266knote_attach(struct knote *kn, struct kqueue *kq) 2267{ 2268 struct klist *list; 2269 2270 KASSERT(kn->kn_status & KN_INFLUX, ("knote not marked INFLUX")); 2271 KQ_OWNED(kq); 2272 2273 if (kn->kn_fop->f_isfd) { 2274 if (kn->kn_id >= kq->kq_knlistsize) 2275 return ENOMEM; 2276 list = &kq->kq_knlist[kn->kn_id]; 2277 } else { 2278 if (kq->kq_knhash == NULL) 2279 return ENOMEM; 2280 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; 2281 } 2282 2283 SLIST_INSERT_HEAD(list, kn, kn_link); 2284 2285 return 0; 2286} 2287 2288/* 2289 * knote must already have been detached using the f_detach method. 2290 * no lock need to be held, it is assumed that the KN_INFLUX flag is set 2291 * to prevent other removal. 2292 */ 2293static void 2294knote_drop(struct knote *kn, struct thread *td) 2295{ 2296 struct kqueue *kq; 2297 struct klist *list; 2298 2299 kq = kn->kn_kq; 2300 2301 KQ_NOTOWNED(kq); 2302 KASSERT((kn->kn_status & KN_INFLUX) == KN_INFLUX, 2303 ("knote_drop called without KN_INFLUX set in kn_status")); 2304 2305 KQ_LOCK(kq); 2306 if (kn->kn_fop->f_isfd) 2307 list = &kq->kq_knlist[kn->kn_id]; 2308 else 2309 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)]; 2310 2311 if (!SLIST_EMPTY(list)) 2312 SLIST_REMOVE(list, kn, knote, kn_link); 2313 if (kn->kn_status & KN_QUEUED) 2314 knote_dequeue(kn); 2315 KQ_UNLOCK_FLUX(kq); 2316 2317 if (kn->kn_fop->f_isfd) { 2318 fdrop(kn->kn_fp, td); 2319 kn->kn_fp = NULL; 2320 } 2321 kqueue_fo_release(kn->kn_kevent.filter); 2322 kn->kn_fop = NULL; 2323 knote_free(kn); 2324} 2325 2326static void 2327knote_enqueue(struct knote *kn) 2328{ 2329 struct kqueue *kq = kn->kn_kq; 2330 2331 KQ_OWNED(kn->kn_kq); 2332 KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued")); 2333 2334 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe); 2335 kn->kn_status |= KN_QUEUED; 2336 kq->kq_count++; 2337 kqueue_wakeup(kq); 2338} 2339 2340static void 2341knote_dequeue(struct knote *kn) 2342{ 2343 struct kqueue *kq = kn->kn_kq; 2344 2345 KQ_OWNED(kn->kn_kq); 2346 KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued")); 2347 2348 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe); 2349 kn->kn_status &= ~KN_QUEUED; 2350 kq->kq_count--; 2351} 2352 2353static void 2354knote_init(void) 2355{ 2356 2357 knote_zone = uma_zcreate("KNOTE", sizeof(struct knote), NULL, NULL, 2358 NULL, NULL, UMA_ALIGN_PTR, 0); 2359} 2360SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL); 2361 2362static struct knote * 2363knote_alloc(int waitok) 2364{ 2365 return ((struct knote *)uma_zalloc(knote_zone, 2366 (waitok ? M_WAITOK : M_NOWAIT)|M_ZERO)); 2367} 2368 2369static void 2370knote_free(struct knote *kn) 2371{ 2372 if (kn != NULL) 2373 uma_zfree(knote_zone, kn); 2374} 2375 2376/* 2377 * Register the kev w/ the kq specified by fd. 2378 */ 2379int 2380kqfd_register(int fd, struct kevent *kev, struct thread *td, int waitok) 2381{ 2382 struct kqueue *kq; 2383 struct file *fp; 2384 cap_rights_t rights; 2385 int error; 2386 2387 error = fget(td, fd, cap_rights_init(&rights, CAP_KQUEUE_CHANGE), &fp); 2388 if (error != 0) 2389 return (error); 2390 if ((error = kqueue_acquire(fp, &kq)) != 0) 2391 goto noacquire; 2392 2393 error = kqueue_register(kq, kev, td, waitok); 2394 2395 kqueue_release(kq, 0); 2396 2397noacquire: 2398 fdrop(fp, td); 2399 2400 return error; 2401} 2402