Lines Matching defs:kq

105 static inline void kqlock(struct kqueue *kq);
106 static inline void kqunlock(struct kqueue *kq);
108 static int kqlock2knoteuse(struct kqueue *kq, struct knote *kn);
109 static int kqlock2knoteusewait(struct kqueue *kq, struct knote *kn);
110 static int kqlock2knotedrop(struct kqueue *kq, struct knote *kn);
111 static int knoteuse2kqlock(struct kqueue *kq, struct knote *kn);
113 static void kqueue_wakeup(struct kqueue *kq, int closed);
144 static int kevent_callback(struct kqueue *kq, struct kevent64_s *kevp, void *data);
145 static void kevent_continue(struct kqueue *kq, void *data, int error);
147 static int kqueue_process(struct kqueue *kq, kevent_callback_t callback,
149 static int kqueue_begin_processing(struct kqueue *kq);
150 static void kqueue_end_processing(struct kqueue *kq);
299 kqlock(struct kqueue *kq)
301 lck_spin_lock(&kq->kq_lock);
305 kqunlock(struct kqueue *kq)
307 lck_spin_unlock(&kq->kq_lock);
311 * Convert a kq lock to a knote use reference.
317 * - kq locked at entry
321 kqlock2knoteuse(struct kqueue *kq, struct knote *kn)
326 kqunlock(kq);
331 * Convert a kq lock to a knote use reference,
338 * - kq locked at entry
339 * - kq always unlocked on exit
342 kqlock2knoteusewait(struct kqueue *kq, struct knote *kn)
346 wait_queue_assert_wait((wait_queue_t)kq->kq_wqs, &kn->kn_status, THREAD_UNINT, 0);
347 kqunlock(kq);
352 kqunlock(kq);
358 * Convert from a knote use reference back to kq lock.
368 knoteuse2kqlock(struct kqueue *kq, struct knote *kn)
370 kqlock(kq);
377 wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs, &kn->kn_status, THREAD_AWAKENED);
384 * Convert a kq lock to a knote drop reference.
392 * - kq locked at entry
398 kqlock2knotedrop(struct kqueue *kq, struct knote *kn)
406 kqunlock(kq);
411 wait_queue_assert_wait((wait_queue_t)kq->kq_wqs, &kn->kn_status, THREAD_UNINT, 0);
412 kqunlock(kq);
423 struct kqueue *kq = kn->kn_kq;
425 kqlock(kq);
429 wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs, &kn->kn_status, THREAD_AWAKENED);
432 kqunlock(kq);
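
The conversion helpers above (lines 321-412) bracket calls made outside the kq spin lock: the lock is traded for a knote use (or drop) reference, the filter routine runs unlocked, and the reference is then traded back for the lock. A minimal sketch of that pattern, modeled on knote() at line 2404 below; the knote_activate() call stands in for the unlisted body after line 2414 and is an assumption:

    /* Hypothetical helper illustrating the use-reference protocol. */
    static void
    knote_call_filter(struct knote *kn, long hint)
    {
            struct kqueue *kq = kn->kn_kq;

            kqlock(kq);
            if (kqlock2knoteuse(kq, kn)) {
                    /* kq lock dropped, use reference held: the filter may block */
                    int result = kn->kn_fop->f_event(kn, hint);

                    /* trade the use reference back for the kq lock */
                    if (knoteuse2kqlock(kq, kn) && result)
                            knote_activate(kn, 1);  /* assumed body after line 2414 */
            }
            kqunlock(kq);
    }
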
453 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
455 kqlock(kq);
456 KNOTE_DETACH(&kq->kq_sel.si_note, kn);
457 kqunlock(kq);
464 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
466 kn->kn_data = kq->kq_count;
771 struct kqueue *kq = kn->kn_kq;
772 wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs, &kn->kn_hook,
786 struct kqueue *kq = kn->kn_kq;
798 wait_queue_assert_wait((wait_queue_t)kq->kq_wqs,
1058 struct kqueue *kq;
1060 MALLOC_ZONE(kq, struct kqueue *, sizeof(struct kqueue), M_KQUEUE, M_WAITOK);
1061 if (kq != NULL) {
1066 bzero(kq, sizeof(struct kqueue));
1067 lck_spin_init(&kq->kq_lock, kq_lck_grp, kq_lck_attr);
1068 TAILQ_INIT(&kq->kq_head);
1069 kq->kq_wqs = wqs;
1070 kq->kq_p = p;
1072 FREE_ZONE(kq, sizeof(struct kqueue), M_KQUEUE);
1079 fdp->fd_knlistsize = 0; /* this process has had a kq */
1083 return kq;
1102 kqueue_dealloc(struct kqueue *kq)
1104 struct proc *p = kq->kq_p;
1113 if (kq == kn->kn_kq) {
1114 kqlock(kq);
1117 if (kqlock2knotedrop(kq, kn)) {
1133 if (kq == kn->kn_kq) {
1134 kqlock(kq);
1137 if (kqlock2knotedrop(kq, kn)) {
1156 wait_queue_unlink_all((wait_queue_t)kq->kq_wqs);
1157 wait_queue_set_free(kq->kq_wqs);
1158 lck_spin_destroy(&kq->kq_lock, kq_lck_grp);
1159 FREE_ZONE(kq, sizeof(struct kqueue), M_KQUEUE);
1165 struct kqueue *kq;
1174 kq = kqueue_alloc(p);
1175 if (kq == NULL) {
1183 fp->f_data = (caddr_t)kq;
1283 * assume we inherit a use count on the kq fileglob.
1287 kevent_continue(__unused struct kqueue *kq, void *data, int error)
1356 struct kqueue *kq;
1388 /* get a usecount for the kq itself */
1389 if ((error = fp_getfkq(p, fd, &fp, &kq)) != 0)
1392 /* each kq should only be used for events of one type */
1393 kqlock(kq);
1394 if (kq->kq_state & (KQ_KEV32 | KQ_KEV64)) {
1395 if (((iskev64 && (kq->kq_state & KQ_KEV32)) ||
1396 (!iskev64 && (kq->kq_state & KQ_KEV64)))) {
1398 kqunlock(kq);
1402 kq->kq_state |= (iskev64 ? KQ_KEV64 : KQ_KEV32);
1404 kqunlock(kq);
1414 error = kevent_register(kq, &kev, p);
1439 error = kqueue_scan(kq, kevent_callback,
1442 kevent_continue(kq, cont_args, error);
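
The check at lines 1392-1404 pins each kq to one event width: a queue first used through kevent() is marked KQ_KEV32 and later kevent64() calls on it are refused, and vice versa. A small userspace sketch of that restriction; the EINVAL error value is my reading of the rejection path, which is not shown in the matches above:

    #include <sys/event.h>
    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
            struct kevent ev;
            struct kevent64_s ev64;
            int kq = kqueue();

            /* first use goes through the 32-bit path: the kq becomes KQ_KEV32 */
            EV_SET(&ev, STDIN_FILENO, EVFILT_READ, EV_ADD, 0, 0, NULL);
            kevent(kq, &ev, 1, NULL, 0, NULL);

            /* mixing in the 64-bit path on the same kq is refused */
            EV_SET64(&ev64, STDIN_FILENO, EVFILT_READ, EV_ADD, 0, 0, 0, 0, 0);
            if (kevent64(kq, &ev64, 1, NULL, 0, 0, NULL) == -1)
                    printf("kevent64 on a KQ_KEV32 queue: errno %d\n", errno);

            close(kq);
            return (0);
    }
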
1458 kevent_callback(__unused struct kqueue *kq, struct kevent64_s *kevp,
1522 kevent_register(struct kqueue *kq, struct kevent64_s *kev, __unused struct proc *ctxp)
1524 struct proc *p = kq->kq_p;
1557 if (kq == kn->kn_kq &&
1570 kq == kn->kn_kq &&
1588 kn->kn_kq = kq;
1589 kn->kn_tq = &kq->kq_head;
1619 kqlock(kq);
1628 kqunlock(kq);
1637 kqunlock(kq);
1643 kqunlock(kq);
1651 kqlock(kq);
1657 if (kqlock2knotedrop(kq, kn)) {
1693 if (!kqlock2knoteusewait(kq, kn)) {
1713 if (knoteuse2kqlock(kq, kn))
1715 kqunlock(kq);
1747 struct kqueue *kq = kn->kn_kq;
1782 if (kqlock2knoteuse(kq, kn)) {
1795 if (!knoteuse2kqlock(kq, kn)) {
1821 assert(kn->kn_tq == &kq->kq_head);
1822 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
1839 if (kqlock2knotedrop(kq, kn)) {
1857 kqunlock(kq);
1862 * the kq queue and wake up any
1865 kqunlock(kq);
1869 error = (callback)(kq, &kev, data);
1871 kqlock(kq);
1883 kqueue_begin_processing(struct kqueue *kq)
1886 if (kq->kq_count == 0) {
1891 if (kq->kq_nprocess != 0) {
1892 wait_queue_assert_wait((wait_queue_t)kq->kq_wqs, &kq->kq_nprocess, THREAD_UNINT, 0);
1893 kq->kq_state |= KQ_PROCWAIT;
1894 kqunlock(kq);
1896 kqlock(kq);
1898 kq->kq_nprocess = 1;
1908 kqueue_end_processing(struct kqueue *kq)
1910 kq->kq_nprocess = 0;
1911 if (kq->kq_state & KQ_PROCWAIT) {
1912 kq->kq_state &= ~KQ_PROCWAIT;
1913 wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs, &kq->kq_nprocess, THREAD_AWAKENED);
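
kqueue_begin_processing() and kqueue_end_processing() bracket any walk of a kq's active queue: the former bails out with -1 when kq_count is zero, waits under KQ_PROCWAIT while another thread still holds kq_nprocess, and then claims it; the latter releases the claim and wakes anyone parked on it. A sketch of the bracket as kqueue_process() (line 1933) and kqueue_select() (line 2204) use it, with the per-knote work elided:

    /* Hypothetical walker showing the begin/end processing bracket. */
    static int
    kqueue_walk_active(struct kqueue *kq)
    {
            kqlock(kq);
            if (kqueue_begin_processing(kq) == -1) {
                    /* nothing queued right now */
                    kqunlock(kq);
                    return (0);
            }

            /*
             * ... walk kq->kq_head here, converting the kq lock into
             * per-knote use/drop references as the real walkers do ...
             */

            kqueue_end_processing(kq);  /* clears kq_nprocess, wakes KQ_PROCWAIT waiters */
            kqunlock(kq);
            return (0);
    }
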
1933 kqueue_process(struct kqueue *kq,
1946 if (kqueue_begin_processing(kq) == -1) {
1956 wait_queue_sub_clearrefs(kq->kq_wqs);
1967 (kn = TAILQ_FIRST(&kq->kq_head)) != NULL) {
1978 * kq's queue and wake up any waiters.
1983 kn->kn_tq = &kq->kq_head;
1984 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
1987 kqueue_end_processing(kq);
2000 struct kqueue *kq = (struct kqueue *)data;
2007 kqlock(kq);
2008 error = kqueue_process(kq, cont_args->call, cont_args, &count, current_proc());
2010 wait_queue_assert_wait((wait_queue_t)kq->kq_wqs, KQ_EVENT,
2012 kq->kq_state |= KQ_SLEEP;
2013 kqunlock(kq);
2014 thread_block_parameter(kqueue_scan_continue, kq);
2017 kqunlock(kq);
2032 (cont_args->cont)(kq, cont_args->data, error);
2047 * The caller must hold a use-count reference on the kq.
2051 kqueue_scan(struct kqueue *kq,
2071 * Make a pass through the kq to find events already
2074 kqlock(kq);
2075 error = kqueue_process(kq, callback, data, &count, p);
2114 wait_queue_assert_wait((wait_queue_t)kq->kq_wqs, KQ_EVENT, THREAD_ABORTSAFE, deadline);
2115 kq->kq_state |= KQ_SLEEP;
2116 kqunlock(kq);
2117 wait_result = thread_block_parameter(cont, kq);
2133 kqunlock(kq);
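
kqueue_scan() (line 2051) alternates between a processing pass and a sleep on the kq's wait-queue set: when kqueue_process() delivers nothing, the thread asserts a wait on KQ_EVENT, marks the kq KQ_SLEEP, and blocks until kqueue_wakeup() (line 2370) clears KQ_SLEEP and posts the event. A simplified skeleton of that loop, with the timeout/interrupt handling and the continuation plumbing reduced to comments:

    /* Simplified sketch of the scan/sleep loop; not the verbatim function. */
    static int
    kqueue_scan_sketch(struct kqueue *kq, kevent_callback_t callback,
        void *data, struct proc *p, uint64_t deadline)
    {
            int error, count;

            for (;;) {
                    kqlock(kq);
                    error = kqueue_process(kq, callback, data, &count, p);
                    if (error != 0 || count != 0)
                            break;          /* events delivered, or a hard error */

                    /* nothing ready: sleep until kqueue_wakeup() posts KQ_EVENT */
                    wait_queue_assert_wait((wait_queue_t)kq->kq_wqs, KQ_EVENT,
                        THREAD_ABORTSAFE, deadline);
                    kq->kq_state |= KQ_SLEEP;
                    kqunlock(kq);
                    (void) thread_block_parameter(THREAD_CONTINUE_NULL, kq);
                    /* the real code also checks for timeout and interruption here */
            }
            kqunlock(kq);
            return (error);
    }
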
2176 struct kqueue *kq = (struct kqueue *)fp->f_data;
2186 kqlock(kq);
2199 kq->kq_state |= KQ_SEL;
2200 wait_queue_link_noalloc((wait_queue_t)kq->kq_wqs, ut->uu_wqset,
2204 if (kqueue_begin_processing(kq) == -1) {
2205 kqunlock(kq);
2209 if (kq->kq_count != 0) {
2217 while ((kn = (struct knote*)TAILQ_FIRST(&kq->kq_head)) != NULL) {
2223 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
2226 if (kqlock2knoteuse(kq, kn)) {
2230 if (knoteuse2kqlock(kq, kn)) {
2246 kn->kn_tq = &kq->kq_head;
2247 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
2250 kqueue_end_processing(kq);
2251 kqunlock(kq);
2262 struct kqueue *kq = (struct kqueue *)fg->fg_data;
2264 kqueue_dealloc(kq);
2278 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
2281 if (parentkq == kq ||
2298 parentkq->kq_level < kq->kq_level)
2306 if (parentkq->kq_level < kq->kq_level + 1)
2307 parentkq->kq_level = kq->kq_level + 1;
2311 kqlock(kq);
2312 KNOTE_ATTACH(&kq->kq_sel.si_note, kn);
2314 if (kq->kq_level == 0)
2315 kq->kq_level = 1;
2316 kqunlock(kq);
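
kqueue_kqfilter() (lines 2278-2316) is what lets one kqueue be monitored by another: it refuses self-attachment and cycles, bumps the parent's kq_level to keep the nesting depth bounded, and hooks the knote into kq_sel.si_note so knote_activate() (line 2634) can KNOTE the parent when a real event arrives; filt_kqueue() (line 466) then reports kq_count as the knote's data. A userspace sketch of that nesting, assuming the standard EVFILT_USER/NOTE_TRIGGER facility for generating an event on the inner queue:

    #include <sys/event.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
            int inner = kqueue(), outer = kqueue();
            struct kevent ev;

            /* watch the inner kq for readability (pending events) */
            EV_SET(&ev, inner, EVFILT_READ, EV_ADD, 0, 0, NULL);
            kevent(outer, &ev, 1, NULL, 0, NULL);

            /* post a user event on the inner kq so it has something queued */
            EV_SET(&ev, 1, EVFILT_USER, EV_ADD, 0, 0, NULL);
            kevent(inner, &ev, 1, NULL, 0, NULL);
            EV_SET(&ev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, NULL);
            kevent(inner, &ev, 1, NULL, 0, NULL);

            /* the outer kq reports the inner one ready; data is its kq_count */
            if (kevent(outer, NULL, 0, &ev, 1, NULL) == 1)
                    printf("inner kq has %lld pending event(s)\n", (long long)ev.data);

            close(inner);
            close(outer);
            return (0);
    }
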
2322 * kqueue_drain - called when kq is closed
2328 struct kqueue *kq = (struct kqueue *)fp->f_fglob->fg_data;
2329 kqlock(kq);
2330 kqueue_wakeup(kq, 1);
2331 kqunlock(kq);
2340 struct kqueue *kq = (struct kqueue *)fp->f_data;
2345 sb64->st_size = kq->kq_count;
2346 if (kq->kq_state & KQ_KEV64)
2355 sb->st_size = kq->kq_count;
2356 if (kq->kq_state & KQ_KEV64)
2370 kqueue_wakeup(struct kqueue *kq, int closed)
2372 if ((kq->kq_state & (KQ_SLEEP | KQ_SEL)) != 0 || kq->kq_nprocess > 0) {
2373 kq->kq_state &= ~(KQ_SLEEP | KQ_SEL);
2374 wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs, KQ_EVENT,
2404 struct kqueue *kq = kn->kn_kq;
2406 kqlock(kq);
2407 if (kqlock2knoteuse(kq, kn)) {
2414 if (knoteuse2kqlock(kq, kn) && result)
2418 kqunlock(kq);
2458 struct kqueue *kq = kn->kn_kq;
2461 kr = wait_queue_link_noalloc(wq, kq->kq_wqs, wql);
2482 struct kqueue *kq = kn->kn_kq;
2485 kr = wait_queue_unlink_nofree(wq, kq->kq_wqs, wqlp);
2486 kqlock(kq);
2489 kqunlock(kq);
2511 struct kqueue *kq = kn->kn_kq;
2513 if (kq->kq_p != p)
2514 panic("knote_fdclose: proc mismatch (kq->kq_p=%p != p=%p)", kq->kq_p, p);
2516 kqlock(kq);
2526 if (kqlock2knotedrop(kq, kn)) {
2594 struct kqueue *kq = kn->kn_kq;
2595 struct proc *p = kq->kq_p;
2607 kqlock(kq);
2610 kqunlock(kq);
2614 wait_queue_wakeup_all((wait_queue_t)kq->kq_wqs, &kn->kn_status, THREAD_AWAKENED);
2626 struct kqueue *kq = kn->kn_kq;
2630 kqueue_wakeup(kq, 0);
2632 /* this is a real event: wake up the parent kq, too */
2634 KNOTE(&kq->kq_sel.si_note, 0);
2652 struct kqueue *kq = kn->kn_kq;
2656 kq->kq_count++;
2664 struct kqueue *kq = kn->kn_kq;
2670 kn->kn_tq = &kq->kq_head;
2672 kq->kq_count--;
2681 /* allocate kq lock group attribute and group */
2686 /* Allocate kq lock attribute */
2986 fill_kqueueinfo(struct kqueue *kq, struct kqueue_info * kinfo)
2994 st->vst_size = kq->kq_count;
2995 if (kq->kq_state & KQ_KEV64)
3000 if (kq->kq_state & KQ_SEL)
3002 if (kq->kq_state & KQ_SLEEP)