• Home
  • History
  • Annotate
  • Raw
  • Download
  • only in /macosx-10.10.1/xnu-2782.1.97/bsd/kern/

Lines Matching defs:kqueue

106 MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
110 static inline void kqlock(struct kqueue *kq);
111 static inline void kqunlock(struct kqueue *kq);
113 static int kqlock2knoteuse(struct kqueue *kq, struct knote *kn);
114 static int kqlock2knoteusewait(struct kqueue *kq, struct knote *kn);
115 static int kqlock2knotedrop(struct kqueue *kq, struct knote *kn);
116 static int knoteuse2kqlock(struct kqueue *kq, struct knote *kn);
118 static void kqueue_wakeup(struct kqueue *kq, int closed);
152 static int kevent_callback(struct kqueue *kq, struct kevent64_s *kevp,
154 static void kevent_continue(struct kqueue *kq, void *data, int error);
156 static int kqueue_process(struct kqueue *kq, kevent_callback_t callback,
158 static int kqueue_begin_processing(struct kqueue *kq);
159 static void kqueue_end_processing(struct kqueue *kq);
304 * kqueue/note lock attributes and implementations
308 * the knote "inuse" count and status use the kqueue lock.
315 kqlock(struct kqueue *kq)
321 kqunlock(struct kqueue *kq)
336 kqlock2knoteuse(struct kqueue *kq, struct knote *kn)
356 kqlock2knoteusewait(struct kqueue *kq, struct knote *kn)
378 * still alive - but the kqueue lock is taken
382 knoteuse2kqlock(struct kqueue *kq, struct knote *kn)
413 kqlock2knotedrop(struct kqueue *kq, struct knote *kn)
439 struct kqueue *kq = kn->kn_kq;
468 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
479 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
838 struct kqueue *kq = kn->kn_kq;
853 struct kqueue *kq = kn->kn_kq;
1162 struct kqueue *
1166 struct kqueue *kq;
1168 MALLOC_ZONE(kq, struct kqueue *, sizeof (struct kqueue), M_KQUEUE,
1176 bzero(kq, sizeof (struct kqueue));
1182 FREE_ZONE(kq, sizeof (struct kqueue), M_KQUEUE);
1197 * kqueue_dealloc - detach all knotes from a kqueue and free it
1200 * this kqueue. If we find one, we try to drop it. But
1204 * not contain any more references to this kqueue (either
1207 * Assumes no new events are being added to the kqueue.
1211 kqueue_dealloc(struct kqueue *kq)
1262 * before freeing the wait queue set for this kqueue,
1268 FREE_ZONE(kq, sizeof (struct kqueue), M_KQUEUE);
1274 struct kqueue *kq;
1305 kqueue(struct proc *p, __unused struct kqueue_args *uap, int32_t *retval)
1405 kevent_continue(__unused struct kqueue *kq, void *data, int error)
1474 struct kqueue *kq;
1572 * caller holds a reference on the kqueue
1575 kevent_callback(__unused struct kqueue *kq, struct kevent64_s *kevp,
1627 * kevent_register - add a new event to a kqueue
1630 * the kqueue via a knote data structure.
1637 * caller holds a reference on the kqueue
1641 kevent_register(struct kqueue *kq, struct kevent64_s *kev,
1770 /* existing knote - get kqueue lock */
1814 /* kqueue, proc_fdlock both unlocked */
1857 * caller holds a reference on the kqueue.
1858 * kqueue locked on entry and exit - but may be dropped
1867 struct kqueue *kq = kn->kn_kq;
2014 * Called with kqueue locked and returns the same way,
2018 kqueue_begin_processing(struct kqueue *kq)
2041 * Called with kqueue lock held.
2044 kqueue_end_processing(struct kqueue *kq)
2055 * kqueue_process - process the triggered events in a kqueue
2064 * caller holds a reference on the kqueue.
2065 * kqueue locked on entry and exit - but may be dropped
2066 * kqueue list locked (held for duration of call)
2070 kqueue_process(struct kqueue *kq,
2113 * With the kqueue still locked, move any knotes
2137 struct kqueue *kq = (struct kqueue *)data;
2176 * kqueue_scan - scan and wait for events in a kqueue
2178 * Process the triggered events in a kqueue.
2190 kqueue_scan(struct kqueue *kq,
2318 struct kqueue *kq = (struct kqueue *)fp->f_data;
2331 * the kqueue onto the wait queue set for the select(). Normally we
2333 * selinfo structure and we need to use the main one for the kqueue to
2404 struct kqueue *kq = (struct kqueue *)fg->fg_data;
2413 * The caller has taken a use-count reference on this kqueue and will donate it
2414 * to the kqueue we are being added to. This keeps the kqueue from closing until
2420 struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;
2421 struct kqueue *parentkq = kn->kn_kq;
2431 * ceiling protocol. When a kqueue is inserted into another,
2433 * into another kqueue at a lower level than the potential
2470 struct kqueue *kq = (struct kqueue *)fp->f_fglob->fg_data;
2479 kqueue_stat(struct kqueue *kq, void *ub, int isstat64, proc_t p)
2508 * Called with the kqueue locked
2511 kqueue_wakeup(struct kqueue *kq, int closed)
2545 struct kqueue *kq = kn->kn_kq;
2587 * For a given knote, link a provided wait queue directly with the kqueue.
2592 * kqueue and knote references are held by caller.
2599 struct kqueue *kq = kn->kn_kq;
2612 * Unlink the provided wait queue from the kqueue associated with a knote.
2616 * ignore any failures to unlink and just remove it from the kqueue list.
2623 struct kqueue *kq = kn->kn_kq;
2652 struct kqueue *kq = kn->kn_kq;
2736 struct kqueue *kq = kn->kn_kq;
2765 /* called with kqueue lock held */
2769 struct kqueue *kq = kn->kn_kq;
2780 /* called with kqueue lock held */
2788 /* called with kqueue lock held */
2795 struct kqueue *kq = kn->kn_kq;
2803 /* called with kqueue lock held */
2807 struct kqueue *kq = kn->kn_kq;
2828 kq_lck_grp = lck_grp_alloc_init("kqueue", kq_lck_grp_attr);
3491 fill_kqueueinfo(struct kqueue *kq, struct kqueue_info * kinfo)