Lines Matching refs:kq
(Cross-reference hits for the identifier kq in the kqueue implementation, FreeBSD's sys/kern/kern_event.c. The leading number on each entry is the line number in that file; intervening lines that do not mention kq are omitted.)

78  * This lock is used if multiple kq locks are required.  This possibly
98 static int kqueue_register(struct kqueue *kq, struct kevent *kev,
101 static void kqueue_release(struct kqueue *kq, int locked);
102 static void kqueue_destroy(struct kqueue *kq);
103 static void kqueue_drain(struct kqueue *kq, struct thread *td);
104 static int kqueue_expand(struct kqueue *kq, struct filterops *fops,
107 static int kqueue_scan(struct kqueue *kq, int maxevents,
111 static void kqueue_wakeup(struct kqueue *kq);
137 static int knote_attach(struct knote *kn, struct kqueue *kq);
208 #define KQ_LOCK(kq) do { \
209 mtx_lock(&(kq)->kq_lock); \
211 #define KQ_FLUX_WAKEUP(kq) do { \
212 if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) { \
213 (kq)->kq_state &= ~KQ_FLUXWAIT; \
214 wakeup((kq)); \
217 #define KQ_UNLOCK_FLUX(kq) do { \
218 KQ_FLUX_WAKEUP(kq); \
219 mtx_unlock(&(kq)->kq_lock); \
221 #define KQ_UNLOCK(kq) do { \
222 mtx_unlock(&(kq)->kq_lock); \
224 #define KQ_OWNED(kq) do { \
225 mtx_assert(&(kq)->kq_lock, MA_OWNED); \
227 #define KQ_NOTOWNED(kq) do { \
228 mtx_assert(&(kq)->kq_lock, MA_NOTOWNED); \
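The lock-helper fragments above (lines 208-228) show only the lines that mention kq; the closing "} while (0)" of each macro presumably does not match and is dropped by the listing. A reconstruction of what the full definitions plausibly look like, filling in only those closing lines, so the do/while(0) pattern is readable:

        /*
         * Reconstruction: the "} while (0)" closers are inferred from the
         * "do {" openers shown in the listing; only those lines are added.
         */
        #define KQ_LOCK(kq) do { \
                mtx_lock(&(kq)->kq_lock); \
        } while (0)
        #define KQ_FLUX_WAKEUP(kq) do { \
                if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) { \
                        (kq)->kq_state &= ~KQ_FLUXWAIT; \
                        wakeup((kq)); \
                } \
        } while (0)
        #define KQ_UNLOCK_FLUX(kq) do { \
                KQ_FLUX_WAKEUP(kq); \
                mtx_unlock(&(kq)->kq_lock); \
        } while (0)
        #define KQ_UNLOCK(kq) do { \
                mtx_unlock(&(kq)->kq_lock); \
        } while (0)
        #define KQ_OWNED(kq) do { \
                mtx_assert(&(kq)->kq_lock, MA_OWNED); \
        } while (0)
        #define KQ_NOTOWNED(kq) do { \
                mtx_assert(&(kq)->kq_lock, MA_NOTOWNED); \
        } while (0)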
337 struct kqueue *kq = kn->kn_fp->f_data;
344 knlist_add(&kq->kq_sel.si_note, kn, 0);
352 struct kqueue *kq = kn->kn_fp->f_data;
354 knlist_remove(&kq->kq_sel.si_note, kn, 0);
361 struct kqueue *kq = kn->kn_fp->f_data;
363 kn->kn_data = kq->kq_count;
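Lines 337-363 belong to the filter that lets one kqueue be monitored by another: the attach and detach routines add and remove the knote on kq_sel.si_note, and the event routine reports kq_count through kn_data. From userland this surfaces as EVFILT_READ on a kqueue descriptor; a minimal sketch (the returned data field should then reflect the inner queue's pending-event count):

        #include <sys/types.h>
        #include <sys/event.h>
        #include <err.h>
        #include <unistd.h>

        int
        main(void)
        {
                struct kevent kev;
                int inner, outer;

                if ((inner = kqueue()) < 0 || (outer = kqueue()) < 0)
                        err(1, "kqueue");

                /* Watch the inner kqueue for readability from the outer one. */
                EV_SET(&kev, inner, EVFILT_READ, EV_ADD, 0, 0, NULL);
                if (kevent(outer, &kev, 1, NULL, 0, NULL) < 0)
                        err(1, "kevent register");

                /* Once events are pending on 'inner', a wait on 'outer' would
                 * return, with kev.data carrying the pending-event count. */
                close(inner);
                close(outer);
                return (0);
        }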
489 struct kqueue *kq;
499 kq = kn->kn_kq;
500 KQ_LOCK(kq);
502 KQ_UNLOCK(kq);
514 KQ_UNLOCK(kq);
525 KQ_UNLOCK(kq);
544 error = kqueue_register(kq, &kev, NULL, 0);
558 error = kqueue_register(kq, &kev, NULL, 0);
563 KQ_LOCK(kq);
565 KQ_UNLOCK_FLUX(kq);
814 kqueue_init(struct kqueue *kq)
817 mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF | MTX_DUPOK);
818 TAILQ_INIT(&kq->kq_head);
819 knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
820 TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);
827 struct kqueue *kq;
844 kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
845 kqueue_init(kq);
846 kq->kq_fdp = fdp;
847 kq->kq_cred = crhold(cred);
850 TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
853 finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops);
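kqueue_init() (lines 814-820) sets up the per-kqueue mutex, the pending-event tail queue, the knlist used when this kqueue is itself monitored, and the notification task; the allocation path that follows (827-853) zeroes a fresh structure, records the owning filedesc and credential, links the kqueue into fd_kqlist, and installs it as a DTYPE_KQUEUE file. Userland reaches all of this through kqueue(2); a trivial sketch:

        #include <sys/event.h>
        #include <err.h>
        #include <fcntl.h>
        #include <unistd.h>

        int
        main(void)
        {
                int kq;

                if ((kq = kqueue()) < 0)
                        err(1, "kqueue");
                /* The result is an ordinary descriptor; e.g. mark it close-on-exec. */
                if (fcntl(kq, F_SETFD, FD_CLOEXEC) < 0)
                        err(1, "fcntl");
                close(kq);
                return (0);
        }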
982 kqueue_kevent(struct kqueue *kq, struct thread *td, int nchanges, int nevents,
1001 error = kqueue_register(kq, kevp, td, 1);
1019 return (kqueue_scan(kq, nevents, k_ops, timeout, keva, td));
1026 struct kqueue *kq;
1029 error = kqueue_acquire(fp, &kq);
1032 error = kqueue_kevent(kq, td, nchanges, nevents, k_ops, timeout);
1033 kqueue_release(kq, 0);
1045 struct kqueue kq = {};
1048 kqueue_init(&kq);
1049 kq.kq_refcnt = 1;
1050 error = kqueue_kevent(&kq, td, nevents, nevents, k_ops, NULL);
1051 kqueue_drain(&kq, td);
1052 kqueue_destroy(&kq);
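kqueue_kevent() (lines 982-1019) first pushes every change through kqueue_register() and then hands off to kqueue_scan(); kern_kevent() (1026-1033) wraps that in kqueue_acquire()/kqueue_release(), while kern_kevent_anonymous() (1045-1052) runs the same path against a throw-away on-stack kqueue holding a single reference. From userland both halves are one kevent(2) call, with the change list applied before the scan; a sketch using a pipe as the event source:

        #include <sys/types.h>
        #include <sys/event.h>
        #include <sys/time.h>
        #include <err.h>
        #include <stdint.h>
        #include <stdio.h>
        #include <unistd.h>

        int
        main(void)
        {
                struct kevent chg, ev;
                struct timespec zero = { 0, 0 };
                int kq, p[2], n;

                if ((kq = kqueue()) < 0 || pipe(p) < 0)
                        err(1, "setup");
                (void)write(p[1], "x", 1);      /* make the read end readable */

                /* One call: apply the change list, then scan for pending events. */
                EV_SET(&chg, p[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
                if ((n = kevent(kq, &chg, 1, &ev, 1, &zero)) < 0)
                        err(1, "kevent");
                if (n == 1)
                        printf("fd %d readable, %jd bytes pending\n",
                            (int)ev.ident, (intmax_t)ev.data);
                return (0);
        }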
1142 * A ref to kq (obtained via kqueue_acquire) must be held. waitok will
1147 kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok)
1194 if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
1199 error = kqueue_expand(kq, fops, kev->ident, waitok);
1210 * getting both the knlist lock and the kq lock since
1213 if (fp->f_data == kq) {
1228 KQ_LOCK(kq);
1229 if (kev->ident < kq->kq_knlistsize) {
1230 SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
1236 kqueue_expand(kq, fops, kev->ident, waitok);
1238 KQ_LOCK(kq);
1250 } else if (kq->kq_knhashmask != 0) {
1253 list = &kq->kq_knhash[
1254 KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
1269 kq->kq_state |= KQ_FLUXWAIT;
1270 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
1286 KQ_UNLOCK(kq);
1291 kn->kn_kq = kq;
1309 error = knote_attach(kn, kq);
1310 KQ_UNLOCK(kq);
1324 KQ_UNLOCK(kq);
1332 KQ_UNLOCK(kq);
1350 KQ_UNLOCK(kq);
1378 KQ_LOCK(kq);
1386 KQ_UNLOCK_FLUX(kq);
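kqueue_register() (lines 1142-1386) is the per-kevent workhorse: it locates the identifier either in the per-fd kq_knlist array or in the kq_knhash table, grows storage through kqueue_expand() when needed, waits out in-flux knotes via KQ_FLUXWAIT, and then attaches, modifies, or deletes the knote under KQ_LOCK. Each entry in a kevent(2) change list drives this through EV_ADD, EV_DELETE, EV_ENABLE/EV_DISABLE and related flags; a small sketch of a typical add-then-delete sequence (error handling reduced to err(3), fd assumed to be an existing descriptor):

        #include <sys/types.h>
        #include <sys/event.h>
        #include <err.h>

        /* Register fd for read events on kq, then drop the registration. */
        static void
        watch_and_unwatch(int kq, int fd)
        {
                struct kevent kev;

                EV_SET(&kev, fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
                if (kevent(kq, &kev, 1, NULL, 0, NULL) < 0)
                        err(1, "EV_ADD");

                EV_SET(&kev, fd, EVFILT_READ, EV_DELETE, 0, 0, NULL);
                if (kevent(kq, &kev, 1, NULL, 0, NULL) < 0)
                        err(1, "EV_DELETE");
        }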
1404 struct kqueue *kq;
1408 kq = fp->f_data;
1409 if (fp->f_type != DTYPE_KQUEUE || kq == NULL)
1411 *kqp = kq;
1412 KQ_LOCK(kq);
1413 if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
1414 KQ_UNLOCK(kq);
1417 kq->kq_refcnt++;
1418 KQ_UNLOCK(kq);
1424 kqueue_release(struct kqueue *kq, int locked)
1427 KQ_OWNED(kq);
1429 KQ_LOCK(kq);
1430 kq->kq_refcnt--;
1431 if (kq->kq_refcnt == 1)
1432 wakeup(&kq->kq_refcnt);
1434 KQ_UNLOCK(kq);
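kqueue_acquire() (lines 1404-1418) checks that the file really is a DTYPE_KQUEUE that is not closing and bumps kq_refcnt under the lock; kqueue_release() (1424-1434) drops the reference and wakes a drainer sleeping on &kq->kq_refcnt once only the drainer's own reference remains. The same acquire/release-with-wakeup shape in miniature, as a userland analogue with pthreads (illustrative only, not the kernel code):

        #include <pthread.h>

        /* Analogue of kqueue_acquire()/kqueue_release(): callers take a
         * reference under the lock; a drainer waits until it holds the last one. */
        struct obj {
                pthread_mutex_t lock;
                pthread_cond_t  last_ref;
                int             refcnt;         /* starts at 1, owned by the drainer */
                int             closing;
        };

        static int
        obj_acquire(struct obj *o)
        {
                int error = 0;

                pthread_mutex_lock(&o->lock);
                if (o->closing)
                        error = -1;             /* like KQ_CLOSING -> error */
                else
                        o->refcnt++;
                pthread_mutex_unlock(&o->lock);
                return (error);
        }

        static void
        obj_release(struct obj *o)
        {
                pthread_mutex_lock(&o->lock);
                if (--o->refcnt == 1)
                        pthread_cond_signal(&o->last_ref);      /* wake the drainer */
                pthread_mutex_unlock(&o->lock);
        }

        static void
        obj_drain(struct obj *o)
        {
                pthread_mutex_lock(&o->lock);
                o->closing = 1;
                while (o->refcnt > 1)
                        pthread_cond_wait(&o->last_ref, &o->lock);
                pthread_mutex_unlock(&o->lock);
        }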
1438 kqueue_schedtask(struct kqueue *kq)
1441 KQ_OWNED(kq);
1442 KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
1445 if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
1446 taskqueue_enqueue(taskqueue_kqueue_ctx, &kq->kq_task);
1447 kq->kq_state |= KQ_TASKSCHED;
1452 * Expand the kq to make sure we have storage for fops/ident pair.
1461 kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
1470 KQ_NOTOWNED(kq);
1475 if (kq->kq_knlistsize <= fd) {
1476 size = kq->kq_knlistsize;
1482 KQ_LOCK(kq);
1483 if (kq->kq_knlistsize > fd) {
1487 if (kq->kq_knlist != NULL) {
1488 bcopy(kq->kq_knlist, list,
1489 kq->kq_knlistsize * sizeof(*list));
1490 to_free = kq->kq_knlist;
1491 kq->kq_knlist = NULL;
1494 kq->kq_knlistsize * sizeof(*list),
1495 (size - kq->kq_knlistsize) * sizeof(*list));
1496 kq->kq_knlistsize = size;
1497 kq->kq_knlist = list;
1499 KQ_UNLOCK(kq);
1502 if (kq->kq_knhashmask == 0) {
1507 KQ_LOCK(kq);
1508 if (kq->kq_knhashmask == 0) {
1509 kq->kq_knhash = tmp_knhash;
1510 kq->kq_knhashmask = tmp_knhashmask;
1514 KQ_UNLOCK(kq);
1519 KQ_NOTOWNED(kq);
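kqueue_expand() (lines 1452-1519) grows the per-fd knlist array (or allocates the hash table) without holding the kq lock across the allocation: it sizes and allocates first, retakes KQ_LOCK, rechecks whether another thread already grew the table, and either installs its copy or frees it after dropping the lock. A compact userland sketch of that allocate-outside/recheck-under-lock pattern (hypothetical table type, not the kernel code):

        #include <pthread.h>
        #include <stdlib.h>
        #include <string.h>

        struct table {
                pthread_mutex_t  lock;
                void           **slots;
                size_t           nslots;
        };

        /* Grow t->slots so that index 'want' fits, mirroring kqueue_expand():
         * allocate with the lock dropped, then recheck before swapping it in. */
        static int
        table_expand(struct table *t, size_t want)
        {
                void **new_slots, **to_free = NULL;
                size_t newsize;

                pthread_mutex_lock(&t->lock);
                newsize = t->nslots ? t->nslots : 8;
                while (newsize <= want)
                        newsize *= 2;
                pthread_mutex_unlock(&t->lock);

                if ((new_slots = calloc(newsize, sizeof(*new_slots))) == NULL)
                        return (-1);

                pthread_mutex_lock(&t->lock);
                if (t->nslots > want) {
                        to_free = new_slots;            /* someone else grew it */
                } else {
                        if (t->slots != NULL)
                                memcpy(new_slots, t->slots,
                                    t->nslots * sizeof(*new_slots));
                        to_free = t->slots;
                        t->slots = new_slots;
                        t->nslots = newsize;
                }
                pthread_mutex_unlock(&t->lock);
                free(to_free);
                return (0);
        }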
1526 struct kqueue *kq;
1530 kq = arg;
1533 KQ_LOCK(kq);
1535 KNOTE_LOCKED(&kq->kq_sel.si_note, 0);
1537 kq->kq_state &= ~KQ_TASKSCHED;
1538 if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
1539 wakeup(&kq->kq_state);
1541 KQ_UNLOCK(kq);
1550 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
1592 KQ_LOCK(kq);
1596 if (kq->kq_count == 0) {
1600 kq->kq_state |= KQ_SLEEP;
1601 error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH,
1614 TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
1617 KQ_OWNED(kq);
1618 kn = TAILQ_FIRST(&kq->kq_head);
1624 KQ_FLUX_WAKEUP(kq);
1626 kq->kq_state |= KQ_FLUXWAIT;
1627 error = msleep(kq, &kq->kq_lock, PSOCK,
1632 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
1635 kq->kq_count--;
1639 KQ_FLUX_WAKEUP(kq);
1650 kq->kq_count--;
1651 KQ_UNLOCK(kq);
1659 KQ_LOCK(kq);
1664 kq->kq_count--;
1665 KQ_UNLOCK(kq);
1674 KQ_LOCK(kq);
1678 KQ_UNLOCK(kq);
1683 KQ_LOCK(kq);
1688 kq->kq_count--;
1699 KQ_LOCK(kq);
1713 kq->kq_count--;
1715 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
1729 KQ_UNLOCK_FLUX(kq);
1733 KQ_LOCK(kq);
1738 TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
1740 KQ_OWNED(kq);
1741 KQ_UNLOCK_FLUX(kq);
1744 KQ_NOTOWNED(kq);
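kqueue_scan() (lines 1550-1744) is the retrieval side: it sleeps on the queue with KQ_SLEEP until kq_count is non-zero or the timeout fires, inserts a marker knote at the tail of kq_head, and walks the queue up to that marker, delivering at most maxevents events while parking behind in-flux knotes via KQ_FLUXWAIT. From userland this is the eventlist half of kevent(2); a sketch of a wait loop with a one-second timeout (assumes events were registered on kq earlier):

        #include <sys/types.h>
        #include <sys/event.h>
        #include <sys/time.h>
        #include <err.h>
        #include <stdint.h>
        #include <stdio.h>

        /* Drain up to 8 pending events per kevent(2) call, waiting at most one
         * second each time; returns when a wait times out with nothing pending. */
        static void
        event_loop(int kq)
        {
                struct kevent evs[8];
                struct timespec ts = { 1, 0 };
                int i, n;

                for (;;) {
                        n = kevent(kq, NULL, 0, evs, 8, &ts);
                        if (n < 0)
                                err(1, "kevent");
                        if (n == 0)
                                return;         /* timed out */
                        for (i = 0; i < n; i++)
                                printf("ident %ju filter %d data %jd\n",
                                    (uintmax_t)evs[i].ident, (int)evs[i].filter,
                                    (intmax_t)evs[i].data);
                }
        }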
1775 struct kqueue *kq;
1777 kq = fp->f_data;
1781 kq->kq_state |= KQ_ASYNC;
1783 kq->kq_state &= ~KQ_ASYNC;
1788 return (fsetown(*(int *)data, &kq->kq_sigio));
1791 *(int *)data = fgetown(&kq->kq_sigio);
1804 struct kqueue *kq;
1808 if ((error = kqueue_acquire(fp, &kq)))
1811 KQ_LOCK(kq);
1813 if (kq->kq_count) {
1816 selrecord(td, &kq->kq_sel);
1817 if (SEL_WAITING(&kq->kq_sel))
1818 kq->kq_state |= KQ_SEL;
1821 kqueue_release(kq, 1);
1822 KQ_UNLOCK(kq);
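kqueue_poll() (lines 1804-1822) lets a kqueue descriptor itself be used with poll(2)/select(2): if kq_count is non-zero the descriptor is reported readable, otherwise the caller is recorded in kq_sel and KQ_SEL is set so kqueue_wakeup() can find it later. A sketch of the userland side:

        #include <poll.h>
        #include <err.h>

        /* Wait up to 'timeout_ms' for the kqueue descriptor itself to become
         * readable, i.e. for it to have at least one pending event. */
        static int
        kqueue_has_events(int kq, int timeout_ms)
        {
                struct pollfd pfd = { .fd = kq, .events = POLLIN };
                int n;

                if ((n = poll(&pfd, 1, timeout_ms)) < 0)
                        err(1, "poll");
                return (n == 1 && (pfd.revents & POLLIN));
        }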
1845 kqueue_drain(struct kqueue *kq, struct thread *td)
1850 KQ_LOCK(kq);
1852 KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING,
1854 kq->kq_state |= KQ_CLOSING;
1855 if (kq->kq_refcnt > 1)
1856 msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);
1858 KASSERT(kq->kq_refcnt == 1, ("other refs are out there!"));
1860 KASSERT(knlist_empty(&kq->kq_sel.si_note),
1863 for (i = 0; i < kq->kq_knlistsize; i++) {
1864 while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) {
1866 kq->kq_state |= KQ_FLUXWAIT;
1867 msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0);
1871 KQ_UNLOCK(kq);
1875 KQ_LOCK(kq);
1878 if (kq->kq_knhashmask != 0) {
1879 for (i = 0; i <= kq->kq_knhashmask; i++) {
1880 while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) {
1882 kq->kq_state |= KQ_FLUXWAIT;
1883 msleep(kq, &kq->kq_lock, PSOCK,
1888 KQ_UNLOCK(kq);
1892 KQ_LOCK(kq);
1897 if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) {
1898 kq->kq_state |= KQ_TASKDRAIN;
1899 msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0);
1902 if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
1903 selwakeuppri(&kq->kq_sel, PSOCK);
1904 if (!SEL_WAITING(&kq->kq_sel))
1905 kq->kq_state &= ~KQ_SEL;
1908 KQ_UNLOCK(kq);
1912 kqueue_destroy(struct kqueue *kq)
1915 KASSERT(kq->kq_fdp == NULL,
1917 seldrain(&kq->kq_sel);
1918 knlist_destroy(&kq->kq_sel.si_note);
1919 mtx_destroy(&kq->kq_lock);
1921 if (kq->kq_knhash != NULL)
1922 free(kq->kq_knhash, M_KQUEUE);
1923 if (kq->kq_knlist != NULL)
1924 free(kq->kq_knlist, M_KQUEUE);
1926 funsetown(&kq->kq_sigio);
1933 struct kqueue *kq = fp->f_data;
1938 if ((error = kqueue_acquire(fp, &kq)))
1940 kqueue_drain(kq, td);
1948 fdp = kq->kq_fdp;
1949 kq->kq_fdp = NULL;
1955 TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list);
1959 kqueue_destroy(kq);
1960 chgkqcnt(kq->kq_cred->cr_ruidinfo, -1, 0);
1961 crfree(kq->kq_cred);
1962 free(kq, M_KQUEUE);
1977 kqueue_wakeup(struct kqueue *kq)
1979 KQ_OWNED(kq);
1981 if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
1982 kq->kq_state &= ~KQ_SLEEP;
1983 wakeup(kq);
1985 if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
1986 selwakeuppri(&kq->kq_sel, PSOCK);
1987 if (!SEL_WAITING(&kq->kq_sel))
1988 kq->kq_state &= ~KQ_SEL;
1990 if (!knlist_empty(&kq->kq_sel.si_note))
1991 kqueue_schedtask(kq);
1992 if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
1993 pgsigio(&kq->kq_sigio, SIGIO, 0);
2000 * There is a possibility to optimize in the case of one kq watching another.
2008 struct kqueue *kq;
2028 kq = kn->kn_kq;
2029 KQ_LOCK(kq);
2033 * the influx coming from the kq unlock in the
2039 KQ_UNLOCK(kq);
2042 KQ_UNLOCK(kq);
2044 KQ_LOCK(kq);
2048 KQ_UNLOCK_FLUX(kq);
2054 KQ_UNLOCK(kq);
2086 KASSERT(!(!!kqislocked && !knlislocked), ("kq locked w/o knl locked"));
2279 struct kqueue *kq;
2291 kq = kn->kn_kq;
2292 KQ_LOCK(kq);
2294 KQ_UNLOCK(kq);
2300 KQ_UNLOCK(kq);
2305 KQ_UNLOCK(kq);
2307 kq = NULL;
2313 kq = kn->kn_kq;
2314 KQ_LOCK(kq);
2318 kq->kq_state |= KQ_FLUXWAIT;
2319 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
2320 kq = NULL;
2341 struct kqueue *kq;
2351 TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
2352 KQ_LOCK(kq);
2356 while (kq->kq_knlistsize > fd &&
2357 (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
2361 wakeup(kq);
2362 kq->kq_state |= KQ_FLUXWAIT;
2363 msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
2367 KQ_UNLOCK(kq);
2372 KQ_LOCK(kq);
2374 KQ_UNLOCK_FLUX(kq);
2379 knote_attach(struct knote *kn, struct kqueue *kq)
2384 KQ_OWNED(kq);
2387 if (kn->kn_id >= kq->kq_knlistsize)
2389 list = &kq->kq_knlist[kn->kn_id];
2391 if (kq->kq_knhash == NULL)
2393 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
2409 struct kqueue *kq;
2412 kq = kn->kn_kq;
2414 KQ_NOTOWNED(kq);
2418 KQ_LOCK(kq);
2420 list = &kq->kq_knlist[kn->kn_id];
2422 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
2428 KQ_UNLOCK_FLUX(kq);
2442 struct kqueue *kq = kn->kn_kq;
2447 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
2449 kq->kq_count++;
2450 kqueue_wakeup(kq);
2456 struct kqueue *kq = kn->kn_kq;
2461 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
2463 kq->kq_count--;
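knote_enqueue() and knote_dequeue() (lines 2442-2463) move a knote on and off the kq_head tail queue and keep kq_count in step, with enqueue also calling kqueue_wakeup(). The list handling is plain sys/queue.h usage; a minimal self-contained sketch of the same insert/remove/count pattern:

        #include <sys/queue.h>
        #include <stdio.h>

        struct note {
                int                     id;
                TAILQ_ENTRY(note)       tqe;    /* linkage, like kn_tqe */
        };

        TAILQ_HEAD(notehead, note);

        int
        main(void)
        {
                struct notehead head = TAILQ_HEAD_INITIALIZER(head);
                struct note a = { .id = 1 }, b = { .id = 2 }, *np;
                int count = 0;

                /* enqueue: insert at the tail and bump the count */
                TAILQ_INSERT_TAIL(&head, &a, tqe); count++;
                TAILQ_INSERT_TAIL(&head, &b, tqe); count++;

                /* dequeue: remove the head entry and decrement, like knote_dequeue() */
                np = TAILQ_FIRST(&head);
                TAILQ_REMOVE(&head, np, tqe); count--;

                TAILQ_FOREACH(np, &head, tqe)
                        printf("pending note %d (count %d)\n", np->id, count);
                return (0);
        }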
2491 * Register the kev w/ the kq specified by fd.
2496 struct kqueue *kq;
2504 if ((error = kqueue_acquire(fp, &kq)) != 0)
2507 error = kqueue_register(kq, kev, td, waitok);
2509 kqueue_release(kq, 0);