Lines Matching refs:kq

75  * This lock is used if multiple kq locks are required.  This possibly
95 static int kqueue_register(struct kqueue *kq, struct kevent *kev,
98 static void kqueue_release(struct kqueue *kq, int locked);
99 static int kqueue_expand(struct kqueue *kq, struct filterops *fops,
102 static int kqueue_scan(struct kqueue *kq, int maxevents,
106 static void kqueue_wakeup(struct kqueue *kq);
133 static int knote_attach(struct knote *kn, struct kqueue *kq);
204 #define KQ_LOCK(kq) do { \
205 mtx_lock(&(kq)->kq_lock); \
207 #define KQ_FLUX_WAKEUP(kq) do { \
208 if (((kq)->kq_state & KQ_FLUXWAIT) == KQ_FLUXWAIT) { \
209 (kq)->kq_state &= ~KQ_FLUXWAIT; \
210 wakeup((kq)); \
213 #define KQ_UNLOCK_FLUX(kq) do { \
214 KQ_FLUX_WAKEUP(kq); \
215 mtx_unlock(&(kq)->kq_lock); \
217 #define KQ_UNLOCK(kq) do { \
218 mtx_unlock(&(kq)->kq_lock); \
220 #define KQ_OWNED(kq) do { \
221 mtx_assert(&(kq)->kq_lock, MA_OWNED); \
223 #define KQ_NOTOWNED(kq) do { \
224 mtx_assert(&(kq)->kq_lock, MA_NOTOWNED); \
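
The KQ_LOCK()/KQ_UNLOCK() pair above wraps the per-kqueue mutex, KQ_OWNED()/KQ_NOTOWNED() assert its state, and KQ_FLUX_WAKEUP()/KQ_UNLOCK_FLUX() wake any thread that parked itself with KQ_FLUXWAIT set while waiting for a knote to leave the in-flux state. Below is a hypothetical userland analogue of that handshake, using a pthread mutex and condition variable in place of mtx_lock()/msleep()/wakeup(); every name in it (struct fakekq, FKQ_FLUXWAIT, fkq_*) is invented for illustration and is not kernel code.

/*
 * Hypothetical userland analogue of the KQ_LOCK()/KQ_FLUX_WAKEUP() handshake,
 * with a pthread mutex + condition variable standing in for
 * mtx_lock()/msleep()/wakeup().  Everything named fakekq/FKQ_*/fkq_* is
 * invented for illustration.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define FKQ_FLUXWAIT 0x01                 /* a thread is parked in flux-wait */

struct fakekq {
    pthread_mutex_t kq_lock;              /* plays the role of kq->kq_lock */
    pthread_cond_t  kq_cv;                /* plays the role of sleeping on kq */
    int             kq_state;
};

/* KQ_FLUX_WAKEUP(): clear the flag and wake sleepers; kq_lock must be held. */
static void
fkq_flux_wakeup(struct fakekq *kq)
{
    if ((kq->kq_state & FKQ_FLUXWAIT) == FKQ_FLUXWAIT) {
        kq->kq_state &= ~FKQ_FLUXWAIT;
        pthread_cond_broadcast(&kq->kq_cv);
    }
}

/* A thread that parks itself the way kqueue_register()/kqueue_scan() do. */
static void *
flux_waiter(void *arg)
{
    struct fakekq *kq = arg;

    pthread_mutex_lock(&kq->kq_lock);               /* KQ_LOCK(kq) */
    kq->kq_state |= FKQ_FLUXWAIT;
    while ((kq->kq_state & FKQ_FLUXWAIT) != 0)      /* msleep(kq, &kq->kq_lock, ...) */
        pthread_cond_wait(&kq->kq_cv, &kq->kq_lock);
    pthread_mutex_unlock(&kq->kq_lock);             /* KQ_UNLOCK(kq) */
    printf("flux waiter woken\n");
    return (NULL);
}

int
main(void)
{
    struct fakekq kq = {
        .kq_lock = PTHREAD_MUTEX_INITIALIZER,
        .kq_cv = PTHREAD_COND_INITIALIZER,
        .kq_state = 0,
    };
    pthread_t t;

    pthread_create(&t, NULL, flux_waiter, &kq);

    pthread_mutex_lock(&kq.kq_lock);                /* KQ_LOCK(kq) */
    while ((kq.kq_state & FKQ_FLUXWAIT) == 0) {     /* wait for the waiter to park */
        pthread_mutex_unlock(&kq.kq_lock);
        usleep(1000);
        pthread_mutex_lock(&kq.kq_lock);
    }
    fkq_flux_wakeup(&kq);                           /* KQ_FLUX_WAKEUP(kq) */
    pthread_mutex_unlock(&kq.kq_lock);              /* KQ_UNLOCK(kq) */

    pthread_join(t, NULL);
    return (0);
}
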
309 struct kqueue *kq = kn->kn_fp->f_data;
316 knlist_add(&kq->kq_sel.si_note, kn, 0);
324 struct kqueue *kq = kn->kn_fp->f_data;
326 knlist_remove(&kq->kq_sel.si_note, kn, 0);
333 struct kqueue *kq = kn->kn_fp->f_data;
335 kn->kn_data = kq->kq_count;
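
Lines 309-335 implement the filter a kqueue exposes about itself: the knote is hung off kq_sel.si_note and the read filter reports kq_count in kn_data. In userland terms this is what lets one kqueue be monitored by another via EVFILT_READ; a small sketch, assuming the standard kevent(2) interface, follows.

/*
 * One kqueue watching another: EVFILT_READ on a kqueue fd triggers when that
 * kqueue has pending events, and the returned data field carries the pending
 * count (the kernel sets kn_data = kq->kq_count, line 335).
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    struct kevent kev, ev;
    int inner, outer, pfd[2];
    char c = 'x';

    if (pipe(pfd) == -1)
        err(1, "pipe");
    if ((inner = kqueue()) == -1 || (outer = kqueue()) == -1)
        err(1, "kqueue");

    /* inner kqueue watches the pipe's read end */
    EV_SET(&kev, pfd[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
    if (kevent(inner, &kev, 1, NULL, 0, NULL) == -1)
        err(1, "kevent(inner)");

    /* outer kqueue watches the inner kqueue fd */
    EV_SET(&kev, inner, EVFILT_READ, EV_ADD, 0, 0, NULL);
    if (kevent(outer, &kev, 1, NULL, 0, NULL) == -1)
        err(1, "kevent(outer)");

    write(pfd[1], &c, 1);                /* make the inner kqueue readable */

    if (kevent(outer, NULL, 0, &ev, 1, NULL) == -1)
        err(1, "kevent(wait)");
    printf("inner kqueue has %jd pending event(s)\n", (intmax_t)ev.data);
    return (0);
}
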
457 struct kqueue *kq;
469 kq = kn->kn_kq;
470 KQ_LOCK(kq);
472 KQ_UNLOCK(kq);
484 KQ_UNLOCK(kq);
495 KQ_UNLOCK(kq);
508 error = kqueue_register(kq, &kev, NULL, 0);
513 KQ_LOCK(kq);
515 KQ_UNLOCK_FLUX(kq);
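
Lines 457-515 are part of the fork/NOTE_TRACK handling in the process filter: when a tracked process forks, the kernel builds a kevent for the child and calls kqueue_register() on the watcher's own kqueue (line 508). The userland-visible counterpart is EVFILT_PROC; the hedged sketch below only waits for a child's NOTE_EXIT, which exercises the same filter without NOTE_TRACK.

/*
 * Userland view of EVFILT_PROC: watch a forked child and collect its
 * NOTE_EXIT event.  Adding NOTE_TRACK to fflags would additionally make the
 * kernel register knotes for the child's own children, which is the code
 * path the lines above implement.
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    struct kevent kev, ev;
    pid_t pid;
    int kq;

    if ((kq = kqueue()) == -1)
        err(1, "kqueue");

    pid = fork();
    if (pid == -1)
        err(1, "fork");
    if (pid == 0) {                      /* child: exit after a short nap */
        usleep(100 * 1000);
        _exit(7);
    }

    EV_SET(&kev, pid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, NULL);
    if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
        err(1, "kevent(register)");

    if (kevent(kq, NULL, 0, &ev, 1, NULL) == -1)
        err(1, "kevent(wait)");
    if (ev.fflags & NOTE_EXIT)
        printf("pid %d exited, wait status %#jx\n", (int)pid, (uintmax_t)ev.data);
    return (0);
}
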
742 struct kqueue *kq;
752 kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
753 mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF|MTX_DUPOK);
754 TAILQ_INIT(&kq->kq_head);
755 kq->kq_fdp = fdp;
756 knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
757 TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);
760 TAILQ_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
763 finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops);
898 struct kqueue *kq;
901 error = kqueue_acquire(fp, &kq);
918 error = kqueue_register(kq, kevp, td, 1);
940 error = kqueue_scan(kq, nevents, k_ops, timeout, keva, td);
942 kqueue_release(kq, 0);
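
Lines 898-942 are the kern_kevent() body: kqueue_acquire() the fd, loop kqueue_register() over the changelist, kqueue_scan() for the eventlist, then kqueue_release(). Userland sees all of that as a single kevent(2) call; a minimal sketch, assuming a pipe as the event source:

/*
 * Register a change and collect an event in a single kevent(2) call, which is
 * exactly the kqueue_register()-then-kqueue_scan() sequence kern_kevent()
 * performs on the kernel side.
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    struct kevent chg, ev;
    int kq, pfd[2];
    char c = '!';

    if (pipe(pfd) == -1)
        err(1, "pipe");
    if ((kq = kqueue()) == -1)
        err(1, "kqueue");

    write(pfd[1], &c, 1);                /* data is already pending */

    /* changelist goes through kqueue_register(), eventlist through
     * kqueue_scan(), all in one system call */
    EV_SET(&chg, pfd[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
    if (kevent(kq, &chg, 1, &ev, 1, NULL) == -1)
        err(1, "kevent");

    printf("fd %d readable, %jd byte(s) buffered\n",
        (int)ev.ident, (intmax_t)ev.data);
    return (0);
}
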
1026 * A ref to kq (obtained via kqueue_acquire) must be held. waitok will
1031 kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int waitok)
1061 if ((kev->flags & EV_ADD) == EV_ADD && kqueue_expand(kq, fops,
1066 error = kqueue_expand(kq, fops, kev->ident, waitok);
1077 * getting both the knlist lock and the kq lock since
1080 if (fp->f_data == kq) {
1095 KQ_LOCK(kq);
1096 if (kev->ident < kq->kq_knlistsize) {
1097 SLIST_FOREACH(kn, &kq->kq_knlist[kev->ident], kn_link)
1103 kqueue_expand(kq, fops, kev->ident, waitok);
1105 KQ_LOCK(kq);
1106 if (kq->kq_knhashmask != 0) {
1109 list = &kq->kq_knhash[
1110 KN_HASH((u_long)kev->ident, kq->kq_knhashmask)];
1125 kq->kq_state |= KQ_FLUXWAIT;
1126 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqflxwt", 0);
1142 KQ_UNLOCK(kq);
1147 kn->kn_kq = kq;
1165 error = knote_attach(kn, kq);
1166 KQ_UNLOCK(kq);
1180 KQ_UNLOCK(kq);
1188 KQ_UNLOCK(kq);
1201 KQ_UNLOCK(kq);
1220 KQ_LOCK(kq);
1237 KQ_UNLOCK_FLUX(kq);
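
kqueue_register() (lines 1026-1237) looks up an existing knote for the (ident, filter) pair in kq_knlist or kq_knhash, expanding the tables first if necessary, and either attaches a fresh knote or updates/deletes the one it found. Observable from userland: a second EV_ADD for the same ident modifies the existing registration instead of duplicating it. A sketch, assuming a pipe:

/*
 * kqueue_register() reuses the existing knote when EV_ADD names an already
 * registered (ident, filter) pair, so a second EV_ADD updates it in place
 * (here: the udata pointer), and EV_DELETE removes it.
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    struct kevent kev, ev;
    struct timespec zero = { 0, 0 };
    char tag1[] = "first", tag2[] = "second", c = 'x';
    int kq, pfd[2], n;

    if (pipe(pfd) == -1 || (kq = kqueue()) == -1)
        err(1, "setup");

    EV_SET(&kev, pfd[0], EVFILT_READ, EV_ADD, 0, 0, tag1);
    if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
        err(1, "EV_ADD");

    /* same ident and filter: the existing knote is updated, not duplicated */
    EV_SET(&kev, pfd[0], EVFILT_READ, EV_ADD, 0, 0, tag2);
    if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
        err(1, "EV_ADD (update)");

    write(pfd[1], &c, 1);
    if (kevent(kq, NULL, 0, &ev, 1, NULL) == -1)
        err(1, "kevent(wait)");
    printf("udata is now \"%s\"\n", (char *)ev.udata);     /* "second" */

    EV_SET(&kev, pfd[0], EVFILT_READ, EV_DELETE, 0, 0, NULL);
    if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
        err(1, "EV_DELETE");
    n = kevent(kq, NULL, 0, &ev, 1, &zero);
    printf("events left after EV_DELETE: %d\n", n);        /* 0 */
    return (0);
}
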
1256 struct kqueue *kq;
1260 kq = fp->f_data;
1261 if (fp->f_type != DTYPE_KQUEUE || kq == NULL)
1263 *kqp = kq;
1264 KQ_LOCK(kq);
1265 if ((kq->kq_state & KQ_CLOSING) == KQ_CLOSING) {
1266 KQ_UNLOCK(kq);
1269 kq->kq_refcnt++;
1270 KQ_UNLOCK(kq);
1276 kqueue_release(struct kqueue *kq, int locked)
1279 KQ_OWNED(kq);
1281 KQ_LOCK(kq);
1282 kq->kq_refcnt--;
1283 if (kq->kq_refcnt == 1)
1284 wakeup(&kq->kq_refcnt);
1286 KQ_UNLOCK(kq);
1290 kqueue_schedtask(struct kqueue *kq)
1293 KQ_OWNED(kq);
1294 KASSERT(((kq->kq_state & KQ_TASKDRAIN) != KQ_TASKDRAIN),
1297 if ((kq->kq_state & KQ_TASKSCHED) != KQ_TASKSCHED) {
1298 taskqueue_enqueue(taskqueue_kqueue, &kq->kq_task);
1299 kq->kq_state |= KQ_TASKSCHED;
1304 * Expand the kq to make sure we have storage for fops/ident pair.
1313 kqueue_expand(struct kqueue *kq, struct filterops *fops, uintptr_t ident,
1322 KQ_NOTOWNED(kq);
1327 if (kq->kq_knlistsize <= fd) {
1328 size = kq->kq_knlistsize;
1334 KQ_LOCK(kq);
1335 if (kq->kq_knlistsize > fd) {
1339 if (kq->kq_knlist != NULL) {
1340 bcopy(kq->kq_knlist, list,
1341 kq->kq_knlistsize * sizeof(*list));
1342 to_free = kq->kq_knlist;
1343 kq->kq_knlist = NULL;
1346 kq->kq_knlistsize * sizeof(*list),
1347 (size - kq->kq_knlistsize) * sizeof(*list));
1348 kq->kq_knlistsize = size;
1349 kq->kq_knlist = list;
1351 KQ_UNLOCK(kq);
1354 if (kq->kq_knhashmask == 0) {
1359 KQ_LOCK(kq);
1360 if (kq->kq_knhashmask == 0) {
1361 kq->kq_knhash = tmp_knhash;
1362 kq->kq_knhashmask = tmp_knhashmask;
1366 KQ_UNLOCK(kq);
1371 KQ_NOTOWNED(kq);
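
kqueue_expand() (lines 1304-1371) grows the two lookup structures on demand: kq_knlist, indexed directly by file descriptor, and kq_knhash, a hash table keyed by KN_HASH(ident, kq_knhashmask) for filters whose ident is not a descriptor. EVFILT_TIMER is the usual example of the latter; the ident is an arbitrary caller-chosen number, as in the sketch below.

/*
 * A non-fd ident: EVFILT_TIMER knotes are stored in kq_knhash rather than in
 * the fd-indexed kq_knlist.  The ident (42) is an arbitrary number.
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
    struct kevent kev, ev;
    int kq;

    if ((kq = kqueue()) == -1)
        err(1, "kqueue");

    /* one-shot timer, ident 42, fires after 200 ms (data defaults to
     * milliseconds) */
    EV_SET(&kev, 42, EVFILT_TIMER, EV_ADD | EV_ONESHOT, 0, 200, NULL);
    if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
        err(1, "kevent(register)");

    if (kevent(kq, NULL, 0, &ev, 1, NULL) == -1)
        err(1, "kevent(wait)");
    printf("timer %ju fired\n", (uintmax_t)ev.ident);
    return (0);
}
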
1378 struct kqueue *kq;
1382 kq = arg;
1385 KQ_LOCK(kq);
1387 KNOTE_LOCKED(&kq->kq_sel.si_note, 0);
1389 kq->kq_state &= ~KQ_TASKSCHED;
1390 if ((kq->kq_state & KQ_TASKDRAIN) == KQ_TASKDRAIN) {
1391 wakeup(&kq->kq_state);
1393 KQ_UNLOCK(kq);
1402 kqueue_scan(struct kqueue *kq, int maxevents, struct kevent_copyops *k_ops,
1447 KQ_LOCK(kq);
1451 if (kq->kq_count == 0) {
1455 kq->kq_state |= KQ_SLEEP;
1456 error = msleep_sbt(kq, &kq->kq_lock, PSOCK | PCATCH,
1469 TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
1472 KQ_OWNED(kq);
1473 kn = TAILQ_FIRST(&kq->kq_head);
1479 KQ_FLUX_WAKEUP(kq);
1481 kq->kq_state |= KQ_FLUXWAIT;
1482 error = msleep(kq, &kq->kq_lock, PSOCK,
1487 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
1490 kq->kq_count--;
1494 KQ_FLUX_WAKEUP(kq);
1505 kq->kq_count--;
1506 KQ_UNLOCK(kq);
1514 KQ_LOCK(kq);
1519 kq->kq_count--;
1520 KQ_UNLOCK(kq);
1529 KQ_LOCK(kq);
1533 KQ_UNLOCK(kq);
1538 KQ_LOCK(kq);
1543 kq->kq_count--;
1554 KQ_LOCK(kq);
1568 kq->kq_count--;
1570 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
1584 KQ_UNLOCK_FLUX(kq);
1588 KQ_LOCK(kq);
1593 TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
1595 KQ_OWNED(kq);
1596 KQ_UNLOCK_FLUX(kq);
1599 KQ_NOTOWNED(kq);
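
kqueue_scan() (lines 1402-1599) is the harvesting loop: with kq_count at zero it sets KQ_SLEEP and msleep_sbt()s until kqueue_wakeup() or the timeout, then walks kq_head behind a marker knote, flux-waiting around in-flux knotes and copying out at most maxevents entries. The timeout and maxevents it receives are exactly the arguments userland passes to kevent(2):

/*
 * Bounded wait: kevent(2) with a 500 ms timeout and room for up to 8 events.
 * With nothing registered, kqueue_scan() sets KQ_SLEEP, sleeps, and returns 0
 * events when the timeout expires.
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>
#include <stdio.h>

int
main(void)
{
    struct kevent evs[8];
    struct timespec to = { 0, 500 * 1000 * 1000 };  /* 500 ms */
    int kq, n;

    if ((kq = kqueue()) == -1)
        err(1, "kqueue");

    n = kevent(kq, NULL, 0, evs, 8, &to);           /* maxevents = 8 */
    if (n == -1)
        err(1, "kevent");
    printf("kevent returned %d event(s) after the timeout\n", n);
    return (0);
}
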
1659 struct kqueue *kq;
1661 kq = fp->f_data;
1665 kq->kq_state |= KQ_ASYNC;
1667 kq->kq_state &= ~KQ_ASYNC;
1672 return (fsetown(*(int *)data, &kq->kq_sigio));
1675 *(int *)data = fgetown(&kq->kq_sigio);
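
Lines 1659-1675 are kqueue_ioctl(): FIOASYNC toggles KQ_ASYNC and FIOSETOWN/FIOGETOWN manage kq_sigio, so a kqueue with pending events can raise SIGIO from kqueue_wakeup() (lines 1852-1853). A hedged sketch, assuming the usual fcntl(2) routing of O_ASYNC and F_SETOWN onto those ioctls:

/*
 * Ask for SIGIO when the kqueue acquires pending events.  This relies on
 * fcntl(F_SETOWN) and O_ASYNC being routed to the FIOSETOWN/FIOASYNC ioctls
 * handled above; treat it as a sketch, not a guarantee.
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_sigio;

static void
on_sigio(int sig)
{
    (void)sig;
    got_sigio = 1;
}

int
main(void)
{
    struct kevent kev;
    int kq, pfd[2], flags;
    char c = 'x';

    if (pipe(pfd) == -1 || (kq = kqueue()) == -1)
        err(1, "setup");
    signal(SIGIO, on_sigio);

    EV_SET(&kev, pfd[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
    if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
        err(1, "kevent");

    if (fcntl(kq, F_SETOWN, getpid()) == -1)        /* FIOSETOWN -> kq_sigio */
        err(1, "F_SETOWN");
    flags = fcntl(kq, F_GETFL);
    if (fcntl(kq, F_SETFL, flags | O_ASYNC) == -1)  /* FIOASYNC -> KQ_ASYNC */
        err(1, "O_ASYNC");

    write(pfd[1], &c, 1);      /* knote_enqueue() -> kqueue_wakeup() -> pgsigio() */
    sleep(1);
    printf("got_sigio = %d\n", (int)got_sigio);
    return (0);
}
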
1688 struct kqueue *kq;
1692 if ((error = kqueue_acquire(fp, &kq)))
1695 KQ_LOCK(kq);
1697 if (kq->kq_count) {
1700 selrecord(td, &kq->kq_sel);
1701 if (SEL_WAITING(&kq->kq_sel))
1702 kq->kq_state |= KQ_SEL;
1705 kqueue_release(kq, 1);
1706 KQ_UNLOCK(kq);
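
kqueue_poll() (lines 1688-1706) makes the kqueue fd itself selectable: it reports readable when kq_count is non-zero, otherwise selrecord()s on kq_sel and sets KQ_SEL so that kqueue_wakeup() knows to selwakeuppri(). The effect is that a kqueue can sit in an ordinary poll(2)/select(2) set:

/* poll(2) on a kqueue fd: POLLIN is reported once the kqueue holds a pending
 * event. */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    struct kevent kev;
    struct pollfd pfds;
    int kq, pfd[2];
    char c = 'x';

    if (pipe(pfd) == -1 || (kq = kqueue()) == -1)
        err(1, "setup");

    EV_SET(&kev, pfd[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
    if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
        err(1, "kevent");

    write(pfd[1], &c, 1);                /* pending event -> kq_count > 0 */

    pfds.fd = kq;
    pfds.events = POLLIN;
    if (poll(&pfds, 1, 1000) == -1)
        err(1, "poll");
    printf("revents = %#x (POLLIN = %#x)\n",
        (unsigned)pfds.revents, (unsigned)POLLIN);
    return (0);
}
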
1732 struct kqueue *kq = fp->f_data;
1739 if ((error = kqueue_acquire(fp, &kq)))
1743 KQ_LOCK(kq);
1745 KASSERT((kq->kq_state & KQ_CLOSING) != KQ_CLOSING,
1747 kq->kq_state |= KQ_CLOSING;
1748 if (kq->kq_refcnt > 1)
1749 msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);
1751 KASSERT(kq->kq_refcnt == 1, ("other refs are out there!"));
1752 fdp = kq->kq_fdp;
1754 KASSERT(knlist_empty(&kq->kq_sel.si_note),
1757 for (i = 0; i < kq->kq_knlistsize; i++) {
1758 while ((kn = SLIST_FIRST(&kq->kq_knlist[i])) != NULL) {
1760 kq->kq_state |= KQ_FLUXWAIT;
1761 msleep(kq, &kq->kq_lock, PSOCK, "kqclo1", 0);
1765 KQ_UNLOCK(kq);
1769 KQ_LOCK(kq);
1772 if (kq->kq_knhashmask != 0) {
1773 for (i = 0; i <= kq->kq_knhashmask; i++) {
1774 while ((kn = SLIST_FIRST(&kq->kq_knhash[i])) != NULL) {
1776 kq->kq_state |= KQ_FLUXWAIT;
1777 msleep(kq, &kq->kq_lock, PSOCK,
1782 KQ_UNLOCK(kq);
1786 KQ_LOCK(kq);
1791 if ((kq->kq_state & KQ_TASKSCHED) == KQ_TASKSCHED) {
1792 kq->kq_state |= KQ_TASKDRAIN;
1793 msleep(&kq->kq_state, &kq->kq_lock, PSOCK, "kqtqdr", 0);
1796 if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
1797 selwakeuppri(&kq->kq_sel, PSOCK);
1798 if (!SEL_WAITING(&kq->kq_sel))
1799 kq->kq_state &= ~KQ_SEL;
1802 KQ_UNLOCK(kq);
1815 TAILQ_REMOVE(&fdp->fd_kqlist, kq, kq_list);
1819 seldrain(&kq->kq_sel);
1820 knlist_destroy(&kq->kq_sel.si_note);
1821 mtx_destroy(&kq->kq_lock);
1822 kq->kq_fdp = NULL;
1824 if (kq->kq_knhash != NULL)
1825 free(kq->kq_knhash, M_KQUEUE);
1826 if (kq->kq_knlist != NULL)
1827 free(kq->kq_knlist, M_KQUEUE);
1829 funsetown(&kq->kq_sigio);
1830 free(kq, M_KQUEUE);
1837 kqueue_wakeup(struct kqueue *kq)
1839 KQ_OWNED(kq);
1841 if ((kq->kq_state & KQ_SLEEP) == KQ_SLEEP) {
1842 kq->kq_state &= ~KQ_SLEEP;
1843 wakeup(kq);
1845 if ((kq->kq_state & KQ_SEL) == KQ_SEL) {
1846 selwakeuppri(&kq->kq_sel, PSOCK);
1847 if (!SEL_WAITING(&kq->kq_sel))
1848 kq->kq_state &= ~KQ_SEL;
1850 if (!knlist_empty(&kq->kq_sel.si_note))
1851 kqueue_schedtask(kq);
1852 if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
1853 pgsigio(&kq->kq_sigio, SIGIO, 0);
1860 * There is a possibility to optimize in the case of one kq watching another.
1868 struct kqueue *kq;
1888 kq = kn->kn_kq;
1889 KQ_LOCK(kq);
1893 * the influx coming from the kq unlock in the
1899 KQ_UNLOCK(kq);
1902 KQ_UNLOCK(kq);
1904 KQ_LOCK(kq);
1908 KQ_UNLOCK_FLUX(kq);
1914 KQ_UNLOCK(kq);
1945 KASSERT(!(!!kqislocked && !knlislocked), ("kq locked w/o knl locked"));
2126 struct kqueue *kq;
2137 kq = kn->kn_kq;
2138 KQ_LOCK(kq);
2140 KQ_UNLOCK(kq);
2146 KQ_UNLOCK(kq);
2151 KQ_UNLOCK(kq);
2153 kq = NULL;
2159 kq = kn->kn_kq;
2160 KQ_LOCK(kq);
2164 kq->kq_state |= KQ_FLUXWAIT;
2165 msleep(kq, &kq->kq_lock, PSOCK | PDROP, "kqkclr", 0);
2166 kq = NULL;
2187 struct kqueue *kq;
2197 TAILQ_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
2198 KQ_LOCK(kq);
2202 while (kq->kq_knlistsize > fd &&
2203 (kn = SLIST_FIRST(&kq->kq_knlist[fd])) != NULL) {
2207 wakeup(kq);
2208 kq->kq_state |= KQ_FLUXWAIT;
2209 msleep(kq, &kq->kq_lock, PSOCK, "kqflxwt", 0);
2213 KQ_UNLOCK(kq);
2218 KQ_LOCK(kq);
2220 KQ_UNLOCK_FLUX(kq);
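
knote_fdclose() (lines 2187-2220) runs when a descriptor is closed: it walks every kqueue on the process's fd_kqlist and drops the knotes registered for that fd, flux-waiting where needed. The userland-visible consequence, also documented in kqueue(2), is that events attached to a descriptor disappear on its last close:

/*
 * Closing a monitored descriptor drops its knotes (knote_fdclose), pending
 * events included, so the kevent(2) below times out with no events.
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <err.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    struct kevent kev, ev;
    struct timespec to = { 0, 100 * 1000 * 1000 };  /* 100 ms */
    int kq, pfd[2], n;
    char c = 'x';

    if (pipe(pfd) == -1 || (kq = kqueue()) == -1)
        err(1, "setup");

    EV_SET(&kev, pfd[0], EVFILT_READ, EV_ADD, 0, 0, NULL);
    if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
        err(1, "kevent(register)");

    write(pfd[1], &c, 1);                /* event becomes pending */
    close(pfd[0]);                       /* knote_fdclose() removes the knote */

    n = kevent(kq, NULL, 0, &ev, 1, &to);
    printf("events after closing the watched fd: %d\n", n);   /* expect 0 */
    return (0);
}
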
2225 knote_attach(struct knote *kn, struct kqueue *kq)
2230 KQ_OWNED(kq);
2233 if (kn->kn_id >= kq->kq_knlistsize)
2235 list = &kq->kq_knlist[kn->kn_id];
2237 if (kq->kq_knhash == NULL)
2239 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
2255 struct kqueue *kq;
2258 kq = kn->kn_kq;
2260 KQ_NOTOWNED(kq);
2264 KQ_LOCK(kq);
2266 list = &kq->kq_knlist[kn->kn_id];
2268 list = &kq->kq_knhash[KN_HASH(kn->kn_id, kq->kq_knhashmask)];
2274 KQ_UNLOCK_FLUX(kq);
2288 struct kqueue *kq = kn->kn_kq;
2293 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
2295 kq->kq_count++;
2296 kqueue_wakeup(kq);
2302 struct kqueue *kq = kn->kn_kq;
2307 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
2309 kq->kq_count--;
2336 * Register the kev w/ the kq specified by fd.
2341 struct kqueue *kq;
2349 if ((error = kqueue_acquire(fp, &kq)) != 0)
2352 error = kqueue_register(kq, kev, td, waitok);
2354 kqueue_release(kq, 0);
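
The last lines (2336-2354) are kqfd_register(): resolve a kqueue from a descriptor number and call kqueue_register() on behalf of kernel code, which is how sigevent SIGEV_KEVENT notifications from subsystems such as POSIX AIO land on a user-supplied kqueue. The sketch below is heavily hedged: it assumes aio(4) is available, that /etc/rc exists to read from, and the FreeBSD sigevent field names (sigev_notify_kqueue); consult aio_read(2) and kqueue(2) before relying on it.

/*
 * SIGEV_KEVENT: the kernel registers an EVFILT_AIO event on the kqueue named
 * by sigev_notify_kqueue, going through kqfd_register().  The file path and
 * availability of aio(4) are assumptions of this sketch.
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <aio.h>
#include <err.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
    struct aiocb acb;
    struct kevent ev;
    char buf[64];
    int fd, kq;

    if ((fd = open("/etc/rc", O_RDONLY)) == -1)
        err(1, "open");
    if ((kq = kqueue()) == -1)
        err(1, "kqueue");

    memset(&acb, 0, sizeof(acb));
    acb.aio_fildes = fd;
    acb.aio_buf = buf;
    acb.aio_nbytes = sizeof(buf);
    acb.aio_sigevent.sigev_notify = SIGEV_KEVENT;
    acb.aio_sigevent.sigev_notify_kqueue = kq;      /* which kqueue to notify */
    acb.aio_sigevent.sigev_value.sival_ptr = &acb;

    if (aio_read(&acb) == -1)
        err(1, "aio_read");

    /* completion arrives as an EVFILT_AIO event; ident is the aiocb pointer */
    if (kevent(kq, NULL, 0, &ev, 1, NULL) == -1)
        err(1, "kevent");
    printf("aio done, %zd byte(s) read\n", aio_return((struct aiocb *)ev.ident));
    return (0);
}
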