/*	$NetBSD: epoll.c,v 1.1.1.3 2021/04/07 02:43:14 christos Exp $	*/
/*
 * Copyright 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright 2007-2012 Niels Provos, Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"
#include <sys/cdefs.h>
__RCSID("$NetBSD: epoll.c,v 1.1.1.3 2021/04/07 02:43:14 christos Exp $");
#include "evconfig-private.h"

#ifdef EVENT__HAVE_EPOLL

#include <stdint.h>
#include <sys/types.h>
#include <sys/resource.h>
#ifdef EVENT__HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <sys/epoll.h>
#include <signal.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#ifdef EVENT__HAVE_FCNTL_H
#include <fcntl.h>
#endif
#ifdef EVENT__HAVE_SYS_TIMERFD_H
#include <sys/timerfd.h>
#endif

#include "event-internal.h"
#include "evsignal-internal.h"
#include "event2/thread.h"
#include "evthread-internal.h"
#include "log-internal.h"
#include "evmap-internal.h"
#include "changelist-internal.h"
#include "time-internal.h"

/* Since Linux 2.6.17, epoll can report a peer's half-closed connection
   using the special EPOLLRDHUP flag on a read event.
*/
#if !defined(EPOLLRDHUP)
#define EPOLLRDHUP 0
#define EARLY_CLOSE_IF_HAVE_RDHUP 0
#else
#define EARLY_CLOSE_IF_HAVE_RDHUP EV_FEATURE_EARLY_CLOSE
#endif

#include "epolltable-internal.h"

#if defined(EVENT__HAVE_SYS_TIMERFD_H) &&			  \
	defined(EVENT__HAVE_TIMERFD_CREATE) &&			  \
	defined(HAVE_POSIX_MONOTONIC) && defined(TFD_NONBLOCK) && \
	defined(TFD_CLOEXEC)
/* Note that we only use timerfd if TFD_NONBLOCK and TFD_CLOEXEC are available
   and working.  This means that we can't support it on 2.6.25 (where timerfd
   was introduced) or 2.6.26, since 2.6.27 introduced those flags.
 */
#define USING_TIMERFD
#endif

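/* Per-event_base state for this backend: the epoll fd itself, the array
 * handed to epoll_wait(), its current capacity, and (when built with
 * timerfd support) an optional timerfd used for finer-grained timeouts. */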
struct epollop {
	struct epoll_event *events;
	int nevents;
	int epfd;
#ifdef USING_TIMERFD
	int timerfd;
#endif
};

static void *epoll_init(struct event_base *);
static int epoll_dispatch(struct event_base *, struct timeval *);
static void epoll_dealloc(struct event_base *);

static const struct eventop epollops_changelist = {
	"epoll (with changelist)",
	epoll_init,
	event_changelist_add_,
	event_changelist_del_,
	epoll_dispatch,
	epoll_dealloc,
	1, /* need reinit */
	EV_FEATURE_ET|EV_FEATURE_O1|EARLY_CLOSE_IF_HAVE_RDHUP,
	EVENT_CHANGELIST_FDINFO_SIZE
};


static int epoll_nochangelist_add(struct event_base *base, evutil_socket_t fd,
    short old, short events, void *p);
static int epoll_nochangelist_del(struct event_base *base, evutil_socket_t fd,
    short old, short events, void *p);

const struct eventop epollops = {
	"epoll",
	epoll_init,
	epoll_nochangelist_add,
	epoll_nochangelist_del,
	epoll_dispatch,
	epoll_dealloc,
	1, /* need reinit */
	EV_FEATURE_ET|EV_FEATURE_O1|EV_FEATURE_EARLY_CLOSE,
	0
};
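
/*
 * Illustrative usage sketch (not part of this file): applications never
 * reference epollops directly; event_base_new() selects it automatically on
 * Linux.  One way for application code to request the changelist variant,
 * matching the flag checked in epoll_init() below:
 *
 *	struct event_config *cfg = event_config_new();
 *	event_config_set_flag(cfg, EVENT_BASE_FLAG_EPOLL_USE_CHANGELIST);
 *	struct event_base *base = event_base_new_with_config(cfg);
 *	... use the base ...
 *	event_base_free(base);
 *	event_config_free(cfg);
 */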

#define INITIAL_NEVENT 32
#define MAX_NEVENT 4096

/* On Linux kernels at least up to 2.6.24.4, epoll can't handle timeout
 * values bigger than (LONG_MAX - 999ULL)/HZ.  HZ in the wild can be
 * as big as 1000, and LONG_MAX can be as small as (1<<31)-1, so the
 * largest number of msec we can support here is 2147482.  Let's
 * round that down by 47 seconds.
 */
#define MAX_EPOLL_TIMEOUT_MSEC (35*60*1000)

static void *
epoll_init(struct event_base *base)
{
	int epfd = -1;
	struct epollop *epollop;

#ifdef EVENT__HAVE_EPOLL_CREATE1
	/* First, try the shiny new epoll_create1 interface, if we have it. */
	epfd = epoll_create1(EPOLL_CLOEXEC);
#endif
	if (epfd == -1) {
		/* Initialize the kernel queue using the old interface.  (The
		size field is ignored since 2.6.8.) */
		if ((epfd = epoll_create(32000)) == -1) {
			if (errno != ENOSYS)
				event_warn("epoll_create");
			return (NULL);
		}
		evutil_make_socket_closeonexec(epfd);
	}

	if (!(epollop = mm_calloc(1, sizeof(struct epollop)))) {
		close(epfd);
		return (NULL);
	}

	epollop->epfd = epfd;

	/* Initialize fields */
	epollop->events = mm_calloc(INITIAL_NEVENT, sizeof(struct epoll_event));
	if (epollop->events == NULL) {
		mm_free(epollop);
		close(epfd);
		return (NULL);
	}
	epollop->nevents = INITIAL_NEVENT;

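	/* Switch to the changelist-based backend if the base was configured
	 * with EVENT_BASE_FLAG_EPOLL_USE_CHANGELIST, or if the
	 * EVENT_EPOLL_USE_CHANGELIST environment variable is set and the
	 * base does not ignore the environment. */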
	if ((base->flags & EVENT_BASE_FLAG_EPOLL_USE_CHANGELIST) != 0 ||
	    ((base->flags & EVENT_BASE_FLAG_IGNORE_ENV) == 0 &&
		evutil_getenv_("EVENT_EPOLL_USE_CHANGELIST") != NULL)) {

		base->evsel = &epollops_changelist;
	}

#ifdef USING_TIMERFD
	/*
	  The epoll interface ordinarily gives us one-millisecond precision,
	  so on Linux it makes perfect sense to use the CLOCK_MONOTONIC_COARSE
	  timer.  But when the user has set the new PRECISE_TIMER flag for an
	  event_base, we can try to use timerfd to give them finer granularity.
	*/
	if ((base->flags & EVENT_BASE_FLAG_PRECISE_TIMER) &&
	    base->monotonic_timer.monotonic_clock == CLOCK_MONOTONIC) {
		int fd;
		fd = epollop->timerfd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK|TFD_CLOEXEC);
		if (epollop->timerfd >= 0) {
			struct epoll_event epev;
			memset(&epev, 0, sizeof(epev));
			epev.data.fd = epollop->timerfd;
			epev.events = EPOLLIN;
			if (epoll_ctl(epollop->epfd, EPOLL_CTL_ADD, fd, &epev) < 0) {
				event_warn("epoll_ctl(timerfd)");
				close(fd);
				epollop->timerfd = -1;
			}
		} else {
			if (errno != EINVAL && errno != ENOSYS) {
				/* These errors probably mean that we were
				 * compiled with timerfd/TFD_* support, but
				 * we're running on a kernel that lacks those.
				 */
				event_warn("timerfd_create");
			}
			epollop->timerfd = -1;
		}
	} else {
		epollop->timerfd = -1;
	}
#endif

	evsig_init_(base);

	return (epollop);
}

static const char *
change_to_string(int change)
{
	change &= (EV_CHANGE_ADD|EV_CHANGE_DEL);
	if (change == EV_CHANGE_ADD) {
		return "add";
	} else if (change == EV_CHANGE_DEL) {
		return "del";
	} else if (change == 0) {
		return "none";
	} else {
		return "???";
	}
}

static const char *
epoll_op_to_string(int op)
{
	return op == EPOLL_CTL_ADD?"ADD":
	    op == EPOLL_CTL_DEL?"DEL":
	    op == EPOLL_CTL_MOD?"MOD":
	    "???";
}

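/* Format string plus arguments describing one queued change, for use with
 * event_debug() and event_warn() below. */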
#define PRINT_CHANGES(op, events, ch, status)  \
	"Epoll %s(%d) on fd %d " status ". "       \
	"Old events were %d; "                     \
	"read change was %d (%s); "                \
	"write change was %d (%s); "               \
	"close change was %d (%s)",                \
	epoll_op_to_string(op),                    \
	events,                                    \
	ch->fd,                                    \
	ch->old_events,                            \
	ch->read_change,                           \
	change_to_string(ch->read_change),         \
	ch->write_change,                          \
	change_to_string(ch->write_change),        \
	ch->close_change,                          \
	change_to_string(ch->close_change)

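/* Apply a single queued change to the epoll fd: look up the EPOLL_CTL_*
 * operation and event mask in epoll_op_table, issue epoll_ctl(), and recover
 * from the benign failure cases.  Returns 0 on success, -1 on failure. */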
static int
epoll_apply_one_change(struct event_base *base,
    struct epollop *epollop,
    const struct event_change *ch)
{
	struct epoll_event epev;
	int op, events = 0;
	int idx;

	idx = EPOLL_OP_TABLE_INDEX(ch);
	op = epoll_op_table[idx].op;
	events = epoll_op_table[idx].events;

	if (!events) {
		EVUTIL_ASSERT(op == 0);
		return 0;
	}

	if ((ch->read_change|ch->write_change|ch->close_change) & EV_CHANGE_ET)
		events |= EPOLLET;

	memset(&epev, 0, sizeof(epev));
	epev.data.fd = ch->fd;
	epev.events = events;
	if (epoll_ctl(epollop->epfd, op, ch->fd, &epev) == 0) {
		event_debug((PRINT_CHANGES(op, epev.events, ch, "okay")));
		return 0;
	}

	switch (op) {
	case EPOLL_CTL_MOD:
		if (errno == ENOENT) {
			/* If a MOD operation fails with ENOENT, the
			 * fd was probably closed and re-opened.  We
			 * should retry the operation as an ADD.
			 */
			if (epoll_ctl(epollop->epfd, EPOLL_CTL_ADD, ch->fd, &epev) == -1) {
				event_warn("Epoll MOD(%d) on %d retried as ADD; that failed too",
				    (int)epev.events, ch->fd);
				return -1;
			} else {
				event_debug(("Epoll MOD(%d) on %d retried as ADD; succeeded.",
					(int)epev.events,
					ch->fd));
				return 0;
			}
		}
		break;
	case EPOLL_CTL_ADD:
		if (errno == EEXIST) {
			/* If an ADD operation fails with EEXIST,
			 * either the operation was redundant (as with a
			 * precautionary add), or we ran into a fun
			 * kernel bug where using dup*() to duplicate the
			 * same file into the same fd gives you the same epitem
			 * rather than a fresh one.  For the second case,
			 * we must retry with MOD. */
			if (epoll_ctl(epollop->epfd, EPOLL_CTL_MOD, ch->fd, &epev) == -1) {
				event_warn("Epoll ADD(%d) on %d retried as MOD; that failed too",
				    (int)epev.events, ch->fd);
				return -1;
			} else {
				event_debug(("Epoll ADD(%d) on %d retried as MOD; succeeded.",
					(int)epev.events,
					ch->fd));
				return 0;
			}
		}
		break;
	case EPOLL_CTL_DEL:
		if (errno == ENOENT || errno == EBADF || errno == EPERM) {
			/* If a delete fails with one of these errors,
			 * that's fine too: we closed the fd before we
			 * got around to calling epoll_dispatch. */
			event_debug(("Epoll DEL(%d) on fd %d gave %s: DEL was unnecessary.",
				(int)epev.events,
				ch->fd,
				strerror(errno)));
			return 0;
		}
		break;
	default:
		break;
	}

	event_warn(PRINT_CHANGES(op, epev.events, ch, "failed"));
	return -1;
}

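/* Apply every change queued in the base's changelist.  Returns 0 if all of
 * them succeeded, -1 if any failed. */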
static int
epoll_apply_changes(struct event_base *base)
{
	struct event_changelist *changelist = &base->changelist;
	struct epollop *epollop = base->evbase;
	struct event_change *ch;

	int r = 0;
	int i;

	for (i = 0; i < changelist->n_changes; ++i) {
		ch = &changelist->changes[i];
		if (epoll_apply_one_change(base, epollop, ch) < 0)
			r = -1;
	}

	return (r);
}

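/* When the changelist is not in use, each add/del builds a single
 * event_change on the stack and applies it immediately. */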
static int
epoll_nochangelist_add(struct event_base *base, evutil_socket_t fd,
    short old, short events, void *p)
{
	struct event_change ch;
	ch.fd = fd;
	ch.old_events = old;
	ch.read_change = ch.write_change = ch.close_change = 0;
	if (events & EV_WRITE)
		ch.write_change = EV_CHANGE_ADD |
		    (events & EV_ET);
	if (events & EV_READ)
		ch.read_change = EV_CHANGE_ADD |
		    (events & EV_ET);
	if (events & EV_CLOSED)
		ch.close_change = EV_CHANGE_ADD |
		    (events & EV_ET);

	return epoll_apply_one_change(base, base->evbase, &ch);
}

static int
epoll_nochangelist_del(struct event_base *base, evutil_socket_t fd,
    short old, short events, void *p)
{
	struct event_change ch;
	ch.fd = fd;
	ch.old_events = old;
	ch.read_change = ch.write_change = ch.close_change = 0;
	if (events & EV_WRITE)
		ch.write_change = EV_CHANGE_DEL |
		    (events & EV_ET);
	if (events & EV_READ)
		ch.read_change = EV_CHANGE_DEL |
		    (events & EV_ET);
	if (events & EV_CLOSED)
		ch.close_change = EV_CHANGE_DEL |
		    (events & EV_ET);

	return epoll_apply_one_change(base, base->evbase, &ch);
}

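/* Wait for events: arm the timerfd (or compute an epoll_wait() timeout),
 * flush any pending changes, call epoll_wait() with the base lock released,
 * and activate the events it reports. */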
static int
epoll_dispatch(struct event_base *base, struct timeval *tv)
{
	struct epollop *epollop = base->evbase;
	struct epoll_event *events = epollop->events;
	int i, res;
	long timeout = -1;

#ifdef USING_TIMERFD
	if (epollop->timerfd >= 0) {
		struct itimerspec is;
		is.it_interval.tv_sec = 0;
		is.it_interval.tv_nsec = 0;
		if (tv == NULL) {
			/* No timeout; disarm the timer. */
			is.it_value.tv_sec = 0;
			is.it_value.tv_nsec = 0;
		} else {
			if (tv->tv_sec == 0 && tv->tv_usec == 0) {
				/* we need to exit immediately; timerfd can't
				 * do that. */
				timeout = 0;
			}
			is.it_value.tv_sec = tv->tv_sec;
			is.it_value.tv_nsec = tv->tv_usec * 1000;
		}
		/* TODO: we could avoid unnecessary syscalls here by only
		   calling timerfd_settime when the top timeout changes, or
		   when we're called with a different timeval.
		*/
		if (timerfd_settime(epollop->timerfd, 0, &is, NULL) < 0) {
			event_warn("timerfd_settime");
		}
	} else
#endif
	if (tv != NULL) {
		timeout = evutil_tv_to_msec_(tv);
		if (timeout < 0 || timeout > MAX_EPOLL_TIMEOUT_MSEC) {
			/* Linux kernels can wait forever if the timeout is
			 * too big; see comment on MAX_EPOLL_TIMEOUT_MSEC. */
			timeout = MAX_EPOLL_TIMEOUT_MSEC;
		}
	}

	epoll_apply_changes(base);
	event_changelist_remove_all_(&base->changelist, base);

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = epoll_wait(epollop->epfd, events, epollop->nevents, timeout);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("epoll_wait");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: epoll_wait reports %d", __func__, res));
	EVUTIL_ASSERT(res <= epollop->nevents);

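	/* Translate each reported epoll event into EV_* flags and activate
	 * the corresponding I/O events. */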
	for (i = 0; i < res; i++) {
		int what = events[i].events;
		short ev = 0;
#ifdef USING_TIMERFD
		if (events[i].data.fd == epollop->timerfd)
			continue;
#endif

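		/* EPOLLERR, or EPOLLHUP without EPOLLRDHUP, is reported as
		 * both readable and writable so that the pending callbacks
		 * run and can observe the error or EOF; otherwise map
		 * EPOLLIN/EPOLLOUT/EPOLLRDHUP to EV_READ/EV_WRITE/EV_CLOSED. */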
		if (what & EPOLLERR) {
			ev = EV_READ | EV_WRITE;
		} else if ((what & EPOLLHUP) && !(what & EPOLLRDHUP)) {
			ev = EV_READ | EV_WRITE;
		} else {
			if (what & EPOLLIN)
				ev |= EV_READ;
			if (what & EPOLLOUT)
				ev |= EV_WRITE;
			if (what & EPOLLRDHUP)
				ev |= EV_CLOSED;
		}

		if (!ev)
			continue;

		evmap_io_active_(base, events[i].data.fd, ev | EV_ET);
	}

	if (res == epollop->nevents && epollop->nevents < MAX_NEVENT) {
		/* We used all of the event space this time.  We should
		   be ready for more events next time. */
		int new_nevents = epollop->nevents * 2;
		struct epoll_event *new_events;

		new_events = mm_realloc(epollop->events,
		    new_nevents * sizeof(struct epoll_event));
		if (new_events) {
			epollop->events = new_events;
			epollop->nevents = new_nevents;
		}
	}

	return (0);
}

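/* Tear down the epoll backend for this event_base: release signal-handling
 * state, free the event array, and close the epoll fd and (if any) the
 * timerfd. */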
static void
epoll_dealloc(struct event_base *base)
{
	struct epollop *epollop = base->evbase;

	evsig_dealloc_(base);
	if (epollop->events)
		mm_free(epollop->events);
	if (epollop->epfd >= 0)
		close(epollop->epfd);
#ifdef USING_TIMERFD
	if (epollop->timerfd >= 0)
		close(epollop->timerfd);
#endif

	memset(epollop, 0, sizeof(struct epollop));
	mm_free(epollop);
}

#endif /* EVENT__HAVE_EPOLL */