/*	$NetBSD: evmap.c,v 1.6 2021/04/10 19:18:45 rillig Exp $	*/

/*
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"
#include <sys/cdefs.h>
__RCSID("$NetBSD: evmap.c,v 1.6 2021/04/10 19:18:45 rillig Exp $");
#include "evconfig-private.h"

#ifdef _WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef _WIN32
#include <unistd.h>
#endif
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <string.h>
#include <time.h>

#include "event-internal.h"
#include "evmap-internal.h"
#include "mm-internal.h"
#include "changelist-internal.h"

/** An entry for an evmap_io list: notes all the events that want to read,
	write, or detect a close on a given fd, and the number of each.
  */
struct evmap_io {
	struct event_dlist events;
	ev_uint16_t nread;
	ev_uint16_t nwrite;
	ev_uint16_t nclose;
};

/* An entry for an evmap_signal list: notes all the events that want to know
   when a signal triggers. */
struct evmap_signal {
	struct event_dlist events;
};

/* On some platforms, fds start at 0 and increment by 1 as they are
   allocated, and old numbers get reused.  For these platforms, we
   implement io maps just like signal maps: as an array of pointers to
   struct evmap_io.  But on other platforms (Windows), sockets are not
   0-indexed, not necessarily consecutive, and not necessarily reused.
   There, we use a hashtable to implement evmap_io.
*/
#ifdef EVMAP_USE_HT
struct event_map_entry {
	HT_ENTRY(event_map_entry) map_node;
	evutil_socket_t fd;
	union { /* This is a union in case we need to make more things that can
			   be in the hashtable. */
		struct evmap_io evmap_io;
	} ent;
};

/* Helper used by the event_io_map hashtable code; tries to return a good hash
 * of the fd in e->fd. */
static inline unsigned
hashsocket(struct event_map_entry *e)
{
	/* On win32, in practice, the low 2-3 bits of a SOCKET seem not to
	 * matter.  Our hashtable implementation really likes low-order bits,
	 * though, so let's do the rotate-and-add trick. */
	unsigned h = (unsigned) e->fd;
	h += (h >> 2) | (h << 30);
	return h;
}
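
/* Worked example (illustrative, assuming 32-bit unsigned): the expression
 * (h >> 2) | (h << 30) rotates h right by two bits, and adding that back
 * into h smears higher bits into the low-order positions the hashtable
 * cares about.  For instance, h = 0x100 gives (0x100 >> 2) | (0x100 << 30)
 * == 0x40, so the hash becomes 0x140 instead of a value whose low bits are
 * all zero. */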

/* Helper used by the event_io_map hashtable code; returns true iff e1 and e2
 * have the same e->fd. */
static inline int
eqsocket(struct event_map_entry *e1, struct event_map_entry *e2)
{
	return e1->fd == e2->fd;
}

HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket)
HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket,
			0.5, mm_malloc, mm_realloc, mm_free)

#define GET_IO_SLOT(x, map, slot, type)					\
	do {								\
		struct event_map_entry key_, *ent_;			\
		key_.fd = slot;						\
		ent_ = HT_FIND(event_io_map, map, &key_);		\
		(x) = ent_ ? &ent_->ent.type : NULL;			\
	} while (0);

#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		struct event_map_entry key_, *ent_;			\
		key_.fd = slot;						\
		HT_FIND_OR_INSERT_(event_io_map, map_node, hashsocket, map, \
		    event_map_entry, &key_, ptr,			\
		    {							\
			    ent_ = *ptr;				\
		    },							\
		    {							\
			    ent_ = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \
			    if (EVUTIL_UNLIKELY(ent_ == NULL))		\
				    return (-1);			\
			    ent_->fd = slot;				\
			    (ctor)(&ent_->ent.type);			\
			    HT_FOI_INSERT_(map_node, map, &key_, ent_, ptr) \
				});					\
		(x) = &ent_->ent.type;					\
	} while (0)

void evmap_io_initmap_(struct event_io_map *ctx)
{
	HT_INIT(event_io_map, ctx);
}

void evmap_io_clear_(struct event_io_map *ctx)
{
	struct event_map_entry **ent, **next, *this;
	for (ent = HT_START(event_io_map, ctx); ent; ent = next) {
		this = *ent;
		next = HT_NEXT_RMV(event_io_map, ctx, ent);
		mm_free(this);
	}
	HT_CLEAR(event_io_map, ctx); /* remove all storage held by the ctx. */
}
#endif

/* Set the variable 'x' to the entry of type 'struct type *' in the map
   'map' corresponding to the fd or signal 'slot'.  Set 'x' to NULL
   if there is no entry for 'slot'.  Does no bounds-checking. */
#define GET_SIGNAL_SLOT(x, map, slot, type)			\
	(x) = (struct type *)((map)->entries[slot])
/* As GET_SIGNAL_SLOT, but construct the entry for 'slot' if it is not
   present, by allocating enough memory for a 'struct type' (plus 'fdinfo_len'
   extra bytes) and initializing the new value by calling the function 'ctor'
   on it.  Makes the enclosing function return -1 on allocation failure.
 */
#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		if ((map)->entries[slot] == NULL) {			\
			(map)->entries[slot] =				\
			    mm_calloc(1,sizeof(struct type)+fdinfo_len); \
			if (EVUTIL_UNLIKELY((map)->entries[slot] == NULL)) \
				return (-1);				\
			(ctor)((struct type *)(map)->entries[slot]);	\
		}							\
		(x) = (struct type *)((map)->entries[slot]);		\
	} while (0)
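
/* For illustration: evmap_signal_add_() below looks up (and lazily creates)
 * the per-signal entry with
 *
 *	GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal,
 *	    evmap_signal_init, base->evsigsel->fdinfo_len);
 *
 * which leaves 'ctx' pointing at map->entries[sig], allocating the slot and
 * running evmap_signal_init() on it first if it was NULL. */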

/* If we aren't using hashtables, then define the IO_SLOT macros and functions
   as thin aliases over the SIGNAL_SLOT versions. */
#ifndef EVMAP_USE_HT
#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)	\
	GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
#define FDINFO_OFFSET sizeof(struct evmap_io)
void
evmap_io_initmap_(struct event_io_map* ctx)
{
	evmap_signal_initmap_(ctx);
}
void
evmap_io_clear_(struct event_io_map* ctx)
{
	evmap_signal_clear_(ctx);
}
#endif


/** Expand 'map' with new entries of width 'msize' until it is big enough
	to store a value in 'slot'.
 */
static int
evmap_make_space(struct event_signal_map *map, int slot, int msize)
{
	if (map->nentries <= slot) {
		int nentries = map->nentries ? map->nentries : 32;
		void **tmp;

		if (slot > INT_MAX / 2)
			return (-1);

		while (nentries <= slot)
			nentries <<= 1;

		if (nentries > INT_MAX / msize)
			return (-1);

		tmp = (void **)mm_realloc(map->entries, nentries * msize);
		if (tmp == NULL)
			return (-1);

		memset(&tmp[map->nentries], 0,
		    (nentries - map->nentries) * msize);

		map->nentries = nentries;
		map->entries = tmp;
	}

	return (0);
}
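
/* Worked example (illustrative): growing an empty map so that slot 70 is
 * valid starts nentries at 32, doubles it to 64 and then 128, reallocates
 * 'entries' to 128 * msize bytes, and zero-fills everything beyond the old
 * map->nentries entries. */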

void
evmap_signal_initmap_(struct event_signal_map *ctx)
{
	ctx->nentries = 0;
	ctx->entries = NULL;
}

void
evmap_signal_clear_(struct event_signal_map *ctx)
{
	if (ctx->entries != NULL) {
		int i;
		for (i = 0; i < ctx->nentries; ++i) {
			if (ctx->entries[i] != NULL)
				mm_free(ctx->entries[i]);
		}
		mm_free(ctx->entries);
		ctx->entries = NULL;
	}
	ctx->nentries = 0;
}


/* code specific to file descriptors */

/** Constructor for struct evmap_io */
static void
evmap_io_init(struct evmap_io *entry)
{
	LIST_INIT(&entry->events);
	entry->nread = 0;
	entry->nwrite = 0;
	entry->nclose = 0;
}


/* return -1 on error, 0 on success if nothing changed in the event backend,
 * and 1 on success if something did. */
int
evmap_io_add_(struct event_base *base, evutil_socket_t fd, struct event *ev)
{
	const struct eventop *evsel = base->evsel;
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx = NULL;
	int nread, nwrite, nclose, retval = 0;
	short res = 0, old = 0;
	struct event *old_ev;

	EVUTIL_ASSERT(fd == ev->ev_fd);

	if (fd < 0)
		return 0;

#ifndef EVMAP_USE_HT
	if (fd >= io->nentries) {
		if (evmap_make_space(io, fd, sizeof(struct evmap_io *)) == -1)
			return (-1);
	}
#endif
	GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init,
						 evsel->fdinfo_len);

	nread = ctx->nread;
	nwrite = ctx->nwrite;
	nclose = ctx->nclose;

	if (nread)
		old |= EV_READ;
	if (nwrite)
		old |= EV_WRITE;
	if (nclose)
		old |= EV_CLOSED;

	if (ev->ev_events & EV_READ) {
		if (++nread == 1)
			res |= EV_READ;
	}
	if (ev->ev_events & EV_WRITE) {
		if (++nwrite == 1)
			res |= EV_WRITE;
	}
	if (ev->ev_events & EV_CLOSED) {
		if (++nclose == 1)
			res |= EV_CLOSED;
	}
	if (EVUTIL_UNLIKELY(nread > 0xffff || nwrite > 0xffff || nclose > 0xffff)) {
		event_warnx("Too many events reading or writing on fd %d",
		    (int)fd);
		return -1;
	}
	if (EVENT_DEBUG_MODE_IS_ON() &&
	    (old_ev = LIST_FIRST(&ctx->events)) &&
	    (old_ev->ev_events&EV_ET) != (ev->ev_events&EV_ET)) {
		event_warnx("Tried to mix edge-triggered and non-edge-triggered"
		    " events on fd %d", (int)fd);
		return -1;
	}

	if (res) {
		void *extra = ((char*)ctx) + sizeof(struct evmap_io);
		/* XXX(niels): we cannot mix edge-triggered and
		 * level-triggered, we should probably assert on
		 * this. */
		if (evsel->add(base, ev->ev_fd,
			old, (ev->ev_events & EV_ET) | res, extra) == -1)
			return (-1);
		retval = 1;
	}

	ctx->nread = (ev_uint16_t) nread;
	ctx->nwrite = (ev_uint16_t) nwrite;
	ctx->nclose = (ev_uint16_t) nclose;
	LIST_INSERT_HEAD(&ctx->events, ev, ev_io_next);

	return (retval);
}
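
/* Illustrative example of the return convention above: on a fresh fd, the
 * first add of an EV_READ event takes nread from 0 to 1, so res includes
 * EV_READ, evsel->add() is called, and the function returns 1.  A second
 * EV_READ event on the same fd only bumps nread to 2; res stays 0, the
 * backend is not touched, and the function returns 0. */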

/* return -1 on error, 0 on success if nothing changed in the event backend,
 * and 1 on success if something did. */
int
evmap_io_del_(struct event_base *base, evutil_socket_t fd, struct event *ev)
{
	const struct eventop *evsel = base->evsel;
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx;
	int nread, nwrite, nclose, retval = 0;
	short res = 0, old = 0;

	if (fd < 0)
		return 0;

	EVUTIL_ASSERT(fd == ev->ev_fd);

#ifndef EVMAP_USE_HT
	if (fd >= io->nentries)
		return (-1);
#endif

	GET_IO_SLOT(ctx, io, fd, evmap_io);

	nread = ctx->nread;
	nwrite = ctx->nwrite;
	nclose = ctx->nclose;

	if (nread)
		old |= EV_READ;
	if (nwrite)
		old |= EV_WRITE;
	if (nclose)
		old |= EV_CLOSED;

	if (ev->ev_events & EV_READ) {
		if (--nread == 0)
			res |= EV_READ;
		EVUTIL_ASSERT(nread >= 0);
	}
	if (ev->ev_events & EV_WRITE) {
		if (--nwrite == 0)
			res |= EV_WRITE;
		EVUTIL_ASSERT(nwrite >= 0);
	}
	if (ev->ev_events & EV_CLOSED) {
		if (--nclose == 0)
			res |= EV_CLOSED;
		EVUTIL_ASSERT(nclose >= 0);
	}

	if (res) {
		void *extra = ((char*)ctx) + sizeof(struct evmap_io);
		if (evsel->del(base, ev->ev_fd,
			old, (ev->ev_events & EV_ET) | res, extra) == -1) {
			retval = -1;
		} else {
			retval = 1;
		}
	}

	ctx->nread = nread;
	ctx->nwrite = nwrite;
	ctx->nclose = nclose;
	LIST_REMOVE(ev, ev_io_next);

	return (retval);
}

void
evmap_io_active_(struct event_base *base, evutil_socket_t fd, short events)
{
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx;
	struct event *ev;

#ifndef EVMAP_USE_HT
	if (fd < 0 || fd >= io->nentries)
		return;
#endif
	GET_IO_SLOT(ctx, io, fd, evmap_io);

	if (NULL == ctx)
		return;
	LIST_FOREACH(ev, &ctx->events, ev_io_next) {
		if (ev->ev_events & (events & ~EV_ET))
			event_active_nolock_(ev, ev->ev_events & events, 1);
	}
}
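
/* Illustrative sketch: a backend's dispatch step that detects read and write
 * readiness on 'fd' might report it with
 *
 *	evmap_io_active_(base, fd, EV_READ | EV_WRITE);
 *
 * which makes every inserted event on 'fd' whose interest set overlaps those
 * flags active, with the overlapping flags as its result. */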

/* code specific to signals */

static void
evmap_signal_init(struct evmap_signal *entry)
{
	LIST_INIT(&entry->events);
}


int
evmap_signal_add_(struct event_base *base, int sig, struct event *ev)
{
	const struct eventop *evsel = base->evsigsel;
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx = NULL;

	if (sig < 0 || sig >= NSIG)
		return (-1);

	if (sig >= map->nentries) {
		if (evmap_make_space(
			map, sig, sizeof(struct evmap_signal *)) == -1)
			return (-1);
	}
	GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init,
	    base->evsigsel->fdinfo_len);

	if (LIST_EMPTY(&ctx->events)) {
		if (evsel->add(base, ev->ev_fd, 0, EV_SIGNAL, NULL)
		    == -1)
			return (-1);
	}

	LIST_INSERT_HEAD(&ctx->events, ev, ev_signal_next);

	return (1);
}

int
evmap_signal_del_(struct event_base *base, int sig, struct event *ev)
{
	const struct eventop *evsel = base->evsigsel;
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx;

	if (sig < 0 || sig >= map->nentries)
		return (-1);

	GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);

	LIST_REMOVE(ev, ev_signal_next);

	if (LIST_FIRST(&ctx->events) == NULL) {
		if (evsel->del(base, ev->ev_fd, 0, EV_SIGNAL, NULL) == -1)
			return (-1);
	}

	return (1);
}

void
evmap_signal_active_(struct event_base *base, evutil_socket_t sig, int ncalls)
{
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx;
	struct event *ev;

	if (sig < 0 || sig >= map->nentries)
		return;
	GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);

	if (!ctx)
		return;
	LIST_FOREACH(ev, &ctx->events, ev_signal_next)
		event_active_nolock_(ev, EV_SIGNAL, ncalls);
}

void *
evmap_io_get_fdinfo_(struct event_io_map *map, evutil_socket_t fd)
{
	struct evmap_io *ctx;
	GET_IO_SLOT(ctx, map, fd, evmap_io);
	if (ctx)
		return ((char*)ctx) + sizeof(struct evmap_io);
	else
		return NULL;
}
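
/* Layout sketch behind the pointer arithmetic above: GET_IO_SLOT_AND_CTOR()
 * allocates sizeof(struct evmap_io) + evsel->fdinfo_len bytes in one block,
 * so the backend's per-fd data sits immediately after the evmap_io header:
 *
 *	[ struct evmap_io | fdinfo_len bytes of backend fdinfo ]
 *	^ ctx               ^ ctx + sizeof(struct evmap_io)
 *
 * The signal map uses the same trick via GET_SIGNAL_SLOT_AND_CTOR(). */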

/* Callback type for evmap_io_foreach_fd */
typedef int (*evmap_io_foreach_fd_cb)(
	struct event_base *, evutil_socket_t, struct evmap_io *, void *);

/* Multipurpose helper function: Iterate over every file descriptor in the
 * event_base for which we could have EV_READ, EV_WRITE, or EV_CLOSED events.
 * For each such fd, call fn(base, fd, evmap_io, arg), where fn is the
 * user-provided function, base is the event_base, fd is the file descriptor,
 * evmap_io is an evmap_io structure containing a list of events pending on
 * the file descriptor, and arg is the user-supplied argument.
 *
 * If fn returns 0, continue on to the next fd. Otherwise, return the same
 * value that fn returned.
 *
 * Note that there is no guarantee that the file descriptors will be processed
 * in any particular order.
 */
static int
evmap_io_foreach_fd(struct event_base *base,
    evmap_io_foreach_fd_cb fn,
    void *arg)
{
	evutil_socket_t fd;
	struct event_io_map *iomap = &base->io;
	int r = 0;
#ifdef EVMAP_USE_HT
	struct event_map_entry **mapent;
	HT_FOREACH(mapent, event_io_map, iomap) {
		struct evmap_io *ctx = &(*mapent)->ent.evmap_io;
		fd = (*mapent)->fd;
#else
	for (fd = 0; fd < iomap->nentries; ++fd) {
		struct evmap_io *ctx = iomap->entries[fd];
		if (!ctx)
			continue;
#endif
		if ((r = fn(base, fd, ctx, arg)))
			break;
	}
	return r;
}

/* Callback type for evmap_signal_foreach_signal */
typedef int (*evmap_signal_foreach_signal_cb)(
	struct event_base *, int, struct evmap_signal *, void *);

/* Multipurpose helper function: Iterate over every signal number in the
 * event_base for which we could have signal events.  For each such signal,
 * call fn(base, signum, evmap_signal, arg), where fn is the user-provided
 * function, base is the event_base, signum is the signal number, evmap_signal
 * is an evmap_signal structure containing a list of events pending on the
 * signal, and arg is the user-supplied argument.
 *
 * If fn returns 0, continue on to the next signal. Otherwise, return the same
 * value that fn returned.
 */
static int
evmap_signal_foreach_signal(struct event_base *base,
    evmap_signal_foreach_signal_cb fn,
    void *arg)
{
	struct event_signal_map *sigmap = &base->sigmap;
	int r = 0;
	int signum;

	for (signum = 0; signum < sigmap->nentries; ++signum) {
		struct evmap_signal *ctx = sigmap->entries[signum];
		if (!ctx)
			continue;
		if ((r = fn(base, signum, ctx, arg)))
			break;
	}
	return r;
}

/* Helper for evmap_reinit_: tell the backend to add every fd for which we have
 * pending events, with the appropriate combination of EV_READ, EV_WRITE,
 * EV_CLOSED, and EV_ET. */
static int
evmap_io_reinit_iter_fn(struct event_base *base, evutil_socket_t fd,
    struct evmap_io *ctx, void *arg)
{
	const struct eventop *evsel = base->evsel;
	void *extra;
	int *result = arg;
	short events = 0;
	struct event *ev;
	EVUTIL_ASSERT(ctx);

	extra = ((char*)ctx) + sizeof(struct evmap_io);
	if (ctx->nread)
		events |= EV_READ;
	if (ctx->nwrite)
		events |= EV_WRITE;
	if (ctx->nclose)
		events |= EV_CLOSED;
	if (evsel->fdinfo_len)
		memset(extra, 0, evsel->fdinfo_len);
	if (events &&
	    (ev = LIST_FIRST(&ctx->events)) &&
	    (ev->ev_events & EV_ET))
		events |= EV_ET;
	if (evsel->add(base, fd, 0, events, extra) == -1)
		*result = -1;

	return 0;
}

/* Helper for evmap_reinit_: tell the backend to add every signal for which we
 * have pending events.  */
static int
evmap_signal_reinit_iter_fn(struct event_base *base,
    int signum, struct evmap_signal *ctx, void *arg)
{
	const struct eventop *evsel = base->evsigsel;
	int *result = arg;

	if (!LIST_EMPTY(&ctx->events)) {
		if (evsel->add(base, signum, 0, EV_SIGNAL, NULL) == -1)
			*result = -1;
	}
	return 0;
}

int
evmap_reinit_(struct event_base *base)
{
	int result = 0;

	evmap_io_foreach_fd(base, evmap_io_reinit_iter_fn, &result);
	if (result < 0)
		return -1;
	evmap_signal_foreach_signal(base, evmap_signal_reinit_iter_fn, &result);
	if (result < 0)
		return -1;
	return 0;
}

/* Helper for evmap_delete_all_: delete every event in an event_dlist. */
static int
delete_all_in_dlist(struct event_dlist *dlist)
{
	struct event *ev;
	while ((ev = LIST_FIRST(dlist)))
		event_del(ev);
	return 0;
}

/* Helper for evmap_delete_all_: delete every event pending on an fd. */
static int
evmap_io_delete_all_iter_fn(struct event_base *base, evutil_socket_t fd,
    struct evmap_io *io_info, void *arg)
{
	return delete_all_in_dlist(&io_info->events);
}

/* Helper for evmap_delete_all_: delete every event pending on a signal. */
static int
evmap_signal_delete_all_iter_fn(struct event_base *base, int signum,
    struct evmap_signal *sig_info, void *arg)
{
	return delete_all_in_dlist(&sig_info->events);
}

void
evmap_delete_all_(struct event_base *base)
{
	evmap_signal_foreach_signal(base, evmap_signal_delete_all_iter_fn, NULL);
	evmap_io_foreach_fd(base, evmap_io_delete_all_iter_fn, NULL);
}

/** Per-fd structure for use with changelists.  It keeps track, for each fd or
 * signal using the changelist, of where its entry in the changelist is.
 */
struct event_changelist_fdinfo {
	int idxplus1; /* this is the index +1, so that memset(0) will make it
		       * a no-such-element */
};
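
/* Worked example (illustrative): if a given fd's change lives at
 * changelist->changes[2], its fdinfo holds idxplus1 == 3; idxplus1 == 0 means
 * the fd has no entry in the changelist at all, which is exactly what a
 * freshly zeroed fdinfo block reports. */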

void
event_changelist_init_(struct event_changelist *changelist)
{
	changelist->changes = NULL;
	changelist->changes_size = 0;
	changelist->n_changes = 0;
}

/** Helper: return the changelist_fdinfo corresponding to a given change. */
static inline struct event_changelist_fdinfo *
event_change_get_fdinfo(struct event_base *base,
    const struct event_change *change)
{
	char *ptr;
	if (change->read_change & EV_CHANGE_SIGNAL) {
		struct evmap_signal *ctx;
		GET_SIGNAL_SLOT(ctx, &base->sigmap, change->fd, evmap_signal);
		ptr = ((char*)ctx) + sizeof(struct evmap_signal);
	} else {
		struct evmap_io *ctx;
		GET_IO_SLOT(ctx, &base->io, change->fd, evmap_io);
		ptr = ((char*)ctx) + sizeof(struct evmap_io);
	}
	return (void*)ptr;
}

/** Callback helper for event_changelist_assert_ok */
static int
event_changelist_assert_ok_foreach_iter_fn(
	struct event_base *base,
	evutil_socket_t fd, struct evmap_io *io, void *arg)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *f;
	f = (void*)
	    ( ((char*)io) + sizeof(struct evmap_io) );
	if (f->idxplus1) {
		struct event_change *c = &changelist->changes[f->idxplus1 - 1];
		EVUTIL_ASSERT(c->fd == fd);
	}
	return 0;
}

/** Make sure that the changelist is consistent with the evmap structures. */
static void
event_changelist_assert_ok(struct event_base *base)
{
	int i;
	struct event_changelist *changelist = &base->changelist;

	EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes);
	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *c = &changelist->changes[i];
		struct event_changelist_fdinfo *f;
		EVUTIL_ASSERT(c->fd >= 0);
		f = event_change_get_fdinfo(base, c);
		EVUTIL_ASSERT(f);
		EVUTIL_ASSERT(f->idxplus1 == i + 1);
	}

	evmap_io_foreach_fd(base,
	    event_changelist_assert_ok_foreach_iter_fn,
	    NULL);
}

#ifdef DEBUG_CHANGELIST
#define event_changelist_check(base)  event_changelist_assert_ok((base))
#else
#define event_changelist_check(base)  ((void)0)
#endif

void
event_changelist_remove_all_(struct event_changelist *changelist,
    struct event_base *base)
{
	int i;

	event_changelist_check(base);

	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *ch = &changelist->changes[i];
		struct event_changelist_fdinfo *fdinfo =
		    event_change_get_fdinfo(base, ch);
		EVUTIL_ASSERT(fdinfo->idxplus1 == i + 1);
		fdinfo->idxplus1 = 0;
	}

	changelist->n_changes = 0;

	event_changelist_check(base);
}

void
event_changelist_freemem_(struct event_changelist *changelist)
{
	if (changelist->changes)
		mm_free(changelist->changes);
	event_changelist_init_(changelist); /* zero it all out. */
}

/** Increase the size of 'changelist' to hold more changes. */
static int
event_changelist_grow(struct event_changelist *changelist)
{
	int new_size;
	struct event_change *new_changes;
	if (changelist->changes_size < 64)
		new_size = 64;
	else
		new_size = changelist->changes_size * 2;

	new_changes = mm_realloc(changelist->changes,
	    new_size * sizeof(struct event_change));

	if (EVUTIL_UNLIKELY(new_changes == NULL))
		return (-1);

	changelist->changes = new_changes;
	changelist->changes_size = new_size;

	return (0);
}

/** Return a pointer to the changelist entry for the file descriptor or signal
 * 'fd', whose fdinfo is 'fdinfo'.  If none exists, construct it, setting its
 * old_events field to old_events.
 */
static struct event_change *
event_changelist_get_or_construct(struct event_changelist *changelist,
    evutil_socket_t fd,
    short old_events,
    struct event_changelist_fdinfo *fdinfo)
{
	struct event_change *change;

	if (fdinfo->idxplus1 == 0) {
		int idx;
		EVUTIL_ASSERT(changelist->n_changes <= changelist->changes_size);

		if (changelist->n_changes == changelist->changes_size) {
			if (event_changelist_grow(changelist) < 0)
				return NULL;
		}

		idx = changelist->n_changes++;
		change = &changelist->changes[idx];
		fdinfo->idxplus1 = idx + 1;

		memset(change, 0, sizeof(struct event_change));
		change->fd = fd;
		change->old_events = old_events;
	} else {
		change = &changelist->changes[fdinfo->idxplus1 - 1];
		EVUTIL_ASSERT(change->fd == fd);
	}
	return change;
}

int
event_changelist_add_(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *fdinfo = p;
	struct event_change *change;
	ev_uint8_t evchange = EV_CHANGE_ADD | (events & (EV_ET|EV_PERSIST|EV_SIGNAL));

	event_changelist_check(base);

	change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
	if (!change)
		return -1;

	/* An add replaces any previous delete, but doesn't result in a no-op,
	 * since the delete might fail (because the fd had been closed since
	 * the last add, for instance). */

	if (events & (EV_READ|EV_SIGNAL))
		change->read_change = evchange;
	if (events & EV_WRITE)
		change->write_change = evchange;
	if (events & EV_CLOSED)
		change->close_change = evchange;

	event_changelist_check(base);
	return (0);
}

int
event_changelist_del_(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *fdinfo = p;
	struct event_change *change;
	ev_uint8_t del = EV_CHANGE_DEL | (events & EV_ET);

	event_changelist_check(base);
	change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
	event_changelist_check(base);
	if (!change)
		return -1;

	/* A delete on an event set that doesn't contain the event to be
	   deleted produces a no-op.  This effectively removes any previous
	   uncommitted add, rather than replacing it: on those platforms where
	   "add, delete, dispatch" is not the same as "no-op, dispatch", we
	   want the no-op behavior.

	   If we have a no-op item, we could remove it from the list
	   entirely, but really there's not much point: skipping the no-op
	   change when we do the dispatch later is far cheaper than rejuggling
	   the array now.

	   As this stands, it also lets through deletions of events that are
	   not currently set.
	 */
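
	/* For instance, assuming 'old' == 0 (the fd had no events before this
	 * changelist round): event_changelist_add_() with EV_READ followed by
	 * event_changelist_del_() with EV_READ leaves read_change == 0, so
	 * when the changes are flushed the backend sees no change at all for
	 * this fd. */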

	if (events & (EV_READ|EV_SIGNAL)) {
		if (!(change->old_events & (EV_READ | EV_SIGNAL)))
			change->read_change = 0;
		else
			change->read_change = del;
	}
	if (events & EV_WRITE) {
		if (!(change->old_events & EV_WRITE))
			change->write_change = 0;
		else
			change->write_change = del;
	}
	if (events & EV_CLOSED) {
		if (!(change->old_events & EV_CLOSED))
			change->close_change = 0;
		else
			change->close_change = del;
	}

	event_changelist_check(base);
	return (0);
}

/* Helper for evmap_check_integrity_: verify that all of the events pending on
 * a given fd are set up correctly, and that the nread, nwrite, and nclose
 * counts on that fd are correct. */
static int
evmap_io_check_integrity_fn(struct event_base *base, evutil_socket_t fd,
    struct evmap_io *io_info, void *arg)
{
	struct event *ev;
	int n_read = 0, n_write = 0, n_close = 0;

	/* First, make sure the list itself isn't corrupt. Otherwise,
	 * running LIST_FOREACH could be an exciting adventure. */
	EVUTIL_ASSERT_LIST_OK(&io_info->events, event, ev_io_next);

	LIST_FOREACH(ev, &io_info->events, ev_io_next) {
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
		EVUTIL_ASSERT(ev->ev_fd == fd);
		EVUTIL_ASSERT(!(ev->ev_events & EV_SIGNAL));
		EVUTIL_ASSERT((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED)));
		if (ev->ev_events & EV_READ)
			++n_read;
		if (ev->ev_events & EV_WRITE)
			++n_write;
		if (ev->ev_events & EV_CLOSED)
			++n_close;
	}

	EVUTIL_ASSERT(n_read == io_info->nread);
	EVUTIL_ASSERT(n_write == io_info->nwrite);
	EVUTIL_ASSERT(n_close == io_info->nclose);

	return 0;
}

/* Helper for evmap_check_integrity_: verify that all of the events pending
 * on a given signal are set up correctly. */
static int
evmap_signal_check_integrity_fn(struct event_base *base,
    int signum, struct evmap_signal *sig_info, void *arg)
{
	struct event *ev;
	/* First, make sure the list itself isn't corrupt. */
	EVUTIL_ASSERT_LIST_OK(&sig_info->events, event, ev_signal_next);

	LIST_FOREACH(ev, &sig_info->events, ev_signal_next) {
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
		EVUTIL_ASSERT(ev->ev_fd == signum);
		EVUTIL_ASSERT((ev->ev_events & EV_SIGNAL));
		EVUTIL_ASSERT(!(ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED)));
	}
	return 0;
}

void
evmap_check_integrity_(struct event_base *base)
{
	evmap_io_foreach_fd(base, evmap_io_check_integrity_fn, NULL);
	evmap_signal_foreach_signal(base, evmap_signal_check_integrity_fn, NULL);

	if (base->evsel->add == event_changelist_add_)
		event_changelist_assert_ok(base);
}

/* Helper type for evmap_foreach_event_: Bundles a function to call on every
 * event, and the user-provided void* to use as its third argument. */
struct evmap_foreach_event_helper {
	event_base_foreach_event_cb fn;
	void *arg;
};

/* Helper for evmap_foreach_event_: calls a provided function on every event
 * pending on a given fd.  */
static int
evmap_io_foreach_event_fn(struct event_base *base, evutil_socket_t fd,
    struct evmap_io *io_info, void *arg)
{
	struct evmap_foreach_event_helper *h = arg;
	struct event *ev;
	int r;
	LIST_FOREACH(ev, &io_info->events, ev_io_next) {
		if ((r = h->fn(base, ev, h->arg)))
			return r;
	}
	return 0;
}

/* Helper for evmap_foreach_event_: calls a provided function on every event
 * pending on a given signal.  */
static int
evmap_signal_foreach_event_fn(struct event_base *base, int signum,
    struct evmap_signal *sig_info, void *arg)
{
	struct event *ev;
	struct evmap_foreach_event_helper *h = arg;
	int r;
	LIST_FOREACH(ev, &sig_info->events, ev_signal_next) {
		if ((r = h->fn(base, ev, h->arg)))
			return r;
	}
	return 0;
}

int
evmap_foreach_event_(struct event_base *base,
    event_base_foreach_event_cb fn, void *arg)
{
	struct evmap_foreach_event_helper h;
	int r;
	h.fn = fn;
	h.arg = arg;
	if ((r = evmap_io_foreach_fd(base, evmap_io_foreach_event_fn, &h)))
		return r;
	return evmap_signal_foreach_signal(base, evmap_signal_foreach_event_fn, &h);
}
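
/* Usage sketch (illustrative; assumes event_base_foreach_event_cb takes a
 * const struct event_base *, a const struct event *, and a void *): a caller
 * that just counts inserted events could pass a callback along the lines of
 *
 *	static int
 *	count_one_event(const struct event_base *b, const struct event *e,
 *	    void *arg)
 *	{
 *		++*(int *)arg;
 *		return 0;
 *	}
 *
 * and invoke evmap_foreach_event_(base, count_one_event, &count) with the
 * base lock already held, since nothing in this file takes it. */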