evmap.c revision 290001
/*
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef _WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef _WIN32
#include <unistd.h>
#endif
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <time.h>

#include "event-internal.h"
#include "evmap-internal.h"
#include "mm-internal.h"
#include "changelist-internal.h"

/** An entry for an evmap_io list: notes all the events that want to read or
	write on a given fd, and the number of each.
  */
struct evmap_io {
	struct event_dlist events;
	ev_uint16_t nread;
	ev_uint16_t nwrite;
	ev_uint16_t nclose;
};

/* An entry for an evmap_signal list: notes all the events that want to know
   when a signal triggers. */
struct evmap_signal {
	struct event_dlist events;
};

/* On some platforms, fds start at 0 and increment by 1 as they are
   allocated, and old numbers get used.  For these platforms, we
   implement io maps just like signal maps: as an array of pointers to
   struct evmap_io.  But on other platforms (windows), sockets are not
   0-indexed, not necessarily consecutive, and not necessarily reused.
   There, we use a hashtable to implement evmap_io.
*/
#ifdef EVMAP_USE_HT
struct event_map_entry {
	HT_ENTRY(event_map_entry) map_node;
	evutil_socket_t fd;
	union { /* This is a union in case we need to make more things that can
			   be in the hashtable. */
		struct evmap_io evmap_io;
	} ent;
};

/* Helper used by the event_io_map hashtable code; tries to return a good hash
 * of the fd in e->fd. */
static inline unsigned
hashsocket(struct event_map_entry *e)
{
	/* On win32, in practice, the low 2-3 bits of a SOCKET seem not to
	 * matter.  Our hashtable implementation really likes low-order bits,
	 * though, so let's do the rotate-and-add trick. */
	unsigned h = (unsigned) e->fd;
	h += (h >> 2) | (h << 30);
	return h;
}

/* Helper used by the event_io_map hashtable code; returns true iff e1 and e2
 * have the same e->fd. */
static inline int
eqsocket(struct event_map_entry *e1, struct event_map_entry *e2)
{
	return e1->fd == e2->fd;
}

HT_PROTOTYPE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket)
HT_GENERATE(event_io_map, event_map_entry, map_node, hashsocket, eqsocket,
			0.5, mm_malloc, mm_realloc, mm_free)

#define GET_IO_SLOT(x, map, slot, type)					\
	do {								\
		struct event_map_entry key_, *ent_;			\
		key_.fd = slot;						\
		ent_ = HT_FIND(event_io_map, map, &key_);		\
		(x) = ent_ ? &ent_->ent.type : NULL;			\
	} while (0)

#define GET_IO_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		struct event_map_entry key_, *ent_;			\
		key_.fd = slot;						\
		HT_FIND_OR_INSERT_(event_io_map, map_node, hashsocket, map, \
		    event_map_entry, &key_, ptr,			\
		    {							\
			    ent_ = *ptr;				\
		    },							\
		    {							\
			    ent_ = mm_calloc(1,sizeof(struct event_map_entry)+fdinfo_len); \
			    if (EVUTIL_UNLIKELY(ent_ == NULL))		\
				    return (-1);			\
			    ent_->fd = slot;				\
			    (ctor)(&ent_->ent.type);			\
			    HT_FOI_INSERT_(map_node, map, &key_, ent_, ptr) \
				});					\
		(x) = &ent_->ent.type;					\
	} while (0)

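/* Hashtable-backed io map setup and teardown: evmap_io_initmap_ prepares an
 * empty fd hashtable, and evmap_io_clear_ frees every entry along with the
 * table's own storage. */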
void evmap_io_initmap_(struct event_io_map *ctx)
{
	HT_INIT(event_io_map, ctx);
}

void evmap_io_clear_(struct event_io_map *ctx)
{
	struct event_map_entry **ent, **next, *this;
	for (ent = HT_START(event_io_map, ctx); ent; ent = next) {
		this = *ent;
		next = HT_NEXT_RMV(event_io_map, ctx, ent);
		mm_free(this);
	}
	HT_CLEAR(event_io_map, ctx); /* remove all storage held by the ctx. */
}
#endif

/* Set the variable 'x' to the field in event_map 'map' with fields of type
   'struct type *' corresponding to the fd or signal 'slot'.  Set 'x' to NULL
   if there are no entries for 'slot'.  Does no bounds-checking. */
#define GET_SIGNAL_SLOT(x, map, slot, type)			\
	(x) = (struct type *)((map)->entries[slot])
/* As GET_SIGNAL_SLOT, but construct the entry for 'slot' if it is not present,
   by allocating enough memory for a 'struct type', and initializing the new
   value by calling the function 'ctor' on it.  Makes the function
   return -1 on allocation failure.
 */
#define GET_SIGNAL_SLOT_AND_CTOR(x, map, slot, type, ctor, fdinfo_len)	\
	do {								\
		if ((map)->entries[slot] == NULL) {			\
			(map)->entries[slot] =				\
			    mm_calloc(1,sizeof(struct type)+fdinfo_len); \
			if (EVUTIL_UNLIKELY((map)->entries[slot] == NULL)) \
				return (-1);				\
			(ctor)((struct type *)(map)->entries[slot]);	\
		}							\
		(x) = (struct type *)((map)->entries[slot]);		\
	} while (0)

/* If we aren't using hashtables, then define the IO_SLOT macros and functions
   as thin aliases over the SIGNAL_SLOT versions. */
#ifndef EVMAP_USE_HT
#define GET_IO_SLOT(x,map,slot,type) GET_SIGNAL_SLOT(x,map,slot,type)
#define GET_IO_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)	\
	GET_SIGNAL_SLOT_AND_CTOR(x,map,slot,type,ctor,fdinfo_len)
#define FDINFO_OFFSET sizeof(struct evmap_io)
void
evmap_io_initmap_(struct event_io_map* ctx)
{
	evmap_signal_initmap_(ctx);
}
void
evmap_io_clear_(struct event_io_map* ctx)
{
	evmap_signal_clear_(ctx);
}
#endif


/** Expand 'map' with new entries of width 'msize' until it is big enough
	to store a value in 'slot'.
 */
static int
evmap_make_space(struct event_signal_map *map, int slot, int msize)
{
	if (map->nentries <= slot) {
		int nentries = map->nentries ? map->nentries : 32;
		void **tmp;

		while (nentries <= slot)
			nentries <<= 1;

		tmp = (void **)mm_realloc(map->entries, nentries * msize);
		if (tmp == NULL)
			return (-1);

		memset(&tmp[map->nentries], 0,
		    (nentries - map->nentries) * msize);

		map->nentries = nentries;
		map->entries = tmp;
	}

	return (0);
}

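/* Set up an empty signal map: no slots allocated yet. */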
void
evmap_signal_initmap_(struct event_signal_map *ctx)
{
	ctx->nentries = 0;
	ctx->entries = NULL;
}

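/* Free every entry in a signal map, along with the entry array itself. */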
void
evmap_signal_clear_(struct event_signal_map *ctx)
{
	if (ctx->entries != NULL) {
		int i;
		for (i = 0; i < ctx->nentries; ++i) {
			if (ctx->entries[i] != NULL)
				mm_free(ctx->entries[i]);
		}
		mm_free(ctx->entries);
		ctx->entries = NULL;
	}
	ctx->nentries = 0;
}


/* code specific to file descriptors */

/** Constructor for struct evmap_io */
static void
evmap_io_init(struct evmap_io *entry)
{
	LIST_INIT(&entry->events);
	entry->nread = 0;
	entry->nwrite = 0;
	entry->nclose = 0;
}


/* return -1 on error, 0 on success if nothing changed in the event backend,
 * and 1 on success if something did. */
int
evmap_io_add_(struct event_base *base, evutil_socket_t fd, struct event *ev)
{
	const struct eventop *evsel = base->evsel;
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx = NULL;
	int nread, nwrite, nclose, retval = 0;
	short res = 0, old = 0;
	struct event *old_ev;

	EVUTIL_ASSERT(fd == ev->ev_fd);

	if (fd < 0)
		return 0;

#ifndef EVMAP_USE_HT
	if (fd >= io->nentries) {
		if (evmap_make_space(io, fd, sizeof(struct evmap_io *)) == -1)
			return (-1);
	}
#endif
	GET_IO_SLOT_AND_CTOR(ctx, io, fd, evmap_io, evmap_io_init,
						 evsel->fdinfo_len);

	nread = ctx->nread;
	nwrite = ctx->nwrite;
	nclose = ctx->nclose;

	if (nread)
		old |= EV_READ;
	if (nwrite)
		old |= EV_WRITE;
	if (nclose)
		old |= EV_CLOSED;

	if (ev->ev_events & EV_READ) {
		if (++nread == 1)
			res |= EV_READ;
	}
	if (ev->ev_events & EV_WRITE) {
		if (++nwrite == 1)
			res |= EV_WRITE;
	}
	if (ev->ev_events & EV_CLOSED) {
		if (++nclose == 1)
			res |= EV_CLOSED;
	}
	if (EVUTIL_UNLIKELY(nread > 0xffff || nwrite > 0xffff || nclose > 0xffff)) {
		event_warnx("Too many events reading or writing on fd %d",
		    (int)fd);
		return -1;
	}
	if (EVENT_DEBUG_MODE_IS_ON() &&
	    (old_ev = LIST_FIRST(&ctx->events)) &&
	    (old_ev->ev_events&EV_ET) != (ev->ev_events&EV_ET)) {
		event_warnx("Tried to mix edge-triggered and non-edge-triggered"
		    " events on fd %d", (int)fd);
		return -1;
	}

	if (res) {
		void *extra = ((char*)ctx) + sizeof(struct evmap_io);
		/* XXX(niels): we cannot mix edge-triggered and
		 * level-triggered, we should probably assert on
		 * this. */
		if (evsel->add(base, ev->ev_fd,
			old, (ev->ev_events & EV_ET) | res, extra) == -1)
			return (-1);
		retval = 1;
	}

	ctx->nread = (ev_uint16_t) nread;
	ctx->nwrite = (ev_uint16_t) nwrite;
	ctx->nclose = (ev_uint16_t) nclose;
	LIST_INSERT_HEAD(&ctx->events, ev, ev_io_next);

	return (retval);
}

/* return -1 on error, 0 on success if nothing changed in the event backend,
 * and 1 on success if something did. */
int
evmap_io_del_(struct event_base *base, evutil_socket_t fd, struct event *ev)
{
	const struct eventop *evsel = base->evsel;
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx;
	int nread, nwrite, nclose, retval = 0;
	short res = 0, old = 0;

	if (fd < 0)
		return 0;

	EVUTIL_ASSERT(fd == ev->ev_fd);

#ifndef EVMAP_USE_HT
	if (fd >= io->nentries)
		return (-1);
#endif

	GET_IO_SLOT(ctx, io, fd, evmap_io);

	nread = ctx->nread;
	nwrite = ctx->nwrite;
	nclose = ctx->nclose;

	if (nread)
		old |= EV_READ;
	if (nwrite)
		old |= EV_WRITE;
	if (nclose)
		old |= EV_CLOSED;

	if (ev->ev_events & EV_READ) {
		if (--nread == 0)
			res |= EV_READ;
		EVUTIL_ASSERT(nread >= 0);
	}
	if (ev->ev_events & EV_WRITE) {
		if (--nwrite == 0)
			res |= EV_WRITE;
		EVUTIL_ASSERT(nwrite >= 0);
	}
	if (ev->ev_events & EV_CLOSED) {
		if (--nclose == 0)
			res |= EV_CLOSED;
		EVUTIL_ASSERT(nclose >= 0);
	}

	if (res) {
		void *extra = ((char*)ctx) + sizeof(struct evmap_io);
		if (evsel->del(base, ev->ev_fd, old, res, extra) == -1) {
			retval = -1;
		} else {
			retval = 1;
		}
	}

	ctx->nread = nread;
	ctx->nwrite = nwrite;
	ctx->nclose = nclose;
	LIST_REMOVE(ev, ev_io_next);

	return (retval);
}

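/* Mark every event registered on 'fd' whose interest overlaps 'events' as
 * active, with the overlapping bits. */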
void
evmap_io_active_(struct event_base *base, evutil_socket_t fd, short events)
{
	struct event_io_map *io = &base->io;
	struct evmap_io *ctx;
	struct event *ev;

#ifndef EVMAP_USE_HT
	if (fd < 0 || fd >= io->nentries)
		return;
#endif
	GET_IO_SLOT(ctx, io, fd, evmap_io);

	if (NULL == ctx)
		return;
	LIST_FOREACH(ev, &ctx->events, ev_io_next) {
		if (ev->ev_events & events)
			event_active_nolock_(ev, ev->ev_events & events, 1);
	}
}

/* code specific to signals */

static void
evmap_signal_init(struct evmap_signal *entry)
{
	LIST_INIT(&entry->events);
}


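/* Register 'ev' as waiting on signal 'sig'; if it is the first event for that
 * signal, also tell the signal backend to start watching it.  Returns -1 on
 * error and 1 on success. */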
int
evmap_signal_add_(struct event_base *base, int sig, struct event *ev)
{
	const struct eventop *evsel = base->evsigsel;
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx = NULL;

	if (sig >= map->nentries) {
		if (evmap_make_space(
			map, sig, sizeof(struct evmap_signal *)) == -1)
			return (-1);
	}
	GET_SIGNAL_SLOT_AND_CTOR(ctx, map, sig, evmap_signal, evmap_signal_init,
	    base->evsigsel->fdinfo_len);

	if (LIST_EMPTY(&ctx->events)) {
		if (evsel->add(base, ev->ev_fd, 0, EV_SIGNAL, NULL)
		    == -1)
			return (-1);
	}

	LIST_INSERT_HEAD(&ctx->events, ev, ev_signal_next);

	return (1);
}

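/* Remove 'ev' from the list of events waiting on signal 'sig'; if it was the
 * last one, also tell the signal backend to stop watching the signal.
 * Returns -1 on error and 1 on success. */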
int
evmap_signal_del_(struct event_base *base, int sig, struct event *ev)
{
	const struct eventop *evsel = base->evsigsel;
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx;

	if (sig >= map->nentries)
		return (-1);

	GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);

	LIST_REMOVE(ev, ev_signal_next);

	if (LIST_FIRST(&ctx->events) == NULL) {
		if (evsel->del(base, ev->ev_fd, 0, EV_SIGNAL, NULL) == -1)
			return (-1);
	}

	return (1);
}

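/* Mark every event registered on signal 'sig' as active, to be run 'ncalls'
 * times. */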
void
evmap_signal_active_(struct event_base *base, evutil_socket_t sig, int ncalls)
{
	struct event_signal_map *map = &base->sigmap;
	struct evmap_signal *ctx;
	struct event *ev;

	if (sig < 0 || sig >= map->nentries)
		return;
	GET_SIGNAL_SLOT(ctx, map, sig, evmap_signal);

	if (!ctx)
		return;
	LIST_FOREACH(ev, &ctx->events, ev_signal_next)
		event_active_nolock_(ev, EV_SIGNAL, ncalls);
}

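/* Return the backend-specific fdinfo area that follows the evmap_io entry for
 * 'fd', or NULL if there is no evmap_io entry for that fd. */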
void *
evmap_io_get_fdinfo_(struct event_io_map *map, evutil_socket_t fd)
{
	struct evmap_io *ctx;
	GET_IO_SLOT(ctx, map, fd, evmap_io);
	if (ctx)
		return ((char*)ctx) + sizeof(struct evmap_io);
	else
		return NULL;
}

/* Callback type for evmap_io_foreach_fd */
typedef int (*evmap_io_foreach_fd_cb)(
	struct event_base *, evutil_socket_t, struct evmap_io *, void *);

/* Multipurpose helper function: Iterate over every file descriptor in the
 * event_base for which we could have EV_READ or EV_WRITE events.  For each
 * such fd, call fn(base, fd, evmap_io, arg), where fn is the user-provided
 * function, base is the event_base, fd is the file descriptor, evmap_io
 * is an evmap_io structure containing a list of events pending on the
 * file descriptor, and arg is the user-supplied argument.
 *
 * If fn returns 0, continue on to the next fd. Otherwise, return the same
 * value that fn returned.
 *
 * Note that there is no guarantee that the file descriptors will be processed
 * in any particular order.
 */
static int
evmap_io_foreach_fd(struct event_base *base,
    evmap_io_foreach_fd_cb fn,
    void *arg)
{
	evutil_socket_t fd;
	struct event_io_map *iomap = &base->io;
	int r = 0;
#ifdef EVMAP_USE_HT
	struct event_map_entry **mapent;
	HT_FOREACH(mapent, event_io_map, iomap) {
		struct evmap_io *ctx = &(*mapent)->ent.evmap_io;
		fd = (*mapent)->fd;
#else
	for (fd = 0; fd < iomap->nentries; ++fd) {
		struct evmap_io *ctx = iomap->entries[fd];
		if (!ctx)
			continue;
#endif
		if ((r = fn(base, fd, ctx, arg)))
			break;
	}
	return r;
}

/* Callback type for evmap_signal_foreach_signal */
typedef int (*evmap_signal_foreach_signal_cb)(
	struct event_base *, int, struct evmap_signal *, void *);

/* Multipurpose helper function: Iterate over every signal number in the
 * event_base for which we could have signal events.  For each such signal,
 * call fn(base, signum, evmap_signal, arg), where fn is the user-provided
 * function, base is the event_base, signum is the signal number, evmap_signal
 * is an evmap_signal structure containing a list of events pending on the
 * signal, and arg is the user-supplied argument.
 *
 * If fn returns 0, continue on to the next signal. Otherwise, return the same
 * value that fn returned.
 */
static int
evmap_signal_foreach_signal(struct event_base *base,
    evmap_signal_foreach_signal_cb fn,
    void *arg)
{
	struct event_signal_map *sigmap = &base->sigmap;
	int r = 0;
	int signum;

	for (signum = 0; signum < sigmap->nentries; ++signum) {
		struct evmap_signal *ctx = sigmap->entries[signum];
		if (!ctx)
			continue;
		if ((r = fn(base, signum, ctx, arg)))
			break;
	}
	return r;
}

/* Helper for evmap_reinit_: tell the backend to add every fd for which we have
 * pending events, with the appropriate combination of EV_READ, EV_WRITE,
 * EV_CLOSED, and EV_ET. */
static int
evmap_io_reinit_iter_fn(struct event_base *base, evutil_socket_t fd,
    struct evmap_io *ctx, void *arg)
{
	const struct eventop *evsel = base->evsel;
	void *extra;
	int *result = arg;
	short events = 0;
	struct event *ev;
	EVUTIL_ASSERT(ctx);

	extra = ((char*)ctx) + sizeof(struct evmap_io);
	if (ctx->nread)
		events |= EV_READ;
	if (ctx->nwrite)
		events |= EV_WRITE;
	if (ctx->nclose)
		events |= EV_CLOSED;
	if (evsel->fdinfo_len)
		memset(extra, 0, evsel->fdinfo_len);
	if (events &&
	    (ev = LIST_FIRST(&ctx->events)) &&
	    (ev->ev_events & EV_ET))
		events |= EV_ET;
	if (evsel->add(base, fd, 0, events, extra) == -1)
		*result = -1;

	return 0;
}

/* Helper for evmap_reinit_: tell the backend to add every signal for which we
 * have pending events.  */
static int
evmap_signal_reinit_iter_fn(struct event_base *base,
    int signum, struct evmap_signal *ctx, void *arg)
{
	const struct eventop *evsel = base->evsigsel;
	int *result = arg;

	if (!LIST_EMPTY(&ctx->events)) {
		if (evsel->add(base, signum, 0, EV_SIGNAL, NULL) == -1)
			*result = -1;
	}
	return 0;
}

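/* Re-add every fd and signal with pending events to the backend (for example
 * after the backend has been reinitialized).  Returns 0 on success, -1 if any
 * add fails. */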
int
evmap_reinit_(struct event_base *base)
{
	int result = 0;

	evmap_io_foreach_fd(base, evmap_io_reinit_iter_fn, &result);
	if (result < 0)
		return -1;
	evmap_signal_foreach_signal(base, evmap_signal_reinit_iter_fn, &result);
	if (result < 0)
		return -1;
	return 0;
}

/* Helper for evmap_delete_all_: delete every event in an event_dlist. */
static int
delete_all_in_dlist(struct event_dlist *dlist)
{
	struct event *ev;
	while ((ev = LIST_FIRST(dlist)))
		event_del(ev);
	return 0;
}

/* Helper for evmap_delete_all_: delete every event pending on an fd. */
static int
evmap_io_delete_all_iter_fn(struct event_base *base, evutil_socket_t fd,
    struct evmap_io *io_info, void *arg)
{
	return delete_all_in_dlist(&io_info->events);
}

/* Helper for evmap_delete_all_: delete every event pending on a signal. */
static int
evmap_signal_delete_all_iter_fn(struct event_base *base, int signum,
    struct evmap_signal *sig_info, void *arg)
{
	return delete_all_in_dlist(&sig_info->events);
}

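/* Delete every io and signal event registered in 'base', via event_del(). */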
void
evmap_delete_all_(struct event_base *base)
{
	evmap_signal_foreach_signal(base, evmap_signal_delete_all_iter_fn, NULL);
	evmap_io_foreach_fd(base, evmap_io_delete_all_iter_fn, NULL);
}

/** Per-fd structure for use with changelists.  It keeps track, for each fd or
 * signal using the changelist, of where its entry in the changelist is.
 */
struct event_changelist_fdinfo {
	int idxplus1; /* this is the index +1, so that memset(0) will make it
		       * a no-such-element */
};

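/* Set up an empty changelist. */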
void
event_changelist_init_(struct event_changelist *changelist)
{
	changelist->changes = NULL;
	changelist->changes_size = 0;
	changelist->n_changes = 0;
}

/** Helper: return the changelist_fdinfo corresponding to a given change. */
static inline struct event_changelist_fdinfo *
event_change_get_fdinfo(struct event_base *base,
    const struct event_change *change)
{
	char *ptr;
	if (change->read_change & EV_CHANGE_SIGNAL) {
		struct evmap_signal *ctx;
		GET_SIGNAL_SLOT(ctx, &base->sigmap, change->fd, evmap_signal);
		ptr = ((char*)ctx) + sizeof(struct evmap_signal);
	} else {
		struct evmap_io *ctx;
		GET_IO_SLOT(ctx, &base->io, change->fd, evmap_io);
		ptr = ((char*)ctx) + sizeof(struct evmap_io);
	}
	return (void*)ptr;
}

/** Callback helper for event_changelist_assert_ok */
static int
event_changelist_assert_ok_foreach_iter_fn(
	struct event_base *base,
	evutil_socket_t fd, struct evmap_io *io, void *arg)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *f;
	f = (void*)
	    ( ((char*)io) + sizeof(struct evmap_io) );
	if (f->idxplus1) {
		struct event_change *c = &changelist->changes[f->idxplus1 - 1];
		EVUTIL_ASSERT(c->fd == fd);
	}
	return 0;
}

/** Make sure that the changelist is consistent with the evmap structures. */
static void
event_changelist_assert_ok(struct event_base *base)
{
	int i;
	struct event_changelist *changelist = &base->changelist;

	EVUTIL_ASSERT(changelist->changes_size >= changelist->n_changes);
	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *c = &changelist->changes[i];
		struct event_changelist_fdinfo *f;
		EVUTIL_ASSERT(c->fd >= 0);
		f = event_change_get_fdinfo(base, c);
		EVUTIL_ASSERT(f);
		EVUTIL_ASSERT(f->idxplus1 == i + 1);
	}

	evmap_io_foreach_fd(base,
	    event_changelist_assert_ok_foreach_iter_fn,
	    NULL);
}

#ifdef DEBUG_CHANGELIST
#define event_changelist_check(base)  event_changelist_assert_ok((base))
#else
#define event_changelist_check(base)  ((void)0)
#endif

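/* Forget every pending change without applying it: clear each fd's or
 * signal's index into the changelist and reset the change count to zero. */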
void
event_changelist_remove_all_(struct event_changelist *changelist,
    struct event_base *base)
{
	int i;

	event_changelist_check(base);

	for (i = 0; i < changelist->n_changes; ++i) {
		struct event_change *ch = &changelist->changes[i];
		struct event_changelist_fdinfo *fdinfo =
		    event_change_get_fdinfo(base, ch);
		EVUTIL_ASSERT(fdinfo->idxplus1 == i + 1);
		fdinfo->idxplus1 = 0;
	}

	changelist->n_changes = 0;

	event_changelist_check(base);
}

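/* Release the memory held by a changelist and reset it to the empty state. */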
void
event_changelist_freemem_(struct event_changelist *changelist)
{
	if (changelist->changes)
		mm_free(changelist->changes);
	event_changelist_init_(changelist); /* zero it all out. */
}

/** Increase the size of 'changelist' to hold more changes. */
static int
event_changelist_grow(struct event_changelist *changelist)
{
	int new_size;
	struct event_change *new_changes;
	if (changelist->changes_size < 64)
		new_size = 64;
	else
		new_size = changelist->changes_size * 2;

	new_changes = mm_realloc(changelist->changes,
	    new_size * sizeof(struct event_change));

	if (EVUTIL_UNLIKELY(new_changes == NULL))
		return (-1);

	changelist->changes = new_changes;
	changelist->changes_size = new_size;

	return (0);
}

/** Return a pointer to the changelist entry for the file descriptor or signal
 * 'fd', whose fdinfo is 'fdinfo'.  If none exists, construct it, setting its
 * old_events field to old_events.
 */
static struct event_change *
event_changelist_get_or_construct(struct event_changelist *changelist,
    evutil_socket_t fd,
    short old_events,
    struct event_changelist_fdinfo *fdinfo)
{
	struct event_change *change;

	if (fdinfo->idxplus1 == 0) {
		int idx;
		EVUTIL_ASSERT(changelist->n_changes <= changelist->changes_size);

		if (changelist->n_changes == changelist->changes_size) {
			if (event_changelist_grow(changelist) < 0)
				return NULL;
		}

		idx = changelist->n_changes++;
		change = &changelist->changes[idx];
		fdinfo->idxplus1 = idx + 1;

		memset(change, 0, sizeof(struct event_change));
		change->fd = fd;
		change->old_events = old_events;
	} else {
		change = &changelist->changes[fdinfo->idxplus1 - 1];
		EVUTIL_ASSERT(change->fd == fd);
	}
	return change;
}

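/* The changelist implementation of the eventop 'add' hook: record that the
 * read/write/close interest in 'events' should be added for 'fd'.  'p' is the
 * fd's event_changelist_fdinfo.  Returns 0 on success, -1 if the changelist
 * could not be grown. */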
int
event_changelist_add_(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *fdinfo = p;
	struct event_change *change;

	event_changelist_check(base);

	change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
	if (!change)
		return -1;

	/* An add replaces any previous delete, but doesn't result in a no-op,
	 * since the delete might fail (because the fd had been closed since
	 * the last add, for instance). */

	if (events & (EV_READ|EV_SIGNAL)) {
		change->read_change = EV_CHANGE_ADD |
		    (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
	}
	if (events & EV_WRITE) {
		change->write_change = EV_CHANGE_ADD |
		    (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
	}
	if (events & EV_CLOSED) {
		change->close_change = EV_CHANGE_ADD |
		    (events & (EV_ET|EV_PERSIST|EV_SIGNAL));
	}

	event_changelist_check(base);
	return (0);
}

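/* The changelist implementation of the eventop 'del' hook: record that the
 * read/write/close interest in 'events' should be removed for 'fd'.  A delete
 * cancels a not-yet-committed add rather than being queued after it. */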
int
event_changelist_del_(struct event_base *base, evutil_socket_t fd, short old, short events,
    void *p)
{
	struct event_changelist *changelist = &base->changelist;
	struct event_changelist_fdinfo *fdinfo = p;
	struct event_change *change;

	event_changelist_check(base);
	change = event_changelist_get_or_construct(changelist, fd, old, fdinfo);
	event_changelist_check(base);
	if (!change)
		return -1;

	/* A delete on an event set that doesn't contain the event to be
	   deleted produces a no-op.  This effectively removes any previous
	   uncommitted add, rather than replacing it: on those platforms where
	   "add, delete, dispatch" is not the same as "no-op, dispatch", we
	   want the no-op behavior.

	   If we have a no-op item, we could remove it from the list
	   entirely, but really there's not much point: skipping the no-op
	   change when we do the dispatch later is far cheaper than rejuggling
	   the array now.

	   As this stands, it also lets through deletions of events that are
	   not currently set.
	 */

	if (events & (EV_READ|EV_SIGNAL)) {
		if (!(change->old_events & (EV_READ | EV_SIGNAL)))
			change->read_change = 0;
		else
			change->read_change = EV_CHANGE_DEL;
	}
	if (events & EV_WRITE) {
		if (!(change->old_events & EV_WRITE))
			change->write_change = 0;
		else
			change->write_change = EV_CHANGE_DEL;
	}
	if (events & EV_CLOSED) {
		if (!(change->old_events & EV_CLOSED))
			change->close_change = 0;
		else
			change->close_change = EV_CHANGE_DEL;
	}

	event_changelist_check(base);
	return (0);
}

/* Helper for evmap_check_integrity_: verify that all of the events pending on
 * a given fd are set up correctly, and that the nread, nwrite, and nclose
 * counts on that fd are correct. */
static int
evmap_io_check_integrity_fn(struct event_base *base, evutil_socket_t fd,
    struct evmap_io *io_info, void *arg)
{
	struct event *ev;
	int n_read = 0, n_write = 0, n_close = 0;

	/* First, make sure the list itself isn't corrupt. Otherwise,
	 * running LIST_FOREACH could be an exciting adventure. */
	EVUTIL_ASSERT_LIST_OK(&io_info->events, event, ev_io_next);

	LIST_FOREACH(ev, &io_info->events, ev_io_next) {
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
		EVUTIL_ASSERT(ev->ev_fd == fd);
		EVUTIL_ASSERT(!(ev->ev_events & EV_SIGNAL));
		EVUTIL_ASSERT((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED)));
		if (ev->ev_events & EV_READ)
			++n_read;
		if (ev->ev_events & EV_WRITE)
			++n_write;
		if (ev->ev_events & EV_CLOSED)
			++n_close;
	}

	EVUTIL_ASSERT(n_read == io_info->nread);
	EVUTIL_ASSERT(n_write == io_info->nwrite);
	EVUTIL_ASSERT(n_close == io_info->nclose);

	return 0;
}

/* Helper for evmap_check_integrity_: verify that all of the events pending
 * on a given signal are set up correctly. */
static int
evmap_signal_check_integrity_fn(struct event_base *base,
    int signum, struct evmap_signal *sig_info, void *arg)
{
	struct event *ev;
	/* First, make sure the list itself isn't corrupt. */
	EVUTIL_ASSERT_LIST_OK(&sig_info->events, event, ev_signal_next);

	LIST_FOREACH(ev, &sig_info->events, ev_signal_next) {
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_INSERTED);
		EVUTIL_ASSERT(ev->ev_fd == signum);
		EVUTIL_ASSERT((ev->ev_events & EV_SIGNAL));
		EVUTIL_ASSERT(!(ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED)));
	}
	return 0;
}

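/* Assert that every io and signal entry in 'base' is internally consistent,
 * and, when the changelist backend is in use, that the changelist agrees with
 * the maps.  Debugging aid: all failures are assertion failures. */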
void
evmap_check_integrity_(struct event_base *base)
{
	evmap_io_foreach_fd(base, evmap_io_check_integrity_fn, NULL);
	evmap_signal_foreach_signal(base, evmap_signal_check_integrity_fn, NULL);

	if (base->evsel->add == event_changelist_add_)
		event_changelist_assert_ok(base);
}

/* Helper type for evmap_foreach_event_: Bundles a function to call on every
 * event, and the user-provided void* to use as its third argument. */
struct evmap_foreach_event_helper {
	event_base_foreach_event_cb fn;
	void *arg;
};

/* Helper for evmap_foreach_event_: calls a provided function on every event
 * pending on a given fd.  */
static int
evmap_io_foreach_event_fn(struct event_base *base, evutil_socket_t fd,
    struct evmap_io *io_info, void *arg)
{
	struct evmap_foreach_event_helper *h = arg;
	struct event *ev;
	int r;
	LIST_FOREACH(ev, &io_info->events, ev_io_next) {
		if ((r = h->fn(base, ev, h->arg)))
			return r;
	}
	return 0;
}

/* Helper for evmap_foreach_event_: calls a provided function on every event
 * pending on a given signal.  */
static int
evmap_signal_foreach_event_fn(struct event_base *base, int signum,
    struct evmap_signal *sig_info, void *arg)
{
	struct event *ev;
	struct evmap_foreach_event_helper *h = arg;
	int r;
	LIST_FOREACH(ev, &sig_info->events, ev_signal_next) {
		if ((r = h->fn(base, ev, h->arg)))
			return r;
	}
	return 0;
}

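/* Run 'fn(base, event, arg)' on every io and signal event registered in
 * 'base', stopping early and returning fn's result as soon as it returns
 * nonzero; returns 0 if every call returned 0. */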
int
evmap_foreach_event_(struct event_base *base,
    event_base_foreach_event_cb fn, void *arg)
{
	struct evmap_foreach_event_helper h;
	int r;
	h.fn = fn;
	h.arg = arg;
	if ((r = evmap_io_foreach_fd(base, evmap_io_foreach_event_fn, &h)))
		return r;
	return evmap_signal_foreach_signal(base, evmap_signal_foreach_event_fn, &h);
}