/* regress.c -- libevent regression tests (revision 275970) */
/*
 * Copyright (c) 2003-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "util-internal.h"

#ifdef _WIN32
#include <winsock2.h>
#include <windows.h>
#endif

#include "event2/event-config.h"

#include <sys/types.h>
#include <sys/stat.h>
#ifdef EVENT__HAVE_SYS_TIME_H
#include <sys/time.h>
#endif
#include <sys/queue.h>
#ifndef _WIN32
#include <sys/socket.h>
#include <sys/wait.h>
#include <signal.h>
#include <unistd.h>
#include <netdb.h>
#endif
#include <fcntl.h>
#include <signal.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <ctype.h>

#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/event_compat.h"
#include "event2/tag.h"
#include "event2/buffer.h"
#include "event2/buffer_compat.h"
#include "event2/util.h"
#include "event-internal.h"
#include "evthread-internal.h"
#include "log-internal.h"
#include "time-internal.h"

#include "regress.h"

#ifndef _WIN32
#include "regress.gen.h"
#endif

evutil_socket_t pair[2];
int test_ok;
int called;
struct event_base *global_base;

static char wbuf[4096];
static char rbuf[4096];
static int woff;
static int roff;
static int usepersist;
static struct timeval tset;
static struct timeval tcalled;


#define TEST1	"this is a test"

#ifndef SHUT_WR
#define SHUT_WR 1
#endif

#ifdef _WIN32
#define write(fd,buf,len) send((fd),(buf),(int)(len),0)
#define read(fd,buf,len) recv((fd),(buf),(int)(len),0)
#endif
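/* Shared state for the legacy (non-tinytest-style) tests below: the
 * socketpair under test, pass/fail and call counters, and the buffers,
 * offsets and persist flag used by the multiple/persistent read-write
 * tests. */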
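/* Windows sockets are not file descriptors, so map the read()/write()
 * calls used throughout these tests onto recv()/send(). */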

struct basic_cb_args
{
	struct event_base *eb;
	struct event *ev;
	unsigned int callcount;
};

static void
simple_read_cb(evutil_socket_t fd, short event, void *arg)
{
	char buf[256];
	int len;

	len = read(fd, buf, sizeof(buf));

	if (len) {
		if (!called) {
			if (event_add(arg, NULL) == -1)
				exit(1);
		}
	} else if (called == 1)
		test_ok = 1;

	called++;
}

static void
basic_read_cb(evutil_socket_t fd, short event, void *data)
{
	char buf[256];
	int len;
	struct basic_cb_args *arg = data;

	len = read(fd, buf, sizeof(buf));

	if (len < 0) {
		tt_fail_perror("read (callback)");
	} else {
		switch (arg->callcount++) {
		case 0:	 /* first call: expect to read data; cycle */
			if (len > 0)
				return;

			tt_fail_msg("EOF before data read");
			break;

		case 1:	 /* second call: expect EOF; stop */
			if (len > 0)
				tt_fail_msg("not all data read on first cycle");
			break;

		default:  /* third call: should not happen */
			tt_fail_msg("too many cycles");
		}
	}

	event_del(arg->ev);
	event_base_loopexit(arg->eb, NULL);
}

static void
dummy_read_cb(evutil_socket_t fd, short event, void *arg)
{
}

static void
simple_write_cb(evutil_socket_t fd, short event, void *arg)
{
	int len;

	len = write(fd, TEST1, strlen(TEST1) + 1);
	if (len == -1)
		test_ok = 0;
	else
		test_ok = 1;
}

static void
multiple_write_cb(evutil_socket_t fd, short event, void *arg)
{
	struct event *ev = arg;
	int len;

	len = 128;
	if (woff + len >= (int)sizeof(wbuf))
		len = sizeof(wbuf) - woff;

	len = write(fd, wbuf + woff, len);
	if (len == -1) {
		fprintf(stderr, "%s: write\n", __func__);
		if (usepersist)
			event_del(ev);
		return;
	}

	woff += len;

	if (woff >= (int)sizeof(wbuf)) {
		shutdown(fd, SHUT_WR);
		if (usepersist)
			event_del(ev);
		return;
	}

	if (!usepersist) {
		if (event_add(ev, NULL) == -1)
			exit(1);
	}
}

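/* Read callback for the simplest tests: reads whatever is available,
 * re-adds the (non-persistent) event on the first call, and reports
 * success once it sees EOF on the second call. */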
static void
multiple_read_cb(evutil_socket_t fd, short event, void *arg)
{
	struct event *ev = arg;
	int len;

	len = read(fd, rbuf + roff, sizeof(rbuf) - roff);
	if (len == -1)
		fprintf(stderr, "%s: read\n", __func__);
	if (len <= 0) {
		if (usepersist)
			event_del(ev);
		return;
	}

	roff += len;
	if (!usepersist) {
		if (event_add(ev, NULL) == -1)
			exit(1);
	}
}

static void
timeout_cb(evutil_socket_t fd, short event, void *arg)
{
	evutil_gettimeofday(&tcalled, NULL);
}

struct both {
	struct event ev;
	int nread;
};

static void
combined_read_cb(evutil_socket_t fd, short event, void *arg)
{
	struct both *both = arg;
	char buf[128];
	int len;

	len = read(fd, buf, sizeof(buf));
	if (len == -1)
		fprintf(stderr, "%s: read\n", __func__);
	if (len <= 0)
		return;

	both->nread += len;
	if (event_add(&both->ev, NULL) == -1)
		exit(1);
}

static void
combined_write_cb(evutil_socket_t fd, short event, void *arg)
{
	struct both *both = arg;
	char buf[128];
	int len;

	len = sizeof(buf);
	if (len > both->nread)
		len = both->nread;

	memset(buf, 'q', len);

	len = write(fd, buf, len);
	if (len == -1)
		fprintf(stderr, "%s: write\n", __func__);
	if (len <= 0) {
		shutdown(fd, SHUT_WR);
		return;
	}

	both->nread -= len;
	if (event_add(&both->ev, NULL) == -1)
		exit(1);
}

/* These macros used to replicate the work of the legacy test wrapper code */
#define setup_test(x) do {						\
	if (!in_legacy_test_wrapper) {					\
		TT_FAIL(("Legacy test %s not wrapped properly", x));	\
		return;							\
	}								\
	} while (0)
#define cleanup_test() setup_test("cleanup")

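/* Bookkeeping for the combined read/write test: nread counts bytes
 * received so far for the readers, and bytes still to be sent for the
 * writers. */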
static void
test_simpleread(void)
{
	struct event ev;

	/* Very simple read test */
	setup_test("Simple read: ");

	if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
		tt_fail_perror("write");
	}

	shutdown(pair[0], SHUT_WR);

	event_set(&ev, pair[1], EV_READ, simple_read_cb, &ev);
	if (event_add(&ev, NULL) == -1)
		exit(1);
	event_dispatch();

	cleanup_test();
}

static void
test_simplewrite(void)
{
	struct event ev;

	/* Very simple write test */
	setup_test("Simple write: ");

	event_set(&ev, pair[0], EV_WRITE, simple_write_cb, &ev);
	if (event_add(&ev, NULL) == -1)
		exit(1);
	event_dispatch();

	cleanup_test();
}

static void
simpleread_multiple_cb(evutil_socket_t fd, short event, void *arg)
{
	if (++called == 2)
		test_ok = 1;
}

static void
test_simpleread_multiple(void)
{
	struct event one, two;

	/* Very simple read test */
348	setup_test("Simple read to multiple evens: ");

	if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
		tt_fail_perror("write");
	}

	shutdown(pair[0], SHUT_WR);

	event_set(&one, pair[1], EV_READ, simpleread_multiple_cb, NULL);
	if (event_add(&one, NULL) == -1)
		exit(1);
	event_set(&two, pair[1], EV_READ, simpleread_multiple_cb, NULL);
	if (event_add(&two, NULL) == -1)
		exit(1);
	event_dispatch();

	cleanup_test();
}

static int have_closed = 0;
static int premature_event = 0;
static void
simpleclose_close_fd_cb(evutil_socket_t s, short what, void *ptr)
{
	evutil_socket_t **fds = ptr;
	TT_BLATHER(("Closing"));
	evutil_closesocket(*fds[0]);
	evutil_closesocket(*fds[1]);
	*fds[0] = -1;
	*fds[1] = -1;
	have_closed = 1;
}

static void
record_event_cb(evutil_socket_t s, short what, void *ptr)
{
	short *whatp = ptr;
	if (!have_closed)
		premature_event = 1;
	*whatp = what;
	TT_BLATHER(("Recorded %d on socket %d", (int)what, (int)s));
}

static void
test_simpleclose(void *ptr)
{
	/* Test that a close of FD is detected as a read and as a write. */
	struct event_base *base = event_base_new();
	evutil_socket_t pair1[2]={-1,-1}, pair2[2] = {-1, -1};
	evutil_socket_t *to_close[2];
	struct event *rev=NULL, *wev=NULL, *closeev=NULL;
	struct timeval tv;
	short got_read_on_close = 0, got_write_on_close = 0;
	char buf[1024];
	memset(buf, 99, sizeof(buf));
#ifdef _WIN32
#define LOCAL_SOCKETPAIR_AF AF_INET
#else
#define LOCAL_SOCKETPAIR_AF AF_UNIX
#endif
	if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0, pair1)<0)
		TT_DIE(("socketpair: %s", strerror(errno)));
	if (evutil_socketpair(LOCAL_SOCKETPAIR_AF, SOCK_STREAM, 0, pair2)<0)
		TT_DIE(("socketpair: %s", strerror(errno)));
	if (evutil_make_socket_nonblocking(pair1[1]) < 0)
		TT_DIE(("make_socket_nonblocking"));
	if (evutil_make_socket_nonblocking(pair2[1]) < 0)
		TT_DIE(("make_socket_nonblocking"));

	/** Stuff pair2[1] full of data, until write fails */
	while (1) {
		int r = write(pair2[1], buf, sizeof(buf));
		if (r<0) {
			int err = evutil_socket_geterror(pair2[1]);
			if (! EVUTIL_ERR_RW_RETRIABLE(err))
				TT_DIE(("write failed strangely: %s",
					evutil_socket_error_to_string(err)));
			break;
		}
	}
	to_close[0] = &pair1[0];
	to_close[1] = &pair2[0];

	closeev = event_new(base, -1, EV_TIMEOUT, simpleclose_close_fd_cb,
	    to_close);
	rev = event_new(base, pair1[1], EV_READ, record_event_cb,
	    &got_read_on_close);
	TT_BLATHER(("Waiting for read on %d", (int)pair1[1]));
	wev = event_new(base, pair2[1], EV_WRITE, record_event_cb,
	    &got_write_on_close);
	TT_BLATHER(("Waiting for write on %d", (int)pair2[1]));
	tv.tv_sec = 0;
	tv.tv_usec = 100*1000; /* Close pair1[0] after a little while, and make
			       * sure we get a read event. */
	event_add(closeev, &tv);
	event_add(rev, NULL);
	event_add(wev, NULL);
	/* Don't let the test go on too long. */
	tv.tv_sec = 0;
	tv.tv_usec = 200*1000;
	event_base_loopexit(base, &tv);
	event_base_loop(base, 0);

	tt_int_op(got_read_on_close, ==, EV_READ);
	tt_int_op(got_write_on_close, ==, EV_WRITE);
	tt_int_op(premature_event, ==, 0);

end:
	if (pair1[0] >= 0)
		evutil_closesocket(pair1[0]);
	if (pair1[1] >= 0)
		evutil_closesocket(pair1[1]);
	if (pair2[0] >= 0)
		evutil_closesocket(pair2[0]);
	if (pair2[1] >= 0)
		evutil_closesocket(pair2[1]);
	if (rev)
		event_free(rev);
	if (wev)
		event_free(wev);
	if (closeev)
		event_free(closeev);
	if (base)
		event_base_free(base);
}

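/* State for test_simpleclose: have_closed flips once the helper closes
 * the sockets; premature_event records any callback that ran before the
 * close actually happened. */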

static void
test_multiple(void)
{
	struct event ev, ev2;
	int i;

	/* Multiple read and write test */
	setup_test("Multiple read/write: ");
	memset(rbuf, 0, sizeof(rbuf));
	for (i = 0; i < (int)sizeof(wbuf); i++)
		wbuf[i] = i;

	roff = woff = 0;
	usepersist = 0;

	event_set(&ev, pair[0], EV_WRITE, multiple_write_cb, &ev);
	if (event_add(&ev, NULL) == -1)
		exit(1);
	event_set(&ev2, pair[1], EV_READ, multiple_read_cb, &ev2);
	if (event_add(&ev2, NULL) == -1)
		exit(1);
	event_dispatch();

	if (roff == woff)
		test_ok = memcmp(rbuf, wbuf, sizeof(wbuf)) == 0;

	cleanup_test();
}

static void
test_persistent(void)
{
	struct event ev, ev2;
	int i;

	/* Multiple read and write test with persist */
	setup_test("Persist read/write: ");
	memset(rbuf, 0, sizeof(rbuf));
	for (i = 0; i < (int)sizeof(wbuf); i++)
		wbuf[i] = i;

	roff = woff = 0;
	usepersist = 1;

	event_set(&ev, pair[0], EV_WRITE|EV_PERSIST, multiple_write_cb, &ev);
	if (event_add(&ev, NULL) == -1)
		exit(1);
	event_set(&ev2, pair[1], EV_READ|EV_PERSIST, multiple_read_cb, &ev2);
	if (event_add(&ev2, NULL) == -1)
		exit(1);
	event_dispatch();

	if (roff == woff)
		test_ok = memcmp(rbuf, wbuf, sizeof(wbuf)) == 0;

	cleanup_test();
}

static void
test_combined(void)
{
	struct both r1, r2, w1, w2;

	setup_test("Combined read/write: ");
	memset(&r1, 0, sizeof(r1));
	memset(&r2, 0, sizeof(r2));
	memset(&w1, 0, sizeof(w1));
	memset(&w2, 0, sizeof(w2));

	w1.nread = 4096;
	w2.nread = 8192;

	event_set(&r1.ev, pair[0], EV_READ, combined_read_cb, &r1);
	event_set(&w1.ev, pair[0], EV_WRITE, combined_write_cb, &w1);
	event_set(&r2.ev, pair[1], EV_READ, combined_read_cb, &r2);
	event_set(&w2.ev, pair[1], EV_WRITE, combined_write_cb, &w2);
	tt_assert(event_add(&r1.ev, NULL) != -1);
	tt_assert(!event_add(&w1.ev, NULL));
	tt_assert(!event_add(&r2.ev, NULL));
	tt_assert(!event_add(&w2.ev, NULL));
	event_dispatch();

	if (r1.nread == 8192 && r2.nread == 4096)
		test_ok = 1;

end:
	cleanup_test();
}

static void
test_simpletimeout(void)
{
	struct timeval tv;
	struct event ev;

	setup_test("Simple timeout: ");

	tv.tv_usec = 200*1000;
	tv.tv_sec = 0;
	evutil_timerclear(&tcalled);
	evtimer_set(&ev, timeout_cb, NULL);
	evtimer_add(&ev, &tv);

	evutil_gettimeofday(&tset, NULL);
	event_dispatch();
	test_timeval_diff_eq(&tset, &tcalled, 200);

	test_ok = 1;
end:
	cleanup_test();
}

static void
periodic_timeout_cb(evutil_socket_t fd, short event, void *arg)
{
	int *count = arg;

	(*count)++;
	if (*count == 6) {
		/* call loopexit only once - on slow machines(?), it is
		 * apparently possible for this to get called twice. */
		test_ok = 1;
		event_base_loopexit(global_base, NULL);
	}
}

static void
test_persistent_timeout(void)
{
	struct timeval tv;
	struct event ev;
	int count = 0;

	evutil_timerclear(&tv);
	tv.tv_usec = 10000;

	event_assign(&ev, global_base, -1, EV_TIMEOUT|EV_PERSIST,
	    periodic_timeout_cb, &count);
	event_add(&ev, &tv);

	event_dispatch();

	event_del(&ev);
}

static void
test_persistent_timeout_jump(void *ptr)
{
	struct basic_test_data *data = ptr;
	struct event ev;
	int count = 0;
	struct timeval msec100 = { 0, 100 * 1000 };
	struct timeval msec50 = { 0, 50 * 1000 };
	struct timeval msec300 = { 0, 300 * 1000 };

	event_assign(&ev, data->base, -1, EV_PERSIST, periodic_timeout_cb, &count);
	event_add(&ev, &msec100);
	/* Wait for a bit */
	evutil_usleep_(&msec300);
	event_base_loopexit(data->base, &msec50);
	event_base_dispatch(data->base);
	tt_int_op(count, ==, 1);

end:
	event_del(&ev);
}

struct persist_active_timeout_called {
	int n;
	short events[16];
	struct timeval tvs[16];
};

static void
activate_cb(evutil_socket_t fd, short event, void *arg)
{
	struct event *ev = arg;
	event_active(ev, EV_READ, 1);
}

static void
persist_active_timeout_cb(evutil_socket_t fd, short event, void *arg)
{
	struct persist_active_timeout_called *c = arg;
	if (c->n < 15) {
		c->events[c->n] = event;
		evutil_gettimeofday(&c->tvs[c->n], NULL);
		++c->n;
	}
}

static void
test_persistent_active_timeout(void *ptr)
{
	struct timeval tv, tv2, tv_exit, start;
	struct event ev;
	struct persist_active_timeout_called res;

	struct basic_test_data *data = ptr;
	struct event_base *base = data->base;

	memset(&res, 0, sizeof(res));

	tv.tv_sec = 0;
	tv.tv_usec = 200 * 1000;
	event_assign(&ev, base, -1, EV_TIMEOUT|EV_PERSIST,
	    persist_active_timeout_cb, &res);
	event_add(&ev, &tv);

	tv2.tv_sec = 0;
	tv2.tv_usec = 100 * 1000;
	event_base_once(base, -1, EV_TIMEOUT, activate_cb, &ev, &tv2);

	tv_exit.tv_sec = 0;
	tv_exit.tv_usec = 600 * 1000;
	event_base_loopexit(base, &tv_exit);

	event_base_assert_ok_(base);
	evutil_gettimeofday(&start, NULL);

	event_base_dispatch(base);
	event_base_assert_ok_(base);

	tt_int_op(res.n, ==, 3);
	tt_int_op(res.events[0], ==, EV_READ);
	tt_int_op(res.events[1], ==, EV_TIMEOUT);
	tt_int_op(res.events[2], ==, EV_TIMEOUT);
	test_timeval_diff_eq(&start, &res.tvs[0], 100);
	test_timeval_diff_eq(&start, &res.tvs[1], 300);
	test_timeval_diff_eq(&start, &res.tvs[2], 500);
end:
	event_del(&ev);
}

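/* Per-event record for the common-timeout test: its index, how many
 * times it has fired, and when it last fired. */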
struct common_timeout_info {
	struct event ev;
	struct timeval called_at;
	int which;
	int count;
};

static void
common_timeout_cb(evutil_socket_t fd, short event, void *arg)
{
	struct common_timeout_info *ti = arg;
	++ti->count;
	evutil_gettimeofday(&ti->called_at, NULL);
	if (ti->count >= 4)
		event_del(&ti->ev);
}

static void
test_common_timeout(void *ptr)
{
	struct basic_test_data *data = ptr;

	struct event_base *base = data->base;
	int i;
	struct common_timeout_info info[100];

	struct timeval start;
	struct timeval tmp_100_ms = { 0, 100*1000 };
	struct timeval tmp_200_ms = { 0, 200*1000 };
	struct timeval tmp_5_sec = { 5, 0 };
	struct timeval tmp_5M_usec = { 0, 5*1000*1000 };

	const struct timeval *ms_100, *ms_200, *sec_5;

	ms_100 = event_base_init_common_timeout(base, &tmp_100_ms);
	ms_200 = event_base_init_common_timeout(base, &tmp_200_ms);
	sec_5 = event_base_init_common_timeout(base, &tmp_5_sec);
	tt_assert(ms_100);
	tt_assert(ms_200);
	tt_assert(sec_5);
	tt_ptr_op(event_base_init_common_timeout(base, &tmp_200_ms),
	    ==, ms_200);
	tt_ptr_op(event_base_init_common_timeout(base, ms_200), ==, ms_200);
	tt_ptr_op(event_base_init_common_timeout(base, &tmp_5M_usec), ==, sec_5);
	tt_int_op(ms_100->tv_sec, ==, 0);
	tt_int_op(ms_200->tv_sec, ==, 0);
	tt_int_op(sec_5->tv_sec, ==, 5);
	tt_int_op(ms_100->tv_usec, ==, 100000|0x50000000);
	tt_int_op(ms_200->tv_usec, ==, 200000|0x50100000);
	tt_int_op(sec_5->tv_usec, ==, 0|0x50200000);

	memset(info, 0, sizeof(info));

	for (i=0; i<100; ++i) {
		info[i].which = i;
		event_assign(&info[i].ev, base, -1, EV_TIMEOUT|EV_PERSIST,
		    common_timeout_cb, &info[i]);
		if (i % 2) {
			if ((i%20)==1) {
				/* Glass-box test: Make sure we survive the
				 * transition to non-common timeouts. It's
				 * a little tricky. */
				event_add(&info[i].ev, ms_200);
				event_add(&info[i].ev, &tmp_100_ms);
			} else if ((i%20)==3) {
				/* Check heap-to-common too. */
				event_add(&info[i].ev, &tmp_200_ms);
				event_add(&info[i].ev, ms_100);
			} else if ((i%20)==5) {
				/* Also check common-to-common. */
				event_add(&info[i].ev, ms_200);
				event_add(&info[i].ev, ms_100);
			} else {
				event_add(&info[i].ev, ms_100);
			}
		} else {
			event_add(&info[i].ev, ms_200);
		}
	}

	event_base_assert_ok_(base);
	evutil_gettimeofday(&start, NULL);
	event_base_dispatch(base);

	event_base_assert_ok_(base);

	for (i=0; i<10; ++i) {
		tt_int_op(info[i].count, ==, 4);
		if (i % 2) {
			test_timeval_diff_eq(&start, &info[i].called_at, 400);
		} else {
			test_timeval_diff_eq(&start, &info[i].called_at, 800);
		}
	}

	/* Make sure we can free the base with some events in. */
	for (i=0; i<100; ++i) {
		if (i % 2) {
			event_add(&info[i].ev, ms_100);
		} else {
			event_add(&info[i].ev, ms_200);
		}
	}

end:
	event_base_free(data->base); /* need to do this here before info is
				      * out-of-scope */
	data->base = NULL;
}

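	/* Glass-box check (internal detail): a common timeout is tagged by a
	 * magic value (0x50000000) in the high bits of tv_usec, with the
	 * common-timeout index stored above the real microseconds. */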
#ifndef _WIN32
static void signal_cb(evutil_socket_t fd, short event, void *arg);

#define current_base event_global_current_base_
extern struct event_base *current_base;

static void
child_signal_cb(evutil_socket_t fd, short event, void *arg)
{
	struct timeval tv;
	int *pint = arg;

	*pint = 1;

	tv.tv_usec = 500000;
	tv.tv_sec = 0;
	event_loopexit(&tv);
}

static void
test_fork(void)
{
	int status, got_sigchld = 0;
	struct event ev, sig_ev;
	pid_t pid;

	setup_test("After fork: ");

	tt_assert(current_base);
	evthread_make_base_notifiable(current_base);

	if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
		tt_fail_perror("write");
	}

	event_set(&ev, pair[1], EV_READ, simple_read_cb, &ev);
	if (event_add(&ev, NULL) == -1)
		exit(1);

	evsignal_set(&sig_ev, SIGCHLD, child_signal_cb, &got_sigchld);
	evsignal_add(&sig_ev, NULL);

	event_base_assert_ok_(current_base);
	TT_BLATHER(("Before fork"));
	if ((pid = regress_fork()) == 0) {
		/* in the child */
		TT_BLATHER(("In child, before reinit"));
		event_base_assert_ok_(current_base);
		if (event_reinit(current_base) == -1) {
			fprintf(stdout, "FAILED (reinit)\n");
			exit(1);
		}
		TT_BLATHER(("After reinit"));
		event_base_assert_ok_(current_base);
		TT_BLATHER(("After assert-ok"));

		evsignal_del(&sig_ev);

		called = 0;

		event_dispatch();

		event_base_free(current_base);

		/* we do not send an EOF; simple_read_cb requires an EOF
		 * to set test_ok.  we just verify that the callback was
		 * called. */
		exit(test_ok != 0 || called != 2 ? -2 : 76);
	}

	/* wait for the child to read the data */
	{
		const struct timeval tv = { 0, 100000 };
		evutil_usleep_(&tv);
	}

	if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
		tt_fail_perror("write");
	}

	TT_BLATHER(("Before waitpid"));
	if (waitpid(pid, &status, 0) == -1) {
		fprintf(stdout, "FAILED (fork)\n");
		exit(1);
	}
	TT_BLATHER(("After waitpid"));

	if (WEXITSTATUS(status) != 76) {
		fprintf(stdout, "FAILED (exit): %d\n", WEXITSTATUS(status));
		exit(1);
	}

	/* test that the current event loop still works */
	if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
		fprintf(stderr, "%s: write\n", __func__);
	}

	shutdown(pair[0], SHUT_WR);

	event_dispatch();

	if (!got_sigchld) {
		fprintf(stdout, "FAILED (sigchld)\n");
		exit(1);
	}

	evsignal_del(&sig_ev);

	end:
	cleanup_test();
}

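/* Fork a child, event_reinit() its copy of the base, and make sure both
 * the child's loop and the parent's original loop (including SIGCHLD
 * delivery) still work afterwards. */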
static void
signal_cb_sa(int sig)
{
	test_ok = 2;
}

static void
signal_cb(evutil_socket_t fd, short event, void *arg)
{
	struct event *ev = arg;

	evsignal_del(ev);
	test_ok = 1;
}

static void
test_simplesignal(void)
{
	struct event ev;
	struct itimerval itv;

	setup_test("Simple signal: ");
	evsignal_set(&ev, SIGALRM, signal_cb, &ev);
	evsignal_add(&ev, NULL);
	/* find bugs in which operations are re-ordered */
	evsignal_del(&ev);
	evsignal_add(&ev, NULL);

	memset(&itv, 0, sizeof(itv));
	itv.it_value.tv_sec = 0;
	itv.it_value.tv_usec = 100000;
	if (setitimer(ITIMER_REAL, &itv, NULL) == -1)
		goto skip_simplesignal;

	event_dispatch();
 skip_simplesignal:
	if (evsignal_del(&ev) == -1)
		test_ok = 0;

	cleanup_test();
}

static void
test_multiplesignal(void)
{
	struct event ev_one, ev_two;
	struct itimerval itv;

	setup_test("Multiple signal: ");

	evsignal_set(&ev_one, SIGALRM, signal_cb, &ev_one);
	evsignal_add(&ev_one, NULL);

	evsignal_set(&ev_two, SIGALRM, signal_cb, &ev_two);
	evsignal_add(&ev_two, NULL);

	memset(&itv, 0, sizeof(itv));
	itv.it_value.tv_sec = 0;
	itv.it_value.tv_usec = 100000;
	if (setitimer(ITIMER_REAL, &itv, NULL) == -1)
		goto skip_simplesignal;

	event_dispatch();

 skip_simplesignal:
	if (evsignal_del(&ev_one) == -1)
		test_ok = 0;
	if (evsignal_del(&ev_two) == -1)
		test_ok = 0;

	cleanup_test();
}

static void
test_immediatesignal(void)
{
	struct event ev;

	test_ok = 0;
	evsignal_set(&ev, SIGUSR1, signal_cb, &ev);
	evsignal_add(&ev, NULL);
	raise(SIGUSR1);
	event_loop(EVLOOP_NONBLOCK);
	evsignal_del(&ev);
	cleanup_test();
}

static void
test_signal_dealloc(void)
{
	/* make sure that evsignal_event is event_del'ed and pipe closed */
	struct event ev;
	struct event_base *base = event_init();
	evsignal_set(&ev, SIGUSR1, signal_cb, &ev);
	evsignal_add(&ev, NULL);
	evsignal_del(&ev);
	event_base_free(base);
	/* If we got here without asserting, we're fine. */
	test_ok = 1;
	cleanup_test();
}

static void
test_signal_pipeloss(void)
{
	/* make sure that the base1 pipe is closed correctly. */
	struct event_base *base1, *base2;
	int pipe1;
	test_ok = 0;
	base1 = event_init();
	pipe1 = base1->sig.ev_signal_pair[0];
	base2 = event_init();
	event_base_free(base2);
	event_base_free(base1);
	if (close(pipe1) != -1 || errno!=EBADF) {
		/* fd must be closed, so second close gives -1, EBADF */
		printf("signal pipe not closed. ");
		test_ok = 0;
	} else {
		test_ok = 1;
	}
	cleanup_test();
}

/*
 * make two bases to catch signals, use both of them.  this only works
 * for event mechanisms that use our signal pipe trick.	 kqueue handles
 * signals internally, and all interested kqueues get all the signals.
 */
static void
test_signal_switchbase(void)
{
	struct event ev1, ev2;
	struct event_base *base1, *base2;
	int is_kqueue;
	test_ok = 0;
	base1 = event_init();
	base2 = event_init();
	is_kqueue = !strcmp(event_get_method(),"kqueue");
	evsignal_set(&ev1, SIGUSR1, signal_cb, &ev1);
	evsignal_set(&ev2, SIGUSR1, signal_cb, &ev2);
	if (event_base_set(base1, &ev1) ||
	    event_base_set(base2, &ev2) ||
	    event_add(&ev1, NULL) ||
	    event_add(&ev2, NULL)) {
		fprintf(stderr, "%s: cannot set base, add\n", __func__);
		exit(1);
	}

	tt_ptr_op(event_get_base(&ev1), ==, base1);
	tt_ptr_op(event_get_base(&ev2), ==, base2);

	test_ok = 0;
	/* can handle signal before loop is called */
	raise(SIGUSR1);
	event_base_loop(base2, EVLOOP_NONBLOCK);
	if (is_kqueue) {
		if (!test_ok)
			goto end;
		test_ok = 0;
	}
	event_base_loop(base1, EVLOOP_NONBLOCK);
	if (test_ok && !is_kqueue) {
		test_ok = 0;

		/* set base1 to handle signals */
		event_base_loop(base1, EVLOOP_NONBLOCK);
		raise(SIGUSR1);
		event_base_loop(base1, EVLOOP_NONBLOCK);
		event_base_loop(base2, EVLOOP_NONBLOCK);
	}
end:
	event_base_free(base1);
	event_base_free(base2);
	cleanup_test();
}

/*
 * assert that a signal event removed from the event queue really is
 * removed - with no possibility of its parent handler being fired.
 */
static void
test_signal_assert(void)
{
	struct event ev;
	struct event_base *base = event_init();
	test_ok = 0;
	/* use SIGCONT so we don't kill ourselves when we signal to nowhere */
	evsignal_set(&ev, SIGCONT, signal_cb, &ev);
	evsignal_add(&ev, NULL);
	/*
	 * if evsignal_del() fails to reset the handler, its current handler
	 * will still point to evsig_handler().
	 */
	evsignal_del(&ev);

	raise(SIGCONT);
#if 0
	/* only way to verify we were in evsig_handler() */
	/* XXXX Now there's no longer a good way. */
	if (base->sig.evsig_caught)
		test_ok = 0;
	else
		test_ok = 1;
#else
	test_ok = 1;
#endif

	event_base_free(base);
	cleanup_test();
	return;
}

/*
 * assert that we restore our previous signal handler properly.
 */
static void
test_signal_restore(void)
{
	struct event ev;
	struct event_base *base = event_init();
#ifdef EVENT__HAVE_SIGACTION
	struct sigaction sa;
#endif

	test_ok = 0;
#ifdef EVENT__HAVE_SIGACTION
	sa.sa_handler = signal_cb_sa;
	sa.sa_flags = 0x0;
	sigemptyset(&sa.sa_mask);
	if (sigaction(SIGUSR1, &sa, NULL) == -1)
		goto out;
#else
	if (signal(SIGUSR1, signal_cb_sa) == SIG_ERR)
		goto out;
#endif
	evsignal_set(&ev, SIGUSR1, signal_cb, &ev);
	evsignal_add(&ev, NULL);
	evsignal_del(&ev);

	raise(SIGUSR1);
	/* 1 == signal_cb, 2 == signal_cb_sa, we want our previous handler */
	if (test_ok != 2)
		test_ok = 0;
out:
	event_base_free(base);
	cleanup_test();
	return;
}

static void
signal_cb_swp(int sig, short event, void *arg)
{
	called++;
	if (called < 5)
		raise(sig);
	else
		event_loopexit(NULL);
}
static void
timeout_cb_swp(evutil_socket_t fd, short event, void *arg)
{
	if (called == -1) {
		struct timeval tv = {5, 0};

		called = 0;
		evtimer_add((struct event *)arg, &tv);
		raise(SIGUSR1);
		return;
	}
	test_ok = 0;
	event_loopexit(NULL);
}

static void
test_signal_while_processing(void)
{
	struct event_base *base = event_init();
	struct event ev, ev_timer;
	struct timeval tv = {0, 0};

	setup_test("Receiving a signal while processing other signal: ");

	called = -1;
	test_ok = 1;
	signal_set(&ev, SIGUSR1, signal_cb_swp, NULL);
	signal_add(&ev, NULL);
	evtimer_set(&ev_timer, timeout_cb_swp, &ev_timer);
	evtimer_add(&ev_timer, &tv);
	event_dispatch();

	event_base_free(base);
	cleanup_test();
	return;
}
#endif
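/* Helpers for test_signal_while_processing: signal_cb_swp re-raises
 * SIGUSR1 from inside the signal callback until it has run five times,
 * and timeout_cb_swp kicks off the first raise (if the re-armed timer
 * ever fires instead, the signal events stopped being delivered and the
 * test fails). */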

static void
test_free_active_base(void *ptr)
{
	struct basic_test_data *data = ptr;
	struct event_base *base1;
	struct event ev1;

	base1 = event_init();
	if (base1) {
		event_assign(&ev1, base1, data->pair[1], EV_READ,
			     dummy_read_cb, NULL);
		event_add(&ev1, NULL);
		event_base_free(base1);	 /* should not crash */
	} else {
		tt_fail_msg("failed to create event_base for test");
	}

	base1 = event_init();
	tt_assert(base1);
	event_assign(&ev1, base1, 0, 0, dummy_read_cb, NULL);
	event_active(&ev1, EV_READ, 1);
	event_base_free(base1);
end:
	;
}

static void
test_manipulate_active_events(void *ptr)
{
	struct basic_test_data *data = ptr;
	struct event_base *base = data->base;
	struct event ev1;

	event_assign(&ev1, base, -1, EV_TIMEOUT, dummy_read_cb, NULL);

	/* Make sure an active event is pending. */
	event_active(&ev1, EV_READ, 1);
	tt_int_op(event_pending(&ev1, EV_READ|EV_TIMEOUT|EV_WRITE, NULL),
	    ==, EV_READ);

	/* Make sure that activating an event twice works. */
	event_active(&ev1, EV_WRITE, 1);
	tt_int_op(event_pending(&ev1, EV_READ|EV_TIMEOUT|EV_WRITE, NULL),
	    ==, EV_READ|EV_WRITE);

end:
	event_del(&ev1);
}

static void
event_selfarg_cb(evutil_socket_t fd, short event, void *arg)
{
	struct event *ev = arg;
	struct event_base *base = event_get_base(ev);
	event_base_assert_ok_(base);
	event_base_loopexit(base, NULL);
	tt_want(ev == event_base_get_running_event(base));
}

static void
test_event_new_selfarg(void *ptr)
{
	struct basic_test_data *data = ptr;
	struct event_base *base = data->base;
	struct event *ev = event_new(base, -1, EV_READ, event_selfarg_cb,
                                     event_self_cbarg());

	event_active(ev, EV_READ, 1);
	event_base_dispatch(base);

	event_free(ev);
}

static void
test_event_assign_selfarg(void *ptr)
{
	struct basic_test_data *data = ptr;
	struct event_base *base = data->base;
	struct event ev;

	event_assign(&ev, base, -1, EV_READ, event_selfarg_cb,
                     event_self_cbarg());
	event_active(&ev, EV_READ, 1);
	event_base_dispatch(base);
}

static void
test_event_base_get_num_events(void *ptr)
{
	struct basic_test_data *data = ptr;
	struct event_base *base = data->base;
	struct event ev;
	int event_count_active;
	int event_count_virtual;
	int event_count_added;
	int event_count_active_virtual;
	int event_count_active_added;
	int event_count_virtual_added;
	int event_count_active_added_virtual;

	struct timeval qsec = {0, 100000};

	event_assign(&ev, base, -1, EV_READ, event_selfarg_cb,
	    event_self_cbarg());

	event_add(&ev, &qsec);
	event_count_active = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE);
	event_count_virtual = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_VIRTUAL);
	event_count_added = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ADDED);
	event_count_active_virtual = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL);
	event_count_active_added = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED);
	event_count_virtual_added = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED);
	event_count_active_added_virtual = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE|
	    EVENT_BASE_COUNT_ADDED|
	    EVENT_BASE_COUNT_VIRTUAL);
	tt_int_op(event_count_active, ==, 0);
	tt_int_op(event_count_virtual, ==, 0);
	/* libevent itself adds a timeout event, so the event_count is 2 here */
	tt_int_op(event_count_added, ==, 2);
	tt_int_op(event_count_active_virtual, ==, 0);
	tt_int_op(event_count_active_added, ==, 2);
	tt_int_op(event_count_virtual_added, ==, 2);
	tt_int_op(event_count_active_added_virtual, ==, 2);

	event_active(&ev, EV_READ, 1);
	event_count_active = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE);
	event_count_virtual = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_VIRTUAL);
	event_count_added = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ADDED);
	event_count_active_virtual = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL);
	event_count_active_added = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED);
	event_count_virtual_added = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED);
	event_count_active_added_virtual = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE|
	    EVENT_BASE_COUNT_ADDED|
	    EVENT_BASE_COUNT_VIRTUAL);
	tt_int_op(event_count_active, ==, 1);
	tt_int_op(event_count_virtual, ==, 0);
	tt_int_op(event_count_added, ==, 3);
	tt_int_op(event_count_active_virtual, ==, 1);
	tt_int_op(event_count_active_added, ==, 4);
	tt_int_op(event_count_virtual_added, ==, 3);
	tt_int_op(event_count_active_added_virtual, ==, 4);

	event_base_loop(base, 0);
	event_count_active = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE);
	event_count_virtual = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_VIRTUAL);
	event_count_added = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ADDED);
	event_count_active_virtual = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL);
	event_count_active_added = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED);
	event_count_virtual_added = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED);
	event_count_active_added_virtual = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE|
	    EVENT_BASE_COUNT_ADDED|
	    EVENT_BASE_COUNT_VIRTUAL);
	tt_int_op(event_count_active, ==, 0);
	tt_int_op(event_count_virtual, ==, 0);
	tt_int_op(event_count_added, ==, 0);
	tt_int_op(event_count_active_virtual, ==, 0);
	tt_int_op(event_count_active_added, ==, 0);
	tt_int_op(event_count_virtual_added, ==, 0);
	tt_int_op(event_count_active_added_virtual, ==, 0);

	event_base_add_virtual_(base);
	event_count_active = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE);
	event_count_virtual = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_VIRTUAL);
	event_count_added = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ADDED);
	event_count_active_virtual = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_VIRTUAL);
	event_count_active_added = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE|EVENT_BASE_COUNT_ADDED);
	event_count_virtual_added = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_VIRTUAL|EVENT_BASE_COUNT_ADDED);
	event_count_active_added_virtual = event_base_get_num_events(base,
	    EVENT_BASE_COUNT_ACTIVE|
	    EVENT_BASE_COUNT_ADDED|
	    EVENT_BASE_COUNT_VIRTUAL);
	tt_int_op(event_count_active, ==, 0);
	tt_int_op(event_count_virtual, ==, 1);
	tt_int_op(event_count_added, ==, 0);
	tt_int_op(event_count_active_virtual, ==, 1);
	tt_int_op(event_count_active_added, ==, 0);
	tt_int_op(event_count_virtual_added, ==, 1);
	tt_int_op(event_count_active_added_virtual, ==, 1);

end:
	;
}

static void
test_event_base_get_max_events(void *ptr)
{
	struct basic_test_data *data = ptr;
	struct event_base *base = data->base;
	struct event ev;
	struct event ev2;
	int event_count_active;
	int event_count_virtual;
	int event_count_added;
	int event_count_active_virtual;
	int event_count_active_added;
	int event_count_virtual_added;
	int event_count_active_added_virtual;

	struct timeval qsec = {0, 100000};

	event_assign(&ev, base, -1, EV_READ, event_selfarg_cb,
	    event_self_cbarg());
	event_assign(&ev2, base, -1, EV_READ, event_selfarg_cb,
	    event_self_cbarg());

	event_add(&ev, &qsec);
	event_add(&ev2, &qsec);
	event_del(&ev2);

	event_count_active = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE, 0);
	event_count_virtual = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_VIRTUAL, 0);
	event_count_added = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ADDED, 0);
	event_count_active_virtual = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0);
	event_count_active_added = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0);
	event_count_virtual_added = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0);
	event_count_active_added_virtual = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE |
	    EVENT_BASE_COUNT_ADDED |
	    EVENT_BASE_COUNT_VIRTUAL, 0);

	tt_int_op(event_count_active, ==, 0);
	tt_int_op(event_count_virtual, ==, 0);
	/* libevent itself adds a timeout event, so the event_count is 4 here */
	tt_int_op(event_count_added, ==, 4);
	tt_int_op(event_count_active_virtual, ==, 0);
	tt_int_op(event_count_active_added, ==, 4);
	tt_int_op(event_count_virtual_added, ==, 4);
	tt_int_op(event_count_active_added_virtual, ==, 4);

	event_active(&ev, EV_READ, 1);
	event_count_active = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE, 0);
	event_count_virtual = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_VIRTUAL, 0);
	event_count_added = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ADDED, 0);
	event_count_active_virtual = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0);
	event_count_active_added = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0);
	event_count_virtual_added = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0);
	event_count_active_added_virtual = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE |
	    EVENT_BASE_COUNT_ADDED |
	    EVENT_BASE_COUNT_VIRTUAL, 0);

	tt_int_op(event_count_active, ==, 1);
	tt_int_op(event_count_virtual, ==, 0);
	tt_int_op(event_count_added, ==, 4);
	tt_int_op(event_count_active_virtual, ==, 1);
	tt_int_op(event_count_active_added, ==, 5);
	tt_int_op(event_count_virtual_added, ==, 4);
	tt_int_op(event_count_active_added_virtual, ==, 5);

	event_base_loop(base, 0);
	event_count_active = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE, 1);
	event_count_virtual = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_VIRTUAL, 1);
	event_count_added = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ADDED, 1);
	event_count_active_virtual = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0);
	event_count_active_added = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0);
	event_count_virtual_added = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0);
	event_count_active_added_virtual = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE |
	    EVENT_BASE_COUNT_ADDED |
	    EVENT_BASE_COUNT_VIRTUAL, 1);

	tt_int_op(event_count_active, ==, 1);
	tt_int_op(event_count_virtual, ==, 0);
	tt_int_op(event_count_added, ==, 4);
	tt_int_op(event_count_active_virtual, ==, 0);
	tt_int_op(event_count_active_added, ==, 0);
	tt_int_op(event_count_virtual_added, ==, 0);
	tt_int_op(event_count_active_added_virtual, ==, 0);

	event_count_active = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE, 0);
	event_count_virtual = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_VIRTUAL, 0);
	event_count_added = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ADDED, 0);
	tt_int_op(event_count_active, ==, 0);
	tt_int_op(event_count_virtual, ==, 0);
	tt_int_op(event_count_added, ==, 0);

	event_base_add_virtual_(base);
	event_count_active = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE, 0);
	event_count_virtual = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_VIRTUAL, 0);
	event_count_added = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ADDED, 0);
	event_count_active_virtual = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_VIRTUAL, 0);
	event_count_active_added = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE | EVENT_BASE_COUNT_ADDED, 0);
	event_count_virtual_added = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_VIRTUAL | EVENT_BASE_COUNT_ADDED, 0);
	event_count_active_added_virtual = event_base_get_max_events(base,
	    EVENT_BASE_COUNT_ACTIVE |
	    EVENT_BASE_COUNT_ADDED |
	    EVENT_BASE_COUNT_VIRTUAL, 0);

	tt_int_op(event_count_active, ==, 0);
	tt_int_op(event_count_virtual, ==, 1);
	tt_int_op(event_count_added, ==, 0);
	tt_int_op(event_count_active_virtual, ==, 1);
	tt_int_op(event_count_active_added, ==, 0);
	tt_int_op(event_count_virtual_added, ==, 1);
	tt_int_op(event_count_active_added_virtual, ==, 1);

end:
	;
}

static void
test_bad_assign(void *ptr)
{
	struct event ev;
	int r;
	/* READ|SIGNAL is not allowed */
	r = event_assign(&ev, NULL, -1, EV_SIGNAL|EV_READ, dummy_read_cb, NULL);
	tt_int_op(r,==,-1);

end:
	;
}

static int reentrant_cb_run = 0;

static void
bad_reentrant_run_loop_cb(evutil_socket_t fd, short what, void *ptr)
{
	struct event_base *base = ptr;
	int r;
	reentrant_cb_run = 1;
	/* This reentrant call to event_base_loop should be detected and
	 * should fail */
	r = event_base_loop(base, 0);
	tt_int_op(r, ==, -1);
end:
	;
}

static void
test_bad_reentrant(void *ptr)
{
	struct basic_test_data *data = ptr;
	struct event_base *base = data->base;
	struct event ev;
	int r;
	event_assign(&ev, base, -1,
	    0, bad_reentrant_run_loop_cb, base);

	event_active(&ev, EV_WRITE, 1);
	r = event_base_loop(base, 0);
	tt_int_op(r, ==, 1);
	tt_int_op(reentrant_cb_run, ==, 1);
end:
	;
}

static int n_write_a_byte_cb=0;
static int n_read_and_drain_cb=0;
static int n_activate_other_event_cb=0;
static void
write_a_byte_cb(evutil_socket_t fd, short what, void *arg)
{
	char buf[] = "x";
	if (write(fd, buf, 1) == 1)
		++n_write_a_byte_cb;
}
static void
read_and_drain_cb(evutil_socket_t fd, short what, void *arg)
{
	char buf[128];
	int n;
	++n_read_and_drain_cb;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		;
}

static void
activate_other_event_cb(evutil_socket_t fd, short what, void *other_)
{
	struct event *ev_activate = other_;
	++n_activate_other_event_cb;
	event_active_later_(ev_activate, EV_READ);
}

static void
test_active_later(void *ptr)
{
	struct basic_test_data *data = ptr;
	struct event *ev1, *ev2;
	struct event ev3, ev4;
	struct timeval qsec = {0, 100000};
	ev1 = event_new(data->base, data->pair[0], EV_READ|EV_PERSIST, read_and_drain_cb, NULL);
	ev2 = event_new(data->base, data->pair[1], EV_WRITE|EV_PERSIST, write_a_byte_cb, NULL);
	event_assign(&ev3, data->base, -1, 0, activate_other_event_cb, &ev4);
	event_assign(&ev4, data->base, -1, 0, activate_other_event_cb, &ev3);
	event_add(ev1, NULL);
	event_add(ev2, NULL);
	event_active_later_(&ev3, EV_READ);

	event_base_loopexit(data->base, &qsec);

	event_base_loop(data->base, 0);

	TT_BLATHER(("%d write calls, %d read calls, %d activate-other calls.",
		n_write_a_byte_cb, n_read_and_drain_cb, n_activate_other_event_cb));
	event_del(&ev3);
	event_del(&ev4);

	tt_int_op(n_write_a_byte_cb, ==, n_activate_other_event_cb);
	tt_int_op(n_write_a_byte_cb, >, 100);
	tt_int_op(n_read_and_drain_cb, >, 100);
	tt_int_op(n_activate_other_event_cb, >, 100);

	event_active_later_(&ev4, EV_READ);
	event_active(&ev4, EV_READ, 1); /* This should make the event
					   active immediately. */
	tt_assert((ev4.ev_flags & EVLIST_ACTIVE) != 0);
	tt_assert((ev4.ev_flags & EVLIST_ACTIVE_LATER) == 0);

	/* Now leave this one around, so that event_free sees it and removes
	 * it. */
	event_active_later_(&ev3, EV_READ);
	event_base_assert_ok_(data->base);
	event_base_free(data->base);
	data->base = NULL;
end:
	;
}

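/* Counters for test_active_later: how often each of the three callbacks
 * below actually ran. */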
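/* ev3 and ev4 schedule each other with event_active_later_(), so each
 * pass through the loop runs at most one of them, interleaved with the
 * socketpair events that keep writing and draining single bytes.  After
 * ~100ms of this the activate-other count should match the write count,
 * and all counters should be well above 100. */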

static void incr_arg_cb(evutil_socket_t fd, short what, void *arg)
{
	int *intptr = arg;
	(void) fd; (void) what;
	++*intptr;
}
static void remove_timers_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event **ep = arg;
	(void) fd; (void) what;
	event_remove_timer(ep[0]);
	event_remove_timer(ep[1]);
}
static void send_a_byte_cb(evutil_socket_t fd, short what, void *arg)
{
	evutil_socket_t *sockp = arg;
	(void) fd; (void) what;
	(void) write(*sockp, "A", 1);
}
struct read_not_timeout_param
{
	struct event **ev;
	int events;
	int count;
};
static void read_not_timeout_cb(evutil_socket_t fd, short what, void *arg)
{
	struct read_not_timeout_param *rntp = arg;
	char c;
	ev_ssize_t n;
	(void) fd; (void) what;
	n = read(fd, &c, 1);
	tt_int_op(n, ==, 1);
	rntp->events |= what;
	++rntp->count;
	if(2 == rntp->count) event_del(rntp->ev[0]);
end:
	;
}

static void
test_event_remove_timeout(void *ptr)
{
	struct basic_test_data *data = ptr;
	struct event_base *base = data->base;
	struct event *ev[5];
	int ev1_fired=0;
	struct timeval ms25 = { 0, 25*1000 },
		ms40 = { 0, 40*1000 },
		ms75 = { 0, 75*1000 },
		ms125 = { 0, 125*1000 };
	struct read_not_timeout_param rntp = { ev, 0, 0 };

	event_base_assert_ok_(base);

	ev[0] = event_new(base, data->pair[0], EV_READ|EV_PERSIST,
	    read_not_timeout_cb, &rntp);
	ev[1] = evtimer_new(base, incr_arg_cb, &ev1_fired);
	ev[2] = evtimer_new(base, remove_timers_cb, ev);
	ev[3] = evtimer_new(base, send_a_byte_cb, &data->pair[1]);
	ev[4] = evtimer_new(base, send_a_byte_cb, &data->pair[1]);
	tt_assert(base);
	event_add(ev[2], &ms25); /* remove timers */
	event_add(ev[4], &ms40); /* write to test if timer re-activates */
	event_add(ev[0], &ms75); /* read */
	event_add(ev[1], &ms75); /* timer */
	event_add(ev[3], &ms125); /* timeout. */
	event_base_assert_ok_(base);

	event_base_dispatch(base);

	tt_int_op(ev1_fired, ==, 0);
	tt_int_op(rntp.events, ==, EV_READ);

	event_base_assert_ok_(base);
end:
	event_free(ev[0]);
	event_free(ev[1]);
	event_free(ev[2]);
	event_free(ev[3]);
	event_free(ev[4]);
}

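/* Timeline for test_event_remove_timeout: at 25ms remove_timers_cb
 * strips the timeouts from ev[0] and ev[1]; the bytes written at 40ms
 * and 125ms should then wake ev[0] as EV_READ only, and ev[1] should
 * never fire at all. */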
static void
test_event_base_new(void *ptr)
{
	struct basic_test_data *data = ptr;
	struct event_base *base = 0;
	struct event ev1;
	struct basic_cb_args args;

	int towrite = (int)strlen(TEST1)+1;
	int len = write(data->pair[0], TEST1, towrite);

	if (len < 0)
		tt_abort_perror("initial write");
	else if (len != towrite)
		tt_abort_printf(("initial write fell short (%d of %d bytes)",
				 len, towrite));

	if (shutdown(data->pair[0], SHUT_WR))
		tt_abort_perror("initial write shutdown");

	base = event_base_new();
	if (!base)
		tt_abort_msg("failed to create event base");

	args.eb = base;
	args.ev = &ev1;
	args.callcount = 0;
	event_assign(&ev1, base, data->pair[1],
		     EV_READ|EV_PERSIST, basic_read_cb, &args);

	if (event_add(&ev1, NULL))
		tt_abort_perror("initial event_add");

	if (event_base_loop(base, 0))
		tt_abort_msg("unsuccessful exit from event loop");

end:
	if (base)
		event_base_free(base);
}

static void
test_loopexit(void)
{
	struct timeval tv, tv_start, tv_end;
	struct event ev;

	setup_test("Loop exit: ");

	tv.tv_usec = 0;
	tv.tv_sec = 60*60*24;
	evtimer_set(&ev, timeout_cb, NULL);
	evtimer_add(&ev, &tv);

	tv.tv_usec = 300*1000;
	tv.tv_sec = 0;
	event_loopexit(&tv);

	evutil_gettimeofday(&tv_start, NULL);
	event_dispatch();
	evutil_gettimeofday(&tv_end, NULL);

	evtimer_del(&ev);

	tt_assert(event_base_got_exit(global_base));
	tt_assert(!event_base_got_break(global_base));

	test_timeval_diff_eq(&tv_start, &tv_end, 300);

	test_ok = 1;
end:
	cleanup_test();
}

static void
test_loopexit_multiple(void)
{
	struct timeval tv, tv_start, tv_end;
	struct event_base *base;

	setup_test("Loop Multiple exit: ");

	base = event_base_new();

	tv.tv_usec = 200*1000;
	tv.tv_sec = 0;
	event_base_loopexit(base, &tv);

	tv.tv_usec = 0;
	tv.tv_sec = 3;
	event_base_loopexit(base, &tv);

	evutil_gettimeofday(&tv_start, NULL);
	event_base_dispatch(base);
	evutil_gettimeofday(&tv_end, NULL);

	tt_assert(event_base_got_exit(base));
	tt_assert(!event_base_got_break(base));

	event_base_free(base);

	test_timeval_diff_eq(&tv_start, &tv_end, 200);

	test_ok = 1;

end:
	cleanup_test();
}

static void
break_cb(evutil_socket_t fd, short events, void *arg)
{
	test_ok = 1;
	event_loopbreak();
}

static void
fail_cb(evutil_socket_t fd, short events, void *arg)
{
	test_ok = 0;
}

static void
test_loopbreak(void)
{
	struct event ev1, ev2;
	struct timeval tv;

	setup_test("Loop break: ");

	tv.tv_sec = 0;
	tv.tv_usec = 0;
	evtimer_set(&ev1, break_cb, NULL);
	evtimer_add(&ev1, &tv);
	evtimer_set(&ev2, fail_cb, NULL);
	evtimer_add(&ev2, &tv);

	event_dispatch();

	tt_assert(!event_base_got_exit(global_base));
	tt_assert(event_base_got_break(global_base));

	evtimer_del(&ev1);
	evtimer_del(&ev2);

end:
	cleanup_test();
}

1935static struct event *readd_test_event_last_added = NULL;
1936static void
1937re_add_read_cb(evutil_socket_t fd, short event, void *arg)
1938{
1939	char buf[256];
1940	struct event *ev_other = arg;
1941	ev_ssize_t n_read;
1942
1943	readd_test_event_last_added = ev_other;
1944
1945	n_read = read(fd, buf, sizeof(buf));
1946
1947	if (n_read < 0) {
1948		tt_fail_perror("read");
1949		event_base_loopbreak(event_get_base(ev_other));
1950		return;
1951	} else {
1952		event_add(ev_other, NULL);
1953		++test_ok;
1954	}
1955}
1956
1957static void
1958test_nonpersist_readd(void)
1959{
1960	struct event ev1, ev2;
1961
1962	setup_test("Re-add nonpersistent events: ");
1963	event_set(&ev1, pair[0], EV_READ, re_add_read_cb, &ev2);
1964	event_set(&ev2, pair[1], EV_READ, re_add_read_cb, &ev1);
1965
1966	if (write(pair[0], "Hello", 5) < 0) {
1967		tt_fail_perror("write(pair[0])");
1968	}
1969
1970	if (write(pair[1], "Hello", 5) < 0) {
1971		tt_fail_perror("write(pair[1])\n");
1972	}
1973
1974	if (event_add(&ev1, NULL) == -1 ||
1975	    event_add(&ev2, NULL) == -1) {
1976		test_ok = 0;
1977	}
1978	if (test_ok != 0)
1979		exit(1);
1980	event_loop(EVLOOP_ONCE);
1981	if (test_ok != 2)
1982		exit(1);
1983	/* At this point, we executed both callbacks.  Whichever one got
1984	 * called first added the second, but the second then immediately got
1985	 * deleted before its callback was called.  At this point, though, it
1986	 * re-added the first.
1987	 */
1988	if (!readd_test_event_last_added) {
1989		test_ok = 0;
1990	} else if (readd_test_event_last_added == &ev1) {
1991		if (!event_pending(&ev1, EV_READ, NULL) ||
1992		    event_pending(&ev2, EV_READ, NULL))
1993			test_ok = 0;
1994	} else {
1995		if (event_pending(&ev1, EV_READ, NULL) ||
1996		    !event_pending(&ev2, EV_READ, NULL))
1997			test_ok = 0;
1998	}
1999
2000	event_del(&ev1);
2001	event_del(&ev2);
2002
2003	cleanup_test();
2004}
2005
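/* Priority test helpers: each event re-adds itself with a zero timeout
 * until its count reaches three, then asks the loop to exit.  With more
 * than one priority level, the lower-priority event is expected to run
 * far fewer times; see the expected counts in test_priorities_impl. */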
2006struct test_pri_event {
2007	struct event ev;
2008	int count;
2009};
2010
2011static void
2012test_priorities_cb(evutil_socket_t fd, short what, void *arg)
2013{
2014	struct test_pri_event *pri = arg;
2015	struct timeval tv;
2016
2017	if (pri->count == 3) {
2018		event_loopexit(NULL);
2019		return;
2020	}
2021
2022	pri->count++;
2023
2024	evutil_timerclear(&tv);
2025	event_add(&pri->ev, &tv);
2026}
2027
2028static void
2029test_priorities_impl(int npriorities)
2030{
2031	struct test_pri_event one, two;
2032	struct timeval tv;
2033
2034	TT_BLATHER(("Testing Priorities %d: ", npriorities));
2035
2036	event_base_priority_init(global_base, npriorities);
2037
2038	memset(&one, 0, sizeof(one));
2039	memset(&two, 0, sizeof(two));
2040
2041	timeout_set(&one.ev, test_priorities_cb, &one);
2042	if (event_priority_set(&one.ev, 0) == -1) {
2043		fprintf(stderr, "%s: failed to set priority\n", __func__);
2044		exit(1);
2045	}
2046
2047	timeout_set(&two.ev, test_priorities_cb, &two);
2048	if (event_priority_set(&two.ev, npriorities - 1) == -1) {
2049		fprintf(stderr, "%s: failed to set priority\n", __func__);
2050		exit(1);
2051	}
2052
2053	evutil_timerclear(&tv);
2054
2055	if (event_add(&one.ev, &tv) == -1)
2056		exit(1);
2057	if (event_add(&two.ev, &tv) == -1)
2058		exit(1);
2059
2060	event_dispatch();
2061
2062	event_del(&one.ev);
2063	event_del(&two.ev);
2064
2065	if (npriorities == 1) {
2066		if (one.count == 3 && two.count == 3)
2067			test_ok = 1;
2068	} else if (npriorities == 2) {
2069		/* Two runs only once, because the event_loopexit event is also at priority 1 and ends the loop */
2070		if (one.count == 3 && two.count == 1)
2071			test_ok = 1;
2072	} else {
2073		if (one.count == 3 && two.count == 0)
2074			test_ok = 1;
2075	}
2076}
2077
2078static void
2079test_priorities(void)
2080{
2081	test_priorities_impl(1);
2082	if (test_ok)
2083		test_priorities_impl(2);
2084	if (test_ok)
2085		test_priorities_impl(3);
2086}
2087
2088/* priority-active-inversion: activate a higher-priority event, and make sure
2089 * it keeps us from running a lower-priority event first. */
2090static int n_pai_calls = 0;
2091static struct event pai_events[3];
2092
2093static void
2094prio_active_inversion_cb(evutil_socket_t fd, short what, void *arg)
2095{
2096	int *call_order = arg;
2097	*call_order = n_pai_calls++;
2098	if (n_pai_calls == 1) {
2099		/* This should run later, even though it shares a
2100		   priority with us. */
2101		event_active(&pai_events[1], EV_READ, 1);
2102		/* This should run next, since its priority is higher,
2103		   even though we activated it second. */
2104		event_active(&pai_events[2], EV_TIMEOUT, 1);
2105	}
2106}
2107
2108static void
2109test_priority_active_inversion(void *data_)
2110{
2111	struct basic_test_data *data = data_;
2112	struct event_base *base = data->base;
2113	int call_order[3];
2114	int i;
2115	tt_int_op(event_base_priority_init(base, 8), ==, 0);
2116
2117	n_pai_calls = 0;
2118	memset(call_order, 0, sizeof(call_order));
2119
2120	for (i=0;i<3;++i) {
2121		event_assign(&pai_events[i], data->base, -1, 0,
2122		    prio_active_inversion_cb, &call_order[i]);
2123	}
2124
2125	event_priority_set(&pai_events[0], 4);
2126	event_priority_set(&pai_events[1], 4);
2127	event_priority_set(&pai_events[2], 0);
2128
2129	event_active(&pai_events[0], EV_WRITE, 1);
2130
2131	event_base_dispatch(base);
2132	tt_int_op(n_pai_calls, ==, 3);
2133	tt_int_op(call_order[0], ==, 0);
2134	tt_int_op(call_order[1], ==, 2);
2135	tt_int_op(call_order[2], ==, 1);
2136end:
2137	;
2138}
2139
2140
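/* Two separate events (one EV_READ, one EV_WRITE) registered on the same
 * fd: both should eventually fire, so test_ok must end up with both bits
 * set (== 3). */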
2141static void
2142test_multiple_cb(evutil_socket_t fd, short event, void *arg)
2143{
2144	if (event & EV_READ)
2145		test_ok |= 1;
2146	else if (event & EV_WRITE)
2147		test_ok |= 2;
2148}
2149
2150static void
2151test_multiple_events_for_same_fd(void)
2152{
2153	struct event e1, e2;
2154
2155	setup_test("Multiple events for same fd: ");
2156
2157	event_set(&e1, pair[0], EV_READ, test_multiple_cb, NULL);
2158	event_add(&e1, NULL);
2159	event_set(&e2, pair[0], EV_WRITE, test_multiple_cb, NULL);
2160	event_add(&e2, NULL);
2161	event_loop(EVLOOP_ONCE);
2162	event_del(&e2);
2163
2164	if (write(pair[1], TEST1, strlen(TEST1)+1) < 0) {
2165		tt_fail_perror("write");
2166	}
2167
2168	event_loop(EVLOOP_ONCE);
2169	event_del(&e1);
2170
2171	if (test_ok != 3)
2172		test_ok = 0;
2173
2174	cleanup_test();
2175}
2176
2177int evtag_decode_int(ev_uint32_t *pnumber, struct evbuffer *evbuf);
2178int evtag_decode_int64(ev_uint64_t *pnumber, struct evbuffer *evbuf);
2179int evtag_encode_tag(struct evbuffer *evbuf, ev_uint32_t number);
2180int evtag_decode_tag(ev_uint32_t *pnumber, struct evbuffer *evbuf);
2181
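/* "Want only once": the read event below is added without EV_PERSIST, so
 * its callback must run exactly once even though the callback writes more
 * data back to the socket pair, which would make the descriptor readable
 * again.  `called` counts invocations; a second call clears test_ok. */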
2182static void
2183read_once_cb(evutil_socket_t fd, short event, void *arg)
2184{
2185	char buf[256];
2186	int len;
2187
2188	len = read(fd, buf, sizeof(buf));
2189
2190	if (called) {
2191		test_ok = 0;
2192	} else if (len > 0) {
2193		/* Assumes global pair[0] can be used for writing */
2194		if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
2195			tt_fail_perror("write");
2196			test_ok = 0;
2197		} else {
2198			test_ok = 1;
2199		}
2200	}
2201
2202	called++;
2203}
2204
2205static void
2206test_want_only_once(void)
2207{
2208	struct event ev;
2209	struct timeval tv;
2210
2211	/* Very simple read test */
2212	setup_test("Want read only once: ");
2213
2214	if (write(pair[0], TEST1, strlen(TEST1)+1) < 0) {
2215		tt_fail_perror("write");
2216	}
2217
2218	/* Set up the loop termination */
2219	evutil_timerclear(&tv);
2220	tv.tv_usec = 300*1000;
2221	event_loopexit(&tv);
2222
2223	event_set(&ev, pair[1], EV_READ, read_once_cb, &ev);
2224	if (event_add(&ev, NULL) == -1)
2225		exit(1);
2226	event_dispatch();
2227
2228	cleanup_test();
2229}
2230
2231#define TEST_MAX_INT	6
2232
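/* Round-trip a handful of 32-bit values (and 64-bit values derived from
 * them) through the evtag integer encoder, then decode them back and
 * check that the buffer is completely consumed. */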
2233static void
2234evtag_int_test(void *ptr)
2235{
2236	struct evbuffer *tmp = evbuffer_new();
2237	ev_uint32_t integers[TEST_MAX_INT] = {
2238		0xaf0, 0x1000, 0x1, 0xdeadbeef, 0x00, 0xbef000
2239	};
2240	ev_uint32_t integer;
2241	ev_uint64_t big_int;
2242	int i;
2243
2244	evtag_init();
2245
2246	for (i = 0; i < TEST_MAX_INT; i++) {
2247		int oldlen, newlen;
2248		oldlen = (int)EVBUFFER_LENGTH(tmp);
2249		evtag_encode_int(tmp, integers[i]);
2250		newlen = (int)EVBUFFER_LENGTH(tmp);
2251		TT_BLATHER(("encoded 0x%08x with %d bytes",
2252			(unsigned)integers[i], newlen - oldlen));
2253		big_int = integers[i];
2254		big_int *= 1000000000; /* 1 billion */
2255		evtag_encode_int64(tmp, big_int);
2256	}
2257
2258	for (i = 0; i < TEST_MAX_INT; i++) {
2259		tt_int_op(evtag_decode_int(&integer, tmp), !=, -1);
2260		tt_uint_op(integer, ==, integers[i]);
2261		tt_int_op(evtag_decode_int64(&big_int, tmp), !=, -1);
2262		tt_assert((big_int / 1000000000) == integers[i]);
2263	}
2264
2265	tt_uint_op(EVBUFFER_LENGTH(tmp), ==, 0);
2266end:
2267	evbuffer_free(tmp);
2268}
2269
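/* Fuzz the timeval unmarshaling code with random buffers: the vast
 * majority should be rejected.  Then corrupt the length byte of an
 * otherwise valid record and make sure that is rejected too. */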
2270static void
2271evtag_fuzz(void *ptr)
2272{
2273	u_char buffer[4096];
2274	struct evbuffer *tmp = evbuffer_new();
2275	struct timeval tv;
2276	int i, j;
2277
2278	int not_failed = 0;
2279
2280	evtag_init();
2281
2282	for (j = 0; j < 100; j++) {
2283		for (i = 0; i < (int)sizeof(buffer); i++)
2284			buffer[i] = rand();
2285		evbuffer_drain(tmp, -1);
2286		evbuffer_add(tmp, buffer, sizeof(buffer));
2287
2288		if (evtag_unmarshal_timeval(tmp, 0, &tv) != -1)
2289			not_failed++;
2290	}
2291
2292	/* The majority of decodes should fail */
2293	tt_int_op(not_failed, <, 10);
2294
2295	/* Now insert some corruption into the tag length field */
2296	evbuffer_drain(tmp, -1);
2297	evutil_timerclear(&tv);
2298	tv.tv_sec = 1;
2299	evtag_marshal_timeval(tmp, 0, &tv);
2300	evbuffer_add(tmp, buffer, sizeof(buffer));
2301
2302	((char *)EVBUFFER_DATA(tmp))[1] = '\xff';
2303	if (evtag_unmarshal_timeval(tmp, 0, &tv) != -1) {
2304		tt_abort_msg("evtag_unmarshal_timeval should have failed");
2305	}
2306
2307end:
2308	evbuffer_free(tmp);
2309}
2310
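/* Same idea as evtag_int_test, but for the tag-number encoding itself:
 * encode a set of tag values, decode them back, and expect the buffer to
 * be empty at the end. */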
2311static void
2312evtag_tag_encoding(void *ptr)
2313{
2314	struct evbuffer *tmp = evbuffer_new();
2315	ev_uint32_t integers[TEST_MAX_INT] = {
2316		0xaf0, 0x1000, 0x1, 0xdeadbeef, 0x00, 0xbef000
2317	};
2318	ev_uint32_t integer;
2319	int i;
2320
2321	evtag_init();
2322
2323	for (i = 0; i < TEST_MAX_INT; i++) {
2324		int oldlen, newlen;
2325		oldlen = (int)EVBUFFER_LENGTH(tmp);
2326		evtag_encode_tag(tmp, integers[i]);
2327		newlen = (int)EVBUFFER_LENGTH(tmp);
2328		TT_BLATHER(("encoded 0x%08x with %d bytes",
2329			(unsigned)integers[i], newlen - oldlen));
2330	}
2331
2332	for (i = 0; i < TEST_MAX_INT; i++) {
2333		tt_int_op(evtag_decode_tag(&integer, tmp), !=, -1);
2334		tt_uint_op(integer, ==, integers[i]);
2335	}
2336
2337	tt_uint_op(EVBUFFER_LENGTH(tmp), ==, 0);
2338
2339end:
2340	evbuffer_free(tmp);
2341}
2342
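/* Peek at marshaled records without consuming them.  The first record
 * (the integer 0 under tag 30) is 3 bytes: 1 tag byte + 1 length byte +
 * 1 payload byte.  The second ("Hello world" under tag 40) is 1 + 1 + 11
 * bytes, with an 11-byte payload. */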
2343static void
2344evtag_test_peek(void *ptr)
2345{
2346	struct evbuffer *tmp = evbuffer_new();
2347	ev_uint32_t u32;
2348
2349	evtag_marshal_int(tmp, 30, 0);
2350	evtag_marshal_string(tmp, 40, "Hello world");
2351
2352	tt_int_op(evtag_peek(tmp, &u32), ==, 1);
2353	tt_int_op(u32, ==, 30);
2354	tt_int_op(evtag_peek_length(tmp, &u32), ==, 0);
2355	tt_int_op(u32, ==, 1+1+1);
2356	tt_int_op(evtag_consume(tmp), ==, 0);
2357
2358	tt_int_op(evtag_peek(tmp, &u32), ==, 1);
2359	tt_int_op(u32, ==, 40);
2360	tt_int_op(evtag_peek_length(tmp, &u32), ==, 0);
2361	tt_int_op(u32, ==, 1+1+11);
2362	tt_int_op(evtag_payload_length(tmp, &u32), ==, 0);
2363	tt_int_op(u32, ==, 11);
2364
2365end:
2366	evbuffer_free(tmp);
2367}
2368
2369
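/* Enumerate the supported backends, then build a config that avoids the
 * first one.  If more than one backend is available we should still get a
 * base using some other method; with only one backend available,
 * event_base_new_with_config() should fail. */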
2370static void
2371test_methods(void *ptr)
2372{
2373	const char **methods = event_get_supported_methods();
2374	struct event_config *cfg = NULL;
2375	struct event_base *base = NULL;
2376	const char *backend;
2377	int n_methods = 0;
2378
2379	tt_assert(methods);
2380
2381	backend = methods[0];
2382	while (*methods != NULL) {
2383		TT_BLATHER(("Support method: %s", *methods));
2384		++methods;
2385		++n_methods;
2386	}
2387
2388	cfg = event_config_new();
2389	assert(cfg != NULL);
2390
2391	tt_int_op(event_config_avoid_method(cfg, backend), ==, 0);
2392	event_config_set_flag(cfg, EVENT_BASE_FLAG_IGNORE_ENV);
2393
2394	base = event_base_new_with_config(cfg);
2395	if (n_methods > 1) {
2396		tt_assert(base);
2397		tt_str_op(backend, !=, event_base_get_method(base));
2398	} else {
2399		tt_assert(base == NULL);
2400	}
2401
2402end:
2403	if (base)
2404		event_base_free(base);
2405	if (cfg)
2406		event_config_free(cfg);
2407}
2408
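/* The version string and the numeric version must describe the same
 * release: major/minor/patch parsed from the string should match the
 * packed LIBEVENT_VERSION_NUMBER, ignoring the low byte. */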
2409static void
2410test_version(void *arg)
2411{
2412	const char *vstr;
2413	ev_uint32_t vint;
2414	int major, minor, patch, n;
2415
2416	vstr = event_get_version();
2417	vint = event_get_version_number();
2418
2419	tt_assert(vstr);
2420	tt_assert(vint);
2421
2422	tt_str_op(vstr, ==, LIBEVENT_VERSION);
2423	tt_int_op(vint, ==, LIBEVENT_VERSION_NUMBER);
2424
2425	n = sscanf(vstr, "%d.%d.%d", &major, &minor, &patch);
2426	tt_assert(3 == n);
2427	tt_int_op((vint&0xffffff00), ==, ((major<<24)|(minor<<16)|(patch<<8)));
2428end:
2429	;
2430}
2431
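/* Ask for an edge-triggered backend.  If the request can be satisfied,
 * the resulting base must advertise EV_FEATURE_ET; otherwise fall back to
 * a default base and check that it does not. */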
2432static void
2433test_base_features(void *arg)
2434{
2435	struct event_base *base = NULL;
2436	struct event_config *cfg = NULL;
2437
2438	cfg = event_config_new();
2439
2440	tt_assert(0 == event_config_require_features(cfg, EV_FEATURE_ET));
2441
2442	base = event_base_new_with_config(cfg);
2443	if (base) {
2444		tt_int_op(EV_FEATURE_ET, ==,
2445		    event_base_get_features(base) & EV_FEATURE_ET);
2446	} else {
2447		base = event_base_new();
2448		tt_int_op(0, ==, event_base_get_features(base) & EV_FEATURE_ET);
2449	}
2450
2451end:
2452	if (base)
2453		event_base_free(base);
2454	if (cfg)
2455		event_config_free(cfg);
2456}
2457
2458#ifdef EVENT__HAVE_SETENV
2459#define SETENV_OK
2460#elif !defined(EVENT__HAVE_SETENV) && defined(EVENT__HAVE_PUTENV)
2461static void setenv(const char *k, const char *v, int o_)
2462{
2463	static char b[256]; /* static: putenv() keeps a pointer to this buffer */
2464	evutil_snprintf(b, sizeof(b), "%s=%s",k,v);
2465	putenv(b);
2466}
2467#define SETENV_OK
2468#endif
2469
2470#ifdef EVENT__HAVE_UNSETENV
2471#define UNSETENV_OK
2472#elif !defined(EVENT__HAVE_UNSETENV) && defined(EVENT__HAVE_PUTENV)
2473static void unsetenv(const char *k)
2474{
2475	static char b[256]; /* static: putenv() keeps a pointer to this buffer */
2476	evutil_snprintf(b, sizeof(b), "%s=",k);
2477	putenv(b);
2478}
2479#define UNSETENV_OK
2480#endif
2481
2482#if defined(SETENV_OK) && defined(UNSETENV_OK)
2483static void
2484methodname_to_envvar(const char *mname, char *buf, size_t buflen)
2485{
2486	char *cp;
2487	evutil_snprintf(buf, buflen, "EVENT_NO%s", mname);
2488	for (cp = buf; *cp; ++cp) {
2489		*cp = EVUTIL_TOUPPER_(*cp);
2490	}
2491}
2492#endif
2493
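/* Check the EVENT_NO<METHOD> environment variables: setting one should
 * keep event_base_new_with_config() from picking that backend, while
 * EVENT_BASE_FLAG_IGNORE_ENV should make the library ignore the variable
 * again. */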
2494static void
2495test_base_environ(void *arg)
2496{
2497	struct event_base *base = NULL;
2498	struct event_config *cfg = NULL;
2499
2500#if defined(SETENV_OK) && defined(UNSETENV_OK)
2501	const char **basenames;
2502	int i, n_methods=0;
2503	char varbuf[128];
2504	const char *defaultname, *ignoreenvname;
2505
2506	/* See if unsetenv works before we rely on it. */
2507	setenv("EVENT_NOWAFFLES", "1", 1);
2508	unsetenv("EVENT_NOWAFFLES");
2509	if (getenv("EVENT_NOWAFFLES") != NULL) {
2510#ifndef EVENT__HAVE_UNSETENV
2511		TT_DECLARE("NOTE", ("Can't fake unsetenv; skipping test"));
2512#else
2513		TT_DECLARE("NOTE", ("unsetenv doesn't work; skipping test"));
2514#endif
2515		tt_skip();
2516	}
2517
2518	basenames = event_get_supported_methods();
2519	for (i = 0; basenames[i]; ++i) {
2520		methodname_to_envvar(basenames[i], varbuf, sizeof(varbuf));
2521		unsetenv(varbuf);
2522		++n_methods;
2523	}
2524
2525	base = event_base_new();
2526	tt_assert(base);
2527
2528	defaultname = event_base_get_method(base);
2529	TT_BLATHER(("default is <%s>", defaultname));
2530	event_base_free(base);
2531	base = NULL;
2532
2533	/* Can we disable the method with EVENT_NOfoo ? */
2534	if (!strcmp(defaultname, "epoll (with changelist)")) {
2535		setenv("EVENT_NOEPOLL", "1", 1);
2536		ignoreenvname = "epoll";
2537	} else {
2538		methodname_to_envvar(defaultname, varbuf, sizeof(varbuf));
2539		setenv(varbuf, "1", 1);
2540		ignoreenvname = defaultname;
2541	}
2542
2543	/* Use an empty cfg rather than NULL so a failure doesn't exit() */
2544	cfg = event_config_new();
2545	base = event_base_new_with_config(cfg);
2546	event_config_free(cfg);
2547	cfg = NULL;
2548	if (n_methods == 1) {
2549		tt_assert(!base);
2550	} else {
2551		tt_assert(base);
2552		tt_str_op(defaultname, !=, event_base_get_method(base));
2553		event_base_free(base);
2554		base = NULL;
2555	}
2556
2557	/* Can we disable looking at the environment with IGNORE_ENV ? */
2558	cfg = event_config_new();
2559	event_config_set_flag(cfg, EVENT_BASE_FLAG_IGNORE_ENV);
2560	base = event_base_new_with_config(cfg);
2561	tt_assert(base);
2562	tt_str_op(ignoreenvname, ==, event_base_get_method(base));
2563#else
2564	tt_skip();
2565#endif
2566
2567end:
2568	if (base)
2569		event_base_free(base);
2570	if (cfg)
2571		event_config_free(cfg);
2572}
2573
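/* Helpers for test_event_once: each callback adds a different amount to
 * `called` (1 for the read event, 100 for the 50 msec timeout, 1000 for
 * the immediate timeouts, which are registered twice), so the final value
 * 2101 proves that each one ran exactly the expected number of times. */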
2574static void
2575read_called_once_cb(evutil_socket_t fd, short event, void *arg)
2576{
2577	tt_int_op(event, ==, EV_READ);
2578	called += 1;
2579end:
2580	;
2581}
2582
2583static void
2584timeout_called_once_cb(evutil_socket_t fd, short event, void *arg)
2585{
2586	tt_int_op(event, ==, EV_TIMEOUT);
2587	called += 100;
2588end:
2589	;
2590}
2591
2592static void
2593immediate_called_twice_cb(evutil_socket_t fd, short event, void *arg)
2594{
2595	tt_int_op(event, ==, EV_TIMEOUT);
2596	called += 1000;
2597end:
2598	;
2599}
2600
2601static void
2602test_event_once(void *ptr)
2603{
2604	struct basic_test_data *data = ptr;
2605	struct timeval tv;
2606	int r;
2607
2608	tv.tv_sec = 0;
2609	tv.tv_usec = 50*1000;
2610	called = 0;
2611	r = event_base_once(data->base, data->pair[0], EV_READ,
2612	    read_called_once_cb, NULL, NULL);
2613	tt_int_op(r, ==, 0);
2614	r = event_base_once(data->base, -1, EV_TIMEOUT,
2615	    timeout_called_once_cb, NULL, &tv);
2616	tt_int_op(r, ==, 0);
2617	r = event_base_once(data->base, -1, 0, NULL, NULL, NULL);
2618	tt_int_op(r, <, 0);
2619	r = event_base_once(data->base, -1, EV_TIMEOUT,
2620	    immediate_called_twice_cb, NULL, NULL);
2621	tt_int_op(r, ==, 0);
2622	tv.tv_sec = 0;
2623	tv.tv_usec = 0;
2624	r = event_base_once(data->base, -1, EV_TIMEOUT,
2625	    immediate_called_twice_cb, NULL, &tv);
2626	tt_int_op(r, ==, 0);
2627
2628	if (write(data->pair[1], TEST1, strlen(TEST1)+1) < 0) {
2629		tt_fail_perror("write");
2630	}
2631
2632	shutdown(data->pair[1], SHUT_WR);
2633
2634	event_base_dispatch(data->base);
2635
2636	tt_int_op(called, ==, 2101);
2637end:
2638	;
2639}
2640
2641static void
2642test_event_once_never(void *ptr)
2643{
2644	struct basic_test_data *data = ptr;
2645	struct timeval tv;
2646
2647	/* Have one event trigger in 10 seconds (it never will: we exit the loop well before then) */
2648	tv.tv_sec = 10;
2649	tv.tv_usec = 0;
2650	called = 0;
2651	event_base_once(data->base, -1, EV_TIMEOUT,
2652	    timeout_called_once_cb, NULL, &tv);
2653
2654	/* But shut down the base in 75 msec. */
2655	tv.tv_sec = 0;
2656	tv.tv_usec = 75*1000;
2657	event_base_loopexit(data->base, &tv);
2658
2659	event_base_dispatch(data->base);
2660
2661	tt_int_op(called, ==, 0);
2662end:
2663	;
2664}
2665
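/* event_pending() should report exactly the events an event was added
 * for, and for a timeout it should also report the absolute expiration
 * time (here roughly 500 msec in the future). */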
2666static void
2667test_event_pending(void *ptr)
2668{
2669	struct basic_test_data *data = ptr;
2670	struct event *r=NULL, *w=NULL, *t=NULL;
2671	struct timeval tv, now, tv2;
2672
2673	tv.tv_sec = 0;
2674	tv.tv_usec = 500 * 1000;
2675	r = event_new(data->base, data->pair[0], EV_READ, simple_read_cb,
2676	    NULL);
2677	w = event_new(data->base, data->pair[1], EV_WRITE, simple_write_cb,
2678	    NULL);
2679	t = evtimer_new(data->base, timeout_cb, NULL);
2680
2681	tt_assert(r);
2682	tt_assert(w);
2683	tt_assert(t);
2684
2685	evutil_gettimeofday(&now, NULL);
2686	event_add(r, NULL);
2687	event_add(t, &tv);
2688
2689	tt_assert( event_pending(r, EV_READ, NULL));
2690	tt_assert(!event_pending(w, EV_WRITE, NULL));
2691	tt_assert(!event_pending(r, EV_WRITE, NULL));
2692	tt_assert( event_pending(r, EV_READ|EV_WRITE, NULL));
2693	tt_assert(!event_pending(r, EV_TIMEOUT, NULL));
2694	tt_assert( event_pending(t, EV_TIMEOUT, NULL));
2695	tt_assert( event_pending(t, EV_TIMEOUT, &tv2));
2696
2697	tt_assert(evutil_timercmp(&tv2, &now, >));
2698
2699	test_timeval_diff_eq(&now, &tv2, 500);
2700
2701end:
2702	if (r) {
2703		event_del(r);
2704		event_free(r);
2705	}
2706	if (w) {
2707		event_del(w);
2708		event_free(w);
2709	}
2710	if (t) {
2711		event_del(t);
2712		event_free(t);
2713	}
2714}
2715
2716#ifndef _WIN32
2717/* You can't run this test on Windows, since dup2 doesn't work on sockets there */
2718
2719static void
2720dfd_cb(evutil_socket_t fd, short e, void *data)
2721{
2722	*(int*)data = (int)e;
2723}
2724
2725/* Regression test for our workaround for a fun epoll/linux related bug
2726 * where fd2 = dup(fd1); add(fd2); close(fd2); dup2(fd1,fd2); add(fd2)
2727 * will get you an EEXIST */
2728static void
2729test_dup_fd(void *arg)
2730{
2731	struct basic_test_data *data = arg;
2732	struct event_base *base = data->base;
2733	struct event *ev1=NULL, *ev2=NULL;
2734	int fd, dfd=-1;
2735	int ev1_got, ev2_got;
2736
2737	tt_int_op(write(data->pair[0], "Hello world",
2738		strlen("Hello world")), >, 0);
2739	fd = data->pair[1];
2740
2741	dfd = dup(fd);
2742	tt_int_op(dfd, >=, 0);
2743
2744	ev1 = event_new(base, fd, EV_READ|EV_PERSIST, dfd_cb, &ev1_got);
2745	ev2 = event_new(base, dfd, EV_READ|EV_PERSIST, dfd_cb, &ev2_got);
2746	ev1_got = ev2_got = 0;
2747	event_add(ev1, NULL);
2748	event_add(ev2, NULL);
2749	event_base_loop(base, EVLOOP_ONCE);
2750	tt_int_op(ev1_got, ==, EV_READ);
2751	tt_int_op(ev2_got, ==, EV_READ);
2752
2753	/* Now close and delete dfd then dispatch.  We need to do the
2754	 * dispatch here so that when we add it later, we think there
2755	 * was an intermediate delete. */
2756	close(dfd);
2757	event_del(ev2);
2758	ev1_got = ev2_got = 0;
2759	event_base_loop(base, EVLOOP_ONCE);
2760	tt_want_int_op(ev1_got, ==, EV_READ);
2761	tt_int_op(ev2_got, ==, 0);
2762
2763	/* Re-duplicate the fd.  We need to get the same duplicated
2764	 * value that we closed to provoke the epoll quirk.  Also, we
2765	 * need to change the events to write, or else the old lingering
2766	 * read event will make the test pass whether the change was
2767	 * successful or not. */
2768	tt_int_op(dup2(fd, dfd), ==, dfd);
2769	event_free(ev2);
2770	ev2 = event_new(base, dfd, EV_WRITE|EV_PERSIST, dfd_cb, &ev2_got);
2771	event_add(ev2, NULL);
2772	ev1_got = ev2_got = 0;
2773	event_base_loop(base, EVLOOP_ONCE);
2774	tt_want_int_op(ev1_got, ==, EV_READ);
2775	tt_int_op(ev2_got, ==, EV_WRITE);
2776
2777end:
2778	if (ev1)
2779		event_free(ev1);
2780	if (ev2)
2781		event_free(ev2);
2782	if (dfd >= 0)
2783		close(dfd);
2784}
2785#endif
2786
2787#ifdef EVENT__DISABLE_MM_REPLACEMENT
2788static void
2789test_mm_functions(void *arg)
2790{
2791	tinytest_set_test_skipped_();
2792}
2793#else
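/* Replacement allocators for test_mm_functions: they prepend a 16-byte
 * magic string to every allocation and hand back the bytes after it, so
 * check_dummy_mem_ok() can verify that a pointer really came from these
 * functions. */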
2794static int
2795check_dummy_mem_ok(void *mem_)
2796{
2797	char *mem = mem_;
2798	mem -= 16;
2799	return !memcmp(mem, "{[<guardedram>]}", 16);
2800}
2801
2802static void *
2803dummy_malloc(size_t len)
2804{
2805	char *mem = malloc(len+16);
2806	memcpy(mem, "{[<guardedram>]}", 16);
2807	return mem+16;
2808}
2809
2810static void *
2811dummy_realloc(void *mem_, size_t len)
2812{
2813	char *mem = mem_;
2814	if (!mem)
2815		return dummy_malloc(len);
2816	tt_want(check_dummy_mem_ok(mem_));
2817	mem -= 16;
2818	mem = realloc(mem, len+16);
2819	return mem+16;
2820}
2821
2822static void
2823dummy_free(void *mem_)
2824{
2825	char *mem = mem_;
2826	tt_want(check_dummy_mem_ok(mem_));
2827	mem -= 16;
2828	free(mem);
2829}
2830
2831static void
2832test_mm_functions(void *arg)
2833{
2834	struct event_base *b = NULL;
2835	struct event_config *cfg = NULL;
2836	event_set_mem_functions(dummy_malloc, dummy_realloc, dummy_free);
2837	cfg = event_config_new();
2838	event_config_avoid_method(cfg, "Nonesuch");
2839	b = event_base_new_with_config(cfg);
2840	tt_assert(b);
2841	tt_assert(check_dummy_mem_ok(b));
2842end:
2843	if (cfg)
2844		event_config_free(cfg);
2845	if (b)
2846		event_base_free(b);
2847}
2848#endif
2849
2850static void
2851many_event_cb(evutil_socket_t fd, short event, void *arg)
2852{
2853	int *calledp = arg;
2854	*calledp += 1;
2855}
2856
2857static void
2858test_many_events(void *arg)
2859{
2860	/* Try 70 events that should all be ready at once.  This will
2861	 * exercise the "resize" code on most of the backends, and will make
2862	 * sure that we can get past the 64-handle limit of some Windows
2863	 * functions. */
2864#define MANY 70
2865
2866	struct basic_test_data *data = arg;
2867	struct event_base *base = data->base;
2868	int one_at_a_time = data->setup_data != NULL;
2869	evutil_socket_t sock[MANY];
2870	struct event *ev[MANY];
2871	int called[MANY];
2872	int i;
2873	int loopflags = EVLOOP_NONBLOCK, evflags=0;
2874	if (one_at_a_time) {
2875		loopflags |= EVLOOP_ONCE;
2876		evflags = EV_PERSIST;
2877	}
2878
2879	memset(sock, 0xff, sizeof(sock));
2880	memset(ev, 0, sizeof(ev));
2881	memset(called, 0, sizeof(called));
2882
2883	for (i = 0; i < MANY; ++i) {
2884		/* We need an event that will hit the backend, and that will
2885		 * be ready immediately.  "Send a datagram" is an easy
2886		 * instance of that. */
2887		sock[i] = socket(AF_INET, SOCK_DGRAM, 0);
2888		tt_assert(sock[i] >= 0);
2889		called[i] = 0;
2890		ev[i] = event_new(base, sock[i], EV_WRITE|evflags,
2891		    many_event_cb, &called[i]);
2892		event_add(ev[i], NULL);
2893		if (one_at_a_time)
2894			event_base_loop(base, EVLOOP_NONBLOCK|EVLOOP_ONCE);
2895	}
2896
2897	event_base_loop(base, loopflags);
2898
2899	for (i = 0; i < MANY; ++i) {
2900		if (one_at_a_time)
2901			tt_int_op(called[i], ==, MANY - i + 1);
2902		else
2903			tt_int_op(called[i], ==, 1);
2904	}
2905
2906end:
2907	for (i = 0; i < MANY; ++i) {
2908		if (ev[i])
2909			event_free(ev[i]);
2910		if (sock[i] >= 0)
2911			evutil_closesocket(sock[i]);
2912	}
2913#undef MANY
2914}
2915
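/* The structure size the library reports at run time must not exceed the
 * sizeof(struct event) this file was compiled with; presumably so that
 * code allocating events based on the public header never ends up with
 * too little space. */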
2916static void
2917test_struct_event_size(void *arg)
2918{
2919	tt_int_op(event_get_struct_event_size(), <=, sizeof(struct event));
2920end:
2921	;
2922}
2923
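/* event_get_assignment() should hand back exactly the base, fd, events,
 * callback and argument the event was created with, and should tolerate
 * NULL output pointers. */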
2924static void
2925test_get_assignment(void *arg)
2926{
2927	struct basic_test_data *data = arg;
2928	struct event_base *base = data->base;
2929	struct event *ev1 = NULL;
2930	const char *str = "foo";
2931
2932	struct event_base *b;
2933	evutil_socket_t s;
2934	short what;
2935	event_callback_fn cb;
2936	void *cb_arg;
2937
2938	ev1 = event_new(base, data->pair[1], EV_READ, dummy_read_cb, (void*)str);
2939	event_get_assignment(ev1, &b, &s, &what, &cb, &cb_arg);
2940
2941	tt_ptr_op(b, ==, base);
2942	tt_int_op(s, ==, data->pair[1]);
2943	tt_int_op(what, ==, EV_READ);
2944	tt_ptr_op(cb, ==, dummy_read_cb);
2945	tt_ptr_op(cb_arg, ==, str);
2946
2947	/* Now make sure this doesn't crash. */
2948	event_get_assignment(ev1, NULL, NULL, NULL, NULL, NULL);
2949
2950end:
2951	if (ev1)
2952		event_free(ev1);
2953}
2954
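/* event_base_foreach_event() helpers: foreach_count_cb counts the events
 * whose callback is timeout_cb (skipping everything else) and records the
 * last one it saw, while foreach_find_cb stops iteration early by
 * returning a non-zero value once it finds the marked event. */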
2955struct foreach_helper {
2956	int count;
2957	const struct event *ev;
2958};
2959
2960static int
2961foreach_count_cb(const struct event_base *base, const struct event *ev, void *arg)
2962{
2963	struct foreach_helper *h = event_get_callback_arg(ev);
2964	struct timeval *tv = arg;
2965	if (event_get_callback(ev) != timeout_cb)
2966		return 0;
2967	tt_ptr_op(event_get_base(ev), ==, base);
2968	tt_int_op(tv->tv_sec, ==, 10);
2969	h->ev = ev;
2970	h->count++;
2971	return 0;
2972end:
2973	return -1;
2974}
2975
2976static int
2977foreach_find_cb(const struct event_base *base, const struct event *ev, void *arg)
2978{
2979	const struct event **ev_out = arg;
2980	struct foreach_helper *h = event_get_callback_arg(ev);
2981	if (event_get_callback(ev) != timeout_cb)
2982		return 0;
2983	if (h->count == 99) {
2984		*ev_out = ev;
2985		return 101;
2986	}
2987	return 0;
2988}
2989
2990static void
2991test_event_foreach(void *arg)
2992{
2993	struct basic_test_data *data = arg;
2994	struct event_base *base = data->base;
2995	struct event *ev[5];
2996	struct foreach_helper visited[5];
2997	int i;
2998	struct timeval ten_sec = {10,0};
2999	const struct event *ev_found = NULL;
3000
3001	for (i = 0; i < 5; ++i) {
3002		visited[i].count = 0;
3003		visited[i].ev = NULL;
3004		ev[i] = event_new(base, -1, 0, timeout_cb, &visited[i]);
3005	}
3006
3007	tt_int_op(-1, ==, event_base_foreach_event(NULL, foreach_count_cb, NULL));
3008	tt_int_op(-1, ==, event_base_foreach_event(base, NULL, NULL));
3009
3010	event_add(ev[0], &ten_sec);
3011	event_add(ev[1], &ten_sec);
3012	event_active(ev[1], EV_TIMEOUT, 1);
3013	event_active(ev[2], EV_TIMEOUT, 1);
3014	event_add(ev[3], &ten_sec);
3015	/* Don't touch ev[4]. */
3016
3017	tt_int_op(0, ==, event_base_foreach_event(base, foreach_count_cb,
3018		&ten_sec));
3019	tt_int_op(1, ==, visited[0].count);
3020	tt_int_op(1, ==, visited[1].count);
3021	tt_int_op(1, ==, visited[2].count);
3022	tt_int_op(1, ==, visited[3].count);
3023	tt_ptr_op(ev[0], ==, visited[0].ev);
3024	tt_ptr_op(ev[1], ==, visited[1].ev);
3025	tt_ptr_op(ev[2], ==, visited[2].ev);
3026	tt_ptr_op(ev[3], ==, visited[3].ev);
3027
3028	visited[2].count = 99;
3029	tt_int_op(101, ==, event_base_foreach_event(base, foreach_find_cb,
3030		&ev_found));
3031	tt_ptr_op(ev_found, ==, ev[2]);
3032
3033end:
3034	for (i=0; i<5; ++i) {
3035		event_free(ev[i]);
3036	}
3037}
3038
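/* Cached-time tests: each callback records the base's cached time, then
 * optionally sleeps 30 msec and/or forces event_base_update_cache_time().
 * Depending on the configuration, the three recorded times should either
 * be identical (cache in effect) or differ visibly (cache disabled or
 * explicitly refreshed). */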
3039static struct event_base *cached_time_base = NULL;
3040static int cached_time_reset = 0;
3041static int cached_time_sleep = 0;
3042static void
3043cache_time_cb(evutil_socket_t fd, short what, void *arg)
3044{
3045	struct timeval *tv = arg;
3046	tt_int_op(0, ==, event_base_gettimeofday_cached(cached_time_base, tv));
3047	if (cached_time_sleep) {
3048		struct timeval delay = { 0, 30*1000 };
3049		evutil_usleep_(&delay);
3050	}
3051	if (cached_time_reset) {
3052		event_base_update_cache_time(cached_time_base);
3053	}
3054end:
3055	;
3056}
3057
3058static void
3059test_gettimeofday_cached(void *arg)
3060{
3061	struct basic_test_data *data = arg;
3062	struct event_config *cfg = NULL;
3063	struct event_base *base = NULL;
3064	struct timeval tv1, tv2, tv3, now;
3065	struct event *ev1=NULL, *ev2=NULL, *ev3=NULL;
3066	int cached_time_disable = strstr(data->setup_data, "disable") != NULL;
3067
3068	cfg = event_config_new();
3069	if (cached_time_disable) {
3070		event_config_set_flag(cfg, EVENT_BASE_FLAG_NO_CACHE_TIME);
3071	}
3072	cached_time_base = base = event_base_new_with_config(cfg);
3073	tt_assert(base);
3074
3075	/* Try gettimeofday_cached outside of an event loop. */
3076	evutil_gettimeofday(&now, NULL);
3077	tt_int_op(0, ==, event_base_gettimeofday_cached(NULL, &tv1));
3078	tt_int_op(0, ==, event_base_gettimeofday_cached(base, &tv2));
3079	tt_int_op(timeval_msec_diff(&tv1, &tv2), <, 10);
3080	tt_int_op(timeval_msec_diff(&tv1, &now), <, 10);
3081
3082	cached_time_reset = strstr(data->setup_data, "reset") != NULL;
3083	cached_time_sleep = strstr(data->setup_data, "sleep") != NULL;
3084
3085	ev1 = event_new(base, -1, 0, cache_time_cb, &tv1);
3086	ev2 = event_new(base, -1, 0, cache_time_cb, &tv2);
3087	ev3 = event_new(base, -1, 0, cache_time_cb, &tv3);
3088
3089	event_active(ev1, EV_TIMEOUT, 1);
3090	event_active(ev2, EV_TIMEOUT, 1);
3091	event_active(ev3, EV_TIMEOUT, 1);
3092
3093	event_base_dispatch(base);
3094
3095	if (cached_time_reset && cached_time_sleep) {
3096		tt_int_op(labs(timeval_msec_diff(&tv1,&tv2)), >, 10);
3097		tt_int_op(labs(timeval_msec_diff(&tv2,&tv3)), >, 10);
3098	} else if (cached_time_disable && cached_time_sleep) {
3099		tt_int_op(labs(timeval_msec_diff(&tv1,&tv2)), >, 10);
3100		tt_int_op(labs(timeval_msec_diff(&tv2,&tv3)), >, 10);
3101	} else if (! cached_time_disable) {
3102		tt_assert(evutil_timercmp(&tv1, &tv2, ==));
3103		tt_assert(evutil_timercmp(&tv2, &tv3, ==));
3104	}
3105
3106end:
3107	if (ev1)
3108		event_free(ev1);
3109	if (ev2)
3110		event_free(ev2);
3111	if (ev3)
3112		event_free(ev3);
3113	if (base)
3114		event_base_free(base);
3115	if (cfg)
3116		event_config_free(cfg);
3117}
3118
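/* event_base_active_by_fd()/event_base_active_by_signal() tests: the
 * callback records the flags it was invoked with plus 0x10000, so a value
 * of zero means the callback never ran at all. */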
3119static void
3120tabf_cb(evutil_socket_t fd, short what, void *arg)
3121{
3122	int *ptr = arg;
3123	*ptr = what;
3124	*ptr += 0x10000;
3125}
3126
3127static void
3128test_active_by_fd(void *arg)
3129{
3130	struct basic_test_data *data = arg;
3131	struct event_base *base = data->base;
3132	struct event *ev1 = NULL, *ev2 = NULL, *ev3 = NULL, *ev4 = NULL;
3133	int e1,e2,e3,e4;
3134#ifndef _WIN32
3135	struct event *evsig = NULL;
3136	int es;
3137#endif
3138	struct timeval tenmin = { 600, 0 };
3139
3140	/* Ensure no crash on nonexistent FD. */
3141	event_base_active_by_fd(base, 1000, EV_READ);
3142
3143	/* Ensure no crash on bogus FD. */
3144	event_base_active_by_fd(base, -1, EV_READ);
3145
3146	/* Ensure no crash on nonexistent/bogus signal. */
3147	event_base_active_by_signal(base, 1000);
3148	event_base_active_by_signal(base, -1);
3149
3150	event_base_assert_ok_(base);
3151
3152	e1 = e2 = e3 = e4 = 0;
3153	ev1 = event_new(base, data->pair[0], EV_READ, tabf_cb, &e1);
3154	ev2 = event_new(base, data->pair[0], EV_WRITE, tabf_cb, &e2);
3155	ev3 = event_new(base, data->pair[1], EV_READ, tabf_cb, &e3);
3156	ev4 = event_new(base, data->pair[1], EV_READ, tabf_cb, &e4);
3157	tt_assert(ev1);
3158	tt_assert(ev2);
3159	tt_assert(ev3);
3160	tt_assert(ev4);
3161#ifndef _WIN32
3162	evsig = event_new(base, SIGHUP, EV_SIGNAL, tabf_cb, &es);
3163	tt_assert(evsig);
3164	event_add(evsig, &tenmin);
3165#endif
3166
3167	event_add(ev1, &tenmin);
3168	event_add(ev2, NULL);
3169	event_add(ev3, NULL);
3170	event_add(ev4, &tenmin);
3171
3172
3173	event_base_assert_ok_(base);
3174
3175	/* Trigger 2, 3, 4 */
3176	event_base_active_by_fd(base, data->pair[0], EV_WRITE);
3177	event_base_active_by_fd(base, data->pair[1], EV_READ);
3178#ifndef _WIN32
3179	event_base_active_by_signal(base, SIGHUP);
3180#endif
3181
3182	event_base_assert_ok_(base);
3183
3184	event_base_loop(base, EVLOOP_ONCE);
3185
3186	tt_int_op(e1, ==, 0);
3187	tt_int_op(e2, ==, EV_WRITE | 0x10000);
3188	tt_int_op(e3, ==, EV_READ | 0x10000);
3189	/* Mask out EV_WRITE here, since it could be genuinely writeable. */
3190	tt_int_op((e4 & ~EV_WRITE), ==, EV_READ | 0x10000);
3191#ifndef _WIN32
3192	tt_int_op(es, ==, EV_SIGNAL | 0x10000);
3193#endif
3194
3195end:
3196	if (ev1)
3197		event_free(ev1);
3198	if (ev2)
3199		event_free(ev2);
3200	if (ev3)
3201		event_free(ev3);
3202	if (ev4)
3203		event_free(ev4);
3204#ifndef _WIN32
3205	if (evsig)
3206		event_free(evsig);
3207#endif
3208}
3209
3210struct testcase_t main_testcases[] = {
3211	/* Some converted-over tests */
3212	{ "methods", test_methods, TT_FORK, NULL, NULL },
3213	{ "version", test_version, 0, NULL, NULL },
3214	BASIC(base_features, TT_FORK|TT_NO_LOGS),
3215	{ "base_environ", test_base_environ, TT_FORK, NULL, NULL },
3216
3217	BASIC(event_base_new, TT_FORK|TT_NEED_SOCKETPAIR),
3218	BASIC(free_active_base, TT_FORK|TT_NEED_SOCKETPAIR),
3219
3220	BASIC(manipulate_active_events, TT_FORK|TT_NEED_BASE),
3221	BASIC(event_new_selfarg, TT_FORK|TT_NEED_BASE),
3222	BASIC(event_assign_selfarg, TT_FORK|TT_NEED_BASE),
3223	BASIC(event_base_get_num_events, TT_FORK|TT_NEED_BASE),
3224	BASIC(event_base_get_max_events, TT_FORK|TT_NEED_BASE),
3225
3226	BASIC(bad_assign, TT_FORK|TT_NEED_BASE|TT_NO_LOGS),
3227	BASIC(bad_reentrant, TT_FORK|TT_NEED_BASE|TT_NO_LOGS),
3228	BASIC(active_later, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR),
3229	BASIC(event_remove_timeout, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR),
3230
3231	/* These are still using the old API */
3232	LEGACY(persistent_timeout, TT_FORK|TT_NEED_BASE),
3233	{ "persistent_timeout_jump", test_persistent_timeout_jump, TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
3234	{ "persistent_active_timeout", test_persistent_active_timeout,
3235	  TT_FORK|TT_NEED_BASE, &basic_setup, NULL },
3236	LEGACY(priorities, TT_FORK|TT_NEED_BASE),
3237	BASIC(priority_active_inversion, TT_FORK|TT_NEED_BASE),
3238	{ "common_timeout", test_common_timeout, TT_FORK|TT_NEED_BASE,
3239	  &basic_setup, NULL },
3240
3241	/* These legacy tests may not all need all of these flags. */
3242	LEGACY(simpleread, TT_ISOLATED),
3243	LEGACY(simpleread_multiple, TT_ISOLATED),
3244	LEGACY(simplewrite, TT_ISOLATED),
3245	{ "simpleclose", test_simpleclose, TT_FORK, &basic_setup,
3246	  NULL },
3247	LEGACY(multiple, TT_ISOLATED),
3248	LEGACY(persistent, TT_ISOLATED),
3249	LEGACY(combined, TT_ISOLATED),
3250	LEGACY(simpletimeout, TT_ISOLATED),
3251	LEGACY(loopbreak, TT_ISOLATED),
3252	LEGACY(loopexit, TT_ISOLATED),
3253	LEGACY(loopexit_multiple, TT_ISOLATED),
3254	LEGACY(nonpersist_readd, TT_ISOLATED),
3255	LEGACY(multiple_events_for_same_fd, TT_ISOLATED),
3256	LEGACY(want_only_once, TT_ISOLATED),
3257	{ "event_once", test_event_once, TT_ISOLATED, &basic_setup, NULL },
3258	{ "event_once_never", test_event_once_never, TT_ISOLATED, &basic_setup, NULL },
3259	{ "event_pending", test_event_pending, TT_ISOLATED, &basic_setup,
3260	  NULL },
3261#ifndef _WIN32
3262	{ "dup_fd", test_dup_fd, TT_ISOLATED, &basic_setup, NULL },
3263#endif
3264	{ "mm_functions", test_mm_functions, TT_FORK, NULL, NULL },
3265	{ "many_events", test_many_events, TT_ISOLATED, &basic_setup, NULL },
3266	{ "many_events_slow_add", test_many_events, TT_ISOLATED, &basic_setup, (void*)1 },
3267
3268	{ "struct_event_size", test_struct_event_size, 0, NULL, NULL },
3269	BASIC(get_assignment, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR),
3270
3271	BASIC(event_foreach, TT_FORK|TT_NEED_BASE),
3272	{ "gettimeofday_cached", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"" },
3273	{ "gettimeofday_cached_sleep", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"sleep" },
3274	{ "gettimeofday_cached_reset", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"sleep reset" },
3275	{ "gettimeofday_cached_disabled", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"sleep disable" },
3276	{ "gettimeofday_cached_disabled_nosleep", test_gettimeofday_cached, TT_FORK, &basic_setup, (void*)"disable" },
3277
3278	BASIC(active_by_fd, TT_FORK|TT_NEED_BASE|TT_NEED_SOCKETPAIR),
3279
3280#ifndef _WIN32
3281	LEGACY(fork, TT_ISOLATED),
3282#endif
3283	END_OF_TESTCASES
3284};
3285
3286struct testcase_t evtag_testcases[] = {
3287	{ "int", evtag_int_test, TT_FORK, NULL, NULL },
3288	{ "fuzz", evtag_fuzz, TT_FORK, NULL, NULL },
3289	{ "encoding", evtag_tag_encoding, TT_FORK, NULL, NULL },
3290	{ "peek", evtag_test_peek, 0, NULL, NULL },
3291
3292	END_OF_TESTCASES
3293};
3294
3295struct testcase_t signal_testcases[] = {
3296#ifndef _WIN32
3297	LEGACY(simplesignal, TT_ISOLATED),
3298	LEGACY(multiplesignal, TT_ISOLATED),
3299	LEGACY(immediatesignal, TT_ISOLATED),
3300	LEGACY(signal_dealloc, TT_ISOLATED),
3301	LEGACY(signal_pipeloss, TT_ISOLATED),
3302	LEGACY(signal_switchbase, TT_ISOLATED|TT_NO_LOGS),
3303	LEGACY(signal_restore, TT_ISOLATED),
3304	LEGACY(signal_assert, TT_ISOLATED),
3305	LEGACY(signal_while_processing, TT_ISOLATED),
3306#endif
3307	END_OF_TESTCASES
3308};
3309
3310