/*	$OpenBSD: event.c,v 1.42 2022/12/27 23:05:55 jmc Exp $	*/

/*
 * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/queue.h>

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <assert.h>
#include <time.h>
#include <netdb.h>
#include <asr.h>

#include "event.h"
#include "event-internal.h"
#include "log.h"

extern const struct eventop selectops;
extern const struct eventop pollops;
extern const struct eventop kqops;

/* In order of preference */
static const struct eventop *eventops[] = {
	&kqops,
	&pollops,
	&selectops,
	NULL
};

/* Global state */
struct event_base *current_base = NULL;
extern struct event_base *evsignal_base;

/* Handle signals - This is a deprecated interface */
int (*event_sigcb)(void);		/* Signal callback when gotsig is set */
volatile sig_atomic_t event_gotsig;	/* Set in signal handler */

/* Prototypes */
static void	event_queue_insert(struct event_base *, struct event *, int);
static void	event_queue_remove(struct event_base *, struct event *, int);
static int	event_haveevents(struct event_base *);

static void	event_process_active(struct event_base *);

static int	timeout_next(struct event_base *, struct timeval **);
static void	timeout_process(struct event_base *);

static void
gettime(struct event_base *base, struct timeval *tp)
{
	struct timespec	ts;

	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return;
	}

	if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
		event_err(1, "%s: clock_gettime", __func__);

	TIMESPEC_TO_TIMEVAL(tp, &ts);
}

struct event_base *
event_init(void)
{
	struct event_base *base = event_base_new();

	if (base != NULL)
		current_base = base;

	return (base);
}

struct event_base *
event_base_new(void)
{
	int i;
	struct event_base *base;

	if ((base = calloc(1, sizeof(struct event_base))) == NULL)
		event_err(1, "%s: calloc", __func__);

	event_sigcb = NULL;
	event_gotsig = 0;

	gettime(base, &base->event_tv);

	min_heap_ctor(&base->timeheap);
	TAILQ_INIT(&base->eventqueue);
	base->sig.ev_signal_pair[0] = -1;
	base->sig.ev_signal_pair[1] = -1;

	base->evbase = NULL;
	for (i = 0; eventops[i] && !base->evbase; i++) {
		base->evsel = eventops[i];

		base->evbase = base->evsel->init(base);
	}

	if (base->evbase == NULL)
		event_errx(1, "%s: no event mechanism available", __func__);

	if (!issetugid() && getenv("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s", base->evsel->name);

	/* allocate a single active event queue */
	event_base_priority_init(base, 1);

	return (base);
}
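
/*
 * Usage sketch: event_init() stores the new base in the global
 * current_base used by the non-reentrant wrappers below, while
 * event_base_new() leaves the global alone.  A minimal caller,
 * where fd and read_cb are placeholders supplied by the application:
 *
 *	struct event ev;
 *
 *	event_init();
 *	event_set(&ev, fd, EV_READ|EV_PERSIST, read_cb, NULL);
 *	event_add(&ev, NULL);
 *	event_dispatch();
 */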

void
event_base_free(struct event_base *base)
{
	int i;
	size_t n_deleted = 0;
	struct event *ev;

	if (base == NULL && current_base)
		base = current_base;
	if (base == current_base)
		current_base = NULL;

	/* XXX(niels) - check for internal events first */
	assert(base);
	/* Delete all non-internal events. */
	for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
		struct event *next = TAILQ_NEXT(ev, ev_next);
		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
			event_del(ev);
			++n_deleted;
		}
		ev = next;
	}
	while ((ev = min_heap_top(&base->timeheap)) != NULL) {
		event_del(ev);
		++n_deleted;
	}

	for (i = 0; i < base->nactivequeues; ++i) {
		for (ev = TAILQ_FIRST(base->activequeues[i]); ev; ) {
			struct event *next = TAILQ_NEXT(ev, ev_active_next);
			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
				event_del(ev);
				++n_deleted;
			}
			ev = next;
		}
	}

	if (n_deleted)
		event_debug(("%s: %zu events were still set in base",
			__func__, n_deleted));

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);

	for (i = 0; i < base->nactivequeues; ++i)
		assert(TAILQ_EMPTY(base->activequeues[i]));

	assert(min_heap_empty(&base->timeheap));
	min_heap_dtor(&base->timeheap);

	for (i = 0; i < base->nactivequeues; ++i)
		free(base->activequeues[i]);
	free(base->activequeues);

	assert(TAILQ_EMPTY(&base->eventqueue));

	free(base);
}

/* reinitialize the event base after a fork */
int
event_reinit(struct event_base *base)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	int res = 0;
	struct event *ev;

#if 0
	/* Right now, reinit always takes effect, since even if the
	   backend doesn't require it, the signal socketpair code does.
	*/
	/* check if this event mechanism requires reinit */
	if (!evsel->need_reinit)
		return (0);
#endif

	/* prevent internal delete */
	if (base->sig.ev_signal_added) {
		/* we cannot call event_del here because the base has
		 * not been reinitialized yet. */
		event_queue_remove(base, &base->sig.ev_signal,
		    EVLIST_INSERTED);
		if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
			event_queue_remove(base, &base->sig.ev_signal,
			    EVLIST_ACTIVE);
		base->sig.ev_signal_added = 0;
	}

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base, base->evbase);
	evbase = base->evbase = evsel->init(base);
	if (base->evbase == NULL)
		event_errx(1, "%s: could not reinitialize event mechanism",
		    __func__);

	TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
		if (evsel->add(evbase, ev) == -1)
			res = -1;
	}

	return (res);
}
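
/*
 * Sketch: a forking server calls event_reinit() in the child, since
 * backend state such as a kqueue descriptor is not inherited across
 * fork().  child_main() is a placeholder for the child's work:
 *
 *	switch (fork()) {
 *	case -1:
 *		err(1, "fork");
 *	case 0:
 *		if (event_reinit(current_base) == -1)
 *			errx(1, "event_reinit failed");
 *		child_main();
 *		_exit(0);
 *	default:
 *		break;
 *	}
 */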

int
event_priority_init(int npriorities)
{
	return event_base_priority_init(current_base, npriorities);
}

int
event_base_priority_init(struct event_base *base, int npriorities)
{
	int i;

	if (base->event_count_active)
		return (-1);

	if (npriorities == base->nactivequeues)
		return (0);

	if (base->nactivequeues) {
		for (i = 0; i < base->nactivequeues; ++i) {
			free(base->activequeues[i]);
		}
		free(base->activequeues);
	}

	/* Allocate our priority queues */
	base->nactivequeues = npriorities;
	base->activequeues = (struct event_list **)
	    calloc(base->nactivequeues, sizeof(struct event_list *));
	if (base->activequeues == NULL)
		event_err(1, "%s: calloc", __func__);

	for (i = 0; i < base->nactivequeues; ++i) {
		base->activequeues[i] = malloc(sizeof(struct event_list));
		if (base->activequeues[i] == NULL)
			event_err(1, "%s: malloc", __func__);
		TAILQ_INIT(base->activequeues[i]);
	}

	return (0);
}

int
event_haveevents(struct event_base *base)
{
	return (base->event_count > 0);
}

/*
 * Active events are stored in priority queues.  Lower priority numbers
 * are always processed before higher ones, so low-numbered (more urgent)
 * events can starve the higher-numbered queues.
 */

static void
event_process_active(struct event_base *base)
{
	struct event *ev;
	struct event_list *activeq = NULL;
	int i;
	short ncalls;

	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
			activeq = base->activequeues[i];
			break;
		}
	}

	assert(activeq != NULL);

	for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
		if (ev->ev_events & EV_PERSIST)
			event_queue_remove(base, ev, EVLIST_ACTIVE);
		else
			event_del(ev);

		/* Allows deletes to work */
		ncalls = ev->ev_ncalls;
		ev->ev_pncalls = &ncalls;
		while (ncalls) {
			ncalls--;
			ev->ev_ncalls = ncalls;
			(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
			if (event_gotsig || base->event_break)
				return;
		}
	}
}
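
/*
 * Sketch of the priority queues: with two queues, priority 0 always runs
 * before priority 1, and a busy priority-0 event can starve priority 1
 * entirely.  fd0, fd1, urgent_cb and bulk_cb are placeholders:
 *
 *	struct event urgent, bulk;
 *
 *	event_init();
 *	event_priority_init(2);
 *	event_set(&urgent, fd0, EV_READ|EV_PERSIST, urgent_cb, NULL);
 *	event_priority_set(&urgent, 0);
 *	event_set(&bulk, fd1, EV_READ|EV_PERSIST, bulk_cb, NULL);
 *	event_priority_set(&bulk, 1);
 *	event_add(&urgent, NULL);
 *	event_add(&bulk, NULL);
 *	event_dispatch();
 */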

/*
 * Wait continuously for events.  We exit only if no events are left.
 */

int
event_dispatch(void)
{
	return (event_loop(0));
}

int
event_base_dispatch(struct event_base *event_base)
{
	return (event_base_loop(event_base, 0));
}

const char *
event_base_get_method(struct event_base *base)
{
	assert(base);
	return (base->evsel->name);
}

static void
event_loopexit_cb(int fd, short what, void *arg)
{
	struct event_base *base = arg;
	base->event_gotterm = 1;
}

/* not thread safe */
int
event_loopexit(const struct timeval *tv)
{
	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
		    current_base, tv));
}

int
event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
{
	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
		    event_base, tv));
}

/* not thread safe */
int
event_loopbreak(void)
{
	return (event_base_loopbreak(current_base));
}

int
event_base_loopbreak(struct event_base *event_base)
{
	if (event_base == NULL)
		return (-1);

	event_base->event_break = 1;
	return (0);
}
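
/*
 * Sketch contrasting the two exits: event_loopexit() goes through
 * event_once(), so events already active this round are still processed
 * and a delay may be given; event_loopbreak() makes the loop return as
 * soon as the current callback finishes.  Typically called from inside
 * a callback:
 *
 *	struct timeval tv = { 2, 0 };
 *
 *	event_loopexit(&tv);	exit the loop after two more seconds
 *	event_loopbreak();	or: exit immediately
 */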

/* not thread safe */
int
event_loop(int flags)
{
	return event_base_loop(current_base, flags);
}

int
event_base_loop(struct event_base *base, int flags)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	struct timeval tv;
	struct timeval *tv_p;
	int res, done;

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	if (base->sig.ev_signal_added)
		evsignal_base = base;
	done = 0;
	while (!done) {
		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			base->event_gotterm = 0;
			break;
		}

		if (base->event_break) {
			base->event_break = 0;
			break;
		}

		/* You cannot use this interface for multi-threaded apps */
		while (event_gotsig) {
			event_gotsig = 0;
			if (event_sigcb) {
				res = (*event_sigcb)();
				if (res == -1) {
					errno = EINTR;
					return (-1);
				}
			}
		}

		tv_p = &tv;
		if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK)) {
			timeout_next(base, &tv_p);
		} else {
			/*
			 * if we have active events, we just poll new events
			 * without waiting.
			 */
			timerclear(&tv);
		}

		/* If we have no events, we just exit */
		if (!event_haveevents(base)) {
			event_debug(("%s: no events registered.", __func__));
			return (1);
		}

		/* update the last event time */
		gettime(base, &base->event_tv);

		/* clear time cache */
		base->tv_cache.tv_sec = 0;

		res = evsel->dispatch(base, evbase, tv_p);

		if (res == -1)
			return (-1);
		gettime(base, &base->tv_cache);

		timeout_process(base);

		if (base->event_count_active) {
			event_process_active(base);
			if (!base->event_count_active && (flags & EVLOOP_ONCE))
				done = 1;
		} else if (flags & EVLOOP_NONBLOCK)
			done = 1;
	}

	/* clear time cache */
	base->tv_cache.tv_sec = 0;

	event_debug(("%s: asked to terminate loop.", __func__));
	return (0);
}
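
/*
 * Flag sketch for event_base_loop(): with no flags the loop runs until
 * no events remain or it is told to stop; EVLOOP_ONCE returns once a
 * round of callbacks has run; EVLOOP_NONBLOCK only picks up what is
 * already ready.  Interleaving with other work, do_other_work() being
 * a placeholder:
 *
 *	for (;;) {
 *		event_base_loop(base, EVLOOP_NONBLOCK);
 *		do_other_work();
 *	}
 */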

/* Sets up an event for processing once */

struct event_once {
	struct event ev;

	void (*cb)(int, short, void *);
	void *arg;
};

/* One-time callback; it deletes itself */

static void
event_once_cb(int fd, short events, void *arg)
{
	struct event_once *eonce = arg;

	(*eonce->cb)(fd, events, eonce->arg);
	free(eonce);
}

/* not thread safe; schedules an event once. */
int
event_once(int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
	return event_base_once(current_base, fd, events, callback, arg, tv);
}

/* Schedules an event once */
int
event_base_once(struct event_base *base, int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
	struct event_once *eonce;
	struct timeval etv;
	int res;

	/* We cannot support signals that just fire once */
	if (events & EV_SIGNAL)
		return (-1);

	if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)
		return (-1);

	eonce->cb = callback;
	eonce->arg = arg;

	if (events == EV_TIMEOUT) {
		if (tv == NULL) {
			timerclear(&etv);
			tv = &etv;
		}

		evtimer_set(&eonce->ev, event_once_cb, eonce);
	} else if (events & (EV_READ|EV_WRITE)) {
		events &= EV_READ|EV_WRITE;

		event_set(&eonce->ev, fd, events, event_once_cb, eonce);
	} else {
		/* Bad event combination */
		free(eonce);
		return (-1);
	}

	res = event_base_set(base, &eonce->ev);
	if (res == 0)
		res = event_add(&eonce->ev, tv);
	if (res != 0) {
		free(eonce);
		return (res);
	}

	return (0);
}
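
/*
 * Sketch: a one-shot timeout via event_base_once().  The struct
 * event_once wrapper is freed by event_once_cb() after the callback
 * fires, so the caller keeps no handle and cannot cancel the event.
 * timeout_cb is a placeholder:
 *
 *	struct timeval tv = { 5, 0 };
 *
 *	if (event_base_once(base, -1, EV_TIMEOUT, timeout_cb, NULL,
 *	    &tv) == -1)
 *		errx(1, "event_base_once failed");
 */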

void
event_set(struct event *ev, int fd, short events,
	  void (*callback)(int, short, void *), void *arg)
{
	/* Take the current base - caller needs to set the real base later */
	ev->ev_base = current_base;

	ev->ev_callback = callback;
	ev->ev_arg = arg;
	ev->ev_fd = fd;
	ev->ev_events = events;
	ev->ev_res = 0;
	ev->ev_flags = EVLIST_INIT;
	ev->ev_ncalls = 0;
	ev->ev_pncalls = NULL;

	min_heap_elem_init(ev);

	/* by default, we put new events into the middle priority */
	if (current_base)
		ev->ev_pri = current_base->nactivequeues/2;
}

int
event_base_set(struct event_base *base, struct event *ev)
{
	/* Only innocent events may be assigned to a different base */
	if (ev->ev_flags != EVLIST_INIT)
		return (-1);

	ev->ev_base = base;
	ev->ev_pri = base->nactivequeues/2;

	return (0);
}
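
/*
 * Sketch of the pattern for multiple bases: event_set() stamps the event
 * with current_base, so an event meant for another base must be handed
 * over with event_base_set() before it is added.  fd, read_cb and
 * other_base are placeholders:
 *
 *	event_set(&ev, fd, EV_READ, read_cb, NULL);
 *	if (event_base_set(other_base, &ev) == -1)
 *		errx(1, "event already added elsewhere");
 *	event_add(&ev, NULL);
 */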

/*
 * Sets the priority of an event: if the event is already active,
 * changing its priority will fail.
 */

int
event_priority_set(struct event *ev, int pri)
{
	if (ev->ev_flags & EVLIST_ACTIVE)
		return (-1);
	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
		return (-1);

	ev->ev_pri = pri;

	return (0);
}

/*
 * Checks if a specific event is pending or scheduled.
 */

int
event_pending(struct event *ev, short event, struct timeval *tv)
{
	struct timeval	now, res;
	int flags = 0;

	if (ev->ev_flags & EVLIST_INSERTED)
		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
	if (ev->ev_flags & EVLIST_ACTIVE)
		flags |= ev->ev_res;
	if (ev->ev_flags & EVLIST_TIMEOUT)
		flags |= EV_TIMEOUT;

	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);

	/* See if there is a timeout that we should report */
	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
		gettime(ev->ev_base, &now);
		timersub(&ev->ev_timeout, &now, &res);
		/* correctly remap to real time */
		gettimeofday(&now, NULL);
		timeradd(&now, &res, tv);
	}

	return (flags & event);
}
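
/*
 * Sketch: event_pending() returns the subset of the requested flags that
 * are armed or active; when EV_TIMEOUT is requested and a timeout is
 * armed, *tv is additionally filled with the expiry remapped to
 * wall-clock time.  ev is assumed to have been added with a timeout:
 *
 *	struct timeval expires;
 *
 *	if (event_pending(&ev, EV_READ|EV_TIMEOUT, &expires))
 *		printf("still pending, expires at %lld\n",
 *		    (long long)expires.tv_sec);
 */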

int
event_add(struct event *ev, const struct timeval *tv)
{
	struct event_base *base = ev->ev_base;
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	int res = 0;

	event_debug((
		 "event_add: event: %p, %s%s%scall %p",
		 ev,
		 ev->ev_events & EV_READ ? "EV_READ " : " ",
		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
		 tv ? "EV_TIMEOUT " : " ",
		 ev->ev_callback));

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/*
	 * prepare for timeout insertion further below; if we get a
	 * failure on any step, we should not change any state.
	 */
	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
		if (min_heap_reserve(&base->timeheap,
			1 + min_heap_size(&base->timeheap)) == -1)
			return (-1);  /* ENOMEM == errno */
	}

	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
		res = evsel->add(evbase, ev);
		if (res != -1)
			event_queue_insert(base, ev, EVLIST_INSERTED);
	}

	/*
	 * we should change the timeout state only if the previous event
	 * addition succeeded.
	 */
	if (res != -1 && tv != NULL) {
		struct timeval now;

		/*
		 * we already reserved memory above for the case where we
		 * are not replacing an existing timeout.
		 */
		if (ev->ev_flags & EVLIST_TIMEOUT)
			event_queue_remove(base, ev, EVLIST_TIMEOUT);

		/* Check if it is active due to a timeout.  Rescheduling
		 * this timeout before the callback can be executed
		 * removes it from the active list. */
		if ((ev->ev_flags & EVLIST_ACTIVE) &&
		    (ev->ev_res & EV_TIMEOUT)) {
			/* See if the event's callback is currently
			 * being executed in a loop
			 */
			if (ev->ev_ncalls && ev->ev_pncalls) {
				/* Abort loop */
				*ev->ev_pncalls = 0;
			}

			event_queue_remove(base, ev, EVLIST_ACTIVE);
		}

		gettime(base, &now);
		timeradd(&now, tv, &ev->ev_timeout);

		event_debug((
			 "event_add: timeout in %lld seconds, call %p",
			 (long long)tv->tv_sec, ev->ev_callback));

		event_queue_insert(base, ev, EVLIST_TIMEOUT);
	}

	return (res);
}
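
/*
 * Sketch of the common event_add() patterns: a NULL tv arms only the
 * I/O or signal part; a non-NULL tv also schedules a timeout relative
 * to now, and re-adding an already-armed timeout just reschedules it.
 * fd and read_cb are placeholders:
 *
 *	struct timeval tv = { 10, 0 };
 *
 *	event_set(&ev, fd, EV_READ|EV_PERSIST, read_cb, NULL);
 *	event_add(&ev, &tv);	fires EV_READ, or EV_TIMEOUT after 10s idle
 */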

int
event_del(struct event *ev)
{
	struct event_base *base;
	const struct eventop *evsel;
	void *evbase;

	event_debug(("event_del: %p, callback %p",
		 ev, ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)
		return (-1);

	base = ev->ev_base;
	evsel = base->evsel;
	evbase = base->evbase;

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if the event's callback is currently being executed in a loop */
	if (ev->ev_ncalls && ev->ev_pncalls) {
		/* Abort loop */
		*ev->ev_pncalls = 0;
	}

	if (ev->ev_flags & EVLIST_TIMEOUT)
		event_queue_remove(base, ev, EVLIST_TIMEOUT);

	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove(base, ev, EVLIST_ACTIVE);

	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove(base, ev, EVLIST_INSERTED);
		return (evsel->del(evbase, ev));
	}

	return (0);
}

void
event_active(struct event *ev, int res, short ncalls)
{
	/* We get different kinds of events, add them together */
	if (ev->ev_flags & EVLIST_ACTIVE) {
		ev->ev_res |= res;
		return;
	}

	ev->ev_res = res;
	ev->ev_ncalls = ncalls;
	ev->ev_pncalls = NULL;
	event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
}
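
/*
 * Sketch: event_active() queues an event by hand; the callback then runs
 * ncalls times with the given result flags on the next pass over the
 * active queues, whether or not the descriptor is actually ready:
 *
 *	event_active(&ev, EV_READ, 1);
 */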

static int
timeout_next(struct event_base *base, struct timeval **tv_p)
{
	struct timeval now;
	struct event *ev;
	struct timeval *tv = *tv_p;

	if ((ev = min_heap_top(&base->timeheap)) == NULL) {
		/* if no time-based events are active wait for I/O */
		*tv_p = NULL;
		return (0);
	}

	gettime(base, &now);

	if (timercmp(&ev->ev_timeout, &now, <=)) {
		timerclear(tv);
		return (0);
	}

	timersub(&ev->ev_timeout, &now, tv);

	assert(tv->tv_sec >= 0);
	assert(tv->tv_usec >= 0);

	event_debug(("timeout_next: in %lld seconds", (long long)tv->tv_sec));
	return (0);
}

void
timeout_process(struct event_base *base)
{
	struct timeval now;
	struct event *ev;

	if (min_heap_empty(&base->timeheap))
		return;

	gettime(base, &now);

	while ((ev = min_heap_top(&base->timeheap))) {
		if (timercmp(&ev->ev_timeout, &now, >))
			break;

		/* delete this event from the I/O queues */
		event_del(ev);

		event_debug(("timeout_process: call %p",
			 ev->ev_callback));
		event_active(ev, EV_TIMEOUT, 1);
	}
}

void
event_queue_remove(struct event_base *base, struct event *ev, int queue)
{
	if (!(ev->ev_flags & queue))
		event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
			   ev, ev->ev_fd, queue);

	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count--;

	ev->ev_flags &= ~queue;
	switch (queue) {
	case EVLIST_INSERTED:
		TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
		break;
	case EVLIST_ACTIVE:
		base->event_count_active--;
		TAILQ_REMOVE(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_TIMEOUT:
		min_heap_erase(&base->timeheap, ev);
		break;
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}

void
event_queue_insert(struct event_base *base, struct event *ev, int queue)
{
	if (ev->ev_flags & queue) {
		/* Double insertion is possible for active events */
		if (queue & EVLIST_ACTIVE)
			return;

		event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
			   ev, ev->ev_fd, queue);
	}

	if (~ev->ev_flags & EVLIST_INTERNAL)
		base->event_count++;

	ev->ev_flags |= queue;
	switch (queue) {
	case EVLIST_INSERTED:
		TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
		break;
	case EVLIST_ACTIVE:
		base->event_count_active++;
		TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_TIMEOUT:
		min_heap_push(&base->timeheap, ev);
		break;
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}

/* Functions for debugging */

const char *
event_get_version(void)
{
	return (_EVENT_VERSION);
}

/*
 * No thread-safe interface needed - the information should be the same
 * for all threads.
 */

const char *
event_get_method(void)
{
	return (current_base->evsel->name);
}

/*
 * Libevent glue for ASR.
 */
struct event_asr {
	struct event	 ev;
	struct asr_query *async;
	void		(*cb)(struct asr_result *, void *);
	void		*arg;
};

static void
event_asr_dispatch(int fd __attribute__((__unused__)),
    short ev __attribute__((__unused__)), void *arg)
{
	struct event_asr	*eva = arg;
	struct asr_result	 ar;
	struct timeval		 tv;

	event_del(&eva->ev);

	if (asr_run(eva->async, &ar)) {
		eva->cb(&ar, eva->arg);
		free(eva);
	} else {
		event_set(&eva->ev, ar.ar_fd,
		    ar.ar_cond == ASR_WANT_READ ? EV_READ : EV_WRITE,
		    event_asr_dispatch, eva);
		tv.tv_sec = ar.ar_timeout / 1000;
		tv.tv_usec = (ar.ar_timeout % 1000) * 1000;
		event_add(&eva->ev, &tv);
	}
}

struct event_asr *
event_asr_run(struct asr_query *async, void (*cb)(struct asr_result *, void *),
    void *arg)
{
	struct event_asr *eva;
	struct timeval tv;

	eva = calloc(1, sizeof *eva);
	if (eva == NULL)
		return (NULL);
	eva->async = async;
	eva->cb = cb;
	eva->arg = arg;
	tv.tv_sec = 0;
	tv.tv_usec = 0;
	evtimer_set(&eva->ev, event_asr_dispatch, eva);
	evtimer_add(&eva->ev, &tv);
	return (eva);
}

void
event_asr_abort(struct event_asr *eva)
{
	asr_abort(eva->async);
	event_del(&eva->ev);
	free(eva);
}
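
/*
 * Sketch of the glue above: event_asr_run() drives an asr query from the
 * event loop and hands the result to the callback.  resolve_cb is a
 * placeholder and error handling is omitted:
 *
 *	void
 *	resolve_cb(struct asr_result *ar, void *arg)
 *	{
 *		if (ar->ar_gai_errno == 0)
 *			freeaddrinfo(ar->ar_addrinfo);
 *	}
 *
 *	event_init();
 *	event_asr_run(getaddrinfo_async("www.openbsd.org", "www",
 *	    NULL, NULL), resolve_cb, NULL);
 *	event_dispatch();
 */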