/*
 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef EVENT_INTERNAL_H_INCLUDED_
#define EVENT_INTERNAL_H_INCLUDED_

#ifdef __cplusplus
extern "C" {
#endif

#include "event2/event-config.h"
#include "evconfig-private.h"

#include <time.h>
#include <sys/queue.h>
#include "event2/event_struct.h"
#include "minheap-internal.h"
#include "evsignal-internal.h"
#include "mm-internal.h"
#include "defer-internal.h"

/* map union members back */

/* mutually exclusive */
#define ev_signal_next	ev_.ev_signal.ev_signal_next
#define ev_io_next	ev_.ev_io.ev_io_next
#define ev_io_timeout	ev_.ev_io.ev_timeout

/* used only by signals */
#define ev_ncalls	ev_.ev_signal.ev_ncalls
#define ev_pncalls	ev_.ev_signal.ev_pncalls

#define ev_pri ev_evcallback.evcb_pri
#define ev_flags ev_evcallback.evcb_flags
#define ev_closure ev_evcallback.evcb_closure
#define ev_callback ev_evcallback.evcb_cb_union.evcb_callback
#define ev_arg ev_evcallback.evcb_arg

/** @name Event closure codes

    Possible values for evcb_closure in struct event_callback

    @{
 */
/** A regular event. Uses the evcb_callback callback */
#define EV_CLOSURE_EVENT 0
/** A signal event. Uses the evcb_callback callback */
#define EV_CLOSURE_EVENT_SIGNAL 1
/** A persistent non-signal event. Uses the evcb_callback callback */
#define EV_CLOSURE_EVENT_PERSIST 2
/** A simple callback. Uses the evcb_selfcb callback. */
#define EV_CLOSURE_CB_SELF 3
/** A finalizing callback. Uses the evcb_cbfinalize callback. */
#define EV_CLOSURE_CB_FINALIZE 4
/** A finalizing event. Uses the evcb_evfinalize callback. */
#define EV_CLOSURE_EVENT_FINALIZE 5
/** A finalizing event that should get freed after. Uses the evcb_evfinalize
 * callback. */
#define EV_CLOSURE_EVENT_FINALIZE_FREE 6
/** @} */
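/* How the closure code is meant to be used: the loop that runs active
 * callbacks is expected to switch on evcb_closure to pick which member of
 * evcb_cb_union to invoke.  The sketch below is illustrative only (the real
 * dispatch lives in event.c), but it shows the shape of that switch:
 *
 *	switch (evcb->evcb_closure) {
 *	case EV_CLOSURE_CB_SELF:
 *		evcb->evcb_cb_union.evcb_selfcb(evcb, evcb->evcb_arg);
 *		break;
 *	case EV_CLOSURE_EVENT:
 *		ev->ev_callback(ev->ev_fd, ev->ev_res, ev->ev_arg);
 *		break;
 *	case EV_CLOSURE_CB_FINALIZE:
 *		evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
 *		break;
 *	... and so on for the signal, persist, and event-finalize cases ...
 *	}
 */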

/** Structure to define the backend of a given event_base. */
struct eventop {
	/** The name of this backend. */
	const char *name;
	/** Function to set up an event_base to use this backend.  It should
	 * create a new structure holding whatever information is needed to
	 * run the backend, and return it.  The returned pointer will get
	 * stored by event_init into the event_base.evbase field.  On failure,
	 * this function should return NULL. */
	void *(*init)(struct event_base *);
	/** Enable reading/writing on a given fd or signal.  'events' will be
	 * the events that we're trying to enable: one or more of EV_READ,
	 * EV_WRITE, EV_SIGNAL, and EV_ET.  'old' will be those events that
	 * were enabled on this fd previously.  'fdinfo' will be a structure
	 * associated with the fd by the evmap; its size is defined by the
	 * fdinfo field below.  It will be set to 0 the first time the fd is
	 * added.  The function should return 0 on success and -1 on error.
	 */
	int (*add)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
	/** As "add", except 'events' contains the events we mean to disable. */
	int (*del)(struct event_base *, evutil_socket_t fd, short old, short events, void *fdinfo);
	/** Function to implement the core of an event loop.  It must see which
	    added events are ready, and cause event_active to be called for each
	    active event (usually via event_io_active or such).  It should
	    return 0 on success and -1 on error.
	 */
	int (*dispatch)(struct event_base *, struct timeval *);
	/** Function to clean up and free our data from the event_base. */
	void (*dealloc)(struct event_base *);
	/** Flag: set if we need to reinitialize the event base after we fork.
	 */
	int need_reinit;
	/** Bit-array of supported event_method_features that this backend can
	 * provide. */
	enum event_method_feature features;
	/** Length of the extra information we should record for each fd that
	    has one or more active events.  This information is recorded
	    as part of the evmap entry for each fd, and passed as an argument
	    to the add and del functions above.
	 */
	size_t fdinfo_len;
};
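/* For orientation, a hypothetical minimal backend would expose a table like
 * the one sketched here (the real tables live in the per-backend sources
 * such as select.c, poll.c, and epoll.c; the function names below are made
 * up for illustration):
 *
 *	static const struct eventop exampleops = {
 *		"example",
 *		example_init,		-- allocate backend state, NULL on error
 *		example_add,		-- start watching (fd, events)
 *		example_del,		-- stop watching (fd, events)
 *		example_dispatch,	-- wait, then activate ready events
 *		example_dealloc,	-- free backend state
 *		1,			-- need_reinit after fork()
 *		0,			-- no event_method_feature flags
 *		0			-- fdinfo_len: no per-fd data needed
 *	};
 */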

#ifdef _WIN32
/* If we're on win32, then file descriptors are not nice, low, densely packed
   integers.  Instead, they are pointer-like Windows handles, and we want to
   use a hashtable instead of an array to map fds to events.
*/
#define EVMAP_USE_HT
#endif

/* #define HT_CACHE_HASH_VALS */

#ifdef EVMAP_USE_HT
#define HT_NO_CACHE_HASH_VALUES
#include "ht-internal.h"
struct event_map_entry;
HT_HEAD(event_io_map, event_map_entry);
#else
#define event_io_map event_signal_map
#endif

/* Used to map signal numbers to a list of events.  If EVMAP_USE_HT is not
   defined, this structure is also used as event_io_map, which maps fds to a
   list of events.
*/
struct event_signal_map {
	/* An array of evmap_io * or of evmap_signal *; empty entries are
	 * set to NULL. */
	void **entries;
	/* The number of entries available in entries */
	int nentries;
};
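/* Conceptually, looking up the per-signal (or, without EVMAP_USE_HT, the
 * per-fd) event list is just an array index.  A simplified sketch of what
 * the accessors in evmap.c do:
 *
 *	struct evmap_signal *ctx = NULL;
 *	if (signo < map->nentries)
 *		ctx = (struct evmap_signal *)map->entries[signo];
 *	-- ctx is NULL if nothing has ever been added for this slot.
 */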

/* A list of events waiting on a given 'common' timeout value.  Ordinarily,
 * events waiting for a timeout wait on a minheap.  Sometimes, however, a
 * queue can be faster.
 */
struct common_timeout_list {
	/* List of events currently waiting in the queue. */
	struct event_list events;
	/* 'magic' timeval used to indicate the duration of events in this
	 * queue. */
	struct timeval duration;
	/* Event that triggers whenever one of the events in the queue is
	 * ready to activate */
	struct event timeout_event;
	/* The event_base that this timeout list is part of */
	struct event_base *base;
};

/** Mask used to get the real tv_usec value from a common timeout. */
#define COMMON_TIMEOUT_MICROSECONDS_MASK       0x000fffff
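/* As a sketch of how this mask is meant to be used: a common-timeout timeval
 * keeps the real microseconds in its low bits and stashes a magic tag plus
 * the index into common_timeout_queues in the high bits of tv_usec (the
 * companion macros live in event.c).  Recovering the real duration is then
 * roughly:
 *
 *	struct timeval tv = ev->ev_timeout;
 *	tv.tv_usec &= COMMON_TIMEOUT_MICROSECONDS_MASK;
 */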

struct event_change;

/* List of 'changes' since the last call to eventop.dispatch.  Only maintained
 * if the backend is using changesets. */
struct event_changelist {
	struct event_change *changes;
	int n_changes;
	int changes_size;
};
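/* A changeset-aware backend is expected to drain this array at dispatch
 * time, conceptually along the lines of the loop below (apply_one_change is
 * a made-up name; the real consumers are the backends themselves, e.g. the
 * epoll changelist code):
 *
 *	for (i = 0; i < base->changelist.n_changes; ++i)
 *		apply_one_change(base, &base->changelist.changes[i]);
 *	base->changelist.n_changes = 0;
 */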

#ifndef EVENT__DISABLE_DEBUG_MODE
/* Global internal flag: set to one if debug mode is on. */
extern int event_debug_mode_on_;
#define EVENT_DEBUG_MODE_IS_ON() (event_debug_mode_on_)
#else
#define EVENT_DEBUG_MODE_IS_ON() (0)
#endif

TAILQ_HEAD(evcallback_list, event_callback);

/* Sets up an event for processing once */
struct event_once {
	LIST_ENTRY(event_once) next_once;
	struct event ev;

	void (*cb)(evutil_socket_t, short, void *);
	void *arg;
};
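/* This is the bookkeeping behind the public event_base_once() call; a caller
 * uses it roughly as follows (my_once_cb and my_arg are illustrative names):
 *
 *	event_base_once(base, fd, EV_READ, my_once_cb, my_arg, NULL);
 *
 * The resulting event_once entry sits on the base's once_events list until
 * the callback fires (or the base is freed), after which it is unlinked and
 * released.
 */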

struct event_base {
	/** Function pointers and other data to describe this event_base's
	 * backend. */
	const struct eventop *evsel;
	/** Pointer to backend-specific data. */
	void *evbase;

	/** List of changes to tell backend about at next dispatch.  Only used
	 * by the O(1) backends. */
	struct event_changelist changelist;

	/** Function pointers used to describe the backend that this event_base
	 * uses for signals */
	const struct eventop *evsigsel;
	/** Data to implement the common signal handler code. */
	struct evsig_info sig;

	/** Number of virtual events */
	int virtual_event_count;
	/** Maximum number of virtual events active */
	int virtual_event_count_max;
	/** Number of total events added to this event_base */
	int event_count;
	/** Maximum number of total events added to this event_base */
	int event_count_max;
	/** Number of total events active in this event_base */
	int event_count_active;
	/** Maximum number of total events active in this event_base */
	int event_count_active_max;

	/** Set if we should terminate the loop once we're done processing
	 * events. */
	int event_gotterm;
	/** Set if we should terminate the loop immediately */
	int event_break;
	/** Set if we should start a new instance of the loop immediately. */
	int event_continue;

	/** The currently running priority of events */
	int event_running_priority;

	/** Set if we're running the event_base_loop function, to prevent
	 * reentrant invocation. */
	int running_loop;

	/** Set to the number of deferred_cbs we've made 'active' in the
	 * loop.  This is a hack to prevent starvation; it would be smarter
	 * to just use event_config_set_max_dispatch_interval's max_callbacks
	 * feature */
	int n_deferreds_queued;

	/* Active event management. */
	/** An array of nactivequeues queues for active event_callbacks (ones
	 * that have triggered, and whose callbacks need to be called).  Low
	 * priority numbers are more important, and stall higher ones.
	 */
	struct evcallback_list *activequeues;
	/** The length of the activequeues array */
	int nactivequeues;
	/** A list of event_callbacks that should become active the next time
	 * we process events, but not this time. */
	struct evcallback_list active_later_queue;

	/* common timeout logic */

	/** An array of common_timeout_list* for all of the common timeout
	 * values we know. */
	struct common_timeout_list **common_timeout_queues;
	/** The number of entries used in common_timeout_queues */
	int n_common_timeouts;
	/** The total size of common_timeout_queues. */
	int n_common_timeouts_allocated;

	/** Mapping from file descriptors to enabled (added) events */
	struct event_io_map io;

	/** Mapping from signal numbers to enabled (added) events. */
	struct event_signal_map sigmap;

	/** Priority queue of events with timeouts. */
	struct min_heap timeheap;

	/** Stored timeval: used to avoid calling gettimeofday/clock_gettime
	 * too often. */
	struct timeval tv_cache;

	struct evutil_monotonic_timer monotonic_timer;

	/** Difference between internal time (maybe from clock_gettime) and
	 * gettimeofday. */
	struct timeval tv_clock_diff;
	/** Second in which we last updated tv_clock_diff, in monotonic time. */
	time_t last_updated_clock_diff;

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	/* threading support */
	/** The thread currently running the event_loop for this base */
	unsigned long th_owner_id;
	/** A lock to prevent conflicting accesses to this event_base */
	void *th_base_lock;
	/** A condition that gets signalled when we're done processing an
	 * event with waiters on it. */
	void *current_event_cond;
	/** Number of threads blocking on current_event_cond. */
	int current_event_waiters;
#endif
	/** The event whose callback is executing right now */
	struct event_callback *current_event;

#ifdef _WIN32
	/** IOCP support structure, if IOCP is enabled. */
	struct event_iocp_port *iocp;
#endif

	/** Flags that this base was configured with */
	enum event_base_config_flag flags;

	struct timeval max_dispatch_time;
	int max_dispatch_callbacks;
	int limit_callbacks_after_prio;
	/* Notify the main thread to wake up, break out of the loop, etc. */
	/** True if the base already has a pending notify, and we don't need
	 * to add any more. */
	int is_notify_pending;
	/** A socketpair used by some th_notify functions to wake up the main
	 * thread. */
	evutil_socket_t th_notify_fd[2];
	/** An event used by some th_notify functions to wake up the main
	 * thread. */
	struct event th_notify;
	/** A function used to wake up the main thread from another thread. */
	int (*th_notify_fn)(struct event_base *base);

	/** Saved seed for weak random number generator. Some backends use
	 * this to produce fairness among sockets. Protected by th_base_lock. */
	struct evutil_weakrand_state weakrand_seed;

	/** List of event_onces that have not yet fired. */
	LIST_HEAD(once_event_list, event_once) once_events;

};

struct event_config_entry {
	TAILQ_ENTRY(event_config_entry) next;

	const char *avoid_method;
};

/** Internal structure: describes the configuration we want for an event_base
 * that we're about to allocate. */
struct event_config {
	TAILQ_HEAD(event_configq, event_config_entry) entries;

	int n_cpus_hint;
	struct timeval max_dispatch_interval;
	int max_dispatch_callbacks;
	int limit_callbacks_after_prio;
	enum event_method_feature require_features;
	enum event_base_config_flag flags;
};
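/* This structure is populated through the public event2/event.h
 * configuration API rather than directly; a typical (illustrative) caller
 * looks like:
 *
 *	struct event_config *cfg = event_config_new();
 *	event_config_avoid_method(cfg, "select");
 *	event_config_require_features(cfg, EV_FEATURE_O1);
 *	base = event_base_new_with_config(cfg);
 *	event_config_free(cfg);
 */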

/* Internal use only: Functions that might be missing from <sys/queue.h> */
#if defined(EVENT__HAVE_SYS_QUEUE_H) && !defined(EVENT__HAVE_TAILQFOREACH)
#ifndef TAILQ_FIRST
#define	TAILQ_FIRST(head)		((head)->tqh_first)
#endif
#ifndef TAILQ_END
#define	TAILQ_END(head)			NULL
#endif
#ifndef TAILQ_NEXT
#define	TAILQ_NEXT(elm, field)		((elm)->field.tqe_next)
#endif

#ifndef TAILQ_FOREACH
#define TAILQ_FOREACH(var, head, field)					\
	for ((var) = TAILQ_FIRST(head);					\
	     (var) != TAILQ_END(head);					\
	     (var) = TAILQ_NEXT(var, field))
#endif

#ifndef TAILQ_INSERT_BEFORE
#define	TAILQ_INSERT_BEFORE(listelm, elm, field) do {			\
	(elm)->field.tqe_prev = (listelm)->field.tqe_prev;		\
	(elm)->field.tqe_next = (listelm);				\
	*(listelm)->field.tqe_prev = (elm);				\
	(listelm)->field.tqe_prev = &(elm)->field.tqe_next;		\
} while (0)
#endif
#endif /* TAILQ_FOREACH */
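/* For reference, the TAILQ_FOREACH fallback is used exactly like the
 * <sys/queue.h> original; e.g. walking the entries of a struct event_config
 * (sketch):
 *
 *	struct event_config_entry *entry;
 *	TAILQ_FOREACH(entry, &cfg->entries, next) {
 *		-- entry->avoid_method names a backend to skip
 *	}
 */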

#define N_ACTIVE_CALLBACKS(base)					\
	((base)->event_count_active)

int evsig_set_handler_(struct event_base *base, int evsignal,
			  void (*fn)(int));
int evsig_restore_handler_(struct event_base *base, int evsignal);

int event_add_nolock_(struct event *ev,
    const struct timeval *tv, int tv_is_absolute);
/** Argument for event_del_nolock_. Tells event_del not to block on the event
 * if it's running in another thread. */
#define EVENT_DEL_NOBLOCK 0
/** Argument for event_del_nolock_. Tells event_del to block on the event
 * if it's running in another thread, regardless of its value for EV_FINALIZE
 */
#define EVENT_DEL_BLOCK 1
/** Argument for event_del_nolock_. Tells event_del to block on the event
 * if it is running in another thread and it doesn't have EV_FINALIZE set.
 */
#define EVENT_DEL_AUTOBLOCK 2
/** Argument for event_del_nolock_. Tells event_del to proceed even if the
 * event is set up for finalization rather than for regular use. */
#define EVENT_DEL_EVEN_IF_FINALIZING 3
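/* As an illustration of how these are meant to be combined with the public
 * API: the ordinary event_del() path is expected to take the base lock and
 * then use the auto-blocking behavior, roughly
 *
 *	event_del_nolock_(ev, EVENT_DEL_AUTOBLOCK);
 *
 * while event_del_block() and event_del_noblock() select EVENT_DEL_BLOCK and
 * EVENT_DEL_NOBLOCK explicitly (see event.c for the authoritative mapping).
 */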
int event_del_nolock_(struct event *ev, int blocking);
int event_remove_timer_nolock_(struct event *ev);

void event_active_nolock_(struct event *ev, int res, short count);
int event_callback_activate_(struct event_base *, struct event_callback *);
int event_callback_activate_nolock_(struct event_base *, struct event_callback *);
int event_callback_cancel_(struct event_base *base,
    struct event_callback *evcb);

void event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *));
void event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *));
int event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcb, void (*cb)(struct event_callback *, void *));


void event_active_later_(struct event *ev, int res);
void event_active_later_nolock_(struct event *ev, int res);
void event_callback_activate_later_nolock_(struct event_base *base,
    struct event_callback *evcb);
int event_callback_cancel_nolock_(struct event_base *base,
    struct event_callback *evcb, int even_if_finalizing);
void event_callback_init_(struct event_base *base,
    struct event_callback *cb);

/* FIXME document. */
void event_base_add_virtual_(struct event_base *base);
void event_base_del_virtual_(struct event_base *base);

/** For debugging: unless assertions are disabled, verify the referential
    integrity of the internal data structures of 'base'.  This operation can
    be expensive.

    Returns on success; aborts on failure.
*/
void event_base_assert_ok_(struct event_base *base);
void event_base_assert_ok_nolock_(struct event_base *base);


/* Helper function: Call 'fn' exactly once for every inserted or active event
 * in the event_base 'base'.
 *
 * If fn returns 0, continue on to the next event. Otherwise, return the same
 * value that fn returned.
 *
 * Requires that 'base' be locked.
 */
int event_base_foreach_event_nolock_(struct event_base *base,
    event_base_foreach_event_cb cb, void *arg);
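/* A caller's callback matches the public event_base_foreach_event_cb
 * signature from event2/event.h; for instance (illustrative), counting
 * events:
 *
 *	static int
 *	count_one(const struct event_base *base, const struct event *ev, void *arg)
 *	{
 *		++*(int *)arg;
 *		return 0;	-- 0 means keep iterating
 *	}
 */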

#ifdef __cplusplus
}
#endif

#endif /* EVENT_INTERNAL_H_INCLUDED_ */