/*
 * Copyright (c) 2000-2007 Niels Provos <provos@citi.umich.edu>
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef _WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
#include <sys/time.h>
#endif
#include <sys/queue.h>
#ifdef EVENT__HAVE_SYS_SOCKET_H
#include <sys/socket.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#ifdef EVENT__HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <ctype.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <limits.h>

#include "event2/event.h"
#include "event2/event_struct.h"
#include "event2/event_compat.h"
#include "event-internal.h"
#include "defer-internal.h"
#include "evthread-internal.h"
#include "event2/thread.h"
#include "event2/util.h"
#include "log-internal.h"
#include "evmap-internal.h"
#include "iocp-internal.h"
#include "changelist-internal.h"
#define HT_NO_CACHE_HASH_VALUES
#include "ht-internal.h"
#include "util-internal.h"

#ifdef EVENT__HAVE_WORKING_KQUEUE
#include "kqueue-internal.h"
#endif

#ifdef EVENT__HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef EVENT__HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef EVENT__HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef EVENT__HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef EVENT__HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef EVENT__HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef _WIN32
extern const struct eventop win32ops;
#endif

/* Array of backends in order of preference. */
static const struct eventop *eventops[] = {
#ifdef EVENT__HAVE_EVENT_PORTS
    &evportops,
#endif
#ifdef EVENT__HAVE_WORKING_KQUEUE
    &kqops,
#endif
#ifdef EVENT__HAVE_EPOLL
    &epollops,
#endif
#ifdef EVENT__HAVE_DEVPOLL
    &devpollops,
#endif
#ifdef EVENT__HAVE_POLL
    &pollops,
#endif
#ifdef EVENT__HAVE_SELECT
    &selectops,
#endif
#ifdef _WIN32
    &win32ops,
#endif
    NULL
};
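
/* The first backend above that survives the event_config and environment
 * filters, and whose init() succeeds, becomes the backend for a new
 * event_base; see the selection loop in event_base_new_with_config() below.
 * Earlier entries are preferred, so e.g. epoll beats poll and select. */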

/* Global state; deprecated */
struct event_base *event_global_current_base_ = NULL;
#define current_base event_global_current_base_

/* Global state */

static void *event_self_cbarg_ptr_ = NULL;

/* Prototypes */
static void event_queue_insert_active(struct event_base *, struct event_callback *);
static void event_queue_insert_active_later(struct event_base *, struct event_callback *);
static void event_queue_insert_timeout(struct event_base *, struct event *);
static void event_queue_insert_inserted(struct event_base *, struct event *);
static void event_queue_remove_active(struct event_base *, struct event_callback *);
static void event_queue_remove_active_later(struct event_base *, struct event_callback *);
static void event_queue_remove_timeout(struct event_base *, struct event *);
static void event_queue_remove_inserted(struct event_base *, struct event *);
static void event_queue_make_later_events_active(struct event_base *base);

static int evthread_make_base_notifiable_nolock_(struct event_base *base);
static int event_del_(struct event *ev, int blocking);

#ifdef USE_REINSERT_TIMEOUT
/* This code seems buggy; only turn it on if we find out what the trouble is. */
static void event_queue_reinsert_timeout(struct event_base *, struct event *, int was_common, int is_common, int old_timeout_idx);
#endif

static int event_haveevents(struct event_base *);

static int event_process_active(struct event_base *);

static int timeout_next(struct event_base *, struct timeval **);
static void timeout_process(struct event_base *);

static inline void event_signal_closure(struct event_base *, struct event *ev);
static inline void event_persist_closure(struct event_base *, struct event *ev);

static int evthread_notify_base(struct event_base *base);

static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
    struct event *ev);

#ifndef EVENT__DISABLE_DEBUG_MODE
/* These functions implement a hashtable of which 'struct event *' structures
 * have been setup or added.  We don't want to trust the content of the struct
 * event itself, since we're trying to work through cases where an event gets
 * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
 */

struct event_debug_entry {
    HT_ENTRY(event_debug_entry) node;
    const struct event *ptr;
    unsigned added : 1;
};

static inline unsigned
hash_debug_entry(const struct event_debug_entry *e)
{
    /* We need to do this silliness to convince compilers that we
     * honestly mean to cast e->ptr to an integer, and discard any
     * part of it that doesn't fit in an unsigned.
     */
    unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
    /* Our hashtable implementation is pretty sensitive to low bits,
     * and every struct event is over 64 bytes in size, so we can
     * just say >>6. */
    return (u >> 6);
}

static inline int
eq_debug_entry(const struct event_debug_entry *a,
    const struct event_debug_entry *b)
{
    return a->ptr == b->ptr;
}

int event_debug_mode_on_ = 0;
/* Set if it's too late to enable event_debug_mode. */
static int event_debug_mode_too_late = 0;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
static void *event_debug_map_lock_ = NULL;
#endif
static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
    HT_INITIALIZER();

HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry)
HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
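
/* The note/assert macros below consult this table only when
 * event_debug_mode_on_ is set, i.e. when the application called
 * event_enable_debug_mode() before creating any events or bases.
 * Illustrative sketch (not part of this file):
 *
 *     event_enable_debug_mode();              // must precede any event use
 *     struct event_base *base = event_base_new();
 *     ...                                     // add/del misuse now aborts
 *                                             // with a diagnostic
 */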
/* Macro: record that ev is now setup (that is, ready for an add) */
#define event_debug_note_setup_(ev) do { \
    if (event_debug_mode_on_) { \
        struct event_debug_entry *dent, find; \
        find.ptr = (ev); \
        EVLOCK_LOCK(event_debug_map_lock_, 0); \
        dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
        if (dent) { \
            dent->added = 0; \
        } else { \
            dent = mm_malloc(sizeof(*dent)); \
            if (!dent) \
                event_err(1, \
                    "Out of memory in debugging code"); \
            dent->ptr = (ev); \
            dent->added = 0; \
            HT_INSERT(event_debug_map, &global_debug_map, dent); \
        } \
        EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
    } \
    event_debug_mode_too_late = 1; \
    } while (0)
/* Macro: record that ev is no longer setup */
#define event_debug_note_teardown_(ev) do { \
    if (event_debug_mode_on_) { \
        struct event_debug_entry *dent, find; \
        find.ptr = (ev); \
        EVLOCK_LOCK(event_debug_map_lock_, 0); \
        dent = HT_REMOVE(event_debug_map, &global_debug_map, &find); \
        if (dent) \
            mm_free(dent); \
        EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
    } \
    event_debug_mode_too_late = 1; \
    } while (0)
/* Macro: record that ev is now added */
#define event_debug_note_add_(ev) do { \
    if (event_debug_mode_on_) { \
        struct event_debug_entry *dent, find; \
        find.ptr = (ev); \
        EVLOCK_LOCK(event_debug_map_lock_, 0); \
        dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
        if (dent) { \
            dent->added = 1; \
        } else { \
            event_errx(EVENT_ERR_ABORT_, \
                "%s: noting an add on a non-setup event %p" \
                " (events: 0x%x, fd: "EV_SOCK_FMT \
                ", flags: 0x%x)", \
                __func__, (ev), (ev)->ev_events, \
                EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
        } \
        EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
    } \
    event_debug_mode_too_late = 1; \
    } while (0)
/* Macro: record that ev is no longer added */
#define event_debug_note_del_(ev) do { \
    if (event_debug_mode_on_) { \
        struct event_debug_entry *dent, find; \
        find.ptr = (ev); \
        EVLOCK_LOCK(event_debug_map_lock_, 0); \
        dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
        if (dent) { \
            dent->added = 0; \
        } else { \
            event_errx(EVENT_ERR_ABORT_, \
                "%s: noting a del on a non-setup event %p" \
                " (events: 0x%x, fd: "EV_SOCK_FMT \
                ", flags: 0x%x)", \
                __func__, (ev), (ev)->ev_events, \
                EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
        } \
        EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
    } \
    event_debug_mode_too_late = 1; \
    } while (0)
/* Macro: assert that ev is setup (i.e., okay to add or inspect) */
#define event_debug_assert_is_setup_(ev) do { \
    if (event_debug_mode_on_) { \
        struct event_debug_entry *dent, find; \
        find.ptr = (ev); \
        EVLOCK_LOCK(event_debug_map_lock_, 0); \
        dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
        if (!dent) { \
            event_errx(EVENT_ERR_ABORT_, \
                "%s called on a non-initialized event %p" \
                " (events: 0x%x, fd: "EV_SOCK_FMT \
                ", flags: 0x%x)", \
                __func__, (ev), (ev)->ev_events, \
                EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
        } \
        EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
    } \
    } while (0)
/* Macro: assert that ev is not added (i.e., okay to tear down or set
 * up again) */
#define event_debug_assert_not_added_(ev) do { \
    if (event_debug_mode_on_) { \
        struct event_debug_entry *dent, find; \
        find.ptr = (ev); \
        EVLOCK_LOCK(event_debug_map_lock_, 0); \
        dent = HT_FIND(event_debug_map, &global_debug_map, &find); \
        if (dent && dent->added) { \
            event_errx(EVENT_ERR_ABORT_, \
                "%s called on an already added event %p" \
                " (events: 0x%x, fd: "EV_SOCK_FMT", " \
                "flags: 0x%x)", \
                __func__, (ev), (ev)->ev_events, \
                EV_SOCK_ARG((ev)->ev_fd), (ev)->ev_flags); \
        } \
        EVLOCK_UNLOCK(event_debug_map_lock_, 0); \
    } \
    } while (0)
#else
#define event_debug_note_setup_(ev) \
    ((void)0)
#define event_debug_note_teardown_(ev) \
    ((void)0)
#define event_debug_note_add_(ev) \
    ((void)0)
#define event_debug_note_del_(ev) \
    ((void)0)
#define event_debug_assert_is_setup_(ev) \
    ((void)0)
#define event_debug_assert_not_added_(ev) \
    ((void)0)
#endif

#define EVENT_BASE_ASSERT_LOCKED(base) \
    EVLOCK_ASSERT_LOCKED((base)->th_base_lock)

/* How often (in seconds) do we check for changes in wall clock time relative
 * to monotonic time?  Set this to -1 for 'never.' */
#define CLOCK_SYNC_INTERVAL 5

/** Set 'tp' to the current time according to 'base'.  We must hold the lock
 * on 'base'.  If there is a cached time, return it.  Otherwise, use
 * clock_gettime or gettimeofday as appropriate to find out the right time.
 * Return 0 on success, -1 on failure.
 */
static int
gettime(struct event_base *base, struct timeval *tp)
{
    EVENT_BASE_ASSERT_LOCKED(base);

    if (base->tv_cache.tv_sec) {
        *tp = base->tv_cache;
        return (0);
    }

    if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
        return -1;
    }

    if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
        < tp->tv_sec) {
        struct timeval tv;
        evutil_gettimeofday(&tv, NULL);
        evutil_timersub(&tv, tp, &base->tv_clock_diff);
        base->last_updated_clock_diff = tp->tv_sec;
    }

    return 0;
}

int
event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
{
    int r;
    if (!base) {
        base = current_base;
        if (!current_base)
            return evutil_gettimeofday(tv, NULL);
    }

    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    if (base->tv_cache.tv_sec == 0) {
        r = evutil_gettimeofday(tv, NULL);
    } else {
        evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
        r = 0;
    }
    EVBASE_RELEASE_LOCK(base, th_base_lock);
    return r;
}

/** Make 'base' have no current cached time. */
static inline void
clear_time_cache(struct event_base *base)
{
    base->tv_cache.tv_sec = 0;
}

/** Replace the cached time in 'base' with the current time. */
static inline void
update_time_cache(struct event_base *base)
{
    base->tv_cache.tv_sec = 0;
    if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
        gettime(base, &base->tv_cache);
}

int
event_base_update_cache_time(struct event_base *base)
{
    if (!base) {
        base = current_base;
        if (!current_base)
            return -1;
    }

    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    if (base->running_loop)
        update_time_cache(base);
    EVBASE_RELEASE_LOCK(base, th_base_lock);
    return 0;
}
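
/* The cache above is refreshed once per loop iteration, so a long-running
 * callback can observe a stale cached time.  Illustrative sketch (not part
 * of this file) of how a caller forces a refresh mid-callback:
 *
 *     struct timeval tv;
 *     event_base_update_cache_time(base);        // re-sample the clock
 *     event_base_gettimeofday_cached(base, &tv); // now up to date
 */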

static inline struct event *
event_callback_to_event(struct event_callback *evcb)
{
    EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
    return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
}

static inline struct event_callback *
event_to_event_callback(struct event *ev)
{
    return &ev->ev_evcallback;
}

struct event_base *
event_init(void)
{
    struct event_base *base = event_base_new_with_config(NULL);

    if (base == NULL) {
        event_errx(1, "%s: Unable to construct event_base", __func__);
        return NULL;
    }

    current_base = base;

    return (base);
}

struct event_base *
event_base_new(void)
{
    struct event_base *base = NULL;
    struct event_config *cfg = event_config_new();
    if (cfg) {
        base = event_base_new_with_config(cfg);
        event_config_free(cfg);
    }
    return base;
}
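
/* Illustrative sketch (not part of this file) of the configured path that
 * event_base_new() wraps:
 *
 *     struct event_config *cfg = event_config_new();
 *     event_config_avoid_method(cfg, "select");
 *     event_config_require_features(cfg, EV_FEATURE_O1);
 *     struct event_base *base = event_base_new_with_config(cfg);
 *     event_config_free(cfg);
 *     // base is NULL if no backend satisfies the constraints
 */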

/** Return true iff 'method' is the name of a method that 'cfg' tells us to
 * avoid. */
static int
event_config_is_avoided_method(const struct event_config *cfg,
    const char *method)
{
    struct event_config_entry *entry;

    TAILQ_FOREACH(entry, &cfg->entries, next) {
        if (entry->avoid_method != NULL &&
            strcmp(entry->avoid_method, method) == 0)
            return (1);
    }

    return (0);
}

/** Return true iff 'method' is disabled according to the environment. */
static int
event_is_method_disabled(const char *name)
{
    char environment[64];
    int i;

    evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
    for (i = 8; environment[i] != '\0'; ++i)
        environment[i] = EVUTIL_TOUPPER_(environment[i]);
    /* Note that evutil_getenv_() ignores the environment entirely if
     * we're setuid */
    return (evutil_getenv_(environment) != NULL);
}
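
/* The loop above starts at i = 8 so that only the part after the literal
 * "EVENT_NO" prefix is upcased.  Assuming a POSIX shell, any backend can
 * therefore be vetoed at runtime without recompiling:
 *
 *     EVENT_NOEPOLL=1 ./myprog    # skip epoll; fall back to poll/select
 */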

int
event_base_get_features(const struct event_base *base)
{
    return base->evsel->features;
}

void
event_enable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
    if (event_debug_mode_on_)
        event_errx(1, "%s was called twice!", __func__);
    if (event_debug_mode_too_late)
        event_errx(1, "%s must be called *before* creating any events "
            "or event_bases", __func__);

    event_debug_mode_on_ = 1;

    HT_INIT(event_debug_map, &global_debug_map);
#endif
}

void
event_disable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
    struct event_debug_entry **ent, *victim;

    EVLOCK_LOCK(event_debug_map_lock_, 0);
    for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
        victim = *ent;
        ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
        mm_free(victim);
    }
    HT_CLEAR(event_debug_map, &global_debug_map);
    EVLOCK_UNLOCK(event_debug_map_lock_, 0);

    event_debug_mode_on_ = 0;
#endif
}

struct event_base *
event_base_new_with_config(const struct event_config *cfg)
{
    int i;
    struct event_base *base;
    int should_check_environment;

#ifndef EVENT__DISABLE_DEBUG_MODE
    event_debug_mode_too_late = 1;
#endif

    if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
        event_warn("%s: calloc", __func__);
        return NULL;
    }

    if (cfg)
        base->flags = cfg->flags;

    should_check_environment =
        !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));

    {
        struct timeval tmp;
        int precise_time =
            cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
        int flags;
        if (should_check_environment && !precise_time) {
            precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
            /* Only record the flag if the environment actually
             * asked for precise timers. */
            if (precise_time)
                base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
        }
        flags = precise_time ? EV_MONOT_PRECISE : 0;
        evutil_configure_monotonic_time_(&base->monotonic_timer, flags);

        gettime(base, &tmp);
    }

    min_heap_ctor_(&base->timeheap);

    base->sig.ev_signal_pair[0] = -1;
    base->sig.ev_signal_pair[1] = -1;
    base->th_notify_fd[0] = -1;
    base->th_notify_fd[1] = -1;

    TAILQ_INIT(&base->active_later_queue);

    evmap_io_initmap_(&base->io);
    evmap_signal_initmap_(&base->sigmap);
    event_changelist_init_(&base->changelist);

    base->evbase = NULL;

    if (cfg) {
        memcpy(&base->max_dispatch_time,
            &cfg->max_dispatch_interval, sizeof(struct timeval));
        base->limit_callbacks_after_prio =
            cfg->limit_callbacks_after_prio;
    } else {
        base->max_dispatch_time.tv_sec = -1;
        base->limit_callbacks_after_prio = 1;
    }
    if (cfg && cfg->max_dispatch_callbacks >= 0) {
        base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
    } else {
        base->max_dispatch_callbacks = INT_MAX;
    }
    if (base->max_dispatch_callbacks == INT_MAX &&
        base->max_dispatch_time.tv_sec == -1)
        base->limit_callbacks_after_prio = INT_MAX;

    for (i = 0; eventops[i] && !base->evbase; i++) {
        if (cfg != NULL) {
            /* determine if this backend should be avoided */
            if (event_config_is_avoided_method(cfg,
                eventops[i]->name))
                continue;
            if ((eventops[i]->features & cfg->require_features)
                != cfg->require_features)
                continue;
        }

        /* also obey the environment variables */
        if (should_check_environment &&
            event_is_method_disabled(eventops[i]->name))
            continue;

        base->evsel = eventops[i];

        base->evbase = base->evsel->init(base);
    }

    if (base->evbase == NULL) {
        event_warnx("%s: no event mechanism available",
            __func__);
        base->evsel = NULL;
        event_base_free(base);
        return NULL;
    }

    if (evutil_getenv_("EVENT_SHOW_METHOD"))
        event_msgx("libevent using: %s", base->evsel->name);

    /* allocate a single active event queue */
    if (event_base_priority_init(base, 1) < 0) {
        event_base_free(base);
        return NULL;
    }

    /* prepare for threading */

#ifndef EVENT__DISABLE_THREAD_SUPPORT
    if (EVTHREAD_LOCKING_ENABLED() &&
        (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
        int r;
        EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
        EVTHREAD_ALLOC_COND(base->current_event_cond);
        r = evthread_make_base_notifiable(base);
        if (r < 0) {
            event_warnx("%s: Unable to make base notifiable.", __func__);
            event_base_free(base);
            return NULL;
        }
    }
#endif

#ifdef _WIN32
    if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
        event_base_start_iocp_(base, cfg->n_cpus_hint);
#endif

    return (base);
}

int
event_base_start_iocp_(struct event_base *base, int n_cpus)
{
#ifdef _WIN32
    if (base->iocp)
        return 0;
    base->iocp = event_iocp_port_launch_(n_cpus);
    if (!base->iocp) {
        event_warnx("%s: Couldn't launch IOCP", __func__);
        return -1;
    }
    return 0;
#else
    return -1;
#endif
}

void
event_base_stop_iocp_(struct event_base *base)
{
#ifdef _WIN32
    int rv;

    if (!base->iocp)
        return;
    rv = event_iocp_shutdown_(base->iocp, -1);
    EVUTIL_ASSERT(rv >= 0);
    base->iocp = NULL;
#endif
}

static int
event_base_cancel_single_callback_(struct event_base *base,
    struct event_callback *evcb,
    int run_finalizers)
{
    int result = 0;

    if (evcb->evcb_flags & EVLIST_INIT) {
        struct event *ev = event_callback_to_event(evcb);
        if (!(ev->ev_flags & EVLIST_INTERNAL)) {
            event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
            result = 1;
        }
    } else {
        EVBASE_ACQUIRE_LOCK(base, th_base_lock);
        event_callback_cancel_nolock_(base, evcb, 1);
        EVBASE_RELEASE_LOCK(base, th_base_lock);
        result = 1;
    }

    if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
        switch (evcb->evcb_closure) {
        case EV_CLOSURE_EVENT_FINALIZE:
        case EV_CLOSURE_EVENT_FINALIZE_FREE: {
            struct event *ev = event_callback_to_event(evcb);
            ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
            if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
                mm_free(ev);
            break;
        }
        case EV_CLOSURE_CB_FINALIZE:
            evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
            break;
        default:
            break;
        }
    }
    return result;
}

static void
event_base_free_(struct event_base *base, int run_finalizers)
{
    int i, n_deleted = 0;
    struct event *ev;
    /* XXXX grab the lock? If there is contention when one thread frees
     * the base, then the contending thread will be very sad soon. */

    /* event_base_free(NULL) is how to free the current_base if we
     * made it with event_init and forgot to hold a reference to it. */
    if (base == NULL && current_base)
        base = current_base;
    /* Don't actually free NULL. */
    if (base == NULL) {
        event_warnx("%s: no base to free", __func__);
        return;
    }
    /* XXX(niels) - check for internal events first */

#ifdef _WIN32
    event_base_stop_iocp_(base);
#endif

    /* threading fds if we have them */
    if (base->th_notify_fd[0] != -1) {
        event_del(&base->th_notify);
        EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
        if (base->th_notify_fd[1] != -1)
            EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
        base->th_notify_fd[0] = -1;
        base->th_notify_fd[1] = -1;
        event_debug_unassign(&base->th_notify);
    }

    /* Delete all non-internal events. */
    evmap_delete_all_(base);

    while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
        event_del(ev);
        ++n_deleted;
    }
    for (i = 0; i < base->n_common_timeouts; ++i) {
        struct common_timeout_list *ctl =
            base->common_timeout_queues[i];
        event_del(&ctl->timeout_event); /* Internal; doesn't count */
        event_debug_unassign(&ctl->timeout_event);
        for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
            struct event *next = TAILQ_NEXT(ev,
                ev_timeout_pos.ev_next_with_common_timeout);
            if (!(ev->ev_flags & EVLIST_INTERNAL)) {
                event_del(ev);
                ++n_deleted;
            }
            ev = next;
        }
        mm_free(ctl);
    }
    if (base->common_timeout_queues)
        mm_free(base->common_timeout_queues);

    for (i = 0; i < base->nactivequeues; ++i) {
        struct event_callback *evcb, *next;
        for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
            next = TAILQ_NEXT(evcb, evcb_active_next);
            n_deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
            evcb = next;
        }
    }
    {
        struct event_callback *evcb;
        while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
            n_deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
        }
    }

    if (n_deleted)
        event_debug(("%s: %d events were still set in base",
            __func__, n_deleted));

    while (LIST_FIRST(&base->once_events)) {
        struct event_once *eonce = LIST_FIRST(&base->once_events);
        LIST_REMOVE(eonce, next_once);
        mm_free(eonce);
    }

    if (base->evsel != NULL && base->evsel->dealloc != NULL)
        base->evsel->dealloc(base);

    for (i = 0; i < base->nactivequeues; ++i)
        EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));

    EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
    min_heap_dtor_(&base->timeheap);

    mm_free(base->activequeues);

    evmap_io_clear_(&base->io);
    evmap_signal_clear_(&base->sigmap);
    event_changelist_freemem_(&base->changelist);

    EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
    EVTHREAD_FREE_COND(base->current_event_cond);

    /* If we're freeing current_base, there won't be a current_base. */
    if (base == current_base)
        current_base = NULL;
    mm_free(base);
}

void
event_base_free_nofinalize(struct event_base *base)
{
    event_base_free_(base, 0);
}

void
event_base_free(struct event_base *base)
{
    event_base_free_(base, 1);
}

/* Fake eventop; used to disable the backend temporarily inside event_reinit
 * so that we can call event_del() on an event without telling the backend.
 */
static int
nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
    short events, void *fdinfo)
{
    return 0;
}
const struct eventop nil_eventop = {
    "nil",
    NULL, /* init: unused. */
    NULL, /* add: unused. */
    nil_backend_del, /* del: used, so needs to be killed. */
    NULL, /* dispatch: unused. */
    NULL, /* dealloc: unused. */
    0, 0, 0
};

/* reinitialize the event base after a fork */
int
event_reinit(struct event_base *base)
{
    const struct eventop *evsel;
    int res = 0;
    int was_notifiable = 0;
    int had_signal_added = 0;

    EVBASE_ACQUIRE_LOCK(base, th_base_lock);

    evsel = base->evsel;

    /* check if this event mechanism requires reinit on the backend */
    if (evsel->need_reinit) {
        /* We're going to call event_del() on our notify events (the
         * ones that tell about signals and wakeup events).  But we
         * don't actually want to tell the backend to change its
         * state, since it might still share some resource (a kqueue,
         * an epoll fd) with the parent process, and we don't want to
         * delete the fds from _that_ backend, so we temporarily stub
         * out the evsel with a replacement.
         */
        base->evsel = &nil_eventop;
    }

    /* We need to re-create a new signal-notification fd and a new
     * thread-notification fd.  Otherwise, we'll still share those with
     * the parent process, which would make any notification sent to them
     * get received by one or both of the event loops, more or less at
     * random.
     */
    if (base->sig.ev_signal_added) {
        event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
        event_debug_unassign(&base->sig.ev_signal);
        memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
        if (base->sig.ev_signal_pair[0] != -1)
            EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
        if (base->sig.ev_signal_pair[1] != -1)
            EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
        had_signal_added = 1;
        base->sig.ev_signal_added = 0;
    }
    if (base->th_notify_fn != NULL) {
        was_notifiable = 1;
        base->th_notify_fn = NULL;
    }
    if (base->th_notify_fd[0] != -1) {
        event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
        EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
        if (base->th_notify_fd[1] != -1)
            EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
        base->th_notify_fd[0] = -1;
        base->th_notify_fd[1] = -1;
        event_debug_unassign(&base->th_notify);
    }

    /* Replace the original evsel. */
    base->evsel = evsel;

    if (evsel->need_reinit) {
        /* Reconstruct the backend through brute-force, so that we do
         * not share any structures with the parent process.  For some
         * backends, this is necessary: epoll and kqueue, for
         * instance, have events associated with a kernel
         * structure.  If we didn't reinitialize, we'd share that
         * structure with the parent process, and any changes made by
         * the parent would affect our backend's behavior (and vice
         * versa).
         */
        if (base->evsel->dealloc != NULL)
            base->evsel->dealloc(base);
        base->evbase = evsel->init(base);
        if (base->evbase == NULL) {
            event_errx(1,
                "%s: could not reinitialize event mechanism",
                __func__);
            res = -1;
            goto done;
        }

        /* Empty out the changelist (if any): we are starting from a
         * blank slate. */
        event_changelist_freemem_(&base->changelist);

        /* Tell the event maps to re-inform the backend about all
         * pending events.  This will make the signal notification
         * event get re-created if necessary. */
        if (evmap_reinit_(base) < 0)
            res = -1;
    } else {
        if (had_signal_added)
            res = evsig_init_(base);
    }

    /* If we were notifiable before, and nothing just exploded, become
     * notifiable again. */
    if (was_notifiable && res == 0)
        res = evthread_make_base_notifiable_nolock_(base);

done:
    EVBASE_RELEASE_LOCK(base, th_base_lock);
    return (res);
}
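
/* Illustrative sketch (not part of this file) of the intended call pattern:
 *
 *     pid_t pid = fork();
 *     if (pid == 0) {
 *         // child: the backend may still share kernel state (an epoll fd,
 *         // a kqueue) with the parent, so rebuild it before dispatching
 *         event_reinit(base);
 *         event_base_dispatch(base);
 *     }
 */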

/* Get the monotonic time for this event_base's timer */
int
event_gettime_monotonic(struct event_base *base, struct timeval *tv)
{
    int rv = -1;

    if (base && tv) {
        EVBASE_ACQUIRE_LOCK(base, th_base_lock);
        rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
        EVBASE_RELEASE_LOCK(base, th_base_lock);
    }

    return rv;
}

const char **
event_get_supported_methods(void)
{
    static const char **methods = NULL;
    const struct eventop **method;
    const char **tmp;
    int i = 0, k;

    /* count all methods */
    for (method = &eventops[0]; *method != NULL; ++method) {
        ++i;
    }

    /* allocate one more than we need for the NULL pointer */
    tmp = mm_calloc((i + 1), sizeof(char *));
    if (tmp == NULL)
        return (NULL);

    /* populate the array with the supported methods */
    for (k = 0, i = 0; eventops[k] != NULL; ++k) {
        tmp[i++] = eventops[k]->name;
    }
    tmp[i] = NULL;

    if (methods != NULL)
        mm_free((char **)methods);

    methods = tmp;

    return (methods);
}
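
/* The returned array is NULL-terminated and owned by the library (it is
 * freed and replaced on the next call).  Illustrative sketch (not part of
 * this file):
 *
 *     const char **m = event_get_supported_methods();
 *     for (int i = 0; m && m[i] != NULL; ++i)
 *         printf("compiled-in method: %s\n", m[i]);
 */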

struct event_config *
event_config_new(void)
{
    struct event_config *cfg = mm_calloc(1, sizeof(*cfg));

    if (cfg == NULL)
        return (NULL);

    TAILQ_INIT(&cfg->entries);
    cfg->max_dispatch_interval.tv_sec = -1;
    cfg->max_dispatch_callbacks = INT_MAX;
    cfg->limit_callbacks_after_prio = 1;

    return (cfg);
}

static void
event_config_entry_free(struct event_config_entry *entry)
{
    if (entry->avoid_method != NULL)
        mm_free((char *)entry->avoid_method);
    mm_free(entry);
}

void
event_config_free(struct event_config *cfg)
{
    struct event_config_entry *entry;

    while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
        TAILQ_REMOVE(&cfg->entries, entry, next);
        event_config_entry_free(entry);
    }
    mm_free(cfg);
}

int
event_config_set_flag(struct event_config *cfg, int flag)
{
    if (!cfg)
        return -1;
    cfg->flags |= flag;
    return 0;
}

int
event_config_avoid_method(struct event_config *cfg, const char *method)
{
    struct event_config_entry *entry = mm_malloc(sizeof(*entry));
    if (entry == NULL)
        return (-1);

    if ((entry->avoid_method = mm_strdup(method)) == NULL) {
        mm_free(entry);
        return (-1);
    }

    TAILQ_INSERT_TAIL(&cfg->entries, entry, next);

    return (0);
}

int
event_config_require_features(struct event_config *cfg,
    int features)
{
    if (!cfg)
        return (-1);
    cfg->require_features = features;
    return (0);
}

int
event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
{
    if (!cfg)
        return (-1);
    cfg->n_cpus_hint = cpus;
    return (0);
}

int
event_config_set_max_dispatch_interval(struct event_config *cfg,
    const struct timeval *max_interval, int max_callbacks, int min_priority)
{
    if (max_interval)
        memcpy(&cfg->max_dispatch_interval, max_interval,
            sizeof(struct timeval));
    else
        cfg->max_dispatch_interval.tv_sec = -1;
    cfg->max_dispatch_callbacks =
        max_callbacks >= 0 ? max_callbacks : INT_MAX;
    if (min_priority < 0)
        min_priority = 0;
    cfg->limit_callbacks_after_prio = min_priority;
    return (0);
}

int
event_priority_init(int npriorities)
{
    return event_base_priority_init(current_base, npriorities);
}

int
event_base_priority_init(struct event_base *base, int npriorities)
{
    int i, r;
    r = -1;

    EVBASE_ACQUIRE_LOCK(base, th_base_lock);

    if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
        || npriorities >= EVENT_MAX_PRIORITIES)
        goto err;

    if (npriorities == base->nactivequeues)
        goto ok;

    if (base->nactivequeues) {
        mm_free(base->activequeues);
        base->nactivequeues = 0;
    }

    /* Allocate our priority queues */
    base->activequeues = (struct evcallback_list *)
        mm_calloc(npriorities, sizeof(struct evcallback_list));
    if (base->activequeues == NULL) {
        event_warn("%s: calloc", __func__);
        goto err;
    }
    base->nactivequeues = npriorities;

    for (i = 0; i < base->nactivequeues; ++i) {
        TAILQ_INIT(&base->activequeues[i]);
    }

ok:
    r = 0;
err:
    EVBASE_RELEASE_LOCK(base, th_base_lock);
    return (r);
}
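
/* Priorities can only be (re)configured while no callbacks are active, and
 * lower numbers run first.  Illustrative sketch (not part of this file;
 * fd, cb, and arg are assumptions):
 *
 *     event_base_priority_init(base, 2);  // 0 = high, 1 = low
 *     struct event *ev = event_new(base, fd, EV_READ | EV_PERSIST, cb, arg);
 *     event_priority_set(ev, 0);          // service this fd first
 *     event_add(ev, NULL);
 */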

int
event_base_get_npriorities(struct event_base *base)
{
    int n;
    if (base == NULL)
        base = current_base;

    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    n = base->nactivequeues;
    EVBASE_RELEASE_LOCK(base, th_base_lock);
    return (n);
}

int
event_base_get_num_events(struct event_base *base, unsigned int type)
{
    int r = 0;

    EVBASE_ACQUIRE_LOCK(base, th_base_lock);

    if (type & EVENT_BASE_COUNT_ACTIVE)
        r += base->event_count_active;

    if (type & EVENT_BASE_COUNT_VIRTUAL)
        r += base->virtual_event_count;

    if (type & EVENT_BASE_COUNT_ADDED)
        r += base->event_count;

    EVBASE_RELEASE_LOCK(base, th_base_lock);

    return r;
}

int
event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
{
    int r = 0;

    EVBASE_ACQUIRE_LOCK(base, th_base_lock);

    if (type & EVENT_BASE_COUNT_ACTIVE) {
        r += base->event_count_active_max;
        if (clear)
            base->event_count_active_max = 0;
    }

    if (type & EVENT_BASE_COUNT_VIRTUAL) {
        r += base->virtual_event_count_max;
        if (clear)
            base->virtual_event_count_max = 0;
    }

    if (type & EVENT_BASE_COUNT_ADDED) {
        r += base->event_count_max;
        if (clear)
            base->event_count_max = 0;
    }

    EVBASE_RELEASE_LOCK(base, th_base_lock);

    return r;
}

/* Returns true iff we're currently watching any events. */
static int
event_haveevents(struct event_base *base)
{
    /* Caller must hold th_base_lock */
    return (base->virtual_event_count > 0 || base->event_count > 0);
}

/* "closure" function called when processing active signal events */
static inline void
event_signal_closure(struct event_base *base, struct event *ev)
{
    short ncalls;
    int should_break;

    /* Allows deletes to work */
    ncalls = ev->ev_ncalls;
    if (ncalls != 0)
        ev->ev_pncalls = &ncalls;
    EVBASE_RELEASE_LOCK(base, th_base_lock);
    while (ncalls) {
        ncalls--;
        ev->ev_ncalls = ncalls;
        if (ncalls == 0)
            ev->ev_pncalls = NULL;
        (*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);

        EVBASE_ACQUIRE_LOCK(base, th_base_lock);
        should_break = base->event_break;
        EVBASE_RELEASE_LOCK(base, th_base_lock);

        if (should_break) {
            if (ncalls != 0)
                ev->ev_pncalls = NULL;
            return;
        }
    }
}

/* Common timeouts are special timeouts that are handled as queues rather than
 * in the minheap.  This is more efficient than the minheap if we happen to
 * know that we're going to get several thousands of timeout events all with
 * the same timeout value.
 *
 * Since all our timeout handling code assumes timevals can be copied,
 * assigned, etc, we can't use a "magic pointer" to encode these common
 * timeouts.  Searching through a list to see if every timeout is common could
 * also get inefficient.  Instead, we take advantage of the fact that tv_usec
 * is 32 bits long, but only uses 20 of those bits (since it can never be over
 * 999999.)  We use the top bits to encode 4 bits of magic number, and 8 bits
 * of index into the event_base's array of common timeouts.
 */

#define MICROSECONDS_MASK        COMMON_TIMEOUT_MICROSECONDS_MASK
#define COMMON_TIMEOUT_IDX_MASK  0x0ff00000
#define COMMON_TIMEOUT_IDX_SHIFT 20
#define COMMON_TIMEOUT_MASK      0xf0000000
#define COMMON_TIMEOUT_MAGIC     0x50000000

#define COMMON_TIMEOUT_IDX(tv) \
    (((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
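
/* Worked example of the encoding (values chosen for illustration): a common
 * timeout with queue index 2 and a 500000-microsecond fractional part is
 * stored as
 *
 *     tv_usec = COMMON_TIMEOUT_MAGIC             (0x50000000)
 *             | (2 << COMMON_TIMEOUT_IDX_SHIFT)  (0x00200000)
 *             | 500000                           (0x0007a120)
 *             = 0x5027a120
 *
 * Masking with MICROSECONDS_MASK recovers 500000, COMMON_TIMEOUT_IDX()
 * recovers 2, and the top nibble 0x5 marks the value as a common timeout. */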

/** Return true iff 'tv' is a common timeout in 'base' */
static inline int
is_common_timeout(const struct timeval *tv,
    const struct event_base *base)
{
    int idx;
    if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
        return 0;
    idx = COMMON_TIMEOUT_IDX(tv);
    return idx < base->n_common_timeouts;
}

/* True iff tv1 and tv2 have the same common-timeout index, or if neither
 * one is a common timeout. */
static inline int
is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
{
    return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
        (tv2->tv_usec & ~MICROSECONDS_MASK);
}

/** Requires that 'tv' is a common timeout.  Return the corresponding
 * common_timeout_list. */
static inline struct common_timeout_list *
get_common_timeout_list(struct event_base *base, const struct timeval *tv)
{
    return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
}

#if 0
static inline int
common_timeout_ok(const struct timeval *tv,
    struct event_base *base)
{
    const struct timeval *expect =
        &get_common_timeout_list(base, tv)->duration;
    return tv->tv_sec == expect->tv_sec &&
        tv->tv_usec == expect->tv_usec;
}
#endif

/* Add the timeout for the first event in the given common timeout list to
 * the event_base's minheap. */
static void
common_timeout_schedule(struct common_timeout_list *ctl,
    const struct timeval *now, struct event *head)
{
    struct timeval timeout = head->ev_timeout;
    timeout.tv_usec &= MICROSECONDS_MASK;
    event_add_nolock_(&ctl->timeout_event, &timeout, 1);
}

/* Callback: invoked when the timeout for a common timeout queue triggers.
 * This means that (at least) the first event in that queue should be run,
 * and the timeout should be rescheduled if there are more events.
 */
static void
common_timeout_callback(evutil_socket_t fd, short what, void *arg)
{
    struct timeval now;
    struct common_timeout_list *ctl = arg;
    struct event_base *base = ctl->base;
    struct event *ev = NULL;
    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    gettime(base, &now);
    while (1) {
        ev = TAILQ_FIRST(&ctl->events);
        if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
            (ev->ev_timeout.tv_sec == now.tv_sec &&
             (ev->ev_timeout.tv_usec & MICROSECONDS_MASK) > now.tv_usec))
            break;
        event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
        event_active_nolock_(ev, EV_TIMEOUT, 1);
    }
    if (ev)
        common_timeout_schedule(ctl, &now, ev);
    EVBASE_RELEASE_LOCK(base, th_base_lock);
}

#define MAX_COMMON_TIMEOUTS 256

const struct timeval *
event_base_init_common_timeout(struct event_base *base,
    const struct timeval *duration)
{
    int i;
    struct timeval tv;
    const struct timeval *result = NULL;
    struct common_timeout_list *new_ctl;

    EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    if (duration->tv_usec > 1000000) {
        memcpy(&tv, duration, sizeof(struct timeval));
        if (is_common_timeout(duration, base))
            tv.tv_usec &= MICROSECONDS_MASK;
        tv.tv_sec += tv.tv_usec / 1000000;
        tv.tv_usec %= 1000000;
        duration = &tv;
    }
    for (i = 0; i < base->n_common_timeouts; ++i) {
        const struct common_timeout_list *ctl =
            base->common_timeout_queues[i];
        if (duration->tv_sec == ctl->duration.tv_sec &&
            duration->tv_usec ==
            (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
            EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
            result = &ctl->duration;
            goto done;
        }
    }
    if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
        event_warnx("%s: Too many common timeouts already in use; "
            "we only support %d per event_base", __func__,
            MAX_COMMON_TIMEOUTS);
        goto done;
    }
    if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
        int n = base->n_common_timeouts < 16 ? 16 :
            base->n_common_timeouts*2;
        struct common_timeout_list **newqueues =
            mm_realloc(base->common_timeout_queues,
                n*sizeof(struct common_timeout_list *));
        if (!newqueues) {
            event_warn("%s: realloc", __func__);
            goto done;
        }
        base->n_common_timeouts_allocated = n;
        base->common_timeout_queues = newqueues;
    }
    new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
    if (!new_ctl) {
        event_warn("%s: calloc", __func__);
        goto done;
    }
    TAILQ_INIT(&new_ctl->events);
    new_ctl->duration.tv_sec = duration->tv_sec;
    new_ctl->duration.tv_usec =
        duration->tv_usec | COMMON_TIMEOUT_MAGIC |
        (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
    evtimer_assign(&new_ctl->timeout_event, base,
        common_timeout_callback, new_ctl);
    new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
    event_priority_set(&new_ctl->timeout_event, 0);
    new_ctl->base = base;
    base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
    result = &new_ctl->duration;

done:
    if (result)
        EVUTIL_ASSERT(is_common_timeout(result, base));

    EVBASE_RELEASE_LOCK(base, th_base_lock);
    return result;
}
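
/* Illustrative sketch (not part of this file) of how an application uses a
 * common timeout; the returned timeval encodes the duration plus the magic
 * bits and must be passed to event_add() unmodified:
 *
 *     struct timeval tv = { 10, 0 };
 *     const struct timeval *tv_common =
 *         event_base_init_common_timeout(base, &tv);
 *     event_add(timer_ev, tv_common);    // queued, not min-heaped
 */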

/* Closure function invoked when we're activating a persistent event. */
static inline void
event_persist_closure(struct event_base *base, struct event *ev)
{
    void (*evcb_callback)(evutil_socket_t, short, void *);

    /* Other fields of *ev that must be stored before executing */
    evutil_socket_t evcb_fd;
    short evcb_res;
    void *evcb_arg;

    /* reschedule the persistent event if we have a timeout. */
    if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
        /* If there was a timeout, we want it to run at an interval of
         * ev_io_timeout after the last time it was _scheduled_ for,
         * not ev_io_timeout after _now_.  If it fired for another
         * reason, though, the timeout ought to start ticking _now_. */
        struct timeval run_at, relative_to, delay, now;
        ev_uint32_t usec_mask = 0;
        EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
            &ev->ev_io_timeout));
        gettime(base, &now);
        if (is_common_timeout(&ev->ev_timeout, base)) {
            delay = ev->ev_io_timeout;
            usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
            delay.tv_usec &= MICROSECONDS_MASK;
            if (ev->ev_res & EV_TIMEOUT) {
                relative_to = ev->ev_timeout;
                relative_to.tv_usec &= MICROSECONDS_MASK;
            } else {
                relative_to = now;
            }
        } else {
            delay = ev->ev_io_timeout;
            if (ev->ev_res & EV_TIMEOUT) {
                relative_to = ev->ev_timeout;
            } else {
                relative_to = now;
            }
        }
        evutil_timeradd(&relative_to, &delay, &run_at);
        if (evutil_timercmp(&run_at, &now, <)) {
            /* Looks like we missed at least one invocation due to
             * a clock jump, not running the event loop for a
             * while, really slow callbacks, or
             * something.  Reschedule relative to now.
             */
            evutil_timeradd(&now, &delay, &run_at);
        }
        run_at.tv_usec |= usec_mask;
        event_add_nolock_(ev, &run_at, 1);
    }

    /* Save our callback before we release the lock */
    evcb_callback = ev->ev_callback;
    evcb_fd = ev->ev_fd;
    evcb_res = ev->ev_res;
    evcb_arg = ev->ev_arg;

    /* Release the lock */
    EVBASE_RELEASE_LOCK(base, th_base_lock);

    /* Execute the callback */
    (evcb_callback)(evcb_fd, evcb_res, evcb_arg);
}
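
/* Worked example of the rescheduling rule above (times chosen for
 * illustration): with ev_io_timeout = 5s, an event scheduled for t=10 whose
 * timeout fires slightly late at t=10.3 is re-added for t=15 (10 + 5), so
 * the period stays stable; if instead it fired at t=10.3 because the fd
 * became readable, the timer restarts from now and is re-added for t=15.3. */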
/* Closure function invoked when we're activating a persistent event. */
static inline void
event_persist_closure(struct event_base *base, struct event *ev)
{
	void (*evcb_callback)(evutil_socket_t, short, void *);

	// Other fields of *ev that must be stored before executing the callback
	evutil_socket_t evcb_fd;
	short evcb_res;
	void *evcb_arg;

	/* reschedule the persistent event if we have a timeout. */
	if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
		/* If there was a timeout, we want it to run at an interval of
		 * ev_io_timeout after the last time it was _scheduled_ for,
		 * not ev_io_timeout after _now_.  If it fired for another
		 * reason, though, the timeout ought to start ticking _now_. */
		struct timeval run_at, relative_to, delay, now;
		ev_uint32_t usec_mask = 0;
		EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
			&ev->ev_io_timeout));
		gettime(base, &now);
		if (is_common_timeout(&ev->ev_timeout, base)) {
			delay = ev->ev_io_timeout;
			usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
			delay.tv_usec &= MICROSECONDS_MASK;
			if (ev->ev_res & EV_TIMEOUT) {
				relative_to = ev->ev_timeout;
				relative_to.tv_usec &= MICROSECONDS_MASK;
			} else {
				relative_to = now;
			}
		} else {
			delay = ev->ev_io_timeout;
			if (ev->ev_res & EV_TIMEOUT) {
				relative_to = ev->ev_timeout;
			} else {
				relative_to = now;
			}
		}
		evutil_timeradd(&relative_to, &delay, &run_at);
		if (evutil_timercmp(&run_at, &now, <)) {
			/* Looks like we missed at least one invocation due to
			 * a clock jump, not running the event loop for a
			 * while, really slow callbacks, or
			 * something.  Reschedule relative to now.
			 */
			evutil_timeradd(&now, &delay, &run_at);
		}
		run_at.tv_usec |= usec_mask;
		event_add_nolock_(ev, &run_at, 1);
	}

	// Save our callback before we release the lock
	evcb_callback = ev->ev_callback;
	evcb_fd = ev->ev_fd;
	evcb_res = ev->ev_res;
	evcb_arg = ev->ev_arg;

	// Release the lock
	EVBASE_RELEASE_LOCK(base, th_base_lock);

	// Execute the callback
	(evcb_callback)(evcb_fd, evcb_res, evcb_arg);
}

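/*
 * Usage sketch (editor's illustration): the rescheduling above is what makes
 * an EV_PERSIST timeout behave like a periodic timer.  The callback name and
 * the one-second period are assumptions made for the example.
 */
#if 0
static void
example_tick(evutil_socket_t fd, short what, void *arg)
{
	/* Called once per second; the event re-adds itself because it was
	 * created with EV_PERSIST. */
	(void)fd; (void)what; (void)arg;
}

static struct event *
example_make_periodic(struct event_base *base)
{
	struct timeval one_sec = { 1, 0 };
	struct event *ev = event_new(base, -1, EV_PERSIST, example_tick, NULL);
	if (ev)
		event_add(ev, &one_sec);
	return ev;
}
#endif
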
"EV_CLOSED " : " ", 1568275970Scy ev->ev_callback)); 1569275970Scy } else { 1570275970Scy event_queue_remove_active(base, evcb); 1571275970Scy event_debug(("event_process_active: event_callback %p, " 1572275970Scy "closure %d, call %p", 1573275970Scy evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback)); 1574275970Scy } 1575275970Scy 1576275970Scy if (!(evcb->evcb_flags & EVLIST_INTERNAL)) 1577275970Scy ++count; 1578275970Scy 1579275970Scy 1580275970Scy base->current_event = evcb; 1581275970Scy#ifndef EVENT__DISABLE_THREAD_SUPPORT 1582275970Scy base->current_event_waiters = 0; 1583275970Scy#endif 1584275970Scy 1585275970Scy switch (evcb->evcb_closure) { 1586275970Scy case EV_CLOSURE_EVENT_SIGNAL: 1587275970Scy EVUTIL_ASSERT(ev != NULL); 1588275970Scy event_signal_closure(base, ev); 1589275970Scy break; 1590275970Scy case EV_CLOSURE_EVENT_PERSIST: 1591275970Scy EVUTIL_ASSERT(ev != NULL); 1592275970Scy event_persist_closure(base, ev); 1593275970Scy break; 1594275970Scy case EV_CLOSURE_EVENT: { 1595285612Sdelphij void (*evcb_callback)(evutil_socket_t, short, void *); 1596275970Scy EVUTIL_ASSERT(ev != NULL); 1597285612Sdelphij evcb_callback = *ev->ev_callback; 1598275970Scy EVBASE_RELEASE_LOCK(base, th_base_lock); 1599275970Scy evcb_callback(ev->ev_fd, ev->ev_res, ev->ev_arg); 1600275970Scy } 1601275970Scy break; 1602275970Scy case EV_CLOSURE_CB_SELF: { 1603275970Scy void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb; 1604275970Scy EVBASE_RELEASE_LOCK(base, th_base_lock); 1605275970Scy evcb_selfcb(evcb, evcb->evcb_arg); 1606275970Scy } 1607275970Scy break; 1608275970Scy case EV_CLOSURE_EVENT_FINALIZE: 1609275970Scy case EV_CLOSURE_EVENT_FINALIZE_FREE: { 1610285612Sdelphij void (*evcb_evfinalize)(struct event *, void *); 1611285612Sdelphij int evcb_closure = evcb->evcb_closure; 1612275970Scy EVUTIL_ASSERT(ev != NULL); 1613275970Scy base->current_event = NULL; 1614285612Sdelphij evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize; 1615275970Scy EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING)); 1616275970Scy EVBASE_RELEASE_LOCK(base, th_base_lock); 1617275970Scy evcb_evfinalize(ev, ev->ev_arg); 1618275970Scy event_debug_note_teardown_(ev); 1619285612Sdelphij if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE) 1620275970Scy mm_free(ev); 1621275970Scy } 1622275970Scy break; 1623275970Scy case EV_CLOSURE_CB_FINALIZE: { 1624275970Scy void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize; 1625275970Scy base->current_event = NULL; 1626275970Scy EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING)); 1627275970Scy EVBASE_RELEASE_LOCK(base, th_base_lock); 1628275970Scy evcb_cbfinalize(evcb, evcb->evcb_arg); 1629275970Scy } 1630275970Scy break; 1631275970Scy default: 1632275970Scy EVUTIL_ASSERT(0); 1633275970Scy } 1634275970Scy 1635275970Scy EVBASE_ACQUIRE_LOCK(base, th_base_lock); 1636275970Scy base->current_event = NULL; 1637275970Scy#ifndef EVENT__DISABLE_THREAD_SUPPORT 1638275970Scy if (base->current_event_waiters) { 1639275970Scy base->current_event_waiters = 0; 1640275970Scy EVTHREAD_COND_BROADCAST(base->current_event_cond); 1641275970Scy } 1642275970Scy#endif 1643275970Scy 1644275970Scy if (base->event_break) 1645275970Scy return -1; 1646275970Scy if (count >= max_to_process) 1647275970Scy return count; 1648275970Scy if (count && endtime) { 1649275970Scy struct timeval now; 1650275970Scy update_time_cache(base); 1651275970Scy gettime(base, &now); 1652275970Scy if (evutil_timercmp(&now, endtime, 
/*
 * Active events are stored in priority queues.  Lower priorities are always
 * processed before higher priorities.  Low priority events can starve high
 * priority ones.
 */

static int
event_process_active(struct event_base *base)
{
	/* Caller must hold th_base_lock */
	struct evcallback_list *activeq = NULL;
	int i, c = 0;
	const struct timeval *endtime;
	struct timeval tv;
	const int maxcb = base->max_dispatch_callbacks;
	const int limit_after_prio = base->limit_callbacks_after_prio;
	if (base->max_dispatch_time.tv_sec >= 0) {
		update_time_cache(base);
		gettime(base, &tv);
		evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
		endtime = &tv;
	} else {
		endtime = NULL;
	}

	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
			base->event_running_priority = i;
			activeq = &base->activequeues[i];
			if (i < limit_after_prio)
				c = event_process_active_single_queue(base, activeq,
				    INT_MAX, NULL);
			else
				c = event_process_active_single_queue(base, activeq,
				    maxcb, endtime);
			if (c < 0) {
				goto done;
			} else if (c > 0)
				break; /* Processed a real event; do not
					* consider lower-priority events */
			/* If we get here, all of the events we processed
			 * were internal.  Continue. */
		}
	}

done:
	base->event_running_priority = -1;

	return c;
}

/*
 * Wait continuously for events.  We exit only if no events are left.
 */

int
event_dispatch(void)
{
	return (event_loop(0));
}

int
event_base_dispatch(struct event_base *event_base)
{
	return (event_base_loop(event_base, 0));
}

const char *
event_base_get_method(const struct event_base *base)
{
	EVUTIL_ASSERT(base);
	return (base->evsel->name);
}

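/*
 * Usage sketch (editor's illustration): priorities are configured on the
 * base before events are added; queue 0 runs first.  The two-queue split
 * below is an assumption made for the example.
 */
#if 0
static void
example_use_priorities(struct event_base *base,
    struct event *urgent, struct event *bulk)
{
	event_base_priority_init(base, 2);	/* queues 0 (urgent) and 1 */
	event_priority_set(urgent, 0);
	event_priority_set(bulk, 1);
	/* While queue 0 keeps producing active events, queue 1 is not
	 * looked at, so bulk work can be starved by a busy urgent queue. */
	event_base_dispatch(base);
}
#endif
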
/** Callback: used to implement event_base_loopexit by telling the event_base
 * that it's time to exit its loop. */
static void
event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event_base *base = arg;
	base->event_gotterm = 1;
}

int
event_loopexit(const struct timeval *tv)
{
	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
		    current_base, tv));
}

int
event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
{
	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
		    event_base, tv));
}

int
event_loopbreak(void)
{
	return (event_base_loopbreak(current_base));
}

int
event_base_loopbreak(struct event_base *event_base)
{
	int r = 0;
	if (event_base == NULL)
		return (-1);

	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
	event_base->event_break = 1;

	if (EVBASE_NEED_NOTIFY(event_base)) {
		r = evthread_notify_base(event_base);
	} else {
		r = (0);
	}
	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
	return r;
}

int
event_base_loopcontinue(struct event_base *event_base)
{
	int r = 0;
	if (event_base == NULL)
		return (-1);

	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
	event_base->event_continue = 1;

	if (EVBASE_NEED_NOTIFY(event_base)) {
		r = evthread_notify_base(event_base);
	} else {
		r = (0);
	}
	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
	return r;
}

int
event_base_got_break(struct event_base *event_base)
{
	int res;
	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
	res = event_base->event_break;
	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
	return res;
}

int
event_base_got_exit(struct event_base *event_base)
{
	int res;
	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
	res = event_base->event_gotterm;
	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
	return res;
}

/* not thread safe */

int
event_loop(int flags)
{
	return event_base_loop(current_base, flags);
}

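/*
 * Usage sketch (editor's illustration): event_base_loopexit() lets the loop
 * finish the callbacks that are already active, while event_base_loopbreak()
 * returns as soon as the current callback is done.  The two-second delay and
 * the names are assumptions made for the example.
 */
#if 0
static void
example_stop_loop_soon(struct event_base *base)
{
	struct timeval two_sec = { 2, 0 };
	/* Ask the loop to exit roughly two seconds from now. */
	event_base_loopexit(base, &two_sec);
}

static void
example_stop_loop_now(struct event_base *base)
{
	/* Ask the loop to exit right after the currently running callback. */
	event_base_loopbreak(base);
}
#endif
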
int
event_base_loop(struct event_base *base, int flags)
{
	const struct eventop *evsel = base->evsel;
	struct timeval tv;
	struct timeval *tv_p;
	int res, done, retval = 0;

	/* Grab the lock.  We will release it inside evsel.dispatch, and again
	 * as we invoke user callbacks. */
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	if (base->running_loop) {
		event_warnx("%s: reentrant invocation.  Only one event_base_loop"
		    " can run on each event_base at once.", __func__);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
		return -1;
	}

	base->running_loop = 1;

	clear_time_cache(base);

	if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
		evsig_set_base_(base);

	done = 0;

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	base->th_owner_id = EVTHREAD_GET_ID();
#endif

	base->event_gotterm = base->event_break = 0;

	while (!done) {
		base->event_continue = 0;
		base->n_deferreds_queued = 0;

		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			break;
		}

		if (base->event_break) {
			break;
		}

		tv_p = &tv;
		if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
			timeout_next(base, &tv_p);
		} else {
			/*
			 * if we have active events, we just poll new events
			 * without waiting.
			 */
			evutil_timerclear(&tv);
		}

		/* If we have no events, we just exit */
		if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
		    !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
			event_debug(("%s: no events registered.", __func__));
			retval = 1;
			goto done;
		}

		event_queue_make_later_events_active(base);

		clear_time_cache(base);

		res = evsel->dispatch(base, tv_p);

		if (res == -1) {
			event_debug(("%s: dispatch returned unsuccessfully.",
				__func__));
			retval = -1;
			goto done;
		}

		update_time_cache(base);

		timeout_process(base);

		if (N_ACTIVE_CALLBACKS(base)) {
			int n = event_process_active(base);
			if ((flags & EVLOOP_ONCE)
			    && N_ACTIVE_CALLBACKS(base) == 0
			    && n != 0)
				done = 1;
		} else if (flags & EVLOOP_NONBLOCK)
			done = 1;
	}
	event_debug(("%s: asked to terminate loop.", __func__));

done:
	clear_time_cache(base);
	base->running_loop = 0;

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	return (retval);
}

/* One-time callback to implement event_base_once: invokes the user callback,
 * then deletes the allocated storage */
static void
event_once_cb(evutil_socket_t fd, short events, void *arg)
{
	struct event_once *eonce = arg;

	(*eonce->cb)(fd, events, eonce->arg);
	EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
	LIST_REMOVE(eonce, next_once);
	EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
	event_debug_unassign(&eonce->ev);
	mm_free(eonce);
}

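/*
 * Usage sketch (editor's illustration): the flags accepted by
 * event_base_loop() change when the call returns.  These calls are
 * assumptions for the example, not code from this file.
 */
#if 0
static void
example_loop_modes(struct event_base *base)
{
	event_base_loop(base, 0);		/* run until no events remain */
	event_base_loop(base, EVLOOP_ONCE);	/* block, run one batch, return */
	event_base_loop(base, EVLOOP_NONBLOCK);	/* poll once without blocking */
}
#endif
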
/* not threadsafe, event scheduled once. */
int
event_once(evutil_socket_t fd, short events,
    void (*callback)(evutil_socket_t, short, void *),
    void *arg, const struct timeval *tv)
{
	return event_base_once(current_base, fd, events, callback, arg, tv);
}

/* Schedules an event once */
int
event_base_once(struct event_base *base, evutil_socket_t fd, short events,
    void (*callback)(evutil_socket_t, short, void *),
    void *arg, const struct timeval *tv)
{
	struct event_once *eonce;
	int res = 0;
	int activate = 0;

	/* We cannot support signals that just fire once, or persistent
	 * events. */
	if (events & (EV_SIGNAL|EV_PERSIST))
		return (-1);

	if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
		return (-1);

	eonce->cb = callback;
	eonce->arg = arg;

	if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
		evtimer_assign(&eonce->ev, base, event_once_cb, eonce);

		if (tv == NULL || ! evutil_timerisset(tv)) {
			/* If the event is going to become active immediately,
			 * don't put it on the timeout queue.  This is one
			 * idiom for scheduling a callback, so let's make
			 * it fast (and order-preserving). */
			activate = 1;
		}
	} else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
		events &= EV_READ|EV_WRITE|EV_CLOSED;

		event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
	} else {
		/* Bad event combination */
		mm_free(eonce);
		return (-1);
	}

	if (res == 0) {
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		if (activate)
			event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
		else
			res = event_add_nolock_(&eonce->ev, tv, 0);

		if (res != 0) {
			mm_free(eonce);
			/* Release the lock before the early return; the
			 * original fell through holding it. */
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			return (res);
		} else {
			LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
		}
		EVBASE_RELEASE_LOCK(base, th_base_lock);
	}

	return (0);
}

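/*
 * Usage sketch (editor's illustration): a one-shot callback half a second
 * from now.  The storage allocated above is reclaimed by event_once_cb()
 * after the callback runs.  Names and interval are assumptions.
 */
#if 0
static void
example_fire_once(evutil_socket_t fd, short what, void *arg)
{
	(void)fd; (void)what; (void)arg;	/* runs exactly once */
}

static int
example_schedule_once(struct event_base *base)
{
	struct timeval half_sec = { 0, 500000 };
	return event_base_once(base, -1, EV_TIMEOUT,
	    example_fire_once, NULL, &half_sec);
}
#endif
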
int
event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
{
	if (!base)
		base = current_base;
	if (arg == &event_self_cbarg_ptr_)
		arg = ev;

	event_debug_assert_not_added_(ev);

	ev->ev_base = base;

	ev->ev_callback = callback;
	ev->ev_arg = arg;
	ev->ev_fd = fd;
	ev->ev_events = events;
	ev->ev_res = 0;
	ev->ev_flags = EVLIST_INIT;
	ev->ev_ncalls = 0;
	ev->ev_pncalls = NULL;

	if (events & EV_SIGNAL) {
		if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
			event_warnx("%s: EV_SIGNAL is not compatible with "
			    "EV_READ, EV_WRITE or EV_CLOSED", __func__);
			return -1;
		}
		ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
	} else {
		if (events & EV_PERSIST) {
			evutil_timerclear(&ev->ev_io_timeout);
			ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
		} else {
			ev->ev_closure = EV_CLOSURE_EVENT;
		}
	}

	min_heap_elem_init_(ev);

	if (base != NULL) {
		/* by default, we put new events into the middle priority */
		ev->ev_pri = base->nactivequeues / 2;
	}

	event_debug_note_setup_(ev);

	return 0;
}

int
event_base_set(struct event_base *base, struct event *ev)
{
	/* Only innocent events may be assigned to a different base */
	if (ev->ev_flags != EVLIST_INIT)
		return (-1);

	event_debug_assert_is_setup_(ev);

	ev->ev_base = base;
	ev->ev_pri = base->nactivequeues/2;

	return (0);
}

void
event_set(struct event *ev, evutil_socket_t fd, short events,
    void (*callback)(evutil_socket_t, short, void *), void *arg)
{
	int r;
	r = event_assign(ev, current_base, fd, events, callback, arg);
	EVUTIL_ASSERT(r == 0);
}

void *
event_self_cbarg(void)
{
	return &event_self_cbarg_ptr_;
}

struct event *
event_base_get_running_event(struct event_base *base)
{
	struct event *ev = NULL;
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (EVBASE_IN_THREAD(base)) {
		struct event_callback *evcb = base->current_event;
		/* current_event may be NULL when no callback is running. */
		if (evcb && (evcb->evcb_flags & EVLIST_INIT))
			ev = event_callback_to_event(evcb);
	}
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return ev;
}

struct event *
event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
{
	struct event *ev;
	ev = mm_malloc(sizeof(struct event));
	if (ev == NULL)
		return (NULL);
	if (event_assign(ev, base, fd, events, cb, arg) < 0) {
		mm_free(ev);
		return (NULL);
	}

	return (ev);
}

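/*
 * Usage sketch (editor's illustration): event_self_cbarg() lets a
 * heap-allocated event receive itself as its callback argument, which is
 * handy for events that delete or free themselves.  Names are assumptions.
 */
#if 0
static void
example_self_cb(evutil_socket_t fd, short what, void *arg)
{
	struct event *self = arg;	/* the event created below */
	(void)fd; (void)what;
	event_free(self);		/* one-shot: tear ourselves down */
}

static struct event *
example_new_self_referencing(struct event_base *base, evutil_socket_t sock)
{
	return event_new(base, sock, EV_READ, example_self_cb,
	    event_self_cbarg());
}
#endif
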
void
event_free(struct event *ev)
{
	/* This is disabled so that events which have been finalized can
	 * still be a valid target for event_free(). */
	// event_debug_assert_is_setup_(ev);

	/* make sure that this event won't be coming back to haunt us. */
	event_del(ev);
	event_debug_note_teardown_(ev);
	mm_free(ev);

}

void
event_debug_unassign(struct event *ev)
{
	event_debug_assert_not_added_(ev);
	event_debug_note_teardown_(ev);

	ev->ev_flags &= ~EVLIST_INIT;
}

#define EVENT_FINALIZE_FREE_ 0x10000
static int
event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
{
	ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
	    EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;

	event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
	ev->ev_closure = closure;
	ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
	event_active_nolock_(ev, EV_FINALIZE, 1);
	ev->ev_flags |= EVLIST_FINALIZING;
	return 0;
}

static int
event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
{
	int r;
	struct event_base *base = ev->ev_base;
	if (EVUTIL_FAILURE_CHECK(!base)) {
		event_warnx("%s: event has no event_base set.", __func__);
		return -1;
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	r = event_finalize_nolock_(base, flags, ev, cb);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}

int
event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
{
	return event_finalize_impl_(flags, ev, cb);
}

int
event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
{
	return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
}

void
event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
{
	struct event *ev = NULL;
	if (evcb->evcb_flags & EVLIST_INIT) {
		ev = event_callback_to_event(evcb);
		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
	} else {
		event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
	}

	evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
	evcb->evcb_cb_union.evcb_cbfinalize = cb;
	event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
	evcb->evcb_flags |= EVLIST_FINALIZING;
}

void
event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	event_callback_finalize_nolock_(base, flags, evcb, cb);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

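/*
 * Usage sketch (editor's illustration): event_free_finalize() is the safe
 * way to tear down an event whose callback may be running in another thread;
 * the finalizer runs once no thread can still be inside the callback.
 * Names are assumptions.
 */
#if 0
static void
example_finalizer(struct event *ev, void *arg)
{
	/* By the time we get here, ev's callback can no longer be running.
	 * The FINALIZE_FREE variant frees ev itself afterwards, so only
	 * release the things the event pointed at. */
	free(arg);
	(void)ev;
}

static void
example_teardown(struct event *ev)
{
	event_free_finalize(0, ev, example_finalizer);
}
#endif
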
/** Internal: Finalize all of the n_cbs callbacks in evcbs.  The provided
 * callback will be invoked on *one of them*, after they have *all* been
 * finalized. */
int
event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
{
	int n_pending = 0, i;

	if (base == NULL)
		base = current_base;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	event_debug(("%s: %d events finalizing", __func__, n_cbs));

	/* At most one can be currently executing; the rest we just
	 * cancel...  But we always make sure that the finalize callback
	 * runs. */
	for (i = 0; i < n_cbs; ++i) {
		struct event_callback *evcb = evcbs[i];
		if (evcb == base->current_event) {
			event_callback_finalize_nolock_(base, 0, evcb, cb);
			++n_pending;
		} else {
			event_callback_cancel_nolock_(base, evcb, 0);
		}
	}

	if (n_pending == 0) {
		/* Just do the first one. */
		event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return 0;
}

/*
 * Sets the priority of an event - if the event is already active,
 * changing the priority will fail.
 */

int
event_priority_set(struct event *ev, int pri)
{
	event_debug_assert_is_setup_(ev);

	if (ev->ev_flags & EVLIST_ACTIVE)
		return (-1);
	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
		return (-1);

	ev->ev_pri = pri;

	return (0);
}

/*
 * Checks if a specific event is pending or scheduled.
 */

int
event_pending(const struct event *ev, short event, struct timeval *tv)
{
	int flags = 0;

	if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
		event_warnx("%s: event has no event_base set.", __func__);
		return 0;
	}

	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
	event_debug_assert_is_setup_(ev);

	if (ev->ev_flags & EVLIST_INSERTED)
		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
	if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
		flags |= ev->ev_res;
	if (ev->ev_flags & EVLIST_TIMEOUT)
		flags |= EV_TIMEOUT;

	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);

	/* See if there is a timeout that we should report */
	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
		struct timeval tmp = ev->ev_timeout;
		tmp.tv_usec &= MICROSECONDS_MASK;
		/* correctly remap to real time */
		evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
	}

	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);

	return (flags & event);
}

int
event_initialized(const struct event *ev)
{
	if (!(ev->ev_flags & EVLIST_INIT))
		return 0;

	return 1;
}

void
event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
{
	event_debug_assert_is_setup_(event);

	if (base_out)
		*base_out = event->ev_base;
	if (fd_out)
		*fd_out = event->ev_fd;
	if (events_out)
		*events_out = event->ev_events;
	if (callback_out)
		*callback_out = event->ev_callback;
	if (arg_out)
		*arg_out = event->ev_arg;
}

size_t
event_get_struct_event_size(void)
{
	return sizeof(struct event);
}

evutil_socket_t
event_get_fd(const struct event *ev)
{
	event_debug_assert_is_setup_(ev);
	return ev->ev_fd;
}

struct event_base *
event_get_base(const struct event *ev)
{
	event_debug_assert_is_setup_(ev);
	return ev->ev_base;
}

short
event_get_events(const struct event *ev)
{
	event_debug_assert_is_setup_(ev);
	return ev->ev_events;
}

event_callback_fn
event_get_callback(const struct event *ev)
{
	event_debug_assert_is_setup_(ev);
	return ev->ev_callback;
}

void *
event_get_callback_arg(const struct event *ev)
{
	event_debug_assert_is_setup_(ev);
	return ev->ev_arg;
}

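/*
 * Usage sketch (editor's illustration): querying whether an event still has
 * a timeout scheduled, and when it will fire (in wall-clock terms thanks to
 * the tv_clock_diff remapping above).  Names are assumptions.
 */
#if 0
static void
example_query_pending(struct event *ev)
{
	struct timeval expires;
	if (event_pending(ev, EV_TIMEOUT, &expires)) {
		/* 'expires' now holds the absolute time at which the
		 * timeout will fire. */
	}
	if (event_pending(ev, EV_READ|EV_WRITE, NULL)) {
		/* The event is added for I/O. */
	}
}
#endif
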
int
event_get_priority(const struct event *ev)
{
	event_debug_assert_is_setup_(ev);
	return ev->ev_pri;
}

int
event_add(struct event *ev, const struct timeval *tv)
{
	int res;

	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
		event_warnx("%s: event has no event_base set.", __func__);
		return -1;
	}

	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);

	res = event_add_nolock_(ev, tv, 0);

	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);

	return (res);
}

/* Helper callback: wake an event_base from another thread.  This version
 * works by writing a byte to one end of a socketpair, so that the event_base
 * listening on the other end will wake up as the corresponding event
 * triggers */
static int
evthread_notify_base_default(struct event_base *base)
{
	char buf[1];
	int r;
	buf[0] = (char) 0;
#ifdef _WIN32
	r = send(base->th_notify_fd[1], buf, 1, 0);
#else
	r = write(base->th_notify_fd[1], buf, 1);
#endif
	return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
}

#ifdef EVENT__HAVE_EVENTFD
/* Helper callback: wake an event_base from another thread.  This version
 * assumes that you have a working eventfd() implementation. */
static int
evthread_notify_base_eventfd(struct event_base *base)
{
	ev_uint64_t msg = 1;
	int r;
	do {
		r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
	} while (r < 0 && errno == EAGAIN);

	return (r < 0) ? -1 : 0;
}
#endif


/** Tell the thread currently running the event_loop for base (if any) that it
 * needs to stop waiting in its dispatch function (if it is) and process all
 * active callbacks. */
static int
evthread_notify_base(struct event_base *base)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (!base->th_notify_fn)
		return -1;
	if (base->is_notify_pending)
		return 0;
	base->is_notify_pending = 1;
	return base->th_notify_fn(base);
}

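/*
 * Standalone sketch of the pattern used above (editor's illustration, not
 * libevent code): the default notifier is the classic "self-pipe" trick.
 * Writing one byte makes the read end readable, so a poller blocked on it
 * returns.  Assumes POSIX write(2) from <unistd.h>; names are assumptions.
 */
#if 0
static int wakeup_fds[2];	/* wakeup_fds[0] is watched by the loop */

static void
wake_the_loop(void)
{
	char byte = 0;
	/* The loop's backend (poll/epoll/kqueue/...) sees wakeup_fds[0]
	 * become readable and stops waiting. */
	(void)write(wakeup_fds[1], &byte, 1);
}
#endif
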
/* Implementation function to remove a timeout on a currently pending event.
 */
int
event_remove_timer_nolock_(struct event *ev)
{
	struct event_base *base = ev->ev_base;

	EVENT_BASE_ASSERT_LOCKED(base);
	event_debug_assert_is_setup_(ev);

	event_debug(("event_remove_timer_nolock: event: %p", ev));

	/* If it's not pending on a timeout, we don't need to do anything. */
	if (ev->ev_flags & EVLIST_TIMEOUT) {
		event_queue_remove_timeout(base, ev);
		evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
	}

	return (0);
}

int
event_remove_timer(struct event *ev)
{
	int res;

	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
		event_warnx("%s: event has no event_base set.", __func__);
		return -1;
	}

	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);

	res = event_remove_timer_nolock_(ev);

	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);

	return (res);
}

/* Implementation function to add an event.  Works just like event_add,
 * except: 1) it requires that we have the lock.  2) if tv_is_absolute is set,
 * we treat tv as an absolute time, not as an interval to add to the current
 * time */
int
event_add_nolock_(struct event *ev, const struct timeval *tv,
    int tv_is_absolute)
{
	struct event_base *base = ev->ev_base;
	int res = 0;
	int notify = 0;

	EVENT_BASE_ASSERT_LOCKED(base);
	event_debug_assert_is_setup_(ev);

	event_debug((
		 "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
		 ev,
		 EV_SOCK_ARG(ev->ev_fd),
		 ev->ev_events & EV_READ ? "EV_READ " : " ",
		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
		 ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
		 tv ? "EV_TIMEOUT " : " ",
		 ev->ev_callback));

	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));

	if (ev->ev_flags & EVLIST_FINALIZING) {
		/* XXXX debug */
		return (-1);
	}

	/*
	 * prepare for timeout insertion further below, if we get a
	 * failure on any step, we should not change any state.
	 */
	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
		if (min_heap_reserve_(&base->timeheap,
			1 + min_heap_size_(&base->timeheap)) == -1)
			return (-1);  /* ENOMEM == errno */
	}

	/* If the main thread is currently executing a signal event's
	 * callback, and we are not the main thread, then we want to wait
	 * until the callback is done before we mess with the event, or else
	 * we can race on ev_ncalls and ev_pncalls below. */
#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (base->current_event == event_to_event_callback(ev) &&
	    (ev->ev_events & EV_SIGNAL)
	    && !EVBASE_IN_THREAD(base)) {
		++base->current_event_waiters;
		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
	}
#endif

	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
			res = evmap_io_add_(base, ev->ev_fd, ev);
		else if (ev->ev_events & EV_SIGNAL)
			res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
		if (res != -1)
			event_queue_insert_inserted(base, ev);
		if (res == 1) {
			/* evmap says we need to notify the main thread. */
			notify = 1;
			res = 0;
		}
	}

	/*
	 * we should change the timeout state only if the previous event
	 * addition succeeded.
	 */
	if (res != -1 && tv != NULL) {
		struct timeval now;
		int common_timeout;
#ifdef USE_REINSERT_TIMEOUT
		int was_common;
		int old_timeout_idx;
#endif

		/*
		 * for persistent timeout events, we remember the
		 * timeout value and re-add the event.
		 *
		 * If tv_is_absolute, this was already set.
		 */
		if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
			ev->ev_io_timeout = *tv;

#ifndef USE_REINSERT_TIMEOUT
		if (ev->ev_flags & EVLIST_TIMEOUT) {
			event_queue_remove_timeout(base, ev);
		}
#endif

		/* Check if it is active due to a timeout.  Rescheduling
		 * this timeout before the callback can be executed
		 * removes it from the active list. */
		if ((ev->ev_flags & EVLIST_ACTIVE) &&
		    (ev->ev_res & EV_TIMEOUT)) {
			if (ev->ev_events & EV_SIGNAL) {
				/* See if we are just active executing
				 * this event in a loop
				 */
				if (ev->ev_ncalls && ev->ev_pncalls) {
					/* Abort loop */
					*ev->ev_pncalls = 0;
				}
			}

			event_queue_remove_active(base, event_to_event_callback(ev));
		}

		gettime(base, &now);

		common_timeout = is_common_timeout(tv, base);
#ifdef USE_REINSERT_TIMEOUT
		was_common = is_common_timeout(&ev->ev_timeout, base);
		old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
#endif

		if (tv_is_absolute) {
			ev->ev_timeout = *tv;
		} else if (common_timeout) {
			struct timeval tmp = *tv;
			tmp.tv_usec &= MICROSECONDS_MASK;
			evutil_timeradd(&now, &tmp, &ev->ev_timeout);
			ev->ev_timeout.tv_usec |=
			    (tv->tv_usec & ~MICROSECONDS_MASK);
		} else {
			evutil_timeradd(&now, tv, &ev->ev_timeout);
		}

		event_debug((
			 "event_add: event %p, timeout in %d seconds %d useconds, call %p",
			 ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));

#ifdef USE_REINSERT_TIMEOUT
		event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
#else
		event_queue_insert_timeout(base, ev);
#endif

		if (common_timeout) {
			struct common_timeout_list *ctl =
			    get_common_timeout_list(base, &ev->ev_timeout);
			if (ev == TAILQ_FIRST(&ctl->events)) {
				common_timeout_schedule(ctl, &now, ev);
			}
		} else {
			struct event* top = NULL;
			/* See if the earliest timeout is now earlier than it
			 * was before: if so, we will need to tell the main
			 * thread to wake up earlier than it would otherwise.
			 * We double check the timeout of the top element to
			 * handle time distortions due to system suspension.
			 */
			if (min_heap_elt_is_top_(ev))
				notify = 1;
			else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
				 evutil_timercmp(&top->ev_timeout, &now, <))
				notify = 1;
		}
	}

	/* if we are not in the right thread, we need to wake up the loop */
	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);

	event_debug_note_add_(ev);

	return (res);
}

static int
event_del_(struct event *ev, int blocking)
{
	int res;

	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
		event_warnx("%s: event has no event_base set.", __func__);
		return -1;
	}

	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);

	res = event_del_nolock_(ev, blocking);

	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);

	return (res);
}

int
event_del(struct event *ev)
{
	return event_del_(ev, EVENT_DEL_AUTOBLOCK);
}

int
event_del_block(struct event *ev)
{
	return event_del_(ev, EVENT_DEL_BLOCK);
}

int
event_del_noblock(struct event *ev)
{
	return event_del_(ev, EVENT_DEL_NOBLOCK);
}

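/*
 * Usage sketch (editor's illustration): the three deletion flavors differ
 * only in whether they wait for a callback that is running in another
 * thread.  Freeing the callback argument is only safe after a blocking
 * delete.  Names are assumptions.
 */
#if 0
static void
example_delete_from_other_thread(struct event *ev, void *arg_to_free)
{
	/* Blocks until ev's callback (if running right now) has returned,
	 * so the argument cannot still be in use afterwards. */
	event_del_block(ev);
	free(arg_to_free);

	/* event_del_noblock() would return immediately instead, and
	 * event_del() picks a default based on the event's EV_FINALIZE
	 * flag. */
}
#endif
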
/** Helper for event_del: always called with th_base_lock held.
 *
 * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
 * EVEN_IF_FINALIZING} values. See those for more information.
 */
int
event_del_nolock_(struct event *ev, int blocking)
{
	struct event_base *base;
	int res = 0, notify = 0;

	event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
		ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)
		return (-1);

	EVENT_BASE_ASSERT_LOCKED(ev->ev_base);

	if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
		if (ev->ev_flags & EVLIST_FINALIZING) {
			/* XXXX Debug */
			return 0;
		}
	}

	/* If the main thread is currently executing this event's callback,
	 * and we are not the main thread, then we want to wait until the
	 * callback is done before we start removing the event.  That way,
	 * when this function returns, it will be safe to free the
	 * user-supplied argument. */
	base = ev->ev_base;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (blocking != EVENT_DEL_NOBLOCK &&
	    base->current_event == event_to_event_callback(ev) &&
	    !EVBASE_IN_THREAD(base) &&
	    (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
		++base->current_event_waiters;
		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
	}
#endif

	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if we are just active executing this event in a loop */
	if (ev->ev_events & EV_SIGNAL) {
		if (ev->ev_ncalls && ev->ev_pncalls) {
			/* Abort loop */
			*ev->ev_pncalls = 0;
		}
	}

	if (ev->ev_flags & EVLIST_TIMEOUT) {
		/* NOTE: We never need to notify the main thread because of a
		 * deleted timeout event: all that could happen if we don't is
		 * that the dispatch loop might wake up too early.  But the
		 * point of notifying the main thread _is_ to wake up the
		 * dispatch loop early anyway, so we wouldn't gain anything by
		 * doing it.
		 */
		event_queue_remove_timeout(base, ev);
	}

	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove_active(base, event_to_event_callback(ev));
	else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
		event_queue_remove_active_later(base, event_to_event_callback(ev));

	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove_inserted(base, ev);
		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
			res = evmap_io_del_(base, ev->ev_fd, ev);
		else
			res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
		if (res == 1) {
			/* evmap says we need to notify the main thread. */
			notify = 1;
			res = 0;
		}
	}

	/* if we are not in the right thread, we need to wake up the loop */
	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);

	event_debug_note_del_(ev);

	return (res);
}

void
event_active(struct event *ev, int res, short ncalls)
{
	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
		event_warnx("%s: event has no event_base set.", __func__);
		return;
	}

	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);

	event_debug_assert_is_setup_(ev);

	event_active_nolock_(ev, res, ncalls);

	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
}


void
event_active_nolock_(struct event *ev, int res, short ncalls)
{
	struct event_base *base;

	event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
		ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));

	base = ev->ev_base;
	EVENT_BASE_ASSERT_LOCKED(base);

	if (ev->ev_flags & EVLIST_FINALIZING) {
		/* XXXX debug */
		return;
	}

	switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
	default:
	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
		EVUTIL_ASSERT(0);
		break;
	case EVLIST_ACTIVE:
		/* We get different kinds of events, add them together */
		ev->ev_res |= res;
		return;
	case EVLIST_ACTIVE_LATER:
		ev->ev_res |= res;
		break;
	case 0:
		ev->ev_res = res;
		break;
	}

	if (ev->ev_pri < base->event_running_priority)
		base->event_continue = 1;

	if (ev->ev_events & EV_SIGNAL) {
#ifndef EVENT__DISABLE_THREAD_SUPPORT
		if (base->current_event == event_to_event_callback(ev) &&
		    !EVBASE_IN_THREAD(base)) {
			++base->current_event_waiters;
			EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
		}
#endif
		ev->ev_ncalls = ncalls;
		ev->ev_pncalls = NULL;
	}

	event_callback_activate_nolock_(base, event_to_event_callback(ev));
}

void
event_active_later_(struct event *ev, int res)
{
	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
	event_active_later_nolock_(ev, res);
	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
}

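/*
 * Usage sketch (editor's illustration): event_active() injects an event into
 * the active queues without its fd or timeout having triggered; it is safe
 * to call from another thread, which wakes the dispatch loop through the
 * notify machinery above.  Names are assumptions.
 */
#if 0
static void
example_trigger_manually(struct event *ev)
{
	/* Pretend the event's fd became readable; ncalls is only
	 * meaningful for signal events, so pass 0 here. */
	event_active(ev, EV_READ, 0);
}
#endif
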
2886275970Scy } 2887275970Scy 2888275970Scy ev->ev_res = res; 2889275970Scy 2890275970Scy event_callback_activate_later_nolock_(base, event_to_event_callback(ev)); 2891275970Scy} 2892275970Scy 2893275970Scyint 2894275970Scyevent_callback_activate_(struct event_base *base, 2895275970Scy struct event_callback *evcb) 2896275970Scy{ 2897275970Scy int r; 2898275970Scy EVBASE_ACQUIRE_LOCK(base, th_base_lock); 2899275970Scy r = event_callback_activate_nolock_(base, evcb); 2900275970Scy EVBASE_RELEASE_LOCK(base, th_base_lock); 2901275970Scy return r; 2902275970Scy} 2903275970Scy 2904275970Scyint 2905275970Scyevent_callback_activate_nolock_(struct event_base *base, 2906275970Scy struct event_callback *evcb) 2907275970Scy{ 2908275970Scy int r = 1; 2909275970Scy 2910275970Scy if (evcb->evcb_flags & EVLIST_FINALIZING) 2911275970Scy return 0; 2912275970Scy 2913275970Scy switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) { 2914275970Scy default: 2915275970Scy EVUTIL_ASSERT(0); 2916275970Scy case EVLIST_ACTIVE_LATER: 2917275970Scy event_queue_remove_active_later(base, evcb); 2918275970Scy r = 0; 2919275970Scy break; 2920275970Scy case EVLIST_ACTIVE: 2921275970Scy return 0; 2922275970Scy case 0: 2923275970Scy break; 2924275970Scy } 2925275970Scy 2926275970Scy event_queue_insert_active(base, evcb); 2927275970Scy 2928275970Scy if (EVBASE_NEED_NOTIFY(base)) 2929275970Scy evthread_notify_base(base); 2930275970Scy 2931275970Scy return r; 2932275970Scy} 2933275970Scy 2934275970Scyvoid 2935275970Scyevent_callback_activate_later_nolock_(struct event_base *base, 2936275970Scy struct event_callback *evcb) 2937275970Scy{ 2938275970Scy if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) 2939275970Scy return; 2940275970Scy 2941275970Scy event_queue_insert_active_later(base, evcb); 2942275970Scy if (EVBASE_NEED_NOTIFY(base)) 2943275970Scy evthread_notify_base(base); 2944275970Scy} 2945275970Scy 2946275970Scyvoid 2947275970Scyevent_callback_init_(struct event_base *base, 2948275970Scy struct event_callback *cb) 2949275970Scy{ 2950275970Scy memset(cb, 0, sizeof(*cb)); 2951275970Scy cb->evcb_pri = base->nactivequeues - 1; 2952275970Scy} 2953275970Scy 2954275970Scyint 2955275970Scyevent_callback_cancel_(struct event_base *base, 2956275970Scy struct event_callback *evcb) 2957275970Scy{ 2958275970Scy int r; 2959275970Scy EVBASE_ACQUIRE_LOCK(base, th_base_lock); 2960275970Scy r = event_callback_cancel_nolock_(base, evcb, 0); 2961275970Scy EVBASE_RELEASE_LOCK(base, th_base_lock); 2962275970Scy return r; 2963275970Scy} 2964275970Scy 2965275970Scyint 2966275970Scyevent_callback_cancel_nolock_(struct event_base *base, 2967275970Scy struct event_callback *evcb, int even_if_finalizing) 2968275970Scy{ 2969275970Scy if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing) 2970275970Scy return 0; 2971275970Scy 2972275970Scy if (evcb->evcb_flags & EVLIST_INIT) 2973275970Scy return event_del_nolock_(event_callback_to_event(evcb), 2974275970Scy even_if_finalizing ? 
int
event_callback_cancel_nolock_(struct event_base *base,
    struct event_callback *evcb, int even_if_finalizing)
{
	if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
		return 0;

	if (evcb->evcb_flags & EVLIST_INIT)
		return event_del_nolock_(event_callback_to_event(evcb),
		    even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);

	switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
	default:
	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
		EVUTIL_ASSERT(0);
		break;
	case EVLIST_ACTIVE:
		/* We get different kinds of events, add them together */
		event_queue_remove_active(base, evcb);
		return 0;
	case EVLIST_ACTIVE_LATER:
		event_queue_remove_active_later(base, evcb);
		break;
	case 0:
		break;
	}

	return 0;
}

void
event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
{
	memset(cb, 0, sizeof(*cb));
	cb->evcb_cb_union.evcb_selfcb = fn;
	cb->evcb_arg = arg;
	cb->evcb_pri = priority;
	cb->evcb_closure = EV_CLOSURE_CB_SELF;
}

void
event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
{
	cb->evcb_pri = priority;
}

void
event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
{
	if (!base)
		base = current_base;
	event_callback_cancel_(base, cb);
}

#define MAX_DEFERREDS_QUEUED 32
int
event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
{
	int r = 1;
	if (!base)
		base = current_base;
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
		event_callback_activate_later_nolock_(base, cb);
	} else {
		++base->n_deferreds_queued;
		r = event_callback_activate_nolock_(base, cb);
	}
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}

static int
timeout_next(struct event_base *base, struct timeval **tv_p)
{
	/* Caller must hold th_base_lock */
	struct timeval now;
	struct event *ev;
	struct timeval *tv = *tv_p;
	int res = 0;

	ev = min_heap_top_(&base->timeheap);

	if (ev == NULL) {
		/* if no time-based events are active wait for I/O */
		*tv_p = NULL;
		goto out;
	}

	if (gettime(base, &now) == -1) {
		res = -1;
		goto out;
	}

	if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
		evutil_timerclear(tv);
		goto out;
	}

	evutil_timersub(&ev->ev_timeout, &now, tv);

	EVUTIL_ASSERT(tv->tv_sec >= 0);
	EVUTIL_ASSERT(tv->tv_usec >= 0);
	event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));

out:
	return (res);
}

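/*
 * Internal-usage sketch (editor's illustration): how other libevent modules
 * typically drive the deferred-callback API defined above.  The struct and
 * names are assumptions made for the example.
 */
#if 0
struct example_job {
	struct event_callback deferred;
	int work_item;
};

static void
example_deferred_fn(struct event_callback *cb, void *arg)
{
	struct example_job *job = arg;
	/* Runs from the event loop, outside the lock. */
	(void)cb; (void)job;
}

static void
example_defer(struct event_base *base, struct example_job *job)
{
	event_deferred_cb_init_(&job->deferred, 0, example_deferred_fn, job);
	/* Queued at priority 0; capped by MAX_DEFERREDS_QUEUED per loop
	 * iteration so that deferred work cannot starve I/O. */
	event_deferred_cb_schedule_(base, &job->deferred);
}
#endif
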

static int
timeout_next(struct event_base *base, struct timeval **tv_p)
{
	/* Caller must hold th_base_lock */
	struct timeval now;
	struct event *ev;
	struct timeval *tv = *tv_p;
	int res = 0;

	ev = min_heap_top_(&base->timeheap);

	if (ev == NULL) {
		/* if no time-based events are active wait for I/O */
		*tv_p = NULL;
		goto out;
	}

	if (gettime(base, &now) == -1) {
		res = -1;
		goto out;
	}

	if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
		evutil_timerclear(tv);
		goto out;
	}

	evutil_timersub(&ev->ev_timeout, &now, tv);

	EVUTIL_ASSERT(tv->tv_sec >= 0);
	EVUTIL_ASSERT(tv->tv_usec >= 0);
	event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));

out:
	return (res);
}
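
/*
 * Worked example for timeout_next() (illustrative values): if the
 * earliest entry in the timeout heap fires at t = 5.000000 and
 * gettime() reports now = 4.250000, *tv_p becomes 0.750000 and the
 * backend sleeps at most that long; if the deadline has already passed,
 * *tv_p is cleared to zero so the backend polls without blocking.
 */
#if 0
	struct timeval deadline = { 5, 0 };	/* earliest heap entry */
	struct timeval now = { 4, 250000 };	/* from gettime() */
	struct timeval wait;

	evutil_timersub(&deadline, &now, &wait);	/* wait == 0.750000 */
#endif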

/* Activate every event whose timeout has elapsed. */
static void
timeout_process(struct event_base *base)
{
	/* Caller must hold lock. */
	struct timeval now;
	struct event *ev;

	if (min_heap_empty_(&base->timeheap)) {
		return;
	}

	gettime(base, &now);

	while ((ev = min_heap_top_(&base->timeheap))) {
		if (evutil_timercmp(&ev->ev_timeout, &now, >))
			break;

		/* delete this event from the I/O queues */
		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);

		event_debug(("timeout_process: event: %p, call %p",
		    ev, ev->ev_callback));
		event_active_nolock_(ev, EV_TIMEOUT, 1);
	}
}

#if (EVLIST_INTERNAL >> 4) != 1
#error "Mismatch for value of EVLIST_INTERNAL"
#endif

#ifndef MAX
#define MAX(a,b) (((a)>(b))?(a):(b))
#endif

#define MAX_EVENT_COUNT(var, v) var = MAX(var, v)

/* These are a fancy way to spell
     if (flags & EVLIST_INTERNAL)
         base->event_count--/++;
*/
#define DECR_EVENT_COUNT(base,flags) \
	((base)->event_count -= (~((flags) >> 4) & 1))
#define INCR_EVENT_COUNT(base,flags) do {				\
	((base)->event_count += (~((flags) >> 4) & 1));			\
	MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count);	\
} while (0)
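
/*
 * Worked example for the macros above: the #error check guarantees
 * EVLIST_INTERNAL == 0x10, so bit 4 of the flags says whether an event
 * is internal, and (~((flags) >> 4) & 1) evaluates to 0 for internal
 * events and to 1 for user events.  Internal events therefore never
 * perturb event_count.
 */
#if 0
	/* user event: bit 4 clear, count changes by 1 */
	EVUTIL_ASSERT((~((0) >> 4) & 1) == 1);
	/* internal event: bit 4 set, count changes by 0 */
	EVUTIL_ASSERT((~((EVLIST_INTERNAL) >> 4) & 1) == 0);
#endif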

static void
event_queue_remove_inserted(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
		return;
	}
	DECR_EVENT_COUNT(base, ev->ev_flags);
	ev->ev_flags &= ~EVLIST_INSERTED;
}
static void
event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
		event_errx(1, "%s: %p not on queue %x", __func__,
		    evcb, EVLIST_ACTIVE);
		return;
	}
	DECR_EVENT_COUNT(base, evcb->evcb_flags);
	evcb->evcb_flags &= ~EVLIST_ACTIVE;
	base->event_count_active--;

	TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
	    evcb, evcb_active_next);
}
static void
event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
		event_errx(1, "%s: %p not on queue %x", __func__,
		    evcb, EVLIST_ACTIVE_LATER);
		return;
	}
	DECR_EVENT_COUNT(base, evcb->evcb_flags);
	evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
	base->event_count_active--;

	TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
}
static void
event_queue_remove_timeout(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
		return;
	}
	DECR_EVENT_COUNT(base, ev->ev_flags);
	ev->ev_flags &= ~EVLIST_TIMEOUT;

	if (is_common_timeout(&ev->ev_timeout, base)) {
		struct common_timeout_list *ctl =
		    get_common_timeout_list(base, &ev->ev_timeout);
		TAILQ_REMOVE(&ctl->events, ev,
		    ev_timeout_pos.ev_next_with_common_timeout);
	} else {
		min_heap_erase_(&base->timeheap, ev);
	}
}

#ifdef USE_REINSERT_TIMEOUT
/* Remove and reinsert 'ev' into the timeout queue. */
static void
event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
    int was_common, int is_common, int old_timeout_idx)
{
	struct common_timeout_list *ctl;
	if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
		event_queue_insert_timeout(base, ev);
		return;
	}

	switch ((was_common<<1) | is_common) {
	case 3: /* Changing from one common timeout to another */
		ctl = base->common_timeout_queues[old_timeout_idx];
		TAILQ_REMOVE(&ctl->events, ev,
		    ev_timeout_pos.ev_next_with_common_timeout);
		ctl = get_common_timeout_list(base, &ev->ev_timeout);
		insert_common_timeout_inorder(ctl, ev);
		break;
	case 2: /* Was common; is no longer common */
		ctl = base->common_timeout_queues[old_timeout_idx];
		TAILQ_REMOVE(&ctl->events, ev,
		    ev_timeout_pos.ev_next_with_common_timeout);
		min_heap_push_(&base->timeheap, ev);
		break;
	case 1: /* Wasn't common; has become common. */
		min_heap_erase_(&base->timeheap, ev);
		ctl = get_common_timeout_list(base, &ev->ev_timeout);
		insert_common_timeout_inorder(ctl, ev);
		break;
	case 0: /* Was in the heap; is still in the heap. */
		min_heap_adjust_(&base->timeheap, ev);
		break;
	default:
		EVUTIL_ASSERT(0); /* unreachable */
		break;
	}
}
#endif

/* Add 'ev' to the common timeout list in 'ctl'. */
static void
insert_common_timeout_inorder(struct common_timeout_list *ctl,
    struct event *ev)
{
	struct event *e;
	/* By all logic, we should just be able to append 'ev' to the end of
	 * ctl->events, since the timeout on each 'ev' is set to {the common
	 * timeout} + {the time when we add the event}, and so the events
	 * should arrive in order of their timeouts.  But just in case
	 * there's some wacky threading issue going on, we do a search from
	 * the end of the queue to find the right insertion point.
	 */
	TAILQ_FOREACH_REVERSE(e, &ctl->events,
	    event_list, ev_timeout_pos.ev_next_with_common_timeout) {
		/* This timercmp is a little sneaky, since both ev and e have
		 * magic values in tv_usec.  Fortunately, they ought to have
		 * the _same_ magic values in tv_usec.  Let's assert for that.
		 */
		EVUTIL_ASSERT(
		    is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
		if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
			TAILQ_INSERT_AFTER(&ctl->events, e, ev,
			    ev_timeout_pos.ev_next_with_common_timeout);
			return;
		}
	}
	TAILQ_INSERT_HEAD(&ctl->events, ev,
	    ev_timeout_pos.ev_next_with_common_timeout);
}
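
/*
 * Usage sketch (application-level; 'base' and 'ev' are assumed to be
 * supplied by the caller): the common-timeout machinery above is driven
 * through the public event_base_init_common_timeout() API.  Events added
 * with the returned timeval share one entry in common_timeout_queues
 * instead of costing one min-heap entry each, which is cheaper when many
 * events use the same duration.
 */
#if 0
	struct timeval tv = { 10, 0 };
	const struct timeval *common =
	    event_base_init_common_timeout(base, &tv);

	if (common)
		event_add(ev, common);	/* 'ev' now uses the shared queue */
#endif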

static void
event_queue_insert_inserted(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd));
		return;
	}

	INCR_EVENT_COUNT(base, ev->ev_flags);

	ev->ev_flags |= EVLIST_INSERTED;
}

static void
event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (evcb->evcb_flags & EVLIST_ACTIVE) {
		/* Double insertion is possible for active events */
		return;
	}

	INCR_EVENT_COUNT(base, evcb->evcb_flags);

	evcb->evcb_flags |= EVLIST_ACTIVE;

	base->event_count_active++;
	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
	TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
	    evcb, evcb_active_next);
}

static void
event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
		/* Double insertion is possible */
		return;
	}

	INCR_EVENT_COUNT(base, evcb->evcb_flags);
	evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
	base->event_count_active++;
	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
	TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
}

static void
event_queue_insert_timeout(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd));
		return;
	}

	INCR_EVENT_COUNT(base, ev->ev_flags);

	ev->ev_flags |= EVLIST_TIMEOUT;

	if (is_common_timeout(&ev->ev_timeout, base)) {
		struct common_timeout_list *ctl =
		    get_common_timeout_list(base, &ev->ev_timeout);
		insert_common_timeout_inorder(ctl, ev);
	} else {
		min_heap_push_(&base->timeheap, ev);
	}
}

static void
event_queue_make_later_events_active(struct event_base *base)
{
	struct event_callback *evcb;
	EVENT_BASE_ASSERT_LOCKED(base);

	while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
		TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
		evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
		EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
		TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
		base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
	}
}

/* Functions for debugging */

const char *
event_get_version(void)
{
	return (EVENT__VERSION);
}

ev_uint32_t
event_get_version_number(void)
{
	return (EVENT__NUMERIC_VERSION);
}

/*
 * No thread-safe interface needed - the information should be the same
 * for all threads.
 */

const char *
event_get_method(void)
{
	return (current_base->evsel->name);
}

#ifndef EVENT__DISABLE_MM_REPLACEMENT
static void *(*mm_malloc_fn_)(size_t sz) = NULL;
static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
static void (*mm_free_fn_)(void *p) = NULL;

void *
event_mm_malloc_(size_t sz)
{
	if (sz == 0)
		return NULL;

	if (mm_malloc_fn_)
		return mm_malloc_fn_(sz);
	else
		return malloc(sz);
}

void *
event_mm_calloc_(size_t count, size_t size)
{
	if (count == 0 || size == 0)
		return NULL;

	if (mm_malloc_fn_) {
		size_t sz = count * size;
		void *p = NULL;
		if (count > EV_SIZE_MAX / size)
			goto error;
		p = mm_malloc_fn_(sz);
		if (p)
			return memset(p, 0, sz);
	} else {
		void *p = calloc(count, size);
#ifdef _WIN32
		/* Windows calloc doesn't reliably set ENOMEM */
		if (p == NULL)
			goto error;
#endif
		return p;
	}

error:
	errno = ENOMEM;
	return NULL;
}
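
/*
 * Worked example for the overflow check in event_mm_calloc_() above:
 * with a 32-bit size_t, count = 65536 and size = 65537 would make
 * count * size wrap around to 65536, so the product is only trusted
 * after verifying count <= EV_SIZE_MAX / size.
 */
#if 0
	/* On a 32-bit size_t, 65536 * 65537 == 2^32 + 65536, which wraps
	 * to 65536; the guard below rejects it before allocating. */
	size_t count = 65536, size = 65537;

	if (count > EV_SIZE_MAX / size)
		return NULL;	/* refuse: multiplication would overflow */
#endif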

char *
event_mm_strdup_(const char *str)
{
	if (!str) {
		errno = EINVAL;
		return NULL;
	}

	if (mm_malloc_fn_) {
		size_t ln = strlen(str);
		void *p = NULL;
		if (ln == EV_SIZE_MAX)
			goto error;
		p = mm_malloc_fn_(ln+1);
		if (p)
			return memcpy(p, str, ln+1);
	} else
#ifdef _WIN32
		return _strdup(str);
#else
		return strdup(str);
#endif

error:
	errno = ENOMEM;
	return NULL;
}

void *
event_mm_realloc_(void *ptr, size_t sz)
{
	if (mm_realloc_fn_)
		return mm_realloc_fn_(ptr, sz);
	else
		return realloc(ptr, sz);
}

void
event_mm_free_(void *ptr)
{
	if (mm_free_fn_)
		mm_free_fn_(ptr);
	else
		free(ptr);
}

void
event_set_mem_functions(void *(*malloc_fn)(size_t sz),
    void *(*realloc_fn)(void *ptr, size_t sz),
    void (*free_fn)(void *ptr))
{
	mm_malloc_fn_ = malloc_fn;
	mm_realloc_fn_ = realloc_fn;
	mm_free_fn_ = free_fn;
}
#endif
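
/*
 * Usage sketch (application code; "example_" names are hypothetical): a
 * counting allocator installed via event_set_mem_functions().  This has
 * to happen before any other libevent call, because memory obtained from
 * one allocator must never be handed to another one's free().
 */
#if 0
static size_t example_malloc_calls;

static void *
example_malloc(size_t sz)
{
	++example_malloc_calls;
	return malloc(sz);
}

static void *
example_realloc(void *p, size_t sz)
{
	return realloc(p, sz);
}

static void
example_free(void *p)
{
	free(p);
}

/* First thing in main(), before event_base_new() and friends:
 *	event_set_mem_functions(example_malloc, example_realloc, example_free);
 */
#endif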

#ifdef EVENT__HAVE_EVENTFD
static void
evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
{
	ev_uint64_t msg;
	ev_ssize_t r;
	struct event_base *base = arg;

	r = read(fd, (void*) &msg, sizeof(msg));
	if (r<0 && errno != EAGAIN) {
		event_sock_warn(fd, "Error reading from eventfd");
	}
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->is_notify_pending = 0;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
#endif

static void
evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
{
	unsigned char buf[1024];
	struct event_base *base = arg;
#ifdef _WIN32
	while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
		;
#else
	while (read(fd, (char*)buf, sizeof(buf)) > 0)
		;
#endif

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->is_notify_pending = 0;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

int
evthread_make_base_notifiable(struct event_base *base)
{
	int r;
	if (!base)
		return -1;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	r = evthread_make_base_notifiable_nolock_(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}

static int
evthread_make_base_notifiable_nolock_(struct event_base *base)
{
	void (*cb)(evutil_socket_t, short, void *);
	int (*notify)(struct event_base *);

	if (base->th_notify_fn != NULL) {
		/* The base is already notifiable: we're doing fine. */
		return 0;
	}

#if defined(EVENT__HAVE_WORKING_KQUEUE)
	if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
		base->th_notify_fn = event_kq_notify_base_;
		/* No need to add an event here; the backend can wake
		 * itself up just fine. */
		return 0;
	}
#endif

#ifdef EVENT__HAVE_EVENTFD
	base->th_notify_fd[0] = evutil_eventfd_(0,
	    EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
	if (base->th_notify_fd[0] >= 0) {
		base->th_notify_fd[1] = -1;
		notify = evthread_notify_base_eventfd;
		cb = evthread_notify_drain_eventfd;
	} else
#endif
	if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
		notify = evthread_notify_base_default;
		cb = evthread_notify_drain_default;
	} else {
		return -1;
	}

	base->th_notify_fn = notify;

	/* prepare an event that we can use for wakeup */
	event_assign(&base->th_notify, base, base->th_notify_fd[0],
	    EV_READ|EV_PERSIST, cb, base);

	/* we need to mark this as an internal event */
	base->th_notify.ev_flags |= EVLIST_INTERNAL;
	event_priority_set(&base->th_notify, 0);

	return event_add_nolock_(&base->th_notify, NULL, 0);
}
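
/*
 * Illustrative sketch (application-level; the helper name is
 * hypothetical): once a base is notifiable, a thread other than the one
 * blocked in event_base_loop() can make an event active; libevent then
 * writes to the eventfd/pipe prepared above so the sleeping backend
 * wakes up and runs the callback promptly.
 */
#if 0
/* Called from a worker thread: */
static void
example_wake_loop(struct event *ev)
{
	/* Marks 'ev' active with EV_TIMEOUT as its result; the loop
	 * thread is poked through th_notify_fd. */
	event_active(ev, EV_TIMEOUT, 1);
}
#endif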

int
event_base_foreach_event_nolock_(struct event_base *base,
    event_base_foreach_event_cb fn, void *arg)
{
	int r, i;
	unsigned u;
	struct event *ev;

	/* Start out with all the EVLIST_INSERTED events. */
	if ((r = evmap_foreach_event_(base, fn, arg)))
		return r;

	/* Okay, now we deal with those events that have timeouts and are in
	 * the min-heap. */
	for (u = 0; u < base->timeheap.n; ++u) {
		ev = base->timeheap.p[u];
		if (ev->ev_flags & EVLIST_INSERTED) {
			/* we already processed this one */
			continue;
		}
		if ((r = fn(base, ev, arg)))
			return r;
	}

	/* Now for the events in one of the common-timeout queues. */
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		TAILQ_FOREACH(ev, &ctl->events,
		    ev_timeout_pos.ev_next_with_common_timeout) {
			if (ev->ev_flags & EVLIST_INSERTED) {
				/* we already processed this one */
				continue;
			}
			if ((r = fn(base, ev, arg)))
				return r;
		}
	}

	/* Finally, we deal with all the active events that we haven't
	 * touched yet. */
	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb;
		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
			if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
				/* This isn't an event (EVLIST_INIT clear),
				 * or we already processed it (EVLIST_INSERTED
				 * or EVLIST_TIMEOUT set). */
				continue;
			}
			ev = event_callback_to_event(evcb);
			if ((r = fn(base, ev, arg)))
				return r;
		}
	}

	return 0;
}
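
/*
 * Usage sketch (hypothetical helper) for the iteration above, via the
 * locked event_base_foreach_event() wrapper defined further down:
 * counting the events a base currently knows about.  Returning nonzero
 * from the callback stops the iteration early.
 */
#if 0
static int
example_count_cb(const struct event_base *base, const struct event *e,
    void *arg)
{
	int *count = arg;
	++*count;
	return 0;	/* keep iterating */
}

static int
example_count_events(struct event_base *base)
{
	int count = 0;
	event_base_foreach_event(base, example_count_cb, &count);
	return count;
}
#endif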
[NextTime]":""); 3692275970Scy 3693275970Scy return 0; 3694275970Scy} 3695275970Scy 3696275970Scyint 3697275970Scyevent_base_foreach_event(struct event_base *base, 3698275970Scy event_base_foreach_event_cb fn, void *arg) 3699275970Scy{ 3700275970Scy int r; 3701275970Scy if ((!fn) || (!base)) { 3702275970Scy return -1; 3703275970Scy } 3704275970Scy EVBASE_ACQUIRE_LOCK(base, th_base_lock); 3705275970Scy r = event_base_foreach_event_nolock_(base, fn, arg); 3706275970Scy EVBASE_RELEASE_LOCK(base, th_base_lock); 3707275970Scy return r; 3708275970Scy} 3709275970Scy 3710275970Scy 3711275970Scyvoid 3712275970Scyevent_base_dump_events(struct event_base *base, FILE *output) 3713275970Scy{ 3714275970Scy EVBASE_ACQUIRE_LOCK(base, th_base_lock); 3715275970Scy fprintf(output, "Inserted events:\n"); 3716275970Scy event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output); 3717275970Scy 3718275970Scy fprintf(output, "Active events:\n"); 3719275970Scy event_base_foreach_event_nolock_(base, dump_active_event_fn, output); 3720275970Scy EVBASE_RELEASE_LOCK(base, th_base_lock); 3721275970Scy} 3722275970Scy 3723275970Scyvoid 3724275970Scyevent_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events) 3725275970Scy{ 3726275970Scy EVBASE_ACQUIRE_LOCK(base, th_base_lock); 3727275970Scy evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED)); 3728275970Scy EVBASE_RELEASE_LOCK(base, th_base_lock); 3729275970Scy} 3730275970Scy 3731275970Scyvoid 3732275970Scyevent_base_active_by_signal(struct event_base *base, int sig) 3733275970Scy{ 3734275970Scy EVBASE_ACQUIRE_LOCK(base, th_base_lock); 3735275970Scy evmap_signal_active_(base, sig, 1); 3736275970Scy EVBASE_RELEASE_LOCK(base, th_base_lock); 3737275970Scy} 3738275970Scy 3739275970Scy 3740275970Scyvoid 3741275970Scyevent_base_add_virtual_(struct event_base *base) 3742275970Scy{ 3743275970Scy EVBASE_ACQUIRE_LOCK(base, th_base_lock); 3744275970Scy base->virtual_event_count++; 3745275970Scy MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count); 3746275970Scy EVBASE_RELEASE_LOCK(base, th_base_lock); 3747275970Scy} 3748275970Scy 3749275970Scyvoid 3750275970Scyevent_base_del_virtual_(struct event_base *base) 3751275970Scy{ 3752275970Scy EVBASE_ACQUIRE_LOCK(base, th_base_lock); 3753275970Scy EVUTIL_ASSERT(base->virtual_event_count > 0); 3754275970Scy base->virtual_event_count--; 3755275970Scy if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base)) 3756275970Scy evthread_notify_base(base); 3757275970Scy EVBASE_RELEASE_LOCK(base, th_base_lock); 3758275970Scy} 3759275970Scy 3760275970Scystatic void 3761275970Scyevent_free_debug_globals_locks(void) 3762275970Scy{ 3763275970Scy#ifndef EVENT__DISABLE_THREAD_SUPPORT 3764275970Scy#ifndef EVENT__DISABLE_DEBUG_MODE 3765275970Scy if (event_debug_map_lock_ != NULL) { 3766275970Scy EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0); 3767275970Scy event_debug_map_lock_ = NULL; 3768285612Sdelphij evthreadimpl_disable_lock_debugging_(); 3769275970Scy } 3770275970Scy#endif /* EVENT__DISABLE_DEBUG_MODE */ 3771275970Scy#endif /* EVENT__DISABLE_THREAD_SUPPORT */ 3772275970Scy return; 3773275970Scy} 3774275970Scy 3775275970Scystatic void 3776275970Scyevent_free_debug_globals(void) 3777275970Scy{ 3778275970Scy event_free_debug_globals_locks(); 3779275970Scy} 3780275970Scy 3781275970Scystatic void 3782275970Scyevent_free_evsig_globals(void) 3783275970Scy{ 3784275970Scy evsig_free_globals_(); 3785275970Scy} 3786275970Scy 3787275970Scystatic void 

void
event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

void
event_base_active_by_signal(struct event_base *base, int sig)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	evmap_signal_active_(base, sig, 1);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

void
event_base_add_virtual_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->virtual_event_count++;
	MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

void
event_base_del_virtual_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	EVUTIL_ASSERT(base->virtual_event_count > 0);
	base->virtual_event_count--;
	if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

static void
event_free_debug_globals_locks(void)
{
#ifndef EVENT__DISABLE_THREAD_SUPPORT
#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_map_lock_ != NULL) {
		EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
		event_debug_map_lock_ = NULL;
		evthreadimpl_disable_lock_debugging_();
	}
#endif /* EVENT__DISABLE_DEBUG_MODE */
#endif /* EVENT__DISABLE_THREAD_SUPPORT */
	return;
}

static void
event_free_debug_globals(void)
{
	event_free_debug_globals_locks();
}

static void
event_free_evsig_globals(void)
{
	evsig_free_globals_();
}

static void
event_free_evutil_globals(void)
{
	evutil_free_globals_();
}

static void
event_free_globals(void)
{
	event_free_debug_globals();
	event_free_evsig_globals();
	event_free_evutil_globals();
}

void
libevent_global_shutdown(void)
{
	event_disable_debug_mode();
	event_free_globals();
}

#ifndef EVENT__DISABLE_THREAD_SUPPORT
int
event_global_setup_locks_(const int enable_locks)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
#endif
	if (evsig_global_setup_locks_(enable_locks) < 0)
		return -1;
	if (evutil_global_setup_locks_(enable_locks) < 0)
		return -1;
	if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
		return -1;
	return 0;
}
#endif

void
event_base_assert_ok_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	event_base_assert_ok_nolock_(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}

void
event_base_assert_ok_nolock_(struct event_base *base)
{
	int i;
	int count;

	/* First do checks on the per-fd and per-signal lists */
	evmap_check_integrity_(base);

	/* Check the heap property */
	for (i = 1; i < (int)base->timeheap.n; ++i) {
		int parent = (i - 1) / 2;
		struct event *ev, *p_ev;
		ev = base->timeheap.p[i];
		p_ev = base->timeheap.p[parent];
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
		EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
		EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
	}

	/* Check that the common timeouts are fine */
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl = base->common_timeout_queues[i];
		struct event *last=NULL, *ev;

		EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);

		TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
			if (last)
				EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
			EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
			EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
			EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
			last = ev;
		}
	}

	/* Check the active queues. */
	count = 0;
	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb;
		EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
			EVUTIL_ASSERT(evcb->evcb_pri == i);
			++count;
		}
	}

	{
		struct event_callback *evcb;
		TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
			++count;
		}
	}
	EVUTIL_ASSERT(count == base->event_count_active);
}
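
/*
 * Worked example for the heap-property check above: the timeout heap is
 * a binary min-heap stored in an array, so the entry at index i has its
 * parent at index (i - 1) / 2.  For i == 5 the parent is (5 - 1) / 2 == 2,
 * and asserting parent->ev_timeout <= child->ev_timeout for every such
 * pair guarantees that the earliest deadline sits at index 0, which is
 * exactly what timeout_next() and timeout_process() rely on.
 */
#if 0
	int i = 5;
	int parent = (i - 1) / 2;

	EVUTIL_ASSERT(parent == 2);
#endif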