/*++
/* NAME
/*	events 3
/* SUMMARY
/*	event manager
/* SYNOPSIS
/*	#include <events.h>
/*
/*	time_t	event_time()
/*
/*	void	event_loop(delay)
/*	int	delay;
/*
/*	time_t	event_request_timer(callback, context, delay)
/*	void	(*callback)(int event, char *context);
/*	char	*context;
/*	int	delay;
/*
/*	int	event_cancel_timer(callback, context)
/*	void	(*callback)(int event, char *context);
/*	char	*context;
/*
/*	void	event_enable_read(fd, callback, context)
/*	int	fd;
/*	void	(*callback)(int event, char *context);
/*	char	*context;
/*
/*	void	event_enable_write(fd, callback, context)
/*	int	fd;
/*	void	(*callback)(int event, char *context);
/*	char	*context;
/*
/*	void	event_disable_readwrite(fd)
/*	int	fd;
/*
/*	void	event_drain(time_limit)
/*	int	time_limit;
/*
/*	void	event_fork(void)
/* DESCRIPTION
/*	This module delivers I/O and timer events.
/*	Multiple I/O streams and timers can be monitored simultaneously.
/*	Events are delivered via callback routines provided by the
/*	application. When requesting an event, the application can provide
/*	private context that is passed back when the callback routine is
/*	executed.
/*
/*	event_time() returns a cached value of the current time.
/*
/*	event_loop() monitors all I/O channels for which the application has
/*	expressed interest, and monitors the timer request queue.
/*	It notifies the application whenever events of interest happen.
/*	A negative delay value causes the function to pause until something
/*	happens; a positive delay value causes event_loop() to return when
/*	the next event happens or when the delay time in seconds is over,
/*	whichever happens first. A zero delay effectuates a poll.
/*
/*	Note: in order to avoid race conditions, event_loop() must not
/*	be called recursively.
/*
/*	event_request_timer() causes the specified callback function to
/*	be called with the specified context argument after \fIdelay\fR
/*	seconds, or as soon as possible thereafter. The delay should
/*	not be negative (the manifest EVENT_NULL_DELAY provides for
/*	convenient zero-delay notification).
/*	The event argument is equal to EVENT_TIME.
/*	Only one timer request can be active per (callback, context) pair.
/*	Calling event_request_timer() with an existing (callback, context)
/*	pair does not schedule a new event, but updates the time of event
/*	delivery. The result is the absolute time at which the timer is
/*	scheduled to go off.
/*
/*	event_cancel_timer() cancels the specified (callback, context) request.
/*	The application is allowed to cancel non-existing requests. The result
/*	value is the amount of time left before the timer would have gone off,
/*	or -1 in case of no pending timer.
/*
/*	event_enable_read() (event_enable_write()) enables read (write) events
/*	on the named I/O channel. It is up to the application to assemble
/*	partial reads or writes.
/*	An I/O channel cannot handle more than one request at the
/*	same time. The application is allowed to enable an event that
/*	is already enabled (same channel, same read or write operation,
/*	but perhaps a different callback or context). On systems with
/*	kernel-based event filters this is preferred usage, because
/*	each disable and enable request would cost a system call.
/*
/*	The manifest constants EVENT_NULL_CONTEXT and EVENT_NULL_TYPE
/*	provide convenient null values.
/*
/*	The callback routine has the following arguments:
/* .IP fd
/*	The stream on which the event happened.
/* .IP event
/*	An indication of the event type:
/* .RS
/* .IP EVENT_READ
/*	read event,
/* .IP EVENT_WRITE
/*	write event,
/* .IP EVENT_XCPT
/*	exception (actually, any event other than read or write).
/* .RE
/* .IP context
/*	Application context given to event_enable_read() (event_enable_write()).
/* .PP
/*	event_disable_readwrite() disables further I/O events on the specified
/*	I/O channel. The application is allowed to cancel non-existing
/*	I/O event requests.
/*
/*	event_drain() repeatedly calls event_loop() until no more timer
/*	events or I/O events are pending or until the time limit is reached.
/*	This routine must not be called from an event_whatever() callback
/*	routine. Note: this function assumes that no new I/O events
/*	will be registered.
/*
/*	event_fork() must be called by a child process after it is
/*	created with fork(), to re-initialize event processing.
/* DIAGNOSTICS
/*	Panics: interface violations. Fatal errors: out of memory,
/*	system call failure. Warnings: the number of available
/*	file descriptors is much less than FD_SETSIZE.
/* BUGS
/*	This module is based on event selection. It assumes that the
/*	event_loop() routine is called frequently. This approach is
/*	not suitable for applications with compute-bound loops that
/*	take a significant amount of time.
/* LICENSE
/* .ad
/* .fi
/*	The Secure Mailer license must be distributed with this software.
/* AUTHOR(S)
/*	Wietse Venema
/*	IBM T.J. Watson Research
/*	P.O. Box 704
/*	Yorktown Heights, NY 10598, USA
/*--*/
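 /*
  * Usage sketch (added for illustration; the names my_read, my_deadline and
  * fd are made up, and this block is not part of the module): register a
  * read callback plus a deadline timer, then let event_loop() dispatch
  * until the application shuts the channel down.
  *
  * static void my_deadline(int event, char *context)
  * {
  *     event_disable_readwrite(*(int *) context);
  * }
  *
  * static void my_read(int event, char *context)
  * {
  *     int     fd = *(int *) context;
  *
  *     ... read from fd; when the transaction is complete:
  *     event_cancel_timer(my_deadline, context);
  *     event_disable_readwrite(fd);
  * }
  *
  * event_enable_read(fd, my_read, (char *) &fd);
  * event_request_timer(my_deadline, (char *) &fd, 10);
  * for (;;)
  *     event_loop(-1);
  */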
/* System libraries. */

#include "sys_defs.h"
#include <sys/time.h>			/* XXX: 44BSD uses bzero() */
#include <time.h>
#include <errno.h>
#include <unistd.h>
#include <stddef.h>			/* offsetof() */
#include <string.h>			/* bzero() prototype for 44BSD */
#include <limits.h>			/* INT_MAX */

#ifdef USE_SYS_SELECT_H
#include <sys/select.h>
#endif

/* Application-specific. */

#include "mymalloc.h"
#include "msg.h"
#include "iostuff.h"
#include "ring.h"
#include "events.h"

#if !defined(EVENTS_STYLE)
#error "must define EVENTS_STYLE"
#endif

 /*
  * Traditional BSD-style select(2). Works everywhere, but has a built-in
  * upper bound on the number of file descriptors, and that limit is hard to
  * change on Linux. Is sometimes emulated with SYSV-style poll(2) which
  * doesn't have the file descriptor limit, but unfortunately does not help
  * to improve the performance of servers with lots of connections.
  */
#define EVENT_ALLOC_INCR		10

#if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
typedef fd_set EVENT_MASK;

#define EVENT_MASK_BYTE_COUNT(mask)	sizeof(*(mask))
#define EVENT_MASK_ZERO(mask)		FD_ZERO(mask)
#define EVENT_MASK_SET(fd, mask)	FD_SET((fd), (mask))
#define EVENT_MASK_ISSET(fd, mask)	FD_ISSET((fd), (mask))
#define EVENT_MASK_CLR(fd, mask)	FD_CLR((fd), (mask))
#define EVENT_MASK_CMP(m1, m2) memcmp((m1), (m2), EVENT_MASK_BYTE_COUNT(m1))
#else

 /*
  * Kernel-based event filters (kqueue, /dev/poll, epoll). We use the
  * following file descriptor mask structure which is expanded on the fly.
  */
typedef struct {
    char   *data;			/* bit mask */
    size_t  data_len;			/* data byte count */
} EVENT_MASK;

 /* Bits per byte, byte in vector, bit offset in byte, bytes per set. */
#define EVENT_MASK_NBBY		(8)
#define EVENT_MASK_FD_BYTE(fd, mask) \
	(((unsigned char *) (mask)->data)[(fd) / EVENT_MASK_NBBY])
#define EVENT_MASK_FD_BIT(fd)	(1 << ((fd) % EVENT_MASK_NBBY))
#define EVENT_MASK_BYTES_NEEDED(len) \
	(((len) + (EVENT_MASK_NBBY - 1)) / EVENT_MASK_NBBY)
#define EVENT_MASK_BYTE_COUNT(mask)	((mask)->data_len)

 /* Memory management. */
#define EVENT_MASK_ALLOC(mask, bit_len) do { \
	size_t _byte_len = EVENT_MASK_BYTES_NEEDED(bit_len); \
	(mask)->data = mymalloc(_byte_len); \
	memset((mask)->data, 0, _byte_len); \
	(mask)->data_len = _byte_len; \
    } while (0)
#define EVENT_MASK_REALLOC(mask, bit_len) do { \
	size_t _byte_len = EVENT_MASK_BYTES_NEEDED(bit_len); \
	size_t _old_len = (mask)->data_len; \
	(mask)->data = myrealloc((mask)->data, _byte_len); \
	if (_byte_len > _old_len) \
	    memset((mask)->data + _old_len, 0, _byte_len - _old_len); \
	(mask)->data_len = _byte_len; \
    } while (0)
#define EVENT_MASK_FREE(mask)	myfree((mask)->data)

 /* Set operations, modeled after FD_ZERO/SET/ISSET/CLR. */
#define EVENT_MASK_ZERO(mask) \
	memset((mask)->data, 0, (mask)->data_len)
#define EVENT_MASK_SET(fd, mask) \
	(EVENT_MASK_FD_BYTE((fd), (mask)) |= EVENT_MASK_FD_BIT(fd))
#define EVENT_MASK_ISSET(fd, mask) \
	(EVENT_MASK_FD_BYTE((fd), (mask)) & EVENT_MASK_FD_BIT(fd))
#define EVENT_MASK_CLR(fd, mask) \
	(EVENT_MASK_FD_BYTE((fd), (mask)) &= ~EVENT_MASK_FD_BIT(fd))
#define EVENT_MASK_CMP(m1, m2) \
	memcmp((m1)->data, (m2)->data, EVENT_MASK_BYTE_COUNT(m1))
#endif
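 /*
  * Worked example (added for clarity): with 8 bits per byte, setting
  * descriptor 10 in a mask touches EVENT_MASK_FD_BYTE() = data[10 / 8] =
  * data[1], and ORs in EVENT_MASK_FD_BIT() = 1 << (10 % 8) = 0x04. A mask
  * sized for 10 descriptors occupies EVENT_MASK_BYTES_NEEDED(10) =
  * (10 + 7) / 8 = 2 bytes.
  */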
 /*
  * I/O events.
  */
typedef struct EVENT_FDTABLE EVENT_FDTABLE;

struct EVENT_FDTABLE {
    EVENT_NOTIFY_RDWR_FN callback;
    char   *context;
};
static EVENT_MASK event_rmask;		/* enabled read events */
static EVENT_MASK event_wmask;		/* enabled write events */
static EVENT_MASK event_xmask;		/* for bad news mostly */
static int event_fdlimit;		/* per-process open file limit */
static EVENT_FDTABLE *event_fdtable;	/* one slot per file descriptor */
static int event_fdslots;		/* number of file descriptor slots */
static int event_max_fd = -1;		/* highest fd number seen */

 /*
  * FreeBSD kqueue supports no system call to find out what descriptors are
  * registered in the kernel-based filter. To implement our own sanity checks
  * we maintain our own descriptor bitmask.
  *
  * FreeBSD kqueue does support application context pointers. Unfortunately,
  * changing that information would cost a system call, and some of the
  * competitors don't support application context. To keep the implementation
  * simple we maintain our own table with call-back information.
  *
  * FreeBSD kqueue silently unregisters a descriptor from its filter when the
  * descriptor is closed, so our information could get out of sync with the
  * kernel. But that will never happen, because we have to meticulously
  * unregister a file descriptor before it is closed, to avoid errors on
  * systems that are built with EVENTS_STYLE == EVENTS_STYLE_SELECT.
  */
#if (EVENTS_STYLE == EVENTS_STYLE_KQUEUE)
#include <sys/event.h>

 /*
  * Some early FreeBSD implementations don't have the EV_SET macro.
  */
#ifndef EV_SET
#define EV_SET(kp, id, fi, fl, ffl, da, ud) do { \
	(kp)->ident = (id); \
	(kp)->filter = (fi); \
	(kp)->flags = (fl); \
	(kp)->fflags = (ffl); \
	(kp)->data = (da); \
	(kp)->udata = (ud); \
    } while(0)
#endif

 /*
  * Macros to initialize the kernel-based filter; see event_init().
  */
static int event_kq;			/* handle to event filter */

#define EVENT_REG_INIT_HANDLE(er, n) do { \
	er = event_kq = kqueue(); \
    } while (0)
#define EVENT_REG_INIT_TEXT	"kqueue"

#define EVENT_REG_FORK_HANDLE(er, n) do { \
	(void) close(event_kq); \
	EVENT_REG_INIT_HANDLE(er, (n)); \
    } while (0)

 /*
  * Macros to update the kernel-based filter; see event_enable_read(),
  * event_enable_write() and event_disable_readwrite().
  */
#define EVENT_REG_FD_OP(er, fh, ev, op) do { \
	struct kevent dummy; \
	EV_SET(&dummy, (fh), (ev), (op), 0, 0, 0); \
	(er) = kevent(event_kq, &dummy, 1, 0, 0, 0); \
    } while (0)

#define EVENT_REG_ADD_OP(e, f, ev)	EVENT_REG_FD_OP((e), (f), (ev), EV_ADD)
#define EVENT_REG_ADD_READ(e, f)	EVENT_REG_ADD_OP((e), (f), EVFILT_READ)
#define EVENT_REG_ADD_WRITE(e, f)	EVENT_REG_ADD_OP((e), (f), EVFILT_WRITE)
#define EVENT_REG_ADD_TEXT		"kevent EV_ADD"

#define EVENT_REG_DEL_OP(e, f, ev)	EVENT_REG_FD_OP((e), (f), (ev), EV_DELETE)
#define EVENT_REG_DEL_READ(e, f)	EVENT_REG_DEL_OP((e), (f), EVFILT_READ)
#define EVENT_REG_DEL_WRITE(e, f)	EVENT_REG_DEL_OP((e), (f), EVFILT_WRITE)
#define EVENT_REG_DEL_TEXT		"kevent EV_DELETE"

 /*
  * Macros to retrieve event buffers from the kernel; see event_loop().
  */
typedef struct kevent EVENT_BUFFER;

#define EVENT_BUFFER_READ(event_count, event_buf, buflen, delay) do { \
	struct timespec ts; \
	struct timespec *tsp; \
	if ((delay) < 0) { \
	    tsp = 0; \
	} else { \
	    tsp = &ts; \
	    ts.tv_nsec = 0; \
	    ts.tv_sec = (delay); \
	} \
	(event_count) = kevent(event_kq, (struct kevent *) 0, 0, (event_buf), \
			       (buflen), (tsp)); \
    } while (0)
#define EVENT_BUFFER_READ_TEXT	"kevent"

 /*
  * Macros to process event buffers from the kernel; see event_loop().
  */
#define EVENT_GET_FD(bp)	((bp)->ident)
#define EVENT_GET_TYPE(bp)	((bp)->filter)
#define EVENT_TEST_READ(bp)	(EVENT_GET_TYPE(bp) == EVFILT_READ)
#define EVENT_TEST_WRITE(bp)	(EVENT_GET_TYPE(bp) == EVFILT_WRITE)

#endif
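 /*
  * For illustration (not in the original text): on a kqueue system,
  * EVENT_REG_ADD_READ(err, fd) expands to approximately the following,
  * which registers a read filter for fd with a single system call:
  *
  * struct kevent dummy;
  *
  * EV_SET(&dummy, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
  * err = kevent(event_kq, &dummy, 1, 0, 0, 0);
  */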
 /*
  * Solaris /dev/poll does not support application context, so we have to
  * maintain our own. This has the benefit of avoiding an expensive system
  * call just to change a call-back function or argument.
  *
  * Solaris /dev/poll does have a way to query if a specific descriptor is
  * registered. However, we maintain a descriptor mask anyway because a) it
  * avoids having to make an expensive system call to find out if something
  * is registered, b) some EVENTS_STYLE_MUMBLE implementations need a
  * descriptor bitmask anyway and c) we use the bitmask already to implement
  * sanity checks.
  */
#if (EVENTS_STYLE == EVENTS_STYLE_DEVPOLL)
#include <sys/devpoll.h>
#include <fcntl.h>

 /*
  * Macros to initialize the kernel-based filter; see event_init().
  */
static int event_pollfd;		/* handle to file descriptor set */

#define EVENT_REG_INIT_HANDLE(er, n) do { \
	er = event_pollfd = open("/dev/poll", O_RDWR); \
	if (event_pollfd >= 0) close_on_exec(event_pollfd, CLOSE_ON_EXEC); \
    } while (0)
#define EVENT_REG_INIT_TEXT	"open /dev/poll"

#define EVENT_REG_FORK_HANDLE(er, n) do { \
	(void) close(event_pollfd); \
	EVENT_REG_INIT_HANDLE(er, (n)); \
    } while (0)

 /*
  * Macros to update the kernel-based filter; see event_enable_read(),
  * event_enable_write() and event_disable_readwrite().
  */
#define EVENT_REG_FD_OP(er, fh, ev) do { \
	struct pollfd dummy; \
	dummy.fd = (fh); \
	dummy.events = (ev); \
	(er) = write(event_pollfd, (char *) &dummy, \
		     sizeof(dummy)) != sizeof(dummy) ? -1 : 0; \
    } while (0)

#define EVENT_REG_ADD_READ(e, f)	EVENT_REG_FD_OP((e), (f), POLLIN)
#define EVENT_REG_ADD_WRITE(e, f)	EVENT_REG_FD_OP((e), (f), POLLOUT)
#define EVENT_REG_ADD_TEXT		"write /dev/poll"

#define EVENT_REG_DEL_BOTH(e, f)	EVENT_REG_FD_OP((e), (f), POLLREMOVE)
#define EVENT_REG_DEL_TEXT		"write /dev/poll"

 /*
  * Macros to retrieve event buffers from the kernel; see event_loop().
  */
typedef struct pollfd EVENT_BUFFER;

#define EVENT_BUFFER_READ(event_count, event_buf, buflen, delay) do { \
	struct dvpoll dvpoll; \
	dvpoll.dp_fds = (event_buf); \
	dvpoll.dp_nfds = (buflen); \
	dvpoll.dp_timeout = (delay) < 0 ? -1 : (delay) * 1000; \
	(event_count) = ioctl(event_pollfd, DP_POLL, &dvpoll); \
    } while (0)
#define EVENT_BUFFER_READ_TEXT	"ioctl DP_POLL"

 /*
  * Macros to process event buffers from the kernel; see event_loop().
  */
#define EVENT_GET_FD(bp)	((bp)->fd)
#define EVENT_GET_TYPE(bp)	((bp)->revents)
#define EVENT_TEST_READ(bp)	(EVENT_GET_TYPE(bp) & POLLIN)
#define EVENT_TEST_WRITE(bp)	(EVENT_GET_TYPE(bp) & POLLOUT)

#endif
 /*
  * Linux epoll supports no system call to find out what descriptors are
  * registered in the kernel-based filter. To implement our own sanity checks
  * we maintain our own descriptor bitmask.
  *
  * Linux epoll does support application context pointers. Unfortunately,
  * changing that information would cost a system call, and some of the
  * competitors don't support application context. To keep the implementation
  * simple we maintain our own table with call-back information.
  *
  * Linux epoll silently unregisters a descriptor from its filter when the
  * descriptor is closed, so our information could get out of sync with the
  * kernel. But that will never happen, because we have to meticulously
  * unregister a file descriptor before it is closed, to avoid errors on
  * systems that are built with EVENTS_STYLE == EVENTS_STYLE_SELECT.
  */
#if (EVENTS_STYLE == EVENTS_STYLE_EPOLL)
#include <sys/epoll.h>

 /*
  * Macros to initialize the kernel-based filter; see event_init().
  */
static int event_epollfd;		/* epoll handle */

#define EVENT_REG_INIT_HANDLE(er, n) do { \
	er = event_epollfd = epoll_create(n); \
	if (event_epollfd >= 0) close_on_exec(event_epollfd, CLOSE_ON_EXEC); \
    } while (0)
#define EVENT_REG_INIT_TEXT	"epoll_create"

#define EVENT_REG_FORK_HANDLE(er, n) do { \
	(void) close(event_epollfd); \
	EVENT_REG_INIT_HANDLE(er, (n)); \
    } while (0)

 /*
  * Macros to update the kernel-based filter; see event_enable_read(),
  * event_enable_write() and event_disable_readwrite().
  */
#define EVENT_REG_FD_OP(er, fh, ev, op) do { \
	struct epoll_event dummy; \
	dummy.events = (ev); \
	dummy.data.fd = (fh); \
	(er) = epoll_ctl(event_epollfd, (op), (fh), &dummy); \
    } while (0)

#define EVENT_REG_ADD_OP(e, f, ev)	EVENT_REG_FD_OP((e), (f), (ev), EPOLL_CTL_ADD)
#define EVENT_REG_ADD_READ(e, f)	EVENT_REG_ADD_OP((e), (f), EPOLLIN)
#define EVENT_REG_ADD_WRITE(e, f)	EVENT_REG_ADD_OP((e), (f), EPOLLOUT)
#define EVENT_REG_ADD_TEXT		"epoll_ctl EPOLL_CTL_ADD"

#define EVENT_REG_DEL_OP(e, f, ev)	EVENT_REG_FD_OP((e), (f), (ev), EPOLL_CTL_DEL)
#define EVENT_REG_DEL_READ(e, f)	EVENT_REG_DEL_OP((e), (f), EPOLLIN)
#define EVENT_REG_DEL_WRITE(e, f)	EVENT_REG_DEL_OP((e), (f), EPOLLOUT)
#define EVENT_REG_DEL_TEXT		"epoll_ctl EPOLL_CTL_DEL"

 /*
  * Macros to retrieve event buffers from the kernel; see event_loop().
  */
typedef struct epoll_event EVENT_BUFFER;

#define EVENT_BUFFER_READ(event_count, event_buf, buflen, delay) do { \
	(event_count) = epoll_wait(event_epollfd, (event_buf), (buflen), \
				   (delay) < 0 ? -1 : (delay) * 1000); \
    } while (0)
#define EVENT_BUFFER_READ_TEXT	"epoll_wait"

 /*
  * Macros to process event buffers from the kernel; see event_loop().
  */
#define EVENT_GET_FD(bp)	((bp)->data.fd)
#define EVENT_GET_TYPE(bp)	((bp)->events)
#define EVENT_TEST_READ(bp)	(EVENT_GET_TYPE(bp) & EPOLLIN)
#define EVENT_TEST_WRITE(bp)	(EVENT_GET_TYPE(bp) & EPOLLOUT)

#endif

 /*
  * Timer events. Timer requests are kept sorted, in a circular list. We use
  * the RING abstraction, so we get to use a couple ugly macros.
  *
  * When a call-back function adds a timer request, we label the request with
  * the event_loop() call instance that invoked the call-back. We use this to
  * prevent zero-delay timer requests from running in a tight loop and
  * starving I/O events.
  */
typedef struct EVENT_TIMER EVENT_TIMER;

struct EVENT_TIMER {
    time_t  when;			/* when event is wanted */
    EVENT_NOTIFY_TIME_FN callback;	/* callback function */
    char   *context;			/* callback context */
    long    loop_instance;		/* event_loop() call instance */
    RING    ring;			/* linkage */
};

static RING event_timer_head;		/* timer queue head */
static long event_loop_instance;	/* event_loop() call instance */

#define RING_TO_TIMER(r) \
	((EVENT_TIMER *) ((char *) (r) - offsetof(EVENT_TIMER, ring)))

#define FOREACH_QUEUE_ENTRY(entry, head) \
	for (entry = ring_succ(head); entry != (head); entry = ring_succ(entry))

#define FIRST_TIMER(head) \
	(ring_succ(head) != (head) ? RING_TO_TIMER(ring_succ(head)) : 0)
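 /*
  * Added note: RING_TO_TIMER() is the usual container-of idiom. Because the
  * RING element is embedded inside EVENT_TIMER, subtracting the byte offset
  * of the ring member from a RING pointer recovers the enclosing timer
  * structure. For example, if ring happened to live at byte offset 24, a
  * RING pointer with value p would map back to the EVENT_TIMER at p - 24;
  * offsetof() supplies the real offset portably.
  */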
 /*
  * Other private data structures.
  */
static time_t event_present;		/* cached time of day */

#define EVENT_INIT_NEEDED()	(event_present == 0)

/* event_init - set up tables and such */

static void event_init(void)
{
    EVENT_FDTABLE *fdp;
    int     err;

    if (!EVENT_INIT_NEEDED())
	msg_panic("event_init: repeated call");

    /*
     * Initialize the file descriptor masks and the call-back table. Where
     * possible we extend these data structures on the fly. With select(2)
     * based implementations we can only handle FD_SETSIZE open files.
     */
#if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
    if ((event_fdlimit = open_limit(FD_SETSIZE)) < 0)
	msg_fatal("unable to determine open file limit");
#else
    if ((event_fdlimit = open_limit(INT_MAX)) < 0)
	msg_fatal("unable to determine open file limit");
#endif
    if (event_fdlimit < FD_SETSIZE / 2 && event_fdlimit < 256)
	msg_warn("could allocate space for only %d open files", event_fdlimit);
    event_fdslots = EVENT_ALLOC_INCR;
    event_fdtable = (EVENT_FDTABLE *)
	mymalloc(sizeof(EVENT_FDTABLE) * event_fdslots);
    for (fdp = event_fdtable; fdp < event_fdtable + event_fdslots; fdp++) {
	fdp->callback = 0;
	fdp->context = 0;
    }

    /*
     * Initialize the I/O event request masks.
     */
#if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
    EVENT_MASK_ZERO(&event_rmask);
    EVENT_MASK_ZERO(&event_wmask);
    EVENT_MASK_ZERO(&event_xmask);
#else
    EVENT_MASK_ALLOC(&event_rmask, event_fdslots);
    EVENT_MASK_ALLOC(&event_wmask, event_fdslots);
    EVENT_MASK_ALLOC(&event_xmask, event_fdslots);

    /*
     * Initialize the kernel-based filter.
     */
    EVENT_REG_INIT_HANDLE(err, event_fdslots);
    if (err < 0)
	msg_fatal("%s: %m", EVENT_REG_INIT_TEXT);
#endif

    /*
     * Initialize timer stuff.
     */
    ring_init(&event_timer_head);
    (void) time(&event_present);

    /*
     * Avoid an infinite initialization loop.
     */
    if (EVENT_INIT_NEEDED())
	msg_panic("event_init: unable to initialize");
}

/* event_extend - make room for more descriptor slots */

static void event_extend(int fd)
{
    const char *myname = "event_extend";
    int     old_slots = event_fdslots;
    int     new_slots = (event_fdslots > fd / 2 ?
			 2 * old_slots : fd + EVENT_ALLOC_INCR);
    EVENT_FDTABLE *fdp;
    int     err;

    if (msg_verbose > 2)
	msg_info("%s: fd %d", myname, fd);
    event_fdtable = (EVENT_FDTABLE *)
	myrealloc((char *) event_fdtable, sizeof(EVENT_FDTABLE) * new_slots);
    event_fdslots = new_slots;
    for (fdp = event_fdtable + old_slots;
	 fdp < event_fdtable + new_slots; fdp++) {
	fdp->callback = 0;
	fdp->context = 0;
    }

    /*
     * Initialize the I/O event request masks.
     */
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
    EVENT_MASK_REALLOC(&event_rmask, new_slots);
    EVENT_MASK_REALLOC(&event_wmask, new_slots);
    EVENT_MASK_REALLOC(&event_xmask, new_slots);
#endif
#ifdef EVENT_REG_UPD_HANDLE
    EVENT_REG_UPD_HANDLE(err, new_slots);
    if (err < 0)
	msg_fatal("%s: %s: %m", myname, EVENT_REG_UPD_TEXT);
#endif
}
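 /*
  * Added note on the sizing policy in event_extend(): the slot table grows
  * exponentially while the requested descriptor is in reach, and jumps
  * directly past it otherwise. For example, with 10 slots, fd = 15 gives
  * 10 > 15 / 2, so the table doubles to 20 slots; with 10 slots, fd = 100
  * gives 10 <= 50, so the table grows straight to 100 + EVENT_ALLOC_INCR =
  * 110 slots.
  */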
/* event_time - look up cached time of day */

time_t  event_time(void)
{
    if (EVENT_INIT_NEEDED())
	event_init();

    return (event_present);
}

/* event_drain - loop until all pending events are done */

void    event_drain(int time_limit)
{
    EVENT_MASK zero_mask;
    time_t  max_time;

    if (EVENT_INIT_NEEDED())
	return;

#if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
    EVENT_MASK_ZERO(&zero_mask);
#else
    EVENT_MASK_ALLOC(&zero_mask, event_fdslots);
#endif
    (void) time(&event_present);
    max_time = event_present + time_limit;
    while (event_present < max_time
	   && (event_timer_head.pred != &event_timer_head
	       || EVENT_MASK_CMP(&zero_mask, &event_xmask) != 0)) {
	event_loop(1);
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
	if (EVENT_MASK_BYTE_COUNT(&zero_mask)
	    != EVENT_MASK_BYTES_NEEDED(event_fdslots))
	    EVENT_MASK_REALLOC(&zero_mask, event_fdslots);
#endif
    }
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
    EVENT_MASK_FREE(&zero_mask);
#endif
}

/* event_fork - resume event processing after fork() */

void    event_fork(void)
{
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
    EVENT_FDTABLE *fdp;
    int     err;
    int     fd;

    /*
     * No event was ever registered, so there's nothing to be done.
     */
    if (EVENT_INIT_NEEDED())
	return;

    /*
     * Close the existing filter handle and open a new kernel-based filter.
     */
    EVENT_REG_FORK_HANDLE(err, event_fdslots);
    if (err < 0)
	msg_fatal("%s: %m", EVENT_REG_INIT_TEXT);

    /*
     * Populate the new kernel-based filter with events that were registered
     * in the parent process.
     */
    for (fd = 0; fd <= event_max_fd; fd++) {
	if (EVENT_MASK_ISSET(fd, &event_wmask)) {
	    EVENT_MASK_CLR(fd, &event_wmask);
	    fdp = event_fdtable + fd;
	    event_enable_write(fd, fdp->callback, fdp->context);
	} else if (EVENT_MASK_ISSET(fd, &event_rmask)) {
	    EVENT_MASK_CLR(fd, &event_rmask);
	    fdp = event_fdtable + fd;
	    event_enable_read(fd, fdp->callback, fdp->context);
	}
    }
#endif
}
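 /*
  * Usage sketch (added for illustration): a child process re-initializes
  * event processing before running its own event loop, so that it gets a
  * private filter handle with all registrations replayed.
  *
  * switch (fork()) {
  * case -1:
  *     msg_fatal("fork: %m");
  * case 0:
  *     event_fork();
  *     ... child proceeds with its own event_loop() calls ...
  *     break;
  * default:
  *     ... parent continues unchanged ...
  *     break;
  * }
  */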
/* event_enable_read - enable read events */

void    event_enable_read(int fd, EVENT_NOTIFY_RDWR_FN callback, char *context)
{
    const char *myname = "event_enable_read";
    EVENT_FDTABLE *fdp;
    int     err;

    if (EVENT_INIT_NEEDED())
	event_init();

    /*
     * Sanity checks.
     */
    if (fd < 0 || fd >= event_fdlimit)
	msg_panic("%s: bad file descriptor: %d", myname, fd);

    if (msg_verbose > 2)
	msg_info("%s: fd %d", myname, fd);

    if (fd >= event_fdslots)
	event_extend(fd);

    /*
     * Disallow mixed (i.e. read and write) requests on the same descriptor.
     */
    if (EVENT_MASK_ISSET(fd, &event_wmask))
	msg_panic("%s: fd %d: read/write I/O request", myname, fd);

    /*
     * Postfix 2.4 allows multiple event_enable_read() calls on the same
     * descriptor without requiring event_disable_readwrite() calls between
     * them. With kernel-based filters (kqueue, /dev/poll, epoll) it's
     * wasteful to make system calls when we change only application
     * call-back information. It has a noticeable effect on smtp-source
     * performance.
     */
    if (EVENT_MASK_ISSET(fd, &event_rmask) == 0) {
	EVENT_MASK_SET(fd, &event_xmask);
	EVENT_MASK_SET(fd, &event_rmask);
	if (event_max_fd < fd)
	    event_max_fd = fd;
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
	EVENT_REG_ADD_READ(err, fd);
	if (err < 0)
	    msg_fatal("%s: %s: %m", myname, EVENT_REG_ADD_TEXT);
#endif
    }
    fdp = event_fdtable + fd;
    if (fdp->callback != callback || fdp->context != context) {
	fdp->callback = callback;
	fdp->context = context;
    }
}

/* event_enable_write - enable write events */

void    event_enable_write(int fd, EVENT_NOTIFY_RDWR_FN callback, char *context)
{
    const char *myname = "event_enable_write";
    EVENT_FDTABLE *fdp;
    int     err;

    if (EVENT_INIT_NEEDED())
	event_init();

    /*
     * Sanity checks.
     */
    if (fd < 0 || fd >= event_fdlimit)
	msg_panic("%s: bad file descriptor: %d", myname, fd);

    if (msg_verbose > 2)
	msg_info("%s: fd %d", myname, fd);

    if (fd >= event_fdslots)
	event_extend(fd);

    /*
     * Disallow mixed (i.e. read and write) requests on the same descriptor.
     */
    if (EVENT_MASK_ISSET(fd, &event_rmask))
	msg_panic("%s: fd %d: read/write I/O request", myname, fd);

    /*
     * Postfix 2.4 allows multiple event_enable_write() calls on the same
     * descriptor without requiring event_disable_readwrite() calls between
     * them. With kernel-based filters (kqueue, /dev/poll, epoll) it's
     * incredibly wasteful to make unregister and register system calls when
     * we change only application call-back information. It has a noticeable
     * effect on smtp-source performance.
     */
    if (EVENT_MASK_ISSET(fd, &event_wmask) == 0) {
	EVENT_MASK_SET(fd, &event_xmask);
	EVENT_MASK_SET(fd, &event_wmask);
	if (event_max_fd < fd)
	    event_max_fd = fd;
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
	EVENT_REG_ADD_WRITE(err, fd);
	if (err < 0)
	    msg_fatal("%s: %s: %m", myname, EVENT_REG_ADD_TEXT);
#endif
    }
    fdp = event_fdtable + fd;
    if (fdp->callback != callback || fdp->context != context) {
	fdp->callback = callback;
	fdp->context = context;
    }
}
/* event_disable_readwrite - disable request for read or write events */

void    event_disable_readwrite(int fd)
{
    const char *myname = "event_disable_readwrite";
    EVENT_FDTABLE *fdp;
    int     err;

    if (EVENT_INIT_NEEDED())
	event_init();

    /*
     * Sanity checks.
     */
    if (fd < 0 || fd >= event_fdlimit)
	msg_panic("%s: bad file descriptor: %d", myname, fd);

    if (msg_verbose > 2)
	msg_info("%s: fd %d", myname, fd);

    /*
     * Don't complain when there is nothing to cancel. The request may have
     * been canceled from another thread.
     */
    if (fd >= event_fdslots)
	return;
#if (EVENTS_STYLE != EVENTS_STYLE_SELECT)
#ifdef EVENT_REG_DEL_BOTH
    /* XXX Can't seem to disable READ and WRITE events selectively. */
    if (EVENT_MASK_ISSET(fd, &event_rmask)
	|| EVENT_MASK_ISSET(fd, &event_wmask)) {
	EVENT_REG_DEL_BOTH(err, fd);
	if (err < 0)
	    msg_fatal("%s: %s: %m", myname, EVENT_REG_DEL_TEXT);
    }
#else
    if (EVENT_MASK_ISSET(fd, &event_rmask)) {
	EVENT_REG_DEL_READ(err, fd);
	if (err < 0)
	    msg_fatal("%s: %s: %m", myname, EVENT_REG_DEL_TEXT);
    } else if (EVENT_MASK_ISSET(fd, &event_wmask)) {
	EVENT_REG_DEL_WRITE(err, fd);
	if (err < 0)
	    msg_fatal("%s: %s: %m", myname, EVENT_REG_DEL_TEXT);
    }
#endif					/* EVENT_REG_DEL_BOTH */
#endif					/* != EVENTS_STYLE_SELECT */
    EVENT_MASK_CLR(fd, &event_xmask);
    EVENT_MASK_CLR(fd, &event_rmask);
    EVENT_MASK_CLR(fd, &event_wmask);
    fdp = event_fdtable + fd;
    fdp->callback = 0;
    fdp->context = 0;
}

/* event_request_timer - (re)set timer */

time_t  event_request_timer(EVENT_NOTIFY_TIME_FN callback, char *context, int delay)
{
    const char *myname = "event_request_timer";
    RING   *ring;
    EVENT_TIMER *timer;

    if (EVENT_INIT_NEEDED())
	event_init();

    /*
     * Sanity checks.
     */
    if (delay < 0)
	msg_panic("%s: invalid delay: %d", myname, delay);

    /*
     * Make sure we schedule this event at the right time.
     */
    time(&event_present);

    /*
     * See if they are resetting an existing timer request. If so, take the
     * request away from the timer queue so that it can be inserted at the
     * right place.
     */
    FOREACH_QUEUE_ENTRY(ring, &event_timer_head) {
	timer = RING_TO_TIMER(ring);
	if (timer->callback == callback && timer->context == context) {
	    timer->when = event_present + delay;
	    timer->loop_instance = event_loop_instance;
	    ring_detach(ring);
	    if (msg_verbose > 2)
		msg_info("%s: reset 0x%lx 0x%lx %d", myname,
			 (long) callback, (long) context, delay);
	    break;
	}
    }

    /*
     * If not found, schedule a new timer request.
     */
    if (ring == &event_timer_head) {
	timer = (EVENT_TIMER *) mymalloc(sizeof(EVENT_TIMER));
	timer->when = event_present + delay;
	timer->callback = callback;
	timer->context = context;
	timer->loop_instance = event_loop_instance;
	if (msg_verbose > 2)
	    msg_info("%s: set 0x%lx 0x%lx %d", myname,
		     (long) callback, (long) context, delay);
    }

    /*
     * Timer requests are kept sorted to reduce lookup overhead in the event
     * loop.
     *
     * XXX Append the new request after existing requests for the same time
     * slot. The event_loop() routine depends on this to avoid starving I/O
     * events when a call-back function schedules a zero-delay timer request.
     */
    FOREACH_QUEUE_ENTRY(ring, &event_timer_head) {
	if (timer->when < RING_TO_TIMER(ring)->when)
	    break;
    }
    ring_prepend(ring, &timer->ring);

    return (timer->when);
}
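 /*
  * Behavior sketch (added for illustration; my_timer and ctx are made up):
  * at most one timer can be pending per (callback, context) pair, so a
  * second request reschedules rather than duplicates.
  *
  * event_request_timer(my_timer, ctx, 10);	... fires in 10 seconds
  * event_request_timer(my_timer, ctx, 30);	... same pair: now 30 seconds
  * left = event_cancel_timer(my_timer, ctx);	... left is about 30
  * left = event_cancel_timer(my_timer, ctx);	... nothing pending: -1
  */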
/* event_cancel_timer - cancel timer */

int     event_cancel_timer(EVENT_NOTIFY_TIME_FN callback, char *context)
{
    const char *myname = "event_cancel_timer";
    RING   *ring;
    EVENT_TIMER *timer;
    int     time_left = -1;

    if (EVENT_INIT_NEEDED())
	event_init();

    /*
     * See if they are canceling an existing timer request. Do not complain
     * when the request is not found. It might have been canceled from some
     * other thread.
     */
    FOREACH_QUEUE_ENTRY(ring, &event_timer_head) {
	timer = RING_TO_TIMER(ring);
	if (timer->callback == callback && timer->context == context) {
	    if ((time_left = timer->when - event_present) < 0)
		time_left = 0;
	    ring_detach(ring);
	    myfree((char *) timer);
	    break;
	}
    }
    if (msg_verbose > 2)
	msg_info("%s: 0x%lx 0x%lx %d", myname,
		 (long) callback, (long) context, time_left);
    return (time_left);
}

/* event_loop - wait for the next event */

void    event_loop(int delay)
{
    const char *myname = "event_loop";
    static int nested;

#if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
    fd_set  rmask;
    fd_set  wmask;
    fd_set  xmask;
    struct timeval tv;
    struct timeval *tvp;
    int     new_max_fd;

#else
    EVENT_BUFFER event_buf[100];
    EVENT_BUFFER *bp;

#endif
    int     event_count;
    EVENT_TIMER *timer;
    int     fd;
    EVENT_FDTABLE *fdp;
    int     select_delay;

    if (EVENT_INIT_NEEDED())
	event_init();

    /*
     * XXX Also print the select() masks?
     */
    if (msg_verbose > 2) {
	RING   *ring;

	FOREACH_QUEUE_ENTRY(ring, &event_timer_head) {
	    timer = RING_TO_TIMER(ring);
	    msg_info("%s: time left %3d for 0x%lx 0x%lx", myname,
		     (int) (timer->when - event_present),
		     (long) timer->callback, (long) timer->context);
	}
    }

    /*
     * Find out when the next timer would go off. Timer requests are sorted.
     * If any timer is scheduled, adjust the delay appropriately.
     */
    if ((timer = FIRST_TIMER(&event_timer_head)) != 0) {
	event_present = time((time_t *) 0);
	if ((select_delay = timer->when - event_present) < 0) {
	    select_delay = 0;
	} else if (delay >= 0 && select_delay > delay) {
	    select_delay = delay;
	}
    } else {
	select_delay = delay;
    }
    if (msg_verbose > 2)
	msg_info("event_loop: select_delay %d", select_delay);

    /*
     * Negative delay means: wait until something happens. Zero delay means:
     * poll. Positive delay means: wait at most this long.
     */
#if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
    if (select_delay < 0) {
	tvp = 0;
    } else {
	tvp = &tv;
	tv.tv_usec = 0;
	tv.tv_sec = select_delay;
    }

    /*
     * Pause until the next event happens. When select() has a problem, don't
     * go into a tight loop. Allow select() to be interrupted due to the
     * arrival of a signal.
     */
    rmask = event_rmask;
    wmask = event_wmask;
    xmask = event_xmask;

    event_count = select(event_max_fd + 1, &rmask, &wmask, &xmask, tvp);
    if (event_count < 0) {
	if (errno != EINTR)
	    msg_fatal("event_loop: select: %m");
	return;
    }
#else
    EVENT_BUFFER_READ(event_count, event_buf,
		      sizeof(event_buf) / sizeof(event_buf[0]),
		      select_delay);
    if (event_count < 0) {
	if (errno != EINTR)
	    msg_fatal("event_loop: " EVENT_BUFFER_READ_TEXT ": %m");
	return;
    }
#endif

    /*
     * Before entering the application call-back routines, make sure we
     * aren't being called from a call-back routine. Doing so would make us
     * vulnerable to all kinds of race conditions.
     */
    if (nested++ > 0)
	msg_panic("event_loop: recursive call");

    /*
     * Deliver timer events. Allow the application to add/delete timer queue
     * requests while it is being called back. Requests are sorted: we keep
     * running over the timer request queue from the start, and stop when we
     * reach the future or the list end. We also stop when we reach a timer
     * request that was added by a call-back that was invoked from this
     * event_loop() call instance, for reasons that are explained below.
     *
     * To avoid dangling pointer problems 1) we must remove a request from the
     * timer queue before delivering its event to the application and 2) we
     * must look up the next timer request *after* calling the application.
     * The latter complicates the handling of zero-delay timer requests that
     * are added by event_loop() call-back functions.
     *
     * XXX When a timer event call-back function adds a new timer request,
     * event_request_timer() labels the request with the event_loop() call
     * instance that invoked the timer event call-back. We use this instance
     * label here to prevent zero-delay timer requests from running in a
     * tight loop and starving I/O events. To make this solution work,
     * event_request_timer() appends a new request after existing requests
     * for the same time slot.
     */
    event_present = time((time_t *) 0);
    event_loop_instance += 1;

    while ((timer = FIRST_TIMER(&event_timer_head)) != 0) {
	if (timer->when > event_present)
	    break;
	if (timer->loop_instance == event_loop_instance)
	    break;
	ring_detach(&timer->ring);		/* first this */
	if (msg_verbose > 2)
	    msg_info("%s: timer 0x%lx 0x%lx", myname,
		     (long) timer->callback, (long) timer->context);
	timer->callback(EVENT_TIME, timer->context);	/* then this */
	myfree((char *) timer);
    }

    /*
     * Deliver I/O events. Allow the application to cancel event requests
     * while it is being called back. To this end, we keep an eye on the
     * contents of event_xmask, so that we deliver only events that are still
     * wanted. We do not change the event request masks. It is up to the
     * application to determine when a read or write is complete.
     */
#if (EVENTS_STYLE == EVENTS_STYLE_SELECT)
    if (event_count > 0) {
	for (new_max_fd = 0, fd = 0; fd <= event_max_fd; fd++) {
	    if (FD_ISSET(fd, &event_xmask)) {
		new_max_fd = fd;
		/* In case event_fdtable is updated. */
		fdp = event_fdtable + fd;
		if (FD_ISSET(fd, &xmask)) {
		    if (msg_verbose > 2)
			msg_info("%s: exception fd=%d act=0x%lx 0x%lx", myname,
			     fd, (long) fdp->callback, (long) fdp->context);
		    fdp->callback(EVENT_XCPT, fdp->context);
		} else if (FD_ISSET(fd, &wmask)) {
		    if (msg_verbose > 2)
			msg_info("%s: write fd=%d act=0x%lx 0x%lx", myname,
			     fd, (long) fdp->callback, (long) fdp->context);
		    fdp->callback(EVENT_WRITE, fdp->context);
		} else if (FD_ISSET(fd, &rmask)) {
		    if (msg_verbose > 2)
			msg_info("%s: read fd=%d act=0x%lx 0x%lx", myname,
			     fd, (long) fdp->callback, (long) fdp->context);
		    fdp->callback(EVENT_READ, fdp->context);
		}
	    }
	}
	event_max_fd = new_max_fd;
    }
#else
    for (bp = event_buf; bp < event_buf + event_count; bp++) {
	fd = EVENT_GET_FD(bp);
	if (fd < 0 || fd > event_max_fd)
	    msg_panic("%s: bad file descriptor: %d", myname, fd);
	if (EVENT_MASK_ISSET(fd, &event_xmask)) {
	    fdp = event_fdtable + fd;
	    if (EVENT_TEST_READ(bp)) {
		if (msg_verbose > 2)
		    msg_info("%s: read fd=%d act=0x%lx 0x%lx", myname,
			     fd, (long) fdp->callback, (long) fdp->context);
		fdp->callback(EVENT_READ, fdp->context);
	    } else if (EVENT_TEST_WRITE(bp)) {
		if (msg_verbose > 2)
		    msg_info("%s: write fd=%d act=0x%lx 0x%lx", myname,
			     fd, (long) fdp->callback,
			     (long) fdp->context);
		fdp->callback(EVENT_WRITE, fdp->context);
	    } else {
		if (msg_verbose > 2)
		    msg_info("%s: other fd=%d act=0x%lx 0x%lx", myname,
			     fd, (long) fdp->callback, (long) fdp->context);
		fdp->callback(EVENT_XCPT, fdp->context);
	    }
	}
    }
#endif
    nested--;
}

#ifdef TEST

 /*
  * Proof-of-concept test program for the event manager. Schedule a series of
  * events at one-second intervals and let them happen, while echoing any
  * lines read from stdin.
  */
#include <stdio.h>
#include <ctype.h>
#include <stdlib.h>

/* timer_event - display event */

static void timer_event(int unused_event, char *context)
{
    printf("%ld: %s\n", (long) event_present, context);
    fflush(stdout);
}

/* echo - echo text received on stdin */

static void echo(int unused_event, char *unused_context)
{
    char    buf[BUFSIZ];

    if (fgets(buf, sizeof(buf), stdin) == 0)
	exit(0);
    printf("Result: %s", buf);
}

/* request - request a bunch of timer events */

static void request(int unused_event, char *unused_context)
{
    event_request_timer(timer_event, "3 first", 3);
    event_request_timer(timer_event, "3 second", 3);
    event_request_timer(timer_event, "4 first", 4);
    event_request_timer(timer_event, "4 second", 4);
    event_request_timer(timer_event, "2 first", 2);
    event_request_timer(timer_event, "2 second", 2);
    event_request_timer(timer_event, "1 first", 1);
    event_request_timer(timer_event, "1 second", 1);
    event_request_timer(timer_event, "0 first", 0);
    event_request_timer(timer_event, "0 second", 0);
}

int     main(int argc, char **argv)
{
    if (argv[1])
	msg_verbose = atoi(argv[1]);
    event_request_timer(request, (char *) 0, 0);
    event_enable_read(fileno(stdin), echo, (char *) 0);
    event_drain(10);
    exit(0);
}

#endif