/*
 * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "event2/event-config.h"
#include "evconfig-private.h"

#ifdef _WIN32
#include <winsock2.h>
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif

#include <sys/types.h>
#ifdef EVENT__HAVE_STDLIB_H
#include <stdlib.h>
#endif
#include <errno.h>
#include <limits.h>
#ifndef EVENT__HAVE_GETTIMEOFDAY
#include <sys/timeb.h>
#endif
#if !defined(EVENT__HAVE_NANOSLEEP) && !defined(EVENT__HAVE_USLEEP) && \
	!defined(_WIN32)
#include <sys/select.h>
#endif
#include <time.h>
#include <sys/stat.h>
#include <string.h>

/** evutil_usleep_() */
#if defined(_WIN32)
#elif defined(EVENT__HAVE_NANOSLEEP)
#elif defined(EVENT__HAVE_USLEEP)
#include <unistd.h>
#endif

#include "event2/util.h"
#include "util-internal.h"
#include "log-internal.h"
#include "mm-internal.h"

#ifndef EVENT__HAVE_GETTIMEOFDAY
/* No gettimeofday; this must be windows. */
int
evutil_gettimeofday(struct timeval *tv, struct timezone *tz)
{
#ifdef _MSC_VER
#define U64_LITERAL(n) n##ui64
#else
#define U64_LITERAL(n) n##llu
#endif

	/* Conversion logic taken from Tor, which in turn took it
	 * from Perl.  GetSystemTimeAsFileTime returns its value as
	 * an unaligned (!) 64-bit value containing the number of
	 * 100-nanosecond intervals since 1 January 1601 UTC. */
#define EPOCH_BIAS U64_LITERAL(116444736000000000)
#define UNITS_PER_SEC U64_LITERAL(10000000)
#define USEC_PER_SEC U64_LITERAL(1000000)
#define UNITS_PER_USEC U64_LITERAL(10)
	union {
		FILETIME ft_ft;
		ev_uint64_t ft_64;
	} ft;

	if (tv == NULL)
		return -1;

	GetSystemTimeAsFileTime(&ft.ft_ft);

	if (EVUTIL_UNLIKELY(ft.ft_64 < EPOCH_BIAS)) {
		/* Time before the unix epoch. */
		return -1;
	}
	ft.ft_64 -= EPOCH_BIAS;
	tv->tv_sec = (long) (ft.ft_64 / UNITS_PER_SEC);
	tv->tv_usec = (long) ((ft.ft_64 / UNITS_PER_USEC) % USEC_PER_SEC);
	return 0;
}
#endif
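
/* Worked example of the conversion above (illustrative only): EPOCH_BIAS is
 * the gap between the FILETIME epoch (1601-01-01) and the Unix epoch
 * (1970-01-01): 134,774 days * 86,400 sec/day = 11,644,473,600 seconds,
 * times 10,000,000 units/sec = 116444736000000000 units.  So a FILETIME
 * reading of 116444736012345678 is 12345678 units past the Unix epoch,
 * giving tv_sec = 1 and tv_usec = 234567 (12345678 / 10 % 1000000). */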

#define MAX_SECONDS_IN_MSEC_LONG \
	(((LONG_MAX) - 999) / 1000)

long
evutil_tv_to_msec_(const struct timeval *tv)
{
	if (tv->tv_usec > 1000000 || tv->tv_sec > MAX_SECONDS_IN_MSEC_LONG)
		return -1;

	return (tv->tv_sec * 1000) + ((tv->tv_usec + 999) / 1000);
}
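
/* Example of the rounding above (illustrative): the microsecond part is
 * rounded up, never down, so a timeout is never shortened.  For instance,
 * tv = {1, 1} gives 1*1000 + (1 + 999)/1000 = 1001 msec, while tv = {0, 0}
 * gives 0 msec. */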

/*
  Replacement for usleep on platforms that don't have one.  Not guaranteed to
  be any more fine-grained than 1 msec.
 */
void
evutil_usleep_(const struct timeval *tv)
{
	if (!tv)
		return;
#if defined(_WIN32)
	{
		long msec = evutil_tv_to_msec_(tv);
		Sleep((DWORD)msec);
	}
#elif defined(EVENT__HAVE_NANOSLEEP)
	{
		struct timespec ts;
		ts.tv_sec = tv->tv_sec;
		ts.tv_nsec = tv->tv_usec*1000;
		nanosleep(&ts, NULL);
	}
#elif defined(EVENT__HAVE_USLEEP)
	/* Some systems don't like to usleep more than 999999 usec */
	sleep(tv->tv_sec);
	usleep(tv->tv_usec);
#else
	select(0, NULL, NULL, NULL, tv);
#endif
}
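
/* Usage sketch (illustrative; evutil_usleep_ is an internal helper, not a
 * public API):
 *
 *	struct timeval tv;
 *	tv.tv_sec = 0;
 *	tv.tv_usec = 250000;
 *	evutil_usleep_(&tv);
 *
 * sleeps for roughly 250 msec; on Windows the interval is rounded up to
 * whole milliseconds by evutil_tv_to_msec_(). */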

int
evutil_date_rfc1123(char *date, const size_t datelen, const struct tm *tm)
{
	static const char *DAYS[] =
		{ "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" };
	static const char *MONTHS[] =
		{ "Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" };

	time_t t = time(NULL);

#ifndef _WIN32
	struct tm sys;
#endif

	/* If `tm` is NULL, use the system's current time. */
	if (tm == NULL) {
#ifdef _WIN32
		/** TODO: detect _gmtime64()/_gmtime64_s() */
		tm = gmtime(&t);
#else
		gmtime_r(&t, &sys);
		tm = &sys;
#endif
	}

	return evutil_snprintf(
		date, datelen, "%s, %02d %s %4d %02d:%02d:%02d GMT",
		DAYS[tm->tm_wday], tm->tm_mday, MONTHS[tm->tm_mon],
		1900 + tm->tm_year, tm->tm_hour, tm->tm_min, tm->tm_sec);
}
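
/* Example output (illustrative): with tm == NULL this formats the current
 * time in RFC 1123 form, e.g. "Sun, 06 Nov 1994 08:49:37 GMT" -- 29
 * characters plus the terminating NUL, so datelen should be at least 30. */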

/*
   This function assumes it's called repeatedly with a
   not-actually-so-monotonic time source whose outputs are in 'tv'. It
   implements a trivial ratcheting mechanism so that the values never go
   backwards.
 */
static void
adjust_monotonic_time(struct evutil_monotonic_timer *base,
    struct timeval *tv)
{
	evutil_timeradd(tv, &base->adjust_monotonic_clock, tv);

	if (evutil_timercmp(tv, &base->last_time, <)) {
		/* Guess it wasn't monotonic after all. */
		struct timeval adjust;
		evutil_timersub(&base->last_time, tv, &adjust);
		evutil_timeradd(&adjust, &base->adjust_monotonic_clock,
		    &base->adjust_monotonic_clock);
		*tv = base->last_time;
	}
	base->last_time = *tv;
}
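
/* Worked example of the ratchet above (illustrative): suppose last_time is
 * 10.000000 and the clock then reports 9.750000.  'adjust' becomes 0.250000,
 * it is folded into adjust_monotonic_clock, and the caller sees 10.000000
 * again.  Every later reading gets the accumulated 0.25 sec added back, so
 * the reported time never moves backwards. */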

/*
   Allocate a new struct evutil_monotonic_timer
 */
struct evutil_monotonic_timer *
evutil_monotonic_timer_new(void)
{
  struct evutil_monotonic_timer *p = NULL;

  p = mm_malloc(sizeof(*p));
  if (!p) goto done;

  memset(p, 0, sizeof(*p));

 done:
  return p;
}

/*
   Free a struct evutil_monotonic_timer
 */
void
evutil_monotonic_timer_free(struct evutil_monotonic_timer *timer)
{
  if (timer) {
    mm_free(timer);
  }
}

/*
   Set up a struct evutil_monotonic_timer for initial use
 */
int
evutil_configure_monotonic_time(struct evutil_monotonic_timer *timer,
                                int flags)
{
  return evutil_configure_monotonic_time_(timer, flags);
}

/*
   Query the current monotonic time
 */
int
evutil_gettime_monotonic(struct evutil_monotonic_timer *timer,
                         struct timeval *tp)
{
  return evutil_gettime_monotonic_(timer, tp);
}
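
/* Usage sketch for the public wrappers above (illustrative; error checking
 * of the int return values is omitted for brevity).  A caller timing an
 * interval might do:
 *
 *	struct evutil_monotonic_timer *timer = evutil_monotonic_timer_new();
 *	struct timeval start, end, diff;
 *	evutil_configure_monotonic_time(timer, EV_MONOT_PRECISE);
 *	evutil_gettime_monotonic(timer, &start);
 *	... do some work ...
 *	evutil_gettime_monotonic(timer, &end);
 *	evutil_timersub(&end, &start, &diff);
 *	evutil_monotonic_timer_free(timer);
 */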


#if defined(HAVE_POSIX_MONOTONIC)
/* =====
   The POSIX clock_gettime() interface provides a few ways to get at a
   monotonic clock.  CLOCK_MONOTONIC is most widely supported.  Linux also
   provides a CLOCK_MONOTONIC_COARSE with accuracy of about 1-4 msec.

   On all platforms I'm aware of, CLOCK_MONOTONIC really is monotonic.
   Platforms don't agree about whether it should jump on a sleep/resume.
 */

int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	/* CLOCK_MONOTONIC exists on FreeBSD, Linux, and Solaris.  You need to
	 * check for it at runtime, because some older kernel versions won't
	 * have it working. */
#ifdef CLOCK_MONOTONIC_COARSE
	const int precise = flags & EV_MONOT_PRECISE;
#endif
	const int fallback = flags & EV_MONOT_FALLBACK;
	struct timespec	ts;

#ifdef CLOCK_MONOTONIC_COARSE
	if (CLOCK_MONOTONIC_COARSE < 0) {
		/* Technically speaking, nothing keeps CLOCK_* from being
		 * negative (as far as I know). This check and the one below
		 * make sure that it's safe for us to use -1 as an "unset"
		 * value. */
		event_errx(1,"I didn't expect CLOCK_MONOTONIC_COARSE to be < 0");
	}
	if (! precise && ! fallback) {
		if (clock_gettime(CLOCK_MONOTONIC_COARSE, &ts) == 0) {
			base->monotonic_clock = CLOCK_MONOTONIC_COARSE;
			return 0;
		}
	}
#endif
	if (!fallback && clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
		base->monotonic_clock = CLOCK_MONOTONIC;
		return 0;
	}

	if (CLOCK_MONOTONIC < 0) {
		event_errx(1,"I didn't expect CLOCK_MONOTONIC to be < 0");
	}

	base->monotonic_clock = -1;
	return 0;
}

int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	struct timespec ts;

	if (base->monotonic_clock < 0) {
		if (evutil_gettimeofday(tp, NULL) < 0)
			return -1;
		adjust_monotonic_time(base, tp);
		return 0;
	}

	if (clock_gettime(base->monotonic_clock, &ts) == -1)
		return -1;
	tp->tv_sec = ts.tv_sec;
	tp->tv_usec = ts.tv_nsec / 1000;

	return 0;
}
#endif

#if defined(HAVE_MACH_MONOTONIC)
/* ======
   Apple is a little late to the POSIX party.  And why not?  Instead of
   clock_gettime(), they provide mach_absolute_time().  Its units are not
   fixed; we need to use mach_timebase_info() to get the right conversion
   factors to turn its units into nanoseconds.

   To all appearances, mach_absolute_time() seems to be honest-to-goodness
   monotonic.  Whether it stops during sleep or not is unspecified in
   principle, and dependent on CPU architecture in practice.
 */
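
/* Worked example of the conversion below (illustrative): on Intel Macs
 * mach_timebase_info() typically reports numer = 1, denom = 1 (ticks are
 * already nanoseconds); after the denom *= 1000 adjustment below, an
 * abstime delta of 5,000,000 ticks becomes 5,000,000 * 1 / 1000 = 5000
 * usec.  On other hardware the numer/denom ratio differs, but the same
 * arithmetic applies. */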

int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	const int fallback = flags & EV_MONOT_FALLBACK;
	struct mach_timebase_info mi;
	memset(base, 0, sizeof(*base));
	/* OSX has mach_absolute_time() */
	if (!fallback &&
	    mach_timebase_info(&mi) == 0 &&
	    mach_absolute_time() != 0) {
		/* mach_timebase_info tells us how to convert
		 * mach_absolute_time() into nanoseconds, but we
		 * want to use microseconds instead. */
		mi.denom *= 1000;
		memcpy(&base->mach_timebase_units, &mi, sizeof(mi));
	} else {
		base->mach_timebase_units.numer = 0;
	}
	return 0;
}

int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	ev_uint64_t abstime, usec;
	if (base->mach_timebase_units.numer == 0) {
		if (evutil_gettimeofday(tp, NULL) < 0)
			return -1;
		adjust_monotonic_time(base, tp);
		return 0;
	}

	abstime = mach_absolute_time();
	usec = (abstime * base->mach_timebase_units.numer)
	    / (base->mach_timebase_units.denom);
	tp->tv_sec = usec / 1000000;
	tp->tv_usec = usec % 1000000;

	return 0;
}
#endif

#if defined(HAVE_WIN32_MONOTONIC)
/* =====
   Turn we now to Windows.  Want monotonic time on Windows?

   Windows has QueryPerformanceCounter(), which gives the most
   high-resolution time available.  It's a pity it's not so monotonic in
   practice; it's also got some fun bugs, especially: with older Windowses,
   under virtualizations, with funny hardware, on multiprocessor systems,
   and so on.  PEP418 [1] has a nice roundup of the issues here.

   There's GetTickCount64() on Vista and later, which gives a number of 1-msec
   ticks since startup.  The accuracy here might be as bad as 10-20 msec, I
   hear.  There's an undocumented function (NtSetTimerResolution) that
   allegedly increases the accuracy. Good luck!

   There's also GetTickCount(), which is only 32 bits, but seems to be
   supported on pre-Vista versions of Windows.  Apparently, you can coax
   another 14 bits out of it, giving you 2231 years before rollover.

   The less said about timeGetTime() the better.

   "We don't care.  We don't have to.  We're the Phone Company."
            -- Lily Tomlin, SNL

   Our strategy, if precise timers are turned off, is to just use the best
   GetTickCount equivalent available.  If we've been asked for precise timing,
   then we mostly[2] assume that GetTickCount is monotonic, and correct
   QueryPerformanceCounter to approximate it.

   [1] http://www.python.org/dev/peps/pep-0418
   [2] Of course, we feed the Windows stuff into adjust_monotonic_time()
       anyway, just in case it isn't.

 */
/*
    Parts of our logic in the win32 timer code here are closely based on
    BitTorrent's libUTP library.  That code is subject to the following
    license:

      Copyright (c) 2010 BitTorrent, Inc.

      Permission is hereby granted, free of charge, to any person obtaining a
      copy of this software and associated documentation files (the
      "Software"), to deal in the Software without restriction, including
      without limitation the rights to use, copy, modify, merge, publish,
      distribute, sublicense, and/or sell copies of the Software, and to
      permit persons to whom the Software is furnished to do so, subject to
      the following conditions:

      The above copyright notice and this permission notice shall be included
      in all copies or substantial portions of the Software.

      THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
      OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
      MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
      NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
      LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
      OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
      WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

static ev_uint64_t
evutil_GetTickCount_(struct evutil_monotonic_timer *base)
{
	if (base->GetTickCount64_fn) {
		/* Let's just use GetTickCount64 if we can. */
		return base->GetTickCount64_fn();
	} else if (base->GetTickCount_fn) {
		/* Greg Hazel assures me that this works, that BitTorrent has
		 * done it for years, and that it won't turn around and
		 * bite us.  He says they found it on some game programmers'
		 * forum some time around 2007.
		 */
		ev_uint64_t v = base->GetTickCount_fn();
		return (DWORD)v | ((v >> 18) & 0xFFFFFFFF00000000);
	} else {
		/* Here's the fallback implementation. We have to use
		 * GetTickCount() with its given signature, so we only get
		 * 32 bits worth of milliseconds, which will roll over every
		 * 49 days or so.  */
		DWORD ticks = GetTickCount();
		if (ticks < base->last_tick_count) {
			base->adjust_tick_count += ((ev_uint64_t)1) << 32;
		}
		base->last_tick_count = ticks;
		return ticks + base->adjust_tick_count;
	}
}
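
/* Worked example of the fallback branch above (illustrative): if the last
 * reading was 0xFFFFFFF0 and GetTickCount() now returns 0x00000010, the
 * 32-bit counter has wrapped, so adjust_tick_count grows by 2^32 and the
 * returned 64-bit value keeps increasing instead of jumping back by roughly
 * 49.7 days. */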

int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int flags)
{
	const int precise = flags & EV_MONOT_PRECISE;
	const int fallback = flags & EV_MONOT_FALLBACK;
	HANDLE h;
	memset(base, 0, sizeof(*base));

	h = evutil_load_windows_system_library_(TEXT("kernel32.dll"));
	if (h != NULL && !fallback) {
		base->GetTickCount64_fn = (ev_GetTickCount_func)GetProcAddress(h, "GetTickCount64");
		base->GetTickCount_fn = (ev_GetTickCount_func)GetProcAddress(h, "GetTickCount");
	}

	base->first_tick = base->last_tick_count = evutil_GetTickCount_(base);
	if (precise && !fallback) {
		LARGE_INTEGER freq;
		if (QueryPerformanceFrequency(&freq)) {
			LARGE_INTEGER counter;
			QueryPerformanceCounter(&counter);
			base->first_counter = counter.QuadPart;
			base->usec_per_count = 1.0e6 / freq.QuadPart;
			base->use_performance_counter = 1;
		}
	}

	return 0;
}

static inline ev_int64_t
abs64(ev_int64_t i)
{
	return i < 0 ? -i : i;
}


int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	ev_uint64_t ticks = evutil_GetTickCount_(base);
	if (base->use_performance_counter) {
		/* Here's a trick we took from BitTorrent's libutp, at Greg
		 * Hazel's recommendation.  We use QueryPerformanceCounter for
		 * our high-resolution timer, but use GetTickCount*() to keep
		 * it sane, and adjust_monotonic_time() to keep it monotonic.
		 */
		LARGE_INTEGER counter;
		ev_int64_t counter_elapsed, counter_usec_elapsed, ticks_elapsed;
		QueryPerformanceCounter(&counter);
		counter_elapsed = (ev_int64_t)
		    (counter.QuadPart - base->first_counter);
		ticks_elapsed = ticks - base->first_tick;
		/* TODO: This may upset VC6. If you need this to work with
		 * VC6, please supply an appropriate patch. */
		counter_usec_elapsed = (ev_int64_t)
		    (counter_elapsed * base->usec_per_count);

		if (abs64(ticks_elapsed*1000 - counter_usec_elapsed) > 1000000) {
			/* It appears that the QueryPerformanceCounter()
			 * result is more than 1 second away from the
			 * GetTickCount() result. Let's adjust it to be as
			 * accurate as we can; adjust_monotonic_time() below
			 * will keep it monotonic. */
			counter_usec_elapsed = ticks_elapsed * 1000;
			base->first_counter = (ev_uint64_t) (counter.QuadPart - counter_usec_elapsed / base->usec_per_count);
		}
		tp->tv_sec = (time_t) (counter_usec_elapsed / 1000000);
		tp->tv_usec = counter_usec_elapsed % 1000000;

	} else {
		/* We're just using GetTickCount(). */
		tp->tv_sec = (time_t) (ticks / 1000);
		tp->tv_usec = (ticks % 1000) * 1000;
	}
	adjust_monotonic_time(base, tp);

	return 0;
}
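
/* Worked example of the drift check above (illustrative; the 10 MHz figure
 * is just a commonly reported QueryPerformanceFrequency value, not something
 * libevent relies on): with freq = 10,000,000, usec_per_count is 0.1, so
 * 5,000,000 elapsed counts map to 500,000 usec.  If that figure ever drifts
 * more than one second away from ticks_elapsed * 1000, first_counter is
 * rebased so future readings line up with GetTickCount() again. */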
#endif

#if defined(HAVE_FALLBACK_MONOTONIC)
/* =====
   And if none of the other options work, let's just use gettimeofday(), and
   ratchet it forward so that it acts like a monotonic timer, whether it
   wants to or not.
 */

int
evutil_configure_monotonic_time_(struct evutil_monotonic_timer *base,
    int precise)
{
	memset(base, 0, sizeof(*base));
	return 0;
}

int
evutil_gettime_monotonic_(struct evutil_monotonic_timer *base,
    struct timeval *tp)
{
	if (evutil_gettimeofday(tp, NULL) < 0)
		return -1;
	adjust_monotonic_time(base, tp);
	return 0;
}
#endif