/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_timeout.c 171053 2007-06-26 21:42:01Z attilio $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_mtxcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mtxcalls, CTLFLAG_RD, &avg_mtxcalls, 0,
    "Average number of mtx callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
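
/*
 * The averages above are exponentially weighted moving averages kept in
 * units of 1/1000 of an event per softclock() call; they are updated at
 * the bottom of softclock() as
 *
 *	avg += (count * 1000 - avg) >> 8;
 *
 * i.e. with a gain of roughly 1/256 per call.  A reading of, say,
 * debug.to_avg_depth == 4000 therefore means that softclock() has
 * recently examined about 4 callwheel entries per invocation.
 */
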
/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */

/* Exported to machdep.c and/or kern_clock.c.  */
struct callout *callout;
struct callout_list callfree;
int callwheelsize, callwheelbits, callwheelmask;
struct callout_tailq *callwheel;
int softticks;			/* Like ticks, but for softclock(). */
struct mtx callout_lock;

static struct callout *nextsoftcheck;	/* Next callout to be checked. */

/**
 * Locked by callout_lock:
 *   curr_callout    - If a callout is in progress, it is curr_callout.
 *                     If curr_callout is non-NULL, threads waiting in
 *                     callout_drain() will be woken up as soon as the
 *                     relevant callout completes.
 *   curr_cancelled  - Changing to 1 with both callout_lock and c_mtx held
 *                     guarantees that the current callout will not run.
 *                     The softclock() function sets this to 0 before it
 *                     drops callout_lock to acquire c_mtx, and it calls
 *                     the handler only if curr_cancelled is still 0 after
 *                     c_mtx is successfully acquired.
 *   callout_wait    - If a thread is waiting in callout_drain(), then
 *                     callout_wait is nonzero.  Set only when
 *                     curr_callout is non-NULL.
 */
static struct callout *curr_callout;
static int curr_cancelled;
static int callout_wait;

/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 *	This code is called very early in the kernel initialization sequence,
 *	and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
	/*
	 * Calculate the callout wheel size: the smallest power of two
	 * that is >= ncallout.
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	callout = (struct callout *)v;
	v = (caddr_t)(callout + ncallout);
	callwheel = (struct callout_tailq *)v;
	v = (caddr_t)(callwheel + callwheelsize);
	return(v);
}

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 *	This code is called just once, after the space reserved for the
 *	callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	int i;

	SLIST_INIT(&callfree);
	for (i = 0; i < ncallout; i++) {
		callout_init(&callout[i], 0);
		callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
	}
	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&callwheel[i]);
	}
	mtx_init(&callout_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
}
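
/*
 * A minimal sketch of how the two routines above are meant to be driven by
 * the machine-dependent startup code (the function and variable names below
 * are illustrative only, and the guard macro merely keeps the sketch out of
 * the build): kern_timeout_callwheel_alloc() only carves the callout and
 * callwheel arrays out of a chunk of early kernel memory and returns the
 * advanced pointer; kern_timeout_callwheel_init() is called later, once
 * that space is final.
 */
#ifdef CALLWHEEL_SETUP_EXAMPLE
static caddr_t
callwheel_setup_example(caddr_t firstaddr)
{

	/* Pass 1: size and carve out the callout and callwheel arrays. */
	firstaddr = kern_timeout_callwheel_alloc(firstaddr);

	/* Pass 2: build the free list and initialize the bucket queues. */
	kern_timeout_callwheel_init();

	return (firstaddr);
}
#endif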

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */
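
/*
 * Concretely, the "timing wheel" here is the callwheel[] array of
 * callwheelsize (a power of two) TAILQ buckets: a callout due at absolute
 * tick c_time lives on callwheel[c_time & callwheelmask], and softclock()
 * walks only the bucket that hashes to the tick currently being serviced,
 * skipping entries whose c_time does not match.  For example, with
 * callwheelsize == 256, callouts due at ticks 1000 and 1256 both hash to
 * bucket 1000 & 255 == 232, but fire on different passes over that bucket.
 */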

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *dummy)
{
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int mtxcalls;
	int gcalls;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	mtxcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	mtx_lock_spin(&callout_lock);
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					mtx_unlock_spin(&callout_lock);
					;	/* nothing */
					mtx_lock_spin(&callout_lock);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				struct mtx *c_mtx;
				int c_flags;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_mtx = c->c_mtx;
				c_flags = c->c_flags;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_func = NULL;
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
							  c_links.sle);
					curr_callout = NULL;
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
					curr_callout = c;
				}
				curr_cancelled = 0;
				mtx_unlock_spin(&callout_lock);
				if (c_mtx != NULL) {
					if (c_flags & CALLOUT_NETGIANT) {
						mtx_lock(&Giant);
						gcalls++;
						CTR3(KTR_CALLOUT, "netgiant"
						    " %p func %p arg %p",
						    c, c_func, c_arg);
					}
					mtx_lock(c_mtx);
					/*
					 * The callout may have been cancelled
					 * while we switched locks.
					 */
					if (curr_cancelled) {
						mtx_unlock(c_mtx);
						goto skip;
					}
					/* The callout cannot be stopped now. */
					curr_cancelled = 1;

					if (c_mtx == &Giant) {
						gcalls++;
						CTR3(KTR_CALLOUT,
						    "callout %p func %p arg %p",
						    c, c_func, c_arg);
					} else {
						mtxcalls++;
						CTR3(KTR_CALLOUT, "callout mtx"
						    " %p func %p arg %p",
						    c, c_func, c_arg);
					}
				} else {
					mpcalls++;
					CTR3(KTR_CALLOUT,
					    "callout mpsafe %p func %p arg %p",
					    c, c_func, c_arg);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
#endif
				THREAD_NO_SLEEPING();
				c_func(c_arg);
				THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
					mtx_unlock(c_mtx);
				if (c_flags & CALLOUT_NETGIANT)
					mtx_unlock(&Giant);
			skip:
				mtx_lock_spin(&callout_lock);
				curr_callout = NULL;
				if (callout_wait) {
					/*
					 * There is someone waiting
					 * for the callout to complete.
					 */
					callout_wait = 0;
					mtx_unlock_spin(&callout_lock);
					wakeup(&callout_wait);
					mtx_lock_spin(&callout_lock);
				}
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_mtxcalls += (mtxcalls * 1000 - avg_mtxcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	nextsoftcheck = NULL;
	mtx_unlock_spin(&callout_lock);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
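
/*
 * A brief usage sketch of this legacy interface (the "example_*" names and
 * the guard macro below are illustrative only, not part of this file's
 * API): the caller keeps the handle returned by timeout() and passes it,
 * together with the original function and argument, to untimeout().
 */
#ifdef TIMEOUT_USAGE_EXAMPLE
struct example_softc {
	struct callout_handle	sc_handle;
};

static void
example_expire(void *arg)
{

	/* Runs from softclock() roughly hz ticks (one second) later. */
}

static void
example_start(struct example_softc *sc)
{

	callout_handle_init(&sc->sc_handle);
	sc->sc_handle = timeout(example_expire, sc, hz);
}

static void
example_stop(struct example_softc *sc)
{

	/* Harmless if the handle was initialized but never armed. */
	untimeout(example_expire, sc, sc->sc_handle);
}
#endif
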
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout *new;
	struct callout_handle handle;

	mtx_lock_spin(&callout_lock);

	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&callfree, c_links.sle);

	callout_reset(new, to_ticks, ftn, arg);

	handle.callout = new;
	mtx_unlock_spin(&callout_lock);
	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	mtx_lock_spin(&callout_lock);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	mtx_unlock_spin(&callout_lock);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
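
/*
 * A minimal usage sketch of the callout_*() interface (the "example2_*"
 * names and the guard macro are illustrative only).  A nonzero "mpsafe"
 * argument to callout_init() means no lock is taken around the handler,
 * so the owner must use callout_drain() to make sure no handler is still
 * running before freeing the structure.
 */
#ifdef CALLOUT_USAGE_EXAMPLE
struct example2_softc {
	struct callout	sc_callout;
};

static void
example2_expire(void *arg)
{

	/* One-shot work on "arg" runs here, roughly one second after arming. */
}

static void
example2_attach(struct example2_softc *sc)
{

	callout_init(&sc->sc_callout, 1);	/* mpsafe: no lock taken */
	callout_reset(&sc->sc_callout, hz, example2_expire, sc);
}

static void
example2_detach(struct example2_softc *sc)
{

	/*
	 * callout_stop() only removes a still-pending callout;
	 * callout_drain() also waits for a handler that has already
	 * started, which is what is needed before the structure is freed.
	 */
	callout_drain(&sc->sc_callout);
}
#endif
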
int
callout_reset(c, to_ticks, ftn, arg)
	struct	callout *c;
	int	to_ticks;
	void	(*ftn)(void *);
	void	*arg;
{
	int cancelled = 0;

#ifdef notyet /* Some callers of timeout() do not hold Giant. */
	if (c->c_mtx != NULL)
		mtx_assert(c->c_mtx, MA_OWNED);
#endif

	mtx_lock_spin(&callout_lock);
	if (c == curr_callout) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress.  If there is a mutex then we
		 * can cancel the callout if it has not really started.
		 */
		if (c->c_mtx != NULL && !curr_cancelled)
			cancelled = curr_cancelled = 1;
		if (callout_wait) {
			/*
			 * Someone has called callout_drain to kill this
			 * callout.  Don't reschedule.
			 */
			CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
			    cancelled ? "cancelled" : "failed to cancel",
			    c, c->c_func, c->c_arg);
			mtx_unlock_spin(&callout_lock);
			return (cancelled);
		}
	}
	if (c->c_flags & CALLOUT_PENDING) {
		if (nextsoftcheck == c) {
			nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
		}
		TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c,
		    c_links.tqe);

		cancelled = 1;

		/*
		 * Part of the normal "stop a pending callout" process
		 * is to clear the CALLOUT_ACTIVE and CALLOUT_PENDING
		 * flags.  We're not going to bother doing that here,
		 * because we're going to be setting those flags ten lines
		 * after this point, and we're holding callout_lock
		 * between now and then.
		 */
	}

	/*
	 * We could unlock callout_lock here and lock it again before the
	 * TAILQ_INSERT_TAIL, but there's no point since doing this setup
	 * doesn't take much time.
	 */
	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = ftn;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
			  c, c_links.tqe);
	CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
	    cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
	mtx_unlock_spin(&callout_lock);

	return (cancelled);
}

int
_callout_stop_safe(c, safe)
	struct	callout *c;
	int	safe;
{
	int use_mtx;

	if (!safe && c->c_mtx != NULL) {
#ifdef notyet /* Some callers do not hold Giant for Giant-locked callouts. */
		mtx_assert(c->c_mtx, MA_OWNED);
		use_mtx = 1;
#else
		use_mtx = mtx_owned(c->c_mtx);
#endif
	} else {
		use_mtx = 0;
	}

	mtx_lock_spin(&callout_lock);
	/*
	 * If the callout isn't pending, it's not on the queue, so
	 * don't attempt to remove it from the queue.  We can try to
	 * stop it by other means however.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;

		/*
		 * If it wasn't on the queue and it isn't the current
		 * callout, then we can't stop it, so just bail.
		 */
		if (c != curr_callout) {
			CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			mtx_unlock_spin(&callout_lock);
			return (0);
		}

		if (safe) {
			/*
			 * The current callout is running (or just
			 * about to run) and blocking is allowed, so
			 * just wait for the current invocation to
			 * finish.
			 */
			while (c == curr_callout) {

				/*
				 * Use direct calls to the sleepqueue
				 * interface instead of cv/msleep in order
				 * to avoid a LOR between callout_lock and
				 * the sleepqueue chain spinlocks.  This
				 * piece of code essentially emulates an
				 * msleep_spin() call.
				 */
				mtx_unlock_spin(&callout_lock);
				sleepq_lock(&callout_wait);

				/*
				 * Check the state of curr_callout again,
				 * because curthread could have lost the
				 * race it previously won.
				 */
				mtx_lock_spin(&callout_lock);
				if (c != curr_callout) {
					sleepq_release(&callout_wait);
					break;
				}
				callout_wait = 1;
				DROP_GIANT();
				mtx_unlock_spin(&callout_lock);
				sleepq_add(&callout_wait,
				    &callout_lock.lock_object, "codrain",
				    SLEEPQ_SLEEP, 0);
				sleepq_wait(&callout_wait);

				/* Reacquire locks previously released. */
				PICKUP_GIANT();
				mtx_lock_spin(&callout_lock);
			}
		} else if (use_mtx && !curr_cancelled) {
			/*
			 * The current callout is waiting for its
			 * mutex, which we hold.  Cancel the callout
			 * and return.  After our caller drops the
			 * mutex, the callout will be skipped in
			 * softclock().
			 */
			curr_cancelled = 1;
			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			mtx_unlock_spin(&callout_lock);
			return (1);
		}
		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
		    c, c->c_func, c->c_arg);
		mtx_unlock_spin(&callout_lock);
		return (0);
	}
	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (nextsoftcheck == c) {
		nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
	}
	TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe);

	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
	    c, c->c_func, c->c_arg);

	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		c->c_func = NULL;
		SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
	}
	mtx_unlock_spin(&callout_lock);
	return (1);
}

void
callout_init(c, mpsafe)
	struct	callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe) {
		c->c_mtx = NULL;
		c->c_flags = CALLOUT_RETURNUNLOCKED;
	} else {
		c->c_mtx = &Giant;
		c->c_flags = 0;
	}
}

void
callout_init_mtx(c, mtx, flags)
	struct	callout *c;
	struct	mtx *mtx;
	int flags;
{
	bzero(c, sizeof *c);
	c->c_mtx = mtx;
	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED|CALLOUT_NETGIANT)) == 0,
	    ("callout_init_mtx: bad flags %d", flags));
	/* CALLOUT_RETURNUNLOCKED makes no sense without a mutex. */
	KASSERT(mtx != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_mtx: CALLOUT_RETURNUNLOCKED with no mutex"));
	c->c_flags = flags & (CALLOUT_RETURNUNLOCKED|CALLOUT_NETGIANT);
}
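
/*
 * A short sketch of the mutex-protected variant (the "example3_*" names
 * and the guard macro are illustrative only): softclock() acquires the
 * given mutex around the handler, so the handler, callout_reset() and
 * callout_stop() all synchronize on the caller's own lock.
 */
#ifdef CALLOUT_MTX_USAGE_EXAMPLE
struct example3_softc {
	struct mtx	sc_mtx;
	struct callout	sc_callout;
};

static void
example3_expire(void *arg)
{
	struct example3_softc *sc = arg;

	mtx_assert(&sc->sc_mtx, MA_OWNED);	/* held by softclock() */
	/* Work protected by sc_mtx goes here. */
}

static void
example3_attach(struct example3_softc *sc)
{

	mtx_init(&sc->sc_mtx, "example3", NULL, MTX_DEF);
	callout_init_mtx(&sc->sc_callout, &sc->sc_mtx, 0);

	mtx_lock(&sc->sc_mtx);
	callout_reset(&sc->sc_callout, hz, example3_expire, sc);
	mtx_unlock(&sc->sc_mtx);
}

static void
example3_detach(struct example3_softc *sc)
{

	mtx_lock(&sc->sc_mtx);
	/* With the mutex held, a running handler cannot race this stop. */
	callout_stop(&sc->sc_callout);
	mtx_unlock(&sc->sc_mtx);
}
#endif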

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
    struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	mtx_lock_spin(&callout_lock);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	mtx_unlock_spin(&callout_lock);

	return;
}
#endif /* APM_FIXUP_CALLTODO */