/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_timeout.c 128024 2004-04-08 02:03:49Z cperciva $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */

/* Exported to machdep.c and/or kern_clock.c.  */
struct callout *callout;
struct callout_list callfree;
int callwheelsize, callwheelbits, callwheelmask;
struct callout_tailq *callwheel;
int softticks;			/* Like ticks, but for softclock(). */
struct mtx callout_lock;
#ifdef DIAGNOSTIC
struct mtx dont_sleep_in_callout;
#endif

static struct callout *nextsoftcheck;	/* Next callout to be checked. */

/*-
 * Locked by callout_lock:
 *   curr_callout    - If a callout is in progress, it is curr_callout.
 *                     If curr_callout is non-NULL, threads waiting on
 *                     callout_wait will be woken up as soon as the
 *                     relevant callout completes.
 *   wakeup_ctr      - Incremented every time a thread wants to wait
 *                     for a callout to complete.  Modified only when
 *                     curr_callout is non-NULL.
 *   wakeup_needed   - If a thread is waiting on callout_wait, then
 *                     wakeup_needed is nonzero.  Increased only when
 *                     curr_callout is non-NULL.
 */
static struct callout *curr_callout;
static int wakeup_ctr;
static int wakeup_needed;

/*-
 * Locked by callout_wait_lock:
 *   callout_wait    - If wakeup_needed is set, callout_wait will be
 *                     triggered after the current callout finishes.
 *   wakeup_done_ctr - Set to the current value of wakeup_ctr after
 *                     callout_wait is triggered.
 */
static struct mtx callout_wait_lock;
static struct cv callout_wait;
static int wakeup_done_ctr;
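
/*
 * Editor's note: the handshake these variables implement, in order: a
 * thread that wants to wait (see _callout_stop_safe()) sets
 * wakeup_needed, takes a ticket from wakeup_ctr, drops callout_lock
 * and sleeps on callout_wait; when the running callout returns,
 * softclock() broadcasts callout_wait and publishes the ticket in
 * wakeup_done_ctr.  A waiter whose ticket is already <= wakeup_done_ctr
 * lost the race (the broadcast happened before it could sleep) and
 * skips the cv_wait().
 */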

/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 *	This code is called very early in the kernel initialization sequence,
 *	and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
	/*
	 * Calculate callout wheel size
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	callout = (struct callout *)v;
	v = (caddr_t)(callout + ncallout);
	callwheel = (struct callout_tailq *)v;
	v = (caddr_t)(callwheel + callwheelsize);
	return(v);
}
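
/*
 * Editor's note: a worked example of the sizing loop above.  With,
 * say, ncallout = 1000, the loop doubles callwheelsize until it
 * reaches 1024, leaving callwheelbits = 10 and callwheelmask = 0x3ff.
 * The wheel size is always the smallest power of two >= ncallout, so
 * "c_time & callwheelmask" elsewhere in this file is a cheap
 * substitute for "c_time % callwheelsize".
 */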

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 *	This code is called just once, after the space reserved for the
 *	callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	int i;

	SLIST_INIT(&callfree);
	for (i = 0; i < ncallout; i++) {
		callout_init(&callout[i], 0);
		callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
	}
	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&callwheel[i]);
	}
	mtx_init(&callout_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
#ifdef DIAGNOSTIC
	mtx_init(&dont_sleep_in_callout, "dont_sleep_in_callout", NULL, MTX_DEF);
#endif
	mtx_init(&callout_wait_lock, "callout_wait_lock", NULL, MTX_DEF);
	cv_init(&callout_wait, "callout_wait");
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and A. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */
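
/*
 * Editor's note: a small illustration of the timing wheel.  With
 * callwheelsize = 1024, a callout scheduled for c_time = ticks + 12345
 * is appended to bucket (ticks + 12345) & 1023.  softclock() below
 * visits one bucket per tick, so that bucket is next examined within
 * 1024 ticks; entries whose c_time does not match the current
 * softticks are skipped and picked up on a later revolution of the
 * wheel.
 */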

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *dummy)
{
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int gcalls;
	int wakeup_cookie;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	mtx_lock_spin(&callout_lock);
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					mtx_unlock_spin(&callout_lock);
					;	/* nothing */
					mtx_lock_spin(&callout_lock);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				int c_flags;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				c->c_func = NULL;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
							  c_links.sle);
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				curr_callout = c;
				mtx_unlock_spin(&callout_lock);
				if (!(c_flags & CALLOUT_MPSAFE)) {
					mtx_lock(&Giant);
					gcalls++;
				} else {
					mpcalls++;
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
				mtx_lock(&dont_sleep_in_callout);
#endif
				c_func(c_arg);
#ifdef DIAGNOSTIC
				mtx_unlock(&dont_sleep_in_callout);
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				if (!(c_flags & CALLOUT_MPSAFE))
					mtx_unlock(&Giant);
				mtx_lock_spin(&callout_lock);
				curr_callout = NULL;
				if (wakeup_needed) {
					/*
					 * There might be someone waiting
					 * for the callout to complete.
					 */
					wakeup_cookie = wakeup_ctr;
					mtx_unlock_spin(&callout_lock);
					mtx_lock(&callout_wait_lock);
					cv_broadcast(&callout_wait);
					wakeup_done_ctr = wakeup_cookie;
					mtx_unlock(&callout_wait_lock);
					mtx_lock_spin(&callout_lock);
					wakeup_needed = 0;
				}
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
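	/*
	 * Editor's note: the statistics below are exponentially weighted
	 * moving averages kept in fixed point: each sample is scaled by
	 * 1000 and blended in with a gain of 1/256, i.e.
	 * avg += (sample * 1000 - avg) / 256, hence the "Units = 1/1000"
	 * in the sysctl descriptions above.
	 */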
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	nextsoftcheck = NULL;
	mtx_unlock_spin(&callout_lock);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout *new;
	struct callout_handle handle;

	mtx_lock_spin(&callout_lock);

	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&callfree, c_links.sle);

	callout_reset(new, to_ticks, ftn, arg);

	handle.callout = new;
	mtx_unlock_spin(&callout_lock);
	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	mtx_lock_spin(&callout_lock);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	mtx_unlock_spin(&callout_lock);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}
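
/*
 * Editor's note: a minimal usage sketch of the legacy interface above;
 * foo_h, foo_expire and sc are hypothetical names, not part of this
 * file:
 *
 *	static struct callout_handle foo_h;
 *
 *	static void
 *	foo_expire(void *arg)
 *	{
 *		// runs from softclock() when the timeout fires
 *	}
 *
 *	callout_handle_init(&foo_h);		// benign with untimeout()
 *	foo_h = timeout(foo_expire, sc, hz);	// fire in ~1 second
 *	...
 *	untimeout(foo_expire, sc, foo_h);	// cancel if still pending
 *
 * untimeout() requires the original function and argument as well as
 * the handle, per the AT&T BCI note above.
 */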

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been serviced
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
void
callout_reset(c, to_ticks, ftn, arg)
	struct	callout *c;
	int	to_ticks;
	void	(*ftn)(void *);
	void	*arg;
{

	mtx_lock_spin(&callout_lock);
	if (c == curr_callout && wakeup_needed) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress, and someone has called
		 * callout_drain to kill that callout.  Don't reschedule.
		 */
		mtx_unlock_spin(&callout_lock);
		return;
	}
	if (c->c_flags & CALLOUT_PENDING)
		callout_stop(c);

	/*
	 * We could unlock callout_lock here and lock it again before the
	 * TAILQ_INSERT_TAIL, but there's no point since doing this setup
	 * doesn't take much time.
	 */
	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = ftn;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
			  c, c_links.tqe);
	mtx_unlock_spin(&callout_lock);
}

/* For binary compatibility. */
#undef callout_stop
int
callout_stop(c)
	struct	callout *c;
{

	return(_callout_stop_safe(c, 0));
}

int
_callout_stop_safe(c, safe)
	struct	callout *c;
	int	safe;
{
	int wakeup_cookie;

	mtx_lock_spin(&callout_lock);
	/*
	 * Don't attempt to delete a callout that's not on the queue.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;
		if (c == curr_callout && safe) {
			/* We need to wait until the callout is finished. */
			wakeup_needed = 1;
			wakeup_cookie = wakeup_ctr++;
			mtx_unlock_spin(&callout_lock);
			mtx_lock(&callout_wait_lock);

			/*
			 * Check to make sure that softclock() didn't
			 * do the wakeup in between our dropping
			 * callout_lock and picking up callout_wait_lock
			 */
			if (wakeup_cookie - wakeup_done_ctr > 0)
				cv_wait(&callout_wait, &callout_wait_lock);

			mtx_unlock(&callout_wait_lock);
		} else
			mtx_unlock_spin(&callout_lock);
		return (0);
	}
	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (nextsoftcheck == c) {
		nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
	}
	TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe);
	c->c_func = NULL;

	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
	}
	mtx_unlock_spin(&callout_lock);
	return (1);
}

void
callout_init(c, mpsafe)
	struct	callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe)
		c->c_flags |= CALLOUT_MPSAFE;
}
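
/*
 * Editor's note: a minimal usage sketch of the callout_*() interface
 * above; struct foo_softc, foo_tick and sc are hypothetical:
 *
 *	struct foo_softc {
 *		struct callout	fs_callout;
 *		...
 *	};
 *
 *	callout_init(&sc->fs_callout, 1);	// 1 => CALLOUT_MPSAFE;
 *						// handler runs without Giant
 *	callout_reset(&sc->fs_callout, hz, foo_tick, sc);
 *	...
 *	callout_stop(&sc->fs_callout);		// cancel if still pending
 *
 * A periodic handler rearms itself by calling callout_reset() again
 * from inside foo_tick().
 */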

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
    struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;
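
	/*
	 * Editor's note: a worked example, assuming hz = 100 and
	 * tick = 10000 (microseconds per tick).  Sleeping for 2.5 s
	 * (tv_sec = 2, tv_usec = 500000) takes the first branch:
	 * delta_ticks = (2000000 + 500000 + 9999) / 10000 + 1 = 251.
	 */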

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	mtx_lock_spin(&callout_lock);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	mtx_unlock_spin(&callout_lock);

	return;
}
#endif /* APM_FIXUP_CALLTODO */