/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_timeout.c 139831 2005-01-07 03:25:45Z cperciva $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */

/* Exported to machdep.c and/or kern_clock.c.  */
struct callout *callout;
struct callout_list callfree;
int callwheelsize, callwheelbits, callwheelmask;
struct callout_tailq *callwheel;
int softticks;			/* Like ticks, but for softclock(). */
struct mtx callout_lock;
#ifdef DIAGNOSTIC
struct mtx dont_sleep_in_callout;
#endif

static struct callout *nextsoftcheck;	/* Next callout to be checked. */

/**
 * Locked by callout_lock:
 *   curr_callout    - If a callout is in progress, it is curr_callout.
 *                     If curr_callout is non-NULL, threads waiting on
 *                     callout_wait will be woken up as soon as the
 *                     relevant callout completes.
 *   wakeup_ctr      - Incremented every time a thread wants to wait
 *                     for a callout to complete.  Modified only when
 *                     curr_callout is non-NULL.
 *   wakeup_needed   - If a thread is waiting on callout_wait, then
 *                     wakeup_needed is nonzero.  Increased only when
 *                     curr_callout is non-NULL.
 */
static struct callout *curr_callout;
static int wakeup_ctr;
static int wakeup_needed;

/**
 * Locked by callout_wait_lock:
 *   callout_wait    - If wakeup_needed is set, callout_wait will be
 *                     triggered after the current callout finishes.
 *   wakeup_done_ctr - Set to the current value of wakeup_ctr after
 *                     callout_wait is triggered.
 */
static struct mtx callout_wait_lock;
static struct cv callout_wait;
static int wakeup_done_ctr;

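/*
 * In outline, the drain handshake implemented below: a thread that must
 * wait for the running callout sets wakeup_needed, takes a cookie from
 * wakeup_ctr++, and sleeps on callout_wait unless wakeup_done_ctr shows
 * that softclock() has already caught up; when the callout returns,
 * softclock() broadcasts callout_wait and publishes the pending cookie
 * in wakeup_done_ctr.
 */
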
/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 *	This code is called very early in the kernel initialization sequence,
 *	and may be called more than once.
 */
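/*
 * Illustrative arithmetic (not from the source): ncallout = 1000 would
 * be rounded up to callwheelsize = 1024, giving callwheelbits = 10 and
 * callwheelmask = 0x3ff, so a callout expiring at time t hashes to
 * bucket (t & callwheelmask).
 */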
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
	/*
	 * Calculate callout wheel size
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	callout = (struct callout *)v;
	v = (caddr_t)(callout + ncallout);
	callwheel = (struct callout_tailq *)v;
	v = (caddr_t)(callwheel + callwheelsize);
	return(v);
}

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 *	This code is called just once, after the space reserved for the
 *	callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	int i;

	SLIST_INIT(&callfree);
	for (i = 0; i < ncallout; i++) {
		callout_init(&callout[i], 0);
		callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
	}
	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&callwheel[i]);
	}
	mtx_init(&callout_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
#ifdef DIAGNOSTIC
	mtx_init(&dont_sleep_in_callout, "dont_sleep_in_callout", NULL, MTX_DEF);
#endif
	mtx_init(&callout_wait_lock, "callout_wait_lock", NULL, MTX_DEF);
	cv_init(&callout_wait, "callout_wait");
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *dummy)
{
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int gcalls;
	int wakeup_cookie;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	mtx_lock_spin(&callout_lock);
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					mtx_unlock_spin(&callout_lock);
					;	/* nothing */
					mtx_lock_spin(&callout_lock);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				int c_flags;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				c->c_func = NULL;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
							  c_links.sle);
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				curr_callout = c;
				mtx_unlock_spin(&callout_lock);
				if (!(c_flags & CALLOUT_MPSAFE)) {
					mtx_lock(&Giant);
					gcalls++;
					CTR1(KTR_CALLOUT, "callout %p", c_func);
				} else {
					mpcalls++;
					CTR1(KTR_CALLOUT, "callout mpsafe %p",
					    c_func);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
				mtx_lock(&dont_sleep_in_callout);
#endif
				c_func(c_arg);
#ifdef DIAGNOSTIC
				mtx_unlock(&dont_sleep_in_callout);
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				if (!(c_flags & CALLOUT_MPSAFE))
					mtx_unlock(&Giant);
				mtx_lock_spin(&callout_lock);
				curr_callout = NULL;
				if (wakeup_needed) {
					/*
					 * There might be someone waiting
					 * for the callout to complete.
					 */
					wakeup_cookie = wakeup_ctr;
					mtx_unlock_spin(&callout_lock);
					mtx_lock(&callout_wait_lock);
					cv_broadcast(&callout_wait);
					wakeup_done_ctr = wakeup_cookie;
					mtx_unlock(&callout_wait_lock);
					mtx_lock_spin(&callout_lock);
					wakeup_needed = 0;
				}
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
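	/*
	 * Update the sysctl running averages: fixed-point exponential
	 * moving averages in units of 1/1000, giving the newest sample
	 * a weight of 1/256 (hence the >> 8).
	 */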
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	nextsoftcheck = NULL;
	mtx_unlock_spin(&callout_lock);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
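/*
 * Usage sketch (illustrative only; "mydev_poll" and "sc" are
 * hypothetical names, not part of this file):
 *
 *	struct callout_handle h;
 *
 *	h = timeout(mydev_poll, sc, hz);	- fire in ~1 second
 *	...
 *	untimeout(mydev_poll, sc, h);		- cancel if still pending
 */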
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout *new;
	struct callout_handle handle;

	mtx_lock_spin(&callout_lock);

	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&callfree, c_links.sle);

	callout_reset(new, to_ticks, ftn, arg);

	handle.callout = new;
	mtx_unlock_spin(&callout_lock);
	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	mtx_lock_spin(&callout_lock);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	mtx_unlock_spin(&callout_lock);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been serviced
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
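/*
 * Usage sketch (illustrative only; "sc", "sc->tick_ch" and "mydev_tick"
 * are hypothetical):
 *
 *	callout_init(&sc->tick_ch, 1);			 - MPSAFE, no Giant
 *	callout_reset(&sc->tick_ch, hz, mydev_tick, sc); - run in ~1 second
 *	...
 *	callout_stop(&sc->tick_ch);	- returns 1 if it was still pending
 */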
void
callout_reset(c, to_ticks, ftn, arg)
	struct	callout *c;
	int	to_ticks;
	void	(*ftn)(void *);
	void	*arg;
{

	mtx_lock_spin(&callout_lock);
	if (c == curr_callout && wakeup_needed) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress, and someone has called
		 * callout_drain to kill that callout.  Don't reschedule.
		 */
		mtx_unlock_spin(&callout_lock);
		return;
	}
	if (c->c_flags & CALLOUT_PENDING) {
		if (nextsoftcheck == c) {
			nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
		}
		TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c,
		    c_links.tqe);

		/*
		 * Part of the normal "stop a pending callout" process
		 * is to clear the CALLOUT_ACTIVE and CALLOUT_PENDING
		 * flags.  We're not going to bother doing that here,
		 * because we're going to be setting those flags ten lines
		 * after this point, and we're holding callout_lock
		 * between now and then.
		 */
	}

	/*
	 * We could unlock callout_lock here and lock it again before the
	 * TAILQ_INSERT_TAIL, but there's no point since doing this setup
	 * doesn't take much time.
	 */
	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = ftn;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
			  c, c_links.tqe);
	mtx_unlock_spin(&callout_lock);
}

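/*
 * A note on the entry points (assumption about <sys/callout.h>, not
 * stated in this file): callout_stop(c) and callout_drain(c) are
 * expected to map to _callout_stop_safe(c, 0) and
 * _callout_stop_safe(c, 1) respectively; the "safe" case sleeps until
 * a currently running callout has finished.
 */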
int
_callout_stop_safe(c, safe)
	struct	callout *c;
	int	safe;
{
	int wakeup_cookie;

	mtx_lock_spin(&callout_lock);
	/*
	 * Don't attempt to delete a callout that's not on the queue.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;
		if (c == curr_callout && safe) {
			/* We need to wait until the callout is finished. */
			wakeup_needed = 1;
			wakeup_cookie = wakeup_ctr++;
			mtx_unlock_spin(&callout_lock);
			mtx_lock(&callout_wait_lock);

			/*
			 * Check to make sure that softclock() didn't
			 * do the wakeup in between our dropping
			 * callout_lock and picking up callout_wait_lock.
			 */
			if (wakeup_cookie - wakeup_done_ctr > 0)
				cv_wait(&callout_wait, &callout_wait_lock);

			mtx_unlock(&callout_wait_lock);
		} else
			mtx_unlock_spin(&callout_lock);
		return (0);
	}
	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (nextsoftcheck == c) {
		nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
	}
	TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe);
	c->c_func = NULL;

	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
	}
	mtx_unlock_spin(&callout_lock);
	return (1);
}

void
callout_init(c, mpsafe)
	struct	callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe)
		c->c_flags |= CALLOUT_MPSAFE;
}

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
    struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

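	/*
	 * Illustrative arithmetic (assuming hz = 100, i.e. tick = 10000
	 * microseconds): a 2.5 second suspend takes the first branch below
	 * and yields (2 * 1000000 + 500000 + 9999) / 10000 + 1 = 251 ticks.
	 */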
	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	mtx_lock_spin(&callout_lock);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	mtx_unlock_spin(&callout_lock);

	return;
}
#endif /* APM_FIXUP_CALLTODO */