/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: head/sys/kern/kern_timeout.c 69136 2000-11-25 03:34:49Z jake $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/mutex.h>

/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */

/* Exported to machdep.c and/or kern_clock.c.  */
struct callout *callout;
struct callout_list callfree;
int callwheelsize, callwheelbits, callwheelmask;
struct callout_tailq *callwheel;
int softticks;			/* Like ticks, but for softclock(). */
struct mtx callout_lock;

static struct callout *nextsoftcheck;	/* Next callout to be checked. */

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and A. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */
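
/*
 * Illustrative example (hypothetical numbers): the wheel is a
 * power-of-two array of buckets hashed by expiry tick, so enqueue and
 * bucket lookup are O(1).  A callout due at absolute tick c_time lives
 * in
 *
 *	callwheel[c_time & callwheelmask];
 *
 * e.g. with callwheelsize == 256 (callwheelmask == 255), a callout set
 * at ticks == 1000 for 30 ticks hashes to bucket (1000 + 30) & 255 == 6.
 * Entries that share a bucket but expire on a later revolution of the
 * wheel are skipped by the c_time != curticks test in softclock().
 */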

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *dummy)
{
	register struct callout *c;
	register struct callout_tailq *bucket;
	register int s;
	register int curticks;
	register int steps;	/* #steps since we last allowed interrupts */

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	steps = 0;
	s = splhigh();
	mtx_enter(&callout_lock, MTX_SPIN);
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					mtx_exit(&callout_lock, MTX_SPIN);
					splx(s);
					s = splhigh();
					mtx_enter(&callout_lock, MTX_SPIN);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				int c_flags;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				c->c_func = NULL;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
							  c_links.sle);
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				mtx_exit(&callout_lock, MTX_SPIN);
				if (!(c_flags & CALLOUT_MPSAFE))
					mtx_enter(&Giant, MTX_DEF);
				splx(s);
				c_func(c_arg);
				s = splhigh();
				if (!(c_flags & CALLOUT_MPSAFE))
					mtx_exit(&Giant, MTX_DEF);
				mtx_enter(&callout_lock, MTX_SPIN);
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
	nextsoftcheck = NULL;
	mtx_exit(&callout_lock, MTX_SPIN);
	splx(s);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
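
/*
 * Example usage (an illustrative sketch; xxx_poll() and the softc
 * pointer `sc' are hypothetical driver names, with xxx_poll having
 * the timeout_t signature void xxx_poll(void *)):
 *
 *	struct callout_handle xxx_ch;
 *
 *	callout_handle_init(&xxx_ch);		harmless to untimeout now
 *	xxx_ch = timeout(xxx_poll, sc, hz);	xxx_poll(sc) in ~1 second
 *	...
 *	untimeout(xxx_poll, sc, xxx_ch);	cancelled only if both
 *						function and arg match
 */
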
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	register int to_ticks;
{
	int s;
	struct callout *new;
	struct callout_handle handle;

	s = splhigh();
	mtx_enter(&callout_lock, MTX_SPIN);

	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&callfree, c_links.sle);

	callout_reset(new, to_ticks, ftn, arg);

	handle.callout = new;
	mtx_exit(&callout_lock, MTX_SPIN);
	splx(s);
	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{
	register int s;

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	s = splhigh();
	mtx_enter(&callout_lock, MTX_SPIN);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	mtx_exit(&callout_lock, MTX_SPIN);
	splx(s);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been serviced
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
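
/*
 * Example usage (an illustrative sketch; struct xxx_softc and
 * xxx_tick() are hypothetical driver names):
 *
 *	struct xxx_softc {
 *		struct callout ch;
 *	} *sc;
 *
 *	callout_init(&sc->ch);				once, before first use
 *	callout_reset(&sc->ch, hz / 10, xxx_tick, sc);	xxx_tick(sc) in ~100ms
 *	callout_stop(&sc->ch);				cancel if still pending
 *
 * A self-rearming handler would typically call callout_reset() on its
 * own callout from within xxx_tick(); the callout_active() and
 * callout_deactivate() macros above let such a handler notice an
 * intervening callout_stop().
 */
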
void
_callout_reset(c, to_ticks, ftn, arg, flags)
	struct	callout *c;
	int	to_ticks;
	void	(*ftn) __P((void *));
	void	*arg;
	int	flags;
{
	int	s;

	s = splhigh();
	mtx_enter(&callout_lock, MTX_SPIN);
	if (c->c_flags & CALLOUT_PENDING)
		callout_stop(c);

	/*
	 * We could spl down here and back up at the TAILQ_INSERT_TAIL,
	 * but there's no point since doing this setup doesn't take much
	 * time.
	 */
	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING |
	    (flags & CALLOUT_MPSAFE));
	c->c_func = ftn;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
			  c, c_links.tqe);
	mtx_exit(&callout_lock, MTX_SPIN);
	splx(s);
}

void
callout_stop(c)
	struct	callout *c;
{
	int	s;

	s = splhigh();
	mtx_enter(&callout_lock, MTX_SPIN);
	/*
	 * Don't attempt to delete a callout that's not on the queue.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;
		mtx_exit(&callout_lock, MTX_SPIN);
		splx(s);
		return;
	}
	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (nextsoftcheck == c) {
		nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
	}
	TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe);
	c->c_func = NULL;

	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
	}
	mtx_exit(&callout_lock, MTX_SPIN);
	splx(s);
}

void
callout_init(c)
	struct	callout *c;
{
	bzero(c, sizeof *c);
}

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of ticks we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend fire upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
    struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;
	int s;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;
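
	/*
	 * Worked example (hypothetical numbers): with hz == 100 we have
	 * tick == 10000 usec, so sleeping 2.5 seconds (tv_sec == 2,
	 * tv_usec == 500000) yields
	 *
	 *	delta_ticks = (2 * 1000000 + 500000 + 9999) / 10000 + 1
	 *		    = 2509999 / 10000 + 1 = 251;
	 *
	 * rounding up and adding the extra tick ensures the sleep is
	 * never credited with fewer ticks than actually elapsed.
	 */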

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	s = splhigh();
	mtx_enter(&callout_lock, MTX_SPIN);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	mtx_exit(&callout_lock, MTX_SPIN);
	splx(s);

	return;
}
#endif /* APM_FIXUP_CALLTODO */