/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $FreeBSD: head/sys/kern/kern_timeout.c 68869 2000-11-18 00:21:00Z jhb $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/mutex.h>
47124882Srwatson
48177785Skib/*
4946155Sphk * TODO:
5087275Srwatson *	allocate more timeout table slots when table overflows.
5187275Srwatson */
52220137Strasz
53221362Strasz/* Exported to machdep.c and/or kern_clock.c.  */
54168401Spjdstruct callout *callout;
55193066Sjamiestruct callout_list callfree;
56113275Smikeint callwheelsize, callwheelbits, callwheelmask;
57147185Spjdstruct callout_tailq *callwheel;
58113275Smikeint softticks;			/* Like ticks, but for softclock(). */
5946155Sphk
60113275Smikestatic struct callout *nextsoftcheck;	/* Next callout to be checked. */
6157163Srwatson
62113275Smike/*
63196019Srwatson * The callout mechanism is based on the work of Adam M. Costello and
6446155Sphk * George Varghese, published in a technical report entitled "Redesigning
65196019Srwatson * the BSD Callout and Timer Facilities" and modified slightly for inclusion
66196019Srwatson * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
6746155Sphk * used in this implementation was published by G.Varghese and A. Lauck in
68196019Srwatson * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
69185435Sbz * the Efficient Implementation of a Timer Facility" in the Proceedings of
70185435Sbz * the 11th ACM Annual Symposium on Operating Systems Principles,
71185435Sbz * Austin, Texas Nov 1987.
72185435Sbz */
73185435Sbz
74185435Sbz/*
7546155Sphk * Software (low priority) clock interrupt.
76163606Srwatson * Run periodic events from timeout queue.
77163606Srwatson */
78195944Sjamievoid
79195944Sjamiesoftclock(void *dummy)
8046155Sphk{
81227293Sed	register struct callout *c;
8246155Sphk	register struct callout_tailq *bucket;
83202468Sbz	register int s;
84202468Sbz	register int curticks;
85202468Sbz	register int steps;	/* #steps since we last allowed interrupts */
86202468Sbz
87202468Sbz#ifndef MAX_SOFTCLOCK_STEPS
88202468Sbz#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
89202468Sbz#endif /* MAX_SOFTCLOCK_STEPS */
90202468Sbz
91202468Sbz	steps = 0;
92202468Sbz	s = splhigh();
93202468Sbz	mtx_enter(&sched_lock, MTX_SPIN);
94202468Sbz	while (softticks != ticks) {
95202468Sbz		softticks++;
96202468Sbz		/*
97202468Sbz		 * softticks may be modified by hard clock, so cache
98192895Sjamie		 * it while we work on a given bucket.
99192895Sjamie		 */
100192895Sjamie		curticks = softticks;
101192895Sjamie		bucket = &callwheel[curticks & callwheelmask];
102192895Sjamie		c = TAILQ_FIRST(bucket);
103192895Sjamie		while (c) {
104192895Sjamie			if (c->c_time != curticks) {
105192895Sjamie				c = TAILQ_NEXT(c, c_links.tqe);
106231267Smm				++steps;
107194762Sjamie				if (steps >= MAX_SOFTCLOCK_STEPS) {
108195944Sjamie					nextsoftcheck = c;
109201145Santoine					/* Give interrupts a chance. */
110196176Sbz					mtx_exit(&sched_lock, MTX_SPIN);
111202468Sbz					splx(s);
112196176Sbz					s = splhigh();
113202468Sbz					mtx_enter(&sched_lock, MTX_SPIN);
114196176Sbz					c = nextsoftcheck;
115192895Sjamie					steps = 0;
116192895Sjamie				}
117192895Sjamie			} else {
11857163Srwatson				void (*c_func)(void *);
119221362Strasz				void *c_arg;
120168401Spjd
121191673Sjamie				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
122191673Sjamie				TAILQ_REMOVE(bucket, c, c_links.tqe);
123221362Strasz				c_func = c->c_func;
124179881Sdelphij				c_arg = c->c_arg;
125113275Smike				c->c_func = NULL;
126191673Sjamie				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
127190466Sjamie					c->c_flags = CALLOUT_LOCAL_ALLOC;
128191673Sjamie					SLIST_INSERT_HEAD(&callfree, c,
129192895Sjamie							  c_links.sle);
130192895Sjamie				} else {
131221362Strasz					c->c_flags =
132221362Strasz					    (c->c_flags & ~CALLOUT_PENDING);
133232598Strasz				}
134221362Strasz				mtx_exit(&sched_lock, MTX_SPIN);
135221362Strasz				splx(s);
136185435Sbz				c_func(c_arg);
137190466Sjamie				s = splhigh();
138192895Sjamie				mtx_enter(&sched_lock, MTX_SPIN);
139185435Sbz				steps = 0;
140185435Sbz				c = nextsoftcheck;
141190466Sjamie			}
142192895Sjamie		}
143185435Sbz	}
144113275Smike	nextsoftcheck = NULL;
145191673Sjamie	mtx_exit(&sched_lock, MTX_SPIN);
146191673Sjamie	splx(s);
147191673Sjamie}
148191673Sjamie
149191673Sjamie/*
150191673Sjamie * timeout --
151113275Smike *	Execute a function after a specified length of time.
152192895Sjamie *
153216861Sbz * untimeout --
154216861Sbz *	Cancel previous timeout function call.
155216861Sbz *
156192895Sjamie * callout_handle_init --
157192895Sjamie *	Initialize a handle so that using it with untimeout is benign.
158192895Sjamie *
159202468Sbz *	See AT&T BCI Driver Reference Manual for specification.  This
160202468Sbz *	implementation differs from that one in that although an
161202468Sbz *	identification value is returned from timeout, the original
162202468Sbz *	arguments to timeout as well as the identifier are used to
163202468Sbz *	identify entries for untimeout.
164202468Sbz */
165192895Sjamiestruct callout_handle
166216861Sbztimeout(ftn, arg, to_ticks)
167192895Sjamie	timeout_t *ftn;
168192895Sjamie	void *arg;
169192895Sjamie	register int to_ticks;
170202468Sbz{
171202468Sbz	int s;
172202468Sbz	struct callout *new;
173202468Sbz	struct callout_handle handle;
174202468Sbz
175202468Sbz	s = splhigh();
176195870Sjamie	mtx_enter(&sched_lock, MTX_SPIN);
177216861Sbz
178195870Sjamie	/* Fill in the next free callout structure. */
179195870Sjamie	new = SLIST_FIRST(&callfree);
180195870Sjamie	if (new == NULL)
181195870Sjamie		/* XXX Attempt to malloc first */
182195870Sjamie		panic("timeout table full");
183195870Sjamie	SLIST_REMOVE_HEAD(&callfree, c_links.sle);
184195870Sjamie
185195870Sjamie	callout_reset(new, to_ticks, ftn, arg);
186195870Sjamie
187195870Sjamie	handle.callout = new;
188192895Sjamie	mtx_exit(&sched_lock, MTX_SPIN);
189195870Sjamie	splx(s);
190192895Sjamie	return (handle);
191192895Sjamie}
192195870Sjamie
193192895Sjamievoid
194192895Sjamieuntimeout(ftn, arg, handle)
195216861Sbz	timeout_t *ftn;
196192895Sjamie	void *arg;
197192895Sjamie	struct callout_handle handle;
198192895Sjamie{
199192895Sjamie	register int s;
200192895Sjamie
201192895Sjamie	/*
202192895Sjamie	 * Check for a handle that was initialized
203192895Sjamie	 * by callout_handle_init, but never used
204192895Sjamie	 * for a real timeout.
205232059Smm	 */
206232059Smm	if (handle.callout == NULL)
207232186Smm		return;
208232278Smm
209192895Sjamie	s = splhigh();
210216861Sbz	mtx_enter(&sched_lock, MTX_SPIN);
211192895Sjamie	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
212192895Sjamie		callout_stop(handle.callout);
213192895Sjamie	mtx_exit(&sched_lock, MTX_SPIN);
214192895Sjamie	splx(s);
215192895Sjamie}
216192895Sjamie
217192895Sjamievoid
218192895Sjamiecallout_handle_init(struct callout_handle *handle)
219192895Sjamie{
220232059Smm	handle->callout = NULL;
221232059Smm}
222232186Smm
223232278Smm/*
224192895Sjamie * New interface; clients allocate their own callout structures.
225216861Sbz *
226192895Sjamie * callout_reset() - establish or change a timeout
227196002Sjamie * callout_stop() - disestablish a timeout
228196002Sjamie * callout_init() - initialize a callout structure so that it can
229232059Smm *	safely be passed to callout_reset() and callout_stop()
230192895Sjamie *
231196002Sjamie * <sys/callout.h> defines three convenience macros:
232231267Smm *
233192895Sjamie * callout_active() - returns truth if callout has not been serviced
234193865Sjamie * callout_pending() - returns truth if callout is still waiting for timeout
235192895Sjamie * callout_deactivate() - marks the callout as having been serviced
236192895Sjamie */
237192895Sjamievoid
238185435Sbzcallout_reset(c, to_ticks, ftn, arg)
239185435Sbz	struct	callout *c;
240185435Sbz	int	to_ticks;
241185435Sbz	void	(*ftn) __P((void *));
242185435Sbz	void	*arg;
243185435Sbz{
244185435Sbz	int	s;
245185435Sbz
246185435Sbz	s = splhigh();
247185435Sbz	mtx_enter(&sched_lock, MTX_SPIN);
248185435Sbz	if (c->c_flags & CALLOUT_PENDING)
249185435Sbz		callout_stop(c);
250185435Sbz
251185435Sbz	/*
252185435Sbz	 * We could spl down here and back up at the TAILQ_INSERT_TAIL,
253185435Sbz	 * but there's no point since doing this setup doesn't take much
254185435Sbz	 * time.
255185435Sbz	 */
256185435Sbz	if (to_ticks <= 0)
257185435Sbz		to_ticks = 1;
258185435Sbz
259185435Sbz	c->c_arg = arg;
260185435Sbz	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
261185435Sbz	c->c_func = ftn;
262185435Sbz	c->c_time = ticks + to_ticks;
263185435Sbz	TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
264185435Sbz			  c, c_links.tqe);
265185435Sbz	mtx_exit(&sched_lock, MTX_SPIN);
266185435Sbz	splx(s);
267185435Sbz}
268185435Sbz
269185435Sbzvoid
270185435Sbzcallout_stop(c)
271185435Sbz	struct	callout *c;
272185435Sbz{
273185435Sbz	int	s;
274185435Sbz
275190466Sjamie	s = splhigh();
276185435Sbz	mtx_enter(&sched_lock, MTX_SPIN);
277185435Sbz	/*
278185435Sbz	 * Don't attempt to delete a callout that's not on the queue.
279185435Sbz	 */
280185435Sbz	if (!(c->c_flags & CALLOUT_PENDING)) {
281185435Sbz		c->c_flags &= ~CALLOUT_ACTIVE;
282185435Sbz		mtx_exit(&sched_lock, MTX_SPIN);
283185435Sbz		splx(s);
284185435Sbz		return;
285191673Sjamie	}
286191673Sjamie	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
287191673Sjamie
288191673Sjamie	if (nextsoftcheck == c) {
289191673Sjamie		nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
290191673Sjamie	}
291225617Skmacy	TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe);
292185435Sbz	c->c_func = NULL;
293191673Sjamie
294191673Sjamie	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
295192895Sjamie		SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
296185435Sbz	}
297191673Sjamie	mtx_exit(&sched_lock, MTX_SPIN);
298191673Sjamie	splx(s);
299191673Sjamie}
300185435Sbz
301191673Sjamievoid
302191673Sjamiecallout_init(c)
303191673Sjamie	struct	callout *c;
304191673Sjamie{
305185435Sbz	bzero(c, sizeof *c);
306192895Sjamie}
307192895Sjamie
#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintanence the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independant on whether the suspend was 2 hours or
 * 2 days.  Your milage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
    struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;
	int s;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	s = splhigh();
	mtx_enter(&sched_lock, MTX_SPIN);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	mtx_exit(&sched_lock, MTX_SPIN);
	splx(s);

	return;
}
#endif /* APM_FIXUP_CALLTODO */
376192895Sjamie