/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $Id: kern_timeout.c,v 1.53 1998/02/15 14:15:21 phk Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>

/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */

/* Exported to machdep.c and/or kern_clock.c.  */
struct callout *callout;
struct callout_list callfree;
int callwheelsize, callwheelbits, callwheelmask;
struct callout_tailq *callwheel;
int softticks;			/* Like ticks, but for softclock(). */

static struct callout *nextsoftcheck;	/* Next callout to be checked. */

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and A. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */
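
/*
 * Illustrative note (not part of the original source): with the hashed
 * timing wheel described above, a callout due at absolute tick "t" is
 * kept in the bucket
 *
 *	&callwheel[t & callwheelmask]
 *
 * where callwheelmask == callwheelsize - 1 and callwheelsize is set up
 * elsewhere at startup as a power of two.  For example, with
 * callwheelsize == 256, callouts for ticks 1000 and 1256 both hash to
 * bucket 232; softclock() below tells them apart by comparing c_time
 * against the tick whose bucket it is currently scanning.
 */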

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock()
{
	register struct callout *c;
	register struct callout_tailq *bucket;
	register int s;
	register int curticks;
	register int steps;	/* #steps since we last allowed interrupts */

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	steps = 0;
	s = splhigh();
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					splx(s);
					s = splhigh();
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c->c_func = NULL;
				SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
				splx(s);
				c_func(c_arg);
				s = splhigh();
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
	nextsoftcheck = NULL;
	splx(s);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
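
/*
 * Illustrative usage sketch (not from this file; foo_ch, foo_expire and
 * foo_softc are hypothetical names): a driver typically initializes a
 * handle once, arms it with timeout(), and cancels it by passing the
 * same function and argument back along with the handle:
 *
 *	static struct callout_handle foo_ch;
 *
 *	callout_handle_init(&foo_ch);			(handle now benign)
 *	foo_ch = timeout(foo_expire, &foo_softc, hz);	(fire in ~1 second)
 *	...
 *	untimeout(foo_expire, &foo_softc, foo_ch);
 *
 * Calling untimeout() on a handle that was only ever initialized is a
 * no-op, as documented above.
 */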
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	register int to_ticks;
{
	int s;
	struct callout *new;
	struct callout_handle handle;

	if (to_ticks <= 0)
		to_ticks = 1;

	/* Lock out the clock. */
	s = splhigh();

	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");

	SLIST_REMOVE_HEAD(&callfree, c_links.sle);
	new->c_arg = arg;
	new->c_func = ftn;
	new->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&callwheel[new->c_time & callwheelmask],
			  new, c_links.tqe);

	splx(s);
	handle.callout = new;
	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{
	register int s;

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	s = splhigh();
	if ((handle.callout->c_func == ftn)
	 && (handle.callout->c_arg == arg)) {
		if (nextsoftcheck == handle.callout) {
			nextsoftcheck = TAILQ_NEXT(handle.callout, c_links.tqe);
		}
		TAILQ_REMOVE(&callwheel[handle.callout->c_time & callwheelmask],
			     handle.callout, c_links.tqe);
		handle.callout->c_func = NULL;
		SLIST_INSERT_HEAD(&callfree, handle.callout, c_links.sle);
	}
	splx(s);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
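
/*
 * Worked example (illustrative only; assumes hz == 100, i.e. tick ==
 * 10000 usec): a 2.5 second suspend (tv_sec == 2, tv_usec == 500000)
 * takes the first branch below and gives
 *
 *	delta_ticks = (2 * 1000000 + 500000 + (10000 - 1)) / 10000 + 1 == 251
 *
 * ticks, which the loop below then subtracts from the timers at the
 * front of the calltodo list, so anything that should have fired during
 * the suspend expires on the next softclock() pass.
 */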
void
adjust_timeout_calltodo(time_change)
    struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;
	int s;

	/*
	 * How many ticks were we asleep?
	 * (stolen from hzto()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	s = splhigh();
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	splx(s);

	return;
}
#endif /* APM_FIXUP_CALLTODO */