kern_timeout.c revision 177859
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_timeout.c 177859 2008-04-02 11:20:30Z jeff $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <sys/smp.h>

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_lockcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
    "Average number of lock callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */
int callwheelsize, callwheelbits, callwheelmask;

struct callout_cpu {
	struct mtx		cc_lock;
	struct callout		*cc_callout;
	struct callout_tailq	*cc_callwheel;
	struct callout_list	cc_callfree;
	struct callout		*cc_next;
	struct callout		*cc_curr;
	void			*cc_cookie;
	int 			cc_softticks;
	int			cc_cancel;
	int			cc_waiting;
};

#ifdef SMP
struct callout_cpu cc_cpu[MAXCPU];
#define	CC_CPU(cpu)	(&cc_cpu[(cpu)])
#define	CC_SELF()	CC_CPU(PCPU_GET(cpuid))
#else
struct callout_cpu cc_cpu;
#define	CC_CPU(cpu)	&cc_cpu
#define	CC_SELF()	&cc_cpu
#endif
#define	CC_LOCK(cc)	mtx_lock_spin(&(cc)->cc_lock)
#define	CC_UNLOCK(cc)	mtx_unlock_spin(&(cc)->cc_lock)

static int timeout_cpu;

MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");

/**
 * Locked by cc_lock:
 *   cc_curr         - If a callout is in progress, it is cc_curr.
 *                     If cc_curr is non-NULL, threads waiting in
 *                     callout_drain() will be woken up as soon as the
 *                     relevant callout completes.
 *   cc_cancel       - Changing to 1 with both cc_lock and c_lock held
 *                     guarantees that the current callout will not run.
 *                     The softclock() function sets this to 0 before it
 *                     drops cc_lock to acquire c_lock, and it calls
 *                     the handler only if cc_cancel is still 0 after
 *                     c_lock is successfully acquired.
 *   cc_waiting      - If a thread is waiting in callout_drain(), then
 *                     cc_waiting is nonzero.  Set only when
 *                     cc_curr is non-NULL.
 */
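
/*
 * Illustrative sketch (not compiled, and the "sc", "sc_mtx" and "sc_timer"
 * names are hypothetical, not part of this file): how the cc_cancel
 * handshake above looks to a consumer whose callout was set up with
 * callout_init_mtx().  Because softclock() runs the handler only if
 * cc_cancel is still 0 after it acquires c_lock, stopping the callout
 * while holding that same lock prevents a pending invocation from
 * running its body.
 */
#if 0
	mtx_lock(&sc->sc_mtx);			/* the c_lock of sc_timer */
	if (callout_stop(&sc->sc_timer) != 0) {
		/*
		 * The invocation was still pending, or was blocked on
		 * sc_mtx inside softclock(); either way it has now been
		 * cancelled and its body will not run.
		 */
	}
	mtx_unlock(&sc->sc_mtx);
#endif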

/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 *	This code is called very early in the kernel initialization sequence,
 *	and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
	struct callout_cpu *cc;

	timeout_cpu = PCPU_GET(cpuid);
	cc = CC_CPU(timeout_cpu);
	/*
	 * Calculate callout wheel size
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	cc->cc_callout = (struct callout *)v;
	v = (caddr_t)(cc->cc_callout + ncallout);
	cc->cc_callwheel = (struct callout_tailq *)v;
	v = (caddr_t)(cc->cc_callwheel + callwheelsize);
	return(v);
}

static void
callout_cpu_init(struct callout_cpu *cc)
{
	struct callout *c;
	int i;

	mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
	SLIST_INIT(&cc->cc_callfree);
	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&cc->cc_callwheel[i]);
	}
	if (cc->cc_callout == NULL)
		return;
	for (i = 0; i < ncallout; i++) {
		c = &cc->cc_callout[i];
		callout_init(c, 0);
		c->c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
}

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 *	This code is called just once, after the space reserved for the
 *	callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	callout_cpu_init(CC_CPU(timeout_cpu));
}

/*
 * Start standard softclock thread.
 */
void    *softclock_ih;

static void
start_softclock(void *dummy)
{
	struct callout_cpu *cc;
#ifdef SMP
	int cpu;
#endif

	cc = CC_CPU(timeout_cpu);
	if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK,
	    INTR_MPSAFE, &softclock_ih))
		panic("died while creating standard software ithreads");
	cc->cc_cookie = softclock_ih;
#ifdef SMP
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (cpu == timeout_cpu)
			continue;
		if (CPU_ABSENT(cpu))
			continue;
		cc = CC_CPU(cpu);
		if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
		    INTR_MPSAFE, &cc->cc_cookie))
			panic("died while creating standard software ithreads");
		cc->cc_callout = NULL;	/* Only cpu0 handles timeout(). */
		cc->cc_callwheel = malloc(
		    sizeof(struct callout_tailq) * callwheelsize, M_CALLOUT,
		    M_WAITOK);
		callout_cpu_init(cc);
	}
#endif
}

SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);

void
callout_tick(void)
{
	int need_softclock = 0;
	struct callout_cpu *cc;

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
	if (!TAILQ_EMPTY(&cc->cc_callwheel[ticks & callwheelmask])) {
		need_softclock = 1;
	} else if (cc->cc_softticks + 1 == ticks)
		++cc->cc_softticks;
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	/*
	 * swi_sched acquires the thread lock, so we don't want to call it
	 * with cc_lock held; incorrect locking order.
	 */
	if (need_softclock)
		swi_sched(cc->cc_cookie, 0);
}

static struct callout_cpu *
callout_lock(struct callout *c)
{
	struct callout_cpu *cc;
	int cpu;

	for (;;) {
		cpu = c->c_cpu;
		cc = CC_CPU(cpu);
		CC_LOCK(cc);
		if (cpu == c->c_cpu)
			break;
		CC_UNLOCK(cc);
	}
	return (cc);
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */
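
/*
 * Illustrative sketch (not compiled; every name below is hypothetical and
 * not part of this file): a minimal hashed timing wheel in the style
 * described above.  Insertion hashes the absolute expiry tick into a
 * bucket; the per-tick scan visits only that bucket, and entries whose
 * expiry does not match the current tick belong to a later lap around
 * the wheel, which is exactly how softclock() below treats
 * c_time != curticks.
 */
#if 0
#define	WHEEL_SIZE	256			/* must be a power of two */
#define	WHEEL_MASK	(WHEEL_SIZE - 1)

struct wheel_entry {
	TAILQ_ENTRY(wheel_entry) we_links;
	int	we_expire;			/* absolute expiry tick */
	void	(*we_func)(void *);
	void	*we_arg;
};
TAILQ_HEAD(wheel_bucket, wheel_entry);

static struct wheel_bucket wheel[WHEEL_SIZE];

static void
wheel_init(void)
{
	int i;

	for (i = 0; i < WHEEL_SIZE; i++)
		TAILQ_INIT(&wheel[i]);
}

/* O(1) insertion: hash the absolute expiry tick into a bucket. */
static void
wheel_insert(struct wheel_entry *we, int now, int delta)
{
	we->we_expire = now + delta;
	TAILQ_INSERT_TAIL(&wheel[we->we_expire & WHEEL_MASK], we, we_links);
}

/* Per-tick scan: only entries hashed to this tick's bucket are examined. */
static void
wheel_tick(int now)
{
	struct wheel_entry *we, *next;
	struct wheel_bucket *bucket;

	bucket = &wheel[now & WHEEL_MASK];
	TAILQ_FOREACH_SAFE(we, bucket, we_links, next) {
		if (we->we_expire != now)
			continue;		/* a later lap of the wheel */
		TAILQ_REMOVE(bucket, we, we_links);
		we->we_func(we->we_arg);
	}
}
#endif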

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *arg)
{
	struct callout_cpu *cc;
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int lockcalls;
	int gcalls;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	lockcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	cc = (struct callout_cpu *)arg;
	CC_LOCK(cc);
	while (cc->cc_softticks != ticks) {
		cc->cc_softticks++;
		/*
		 * cc_softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = cc->cc_softticks;
		bucket = &cc->cc_callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					cc->cc_next = c;
					/* Give interrupts a chance. */
					CC_UNLOCK(cc);
					;	/* nothing */
					CC_LOCK(cc);
					c = cc->cc_next;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				struct lock_class *class;
				struct lock_object *c_lock;
				int c_flags, sharedlock;

				cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				class = (c->c_lock != NULL) ?
				    LOCK_CLASS(c->c_lock) : NULL;
				sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ?
				    0 : 1;
				c_lock = c->c_lock;
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				cc->cc_curr = c;
				cc->cc_cancel = 0;
				CC_UNLOCK(cc);
				if (c_lock != NULL) {
					class->lc_lock(c_lock, sharedlock);
					/*
					 * The callout may have been cancelled
					 * while we switched locks.
					 */
					if (cc->cc_cancel) {
						class->lc_unlock(c_lock);
						goto skip;
					}
					/* The callout cannot be stopped now. */
					cc->cc_cancel = 1;

					if (c_lock == &Giant.lock_object) {
						gcalls++;
						CTR3(KTR_CALLOUT,
						    "callout %p func %p arg %p",
						    c, c_func, c_arg);
					} else {
						lockcalls++;
						CTR3(KTR_CALLOUT, "callout lock"
						    " %p func %p arg %p",
						    c, c_func, c_arg);
					}
				} else {
					mpcalls++;
					CTR3(KTR_CALLOUT,
					    "callout mpsafe %p func %p arg %p",
					    c, c_func, c_arg);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
#endif
				THREAD_NO_SLEEPING();
				c_func(c_arg);
				THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
					class->lc_unlock(c_lock);
			skip:
				CC_LOCK(cc);
				/*
				 * If the current callout is locally
				 * allocated (from timeout(9))
				 * then put it on the freelist.
				 *
				 * Note: we need to check the cached
				 * copy of c_flags because if it was not
				 * local, then it's not safe to deref the
				 * callout pointer.
				 */
				if (c_flags & CALLOUT_LOCAL_ALLOC) {
					KASSERT(c->c_flags ==
					    CALLOUT_LOCAL_ALLOC,
					    ("corrupted callout"));
					c->c_func = NULL;
					SLIST_INSERT_HEAD(&cc->cc_callfree, c,
					    c_links.sle);
				}
				cc->cc_curr = NULL;
				if (cc->cc_waiting) {
					/*
					 * There is someone waiting
					 * for the callout to complete.
					 */
					cc->cc_waiting = 0;
					CC_UNLOCK(cc);
					wakeup(&cc->cc_waiting);
					CC_LOCK(cc);
				}
				steps = 0;
				c = cc->cc_next;
			}
		}
	}
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	cc->cc_next = NULL;
	CC_UNLOCK(cc);
}
458/*
459 * timeout --
460 *	Execute a function after a specified length of time.
461 *
462 * untimeout --
463 *	Cancel previous timeout function call.
464 *
465 * callout_handle_init --
466 *	Initialize a handle so that using it with untimeout is benign.
467 *
468 *	See AT&T BCI Driver Reference Manual for specification.  This
469 *	implementation differs from that one in that although an
470 *	identification value is returned from timeout, the original
471 *	arguments to timeout as well as the identifier are used to
472 *	identify entries for untimeout.
473 */
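
/*
 * Illustrative sketch (not compiled; "foo_expire", "foo_start", "foo_stop"
 * and "foo_handle" are hypothetical names): typical use of the legacy
 * timeout()/untimeout() interface documented above.  The handle together
 * with the original function and argument identifies the entry to cancel.
 */
#if 0
static struct callout_handle foo_handle;

static void
foo_expire(void *arg)
{
	/* Runs from softclock() roughly hz ticks (about one second) later. */
}

static void
foo_start(void *arg)
{
	callout_handle_init(&foo_handle);	/* safe to untimeout() now */
	foo_handle = timeout(foo_expire, arg, hz);
}

static void
foo_stop(void *arg)
{
	/* Must pass the same function and argument used when scheduling. */
	untimeout(foo_expire, arg, foo_handle);
}
#endif
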
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout_cpu *cc;
	struct callout *new;
	struct callout_handle handle;

	cc = CC_CPU(timeout_cpu);
	CC_LOCK(cc);
	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&cc->cc_callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
	callout_reset(new, to_ticks, ftn, arg);
	handle.callout = new;
	CC_UNLOCK(cc);

	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{
	struct callout_cpu *cc;

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	cc = callout_lock(handle.callout);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	CC_UNLOCK(cc);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
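
/*
 * Illustrative sketch (not compiled; "foo_softc" and its members are
 * hypothetical): one common driver pattern for the interface above, using
 * callout_init_mtx() so the handler runs with the driver mutex held and
 * rearm/stop races are resolved under that lock.
 */
#if 0
struct foo_softc {
	struct mtx	sc_mtx;
	struct callout	sc_timer;
	int		sc_stopping;
};

static void
foo_timer(void *arg)
{
	struct foo_softc *sc = arg;

	mtx_assert(&sc->sc_mtx, MA_OWNED);
	if (sc->sc_stopping)
		return;
	callout_deactivate(&sc->sc_timer);	/* this shot has been serviced */
	/* ... periodic work ... */
	callout_reset(&sc->sc_timer, hz, foo_timer, sc);	/* rearm */
}

static void
foo_attach(struct foo_softc *sc)
{
	mtx_init(&sc->sc_mtx, "foo", NULL, MTX_DEF);
	callout_init_mtx(&sc->sc_timer, &sc->sc_mtx, 0);
	mtx_lock(&sc->sc_mtx);
	callout_reset(&sc->sc_timer, hz, foo_timer, sc);
	mtx_unlock(&sc->sc_mtx);
}

static void
foo_detach(struct foo_softc *sc)
{
	mtx_lock(&sc->sc_mtx);
	sc->sc_stopping = 1;
	callout_stop(&sc->sc_timer);
	mtx_unlock(&sc->sc_mtx);
	callout_drain(&sc->sc_timer);	/* wait out any in-flight handler */
}
#endif
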
int
callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
    void *arg, int cpu)
{
	struct callout_cpu *cc;
	int cancelled = 0;

	/*
	 * Don't allow migration of pre-allocated callouts lest they
	 * become unbalanced.
	 */
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		cpu = c->c_cpu;
retry:
	cc = callout_lock(c);
	if (cc->cc_curr == c) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress.  If there is a lock then we
		 * can cancel the callout if it has not really started.
		 */
		if (c->c_lock != NULL && !cc->cc_cancel)
			cancelled = cc->cc_cancel = 1;
		if (cc->cc_waiting) {
			/*
			 * Someone has called callout_drain to kill this
			 * callout.  Don't reschedule.
			 */
			CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
			    cancelled ? "cancelled" : "failed to cancel",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			return (cancelled);
		}
	}
	if (c->c_flags & CALLOUT_PENDING) {
		if (cc->cc_next == c) {
			cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
		}
		TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
		    c_links.tqe);

		cancelled = 1;
		c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
	}
	/*
	 * If the lock must migrate we have to check the state again as
	 * we can't hold both the new and old locks simultaneously.
	 */
	if (c->c_cpu != cpu) {
		c->c_cpu = cpu;
		CC_UNLOCK(cc);
		goto retry;
	}

	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = ftn;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
			  c, c_links.tqe);
	CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
	    cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
	CC_UNLOCK(cc);

	return (cancelled);
}

int
_callout_stop_safe(c, safe)
	struct	callout *c;
	int	safe;
{
	struct callout_cpu *cc;
	struct lock_class *class;
	int use_lock, sq_locked;

	/*
	 * Some old subsystems don't hold Giant while running a callout_stop(),
	 * so just discard this check for the moment.
	 */
	if (!safe && c->c_lock != NULL) {
		if (c->c_lock == &Giant.lock_object)
			use_lock = mtx_owned(&Giant);
		else {
			use_lock = 1;
			class = LOCK_CLASS(c->c_lock);
			class->lc_assert(c->c_lock, LA_XLOCKED);
		}
	} else
		use_lock = 0;

	sq_locked = 0;
again:
	cc = callout_lock(c);
	/*
	 * If the callout isn't pending, it's not on the queue, so
	 * don't attempt to remove it from the queue.  We can try to
	 * stop it by other means however.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;

		/*
		 * If it wasn't on the queue and it isn't the current
		 * callout, then we can't stop it, so just bail.
		 */
		if (cc->cc_curr != c) {
			CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			if (sq_locked)
				sleepq_release(&cc->cc_curr);
			return (0);
		}

		if (safe) {
			/*
			 * The current callout is running (or just
			 * about to run) and blocking is allowed, so
			 * just wait for the current invocation to
			 * finish.
			 */
			while (cc->cc_curr == c) {

				/*
				 * Use direct calls to sleepqueue interface
				 * instead of cv/msleep in order to avoid
				 * a LOR between cc_lock and sleepqueue
				 * chain spinlocks.  This piece of code
				 * emulates a msleep_spin() call actually.
				 *
				 * If we already have the sleepqueue chain
				 * locked, then we can safely block.  If we
				 * don't already have it locked, however,
				 * we have to drop the cc_lock to lock
				 * it.  This opens several races, so we
				 * restart at the beginning once we have
				 * both locks.  If nothing has changed, then
				 * we will end up back here with sq_locked
				 * set.
				 */
				if (!sq_locked) {
					CC_UNLOCK(cc);
					sleepq_lock(&cc->cc_curr);
					sq_locked = 1;
					goto again;
				}
				cc->cc_waiting = 1;
				DROP_GIANT();
				CC_UNLOCK(cc);
				sleepq_add(&cc->cc_curr,
				    &cc->cc_lock.lock_object, "codrain",
				    SLEEPQ_SLEEP, 0);
				sleepq_wait(&cc->cc_curr, 0);
				sq_locked = 0;

				/* Reacquire locks previously released. */
				PICKUP_GIANT();
				CC_LOCK(cc);
			}
		} else if (use_lock && !cc->cc_cancel) {
			/*
			 * The current callout is waiting for its
			 * lock which we hold.  Cancel the callout
			 * and return.  After our caller drops the
			 * lock, the callout will be skipped in
			 * softclock().
			 */
			cc->cc_cancel = 1;
			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			KASSERT(!sq_locked, ("sleepqueue chain locked"));
			return (1);
		}
		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
		    c, c->c_func, c->c_arg);
		CC_UNLOCK(cc);
		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
		return (0);
	}
	if (sq_locked)
		sleepq_release(&cc->cc_curr);

	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (cc->cc_next == c) {
		cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
	}
	TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
	    c_links.tqe);

	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
	    c, c->c_func, c->c_arg);

	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		c->c_func = NULL;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
	CC_UNLOCK(cc);
	return (1);
}

void
callout_init(c, mpsafe)
	struct	callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe) {
		c->c_lock = NULL;
		c->c_flags = CALLOUT_RETURNUNLOCKED;
	} else {
		c->c_lock = &Giant.lock_object;
		c->c_flags = 0;
	}
	c->c_cpu = timeout_cpu;
}

void
_callout_init_lock(c, lock, flags)
	struct	callout *c;
	struct	lock_object *lock;
	int flags;
{
	bzero(c, sizeof *c);
	c->c_lock = lock;
	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
	    ("callout_init_lock: bad flags %d", flags));
	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
	    (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
	    __func__));
	c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
	c->c_cpu = timeout_cpu;
}

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
    struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	CC_LOCK(cc);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	CC_UNLOCK(cc);

	return;
}
#endif /* APM_FIXUP_CALLTODO */
