/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_timeout.c 209059 2010-06-11 18:46:34Z jhb $");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <sys/smp.h>

SDT_PROVIDER_DEFINE(callout_execute);
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_start);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_start, 0,
    "struct callout *");
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_end);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_end, 0,
    "struct callout *");

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_lockcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
    "Average number of lock callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */
int callwheelsize, callwheelbits, callwheelmask;

/*
 * There is one struct callout_cpu per CPU, holding all relevant
 * state for the callout processing thread on the individual CPU.
 * In particular:
 *	cc_ticks is incremented once per tick in callout_tick().
 *	It tracks the global 'ticks' but in a way that the individual
 *	threads need not worry about races in the order in which
 *	hardclock() and hardclock_cpu() run on the various CPUs.
 *	cc_softticks is advanced in callout_tick() to point to the
 *	first entry in cc_callwheel that may need handling. In turn,
 *	a softclock() is scheduled so it can serve the various entries i
 *	such that cc_softticks <= i <= cc_ticks.
 *	XXX maybe cc_softticks and cc_ticks should be volatile?
 *
 *	cc_ticks is also used in callout_reset_on() to determine
 *	when the callout should be served.
 */
struct callout_cpu {
	struct mtx		cc_lock;
	struct callout		*cc_callout;
	struct callout_tailq	*cc_callwheel;
	struct callout_list	cc_callfree;
	struct callout		*cc_next;
	struct callout		*cc_curr;
	void			*cc_cookie;
	int 			cc_ticks;
	int 			cc_softticks;
	int			cc_cancel;
	int			cc_waiting;
};

#ifdef SMP
struct callout_cpu cc_cpu[MAXCPU];
#define	CC_CPU(cpu)	(&cc_cpu[(cpu)])
#define	CC_SELF()	CC_CPU(PCPU_GET(cpuid))
#else
struct callout_cpu cc_cpu;
#define	CC_CPU(cpu)	&cc_cpu
#define	CC_SELF()	&cc_cpu
#endif
#define	CC_LOCK(cc)	mtx_lock_spin(&(cc)->cc_lock)
#define	CC_UNLOCK(cc)	mtx_unlock_spin(&(cc)->cc_lock)

static int timeout_cpu;

MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");

/**
 * Locked by cc_lock:
 *   cc_curr         - If a callout is in progress, it is cc_curr.
 *                     If cc_curr is non-NULL, threads waiting in
 *                     callout_drain() will be woken up as soon as the
 *                     relevant callout completes.
 *   cc_cancel       - Changing to 1 with both cc_lock and c_lock held
 *                     guarantees that the current callout will not run.
 *                     The softclock() function sets this to 0 before it
 *                     drops cc_lock to acquire c_lock, and it calls
 *                     the handler only if cc_cancel is still 0 after
 *                     c_lock is successfully acquired.
 *   cc_waiting      - If a thread is waiting in callout_drain(), then
 *                     cc_waiting is nonzero.  Set only when
 *                     cc_curr is non-NULL.
 */

/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 *	This code is called very early in the kernel initialization sequence,
 *	and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
	struct callout_cpu *cc;

	timeout_cpu = PCPU_GET(cpuid);
	cc = CC_CPU(timeout_cpu);
	/*
	 * Calculate callout wheel size
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	cc->cc_callout = (struct callout *)v;
	v = (caddr_t)(cc->cc_callout + ncallout);
	cc->cc_callwheel = (struct callout_tailq *)v;
	v = (caddr_t)(cc->cc_callwheel + callwheelsize);
	return(v);
}

static void
callout_cpu_init(struct callout_cpu *cc)
{
	struct callout *c;
	int i;

	mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
	SLIST_INIT(&cc->cc_callfree);
	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&cc->cc_callwheel[i]);
	}
	if (cc->cc_callout == NULL)
		return;
	for (i = 0; i < ncallout; i++) {
		c = &cc->cc_callout[i];
		callout_init(c, 0);
		c->c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
}

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 *	This code is called just once, after the space reserved for the
 *	callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	callout_cpu_init(CC_CPU(timeout_cpu));
}

/*
 * Start standard softclock thread.
 */
void    *softclock_ih;

static void
start_softclock(void *dummy)
{
	struct callout_cpu *cc;
#ifdef SMP
	int cpu;
#endif

	cc = CC_CPU(timeout_cpu);
	if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK,
	    INTR_MPSAFE, &softclock_ih))
		panic("died while creating standard software ithreads");
	cc->cc_cookie = softclock_ih;
#ifdef SMP
	CPU_FOREACH(cpu) {
		if (cpu == timeout_cpu)
			continue;
		cc = CC_CPU(cpu);
		if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
		    INTR_MPSAFE, &cc->cc_cookie))
			panic("died while creating standard software ithreads");
		cc->cc_callout = NULL;	/* Only cpu0 handles timeout(). */
		cc->cc_callwheel = malloc(
		    sizeof(struct callout_tailq) * callwheelsize, M_CALLOUT,
		    M_WAITOK);
		callout_cpu_init(cc);
	}
#endif
}

SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);

void
callout_tick(void)
{
	struct callout_cpu *cc;
	int need_softclock;
	int bucket;

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	need_softclock = 0;
	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
	cc->cc_ticks++;
	for (; (cc->cc_softticks - cc->cc_ticks) <= 0; cc->cc_softticks++) {
		bucket = cc->cc_softticks & callwheelmask;
		if (!TAILQ_EMPTY(&cc->cc_callwheel[bucket])) {
			need_softclock = 1;
			break;
		}
	}
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	/*
	 * swi_sched acquires the thread lock, so we don't want to call it
	 * with cc_lock held; incorrect locking order.
	 */
	if (need_softclock)
		swi_sched(cc->cc_cookie, 0);
}

static struct callout_cpu *
callout_lock(struct callout *c)
{
	struct callout_cpu *cc;
	int cpu;

	for (;;) {
		cpu = c->c_cpu;
		cc = CC_CPU(cpu);
		CC_LOCK(cc);
		if (cpu == c->c_cpu)
			break;
		CC_UNLOCK(cc);
	}
	return (cc);
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */

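/*
 * Illustrative sketch of the hashed timing wheel (not taken from this file):
 * because callwheelsize is rounded up to a power of two, a callout scheduled
 * for absolute tick c_time hashes into bucket (c_time & callwheelmask), so
 * insertion and removal are O(1) and softclock() only walks the buckets
 * whose ticks have actually elapsed.  This is exactly what callout_reset_on()
 * does below:
 *
 *	c->c_time = cc->cc_ticks + to_ticks;
 *	TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
 *	    c, c_links.tqe);
 *
 * Entries that share a bucket but belong to a later revolution of the wheel
 * are skipped by the "c->c_time != curticks" check in softclock().
 */
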
/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *arg)
{
	struct callout_cpu *cc;
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int lockcalls;
	int gcalls;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	lockcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	cc = (struct callout_cpu *)arg;
	CC_LOCK(cc);
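	/*
	 * callout_tick() advances cc_softticks to at most cc_ticks + 1, so
	 * the loop below drains every bucket for tick values cc_softticks
	 * through cc_ticks inclusive, stopping once cc_softticks has caught
	 * up with cc_ticks + 1.
	 */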
	while (cc->cc_softticks - 1 != cc->cc_ticks) {
		/*
		 * cc_softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = cc->cc_softticks;
		cc->cc_softticks++;
		bucket = &cc->cc_callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					cc->cc_next = c;
					/* Give interrupts a chance. */
					CC_UNLOCK(cc);
					;	/* nothing */
					CC_LOCK(cc);
					c = cc->cc_next;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				struct lock_class *class;
				struct lock_object *c_lock;
				int c_flags, sharedlock;

				cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				class = (c->c_lock != NULL) ?
				    LOCK_CLASS(c->c_lock) : NULL;
				sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ?
				    0 : 1;
				c_lock = c->c_lock;
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				cc->cc_curr = c;
				cc->cc_cancel = 0;
				CC_UNLOCK(cc);
				if (c_lock != NULL) {
					class->lc_lock(c_lock, sharedlock);
					/*
					 * The callout may have been cancelled
					 * while we switched locks.
					 */
					if (cc->cc_cancel) {
						class->lc_unlock(c_lock);
						goto skip;
					}
					/* The callout cannot be stopped now. */
					cc->cc_cancel = 1;

					if (c_lock == &Giant.lock_object) {
						gcalls++;
						CTR3(KTR_CALLOUT,
						    "callout %p func %p arg %p",
						    c, c_func, c_arg);
					} else {
						lockcalls++;
						CTR3(KTR_CALLOUT, "callout lock"
						    " %p func %p arg %p",
						    c, c_func, c_arg);
					}
				} else {
					mpcalls++;
					CTR3(KTR_CALLOUT,
					    "callout mpsafe %p func %p arg %p",
					    c, c_func, c_arg);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
#endif
				THREAD_NO_SLEEPING();
				SDT_PROBE(callout_execute, kernel, ,
				    callout_start, c, 0, 0, 0, 0);
				c_func(c_arg);
				SDT_PROBE(callout_execute, kernel, ,
				    callout_end, c, 0, 0, 0, 0);
				THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				CTR1(KTR_CALLOUT, "callout %p finished", c);
				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
					class->lc_unlock(c_lock);
			skip:
				CC_LOCK(cc);
				/*
				 * If the current callout is locally
				 * allocated (from timeout(9))
				 * then put it on the freelist.
				 *
				 * Note: we need to check the cached
				 * copy of c_flags because if it was not
				 * local, then it's not safe to deref the
				 * callout pointer.
				 */
				if (c_flags & CALLOUT_LOCAL_ALLOC) {
					KASSERT(c->c_flags ==
					    CALLOUT_LOCAL_ALLOC,
					    ("corrupted callout"));
					c->c_func = NULL;
					SLIST_INSERT_HEAD(&cc->cc_callfree, c,
					    c_links.sle);
				}
				cc->cc_curr = NULL;
				if (cc->cc_waiting) {
					/*
					 * There is someone waiting
					 * for the callout to complete.
					 */
					cc->cc_waiting = 0;
					CC_UNLOCK(cc);
					wakeup(&cc->cc_waiting);
					CC_LOCK(cc);
				}
				steps = 0;
				c = cc->cc_next;
			}
		}
	}
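	/*
	 * Fold this pass's counters into exponentially weighted moving
	 * averages: each average is kept scaled by 1000 (matching the
	 * "Units = 1/1000" sysctl descriptions above) and moves 1/256 of
	 * the way toward the latest sample on every softclock() pass.
	 */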
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	cc->cc_next = NULL;
	CC_UNLOCK(cc);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout_cpu *cc;
	struct callout *new;
	struct callout_handle handle;

	cc = CC_CPU(timeout_cpu);
	CC_LOCK(cc);
	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&cc->cc_callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
	callout_reset(new, to_ticks, ftn, arg);
	handle.callout = new;
	CC_UNLOCK(cc);

	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{
	struct callout_cpu *cc;

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	cc = callout_lock(handle.callout);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	CC_UNLOCK(cc);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}

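/*
 * Illustrative sketch of the legacy interface above (not taken from this
 * file; the identifiers xxx_handle and xxx_expire are made up):
 *
 *	static struct callout_handle xxx_handle;
 *
 *	static void
 *	xxx_expire(void *arg)
 *	{
 *		// runs from softclock() roughly hz ticks after timeout()
 *	}
 *
 *	callout_handle_init(&xxx_handle);
 *	xxx_handle = timeout(xxx_expire, arg, hz);
 *	...
 *	untimeout(xxx_expire, arg, xxx_handle);
 *
 * Note that untimeout() only cancels a still-pending timeout; unlike
 * callout_drain(), it does not wait for a handler that is already running.
 */
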
/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
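/*
 * Illustrative sketch of this interface (not taken from this file; the
 * identifiers foo_softc, foo_mtx, foo_timer, foo_tick and FOO_PERIOD are
 * made up):
 *
 *	struct foo_softc {
 *		struct mtx	foo_mtx;
 *		struct callout	foo_timer;
 *	};
 *
 *	static void
 *	foo_tick(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		mtx_assert(&sc->foo_mtx, MA_OWNED);
 *		callout_reset(&sc->foo_timer, FOO_PERIOD * hz, foo_tick, sc);
 *	}
 *
 *	mtx_init(&sc->foo_mtx, "foo", NULL, MTX_DEF);
 *	callout_init_mtx(&sc->foo_timer, &sc->foo_mtx, 0);
 *	mtx_lock(&sc->foo_mtx);
 *	callout_reset(&sc->foo_timer, FOO_PERIOD * hz, foo_tick, sc);
 *	mtx_unlock(&sc->foo_mtx);
 *	...
 *	// teardown: stop the timer and wait for any running handler,
 *	// with foo_mtx NOT held
 *	callout_drain(&sc->foo_timer);
 *
 * With callout_init_mtx() the handler runs with foo_mtx held, which closes
 * the race between the handler and a concurrent callout_stop().
 */
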
int
callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
    void *arg, int cpu)
{
	struct callout_cpu *cc;
	int cancelled = 0;

	/*
	 * Don't allow migration of pre-allocated callouts lest they
	 * become unbalanced.
	 */
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		cpu = c->c_cpu;
retry:
	cc = callout_lock(c);
	if (cc->cc_curr == c) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress.  If there is a lock then we
		 * can cancel the callout if it has not really started.
		 */
		if (c->c_lock != NULL && !cc->cc_cancel)
			cancelled = cc->cc_cancel = 1;
		if (cc->cc_waiting) {
			/*
			 * Someone has called callout_drain to kill this
			 * callout.  Don't reschedule.
			 */
			CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
			    cancelled ? "cancelled" : "failed to cancel",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			return (cancelled);
		}
	}
	if (c->c_flags & CALLOUT_PENDING) {
		if (cc->cc_next == c) {
			cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
		}
		TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
		    c_links.tqe);

		cancelled = 1;
		c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
	}
	/*
	 * If the lock must migrate we have to check the state again as
	 * we can't hold both the new and old locks simultaneously.
	 */
	if (c->c_cpu != cpu) {
		c->c_cpu = cpu;
		CC_UNLOCK(cc);
		goto retry;
	}

	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = ftn;
	c->c_time = cc->cc_ticks + to_ticks;
	TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
			  c, c_links.tqe);
	CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
	    cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
	CC_UNLOCK(cc);

	return (cancelled);
}

/*
 * Common idioms that can be optimized in the future.
 */
int
callout_schedule_on(struct callout *c, int to_ticks, int cpu)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
}

int
callout_schedule(struct callout *c, int to_ticks)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
}

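/*
 * Common implementation behind callout_stop() and callout_drain(); both are
 * macros in <sys/callout.h> that call this function with safe set to 0
 * (stop: never sleep) or 1 (drain: wait for a running handler to finish).
 */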
int
_callout_stop_safe(c, safe)
	struct	callout *c;
	int	safe;
{
	struct callout_cpu *cc;
	struct lock_class *class;
	int use_lock, sq_locked;

	/*
	 * Some old subsystems don't hold Giant while running a callout_stop(),
	 * so just discard this check for the moment.
	 */
	if (!safe && c->c_lock != NULL) {
		if (c->c_lock == &Giant.lock_object)
			use_lock = mtx_owned(&Giant);
		else {
			use_lock = 1;
			class = LOCK_CLASS(c->c_lock);
			class->lc_assert(c->c_lock, LA_XLOCKED);
		}
	} else
		use_lock = 0;

	sq_locked = 0;
again:
	cc = callout_lock(c);
	/*
	 * If the callout isn't pending, it's not on the queue, so
	 * don't attempt to remove it from the queue.  We can try to
	 * stop it by other means however.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;

		/*
		 * If it wasn't on the queue and it isn't the current
		 * callout, then we can't stop it, so just bail.
		 */
		if (cc->cc_curr != c) {
			CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			if (sq_locked)
				sleepq_release(&cc->cc_waiting);
			return (0);
		}

		if (safe) {
			/*
			 * The current callout is running (or just
			 * about to run) and blocking is allowed, so
			 * just wait for the current invocation to
			 * finish.
			 */
			while (cc->cc_curr == c) {

				/*
				 * Use direct calls to sleepqueue interface
				 * instead of cv/msleep in order to avoid
				 * a LOR between cc_lock and sleepqueue
				 * chain spinlocks.  This piece of code
				 * emulates a msleep_spin() call actually.
				 *
				 * If we already have the sleepqueue chain
				 * locked, then we can safely block.  If we
				 * don't already have it locked, however,
				 * we have to drop the cc_lock to lock
				 * it.  This opens several races, so we
				 * restart at the beginning once we have
				 * both locks.  If nothing has changed, then
				 * we will end up back here with sq_locked
				 * set.
				 */
				if (!sq_locked) {
					CC_UNLOCK(cc);
					sleepq_lock(&cc->cc_waiting);
					sq_locked = 1;
					goto again;
				}
				cc->cc_waiting = 1;
				DROP_GIANT();
				CC_UNLOCK(cc);
				sleepq_add(&cc->cc_waiting,
				    &cc->cc_lock.lock_object, "codrain",
				    SLEEPQ_SLEEP, 0);
				sleepq_wait(&cc->cc_waiting, 0);
				sq_locked = 0;

				/* Reacquire locks previously released. */
				PICKUP_GIANT();
				CC_LOCK(cc);
			}
		} else if (use_lock && !cc->cc_cancel) {
			/*
			 * The current callout is waiting for its
			 * lock which we hold.  Cancel the callout
			 * and return.  After our caller drops the
			 * lock, the callout will be skipped in
			 * softclock().
			 */
			cc->cc_cancel = 1;
			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			KASSERT(!sq_locked, ("sleepqueue chain locked"));
			return (1);
		}
		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
		    c, c->c_func, c->c_arg);
		CC_UNLOCK(cc);
		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
		return (0);
	}
	if (sq_locked)
		sleepq_release(&cc->cc_waiting);

	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (cc->cc_next == c) {
		cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
	}
	TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
	    c_links.tqe);

	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
	    c, c->c_func, c->c_arg);

	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		c->c_func = NULL;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
	CC_UNLOCK(cc);
	return (1);
}

void
callout_init(c, mpsafe)
	struct	callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe) {
		c->c_lock = NULL;
		c->c_flags = CALLOUT_RETURNUNLOCKED;
	} else {
		c->c_lock = &Giant.lock_object;
		c->c_flags = 0;
	}
	c->c_cpu = timeout_cpu;
}

void
_callout_init_lock(c, lock, flags)
	struct	callout *c;
	struct	lock_object *lock;
	int flags;
{
	bzero(c, sizeof *c);
	c->c_lock = lock;
	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
	    ("callout_init_lock: bad flags %d", flags));
	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
	    (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
	    __func__));
	c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
	c->c_cpu = timeout_cpu;
}

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
    struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	CC_LOCK(cc);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	CC_UNLOCK(cc);

	return;
}
#endif /* APM_FIXUP_CALLTODO */
