kern_timeout.c revision 214746
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_timeout.c 214746 2010-11-03 15:38:52Z jhb $");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <sys/smp.h>

SDT_PROVIDER_DEFINE(callout_execute);
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_start, callout-start);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_start, 0,
    "struct callout *");
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_end, callout-end);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_end, 0,
    "struct callout *");

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_lockcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
    "Average number of lock callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */
int callwheelsize, callwheelbits, callwheelmask;

/*
 * There is one struct callout_cpu per cpu, holding all relevant
 * state for the callout processing thread on the individual CPU.
 * In particular:
 *	cc_ticks is updated once per tick in callout_tick().
 *	It tracks the global 'ticks' but in a way that the individual
 *	threads need not worry about races in the order in which
 *	hardclock() and hardclock_cpu() run on the various CPUs.
 *	cc_softticks is advanced in callout_tick() and softclock() to
 *	point to the first entry in cc_callwheel that may need handling.
 *	In turn, a softclock() is scheduled so it can serve the various
 *	entries i such that cc_softticks <= i <= cc_ticks.
 *	XXX maybe cc_softticks and cc_ticks should be volatile?
 *
 *	cc_ticks is also used in callout_reset_on() to determine
 *	when the callout should be served.
 */
struct callout_cpu {
	struct mtx		cc_lock;
	struct callout		*cc_callout;
	struct callout_tailq	*cc_callwheel;
	struct callout_list	cc_callfree;
	struct callout		*cc_next;
	struct callout		*cc_curr;
	void			*cc_cookie;
	int 			cc_ticks;
	int 			cc_softticks;
	int			cc_cancel;
	int			cc_waiting;
	int 			cc_firsttick;
};

#ifdef SMP
struct callout_cpu cc_cpu[MAXCPU];
#define	CC_CPU(cpu)	(&cc_cpu[(cpu)])
#define	CC_SELF()	CC_CPU(PCPU_GET(cpuid))
#else
struct callout_cpu cc_cpu;
#define	CC_CPU(cpu)	&cc_cpu
#define	CC_SELF()	&cc_cpu
#endif
#define	CC_LOCK(cc)	mtx_lock_spin(&(cc)->cc_lock)
#define	CC_UNLOCK(cc)	mtx_unlock_spin(&(cc)->cc_lock)

static int timeout_cpu;
void (*callout_new_inserted)(int cpu, int ticks) = NULL;

MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");

/**
 * Locked by cc_lock:
 *   cc_curr         - If a callout is in progress, it is cc_curr.
 *                     If cc_curr is non-NULL, threads waiting in
 *                     callout_drain() will be woken up as soon as the
 *                     relevant callout completes.
 *   cc_cancel       - Changing to 1 with both cc_lock and c_lock held
 *                     guarantees that the current callout will not run.
 *                     The softclock() function sets this to 0 before it
 *                     drops cc_lock to acquire c_lock, and it calls
 *                     the handler only if cc_cancel is still 0 after
 *                     c_lock is successfully acquired.
 *   cc_waiting      - If a thread is waiting in callout_drain(), then
 *                     cc_waiting is nonzero.  Set only when
 *                     cc_curr is non-NULL.
 */
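
/*
 * A rough sketch of the cc_cancel handshake described above (an
 * illustration distilled from softclock() and _callout_stop_safe()
 * below, not a verbatim trace):
 *
 *	softclock()                        callout_stop() w/ c_lock held
 *	-----------                        -----------------------------
 *	cc->cc_curr = c;
 *	cc->cc_cancel = 0;
 *	CC_UNLOCK(cc);
 *	class->lc_lock(c_lock, ...)        sees cc_curr == c, !cc_cancel:
 *	        ... blocks ...             cc->cc_cancel = 1; return (1);
 *	                                   (caller later drops c_lock)
 *	lock acquired; cc_cancel != 0,
 *	so the handler is skipped.
 */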

/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 *	This code is called very early in the kernel initialization sequence,
 *	and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
	struct callout_cpu *cc;

	timeout_cpu = PCPU_GET(cpuid);
	cc = CC_CPU(timeout_cpu);
	/*
	 * Calculate the callout wheel size: the smallest power of two
	 * that is >= ncallout.
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	cc->cc_callout = (struct callout *)v;
	v = (caddr_t)(cc->cc_callout + ncallout);
	cc->cc_callwheel = (struct callout_tailq *)v;
	v = (caddr_t)(cc->cc_callwheel + callwheelsize);
	return (v);
}
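
/*
 * For example (illustrative numbers, not computed by this file): with
 * ncallout = 200 the loop above leaves callwheelsize = 256,
 * callwheelbits = 8 and callwheelmask = 0xff, so a callout with
 * c_time = 1000 hashes to bucket (1000 & 0xff) == 232 and is only
 * examined when the wheel passes slot 232.
 */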

static void
callout_cpu_init(struct callout_cpu *cc)
{
	struct callout *c;
	int i;

	mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
	SLIST_INIT(&cc->cc_callfree);
	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&cc->cc_callwheel[i]);
	}
	if (cc->cc_callout == NULL)
		return;
	for (i = 0; i < ncallout; i++) {
		c = &cc->cc_callout[i];
		callout_init(c, 0);
		c->c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
}

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 *	This code is called just once, after the space reserved for the
 *	callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	callout_cpu_init(CC_CPU(timeout_cpu));
}

/*
 * Start standard softclock thread.
 */
static void
start_softclock(void *dummy)
{
	struct callout_cpu *cc;
#ifdef SMP
	int cpu;
#endif

	cc = CC_CPU(timeout_cpu);
	if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK,
	    INTR_MPSAFE, &cc->cc_cookie))
		panic("died while creating standard software ithreads");
#ifdef SMP
	CPU_FOREACH(cpu) {
		if (cpu == timeout_cpu)
			continue;
		cc = CC_CPU(cpu);
		if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
		    INTR_MPSAFE, &cc->cc_cookie))
			panic("died while creating standard software ithreads");
		cc->cc_callout = NULL;	/* Only cpu0 handles timeout(). */
		cc->cc_callwheel = malloc(
		    sizeof(struct callout_tailq) * callwheelsize, M_CALLOUT,
		    M_WAITOK);
		callout_cpu_init(cc);
	}
#endif
}

SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);

void
callout_tick(void)
{
	struct callout_cpu *cc;
	int need_softclock;
	int bucket;

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	need_softclock = 0;
	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
	cc->cc_firsttick = cc->cc_ticks = ticks;
	for (; (cc->cc_softticks - cc->cc_ticks) <= 0; cc->cc_softticks++) {
		bucket = cc->cc_softticks & callwheelmask;
		if (!TAILQ_EMPTY(&cc->cc_callwheel[bucket])) {
			need_softclock = 1;
			break;
		}
	}
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	/*
	 * swi_sched acquires the thread lock, so we don't want to call it
	 * with cc_lock held; incorrect locking order.
	 */
	if (need_softclock)
		swi_sched(cc->cc_cookie, 0);
}

int
callout_tickstofirst(int limit)
{
	struct callout_cpu *cc;
	struct callout *c;
	struct callout_tailq *sc;
	int curticks;
	int skip = 1;

	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
	curticks = cc->cc_ticks;
	while (skip < ncallout && skip < limit) {
		sc = &cc->cc_callwheel[(curticks + skip) & callwheelmask];
		/* search scanning ticks */
		TAILQ_FOREACH(c, sc, c_links.tqe) {
			if (c->c_time - curticks <= ncallout)
				goto out;
		}
		skip++;
	}
out:
	cc->cc_firsttick = curticks + skip;
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	return (skip);
}

static struct callout_cpu *
callout_lock(struct callout *c)
{
	struct callout_cpu *cc;
	int cpu;

	/*
	 * Lock the callout_cpu the callout is bound to.  c_cpu may change
	 * while we spin on the lock (callout migration), so re-check it
	 * once the lock is held and retry if the callout has moved.
	 */
	for (;;) {
		cpu = c->c_cpu;
		cc = CC_CPU(cpu);
		CC_LOCK(cc);
		if (cpu == c->c_cpu)
			break;
		CC_UNLOCK(cc);
	}
	return (cc);
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */
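
/*
 * A minimal userland sketch of the hashed-wheel idea used here (assumed
 * names, not kernel code): each timer hashes into one of a power-of-two
 * number of buckets by its absolute expiry tick, and a bucket is only
 * scanned when the current tick passes it, giving O(1) insertion.
 *
 *	#define WHEEL_SIZE	256		// power of two
 *	#define WHEEL_MASK	(WHEEL_SIZE - 1)
 *
 *	struct timer { int expiry; struct timer *next; };
 *	struct timer *wheel[WHEEL_SIZE];
 *
 *	void insert(struct timer *t) {		// O(1) insertion
 *		int b = t->expiry & WHEEL_MASK;
 *		t->next = wheel[b]; wheel[b] = t;
 *	}
 *
 *	void tick(int now) {			// scan one bucket per tick
 *		struct timer **tp = &wheel[now & WHEEL_MASK];
 *		while (*tp != NULL) {
 *			if ((*tp)->expiry == now) {
 *				struct timer *t = *tp;
 *				*tp = t->next;	// unlink and fire
 *				fire(t);	// assumed handler dispatch
 *			} else
 *				tp = &(*tp)->next;	// lapped: not yet due
 *		}
 *	}
 */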

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *arg)
{
	struct callout_cpu *cc;
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int lockcalls;
	int gcalls;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	lockcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	cc = (struct callout_cpu *)arg;
	CC_LOCK(cc);
	while (cc->cc_softticks - 1 != cc->cc_ticks) {
		/*
		 * cc_softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = cc->cc_softticks;
		cc->cc_softticks++;
		bucket = &cc->cc_callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					cc->cc_next = c;
					/* Give interrupts a chance. */
					CC_UNLOCK(cc);
					;	/* nothing */
					CC_LOCK(cc);
					c = cc->cc_next;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				struct lock_class *class;
				struct lock_object *c_lock;
				int c_flags, sharedlock;

				cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				class = (c->c_lock != NULL) ?
				    LOCK_CLASS(c->c_lock) : NULL;
				sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ?
				    0 : 1;
				c_lock = c->c_lock;
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				cc->cc_curr = c;
				cc->cc_cancel = 0;
				CC_UNLOCK(cc);
				if (c_lock != NULL) {
					class->lc_lock(c_lock, sharedlock);
					/*
					 * The callout may have been cancelled
					 * while we switched locks.
					 */
					if (cc->cc_cancel) {
						class->lc_unlock(c_lock);
						goto skip;
					}
					/* The callout cannot be stopped now. */
					cc->cc_cancel = 1;

					if (c_lock == &Giant.lock_object) {
						gcalls++;
						CTR3(KTR_CALLOUT,
						    "callout %p func %p arg %p",
						    c, c_func, c_arg);
					} else {
						lockcalls++;
						CTR3(KTR_CALLOUT, "callout lock"
						    " %p func %p arg %p",
						    c, c_func, c_arg);
					}
				} else {
					mpcalls++;
					CTR3(KTR_CALLOUT,
					    "callout mpsafe %p func %p arg %p",
					    c, c_func, c_arg);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
#endif
				THREAD_NO_SLEEPING();
				SDT_PROBE(callout_execute, kernel, ,
				    callout_start, c, 0, 0, 0, 0);
				c_func(c_arg);
				SDT_PROBE(callout_execute, kernel, ,
				    callout_end, c, 0, 0, 0, 0);
				THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				CTR1(KTR_CALLOUT, "callout %p finished", c);
				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
					class->lc_unlock(c_lock);
			skip:
				CC_LOCK(cc);
				/*
				 * If the current callout is locally
				 * allocated (from timeout(9))
				 * then put it on the freelist.
				 *
				 * Note: we need to check the cached
				 * copy of c_flags because if it was not
				 * local, then it's not safe to deref the
				 * callout pointer.
				 */
				if (c_flags & CALLOUT_LOCAL_ALLOC) {
					KASSERT(c->c_flags ==
					    CALLOUT_LOCAL_ALLOC,
					    ("corrupted callout"));
					c->c_func = NULL;
					SLIST_INSERT_HEAD(&cc->cc_callfree, c,
					    c_links.sle);
				}
				cc->cc_curr = NULL;
				if (cc->cc_waiting) {
					/*
					 * There is someone waiting
					 * for the callout to complete.
					 */
					cc->cc_waiting = 0;
					CC_UNLOCK(cc);
					wakeup(&cc->cc_waiting);
					CC_LOCK(cc);
				}
				steps = 0;
				c = cc->cc_next;
			}
		}
	}
	/*
	 * Update the moving averages exported via sysctl.  Each is an
	 * exponentially weighted moving average with gain 1/256, stored
	 * scaled by 1000 (hence "Units = 1/1000" in the descriptions).
	 */
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	cc->cc_next = NULL;
	CC_UNLOCK(cc);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.  A usage sketch follows
 *	callout_handle_init() below.
 */
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout_cpu *cc;
	struct callout *new;
	struct callout_handle handle;

	cc = CC_CPU(timeout_cpu);
	CC_LOCK(cc);
	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&cc->cc_callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
	callout_reset(new, to_ticks, ftn, arg);
	handle.callout = new;
	CC_UNLOCK(cc);

	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{
	struct callout_cpu *cc;

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	cc = callout_lock(handle.callout);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	CC_UNLOCK(cc);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}
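
/*
 * Illustrative use of the legacy timeout(9) interface (a sketch with
 * assumed driver names, not code from this file):
 *
 *	static struct callout_handle foo_h;
 *	static void foo_expire(void *arg);
 *
 *	callout_handle_init(&foo_h);		// make untimeout() benign
 *	foo_h = timeout(foo_expire, sc, hz);	// fire in ~1 second
 *	...
 *	untimeout(foo_expire, sc, foo_h);	// cancel if still pending
 *
 * Note that untimeout() needs the original function and argument as well
 * as the handle, per the BCI specification described above.
 */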

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 *
 * A usage sketch appears just below this comment.
 */
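
/*
 * Illustrative use of the callout(9) interface (assumed driver names; a
 * sketch, not code from this file):
 *
 *	struct callout foo_c;
 *
 *	callout_init(&foo_c, CALLOUT_MPSAFE);	// no lock; handler is MPSAFE
 *	callout_reset(&foo_c, 10 * hz, foo_expire, sc);
 *	...
 *	callout_stop(&foo_c);	// cancel; may fail if handler is running
 *	callout_drain(&foo_c);	// cancel and wait; required before freeing
 */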
int
callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
    void *arg, int cpu)
{
	struct callout_cpu *cc;
	int cancelled = 0;

	/*
	 * Don't allow migration of pre-allocated callouts lest they
	 * become unbalanced.
	 */
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		cpu = c->c_cpu;
retry:
	cc = callout_lock(c);
	if (cc->cc_curr == c) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress.  If there is a lock then we
		 * can cancel the callout if it has not really started.
		 */
		if (c->c_lock != NULL && !cc->cc_cancel)
			cancelled = cc->cc_cancel = 1;
		if (cc->cc_waiting) {
			/*
			 * Someone has called callout_drain to kill this
			 * callout.  Don't reschedule.
			 */
			CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
			    cancelled ? "cancelled" : "failed to cancel",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			return (cancelled);
		}
	}
	if (c->c_flags & CALLOUT_PENDING) {
		if (cc->cc_next == c) {
			cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
		}
		TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
		    c_links.tqe);

		cancelled = 1;
		c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
	}
	/*
	 * If the lock must migrate we have to check the state again as
	 * we can't hold both the new and old locks simultaneously.
	 */
	if (c->c_cpu != cpu) {
		c->c_cpu = cpu;
		CC_UNLOCK(cc);
		goto retry;
	}

	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = ftn;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
			  c, c_links.tqe);
	if ((c->c_time - cc->cc_firsttick) < 0 &&
	    callout_new_inserted != NULL) {
		cc->cc_firsttick = c->c_time;
		(*callout_new_inserted)(cpu,
		    to_ticks + (ticks - cc->cc_ticks));
	}
	CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
	    cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
	CC_UNLOCK(cc);

	return (cancelled);
}

/*
 * Common idioms that can be optimized in the future.
 */
int
callout_schedule_on(struct callout *c, int to_ticks, int cpu)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
}

int
callout_schedule(struct callout *c, int to_ticks)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
}
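
/*
 * A common pattern (sketch with assumed names): a periodic callout that
 * re-arms itself from its own handler with callout_schedule(), reusing
 * the function and argument recorded by the previous callout_reset().
 *
 *	static void
 *	foo_tick(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		foo_do_work(sc);
 *		callout_schedule(&sc->foo_c, hz);	// again in ~1 second
 *	}
 */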

int
_callout_stop_safe(c, safe)
	struct	callout *c;
	int	safe;
{
	struct callout_cpu *cc;
	struct lock_class *class;
	int use_lock, sq_locked;

	/*
	 * Some old subsystems don't hold Giant while running a callout_stop(),
	 * so just discard this check for the moment.
	 */
	if (!safe && c->c_lock != NULL) {
		if (c->c_lock == &Giant.lock_object)
			use_lock = mtx_owned(&Giant);
		else {
			use_lock = 1;
			class = LOCK_CLASS(c->c_lock);
			class->lc_assert(c->c_lock, LA_XLOCKED);
		}
	} else
		use_lock = 0;

	sq_locked = 0;
again:
	cc = callout_lock(c);
	/*
	 * If the callout isn't pending, it's not on the queue, so
	 * don't attempt to remove it from the queue.  We can try to
	 * stop it by other means however.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;

		/*
		 * If it wasn't on the queue and it isn't the current
		 * callout, then we can't stop it, so just bail.
		 */
		if (cc->cc_curr != c) {
			CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			if (sq_locked)
				sleepq_release(&cc->cc_waiting);
			return (0);
		}

		if (safe) {
			/*
			 * The current callout is running (or just
			 * about to run) and blocking is allowed, so
			 * just wait for the current invocation to
			 * finish.
			 */
			while (cc->cc_curr == c) {

				/*
				 * Use direct calls to sleepqueue interface
				 * instead of cv/msleep in order to avoid
				 * a LOR between cc_lock and sleepqueue
				 * chain spinlocks.  This piece of code
				 * emulates a msleep_spin() call actually.
				 *
				 * If we already have the sleepqueue chain
				 * locked, then we can safely block.  If we
				 * don't already have it locked, however,
				 * we have to drop the cc_lock to lock
				 * it.  This opens several races, so we
				 * restart at the beginning once we have
				 * both locks.  If nothing has changed, then
				 * we will end up back here with sq_locked
				 * set.
				 */
				if (!sq_locked) {
					CC_UNLOCK(cc);
					sleepq_lock(&cc->cc_waiting);
					sq_locked = 1;
					goto again;
				}
				cc->cc_waiting = 1;
				DROP_GIANT();
				CC_UNLOCK(cc);
				sleepq_add(&cc->cc_waiting,
				    &cc->cc_lock.lock_object, "codrain",
				    SLEEPQ_SLEEP, 0);
				sleepq_wait(&cc->cc_waiting, 0);
				sq_locked = 0;

				/* Reacquire locks previously released. */
				PICKUP_GIANT();
				CC_LOCK(cc);
			}
		} else if (use_lock && !cc->cc_cancel) {
			/*
			 * The current callout is waiting for its
			 * lock which we hold.  Cancel the callout
			 * and return.  After our caller drops the
			 * lock, the callout will be skipped in
			 * softclock().
			 */
			cc->cc_cancel = 1;
			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			KASSERT(!sq_locked, ("sleepqueue chain locked"));
			return (1);
		}
		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
		    c, c->c_func, c->c_arg);
		CC_UNLOCK(cc);
		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
		return (0);
	}
	if (sq_locked)
		sleepq_release(&cc->cc_waiting);

	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (cc->cc_next == c) {
		cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
	}
	TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
	    c_links.tqe);

	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
	    c, c->c_func, c->c_arg);

	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		c->c_func = NULL;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
	CC_UNLOCK(cc);
	return (1);
}
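
/*
 * Illustrative teardown sequence (assumed names; a sketch, not code from
 * this file): callout_drain() maps to _callout_stop_safe(c, 1) and may
 * therefore sleep in the loop above, so it must not be called while
 * holding a lock that the callout handler also takes.
 *
 *	callout_drain(&sc->foo_c);	// blocks until any handler finishes
 *	free(sc, M_DEVBUF);		// now safe to free the softc
 */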

void
callout_init(c, mpsafe)
	struct	callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe) {
		c->c_lock = NULL;
		c->c_flags = CALLOUT_RETURNUNLOCKED;
	} else {
		c->c_lock = &Giant.lock_object;
		c->c_flags = 0;
	}
	c->c_cpu = timeout_cpu;
}

void
_callout_init_lock(c, lock, flags)
	struct	callout *c;
	struct	lock_object *lock;
	int flags;
{
	bzero(c, sizeof *c);
	c->c_lock = lock;
	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
	    ("callout_init_lock: bad flags %d", flags));
	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
	    (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
	    __func__));
	c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
	c->c_cpu = timeout_cpu;
}
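
/*
 * For example (assumed names; a sketch): <sys/callout.h> wraps this as
 * callout_init_mtx(), which associates a driver mutex with the callout so
 * that softclock() acquires it around the handler, making callout_stop()
 * from under that mutex race free.
 *
 *	struct mtx foo_mtx;
 *	struct callout foo_c;
 *
 *	mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
 *	callout_init_mtx(&foo_c, &foo_mtx, 0);
 *	...
 *	mtx_lock(&foo_mtx);
 *	callout_stop(&foo_c);	// handler cannot be running concurrently
 *	mtx_unlock(&foo_mtx);
 */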

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of ticks we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
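/*
 * Worked example of the conversion below (illustrative numbers): with
 * hz = 1000 (tick = 1000 us) and a suspend of 2.5 s, time_change is
 * { tv_sec = 2, tv_usec = 500000 }, so
 *
 *	delta_ticks = (2 * 1000000 + 500000 + 999) / 1000 + 1 = 2501
 *
 * i.e. the rounding up plus the trailing "+ 1" deliberately overestimate
 * by a tick rather than fire timers early.
 */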
void
adjust_timeout_calltodo(time_change)
    struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/*
	 * Don't collide with softclock().
	 *
	 * XXX This option is not wired up to the current per-CPU callout
	 * wheels: neither 'cc' nor the legacy 'calltodo' list is defined
	 * in this file, so this block does not compile as-is.
	 */
	CC_LOCK(cc);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	CC_UNLOCK(cc);

	return;
}
#endif /* APM_FIXUP_CALLTODO */