kern_timeout.c revision 150188
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      From: @(#)kern_clock.c  8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_timeout.c 150188 2005-09-15 20:20:36Z jhb $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
Units = 1/1000"); 57141428Siedowsestatic int avg_mtxcalls; 58141428SiedowseSYSCTL_INT(_debug, OID_AUTO, to_avg_mtxcalls, CTLFLAG_RD, &avg_mtxcalls, 0, 59141428Siedowse "Average number of mtx callouts made per softclock call. Units = 1/1000"); 60115810Sphkstatic int avg_mpcalls; 61115810SphkSYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0, 62115810Sphk "Average number of MP callouts made per softclock call. Units = 1/1000"); 6333392Sphk/* 6433392Sphk * TODO: 6533392Sphk * allocate more timeout table slots when table overflows. 6633392Sphk */ 6733392Sphk 6833392Sphk/* Exported to machdep.c and/or kern_clock.c. */ 6929680Sgibbsstruct callout *callout; 7029680Sgibbsstruct callout_list callfree; 7129680Sgibbsint callwheelsize, callwheelbits, callwheelmask; 7229680Sgibbsstruct callout_tailq *callwheel; 7333392Sphkint softticks; /* Like ticks, but for softclock(). */ 74116606Sphkstruct mtx callout_lock; 752112Swollman 7629680Sgibbsstatic struct callout *nextsoftcheck; /* Next callout to be checked. */ 77128024Scperciva 78139831Scperciva/** 79127969Scperciva * Locked by callout_lock: 80127969Scperciva * curr_callout - If a callout is in progress, it is curr_callout. 81127969Scperciva * If curr_callout is non-NULL, threads waiting on 82127969Scperciva * callout_wait will be woken up as soon as the 83127969Scperciva * relevant callout completes. 84141428Siedowse * curr_cancelled - Changing to 1 with both callout_lock and c_mtx held 85141428Siedowse * guarantees that the current callout will not run. 86141428Siedowse * The softclock() function sets this to 0 before it 87141428Siedowse * drops callout_lock to acquire c_mtx, and it calls 88141428Siedowse * the handler only if curr_cancelled still 0 when 89141428Siedowse * c_mtx is successfully acquired. 90128024Scperciva * wakeup_ctr - Incremented every time a thread wants to wait 91128024Scperciva * for a callout to complete. Modified only when 92128024Scperciva * curr_callout is non-NULL. 93127969Scperciva * wakeup_needed - If a thread is waiting on callout_wait, then 94127969Scperciva * wakeup_needed is nonzero. Increased only when 95127969Scperciva * cutt_callout is non-NULL. 96127969Scperciva */ 97127969Scpercivastatic struct callout *curr_callout; 98141428Siedowsestatic int curr_cancelled; 99128024Scpercivastatic int wakeup_ctr; 100127969Scpercivastatic int wakeup_needed; 101128024Scperciva 102139831Scperciva/** 103127969Scperciva * Locked by callout_wait_lock: 104127969Scperciva * callout_wait - If wakeup_needed is set, callout_wait will be 105127969Scperciva * triggered after the current callout finishes. 106127969Scperciva * wakeup_done_ctr - Set to the current value of wakeup_ctr after 107127969Scperciva * callout_wait is triggered. 108127969Scperciva */ 109127969Scpercivastatic struct mtx callout_wait_lock; 110127969Scpercivastatic struct cv callout_wait; 111127969Scpercivastatic int wakeup_done_ctr; 1121541Srgrimes 1131541Srgrimes/* 11482127Sdillon * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization 11582127Sdillon * 11682127Sdillon * This code is called very early in the kernel initialization sequence, 11782127Sdillon * and may be called more then once. 
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
        /*
         * Calculate callout wheel size
         */
        for (callwheelsize = 1, callwheelbits = 0;
             callwheelsize < ncallout;
             callwheelsize <<= 1, ++callwheelbits)
                ;
        callwheelmask = callwheelsize - 1;

        callout = (struct callout *)v;
        v = (caddr_t)(callout + ncallout);
        callwheel = (struct callout_tailq *)v;
        v = (caddr_t)(callwheel + callwheelsize);
        return(v);
}

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *                                 space.
 *
 *      This code is called just once, after the space reserved for the
 *      callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
        int i;

        SLIST_INIT(&callfree);
        for (i = 0; i < ncallout; i++) {
                callout_init(&callout[i], 0);
                callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
                SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
        }
        for (i = 0; i < callwheelsize; i++) {
                TAILQ_INIT(&callwheel[i]);
        }
        mtx_init(&callout_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
        mtx_init(&callout_wait_lock, "callout_wait_lock", NULL, MTX_DEF);
        cv_init(&callout_wait, "callout_wait");
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */
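
/*
 * A worked sizing sketch (the figure for ncallout is hypothetical, for
 * illustration only): if ncallout were 1000, the loop in
 * kern_timeout_callwheel_alloc() above would round the wheel up to the
 * next power of two, giving callwheelsize = 1024, callwheelbits = 10 and
 * callwheelmask = 0x3ff.  A callout due at tick t then lives on the
 * bucket callwheel[t & callwheelmask]; softclock() below walks only the
 * bucket for the tick being serviced and skips entries whose full c_time
 * does not match, leaving them for a later revolution of the wheel.
 */
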
/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *dummy)
{
        struct callout *c;
        struct callout_tailq *bucket;
        int curticks;
        int steps;      /* #steps since we last allowed interrupts */
        int depth;
        int mpcalls;
        int mtxcalls;
        int gcalls;
        int wakeup_cookie;
#ifdef DIAGNOSTIC
        struct bintime bt1, bt2;
        struct timespec ts2;
        static uint64_t maxdt = 36893488147419102LL;   /* 2 msec */
        static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

        mpcalls = 0;
        mtxcalls = 0;
        gcalls = 0;
        depth = 0;
        steps = 0;
        mtx_lock_spin(&callout_lock);
        while (softticks != ticks) {
                softticks++;
                /*
                 * softticks may be modified by hard clock, so cache
                 * it while we work on a given bucket.
                 */
                curticks = softticks;
                bucket = &callwheel[curticks & callwheelmask];
                c = TAILQ_FIRST(bucket);
                while (c) {
                        depth++;
                        if (c->c_time != curticks) {
                                c = TAILQ_NEXT(c, c_links.tqe);
                                ++steps;
                                if (steps >= MAX_SOFTCLOCK_STEPS) {
                                        nextsoftcheck = c;
                                        /* Give interrupts a chance. */
                                        mtx_unlock_spin(&callout_lock);
                                        ;       /* nothing */
                                        mtx_lock_spin(&callout_lock);
                                        c = nextsoftcheck;
                                        steps = 0;
                                }
                        } else {
                                void (*c_func)(void *);
                                void *c_arg;
                                struct mtx *c_mtx;
                                int c_flags;

                                nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
                                TAILQ_REMOVE(bucket, c, c_links.tqe);
                                c_func = c->c_func;
                                c_arg = c->c_arg;
                                c_mtx = c->c_mtx;
                                c_flags = c->c_flags;
                                if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
                                        c->c_func = NULL;
                                        c->c_flags = CALLOUT_LOCAL_ALLOC;
                                        SLIST_INSERT_HEAD(&callfree, c,
                                            c_links.sle);
                                        curr_callout = NULL;
                                } else {
                                        c->c_flags =
                                            (c->c_flags & ~CALLOUT_PENDING);
                                        curr_callout = c;
                                }
                                curr_cancelled = 0;
                                mtx_unlock_spin(&callout_lock);
                                if (c_mtx != NULL) {
                                        mtx_lock(c_mtx);
                                        /*
                                         * The callout may have been cancelled
                                         * while we switched locks.
                                         */
                                        if (curr_cancelled) {
                                                mtx_unlock(c_mtx);
                                                mtx_lock_spin(&callout_lock);
                                                goto done_locked;
                                        }
                                        /* The callout cannot be stopped now. */
                                        curr_cancelled = 1;

                                        if (c_mtx == &Giant) {
                                                gcalls++;
                                                CTR1(KTR_CALLOUT, "callout %p",
                                                    c_func);
                                        } else {
                                                mtxcalls++;
                                                CTR1(KTR_CALLOUT,
                                                    "callout mtx %p",
                                                    c_func);
                                        }
                                } else {
                                        mpcalls++;
                                        CTR1(KTR_CALLOUT, "callout mpsafe %p",
                                            c_func);
                                }
#ifdef DIAGNOSTIC
                                binuptime(&bt1);
#endif
                                THREAD_NO_SLEEPING();
                                c_func(c_arg);
                                THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
                                binuptime(&bt2);
                                bintime_sub(&bt2, &bt1);
                                if (bt2.frac > maxdt) {
                                        if (lastfunc != c_func ||
                                            bt2.frac > maxdt * 2) {
                                                bintime2timespec(&bt2, &ts2);
                                                printf(
                        "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
                                                    c_func, c_arg,
                                                    (intmax_t)ts2.tv_sec,
                                                    ts2.tv_nsec);
                                        }
                                        maxdt = bt2.frac;
                                        lastfunc = c_func;
                                }
#endif
                                if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
                                        mtx_unlock(c_mtx);
                                mtx_lock_spin(&callout_lock);
done_locked:
                                curr_callout = NULL;
                                if (wakeup_needed) {
                                        /*
                                         * There might be someone waiting
                                         * for the callout to complete.
                                         */
                                        wakeup_cookie = wakeup_ctr;
                                        mtx_unlock_spin(&callout_lock);
                                        mtx_lock(&callout_wait_lock);
                                        cv_broadcast(&callout_wait);
                                        wakeup_done_ctr = wakeup_cookie;
                                        mtx_unlock(&callout_wait_lock);
                                        mtx_lock_spin(&callout_lock);
                                        wakeup_needed = 0;
                                }
                                steps = 0;
                                c = nextsoftcheck;
                        }
                }
        }
        avg_depth += (depth * 1000 - avg_depth) >> 8;
        avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
        avg_mtxcalls += (mtxcalls * 1000 - avg_mtxcalls) >> 8;
        avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
        nextsoftcheck = NULL;
        mtx_unlock_spin(&callout_lock);
}

/*
 * timeout --
 *      Execute a function after a specified length of time.
 *
 * untimeout --
 *      Cancel previous timeout function call.
 *
 * callout_handle_init --
 *      Initialize a handle so that using it with untimeout is benign.
 *
 *      See AT&T BCI Driver Reference Manual for specification.  This
 *      implementation differs from that one in that although an
 *      identification value is returned from timeout, the original
 *      arguments to timeout as well as the identifier are used to
 *      identify entries for untimeout.
 */
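
/*
 * A minimal usage sketch of this legacy interface (mydev_poll and the
 * softc pointer sc are hypothetical names, not part of this file):
 *
 *      struct callout_handle th;
 *
 *      callout_handle_init(&th);
 *      th = timeout(mydev_poll, sc, hz);
 *      ...
 *      untimeout(mydev_poll, sc, th);
 *
 * timeout() schedules mydev_poll(sc) roughly hz ticks from now and
 * returns a handle; untimeout() must be passed the same function and
 * argument along with that handle to identify the entry, as noted above.
 */
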
struct callout_handle
timeout(ftn, arg, to_ticks)
        timeout_t *ftn;
        void *arg;
        int to_ticks;
{
        struct callout *new;
        struct callout_handle handle;

        mtx_lock_spin(&callout_lock);

        /* Fill in the next free callout structure. */
        new = SLIST_FIRST(&callfree);
        if (new == NULL)
                /* XXX Attempt to malloc first */
                panic("timeout table full");
        SLIST_REMOVE_HEAD(&callfree, c_links.sle);

        callout_reset(new, to_ticks, ftn, arg);

        handle.callout = new;
        mtx_unlock_spin(&callout_lock);
        return (handle);
}

void
untimeout(ftn, arg, handle)
        timeout_t *ftn;
        void *arg;
        struct callout_handle handle;
{

        /*
         * Check for a handle that was initialized
         * by callout_handle_init, but never used
         * for a real timeout.
         */
        if (handle.callout == NULL)
                return;

        mtx_lock_spin(&callout_lock);
        if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
                callout_stop(handle.callout);
        mtx_unlock_spin(&callout_lock);
}

void
callout_handle_init(struct callout_handle *handle)
{
        handle->callout = NULL;
}

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *      safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *      drained, or deactivated since the last time the callout was
 *      reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
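
/*
 * A minimal usage sketch of this interface (the handler mydev_tick and
 * softc pointer sc are hypothetical names):
 *
 *      struct callout c;
 *
 *      callout_init(&c, 1);
 *      callout_reset(&c, 10 * hz, mydev_tick, sc);
 *      ...
 *      callout_stop(&c);
 *      callout_drain(&c);
 *
 * A nonzero second argument to callout_init() marks the callout MP-safe,
 * so the handler runs without Giant.  callout_stop() cancels a pending
 * call; callout_drain() additionally waits out a handler that is already
 * running.  callout_reset() on a pending callout reschedules it and
 * returns nonzero if it cancelled a pending or in-progress call.
 */
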
int
callout_reset(c, to_ticks, ftn, arg)
        struct  callout *c;
        int     to_ticks;
        void    (*ftn)(void *);
        void    *arg;
{
        int cancelled = 0;

#ifdef notyet /* Some callers of timeout() do not hold Giant. */
        if (c->c_mtx != NULL)
                mtx_assert(c->c_mtx, MA_OWNED);
#endif

        mtx_lock_spin(&callout_lock);
        if (c == curr_callout) {
                /*
                 * We're being asked to reschedule a callout which is
                 * currently in progress.  If there is a mutex then we
                 * can cancel the callout if it has not really started.
                 */
                if (c->c_mtx != NULL && !curr_cancelled)
                        cancelled = curr_cancelled = 1;
                if (wakeup_needed) {
                        /*
                         * Someone has called callout_drain to kill this
                         * callout.  Don't reschedule.
                         */
                        mtx_unlock_spin(&callout_lock);
                        return (cancelled);
                }
        }
        if (c->c_flags & CALLOUT_PENDING) {
                if (nextsoftcheck == c) {
                        nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
                }
                TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c,
                    c_links.tqe);

                cancelled = 1;

                /*
                 * Part of the normal "stop a pending callout" process
                 * is to clear the CALLOUT_ACTIVE and CALLOUT_PENDING
                 * flags.  We're not going to bother doing that here,
                 * because we're going to be setting those flags ten lines
                 * after this point, and we're holding callout_lock
                 * between now and then.
                 */
        }

        /*
         * We could unlock callout_lock here and lock it again before the
         * TAILQ_INSERT_TAIL, but there's no point since doing this setup
         * doesn't take much time.
         */
        if (to_ticks <= 0)
                to_ticks = 1;

        c->c_arg = arg;
        c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
        c->c_func = ftn;
        c->c_time = ticks + to_ticks;
        TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
            c, c_links.tqe);
        mtx_unlock_spin(&callout_lock);

        return (cancelled);
}

int
_callout_stop_safe(c, safe)
        struct  callout *c;
        int     safe;
{
        int use_mtx, wakeup_cookie;

        if (!safe && c->c_mtx != NULL) {
#ifdef notyet /* Some callers do not hold Giant for Giant-locked callouts. */
                mtx_assert(c->c_mtx, MA_OWNED);
                use_mtx = 1;
#else
                use_mtx = mtx_owned(c->c_mtx);
#endif
        } else {
                use_mtx = 0;
        }

        mtx_lock_spin(&callout_lock);
        /*
         * Don't attempt to delete a callout that's not on the queue.
         */
        if (!(c->c_flags & CALLOUT_PENDING)) {
                c->c_flags &= ~CALLOUT_ACTIVE;
                if (c != curr_callout) {
                        mtx_unlock_spin(&callout_lock);
                        return (0);
                }
                if (safe) {
                        /* We need to wait until the callout is finished. */
                        wakeup_needed = 1;
                        wakeup_cookie = wakeup_ctr++;
                        mtx_unlock_spin(&callout_lock);
                        mtx_lock(&callout_wait_lock);

                        /*
                         * Check to make sure that softclock() didn't
                         * do the wakeup in between our dropping
                         * callout_lock and picking up callout_wait_lock
                         */
                        if (wakeup_cookie - wakeup_done_ctr > 0)
                                cv_wait(&callout_wait, &callout_wait_lock);

                        mtx_unlock(&callout_wait_lock);
                } else if (use_mtx && !curr_cancelled) {
                        /* We can stop the callout before it runs. */
                        curr_cancelled = 1;
                        mtx_unlock_spin(&callout_lock);
                        return (1);
                } else
                        mtx_unlock_spin(&callout_lock);
                return (0);
        }
        c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

        if (nextsoftcheck == c) {
                nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
        }
        TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe);

        if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
                c->c_func = NULL;
                SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
        }
        mtx_unlock_spin(&callout_lock);
        return (1);
}

void
callout_init(c, mpsafe)
        struct  callout *c;
        int mpsafe;
{
        bzero(c, sizeof *c);
        if (mpsafe) {
                c->c_mtx = NULL;
                c->c_flags = CALLOUT_RETURNUNLOCKED;
        } else {
                c->c_mtx = &Giant;
                c->c_flags = 0;
        }
}

void
callout_init_mtx(c, mtx, flags)
        struct  callout *c;
        struct  mtx *mtx;
        int flags;
{
        bzero(c, sizeof *c);
        c->c_mtx = mtx;
        KASSERT((flags & ~CALLOUT_RETURNUNLOCKED) == 0,
            ("callout_init_mtx: bad flags %d", flags));
        /* CALLOUT_RETURNUNLOCKED makes no sense without a mutex. */
        KASSERT(mtx != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
            ("callout_init_mtx: CALLOUT_RETURNUNLOCKED with no mutex"));
        c->c_flags = flags & CALLOUT_RETURNUNLOCKED;
}
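
/*
 * A minimal sketch of the mutex-protected variant above (the softc "sc"
 * with its own mutex sc->mtx and the handler mydev_timer are hypothetical):
 *
 *      callout_init_mtx(&sc->ch, &sc->mtx, 0);
 *      mtx_lock(&sc->mtx);
 *      callout_reset(&sc->ch, hz, mydev_timer, sc);
 *      mtx_unlock(&sc->mtx);
 *
 * softclock() then acquires sc->mtx before invoking mydev_timer(), and,
 * per the curr_cancelled protocol described at the top of this file, a
 * callout_stop() issued while sc->mtx is held prevents a pending call
 * from ever running.
 */
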
#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
        struct timeval *time_change;
{
        register struct callout *p;
        unsigned long delta_ticks;

        /*
         * How many ticks were we asleep?
         * (stolen from tvtohz()).
         */

        /* Don't do anything */
        if (time_change->tv_sec < 0)
                return;
        else if (time_change->tv_sec <= LONG_MAX / 1000000)
                delta_ticks = (time_change->tv_sec * 1000000 +
                               time_change->tv_usec + (tick - 1)) / tick + 1;
        else if (time_change->tv_sec <= LONG_MAX / hz)
                delta_ticks = time_change->tv_sec * hz +
                              (time_change->tv_usec + (tick - 1)) / tick + 1;
        else
                delta_ticks = LONG_MAX;

        if (delta_ticks > INT_MAX)
                delta_ticks = INT_MAX;

        /*
         * Now rip through the timer calltodo list looking for timers
         * to expire.
         */

        /* don't collide with softclock() */
        mtx_lock_spin(&callout_lock);
        for (p = calltodo.c_next; p != NULL; p = p->c_next) {
                p->c_time -= delta_ticks;

                /* Break if the timer had more time on it than delta_ticks */
                if (p->c_time > 0)
                        break;

                /* take back the ticks the timer didn't use (p->c_time <= 0) */
                delta_ticks = -p->c_time;
        }
        mtx_unlock_spin(&callout_lock);

        return;
}
#endif /* APM_FIXUP_CALLTODO */