/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_timeout.c 155957 2006-02-23 19:13:12Z jhb $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_mtxcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mtxcalls, CTLFLAG_RD, &avg_mtxcalls, 0,
    "Average number of mtx callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */

/* Exported to machdep.c and/or kern_clock.c. */
struct callout *callout;
struct callout_list callfree;
int callwheelsize, callwheelbits, callwheelmask;
struct callout_tailq *callwheel;
int softticks;			/* Like ticks, but for softclock(). */
struct mtx callout_lock;

static struct callout *nextsoftcheck;	/* Next callout to be checked. */

/**
 * Locked by callout_lock:
 *   curr_callout    - If a callout is in progress, it is curr_callout.
 *                     If curr_callout is non-NULL, threads waiting in
 *                     callout_drain() will be woken up as soon as the
 *                     relevant callout completes.
 *   curr_cancelled  - Changing to 1 with both callout_lock and c_mtx held
 *                     guarantees that the current callout will not run.
 *                     The softclock() function sets this to 0 before it
 *                     drops callout_lock to acquire c_mtx, and it calls
 *                     the handler only if curr_cancelled is still 0 after
 *                     c_mtx is successfully acquired.
 *   callout_wait    - If a thread is waiting in callout_drain(), then
 *                     callout_wait is nonzero.  Set only when
 *                     curr_callout is non-NULL.
 */
static struct callout *curr_callout;
static int curr_cancelled;
static int callout_wait;

/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 *	This code is called very early in the kernel initialization sequence,
 *	and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
	/*
	 * Calculate callout wheel size
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	callout = (struct callout *)v;
	v = (caddr_t)(callout + ncallout);
	callwheel = (struct callout_tailq *)v;
	v = (caddr_t)(callwheel + callwheelsize);
	return(v);
}
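/*
 * Illustrative note (added commentary, not part of the original file):
 * the sizing loop above rounds ncallout up to a power of two so that a
 * timeout can be hashed to its bucket with a mask instead of a modulo.
 * For example:
 *
 *	ncallout = 1000  ->  callwheelsize = 1024, callwheelbits = 10,
 *	                     callwheelmask = 0x3ff
 *
 *	A callout due at c_time = 13579 then lives in bucket
 *	13579 & 0x3ff = 267, i.e. callwheel[267].
 */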
/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 *	This code is called just once, after the space reserved for the
 *	callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	int i;

	SLIST_INIT(&callfree);
	for (i = 0; i < ncallout; i++) {
		callout_init(&callout[i], 0);
		callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
	}
	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&callwheel[i]);
	}
	mtx_init(&callout_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */
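/*
 * Illustrative sketch (added commentary; a toy model, not kernel code):
 * a hashed timing wheel keeps one queue per tick slot, so insertion and
 * removal are O(1) and each tick only scans one bucket.  Entries that
 * hash to the current bucket but expire on a later lap around the wheel
 * are simply skipped:
 *
 *	#define WHEEL_SIZE	8		// power of two
 *	#define WHEEL_MASK	(WHEEL_SIZE - 1)
 *
 *	struct toy_timer {
 *		int expiry;			// absolute tick
 *		struct toy_timer *next;
 *	};
 *	static struct toy_timer *wheel[WHEEL_SIZE];
 *
 *	static void
 *	toy_tick(int now)			// run once per tick
 *	{
 *		struct toy_timer **tpp = &wheel[now & WHEEL_MASK];
 *
 *		while (*tpp != NULL) {
 *			if ((*tpp)->expiry == now) {
 *				struct toy_timer *t = *tpp;
 *				*tpp = t->next;	// unlink and fire
 *				// ... run t's handler here ...
 *			} else
 *				tpp = &(*tpp)->next; // later lap; skip
 *		}
 *	}
 *
 * softclock() below is the grown-up version of toy_tick(): it walks the
 * bucket for each tick that has passed, firing entries whose c_time
 * matches and skipping the rest.
 */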
/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *dummy)
{
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int mtxcalls;
	int gcalls;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	mtxcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	mtx_lock_spin(&callout_lock);
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					mtx_unlock_spin(&callout_lock);
					;	/* nothing */
					mtx_lock_spin(&callout_lock);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				struct mtx *c_mtx;
				int c_flags;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_mtx = c->c_mtx;
				c_flags = c->c_flags;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_func = NULL;
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
					    c_links.sle);
					curr_callout = NULL;
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
					curr_callout = c;
				}
				curr_cancelled = 0;
				mtx_unlock_spin(&callout_lock);
				if (c_mtx != NULL) {
					mtx_lock(c_mtx);
					/*
					 * The callout may have been cancelled
					 * while we switched locks.
					 */
					if (curr_cancelled) {
						mtx_unlock(c_mtx);
						goto skip;
					}
					/* The callout cannot be stopped now. */
					curr_cancelled = 1;

					if (c_mtx == &Giant) {
						gcalls++;
						CTR1(KTR_CALLOUT, "callout %p",
						    c_func);
					} else {
						mtxcalls++;
						CTR1(KTR_CALLOUT,
						    "callout mtx %p",
						    c_func);
					}
				} else {
					mpcalls++;
					CTR1(KTR_CALLOUT, "callout mpsafe %p",
					    c_func);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
#endif
				THREAD_NO_SLEEPING();
				c_func(c_arg);
				THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
					mtx_unlock(c_mtx);
			skip:
				mtx_lock_spin(&callout_lock);
				curr_callout = NULL;
				if (callout_wait) {
					/*
					 * There is someone waiting
					 * for the callout to complete.
					 */
					wakeup(&callout_wait);
					callout_wait = 0;
				}
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
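	/*
	 * Note (added commentary): the statistics below are exponential
	 * moving averages kept in fixed point, which is why the sysctl
	 * descriptions above say "Units = 1/1000".  Each sample is scaled
	 * by 1000 and blended in with a weight of 1/256:
	 *
	 *	avg += (sample * 1000 - avg) >> 8
	 *
	 * e.g. a steady depth of 2 items per call converges on
	 * avg_depth == 2000.
	 */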
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_mtxcalls += (mtxcalls * 1000 - avg_mtxcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	nextsoftcheck = NULL;
	mtx_unlock_spin(&callout_lock);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout *new;
	struct callout_handle handle;

	mtx_lock_spin(&callout_lock);

	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&callfree, c_links.sle);

	callout_reset(new, to_ticks, ftn, arg);

	handle.callout = new;
	mtx_unlock_spin(&callout_lock);
	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	mtx_lock_spin(&callout_lock);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	mtx_unlock_spin(&callout_lock);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}
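/*
 * Example (added commentary; "example_softc", "example_expire" and
 * friends are hypothetical names, not part of this file): the legacy
 * interface above is typically driven like this.
 *
 *	struct example_softc {
 *		struct callout_handle eh_timeout;
 *	};
 *
 *	static timeout_t example_expire;  // void example_expire(void *)
 *
 *	static void
 *	example_attach(struct example_softc *sc)
 *	{
 *		// Make a later untimeout() harmless before any timeout().
 *		callout_handle_init(&sc->eh_timeout);
 *	}
 *
 *	static void
 *	example_arm(struct example_softc *sc)
 *	{
 *		// Fire example_expire(sc) roughly one second from now.
 *		sc->eh_timeout = timeout(example_expire, sc, hz);
 *	}
 *
 *	static void
 *	example_disarm(struct example_softc *sc)
 *	{
 *		// Both function and argument must match the timeout().
 *		untimeout(example_expire, sc, sc->eh_timeout);
 *	}
 */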
/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
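/*
 * Example (added commentary; the "example_*" names are hypothetical):
 * a typical client of this interface owns its callout and drives it
 * through the functions below.
 *
 *	static struct callout example_ch;
 *
 *	static void
 *	example_timer(void *arg)
 *	{
 *		// ... do periodic work ...
 *		callout_reset(&example_ch, hz, example_timer, arg); // rearm
 *	}
 *
 *	static void
 *	example_start(void *arg)
 *	{
 *		callout_init(&example_ch, 1);	// 1 = MP-safe, no Giant
 *		callout_reset(&example_ch, hz, example_timer, arg);
 *	}
 *
 *	static void
 *	example_stop(void)
 *	{
 *		// callout_drain() (i.e. _callout_stop_safe(c, 1)) also
 *		// waits for a currently running handler to finish.
 *		callout_drain(&example_ch);
 *	}
 */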
int
callout_reset(c, to_ticks, ftn, arg)
	struct callout *c;
	int to_ticks;
	void (*ftn)(void *);
	void *arg;
{
	int cancelled = 0;

#ifdef notyet /* Some callers of timeout() do not hold Giant. */
	if (c->c_mtx != NULL)
		mtx_assert(c->c_mtx, MA_OWNED);
#endif

	mtx_lock_spin(&callout_lock);
	if (c == curr_callout) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress.  If there is a mutex then we
		 * can cancel the callout if it has not really started.
		 */
		if (c->c_mtx != NULL && !curr_cancelled)
			cancelled = curr_cancelled = 1;
		if (callout_wait) {
			/*
			 * Someone has called callout_drain to kill this
			 * callout.  Don't reschedule.
			 */
			mtx_unlock_spin(&callout_lock);
			return (cancelled);
		}
	}
	if (c->c_flags & CALLOUT_PENDING) {
		if (nextsoftcheck == c) {
			nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
		}
		TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c,
		    c_links.tqe);

		cancelled = 1;

		/*
		 * Part of the normal "stop a pending callout" process
		 * is to clear the CALLOUT_ACTIVE and CALLOUT_PENDING
		 * flags.  We're not going to bother doing that here,
		 * because we're going to be setting those flags ten lines
		 * after this point, and we're holding callout_lock
		 * between now and then.
		 */
	}

	/*
	 * We could unlock callout_lock here and lock it again before the
	 * TAILQ_INSERT_TAIL, but there's no point since doing this setup
	 * doesn't take much time.
	 */
	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = ftn;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
	    c, c_links.tqe);
	mtx_unlock_spin(&callout_lock);

	return (cancelled);
}
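/*
 * Added note (not in the original comments): the return value of
 * callout_reset() reports whether an earlier instance was cancelled
 * while rescheduling.  A hypothetical caller:
 *
 *	if (callout_reset(&example_ch, hz, example_timer, sc))
 *		;  // a pending or not-yet-started instance was cancelled
 *	else
 *		;  // nothing to cancel: callout was idle or already ran
 */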
int
_callout_stop_safe(c, safe)
	struct callout *c;
	int safe;
{
	int use_mtx;

	if (!safe && c->c_mtx != NULL) {
#ifdef notyet /* Some callers do not hold Giant for Giant-locked callouts. */
		mtx_assert(c->c_mtx, MA_OWNED);
		use_mtx = 1;
#else
		use_mtx = mtx_owned(c->c_mtx);
#endif
	} else {
		use_mtx = 0;
	}

	mtx_lock_spin(&callout_lock);
	/*
	 * If the callout isn't pending, it's not on the queue, so
	 * don't attempt to remove it from the queue.  We can try to
	 * stop it by other means however.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;

		/*
		 * If it wasn't on the queue and it isn't the current
		 * callout, then we can't stop it, so just bail.
		 */
		if (c != curr_callout) {
			mtx_unlock_spin(&callout_lock);
			return (0);
		}

		if (safe) {
			/*
			 * The current callout is running (or just
			 * about to run) and blocking is allowed, so
			 * just wait for the current invocation to
			 * finish.
			 */
			while (c == curr_callout) {
				callout_wait = 1;
				msleep_spin(&callout_wait, &callout_lock,
				    "codrain", 0);
			}
		} else if (use_mtx && !curr_cancelled) {
			/*
			 * The current callout is waiting for its
			 * mutex which we hold.  Cancel the callout
			 * and return.  After our caller drops the
			 * mutex, the callout will be skipped in
			 * softclock().
			 */
			curr_cancelled = 1;
			mtx_unlock_spin(&callout_lock);
			return (1);
		}
		mtx_unlock_spin(&callout_lock);
		return (0);
	}
	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (nextsoftcheck == c) {
		nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
	}
	TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe);

	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		c->c_func = NULL;
		SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
	}
	mtx_unlock_spin(&callout_lock);
	return (1);
}

void
callout_init(c, mpsafe)
	struct callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe) {
		c->c_mtx = NULL;
		c->c_flags = CALLOUT_RETURNUNLOCKED;
	} else {
		c->c_mtx = &Giant;
		c->c_flags = 0;
	}
}

void
callout_init_mtx(c, mtx, flags)
	struct callout *c;
	struct mtx *mtx;
	int flags;
{
	bzero(c, sizeof *c);
	c->c_mtx = mtx;
	KASSERT((flags & ~CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_mtx: bad flags %d", flags));
	/* CALLOUT_RETURNUNLOCKED makes no sense without a mutex. */
	KASSERT(mtx != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_mtx: CALLOUT_RETURNUNLOCKED with no mutex"));
	c->c_flags = flags & CALLOUT_RETURNUNLOCKED;
}
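/*
 * Example (added commentary; hypothetical names): callout_init_mtx()
 * associates the callout with a driver lock so that softclock() acquires
 * it around the handler, closing the classic stop-vs-run race.
 *
 *	static struct mtx example_mtx;
 *	static struct callout example_ch;
 *
 *	mtx_init(&example_mtx, "example", NULL, MTX_DEF);
 *	callout_init_mtx(&example_ch, &example_mtx, 0);
 *	callout_reset(&example_ch, hz, example_timer, sc);
 *
 * The handler then runs with example_mtx held, and since the flags above
 * do not include CALLOUT_RETURNUNLOCKED, softclock() drops the mutex when
 * the handler returns.
 */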
#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
	struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	mtx_lock_spin(&callout_lock);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	mtx_unlock_spin(&callout_lock);

	return;
}
#endif /* APM_FIXUP_CALLTODO */