/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_timeout.c 127969 2004-04-06 23:08:49Z cperciva $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
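/*
 * The averages above are kept in fixed point with three implied decimal
 * digits (hence "Units = 1/1000").  softclock() folds each new sample into
 * the running value with a shift-based exponential moving average, e.g.
 *
 *	avg_depth += (depth * 1000 - avg_depth) >> 8;
 *
 * so a sysctl reading of 2500 means that, on average, 2.5 items were
 * examined per softclock() call.
 */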
/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */

/* Exported to machdep.c and/or kern_clock.c. */
struct callout *callout;
struct callout_list callfree;
int callwheelsize, callwheelbits, callwheelmask;
struct callout_tailq *callwheel;
int softticks;			/* Like ticks, but for softclock(). */
struct mtx callout_lock;
#ifdef DIAGNOSTIC
struct mtx dont_sleep_in_callout;
#endif

static struct callout *nextsoftcheck;	/* Next callout to be checked. */
/*
 * Locked by callout_lock:
 *   curr_callout    - If a callout is in progress, it is curr_callout.
 *                     If curr_callout is non-NULL, threads waiting on
 *                     callout_wait will be woken up as soon as the
 *                     relevant callout completes.
 *   wakeup_needed   - If a thread is waiting on callout_wait, then
 *                     wakeup_needed is nonzero.  Set only when
 *                     curr_callout is non-NULL.
 *   wakeup_ctr      - Incremented every time a thread wants to wait
 *                     for a callout to complete.  Modified only when
 *                     curr_callout is non-NULL.
 */
static struct callout *curr_callout;
static int wakeup_needed;
static int wakeup_ctr;
/*
 * Locked by callout_wait_lock:
 *   callout_wait    - If wakeup_needed is set, callout_wait will be
 *                     triggered after the current callout finishes.
 *   wakeup_done_ctr - Set to the current value of wakeup_ctr after
 *                     callout_wait is triggered.
 */
static struct mtx callout_wait_lock;
static struct cv callout_wait;
static int wakeup_done_ctr;

/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 *	This code is called very early in the kernel initialization sequence,
 *	and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
	/*
	 * Calculate callout wheel size
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	callout = (struct callout *)v;
	v = (caddr_t)(callout + ncallout);
	callwheel = (struct callout_tailq *)v;
	v = (caddr_t)(callwheel + callwheelsize);
	return(v);
}
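/*
 * For example (illustrative figures only; ncallout is set at boot from
 * tunables such as maxproc and maxfiles), ncallout = 5000 would give
 * callwheelsize = 8192, callwheelbits = 13 and callwheelmask = 0x1fff,
 * and a callout due at tick t is hashed into callwheel[t & callwheelmask].
 */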
/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 *	This code is called just once, after the space reserved for the
 *	callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	int i;

	SLIST_INIT(&callfree);
	for (i = 0; i < ncallout; i++) {
		callout_init(&callout[i], 0);
		callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
	}
	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&callwheel[i]);
	}
	mtx_init(&callout_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
#ifdef DIAGNOSTIC
	mtx_init(&dont_sleep_in_callout, "dont_sleep_in_callout", NULL, MTX_DEF);
#endif
	mtx_init(&callout_wait_lock, "callout_wait_lock", NULL, MTX_DEF);
	cv_init(&callout_wait, "callout_wait");
	curr_callout = NULL;
	wakeup_needed = 0;
	wakeup_ctr = 0;
	wakeup_done_ctr = 0;
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and A. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */
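/*
 * In outline: the wheel is an array of callwheelsize bucket queues, and a
 * callout due at tick t lives in callwheel[t & callwheelmask].  Each tick,
 * softclock() walks one bucket; entries whose c_time does not match the
 * current tick belong to a later "lap" around the wheel and are skipped.
 * Insertion and removal are therefore O(1) no matter how far in the
 * future a callout is scheduled.
 */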
/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *dummy)
{
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int gcalls;
	int wakeup_cookie;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	mtx_lock_spin(&callout_lock);
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					mtx_unlock_spin(&callout_lock);
					;	/* nothing */
					mtx_lock_spin(&callout_lock);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				int c_flags;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				c->c_func = NULL;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
					    c_links.sle);
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				curr_callout = c;
				mtx_unlock_spin(&callout_lock);
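				/*
				 * The handler runs with callout_lock
				 * dropped; handlers not flagged
				 * CALLOUT_MPSAFE are serialized under
				 * Giant instead.
				 */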
				if (!(c_flags & CALLOUT_MPSAFE)) {
					mtx_lock(&Giant);
					gcalls++;
				} else {
					mpcalls++;
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
				mtx_lock(&dont_sleep_in_callout);
#endif
				c_func(c_arg);
#ifdef DIAGNOSTIC
				mtx_unlock(&dont_sleep_in_callout);
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				if (!(c_flags & CALLOUT_MPSAFE))
					mtx_unlock(&Giant);
				mtx_lock_spin(&callout_lock);
				curr_callout = NULL;
				if (wakeup_needed) {
					/*
					 * There might be someone waiting
					 * for the callout to complete.
					 */
					wakeup_cookie = wakeup_ctr;
					mtx_unlock_spin(&callout_lock);
					mtx_lock(&callout_wait_lock);
					cv_broadcast(&callout_wait);
					wakeup_done_ctr = wakeup_cookie;
					mtx_unlock(&callout_wait_lock);
					mtx_lock_spin(&callout_lock);
					wakeup_needed = 0;
				}
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	nextsoftcheck = NULL;
	mtx_unlock_spin(&callout_lock);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout *new;
	struct callout_handle handle;

	mtx_lock_spin(&callout_lock);

	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&callfree, c_links.sle);

	callout_reset(new, to_ticks, ftn, arg);

	handle.callout = new;
	mtx_unlock_spin(&callout_lock);
	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	mtx_lock_spin(&callout_lock);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	mtx_unlock_spin(&callout_lock);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}
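/*
 * Typical use of the legacy interface (a sketch; "mydev_poll" and "sc"
 * are illustrative names, not part of this file):
 *
 *	struct callout_handle ch;
 *
 *	callout_handle_init(&ch);		   // untimeout() now benign
 *	ch = timeout(mydev_poll, sc, hz);	   // mydev_poll(sc) in ~1s
 *	...
 *	untimeout(mydev_poll, sc, ch);		   // cancel if still pending
 *
 * As described above, untimeout() needs the original function and argument
 * as well as the handle to identify the entry.
 */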
/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been serviced
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
void
callout_reset(c, to_ticks, ftn, arg)
	struct callout *c;
	int to_ticks;
	void (*ftn)(void *);
	void *arg;
{

	mtx_lock_spin(&callout_lock);

	if (c == curr_callout && wakeup_needed) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress, and someone has called
		 * callout_drain to kill that callout.  Don't reschedule.
		 */
		mtx_unlock_spin(&callout_lock);
		return;
	}

	if (c->c_flags & CALLOUT_PENDING)
		callout_stop(c);

	/*
	 * We could unlock callout_lock here and lock it again before the
	 * TAILQ_INSERT_TAIL, but there's no point since doing this setup
	 * doesn't take much time.
	 */
	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = ftn;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
	    c, c_links.tqe);
	mtx_unlock_spin(&callout_lock);
}

/* For binary compatibility */
#undef callout_stop
int
callout_stop(c)
	struct callout *c;
{

	return (_callout_stop_safe(c, 0));
}

int
_callout_stop_safe(c, safe)
	struct callout *c;
	int safe;
{
	int wakeup_cookie;

	mtx_lock_spin(&callout_lock);
	/*
	 * Don't attempt to delete a callout that's not on the queue.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;
		if (c == curr_callout && safe) {
			/* We need to wait until the callout is finished. */
			wakeup_needed = 1;
			wakeup_cookie = wakeup_ctr++;
			mtx_unlock_spin(&callout_lock);
			mtx_lock(&callout_wait_lock);
			/*
			 * Check to make sure that softclock() didn't
			 * do the wakeup in between our dropping
			 * callout_lock and picking up callout_wait_lock.
			 */
			if (wakeup_cookie - wakeup_done_ctr > 0)
				cv_wait(&callout_wait, &callout_wait_lock);

			mtx_unlock(&callout_wait_lock);
		} else
			mtx_unlock_spin(&callout_lock);
		return (0);
	}
	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (nextsoftcheck == c) {
		nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
	}
	TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe);
	c->c_func = NULL;

	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
	}
	mtx_unlock_spin(&callout_lock);
	return (1);
}
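/*
 * Typical use of this interface (a sketch; "sc", "mydev_tick" and the
 * softc layout are illustrative only).  As of this revision,
 * <sys/callout.h> is expected to map callout_stop(c) to
 * _callout_stop_safe(c, 0) and the new callout_drain(c) to
 * _callout_stop_safe(c, 1):
 *
 *	callout_init(&sc->timer, 0);			// !MPSAFE: Giant
 *	callout_reset(&sc->timer, hz, mydev_tick, sc);	// fire in ~1 second
 *	...
 *	callout_drain(&sc->timer);			// e.g. on detach
 *
 * With safe == 0, a handler that is already running is not waited for;
 * with safe != 0 the caller sleeps on callout_wait until softclock()
 * broadcasts that the current handler has finished, using the
 * wakeup_ctr/wakeup_done_ctr cookies to detect a wakeup that occurred
 * between dropping callout_lock and taking callout_wait_lock.
 */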
void
callout_init(c, mpsafe)
	struct callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe)
		c->c_flags |= CALLOUT_MPSAFE;
}

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
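/*
 * Worked example of the conversion below (illustrative values: hz = 100,
 * so tick = 10000 microseconds): a time_change of 2 hours
 * (tv_sec = 7200, tv_usec = 0) takes the first branch and yields
 *
 *	delta_ticks = (7200 * 1000000 + 0 + 9999) / 10000 + 1 = 720001
 *
 * i.e. about hz ticks per second slept, rounded up; the branches differ
 * only in how they avoid overflowing the intermediate products.
 */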
void
adjust_timeout_calltodo(time_change)
	struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
		    time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
		    (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	mtx_lock_spin(&callout_lock);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	mtx_unlock_spin(&callout_lock);

	return;
}
#endif /* APM_FIXUP_CALLTODO */