/* kern_timeout.c, FreeBSD head revision 242402 */
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_timeout.c 242402 2012-10-31 18:07:18Z attilio $");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <sys/smp.h>

#ifdef SMP
#include <machine/cpu.h>
#endif

SDT_PROVIDER_DEFINE(callout_execute);
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_start, callout-start);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_start, 0,
    "struct callout *");
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_end, callout-end);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_end, 0,
    "struct callout *");

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_lockcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
    "Average number of lock callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");

/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */
int callwheelsize, callwheelbits, callwheelmask;

/*
 * The callout cpu migration entity represents the information needed to
 * describe a migrating callout to its new callout cpu.
 * The cached information is essential for deferring the migration when
 * the migrating callout is already running.
 */
struct cc_mig_ent {
#ifdef SMP
	void	(*ce_migration_func)(void *);
	void	*ce_migration_arg;
	int	ce_migration_cpu;
	int	ce_migration_ticks;
#endif
};
/*
 * There is one struct callout_cpu per cpu, holding all relevant
 * state for the callout processing thread on the individual CPU.
 * In particular:
 *	cc_ticks is incremented once per tick in callout_tick().
 *	It tracks the global 'ticks' but in a way that the individual
 *	threads should not worry about races in the order in which
 *	hardclock() and hardclock_cpu() run on the various CPUs.
 *	cc_softticks is advanced in callout_tick() to point to the
 *	first entry in cc_callwheel that may need handling.  In turn,
 *	a softclock() is scheduled so it can serve the various entries i
 *	such that cc_softticks <= i <= cc_ticks.
 *	XXX maybe cc_softticks and cc_ticks should be volatile?
 *
 *	cc_ticks is also used in callout_reset_cpu() to determine
 *	when the callout should be served.
 */
struct callout_cpu {
	struct mtx_padalign	cc_lock;
	struct cc_mig_ent	cc_migrating_entity;
	struct callout		*cc_callout;
	struct callout_tailq	*cc_callwheel;
	struct callout_list	cc_callfree;
	struct callout		*cc_next;
	struct callout		*cc_curr;
	void			*cc_cookie;
	int			cc_ticks;
	int			cc_softticks;
	int			cc_cancel;
	int			cc_waiting;
	int			cc_firsttick;
};

#ifdef SMP
#define	cc_migration_func	cc_migrating_entity.ce_migration_func
#define	cc_migration_arg	cc_migrating_entity.ce_migration_arg
#define	cc_migration_cpu	cc_migrating_entity.ce_migration_cpu
#define	cc_migration_ticks	cc_migrating_entity.ce_migration_ticks

struct callout_cpu cc_cpu[MAXCPU];
#define	CPUBLOCK	MAXCPU
#define	CC_CPU(cpu)	(&cc_cpu[(cpu)])
#define	CC_SELF()	CC_CPU(PCPU_GET(cpuid))
#else
struct callout_cpu cc_cpu;
#define	CC_CPU(cpu)	&cc_cpu
#define	CC_SELF()	&cc_cpu
#endif
#define	CC_LOCK(cc)	mtx_lock_spin(&(cc)->cc_lock)
#define	CC_UNLOCK(cc)	mtx_unlock_spin(&(cc)->cc_lock)
#define	CC_LOCK_ASSERT(cc)	mtx_assert(&(cc)->cc_lock, MA_OWNED)

static int timeout_cpu;
void (*callout_new_inserted)(int cpu, int ticks) = NULL;

static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");

/**
 * Locked by cc_lock:
 *   cc_curr    - If a callout is in progress, it is cc_curr.
 *                If cc_curr is non-NULL, threads waiting in
 *                callout_drain() will be woken up as soon as the
 *                relevant callout completes.
 *   cc_cancel  - Changing to 1 with both callout_lock and c_lock held
 *                guarantees that the current callout will not run.
 *                The softclock() function sets this to 0 before it
 *                drops callout_lock to acquire c_lock, and it calls
 *                the handler only if cc_cancel is still 0 after
 *                c_lock is successfully acquired.
 *   cc_waiting - If a thread is waiting in callout_drain(), then
 *                cc_waiting is nonzero.  Set only when
 *                cc_curr is non-NULL.
 */
/*
 * Resets the migration entity tied to a specific callout cpu.
 */
static void
cc_cme_cleanup(struct callout_cpu *cc)
{

#ifdef SMP
	cc->cc_migration_cpu = CPUBLOCK;
	cc->cc_migration_ticks = 0;
	cc->cc_migration_func = NULL;
	cc->cc_migration_arg = NULL;
#endif
}

/*
 * Checks if migration is requested by a specific callout cpu.
 */
static int
cc_cme_migrating(struct callout_cpu *cc)
{

#ifdef SMP
	return (cc->cc_migration_cpu != CPUBLOCK);
#else
	return (0);
#endif
}

/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 * This code is called very early in the kernel initialization sequence,
 * and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
	struct callout_cpu *cc;

	timeout_cpu = PCPU_GET(cpuid);
	cc = CC_CPU(timeout_cpu);
	/*
	 * Calculate the callout wheel size: the smallest power of two
	 * that is at least ncallout.
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	cc->cc_callout = (struct callout *)v;
	v = (caddr_t)(cc->cc_callout + ncallout);
	cc->cc_callwheel = (struct callout_tailq *)v;
	v = (caddr_t)(cc->cc_callwheel + callwheelsize);
	return (v);
}

static void
callout_cpu_init(struct callout_cpu *cc)
{
	struct callout *c;
	int i;

	mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
	SLIST_INIT(&cc->cc_callfree);
	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&cc->cc_callwheel[i]);
	}
	cc_cme_cleanup(cc);
	if (cc->cc_callout == NULL)
		return;
	for (i = 0; i < ncallout; i++) {
		c = &cc->cc_callout[i];
		callout_init(c, 0);
		c->c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
}
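/*
 * A worked example of the sizing loop above (illustrative values only,
 * not the defaults of any particular system): if ncallout were 1000,
 * the loop doubles callwheelsize through 1, 2, 4, ... 512, 1024 and
 * stops at the first power of two >= ncallout, leaving:
 *
 *	callwheelsize = 1024
 *	callwheelbits = 10
 *	callwheelmask = 1023 (0x3ff)
 *
 * Because the size is a power of two, a tick value t hashes to its
 * bucket with a single AND, t & callwheelmask, instead of a modulo.
 */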
#ifdef SMP
/*
 * Switches the cpu tied to a specific callout.
 * The function expects the incoming callout cpu to be locked and returns
 * with the outgoing callout cpu locked.
 */
static struct callout_cpu *
callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
{
	struct callout_cpu *new_cc;

	MPASS(c != NULL && cc != NULL);
	CC_LOCK_ASSERT(cc);

	/*
	 * Avoid interrupts and preemption firing after the callout cpu
	 * is blocked in order to avoid deadlocks as the new thread
	 * may otherwise try to acquire the callout cpu lock.
	 */
	c->c_cpu = CPUBLOCK;
	spinlock_enter();
	CC_UNLOCK(cc);
	new_cc = CC_CPU(new_cpu);
	CC_LOCK(new_cc);
	spinlock_exit();
	c->c_cpu = new_cpu;
	return (new_cc);
}
#endif

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 * This code is called just once, after the space reserved for the
 * callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	callout_cpu_init(CC_CPU(timeout_cpu));
}

/*
 * Start standard softclock thread.
 */
static void
start_softclock(void *dummy)
{
	struct callout_cpu *cc;
#ifdef SMP
	int cpu;
#endif

	cc = CC_CPU(timeout_cpu);
	if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK,
	    INTR_MPSAFE, &cc->cc_cookie))
		panic("died while creating standard software ithreads");
#ifdef SMP
	CPU_FOREACH(cpu) {
		if (cpu == timeout_cpu)
			continue;
		cc = CC_CPU(cpu);
		if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
		    INTR_MPSAFE, &cc->cc_cookie))
			panic("died while creating standard software ithreads");
		cc->cc_callout = NULL;	/* Only cpu0 handles timeout(). */
		cc->cc_callwheel = malloc(
		    sizeof(struct callout_tailq) * callwheelsize, M_CALLOUT,
		    M_WAITOK);
		callout_cpu_init(cc);
	}
#endif
}

SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);
void
callout_tick(void)
{
	struct callout_cpu *cc;
	int need_softclock;
	int bucket;

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	need_softclock = 0;
	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
	cc->cc_firsttick = cc->cc_ticks = ticks;
	for (; (cc->cc_softticks - cc->cc_ticks) <= 0; cc->cc_softticks++) {
		bucket = cc->cc_softticks & callwheelmask;
		if (!TAILQ_EMPTY(&cc->cc_callwheel[bucket])) {
			need_softclock = 1;
			break;
		}
	}
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	/*
	 * swi_sched acquires the thread lock, so we don't want to call it
	 * with cc_lock held; incorrect locking order.
	 */
	if (need_softclock)
		swi_sched(cc->cc_cookie, 0);
}

int
callout_tickstofirst(int limit)
{
	struct callout_cpu *cc;
	struct callout *c;
	struct callout_tailq *sc;
	int curticks;
	int skip = 1;

	cc = CC_SELF();
	mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
	curticks = cc->cc_ticks;
	while (skip < ncallout && skip < limit) {
		sc = &cc->cc_callwheel[(curticks + skip) & callwheelmask];
		/* Scan this bucket for a callout that is due soon. */
		TAILQ_FOREACH(c, sc, c_links.tqe) {
			if (c->c_time - curticks <= ncallout)
				goto out;
		}
		skip++;
	}
out:
	cc->cc_firsttick = curticks + skip;
	mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
	return (skip);
}

static struct callout_cpu *
callout_lock(struct callout *c)
{
	struct callout_cpu *cc;
	int cpu;

	for (;;) {
		cpu = c->c_cpu;
#ifdef SMP
		if (cpu == CPUBLOCK) {
			while (c->c_cpu == CPUBLOCK)
				cpu_spinwait();
			continue;
		}
#endif
		cc = CC_CPU(cpu);
		CC_LOCK(cc);
		if (cpu == c->c_cpu)
			break;
		CC_UNLOCK(cc);
	}
	return (cc);
}

static void
callout_cc_add(struct callout *c, struct callout_cpu *cc, int to_ticks,
    void (*func)(void *), void *arg, int cpu)
{

	CC_LOCK_ASSERT(cc);

	if (to_ticks <= 0)
		to_ticks = 1;
	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = func;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
	    c, c_links.tqe);
	if ((c->c_time - cc->cc_firsttick) < 0 &&
	    callout_new_inserted != NULL) {
		cc->cc_firsttick = c->c_time;
		(*callout_new_inserted)(cpu,
		    to_ticks + (ticks - cc->cc_ticks));
	}
}
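/*
 * To illustrate the insertion hash in callout_cc_add() with made-up
 * numbers: assume callwheelmask == 1023, ticks == 5000 and
 * to_ticks == 70.  Then c_time = 5070 and the callout is queued on
 * bucket 5070 & 1023 == 974.  Each wheel revolution covers
 * callwheelsize (here 1024) ticks, so softclock() must still compare
 * c_time against the current tick: a callout a whole number of
 * revolutions in the future hashes to the same bucket and is skipped
 * until its time actually arrives.
 */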
static void
callout_cc_del(struct callout *c, struct callout_cpu *cc)
{

	if (cc->cc_next == c)
		cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		c->c_func = NULL;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
}

static struct callout *
softclock_call_cc(struct callout *c, struct callout_cpu *cc, int *mpcalls,
    int *lockcalls, int *gcalls)
{
	void (*c_func)(void *);
	void *c_arg;
	struct lock_class *class;
	struct lock_object *c_lock;
	int c_flags, sharedlock;
#ifdef SMP
	struct callout_cpu *new_cc;
	void (*new_func)(void *);
	void *new_arg;
	int new_cpu, new_ticks;
#endif
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

	cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
	class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
	sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ? 0 : 1;
	c_lock = c->c_lock;
	c_func = c->c_func;
	c_arg = c->c_arg;
	c_flags = c->c_flags;
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		c->c_flags = CALLOUT_LOCAL_ALLOC;
	else
		c->c_flags &= ~CALLOUT_PENDING;
	cc->cc_curr = c;
	cc->cc_cancel = 0;
	CC_UNLOCK(cc);
	if (c_lock != NULL) {
		class->lc_lock(c_lock, sharedlock);
		/*
		 * The callout may have been cancelled
		 * while we switched locks.
		 */
		if (cc->cc_cancel) {
			class->lc_unlock(c_lock);
			goto skip;
		}
		/* The callout cannot be stopped now. */
		cc->cc_cancel = 1;

		if (c_lock == &Giant.lock_object) {
			(*gcalls)++;
			CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
			    c, c_func, c_arg);
		} else {
			(*lockcalls)++;
			CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
			    c, c_func, c_arg);
		}
	} else {
		(*mpcalls)++;
		CTR3(KTR_CALLOUT, "callout mpsafe %p func %p arg %p",
		    c, c_func, c_arg);
	}
#ifdef DIAGNOSTIC
	binuptime(&bt1);
#endif
	THREAD_NO_SLEEPING();
	SDT_PROBE(callout_execute, kernel, , callout_start, c, 0, 0, 0, 0);
	c_func(c_arg);
	SDT_PROBE(callout_execute, kernel, , callout_end, c, 0, 0, 0, 0);
	THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
	binuptime(&bt2);
	bintime_sub(&bt2, &bt1);
	if (bt2.frac > maxdt) {
		if (lastfunc != c_func || bt2.frac > maxdt * 2) {
			bintime2timespec(&bt2, &ts2);
			printf(
		"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
			    c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
		}
		maxdt = bt2.frac;
		lastfunc = c_func;
	}
#endif
	CTR1(KTR_CALLOUT, "callout %p finished", c);
	if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
		class->lc_unlock(c_lock);
skip:
	CC_LOCK(cc);
	/*
	 * If the current callout is locally allocated (from
	 * timeout(9)) then put it on the freelist.
	 *
	 * Note: we need to check the cached copy of c_flags because
	 * if it was not local, then it's not safe to deref the
	 * callout pointer.
	 */
	if (c_flags & CALLOUT_LOCAL_ALLOC) {
		KASSERT(c->c_flags == CALLOUT_LOCAL_ALLOC,
		    ("corrupted callout"));
		c->c_func = NULL;
		SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
	}
	cc->cc_curr = NULL;
	if (cc->cc_waiting) {
		/*
		 * There is someone waiting for the
		 * callout to complete.
		 * If the callout was scheduled for
		 * migration just cancel it.
		 */
		if (cc_cme_migrating(cc))
			cc_cme_cleanup(cc);
		cc->cc_waiting = 0;
		CC_UNLOCK(cc);
		wakeup(&cc->cc_waiting);
		CC_LOCK(cc);
	} else if (cc_cme_migrating(cc)) {
#ifdef SMP
		/*
		 * If the callout was scheduled for
		 * migration just perform it now.
		 */
		new_cpu = cc->cc_migration_cpu;
		new_ticks = cc->cc_migration_ticks;
		new_func = cc->cc_migration_func;
		new_arg = cc->cc_migration_arg;
		cc_cme_cleanup(cc);

		/*
		 * Handle deferred callout stops.
		 */
		if ((c->c_flags & CALLOUT_DFRMIGRATION) == 0) {
			CTR3(KTR_CALLOUT,
			    "deferred cancelled %p func %p arg %p",
			    c, new_func, new_arg);
			callout_cc_del(c, cc);
			goto nextc;
		}

		c->c_flags &= ~CALLOUT_DFRMIGRATION;

		/*
		 * It should be asserted here that the callout is not
		 * destroyed, but that is not easy to do.
		 */
		new_cc = callout_cpu_switch(c, cc, new_cpu);
		callout_cc_add(c, new_cc, new_ticks, new_func, new_arg,
		    new_cpu);
		CC_UNLOCK(new_cc);
		CC_LOCK(cc);
#else
		panic("migration should not happen");
#endif
	}
#ifdef SMP
nextc:
#endif
	return (cc->cc_next);
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */
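/*
 * A rough sketch of the hashed wheel described above, with the bucket
 * count shortened to 8 for illustration (the real wheel has
 * callwheelsize buckets):
 *
 *	bucket:    0     1     2      3      4     5     6     7
 *	         [ . ] [ c1] [ . ] [c2,c3] [ . ] [ . ] [ c4] [ . ]
 *
 * A callout due at tick t lives on the tail queue at index
 * t & callwheelmask, so insertion and removal are O(1) list
 * operations, and each pass of softclock() walks only the buckets
 * whose tick has arrived, skipping entries whose c_time lies a whole
 * number of wheel revolutions in the future.
 */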
/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *arg)
{
	struct callout_cpu *cc;
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int lockcalls;
	int gcalls;

#ifndef MAX_SOFTCLOCK_STEPS
#define	MAX_SOFTCLOCK_STEPS 100	/* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	lockcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	cc = (struct callout_cpu *)arg;
	CC_LOCK(cc);
	while (cc->cc_softticks - 1 != cc->cc_ticks) {
		/*
		 * cc_softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = cc->cc_softticks;
		cc->cc_softticks++;
		bucket = &cc->cc_callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c != NULL) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					cc->cc_next = c;
					/* Give interrupts a chance. */
					CC_UNLOCK(cc);
					;	/* nothing */
					CC_LOCK(cc);
					c = cc->cc_next;
					steps = 0;
				}
			} else {
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c = softclock_call_cc(c, cc, &mpcalls,
				    &lockcalls, &gcalls);
				steps = 0;
			}
		}
	}
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	cc->cc_next = NULL;
	CC_UNLOCK(cc);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout_cpu *cc;
	struct callout *new;
	struct callout_handle handle;

	cc = CC_CPU(timeout_cpu);
	CC_LOCK(cc);
	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&cc->cc_callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
	callout_reset(new, to_ticks, ftn, arg);
	handle.callout = new;
	CC_UNLOCK(cc);

	return (handle);
}
void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{
	struct callout_cpu *cc;

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	cc = callout_lock(handle.callout);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	CC_UNLOCK(cc);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
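/*
 * A minimal usage sketch of this interface, with a hypothetical driver
 * handler and softc (none of these names exist in this file):
 *
 *	static struct callout my_timer;
 *
 *	static void
 *	my_handler(void *arg)		// runs from softclock()
 *	{
 *		// ... periodic work on 'arg' ...
 *	}
 *
 *	callout_init(&my_timer, CALLOUT_MPSAFE);	   // no Giant
 *	callout_reset(&my_timer, hz / 10, my_handler, sc); // ~100 ms
 *	...
 *	callout_drain(&my_timer);	// before freeing 'sc'
 *
 * In this revision, callout_reset() and callout_drain() are thin
 * <sys/callout.h> wrappers around callout_reset_on() and
 * _callout_stop_safe(), both defined below.
 */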
int
callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
    void *arg, int cpu)
{
	struct callout_cpu *cc;
	int cancelled = 0;

	/*
	 * Don't allow migration of pre-allocated callouts lest they
	 * become unbalanced.
	 */
	if (c->c_flags & CALLOUT_LOCAL_ALLOC)
		cpu = c->c_cpu;
	cc = callout_lock(c);
	if (cc->cc_curr == c) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress.  If there is a lock then we
		 * can cancel the callout if it has not really started.
		 */
		if (c->c_lock != NULL && !cc->cc_cancel)
			cancelled = cc->cc_cancel = 1;
		if (cc->cc_waiting) {
			/*
			 * Someone has called callout_drain to kill this
			 * callout.  Don't reschedule.
			 */
			CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
			    cancelled ? "cancelled" : "failed to cancel",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			return (cancelled);
		}
	}
	if (c->c_flags & CALLOUT_PENDING) {
		if (cc->cc_next == c) {
			cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
		}
		TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
		    c_links.tqe);

		cancelled = 1;
		c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
	}

#ifdef SMP
	/*
	 * If the callout must migrate try to perform it immediately.
	 * If the callout is currently running, just defer the migration
	 * to a more appropriate moment.
	 */
	if (c->c_cpu != cpu) {
		if (cc->cc_curr == c) {
			cc->cc_migration_cpu = cpu;
			cc->cc_migration_ticks = to_ticks;
			cc->cc_migration_func = ftn;
			cc->cc_migration_arg = arg;
			c->c_flags |= CALLOUT_DFRMIGRATION;
			CTR5(KTR_CALLOUT,
			    "migration of %p func %p arg %p in %d to %u deferred",
			    c, c->c_func, c->c_arg, to_ticks, cpu);
			CC_UNLOCK(cc);
			return (cancelled);
		}
		cc = callout_cpu_switch(c, cc, cpu);
	}
#endif

	callout_cc_add(c, cc, to_ticks, ftn, arg, cpu);
	CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
	    cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
	CC_UNLOCK(cc);

	return (cancelled);
}

/*
 * Common idioms that can be optimized in the future.
 */
int
callout_schedule_on(struct callout *c, int to_ticks, int cpu)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
}

int
callout_schedule(struct callout *c, int to_ticks)
{
	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
}
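/*
 * _callout_stop_safe() below backs both callout_stop() and
 * callout_drain(); in this revision <sys/callout.h> expands them to
 * _callout_stop_safe(c, 0) and _callout_stop_safe(c, 1) respectively.
 * The 'safe' argument is the only difference: with safe == 0 the
 * routine never sleeps and may return 0 while the handler is still
 * running on another CPU, while safe == 1 may block on the sleepqueue
 * until any in-progress handler has finished, so only callout_drain()
 * makes it safe to free the callout's memory afterwards.
 */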
int
_callout_stop_safe(c, safe)
	struct callout *c;
	int safe;
{
	struct callout_cpu *cc, *old_cc;
	struct lock_class *class;
	int use_lock, sq_locked;

	/*
	 * Some old subsystems don't hold Giant while running a callout_stop(),
	 * so just discard this check for the moment.
	 */
	if (!safe && c->c_lock != NULL) {
		if (c->c_lock == &Giant.lock_object)
			use_lock = mtx_owned(&Giant);
		else {
			use_lock = 1;
			class = LOCK_CLASS(c->c_lock);
			class->lc_assert(c->c_lock, LA_XLOCKED);
		}
	} else
		use_lock = 0;

	sq_locked = 0;
	old_cc = NULL;
again:
	cc = callout_lock(c);

	/*
	 * If the callout was migrating while the callout cpu lock was
	 * dropped, just drop the sleepqueue lock and check the states
	 * again.
	 */
	if (sq_locked != 0 && cc != old_cc) {
#ifdef SMP
		CC_UNLOCK(cc);
		sleepq_release(&old_cc->cc_waiting);
		sq_locked = 0;
		old_cc = NULL;
		goto again;
#else
		panic("migration should not happen");
#endif
	}

	/*
	 * If the callout isn't pending, it's not on the queue, so
	 * don't attempt to remove it from the queue.  We can try to
	 * stop it by other means however.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;

		/*
		 * If it wasn't on the queue and it isn't the current
		 * callout, then we can't stop it, so just bail.
		 */
		if (cc->cc_curr != c) {
			CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			if (sq_locked)
				sleepq_release(&cc->cc_waiting);
			return (0);
		}

		if (safe) {
			/*
			 * The current callout is running (or just
			 * about to run) and blocking is allowed, so
			 * just wait for the current invocation to
			 * finish.
			 */
			while (cc->cc_curr == c) {

				/*
				 * Use direct calls to sleepqueue interface
				 * instead of cv/msleep in order to avoid
				 * a LOR between cc_lock and sleepqueue
				 * chain spinlocks.  This piece of code
				 * actually emulates a msleep_spin() call.
				 *
				 * If we already have the sleepqueue chain
				 * locked, then we can safely block.  If we
				 * don't already have it locked, however,
				 * we have to drop the cc_lock to lock
				 * it.  This opens several races, so we
				 * restart at the beginning once we have
				 * both locks.  If nothing has changed, then
				 * we will end up back here with sq_locked
				 * set.
				 */
				if (!sq_locked) {
					CC_UNLOCK(cc);
					sleepq_lock(&cc->cc_waiting);
					sq_locked = 1;
					old_cc = cc;
					goto again;
				}

				/*
				 * Migration could be cancelled here, but
				 * since it is not yet certain when the
				 * migration will actually take place, just
				 * let softclock() take care of it.
				 */
				cc->cc_waiting = 1;
				DROP_GIANT();
				CC_UNLOCK(cc);
				sleepq_add(&cc->cc_waiting,
				    &cc->cc_lock.lock_object, "codrain",
				    SLEEPQ_SLEEP, 0);
				sleepq_wait(&cc->cc_waiting, 0);
				sq_locked = 0;
				old_cc = NULL;

				/* Reacquire locks previously released. */
				PICKUP_GIANT();
				CC_LOCK(cc);
			}
		} else if (use_lock && !cc->cc_cancel) {
			/*
			 * The current callout is waiting for its
			 * lock which we hold.  Cancel the callout
			 * and return.  After our caller drops the
			 * lock, the callout will be skipped in
			 * softclock().
			 */
			cc->cc_cancel = 1;
			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			KASSERT(!cc_cme_migrating(cc),
			    ("callout wrongly scheduled for migration"));
			CC_UNLOCK(cc);
			KASSERT(!sq_locked, ("sleepqueue chain locked"));
			return (1);
		} else if ((c->c_flags & CALLOUT_DFRMIGRATION) != 0) {
			c->c_flags &= ~CALLOUT_DFRMIGRATION;
			CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			CC_UNLOCK(cc);
			return (1);
		}
		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
		    c, c->c_func, c->c_arg);
		CC_UNLOCK(cc);
		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
		return (0);
	}
	if (sq_locked)
		sleepq_release(&cc->cc_waiting);

	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
	    c, c->c_func, c->c_arg);
	TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
	    c_links.tqe);
	callout_cc_del(c, cc);

	CC_UNLOCK(cc);
	return (1);
}

void
callout_init(c, mpsafe)
	struct callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe) {
		c->c_lock = NULL;
		c->c_flags = CALLOUT_RETURNUNLOCKED;
	} else {
		c->c_lock = &Giant.lock_object;
		c->c_flags = 0;
	}
	c->c_cpu = timeout_cpu;
}

void
_callout_init_lock(c, lock, flags)
	struct callout *c;
	struct lock_object *lock;
	int flags;
{
	bzero(c, sizeof *c);
	c->c_lock = lock;
	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
	    ("callout_init_lock: bad flags %d", flags));
	KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
	KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
	    (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
	    __func__));
	c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
	c->c_cpu = timeout_cpu;
}
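/*
 * A sketch of initializing a callout tied to a driver mutex via the
 * callout_init_mtx() wrapper from <sys/callout.h> (the softc mutex and
 * timer names are hypothetical):
 *
 *	struct mtx sc_mtx;
 *	struct callout sc_timer;
 *
 *	mtx_init(&sc_mtx, "mydev", NULL, MTX_DEF);
 *	callout_init_mtx(&sc_timer, &sc_mtx, 0);
 *
 * softclock() then acquires sc_mtx before invoking the handler and
 * drops it afterwards (unless CALLOUT_RETURNUNLOCKED is set), so a
 * callout_stop() issued with sc_mtx held cancels the handler
 * atomically, as described in _callout_stop_safe() above.
 */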
#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
	struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	CC_LOCK(cc);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	CC_UNLOCK(cc);

	return;
}
#endif /* APM_FIXUP_CALLTODO */