/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_timeout.c 247467 2013-02-28 16:22:49Z davide $");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <sys/smp.h>

#ifdef SMP
#include <machine/cpu.h>
#endif

SDT_PROVIDER_DEFINE(callout_execute);
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_start, callout-start);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_start, 0,
    "struct callout *");
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_end, callout-end);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_end, 0,
    "struct callout *");

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_lockcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
    "Average number of lock callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */
int callwheelsize, callwheelmask;

/*
 * The callout cpu migration entity represents the information necessary
 * to describe a callout that is migrating to a new callout cpu.
 * Caching this information is important for deferring migration when
 * the migrating callout is already running.
 */
struct cc_mig_ent {
#ifdef SMP
        void    (*ce_migration_func)(void *);
        void    *ce_migration_arg;
        int     ce_migration_cpu;
        int     ce_migration_ticks;
#endif
};
/*
 * There is one struct callout_cpu per cpu, holding all relevant
 * state for the callout processing thread on the individual CPU.
 * In particular:
 *	cc_ticks is incremented once per tick in callout_tick().
 *	It tracks the global 'ticks' but in a way that the individual
 *	threads should not worry about races in the order in which
 *	hardclock() and hardclock_cpu() run on the various CPUs.
 *	cc_softticks is advanced in callout_tick() to point to the
 *	first entry in cc_callwheel that may need handling. In turn,
 *	a softclock() is scheduled so it can serve the various entries i
 *	such that cc_softticks <= i <= cc_ticks.
 *	XXX maybe cc_softticks and cc_ticks should be volatile ?
 *
 *	cc_ticks is also used in callout_reset_on() to determine
 *	when the callout should be served.
 */
struct callout_cpu {
        struct mtx_padalign     cc_lock;
        struct cc_mig_ent       cc_migrating_entity;
        struct callout          *cc_callout;
        struct callout_tailq    *cc_callwheel;
        struct callout_list     cc_callfree;
        struct callout          *cc_next;
        struct callout          *cc_curr;
        void                    *cc_cookie;
        int                     cc_ticks;
        int                     cc_softticks;
        int                     cc_cancel;
        int                     cc_waiting;
        int                     cc_firsttick;
};

#ifdef SMP
#define	cc_migration_func	cc_migrating_entity.ce_migration_func
#define	cc_migration_arg	cc_migrating_entity.ce_migration_arg
#define	cc_migration_cpu	cc_migrating_entity.ce_migration_cpu
#define	cc_migration_ticks	cc_migrating_entity.ce_migration_ticks

struct callout_cpu cc_cpu[MAXCPU];
#define	CPUBLOCK	MAXCPU
#define	CC_CPU(cpu)	(&cc_cpu[(cpu)])
#define	CC_SELF()	CC_CPU(PCPU_GET(cpuid))
#else
struct callout_cpu cc_cpu;
#define	CC_CPU(cpu)	&cc_cpu
#define	CC_SELF()	&cc_cpu
#endif
#define	CC_LOCK(cc)	mtx_lock_spin(&(cc)->cc_lock)
#define	CC_UNLOCK(cc)	mtx_unlock_spin(&(cc)->cc_lock)
#define	CC_LOCK_ASSERT(cc)	mtx_assert(&(cc)->cc_lock, MA_OWNED)

static int timeout_cpu;
void (*callout_new_inserted)(int cpu, int ticks) = NULL;

static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures");

/**
 * Locked by cc_lock:
 *   cc_curr         - If a callout is in progress, it is cc_curr.
 *                     If cc_curr is non-NULL, threads waiting in
 *                     callout_drain() will be woken up as soon as the
 *                     relevant callout completes.
 *   cc_cancel       - Changing to 1 with both cc_lock and c_lock held
 *                     guarantees that the current callout will not run.
 *                     The softclock() function sets this to 0 before it
 *                     drops cc_lock to acquire c_lock, and it calls
 *                     the handler only if cc_cancel is still 0 after
 *                     c_lock is successfully acquired.
 *   cc_waiting      - If a thread is waiting in callout_drain(), then
 *                     cc_waiting is nonzero.  Set only when
 *                     cc_curr is non-NULL.
 */

/*
 * Resets the migration entity tied to a specific callout cpu.
 */
static void
cc_cme_cleanup(struct callout_cpu *cc)
{

#ifdef SMP
        cc->cc_migration_cpu = CPUBLOCK;
        cc->cc_migration_ticks = 0;
        cc->cc_migration_func = NULL;
        cc->cc_migration_arg = NULL;
#endif
}

/*
 * Checks if migration is requested by a specific callout cpu.
 */
static int
cc_cme_migrating(struct callout_cpu *cc)
{

#ifdef SMP
        return (cc->cc_migration_cpu != CPUBLOCK);
#else
        return (0);
#endif
}

/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 * This code is called very early in the kernel initialization sequence,
 * and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
        struct callout_cpu *cc;

        timeout_cpu = PCPU_GET(cpuid);
        cc = CC_CPU(timeout_cpu);
        /*
         * Calculate callout wheel size, should be next power of two higher
         * than 'ncallout'.
         */
        callwheelsize = 1 << fls(ncallout);
        callwheelmask = callwheelsize - 1;

        cc->cc_callout = (struct callout *)v;
        v = (caddr_t)(cc->cc_callout + ncallout);
        cc->cc_callwheel = (struct callout_tailq *)v;
        v = (caddr_t)(cc->cc_callwheel + callwheelsize);
        return(v);
}

static void
callout_cpu_init(struct callout_cpu *cc)
{
        struct callout *c;
        int i;

        mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
        SLIST_INIT(&cc->cc_callfree);
        for (i = 0; i < callwheelsize; i++) {
                TAILQ_INIT(&cc->cc_callwheel[i]);
        }
        cc_cme_cleanup(cc);
        if (cc->cc_callout == NULL)
                return;
        for (i = 0; i < ncallout; i++) {
                c = &cc->cc_callout[i];
                callout_init(c, 0);
                c->c_flags = CALLOUT_LOCAL_ALLOC;
                SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
        }
}
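
/*
 * Editor's illustration (not part of the original file): how the wheel
 * sizing above behaves.  fls(3) returns the index of the most significant
 * set bit, so "1 << fls(ncallout)" is the first power of two strictly
 * greater than ncallout; e.g. for a hypothetical ncallout of 1000,
 * fls(1000) == 10, giving callwheelsize 1024 and callwheelmask 0x3ff.
 * Hashing an absolute tick into a bucket is then a single AND, as this
 * disabled userland sketch shows:
 */
#if 0
#include <strings.h>    /* fls(3) on BSD */
#include <stdio.h>

int
main(void)
{
        int ncallout = 1000;            /* hypothetical example value */
        int size = 1 << fls(ncallout);  /* 1024: next power of two > 1000 */
        int mask = size - 1;            /* 0x3ff */
        int c_time = 123456;            /* some absolute tick value */

        /* A callout due at tick c_time lives in bucket (c_time & mask). */
        printf("size %d mask %#x bucket %d\n", size, mask, c_time & mask);
        return (0);
}
#endif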

#ifdef SMP
/*
 * Switches the cpu tied to a specific callout.
 * The function expects a locked incoming callout cpu and returns with the
 * outgoing callout cpu locked.
 */
static struct callout_cpu *
callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
{
        struct callout_cpu *new_cc;

        MPASS(c != NULL && cc != NULL);
        CC_LOCK_ASSERT(cc);

        /*
         * Avoid interrupts and preemption firing after the callout cpu
         * is blocked in order to avoid deadlocks as the new thread
         * may be willing to acquire the callout cpu lock.
         */
        c->c_cpu = CPUBLOCK;
        spinlock_enter();
        CC_UNLOCK(cc);
        new_cc = CC_CPU(new_cpu);
        CC_LOCK(new_cc);
        spinlock_exit();
        c->c_cpu = new_cpu;
        return (new_cc);
}
#endif

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 * This code is called just once, after the space reserved for the
 * callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
        callout_cpu_init(CC_CPU(timeout_cpu));
}

/*
 * Start standard softclock thread.
 */
static void
start_softclock(void *dummy)
{
        struct callout_cpu *cc;
#ifdef SMP
        int cpu;
#endif

        cc = CC_CPU(timeout_cpu);
        if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK,
            INTR_MPSAFE, &cc->cc_cookie))
                panic("died while creating standard software ithreads");
#ifdef SMP
        CPU_FOREACH(cpu) {
                if (cpu == timeout_cpu)
                        continue;
                cc = CC_CPU(cpu);
                if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
                    INTR_MPSAFE, &cc->cc_cookie))
                        panic("died while creating standard software ithreads");
                cc->cc_callout = NULL;  /* Only cpu0 handles timeout(). */
                cc->cc_callwheel = malloc(
                    sizeof(struct callout_tailq) * callwheelsize, M_CALLOUT,
                    M_WAITOK);
                callout_cpu_init(cc);
        }
#endif
}

SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);

void
callout_tick(void)
{
        struct callout_cpu *cc;
        int need_softclock;
        int bucket;

        /*
         * Process callouts at a very low cpu priority, so we don't keep the
         * relatively high clock interrupt priority any longer than necessary.
         */
        need_softclock = 0;
        cc = CC_SELF();
        mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
        cc->cc_firsttick = cc->cc_ticks = ticks;
        for (; (cc->cc_softticks - cc->cc_ticks) <= 0; cc->cc_softticks++) {
                bucket = cc->cc_softticks & callwheelmask;
                if (!TAILQ_EMPTY(&cc->cc_callwheel[bucket])) {
                        need_softclock = 1;
                        break;
                }
        }
        mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
        /*
         * swi_sched acquires the thread lock, so we don't want to call it
         * with cc_lock held; incorrect locking order.
         */
        if (need_softclock)
                swi_sched(cc->cc_cookie, 0);
}

int
callout_tickstofirst(int limit)
{
        struct callout_cpu *cc;
        struct callout *c;
        struct callout_tailq *sc;
        int curticks;
        int skip = 1;

        cc = CC_SELF();
        mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
        curticks = cc->cc_ticks;
        while (skip < ncallout && skip < limit) {
                sc = &cc->cc_callwheel[(curticks + skip) & callwheelmask];
                /* search scanning ticks */
                TAILQ_FOREACH(c, sc, c_links.tqe) {
                        if (c->c_time - curticks <= ncallout)
                                goto out;
                }
                skip++;
        }
out:
        cc->cc_firsttick = curticks + skip;
        mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
        return (skip);
}

static struct callout_cpu *
callout_lock(struct callout *c)
{
        struct callout_cpu *cc;
        int cpu;

        for (;;) {
                cpu = c->c_cpu;
#ifdef SMP
                if (cpu == CPUBLOCK) {
                        while (c->c_cpu == CPUBLOCK)
                                cpu_spinwait();
                        continue;
                }
#endif
                cc = CC_CPU(cpu);
                CC_LOCK(cc);
                if (cpu == c->c_cpu)
                        break;
                CC_UNLOCK(cc);
        }
        return (cc);
}

static void
callout_cc_add(struct callout *c, struct callout_cpu *cc, int to_ticks,
    void (*func)(void *), void *arg, int cpu)
{

        CC_LOCK_ASSERT(cc);

        if (to_ticks <= 0)
                to_ticks = 1;
        c->c_arg = arg;
        c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
        c->c_func = func;
        c->c_time = ticks + to_ticks;
        TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
            c, c_links.tqe);
        if ((c->c_time - cc->cc_firsttick) < 0 &&
            callout_new_inserted != NULL) {
                cc->cc_firsttick = c->c_time;
                (*callout_new_inserted)(cpu,
                    to_ticks + (ticks - cc->cc_ticks));
        }
}
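
/*
 * Editor's note: callout_cc_add() above reports a new soonest-to-fire
 * callout through the callout_new_inserted hook declared earlier.  In
 * this revision the event timer code (kern_clocksource.c) points the
 * hook at its own routine so that one-shot timer hardware can be
 * reprogrammed for the earlier deadline.  A hypothetical minimal
 * consumer (names made up) would look like this:
 */
#if 0
static void
example_new_callout(int cpu, int ticks)
{
        /*
         * 'ticks' is relative to the current tick.  Reprogram the
         * per-cpu one-shot event timer if this deadline is earlier
         * than the one currently programmed.
         */
}

static void
example_timer_init(void)
{
        callout_new_inserted = example_new_callout;
}
#endif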

static void
callout_cc_del(struct callout *c, struct callout_cpu *cc)
{

        if ((c->c_flags & CALLOUT_LOCAL_ALLOC) == 0)
                return;
        c->c_func = NULL;
        SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
}

static void
softclock_call_cc(struct callout *c, struct callout_cpu *cc, int *mpcalls,
    int *lockcalls, int *gcalls)
{
        void (*c_func)(void *);
        void *c_arg;
        struct lock_class *class;
        struct lock_object *c_lock;
        int c_flags, sharedlock;
#ifdef SMP
        struct callout_cpu *new_cc;
        void (*new_func)(void *);
        void *new_arg;
        int new_cpu, new_ticks;
#endif
#ifdef DIAGNOSTIC
        struct bintime bt1, bt2;
        struct timespec ts2;
        static uint64_t maxdt = 36893488147419102LL;    /* 2 msec */
        static timeout_t *lastfunc;
#endif

        KASSERT((c->c_flags & (CALLOUT_PENDING | CALLOUT_ACTIVE)) ==
            (CALLOUT_PENDING | CALLOUT_ACTIVE),
            ("softclock_call_cc: pend|act %p %x", c, c->c_flags));
        class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL;
        sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ? 0 : 1;
        c_lock = c->c_lock;
        c_func = c->c_func;
        c_arg = c->c_arg;
        c_flags = c->c_flags;
        if (c->c_flags & CALLOUT_LOCAL_ALLOC)
                c->c_flags = CALLOUT_LOCAL_ALLOC;
        else
                c->c_flags &= ~CALLOUT_PENDING;
        cc->cc_curr = c;
        cc->cc_cancel = 0;
        CC_UNLOCK(cc);
        if (c_lock != NULL) {
                class->lc_lock(c_lock, sharedlock);
                /*
                 * The callout may have been cancelled
                 * while we switched locks.
                 */
                if (cc->cc_cancel) {
                        class->lc_unlock(c_lock);
                        goto skip;
                }
                /* The callout cannot be stopped now. */
                cc->cc_cancel = 1;

                if (c_lock == &Giant.lock_object) {
                        (*gcalls)++;
                        CTR3(KTR_CALLOUT, "callout %p func %p arg %p",
                            c, c_func, c_arg);
                } else {
                        (*lockcalls)++;
                        CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p",
                            c, c_func, c_arg);
                }
        } else {
                (*mpcalls)++;
                CTR3(KTR_CALLOUT, "callout mpsafe %p func %p arg %p",
                    c, c_func, c_arg);
        }
#ifdef DIAGNOSTIC
        binuptime(&bt1);
#endif
        THREAD_NO_SLEEPING();
        SDT_PROBE(callout_execute, kernel, , callout_start, c, 0, 0, 0, 0);
        c_func(c_arg);
        SDT_PROBE(callout_execute, kernel, , callout_end, c, 0, 0, 0, 0);
        THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
        binuptime(&bt2);
        bintime_sub(&bt2, &bt1);
        if (bt2.frac > maxdt) {
                if (lastfunc != c_func || bt2.frac > maxdt * 2) {
                        bintime2timespec(&bt2, &ts2);
                        printf(
                "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
                            c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec);
                }
                maxdt = bt2.frac;
                lastfunc = c_func;
        }
#endif
        CTR1(KTR_CALLOUT, "callout %p finished", c);
        if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
                class->lc_unlock(c_lock);
skip:
        CC_LOCK(cc);
        KASSERT(cc->cc_curr == c, ("mishandled cc_curr"));
        cc->cc_curr = NULL;
        if (cc->cc_waiting) {
                /*
                 * There is someone waiting for the
                 * callout to complete.
                 * If the callout was scheduled for
                 * migration just cancel it.
                 */
                if (cc_cme_migrating(cc)) {
                        cc_cme_cleanup(cc);

                        /*
                         * It should be asserted here that the callout is
                         * not destroyed, but that is not easy.
                         */
                        c->c_flags &= ~CALLOUT_DFRMIGRATION;
                }
                cc->cc_waiting = 0;
                CC_UNLOCK(cc);
                wakeup(&cc->cc_waiting);
                CC_LOCK(cc);
        } else if (cc_cme_migrating(cc)) {
                KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0,
                    ("Migrating legacy callout %p", c));
#ifdef SMP
                /*
                 * If the callout was scheduled for
                 * migration just perform it now.
                 */
                new_cpu = cc->cc_migration_cpu;
                new_ticks = cc->cc_migration_ticks;
                new_func = cc->cc_migration_func;
                new_arg = cc->cc_migration_arg;
                cc_cme_cleanup(cc);

                /*
                 * It should be asserted here that the callout is not
                 * destroyed, but that is not easy.
                 *
                 * First, handle deferred callout stops.
                 */
                if ((c->c_flags & CALLOUT_DFRMIGRATION) == 0) {
                        CTR3(KTR_CALLOUT,
                            "deferred cancelled %p func %p arg %p",
                            c, new_func, new_arg);
                        callout_cc_del(c, cc);
                        return;
                }
                c->c_flags &= ~CALLOUT_DFRMIGRATION;

                new_cc = callout_cpu_switch(c, cc, new_cpu);
                callout_cc_add(c, new_cc, new_ticks, new_func, new_arg,
                    new_cpu);
                CC_UNLOCK(new_cc);
                CC_LOCK(cc);
#else
                panic("migration should not happen");
#endif
        }
        /*
         * If the current callout is locally allocated (from
         * timeout(9)) then put it on the freelist.
         *
         * Note: we need to check the cached copy of c_flags because
         * if it was not local, then it's not safe to deref the
         * callout pointer.
         */
        KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0 ||
            c->c_flags == CALLOUT_LOCAL_ALLOC,
            ("corrupted callout"));
        if (c_flags & CALLOUT_LOCAL_ALLOC)
                callout_cc_del(c, cc);
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */
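
/*
 * Editor's illustration of the hashed-wheel scheme cited above: a callout
 * due at absolute tick c_time is stored in bucket (c_time & callwheelmask),
 * and because many ticks share a bucket, the scan must compare c_time
 * before firing anything.  A simplified, non-removing sketch of one bucket
 * visit; the real loop in softclock() below also handles removal, lock
 * dropping, and yielding to interrupts:
 */
#if 0
        bucket = &cc->cc_callwheel[curticks & callwheelmask];
        TAILQ_FOREACH(c, bucket, c_links.tqe) {
                if (c->c_time == curticks) {
                        /* Due this tick: dequeue and execute it. */
                } else {
                        /* Same hash, different tick: skip it. */
                }
        }
#endif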

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *arg)
{
        struct callout_cpu *cc;
        struct callout *c;
        struct callout_tailq *bucket;
        int curticks;
        int steps;      /* #steps since we last allowed interrupts */
        int depth;
        int mpcalls;
        int lockcalls;
        int gcalls;

#ifndef MAX_SOFTCLOCK_STEPS
#define	MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

        mpcalls = 0;
        lockcalls = 0;
        gcalls = 0;
        depth = 0;
        steps = 0;
        cc = (struct callout_cpu *)arg;
        CC_LOCK(cc);
        while (cc->cc_softticks - 1 != cc->cc_ticks) {
                /*
                 * cc_softticks may be modified by hard clock, so cache
                 * it while we work on a given bucket.
                 */
                curticks = cc->cc_softticks;
                cc->cc_softticks++;
                bucket = &cc->cc_callwheel[curticks & callwheelmask];
                c = TAILQ_FIRST(bucket);
                while (c != NULL) {
                        depth++;
                        if (c->c_time != curticks) {
                                c = TAILQ_NEXT(c, c_links.tqe);
                                ++steps;
                                if (steps >= MAX_SOFTCLOCK_STEPS) {
                                        cc->cc_next = c;
                                        /* Give interrupts a chance. */
                                        CC_UNLOCK(cc);
                                        ;       /* nothing */
                                        CC_LOCK(cc);
                                        c = cc->cc_next;
                                        steps = 0;
                                }
                        } else {
                                cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
                                TAILQ_REMOVE(bucket, c, c_links.tqe);
                                softclock_call_cc(c, cc, &mpcalls,
                                    &lockcalls, &gcalls);
                                steps = 0;
                                c = cc->cc_next;
                        }
                }
        }
        avg_depth += (depth * 1000 - avg_depth) >> 8;
        avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
        avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
        avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
        cc->cc_next = NULL;
        CC_UNLOCK(cc);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
struct callout_handle
timeout(ftn, arg, to_ticks)
        timeout_t *ftn;
        void *arg;
        int to_ticks;
{
        struct callout_cpu *cc;
        struct callout *new;
        struct callout_handle handle;

        cc = CC_CPU(timeout_cpu);
        CC_LOCK(cc);
        /* Fill in the next free callout structure. */
        new = SLIST_FIRST(&cc->cc_callfree);
        if (new == NULL)
                /* XXX Attempt to malloc first */
                panic("timeout table full");
        SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
        callout_reset(new, to_ticks, ftn, arg);
        handle.callout = new;
        CC_UNLOCK(cc);

        return (handle);
}

void
untimeout(ftn, arg, handle)
        timeout_t *ftn;
        void *arg;
        struct callout_handle handle;
{
        struct callout_cpu *cc;

        /*
         * Check for a handle that was initialized
         * by callout_handle_init, but never used
         * for a real timeout.
         */
        if (handle.callout == NULL)
                return;

        cc = callout_lock(handle.callout);
        if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
                callout_stop(handle.callout);
        CC_UNLOCK(cc);
}

void
callout_handle_init(struct callout_handle *handle)
{
        handle->callout = NULL;
}
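
/*
 * Editor's illustration: typical use of the legacy timeout(9) interface
 * implemented above.  The "foo_"-prefixed names are hypothetical.
 */
#if 0
static struct callout_handle foo_handle;

static void
foo_expire(void *arg)
{
        /* Runs once in softclock context, about one second later. */
}

static void
foo_start(void *sc)
{
        callout_handle_init(&foo_handle);       /* makes untimeout() benign */
        foo_handle = timeout(foo_expire, sc, hz);
}

static void
foo_stop(void *sc)
{
        /* Both the function/argument pair and the handle identify it. */
        untimeout(foo_expire, sc, foo_handle);
}
#endif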

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
int
callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
    void *arg, int cpu)
{
        struct callout_cpu *cc;
        int cancelled = 0;

        /*
         * Don't allow migration of pre-allocated callouts lest they
         * become unbalanced.
         */
        if (c->c_flags & CALLOUT_LOCAL_ALLOC)
                cpu = c->c_cpu;
        cc = callout_lock(c);
        if (cc->cc_curr == c) {
                /*
                 * We're being asked to reschedule a callout which is
                 * currently in progress.  If there is a lock then we
                 * can cancel the callout if it has not really started.
                 */
                if (c->c_lock != NULL && !cc->cc_cancel)
                        cancelled = cc->cc_cancel = 1;
                if (cc->cc_waiting) {
                        /*
                         * Someone has called callout_drain to kill this
                         * callout.  Don't reschedule.
                         */
                        CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
                            cancelled ? "cancelled" : "failed to cancel",
                            c, c->c_func, c->c_arg);
                        CC_UNLOCK(cc);
                        return (cancelled);
                }
        }
        if (c->c_flags & CALLOUT_PENDING) {
                if (cc->cc_next == c) {
                        cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
                }
                TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
                    c_links.tqe);

                cancelled = 1;
                c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
        }

#ifdef SMP
        /*
         * If the callout must migrate try to perform it immediately.
         * If the callout is currently running, just defer the migration
         * to a more appropriate moment.
         */
        if (c->c_cpu != cpu) {
                if (cc->cc_curr == c) {
                        cc->cc_migration_cpu = cpu;
                        cc->cc_migration_ticks = to_ticks;
                        cc->cc_migration_func = ftn;
                        cc->cc_migration_arg = arg;
                        c->c_flags |= CALLOUT_DFRMIGRATION;
                        CTR5(KTR_CALLOUT,
                            "migration of %p func %p arg %p in %d to %u deferred",
                            c, c->c_func, c->c_arg, to_ticks, cpu);
                        CC_UNLOCK(cc);
                        return (cancelled);
                }
                cc = callout_cpu_switch(c, cc, cpu);
        }
#endif

        callout_cc_add(c, cc, to_ticks, ftn, arg, cpu);
        CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
            cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
        CC_UNLOCK(cc);

        return (cancelled);
}

/*
 * Common idioms that can be optimized in the future.
 */
int
callout_schedule_on(struct callout *c, int to_ticks, int cpu)
{
        return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
}

int
callout_schedule(struct callout *c, int to_ticks)
{
        return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
}

int
_callout_stop_safe(c, safe)
        struct callout *c;
        int safe;
{
        struct callout_cpu *cc, *old_cc;
        struct lock_class *class;
        int use_lock, sq_locked;

        /*
         * Some old subsystems don't hold Giant while running a callout_stop(),
         * so just discard this check for the moment.
         */
        if (!safe && c->c_lock != NULL) {
                if (c->c_lock == &Giant.lock_object)
                        use_lock = mtx_owned(&Giant);
                else {
                        use_lock = 1;
                        class = LOCK_CLASS(c->c_lock);
                        class->lc_assert(c->c_lock, LA_XLOCKED);
                }
        } else
                use_lock = 0;

        sq_locked = 0;
        old_cc = NULL;
again:
        cc = callout_lock(c);

        /*
         * If the callout was migrating while the callout cpu lock was
         * dropped, just drop the sleepqueue lock and check the states
         * again.
         */
        if (sq_locked != 0 && cc != old_cc) {
#ifdef SMP
                CC_UNLOCK(cc);
                sleepq_release(&old_cc->cc_waiting);
                sq_locked = 0;
                old_cc = NULL;
                goto again;
#else
                panic("migration should not happen");
#endif
        }

        /*
         * If the callout isn't pending, it's not on the queue, so
         * don't attempt to remove it from the queue.  We can try to
         * stop it by other means however.
         */
        if (!(c->c_flags & CALLOUT_PENDING)) {
                c->c_flags &= ~CALLOUT_ACTIVE;

                /*
                 * If it wasn't on the queue and it isn't the current
                 * callout, then we can't stop it, so just bail.
                 */
                if (cc->cc_curr != c) {
                        CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
                            c, c->c_func, c->c_arg);
                        CC_UNLOCK(cc);
                        if (sq_locked)
                                sleepq_release(&cc->cc_waiting);
                        return (0);
                }

                if (safe) {
                        /*
                         * The current callout is running (or just
                         * about to run) and blocking is allowed, so
                         * just wait for the current invocation to
                         * finish.
                         */
                        while (cc->cc_curr == c) {

                                /*
                                 * Use direct calls to sleepqueue interface
                                 * instead of cv/msleep in order to avoid
                                 * a LOR between cc_lock and sleepqueue
                                 * chain spinlocks.  This piece of code
                                 * emulates a msleep_spin() call actually.
                                 *
                                 * If we already have the sleepqueue chain
                                 * locked, then we can safely block.  If we
                                 * don't already have it locked, however,
                                 * we have to drop the cc_lock to lock
                                 * it.  This opens several races, so we
                                 * restart at the beginning once we have
                                 * both locks.  If nothing has changed, then
                                 * we will end up back here with sq_locked
                                 * set.
                                 */
                                if (!sq_locked) {
                                        CC_UNLOCK(cc);
                                        sleepq_lock(&cc->cc_waiting);
                                        sq_locked = 1;
                                        old_cc = cc;
                                        goto again;
                                }

                                /*
                                 * Migration could be cancelled here, but
                                 * as long as it is not clear when that can
                                 * safely be done, just let softclock()
                                 * take care of it.
                                 */
                                cc->cc_waiting = 1;
                                DROP_GIANT();
                                CC_UNLOCK(cc);
                                sleepq_add(&cc->cc_waiting,
                                    &cc->cc_lock.lock_object, "codrain",
                                    SLEEPQ_SLEEP, 0);
                                sleepq_wait(&cc->cc_waiting, 0);
                                sq_locked = 0;
                                old_cc = NULL;

                                /* Reacquire locks previously released. */
                                PICKUP_GIANT();
                                CC_LOCK(cc);
                        }
                } else if (use_lock && !cc->cc_cancel) {
                        /*
                         * The current callout is waiting for its
                         * lock which we hold.  Cancel the callout
                         * and return.  After our caller drops the
                         * lock, the callout will be skipped in
                         * softclock().
                         */
                        cc->cc_cancel = 1;
                        CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
                            c, c->c_func, c->c_arg);
                        KASSERT(!cc_cme_migrating(cc),
                            ("callout wrongly scheduled for migration"));
                        CC_UNLOCK(cc);
                        KASSERT(!sq_locked, ("sleepqueue chain locked"));
                        return (1);
                } else if ((c->c_flags & CALLOUT_DFRMIGRATION) != 0) {
                        c->c_flags &= ~CALLOUT_DFRMIGRATION;
                        CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
                            c, c->c_func, c->c_arg);
                        CC_UNLOCK(cc);
                        return (1);
                }
                CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
                    c, c->c_func, c->c_arg);
                CC_UNLOCK(cc);
                KASSERT(!sq_locked, ("sleepqueue chain still locked"));
                return (0);
        }
        if (sq_locked)
                sleepq_release(&cc->cc_waiting);

        c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

        CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
            c, c->c_func, c->c_arg);
        if (cc->cc_next == c)
                cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
        TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
            c_links.tqe);
        callout_cc_del(c, cc);

        CC_UNLOCK(cc);
        return (1);
}

void
callout_init(c, mpsafe)
        struct callout *c;
        int mpsafe;
{
        bzero(c, sizeof *c);
        if (mpsafe) {
                c->c_lock = NULL;
                c->c_flags = CALLOUT_RETURNUNLOCKED;
        } else {
                c->c_lock = &Giant.lock_object;
                c->c_flags = 0;
        }
        c->c_cpu = timeout_cpu;
}

void
_callout_init_lock(c, lock, flags)
        struct callout *c;
        struct lock_object *lock;
        int flags;
{
        bzero(c, sizeof *c);
        c->c_lock = lock;
        KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
            ("callout_init_lock: bad flags %d", flags));
        KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
            ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
        KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
            (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
            __func__));
        c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
        c->c_cpu = timeout_cpu;
}
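
/*
 * Editor's illustration: the lock-aware pattern the initializers above
 * enable.  callout_init_mtx(9), a wrapper around _callout_init_lock(),
 * makes softclock() acquire the driver mutex before invoking the handler,
 * so handler and driver code are serialized and callout_stop() /
 * callout_drain() behave predictably.  The "foo_"-prefixed names are
 * hypothetical.
 */
#if 0
struct foo_softc {
        struct mtx      sc_mtx;
        struct callout  sc_callout;
};

static void
foo_tick(void *arg)
{
        struct foo_softc *sc = arg;

        mtx_assert(&sc->sc_mtx, MA_OWNED);      /* taken by softclock() */
        /* ... periodic work ... */
        callout_reset(&sc->sc_callout, hz, foo_tick, sc);
}

static void
foo_attach(struct foo_softc *sc)
{
        mtx_init(&sc->sc_mtx, "foo", NULL, MTX_DEF);
        callout_init_mtx(&sc->sc_callout, &sc->sc_mtx, 0);
        mtx_lock(&sc->sc_mtx);
        callout_reset(&sc->sc_callout, hz, foo_tick, sc);
        mtx_unlock(&sc->sc_mtx);
}

static void
foo_detach(struct foo_softc *sc)
{
        callout_drain(&sc->sc_callout); /* waits out a running handler */
        mtx_destroy(&sc->sc_mtx);
}
#endif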

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
        struct timeval *time_change;
{
        register struct callout *p;
        unsigned long delta_ticks;

        /*
         * How many ticks were we asleep?
         * (stolen from tvtohz()).
         */

        /* Don't do anything */
        if (time_change->tv_sec < 0)
                return;
        else if (time_change->tv_sec <= LONG_MAX / 1000000)
                delta_ticks = (time_change->tv_sec * 1000000 +
                               time_change->tv_usec + (tick - 1)) / tick + 1;
        else if (time_change->tv_sec <= LONG_MAX / hz)
                delta_ticks = time_change->tv_sec * hz +
                              (time_change->tv_usec + (tick - 1)) / tick + 1;
        else
                delta_ticks = LONG_MAX;

        if (delta_ticks > INT_MAX)
                delta_ticks = INT_MAX;

        /*
         * Now rip through the timer calltodo list looking for timers
         * to expire.
         */

        /* don't collide with softclock() */
        CC_LOCK(cc);
        for (p = calltodo.c_next; p != NULL; p = p->c_next) {
                p->c_time -= delta_ticks;

                /* Break if the timer had more time on it than delta_ticks */
                if (p->c_time > 0)
                        break;

                /* take back the ticks the timer didn't use (p->c_time <= 0) */
                delta_ticks = -p->c_time;
        }
        CC_UNLOCK(cc);

        return;
}
#endif /* APM_FIXUP_CALLTODO */