kern_timeout.c revision 247813
11541Srgrimes/*- 21541Srgrimes * Copyright (c) 1982, 1986, 1991, 1993 31541Srgrimes * The Regents of the University of California. All rights reserved. 41541Srgrimes * (c) UNIX System Laboratories, Inc. 51541Srgrimes * All or some portions of this file are derived from material licensed 61541Srgrimes * to the University of California by American Telephone and Telegraph 71541Srgrimes * Co. or Unix System Laboratories, Inc. and are reproduced herein with 81541Srgrimes * the permission of UNIX System Laboratories, Inc. 91541Srgrimes * 101541Srgrimes * Redistribution and use in source and binary forms, with or without 111541Srgrimes * modification, are permitted provided that the following conditions 121541Srgrimes * are met: 131541Srgrimes * 1. Redistributions of source code must retain the above copyright 141541Srgrimes * notice, this list of conditions and the following disclaimer. 151541Srgrimes * 2. Redistributions in binary form must reproduce the above copyright 161541Srgrimes * notice, this list of conditions and the following disclaimer in the 171541Srgrimes * documentation and/or other materials provided with the distribution. 181541Srgrimes * 4. Neither the name of the University nor the names of its contributors 191541Srgrimes * may be used to endorse or promote products derived from this software 201541Srgrimes * without specific prior written permission. 211541Srgrimes * 221541Srgrimes * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 231541Srgrimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 241541Srgrimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 251541Srgrimes * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 261541Srgrimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 271541Srgrimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 281541Srgrimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 291541Srgrimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 301541Srgrimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 311541Srgrimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 321541Srgrimes * SUCH DAMAGE. 331541Srgrimes * 3444510Swollman * From: @(#)kern_clock.c 8.5 (Berkeley) 1/21/94 351541Srgrimes */ 361541Srgrimes 37116182Sobrien#include <sys/cdefs.h> 38116182Sobrien__FBSDID("$FreeBSD: head/sys/kern/kern_timeout.c 247813 2013-03-04 21:09:22Z davide $"); 39116182Sobrien 40247777Sdavide#include "opt_callout_profiling.h" 41187664Srwatson#include "opt_kdtrace.h" 42247777Sdavide#if defined(__arm__) 43247777Sdavide#include "opt_timer.h" 44247777Sdavide#endif 45187664Srwatson 461541Srgrimes#include <sys/param.h> 471541Srgrimes#include <sys/systm.h> 48177859Sjeff#include <sys/bus.h> 4933392Sphk#include <sys/callout.h> 50177859Sjeff#include <sys/interrupt.h> 511541Srgrimes#include <sys/kernel.h> 52133229Srwatson#include <sys/ktr.h> 5374914Sjhb#include <sys/lock.h> 54177859Sjeff#include <sys/malloc.h> 5568840Sjhb#include <sys/mutex.h> 56150188Sjhb#include <sys/proc.h> 57187664Srwatson#include <sys/sdt.h> 58171053Sattilio#include <sys/sleepqueue.h> 59115810Sphk#include <sys/sysctl.h> 60177859Sjeff#include <sys/smp.h> 611541Srgrimes 62220456Sattilio#ifdef SMP 63220456Sattilio#include <machine/cpu.h> 64220456Sattilio#endif 65220456Sattilio 66247777Sdavide#ifndef NO_EVENTTIMERS 67247777SdavideDPCPU_DECLARE(sbintime_t, hardclocktime); 68247777Sdavide#endif 69247777Sdavide 70187664SrwatsonSDT_PROVIDER_DEFINE(callout_execute); 71211616SrpauloSDT_PROBE_DEFINE(callout_execute, kernel, , callout_start, callout-start); 72187664SrwatsonSDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_start, 0, 73187664Srwatson "struct callout *"); 74211616SrpauloSDT_PROBE_DEFINE(callout_execute, kernel, , callout_end, callout-end); 75187664SrwatsonSDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_end, 0, 76187664Srwatson "struct callout *"); 77187664Srwatson 78247777Sdavide#ifdef CALLOUT_PROFILING 79115810Sphkstatic int avg_depth; 80115810SphkSYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0, 81115810Sphk "Average number of items examined per softclock call. Units = 1/1000"); 82115810Sphkstatic int avg_gcalls; 83115810SphkSYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0, 84115810Sphk "Average number of Giant callouts made per softclock call. Units = 1/1000"); 85173760Sattiliostatic int avg_lockcalls; 86173760SattilioSYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0, 87173760Sattilio "Average number of lock callouts made per softclock call. Units = 1/1000"); 88115810Sphkstatic int avg_mpcalls; 89115810SphkSYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0, 90115810Sphk "Average number of MP callouts made per softclock call. Units = 1/1000"); 91247777Sdavidestatic int avg_depth_dir; 92247777SdavideSYSCTL_INT(_debug, OID_AUTO, to_avg_depth_dir, CTLFLAG_RD, &avg_depth_dir, 0, 93247777Sdavide "Average number of direct callouts examined per callout_process call. " 94247777Sdavide "Units = 1/1000"); 95247777Sdavidestatic int avg_lockcalls_dir; 96247777SdavideSYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls_dir, CTLFLAG_RD, 97247777Sdavide &avg_lockcalls_dir, 0, "Average number of lock direct callouts made per " 98247777Sdavide "callout_process call. Units = 1/1000"); 99247777Sdavidestatic int avg_mpcalls_dir; 100247777SdavideSYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir, 101247777Sdavide 0, "Average number of MP direct callouts made per callout_process call. " 102247777Sdavide "Units = 1/1000"); 103247777Sdavide#endif 10433392Sphk/* 10533392Sphk * TODO: 10633392Sphk * allocate more timeout table slots when table overflows. 10733392Sphk */ 108247715Sdavideu_int callwheelsize, callwheelmask; 1092112Swollman 110200510Sluigi/* 111247777Sdavide * The callout cpu exec entities represent informations necessary for 112247777Sdavide * describing the state of callouts currently running on the CPU and the ones 113247777Sdavide * necessary for migrating callouts to the new callout cpu. In particular, 114247777Sdavide * the first entry of the array cc_exec_entity holds informations for callout 115247777Sdavide * running in SWI thread context, while the second one holds informations 116247777Sdavide * for callout running directly from hardware interrupt context. 117220456Sattilio * The cached informations are very important for deferring migration when 118220456Sattilio * the migrating callout is already running. 119220456Sattilio */ 120247777Sdavidestruct cc_exec { 121247777Sdavide struct callout *cc_next; 122247777Sdavide struct callout *cc_curr; 123220456Sattilio#ifdef SMP 124247777Sdavide void (*ce_migration_func)(void *); 125247777Sdavide void *ce_migration_arg; 126247777Sdavide int ce_migration_cpu; 127247777Sdavide sbintime_t ce_migration_time; 128220456Sattilio#endif 129247813Sdavide bool cc_cancel; 130247813Sdavide bool cc_waiting; 131220456Sattilio}; 132247467Sdavide 133220456Sattilio/* 134200510Sluigi * There is one struct callout_cpu per cpu, holding all relevant 135200510Sluigi * state for the callout processing thread on the individual CPU. 136200510Sluigi */ 137177859Sjeffstruct callout_cpu { 138242402Sattilio struct mtx_padalign cc_lock; 139247777Sdavide struct cc_exec cc_exec_entity[2]; 140177859Sjeff struct callout *cc_callout; 141247777Sdavide struct callout_list *cc_callwheel; 142247777Sdavide struct callout_tailq cc_expireq; 143247777Sdavide struct callout_slist cc_callfree; 144247777Sdavide sbintime_t cc_firstevent; 145247777Sdavide sbintime_t cc_lastscan; 146177859Sjeff void *cc_cookie; 147247777Sdavide u_int cc_bucket; 148177859Sjeff}; 149128024Scperciva 150247777Sdavide#define cc_exec_curr cc_exec_entity[0].cc_curr 151247777Sdavide#define cc_exec_next cc_exec_entity[0].cc_next 152247777Sdavide#define cc_exec_cancel cc_exec_entity[0].cc_cancel 153247777Sdavide#define cc_exec_waiting cc_exec_entity[0].cc_waiting 154247777Sdavide#define cc_exec_curr_dir cc_exec_entity[1].cc_curr 155247777Sdavide#define cc_exec_next_dir cc_exec_entity[1].cc_next 156247777Sdavide#define cc_exec_cancel_dir cc_exec_entity[1].cc_cancel 157247777Sdavide#define cc_exec_waiting_dir cc_exec_entity[1].cc_waiting 158247777Sdavide 159177859Sjeff#ifdef SMP 160247777Sdavide#define cc_migration_func cc_exec_entity[0].ce_migration_func 161247777Sdavide#define cc_migration_arg cc_exec_entity[0].ce_migration_arg 162247777Sdavide#define cc_migration_cpu cc_exec_entity[0].ce_migration_cpu 163247777Sdavide#define cc_migration_time cc_exec_entity[0].ce_migration_time 164247777Sdavide#define cc_migration_func_dir cc_exec_entity[1].ce_migration_func 165247777Sdavide#define cc_migration_arg_dir cc_exec_entity[1].ce_migration_arg 166247777Sdavide#define cc_migration_cpu_dir cc_exec_entity[1].ce_migration_cpu 167247777Sdavide#define cc_migration_time_dir cc_exec_entity[1].ce_migration_time 168220456Sattilio 169177859Sjeffstruct callout_cpu cc_cpu[MAXCPU]; 170220456Sattilio#define CPUBLOCK MAXCPU 171177859Sjeff#define CC_CPU(cpu) (&cc_cpu[(cpu)]) 172177859Sjeff#define CC_SELF() CC_CPU(PCPU_GET(cpuid)) 173177859Sjeff#else 174177859Sjeffstruct callout_cpu cc_cpu; 175177859Sjeff#define CC_CPU(cpu) &cc_cpu 176177859Sjeff#define CC_SELF() &cc_cpu 177177859Sjeff#endif 178177859Sjeff#define CC_LOCK(cc) mtx_lock_spin(&(cc)->cc_lock) 179177859Sjeff#define CC_UNLOCK(cc) mtx_unlock_spin(&(cc)->cc_lock) 180220456Sattilio#define CC_LOCK_ASSERT(cc) mtx_assert(&(cc)->cc_lock, MA_OWNED) 181177859Sjeff 182177859Sjeffstatic int timeout_cpu; 183177859Sjeff 184247777Sdavidestatic void softclock_call_cc(struct callout *c, struct callout_cpu *cc, 185247777Sdavide#ifdef CALLOUT_PROFILING 186247777Sdavide int *mpcalls, int *lockcalls, int *gcalls, 187247777Sdavide#endif 188247777Sdavide int direct); 189247777Sdavide 190227293Sedstatic MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures"); 191177859Sjeff 192139831Scperciva/** 193177859Sjeff * Locked by cc_lock: 194247777Sdavide * cc_curr - If a callout is in progress, it is cc_curr. 195247777Sdavide * If cc_curr is non-NULL, threads waiting in 196177859Sjeff * callout_drain() will be woken up as soon as the 197127969Scperciva * relevant callout completes. 198247777Sdavide * cc_cancel - Changing to 1 with both callout_lock and cc_lock held 199141428Siedowse * guarantees that the current callout will not run. 200141428Siedowse * The softclock() function sets this to 0 before it 201173760Sattilio * drops callout_lock to acquire c_lock, and it calls 202155957Sjhb * the handler only if curr_cancelled is still 0 after 203247777Sdavide * cc_lock is successfully acquired. 204177859Sjeff * cc_waiting - If a thread is waiting in callout_drain(), then 205155957Sjhb * callout_wait is nonzero. Set only when 206247777Sdavide * cc_curr is non-NULL. 207127969Scperciva */ 208128024Scperciva 2091541Srgrimes/* 210247777Sdavide * Resets the execution entity tied to a specific callout cpu. 211220456Sattilio */ 212220456Sattiliostatic void 213247777Sdavidecc_cce_cleanup(struct callout_cpu *cc, int direct) 214220456Sattilio{ 215220456Sattilio 216247777Sdavide cc->cc_exec_entity[direct].cc_curr = NULL; 217247777Sdavide cc->cc_exec_entity[direct].cc_next = NULL; 218247777Sdavide cc->cc_exec_entity[direct].cc_cancel = FALSE; 219247777Sdavide cc->cc_exec_entity[direct].cc_waiting = FALSE; 220220456Sattilio#ifdef SMP 221247777Sdavide cc->cc_exec_entity[direct].ce_migration_cpu = CPUBLOCK; 222247777Sdavide cc->cc_exec_entity[direct].ce_migration_time = 0; 223247777Sdavide cc->cc_exec_entity[direct].ce_migration_func = NULL; 224247777Sdavide cc->cc_exec_entity[direct].ce_migration_arg = NULL; 225220456Sattilio#endif 226220456Sattilio} 227220456Sattilio 228220456Sattilio/* 229220456Sattilio * Checks if migration is requested by a specific callout cpu. 230220456Sattilio */ 231220456Sattiliostatic int 232247777Sdavidecc_cce_migrating(struct callout_cpu *cc, int direct) 233220456Sattilio{ 234220456Sattilio 235220456Sattilio#ifdef SMP 236247777Sdavide return (cc->cc_exec_entity[direct].ce_migration_cpu != CPUBLOCK); 237220456Sattilio#else 238220456Sattilio return (0); 239220456Sattilio#endif 240220456Sattilio} 241220456Sattilio 242220456Sattilio/* 243247698Smav * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization 24482127Sdillon * 24582127Sdillon * This code is called very early in the kernel initialization sequence, 24682127Sdillon * and may be called more then once. 24782127Sdillon */ 24882127Sdilloncaddr_t 24982127Sdillonkern_timeout_callwheel_alloc(caddr_t v) 25082127Sdillon{ 251177859Sjeff struct callout_cpu *cc; 252177859Sjeff 253177859Sjeff timeout_cpu = PCPU_GET(cpuid); 254177859Sjeff cc = CC_CPU(timeout_cpu); 25582127Sdillon /* 256243853Salfred * Calculate callout wheel size, should be next power of two higher 257243853Salfred * than 'ncallout'. 25882127Sdillon */ 259243853Salfred callwheelsize = 1 << fls(ncallout); 26082127Sdillon callwheelmask = callwheelsize - 1; 26182127Sdillon 262177859Sjeff cc->cc_callout = (struct callout *)v; 263177859Sjeff v = (caddr_t)(cc->cc_callout + ncallout); 264247777Sdavide cc->cc_callwheel = (struct callout_list *)v; 265177859Sjeff v = (caddr_t)(cc->cc_callwheel + callwheelsize); 26682127Sdillon return(v); 26782127Sdillon} 26882127Sdillon 269177859Sjeffstatic void 270177859Sjeffcallout_cpu_init(struct callout_cpu *cc) 271177859Sjeff{ 272177859Sjeff struct callout *c; 273177859Sjeff int i; 274177859Sjeff 275177859Sjeff mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE); 276177859Sjeff SLIST_INIT(&cc->cc_callfree); 277247777Sdavide for (i = 0; i < callwheelsize; i++) 278247777Sdavide LIST_INIT(&cc->cc_callwheel[i]); 279247777Sdavide TAILQ_INIT(&cc->cc_expireq); 280247777Sdavide cc->cc_firstevent = INT64_MAX; 281247777Sdavide for (i = 0; i < 2; i++) 282247777Sdavide cc_cce_cleanup(cc, i); 283177859Sjeff if (cc->cc_callout == NULL) 284177859Sjeff return; 285177859Sjeff for (i = 0; i < ncallout; i++) { 286177859Sjeff c = &cc->cc_callout[i]; 287177859Sjeff callout_init(c, 0); 288177859Sjeff c->c_flags = CALLOUT_LOCAL_ALLOC; 289177859Sjeff SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle); 290177859Sjeff } 291177859Sjeff} 292177859Sjeff 293220456Sattilio#ifdef SMP 29482127Sdillon/* 295220456Sattilio * Switches the cpu tied to a specific callout. 296220456Sattilio * The function expects a locked incoming callout cpu and returns with 297220456Sattilio * locked outcoming callout cpu. 298220456Sattilio */ 299220456Sattiliostatic struct callout_cpu * 300220456Sattiliocallout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu) 301220456Sattilio{ 302220456Sattilio struct callout_cpu *new_cc; 303220456Sattilio 304220456Sattilio MPASS(c != NULL && cc != NULL); 305220456Sattilio CC_LOCK_ASSERT(cc); 306220456Sattilio 307225057Sattilio /* 308225057Sattilio * Avoid interrupts and preemption firing after the callout cpu 309225057Sattilio * is blocked in order to avoid deadlocks as the new thread 310225057Sattilio * may be willing to acquire the callout cpu lock. 311225057Sattilio */ 312220456Sattilio c->c_cpu = CPUBLOCK; 313225057Sattilio spinlock_enter(); 314220456Sattilio CC_UNLOCK(cc); 315220456Sattilio new_cc = CC_CPU(new_cpu); 316220456Sattilio CC_LOCK(new_cc); 317225057Sattilio spinlock_exit(); 318220456Sattilio c->c_cpu = new_cpu; 319220456Sattilio return (new_cc); 320220456Sattilio} 321220456Sattilio#endif 322220456Sattilio 323220456Sattilio/* 32482127Sdillon * kern_timeout_callwheel_init() - initialize previously reserved callwheel 32582127Sdillon * space. 32682127Sdillon * 32782127Sdillon * This code is called just once, after the space reserved for the 32882127Sdillon * callout wheel has been finalized. 32982127Sdillon */ 33082127Sdillonvoid 33182127Sdillonkern_timeout_callwheel_init(void) 33282127Sdillon{ 333177859Sjeff callout_cpu_init(CC_CPU(timeout_cpu)); 334177859Sjeff} 33582127Sdillon 336177859Sjeff/* 337177859Sjeff * Start standard softclock thread. 338177859Sjeff */ 339177859Sjeffstatic void 340177859Sjeffstart_softclock(void *dummy) 341177859Sjeff{ 342177859Sjeff struct callout_cpu *cc; 343177859Sjeff#ifdef SMP 344177859Sjeff int cpu; 345177859Sjeff#endif 346177859Sjeff 347177859Sjeff cc = CC_CPU(timeout_cpu); 348177859Sjeff if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK, 349214746Sjhb INTR_MPSAFE, &cc->cc_cookie)) 350177859Sjeff panic("died while creating standard software ithreads"); 351177859Sjeff#ifdef SMP 352209059Sjhb CPU_FOREACH(cpu) { 353177859Sjeff if (cpu == timeout_cpu) 354177859Sjeff continue; 355177859Sjeff cc = CC_CPU(cpu); 356177859Sjeff if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK, 357177859Sjeff INTR_MPSAFE, &cc->cc_cookie)) 358177859Sjeff panic("died while creating standard software ithreads"); 359177859Sjeff cc->cc_callout = NULL; /* Only cpu0 handles timeout(). */ 360177859Sjeff cc->cc_callwheel = malloc( 361247777Sdavide sizeof(struct callout_list) * callwheelsize, M_CALLOUT, 362177859Sjeff M_WAITOK); 363177859Sjeff callout_cpu_init(cc); 36482127Sdillon } 365177859Sjeff#endif 366177859Sjeff} 367177859Sjeff 368177859SjeffSYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL); 369177859Sjeff 370247777Sdavide#define CC_HASH_SHIFT 8 371247777Sdavide 372247777Sdavidestatic inline u_int 373247777Sdavidecallout_hash(sbintime_t sbt) 374247777Sdavide{ 375247777Sdavide 376247777Sdavide return (sbt >> (32 - CC_HASH_SHIFT)); 377247777Sdavide} 378247777Sdavide 379247777Sdavidestatic inline u_int 380247777Sdavidecallout_get_bucket(sbintime_t sbt) 381247777Sdavide{ 382247777Sdavide 383247777Sdavide return (callout_hash(sbt) & callwheelmask); 384247777Sdavide} 385247777Sdavide 386177859Sjeffvoid 387247777Sdavidecallout_process(sbintime_t now) 388177859Sjeff{ 389247777Sdavide struct callout *tmp, *tmpn; 390177859Sjeff struct callout_cpu *cc; 391247777Sdavide struct callout_list *sc; 392247777Sdavide sbintime_t first, last, max, tmp_max; 393247777Sdavide uint32_t lookahead; 394247777Sdavide u_int firstb, lastb, nowb; 395247777Sdavide#ifdef CALLOUT_PROFILING 396247777Sdavide int depth_dir = 0, mpcalls_dir = 0, lockcalls_dir = 0; 397247777Sdavide#endif 398177859Sjeff 399247777Sdavide cc = CC_SELF(); 400247777Sdavide mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET); 401247777Sdavide 402247777Sdavide /* Compute the buckets of the last scan and present times. */ 403247777Sdavide firstb = callout_hash(cc->cc_lastscan); 404247777Sdavide cc->cc_lastscan = now; 405247777Sdavide nowb = callout_hash(now); 406247777Sdavide 407247777Sdavide /* Compute the last bucket and minimum time of the bucket after it. */ 408247777Sdavide if (nowb == firstb) 409247777Sdavide lookahead = (SBT_1S / 16); 410247777Sdavide else if (nowb - firstb == 1) 411247777Sdavide lookahead = (SBT_1S / 8); 412247777Sdavide else 413247777Sdavide lookahead = (SBT_1S / 2); 414247777Sdavide first = last = now; 415247777Sdavide first += (lookahead / 2); 416247777Sdavide last += lookahead; 417247777Sdavide last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT)); 418247777Sdavide lastb = callout_hash(last) - 1; 419247777Sdavide max = last; 420247777Sdavide 421177859Sjeff /* 422247777Sdavide * Check if we wrapped around the entire wheel from the last scan. 423247777Sdavide * In case, we need to scan entirely the wheel for pending callouts. 424177859Sjeff */ 425247777Sdavide if (lastb - firstb >= callwheelsize) { 426247777Sdavide lastb = firstb + callwheelsize - 1; 427247777Sdavide if (nowb - firstb >= callwheelsize) 428247777Sdavide nowb = lastb; 429247777Sdavide } 430247777Sdavide 431247777Sdavide /* Iterate callwheel from firstb to nowb and then up to lastb. */ 432247777Sdavide do { 433247777Sdavide sc = &cc->cc_callwheel[firstb & callwheelmask]; 434247777Sdavide tmp = LIST_FIRST(sc); 435247777Sdavide while (tmp != NULL) { 436247777Sdavide /* Run the callout if present time within allowed. */ 437247777Sdavide if (tmp->c_time <= now) { 438247777Sdavide /* 439247777Sdavide * Consumer told us the callout may be run 440247777Sdavide * directly from hardware interrupt context. 441247777Sdavide */ 442247777Sdavide if (tmp->c_flags & CALLOUT_DIRECT) { 443247777Sdavide#ifdef CALLOUT_PROFILING 444247777Sdavide ++depth_dir; 445247777Sdavide#endif 446247777Sdavide cc->cc_exec_next_dir = 447247777Sdavide LIST_NEXT(tmp, c_links.le); 448247777Sdavide cc->cc_bucket = firstb & callwheelmask; 449247777Sdavide LIST_REMOVE(tmp, c_links.le); 450247777Sdavide softclock_call_cc(tmp, cc, 451247777Sdavide#ifdef CALLOUT_PROFILING 452247777Sdavide &mpcalls_dir, &lockcalls_dir, NULL, 453247777Sdavide#endif 454247777Sdavide 1); 455247777Sdavide tmp = cc->cc_exec_next_dir; 456247777Sdavide } else { 457247777Sdavide tmpn = LIST_NEXT(tmp, c_links.le); 458247777Sdavide LIST_REMOVE(tmp, c_links.le); 459247777Sdavide TAILQ_INSERT_TAIL(&cc->cc_expireq, 460247777Sdavide tmp, c_links.tqe); 461247777Sdavide tmp->c_flags |= CALLOUT_PROCESSED; 462247777Sdavide tmp = tmpn; 463247777Sdavide } 464247777Sdavide continue; 465247777Sdavide } 466247777Sdavide /* Skip events from distant future. */ 467247777Sdavide if (tmp->c_time >= max) 468247777Sdavide goto next; 469247777Sdavide /* 470247777Sdavide * Event minimal time is bigger than present maximal 471247777Sdavide * time, so it cannot be aggregated. 472247777Sdavide */ 473247777Sdavide if (tmp->c_time > last) { 474247777Sdavide lastb = nowb; 475247777Sdavide goto next; 476247777Sdavide } 477247777Sdavide /* Update first and last time, respecting this event. */ 478247777Sdavide if (tmp->c_time < first) 479247777Sdavide first = tmp->c_time; 480247777Sdavide tmp_max = tmp->c_time + tmp->c_precision; 481247777Sdavide if (tmp_max < last) 482247777Sdavide last = tmp_max; 483247777Sdavidenext: 484247777Sdavide tmp = LIST_NEXT(tmp, c_links.le); 485180608Sjeff } 486247777Sdavide /* Proceed with the next bucket. */ 487247777Sdavide firstb++; 488247777Sdavide /* 489247777Sdavide * Stop if we looked after present time and found 490247777Sdavide * some event we can't execute at now. 491247777Sdavide * Stop if we looked far enough into the future. 492247777Sdavide */ 493247777Sdavide } while (((int)(firstb - lastb)) <= 0); 494247777Sdavide cc->cc_firstevent = last; 495247777Sdavide#ifndef NO_EVENTTIMERS 496247777Sdavide cpu_new_callout(curcpu, last, first); 497247777Sdavide#endif 498247777Sdavide#ifdef CALLOUT_PROFILING 499247777Sdavide avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8; 500247777Sdavide avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8; 501247777Sdavide avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8; 502247777Sdavide#endif 503177859Sjeff mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET); 504177859Sjeff /* 505177859Sjeff * swi_sched acquires the thread lock, so we don't want to call it 506177859Sjeff * with cc_lock held; incorrect locking order. 507177859Sjeff */ 508247777Sdavide if (!TAILQ_EMPTY(&cc->cc_expireq)) 509177859Sjeff swi_sched(cc->cc_cookie, 0); 510177859Sjeff} 511177859Sjeff 512177859Sjeffstatic struct callout_cpu * 513177859Sjeffcallout_lock(struct callout *c) 514177859Sjeff{ 515177859Sjeff struct callout_cpu *cc; 516177859Sjeff int cpu; 517177859Sjeff 518177859Sjeff for (;;) { 519177859Sjeff cpu = c->c_cpu; 520220456Sattilio#ifdef SMP 521220456Sattilio if (cpu == CPUBLOCK) { 522220456Sattilio while (c->c_cpu == CPUBLOCK) 523220456Sattilio cpu_spinwait(); 524220456Sattilio continue; 525220456Sattilio } 526220456Sattilio#endif 527177859Sjeff cc = CC_CPU(cpu); 528177859Sjeff CC_LOCK(cc); 529177859Sjeff if (cpu == c->c_cpu) 530177859Sjeff break; 531177859Sjeff CC_UNLOCK(cc); 53282127Sdillon } 533177859Sjeff return (cc); 53482127Sdillon} 53582127Sdillon 536220456Sattiliostatic void 537247777Sdavidecallout_cc_add(struct callout *c, struct callout_cpu *cc, 538247777Sdavide sbintime_t sbt, sbintime_t precision, void (*func)(void *), 539247777Sdavide void *arg, int cpu, int flags) 540220456Sattilio{ 541247777Sdavide int bucket; 542220456Sattilio 543220456Sattilio CC_LOCK_ASSERT(cc); 544247777Sdavide if (sbt < cc->cc_lastscan) 545247777Sdavide sbt = cc->cc_lastscan; 546220456Sattilio c->c_arg = arg; 547220456Sattilio c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING); 548247777Sdavide if (flags & C_DIRECT_EXEC) 549247777Sdavide c->c_flags |= CALLOUT_DIRECT; 550247777Sdavide c->c_flags &= ~CALLOUT_PROCESSED; 551220456Sattilio c->c_func = func; 552247777Sdavide c->c_time = sbt; 553247777Sdavide c->c_precision = precision; 554247777Sdavide bucket = callout_get_bucket(c->c_time); 555247777Sdavide CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x", 556247777Sdavide c, (int)(c->c_precision >> 32), 557247777Sdavide (u_int)(c->c_precision & 0xffffffff)); 558247777Sdavide LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le); 559247777Sdavide if (cc->cc_bucket == bucket) 560247777Sdavide cc->cc_exec_next_dir = c; 561247777Sdavide#ifndef NO_EVENTTIMERS 562247777Sdavide /* 563247777Sdavide * Inform the eventtimers(4) subsystem there's a new callout 564247777Sdavide * that has been inserted, but only if really required. 565247777Sdavide */ 566247777Sdavide sbt = c->c_time + c->c_precision; 567247777Sdavide if (sbt < cc->cc_firstevent) { 568247777Sdavide cc->cc_firstevent = sbt; 569247777Sdavide cpu_new_callout(cpu, sbt, c->c_time); 570220456Sattilio } 571247777Sdavide#endif 572220456Sattilio} 573220456Sattilio 574234981Skibstatic void 575234981Skibcallout_cc_del(struct callout *c, struct callout_cpu *cc) 576234981Skib{ 577234981Skib 578243901Skib if ((c->c_flags & CALLOUT_LOCAL_ALLOC) == 0) 579243901Skib return; 580243901Skib c->c_func = NULL; 581243901Skib SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle); 582234981Skib} 583234981Skib 584243901Skibstatic void 585247777Sdavidesoftclock_call_cc(struct callout *c, struct callout_cpu *cc, 586247777Sdavide#ifdef CALLOUT_PROFILING 587247777Sdavide int *mpcalls, int *lockcalls, int *gcalls, 588247777Sdavide#endif 589247777Sdavide int direct) 590234981Skib{ 591234981Skib void (*c_func)(void *); 592234981Skib void *c_arg; 593234981Skib struct lock_class *class; 594234981Skib struct lock_object *c_lock; 595234981Skib int c_flags, sharedlock; 596234981Skib#ifdef SMP 597234981Skib struct callout_cpu *new_cc; 598234981Skib void (*new_func)(void *); 599234981Skib void *new_arg; 600247777Sdavide int flags, new_cpu; 601247777Sdavide sbintime_t new_time; 602234981Skib#endif 603247777Sdavide#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING) 604247793Sdavide sbintime_t sbt1, sbt2; 605234981Skib struct timespec ts2; 606247777Sdavide static sbintime_t maxdt = 2 * SBT_1MS; /* 2 msec */ 607234981Skib static timeout_t *lastfunc; 608234981Skib#endif 609234981Skib 610243901Skib KASSERT((c->c_flags & (CALLOUT_PENDING | CALLOUT_ACTIVE)) == 611243901Skib (CALLOUT_PENDING | CALLOUT_ACTIVE), 612243901Skib ("softclock_call_cc: pend|act %p %x", c, c->c_flags)); 613234981Skib class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL; 614234981Skib sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ? 0 : 1; 615234981Skib c_lock = c->c_lock; 616234981Skib c_func = c->c_func; 617234981Skib c_arg = c->c_arg; 618234981Skib c_flags = c->c_flags; 619234981Skib if (c->c_flags & CALLOUT_LOCAL_ALLOC) 620234981Skib c->c_flags = CALLOUT_LOCAL_ALLOC; 621234981Skib else 622234981Skib c->c_flags &= ~CALLOUT_PENDING; 623247777Sdavide cc->cc_exec_entity[direct].cc_curr = c; 624247777Sdavide cc->cc_exec_entity[direct].cc_cancel = FALSE; 625234981Skib CC_UNLOCK(cc); 626234981Skib if (c_lock != NULL) { 627234981Skib class->lc_lock(c_lock, sharedlock); 628234981Skib /* 629234981Skib * The callout may have been cancelled 630234981Skib * while we switched locks. 631234981Skib */ 632247777Sdavide if (cc->cc_exec_entity[direct].cc_cancel) { 633234981Skib class->lc_unlock(c_lock); 634234981Skib goto skip; 635234981Skib } 636234981Skib /* The callout cannot be stopped now. */ 637247777Sdavide cc->cc_exec_entity[direct].cc_cancel = TRUE; 638234981Skib if (c_lock == &Giant.lock_object) { 639247777Sdavide#ifdef CALLOUT_PROFILING 640234981Skib (*gcalls)++; 641247777Sdavide#endif 642247777Sdavide CTR3(KTR_CALLOUT, "callout giant %p func %p arg %p", 643234981Skib c, c_func, c_arg); 644234981Skib } else { 645247777Sdavide#ifdef CALLOUT_PROFILING 646234981Skib (*lockcalls)++; 647247777Sdavide#endif 648234981Skib CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p", 649234981Skib c, c_func, c_arg); 650234981Skib } 651234981Skib } else { 652247777Sdavide#ifdef CALLOUT_PROFILING 653234981Skib (*mpcalls)++; 654247777Sdavide#endif 655247777Sdavide CTR3(KTR_CALLOUT, "callout %p func %p arg %p", 656234981Skib c, c_func, c_arg); 657234981Skib } 658247793Sdavide#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING) 659247777Sdavide sbt1 = sbinuptime(); 660234981Skib#endif 661234981Skib THREAD_NO_SLEEPING(); 662234981Skib SDT_PROBE(callout_execute, kernel, , callout_start, c, 0, 0, 0, 0); 663234981Skib c_func(c_arg); 664234981Skib SDT_PROBE(callout_execute, kernel, , callout_end, c, 0, 0, 0, 0); 665234981Skib THREAD_SLEEPING_OK(); 666247793Sdavide#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING) 667247793Sdavide sbt2 = sbinuptime(); 668247793Sdavide sbt2 -= sbt1; 669247793Sdavide if (sbt2 > maxdt) { 670247793Sdavide if (lastfunc != c_func || sbt2 > maxdt * 2) { 671247793Sdavide ts2 = sbttots(sbt2); 672234981Skib printf( 673234981Skib "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n", 674234981Skib c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec); 675234981Skib } 676247793Sdavide maxdt = sbt2; 677234981Skib lastfunc = c_func; 678234981Skib } 679234981Skib#endif 680234981Skib CTR1(KTR_CALLOUT, "callout %p finished", c); 681234981Skib if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0) 682234981Skib class->lc_unlock(c_lock); 683234981Skibskip: 684234981Skib CC_LOCK(cc); 685247777Sdavide KASSERT(cc->cc_exec_entity[direct].cc_curr == c, ("mishandled cc_curr")); 686247777Sdavide cc->cc_exec_entity[direct].cc_curr = NULL; 687247777Sdavide if (cc->cc_exec_entity[direct].cc_waiting) { 688234981Skib /* 689234981Skib * There is someone waiting for the 690234981Skib * callout to complete. 691234981Skib * If the callout was scheduled for 692234981Skib * migration just cancel it. 693234981Skib */ 694247777Sdavide if (cc_cce_migrating(cc, direct)) { 695247777Sdavide cc_cce_cleanup(cc, direct); 696243912Sattilio 697243912Sattilio /* 698243912Sattilio * It should be assert here that the callout is not 699243912Sattilio * destroyed but that is not easy. 700243912Sattilio */ 701243901Skib c->c_flags &= ~CALLOUT_DFRMIGRATION; 702243901Skib } 703247777Sdavide cc->cc_exec_entity[direct].cc_waiting = FALSE; 704234981Skib CC_UNLOCK(cc); 705247777Sdavide wakeup(&cc->cc_exec_entity[direct].cc_waiting); 706234981Skib CC_LOCK(cc); 707247777Sdavide } else if (cc_cce_migrating(cc, direct)) { 708243912Sattilio KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0, 709243901Skib ("Migrating legacy callout %p", c)); 710234981Skib#ifdef SMP 711234981Skib /* 712234981Skib * If the callout was scheduled for 713234981Skib * migration just perform it now. 714234981Skib */ 715247777Sdavide new_cpu = cc->cc_exec_entity[direct].ce_migration_cpu; 716247777Sdavide new_time = cc->cc_exec_entity[direct].ce_migration_time; 717247777Sdavide new_func = cc->cc_exec_entity[direct].ce_migration_func; 718247777Sdavide new_arg = cc->cc_exec_entity[direct].ce_migration_arg; 719247777Sdavide cc_cce_cleanup(cc, direct); 720234981Skib 721234981Skib /* 722243912Sattilio * It should be assert here that the callout is not destroyed 723243912Sattilio * but that is not easy. 724243912Sattilio * 725243912Sattilio * As first thing, handle deferred callout stops. 726234981Skib */ 727234981Skib if ((c->c_flags & CALLOUT_DFRMIGRATION) == 0) { 728234981Skib CTR3(KTR_CALLOUT, 729234981Skib "deferred cancelled %p func %p arg %p", 730234981Skib c, new_func, new_arg); 731234981Skib callout_cc_del(c, cc); 732243901Skib return; 733234981Skib } 734234981Skib c->c_flags &= ~CALLOUT_DFRMIGRATION; 735234981Skib 736234981Skib new_cc = callout_cpu_switch(c, cc, new_cpu); 737247777Sdavide flags = (direct) ? C_DIRECT_EXEC : 0; 738247777Sdavide callout_cc_add(c, new_cc, new_time, c->c_precision, new_func, 739247777Sdavide new_arg, new_cpu, flags); 740234981Skib CC_UNLOCK(new_cc); 741234981Skib CC_LOCK(cc); 742234981Skib#else 743234981Skib panic("migration should not happen"); 744234981Skib#endif 745234981Skib } 746243901Skib /* 747243901Skib * If the current callout is locally allocated (from 748243901Skib * timeout(9)) then put it on the freelist. 749243901Skib * 750243901Skib * Note: we need to check the cached copy of c_flags because 751243901Skib * if it was not local, then it's not safe to deref the 752243901Skib * callout pointer. 753243901Skib */ 754243901Skib KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0 || 755243901Skib c->c_flags == CALLOUT_LOCAL_ALLOC, 756243901Skib ("corrupted callout")); 757243912Sattilio if (c_flags & CALLOUT_LOCAL_ALLOC) 758243912Sattilio callout_cc_del(c, cc); 759234981Skib} 760234981Skib 76182127Sdillon/* 762247467Sdavide * The callout mechanism is based on the work of Adam M. Costello and 76329680Sgibbs * George Varghese, published in a technical report entitled "Redesigning 76429680Sgibbs * the BSD Callout and Timer Facilities" and modified slightly for inclusion 76529680Sgibbs * in FreeBSD by Justin T. Gibbs. The original work on the data structures 766128630Shmp * used in this implementation was published by G. Varghese and T. Lauck in 76729680Sgibbs * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for 76829680Sgibbs * the Efficient Implementation of a Timer Facility" in the Proceedings of 76929680Sgibbs * the 11th ACM Annual Symposium on Operating Systems Principles, 77029680Sgibbs * Austin, Texas Nov 1987. 77129680Sgibbs */ 77232388Sphk 77329680Sgibbs/* 7741541Srgrimes * Software (low priority) clock interrupt. 7751541Srgrimes * Run periodic events from timeout queue. 7761541Srgrimes */ 7771541Srgrimesvoid 778177859Sjeffsoftclock(void *arg) 7791541Srgrimes{ 780177859Sjeff struct callout_cpu *cc; 781102936Sphk struct callout *c; 782247777Sdavide#ifdef CALLOUT_PROFILING 783247777Sdavide int depth = 0, gcalls = 0, lockcalls = 0, mpcalls = 0; 784247777Sdavide#endif 7851541Srgrimes 786177859Sjeff cc = (struct callout_cpu *)arg; 787177859Sjeff CC_LOCK(cc); 788247777Sdavide while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) { 789247777Sdavide TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe); 790247777Sdavide softclock_call_cc(c, cc, 791247777Sdavide#ifdef CALLOUT_PROFILING 792247777Sdavide &mpcalls, &lockcalls, &gcalls, 793247777Sdavide#endif 794247777Sdavide 0); 795247777Sdavide#ifdef CALLOUT_PROFILING 796247777Sdavide ++depth; 797247777Sdavide#endif 7981541Srgrimes } 799247777Sdavide#ifdef CALLOUT_PROFILING 800115810Sphk avg_depth += (depth * 1000 - avg_depth) >> 8; 801115810Sphk avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8; 802173760Sattilio avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8; 803115810Sphk avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8; 804247777Sdavide#endif 805177859Sjeff CC_UNLOCK(cc); 8061541Srgrimes} 8071541Srgrimes 8081541Srgrimes/* 8091541Srgrimes * timeout -- 8101541Srgrimes * Execute a function after a specified length of time. 8111541Srgrimes * 8121541Srgrimes * untimeout -- 8131541Srgrimes * Cancel previous timeout function call. 8141541Srgrimes * 81529680Sgibbs * callout_handle_init -- 81629680Sgibbs * Initialize a handle so that using it with untimeout is benign. 81729680Sgibbs * 8181541Srgrimes * See AT&T BCI Driver Reference Manual for specification. This 819247698Smav * implementation differs from that one in that although an 82029680Sgibbs * identification value is returned from timeout, the original 82129680Sgibbs * arguments to timeout as well as the identifier are used to 82229680Sgibbs * identify entries for untimeout. 8231541Srgrimes */ 82429680Sgibbsstruct callout_handle 82529680Sgibbstimeout(ftn, arg, to_ticks) 82633824Sbde timeout_t *ftn; 8271541Srgrimes void *arg; 82869147Sjlemon int to_ticks; 8291541Srgrimes{ 830177859Sjeff struct callout_cpu *cc; 83129680Sgibbs struct callout *new; 83229680Sgibbs struct callout_handle handle; 8331541Srgrimes 834177859Sjeff cc = CC_CPU(timeout_cpu); 835177859Sjeff CC_LOCK(cc); 8361541Srgrimes /* Fill in the next free callout structure. */ 837177859Sjeff new = SLIST_FIRST(&cc->cc_callfree); 83829680Sgibbs if (new == NULL) 83929680Sgibbs /* XXX Attempt to malloc first */ 8401541Srgrimes panic("timeout table full"); 841177859Sjeff SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle); 84244510Swollman callout_reset(new, to_ticks, ftn, arg); 843177859Sjeff handle.callout = new; 844177859Sjeff CC_UNLOCK(cc); 8451541Srgrimes 84629680Sgibbs return (handle); 8471541Srgrimes} 8481541Srgrimes 8491541Srgrimesvoid 85029680Sgibbsuntimeout(ftn, arg, handle) 85133824Sbde timeout_t *ftn; 8521541Srgrimes void *arg; 85329680Sgibbs struct callout_handle handle; 8541541Srgrimes{ 855177859Sjeff struct callout_cpu *cc; 8561541Srgrimes 85729680Sgibbs /* 85829680Sgibbs * Check for a handle that was initialized 85929680Sgibbs * by callout_handle_init, but never used 86029680Sgibbs * for a real timeout. 86129680Sgibbs */ 86229680Sgibbs if (handle.callout == NULL) 86329680Sgibbs return; 86429680Sgibbs 865177859Sjeff cc = callout_lock(handle.callout); 86644510Swollman if (handle.callout->c_func == ftn && handle.callout->c_arg == arg) 86744510Swollman callout_stop(handle.callout); 868177859Sjeff CC_UNLOCK(cc); 8691541Srgrimes} 8701541Srgrimes 87124101Sbdevoid 87229680Sgibbscallout_handle_init(struct callout_handle *handle) 87329680Sgibbs{ 87429680Sgibbs handle->callout = NULL; 87529680Sgibbs} 87629680Sgibbs 87744510Swollman/* 87844510Swollman * New interface; clients allocate their own callout structures. 87944510Swollman * 88044510Swollman * callout_reset() - establish or change a timeout 88144510Swollman * callout_stop() - disestablish a timeout 88244510Swollman * callout_init() - initialize a callout structure so that it can 88344510Swollman * safely be passed to callout_reset() and callout_stop() 88444510Swollman * 88550673Sjlemon * <sys/callout.h> defines three convenience macros: 88644510Swollman * 887140487Scperciva * callout_active() - returns truth if callout has not been stopped, 888140487Scperciva * drained, or deactivated since the last time the callout was 889140487Scperciva * reset. 89050673Sjlemon * callout_pending() - returns truth if callout is still waiting for timeout 89150673Sjlemon * callout_deactivate() - marks the callout as having been serviced 89244510Swollman */ 893149879Sglebiusint 894247777Sdavidecallout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t precision, 895247777Sdavide void (*ftn)(void *), void *arg, int cpu, int flags) 89644510Swollman{ 897247777Sdavide sbintime_t to_sbt, pr; 898177859Sjeff struct callout_cpu *cc; 899247777Sdavide int cancelled, direct; 90044510Swollman 901247777Sdavide cancelled = 0; 902247777Sdavide if (flags & C_ABSOLUTE) { 903247777Sdavide to_sbt = sbt; 904247777Sdavide } else { 905247777Sdavide if ((flags & C_HARDCLOCK) && (sbt < tick_sbt)) 906247777Sdavide sbt = tick_sbt; 907247777Sdavide if ((flags & C_HARDCLOCK) || 908247777Sdavide#ifdef NO_EVENTTIMERS 909247777Sdavide sbt >= sbt_timethreshold) { 910247777Sdavide to_sbt = getsbinuptime(); 911247777Sdavide 912247777Sdavide /* Add safety belt for the case of hz > 1000. */ 913247777Sdavide to_sbt += tc_tick_sbt - tick_sbt; 914247777Sdavide#else 915247777Sdavide sbt >= sbt_tickthreshold) { 916247777Sdavide /* 917247777Sdavide * Obtain the time of the last hardclock() call on 918247777Sdavide * this CPU directly from the kern_clocksource.c. 919247777Sdavide * This value is per-CPU, but it is equal for all 920247777Sdavide * active ones. 921247777Sdavide */ 922247777Sdavide#ifdef __LP64__ 923247777Sdavide to_sbt = DPCPU_GET(hardclocktime); 924247777Sdavide#else 925247777Sdavide spinlock_enter(); 926247777Sdavide to_sbt = DPCPU_GET(hardclocktime); 927247777Sdavide spinlock_exit(); 928247777Sdavide#endif 929247777Sdavide#endif 930247777Sdavide if ((flags & C_HARDCLOCK) == 0) 931247777Sdavide to_sbt += tick_sbt; 932247777Sdavide } else 933247777Sdavide to_sbt = sbinuptime(); 934247777Sdavide to_sbt += sbt; 935247777Sdavide pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp : 936247777Sdavide sbt >> C_PRELGET(flags)); 937247777Sdavide if (pr > precision) 938247777Sdavide precision = pr; 939247777Sdavide } 940177859Sjeff /* 941177859Sjeff * Don't allow migration of pre-allocated callouts lest they 942177859Sjeff * become unbalanced. 943177859Sjeff */ 944177859Sjeff if (c->c_flags & CALLOUT_LOCAL_ALLOC) 945177859Sjeff cpu = c->c_cpu; 946247777Sdavide direct = (c->c_flags & CALLOUT_DIRECT) != 0; 947247777Sdavide KASSERT(!direct || c->c_lock == NULL, 948247777Sdavide ("%s: direct callout %p has lock", __func__, c)); 949177859Sjeff cc = callout_lock(c); 950247777Sdavide if (cc->cc_exec_entity[direct].cc_curr == c) { 951127969Scperciva /* 952127969Scperciva * We're being asked to reschedule a callout which is 953173760Sattilio * currently in progress. If there is a lock then we 954141428Siedowse * can cancel the callout if it has not really started. 955127969Scperciva */ 956247777Sdavide if (c->c_lock != NULL && !cc->cc_exec_entity[direct].cc_cancel) 957247777Sdavide cancelled = cc->cc_exec_entity[direct].cc_cancel = TRUE; 958247777Sdavide if (cc->cc_exec_entity[direct].cc_waiting) { 959141428Siedowse /* 960141428Siedowse * Someone has called callout_drain to kill this 961141428Siedowse * callout. Don't reschedule. 962141428Siedowse */ 963163246Sglebius CTR4(KTR_CALLOUT, "%s %p func %p arg %p", 964163246Sglebius cancelled ? "cancelled" : "failed to cancel", 965163246Sglebius c, c->c_func, c->c_arg); 966177859Sjeff CC_UNLOCK(cc); 967149879Sglebius return (cancelled); 968141428Siedowse } 969128024Scperciva } 970133190Scperciva if (c->c_flags & CALLOUT_PENDING) { 971247777Sdavide if ((c->c_flags & CALLOUT_PROCESSED) == 0) { 972247777Sdavide if (cc->cc_exec_next_dir == c) 973247777Sdavide cc->cc_exec_next_dir = LIST_NEXT(c, c_links.le); 974247777Sdavide LIST_REMOVE(c, c_links.le); 975247777Sdavide } else 976247777Sdavide TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe); 977149879Sglebius cancelled = 1; 978177859Sjeff c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING); 979133190Scperciva } 980220456Sattilio 981220456Sattilio#ifdef SMP 98244510Swollman /* 983220456Sattilio * If the callout must migrate try to perform it immediately. 984220456Sattilio * If the callout is currently running, just defer the migration 985220456Sattilio * to a more appropriate moment. 98644510Swollman */ 987177859Sjeff if (c->c_cpu != cpu) { 988247777Sdavide if (cc->cc_exec_entity[direct].cc_curr == c) { 989247777Sdavide cc->cc_exec_entity[direct].ce_migration_cpu = cpu; 990247777Sdavide cc->cc_exec_entity[direct].ce_migration_time 991247777Sdavide = to_sbt; 992247777Sdavide cc->cc_exec_entity[direct].ce_migration_func = ftn; 993247777Sdavide cc->cc_exec_entity[direct].ce_migration_arg = arg; 994234952Skib c->c_flags |= CALLOUT_DFRMIGRATION; 995247777Sdavide CTR6(KTR_CALLOUT, 996247777Sdavide "migration of %p func %p arg %p in %d.%08x to %u deferred", 997247777Sdavide c, c->c_func, c->c_arg, (int)(to_sbt >> 32), 998247777Sdavide (u_int)(to_sbt & 0xffffffff), cpu); 999220456Sattilio CC_UNLOCK(cc); 1000220456Sattilio return (cancelled); 1001220456Sattilio } 1002220456Sattilio cc = callout_cpu_switch(c, cc, cpu); 1003177859Sjeff } 1004220456Sattilio#endif 1005177859Sjeff 1006247777Sdavide callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags); 1007247777Sdavide CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x", 1008247777Sdavide cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32), 1009247777Sdavide (u_int)(to_sbt & 0xffffffff)); 1010177859Sjeff CC_UNLOCK(cc); 1011149879Sglebius 1012149879Sglebius return (cancelled); 101344510Swollman} 101444510Swollman 1015181191Ssam/* 1016181191Ssam * Common idioms that can be optimized in the future. 1017181191Ssam */ 101881481Sjhbint 1019181191Ssamcallout_schedule_on(struct callout *c, int to_ticks, int cpu) 1020181191Ssam{ 1021181191Ssam return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu); 1022181191Ssam} 1023181191Ssam 1024181191Ssamint 1025181191Ssamcallout_schedule(struct callout *c, int to_ticks) 1026181191Ssam{ 1027181191Ssam return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu); 1028181191Ssam} 1029181191Ssam 1030181191Ssamint 1031127969Scperciva_callout_stop_safe(c, safe) 1032127969Scperciva struct callout *c; 1033127969Scperciva int safe; 1034127969Scperciva{ 1035220456Sattilio struct callout_cpu *cc, *old_cc; 1036173760Sattilio struct lock_class *class; 1037247777Sdavide int direct, sq_locked, use_lock; 1038127969Scperciva 1039173760Sattilio /* 1040173760Sattilio * Some old subsystems don't hold Giant while running a callout_stop(), 1041173760Sattilio * so just discard this check for the moment. 1042173760Sattilio */ 1043173760Sattilio if (!safe && c->c_lock != NULL) { 1044173760Sattilio if (c->c_lock == &Giant.lock_object) 1045173760Sattilio use_lock = mtx_owned(&Giant); 1046173760Sattilio else { 1047173760Sattilio use_lock = 1; 1048173760Sattilio class = LOCK_CLASS(c->c_lock); 1049173760Sattilio class->lc_assert(c->c_lock, LA_XLOCKED); 1050173760Sattilio } 1051173760Sattilio } else 1052173760Sattilio use_lock = 0; 1053247777Sdavide direct = (c->c_flags & CALLOUT_DIRECT) != 0; 1054172025Sjhb sq_locked = 0; 1055220456Sattilio old_cc = NULL; 1056172025Sjhbagain: 1057177859Sjeff cc = callout_lock(c); 1058220456Sattilio 105944510Swollman /* 1060220456Sattilio * If the callout was migrating while the callout cpu lock was 1061220456Sattilio * dropped, just drop the sleepqueue lock and check the states 1062220456Sattilio * again. 1063220456Sattilio */ 1064220456Sattilio if (sq_locked != 0 && cc != old_cc) { 1065220456Sattilio#ifdef SMP 1066220456Sattilio CC_UNLOCK(cc); 1067247777Sdavide sleepq_release(&old_cc->cc_exec_entity[direct].cc_waiting); 1068220456Sattilio sq_locked = 0; 1069220456Sattilio old_cc = NULL; 1070220456Sattilio goto again; 1071220456Sattilio#else 1072220456Sattilio panic("migration should not happen"); 1073220456Sattilio#endif 1074220456Sattilio } 1075220456Sattilio 1076220456Sattilio /* 1077155957Sjhb * If the callout isn't pending, it's not on the queue, so 1078155957Sjhb * don't attempt to remove it from the queue. We can try to 1079155957Sjhb * stop it by other means however. 108044510Swollman */ 108144510Swollman if (!(c->c_flags & CALLOUT_PENDING)) { 108250673Sjlemon c->c_flags &= ~CALLOUT_ACTIVE; 1083155957Sjhb 1084155957Sjhb /* 1085155957Sjhb * If it wasn't on the queue and it isn't the current 1086155957Sjhb * callout, then we can't stop it, so just bail. 1087155957Sjhb */ 1088247777Sdavide if (cc->cc_exec_entity[direct].cc_curr != c) { 1089163246Sglebius CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p", 1090163246Sglebius c, c->c_func, c->c_arg); 1091177859Sjeff CC_UNLOCK(cc); 1092172025Sjhb if (sq_locked) 1093247777Sdavide sleepq_release( 1094247777Sdavide &cc->cc_exec_entity[direct].cc_waiting); 1095141428Siedowse return (0); 1096141428Siedowse } 1097155957Sjhb 1098141428Siedowse if (safe) { 1099127969Scperciva /* 1100155957Sjhb * The current callout is running (or just 1101155957Sjhb * about to run) and blocking is allowed, so 1102155957Sjhb * just wait for the current invocation to 1103155957Sjhb * finish. 1104127969Scperciva */ 1105247777Sdavide while (cc->cc_exec_entity[direct].cc_curr == c) { 1106171053Sattilio /* 1107171053Sattilio * Use direct calls to sleepqueue interface 1108171053Sattilio * instead of cv/msleep in order to avoid 1109177859Sjeff * a LOR between cc_lock and sleepqueue 1110171053Sattilio * chain spinlocks. This piece of code 1111171053Sattilio * emulates a msleep_spin() call actually. 1112172025Sjhb * 1113172025Sjhb * If we already have the sleepqueue chain 1114172025Sjhb * locked, then we can safely block. If we 1115172025Sjhb * don't already have it locked, however, 1116177859Sjeff * we have to drop the cc_lock to lock 1117172025Sjhb * it. This opens several races, so we 1118172025Sjhb * restart at the beginning once we have 1119172025Sjhb * both locks. If nothing has changed, then 1120172025Sjhb * we will end up back here with sq_locked 1121172025Sjhb * set. 1122171053Sattilio */ 1123172025Sjhb if (!sq_locked) { 1124177859Sjeff CC_UNLOCK(cc); 1125247777Sdavide sleepq_lock( 1126247777Sdavide &cc->cc_exec_entity[direct].cc_waiting); 1127172025Sjhb sq_locked = 1; 1128220456Sattilio old_cc = cc; 1129172025Sjhb goto again; 1130172025Sjhb } 1131220456Sattilio 1132220456Sattilio /* 1133220456Sattilio * Migration could be cancelled here, but 1134220456Sattilio * as long as it is still not sure when it 1135220456Sattilio * will be packed up, just let softclock() 1136220456Sattilio * take care of it. 1137220456Sattilio */ 1138247777Sdavide cc->cc_exec_entity[direct].cc_waiting = TRUE; 1139171053Sattilio DROP_GIANT(); 1140177859Sjeff CC_UNLOCK(cc); 1141247777Sdavide sleepq_add( 1142247777Sdavide &cc->cc_exec_entity[direct].cc_waiting, 1143177859Sjeff &cc->cc_lock.lock_object, "codrain", 1144171053Sattilio SLEEPQ_SLEEP, 0); 1145247777Sdavide sleepq_wait( 1146247777Sdavide &cc->cc_exec_entity[direct].cc_waiting, 1147247777Sdavide 0); 1148172025Sjhb sq_locked = 0; 1149220456Sattilio old_cc = NULL; 1150171053Sattilio 1151171053Sattilio /* Reacquire locks previously released. */ 1152171053Sattilio PICKUP_GIANT(); 1153177859Sjeff CC_LOCK(cc); 1154155957Sjhb } 1155247777Sdavide } else if (use_lock && 1156247777Sdavide !cc->cc_exec_entity[direct].cc_cancel) { 1157155957Sjhb /* 1158173760Sattilio * The current callout is waiting for its 1159173760Sattilio * lock which we hold. Cancel the callout 1160155957Sjhb * and return. After our caller drops the 1161173760Sattilio * lock, the callout will be skipped in 1162155957Sjhb * softclock(). 1163155957Sjhb */ 1164247777Sdavide cc->cc_exec_entity[direct].cc_cancel = TRUE; 1165163246Sglebius CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p", 1166163246Sglebius c, c->c_func, c->c_arg); 1167247777Sdavide KASSERT(!cc_cce_migrating(cc, direct), 1168220456Sattilio ("callout wrongly scheduled for migration")); 1169177859Sjeff CC_UNLOCK(cc); 1170172025Sjhb KASSERT(!sq_locked, ("sleepqueue chain locked")); 1171141428Siedowse return (1); 1172234952Skib } else if ((c->c_flags & CALLOUT_DFRMIGRATION) != 0) { 1173234952Skib c->c_flags &= ~CALLOUT_DFRMIGRATION; 1174234952Skib CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p", 1175234952Skib c, c->c_func, c->c_arg); 1176234952Skib CC_UNLOCK(cc); 1177234952Skib return (1); 1178155957Sjhb } 1179163246Sglebius CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p", 1180163246Sglebius c, c->c_func, c->c_arg); 1181177859Sjeff CC_UNLOCK(cc); 1182172025Sjhb KASSERT(!sq_locked, ("sleepqueue chain still locked")); 118381481Sjhb return (0); 118444510Swollman } 1185172025Sjhb if (sq_locked) 1186247777Sdavide sleepq_release(&cc->cc_exec_entity[direct].cc_waiting); 1187172025Sjhb 118850673Sjlemon c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING); 118944510Swollman 1190234981Skib CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p", 1191234981Skib c, c->c_func, c->c_arg); 1192247777Sdavide if ((c->c_flags & CALLOUT_PROCESSED) == 0) { 1193247777Sdavide if (cc->cc_exec_next_dir == c) 1194247777Sdavide cc->cc_exec_next_dir = LIST_NEXT(c, c_links.le); 1195247777Sdavide LIST_REMOVE(c, c_links.le); 1196247777Sdavide } else 1197247777Sdavide TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe); 1198234981Skib callout_cc_del(c, cc); 119944510Swollman 1200177859Sjeff CC_UNLOCK(cc); 120181481Sjhb return (1); 120244510Swollman} 120344510Swollman 120444510Swollmanvoid 120569147Sjlemoncallout_init(c, mpsafe) 120644510Swollman struct callout *c; 120769147Sjlemon int mpsafe; 120844510Swollman{ 120944527Swollman bzero(c, sizeof *c); 1210141428Siedowse if (mpsafe) { 1211173760Sattilio c->c_lock = NULL; 1212141428Siedowse c->c_flags = CALLOUT_RETURNUNLOCKED; 1213141428Siedowse } else { 1214173760Sattilio c->c_lock = &Giant.lock_object; 1215141428Siedowse c->c_flags = 0; 1216141428Siedowse } 1217177859Sjeff c->c_cpu = timeout_cpu; 121844510Swollman} 121944510Swollman 1220141428Siedowsevoid 1221173760Sattilio_callout_init_lock(c, lock, flags) 1222141428Siedowse struct callout *c; 1223173760Sattilio struct lock_object *lock; 1224141428Siedowse int flags; 1225141428Siedowse{ 1226141428Siedowse bzero(c, sizeof *c); 1227173760Sattilio c->c_lock = lock; 1228173760Sattilio KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0, 1229173760Sattilio ("callout_init_lock: bad flags %d", flags)); 1230173760Sattilio KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0, 1231173760Sattilio ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock")); 1232176013Sattilio KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags & 1233176013Sattilio (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class", 1234173760Sattilio __func__)); 1235173760Sattilio c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK); 1236177859Sjeff c->c_cpu = timeout_cpu; 1237141428Siedowse} 1238141428Siedowse 123931950Snate#ifdef APM_FIXUP_CALLTODO 124031950Snate/* 124131950Snate * Adjust the kernel calltodo timeout list. This routine is used after 124231950Snate * an APM resume to recalculate the calltodo timer list values with the 124331950Snate * number of hz's we have been sleeping. The next hardclock() will detect 124431950Snate * that there are fired timers and run softclock() to execute them. 124531950Snate * 124631950Snate * Please note, I have not done an exhaustive analysis of what code this 124731950Snate * might break. I am motivated to have my select()'s and alarm()'s that 124831950Snate * have expired during suspend firing upon resume so that the applications 124931950Snate * which set the timer can do the maintanence the timer was for as close 125031950Snate * as possible to the originally intended time. Testing this code for a 125131950Snate * week showed that resuming from a suspend resulted in 22 to 25 timers 125231950Snate * firing, which seemed independant on whether the suspend was 2 hours or 125331950Snate * 2 days. Your milage may vary. - Ken Key <key@cs.utk.edu> 125431950Snate */ 125531950Snatevoid 125631950Snateadjust_timeout_calltodo(time_change) 125731950Snate struct timeval *time_change; 125831950Snate{ 125931950Snate register struct callout *p; 126031950Snate unsigned long delta_ticks; 126131950Snate 126231950Snate /* 126331950Snate * How many ticks were we asleep? 126436127Sbde * (stolen from tvtohz()). 126531950Snate */ 126631950Snate 126731950Snate /* Don't do anything */ 126831950Snate if (time_change->tv_sec < 0) 126931950Snate return; 127031950Snate else if (time_change->tv_sec <= LONG_MAX / 1000000) 127131950Snate delta_ticks = (time_change->tv_sec * 1000000 + 127231950Snate time_change->tv_usec + (tick - 1)) / tick + 1; 127331950Snate else if (time_change->tv_sec <= LONG_MAX / hz) 127431950Snate delta_ticks = time_change->tv_sec * hz + 127531950Snate (time_change->tv_usec + (tick - 1)) / tick + 1; 127631950Snate else 127731950Snate delta_ticks = LONG_MAX; 127831950Snate 127931950Snate if (delta_ticks > INT_MAX) 128031950Snate delta_ticks = INT_MAX; 128131950Snate 128231950Snate /* 128331950Snate * Now rip through the timer calltodo list looking for timers 128431950Snate * to expire. 128531950Snate */ 128631950Snate 128731950Snate /* don't collide with softclock() */ 1288177859Sjeff CC_LOCK(cc); 128931950Snate for (p = calltodo.c_next; p != NULL; p = p->c_next) { 129031950Snate p->c_time -= delta_ticks; 129131950Snate 129231950Snate /* Break if the timer had more time on it than delta_ticks */ 129331950Snate if (p->c_time > 0) 129431950Snate break; 129531950Snate 129631950Snate /* take back the ticks the timer didn't use (p->c_time <= 0) */ 129731950Snate delta_ticks = -p->c_time; 129831950Snate } 1299177859Sjeff CC_UNLOCK(cc); 130031950Snate 130131950Snate return; 130231950Snate} 130331950Snate#endif /* APM_FIXUP_CALLTODO */ 1304247777Sdavide 1305247777Sdavidestatic int 1306247777Sdavideflssbt(sbintime_t sbt) 1307247777Sdavide{ 1308247777Sdavide 1309247777Sdavide sbt += (uint64_t)sbt >> 1; 1310247777Sdavide if (sizeof(long) >= sizeof(sbintime_t)) 1311247777Sdavide return (flsl(sbt)); 1312247777Sdavide if (sbt >= SBT_1S) 1313247777Sdavide return (flsl(((uint64_t)sbt) >> 32) + 32); 1314247777Sdavide return (flsl(sbt)); 1315247777Sdavide} 1316247777Sdavide 1317247777Sdavide/* 1318247777Sdavide * Dump immediate statistic snapshot of the scheduled callouts. 1319247777Sdavide */ 1320247777Sdavidestatic int 1321247777Sdavidesysctl_kern_callout_stat(SYSCTL_HANDLER_ARGS) 1322247777Sdavide{ 1323247777Sdavide struct callout *tmp; 1324247777Sdavide struct callout_cpu *cc; 1325247777Sdavide struct callout_list *sc; 1326247777Sdavide sbintime_t maxpr, maxt, medpr, medt, now, spr, st, t; 1327247777Sdavide int ct[64], cpr[64], ccpbk[32]; 1328247777Sdavide int error, val, i, count, tcum, pcum, maxc, c, medc; 1329247777Sdavide#ifdef SMP 1330247777Sdavide int cpu; 1331247777Sdavide#endif 1332247777Sdavide 1333247777Sdavide val = 0; 1334247777Sdavide error = sysctl_handle_int(oidp, &val, 0, req); 1335247777Sdavide if (error != 0 || req->newptr == NULL) 1336247777Sdavide return (error); 1337247777Sdavide count = maxc = 0; 1338247777Sdavide st = spr = maxt = maxpr = 0; 1339247777Sdavide bzero(ccpbk, sizeof(ccpbk)); 1340247777Sdavide bzero(ct, sizeof(ct)); 1341247777Sdavide bzero(cpr, sizeof(cpr)); 1342247777Sdavide now = sbinuptime(); 1343247777Sdavide#ifdef SMP 1344247777Sdavide CPU_FOREACH(cpu) { 1345247777Sdavide cc = CC_CPU(cpu); 1346247777Sdavide#else 1347247777Sdavide cc = CC_CPU(timeout_cpu); 1348247777Sdavide#endif 1349247777Sdavide CC_LOCK(cc); 1350247777Sdavide for (i = 0; i < callwheelsize; i++) { 1351247777Sdavide sc = &cc->cc_callwheel[i]; 1352247777Sdavide c = 0; 1353247777Sdavide LIST_FOREACH(tmp, sc, c_links.le) { 1354247777Sdavide c++; 1355247777Sdavide t = tmp->c_time - now; 1356247777Sdavide if (t < 0) 1357247777Sdavide t = 0; 1358247777Sdavide st += t / SBT_1US; 1359247777Sdavide spr += tmp->c_precision / SBT_1US; 1360247777Sdavide if (t > maxt) 1361247777Sdavide maxt = t; 1362247777Sdavide if (tmp->c_precision > maxpr) 1363247777Sdavide maxpr = tmp->c_precision; 1364247777Sdavide ct[flssbt(t)]++; 1365247777Sdavide cpr[flssbt(tmp->c_precision)]++; 1366247777Sdavide } 1367247777Sdavide if (c > maxc) 1368247777Sdavide maxc = c; 1369247777Sdavide ccpbk[fls(c + c / 2)]++; 1370247777Sdavide count += c; 1371247777Sdavide } 1372247777Sdavide CC_UNLOCK(cc); 1373247777Sdavide#ifdef SMP 1374247777Sdavide } 1375247777Sdavide#endif 1376247777Sdavide 1377247777Sdavide for (i = 0, tcum = 0; i < 64 && tcum < count / 2; i++) 1378247777Sdavide tcum += ct[i]; 1379247777Sdavide medt = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0; 1380247777Sdavide for (i = 0, pcum = 0; i < 64 && pcum < count / 2; i++) 1381247777Sdavide pcum += cpr[i]; 1382247777Sdavide medpr = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0; 1383247777Sdavide for (i = 0, c = 0; i < 32 && c < count / 2; i++) 1384247777Sdavide c += ccpbk[i]; 1385247777Sdavide medc = (i >= 2) ? (1 << (i - 2)) : 0; 1386247777Sdavide 1387247777Sdavide printf("Scheduled callouts statistic snapshot:\n"); 1388247777Sdavide printf(" Callouts: %6d Buckets: %6d*%-3d Bucket size: 0.%06ds\n", 1389247777Sdavide count, callwheelsize, mp_ncpus, 1000000 >> CC_HASH_SHIFT); 1390247777Sdavide printf(" C/Bk: med %5d avg %6d.%06jd max %6d\n", 1391247777Sdavide medc, 1392247777Sdavide count / callwheelsize / mp_ncpus, 1393247777Sdavide (uint64_t)count * 1000000 / callwheelsize / mp_ncpus % 1000000, 1394247777Sdavide maxc); 1395247777Sdavide printf(" Time: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n", 1396247777Sdavide medt / SBT_1S, (medt & 0xffffffff) * 1000000 >> 32, 1397247777Sdavide (st / count) / 1000000, (st / count) % 1000000, 1398247777Sdavide maxt / SBT_1S, (maxt & 0xffffffff) * 1000000 >> 32); 1399247777Sdavide printf(" Prec: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n", 1400247777Sdavide medpr / SBT_1S, (medpr & 0xffffffff) * 1000000 >> 32, 1401247777Sdavide (spr / count) / 1000000, (spr / count) % 1000000, 1402247777Sdavide maxpr / SBT_1S, (maxpr & 0xffffffff) * 1000000 >> 32); 1403247777Sdavide printf(" Distribution: \tbuckets\t time\t tcum\t" 1404247777Sdavide " prec\t pcum\n"); 1405247777Sdavide for (i = 0, tcum = pcum = 0; i < 64; i++) { 1406247777Sdavide if (ct[i] == 0 && cpr[i] == 0) 1407247777Sdavide continue; 1408247777Sdavide t = (i != 0) ? (((sbintime_t)1) << (i - 1)) : 0; 1409247777Sdavide tcum += ct[i]; 1410247777Sdavide pcum += cpr[i]; 1411247777Sdavide printf(" %10jd.%06jds\t 2**%d\t%7d\t%7d\t%7d\t%7d\n", 1412247777Sdavide t / SBT_1S, (t & 0xffffffff) * 1000000 >> 32, 1413247777Sdavide i - 1 - (32 - CC_HASH_SHIFT), 1414247777Sdavide ct[i], tcum, cpr[i], pcum); 1415247777Sdavide } 1416247777Sdavide return (error); 1417247777Sdavide} 1418247777SdavideSYSCTL_PROC(_kern, OID_AUTO, callout_stat, 1419247777Sdavide CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1420247777Sdavide 0, 0, sysctl_kern_callout_stat, "I", 1421247777Sdavide "Dump immediate statistic snapshot of the scheduled callouts"); 1422