kern_timeout.c revision 248031
1/*- 2 * Copyright (c) 1982, 1986, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * From: @(#)kern_clock.c 8.5 (Berkeley) 1/21/94 35 */ 36 37#include <sys/cdefs.h> 38__FBSDID("$FreeBSD: head/sys/kern/kern_timeout.c 248031 2013-03-08 10:14:58Z andre $"); 39 40#include "opt_callout_profiling.h" 41#include "opt_kdtrace.h" 42#if defined(__arm__) 43#include "opt_timer.h" 44#endif 45 46#include <sys/param.h> 47#include <sys/systm.h> 48#include <sys/bus.h> 49#include <sys/callout.h> 50#include <sys/file.h> 51#include <sys/interrupt.h> 52#include <sys/kernel.h> 53#include <sys/ktr.h> 54#include <sys/lock.h> 55#include <sys/malloc.h> 56#include <sys/mutex.h> 57#include <sys/proc.h> 58#include <sys/sdt.h> 59#include <sys/sleepqueue.h> 60#include <sys/sysctl.h> 61#include <sys/smp.h> 62 63#ifdef SMP 64#include <machine/cpu.h> 65#endif 66 67#ifndef NO_EVENTTIMERS 68DPCPU_DECLARE(sbintime_t, hardclocktime); 69#endif 70 71SDT_PROVIDER_DEFINE(callout_execute); 72SDT_PROBE_DEFINE(callout_execute, kernel, , callout_start, callout-start); 73SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_start, 0, 74 "struct callout *"); 75SDT_PROBE_DEFINE(callout_execute, kernel, , callout_end, callout-end); 76SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_end, 0, 77 "struct callout *"); 78 79#ifdef CALLOUT_PROFILING 80static int avg_depth; 81SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0, 82 "Average number of items examined per softclock call. Units = 1/1000"); 83static int avg_gcalls; 84SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0, 85 "Average number of Giant callouts made per softclock call. Units = 1/1000"); 86static int avg_lockcalls; 87SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0, 88 "Average number of lock callouts made per softclock call. Units = 1/1000"); 89static int avg_mpcalls; 90SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0, 91 "Average number of MP callouts made per softclock call. Units = 1/1000"); 92static int avg_depth_dir; 93SYSCTL_INT(_debug, OID_AUTO, to_avg_depth_dir, CTLFLAG_RD, &avg_depth_dir, 0, 94 "Average number of direct callouts examined per callout_process call. " 95 "Units = 1/1000"); 96static int avg_lockcalls_dir; 97SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls_dir, CTLFLAG_RD, 98 &avg_lockcalls_dir, 0, "Average number of lock direct callouts made per " 99 "callout_process call. Units = 1/1000"); 100static int avg_mpcalls_dir; 101SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir, 102 0, "Average number of MP direct callouts made per callout_process call. " 103 "Units = 1/1000"); 104#endif 105 106static int ncallout; 107SYSCTL_INT(_kern, OID_AUTO, ncallout, CTLFLAG_RDTUN, &ncallout, 0, 108 "Number of entries in callwheel and size of timeout() preallocation"); 109 110/* 111 * TODO: 112 * allocate more timeout table slots when table overflows. 113 */ 114u_int callwheelsize, callwheelmask; 115 116/* 117 * The callout cpu exec entities represent informations necessary for 118 * describing the state of callouts currently running on the CPU and the ones 119 * necessary for migrating callouts to the new callout cpu. In particular, 120 * the first entry of the array cc_exec_entity holds informations for callout 121 * running in SWI thread context, while the second one holds informations 122 * for callout running directly from hardware interrupt context. 123 * The cached informations are very important for deferring migration when 124 * the migrating callout is already running. 125 */ 126struct cc_exec { 127 struct callout *cc_next; 128 struct callout *cc_curr; 129#ifdef SMP 130 void (*ce_migration_func)(void *); 131 void *ce_migration_arg; 132 int ce_migration_cpu; 133 sbintime_t ce_migration_time; 134#endif 135 bool cc_cancel; 136 bool cc_waiting; 137}; 138 139/* 140 * There is one struct callout_cpu per cpu, holding all relevant 141 * state for the callout processing thread on the individual CPU. 142 */ 143struct callout_cpu { 144 struct mtx_padalign cc_lock; 145 struct cc_exec cc_exec_entity[2]; 146 struct callout *cc_callout; 147 struct callout_list *cc_callwheel; 148 struct callout_tailq cc_expireq; 149 struct callout_slist cc_callfree; 150 sbintime_t cc_firstevent; 151 sbintime_t cc_lastscan; 152 void *cc_cookie; 153 u_int cc_bucket; 154}; 155 156#define cc_exec_curr cc_exec_entity[0].cc_curr 157#define cc_exec_next cc_exec_entity[0].cc_next 158#define cc_exec_cancel cc_exec_entity[0].cc_cancel 159#define cc_exec_waiting cc_exec_entity[0].cc_waiting 160#define cc_exec_curr_dir cc_exec_entity[1].cc_curr 161#define cc_exec_next_dir cc_exec_entity[1].cc_next 162#define cc_exec_cancel_dir cc_exec_entity[1].cc_cancel 163#define cc_exec_waiting_dir cc_exec_entity[1].cc_waiting 164 165#ifdef SMP 166#define cc_migration_func cc_exec_entity[0].ce_migration_func 167#define cc_migration_arg cc_exec_entity[0].ce_migration_arg 168#define cc_migration_cpu cc_exec_entity[0].ce_migration_cpu 169#define cc_migration_time cc_exec_entity[0].ce_migration_time 170#define cc_migration_func_dir cc_exec_entity[1].ce_migration_func 171#define cc_migration_arg_dir cc_exec_entity[1].ce_migration_arg 172#define cc_migration_cpu_dir cc_exec_entity[1].ce_migration_cpu 173#define cc_migration_time_dir cc_exec_entity[1].ce_migration_time 174 175struct callout_cpu cc_cpu[MAXCPU]; 176#define CPUBLOCK MAXCPU 177#define CC_CPU(cpu) (&cc_cpu[(cpu)]) 178#define CC_SELF() CC_CPU(PCPU_GET(cpuid)) 179#else 180struct callout_cpu cc_cpu; 181#define CC_CPU(cpu) &cc_cpu 182#define CC_SELF() &cc_cpu 183#endif 184#define CC_LOCK(cc) mtx_lock_spin(&(cc)->cc_lock) 185#define CC_UNLOCK(cc) mtx_unlock_spin(&(cc)->cc_lock) 186#define CC_LOCK_ASSERT(cc) mtx_assert(&(cc)->cc_lock, MA_OWNED) 187 188static int timeout_cpu; 189 190static void softclock_call_cc(struct callout *c, struct callout_cpu *cc, 191#ifdef CALLOUT_PROFILING 192 int *mpcalls, int *lockcalls, int *gcalls, 193#endif 194 int direct); 195 196static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures"); 197 198/** 199 * Locked by cc_lock: 200 * cc_curr - If a callout is in progress, it is cc_curr. 201 * If cc_curr is non-NULL, threads waiting in 202 * callout_drain() will be woken up as soon as the 203 * relevant callout completes. 204 * cc_cancel - Changing to 1 with both callout_lock and cc_lock held 205 * guarantees that the current callout will not run. 206 * The softclock() function sets this to 0 before it 207 * drops callout_lock to acquire c_lock, and it calls 208 * the handler only if curr_cancelled is still 0 after 209 * cc_lock is successfully acquired. 210 * cc_waiting - If a thread is waiting in callout_drain(), then 211 * callout_wait is nonzero. Set only when 212 * cc_curr is non-NULL. 213 */ 214 215/* 216 * Resets the execution entity tied to a specific callout cpu. 217 */ 218static void 219cc_cce_cleanup(struct callout_cpu *cc, int direct) 220{ 221 222 cc->cc_exec_entity[direct].cc_curr = NULL; 223 cc->cc_exec_entity[direct].cc_next = NULL; 224 cc->cc_exec_entity[direct].cc_cancel = false; 225 cc->cc_exec_entity[direct].cc_waiting = false; 226#ifdef SMP 227 cc->cc_exec_entity[direct].ce_migration_cpu = CPUBLOCK; 228 cc->cc_exec_entity[direct].ce_migration_time = 0; 229 cc->cc_exec_entity[direct].ce_migration_func = NULL; 230 cc->cc_exec_entity[direct].ce_migration_arg = NULL; 231#endif 232} 233 234/* 235 * Checks if migration is requested by a specific callout cpu. 236 */ 237static int 238cc_cce_migrating(struct callout_cpu *cc, int direct) 239{ 240 241#ifdef SMP 242 return (cc->cc_exec_entity[direct].ce_migration_cpu != CPUBLOCK); 243#else 244 return (0); 245#endif 246} 247 248/* 249 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization 250 * 251 * This code is called very early in the kernel initialization sequence, 252 * and may be called more then once. 253 */ 254caddr_t 255kern_timeout_callwheel_alloc(caddr_t v) 256{ 257 struct callout_cpu *cc; 258 259 timeout_cpu = PCPU_GET(cpuid); 260 cc = CC_CPU(timeout_cpu); 261 262 /* 263 * Calculate the size of the callout wheel and the preallocated 264 * timeout() structures. 265 */ 266 ncallout = imin(16 + maxproc + maxfiles, 18508); 267 TUNABLE_INT_FETCH("kern.ncallout", &ncallout); 268 269 /* 270 * Calculate callout wheel size, should be next power of two higher 271 * than 'ncallout'. 272 */ 273 callwheelsize = 1 << fls(ncallout); 274 callwheelmask = callwheelsize - 1; 275 276 cc->cc_callout = (struct callout *)v; 277 v = (caddr_t)(cc->cc_callout + ncallout); 278 cc->cc_callwheel = (struct callout_list *)v; 279 v = (caddr_t)(cc->cc_callwheel + callwheelsize); 280 return(v); 281} 282 283static void 284callout_cpu_init(struct callout_cpu *cc) 285{ 286 struct callout *c; 287 int i; 288 289 mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE); 290 SLIST_INIT(&cc->cc_callfree); 291 for (i = 0; i < callwheelsize; i++) 292 LIST_INIT(&cc->cc_callwheel[i]); 293 TAILQ_INIT(&cc->cc_expireq); 294 cc->cc_firstevent = INT64_MAX; 295 for (i = 0; i < 2; i++) 296 cc_cce_cleanup(cc, i); 297 if (cc->cc_callout == NULL) 298 return; 299 for (i = 0; i < ncallout; i++) { 300 c = &cc->cc_callout[i]; 301 callout_init(c, 0); 302 c->c_flags = CALLOUT_LOCAL_ALLOC; 303 SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle); 304 } 305} 306 307#ifdef SMP 308/* 309 * Switches the cpu tied to a specific callout. 310 * The function expects a locked incoming callout cpu and returns with 311 * locked outcoming callout cpu. 312 */ 313static struct callout_cpu * 314callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu) 315{ 316 struct callout_cpu *new_cc; 317 318 MPASS(c != NULL && cc != NULL); 319 CC_LOCK_ASSERT(cc); 320 321 /* 322 * Avoid interrupts and preemption firing after the callout cpu 323 * is blocked in order to avoid deadlocks as the new thread 324 * may be willing to acquire the callout cpu lock. 325 */ 326 c->c_cpu = CPUBLOCK; 327 spinlock_enter(); 328 CC_UNLOCK(cc); 329 new_cc = CC_CPU(new_cpu); 330 CC_LOCK(new_cc); 331 spinlock_exit(); 332 c->c_cpu = new_cpu; 333 return (new_cc); 334} 335#endif 336 337/* 338 * kern_timeout_callwheel_init() - initialize previously reserved callwheel 339 * space. 340 * 341 * This code is called just once, after the space reserved for the 342 * callout wheel has been finalized. 343 */ 344void 345kern_timeout_callwheel_init(void) 346{ 347 callout_cpu_init(CC_CPU(timeout_cpu)); 348} 349 350/* 351 * Start standard softclock thread. 352 */ 353static void 354start_softclock(void *dummy) 355{ 356 struct callout_cpu *cc; 357#ifdef SMP 358 int cpu; 359#endif 360 361 cc = CC_CPU(timeout_cpu); 362 if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK, 363 INTR_MPSAFE, &cc->cc_cookie)) 364 panic("died while creating standard software ithreads"); 365#ifdef SMP 366 CPU_FOREACH(cpu) { 367 if (cpu == timeout_cpu) 368 continue; 369 cc = CC_CPU(cpu); 370 if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK, 371 INTR_MPSAFE, &cc->cc_cookie)) 372 panic("died while creating standard software ithreads"); 373 cc->cc_callout = NULL; /* Only cpu0 handles timeout(). */ 374 cc->cc_callwheel = malloc( 375 sizeof(struct callout_list) * callwheelsize, M_CALLOUT, 376 M_WAITOK); 377 callout_cpu_init(cc); 378 } 379#endif 380} 381 382SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL); 383 384#define CC_HASH_SHIFT 8 385 386static inline u_int 387callout_hash(sbintime_t sbt) 388{ 389 390 return (sbt >> (32 - CC_HASH_SHIFT)); 391} 392 393static inline u_int 394callout_get_bucket(sbintime_t sbt) 395{ 396 397 return (callout_hash(sbt) & callwheelmask); 398} 399 400void 401callout_process(sbintime_t now) 402{ 403 struct callout *tmp, *tmpn; 404 struct callout_cpu *cc; 405 struct callout_list *sc; 406 sbintime_t first, last, max, tmp_max; 407 uint32_t lookahead; 408 u_int firstb, lastb, nowb; 409#ifdef CALLOUT_PROFILING 410 int depth_dir = 0, mpcalls_dir = 0, lockcalls_dir = 0; 411#endif 412 413 cc = CC_SELF(); 414 mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET); 415 416 /* Compute the buckets of the last scan and present times. */ 417 firstb = callout_hash(cc->cc_lastscan); 418 cc->cc_lastscan = now; 419 nowb = callout_hash(now); 420 421 /* Compute the last bucket and minimum time of the bucket after it. */ 422 if (nowb == firstb) 423 lookahead = (SBT_1S / 16); 424 else if (nowb - firstb == 1) 425 lookahead = (SBT_1S / 8); 426 else 427 lookahead = (SBT_1S / 2); 428 first = last = now; 429 first += (lookahead / 2); 430 last += lookahead; 431 last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT)); 432 lastb = callout_hash(last) - 1; 433 max = last; 434 435 /* 436 * Check if we wrapped around the entire wheel from the last scan. 437 * In case, we need to scan entirely the wheel for pending callouts. 438 */ 439 if (lastb - firstb >= callwheelsize) { 440 lastb = firstb + callwheelsize - 1; 441 if (nowb - firstb >= callwheelsize) 442 nowb = lastb; 443 } 444 445 /* Iterate callwheel from firstb to nowb and then up to lastb. */ 446 do { 447 sc = &cc->cc_callwheel[firstb & callwheelmask]; 448 tmp = LIST_FIRST(sc); 449 while (tmp != NULL) { 450 /* Run the callout if present time within allowed. */ 451 if (tmp->c_time <= now) { 452 /* 453 * Consumer told us the callout may be run 454 * directly from hardware interrupt context. 455 */ 456 if (tmp->c_flags & CALLOUT_DIRECT) { 457#ifdef CALLOUT_PROFILING 458 ++depth_dir; 459#endif 460 cc->cc_exec_next_dir = 461 LIST_NEXT(tmp, c_links.le); 462 cc->cc_bucket = firstb & callwheelmask; 463 LIST_REMOVE(tmp, c_links.le); 464 softclock_call_cc(tmp, cc, 465#ifdef CALLOUT_PROFILING 466 &mpcalls_dir, &lockcalls_dir, NULL, 467#endif 468 1); 469 tmp = cc->cc_exec_next_dir; 470 } else { 471 tmpn = LIST_NEXT(tmp, c_links.le); 472 LIST_REMOVE(tmp, c_links.le); 473 TAILQ_INSERT_TAIL(&cc->cc_expireq, 474 tmp, c_links.tqe); 475 tmp->c_flags |= CALLOUT_PROCESSED; 476 tmp = tmpn; 477 } 478 continue; 479 } 480 /* Skip events from distant future. */ 481 if (tmp->c_time >= max) 482 goto next; 483 /* 484 * Event minimal time is bigger than present maximal 485 * time, so it cannot be aggregated. 486 */ 487 if (tmp->c_time > last) { 488 lastb = nowb; 489 goto next; 490 } 491 /* Update first and last time, respecting this event. */ 492 if (tmp->c_time < first) 493 first = tmp->c_time; 494 tmp_max = tmp->c_time + tmp->c_precision; 495 if (tmp_max < last) 496 last = tmp_max; 497next: 498 tmp = LIST_NEXT(tmp, c_links.le); 499 } 500 /* Proceed with the next bucket. */ 501 firstb++; 502 /* 503 * Stop if we looked after present time and found 504 * some event we can't execute at now. 505 * Stop if we looked far enough into the future. 506 */ 507 } while (((int)(firstb - lastb)) <= 0); 508 cc->cc_firstevent = last; 509#ifndef NO_EVENTTIMERS 510 cpu_new_callout(curcpu, last, first); 511#endif 512#ifdef CALLOUT_PROFILING 513 avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8; 514 avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8; 515 avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8; 516#endif 517 mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET); 518 /* 519 * swi_sched acquires the thread lock, so we don't want to call it 520 * with cc_lock held; incorrect locking order. 521 */ 522 if (!TAILQ_EMPTY(&cc->cc_expireq)) 523 swi_sched(cc->cc_cookie, 0); 524} 525 526static struct callout_cpu * 527callout_lock(struct callout *c) 528{ 529 struct callout_cpu *cc; 530 int cpu; 531 532 for (;;) { 533 cpu = c->c_cpu; 534#ifdef SMP 535 if (cpu == CPUBLOCK) { 536 while (c->c_cpu == CPUBLOCK) 537 cpu_spinwait(); 538 continue; 539 } 540#endif 541 cc = CC_CPU(cpu); 542 CC_LOCK(cc); 543 if (cpu == c->c_cpu) 544 break; 545 CC_UNLOCK(cc); 546 } 547 return (cc); 548} 549 550static void 551callout_cc_add(struct callout *c, struct callout_cpu *cc, 552 sbintime_t sbt, sbintime_t precision, void (*func)(void *), 553 void *arg, int cpu, int flags) 554{ 555 int bucket; 556 557 CC_LOCK_ASSERT(cc); 558 if (sbt < cc->cc_lastscan) 559 sbt = cc->cc_lastscan; 560 c->c_arg = arg; 561 c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING); 562 if (flags & C_DIRECT_EXEC) 563 c->c_flags |= CALLOUT_DIRECT; 564 c->c_flags &= ~CALLOUT_PROCESSED; 565 c->c_func = func; 566 c->c_time = sbt; 567 c->c_precision = precision; 568 bucket = callout_get_bucket(c->c_time); 569 CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x", 570 c, (int)(c->c_precision >> 32), 571 (u_int)(c->c_precision & 0xffffffff)); 572 LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le); 573 if (cc->cc_bucket == bucket) 574 cc->cc_exec_next_dir = c; 575#ifndef NO_EVENTTIMERS 576 /* 577 * Inform the eventtimers(4) subsystem there's a new callout 578 * that has been inserted, but only if really required. 579 */ 580 sbt = c->c_time + c->c_precision; 581 if (sbt < cc->cc_firstevent) { 582 cc->cc_firstevent = sbt; 583 cpu_new_callout(cpu, sbt, c->c_time); 584 } 585#endif 586} 587 588static void 589callout_cc_del(struct callout *c, struct callout_cpu *cc) 590{ 591 592 if ((c->c_flags & CALLOUT_LOCAL_ALLOC) == 0) 593 return; 594 c->c_func = NULL; 595 SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle); 596} 597 598static void 599softclock_call_cc(struct callout *c, struct callout_cpu *cc, 600#ifdef CALLOUT_PROFILING 601 int *mpcalls, int *lockcalls, int *gcalls, 602#endif 603 int direct) 604{ 605 void (*c_func)(void *); 606 void *c_arg; 607 struct lock_class *class; 608 struct lock_object *c_lock; 609 int c_flags, sharedlock; 610#ifdef SMP 611 struct callout_cpu *new_cc; 612 void (*new_func)(void *); 613 void *new_arg; 614 int flags, new_cpu; 615 sbintime_t new_time; 616#endif 617#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING) 618 sbintime_t sbt1, sbt2; 619 struct timespec ts2; 620 static sbintime_t maxdt = 2 * SBT_1MS; /* 2 msec */ 621 static timeout_t *lastfunc; 622#endif 623 624 KASSERT((c->c_flags & (CALLOUT_PENDING | CALLOUT_ACTIVE)) == 625 (CALLOUT_PENDING | CALLOUT_ACTIVE), 626 ("softclock_call_cc: pend|act %p %x", c, c->c_flags)); 627 class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL; 628 sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ? 0 : 1; 629 c_lock = c->c_lock; 630 c_func = c->c_func; 631 c_arg = c->c_arg; 632 c_flags = c->c_flags; 633 if (c->c_flags & CALLOUT_LOCAL_ALLOC) 634 c->c_flags = CALLOUT_LOCAL_ALLOC; 635 else 636 c->c_flags &= ~CALLOUT_PENDING; 637 cc->cc_exec_entity[direct].cc_curr = c; 638 cc->cc_exec_entity[direct].cc_cancel = false; 639 CC_UNLOCK(cc); 640 if (c_lock != NULL) { 641 class->lc_lock(c_lock, sharedlock); 642 /* 643 * The callout may have been cancelled 644 * while we switched locks. 645 */ 646 if (cc->cc_exec_entity[direct].cc_cancel) { 647 class->lc_unlock(c_lock); 648 goto skip; 649 } 650 /* The callout cannot be stopped now. */ 651 cc->cc_exec_entity[direct].cc_cancel = true; 652 if (c_lock == &Giant.lock_object) { 653#ifdef CALLOUT_PROFILING 654 (*gcalls)++; 655#endif 656 CTR3(KTR_CALLOUT, "callout giant %p func %p arg %p", 657 c, c_func, c_arg); 658 } else { 659#ifdef CALLOUT_PROFILING 660 (*lockcalls)++; 661#endif 662 CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p", 663 c, c_func, c_arg); 664 } 665 } else { 666#ifdef CALLOUT_PROFILING 667 (*mpcalls)++; 668#endif 669 CTR3(KTR_CALLOUT, "callout %p func %p arg %p", 670 c, c_func, c_arg); 671 } 672#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING) 673 sbt1 = sbinuptime(); 674#endif 675 THREAD_NO_SLEEPING(); 676 SDT_PROBE(callout_execute, kernel, , callout_start, c, 0, 0, 0, 0); 677 c_func(c_arg); 678 SDT_PROBE(callout_execute, kernel, , callout_end, c, 0, 0, 0, 0); 679 THREAD_SLEEPING_OK(); 680#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING) 681 sbt2 = sbinuptime(); 682 sbt2 -= sbt1; 683 if (sbt2 > maxdt) { 684 if (lastfunc != c_func || sbt2 > maxdt * 2) { 685 ts2 = sbttots(sbt2); 686 printf( 687 "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n", 688 c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec); 689 } 690 maxdt = sbt2; 691 lastfunc = c_func; 692 } 693#endif 694 CTR1(KTR_CALLOUT, "callout %p finished", c); 695 if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0) 696 class->lc_unlock(c_lock); 697skip: 698 CC_LOCK(cc); 699 KASSERT(cc->cc_exec_entity[direct].cc_curr == c, ("mishandled cc_curr")); 700 cc->cc_exec_entity[direct].cc_curr = NULL; 701 if (cc->cc_exec_entity[direct].cc_waiting) { 702 /* 703 * There is someone waiting for the 704 * callout to complete. 705 * If the callout was scheduled for 706 * migration just cancel it. 707 */ 708 if (cc_cce_migrating(cc, direct)) { 709 cc_cce_cleanup(cc, direct); 710 711 /* 712 * It should be assert here that the callout is not 713 * destroyed but that is not easy. 714 */ 715 c->c_flags &= ~CALLOUT_DFRMIGRATION; 716 } 717 cc->cc_exec_entity[direct].cc_waiting = false; 718 CC_UNLOCK(cc); 719 wakeup(&cc->cc_exec_entity[direct].cc_waiting); 720 CC_LOCK(cc); 721 } else if (cc_cce_migrating(cc, direct)) { 722 KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0, 723 ("Migrating legacy callout %p", c)); 724#ifdef SMP 725 /* 726 * If the callout was scheduled for 727 * migration just perform it now. 728 */ 729 new_cpu = cc->cc_exec_entity[direct].ce_migration_cpu; 730 new_time = cc->cc_exec_entity[direct].ce_migration_time; 731 new_func = cc->cc_exec_entity[direct].ce_migration_func; 732 new_arg = cc->cc_exec_entity[direct].ce_migration_arg; 733 cc_cce_cleanup(cc, direct); 734 735 /* 736 * It should be assert here that the callout is not destroyed 737 * but that is not easy. 738 * 739 * As first thing, handle deferred callout stops. 740 */ 741 if ((c->c_flags & CALLOUT_DFRMIGRATION) == 0) { 742 CTR3(KTR_CALLOUT, 743 "deferred cancelled %p func %p arg %p", 744 c, new_func, new_arg); 745 callout_cc_del(c, cc); 746 return; 747 } 748 c->c_flags &= ~CALLOUT_DFRMIGRATION; 749 750 new_cc = callout_cpu_switch(c, cc, new_cpu); 751 flags = (direct) ? C_DIRECT_EXEC : 0; 752 callout_cc_add(c, new_cc, new_time, c->c_precision, new_func, 753 new_arg, new_cpu, flags); 754 CC_UNLOCK(new_cc); 755 CC_LOCK(cc); 756#else 757 panic("migration should not happen"); 758#endif 759 } 760 /* 761 * If the current callout is locally allocated (from 762 * timeout(9)) then put it on the freelist. 763 * 764 * Note: we need to check the cached copy of c_flags because 765 * if it was not local, then it's not safe to deref the 766 * callout pointer. 767 */ 768 KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0 || 769 c->c_flags == CALLOUT_LOCAL_ALLOC, 770 ("corrupted callout")); 771 if (c_flags & CALLOUT_LOCAL_ALLOC) 772 callout_cc_del(c, cc); 773} 774 775/* 776 * The callout mechanism is based on the work of Adam M. Costello and 777 * George Varghese, published in a technical report entitled "Redesigning 778 * the BSD Callout and Timer Facilities" and modified slightly for inclusion 779 * in FreeBSD by Justin T. Gibbs. The original work on the data structures 780 * used in this implementation was published by G. Varghese and T. Lauck in 781 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for 782 * the Efficient Implementation of a Timer Facility" in the Proceedings of 783 * the 11th ACM Annual Symposium on Operating Systems Principles, 784 * Austin, Texas Nov 1987. 785 */ 786 787/* 788 * Software (low priority) clock interrupt. 789 * Run periodic events from timeout queue. 790 */ 791void 792softclock(void *arg) 793{ 794 struct callout_cpu *cc; 795 struct callout *c; 796#ifdef CALLOUT_PROFILING 797 int depth = 0, gcalls = 0, lockcalls = 0, mpcalls = 0; 798#endif 799 800 cc = (struct callout_cpu *)arg; 801 CC_LOCK(cc); 802 while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) { 803 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe); 804 softclock_call_cc(c, cc, 805#ifdef CALLOUT_PROFILING 806 &mpcalls, &lockcalls, &gcalls, 807#endif 808 0); 809#ifdef CALLOUT_PROFILING 810 ++depth; 811#endif 812 } 813#ifdef CALLOUT_PROFILING 814 avg_depth += (depth * 1000 - avg_depth) >> 8; 815 avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8; 816 avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8; 817 avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8; 818#endif 819 CC_UNLOCK(cc); 820} 821 822/* 823 * timeout -- 824 * Execute a function after a specified length of time. 825 * 826 * untimeout -- 827 * Cancel previous timeout function call. 828 * 829 * callout_handle_init -- 830 * Initialize a handle so that using it with untimeout is benign. 831 * 832 * See AT&T BCI Driver Reference Manual for specification. This 833 * implementation differs from that one in that although an 834 * identification value is returned from timeout, the original 835 * arguments to timeout as well as the identifier are used to 836 * identify entries for untimeout. 837 */ 838struct callout_handle 839timeout(ftn, arg, to_ticks) 840 timeout_t *ftn; 841 void *arg; 842 int to_ticks; 843{ 844 struct callout_cpu *cc; 845 struct callout *new; 846 struct callout_handle handle; 847 848 cc = CC_CPU(timeout_cpu); 849 CC_LOCK(cc); 850 /* Fill in the next free callout structure. */ 851 new = SLIST_FIRST(&cc->cc_callfree); 852 if (new == NULL) 853 /* XXX Attempt to malloc first */ 854 panic("timeout table full"); 855 SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle); 856 callout_reset(new, to_ticks, ftn, arg); 857 handle.callout = new; 858 CC_UNLOCK(cc); 859 860 return (handle); 861} 862 863void 864untimeout(ftn, arg, handle) 865 timeout_t *ftn; 866 void *arg; 867 struct callout_handle handle; 868{ 869 struct callout_cpu *cc; 870 871 /* 872 * Check for a handle that was initialized 873 * by callout_handle_init, but never used 874 * for a real timeout. 875 */ 876 if (handle.callout == NULL) 877 return; 878 879 cc = callout_lock(handle.callout); 880 if (handle.callout->c_func == ftn && handle.callout->c_arg == arg) 881 callout_stop(handle.callout); 882 CC_UNLOCK(cc); 883} 884 885void 886callout_handle_init(struct callout_handle *handle) 887{ 888 handle->callout = NULL; 889} 890 891/* 892 * New interface; clients allocate their own callout structures. 893 * 894 * callout_reset() - establish or change a timeout 895 * callout_stop() - disestablish a timeout 896 * callout_init() - initialize a callout structure so that it can 897 * safely be passed to callout_reset() and callout_stop() 898 * 899 * <sys/callout.h> defines three convenience macros: 900 * 901 * callout_active() - returns truth if callout has not been stopped, 902 * drained, or deactivated since the last time the callout was 903 * reset. 904 * callout_pending() - returns truth if callout is still waiting for timeout 905 * callout_deactivate() - marks the callout as having been serviced 906 */ 907int 908callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t precision, 909 void (*ftn)(void *), void *arg, int cpu, int flags) 910{ 911 sbintime_t to_sbt, pr; 912 struct callout_cpu *cc; 913 int cancelled, direct; 914 915 cancelled = 0; 916 if (flags & C_ABSOLUTE) { 917 to_sbt = sbt; 918 } else { 919 if ((flags & C_HARDCLOCK) && (sbt < tick_sbt)) 920 sbt = tick_sbt; 921 if ((flags & C_HARDCLOCK) || 922#ifdef NO_EVENTTIMERS 923 sbt >= sbt_timethreshold) { 924 to_sbt = getsbinuptime(); 925 926 /* Add safety belt for the case of hz > 1000. */ 927 to_sbt += tc_tick_sbt - tick_sbt; 928#else 929 sbt >= sbt_tickthreshold) { 930 /* 931 * Obtain the time of the last hardclock() call on 932 * this CPU directly from the kern_clocksource.c. 933 * This value is per-CPU, but it is equal for all 934 * active ones. 935 */ 936#ifdef __LP64__ 937 to_sbt = DPCPU_GET(hardclocktime); 938#else 939 spinlock_enter(); 940 to_sbt = DPCPU_GET(hardclocktime); 941 spinlock_exit(); 942#endif 943#endif 944 if ((flags & C_HARDCLOCK) == 0) 945 to_sbt += tick_sbt; 946 } else 947 to_sbt = sbinuptime(); 948 to_sbt += sbt; 949 pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp : 950 sbt >> C_PRELGET(flags)); 951 if (pr > precision) 952 precision = pr; 953 } 954 /* 955 * Don't allow migration of pre-allocated callouts lest they 956 * become unbalanced. 957 */ 958 if (c->c_flags & CALLOUT_LOCAL_ALLOC) 959 cpu = c->c_cpu; 960 direct = (c->c_flags & CALLOUT_DIRECT) != 0; 961 KASSERT(!direct || c->c_lock == NULL, 962 ("%s: direct callout %p has lock", __func__, c)); 963 cc = callout_lock(c); 964 if (cc->cc_exec_entity[direct].cc_curr == c) { 965 /* 966 * We're being asked to reschedule a callout which is 967 * currently in progress. If there is a lock then we 968 * can cancel the callout if it has not really started. 969 */ 970 if (c->c_lock != NULL && !cc->cc_exec_entity[direct].cc_cancel) 971 cancelled = cc->cc_exec_entity[direct].cc_cancel = true; 972 if (cc->cc_exec_entity[direct].cc_waiting) { 973 /* 974 * Someone has called callout_drain to kill this 975 * callout. Don't reschedule. 976 */ 977 CTR4(KTR_CALLOUT, "%s %p func %p arg %p", 978 cancelled ? "cancelled" : "failed to cancel", 979 c, c->c_func, c->c_arg); 980 CC_UNLOCK(cc); 981 return (cancelled); 982 } 983 } 984 if (c->c_flags & CALLOUT_PENDING) { 985 if ((c->c_flags & CALLOUT_PROCESSED) == 0) { 986 if (cc->cc_exec_next_dir == c) 987 cc->cc_exec_next_dir = LIST_NEXT(c, c_links.le); 988 LIST_REMOVE(c, c_links.le); 989 } else 990 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe); 991 cancelled = 1; 992 c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING); 993 } 994 995#ifdef SMP 996 /* 997 * If the callout must migrate try to perform it immediately. 998 * If the callout is currently running, just defer the migration 999 * to a more appropriate moment. 1000 */ 1001 if (c->c_cpu != cpu) { 1002 if (cc->cc_exec_entity[direct].cc_curr == c) { 1003 cc->cc_exec_entity[direct].ce_migration_cpu = cpu; 1004 cc->cc_exec_entity[direct].ce_migration_time 1005 = to_sbt; 1006 cc->cc_exec_entity[direct].ce_migration_func = ftn; 1007 cc->cc_exec_entity[direct].ce_migration_arg = arg; 1008 c->c_flags |= CALLOUT_DFRMIGRATION; 1009 CTR6(KTR_CALLOUT, 1010 "migration of %p func %p arg %p in %d.%08x to %u deferred", 1011 c, c->c_func, c->c_arg, (int)(to_sbt >> 32), 1012 (u_int)(to_sbt & 0xffffffff), cpu); 1013 CC_UNLOCK(cc); 1014 return (cancelled); 1015 } 1016 cc = callout_cpu_switch(c, cc, cpu); 1017 } 1018#endif 1019 1020 callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags); 1021 CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x", 1022 cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32), 1023 (u_int)(to_sbt & 0xffffffff)); 1024 CC_UNLOCK(cc); 1025 1026 return (cancelled); 1027} 1028 1029/* 1030 * Common idioms that can be optimized in the future. 1031 */ 1032int 1033callout_schedule_on(struct callout *c, int to_ticks, int cpu) 1034{ 1035 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu); 1036} 1037 1038int 1039callout_schedule(struct callout *c, int to_ticks) 1040{ 1041 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu); 1042} 1043 1044int 1045_callout_stop_safe(c, safe) 1046 struct callout *c; 1047 int safe; 1048{ 1049 struct callout_cpu *cc, *old_cc; 1050 struct lock_class *class; 1051 int direct, sq_locked, use_lock; 1052 1053 /* 1054 * Some old subsystems don't hold Giant while running a callout_stop(), 1055 * so just discard this check for the moment. 1056 */ 1057 if (!safe && c->c_lock != NULL) { 1058 if (c->c_lock == &Giant.lock_object) 1059 use_lock = mtx_owned(&Giant); 1060 else { 1061 use_lock = 1; 1062 class = LOCK_CLASS(c->c_lock); 1063 class->lc_assert(c->c_lock, LA_XLOCKED); 1064 } 1065 } else 1066 use_lock = 0; 1067 direct = (c->c_flags & CALLOUT_DIRECT) != 0; 1068 sq_locked = 0; 1069 old_cc = NULL; 1070again: 1071 cc = callout_lock(c); 1072 1073 /* 1074 * If the callout was migrating while the callout cpu lock was 1075 * dropped, just drop the sleepqueue lock and check the states 1076 * again. 1077 */ 1078 if (sq_locked != 0 && cc != old_cc) { 1079#ifdef SMP 1080 CC_UNLOCK(cc); 1081 sleepq_release(&old_cc->cc_exec_entity[direct].cc_waiting); 1082 sq_locked = 0; 1083 old_cc = NULL; 1084 goto again; 1085#else 1086 panic("migration should not happen"); 1087#endif 1088 } 1089 1090 /* 1091 * If the callout isn't pending, it's not on the queue, so 1092 * don't attempt to remove it from the queue. We can try to 1093 * stop it by other means however. 1094 */ 1095 if (!(c->c_flags & CALLOUT_PENDING)) { 1096 c->c_flags &= ~CALLOUT_ACTIVE; 1097 1098 /* 1099 * If it wasn't on the queue and it isn't the current 1100 * callout, then we can't stop it, so just bail. 1101 */ 1102 if (cc->cc_exec_entity[direct].cc_curr != c) { 1103 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p", 1104 c, c->c_func, c->c_arg); 1105 CC_UNLOCK(cc); 1106 if (sq_locked) 1107 sleepq_release( 1108 &cc->cc_exec_entity[direct].cc_waiting); 1109 return (0); 1110 } 1111 1112 if (safe) { 1113 /* 1114 * The current callout is running (or just 1115 * about to run) and blocking is allowed, so 1116 * just wait for the current invocation to 1117 * finish. 1118 */ 1119 while (cc->cc_exec_entity[direct].cc_curr == c) { 1120 /* 1121 * Use direct calls to sleepqueue interface 1122 * instead of cv/msleep in order to avoid 1123 * a LOR between cc_lock and sleepqueue 1124 * chain spinlocks. This piece of code 1125 * emulates a msleep_spin() call actually. 1126 * 1127 * If we already have the sleepqueue chain 1128 * locked, then we can safely block. If we 1129 * don't already have it locked, however, 1130 * we have to drop the cc_lock to lock 1131 * it. This opens several races, so we 1132 * restart at the beginning once we have 1133 * both locks. If nothing has changed, then 1134 * we will end up back here with sq_locked 1135 * set. 1136 */ 1137 if (!sq_locked) { 1138 CC_UNLOCK(cc); 1139 sleepq_lock( 1140 &cc->cc_exec_entity[direct].cc_waiting); 1141 sq_locked = 1; 1142 old_cc = cc; 1143 goto again; 1144 } 1145 1146 /* 1147 * Migration could be cancelled here, but 1148 * as long as it is still not sure when it 1149 * will be packed up, just let softclock() 1150 * take care of it. 1151 */ 1152 cc->cc_exec_entity[direct].cc_waiting = true; 1153 DROP_GIANT(); 1154 CC_UNLOCK(cc); 1155 sleepq_add( 1156 &cc->cc_exec_entity[direct].cc_waiting, 1157 &cc->cc_lock.lock_object, "codrain", 1158 SLEEPQ_SLEEP, 0); 1159 sleepq_wait( 1160 &cc->cc_exec_entity[direct].cc_waiting, 1161 0); 1162 sq_locked = 0; 1163 old_cc = NULL; 1164 1165 /* Reacquire locks previously released. */ 1166 PICKUP_GIANT(); 1167 CC_LOCK(cc); 1168 } 1169 } else if (use_lock && 1170 !cc->cc_exec_entity[direct].cc_cancel) { 1171 /* 1172 * The current callout is waiting for its 1173 * lock which we hold. Cancel the callout 1174 * and return. After our caller drops the 1175 * lock, the callout will be skipped in 1176 * softclock(). 1177 */ 1178 cc->cc_exec_entity[direct].cc_cancel = true; 1179 CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p", 1180 c, c->c_func, c->c_arg); 1181 KASSERT(!cc_cce_migrating(cc, direct), 1182 ("callout wrongly scheduled for migration")); 1183 CC_UNLOCK(cc); 1184 KASSERT(!sq_locked, ("sleepqueue chain locked")); 1185 return (1); 1186 } else if ((c->c_flags & CALLOUT_DFRMIGRATION) != 0) { 1187 c->c_flags &= ~CALLOUT_DFRMIGRATION; 1188 CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p", 1189 c, c->c_func, c->c_arg); 1190 CC_UNLOCK(cc); 1191 return (1); 1192 } 1193 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p", 1194 c, c->c_func, c->c_arg); 1195 CC_UNLOCK(cc); 1196 KASSERT(!sq_locked, ("sleepqueue chain still locked")); 1197 return (0); 1198 } 1199 if (sq_locked) 1200 sleepq_release(&cc->cc_exec_entity[direct].cc_waiting); 1201 1202 c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING); 1203 1204 CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p", 1205 c, c->c_func, c->c_arg); 1206 if ((c->c_flags & CALLOUT_PROCESSED) == 0) { 1207 if (cc->cc_exec_next_dir == c) 1208 cc->cc_exec_next_dir = LIST_NEXT(c, c_links.le); 1209 LIST_REMOVE(c, c_links.le); 1210 } else 1211 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe); 1212 callout_cc_del(c, cc); 1213 1214 CC_UNLOCK(cc); 1215 return (1); 1216} 1217 1218void 1219callout_init(c, mpsafe) 1220 struct callout *c; 1221 int mpsafe; 1222{ 1223 bzero(c, sizeof *c); 1224 if (mpsafe) { 1225 c->c_lock = NULL; 1226 c->c_flags = CALLOUT_RETURNUNLOCKED; 1227 } else { 1228 c->c_lock = &Giant.lock_object; 1229 c->c_flags = 0; 1230 } 1231 c->c_cpu = timeout_cpu; 1232} 1233 1234void 1235_callout_init_lock(c, lock, flags) 1236 struct callout *c; 1237 struct lock_object *lock; 1238 int flags; 1239{ 1240 bzero(c, sizeof *c); 1241 c->c_lock = lock; 1242 KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0, 1243 ("callout_init_lock: bad flags %d", flags)); 1244 KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0, 1245 ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock")); 1246 KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags & 1247 (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class", 1248 __func__)); 1249 c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK); 1250 c->c_cpu = timeout_cpu; 1251} 1252 1253#ifdef APM_FIXUP_CALLTODO 1254/* 1255 * Adjust the kernel calltodo timeout list. This routine is used after 1256 * an APM resume to recalculate the calltodo timer list values with the 1257 * number of hz's we have been sleeping. The next hardclock() will detect 1258 * that there are fired timers and run softclock() to execute them. 1259 * 1260 * Please note, I have not done an exhaustive analysis of what code this 1261 * might break. I am motivated to have my select()'s and alarm()'s that 1262 * have expired during suspend firing upon resume so that the applications 1263 * which set the timer can do the maintanence the timer was for as close 1264 * as possible to the originally intended time. Testing this code for a 1265 * week showed that resuming from a suspend resulted in 22 to 25 timers 1266 * firing, which seemed independant on whether the suspend was 2 hours or 1267 * 2 days. Your milage may vary. - Ken Key <key@cs.utk.edu> 1268 */ 1269void 1270adjust_timeout_calltodo(time_change) 1271 struct timeval *time_change; 1272{ 1273 register struct callout *p; 1274 unsigned long delta_ticks; 1275 1276 /* 1277 * How many ticks were we asleep? 1278 * (stolen from tvtohz()). 1279 */ 1280 1281 /* Don't do anything */ 1282 if (time_change->tv_sec < 0) 1283 return; 1284 else if (time_change->tv_sec <= LONG_MAX / 1000000) 1285 delta_ticks = (time_change->tv_sec * 1000000 + 1286 time_change->tv_usec + (tick - 1)) / tick + 1; 1287 else if (time_change->tv_sec <= LONG_MAX / hz) 1288 delta_ticks = time_change->tv_sec * hz + 1289 (time_change->tv_usec + (tick - 1)) / tick + 1; 1290 else 1291 delta_ticks = LONG_MAX; 1292 1293 if (delta_ticks > INT_MAX) 1294 delta_ticks = INT_MAX; 1295 1296 /* 1297 * Now rip through the timer calltodo list looking for timers 1298 * to expire. 1299 */ 1300 1301 /* don't collide with softclock() */ 1302 CC_LOCK(cc); 1303 for (p = calltodo.c_next; p != NULL; p = p->c_next) { 1304 p->c_time -= delta_ticks; 1305 1306 /* Break if the timer had more time on it than delta_ticks */ 1307 if (p->c_time > 0) 1308 break; 1309 1310 /* take back the ticks the timer didn't use (p->c_time <= 0) */ 1311 delta_ticks = -p->c_time; 1312 } 1313 CC_UNLOCK(cc); 1314 1315 return; 1316} 1317#endif /* APM_FIXUP_CALLTODO */ 1318 1319static int 1320flssbt(sbintime_t sbt) 1321{ 1322 1323 sbt += (uint64_t)sbt >> 1; 1324 if (sizeof(long) >= sizeof(sbintime_t)) 1325 return (flsl(sbt)); 1326 if (sbt >= SBT_1S) 1327 return (flsl(((uint64_t)sbt) >> 32) + 32); 1328 return (flsl(sbt)); 1329} 1330 1331/* 1332 * Dump immediate statistic snapshot of the scheduled callouts. 1333 */ 1334static int 1335sysctl_kern_callout_stat(SYSCTL_HANDLER_ARGS) 1336{ 1337 struct callout *tmp; 1338 struct callout_cpu *cc; 1339 struct callout_list *sc; 1340 sbintime_t maxpr, maxt, medpr, medt, now, spr, st, t; 1341 int ct[64], cpr[64], ccpbk[32]; 1342 int error, val, i, count, tcum, pcum, maxc, c, medc; 1343#ifdef SMP 1344 int cpu; 1345#endif 1346 1347 val = 0; 1348 error = sysctl_handle_int(oidp, &val, 0, req); 1349 if (error != 0 || req->newptr == NULL) 1350 return (error); 1351 count = maxc = 0; 1352 st = spr = maxt = maxpr = 0; 1353 bzero(ccpbk, sizeof(ccpbk)); 1354 bzero(ct, sizeof(ct)); 1355 bzero(cpr, sizeof(cpr)); 1356 now = sbinuptime(); 1357#ifdef SMP 1358 CPU_FOREACH(cpu) { 1359 cc = CC_CPU(cpu); 1360#else 1361 cc = CC_CPU(timeout_cpu); 1362#endif 1363 CC_LOCK(cc); 1364 for (i = 0; i < callwheelsize; i++) { 1365 sc = &cc->cc_callwheel[i]; 1366 c = 0; 1367 LIST_FOREACH(tmp, sc, c_links.le) { 1368 c++; 1369 t = tmp->c_time - now; 1370 if (t < 0) 1371 t = 0; 1372 st += t / SBT_1US; 1373 spr += tmp->c_precision / SBT_1US; 1374 if (t > maxt) 1375 maxt = t; 1376 if (tmp->c_precision > maxpr) 1377 maxpr = tmp->c_precision; 1378 ct[flssbt(t)]++; 1379 cpr[flssbt(tmp->c_precision)]++; 1380 } 1381 if (c > maxc) 1382 maxc = c; 1383 ccpbk[fls(c + c / 2)]++; 1384 count += c; 1385 } 1386 CC_UNLOCK(cc); 1387#ifdef SMP 1388 } 1389#endif 1390 1391 for (i = 0, tcum = 0; i < 64 && tcum < count / 2; i++) 1392 tcum += ct[i]; 1393 medt = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0; 1394 for (i = 0, pcum = 0; i < 64 && pcum < count / 2; i++) 1395 pcum += cpr[i]; 1396 medpr = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0; 1397 for (i = 0, c = 0; i < 32 && c < count / 2; i++) 1398 c += ccpbk[i]; 1399 medc = (i >= 2) ? (1 << (i - 2)) : 0; 1400 1401 printf("Scheduled callouts statistic snapshot:\n"); 1402 printf(" Callouts: %6d Buckets: %6d*%-3d Bucket size: 0.%06ds\n", 1403 count, callwheelsize, mp_ncpus, 1000000 >> CC_HASH_SHIFT); 1404 printf(" C/Bk: med %5d avg %6d.%06jd max %6d\n", 1405 medc, 1406 count / callwheelsize / mp_ncpus, 1407 (uint64_t)count * 1000000 / callwheelsize / mp_ncpus % 1000000, 1408 maxc); 1409 printf(" Time: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n", 1410 medt / SBT_1S, (medt & 0xffffffff) * 1000000 >> 32, 1411 (st / count) / 1000000, (st / count) % 1000000, 1412 maxt / SBT_1S, (maxt & 0xffffffff) * 1000000 >> 32); 1413 printf(" Prec: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n", 1414 medpr / SBT_1S, (medpr & 0xffffffff) * 1000000 >> 32, 1415 (spr / count) / 1000000, (spr / count) % 1000000, 1416 maxpr / SBT_1S, (maxpr & 0xffffffff) * 1000000 >> 32); 1417 printf(" Distribution: \tbuckets\t time\t tcum\t" 1418 " prec\t pcum\n"); 1419 for (i = 0, tcum = pcum = 0; i < 64; i++) { 1420 if (ct[i] == 0 && cpr[i] == 0) 1421 continue; 1422 t = (i != 0) ? (((sbintime_t)1) << (i - 1)) : 0; 1423 tcum += ct[i]; 1424 pcum += cpr[i]; 1425 printf(" %10jd.%06jds\t 2**%d\t%7d\t%7d\t%7d\t%7d\n", 1426 t / SBT_1S, (t & 0xffffffff) * 1000000 >> 32, 1427 i - 1 - (32 - CC_HASH_SHIFT), 1428 ct[i], tcum, cpr[i], pcum); 1429 } 1430 return (error); 1431} 1432SYSCTL_PROC(_kern, OID_AUTO, callout_stat, 1433 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1434 0, 0, sysctl_kern_callout_stat, "I", 1435 "Dump immediate statistic snapshot of the scheduled callouts"); 1436