kern_timeout.c revision 278800
1/*- 2 * Copyright (c) 1982, 1986, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * From: @(#)kern_clock.c 8.5 (Berkeley) 1/21/94 35 */ 36 37#include <sys/cdefs.h> 38__FBSDID("$FreeBSD: stable/10/sys/kern/kern_timeout.c 278800 2015-02-15 13:24:32Z rrs $"); 39 40#include "opt_callout_profiling.h" 41#include "opt_kdtrace.h" 42#if defined(__arm__) 43#include "opt_timer.h" 44#endif 45 46#include <sys/param.h> 47#include <sys/systm.h> 48#include <sys/bus.h> 49#include <sys/callout.h> 50#include <sys/file.h> 51#include <sys/interrupt.h> 52#include <sys/kernel.h> 53#include <sys/ktr.h> 54#include <sys/lock.h> 55#include <sys/malloc.h> 56#include <sys/mutex.h> 57#include <sys/proc.h> 58#include <sys/sdt.h> 59#include <sys/sleepqueue.h> 60#include <sys/sysctl.h> 61#include <sys/smp.h> 62 63#ifdef SMP 64#include <machine/cpu.h> 65#endif 66 67#ifndef NO_EVENTTIMERS 68DPCPU_DECLARE(sbintime_t, hardclocktime); 69#endif 70 71SDT_PROVIDER_DEFINE(callout_execute); 72SDT_PROBE_DEFINE1(callout_execute, kernel, , callout__start, 73 "struct callout *"); 74SDT_PROBE_DEFINE1(callout_execute, kernel, , callout__end, 75 "struct callout *"); 76 77#ifdef CALLOUT_PROFILING 78static int avg_depth; 79SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0, 80 "Average number of items examined per softclock call. Units = 1/1000"); 81static int avg_gcalls; 82SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0, 83 "Average number of Giant callouts made per softclock call. 
Units = 1/1000"); 84static int avg_lockcalls; 85SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0, 86 "Average number of lock callouts made per softclock call. Units = 1/1000"); 87static int avg_mpcalls; 88SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0, 89 "Average number of MP callouts made per softclock call. Units = 1/1000"); 90static int avg_depth_dir; 91SYSCTL_INT(_debug, OID_AUTO, to_avg_depth_dir, CTLFLAG_RD, &avg_depth_dir, 0, 92 "Average number of direct callouts examined per callout_process call. " 93 "Units = 1/1000"); 94static int avg_lockcalls_dir; 95SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls_dir, CTLFLAG_RD, 96 &avg_lockcalls_dir, 0, "Average number of lock direct callouts made per " 97 "callout_process call. Units = 1/1000"); 98static int avg_mpcalls_dir; 99SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir, 100 0, "Average number of MP direct callouts made per callout_process call. " 101 "Units = 1/1000"); 102#endif 103 104static int ncallout; 105SYSCTL_INT(_kern, OID_AUTO, ncallout, CTLFLAG_RDTUN, &ncallout, 0, 106 "Number of entries in callwheel and size of timeout() preallocation"); 107 108/* 109 * TODO: 110 * allocate more timeout table slots when table overflows. 111 */ 112u_int callwheelsize, callwheelmask; 113 114/* 115 * The callout cpu exec entities represent informations necessary for 116 * describing the state of callouts currently running on the CPU and the ones 117 * necessary for migrating callouts to the new callout cpu. In particular, 118 * the first entry of the array cc_exec_entity holds informations for callout 119 * running in SWI thread context, while the second one holds informations 120 * for callout running directly from hardware interrupt context. 121 * The cached informations are very important for deferring migration when 122 * the migrating callout is already running. 123 */ 124struct cc_exec { 125 struct callout *cc_curr; 126#ifdef SMP 127 void (*ce_migration_func)(void *); 128 void *ce_migration_arg; 129 int ce_migration_cpu; 130 sbintime_t ce_migration_time; 131 sbintime_t ce_migration_prec; 132#endif 133 bool cc_cancel; 134 bool cc_waiting; 135}; 136 137/* 138 * There is one struct callout_cpu per cpu, holding all relevant 139 * state for the callout processing thread on the individual CPU. 
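 * The cc_exec_entity[] array below is indexed by the "direct" flag used
 * throughout this file: entry 0 tracks a callout running from the
 * softclock() SWI thread, entry 1 a callout run directly from hardware
 * interrupt context.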
140 */ 141struct callout_cpu { 142 struct mtx_padalign cc_lock; 143 struct cc_exec cc_exec_entity[2]; 144 struct callout *cc_next; 145 struct callout *cc_callout; 146 struct callout_list *cc_callwheel; 147 struct callout_tailq cc_expireq; 148 struct callout_slist cc_callfree; 149 sbintime_t cc_firstevent; 150 sbintime_t cc_lastscan; 151 void *cc_cookie; 152 u_int cc_bucket; 153 char cc_ktr_event_name[20]; 154}; 155 156#define cc_exec_curr(cc, dir) cc->cc_exec_entity[dir].cc_curr 157#define cc_exec_next(cc) cc->cc_next 158#define cc_exec_cancel(cc, dir) cc->cc_exec_entity[dir].cc_cancel 159#define cc_exec_waiting(cc, dir) cc->cc_exec_entity[dir].cc_waiting 160#ifdef SMP 161#define cc_migration_func(cc, dir) cc->cc_exec_entity[dir].ce_migration_func 162#define cc_migration_arg(cc, dir) cc->cc_exec_entity[dir].ce_migration_arg 163#define cc_migration_cpu(cc, dir) cc->cc_exec_entity[dir].ce_migration_cpu 164#define cc_migration_time(cc, dir) cc->cc_exec_entity[dir].ce_migration_time 165#define cc_migration_prec(cc, dir) cc->cc_exec_entity[dir].ce_migration_prec 166 167struct callout_cpu cc_cpu[MAXCPU]; 168#define CPUBLOCK MAXCPU 169#define CC_CPU(cpu) (&cc_cpu[(cpu)]) 170#define CC_SELF() CC_CPU(PCPU_GET(cpuid)) 171#else 172struct callout_cpu cc_cpu; 173#define CC_CPU(cpu) &cc_cpu 174#define CC_SELF() &cc_cpu 175#endif 176#define CC_LOCK(cc) mtx_lock_spin(&(cc)->cc_lock) 177#define CC_UNLOCK(cc) mtx_unlock_spin(&(cc)->cc_lock) 178#define CC_LOCK_ASSERT(cc) mtx_assert(&(cc)->cc_lock, MA_OWNED) 179 180static int timeout_cpu; 181 182static void callout_cpu_init(struct callout_cpu *cc, int cpu); 183static void softclock_call_cc(struct callout *c, struct callout_cpu *cc, 184#ifdef CALLOUT_PROFILING 185 int *mpcalls, int *lockcalls, int *gcalls, 186#endif 187 int direct); 188 189static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures"); 190 191/** 192 * Locked by cc_lock: 193 * cc_curr - If a callout is in progress, it is cc_curr. 194 * If cc_curr is non-NULL, threads waiting in 195 * callout_drain() will be woken up as soon as the 196 * relevant callout completes. 197 * cc_cancel - Changing to 1 with both callout_lock and cc_lock held 198 * guarantees that the current callout will not run. 199 * The softclock() function sets this to 0 before it 200 * drops callout_lock to acquire c_lock, and it calls 201 * the handler only if curr_cancelled is still 0 after 202 * cc_lock is successfully acquired. 203 * cc_waiting - If a thread is waiting in callout_drain(), then 204 * callout_wait is nonzero. Set only when 205 * cc_curr is non-NULL. 206 */ 207 208/* 209 * Resets the execution entity tied to a specific callout cpu. 210 */ 211static void 212cc_cce_cleanup(struct callout_cpu *cc, int direct) 213{ 214 215 cc_exec_curr(cc, direct) = NULL; 216 cc_exec_cancel(cc, direct) = false; 217 cc_exec_waiting(cc, direct) = false; 218#ifdef SMP 219 cc_migration_cpu(cc, direct) = CPUBLOCK; 220 cc_migration_time(cc, direct) = 0; 221 cc_migration_prec(cc, direct) = 0; 222 cc_migration_func(cc, direct) = NULL; 223 cc_migration_arg(cc, direct) = NULL; 224#endif 225} 226 227/* 228 * Checks if migration is requested by a specific callout cpu. 229 */ 230static int 231cc_cce_migrating(struct callout_cpu *cc, int direct) 232{ 233 234#ifdef SMP 235 return (cc_migration_cpu(cc, direct) != CPUBLOCK); 236#else 237 return (0); 238#endif 239} 240 241/* 242 * Kernel low level callwheel initialization 243 * called on cpu0 during kernel startup. 
244 */ 245static void 246callout_callwheel_init(void *dummy) 247{ 248 struct callout_cpu *cc; 249 250 /* 251 * Calculate the size of the callout wheel and the preallocated 252 * timeout() structures. 253 * XXX: Clip callout to result of previous function of maxusers 254 * maximum 384. This is still huge, but acceptable. 255 */ 256 ncallout = imin(16 + maxproc + maxfiles, 18508); 257 TUNABLE_INT_FETCH("kern.ncallout", &ncallout); 258 259 /* 260 * Calculate callout wheel size, should be next power of two higher 261 * than 'ncallout'. 262 */ 263 callwheelsize = 1 << fls(ncallout); 264 callwheelmask = callwheelsize - 1; 265 266 /* 267 * Only cpu0 handles timeout(9) and receives a preallocation. 268 * 269 * XXX: Once all timeout(9) consumers are converted this can 270 * be removed. 271 */ 272 timeout_cpu = PCPU_GET(cpuid); 273 cc = CC_CPU(timeout_cpu); 274 cc->cc_callout = malloc(ncallout * sizeof(struct callout), 275 M_CALLOUT, M_WAITOK); 276 callout_cpu_init(cc, timeout_cpu); 277} 278SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL); 279 280/* 281 * Initialize the per-cpu callout structures. 282 */ 283static void 284callout_cpu_init(struct callout_cpu *cc, int cpu) 285{ 286 struct callout *c; 287 int i; 288 289 mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE); 290 SLIST_INIT(&cc->cc_callfree); 291 cc->cc_callwheel = malloc(sizeof(struct callout_list) * callwheelsize, 292 M_CALLOUT, M_WAITOK); 293 for (i = 0; i < callwheelsize; i++) 294 LIST_INIT(&cc->cc_callwheel[i]); 295 TAILQ_INIT(&cc->cc_expireq); 296 cc->cc_firstevent = INT64_MAX; 297 for (i = 0; i < 2; i++) 298 cc_cce_cleanup(cc, i); 299 snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name), 300 "callwheel cpu %d", cpu); 301 if (cc->cc_callout == NULL) /* Only cpu0 handles timeout(9) */ 302 return; 303 for (i = 0; i < ncallout; i++) { 304 c = &cc->cc_callout[i]; 305 callout_init(c, 0); 306 c->c_flags = CALLOUT_LOCAL_ALLOC; 307 SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle); 308 } 309} 310 311#ifdef SMP 312/* 313 * Switches the cpu tied to a specific callout. 314 * The function expects a locked incoming callout cpu and returns with 315 * locked outcoming callout cpu. 316 */ 317static struct callout_cpu * 318callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu) 319{ 320 struct callout_cpu *new_cc; 321 322 MPASS(c != NULL && cc != NULL); 323 CC_LOCK_ASSERT(cc); 324 325 /* 326 * Avoid interrupts and preemption firing after the callout cpu 327 * is blocked in order to avoid deadlocks as the new thread 328 * may be willing to acquire the callout cpu lock. 329 */ 330 c->c_cpu = CPUBLOCK; 331 spinlock_enter(); 332 CC_UNLOCK(cc); 333 new_cc = CC_CPU(new_cpu); 334 CC_LOCK(new_cc); 335 spinlock_exit(); 336 c->c_cpu = new_cpu; 337 return (new_cc); 338} 339#endif 340 341/* 342 * Start standard softclock thread. 343 */ 344static void 345start_softclock(void *dummy) 346{ 347 struct callout_cpu *cc; 348#ifdef SMP 349 int cpu; 350#endif 351 352 cc = CC_CPU(timeout_cpu); 353 if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK, 354 INTR_MPSAFE, &cc->cc_cookie)) 355 panic("died while creating standard software ithreads"); 356#ifdef SMP 357 CPU_FOREACH(cpu) { 358 if (cpu == timeout_cpu) 359 continue; 360 cc = CC_CPU(cpu); 361 cc->cc_callout = NULL; /* Only cpu0 handles timeout(9). 
*/ 362 callout_cpu_init(cc, cpu); 363 if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK, 364 INTR_MPSAFE, &cc->cc_cookie)) 365 panic("died while creating standard software ithreads"); 366 } 367#endif 368} 369SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL); 370 371#define CC_HASH_SHIFT 8 372 373static inline u_int 374callout_hash(sbintime_t sbt) 375{ 376 377 return (sbt >> (32 - CC_HASH_SHIFT)); 378} 379 380static inline u_int 381callout_get_bucket(sbintime_t sbt) 382{ 383 384 return (callout_hash(sbt) & callwheelmask); 385} 386 387void 388callout_process(sbintime_t now) 389{ 390 struct callout *tmp, *tmpn; 391 struct callout_cpu *cc; 392 struct callout_list *sc; 393 sbintime_t first, last, max, tmp_max; 394 uint32_t lookahead; 395 u_int firstb, lastb, nowb; 396#ifdef CALLOUT_PROFILING 397 int depth_dir = 0, mpcalls_dir = 0, lockcalls_dir = 0; 398#endif 399 400 cc = CC_SELF(); 401 mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET); 402 403 /* Compute the buckets of the last scan and present times. */ 404 firstb = callout_hash(cc->cc_lastscan); 405 cc->cc_lastscan = now; 406 nowb = callout_hash(now); 407 408 /* Compute the last bucket and minimum time of the bucket after it. */ 409 if (nowb == firstb) 410 lookahead = (SBT_1S / 16); 411 else if (nowb - firstb == 1) 412 lookahead = (SBT_1S / 8); 413 else 414 lookahead = (SBT_1S / 2); 415 first = last = now; 416 first += (lookahead / 2); 417 last += lookahead; 418 last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT)); 419 lastb = callout_hash(last) - 1; 420 max = last; 421 422 /* 423 * Check if we wrapped around the entire wheel from the last scan. 424 * In case, we need to scan entirely the wheel for pending callouts. 425 */ 426 if (lastb - firstb >= callwheelsize) { 427 lastb = firstb + callwheelsize - 1; 428 if (nowb - firstb >= callwheelsize) 429 nowb = lastb; 430 } 431 432 /* Iterate callwheel from firstb to nowb and then up to lastb. */ 433 do { 434 sc = &cc->cc_callwheel[firstb & callwheelmask]; 435 tmp = LIST_FIRST(sc); 436 while (tmp != NULL) { 437 /* Run the callout if present time within allowed. */ 438 if (tmp->c_time <= now) { 439 /* 440 * Consumer told us the callout may be run 441 * directly from hardware interrupt context. 442 */ 443 if (tmp->c_flags & CALLOUT_DIRECT) { 444#ifdef CALLOUT_PROFILING 445 ++depth_dir; 446#endif 447 cc_exec_next(cc) = 448 LIST_NEXT(tmp, c_links.le); 449 cc->cc_bucket = firstb & callwheelmask; 450 LIST_REMOVE(tmp, c_links.le); 451 softclock_call_cc(tmp, cc, 452#ifdef CALLOUT_PROFILING 453 &mpcalls_dir, &lockcalls_dir, NULL, 454#endif 455 1); 456 tmp = cc_exec_next(cc); 457 cc_exec_next(cc) = NULL; 458 } else { 459 tmpn = LIST_NEXT(tmp, c_links.le); 460 LIST_REMOVE(tmp, c_links.le); 461 TAILQ_INSERT_TAIL(&cc->cc_expireq, 462 tmp, c_links.tqe); 463 tmp->c_flags |= CALLOUT_PROCESSED; 464 tmp = tmpn; 465 } 466 continue; 467 } 468 /* Skip events from distant future. */ 469 if (tmp->c_time >= max) 470 goto next; 471 /* 472 * Event minimal time is bigger than present maximal 473 * time, so it cannot be aggregated. 474 */ 475 if (tmp->c_time > last) { 476 lastb = nowb; 477 goto next; 478 } 479 /* Update first and last time, respecting this event. */ 480 if (tmp->c_time < first) 481 first = tmp->c_time; 482 tmp_max = tmp->c_time + tmp->c_precision; 483 if (tmp_max < last) 484 last = tmp_max; 485next: 486 tmp = LIST_NEXT(tmp, c_links.le); 487 } 488 /* Proceed with the next bucket. 
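 * The bucket index is masked with callwheelmask on every lookup, so firstb
 * may keep incrementing and wrap past callwheelsize freely.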
*/ 489 firstb++; 490 /* 491 * Stop if we looked after present time and found 492 * some event we can't execute at now. 493 * Stop if we looked far enough into the future. 494 */ 495 } while (((int)(firstb - lastb)) <= 0); 496 cc->cc_firstevent = last; 497#ifndef NO_EVENTTIMERS 498 cpu_new_callout(curcpu, last, first); 499#endif 500#ifdef CALLOUT_PROFILING 501 avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8; 502 avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8; 503 avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8; 504#endif 505 mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET); 506 /* 507 * swi_sched acquires the thread lock, so we don't want to call it 508 * with cc_lock held; incorrect locking order. 509 */ 510 if (!TAILQ_EMPTY(&cc->cc_expireq)) 511 swi_sched(cc->cc_cookie, 0); 512} 513 514static struct callout_cpu * 515callout_lock(struct callout *c) 516{ 517 struct callout_cpu *cc; 518 int cpu; 519 520 for (;;) { 521 cpu = c->c_cpu; 522#ifdef SMP 523 if (cpu == CPUBLOCK) { 524 while (c->c_cpu == CPUBLOCK) 525 cpu_spinwait(); 526 continue; 527 } 528#endif 529 cc = CC_CPU(cpu); 530 CC_LOCK(cc); 531 if (cpu == c->c_cpu) 532 break; 533 CC_UNLOCK(cc); 534 } 535 return (cc); 536} 537 538static void 539callout_cc_add(struct callout *c, struct callout_cpu *cc, 540 sbintime_t sbt, sbintime_t precision, void (*func)(void *), 541 void *arg, int cpu, int flags) 542{ 543 int bucket; 544 545 CC_LOCK_ASSERT(cc); 546 if (sbt < cc->cc_lastscan) 547 sbt = cc->cc_lastscan; 548 c->c_arg = arg; 549 c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING); 550 c->c_flags &= ~CALLOUT_PROCESSED; 551 c->c_func = func; 552 c->c_time = sbt; 553 c->c_precision = precision; 554 bucket = callout_get_bucket(c->c_time); 555 CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x", 556 c, (int)(c->c_precision >> 32), 557 (u_int)(c->c_precision & 0xffffffff)); 558 LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le); 559 if (cc->cc_bucket == bucket) 560 cc_exec_next(cc) = c; 561#ifndef NO_EVENTTIMERS 562 /* 563 * Inform the eventtimers(4) subsystem there's a new callout 564 * that has been inserted, but only if really required. 
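 * The precision is clamped first so that c_time + c_precision cannot
 * overflow the sbintime_t range.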
565 */ 566 if (INT64_MAX - c->c_time < c->c_precision) 567 c->c_precision = INT64_MAX - c->c_time; 568 sbt = c->c_time + c->c_precision; 569 if (sbt < cc->cc_firstevent) { 570 cc->cc_firstevent = sbt; 571 cpu_new_callout(cpu, sbt, c->c_time); 572 } 573#endif 574} 575 576static void 577callout_cc_del(struct callout *c, struct callout_cpu *cc) 578{ 579 580 if ((c->c_flags & CALLOUT_LOCAL_ALLOC) == 0) 581 return; 582 c->c_func = NULL; 583 SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle); 584} 585 586static void 587softclock_call_cc(struct callout *c, struct callout_cpu *cc, 588#ifdef CALLOUT_PROFILING 589 int *mpcalls, int *lockcalls, int *gcalls, 590#endif 591 int direct) 592{ 593 struct rm_priotracker tracker; 594 void (*c_func)(void *); 595 void *c_arg; 596 struct lock_class *class; 597 struct lock_object *c_lock; 598 uintptr_t lock_status; 599 int c_flags; 600#ifdef SMP 601 struct callout_cpu *new_cc; 602 void (*new_func)(void *); 603 void *new_arg; 604 int flags, new_cpu; 605 sbintime_t new_prec, new_time; 606#endif 607#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING) 608 sbintime_t sbt1, sbt2; 609 struct timespec ts2; 610 static sbintime_t maxdt = 2 * SBT_1MS; /* 2 msec */ 611 static timeout_t *lastfunc; 612#endif 613 614 KASSERT((c->c_flags & (CALLOUT_PENDING | CALLOUT_ACTIVE)) == 615 (CALLOUT_PENDING | CALLOUT_ACTIVE), 616 ("softclock_call_cc: pend|act %p %x", c, c->c_flags)); 617 class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL; 618 lock_status = 0; 619 if (c->c_flags & CALLOUT_SHAREDLOCK) { 620 if (class == &lock_class_rm) 621 lock_status = (uintptr_t)&tracker; 622 else 623 lock_status = 1; 624 } 625 c_lock = c->c_lock; 626 c_func = c->c_func; 627 c_arg = c->c_arg; 628 c_flags = c->c_flags; 629 if (c->c_flags & CALLOUT_LOCAL_ALLOC) 630 c->c_flags = CALLOUT_LOCAL_ALLOC; 631 else 632 c->c_flags &= ~CALLOUT_PENDING; 633 634 cc_exec_curr(cc, direct) = c; 635 cc_exec_cancel(cc, direct) = false; 636 CC_UNLOCK(cc); 637 if (c_lock != NULL) { 638 class->lc_lock(c_lock, lock_status); 639 /* 640 * The callout may have been cancelled 641 * while we switched locks. 642 */ 643 if (cc_exec_cancel(cc, direct)) { 644 class->lc_unlock(c_lock); 645 goto skip; 646 } 647 /* The callout cannot be stopped now. 
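 * Once cc_cancel is set to true below, a callout_stop() issued while this
 * invocation is still the current callout will observe it and report
 * failure rather than a successful cancellation.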
*/ 648 cc_exec_cancel(cc, direct) = true; 649 if (c_lock == &Giant.lock_object) { 650#ifdef CALLOUT_PROFILING 651 (*gcalls)++; 652#endif 653 CTR3(KTR_CALLOUT, "callout giant %p func %p arg %p", 654 c, c_func, c_arg); 655 } else { 656#ifdef CALLOUT_PROFILING 657 (*lockcalls)++; 658#endif 659 CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p", 660 c, c_func, c_arg); 661 } 662 } else { 663#ifdef CALLOUT_PROFILING 664 (*mpcalls)++; 665#endif 666 CTR3(KTR_CALLOUT, "callout %p func %p arg %p", 667 c, c_func, c_arg); 668 } 669 KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running", 670 "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct); 671#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING) 672 sbt1 = sbinuptime(); 673#endif 674 THREAD_NO_SLEEPING(); 675 SDT_PROBE(callout_execute, kernel, , callout__start, c, 0, 0, 0, 0); 676 c_func(c_arg); 677 SDT_PROBE(callout_execute, kernel, , callout__end, c, 0, 0, 0, 0); 678 THREAD_SLEEPING_OK(); 679#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING) 680 sbt2 = sbinuptime(); 681 sbt2 -= sbt1; 682 if (sbt2 > maxdt) { 683 if (lastfunc != c_func || sbt2 > maxdt * 2) { 684 ts2 = sbttots(sbt2); 685 printf( 686 "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n", 687 c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec); 688 } 689 maxdt = sbt2; 690 lastfunc = c_func; 691 } 692#endif 693 KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle"); 694 CTR1(KTR_CALLOUT, "callout %p finished", c); 695 if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0) 696 class->lc_unlock(c_lock); 697skip: 698 CC_LOCK(cc); 699 KASSERT(cc_exec_curr(cc, direct) == c, ("mishandled cc_curr")); 700 cc_exec_curr(cc, direct) = NULL; 701 if (cc_exec_waiting(cc, direct)) { 702 /* 703 * There is someone waiting for the 704 * callout to complete. 705 * If the callout was scheduled for 706 * migration just cancel it. 707 */ 708 if (cc_cce_migrating(cc, direct)) { 709 cc_cce_cleanup(cc, direct); 710 711 /* 712 * It should be assert here that the callout is not 713 * destroyed but that is not easy. 714 */ 715 c->c_flags &= ~CALLOUT_DFRMIGRATION; 716 } 717 cc_exec_waiting(cc, direct) = false; 718 CC_UNLOCK(cc); 719 wakeup(&cc_exec_waiting(cc, direct)); 720 CC_LOCK(cc); 721 } else if (cc_cce_migrating(cc, direct)) { 722 KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0, 723 ("Migrating legacy callout %p", c)); 724#ifdef SMP 725 /* 726 * If the callout was scheduled for 727 * migration just perform it now. 728 */ 729 new_cpu = cc_migration_cpu(cc, direct); 730 new_time = cc_migration_time(cc, direct); 731 new_prec = cc_migration_prec(cc, direct); 732 new_func = cc_migration_func(cc, direct); 733 new_arg = cc_migration_arg(cc, direct); 734 cc_cce_cleanup(cc, direct); 735 736 /* 737 * It should be assert here that the callout is not destroyed 738 * but that is not easy. 739 * 740 * As first thing, handle deferred callout stops. 741 */ 742 if (!callout_migrating(c)) { 743 CTR3(KTR_CALLOUT, 744 "deferred cancelled %p func %p arg %p", 745 c, new_func, new_arg); 746 callout_cc_del(c, cc); 747 return; 748 } 749 c->c_flags &= ~CALLOUT_DFRMIGRATION; 750 751 new_cc = callout_cpu_switch(c, cc, new_cpu); 752 flags = (direct) ? C_DIRECT_EXEC : 0; 753 callout_cc_add(c, new_cc, new_time, new_prec, new_func, 754 new_arg, new_cpu, flags); 755 CC_UNLOCK(new_cc); 756 CC_LOCK(cc); 757#else 758 panic("migration should not happen"); 759#endif 760 } 761 /* 762 * If the current callout is locally allocated (from 763 * timeout(9)) then put it on the freelist. 
764 * 765 * Note: we need to check the cached copy of c_flags because 766 * if it was not local, then it's not safe to deref the 767 * callout pointer. 768 */ 769 KASSERT((c_flags & CALLOUT_LOCAL_ALLOC) == 0 || 770 c->c_flags == CALLOUT_LOCAL_ALLOC, 771 ("corrupted callout")); 772 if (c_flags & CALLOUT_LOCAL_ALLOC) 773 callout_cc_del(c, cc); 774} 775 776/* 777 * The callout mechanism is based on the work of Adam M. Costello and 778 * George Varghese, published in a technical report entitled "Redesigning 779 * the BSD Callout and Timer Facilities" and modified slightly for inclusion 780 * in FreeBSD by Justin T. Gibbs. The original work on the data structures 781 * used in this implementation was published by G. Varghese and T. Lauck in 782 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for 783 * the Efficient Implementation of a Timer Facility" in the Proceedings of 784 * the 11th ACM Annual Symposium on Operating Systems Principles, 785 * Austin, Texas Nov 1987. 786 */ 787 788/* 789 * Software (low priority) clock interrupt. 790 * Run periodic events from timeout queue. 791 */ 792void 793softclock(void *arg) 794{ 795 struct callout_cpu *cc; 796 struct callout *c; 797#ifdef CALLOUT_PROFILING 798 int depth = 0, gcalls = 0, lockcalls = 0, mpcalls = 0; 799#endif 800 801 cc = (struct callout_cpu *)arg; 802 CC_LOCK(cc); 803 while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) { 804 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe); 805 softclock_call_cc(c, cc, 806#ifdef CALLOUT_PROFILING 807 &mpcalls, &lockcalls, &gcalls, 808#endif 809 0); 810#ifdef CALLOUT_PROFILING 811 ++depth; 812#endif 813 } 814#ifdef CALLOUT_PROFILING 815 avg_depth += (depth * 1000 - avg_depth) >> 8; 816 avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8; 817 avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8; 818 avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8; 819#endif 820 CC_UNLOCK(cc); 821} 822 823/* 824 * timeout -- 825 * Execute a function after a specified length of time. 826 * 827 * untimeout -- 828 * Cancel previous timeout function call. 829 * 830 * callout_handle_init -- 831 * Initialize a handle so that using it with untimeout is benign. 832 * 833 * See AT&T BCI Driver Reference Manual for specification. This 834 * implementation differs from that one in that although an 835 * identification value is returned from timeout, the original 836 * arguments to timeout as well as the identifier are used to 837 * identify entries for untimeout. 838 */ 839struct callout_handle 840timeout(ftn, arg, to_ticks) 841 timeout_t *ftn; 842 void *arg; 843 int to_ticks; 844{ 845 struct callout_cpu *cc; 846 struct callout *new; 847 struct callout_handle handle; 848 849 cc = CC_CPU(timeout_cpu); 850 CC_LOCK(cc); 851 /* Fill in the next free callout structure. */ 852 new = SLIST_FIRST(&cc->cc_callfree); 853 if (new == NULL) 854 /* XXX Attempt to malloc first */ 855 panic("timeout table full"); 856 SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle); 857 callout_reset(new, to_ticks, ftn, arg); 858 handle.callout = new; 859 CC_UNLOCK(cc); 860 861 return (handle); 862} 863 864void 865untimeout(ftn, arg, handle) 866 timeout_t *ftn; 867 void *arg; 868 struct callout_handle handle; 869{ 870 struct callout_cpu *cc; 871 872 /* 873 * Check for a handle that was initialized 874 * by callout_handle_init, but never used 875 * for a real timeout. 
876 */ 877 if (handle.callout == NULL) 878 return; 879 880 cc = callout_lock(handle.callout); 881 if (handle.callout->c_func == ftn && handle.callout->c_arg == arg) 882 callout_stop(handle.callout); 883 CC_UNLOCK(cc); 884} 885 886void 887callout_handle_init(struct callout_handle *handle) 888{ 889 handle->callout = NULL; 890} 891 892/* 893 * New interface; clients allocate their own callout structures. 894 * 895 * callout_reset() - establish or change a timeout 896 * callout_stop() - disestablish a timeout 897 * callout_init() - initialize a callout structure so that it can 898 * safely be passed to callout_reset() and callout_stop() 899 * 900 * <sys/callout.h> defines three convenience macros: 901 * 902 * callout_active() - returns truth if callout has not been stopped, 903 * drained, or deactivated since the last time the callout was 904 * reset. 905 * callout_pending() - returns truth if callout is still waiting for timeout 906 * callout_deactivate() - marks the callout as having been serviced 907 */ 908int 909callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t precision, 910 void (*ftn)(void *), void *arg, int cpu, int flags) 911{ 912 sbintime_t to_sbt, pr; 913 struct callout_cpu *cc; 914 int cancelled, direct; 915 916 cancelled = 0; 917 if (flags & C_ABSOLUTE) { 918 to_sbt = sbt; 919 } else { 920 if ((flags & C_HARDCLOCK) && (sbt < tick_sbt)) 921 sbt = tick_sbt; 922 if ((flags & C_HARDCLOCK) || 923#ifdef NO_EVENTTIMERS 924 sbt >= sbt_timethreshold) { 925 to_sbt = getsbinuptime(); 926 927 /* Add safety belt for the case of hz > 1000. */ 928 to_sbt += tc_tick_sbt - tick_sbt; 929#else 930 sbt >= sbt_tickthreshold) { 931 /* 932 * Obtain the time of the last hardclock() call on 933 * this CPU directly from the kern_clocksource.c. 934 * This value is per-CPU, but it is equal for all 935 * active ones. 936 */ 937#ifdef __LP64__ 938 to_sbt = DPCPU_GET(hardclocktime); 939#else 940 spinlock_enter(); 941 to_sbt = DPCPU_GET(hardclocktime); 942 spinlock_exit(); 943#endif 944#endif 945 if ((flags & C_HARDCLOCK) == 0) 946 to_sbt += tick_sbt; 947 } else 948 to_sbt = sbinuptime(); 949 if (INT64_MAX - to_sbt < sbt) 950 to_sbt = INT64_MAX; 951 else 952 to_sbt += sbt; 953 pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp : 954 sbt >> C_PRELGET(flags)); 955 if (pr > precision) 956 precision = pr; 957 } 958 /* 959 * Don't allow migration of pre-allocated callouts lest they 960 * become unbalanced. 961 */ 962 if (c->c_flags & CALLOUT_LOCAL_ALLOC) 963 cpu = c->c_cpu; 964 /* 965 * This flag used to be added by callout_cc_add, but the 966 * first time you call this we could end up with the 967 * wrong direct flag if we don't do it before we add. 968 */ 969 if (flags & C_DIRECT_EXEC) { 970 c->c_flags |= CALLOUT_DIRECT; 971 } 972 direct = (c->c_flags & CALLOUT_DIRECT) != 0; 973 KASSERT(!direct || c->c_lock == NULL, 974 ("%s: direct callout %p has lock", __func__, c)); 975 cc = callout_lock(c); 976 if (cc_exec_curr(cc, direct) == c) { 977 /* 978 * We're being asked to reschedule a callout which is 979 * currently in progress. If there is a lock then we 980 * can cancel the callout if it has not really started. 981 */ 982 if (c->c_lock != NULL && cc_exec_cancel(cc, direct)) 983 cancelled = cc_exec_cancel(cc, direct) = true; 984 if (cc_exec_waiting(cc, direct)) { 985 /* 986 * Someone has called callout_drain to kill this 987 * callout. Don't reschedule. 988 */ 989 CTR4(KTR_CALLOUT, "%s %p func %p arg %p", 990 cancelled ? 
"cancelled" : "failed to cancel", 991 c, c->c_func, c->c_arg); 992 CC_UNLOCK(cc); 993 return (cancelled); 994 } 995#ifdef SMP 996 if (callout_migrating(c)) { 997 /* 998 * This only occurs when a second callout_reset_sbt_on 999 * is made after a previous one moved it into 1000 * deferred migration (below). Note we do *not* change 1001 * the prev_cpu even though the previous target may 1002 * be different. 1003 */ 1004 cc_migration_cpu(cc, direct) = cpu; 1005 cc_migration_time(cc, direct) = to_sbt; 1006 cc_migration_prec(cc, direct) = precision; 1007 cc_migration_func(cc, direct) = ftn; 1008 cc_migration_arg(cc, direct) = arg; 1009 cancelled = 1; 1010 CC_UNLOCK(cc); 1011 return (cancelled); 1012 } 1013#endif 1014 } 1015 if (c->c_flags & CALLOUT_PENDING) { 1016 if ((c->c_flags & CALLOUT_PROCESSED) == 0) { 1017 if (cc_exec_next(cc) == c) 1018 cc_exec_next(cc) = LIST_NEXT(c, c_links.le); 1019 LIST_REMOVE(c, c_links.le); 1020 } else 1021 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe); 1022 cancelled = 1; 1023 c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING); 1024 } 1025 1026#ifdef SMP 1027 /* 1028 * If the callout must migrate try to perform it immediately. 1029 * If the callout is currently running, just defer the migration 1030 * to a more appropriate moment. 1031 */ 1032 if (c->c_cpu != cpu) { 1033 if (cc_exec_curr(cc, direct) == c) { 1034 /* 1035 * Pending will have been removed since we are 1036 * actually executing the callout on another 1037 * CPU. That callout should be waiting on the 1038 * lock the caller holds. If we set both 1039 * active/and/pending after we return and the 1040 * lock on the executing callout proceeds, it 1041 * will then see pending is true and return. 1042 * At the return from the actual callout execution 1043 * the migration will occur in softclock_call_cc 1044 * and this new callout will be placed on the 1045 * new CPU via a call to callout_cpu_switch() which 1046 * will get the lock on the right CPU followed 1047 * by a call callout_cc_add() which will add it there. 1048 * (see above in softclock_call_cc()). 1049 */ 1050 cc_migration_cpu(cc, direct) = cpu; 1051 cc_migration_time(cc, direct) = to_sbt; 1052 cc_migration_prec(cc, direct) = precision; 1053 cc_migration_func(cc, direct) = ftn; 1054 cc_migration_arg(cc, direct) = arg; 1055 c->c_flags |= (CALLOUT_DFRMIGRATION | CALLOUT_ACTIVE | CALLOUT_PENDING); 1056 CTR6(KTR_CALLOUT, 1057 "migration of %p func %p arg %p in %d.%08x to %u deferred", 1058 c, c->c_func, c->c_arg, (int)(to_sbt >> 32), 1059 (u_int)(to_sbt & 0xffffffff), cpu); 1060 CC_UNLOCK(cc); 1061 return (cancelled); 1062 } 1063 cc = callout_cpu_switch(c, cc, cpu); 1064 } 1065#endif 1066 1067 callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags); 1068 CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x", 1069 cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32), 1070 (u_int)(to_sbt & 0xffffffff)); 1071 CC_UNLOCK(cc); 1072 1073 return (cancelled); 1074} 1075 1076/* 1077 * Common idioms that can be optimized in the future. 
1078 */ 1079int 1080callout_schedule_on(struct callout *c, int to_ticks, int cpu) 1081{ 1082 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu); 1083} 1084 1085int 1086callout_schedule(struct callout *c, int to_ticks) 1087{ 1088 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu); 1089} 1090 1091int 1092_callout_stop_safe(c, safe) 1093 struct callout *c; 1094 int safe; 1095{ 1096 struct callout_cpu *cc, *old_cc; 1097 struct lock_class *class; 1098 int direct, sq_locked, use_lock; 1099 int not_on_a_list; 1100 1101 /* 1102 * Some old subsystems don't hold Giant while running a callout_stop(), 1103 * so just discard this check for the moment. 1104 */ 1105 if (!safe && c->c_lock != NULL) { 1106 if (c->c_lock == &Giant.lock_object) 1107 use_lock = mtx_owned(&Giant); 1108 else { 1109 use_lock = 1; 1110 class = LOCK_CLASS(c->c_lock); 1111 class->lc_assert(c->c_lock, LA_XLOCKED); 1112 } 1113 } else 1114 use_lock = 0; 1115 direct = (c->c_flags & CALLOUT_DIRECT) != 0; 1116 sq_locked = 0; 1117 old_cc = NULL; 1118again: 1119 cc = callout_lock(c); 1120 1121 if ((c->c_flags & (CALLOUT_DFRMIGRATION | CALLOUT_ACTIVE | CALLOUT_PENDING)) == 1122 (CALLOUT_DFRMIGRATION | CALLOUT_ACTIVE | CALLOUT_PENDING)) { 1123 /* 1124 * Special case where this slipped in while we 1125 * were migrating *as* the callout is about to 1126 * execute. The caller probably holds the lock 1127 * the callout wants. 1128 * 1129 * Get rid of the migration first. Then set 1130 * the flag that tells this code *not* to 1131 * try to remove it from any lists (its not 1132 * on one yet). When the callout wheel runs, 1133 * it will ignore this callout. 1134 */ 1135 c->c_flags &= ~(CALLOUT_PENDING|CALLOUT_ACTIVE); 1136 not_on_a_list = 1; 1137 } else { 1138 not_on_a_list = 0; 1139 } 1140 1141 /* 1142 * If the callout was migrating while the callout cpu lock was 1143 * dropped, just drop the sleepqueue lock and check the states 1144 * again. 1145 */ 1146 if (sq_locked != 0 && cc != old_cc) { 1147#ifdef SMP 1148 CC_UNLOCK(cc); 1149 sleepq_release(&cc_exec_waiting(old_cc, direct)); 1150 sq_locked = 0; 1151 old_cc = NULL; 1152 goto again; 1153#else 1154 panic("migration should not happen"); 1155#endif 1156 } 1157 1158 /* 1159 * If the callout isn't pending, it's not on the queue, so 1160 * don't attempt to remove it from the queue. We can try to 1161 * stop it by other means however. 1162 */ 1163 if (!(c->c_flags & CALLOUT_PENDING)) { 1164 c->c_flags &= ~CALLOUT_ACTIVE; 1165 1166 /* 1167 * If it wasn't on the queue and it isn't the current 1168 * callout, then we can't stop it, so just bail. 1169 */ 1170 if (cc_exec_curr(cc, direct) != c) { 1171 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p", 1172 c, c->c_func, c->c_arg); 1173 CC_UNLOCK(cc); 1174 if (sq_locked) 1175 sleepq_release(&cc_exec_waiting(cc, direct)); 1176 return (0); 1177 } 1178 1179 if (safe) { 1180 /* 1181 * The current callout is running (or just 1182 * about to run) and blocking is allowed, so 1183 * just wait for the current invocation to 1184 * finish. 1185 */ 1186 while (cc_exec_curr(cc, direct) == c) { 1187 /* 1188 * Use direct calls to sleepqueue interface 1189 * instead of cv/msleep in order to avoid 1190 * a LOR between cc_lock and sleepqueue 1191 * chain spinlocks. This piece of code 1192 * emulates a msleep_spin() call actually. 1193 * 1194 * If we already have the sleepqueue chain 1195 * locked, then we can safely block. If we 1196 * don't already have it locked, however, 1197 * we have to drop the cc_lock to lock 1198 * it. 
This opens several races, so we 1199 * restart at the beginning once we have 1200 * both locks. If nothing has changed, then 1201 * we will end up back here with sq_locked 1202 * set. 1203 */ 1204 if (!sq_locked) { 1205 CC_UNLOCK(cc); 1206 sleepq_lock( 1207 &cc_exec_waiting(cc, direct)); 1208 sq_locked = 1; 1209 old_cc = cc; 1210 goto again; 1211 } 1212 1213 /* 1214 * Migration could be cancelled here, but 1215 * as long as it is still not sure when it 1216 * will be packed up, just let softclock() 1217 * take care of it. 1218 */ 1219 cc_exec_waiting(cc, direct) = true; 1220 DROP_GIANT(); 1221 CC_UNLOCK(cc); 1222 sleepq_add( 1223 &cc_exec_waiting(cc, direct), 1224 &cc->cc_lock.lock_object, "codrain", 1225 SLEEPQ_SLEEP, 0); 1226 sleepq_wait( 1227 &cc_exec_waiting(cc, direct), 1228 0); 1229 sq_locked = 0; 1230 old_cc = NULL; 1231 1232 /* Reacquire locks previously released. */ 1233 PICKUP_GIANT(); 1234 CC_LOCK(cc); 1235 } 1236 } else if (use_lock && 1237 !cc_exec_cancel(cc, direct)) { 1238 1239 /* 1240 * The current callout is waiting for its 1241 * lock which we hold. Cancel the callout 1242 * and return. After our caller drops the 1243 * lock, the callout will be skipped in 1244 * softclock(). 1245 */ 1246 cc_exec_cancel(cc, direct) = true; 1247 CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p", 1248 c, c->c_func, c->c_arg); 1249 KASSERT(!cc_cce_migrating(cc, direct), 1250 ("callout wrongly scheduled for migration")); 1251 CC_UNLOCK(cc); 1252 KASSERT(!sq_locked, ("sleepqueue chain locked")); 1253 return (1); 1254 } else if (callout_migrating(c)) { 1255 /* 1256 * The callout is currently being serviced 1257 * and the "next" callout is scheduled at 1258 * its completion with a migration. We remove 1259 * the migration flag so it *won't* get rescheduled, 1260 * but we can't stop the one thats running so 1261 * we return 0. 1262 */ 1263 c->c_flags &= ~CALLOUT_DFRMIGRATION; 1264#ifdef SMP 1265 /* 1266 * We can't call cc_cce_cleanup here since 1267 * if we do it will remove .ce_curr and 1268 * its still running. This will prevent a 1269 * reschedule of the callout when the 1270 * execution completes. 
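 * Instead, only the migration fields are reset by hand below; cc_curr must
 * remain set while the handler is still executing.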
1271 */ 1272 cc_migration_cpu(cc, direct) = CPUBLOCK; 1273 cc_migration_time(cc, direct) = 0; 1274 cc_migration_prec(cc, direct) = 0; 1275 cc_migration_func(cc, direct) = NULL; 1276 cc_migration_arg(cc, direct) = NULL; 1277#endif 1278 CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p", 1279 c, c->c_func, c->c_arg); 1280 CC_UNLOCK(cc); 1281 return (0); 1282 } 1283 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p", 1284 c, c->c_func, c->c_arg); 1285 CC_UNLOCK(cc); 1286 KASSERT(!sq_locked, ("sleepqueue chain still locked")); 1287 return (0); 1288 } 1289 if (sq_locked) 1290 sleepq_release(&cc_exec_waiting(cc, direct)); 1291 1292 c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING); 1293 1294 CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p", 1295 c, c->c_func, c->c_arg); 1296 if (not_on_a_list == 0) { 1297 if ((c->c_flags & CALLOUT_PROCESSED) == 0) { 1298 if (cc_exec_next(cc) == c) 1299 cc_exec_next(cc) = LIST_NEXT(c, c_links.le); 1300 LIST_REMOVE(c, c_links.le); 1301 } else 1302 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe); 1303 } 1304 callout_cc_del(c, cc); 1305 CC_UNLOCK(cc); 1306 return (1); 1307} 1308 1309void 1310callout_init(c, mpsafe) 1311 struct callout *c; 1312 int mpsafe; 1313{ 1314 bzero(c, sizeof *c); 1315 if (mpsafe) { 1316 c->c_lock = NULL; 1317 c->c_flags = CALLOUT_RETURNUNLOCKED; 1318 } else { 1319 c->c_lock = &Giant.lock_object; 1320 c->c_flags = 0; 1321 } 1322 c->c_cpu = timeout_cpu; 1323} 1324 1325void 1326_callout_init_lock(c, lock, flags) 1327 struct callout *c; 1328 struct lock_object *lock; 1329 int flags; 1330{ 1331 bzero(c, sizeof *c); 1332 c->c_lock = lock; 1333 KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0, 1334 ("callout_init_lock: bad flags %d", flags)); 1335 KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0, 1336 ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock")); 1337 KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags & 1338 (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class", 1339 __func__)); 1340 c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK); 1341 c->c_cpu = timeout_cpu; 1342} 1343 1344#ifdef APM_FIXUP_CALLTODO 1345/* 1346 * Adjust the kernel calltodo timeout list. This routine is used after 1347 * an APM resume to recalculate the calltodo timer list values with the 1348 * number of hz's we have been sleeping. The next hardclock() will detect 1349 * that there are fired timers and run softclock() to execute them. 1350 * 1351 * Please note, I have not done an exhaustive analysis of what code this 1352 * might break. I am motivated to have my select()'s and alarm()'s that 1353 * have expired during suspend firing upon resume so that the applications 1354 * which set the timer can do the maintanence the timer was for as close 1355 * as possible to the originally intended time. Testing this code for a 1356 * week showed that resuming from a suspend resulted in 22 to 25 timers 1357 * firing, which seemed independant on whether the suspend was 2 hours or 1358 * 2 days. Your milage may vary. - Ken Key <key@cs.utk.edu> 1359 */ 1360void 1361adjust_timeout_calltodo(time_change) 1362 struct timeval *time_change; 1363{ 1364 register struct callout *p; 1365 unsigned long delta_ticks; 1366 1367 /* 1368 * How many ticks were we asleep? 1369 * (stolen from tvtohz()). 
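 * For example, a 2 second suspend with hz = 1000 (tick = 1000 us) gives
 * delta_ticks = (2 * 1000000 + 0 + 999) / 1000 + 1 = 2001.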
1370 */ 1371 1372 /* Don't do anything */ 1373 if (time_change->tv_sec < 0) 1374 return; 1375 else if (time_change->tv_sec <= LONG_MAX / 1000000) 1376 delta_ticks = (time_change->tv_sec * 1000000 + 1377 time_change->tv_usec + (tick - 1)) / tick + 1; 1378 else if (time_change->tv_sec <= LONG_MAX / hz) 1379 delta_ticks = time_change->tv_sec * hz + 1380 (time_change->tv_usec + (tick - 1)) / tick + 1; 1381 else 1382 delta_ticks = LONG_MAX; 1383 1384 if (delta_ticks > INT_MAX) 1385 delta_ticks = INT_MAX; 1386 1387 /* 1388 * Now rip through the timer calltodo list looking for timers 1389 * to expire. 1390 */ 1391 1392 /* don't collide with softclock() */ 1393 CC_LOCK(cc); 1394 for (p = calltodo.c_next; p != NULL; p = p->c_next) { 1395 p->c_time -= delta_ticks; 1396 1397 /* Break if the timer had more time on it than delta_ticks */ 1398 if (p->c_time > 0) 1399 break; 1400 1401 /* take back the ticks the timer didn't use (p->c_time <= 0) */ 1402 delta_ticks = -p->c_time; 1403 } 1404 CC_UNLOCK(cc); 1405 1406 return; 1407} 1408#endif /* APM_FIXUP_CALLTODO */ 1409 1410static int 1411flssbt(sbintime_t sbt) 1412{ 1413 1414 sbt += (uint64_t)sbt >> 1; 1415 if (sizeof(long) >= sizeof(sbintime_t)) 1416 return (flsl(sbt)); 1417 if (sbt >= SBT_1S) 1418 return (flsl(((uint64_t)sbt) >> 32) + 32); 1419 return (flsl(sbt)); 1420} 1421 1422/* 1423 * Dump immediate statistic snapshot of the scheduled callouts. 1424 */ 1425static int 1426sysctl_kern_callout_stat(SYSCTL_HANDLER_ARGS) 1427{ 1428 struct callout *tmp; 1429 struct callout_cpu *cc; 1430 struct callout_list *sc; 1431 sbintime_t maxpr, maxt, medpr, medt, now, spr, st, t; 1432 int ct[64], cpr[64], ccpbk[32]; 1433 int error, val, i, count, tcum, pcum, maxc, c, medc; 1434#ifdef SMP 1435 int cpu; 1436#endif 1437 1438 val = 0; 1439 error = sysctl_handle_int(oidp, &val, 0, req); 1440 if (error != 0 || req->newptr == NULL) 1441 return (error); 1442 count = maxc = 0; 1443 st = spr = maxt = maxpr = 0; 1444 bzero(ccpbk, sizeof(ccpbk)); 1445 bzero(ct, sizeof(ct)); 1446 bzero(cpr, sizeof(cpr)); 1447 now = sbinuptime(); 1448#ifdef SMP 1449 CPU_FOREACH(cpu) { 1450 cc = CC_CPU(cpu); 1451#else 1452 cc = CC_CPU(timeout_cpu); 1453#endif 1454 CC_LOCK(cc); 1455 for (i = 0; i < callwheelsize; i++) { 1456 sc = &cc->cc_callwheel[i]; 1457 c = 0; 1458 LIST_FOREACH(tmp, sc, c_links.le) { 1459 c++; 1460 t = tmp->c_time - now; 1461 if (t < 0) 1462 t = 0; 1463 st += t / SBT_1US; 1464 spr += tmp->c_precision / SBT_1US; 1465 if (t > maxt) 1466 maxt = t; 1467 if (tmp->c_precision > maxpr) 1468 maxpr = tmp->c_precision; 1469 ct[flssbt(t)]++; 1470 cpr[flssbt(tmp->c_precision)]++; 1471 } 1472 if (c > maxc) 1473 maxc = c; 1474 ccpbk[fls(c + c / 2)]++; 1475 count += c; 1476 } 1477 CC_UNLOCK(cc); 1478#ifdef SMP 1479 } 1480#endif 1481 1482 for (i = 0, tcum = 0; i < 64 && tcum < count / 2; i++) 1483 tcum += ct[i]; 1484 medt = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0; 1485 for (i = 0, pcum = 0; i < 64 && pcum < count / 2; i++) 1486 pcum += cpr[i]; 1487 medpr = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0; 1488 for (i = 0, c = 0; i < 32 && c < count / 2; i++) 1489 c += ccpbk[i]; 1490 medc = (i >= 2) ? 
(1 << (i - 2)) : 0; 1491 1492 printf("Scheduled callouts statistic snapshot:\n"); 1493 printf(" Callouts: %6d Buckets: %6d*%-3d Bucket size: 0.%06ds\n", 1494 count, callwheelsize, mp_ncpus, 1000000 >> CC_HASH_SHIFT); 1495 printf(" C/Bk: med %5d avg %6d.%06jd max %6d\n", 1496 medc, 1497 count / callwheelsize / mp_ncpus, 1498 (uint64_t)count * 1000000 / callwheelsize / mp_ncpus % 1000000, 1499 maxc); 1500 printf(" Time: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n", 1501 medt / SBT_1S, (medt & 0xffffffff) * 1000000 >> 32, 1502 (st / count) / 1000000, (st / count) % 1000000, 1503 maxt / SBT_1S, (maxt & 0xffffffff) * 1000000 >> 32); 1504 printf(" Prec: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n", 1505 medpr / SBT_1S, (medpr & 0xffffffff) * 1000000 >> 32, 1506 (spr / count) / 1000000, (spr / count) % 1000000, 1507 maxpr / SBT_1S, (maxpr & 0xffffffff) * 1000000 >> 32); 1508 printf(" Distribution: \tbuckets\t time\t tcum\t" 1509 " prec\t pcum\n"); 1510 for (i = 0, tcum = pcum = 0; i < 64; i++) { 1511 if (ct[i] == 0 && cpr[i] == 0) 1512 continue; 1513 t = (i != 0) ? (((sbintime_t)1) << (i - 1)) : 0; 1514 tcum += ct[i]; 1515 pcum += cpr[i]; 1516 printf(" %10jd.%06jds\t 2**%d\t%7d\t%7d\t%7d\t%7d\n", 1517 t / SBT_1S, (t & 0xffffffff) * 1000000 >> 32, 1518 i - 1 - (32 - CC_HASH_SHIFT), 1519 ct[i], tcum, cpr[i], pcum); 1520 } 1521 return (error); 1522} 1523SYSCTL_PROC(_kern, OID_AUTO, callout_stat, 1524 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1525 0, 0, sysctl_kern_callout_stat, "I", 1526 "Dump immediate statistic snapshot of the scheduled callouts"); 1527
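/*
 * Appended illustration (not part of the original revision): a minimal
 * sketch of how a consumer might drive the callout(9) interface implemented
 * above -- callout_init_mtx() (a wrapper around _callout_init_lock()),
 * callout_reset()/callout_reset_sbt() and callout_drain().  The softc
 * layout, the "foo_" names and the chosen periods are hypothetical and
 * exist only for illustration.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/time.h>

struct foo_softc {
	struct mtx	sc_mtx;
	struct callout	sc_timer;
	int		sc_ticks;
};

static void
foo_timeout(void *arg)
{
	struct foo_softc *sc = arg;

	/* sc_mtx is held here because the callout was initialized with it. */
	sc->sc_ticks++;

	/* Re-arm: fire again in one second, allowing 100 ms of slop. */
	callout_reset_sbt(&sc->sc_timer, SBT_1S, 100 * SBT_1MS,
	    foo_timeout, sc, 0);
}

static void
foo_start(struct foo_softc *sc)
{

	mtx_init(&sc->sc_mtx, "foo timer", NULL, MTX_DEF);
	callout_init_mtx(&sc->sc_timer, &sc->sc_mtx, 0);
	mtx_lock(&sc->sc_mtx);
	callout_reset(&sc->sc_timer, hz, foo_timeout, sc);
	mtx_unlock(&sc->sc_mtx);
}

static void
foo_stop(struct foo_softc *sc)
{

	/*
	 * callout_drain() may sleep waiting for a running handler to finish,
	 * so it must not be called with sc_mtx held.
	 */
	callout_drain(&sc->sc_timer);
	mtx_destroy(&sc->sc_mtx);
}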