kern_timeout.c revision 304900
1/*- 2 * Copyright (c) 1982, 1986, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * From: @(#)kern_clock.c 8.5 (Berkeley) 1/21/94 35 */ 36 37#include <sys/cdefs.h> 38__FBSDID("$FreeBSD: stable/10/sys/kern/kern_timeout.c 304900 2016-08-27 11:38:37Z kib $"); 39 40#include "opt_callout_profiling.h" 41#include "opt_kdtrace.h" 42#if defined(__arm__) 43#include "opt_timer.h" 44#endif 45 46#include <sys/param.h> 47#include <sys/systm.h> 48#include <sys/bus.h> 49#include <sys/callout.h> 50#include <sys/file.h> 51#include <sys/interrupt.h> 52#include <sys/kernel.h> 53#include <sys/ktr.h> 54#include <sys/lock.h> 55#include <sys/malloc.h> 56#include <sys/mutex.h> 57#include <sys/proc.h> 58#include <sys/sdt.h> 59#include <sys/sleepqueue.h> 60#include <sys/sysctl.h> 61#include <sys/smp.h> 62 63#ifdef SMP 64#include <machine/cpu.h> 65#endif 66 67#ifndef NO_EVENTTIMERS 68DPCPU_DECLARE(sbintime_t, hardclocktime); 69#endif 70 71SDT_PROVIDER_DEFINE(callout_execute); 72SDT_PROBE_DEFINE1(callout_execute, , , callout__start, "struct callout *"); 73SDT_PROBE_DEFINE1(callout_execute, , , callout__end, "struct callout *"); 74 75#ifdef CALLOUT_PROFILING 76static int avg_depth; 77SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0, 78 "Average number of items examined per softclock call. Units = 1/1000"); 79static int avg_gcalls; 80SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0, 81 "Average number of Giant callouts made per softclock call. Units = 1/1000"); 82static int avg_lockcalls; 83SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0, 84 "Average number of lock callouts made per softclock call. Units = 1/1000"); 85static int avg_mpcalls; 86SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0, 87 "Average number of MP callouts made per softclock call. Units = 1/1000"); 88static int avg_depth_dir; 89SYSCTL_INT(_debug, OID_AUTO, to_avg_depth_dir, CTLFLAG_RD, &avg_depth_dir, 0, 90 "Average number of direct callouts examined per callout_process call. " 91 "Units = 1/1000"); 92static int avg_lockcalls_dir; 93SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls_dir, CTLFLAG_RD, 94 &avg_lockcalls_dir, 0, "Average number of lock direct callouts made per " 95 "callout_process call. Units = 1/1000"); 96static int avg_mpcalls_dir; 97SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls_dir, CTLFLAG_RD, &avg_mpcalls_dir, 98 0, "Average number of MP direct callouts made per callout_process call. " 99 "Units = 1/1000"); 100#endif 101 102static int ncallout; 103SYSCTL_INT(_kern, OID_AUTO, ncallout, CTLFLAG_RDTUN, &ncallout, 0, 104 "Number of entries in callwheel and size of timeout() preallocation"); 105 106/* 107 * TODO: 108 * allocate more timeout table slots when table overflows. 109 */ 110u_int callwheelsize, callwheelmask; 111 112/* 113 * The callout cpu exec entities represent informations necessary for 114 * describing the state of callouts currently running on the CPU and the ones 115 * necessary for migrating callouts to the new callout cpu. In particular, 116 * the first entry of the array cc_exec_entity holds informations for callout 117 * running in SWI thread context, while the second one holds informations 118 * for callout running directly from hardware interrupt context. 119 * The cached informations are very important for deferring migration when 120 * the migrating callout is already running. 121 */ 122struct cc_exec { 123 struct callout *cc_curr; 124#ifdef SMP 125 void (*ce_migration_func)(void *); 126 void *ce_migration_arg; 127 int ce_migration_cpu; 128 sbintime_t ce_migration_time; 129 sbintime_t ce_migration_prec; 130#endif 131 bool cc_cancel; 132 bool cc_waiting; 133}; 134 135/* 136 * There is one struct callout_cpu per cpu, holding all relevant 137 * state for the callout processing thread on the individual CPU. 138 */ 139struct callout_cpu { 140 struct mtx_padalign cc_lock; 141 struct cc_exec cc_exec_entity[2]; 142 struct callout *cc_next; 143 struct callout *cc_callout; 144 struct callout_list *cc_callwheel; 145 struct callout_tailq cc_expireq; 146 struct callout_slist cc_callfree; 147 sbintime_t cc_firstevent; 148 sbintime_t cc_lastscan; 149 void *cc_cookie; 150 u_int cc_bucket; 151 u_int cc_inited; 152 char cc_ktr_event_name[20]; 153}; 154 155#define callout_migrating(c) ((c)->c_iflags & CALLOUT_DFRMIGRATION) 156 157#define cc_exec_curr(cc, dir) cc->cc_exec_entity[dir].cc_curr 158#define cc_exec_next(cc) cc->cc_next 159#define cc_exec_cancel(cc, dir) cc->cc_exec_entity[dir].cc_cancel 160#define cc_exec_waiting(cc, dir) cc->cc_exec_entity[dir].cc_waiting 161#ifdef SMP 162#define cc_migration_func(cc, dir) cc->cc_exec_entity[dir].ce_migration_func 163#define cc_migration_arg(cc, dir) cc->cc_exec_entity[dir].ce_migration_arg 164#define cc_migration_cpu(cc, dir) cc->cc_exec_entity[dir].ce_migration_cpu 165#define cc_migration_time(cc, dir) cc->cc_exec_entity[dir].ce_migration_time 166#define cc_migration_prec(cc, dir) cc->cc_exec_entity[dir].ce_migration_prec 167 168struct callout_cpu cc_cpu[MAXCPU]; 169#define CPUBLOCK MAXCPU 170#define CC_CPU(cpu) (&cc_cpu[(cpu)]) 171#define CC_SELF() CC_CPU(PCPU_GET(cpuid)) 172#else 173struct callout_cpu cc_cpu; 174#define CC_CPU(cpu) &cc_cpu 175#define CC_SELF() &cc_cpu 176#endif 177#define CC_LOCK(cc) mtx_lock_spin(&(cc)->cc_lock) 178#define CC_UNLOCK(cc) mtx_unlock_spin(&(cc)->cc_lock) 179#define CC_LOCK_ASSERT(cc) mtx_assert(&(cc)->cc_lock, MA_OWNED) 180 181static int timeout_cpu; 182 183static void callout_cpu_init(struct callout_cpu *cc, int cpu); 184static void softclock_call_cc(struct callout *c, struct callout_cpu *cc, 185#ifdef CALLOUT_PROFILING 186 int *mpcalls, int *lockcalls, int *gcalls, 187#endif 188 int direct); 189 190static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures"); 191 192/** 193 * Locked by cc_lock: 194 * cc_curr - If a callout is in progress, it is cc_curr. 195 * If cc_curr is non-NULL, threads waiting in 196 * callout_drain() will be woken up as soon as the 197 * relevant callout completes. 198 * cc_cancel - Changing to 1 with both callout_lock and cc_lock held 199 * guarantees that the current callout will not run. 200 * The softclock() function sets this to 0 before it 201 * drops callout_lock to acquire c_lock, and it calls 202 * the handler only if curr_cancelled is still 0 after 203 * cc_lock is successfully acquired. 204 * cc_waiting - If a thread is waiting in callout_drain(), then 205 * callout_wait is nonzero. Set only when 206 * cc_curr is non-NULL. 207 */ 208 209/* 210 * Resets the execution entity tied to a specific callout cpu. 211 */ 212static void 213cc_cce_cleanup(struct callout_cpu *cc, int direct) 214{ 215 216 cc_exec_curr(cc, direct) = NULL; 217 cc_exec_cancel(cc, direct) = false; 218 cc_exec_waiting(cc, direct) = false; 219#ifdef SMP 220 cc_migration_cpu(cc, direct) = CPUBLOCK; 221 cc_migration_time(cc, direct) = 0; 222 cc_migration_prec(cc, direct) = 0; 223 cc_migration_func(cc, direct) = NULL; 224 cc_migration_arg(cc, direct) = NULL; 225#endif 226} 227 228/* 229 * Checks if migration is requested by a specific callout cpu. 230 */ 231static int 232cc_cce_migrating(struct callout_cpu *cc, int direct) 233{ 234 235#ifdef SMP 236 return (cc_migration_cpu(cc, direct) != CPUBLOCK); 237#else 238 return (0); 239#endif 240} 241 242/* 243 * Kernel low level callwheel initialization 244 * called on cpu0 during kernel startup. 245 */ 246static void 247callout_callwheel_init(void *dummy) 248{ 249 struct callout_cpu *cc; 250 251 /* 252 * Calculate the size of the callout wheel and the preallocated 253 * timeout() structures. 254 * XXX: Clip callout to result of previous function of maxusers 255 * maximum 384. This is still huge, but acceptable. 256 */ 257 memset(CC_CPU(0), 0, sizeof(cc_cpu)); 258 ncallout = imin(16 + maxproc + maxfiles, 18508); 259 TUNABLE_INT_FETCH("kern.ncallout", &ncallout); 260 261 /* 262 * Calculate callout wheel size, should be next power of two higher 263 * than 'ncallout'. 264 */ 265 callwheelsize = 1 << fls(ncallout); 266 callwheelmask = callwheelsize - 1; 267 268 /* 269 * Only cpu0 handles timeout(9) and receives a preallocation. 270 * 271 * XXX: Once all timeout(9) consumers are converted this can 272 * be removed. 273 */ 274 timeout_cpu = PCPU_GET(cpuid); 275 cc = CC_CPU(timeout_cpu); 276 cc->cc_callout = malloc(ncallout * sizeof(struct callout), 277 M_CALLOUT, M_WAITOK); 278 callout_cpu_init(cc, timeout_cpu); 279} 280SYSINIT(callwheel_init, SI_SUB_CPU, SI_ORDER_ANY, callout_callwheel_init, NULL); 281 282/* 283 * Initialize the per-cpu callout structures. 284 */ 285static void 286callout_cpu_init(struct callout_cpu *cc, int cpu) 287{ 288 struct callout *c; 289 int i; 290 291 mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE); 292 SLIST_INIT(&cc->cc_callfree); 293 cc->cc_inited = 1; 294 cc->cc_callwheel = malloc(sizeof(struct callout_list) * callwheelsize, 295 M_CALLOUT, M_WAITOK); 296 for (i = 0; i < callwheelsize; i++) 297 LIST_INIT(&cc->cc_callwheel[i]); 298 TAILQ_INIT(&cc->cc_expireq); 299 cc->cc_firstevent = SBT_MAX; 300 for (i = 0; i < 2; i++) 301 cc_cce_cleanup(cc, i); 302 snprintf(cc->cc_ktr_event_name, sizeof(cc->cc_ktr_event_name), 303 "callwheel cpu %d", cpu); 304 if (cc->cc_callout == NULL) /* Only cpu0 handles timeout(9) */ 305 return; 306 for (i = 0; i < ncallout; i++) { 307 c = &cc->cc_callout[i]; 308 callout_init(c, 0); 309 c->c_iflags = CALLOUT_LOCAL_ALLOC; 310 SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle); 311 } 312} 313 314#ifdef SMP 315/* 316 * Switches the cpu tied to a specific callout. 317 * The function expects a locked incoming callout cpu and returns with 318 * locked outcoming callout cpu. 319 */ 320static struct callout_cpu * 321callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu) 322{ 323 struct callout_cpu *new_cc; 324 325 MPASS(c != NULL && cc != NULL); 326 CC_LOCK_ASSERT(cc); 327 328 /* 329 * Avoid interrupts and preemption firing after the callout cpu 330 * is blocked in order to avoid deadlocks as the new thread 331 * may be willing to acquire the callout cpu lock. 332 */ 333 c->c_cpu = CPUBLOCK; 334 spinlock_enter(); 335 CC_UNLOCK(cc); 336 new_cc = CC_CPU(new_cpu); 337 CC_LOCK(new_cc); 338 spinlock_exit(); 339 c->c_cpu = new_cpu; 340 return (new_cc); 341} 342#endif 343 344/* 345 * Start standard softclock thread. 346 */ 347static void 348start_softclock(void *dummy) 349{ 350 struct callout_cpu *cc; 351#ifdef SMP 352 int cpu; 353#endif 354 355 cc = CC_CPU(timeout_cpu); 356 if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK, 357 INTR_MPSAFE, &cc->cc_cookie)) 358 panic("died while creating standard software ithreads"); 359#ifdef SMP 360 CPU_FOREACH(cpu) { 361 if (cpu == timeout_cpu) 362 continue; 363 cc = CC_CPU(cpu); 364 cc->cc_callout = NULL; /* Only cpu0 handles timeout(9). */ 365 callout_cpu_init(cc, cpu); 366 if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK, 367 INTR_MPSAFE, &cc->cc_cookie)) 368 panic("died while creating standard software ithreads"); 369 } 370#endif 371} 372SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL); 373 374#define CC_HASH_SHIFT 8 375 376static inline u_int 377callout_hash(sbintime_t sbt) 378{ 379 380 return (sbt >> (32 - CC_HASH_SHIFT)); 381} 382 383static inline u_int 384callout_get_bucket(sbintime_t sbt) 385{ 386 387 return (callout_hash(sbt) & callwheelmask); 388} 389 390void 391callout_process(sbintime_t now) 392{ 393 struct callout *tmp, *tmpn; 394 struct callout_cpu *cc; 395 struct callout_list *sc; 396 sbintime_t first, last, max, tmp_max; 397 uint32_t lookahead; 398 u_int firstb, lastb, nowb; 399#ifdef CALLOUT_PROFILING 400 int depth_dir = 0, mpcalls_dir = 0, lockcalls_dir = 0; 401#endif 402 403 cc = CC_SELF(); 404 mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET); 405 406 /* Compute the buckets of the last scan and present times. */ 407 firstb = callout_hash(cc->cc_lastscan); 408 cc->cc_lastscan = now; 409 nowb = callout_hash(now); 410 411 /* Compute the last bucket and minimum time of the bucket after it. */ 412 if (nowb == firstb) 413 lookahead = (SBT_1S / 16); 414 else if (nowb - firstb == 1) 415 lookahead = (SBT_1S / 8); 416 else 417 lookahead = (SBT_1S / 2); 418 first = last = now; 419 first += (lookahead / 2); 420 last += lookahead; 421 last &= (0xffffffffffffffffLLU << (32 - CC_HASH_SHIFT)); 422 lastb = callout_hash(last) - 1; 423 max = last; 424 425 /* 426 * Check if we wrapped around the entire wheel from the last scan. 427 * In case, we need to scan entirely the wheel for pending callouts. 428 */ 429 if (lastb - firstb >= callwheelsize) { 430 lastb = firstb + callwheelsize - 1; 431 if (nowb - firstb >= callwheelsize) 432 nowb = lastb; 433 } 434 435 /* Iterate callwheel from firstb to nowb and then up to lastb. */ 436 do { 437 sc = &cc->cc_callwheel[firstb & callwheelmask]; 438 tmp = LIST_FIRST(sc); 439 while (tmp != NULL) { 440 /* Run the callout if present time within allowed. */ 441 if (tmp->c_time <= now) { 442 /* 443 * Consumer told us the callout may be run 444 * directly from hardware interrupt context. 445 */ 446 if (tmp->c_iflags & CALLOUT_DIRECT) { 447#ifdef CALLOUT_PROFILING 448 ++depth_dir; 449#endif 450 cc_exec_next(cc) = 451 LIST_NEXT(tmp, c_links.le); 452 cc->cc_bucket = firstb & callwheelmask; 453 LIST_REMOVE(tmp, c_links.le); 454 softclock_call_cc(tmp, cc, 455#ifdef CALLOUT_PROFILING 456 &mpcalls_dir, &lockcalls_dir, NULL, 457#endif 458 1); 459 tmp = cc_exec_next(cc); 460 cc_exec_next(cc) = NULL; 461 } else { 462 tmpn = LIST_NEXT(tmp, c_links.le); 463 LIST_REMOVE(tmp, c_links.le); 464 TAILQ_INSERT_TAIL(&cc->cc_expireq, 465 tmp, c_links.tqe); 466 tmp->c_iflags |= CALLOUT_PROCESSED; 467 tmp = tmpn; 468 } 469 continue; 470 } 471 /* Skip events from distant future. */ 472 if (tmp->c_time >= max) 473 goto next; 474 /* 475 * Event minimal time is bigger than present maximal 476 * time, so it cannot be aggregated. 477 */ 478 if (tmp->c_time > last) { 479 lastb = nowb; 480 goto next; 481 } 482 /* Update first and last time, respecting this event. */ 483 if (tmp->c_time < first) 484 first = tmp->c_time; 485 tmp_max = tmp->c_time + tmp->c_precision; 486 if (tmp_max < last) 487 last = tmp_max; 488next: 489 tmp = LIST_NEXT(tmp, c_links.le); 490 } 491 /* Proceed with the next bucket. */ 492 firstb++; 493 /* 494 * Stop if we looked after present time and found 495 * some event we can't execute at now. 496 * Stop if we looked far enough into the future. 497 */ 498 } while (((int)(firstb - lastb)) <= 0); 499 cc->cc_firstevent = last; 500#ifndef NO_EVENTTIMERS 501 cpu_new_callout(curcpu, last, first); 502#endif 503#ifdef CALLOUT_PROFILING 504 avg_depth_dir += (depth_dir * 1000 - avg_depth_dir) >> 8; 505 avg_mpcalls_dir += (mpcalls_dir * 1000 - avg_mpcalls_dir) >> 8; 506 avg_lockcalls_dir += (lockcalls_dir * 1000 - avg_lockcalls_dir) >> 8; 507#endif 508 mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET); 509 /* 510 * swi_sched acquires the thread lock, so we don't want to call it 511 * with cc_lock held; incorrect locking order. 512 */ 513 if (!TAILQ_EMPTY(&cc->cc_expireq)) 514 swi_sched(cc->cc_cookie, 0); 515} 516 517static struct callout_cpu * 518callout_lock(struct callout *c) 519{ 520 struct callout_cpu *cc; 521 int cpu; 522 523 for (;;) { 524 cpu = c->c_cpu; 525#ifdef SMP 526 if (cpu == CPUBLOCK) { 527 while (c->c_cpu == CPUBLOCK) 528 cpu_spinwait(); 529 continue; 530 } 531#endif 532 cc = CC_CPU(cpu); 533 CC_LOCK(cc); 534 if (cpu == c->c_cpu) 535 break; 536 CC_UNLOCK(cc); 537 } 538 return (cc); 539} 540 541static void 542callout_cc_add(struct callout *c, struct callout_cpu *cc, 543 sbintime_t sbt, sbintime_t precision, void (*func)(void *), 544 void *arg, int cpu, int flags) 545{ 546 int bucket; 547 548 CC_LOCK_ASSERT(cc); 549 if (sbt < cc->cc_lastscan) 550 sbt = cc->cc_lastscan; 551 c->c_arg = arg; 552 c->c_iflags |= CALLOUT_PENDING; 553 c->c_iflags &= ~CALLOUT_PROCESSED; 554 c->c_flags |= CALLOUT_ACTIVE; 555 if (flags & C_DIRECT_EXEC) 556 c->c_iflags |= CALLOUT_DIRECT; 557 c->c_func = func; 558 c->c_time = sbt; 559 c->c_precision = precision; 560 bucket = callout_get_bucket(c->c_time); 561 CTR3(KTR_CALLOUT, "precision set for %p: %d.%08x", 562 c, (int)(c->c_precision >> 32), 563 (u_int)(c->c_precision & 0xffffffff)); 564 LIST_INSERT_HEAD(&cc->cc_callwheel[bucket], c, c_links.le); 565 if (cc->cc_bucket == bucket) 566 cc_exec_next(cc) = c; 567#ifndef NO_EVENTTIMERS 568 /* 569 * Inform the eventtimers(4) subsystem there's a new callout 570 * that has been inserted, but only if really required. 571 */ 572 if (SBT_MAX - c->c_time < c->c_precision) 573 c->c_precision = SBT_MAX - c->c_time; 574 sbt = c->c_time + c->c_precision; 575 if (sbt < cc->cc_firstevent) { 576 cc->cc_firstevent = sbt; 577 cpu_new_callout(cpu, sbt, c->c_time); 578 } 579#endif 580} 581 582static void 583callout_cc_del(struct callout *c, struct callout_cpu *cc) 584{ 585 586 if ((c->c_iflags & CALLOUT_LOCAL_ALLOC) == 0) 587 return; 588 c->c_func = NULL; 589 SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle); 590} 591 592static void 593softclock_call_cc(struct callout *c, struct callout_cpu *cc, 594#ifdef CALLOUT_PROFILING 595 int *mpcalls, int *lockcalls, int *gcalls, 596#endif 597 int direct) 598{ 599 struct rm_priotracker tracker; 600 void (*c_func)(void *); 601 void *c_arg; 602 struct lock_class *class; 603 struct lock_object *c_lock; 604 uintptr_t lock_status; 605 int c_iflags; 606#ifdef SMP 607 struct callout_cpu *new_cc; 608 void (*new_func)(void *); 609 void *new_arg; 610 int flags, new_cpu; 611 sbintime_t new_prec, new_time; 612#endif 613#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING) 614 sbintime_t sbt1, sbt2; 615 struct timespec ts2; 616 static sbintime_t maxdt = 2 * SBT_1MS; /* 2 msec */ 617 static timeout_t *lastfunc; 618#endif 619 620 KASSERT((c->c_iflags & CALLOUT_PENDING) == CALLOUT_PENDING, 621 ("softclock_call_cc: pend %p %x", c, c->c_iflags)); 622 KASSERT((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE, 623 ("softclock_call_cc: act %p %x", c, c->c_flags)); 624 class = (c->c_lock != NULL) ? LOCK_CLASS(c->c_lock) : NULL; 625 lock_status = 0; 626 if (c->c_flags & CALLOUT_SHAREDLOCK) { 627 if (class == &lock_class_rm) 628 lock_status = (uintptr_t)&tracker; 629 else 630 lock_status = 1; 631 } 632 c_lock = c->c_lock; 633 c_func = c->c_func; 634 c_arg = c->c_arg; 635 c_iflags = c->c_iflags; 636 if (c->c_iflags & CALLOUT_LOCAL_ALLOC) 637 c->c_iflags = CALLOUT_LOCAL_ALLOC; 638 else 639 c->c_iflags &= ~CALLOUT_PENDING; 640 641 cc_exec_curr(cc, direct) = c; 642 cc_exec_cancel(cc, direct) = false; 643 CC_UNLOCK(cc); 644 if (c_lock != NULL) { 645 class->lc_lock(c_lock, lock_status); 646 /* 647 * The callout may have been cancelled 648 * while we switched locks. 649 */ 650 if (cc_exec_cancel(cc, direct)) { 651 class->lc_unlock(c_lock); 652 goto skip; 653 } 654 /* The callout cannot be stopped now. */ 655 cc_exec_cancel(cc, direct) = true; 656 if (c_lock == &Giant.lock_object) { 657#ifdef CALLOUT_PROFILING 658 (*gcalls)++; 659#endif 660 CTR3(KTR_CALLOUT, "callout giant %p func %p arg %p", 661 c, c_func, c_arg); 662 } else { 663#ifdef CALLOUT_PROFILING 664 (*lockcalls)++; 665#endif 666 CTR3(KTR_CALLOUT, "callout lock %p func %p arg %p", 667 c, c_func, c_arg); 668 } 669 } else { 670#ifdef CALLOUT_PROFILING 671 (*mpcalls)++; 672#endif 673 CTR3(KTR_CALLOUT, "callout %p func %p arg %p", 674 c, c_func, c_arg); 675 } 676 KTR_STATE3(KTR_SCHED, "callout", cc->cc_ktr_event_name, "running", 677 "func:%p", c_func, "arg:%p", c_arg, "direct:%d", direct); 678#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING) 679 sbt1 = sbinuptime(); 680#endif 681 THREAD_NO_SLEEPING(); 682 SDT_PROBE1(callout_execute, , , callout__start, c); 683 c_func(c_arg); 684 SDT_PROBE1(callout_execute, , , callout__end, c); 685 THREAD_SLEEPING_OK(); 686#if defined(DIAGNOSTIC) || defined(CALLOUT_PROFILING) 687 sbt2 = sbinuptime(); 688 sbt2 -= sbt1; 689 if (sbt2 > maxdt) { 690 if (lastfunc != c_func || sbt2 > maxdt * 2) { 691 ts2 = sbttots(sbt2); 692 printf( 693 "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n", 694 c_func, c_arg, (intmax_t)ts2.tv_sec, ts2.tv_nsec); 695 } 696 maxdt = sbt2; 697 lastfunc = c_func; 698 } 699#endif 700 KTR_STATE0(KTR_SCHED, "callout", cc->cc_ktr_event_name, "idle"); 701 CTR1(KTR_CALLOUT, "callout %p finished", c); 702 if ((c_iflags & CALLOUT_RETURNUNLOCKED) == 0) 703 class->lc_unlock(c_lock); 704skip: 705 CC_LOCK(cc); 706 KASSERT(cc_exec_curr(cc, direct) == c, ("mishandled cc_curr")); 707 cc_exec_curr(cc, direct) = NULL; 708 if (cc_exec_waiting(cc, direct)) { 709 /* 710 * There is someone waiting for the 711 * callout to complete. 712 * If the callout was scheduled for 713 * migration just cancel it. 714 */ 715 if (cc_cce_migrating(cc, direct)) { 716 cc_cce_cleanup(cc, direct); 717 718 /* 719 * It should be assert here that the callout is not 720 * destroyed but that is not easy. 721 */ 722 c->c_iflags &= ~CALLOUT_DFRMIGRATION; 723 } 724 cc_exec_waiting(cc, direct) = false; 725 CC_UNLOCK(cc); 726 wakeup(&cc_exec_waiting(cc, direct)); 727 CC_LOCK(cc); 728 } else if (cc_cce_migrating(cc, direct)) { 729 KASSERT((c_iflags & CALLOUT_LOCAL_ALLOC) == 0, 730 ("Migrating legacy callout %p", c)); 731#ifdef SMP 732 /* 733 * If the callout was scheduled for 734 * migration just perform it now. 735 */ 736 new_cpu = cc_migration_cpu(cc, direct); 737 new_time = cc_migration_time(cc, direct); 738 new_prec = cc_migration_prec(cc, direct); 739 new_func = cc_migration_func(cc, direct); 740 new_arg = cc_migration_arg(cc, direct); 741 cc_cce_cleanup(cc, direct); 742 743 /* 744 * It should be assert here that the callout is not destroyed 745 * but that is not easy. 746 * 747 * As first thing, handle deferred callout stops. 748 */ 749 if (!callout_migrating(c)) { 750 CTR3(KTR_CALLOUT, 751 "deferred cancelled %p func %p arg %p", 752 c, new_func, new_arg); 753 callout_cc_del(c, cc); 754 return; 755 } 756 c->c_iflags &= ~CALLOUT_DFRMIGRATION; 757 758 new_cc = callout_cpu_switch(c, cc, new_cpu); 759 flags = (direct) ? C_DIRECT_EXEC : 0; 760 callout_cc_add(c, new_cc, new_time, new_prec, new_func, 761 new_arg, new_cpu, flags); 762 CC_UNLOCK(new_cc); 763 CC_LOCK(cc); 764#else 765 panic("migration should not happen"); 766#endif 767 } 768 /* 769 * If the current callout is locally allocated (from 770 * timeout(9)) then put it on the freelist. 771 * 772 * Note: we need to check the cached copy of c_iflags because 773 * if it was not local, then it's not safe to deref the 774 * callout pointer. 775 */ 776 KASSERT((c_iflags & CALLOUT_LOCAL_ALLOC) == 0 || 777 c->c_iflags == CALLOUT_LOCAL_ALLOC, 778 ("corrupted callout")); 779 if (c_iflags & CALLOUT_LOCAL_ALLOC) 780 callout_cc_del(c, cc); 781} 782 783/* 784 * The callout mechanism is based on the work of Adam M. Costello and 785 * George Varghese, published in a technical report entitled "Redesigning 786 * the BSD Callout and Timer Facilities" and modified slightly for inclusion 787 * in FreeBSD by Justin T. Gibbs. The original work on the data structures 788 * used in this implementation was published by G. Varghese and T. Lauck in 789 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for 790 * the Efficient Implementation of a Timer Facility" in the Proceedings of 791 * the 11th ACM Annual Symposium on Operating Systems Principles, 792 * Austin, Texas Nov 1987. 793 */ 794 795/* 796 * Software (low priority) clock interrupt. 797 * Run periodic events from timeout queue. 798 */ 799void 800softclock(void *arg) 801{ 802 struct callout_cpu *cc; 803 struct callout *c; 804#ifdef CALLOUT_PROFILING 805 int depth = 0, gcalls = 0, lockcalls = 0, mpcalls = 0; 806#endif 807 808 cc = (struct callout_cpu *)arg; 809 CC_LOCK(cc); 810 while ((c = TAILQ_FIRST(&cc->cc_expireq)) != NULL) { 811 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe); 812 softclock_call_cc(c, cc, 813#ifdef CALLOUT_PROFILING 814 &mpcalls, &lockcalls, &gcalls, 815#endif 816 0); 817#ifdef CALLOUT_PROFILING 818 ++depth; 819#endif 820 } 821#ifdef CALLOUT_PROFILING 822 avg_depth += (depth * 1000 - avg_depth) >> 8; 823 avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8; 824 avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8; 825 avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8; 826#endif 827 CC_UNLOCK(cc); 828} 829 830/* 831 * timeout -- 832 * Execute a function after a specified length of time. 833 * 834 * untimeout -- 835 * Cancel previous timeout function call. 836 * 837 * callout_handle_init -- 838 * Initialize a handle so that using it with untimeout is benign. 839 * 840 * See AT&T BCI Driver Reference Manual for specification. This 841 * implementation differs from that one in that although an 842 * identification value is returned from timeout, the original 843 * arguments to timeout as well as the identifier are used to 844 * identify entries for untimeout. 845 */ 846struct callout_handle 847timeout(ftn, arg, to_ticks) 848 timeout_t *ftn; 849 void *arg; 850 int to_ticks; 851{ 852 struct callout_cpu *cc; 853 struct callout *new; 854 struct callout_handle handle; 855 856 cc = CC_CPU(timeout_cpu); 857 CC_LOCK(cc); 858 /* Fill in the next free callout structure. */ 859 new = SLIST_FIRST(&cc->cc_callfree); 860 if (new == NULL) 861 /* XXX Attempt to malloc first */ 862 panic("timeout table full"); 863 SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle); 864 callout_reset(new, to_ticks, ftn, arg); 865 handle.callout = new; 866 CC_UNLOCK(cc); 867 868 return (handle); 869} 870 871void 872untimeout(ftn, arg, handle) 873 timeout_t *ftn; 874 void *arg; 875 struct callout_handle handle; 876{ 877 struct callout_cpu *cc; 878 879 /* 880 * Check for a handle that was initialized 881 * by callout_handle_init, but never used 882 * for a real timeout. 883 */ 884 if (handle.callout == NULL) 885 return; 886 887 cc = callout_lock(handle.callout); 888 if (handle.callout->c_func == ftn && handle.callout->c_arg == arg) 889 callout_stop(handle.callout); 890 CC_UNLOCK(cc); 891} 892 893void 894callout_handle_init(struct callout_handle *handle) 895{ 896 handle->callout = NULL; 897} 898 899void 900callout_when(sbintime_t sbt, sbintime_t precision, int flags, 901 sbintime_t *res, sbintime_t *prec_res) 902{ 903 sbintime_t to_sbt, to_pr; 904 905 if ((flags & (C_ABSOLUTE | C_PRECALC)) != 0) { 906 *res = sbt; 907 *prec_res = precision; 908 return; 909 } 910 if ((flags & C_HARDCLOCK) != 0 && sbt < tick_sbt) 911 sbt = tick_sbt; 912 if ((flags & C_HARDCLOCK) != 0 || 913#ifdef NO_EVENTTIMERS 914 sbt >= sbt_timethreshold) { 915 to_sbt = getsbinuptime(); 916 917 /* Add safety belt for the case of hz > 1000. */ 918 to_sbt += tc_tick_sbt - tick_sbt; 919#else 920 sbt >= sbt_tickthreshold) { 921 /* 922 * Obtain the time of the last hardclock() call on 923 * this CPU directly from the kern_clocksource.c. 924 * This value is per-CPU, but it is equal for all 925 * active ones. 926 */ 927#ifdef __LP64__ 928 to_sbt = DPCPU_GET(hardclocktime); 929#else 930 spinlock_enter(); 931 to_sbt = DPCPU_GET(hardclocktime); 932 spinlock_exit(); 933#endif 934#endif 935 if ((flags & C_HARDCLOCK) == 0) 936 to_sbt += tick_sbt; 937 } else 938 to_sbt = sbinuptime(); 939 if (SBT_MAX - to_sbt < sbt) 940 to_sbt = SBT_MAX; 941 else 942 to_sbt += sbt; 943 *res = to_sbt; 944 to_pr = ((C_PRELGET(flags) < 0) ? sbt >> tc_precexp : 945 sbt >> C_PRELGET(flags)); 946 *prec_res = to_pr > precision ? to_pr : precision; 947} 948 949/* 950 * New interface; clients allocate their own callout structures. 951 * 952 * callout_reset() - establish or change a timeout 953 * callout_stop() - disestablish a timeout 954 * callout_init() - initialize a callout structure so that it can 955 * safely be passed to callout_reset() and callout_stop() 956 * 957 * <sys/callout.h> defines three convenience macros: 958 * 959 * callout_active() - returns truth if callout has not been stopped, 960 * drained, or deactivated since the last time the callout was 961 * reset. 962 * callout_pending() - returns truth if callout is still waiting for timeout 963 * callout_deactivate() - marks the callout as having been serviced 964 */ 965int 966callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t prec, 967 void (*ftn)(void *), void *arg, int cpu, int flags) 968{ 969 sbintime_t to_sbt, precision; 970 struct callout_cpu *cc; 971 int cancelled, direct; 972 int ignore_cpu=0; 973 974 cancelled = 0; 975 if (cpu == -1) { 976 ignore_cpu = 1; 977 } else if ((cpu >= MAXCPU) || 978 ((CC_CPU(cpu))->cc_inited == 0)) { 979 /* Invalid CPU spec */ 980 panic("Invalid CPU in callout %d", cpu); 981 } 982 callout_when(sbt, prec, flags, &to_sbt, &precision); 983 984 /* 985 * This flag used to be added by callout_cc_add, but the 986 * first time you call this we could end up with the 987 * wrong direct flag if we don't do it before we add. 988 */ 989 if (flags & C_DIRECT_EXEC) { 990 direct = 1; 991 } else { 992 direct = 0; 993 } 994 KASSERT(!direct || c->c_lock == NULL, 995 ("%s: direct callout %p has lock", __func__, c)); 996 cc = callout_lock(c); 997 /* 998 * Don't allow migration of pre-allocated callouts lest they 999 * become unbalanced or handle the case where the user does 1000 * not care. 1001 */ 1002 if ((c->c_iflags & CALLOUT_LOCAL_ALLOC) || 1003 ignore_cpu) { 1004 cpu = c->c_cpu; 1005 } 1006 1007 if (cc_exec_curr(cc, direct) == c) { 1008 /* 1009 * We're being asked to reschedule a callout which is 1010 * currently in progress. If there is a lock then we 1011 * can cancel the callout if it has not really started. 1012 */ 1013 if (c->c_lock != NULL && !cc_exec_cancel(cc, direct)) 1014 cancelled = cc_exec_cancel(cc, direct) = true; 1015 if (cc_exec_waiting(cc, direct)) { 1016 /* 1017 * Someone has called callout_drain to kill this 1018 * callout. Don't reschedule. 1019 */ 1020 CTR4(KTR_CALLOUT, "%s %p func %p arg %p", 1021 cancelled ? "cancelled" : "failed to cancel", 1022 c, c->c_func, c->c_arg); 1023 CC_UNLOCK(cc); 1024 return (cancelled); 1025 } 1026#ifdef SMP 1027 if (callout_migrating(c)) { 1028 /* 1029 * This only occurs when a second callout_reset_sbt_on 1030 * is made after a previous one moved it into 1031 * deferred migration (below). Note we do *not* change 1032 * the prev_cpu even though the previous target may 1033 * be different. 1034 */ 1035 cc_migration_cpu(cc, direct) = cpu; 1036 cc_migration_time(cc, direct) = to_sbt; 1037 cc_migration_prec(cc, direct) = precision; 1038 cc_migration_func(cc, direct) = ftn; 1039 cc_migration_arg(cc, direct) = arg; 1040 cancelled = 1; 1041 CC_UNLOCK(cc); 1042 return (cancelled); 1043 } 1044#endif 1045 } 1046 if (c->c_iflags & CALLOUT_PENDING) { 1047 if ((c->c_iflags & CALLOUT_PROCESSED) == 0) { 1048 if (cc_exec_next(cc) == c) 1049 cc_exec_next(cc) = LIST_NEXT(c, c_links.le); 1050 LIST_REMOVE(c, c_links.le); 1051 } else { 1052 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe); 1053 } 1054 cancelled = 1; 1055 c->c_iflags &= ~ CALLOUT_PENDING; 1056 c->c_flags &= ~ CALLOUT_ACTIVE; 1057 } 1058 1059#ifdef SMP 1060 /* 1061 * If the callout must migrate try to perform it immediately. 1062 * If the callout is currently running, just defer the migration 1063 * to a more appropriate moment. 1064 */ 1065 if (c->c_cpu != cpu) { 1066 if (cc_exec_curr(cc, direct) == c) { 1067 /* 1068 * Pending will have been removed since we are 1069 * actually executing the callout on another 1070 * CPU. That callout should be waiting on the 1071 * lock the caller holds. If we set both 1072 * active/and/pending after we return and the 1073 * lock on the executing callout proceeds, it 1074 * will then see pending is true and return. 1075 * At the return from the actual callout execution 1076 * the migration will occur in softclock_call_cc 1077 * and this new callout will be placed on the 1078 * new CPU via a call to callout_cpu_switch() which 1079 * will get the lock on the right CPU followed 1080 * by a call callout_cc_add() which will add it there. 1081 * (see above in softclock_call_cc()). 1082 */ 1083 cc_migration_cpu(cc, direct) = cpu; 1084 cc_migration_time(cc, direct) = to_sbt; 1085 cc_migration_prec(cc, direct) = precision; 1086 cc_migration_func(cc, direct) = ftn; 1087 cc_migration_arg(cc, direct) = arg; 1088 c->c_iflags |= (CALLOUT_DFRMIGRATION | CALLOUT_PENDING); 1089 c->c_flags |= CALLOUT_ACTIVE; 1090 CTR6(KTR_CALLOUT, 1091 "migration of %p func %p arg %p in %d.%08x to %u deferred", 1092 c, c->c_func, c->c_arg, (int)(to_sbt >> 32), 1093 (u_int)(to_sbt & 0xffffffff), cpu); 1094 CC_UNLOCK(cc); 1095 return (cancelled); 1096 } 1097 cc = callout_cpu_switch(c, cc, cpu); 1098 } 1099#endif 1100 1101 callout_cc_add(c, cc, to_sbt, precision, ftn, arg, cpu, flags); 1102 CTR6(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d.%08x", 1103 cancelled ? "re" : "", c, c->c_func, c->c_arg, (int)(to_sbt >> 32), 1104 (u_int)(to_sbt & 0xffffffff)); 1105 CC_UNLOCK(cc); 1106 1107 return (cancelled); 1108} 1109 1110/* 1111 * Common idioms that can be optimized in the future. 1112 */ 1113int 1114callout_schedule_on(struct callout *c, int to_ticks, int cpu) 1115{ 1116 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu); 1117} 1118 1119int 1120callout_schedule(struct callout *c, int to_ticks) 1121{ 1122 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu); 1123} 1124 1125int 1126_callout_stop_safe(c, flags) 1127 struct callout *c; 1128 int flags; 1129{ 1130 struct callout_cpu *cc, *old_cc; 1131 struct lock_class *class; 1132 int direct, sq_locked, use_lock; 1133 int not_on_a_list; 1134 1135 /* 1136 * Some old subsystems don't hold Giant while running a callout_stop(), 1137 * so just discard this check for the moment. 1138 */ 1139 if ((flags & CS_DRAIN) == 0 && c->c_lock != NULL) { 1140 if (c->c_lock == &Giant.lock_object) 1141 use_lock = mtx_owned(&Giant); 1142 else { 1143 use_lock = 1; 1144 class = LOCK_CLASS(c->c_lock); 1145 class->lc_assert(c->c_lock, LA_XLOCKED); 1146 } 1147 } else 1148 use_lock = 0; 1149 if (c->c_iflags & CALLOUT_DIRECT) { 1150 direct = 1; 1151 } else { 1152 direct = 0; 1153 } 1154 sq_locked = 0; 1155 old_cc = NULL; 1156again: 1157 cc = callout_lock(c); 1158 1159 if ((c->c_iflags & (CALLOUT_DFRMIGRATION | CALLOUT_PENDING)) == 1160 (CALLOUT_DFRMIGRATION | CALLOUT_PENDING) && 1161 ((c->c_flags & CALLOUT_ACTIVE) == CALLOUT_ACTIVE)) { 1162 /* 1163 * Special case where this slipped in while we 1164 * were migrating *as* the callout is about to 1165 * execute. The caller probably holds the lock 1166 * the callout wants. 1167 * 1168 * Get rid of the migration first. Then set 1169 * the flag that tells this code *not* to 1170 * try to remove it from any lists (its not 1171 * on one yet). When the callout wheel runs, 1172 * it will ignore this callout. 1173 */ 1174 c->c_iflags &= ~CALLOUT_PENDING; 1175 c->c_flags &= ~CALLOUT_ACTIVE; 1176 not_on_a_list = 1; 1177 } else { 1178 not_on_a_list = 0; 1179 } 1180 1181 /* 1182 * If the callout was migrating while the callout cpu lock was 1183 * dropped, just drop the sleepqueue lock and check the states 1184 * again. 1185 */ 1186 if (sq_locked != 0 && cc != old_cc) { 1187#ifdef SMP 1188 CC_UNLOCK(cc); 1189 sleepq_release(&cc_exec_waiting(old_cc, direct)); 1190 sq_locked = 0; 1191 old_cc = NULL; 1192 goto again; 1193#else 1194 panic("migration should not happen"); 1195#endif 1196 } 1197 1198 /* 1199 * If the callout isn't pending, it's not on the queue, so 1200 * don't attempt to remove it from the queue. We can try to 1201 * stop it by other means however. 1202 */ 1203 if (!(c->c_iflags & CALLOUT_PENDING)) { 1204 c->c_flags &= ~CALLOUT_ACTIVE; 1205 1206 /* 1207 * If it wasn't on the queue and it isn't the current 1208 * callout, then we can't stop it, so just bail. 1209 */ 1210 if (cc_exec_curr(cc, direct) != c) { 1211 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p", 1212 c, c->c_func, c->c_arg); 1213 CC_UNLOCK(cc); 1214 if (sq_locked) 1215 sleepq_release(&cc_exec_waiting(cc, direct)); 1216 return (0); 1217 } 1218 1219 if ((flags & CS_DRAIN) != 0) { 1220 /* 1221 * The current callout is running (or just 1222 * about to run) and blocking is allowed, so 1223 * just wait for the current invocation to 1224 * finish. 1225 */ 1226 while (cc_exec_curr(cc, direct) == c) { 1227 /* 1228 * Use direct calls to sleepqueue interface 1229 * instead of cv/msleep in order to avoid 1230 * a LOR between cc_lock and sleepqueue 1231 * chain spinlocks. This piece of code 1232 * emulates a msleep_spin() call actually. 1233 * 1234 * If we already have the sleepqueue chain 1235 * locked, then we can safely block. If we 1236 * don't already have it locked, however, 1237 * we have to drop the cc_lock to lock 1238 * it. This opens several races, so we 1239 * restart at the beginning once we have 1240 * both locks. If nothing has changed, then 1241 * we will end up back here with sq_locked 1242 * set. 1243 */ 1244 if (!sq_locked) { 1245 CC_UNLOCK(cc); 1246 sleepq_lock( 1247 &cc_exec_waiting(cc, direct)); 1248 sq_locked = 1; 1249 old_cc = cc; 1250 goto again; 1251 } 1252 1253 /* 1254 * Migration could be cancelled here, but 1255 * as long as it is still not sure when it 1256 * will be packed up, just let softclock() 1257 * take care of it. 1258 */ 1259 cc_exec_waiting(cc, direct) = true; 1260 DROP_GIANT(); 1261 CC_UNLOCK(cc); 1262 sleepq_add( 1263 &cc_exec_waiting(cc, direct), 1264 &cc->cc_lock.lock_object, "codrain", 1265 SLEEPQ_SLEEP, 0); 1266 sleepq_wait( 1267 &cc_exec_waiting(cc, direct), 1268 0); 1269 sq_locked = 0; 1270 old_cc = NULL; 1271 1272 /* Reacquire locks previously released. */ 1273 PICKUP_GIANT(); 1274 CC_LOCK(cc); 1275 } 1276 } else if (use_lock && 1277 !cc_exec_cancel(cc, direct)) { 1278 1279 /* 1280 * The current callout is waiting for its 1281 * lock which we hold. Cancel the callout 1282 * and return. After our caller drops the 1283 * lock, the callout will be skipped in 1284 * softclock(). 1285 */ 1286 cc_exec_cancel(cc, direct) = true; 1287 CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p", 1288 c, c->c_func, c->c_arg); 1289 KASSERT(!cc_cce_migrating(cc, direct), 1290 ("callout wrongly scheduled for migration")); 1291 if (callout_migrating(c)) { 1292 c->c_iflags &= ~CALLOUT_DFRMIGRATION; 1293#ifdef SMP 1294 cc_migration_cpu(cc, direct) = CPUBLOCK; 1295 cc_migration_time(cc, direct) = 0; 1296 cc_migration_prec(cc, direct) = 0; 1297 cc_migration_func(cc, direct) = NULL; 1298 cc_migration_arg(cc, direct) = NULL; 1299#endif 1300 } 1301 CC_UNLOCK(cc); 1302 KASSERT(!sq_locked, ("sleepqueue chain locked")); 1303 return (1); 1304 } else if (callout_migrating(c)) { 1305 /* 1306 * The callout is currently being serviced 1307 * and the "next" callout is scheduled at 1308 * its completion with a migration. We remove 1309 * the migration flag so it *won't* get rescheduled, 1310 * but we can't stop the one thats running so 1311 * we return 0. 1312 */ 1313 c->c_iflags &= ~CALLOUT_DFRMIGRATION; 1314#ifdef SMP 1315 /* 1316 * We can't call cc_cce_cleanup here since 1317 * if we do it will remove .ce_curr and 1318 * its still running. This will prevent a 1319 * reschedule of the callout when the 1320 * execution completes. 1321 */ 1322 cc_migration_cpu(cc, direct) = CPUBLOCK; 1323 cc_migration_time(cc, direct) = 0; 1324 cc_migration_prec(cc, direct) = 0; 1325 cc_migration_func(cc, direct) = NULL; 1326 cc_migration_arg(cc, direct) = NULL; 1327#endif 1328 CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p", 1329 c, c->c_func, c->c_arg); 1330 CC_UNLOCK(cc); 1331 return ((flags & CS_MIGRBLOCK) != 0); 1332 } 1333 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p", 1334 c, c->c_func, c->c_arg); 1335 CC_UNLOCK(cc); 1336 KASSERT(!sq_locked, ("sleepqueue chain still locked")); 1337 return (0); 1338 } 1339 if (sq_locked) 1340 sleepq_release(&cc_exec_waiting(cc, direct)); 1341 1342 c->c_iflags &= ~CALLOUT_PENDING; 1343 c->c_flags &= ~CALLOUT_ACTIVE; 1344 1345 CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p", 1346 c, c->c_func, c->c_arg); 1347 if (not_on_a_list == 0) { 1348 if ((c->c_iflags & CALLOUT_PROCESSED) == 0) { 1349 if (cc_exec_next(cc) == c) 1350 cc_exec_next(cc) = LIST_NEXT(c, c_links.le); 1351 LIST_REMOVE(c, c_links.le); 1352 } else { 1353 TAILQ_REMOVE(&cc->cc_expireq, c, c_links.tqe); 1354 } 1355 } 1356 callout_cc_del(c, cc); 1357 CC_UNLOCK(cc); 1358 return (1); 1359} 1360 1361void 1362callout_init(c, mpsafe) 1363 struct callout *c; 1364 int mpsafe; 1365{ 1366 bzero(c, sizeof *c); 1367 if (mpsafe) { 1368 c->c_lock = NULL; 1369 c->c_iflags = CALLOUT_RETURNUNLOCKED; 1370 } else { 1371 c->c_lock = &Giant.lock_object; 1372 c->c_iflags = 0; 1373 } 1374 c->c_cpu = timeout_cpu; 1375} 1376 1377void 1378_callout_init_lock(c, lock, flags) 1379 struct callout *c; 1380 struct lock_object *lock; 1381 int flags; 1382{ 1383 bzero(c, sizeof *c); 1384 c->c_lock = lock; 1385 KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0, 1386 ("callout_init_lock: bad flags %d", flags)); 1387 KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0, 1388 ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock")); 1389 KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags & 1390 (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class", 1391 __func__)); 1392 c->c_iflags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK); 1393 c->c_cpu = timeout_cpu; 1394} 1395 1396#ifdef APM_FIXUP_CALLTODO 1397/* 1398 * Adjust the kernel calltodo timeout list. This routine is used after 1399 * an APM resume to recalculate the calltodo timer list values with the 1400 * number of hz's we have been sleeping. The next hardclock() will detect 1401 * that there are fired timers and run softclock() to execute them. 1402 * 1403 * Please note, I have not done an exhaustive analysis of what code this 1404 * might break. I am motivated to have my select()'s and alarm()'s that 1405 * have expired during suspend firing upon resume so that the applications 1406 * which set the timer can do the maintanence the timer was for as close 1407 * as possible to the originally intended time. Testing this code for a 1408 * week showed that resuming from a suspend resulted in 22 to 25 timers 1409 * firing, which seemed independent on whether the suspend was 2 hours or 1410 * 2 days. Your milage may vary. - Ken Key <key@cs.utk.edu> 1411 */ 1412void 1413adjust_timeout_calltodo(time_change) 1414 struct timeval *time_change; 1415{ 1416 register struct callout *p; 1417 unsigned long delta_ticks; 1418 1419 /* 1420 * How many ticks were we asleep? 1421 * (stolen from tvtohz()). 1422 */ 1423 1424 /* Don't do anything */ 1425 if (time_change->tv_sec < 0) 1426 return; 1427 else if (time_change->tv_sec <= LONG_MAX / 1000000) 1428 delta_ticks = (time_change->tv_sec * 1000000 + 1429 time_change->tv_usec + (tick - 1)) / tick + 1; 1430 else if (time_change->tv_sec <= LONG_MAX / hz) 1431 delta_ticks = time_change->tv_sec * hz + 1432 (time_change->tv_usec + (tick - 1)) / tick + 1; 1433 else 1434 delta_ticks = LONG_MAX; 1435 1436 if (delta_ticks > INT_MAX) 1437 delta_ticks = INT_MAX; 1438 1439 /* 1440 * Now rip through the timer calltodo list looking for timers 1441 * to expire. 1442 */ 1443 1444 /* don't collide with softclock() */ 1445 CC_LOCK(cc); 1446 for (p = calltodo.c_next; p != NULL; p = p->c_next) { 1447 p->c_time -= delta_ticks; 1448 1449 /* Break if the timer had more time on it than delta_ticks */ 1450 if (p->c_time > 0) 1451 break; 1452 1453 /* take back the ticks the timer didn't use (p->c_time <= 0) */ 1454 delta_ticks = -p->c_time; 1455 } 1456 CC_UNLOCK(cc); 1457 1458 return; 1459} 1460#endif /* APM_FIXUP_CALLTODO */ 1461 1462static int 1463flssbt(sbintime_t sbt) 1464{ 1465 1466 sbt += (uint64_t)sbt >> 1; 1467 if (sizeof(long) >= sizeof(sbintime_t)) 1468 return (flsl(sbt)); 1469 if (sbt >= SBT_1S) 1470 return (flsl(((uint64_t)sbt) >> 32) + 32); 1471 return (flsl(sbt)); 1472} 1473 1474/* 1475 * Dump immediate statistic snapshot of the scheduled callouts. 1476 */ 1477static int 1478sysctl_kern_callout_stat(SYSCTL_HANDLER_ARGS) 1479{ 1480 struct callout *tmp; 1481 struct callout_cpu *cc; 1482 struct callout_list *sc; 1483 sbintime_t maxpr, maxt, medpr, medt, now, spr, st, t; 1484 int ct[64], cpr[64], ccpbk[32]; 1485 int error, val, i, count, tcum, pcum, maxc, c, medc; 1486#ifdef SMP 1487 int cpu; 1488#endif 1489 1490 val = 0; 1491 error = sysctl_handle_int(oidp, &val, 0, req); 1492 if (error != 0 || req->newptr == NULL) 1493 return (error); 1494 count = maxc = 0; 1495 st = spr = maxt = maxpr = 0; 1496 bzero(ccpbk, sizeof(ccpbk)); 1497 bzero(ct, sizeof(ct)); 1498 bzero(cpr, sizeof(cpr)); 1499 now = sbinuptime(); 1500#ifdef SMP 1501 CPU_FOREACH(cpu) { 1502 cc = CC_CPU(cpu); 1503#else 1504 cc = CC_CPU(timeout_cpu); 1505#endif 1506 CC_LOCK(cc); 1507 for (i = 0; i < callwheelsize; i++) { 1508 sc = &cc->cc_callwheel[i]; 1509 c = 0; 1510 LIST_FOREACH(tmp, sc, c_links.le) { 1511 c++; 1512 t = tmp->c_time - now; 1513 if (t < 0) 1514 t = 0; 1515 st += t / SBT_1US; 1516 spr += tmp->c_precision / SBT_1US; 1517 if (t > maxt) 1518 maxt = t; 1519 if (tmp->c_precision > maxpr) 1520 maxpr = tmp->c_precision; 1521 ct[flssbt(t)]++; 1522 cpr[flssbt(tmp->c_precision)]++; 1523 } 1524 if (c > maxc) 1525 maxc = c; 1526 ccpbk[fls(c + c / 2)]++; 1527 count += c; 1528 } 1529 CC_UNLOCK(cc); 1530#ifdef SMP 1531 } 1532#endif 1533 1534 for (i = 0, tcum = 0; i < 64 && tcum < count / 2; i++) 1535 tcum += ct[i]; 1536 medt = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0; 1537 for (i = 0, pcum = 0; i < 64 && pcum < count / 2; i++) 1538 pcum += cpr[i]; 1539 medpr = (i >= 2) ? (((sbintime_t)1) << (i - 2)) : 0; 1540 for (i = 0, c = 0; i < 32 && c < count / 2; i++) 1541 c += ccpbk[i]; 1542 medc = (i >= 2) ? (1 << (i - 2)) : 0; 1543 1544 printf("Scheduled callouts statistic snapshot:\n"); 1545 printf(" Callouts: %6d Buckets: %6d*%-3d Bucket size: 0.%06ds\n", 1546 count, callwheelsize, mp_ncpus, 1000000 >> CC_HASH_SHIFT); 1547 printf(" C/Bk: med %5d avg %6d.%06jd max %6d\n", 1548 medc, 1549 count / callwheelsize / mp_ncpus, 1550 (uint64_t)count * 1000000 / callwheelsize / mp_ncpus % 1000000, 1551 maxc); 1552 printf(" Time: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n", 1553 medt / SBT_1S, (medt & 0xffffffff) * 1000000 >> 32, 1554 (st / count) / 1000000, (st / count) % 1000000, 1555 maxt / SBT_1S, (maxt & 0xffffffff) * 1000000 >> 32); 1556 printf(" Prec: med %5jd.%06jds avg %6jd.%06jds max %6jd.%06jds\n", 1557 medpr / SBT_1S, (medpr & 0xffffffff) * 1000000 >> 32, 1558 (spr / count) / 1000000, (spr / count) % 1000000, 1559 maxpr / SBT_1S, (maxpr & 0xffffffff) * 1000000 >> 32); 1560 printf(" Distribution: \tbuckets\t time\t tcum\t" 1561 " prec\t pcum\n"); 1562 for (i = 0, tcum = pcum = 0; i < 64; i++) { 1563 if (ct[i] == 0 && cpr[i] == 0) 1564 continue; 1565 t = (i != 0) ? (((sbintime_t)1) << (i - 1)) : 0; 1566 tcum += ct[i]; 1567 pcum += cpr[i]; 1568 printf(" %10jd.%06jds\t 2**%d\t%7d\t%7d\t%7d\t%7d\n", 1569 t / SBT_1S, (t & 0xffffffff) * 1000000 >> 32, 1570 i - 1 - (32 - CC_HASH_SHIFT), 1571 ct[i], tcum, cpr[i], pcum); 1572 } 1573 return (error); 1574} 1575SYSCTL_PROC(_kern, OID_AUTO, callout_stat, 1576 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 1577 0, 0, sysctl_kern_callout_stat, "I", 1578 "Dump immediate statistic snapshot of the scheduled callouts"); 1579