/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_timeout.c 234952 2012-05-03 10:38:02Z kib $");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sdt.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>
#include <sys/smp.h>

#ifdef SMP
#include <machine/cpu.h>
#endif

SDT_PROVIDER_DEFINE(callout_execute);
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_start, callout-start);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_start, 0,
    "struct callout *");
SDT_PROBE_DEFINE(callout_execute, kernel, , callout_end, callout-end);
SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_end, 0,
    "struct callout *");

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_lockcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0,
    "Average number of lock callouts made per softclock call. Units = 1/1000");
Units = 1/1000"); 80static int avg_mpcalls; 81SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0, 82 "Average number of MP callouts made per softclock call. Units = 1/1000"); 83/* 84 * TODO: 85 * allocate more timeout table slots when table overflows. 86 */ 87int callwheelsize, callwheelbits, callwheelmask; 88 89/* 90 * The callout cpu migration entity represents informations necessary for 91 * describing the migrating callout to the new callout cpu. 92 * The cached informations are very important for deferring migration when 93 * the migrating callout is already running. 94 */ 95struct cc_mig_ent { 96#ifdef SMP 97 void (*ce_migration_func)(void *); 98 void *ce_migration_arg; 99 int ce_migration_cpu; 100 int ce_migration_ticks; 101#endif 102}; 103 104/* 105 * There is one struct callout_cpu per cpu, holding all relevant 106 * state for the callout processing thread on the individual CPU. 107 * In particular: 108 * cc_ticks is incremented once per tick in callout_cpu(). 109 * It tracks the global 'ticks' but in a way that the individual 110 * threads should not worry about races in the order in which 111 * hardclock() and hardclock_cpu() run on the various CPUs. 112 * cc_softclock is advanced in callout_cpu() to point to the 113 * first entry in cc_callwheel that may need handling. In turn, 114 * a softclock() is scheduled so it can serve the various entries i 115 * such that cc_softclock <= i <= cc_ticks . 116 * XXX maybe cc_softclock and cc_ticks should be volatile ? 117 * 118 * cc_ticks is also used in callout_reset_cpu() to determine 119 * when the callout should be served. 120 */ 121struct callout_cpu { 122 struct cc_mig_ent cc_migrating_entity; 123 struct mtx cc_lock; 124 struct callout *cc_callout; 125 struct callout_tailq *cc_callwheel; 126 struct callout_list cc_callfree; 127 struct callout *cc_next; 128 struct callout *cc_curr; 129 void *cc_cookie; 130 int cc_ticks; 131 int cc_softticks; 132 int cc_cancel; 133 int cc_waiting; 134 int cc_firsttick; 135}; 136 137#ifdef SMP 138#define cc_migration_func cc_migrating_entity.ce_migration_func 139#define cc_migration_arg cc_migrating_entity.ce_migration_arg 140#define cc_migration_cpu cc_migrating_entity.ce_migration_cpu 141#define cc_migration_ticks cc_migrating_entity.ce_migration_ticks 142 143struct callout_cpu cc_cpu[MAXCPU]; 144#define CPUBLOCK MAXCPU 145#define CC_CPU(cpu) (&cc_cpu[(cpu)]) 146#define CC_SELF() CC_CPU(PCPU_GET(cpuid)) 147#else 148struct callout_cpu cc_cpu; 149#define CC_CPU(cpu) &cc_cpu 150#define CC_SELF() &cc_cpu 151#endif 152#define CC_LOCK(cc) mtx_lock_spin(&(cc)->cc_lock) 153#define CC_UNLOCK(cc) mtx_unlock_spin(&(cc)->cc_lock) 154#define CC_LOCK_ASSERT(cc) mtx_assert(&(cc)->cc_lock, MA_OWNED) 155 156static int timeout_cpu; 157void (*callout_new_inserted)(int cpu, int ticks) = NULL; 158 159static MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures"); 160 161/** 162 * Locked by cc_lock: 163 * cc_curr - If a callout is in progress, it is curr_callout. 164 * If curr_callout is non-NULL, threads waiting in 165 * callout_drain() will be woken up as soon as the 166 * relevant callout completes. 167 * cc_cancel - Changing to 1 with both callout_lock and c_lock held 168 * guarantees that the current callout will not run. 169 * The softclock() function sets this to 0 before it 170 * drops callout_lock to acquire c_lock, and it calls 171 * the handler only if curr_cancelled is still 0 after 172 * c_lock is successfully acquired. 

/*
 * Resets the migration entity tied to a specific callout cpu.
 */
static void
cc_cme_cleanup(struct callout_cpu *cc)
{

#ifdef SMP
        cc->cc_migration_cpu = CPUBLOCK;
        cc->cc_migration_ticks = 0;
        cc->cc_migration_func = NULL;
        cc->cc_migration_arg = NULL;
#endif
}

/*
 * Checks if migration is requested by a specific callout cpu.
 */
static int
cc_cme_migrating(struct callout_cpu *cc)
{

#ifdef SMP
        return (cc->cc_migration_cpu != CPUBLOCK);
#else
        return (0);
#endif
}

/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 * This code is called very early in the kernel initialization sequence,
 * and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
        struct callout_cpu *cc;

        timeout_cpu = PCPU_GET(cpuid);
        cc = CC_CPU(timeout_cpu);
        /*
         * Calculate callout wheel size.
         */
        for (callwheelsize = 1, callwheelbits = 0;
             callwheelsize < ncallout;
             callwheelsize <<= 1, ++callwheelbits)
                ;
        callwheelmask = callwheelsize - 1;

        cc->cc_callout = (struct callout *)v;
        v = (caddr_t)(cc->cc_callout + ncallout);
        cc->cc_callwheel = (struct callout_tailq *)v;
        v = (caddr_t)(cc->cc_callwheel + callwheelsize);
        return (v);
}
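
/*
 * For reference: the sizing loop above rounds ncallout up to a power of
 * two.  For example, with ncallout == 1000 it leaves callwheelsize ==
 * 1024, callwheelbits == 10 and callwheelmask == 0x3ff, so a callout due
 * at tick t lands in bucket (t & callwheelmask).
 */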

static void
callout_cpu_init(struct callout_cpu *cc)
{
        struct callout *c;
        int i;

        mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
        SLIST_INIT(&cc->cc_callfree);
        for (i = 0; i < callwheelsize; i++) {
                TAILQ_INIT(&cc->cc_callwheel[i]);
        }
        cc_cme_cleanup(cc);
        if (cc->cc_callout == NULL)
                return;
        for (i = 0; i < ncallout; i++) {
                c = &cc->cc_callout[i];
                callout_init(c, 0);
                c->c_flags = CALLOUT_LOCAL_ALLOC;
                SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
        }
}

#ifdef SMP
/*
 * Switches the cpu tied to a specific callout.
 * The function expects a locked incoming callout cpu and returns with the
 * outgoing callout cpu locked.
 */
static struct callout_cpu *
callout_cpu_switch(struct callout *c, struct callout_cpu *cc, int new_cpu)
{
        struct callout_cpu *new_cc;

        MPASS(c != NULL && cc != NULL);
        CC_LOCK_ASSERT(cc);

        /*
         * Avoid interrupts and preemption firing after the callout cpu
         * is blocked in order to avoid deadlocks as the new thread
         * may try to acquire the callout cpu lock.
         */
        c->c_cpu = CPUBLOCK;
        spinlock_enter();
        CC_UNLOCK(cc);
        new_cc = CC_CPU(new_cpu);
        CC_LOCK(new_cc);
        spinlock_exit();
        c->c_cpu = new_cpu;
        return (new_cc);
}
#endif

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 * This code is called just once, after the space reserved for the
 * callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
        callout_cpu_init(CC_CPU(timeout_cpu));
}

/*
 * Start standard softclock thread.
 */
static void
start_softclock(void *dummy)
{
        struct callout_cpu *cc;
#ifdef SMP
        int cpu;
#endif

        cc = CC_CPU(timeout_cpu);
        if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK,
            INTR_MPSAFE, &cc->cc_cookie))
                panic("died while creating standard software ithreads");
#ifdef SMP
        CPU_FOREACH(cpu) {
                if (cpu == timeout_cpu)
                        continue;
                cc = CC_CPU(cpu);
                if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK,
                    INTR_MPSAFE, &cc->cc_cookie))
                        panic("died while creating standard software ithreads");
                cc->cc_callout = NULL;  /* Only cpu0 handles timeout(). */
                cc->cc_callwheel = malloc(
                    sizeof(struct callout_tailq) * callwheelsize, M_CALLOUT,
                    M_WAITOK);
                callout_cpu_init(cc);
        }
#endif
}

SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL);

void
callout_tick(void)
{
        struct callout_cpu *cc;
        int need_softclock;
        int bucket;

        /*
         * Process callouts at a very low cpu priority, so we don't keep the
         * relatively high clock interrupt priority any longer than necessary.
         */
        need_softclock = 0;
        cc = CC_SELF();
        mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
        cc->cc_firsttick = cc->cc_ticks = ticks;
        for (; (cc->cc_softticks - cc->cc_ticks) <= 0; cc->cc_softticks++) {
                bucket = cc->cc_softticks & callwheelmask;
                if (!TAILQ_EMPTY(&cc->cc_callwheel[bucket])) {
                        need_softclock = 1;
                        break;
                }
        }
        mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
        /*
         * swi_sched acquires the thread lock, so we don't want to call it
         * with cc_lock held; incorrect locking order.
         */
        if (need_softclock)
                swi_sched(cc->cc_cookie, 0);
}

int
callout_tickstofirst(int limit)
{
        struct callout_cpu *cc;
        struct callout *c;
        struct callout_tailq *sc;
        int curticks;
        int skip = 1;

        cc = CC_SELF();
        mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET);
        curticks = cc->cc_ticks;
        while (skip < ncallout && skip < limit) {
                sc = &cc->cc_callwheel[(curticks + skip) & callwheelmask];
                /* search scanning ticks */
                TAILQ_FOREACH(c, sc, c_links.tqe) {
                        if (c->c_time - curticks <= ncallout)
                                goto out;
                }
                skip++;
        }
out:
        cc->cc_firsttick = curticks + skip;
        mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET);
        return (skip);
}

static struct callout_cpu *
callout_lock(struct callout *c)
{
        struct callout_cpu *cc;
        int cpu;

        for (;;) {
                cpu = c->c_cpu;
#ifdef SMP
                if (cpu == CPUBLOCK) {
                        while (c->c_cpu == CPUBLOCK)
                                cpu_spinwait();
                        continue;
                }
#endif
                cc = CC_CPU(cpu);
                CC_LOCK(cc);
                if (cpu == c->c_cpu)
                        break;
                CC_UNLOCK(cc);
        }
        return (cc);
}

static void
callout_cc_add(struct callout *c, struct callout_cpu *cc, int to_ticks,
    void (*func)(void *), void *arg, int cpu)
{

        CC_LOCK_ASSERT(cc);

        if (to_ticks <= 0)
                to_ticks = 1;
        c->c_arg = arg;
        c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
        c->c_func = func;
        c->c_time = ticks + to_ticks;
        TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask],
            c, c_links.tqe);
        if ((c->c_time - cc->cc_firsttick) < 0 &&
            callout_new_inserted != NULL) {
                cc->cc_firsttick = c->c_time;
                (*callout_new_inserted)(cpu,
                    to_ticks + (ticks - cc->cc_ticks));
        }
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */
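
/*
 * Note on the wheel (illustrative, derived from the code below): this is
 * a hashed wheel, not a hierarchical one, so a bucket can hold callouts
 * whose expiry times differ by a multiple of callwheelsize ticks.  That
 * is why softclock() compares c_time against the tick that owns the
 * bucket and skips entries that merely hash to it:
 *
 *	bucket = &cc->cc_callwheel[curticks & callwheelmask];
 *	TAILQ_FOREACH(c, bucket, c_links.tqe)
 *		if (c->c_time == curticks)
 *			// due now: remove and run it
 *		else
 *			// due some laps later: leave it queued
 */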

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *arg)
{
        struct callout_cpu *cc;
        struct callout *c;
        struct callout_tailq *bucket;
        int curticks;
        int steps;      /* #steps since we last allowed interrupts */
        int depth;
        int mpcalls;
        int lockcalls;
        int gcalls;
#ifdef DIAGNOSTIC
        struct bintime bt1, bt2;
        struct timespec ts2;
        static uint64_t maxdt = 36893488147419102LL;    /* 2 msec */
        static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

        mpcalls = 0;
        lockcalls = 0;
        gcalls = 0;
        depth = 0;
        steps = 0;
        cc = (struct callout_cpu *)arg;
        CC_LOCK(cc);
        while (cc->cc_softticks - 1 != cc->cc_ticks) {
                /*
                 * cc_softticks may be modified by hard clock, so cache
                 * it while we work on a given bucket.
                 */
                curticks = cc->cc_softticks;
                cc->cc_softticks++;
                bucket = &cc->cc_callwheel[curticks & callwheelmask];
                c = TAILQ_FIRST(bucket);
                while (c) {
                        depth++;
                        if (c->c_time != curticks) {
                                c = TAILQ_NEXT(c, c_links.tqe);
                                ++steps;
                                if (steps >= MAX_SOFTCLOCK_STEPS) {
                                        cc->cc_next = c;
                                        /* Give interrupts a chance. */
                                        CC_UNLOCK(cc);
                                        ;       /* nothing */
                                        CC_LOCK(cc);
                                        c = cc->cc_next;
                                        steps = 0;
                                }
                        } else {
                                void (*c_func)(void *);
                                void *c_arg;
                                struct lock_class *class;
                                struct lock_object *c_lock;
                                int c_flags, sharedlock;

                                cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
                                TAILQ_REMOVE(bucket, c, c_links.tqe);
                                class = (c->c_lock != NULL) ?
                                    LOCK_CLASS(c->c_lock) : NULL;
                                sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ?
                                    0 : 1;
                                c_lock = c->c_lock;
                                c_func = c->c_func;
                                c_arg = c->c_arg;
                                c_flags = c->c_flags;
                                if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
                                        c->c_flags = CALLOUT_LOCAL_ALLOC;
                                } else {
                                        c->c_flags =
                                            (c->c_flags & ~CALLOUT_PENDING);
                                }
                                cc->cc_curr = c;
                                cc->cc_cancel = 0;
                                CC_UNLOCK(cc);
                                if (c_lock != NULL) {
                                        class->lc_lock(c_lock, sharedlock);
                                        /*
                                         * The callout may have been cancelled
                                         * while we switched locks.
                                         */
                                        if (cc->cc_cancel) {
                                                class->lc_unlock(c_lock);
                                                goto skip;
                                        }
                                        /* The callout cannot be stopped now. */
                                        cc->cc_cancel = 1;

                                        if (c_lock == &Giant.lock_object) {
                                                gcalls++;
                                                CTR3(KTR_CALLOUT,
                                                    "callout %p func %p arg %p",
                                                    c, c_func, c_arg);
                                        } else {
                                                lockcalls++;
                                                CTR3(KTR_CALLOUT, "callout lock"
                                                    " %p func %p arg %p",
                                                    c, c_func, c_arg);
                                        }
                                } else {
                                        mpcalls++;
                                        CTR3(KTR_CALLOUT,
                                            "callout mpsafe %p func %p arg %p",
                                            c, c_func, c_arg);
                                }
#ifdef DIAGNOSTIC
                                binuptime(&bt1);
#endif
                                THREAD_NO_SLEEPING();
                                SDT_PROBE(callout_execute, kernel, ,
                                    callout_start, c, 0, 0, 0, 0);
                                c_func(c_arg);
                                SDT_PROBE(callout_execute, kernel, ,
                                    callout_end, c, 0, 0, 0, 0);
                                THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
                                binuptime(&bt2);
                                bintime_sub(&bt2, &bt1);
                                if (bt2.frac > maxdt) {
                                        if (lastfunc != c_func ||
                                            bt2.frac > maxdt * 2) {
                                                bintime2timespec(&bt2, &ts2);
                                                printf(
                        "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
                                                    c_func, c_arg,
                                                    (intmax_t)ts2.tv_sec,
                                                    ts2.tv_nsec);
                                        }
                                        maxdt = bt2.frac;
                                        lastfunc = c_func;
                                }
#endif
                                CTR1(KTR_CALLOUT, "callout %p finished", c);
                                if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
                                        class->lc_unlock(c_lock);
                        skip:
                                CC_LOCK(cc);
                                /*
                                 * If the current callout is locally
                                 * allocated (from timeout(9))
                                 * then put it on the freelist.
                                 *
                                 * Note: we need to check the cached
                                 * copy of c_flags because if it was not
                                 * local, then it's not safe to deref the
                                 * callout pointer.
                                 */
                                if (c_flags & CALLOUT_LOCAL_ALLOC) {
                                        KASSERT(c->c_flags ==
                                            CALLOUT_LOCAL_ALLOC,
                                            ("corrupted callout"));
                                        c->c_func = NULL;
                                        SLIST_INSERT_HEAD(&cc->cc_callfree, c,
                                            c_links.sle);
                                }
                                cc->cc_curr = NULL;
                                if (cc->cc_waiting) {
                                        /*
                                         * There is someone waiting for the
                                         * callout to complete.
                                         * If the callout was scheduled for
                                         * migration just cancel it.
                                         */
                                        if (cc_cme_migrating(cc))
                                                cc_cme_cleanup(cc);
                                        cc->cc_waiting = 0;
                                        CC_UNLOCK(cc);
                                        wakeup(&cc->cc_waiting);
                                        CC_LOCK(cc);
                                } else if (cc_cme_migrating(cc)) {
#ifdef SMP
                                        struct callout_cpu *new_cc;
                                        void (*new_func)(void *);
                                        void *new_arg;
                                        int new_cpu, new_ticks;

                                        /*
                                         * If the callout was scheduled for
                                         * migration just perform it now.
                                         */
                                        new_cpu = cc->cc_migration_cpu;
                                        new_ticks = cc->cc_migration_ticks;
                                        new_func = cc->cc_migration_func;
                                        new_arg = cc->cc_migration_arg;
                                        cc_cme_cleanup(cc);

                                        /*
                                         * Handle deferred callout stops.
                                         */
                                        if ((c->c_flags & CALLOUT_DFRMIGRATION)
                                            == 0) {
                                                CTR3(KTR_CALLOUT,
                                "deferred cancelled %p func %p arg %p",
                                                    c, new_func, new_arg);
                                                if (cc->cc_next == c) {
                                                        cc->cc_next =
                                                            TAILQ_NEXT(c,
                                                            c_links.tqe);
                                                }
                                                if (c->c_flags &
                                                    CALLOUT_LOCAL_ALLOC) {
                                                        c->c_func = NULL;
                                                        SLIST_INSERT_HEAD(
                                                            &cc->cc_callfree, c,
                                                            c_links.sle);
                                                }
                                                goto nextc;
                                        } else {
                                                c->c_flags &=
                                                    ~CALLOUT_DFRMIGRATION;
                                        }

                                        /*
                                         * It would be nice to assert here
                                         * that the callout is not destroyed,
                                         * but that is not easy.
                                         */
                                        new_cc = callout_cpu_switch(c, cc,
                                            new_cpu);
                                        callout_cc_add(c, new_cc, new_ticks,
                                            new_func, new_arg, new_cpu);
                                        CC_UNLOCK(new_cc);
                                        CC_LOCK(cc);
#else
                                        panic("migration should not happen");
#endif
                                }
#ifdef SMP
                        nextc:
#endif
                                steps = 0;
                                c = cc->cc_next;
                        }
                }
        }
        avg_depth += (depth * 1000 - avg_depth) >> 8;
        avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
        avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8;
        avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
        cc->cc_next = NULL;
        CC_UNLOCK(cc);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
struct callout_handle
timeout(ftn, arg, to_ticks)
        timeout_t *ftn;
        void *arg;
        int to_ticks;
{
        struct callout_cpu *cc;
        struct callout *new;
        struct callout_handle handle;

        cc = CC_CPU(timeout_cpu);
        CC_LOCK(cc);
        /* Fill in the next free callout structure. */
        new = SLIST_FIRST(&cc->cc_callfree);
        if (new == NULL)
                /* XXX Attempt to malloc first */
                panic("timeout table full");
        SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
        callout_reset(new, to_ticks, ftn, arg);
        handle.callout = new;
        CC_UNLOCK(cc);

        return (handle);
}

void
untimeout(ftn, arg, handle)
        timeout_t *ftn;
        void *arg;
        struct callout_handle handle;
{
        struct callout_cpu *cc;

        /*
         * Check for a handle that was initialized
         * by callout_handle_init, but never used
         * for a real timeout.
         */
        if (handle.callout == NULL)
                return;

        cc = callout_lock(handle.callout);
        if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
                callout_stop(handle.callout);
        CC_UNLOCK(cc);
}

void
callout_handle_init(struct callout_handle *handle)
{
        handle->callout = NULL;
}

/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
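
/*
 * Illustrative usage sketch (not part of this file; the "foo" names are
 * hypothetical).  A driver typically arms a periodic callout protected by
 * its own mutex and tears it down with callout_drain():
 *
 *	struct foo_softc {
 *		struct mtx	sc_mtx;
 *		struct callout	sc_tick;
 *	};
 *
 *	static void
 *	foo_tick(void *arg)
 *	{
 *		struct foo_softc *sc = arg;	// sc_mtx is held on entry
 *
 *		callout_deactivate(&sc->sc_tick);	// mark as serviced
 *		// ... periodic work ...
 *		callout_reset(&sc->sc_tick, hz, foo_tick, sc);	// rearm
 *	}
 *
 *	// attach:
 *	callout_init_mtx(&sc->sc_tick, &sc->sc_mtx, 0);
 *	callout_reset(&sc->sc_tick, hz, foo_tick, sc);
 *	// detach: wait for a running handler to finish
 *	callout_drain(&sc->sc_tick);
 */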

int
callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *),
    void *arg, int cpu)
{
        struct callout_cpu *cc;
        int cancelled = 0;

        /*
         * Don't allow migration of pre-allocated callouts lest they
         * become unbalanced.
         */
        if (c->c_flags & CALLOUT_LOCAL_ALLOC)
                cpu = c->c_cpu;
        cc = callout_lock(c);
        if (cc->cc_curr == c) {
                /*
                 * We're being asked to reschedule a callout which is
                 * currently in progress.  If there is a lock then we
                 * can cancel the callout if it has not really started.
                 */
                if (c->c_lock != NULL && !cc->cc_cancel)
                        cancelled = cc->cc_cancel = 1;
                if (cc->cc_waiting) {
                        /*
                         * Someone has called callout_drain to kill this
                         * callout.  Don't reschedule.
                         */
                        CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
                            cancelled ? "cancelled" : "failed to cancel",
                            c, c->c_func, c->c_arg);
                        CC_UNLOCK(cc);
                        return (cancelled);
                }
        }
        if (c->c_flags & CALLOUT_PENDING) {
                if (cc->cc_next == c) {
                        cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
                }
                TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
                    c_links.tqe);

                cancelled = 1;
                c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);
        }

#ifdef SMP
        /*
         * If the callout must migrate try to perform it immediately.
         * If the callout is currently running, just defer the migration
         * to a more appropriate moment.
         */
        if (c->c_cpu != cpu) {
                if (cc->cc_curr == c) {
                        cc->cc_migration_cpu = cpu;
                        cc->cc_migration_ticks = to_ticks;
                        cc->cc_migration_func = ftn;
                        cc->cc_migration_arg = arg;
                        c->c_flags |= CALLOUT_DFRMIGRATION;
                        CTR5(KTR_CALLOUT,
                            "migration of %p func %p arg %p in %d to %u deferred",
                            c, c->c_func, c->c_arg, to_ticks, cpu);
                        CC_UNLOCK(cc);
                        return (cancelled);
                }
                cc = callout_cpu_switch(c, cc, cpu);
        }
#endif

        callout_cc_add(c, cc, to_ticks, ftn, arg, cpu);
        CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
            cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
        CC_UNLOCK(cc);

        return (cancelled);
}

/*
 * Common idioms that can be optimized in the future.
 */
int
callout_schedule_on(struct callout *c, int to_ticks, int cpu)
{
        return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu);
}

int
callout_schedule(struct callout *c, int to_ticks)
{
        return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
}
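
/*
 * Note (per <sys/callout.h> of this vintage): callout_stop(c) expands to
 * _callout_stop_safe(c, 0) and callout_drain(c) to _callout_stop_safe(c, 1),
 * so "safe" below selects whether we may sleep waiting for a running
 * handler to finish.
 */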
int
_callout_stop_safe(c, safe)
        struct callout *c;
        int safe;
{
        struct callout_cpu *cc, *old_cc;
        struct lock_class *class;
        int use_lock, sq_locked;

        /*
         * Some old subsystems don't hold Giant while running a callout_stop(),
         * so just discard this check for the moment.
         */
        if (!safe && c->c_lock != NULL) {
                if (c->c_lock == &Giant.lock_object)
                        use_lock = mtx_owned(&Giant);
                else {
                        use_lock = 1;
                        class = LOCK_CLASS(c->c_lock);
                        class->lc_assert(c->c_lock, LA_XLOCKED);
                }
        } else
                use_lock = 0;

        sq_locked = 0;
        old_cc = NULL;
again:
        cc = callout_lock(c);

        /*
         * If the callout was migrating while the callout cpu lock was
         * dropped, just drop the sleepqueue lock and check the states
         * again.
         */
        if (sq_locked != 0 && cc != old_cc) {
#ifdef SMP
                CC_UNLOCK(cc);
                sleepq_release(&old_cc->cc_waiting);
                sq_locked = 0;
                old_cc = NULL;
                goto again;
#else
                panic("migration should not happen");
#endif
        }

        /*
         * If the callout isn't pending, it's not on the queue, so
         * don't attempt to remove it from the queue.  We can try to
         * stop it by other means however.
         */
        if (!(c->c_flags & CALLOUT_PENDING)) {
                c->c_flags &= ~CALLOUT_ACTIVE;

                /*
                 * If it wasn't on the queue and it isn't the current
                 * callout, then we can't stop it, so just bail.
                 */
                if (cc->cc_curr != c) {
                        CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
                            c, c->c_func, c->c_arg);
                        CC_UNLOCK(cc);
                        if (sq_locked)
                                sleepq_release(&cc->cc_waiting);
                        return (0);
                }

                if (safe) {
                        /*
                         * The current callout is running (or just
                         * about to run) and blocking is allowed, so
                         * just wait for the current invocation to
                         * finish.
                         */
                        while (cc->cc_curr == c) {
                                /*
                                 * Use direct calls to sleepqueue interface
                                 * instead of cv/msleep in order to avoid
                                 * a LOR between cc_lock and sleepqueue
                                 * chain spinlocks.  This piece of code
                                 * emulates a msleep_spin() call actually.
                                 *
                                 * If we already have the sleepqueue chain
                                 * locked, then we can safely block.  If we
                                 * don't already have it locked, however,
                                 * we have to drop the cc_lock to lock
                                 * it.  This opens several races, so we
                                 * restart at the beginning once we have
                                 * both locks.  If nothing has changed, then
                                 * we will end up back here with sq_locked
                                 * set.
                                 */
                                if (!sq_locked) {
                                        CC_UNLOCK(cc);
                                        sleepq_lock(&cc->cc_waiting);
                                        sq_locked = 1;
                                        old_cc = cc;
                                        goto again;
                                }

                                /*
                                 * The migration could be cancelled here,
                                 * but as long as it is not yet known when
                                 * it will actually take place, just let
                                 * softclock() take care of it.
                                 */
                                cc->cc_waiting = 1;
                                DROP_GIANT();
                                CC_UNLOCK(cc);
                                sleepq_add(&cc->cc_waiting,
                                    &cc->cc_lock.lock_object, "codrain",
                                    SLEEPQ_SLEEP, 0);
                                sleepq_wait(&cc->cc_waiting, 0);
                                sq_locked = 0;
                                old_cc = NULL;

                                /* Reacquire locks previously released. */
                                PICKUP_GIANT();
                                CC_LOCK(cc);
                        }
                } else if (use_lock && !cc->cc_cancel) {
                        /*
                         * The current callout is waiting for its
                         * lock which we hold.  Cancel the callout
                         * and return.  After our caller drops the
                         * lock, the callout will be skipped in
                         * softclock().
                         */
                        cc->cc_cancel = 1;
                        CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
                            c, c->c_func, c->c_arg);
                        KASSERT(!cc_cme_migrating(cc),
                            ("callout wrongly scheduled for migration"));
                        CC_UNLOCK(cc);
                        KASSERT(!sq_locked, ("sleepqueue chain locked"));
                        return (1);
                } else if ((c->c_flags & CALLOUT_DFRMIGRATION) != 0) {
                        c->c_flags &= ~CALLOUT_DFRMIGRATION;
                        CTR3(KTR_CALLOUT, "postponing stop %p func %p arg %p",
                            c, c->c_func, c->c_arg);
                        CC_UNLOCK(cc);
                        return (1);
                }
                CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
                    c, c->c_func, c->c_arg);
                CC_UNLOCK(cc);
                KASSERT(!sq_locked, ("sleepqueue chain still locked"));
                return (0);
        }
        if (sq_locked)
                sleepq_release(&cc->cc_waiting);

        c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

        if (cc->cc_next == c) {
                cc->cc_next = TAILQ_NEXT(c, c_links.tqe);
        }
        TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c,
            c_links.tqe);

        CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
            c, c->c_func, c->c_arg);

        if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
                c->c_func = NULL;
                SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle);
        }
        CC_UNLOCK(cc);
        return (1);
}

void
callout_init(c, mpsafe)
        struct callout *c;
        int mpsafe;
{
        bzero(c, sizeof *c);
        if (mpsafe) {
                c->c_lock = NULL;
                c->c_flags = CALLOUT_RETURNUNLOCKED;
        } else {
                c->c_lock = &Giant.lock_object;
                c->c_flags = 0;
        }
        c->c_cpu = timeout_cpu;
}

void
_callout_init_lock(c, lock, flags)
        struct callout *c;
        struct lock_object *lock;
        int flags;
{
        bzero(c, sizeof *c);
        c->c_lock = lock;
        KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0,
            ("callout_init_lock: bad flags %d", flags));
        KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
            ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock"));
        KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags &
            (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
            __func__));
        c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
        c->c_cpu = timeout_cpu;
}

#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
        struct timeval *time_change;
{
        register struct callout *p;
        unsigned long delta_ticks;

        /*
         * How many ticks were we asleep?
         * (stolen from tvtohz()).
         */

        /* Don't do anything */
        if (time_change->tv_sec < 0)
                return;
        else if (time_change->tv_sec <= LONG_MAX / 1000000)
                delta_ticks = (time_change->tv_sec * 1000000 +
                    time_change->tv_usec + (tick - 1)) / tick + 1;
        else if (time_change->tv_sec <= LONG_MAX / hz)
                delta_ticks = time_change->tv_sec * hz +
                    (time_change->tv_usec + (tick - 1)) / tick + 1;
        else
                delta_ticks = LONG_MAX;

        if (delta_ticks > INT_MAX)
                delta_ticks = INT_MAX;

        /*
         * Now rip through the timer calltodo list looking for timers
         * to expire.
         */

        /* don't collide with softclock() */
        CC_LOCK(cc);
        for (p = calltodo.c_next; p != NULL; p = p->c_next) {
                p->c_time -= delta_ticks;

                /* Break if the timer had more time on it than delta_ticks */
                if (p->c_time > 0)
                        break;

                /* take back the ticks the timer didn't use (p->c_time <= 0) */
                delta_ticks = -p->c_time;
        }
        CC_UNLOCK(cc);

        return;
}
#endif /* APM_FIXUP_CALLTODO */