kern_timeout.c revision 212541
1/*- 2 * Copyright (c) 1982, 1986, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * (c) UNIX System Laboratories, Inc. 5 * All or some portions of this file are derived from material licensed 6 * to the University of California by American Telephone and Telegraph 7 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 8 * the permission of UNIX System Laboratories, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 32 * SUCH DAMAGE. 33 * 34 * From: @(#)kern_clock.c 8.5 (Berkeley) 1/21/94 35 */ 36 37#include <sys/cdefs.h> 38__FBSDID("$FreeBSD: head/sys/kern/kern_timeout.c 212541 2010-09-13 07:25:35Z mav $"); 39 40#include "opt_kdtrace.h" 41 42#include <sys/param.h> 43#include <sys/systm.h> 44#include <sys/bus.h> 45#include <sys/callout.h> 46#include <sys/condvar.h> 47#include <sys/interrupt.h> 48#include <sys/kernel.h> 49#include <sys/ktr.h> 50#include <sys/lock.h> 51#include <sys/malloc.h> 52#include <sys/mutex.h> 53#include <sys/proc.h> 54#include <sys/sdt.h> 55#include <sys/sleepqueue.h> 56#include <sys/sysctl.h> 57#include <sys/smp.h> 58 59SDT_PROVIDER_DEFINE(callout_execute); 60SDT_PROBE_DEFINE(callout_execute, kernel, , callout_start, callout-start); 61SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_start, 0, 62 "struct callout *"); 63SDT_PROBE_DEFINE(callout_execute, kernel, , callout_end, callout-end); 64SDT_PROBE_ARGTYPE(callout_execute, kernel, , callout_end, 0, 65 "struct callout *"); 66 67static int avg_depth; 68SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0, 69 "Average number of items examined per softclock call. Units = 1/1000"); 70static int avg_gcalls; 71SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0, 72 "Average number of Giant callouts made per softclock call. Units = 1/1000"); 73static int avg_lockcalls; 74SYSCTL_INT(_debug, OID_AUTO, to_avg_lockcalls, CTLFLAG_RD, &avg_lockcalls, 0, 75 "Average number of lock callouts made per softclock call. Units = 1/1000"); 76static int avg_mpcalls; 77SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0, 78 "Average number of MP callouts made per softclock call. Units = 1/1000"); 79/* 80 * TODO: 81 * allocate more timeout table slots when table overflows. 82 */ 83int callwheelsize, callwheelbits, callwheelmask; 84 85/* 86 * There is one struct callout_cpu per cpu, holding all relevant 87 * state for the callout processing thread on the individual CPU. 88 * In particular: 89 * cc_ticks is incremented once per tick in callout_cpu(). 90 * It tracks the global 'ticks' but in a way that the individual 91 * threads should not worry about races in the order in which 92 * hardclock() and hardclock_cpu() run on the various CPUs. 93 * cc_softclock is advanced in callout_cpu() to point to the 94 * first entry in cc_callwheel that may need handling. In turn, 95 * a softclock() is scheduled so it can serve the various entries i 96 * such that cc_softclock <= i <= cc_ticks . 97 * XXX maybe cc_softclock and cc_ticks should be volatile ? 98 * 99 * cc_ticks is also used in callout_reset_cpu() to determine 100 * when the callout should be served. 101 */ 102struct callout_cpu { 103 struct mtx cc_lock; 104 struct callout *cc_callout; 105 struct callout_tailq *cc_callwheel; 106 struct callout_list cc_callfree; 107 struct callout *cc_next; 108 struct callout *cc_curr; 109 void *cc_cookie; 110 int cc_ticks; 111 int cc_softticks; 112 int cc_cancel; 113 int cc_waiting; 114 int cc_firsttick; 115}; 116 117#ifdef SMP 118struct callout_cpu cc_cpu[MAXCPU]; 119#define CC_CPU(cpu) (&cc_cpu[(cpu)]) 120#define CC_SELF() CC_CPU(PCPU_GET(cpuid)) 121#else 122struct callout_cpu cc_cpu; 123#define CC_CPU(cpu) &cc_cpu 124#define CC_SELF() &cc_cpu 125#endif 126#define CC_LOCK(cc) mtx_lock_spin(&(cc)->cc_lock) 127#define CC_UNLOCK(cc) mtx_unlock_spin(&(cc)->cc_lock) 128 129static int timeout_cpu; 130void (*callout_new_inserted)(int cpu, int ticks) = NULL; 131 132MALLOC_DEFINE(M_CALLOUT, "callout", "Callout datastructures"); 133 134/** 135 * Locked by cc_lock: 136 * cc_curr - If a callout is in progress, it is curr_callout. 137 * If curr_callout is non-NULL, threads waiting in 138 * callout_drain() will be woken up as soon as the 139 * relevant callout completes. 140 * cc_cancel - Changing to 1 with both callout_lock and c_lock held 141 * guarantees that the current callout will not run. 142 * The softclock() function sets this to 0 before it 143 * drops callout_lock to acquire c_lock, and it calls 144 * the handler only if curr_cancelled is still 0 after 145 * c_lock is successfully acquired. 146 * cc_waiting - If a thread is waiting in callout_drain(), then 147 * callout_wait is nonzero. Set only when 148 * curr_callout is non-NULL. 149 */ 150 151/* 152 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization 153 * 154 * This code is called very early in the kernel initialization sequence, 155 * and may be called more then once. 156 */ 157caddr_t 158kern_timeout_callwheel_alloc(caddr_t v) 159{ 160 struct callout_cpu *cc; 161 162 timeout_cpu = PCPU_GET(cpuid); 163 cc = CC_CPU(timeout_cpu); 164 /* 165 * Calculate callout wheel size 166 */ 167 for (callwheelsize = 1, callwheelbits = 0; 168 callwheelsize < ncallout; 169 callwheelsize <<= 1, ++callwheelbits) 170 ; 171 callwheelmask = callwheelsize - 1; 172 173 cc->cc_callout = (struct callout *)v; 174 v = (caddr_t)(cc->cc_callout + ncallout); 175 cc->cc_callwheel = (struct callout_tailq *)v; 176 v = (caddr_t)(cc->cc_callwheel + callwheelsize); 177 return(v); 178} 179 180static void 181callout_cpu_init(struct callout_cpu *cc) 182{ 183 struct callout *c; 184 int i; 185 186 mtx_init(&cc->cc_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE); 187 SLIST_INIT(&cc->cc_callfree); 188 for (i = 0; i < callwheelsize; i++) { 189 TAILQ_INIT(&cc->cc_callwheel[i]); 190 } 191 if (cc->cc_callout == NULL) 192 return; 193 for (i = 0; i < ncallout; i++) { 194 c = &cc->cc_callout[i]; 195 callout_init(c, 0); 196 c->c_flags = CALLOUT_LOCAL_ALLOC; 197 SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle); 198 } 199} 200 201/* 202 * kern_timeout_callwheel_init() - initialize previously reserved callwheel 203 * space. 204 * 205 * This code is called just once, after the space reserved for the 206 * callout wheel has been finalized. 207 */ 208void 209kern_timeout_callwheel_init(void) 210{ 211 callout_cpu_init(CC_CPU(timeout_cpu)); 212} 213 214/* 215 * Start standard softclock thread. 216 */ 217void *softclock_ih; 218 219static void 220start_softclock(void *dummy) 221{ 222 struct callout_cpu *cc; 223#ifdef SMP 224 int cpu; 225#endif 226 227 cc = CC_CPU(timeout_cpu); 228 if (swi_add(&clk_intr_event, "clock", softclock, cc, SWI_CLOCK, 229 INTR_MPSAFE, &softclock_ih)) 230 panic("died while creating standard software ithreads"); 231 cc->cc_cookie = softclock_ih; 232#ifdef SMP 233 CPU_FOREACH(cpu) { 234 if (cpu == timeout_cpu) 235 continue; 236 cc = CC_CPU(cpu); 237 if (swi_add(NULL, "clock", softclock, cc, SWI_CLOCK, 238 INTR_MPSAFE, &cc->cc_cookie)) 239 panic("died while creating standard software ithreads"); 240 cc->cc_callout = NULL; /* Only cpu0 handles timeout(). */ 241 cc->cc_callwheel = malloc( 242 sizeof(struct callout_tailq) * callwheelsize, M_CALLOUT, 243 M_WAITOK); 244 callout_cpu_init(cc); 245 } 246#endif 247} 248 249SYSINIT(start_softclock, SI_SUB_SOFTINTR, SI_ORDER_FIRST, start_softclock, NULL); 250 251void 252callout_tick(void) 253{ 254 struct callout_cpu *cc; 255 int need_softclock; 256 int bucket; 257 258 /* 259 * Process callouts at a very low cpu priority, so we don't keep the 260 * relatively high clock interrupt priority any longer than necessary. 261 */ 262 need_softclock = 0; 263 cc = CC_SELF(); 264 mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET); 265 cc->cc_firsttick = cc->cc_ticks = ticks; 266 for (; (cc->cc_softticks - cc->cc_ticks) <= 0; cc->cc_softticks++) { 267 bucket = cc->cc_softticks & callwheelmask; 268 if (!TAILQ_EMPTY(&cc->cc_callwheel[bucket])) { 269 need_softclock = 1; 270 break; 271 } 272 } 273 mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET); 274 /* 275 * swi_sched acquires the thread lock, so we don't want to call it 276 * with cc_lock held; incorrect locking order. 277 */ 278 if (need_softclock) 279 swi_sched(cc->cc_cookie, 0); 280} 281 282int 283callout_tickstofirst(void) 284{ 285 struct callout_cpu *cc; 286 struct callout *c; 287 struct callout_tailq *sc; 288 int curticks; 289 int skip = 1; 290 291 cc = CC_SELF(); 292 mtx_lock_spin_flags(&cc->cc_lock, MTX_QUIET); 293 curticks = cc->cc_ticks; 294 while( skip < ncallout && skip < hz/8 ) { 295 sc = &cc->cc_callwheel[ (curticks+skip) & callwheelmask ]; 296 /* search scanning ticks */ 297 TAILQ_FOREACH( c, sc, c_links.tqe ){ 298 if (c && (c->c_time <= curticks + ncallout) 299 && (c->c_time > 0)) 300 goto out; 301 } 302 skip++; 303 } 304out: 305 cc->cc_firsttick = curticks + skip; 306 mtx_unlock_spin_flags(&cc->cc_lock, MTX_QUIET); 307 return (skip); 308} 309 310static struct callout_cpu * 311callout_lock(struct callout *c) 312{ 313 struct callout_cpu *cc; 314 int cpu; 315 316 for (;;) { 317 cpu = c->c_cpu; 318 cc = CC_CPU(cpu); 319 CC_LOCK(cc); 320 if (cpu == c->c_cpu) 321 break; 322 CC_UNLOCK(cc); 323 } 324 return (cc); 325} 326 327/* 328 * The callout mechanism is based on the work of Adam M. Costello and 329 * George Varghese, published in a technical report entitled "Redesigning 330 * the BSD Callout and Timer Facilities" and modified slightly for inclusion 331 * in FreeBSD by Justin T. Gibbs. The original work on the data structures 332 * used in this implementation was published by G. Varghese and T. Lauck in 333 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for 334 * the Efficient Implementation of a Timer Facility" in the Proceedings of 335 * the 11th ACM Annual Symposium on Operating Systems Principles, 336 * Austin, Texas Nov 1987. 337 */ 338 339/* 340 * Software (low priority) clock interrupt. 341 * Run periodic events from timeout queue. 342 */ 343void 344softclock(void *arg) 345{ 346 struct callout_cpu *cc; 347 struct callout *c; 348 struct callout_tailq *bucket; 349 int curticks; 350 int steps; /* #steps since we last allowed interrupts */ 351 int depth; 352 int mpcalls; 353 int lockcalls; 354 int gcalls; 355#ifdef DIAGNOSTIC 356 struct bintime bt1, bt2; 357 struct timespec ts2; 358 static uint64_t maxdt = 36893488147419102LL; /* 2 msec */ 359 static timeout_t *lastfunc; 360#endif 361 362#ifndef MAX_SOFTCLOCK_STEPS 363#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */ 364#endif /* MAX_SOFTCLOCK_STEPS */ 365 366 mpcalls = 0; 367 lockcalls = 0; 368 gcalls = 0; 369 depth = 0; 370 steps = 0; 371 cc = (struct callout_cpu *)arg; 372 CC_LOCK(cc); 373 while (cc->cc_softticks - 1 != cc->cc_ticks) { 374 /* 375 * cc_softticks may be modified by hard clock, so cache 376 * it while we work on a given bucket. 377 */ 378 curticks = cc->cc_softticks; 379 cc->cc_softticks++; 380 bucket = &cc->cc_callwheel[curticks & callwheelmask]; 381 c = TAILQ_FIRST(bucket); 382 while (c) { 383 depth++; 384 if (c->c_time != curticks) { 385 c = TAILQ_NEXT(c, c_links.tqe); 386 ++steps; 387 if (steps >= MAX_SOFTCLOCK_STEPS) { 388 cc->cc_next = c; 389 /* Give interrupts a chance. */ 390 CC_UNLOCK(cc); 391 ; /* nothing */ 392 CC_LOCK(cc); 393 c = cc->cc_next; 394 steps = 0; 395 } 396 } else { 397 void (*c_func)(void *); 398 void *c_arg; 399 struct lock_class *class; 400 struct lock_object *c_lock; 401 int c_flags, sharedlock; 402 403 cc->cc_next = TAILQ_NEXT(c, c_links.tqe); 404 TAILQ_REMOVE(bucket, c, c_links.tqe); 405 class = (c->c_lock != NULL) ? 406 LOCK_CLASS(c->c_lock) : NULL; 407 sharedlock = (c->c_flags & CALLOUT_SHAREDLOCK) ? 408 0 : 1; 409 c_lock = c->c_lock; 410 c_func = c->c_func; 411 c_arg = c->c_arg; 412 c_flags = c->c_flags; 413 if (c->c_flags & CALLOUT_LOCAL_ALLOC) { 414 c->c_flags = CALLOUT_LOCAL_ALLOC; 415 } else { 416 c->c_flags = 417 (c->c_flags & ~CALLOUT_PENDING); 418 } 419 cc->cc_curr = c; 420 cc->cc_cancel = 0; 421 CC_UNLOCK(cc); 422 if (c_lock != NULL) { 423 class->lc_lock(c_lock, sharedlock); 424 /* 425 * The callout may have been cancelled 426 * while we switched locks. 427 */ 428 if (cc->cc_cancel) { 429 class->lc_unlock(c_lock); 430 goto skip; 431 } 432 /* The callout cannot be stopped now. */ 433 cc->cc_cancel = 1; 434 435 if (c_lock == &Giant.lock_object) { 436 gcalls++; 437 CTR3(KTR_CALLOUT, 438 "callout %p func %p arg %p", 439 c, c_func, c_arg); 440 } else { 441 lockcalls++; 442 CTR3(KTR_CALLOUT, "callout lock" 443 " %p func %p arg %p", 444 c, c_func, c_arg); 445 } 446 } else { 447 mpcalls++; 448 CTR3(KTR_CALLOUT, 449 "callout mpsafe %p func %p arg %p", 450 c, c_func, c_arg); 451 } 452#ifdef DIAGNOSTIC 453 binuptime(&bt1); 454#endif 455 THREAD_NO_SLEEPING(); 456 SDT_PROBE(callout_execute, kernel, , 457 callout_start, c, 0, 0, 0, 0); 458 c_func(c_arg); 459 SDT_PROBE(callout_execute, kernel, , 460 callout_end, c, 0, 0, 0, 0); 461 THREAD_SLEEPING_OK(); 462#ifdef DIAGNOSTIC 463 binuptime(&bt2); 464 bintime_sub(&bt2, &bt1); 465 if (bt2.frac > maxdt) { 466 if (lastfunc != c_func || 467 bt2.frac > maxdt * 2) { 468 bintime2timespec(&bt2, &ts2); 469 printf( 470 "Expensive timeout(9) function: %p(%p) %jd.%09ld s\n", 471 c_func, c_arg, 472 (intmax_t)ts2.tv_sec, 473 ts2.tv_nsec); 474 } 475 maxdt = bt2.frac; 476 lastfunc = c_func; 477 } 478#endif 479 CTR1(KTR_CALLOUT, "callout %p finished", c); 480 if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0) 481 class->lc_unlock(c_lock); 482 skip: 483 CC_LOCK(cc); 484 /* 485 * If the current callout is locally 486 * allocated (from timeout(9)) 487 * then put it on the freelist. 488 * 489 * Note: we need to check the cached 490 * copy of c_flags because if it was not 491 * local, then it's not safe to deref the 492 * callout pointer. 493 */ 494 if (c_flags & CALLOUT_LOCAL_ALLOC) { 495 KASSERT(c->c_flags == 496 CALLOUT_LOCAL_ALLOC, 497 ("corrupted callout")); 498 c->c_func = NULL; 499 SLIST_INSERT_HEAD(&cc->cc_callfree, c, 500 c_links.sle); 501 } 502 cc->cc_curr = NULL; 503 if (cc->cc_waiting) { 504 /* 505 * There is someone waiting 506 * for the callout to complete. 507 */ 508 cc->cc_waiting = 0; 509 CC_UNLOCK(cc); 510 wakeup(&cc->cc_waiting); 511 CC_LOCK(cc); 512 } 513 steps = 0; 514 c = cc->cc_next; 515 } 516 } 517 } 518 avg_depth += (depth * 1000 - avg_depth) >> 8; 519 avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8; 520 avg_lockcalls += (lockcalls * 1000 - avg_lockcalls) >> 8; 521 avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8; 522 cc->cc_next = NULL; 523 CC_UNLOCK(cc); 524} 525 526/* 527 * timeout -- 528 * Execute a function after a specified length of time. 529 * 530 * untimeout -- 531 * Cancel previous timeout function call. 532 * 533 * callout_handle_init -- 534 * Initialize a handle so that using it with untimeout is benign. 535 * 536 * See AT&T BCI Driver Reference Manual for specification. This 537 * implementation differs from that one in that although an 538 * identification value is returned from timeout, the original 539 * arguments to timeout as well as the identifier are used to 540 * identify entries for untimeout. 541 */ 542struct callout_handle 543timeout(ftn, arg, to_ticks) 544 timeout_t *ftn; 545 void *arg; 546 int to_ticks; 547{ 548 struct callout_cpu *cc; 549 struct callout *new; 550 struct callout_handle handle; 551 552 cc = CC_CPU(timeout_cpu); 553 CC_LOCK(cc); 554 /* Fill in the next free callout structure. */ 555 new = SLIST_FIRST(&cc->cc_callfree); 556 if (new == NULL) 557 /* XXX Attempt to malloc first */ 558 panic("timeout table full"); 559 SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle); 560 callout_reset(new, to_ticks, ftn, arg); 561 handle.callout = new; 562 CC_UNLOCK(cc); 563 564 return (handle); 565} 566 567void 568untimeout(ftn, arg, handle) 569 timeout_t *ftn; 570 void *arg; 571 struct callout_handle handle; 572{ 573 struct callout_cpu *cc; 574 575 /* 576 * Check for a handle that was initialized 577 * by callout_handle_init, but never used 578 * for a real timeout. 579 */ 580 if (handle.callout == NULL) 581 return; 582 583 cc = callout_lock(handle.callout); 584 if (handle.callout->c_func == ftn && handle.callout->c_arg == arg) 585 callout_stop(handle.callout); 586 CC_UNLOCK(cc); 587} 588 589void 590callout_handle_init(struct callout_handle *handle) 591{ 592 handle->callout = NULL; 593} 594 595/* 596 * New interface; clients allocate their own callout structures. 597 * 598 * callout_reset() - establish or change a timeout 599 * callout_stop() - disestablish a timeout 600 * callout_init() - initialize a callout structure so that it can 601 * safely be passed to callout_reset() and callout_stop() 602 * 603 * <sys/callout.h> defines three convenience macros: 604 * 605 * callout_active() - returns truth if callout has not been stopped, 606 * drained, or deactivated since the last time the callout was 607 * reset. 608 * callout_pending() - returns truth if callout is still waiting for timeout 609 * callout_deactivate() - marks the callout as having been serviced 610 */ 611int 612callout_reset_on(struct callout *c, int to_ticks, void (*ftn)(void *), 613 void *arg, int cpu) 614{ 615 struct callout_cpu *cc; 616 int cancelled = 0; 617 618 /* 619 * Don't allow migration of pre-allocated callouts lest they 620 * become unbalanced. 621 */ 622 if (c->c_flags & CALLOUT_LOCAL_ALLOC) 623 cpu = c->c_cpu; 624retry: 625 cc = callout_lock(c); 626 if (cc->cc_curr == c) { 627 /* 628 * We're being asked to reschedule a callout which is 629 * currently in progress. If there is a lock then we 630 * can cancel the callout if it has not really started. 631 */ 632 if (c->c_lock != NULL && !cc->cc_cancel) 633 cancelled = cc->cc_cancel = 1; 634 if (cc->cc_waiting) { 635 /* 636 * Someone has called callout_drain to kill this 637 * callout. Don't reschedule. 638 */ 639 CTR4(KTR_CALLOUT, "%s %p func %p arg %p", 640 cancelled ? "cancelled" : "failed to cancel", 641 c, c->c_func, c->c_arg); 642 CC_UNLOCK(cc); 643 return (cancelled); 644 } 645 } 646 if (c->c_flags & CALLOUT_PENDING) { 647 if (cc->cc_next == c) { 648 cc->cc_next = TAILQ_NEXT(c, c_links.tqe); 649 } 650 TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c, 651 c_links.tqe); 652 653 cancelled = 1; 654 c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING); 655 } 656 /* 657 * If the lock must migrate we have to check the state again as 658 * we can't hold both the new and old locks simultaneously. 659 */ 660 if (c->c_cpu != cpu) { 661 c->c_cpu = cpu; 662 CC_UNLOCK(cc); 663 goto retry; 664 } 665 666 if (to_ticks <= 0) 667 to_ticks = 1; 668 669 c->c_arg = arg; 670 c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING); 671 c->c_func = ftn; 672 c->c_time = ticks + to_ticks; 673 TAILQ_INSERT_TAIL(&cc->cc_callwheel[c->c_time & callwheelmask], 674 c, c_links.tqe); 675 if ((c->c_time - cc->cc_firsttick) < 0) { 676 cc->cc_firsttick = c->c_time; 677 (*callout_new_inserted)(cpu, 678 to_ticks + (ticks - cc->cc_ticks)); 679 } 680 CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d", 681 cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks); 682 CC_UNLOCK(cc); 683 684 return (cancelled); 685} 686 687/* 688 * Common idioms that can be optimized in the future. 689 */ 690int 691callout_schedule_on(struct callout *c, int to_ticks, int cpu) 692{ 693 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, cpu); 694} 695 696int 697callout_schedule(struct callout *c, int to_ticks) 698{ 699 return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu); 700} 701 702int 703_callout_stop_safe(c, safe) 704 struct callout *c; 705 int safe; 706{ 707 struct callout_cpu *cc; 708 struct lock_class *class; 709 int use_lock, sq_locked; 710 711 /* 712 * Some old subsystems don't hold Giant while running a callout_stop(), 713 * so just discard this check for the moment. 714 */ 715 if (!safe && c->c_lock != NULL) { 716 if (c->c_lock == &Giant.lock_object) 717 use_lock = mtx_owned(&Giant); 718 else { 719 use_lock = 1; 720 class = LOCK_CLASS(c->c_lock); 721 class->lc_assert(c->c_lock, LA_XLOCKED); 722 } 723 } else 724 use_lock = 0; 725 726 sq_locked = 0; 727again: 728 cc = callout_lock(c); 729 /* 730 * If the callout isn't pending, it's not on the queue, so 731 * don't attempt to remove it from the queue. We can try to 732 * stop it by other means however. 733 */ 734 if (!(c->c_flags & CALLOUT_PENDING)) { 735 c->c_flags &= ~CALLOUT_ACTIVE; 736 737 /* 738 * If it wasn't on the queue and it isn't the current 739 * callout, then we can't stop it, so just bail. 740 */ 741 if (cc->cc_curr != c) { 742 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p", 743 c, c->c_func, c->c_arg); 744 CC_UNLOCK(cc); 745 if (sq_locked) 746 sleepq_release(&cc->cc_waiting); 747 return (0); 748 } 749 750 if (safe) { 751 /* 752 * The current callout is running (or just 753 * about to run) and blocking is allowed, so 754 * just wait for the current invocation to 755 * finish. 756 */ 757 while (cc->cc_curr == c) { 758 759 /* 760 * Use direct calls to sleepqueue interface 761 * instead of cv/msleep in order to avoid 762 * a LOR between cc_lock and sleepqueue 763 * chain spinlocks. This piece of code 764 * emulates a msleep_spin() call actually. 765 * 766 * If we already have the sleepqueue chain 767 * locked, then we can safely block. If we 768 * don't already have it locked, however, 769 * we have to drop the cc_lock to lock 770 * it. This opens several races, so we 771 * restart at the beginning once we have 772 * both locks. If nothing has changed, then 773 * we will end up back here with sq_locked 774 * set. 775 */ 776 if (!sq_locked) { 777 CC_UNLOCK(cc); 778 sleepq_lock(&cc->cc_waiting); 779 sq_locked = 1; 780 goto again; 781 } 782 cc->cc_waiting = 1; 783 DROP_GIANT(); 784 CC_UNLOCK(cc); 785 sleepq_add(&cc->cc_waiting, 786 &cc->cc_lock.lock_object, "codrain", 787 SLEEPQ_SLEEP, 0); 788 sleepq_wait(&cc->cc_waiting, 0); 789 sq_locked = 0; 790 791 /* Reacquire locks previously released. */ 792 PICKUP_GIANT(); 793 CC_LOCK(cc); 794 } 795 } else if (use_lock && !cc->cc_cancel) { 796 /* 797 * The current callout is waiting for its 798 * lock which we hold. Cancel the callout 799 * and return. After our caller drops the 800 * lock, the callout will be skipped in 801 * softclock(). 802 */ 803 cc->cc_cancel = 1; 804 CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p", 805 c, c->c_func, c->c_arg); 806 CC_UNLOCK(cc); 807 KASSERT(!sq_locked, ("sleepqueue chain locked")); 808 return (1); 809 } 810 CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p", 811 c, c->c_func, c->c_arg); 812 CC_UNLOCK(cc); 813 KASSERT(!sq_locked, ("sleepqueue chain still locked")); 814 return (0); 815 } 816 if (sq_locked) 817 sleepq_release(&cc->cc_waiting); 818 819 c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING); 820 821 if (cc->cc_next == c) { 822 cc->cc_next = TAILQ_NEXT(c, c_links.tqe); 823 } 824 TAILQ_REMOVE(&cc->cc_callwheel[c->c_time & callwheelmask], c, 825 c_links.tqe); 826 827 CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p", 828 c, c->c_func, c->c_arg); 829 830 if (c->c_flags & CALLOUT_LOCAL_ALLOC) { 831 c->c_func = NULL; 832 SLIST_INSERT_HEAD(&cc->cc_callfree, c, c_links.sle); 833 } 834 CC_UNLOCK(cc); 835 return (1); 836} 837 838void 839callout_init(c, mpsafe) 840 struct callout *c; 841 int mpsafe; 842{ 843 bzero(c, sizeof *c); 844 if (mpsafe) { 845 c->c_lock = NULL; 846 c->c_flags = CALLOUT_RETURNUNLOCKED; 847 } else { 848 c->c_lock = &Giant.lock_object; 849 c->c_flags = 0; 850 } 851 c->c_cpu = timeout_cpu; 852} 853 854void 855_callout_init_lock(c, lock, flags) 856 struct callout *c; 857 struct lock_object *lock; 858 int flags; 859{ 860 bzero(c, sizeof *c); 861 c->c_lock = lock; 862 KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK)) == 0, 863 ("callout_init_lock: bad flags %d", flags)); 864 KASSERT(lock != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0, 865 ("callout_init_lock: CALLOUT_RETURNUNLOCKED with no lock")); 866 KASSERT(lock == NULL || !(LOCK_CLASS(lock)->lc_flags & 867 (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class", 868 __func__)); 869 c->c_flags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK); 870 c->c_cpu = timeout_cpu; 871} 872 873#ifdef APM_FIXUP_CALLTODO 874/* 875 * Adjust the kernel calltodo timeout list. This routine is used after 876 * an APM resume to recalculate the calltodo timer list values with the 877 * number of hz's we have been sleeping. The next hardclock() will detect 878 * that there are fired timers and run softclock() to execute them. 879 * 880 * Please note, I have not done an exhaustive analysis of what code this 881 * might break. I am motivated to have my select()'s and alarm()'s that 882 * have expired during suspend firing upon resume so that the applications 883 * which set the timer can do the maintanence the timer was for as close 884 * as possible to the originally intended time. Testing this code for a 885 * week showed that resuming from a suspend resulted in 22 to 25 timers 886 * firing, which seemed independant on whether the suspend was 2 hours or 887 * 2 days. Your milage may vary. - Ken Key <key@cs.utk.edu> 888 */ 889void 890adjust_timeout_calltodo(time_change) 891 struct timeval *time_change; 892{ 893 register struct callout *p; 894 unsigned long delta_ticks; 895 896 /* 897 * How many ticks were we asleep? 898 * (stolen from tvtohz()). 899 */ 900 901 /* Don't do anything */ 902 if (time_change->tv_sec < 0) 903 return; 904 else if (time_change->tv_sec <= LONG_MAX / 1000000) 905 delta_ticks = (time_change->tv_sec * 1000000 + 906 time_change->tv_usec + (tick - 1)) / tick + 1; 907 else if (time_change->tv_sec <= LONG_MAX / hz) 908 delta_ticks = time_change->tv_sec * hz + 909 (time_change->tv_usec + (tick - 1)) / tick + 1; 910 else 911 delta_ticks = LONG_MAX; 912 913 if (delta_ticks > INT_MAX) 914 delta_ticks = INT_MAX; 915 916 /* 917 * Now rip through the timer calltodo list looking for timers 918 * to expire. 919 */ 920 921 /* don't collide with softclock() */ 922 CC_LOCK(cc); 923 for (p = calltodo.c_next; p != NULL; p = p->c_next) { 924 p->c_time -= delta_ticks; 925 926 /* Break if the timer had more time on it than delta_ticks */ 927 if (p->c_time > 0) 928 break; 929 930 /* take back the ticks the timer didn't use (p->c_time <= 0) */ 931 delta_ticks = -p->c_time; 932 } 933 CC_UNLOCK(cc); 934 935 return; 936} 937#endif /* APM_FIXUP_CALLTODO */ 938