kern_timeout.c revision 172025
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_timeout.c 172025 2007-08-31 19:01:30Z jhb $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sleepqueue.h>
#include <sys/sysctl.h>

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_mtxcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mtxcalls, CTLFLAG_RD, &avg_mtxcalls, 0,
    "Average number of mtx callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
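/*
 * Usage sketch (illustrative, not from this file): the debug.to_avg_*
 * counters above are fixed-point averages scaled by 1000, so a reported
 * value of 1500 means roughly 1.5 items per softclock() call.  They can
 * be read from userland with sysctl(8); the output value shown here is
 * invented:
 *
 *	$ sysctl debug.to_avg_depth
 *	debug.to_avg_depth: 1500
 */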
/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */

/* Exported to machdep.c and/or kern_clock.c. */
struct callout *callout;
struct callout_list callfree;
int callwheelsize, callwheelbits, callwheelmask;
struct callout_tailq *callwheel;
int softticks;			/* Like ticks, but for softclock(). */
struct mtx callout_lock;

static struct callout *nextsoftcheck;	/* Next callout to be checked. */

/**
 * Locked by callout_lock:
 *   curr_callout    - If a callout is in progress, it is curr_callout.
 *                     If curr_callout is non-NULL, threads waiting in
 *                     callout_drain() will be woken up as soon as the
 *                     relevant callout completes.
 *   curr_cancelled  - Changing to 1 with both callout_lock and c_mtx held
 *                     guarantees that the current callout will not run.
 *                     The softclock() function sets this to 0 before it
 *                     drops callout_lock to acquire c_mtx, and it calls
 *                     the handler only if curr_cancelled is still 0 after
 *                     c_mtx is successfully acquired.
 *   callout_wait    - If a thread is waiting in callout_drain(), then
 *                     callout_wait is nonzero.  Set only when
 *                     curr_callout is non-NULL.
 */
static struct callout *curr_callout;
static int curr_cancelled;
static int callout_wait;

/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 * This code is called very early in the kernel initialization sequence,
 * and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
	/*
	 * Calculate callout wheel size.
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	callout = (struct callout *)v;
	v = (caddr_t)(callout + ncallout);
	callwheel = (struct callout_tailq *)v;
	v = (caddr_t)(callwheel + callwheelsize);
	return(v);
}

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 * This code is called just once, after the space reserved for the
 * callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	int i;

	SLIST_INIT(&callfree);
	for (i = 0; i < ncallout; i++) {
		callout_init(&callout[i], 0);
		callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
	}
	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&callwheel[i]);
	}
	mtx_init(&callout_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and T. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */
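/*
 * Bucket hashing sketch (illustrative, not from this file): because
 * callwheelsize is rounded up to a power of two above, the expiry tick
 * of a callout selects its wheel bucket with a mask rather than a
 * modulo:
 *
 *	bucket = &callwheel[c->c_time & callwheelmask];
 *
 * Insertion and removal are therefore O(1).  Entries that hash to the
 * same bucket but belong to a later revolution of the wheel are
 * recognized in softclock() because their c_time does not equal the
 * current tick, and are skipped.
 */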
/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *dummy)
{
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int mtxcalls;
	int gcalls;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	mtxcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	mtx_lock_spin(&callout_lock);
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					mtx_unlock_spin(&callout_lock);
					;	/* nothing */
					mtx_lock_spin(&callout_lock);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				struct mtx *c_mtx;
				int c_flags;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_mtx = c->c_mtx;
				c_flags = c->c_flags;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_func = NULL;
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
					    c_links.sle);
					curr_callout = NULL;
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
					curr_callout = c;
				}
				curr_cancelled = 0;
				mtx_unlock_spin(&callout_lock);
				if (c_mtx != NULL) {
					if (c_flags & CALLOUT_NETGIANT) {
						mtx_lock(&Giant);
						gcalls++;
						CTR3(KTR_CALLOUT, "netgiant"
						    " %p func %p arg %p",
						    c, c_func, c_arg);
					}
					mtx_lock(c_mtx);
					/*
					 * The callout may have been cancelled
					 * while we switched locks.
					 */
					if (curr_cancelled) {
						mtx_unlock(c_mtx);
						goto skip;
					}
					/* The callout cannot be stopped now. */
					curr_cancelled = 1;

					if (c_mtx == &Giant) {
						gcalls++;
						CTR3(KTR_CALLOUT,
						    "callout %p func %p arg %p",
						    c, c_func, c_arg);
					} else {
						mtxcalls++;
						CTR3(KTR_CALLOUT, "callout mtx"
						    " %p func %p arg %p",
						    c, c_func, c_arg);
					}
				} else {
					mpcalls++;
					CTR3(KTR_CALLOUT,
					    "callout mpsafe %p func %p arg %p",
					    c, c_func, c_arg);
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
#endif
				THREAD_NO_SLEEPING();
				c_func(c_arg);
				THREAD_SLEEPING_OK();
#ifdef DIAGNOSTIC
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				if ((c_flags & CALLOUT_RETURNUNLOCKED) == 0)
					mtx_unlock(c_mtx);
				if (c_flags & CALLOUT_NETGIANT)
					mtx_unlock(&Giant);
			skip:
				mtx_lock_spin(&callout_lock);
				curr_callout = NULL;
				if (callout_wait) {
					/*
					 * There is someone waiting
					 * for the callout to complete.
					 */
					callout_wait = 0;
					mtx_unlock_spin(&callout_lock);
					wakeup(&callout_wait);
					mtx_lock_spin(&callout_lock);
				}
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_mtxcalls += (mtxcalls * 1000 - avg_mtxcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	nextsoftcheck = NULL;
	mtx_unlock_spin(&callout_lock);
}
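/*
 * Statistics sketch (illustrative, not from this file): the avg_*
 * updates at the end of softclock() are fixed-point exponential moving
 * averages with a gain of 1/256.  For a hypothetical constant sample:
 *
 *	int sample = 3;			(items examined this call)
 *	avg_depth += (sample * 1000 - avg_depth) >> 8;
 *
 * Repeated with the same sample, avg_depth converges toward
 * sample * 1000 = 3000, which is why the sysctl descriptions above say
 * "Units = 1/1000".
 */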
/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout *new;
	struct callout_handle handle;

	mtx_lock_spin(&callout_lock);

	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&callfree, c_links.sle);

	callout_reset(new, to_ticks, ftn, arg);

	handle.callout = new;
	mtx_unlock_spin(&callout_lock);
	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	mtx_lock_spin(&callout_lock);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	mtx_unlock_spin(&callout_lock);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}
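/*
 * Usage sketch for the legacy interface (illustrative; foo_expire and
 * sc are hypothetical names): callers keep the handle returned by
 * timeout() and pass the same function and argument back to
 * untimeout():
 *
 *	struct callout_handle h;
 *
 *	callout_handle_init(&h);		(harmless to untimeout now)
 *	h = timeout(foo_expire, sc, 2 * hz);	(fire in about two seconds)
 *	...
 *	untimeout(foo_expire, sc, h);		(cancel if still pending)
 */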
/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been stopped,
 *	drained, or deactivated since the last time the callout was
 *	reset.
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
int
callout_reset(c, to_ticks, ftn, arg)
	struct callout *c;
	int to_ticks;
	void (*ftn)(void *);
	void *arg;
{
	int cancelled = 0;

#ifdef notyet /* Some callers of timeout() do not hold Giant. */
	if (c->c_mtx != NULL)
		mtx_assert(c->c_mtx, MA_OWNED);
#endif

	mtx_lock_spin(&callout_lock);
	if (c == curr_callout) {
		/*
		 * We're being asked to reschedule a callout which is
		 * currently in progress.  If there is a mutex then we
		 * can cancel the callout if it has not really started.
		 */
		if (c->c_mtx != NULL && !curr_cancelled)
			cancelled = curr_cancelled = 1;
		if (callout_wait) {
			/*
			 * Someone has called callout_drain to kill this
			 * callout.  Don't reschedule.
			 */
			CTR4(KTR_CALLOUT, "%s %p func %p arg %p",
			    cancelled ? "cancelled" : "failed to cancel",
			    c, c->c_func, c->c_arg);
			mtx_unlock_spin(&callout_lock);
			return (cancelled);
		}
	}
	if (c->c_flags & CALLOUT_PENDING) {
		if (nextsoftcheck == c) {
			nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
		}
		TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c,
		    c_links.tqe);

		cancelled = 1;

		/*
		 * Part of the normal "stop a pending callout" process
		 * is to clear the CALLOUT_ACTIVE and CALLOUT_PENDING
		 * flags.  We're not going to bother doing that here,
		 * because we're going to be setting those flags ten lines
		 * after this point, and we're holding callout_lock
		 * between now and then.
		 */
	}

	/*
	 * We could unlock callout_lock here and lock it again before the
	 * TAILQ_INSERT_TAIL, but there's no point since doing this setup
	 * doesn't take much time.
	 */
	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = ftn;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
	    c, c_links.tqe);
	CTR5(KTR_CALLOUT, "%sscheduled %p func %p arg %p in %d",
	    cancelled ? "re" : "", c, c->c_func, c->c_arg, to_ticks);
	mtx_unlock_spin(&callout_lock);

	return (cancelled);
}
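/*
 * Rearm sketch (illustrative; foo_softc, sc_mtx, sc_timer and foo_tick
 * are hypothetical names): a periodic, mutex-protected callout
 * typically marks itself serviced and reschedules from within its own
 * handler, which runs with the associated mutex held:
 *
 *	static void
 *	foo_tick(void *arg)
 *	{
 *		struct foo_softc *sc = arg;
 *
 *		mtx_assert(&sc->sc_mtx, MA_OWNED);
 *		callout_deactivate(&sc->sc_timer);
 *		... periodic work ...
 *		callout_reset(&sc->sc_timer, hz, foo_tick, sc);
 *	}
 */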
int
_callout_stop_safe(c, safe)
	struct callout *c;
	int safe;
{
	int use_mtx, sq_locked;

	if (!safe && c->c_mtx != NULL) {
#ifdef notyet /* Some callers do not hold Giant for Giant-locked callouts. */
		mtx_assert(c->c_mtx, MA_OWNED);
		use_mtx = 1;
#else
		use_mtx = mtx_owned(c->c_mtx);
#endif
	} else {
		use_mtx = 0;
	}

	sq_locked = 0;
again:
	mtx_lock_spin(&callout_lock);
	/*
	 * If the callout isn't pending, it's not on the queue, so
	 * don't attempt to remove it from the queue.  We can try to
	 * stop it by other means however.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;

		/*
		 * If it wasn't on the queue and it isn't the current
		 * callout, then we can't stop it, so just bail.
		 */
		if (c != curr_callout) {
			CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			mtx_unlock_spin(&callout_lock);
			if (sq_locked)
				sleepq_release(&callout_wait);
			return (0);
		}

		if (safe) {
			/*
			 * The current callout is running (or just
			 * about to run) and blocking is allowed, so
			 * just wait for the current invocation to
			 * finish.
			 */
			while (c == curr_callout) {

				/*
				 * Use direct calls to sleepqueue interface
				 * instead of cv/msleep in order to avoid
				 * a LOR between callout_lock and sleepqueue
				 * chain spinlocks.  This piece of code
				 * effectively emulates an msleep_spin()
				 * call.
				 *
				 * If we already have the sleepqueue chain
				 * locked, then we can safely block.  If we
				 * don't already have it locked, however,
				 * we have to drop the callout_lock to lock
				 * it.  This opens several races, so we
				 * restart at the beginning once we have
				 * both locks.  If nothing has changed, then
				 * we will end up back here with sq_locked
				 * set.
				 */
				if (!sq_locked) {
					mtx_unlock_spin(&callout_lock);
					sleepq_lock(&callout_wait);
					sq_locked = 1;
					goto again;
				}

				callout_wait = 1;
				DROP_GIANT();
				mtx_unlock_spin(&callout_lock);
				sleepq_add(&callout_wait,
				    &callout_lock.lock_object, "codrain",
				    SLEEPQ_SLEEP, 0);
				sleepq_wait(&callout_wait);
				sq_locked = 0;

				/* Reacquire locks previously released. */
				PICKUP_GIANT();
				mtx_lock_spin(&callout_lock);
			}
		} else if (use_mtx && !curr_cancelled) {
			/*
			 * The current callout is waiting for its
			 * mutex which we hold.  Cancel the callout
			 * and return.  After our caller drops the
			 * mutex, the callout will be skipped in
			 * softclock().
			 */
			curr_cancelled = 1;
			CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
			    c, c->c_func, c->c_arg);
			mtx_unlock_spin(&callout_lock);
			KASSERT(!sq_locked, ("sleepqueue chain locked"));
			return (1);
		}
		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
		    c, c->c_func, c->c_arg);
		mtx_unlock_spin(&callout_lock);
		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
		return (0);
	}
	if (sq_locked)
		sleepq_release(&callout_wait);

	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (nextsoftcheck == c) {
		nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
	}
	TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe);

	CTR3(KTR_CALLOUT, "cancelled %p func %p arg %p",
	    c, c->c_func, c->c_arg);

	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		c->c_func = NULL;
		SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
	}
	mtx_unlock_spin(&callout_lock);
	return (1);
}

void
callout_init(c, mpsafe)
	struct callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe) {
		c->c_mtx = NULL;
		c->c_flags = CALLOUT_RETURNUNLOCKED;
	} else {
		c->c_mtx = &Giant;
		c->c_flags = 0;
	}
}

void
callout_init_mtx(c, mtx, flags)
	struct callout *c;
	struct mtx *mtx;
	int flags;
{
	bzero(c, sizeof *c);
	c->c_mtx = mtx;
	KASSERT((flags & ~(CALLOUT_RETURNUNLOCKED|CALLOUT_NETGIANT)) == 0,
	    ("callout_init_mtx: bad flags %d", flags));
	/* CALLOUT_RETURNUNLOCKED makes no sense without a mutex. */
	KASSERT(mtx != NULL || (flags & CALLOUT_RETURNUNLOCKED) == 0,
	    ("callout_init_mtx: CALLOUT_RETURNUNLOCKED with no mutex"));
	c->c_flags = flags & (CALLOUT_RETURNUNLOCKED|CALLOUT_NETGIANT);
}
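/*
 * Teardown sketch (illustrative; the sc names are hypothetical): since
 * callout_stop() cannot stop a handler that is already running, detach
 * paths that are allowed to sleep usually follow it with
 * callout_drain(), which <sys/callout.h> maps to _callout_stop_safe(c, 1)
 * and which waits for the current invocation to finish.  The callout's
 * own mutex must not be held across the drain:
 *
 *	mtx_lock(&sc->sc_mtx);
 *	callout_stop(&sc->sc_timer);
 *	mtx_unlock(&sc->sc_mtx);
 *	callout_drain(&sc->sc_timer);	(may sleep; sc_mtx not held)
 *	free(sc, M_DEVBUF);		(now safe to release)
 */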
#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values by the
 * number of ticks we have been asleep.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.   - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
	struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;

	/*
	 * Now rip through the timer calltodo list looking for timers
	 * to expire.
	 */

	/* don't collide with softclock() */
	mtx_lock_spin(&callout_lock);
	for (p = calltodo.c_next; p != NULL; p = p->c_next) {
		p->c_time -= delta_ticks;

		/* Break if the timer had more time on it than delta_ticks */
		if (p->c_time > 0)
			break;

		/* take back the ticks the timer didn't use (p->c_time <= 0) */
		delta_ticks = -p->c_time;
	}
	mtx_unlock_spin(&callout_lock);

	return;
}
#endif /* APM_FIXUP_CALLTODO */
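/*
 * Worked example (illustrative, not from this file): with hz = 1000
 * (tick = 1000 us), a suspend of 2.5 seconds arrives as
 * time_change = { 2, 500000 }, and the first branch above computes
 *
 *	delta_ticks = (2 * 1000000 + 500000 + 999) / 1000 + 1 = 2501
 *
 * The loop then walks the delta list: the first timer absorbs the full
 * 2501 ticks, each timer driven to zero or below passes its unused
 * (negative) remainder on to the next, and the walk stops at the first
 * timer that still has time left.  Timers left at or below zero fire on
 * the next softclock() pass.
 */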