kern_timeout.c revision 123254
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	From: @(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_timeout.c 123254 2003-12-07 20:03:28Z phk $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>

static int avg_depth;
SYSCTL_INT(_debug, OID_AUTO, to_avg_depth, CTLFLAG_RD, &avg_depth, 0,
    "Average number of items examined per softclock call. Units = 1/1000");
static int avg_gcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_gcalls, CTLFLAG_RD, &avg_gcalls, 0,
    "Average number of Giant callouts made per softclock call. Units = 1/1000");
static int avg_mpcalls;
SYSCTL_INT(_debug, OID_AUTO, to_avg_mpcalls, CTLFLAG_RD, &avg_mpcalls, 0,
    "Average number of MP callouts made per softclock call. Units = 1/1000");
/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */

/* Exported to machdep.c and/or kern_clock.c. */
struct callout *callout;
struct callout_list callfree;
int callwheelsize, callwheelbits, callwheelmask;
struct callout_tailq *callwheel;
int softticks;			/* Like ticks, but for softclock(). */
struct mtx callout_lock;
#ifdef DIAGNOSTIC
struct mtx dont_sleep_in_callout;
#endif

static struct callout *nextsoftcheck;	/* Next callout to be checked. */

/*
 * kern_timeout_callwheel_alloc() - kernel low level callwheel initialization
 *
 * This code is called very early in the kernel initialization sequence,
 * and may be called more than once.
 */
caddr_t
kern_timeout_callwheel_alloc(caddr_t v)
{
	/*
	 * Calculate callout wheel size.
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	callout = (struct callout *)v;
	v = (caddr_t)(callout + ncallout);
	callwheel = (struct callout_tailq *)v;
	v = (caddr_t)(callwheel + callwheelsize);
	return (v);
}

/*
 * kern_timeout_callwheel_init() - initialize previously reserved callwheel
 *				   space.
 *
 * This code is called just once, after the space reserved for the
 * callout wheel has been finalized.
 */
void
kern_timeout_callwheel_init(void)
{
	int i;

	SLIST_INIT(&callfree);
	for (i = 0; i < ncallout; i++) {
		callout_init(&callout[i], 0);
		callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
	}
	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&callwheel[i]);
	}
	mtx_init(&callout_lock, "callout", NULL, MTX_SPIN | MTX_RECURSE);
#ifdef DIAGNOSTIC
	mtx_init(&dont_sleep_in_callout, "dont_sleep_in_callout", NULL, MTX_DEF);
#endif
}

/*
 * The callout mechanism is based on the work of Adam M. Costello and
 * George Varghese, published in a technical report entitled "Redesigning
 * the BSD Callout and Timer Facilities" and modified slightly for inclusion
 * in FreeBSD by Justin T. Gibbs.  The original work on the data structures
 * used in this implementation was published by G. Varghese and A. Lauck in
 * the paper "Hashed and Hierarchical Timing Wheels: Data Structures for
 * the Efficient Implementation of a Timer Facility" in the Proceedings of
 * the 11th ACM Annual Symposium on Operating Systems Principles,
 * Austin, Texas Nov 1987.
 */
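/*
 * Illustration (not compiled; an editorial sketch with hypothetical names,
 * not part of the original file): the timing wheel is a hash table keyed
 * on expiry time.  Because callwheelsize is a power of two, a callout due
 * at absolute tick T always lives in bucket T & callwheelmask, so
 * softclock() scans only one bucket per tick and skips entries whose
 * c_time belongs to a later revolution of the wheel.
 */
#if 0
static void
callwheel_hash_example(void)
{
	int to_ticks = 50;			/* fire 50 ticks from now */
	int c_time = ticks + to_ticks;		/* absolute expiry tick */
	int bucket = c_time & callwheelmask;	/* wheel slot: cheap modulus */

	/*
	 * With callwheelsize = 256 the wheel wraps every 256 ticks, so a
	 * callout 300 ticks out shares a bucket with one 44 ticks out;
	 * softclock() tells them apart by comparing c_time to curticks.
	 */
	printf("expiry tick %d hashes to bucket %d\n", c_time, bucket);
}
#endif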
/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
void
softclock(void *dummy)
{
	struct callout *c;
	struct callout_tailq *bucket;
	int curticks;
	int steps;	/* #steps since we last allowed interrupts */
	int depth;
	int mpcalls;
	int gcalls;
#ifdef DIAGNOSTIC
	struct bintime bt1, bt2;
	struct timespec ts2;
	static uint64_t maxdt = 36893488147419102LL;	/* 2 msec */
	static timeout_t *lastfunc;
#endif

#ifndef MAX_SOFTCLOCK_STEPS
#define MAX_SOFTCLOCK_STEPS 100 /* Maximum allowed value of steps. */
#endif /* MAX_SOFTCLOCK_STEPS */

	mpcalls = 0;
	gcalls = 0;
	depth = 0;
	steps = 0;
	mtx_lock_spin(&callout_lock);
	while (softticks != ticks) {
		softticks++;
		/*
		 * softticks may be modified by hard clock, so cache
		 * it while we work on a given bucket.
		 */
		curticks = softticks;
		bucket = &callwheel[curticks & callwheelmask];
		c = TAILQ_FIRST(bucket);
		while (c) {
			depth++;
			if (c->c_time != curticks) {
				c = TAILQ_NEXT(c, c_links.tqe);
				++steps;
				if (steps >= MAX_SOFTCLOCK_STEPS) {
					nextsoftcheck = c;
					/* Give interrupts a chance. */
					mtx_unlock_spin(&callout_lock);
					;	/* nothing */
					mtx_lock_spin(&callout_lock);
					c = nextsoftcheck;
					steps = 0;
				}
			} else {
				void (*c_func)(void *);
				void *c_arg;
				int c_flags;

				nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
				TAILQ_REMOVE(bucket, c, c_links.tqe);
				c_func = c->c_func;
				c_arg = c->c_arg;
				c_flags = c->c_flags;
				c->c_func = NULL;
				if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
					c->c_flags = CALLOUT_LOCAL_ALLOC;
					SLIST_INSERT_HEAD(&callfree, c,
					    c_links.sle);
				} else {
					c->c_flags =
					    (c->c_flags & ~CALLOUT_PENDING);
				}
				mtx_unlock_spin(&callout_lock);
				if (!(c_flags & CALLOUT_MPSAFE)) {
					mtx_lock(&Giant);
					gcalls++;
				} else {
					mpcalls++;
				}
#ifdef DIAGNOSTIC
				binuptime(&bt1);
				mtx_lock(&dont_sleep_in_callout);
#endif
				c_func(c_arg);
#ifdef DIAGNOSTIC
				mtx_unlock(&dont_sleep_in_callout);
				binuptime(&bt2);
				bintime_sub(&bt2, &bt1);
				if (bt2.frac > maxdt) {
					if (lastfunc != c_func ||
					    bt2.frac > maxdt * 2) {
						bintime2timespec(&bt2, &ts2);
						printf(
			"Expensive timeout(9) function: %p(%p) %jd.%09ld s\n",
						    c_func, c_arg,
						    (intmax_t)ts2.tv_sec,
						    ts2.tv_nsec);
					}
					maxdt = bt2.frac;
					lastfunc = c_func;
				}
#endif
				if (!(c_flags & CALLOUT_MPSAFE))
					mtx_unlock(&Giant);
				mtx_lock_spin(&callout_lock);
				steps = 0;
				c = nextsoftcheck;
			}
		}
	}
	avg_depth += (depth * 1000 - avg_depth) >> 8;
	avg_mpcalls += (mpcalls * 1000 - avg_mpcalls) >> 8;
	avg_gcalls += (gcalls * 1000 - avg_gcalls) >> 8;
	nextsoftcheck = NULL;
	mtx_unlock_spin(&callout_lock);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * callout_handle_init --
 *	Initialize a handle so that using it with untimeout is benign.
 *
 *	See AT&T BCI Driver Reference Manual for specification.  This
 *	implementation differs from that one in that although an
 *	identification value is returned from timeout, the original
 *	arguments to timeout as well as the identifier are used to
 *	identify entries for untimeout.
 */
struct callout_handle
timeout(ftn, arg, to_ticks)
	timeout_t *ftn;
	void *arg;
	int to_ticks;
{
	struct callout *new;
	struct callout_handle handle;

	mtx_lock_spin(&callout_lock);

	/* Fill in the next free callout structure. */
	new = SLIST_FIRST(&callfree);
	if (new == NULL)
		/* XXX Attempt to malloc first */
		panic("timeout table full");
	SLIST_REMOVE_HEAD(&callfree, c_links.sle);

	callout_reset(new, to_ticks, ftn, arg);

	handle.callout = new;
	mtx_unlock_spin(&callout_lock);
	return (handle);
}

void
untimeout(ftn, arg, handle)
	timeout_t *ftn;
	void *arg;
	struct callout_handle handle;
{

	/*
	 * Check for a handle that was initialized
	 * by callout_handle_init, but never used
	 * for a real timeout.
	 */
	if (handle.callout == NULL)
		return;

	mtx_lock_spin(&callout_lock);
	if (handle.callout->c_func == ftn && handle.callout->c_arg == arg)
		callout_stop(handle.callout);
	mtx_unlock_spin(&callout_lock);
}

void
callout_handle_init(struct callout_handle *handle)
{
	handle->callout = NULL;
}
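/*
 * Usage sketch (illustrative only; hypothetical names, not part of the
 * original file): the legacy timeout(9) interface hands back a handle
 * that, together with the original function and argument, identifies
 * the entry for untimeout(), as described in the comment above.
 */
#if 0
static struct callout_handle example_h;

static void
example_expire(void *arg)
{
	printf("timer fired: %p\n", arg);
}

static void
example_legacy_usage(void *sc)
{
	callout_handle_init(&example_h);	/* untimeout() is now benign */
	example_h = timeout(example_expire, sc, hz);	/* fire in ~1 second */
	untimeout(example_expire, sc, example_h);	/* needs ftn + arg + handle */
}
#endif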
/*
 * New interface; clients allocate their own callout structures.
 *
 * callout_reset() - establish or change a timeout
 * callout_stop() - disestablish a timeout
 * callout_init() - initialize a callout structure so that it can
 *	safely be passed to callout_reset() and callout_stop()
 *
 * <sys/callout.h> defines three convenience macros:
 *
 * callout_active() - returns truth if callout has not been serviced
 * callout_pending() - returns truth if callout is still waiting for timeout
 * callout_deactivate() - marks the callout as having been serviced
 */
void
callout_reset(c, to_ticks, ftn, arg)
	struct callout *c;
	int to_ticks;
	void (*ftn)(void *);
	void *arg;
{

	mtx_lock_spin(&callout_lock);
	if (c->c_flags & CALLOUT_PENDING)
		callout_stop(c);

	/*
	 * We could unlock callout_lock here and lock it again before the
	 * TAILQ_INSERT_TAIL, but there's no point since doing this setup
	 * doesn't take much time.
	 */
	if (to_ticks <= 0)
		to_ticks = 1;

	c->c_arg = arg;
	c->c_flags |= (CALLOUT_ACTIVE | CALLOUT_PENDING);
	c->c_func = ftn;
	c->c_time = ticks + to_ticks;
	TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask],
	    c, c_links.tqe);
	mtx_unlock_spin(&callout_lock);
}

int
callout_stop(c)
	struct callout *c;
{

	mtx_lock_spin(&callout_lock);
	/*
	 * Don't attempt to delete a callout that's not on the queue.
	 */
	if (!(c->c_flags & CALLOUT_PENDING)) {
		c->c_flags &= ~CALLOUT_ACTIVE;
		mtx_unlock_spin(&callout_lock);
		return (0);
	}
	c->c_flags &= ~(CALLOUT_ACTIVE | CALLOUT_PENDING);

	if (nextsoftcheck == c) {
		nextsoftcheck = TAILQ_NEXT(c, c_links.tqe);
	}
	TAILQ_REMOVE(&callwheel[c->c_time & callwheelmask], c, c_links.tqe);
	c->c_func = NULL;

	if (c->c_flags & CALLOUT_LOCAL_ALLOC) {
		SLIST_INSERT_HEAD(&callfree, c, c_links.sle);
	}
	mtx_unlock_spin(&callout_lock);
	return (1);
}

void
callout_init(c, mpsafe)
	struct callout *c;
	int mpsafe;
{
	bzero(c, sizeof *c);
	if (mpsafe)
		c->c_flags |= CALLOUT_MPSAFE;
}
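/*
 * Usage sketch (illustrative only; hypothetical names, not part of the
 * original file): with the new interface the client owns the callout
 * structure.  Passing mpsafe = 1 to callout_init() sets CALLOUT_MPSAFE,
 * so softclock() invokes the handler without taking Giant.  A periodic
 * timer typically calls callout_deactivate() and then rearms itself
 * from inside the handler.
 */
#if 0
static struct callout example_c;

static void
example_tick(void *arg)
{
	callout_deactivate(&example_c);	/* mark as serviced */
	/* ... do the periodic work here ... */
	callout_reset(&example_c, hz / 10, example_tick, arg);	/* rearm */
}

static void
example_start(void *sc)
{
	callout_init(&example_c, 1);	/* MPSAFE: handler runs without Giant */
	callout_reset(&example_c, hz / 10, example_tick, sc);
}
#endif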
#ifdef APM_FIXUP_CALLTODO
/*
 * Adjust the kernel calltodo timeout list.  This routine is used after
 * an APM resume to recalculate the calltodo timer list values with the
 * number of hz's we have been sleeping.  The next hardclock() will detect
 * that there are fired timers and run softclock() to execute them.
 *
 * Please note, I have not done an exhaustive analysis of what code this
 * might break.  I am motivated to have my select()'s and alarm()'s that
 * have expired during suspend firing upon resume so that the applications
 * which set the timer can do the maintenance the timer was for as close
 * as possible to the originally intended time.  Testing this code for a
 * week showed that resuming from a suspend resulted in 22 to 25 timers
 * firing, which seemed independent of whether the suspend was 2 hours or
 * 2 days.  Your mileage may vary.  - Ken Key <key@cs.utk.edu>
 */
void
adjust_timeout_calltodo(time_change)
	struct timeval *time_change;
{
	register struct callout *p;
	unsigned long delta_ticks;

	/*
	 * How many ticks were we asleep?
	 * (stolen from tvtohz()).
	 */

	/* Don't do anything */
	if (time_change->tv_sec < 0)
		return;
	else if (time_change->tv_sec <= LONG_MAX / 1000000)
		delta_ticks = (time_change->tv_sec * 1000000 +
			       time_change->tv_usec + (tick - 1)) / tick + 1;
	else if (time_change->tv_sec <= LONG_MAX / hz)
		delta_ticks = time_change->tv_sec * hz +
			      (time_change->tv_usec + (tick - 1)) / tick + 1;
	else
		delta_ticks = LONG_MAX;

	if (delta_ticks > INT_MAX)
		delta_ticks = INT_MAX;
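	/*
	 * Worked example (editorial illustration, assuming hz = 100 and
	 * therefore tick = 10000 usec): a suspend of time_change = 2.5 s
	 * takes the first branch above, giving
	 *
	 *	delta_ticks = (2 * 1000000 + 500000 + 9999) / 10000 + 1 = 251
	 *
	 * The round-up plus the trailing "+ 1" deliberately overestimate,
	 * so timers may fire slightly early after resume but never remain
	 * pending longer than intended.
	 */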
436 */ 437 438 /* Don't do anything */ 439 if (time_change->tv_sec < 0) 440 return; 441 else if (time_change->tv_sec <= LONG_MAX / 1000000) 442 delta_ticks = (time_change->tv_sec * 1000000 + 443 time_change->tv_usec + (tick - 1)) / tick + 1; 444 else if (time_change->tv_sec <= LONG_MAX / hz) 445 delta_ticks = time_change->tv_sec * hz + 446 (time_change->tv_usec + (tick - 1)) / tick + 1; 447 else 448 delta_ticks = LONG_MAX; 449 450 if (delta_ticks > INT_MAX) 451 delta_ticks = INT_MAX; 452 453 /* 454 * Now rip through the timer calltodo list looking for timers 455 * to expire. 456 */ 457 458 /* don't collide with softclock() */ 459 mtx_lock_spin(&callout_lock); 460 for (p = calltodo.c_next; p != NULL; p = p->c_next) { 461 p->c_time -= delta_ticks; 462 463 /* Break if the timer had more time on it than delta_ticks */ 464 if (p->c_time > 0) 465 break; 466 467 /* take back the ticks the timer didn't use (p->c_time <= 0) */ 468 delta_ticks = -p->c_time; 469 } 470 mtx_unlock_spin(&callout_lock); 471 472 return; 473} 474#endif /* APM_FIXUP_CALLTODO */ 475