kern_timeout.c revision 12650
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $Id: kern_clock.c,v 1.21 1995/12/04 16:48:20 phk Exp $
 */

/* Portions of this software are covered by the following: */
/******************************************************************************
 *                                                                            *
 * Copyright (c) David L. Mills 1993, 1994                                    *
 *                                                                            *
 * Permission to use, copy, modify, and distribute this software and its     *
 * documentation for any purpose and without fee is hereby granted, provided *
 * that the above copyright notice appears in all copies and that both the   *
 * copyright notice and this permission notice appear in supporting          *
 * documentation, and that the name University of Delaware not be used in    *
 * advertising or publicity pertaining to distribution of the software       *
 * without specific, written prior permission.  The University of Delaware   *
 * makes no representations about the suitability this software for any      *
 * purpose.  It is provided "as is" without express or implied warranty.     *
 *                                                                            *
 *****************************************************************************/

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dkstat.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/timex.h>
#include <vm/vm.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/clock.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

static void initclocks __P((void *dummy));
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)

/* Does anybody else really care about these? */
struct callout *callfree, *callout, calltodo;

/* Some of these don't belong here, but it's easiest to concentrate them. */
long cp_time[CPUSTATES];
long dk_seek[DK_NDRIVE];
long dk_time[DK_NDRIVE];
long dk_wds[DK_NDRIVE];
long dk_wpms[DK_NDRIVE];
long dk_xfer[DK_NDRIVE];

int dk_busy;
int dk_ndrive = 0;
char dk_names[DK_NDRIVE][DK_NAMELEN];

long tk_cancc;
long tk_nin;
long tk_nout;
long tk_rawcc;

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.  The main clock, running hz times per second, is used to keep
 * track of real time.  The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires.  Otherwise, it would never accumulate
 * cpu ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 */

/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */

/*
 * Bump a timeval by a small number of usec's.
 */
#define BUMPTIME(t, usec) { \
        register volatile struct timeval *tp = (t); \
        register long us; \
 \
        tp->tv_usec = us = tp->tv_usec + (usec); \
        if (us >= 1000000) { \
                tp->tv_usec = us - 1000000; \
                tp->tv_sec++; \
        } \
}
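
/*
 * Illustrative (non-compiled) sketch of BUMPTIME, using hypothetical
 * values not taken from this file.  Note that the macro handles at most
 * one second of carry, so the usec argument must stay well under 1000000.
 */
#if 0
        volatile struct timeval example_tv = { 42, 999900 };

        BUMPTIME(&example_tv, 200);     /* usec wraps: tv becomes { 43, 100 } */
#endif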

int stathz;
int profhz;
int profprocs;
int ticks;
static int psdiv, pscnt;        /* prof => stat divider */
int psratio;                    /* ratio: prof / stat */

volatile struct timeval time;
volatile struct timeval mono_time;

/*
 * Phase-lock loop (PLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
 *
 * time_tolerance determines maximum frequency error or tolerance of the
 * CPU clock oscillator and is a property of the architecture; however,
 * in principle it could change as result of the presence of external
 * discipline signals, for instance.
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depend on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by a ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error
 * bound growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
int time_status = STA_UNSYNC;   /* clock status bits */
int time_state = TIME_OK;       /* clock state */
long time_offset = 0;           /* time offset (us) */
long time_constant = 0;         /* pll time constant */
long time_tolerance = MAXFREQ;  /* frequency tolerance (scaled ppm) */
long time_precision = 1;        /* clock precision (us) */
long time_maxerror = MAXPHASE;  /* maximum error (us) */
long time_esterror = MAXPHASE;  /* estimated error (us) */

/*
 * The following variables establish the state of the PLL and the
 * residual time and frequency offset of the local clock.  The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable at each tick of
 * the clock.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started.  Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed at each timer interrupt.
 *
 * time_reftime is the second's portion of the system time on the last
 * call to ntp_adjtime().  It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
 */
long time_phase = 0;            /* phase offset (scaled us) */
long time_freq = 0;             /* frequency offset (scaled ppm) */
long time_adj = 0;              /* tick adjust (scaled 1 / hz) */
long time_reftime = 0;          /* time at last adjustment (s) */

#ifdef PPS_SYNC
/*
 * The following variables are used only if the kernel PPS
 * discipline code is configured (PPS_SYNC).  The scale factors are
 * defined in the timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime().
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion measured by this
 * filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion measured by
 * this filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time.  Here we want the hardware counter contents only, not the
 * contents plus the time_tv.usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update.  It
 * is used as a watchdog timer to disable the PPS discipline should the
 * PPS signal be lost.
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from current nominal offset.  It is used
 * mainly to suppress error bursts due to priority conflicts between the
 * PPS interrupt and timer interrupt.
 *
 * pps_count counts the seconds of the calibration interval, the
 * duration of which is pps_shift in powers of two.
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm.  It's just too complicated for words.
 */
struct timeval pps_time;        /* kernel time at last interval */
long pps_offset = 0;            /* pps time offset (us) */
long pps_jitter = MAXTIME;      /* pps time dispersion (jitter) (us) */
long pps_tf[] = {0, 0, 0};      /* pps time offset median filter (us) */
long pps_freq = 0;              /* frequency offset (scaled ppm) */
long pps_stabil = MAXFREQ;      /* frequency dispersion (scaled ppm) */
long pps_ff[] = {0, 0, 0};      /* frequency offset median filter */
long pps_usec = 0;              /* microsec counter at last interval */
long pps_valid = PPS_VALID;     /* pps signal watchdog counter */
int pps_glitch = 0;             /* pps signal glitch counter */
int pps_count = 0;              /* calibration interval counter (s) */
int pps_shift = PPS_SHIFT;      /* interval duration (s) (shift) */
int pps_intcnt = 0;             /* intervals at current duration */

/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME
 * (100 us).
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
 */
long pps_jitcnt = 0;            /* jitter limit exceeded */
long pps_calcnt = 0;            /* calibration intervals */
long pps_errcnt = 0;            /* calibration errors */
long pps_stbcnt = 0;            /* stability limit exceeded */
#endif /* PPS_SYNC */

/* XXX none of this stuff works under FreeBSD */
#ifdef EXT_CLOCK
/*
 * External clock definitions
 *
 * The following definitions and declarations are used only if an
 * external clock (HIGHBALL or TPRO) is configured on the system.
 */
#define CLOCK_INTERVAL 30       /* CPU clock update interval (s) */

/*
 * The clock_count variable is set to CLOCK_INTERVAL at each PPS
 * interrupt and decremented once each second.
 */
int clock_count = 0;            /* CPU clock counter */

#ifdef HIGHBALL
/*
 * The clock_offset and clock_cpu variables are used by the HIGHBALL
 * interface.  The clock_offset variable defines the offset between
 * system time and the HIGHBALL counters.  The clock_cpu variable
 * contains the offset between the system clock and the HIGHBALL clock
 * for use in disciplining the kernel time variable.
 */
extern struct timeval clock_offset;     /* Highball clock offset */
long clock_cpu = 0;             /* CPU clock adjust */
#endif /* HIGHBALL */
#endif /* EXT_CLOCK */

/*
 * hardupdate() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency.  This is used to implement an adaptive-parameter,
 * first-order, type-II phase-lock loop.  The code computes new time and
 * frequency offsets each time it is called.  The hardclock() routine
 * amortizes these offsets at each tick interrupt.  If the kernel PPS
 * discipline code is configured (PPS_SYNC), the PPS signal itself
 * determines the new time offset, instead of the calling argument.
 * Presumably, calls to ntp_adjtime() occur only when the caller
 * believes the local clock is valid within some bound (+-128 ms with
 * NTP).  If the caller's time is far different than the PPS time, an
 * argument will ensue, and it's not clear who will lose.
 *
 * For default SHIFT_UPDATE = 12, the offset is limited to +-512 ms, the
 * maximum interval between updates is 4096 s and the maximum frequency
 * offset is +-31.25 ms/s.
 *
 * Note: splclock() is in effect.
 */
void
hardupdate(offset)
        long offset;
{
        long ltemp, mtemp;

        if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
                return;
        ltemp = offset;
#ifdef PPS_SYNC
        if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
                ltemp = pps_offset;
#endif /* PPS_SYNC */
        if (ltemp > MAXPHASE)
                time_offset = MAXPHASE << SHIFT_UPDATE;
        else if (ltemp < -MAXPHASE)
                time_offset = -(MAXPHASE << SHIFT_UPDATE);
        else
                time_offset = ltemp << SHIFT_UPDATE;
        mtemp = time.tv_sec - time_reftime;
        time_reftime = time.tv_sec;
        if (mtemp > MAXSEC)
                mtemp = 0;

        /* ugly multiply should be replaced */
        if (ltemp < 0)
                time_freq -= (-ltemp * mtemp) >> (time_constant +
                    time_constant + SHIFT_KF - SHIFT_USEC);
        else
                time_freq += (ltemp * mtemp) >> (time_constant +
                    time_constant + SHIFT_KF - SHIFT_USEC);
        if (time_freq > time_tolerance)
                time_freq = time_tolerance;
        else if (time_freq < -time_tolerance)
                time_freq = -time_tolerance;
}
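
/*
 * Worked example of the saturating clamp above, assuming the usual
 * timex.h values MAXPHASE = 512000 us and SHIFT_UPDATE = 12 (these are
 * assumptions, not definitions made in this file).  An offset of +2 s
 * from ntp_adjtime() exceeds MAXPHASE, so time_offset saturates at
 * MAXPHASE << SHIFT_UPDATE, the scaled representation of +512 ms;
 * hardclock() then amortizes that scaled offset a little at each tick
 * rather than stepping the clock.
 */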

/*
 * Initialize clock frequencies and start both clocks running.
 */
/* ARGSUSED*/
static void
initclocks(dummy)
        void *dummy;
{
        register int i;

        /*
         * Set divisors to 1 (normal case) and let the machine-specific
         * code do its bit.
         */
        psdiv = pscnt = 1;
        cpu_initclocks();

        /*
         * Compute profhz/stathz, and fix profhz if needed.
         */
        i = stathz ? stathz : hz;
        if (profhz == 0)
                profhz = i;
        psratio = profhz / i;
}
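
/*
 * Example of the ratio computed above, with hypothetical rates: for
 * stathz = 128 and profhz = 1024, psratio is 8, so while profiling the
 * statistics clock fires eight times for every tick actually charged
 * to the statistics counters (pscnt counts those interrupts down).
 */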

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(frame)
        register struct clockframe *frame;
{
        register struct callout *p1;
        register struct proc *p;
        register int needsoft;

        /*
         * Update real-time timeout queue.
         * At front of queue are some number of events which are ``due''.
         * The time to these is <= 0 and if negative represents the
         * number of ticks which have passed since it was supposed to happen.
         * The rest of the q elements (times > 0) are events yet to happen,
         * where the time for each is given as a delta from the previous.
         * Decrementing just the first of these serves to decrement the time
         * to all events.
         */
        needsoft = 0;
        for (p1 = calltodo.c_next; p1 != NULL; p1 = p1->c_next) {
                if (--p1->c_time > 0)
                        break;
                needsoft = 1;
                if (p1->c_time == 0)
                        break;
        }

        p = curproc;
        if (p) {
                register struct pstats *pstats;

                /*
                 * Run current process's virtual and profile time, as needed.
                 */
                pstats = p->p_stats;
                if (CLKF_USERMODE(frame) &&
                    timerisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
                    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
                        psignal(p, SIGVTALRM);
                if (timerisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
                    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
                        psignal(p, SIGPROF);
        }

        /*
         * If no separate statistics clock is available, run it from here.
         */
        if (stathz == 0)
                statclock(frame);

        /*
         * Increment the time-of-day.
         */
        ticks++;
        {
                int time_update;
                struct timeval newtime = time;
                long ltemp;

                if (timedelta == 0) {
                        time_update = CPU_THISTICKLEN(tick);
                } else {
                        time_update = CPU_THISTICKLEN(tick) + tickdelta;
                        timedelta -= tickdelta;
                }
                BUMPTIME(&mono_time, time_update);

                /*
                 * Compute the phase adjustment.  If the low-order bits
                 * (time_phase) of the update overflow, bump the high-order
                 * bits (time_update).
                 */
                time_phase += time_adj;
                if (time_phase <= -FINEUSEC) {
                        ltemp = -time_phase >> SHIFT_SCALE;
                        time_phase += ltemp << SHIFT_SCALE;
                        time_update -= ltemp;
                }
                else if (time_phase >= FINEUSEC) {
                        ltemp = time_phase >> SHIFT_SCALE;
                        time_phase -= ltemp << SHIFT_SCALE;
                        time_update += ltemp;
                }

                newtime.tv_usec += time_update;
                /*
                 * On rollover of the second the phase adjustment to be used
                 * for the next second is calculated.  Also, the maximum error
                 * is increased by the tolerance.  If the PPS frequency
                 * discipline code is present, the phase is increased to
                 * compensate for the CPU clock oscillator frequency error.
                 *
                 * With SHIFT_SCALE = 23, the maximum frequency adjustment is
                 * +-256 us per tick, or 25.6 ms/s at a clock frequency of 100
                 * Hz.  The time contribution is shifted right a minimum of
                 * two bits, while the frequency contribution is a right
                 * shift.  Thus, overflow is prevented if the frequency
                 * contribution is limited to half the maximum or 15.625 ms/s.
                 */
                if (newtime.tv_usec >= 1000000) {
                        newtime.tv_usec -= 1000000;
                        newtime.tv_sec++;
                        time_maxerror += time_tolerance >> SHIFT_USEC;
                        if (time_offset < 0) {
                                ltemp = -time_offset >>
                                    (SHIFT_KG + time_constant);
                                time_offset += ltemp;
                                time_adj = -ltemp <<
                                    (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
                        } else {
                                ltemp = time_offset >>
                                    (SHIFT_KG + time_constant);
                                time_offset -= ltemp;
                                time_adj = ltemp <<
                                    (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
                        }
#ifdef PPS_SYNC
                        /*
                         * Gnaw on the watchdog counter and update the
                         * frequency computed by the pll and the PPS signal.
                         */
                        pps_valid++;
                        if (pps_valid == PPS_VALID) {
                                pps_jitter = MAXTIME;
                                pps_stabil = MAXFREQ;
                                time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
                                    STA_PPSWANDER | STA_PPSERROR);
                        }
                        ltemp = time_freq + pps_freq;
#else
                        ltemp = time_freq;
#endif /* PPS_SYNC */
                        if (ltemp < 0)
                                time_adj -= -ltemp >>
                                    (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
                        else
                                time_adj += ltemp >>
                                    (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);

                        /*
                         * When the CPU clock oscillator frequency is not a
                         * power of two in Hz, the SHIFT_HZ is only an
                         * approximate scale factor.  In the SunOS kernel,
                         * this results in a PLL gain factor of 1/1.28 = 0.78
                         * of what it should be.  In the following code the
                         * overall gain is increased by a factor of 1.25,
                         * which results in a residual error less than 3
                         * percent.
                         */
                        /* Same thing applies for FreeBSD --GAW */
                        if (hz == 100) {
                                if (time_adj < 0)
                                        time_adj -= -time_adj >> 2;
                                else
                                        time_adj += time_adj >> 2;
                        }

                        /* XXX - this is really bogus, but can't be fixed
                           until xntpd's idea of the system clock is fixed to
                           know how the user wants leap seconds handled; in
                           the mean time, we assume that users of NTP are
                           running without proper leap second support (this
                           is now the default anyway) */
                        /*
                         * Leap second processing.  If in leap-insert state at
                         * the end of the day, the system clock is set back
                         * one second; if in leap-delete state, the system
                         * clock is set ahead one second.  The microtime()
                         * routine or external clock driver will ensure that
                         * reported time is always monotonic.  The ugly
                         * divides should be replaced.
                         */
                        switch (time_state) {

                        case TIME_OK:
                                if (time_status & STA_INS)
                                        time_state = TIME_INS;
                                else if (time_status & STA_DEL)
                                        time_state = TIME_DEL;
                                break;

                        case TIME_INS:
                                if (newtime.tv_sec % 86400 == 0) {
                                        newtime.tv_sec--;
                                        time_state = TIME_OOP;
                                }
                                break;

                        case TIME_DEL:
                                if ((newtime.tv_sec + 1) % 86400 == 0) {
                                        newtime.tv_sec++;
                                        time_state = TIME_WAIT;
                                }
                                break;

                        case TIME_OOP:
                                time_state = TIME_WAIT;
                                break;

                        case TIME_WAIT:
                                if (!(time_status & (STA_INS | STA_DEL)))
                                        time_state = TIME_OK;
                        }
                }
                CPU_CLOCKUPDATE(&time, &newtime);
        }

        /*
         * Process callouts at a very low cpu priority, so we don't keep the
         * relatively high clock interrupt priority any longer than necessary.
         */
        if (needsoft) {
                if (CLKF_BASEPRI(frame)) {
                        /*
                         * Save the overhead of a software interrupt;
                         * it will happen as soon as we return, so do it now.
                         */
                        (void)splsoftclock();
                        softclock();
                } else
                        setsoftclock();
        }
}
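
/*
 * Illustrative sketch (hypothetical values) of the delta-encoded
 * timeout queue that hardclock() ages above.  Events due at absolute
 * ticks 5, 8 and 8 are stored as deltas from their predecessors:
 *
 *	calltodo -> [5] -> [3] -> [0]
 *
 * Decrementing only the head entry each tick effectively decrements
 * the time to every event; a zero delta means "due at the same time
 * as the previous entry", and a negative head entry counts how many
 * ticks overdue it is.
 */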

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
/*ARGSUSED*/
void
softclock()
{
        register struct callout *c;
        register void *arg;
        register void (*func) __P((void *));
        register int s;

        s = splhigh();
        while ((c = calltodo.c_next) != NULL && c->c_time <= 0) {
                func = c->c_func;
                arg = c->c_arg;
                calltodo.c_next = c->c_next;
                c->c_next = callfree;
                callfree = c;
                splx(s);
                (*func)(arg);
                (void) splhigh();
        }
        splx(s);
}

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * See AT&T BCI Driver Reference Manual for specification.  This
 * implementation differs from that one in that no identification
 * value is returned from timeout, rather, the original arguments
 * to timeout are used to identify entries for untimeout.
 */
void
timeout(ftn, arg, ticks)
        timeout_t ftn;
        void *arg;
        register int ticks;
{
        register struct callout *new, *p, *t;
        register int s;

        if (ticks <= 0)
                ticks = 1;

        /* Lock out the clock. */
        s = splhigh();

        /* Fill in the next free callout structure. */
        if (callfree == NULL)
                panic("timeout table full");
        new = callfree;
        callfree = new->c_next;
        new->c_arg = arg;
        new->c_func = ftn;

        /*
         * The time for each event is stored as a difference from the time
         * of the previous event on the queue.  Walk the queue, correcting
         * the ticks argument for queue entries passed.  Correct the ticks
         * value for the queue entry immediately after the insertion point
         * as well.  Watch out for negative c_time values; these represent
         * overdue events.
         */
        for (p = &calltodo;
            (t = p->c_next) != NULL && ticks > t->c_time; p = t)
                if (t->c_time > 0)
                        ticks -= t->c_time;
        new->c_time = ticks;
        if (t != NULL)
                t->c_time -= ticks;

        /* Insert the new entry into the queue. */
        p->c_next = new;
        new->c_next = t;
        splx(s);
}

void
untimeout(ftn, arg)
        timeout_t ftn;
        void *arg;
{
        register struct callout *p, *t;
        register int s;

        s = splhigh();
        for (p = &calltodo; (t = p->c_next) != NULL; p = t)
                if (t->c_func == ftn && t->c_arg == arg) {
                        /* Increment next entry's tick count. */
                        if (t->c_next && t->c_time > 0)
                                t->c_next->c_time += t->c_time;

                        /* Move entry from callout queue to callfree queue. */
                        p->c_next = t->c_next;
                        t->c_next = callfree;
                        callfree = t;
                        break;
                }
        splx(s);
}
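
/*
 * Usage sketch (hypothetical driver fragment, not part of this file):
 * a self-rearming one-second watchdog.  example_watchdog and sc are
 * invented names; sc stands for a driver softc pointer.  The same
 * (function, argument) pair that arms the timeout identifies it for
 * untimeout(), since this implementation returns no handle.
 */
#if 0
static void
example_watchdog(arg)
        void *arg;
{
        /* ... check the hypothetical device ... */
        timeout(example_watchdog, arg, hz);     /* rearm in one second */
}

        /* arm: */
        timeout(example_watchdog, (void *)sc, hz);
        /* cancel, using the same identification pair: */
        untimeout(example_watchdog, (void *)sc);
#endif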

/*
 * Compute number of hz until specified time.  Used to
 * compute third argument to timeout() from an absolute time.
 */
int
hzto(tv)
        struct timeval *tv;
{
        register unsigned long ticks;
        register long sec, usec;
        int s;

        /*
         * If the number of usecs in the whole seconds part of the time
         * difference fits in a long, then the total number of usecs will
         * fit in an unsigned long.  Compute the total and convert it to
         * ticks, rounding up and adding 1 to allow for the current tick
         * to expire.  Rounding also depends on unsigned long arithmetic
         * to avoid overflow.
         *
         * Otherwise, if the number of ticks in the whole seconds part of
         * the time difference fits in a long, then convert the parts to
         * ticks separately and add, using similar rounding methods and
         * overflow avoidance.  This method would work in the previous
         * case but it is slightly slower and assumes that hz is integral.
         *
         * Otherwise, round the time difference down to the maximum
         * representable value.
         *
         * If ints have 32 bits, then the maximum value for any timeout in
         * 10ms ticks is 248 days.
         */
        s = splclock();
        sec = tv->tv_sec - time.tv_sec;
        usec = tv->tv_usec - time.tv_usec;
        splx(s);
        if (usec < 0) {
                sec--;
                usec += 1000000;
        }
        if (sec < 0) {
#ifdef DIAGNOSTIC
                printf("hzto: negative time difference %ld sec %ld usec\n",
                    sec, usec);
#endif
                ticks = 1;
        } else if (sec <= LONG_MAX / 1000000)
                ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
                    / tick + 1;
        else if (sec <= LONG_MAX / hz)
                ticks = sec * hz
                    + ((unsigned long)usec + (tick - 1)) / tick + 1;
        else
                ticks = LONG_MAX;
        if (ticks > INT_MAX)
                ticks = INT_MAX;
        return (ticks);
}
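
/*
 * Usage sketch (hypothetical fragment, reusing the invented names from
 * the timeout() sketch above): scheduling at an absolute time by
 * converting it to a tick count first.
 */
#if 0
        struct timeval deadline;

        deadline = time;                /* current time-of-day (racy read,
                                           shown for brevity) */
        deadline.tv_sec += 30;          /* expire 30 seconds from now */
        timeout(example_watchdog, (void *)sc, hzto(&deadline));
#endif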

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(p)
        register struct proc *p;
{
        int s;

        if ((p->p_flag & P_PROFIL) == 0) {
                p->p_flag |= P_PROFIL;
                if (++profprocs == 1 && stathz != 0) {
                        s = splstatclock();
                        psdiv = pscnt = psratio;
                        setstatclockrate(profhz);
                        splx(s);
                }
        }
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(p)
        register struct proc *p;
{
        int s;

        if (p->p_flag & P_PROFIL) {
                p->p_flag &= ~P_PROFIL;
                if (--profprocs == 0 && stathz != 0) {
                        s = splstatclock();
                        psdiv = pscnt = 1;
                        setstatclockrate(stathz);
                        splx(s);
                }
        }
}

/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
 */
void
statclock(frame)
        register struct clockframe *frame;
{
#ifdef GPROF
        register struct gmonparam *g;
#endif
        register struct proc *p = curproc;
        register int i;

        if (p) {
                struct pstats *pstats;
                struct rusage *ru;
                struct vmspace *vm;

                /* bump the resource usage of integral space use */
                if ((pstats = p->p_stats) && (ru = &pstats->p_ru) &&
                    (vm = p->p_vmspace)) {
                        ru->ru_ixrss += vm->vm_tsize * PAGE_SIZE / 1024;
                        ru->ru_idrss += vm->vm_dsize * PAGE_SIZE / 1024;
                        ru->ru_isrss += vm->vm_ssize * PAGE_SIZE / 1024;
                        if ((vm->vm_pmap.pm_stats.resident_count *
                            PAGE_SIZE / 1024) > ru->ru_maxrss) {
                                ru->ru_maxrss =
                                    vm->vm_pmap.pm_stats.resident_count *
                                    PAGE_SIZE / 1024;
                        }
                }
        }

        if (CLKF_USERMODE(frame)) {
                if (p->p_flag & P_PROFIL)
                        addupc_intr(p, CLKF_PC(frame), 1);
                if (--pscnt > 0)
                        return;
                /*
                 * Came from user mode; CPU was in user state.
                 * If this process is being profiled record the tick.
                 */
                p->p_uticks++;
                if (p->p_nice > NZERO)
                        cp_time[CP_NICE]++;
                else
                        cp_time[CP_USER]++;
        } else {
#ifdef GPROF
                /*
                 * Kernel statistics are just like addupc_intr, only easier.
                 */
                g = &_gmonparam;
                if (g->state == GMON_PROF_ON) {
                        i = CLKF_PC(frame) - g->lowpc;
                        if (i < g->textsize) {
                                i /= HISTFRACTION * sizeof(*g->kcount);
                                g->kcount[i]++;
                        }
                }
#endif
                if (--pscnt > 0)
                        return;
                /*
                 * Came from kernel mode, so we were:
                 * - handling an interrupt,
                 * - doing syscall or trap work on behalf of the current
                 *   user process, or
                 * - spinning in the idle loop.
                 * Whichever it is, charge the time as appropriate.
                 * Note that we charge interrupts to the current process,
                 * regardless of whether they are ``for'' that process,
                 * so that we know how much of its real time was spent
                 * in ``non-process'' (i.e., interrupt) work.
                 */
                if (CLKF_INTR(frame)) {
                        if (p != NULL)
                                p->p_iticks++;
                        cp_time[CP_INTR]++;
                } else if (p != NULL) {
                        p->p_sticks++;
                        cp_time[CP_SYS]++;
                } else
                        cp_time[CP_IDLE]++;
        }
        pscnt = psdiv;

        /*
         * We maintain statistics shown by user-level statistics
         * programs:  the amount of time in each cpu state, and
         * the amount of time each of DK_NDRIVE ``drives'' is busy.
         *
         * XXX	should either run linked list of drives, or (better)
         *	grab timestamps in the start & done code.
         */
        for (i = 0; i < DK_NDRIVE; i++)
                if (dk_busy & (1 << i))
                        dk_time[i]++;

        /*
         * We adjust the priority of the current process.  The priority of
         * a process gets worse as it accumulates CPU time.  The cpu usage
         * estimator (p_estcpu) is increased here.  The formula for computing
         * priorities (in kern_synch.c) will compute a different value each
         * time p_estcpu increases by 4.  The cpu usage estimator ramps up
         * quite quickly when the process is running (linearly), and decays
         * away exponentially, at a rate which is proportionally slower when
         * the system is busy.  The basic principle is that the system will
         * 90% forget that the process used a lot of CPU time in 5 * loadav
         * seconds.  This causes the system to favor processes which haven't
         * run much recently, and to round-robin among other processes.
         */
        if (p != NULL) {
                p->p_cpticks++;
                if (++p->p_estcpu == 0)
                        p->p_estcpu--;
                if ((p->p_estcpu & 3) == 0) {
                        resetpriority(p);
                        if (p->p_priority >= PUSER)
                                p->p_priority = p->p_usrpri;
                }
        }
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate SYSCTL_HANDLER_ARGS
{
        struct clockinfo clkinfo;

        /*
         * Construct clockinfo structure.
         */
        clkinfo.hz = hz;
        clkinfo.tick = tick;
        clkinfo.profhz = profhz;
        clkinfo.stathz = stathz ? stathz : hz;
        return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
        0, 0, sysctl_kern_clockrate, "S,clockinfo", "");
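
/*
 * Illustrative userland reader for the node exported above (a
 * hypothetical standalone program, not part of this kernel file):
 */
#if 0
#include <sys/types.h>
#include <sys/time.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main()
{
        struct clockinfo ci;
        int mib[2];
        size_t len;

        mib[0] = CTL_KERN;
        mib[1] = KERN_CLOCKRATE;
        len = sizeof(ci);
        if (sysctl(mib, 2, &ci, &len, NULL, 0) == -1)
                return (1);
        printf("hz=%d tick=%d stathz=%d profhz=%d\n",
            ci.hz, ci.tick, ci.stathz, ci.profhz);
        return (0);
}
#endif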

/*#ifdef PPS_SYNC*/
#if 0
/* This code is completely bogus; if anybody ever wants to use it, get
 * the current version from Dave Mills. */

/*
 * hardpps() - discipline CPU clock oscillator to external pps signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal.  It integrates successive
 * phase differences between the two oscillators and calculates the
 * frequency offset.  This is used in hardclock() to discipline the CPU
 * clock oscillator so that intrinsic frequency error is cancelled out.
 * The code requires the caller to capture the time and hardware
 * counter value at the designated PPS signal transition.
 */
void
hardpps(tvp, usec)
        struct timeval *tvp;    /* time at PPS */
        long usec;              /* hardware counter at PPS */
{
        long u_usec, v_usec, bigtick;
        long cal_sec, cal_usec;

        /*
         * During the calibration interval adjust the starting time when
         * the tick overflows.  At the end of the interval compute the
         * duration of the interval and the difference of the hardware
         * counters at the beginning and end of the interval.  This code
         * is deliciously complicated by the fact that valid differences
         * may exceed the value of tick when using long calibration
         * intervals and small ticks.  Note that the counter can be
         * greater than tick if caught at just the wrong instant, but
         * the values returned and used here are correct.
         */
        bigtick = (long)tick << SHIFT_USEC;
        pps_usec -= ntp_pll.ybar;
        if (pps_usec >= bigtick)
                pps_usec -= bigtick;
        if (pps_usec < 0)
                pps_usec += bigtick;
        pps_time.tv_sec++;
        pps_count++;
        if (pps_count < (1 << pps_shift))
                return;
        pps_count = 0;
        ntp_pll.calcnt++;
        u_usec = usec << SHIFT_USEC;
        v_usec = pps_usec - u_usec;
        if (v_usec >= bigtick >> 1)
                v_usec -= bigtick;
        if (v_usec < -(bigtick >> 1))
                v_usec += bigtick;
        if (v_usec < 0)
                v_usec = -(-v_usec >> ntp_pll.shift);
        else
                v_usec = v_usec >> ntp_pll.shift;
        pps_usec = u_usec;
        cal_sec = tvp->tv_sec;
        cal_usec = tvp->tv_usec;
        cal_sec -= pps_time.tv_sec;
        cal_usec -= pps_time.tv_usec;
        if (cal_usec < 0) {
                cal_usec += 1000000;
                cal_sec--;
        }
        pps_time = *tvp;

        /*
         * Check for lost interrupts, noise, excessive jitter and
         * excessive frequency error.  The number of timer ticks during
         * the interval may vary +-1 tick.  Add to this a margin of one
         * tick for the PPS signal jitter and maximum frequency
         * deviation.  If the limits are exceeded, the calibration
         * interval is reset to the minimum and we start over.
         */
        u_usec = (long)tick << 1;
        if (!((cal_sec == -1 && cal_usec > (1000000 - u_usec))
            || (cal_sec == 0 && cal_usec < u_usec))
            || v_usec > ntp_pll.tolerance || v_usec < -ntp_pll.tolerance) {
                ntp_pll.jitcnt++;
                ntp_pll.shift = NTP_PLL.SHIFT;
                pps_dispinc = PPS_DISPINC;
                ntp_pll.intcnt = 0;
                return;
        }

        /*
         * A three-stage median filter is used to help deglitch the pps
         * signal.  The median sample becomes the offset estimate; the
         * difference between the other two samples becomes the
         * dispersion estimate.
         */
        pps_mf[2] = pps_mf[1];
        pps_mf[1] = pps_mf[0];
        pps_mf[0] = v_usec;
        if (pps_mf[0] > pps_mf[1]) {
                if (pps_mf[1] > pps_mf[2]) {
                        u_usec = pps_mf[1];             /* 0 1 2 */
                        v_usec = pps_mf[0] - pps_mf[2];
                } else if (pps_mf[2] > pps_mf[0]) {
                        u_usec = pps_mf[0];             /* 2 0 1 */
                        v_usec = pps_mf[2] - pps_mf[1];
                } else {
                        u_usec = pps_mf[2];             /* 0 2 1 */
                        v_usec = pps_mf[0] - pps_mf[1];
                }
        } else {
                if (pps_mf[1] < pps_mf[2]) {
                        u_usec = pps_mf[1];             /* 2 1 0 */
                        v_usec = pps_mf[2] - pps_mf[0];
                } else if (pps_mf[2] < pps_mf[0]) {
                        u_usec = pps_mf[0];             /* 1 0 2 */
                        v_usec = pps_mf[1] - pps_mf[2];
                } else {
                        u_usec = pps_mf[2];             /* 1 2 0 */
                        v_usec = pps_mf[1] - pps_mf[0];
                }
        }

        /*
         * Here the dispersion average is updated.  If it is less than
         * the threshold pps_dispmax, the frequency average is updated
         * as well, but clamped to the tolerance.
         */
        v_usec = (v_usec >> 1) - ntp_pll.disp;
        if (v_usec < 0)
                ntp_pll.disp -= -v_usec >> PPS_AVG;
        else
                ntp_pll.disp += v_usec >> PPS_AVG;
        if (ntp_pll.disp > pps_dispmax) {
                ntp_pll.discnt++;
                return;
        }
        if (u_usec < 0) {
                ntp_pll.ybar -= -u_usec >> PPS_AVG;
                if (ntp_pll.ybar < -ntp_pll.tolerance)
                        ntp_pll.ybar = -ntp_pll.tolerance;
                u_usec = -u_usec;
        } else {
                ntp_pll.ybar += u_usec >> PPS_AVG;
                if (ntp_pll.ybar > ntp_pll.tolerance)
                        ntp_pll.ybar = ntp_pll.tolerance;
        }

        /*
         * Here the calibration interval is adjusted.  If the maximum
         * time difference is greater than tick/4, reduce the interval
         * by half.  If this is not the case for four consecutive
         * intervals, double the interval.
         */
        if (u_usec << ntp_pll.shift > bigtick >> 2) {
                ntp_pll.intcnt = 0;
                if (ntp_pll.shift > NTP_PLL.SHIFT) {
                        ntp_pll.shift--;
                        pps_dispinc <<= 1;
                }
        } else if (ntp_pll.intcnt >= 4) {
                ntp_pll.intcnt = 0;
                if (ntp_pll.shift < NTP_PLL.SHIFTMAX) {
                        ntp_pll.shift++;
                        pps_dispinc >>= 1;
                }
        } else
                ntp_pll.intcnt++;
}
#endif /* PPS_SYNC */