kern_timeout.c revision 3098
/*-
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $Id: kern_clock.c,v 1.6 1994/09/18 20:39:46 wollman Exp $
 */

/* Portions of this software are covered by the following: */
/******************************************************************************
 *                                                                            *
 * Copyright (c) David L. Mills 1993, 1994                                    *
 *                                                                            *
 * Permission to use, copy, modify, and distribute this software and its     *
 * documentation for any purpose and without fee is hereby granted, provided *
 * that the above copyright notice appears in all copies and that both the   *
 * copyright notice and this permission notice appear in supporting          *
 * documentation, and that the name University of Delaware not be used in    *
 * advertising or publicity pertaining to distribution of the software       *
 * without specific, written prior permission.  The University of Delaware   *
 * makes no representations about the suitability of this software for any   *
 * purpose.  It is provided "as is" without express or implied warranty.     *
 *                                                                            *
 *****************************************************************************/

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dkstat.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/timex.h>
#include <vm/vm.h>

#include <machine/cpu.h>
#include <machine/clock.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

/* Does anybody else really care about these? */
struct callout *callfree, *callout, calltodo;
int ncallout;

/* Some of these don't belong here, but it's easiest to concentrate them. */
long cp_time[CPUSTATES];
long dk_seek[DK_NDRIVE];
long dk_time[DK_NDRIVE];
long dk_wds[DK_NDRIVE];
long dk_wpms[DK_NDRIVE];
long dk_xfer[DK_NDRIVE];

int dk_busy;
int dk_ndrive = DK_NDRIVE;

long tk_cancc;
long tk_nin;
long tk_nout;
long tk_rawcc;

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.  The main clock, running hz times per second, is used to keep
 * track of real time.  The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires.  Otherwise, it would never accumulate
 * cpu ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
 */

/*
 * TODO:
 *	allocate more timeout table slots when table overflows.
 */

/*
 * Bump a timeval by a small number of usec's.
 */
#define BUMPTIME(t, usec) { \
	register volatile struct timeval *tp = (t); \
	register long us; \
 \
	tp->tv_usec = us = tp->tv_usec + (usec); \
	if (us >= 1000000) { \
		tp->tv_usec = us - 1000000; \
		tp->tv_sec++; \
	} \
}
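
/*
 * For illustration: if tp->tv_usec is 999900 and BUMPTIME(tp, 200) is
 * invoked, us becomes 1000100, so tv_usec is rewritten to 100 and tv_sec
 * is incremented.  A single carry is enough because callers only ever
 * bump by amounts far smaller than one second (about a tick's worth).
 */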

int stathz;
int profhz;
int profprocs;
int ticks;
static int psdiv, pscnt;	/* prof => stat divider */
int psratio;			/* ratio: prof / stat */

volatile struct timeval time;
volatile struct timeval mono_time;

/*
 * Phase-lock loop (PLL) definitions
 *
 * The following variables are read and set by the ntp_adjtime() system
 * call.
 *
 * time_state shows the state of the system clock, with values defined
 * in the timex.h header file.
 *
 * time_status shows the status of the system clock, with bits defined
 * in the timex.h header file.
 *
 * time_offset is used by the PLL to adjust the system time in small
 * increments.
 *
 * time_constant determines the bandwidth or "stiffness" of the PLL.
 *
 * time_tolerance determines the maximum frequency error or tolerance of
 * the CPU clock oscillator and is a property of the architecture;
 * however, in principle it could change as a result of the presence of
 * external discipline signals, for instance.
 *
 * time_precision is usually equal to the kernel tick variable; however,
 * in cases where a precision clock counter or external clock is
 * available, the resolution can be much less than this and depend on
 * whether the external clock is working or not.
 *
 * time_maxerror is initialized by an ntp_adjtime() call and increased by
 * the kernel once each second to reflect the maximum error bound growth.
 *
 * time_esterror is set and read by the ntp_adjtime() call, but
 * otherwise not used by the kernel.
 */
int time_status = STA_UNSYNC;	/* clock status bits */
int time_state = TIME_OK;	/* clock state */
long time_offset = 0;		/* time offset (us) */
long time_constant = 0;		/* pll time constant */
long time_tolerance = MAXFREQ;	/* frequency tolerance (scaled ppm) */
long time_precision = 1;	/* clock precision (us) */
long time_maxerror = MAXPHASE;	/* maximum error (us) */
long time_esterror = MAXPHASE;	/* estimated error (us) */

/*
 * The following variables establish the state of the PLL and the
 * residual time and frequency offset of the local clock.  The scale
 * factors are defined in the timex.h header file.
 *
 * time_phase and time_freq are the phase increment and the frequency
 * increment, respectively, of the kernel time variable at each tick of
 * the clock.
 *
 * time_freq is set via ntp_adjtime() from a value stored in a file when
 * the synchronization daemon is first started.  Its value is retrieved
 * via ntp_adjtime() and written to the file about once per hour by the
 * daemon.
 *
 * time_adj is the adjustment added to the value of tick at each timer
 * interrupt and is recomputed at each timer interrupt.
 *
 * time_reftime is the second's portion of the system time on the last
 * call to ntp_adjtime().  It is used to adjust the time_freq variable
 * and to increase the time_maxerror as the time since last update
 * increases.
 */
long time_phase = 0;		/* phase offset (scaled us) */
long time_freq = 0;		/* frequency offset (scaled ppm) */
long time_adj = 0;		/* tick adjust (scaled 1 / hz) */
long time_reftime = 0;		/* time at last adjustment (s) */
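
/*
 * A note on the fixed-point scaling (a sketch, assuming the reference
 * timex.h values SHIFT_USEC = 16 and SHIFT_SCALE = 23): a frequency
 * offset of 1 ppm is stored in time_freq as 1 << 16, and time_phase
 * counts in units of 2^-23 us, so hardclock() below carries whole
 * microseconds out of it with a SHIFT_SCALE right shift.
 */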

#ifdef PPS_SYNC
/*
 * The following variables are used only if the kernel PPS discipline
 * code is configured (PPS_SYNC).  The scale factors are defined in the
 * timex.h header file.
 *
 * pps_time contains the time at each calibration interval, as read by
 * microtime().
 *
 * pps_offset is the time offset produced by the time median filter
 * pps_tf[], while pps_jitter is the dispersion measured by this
 * filter.
 *
 * pps_freq is the frequency offset produced by the frequency median
 * filter pps_ff[], while pps_stabil is the dispersion measured by
 * this filter.
 *
 * pps_usec is latched from a high resolution counter or external clock
 * at pps_time.  Here we want the hardware counter contents only, not the
 * contents plus the time_tv.usec as usual.
 *
 * pps_valid counts the number of seconds since the last PPS update.  It
 * is used as a watchdog timer to disable the PPS discipline should the
 * PPS signal be lost.
 *
 * pps_glitch counts the number of seconds since the beginning of an
 * offset burst more than tick/2 from the current nominal offset.  It is
 * used mainly to suppress error bursts due to priority conflicts between
 * the PPS interrupt and timer interrupt.
 *
 * pps_count counts the seconds of the calibration interval, the
 * duration of which is pps_shift in powers of two.
 *
 * pps_intcnt counts the calibration intervals for use in the interval-
 * adaptation algorithm.  It's just too complicated for words.
 */
struct timeval pps_time;	/* kernel time at last interval */
long pps_offset = 0;		/* pps time offset (us) */
long pps_jitter = MAXTIME;	/* pps time dispersion (jitter) (us) */
long pps_tf[] = {0, 0, 0};	/* pps time offset median filter (us) */
long pps_freq = 0;		/* frequency offset (scaled ppm) */
long pps_stabil = MAXFREQ;	/* frequency dispersion (scaled ppm) */
long pps_ff[] = {0, 0, 0};	/* frequency offset median filter */
long pps_usec = 0;		/* microsec counter at last interval */
long pps_valid = PPS_VALID;	/* pps signal watchdog counter */
int pps_glitch = 0;		/* pps signal glitch counter */
int pps_count = 0;		/* calibration interval counter (s) */
int pps_shift = PPS_SHIFT;	/* interval duration (s) (shift) */
int pps_intcnt = 0;		/* intervals at current duration */

/*
 * PPS signal quality monitors
 *
 * pps_jitcnt counts the seconds that have been discarded because the
 * jitter measured by the time median filter exceeds the limit MAXTIME
 * (100 us).
 *
 * pps_calcnt counts the frequency calibration intervals, which are
 * variable from 4 s to 256 s.
 *
 * pps_errcnt counts the calibration intervals which have been discarded
 * because the wander exceeds the limit MAXFREQ (100 ppm) or where the
 * calibration interval jitter exceeds two ticks.
 *
 * pps_stbcnt counts the calibration intervals that have been discarded
 * because the frequency wander exceeds the limit MAXFREQ / 4 (25 ppm).
 */
long pps_jitcnt = 0;		/* jitter limit exceeded */
long pps_calcnt = 0;		/* calibration intervals */
long pps_errcnt = 0;		/* calibration errors */
long pps_stbcnt = 0;		/* stability limit exceeded */
#endif /* PPS_SYNC */
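
/*
 * A note on the median filters above: pps_tf[] and pps_ff[] each hold
 * the last three samples.  The idea (visible in the disabled hardpps()
 * at the bottom of this file, which still uses an older pps_mf[]
 * filter) is that the median becomes the estimate and the spread of the
 * other two samples becomes the dispersion, so a single glitched sample
 * cannot steer the clock.
 */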

/* XXX none of this stuff works under FreeBSD */
#ifdef EXT_CLOCK
/*
 * External clock definitions
 *
 * The following definitions and declarations are used only if an
 * external clock (HIGHBALL or TPRO) is configured on the system.
 */
#define CLOCK_INTERVAL 30	/* CPU clock update interval (s) */

/*
 * The clock_count variable is set to CLOCK_INTERVAL at each PPS
 * interrupt and decremented once each second.
 */
int clock_count = 0;		/* CPU clock counter */

#ifdef HIGHBALL
/*
 * The clock_offset and clock_cpu variables are used by the HIGHBALL
 * interface.  The clock_offset variable defines the offset between
 * system time and the HIGHBALL counters.  The clock_cpu variable
 * contains the offset between the system clock and the HIGHBALL clock
 * for use in disciplining the kernel time variable.
 */
extern struct timeval clock_offset;	/* Highball clock offset */
long clock_cpu = 0;		/* CPU clock adjust */
#endif /* HIGHBALL */
#endif /* EXT_CLOCK */

/*
 * hardupdate() - local clock update
 *
 * This routine is called by ntp_adjtime() to update the local clock
 * phase and frequency.  This is used to implement an adaptive-parameter,
 * first-order, type-II phase-lock loop.  The code computes new time and
 * frequency offsets each time it is called.  The hardclock() routine
 * amortizes these offsets at each tick interrupt.  If the kernel PPS
 * discipline code is configured (PPS_SYNC), the PPS signal itself
 * determines the new time offset, instead of the calling argument.
 * Presumably, calls to ntp_adjtime() occur only when the caller
 * believes the local clock is valid within some bound (+-128 ms with
 * NTP).  If the caller's time is far different from the PPS time, an
 * argument will ensue, and it's not clear who will lose.
 *
 * For default SHIFT_UPDATE = 12, the offset is limited to +-512 ms, the
 * maximum interval between updates is 4096 s and the maximum frequency
 * offset is +-31.25 ms/s.
 *
 * Note: splclock() is in effect.
 */
void
hardupdate(offset)
	long offset;
{
	long ltemp, mtemp;

	if (!(time_status & STA_PLL) && !(time_status & STA_PPSTIME))
		return;
	ltemp = offset;
#ifdef PPS_SYNC
	if (time_status & STA_PPSTIME && time_status & STA_PPSSIGNAL)
		ltemp = pps_offset;
#endif /* PPS_SYNC */
	if (ltemp > MAXPHASE)
		time_offset = MAXPHASE << SHIFT_UPDATE;
	else if (ltemp < -MAXPHASE)
		time_offset = -(MAXPHASE << SHIFT_UPDATE);
	else
		time_offset = ltemp << SHIFT_UPDATE;
	mtemp = time.tv_sec - time_reftime;
	time_reftime = time.tv_sec;
	if (mtemp > MAXSEC)
		mtemp = 0;

	/* ugly multiply should be replaced */
	if (ltemp < 0)
		time_freq -= (-ltemp * mtemp) >> (time_constant +
		    time_constant + SHIFT_KF - SHIFT_USEC);
	else
		time_freq += (ltemp * mtemp) >> (time_constant +
		    time_constant + SHIFT_KF - SHIFT_USEC);
	if (time_freq > time_tolerance)
		time_freq = time_tolerance;
	else if (time_freq < -time_tolerance)
		time_freq = -time_tolerance;
}
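
/*
 * Worked example for hardupdate() (a sketch assuming the reference
 * timex.h values MAXPHASE = 512000 us and SHIFT_UPDATE = 12): an offset
 * of +700000 us exceeds MAXPHASE, so time_offset is clamped to
 * 512000 << 12; an offset of +1000 us is stored as 1000 << 12.
 * hardclock() then works the stored offset off gradually, recomputing
 * time_adj at each rollover of the second.
 */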

/*
 * Initialize clock frequencies and start both clocks running.
 */
void
initclocks()
{
	register int i;

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	psdiv = pscnt = 1;
	cpu_initclocks();

	/*
	 * Compute profhz/stathz, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
}

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(frame)
	register struct clockframe *frame;
{
	register struct callout *p1;
	register struct proc *p;
	register int needsoft;
	extern int tickdelta;
	extern long timedelta;

	/*
	 * Update real-time timeout queue.
	 * At front of queue are some number of events which are ``due''.
	 * The time to these is <= 0 and if negative represents the
	 * number of ticks which have passed since it was supposed to happen.
	 * The rest of the q elements (times > 0) are events yet to happen,
	 * where the time for each is given as a delta from the previous.
	 * Decrementing just the first of these serves to decrement the time
	 * to all events.
	 */
	needsoft = 0;
	for (p1 = calltodo.c_next; p1 != NULL; p1 = p1->c_next) {
		if (--p1->c_time > 0)
			break;
		needsoft = 1;
		if (p1->c_time == 0)
			break;
	}
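
	/*
	 * Delta-queue example: events due in 10, 14 and 14 ticks are
	 * queued with c_time values 10, 4 and 0, so decrementing only
	 * the head entry ages every event at once; a head c_time of
	 * zero means "due now" and a negative value counts the ticks
	 * by which it is overdue.
	 */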

	p = curproc;
	if (p) {
		register struct pstats *pstats;

		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		pstats = p->p_stats;
		if (CLKF_USERMODE(frame) &&
		    timerisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			psignal(p, SIGVTALRM);
		if (timerisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			psignal(p, SIGPROF);
	}

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);

	/*
	 * Increment the time-of-day.
	 */
	ticks++;
	{
		int time_update;
		struct timeval newtime = time;
		long ltemp;

		if (timedelta == 0) {
			time_update = tick;
		} else {
			if (timedelta < 0) {
				time_update = tick - tickdelta;
				timedelta += tickdelta;
			} else {
				time_update = tick + tickdelta;
				timedelta -= tickdelta;
			}
		}
		BUMPTIME(&mono_time, time_update);

		/*
		 * Compute the phase adjustment.  If the low-order bits
		 * (time_phase) of the update overflow, bump the high-order
		 * bits (time_update).
		 */
		time_phase += time_adj;
		if (time_phase <= -FINEUSEC) {
			ltemp = -time_phase >> SHIFT_SCALE;
			time_phase += ltemp << SHIFT_SCALE;
			time_update -= ltemp;
		} else if (time_phase >= FINEUSEC) {
			ltemp = time_phase >> SHIFT_SCALE;
			time_phase -= ltemp << SHIFT_SCALE;
			time_update += ltemp;
		}
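
		/*
		 * For example (assuming the reference timex.h value
		 * FINEUSEC = 1 << SHIFT_SCALE, i.e. one scaled
		 * microsecond): time_adj accumulates in time_phase
		 * until a whole microsecond is present, at which point
		 * it is transferred to time_update and the fractional
		 * remainder stays behind for later ticks.
		 */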

		newtime.tv_usec += time_update;
		/*
		 * On rollover of the second the phase adjustment to be used
		 * for the next second is calculated.  Also, the maximum error
		 * is increased by the tolerance.  If the PPS frequency
		 * discipline code is present, the phase is increased to
		 * compensate for the CPU clock oscillator frequency error.
		 *
		 * With SHIFT_SCALE = 23, the maximum frequency adjustment is
		 * +-256 us per tick, or 25.6 ms/s at a clock frequency of 100
		 * Hz.  The time contribution is shifted right a minimum of two
		 * bits, while the frequency contribution is a right shift.
		 * Thus, overflow is prevented if the frequency contribution is
		 * limited to half the maximum or 15.625 ms/s.
		 */
		if (newtime.tv_usec >= 1000000) {
			newtime.tv_usec -= 1000000;
			newtime.tv_sec++;
			time_maxerror += time_tolerance >> SHIFT_USEC;
			if (time_offset < 0) {
				ltemp = -time_offset >>
				    (SHIFT_KG + time_constant);
				time_offset += ltemp;
				time_adj = -ltemp <<
				    (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
			} else {
				ltemp = time_offset >>
				    (SHIFT_KG + time_constant);
				time_offset -= ltemp;
				time_adj = ltemp <<
				    (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
			}
#ifdef PPS_SYNC
			/*
			 * Gnaw on the watchdog counter and update the
			 * frequency computed by the pll and the PPS signal.
			 */
			pps_valid++;
			if (pps_valid == PPS_VALID) {
				pps_jitter = MAXTIME;
				pps_stabil = MAXFREQ;
				time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
				    STA_PPSWANDER | STA_PPSERROR);
			}
			ltemp = time_freq + pps_freq;
#else
			ltemp = time_freq;
#endif /* PPS_SYNC */
			if (ltemp < 0)
				time_adj -= -ltemp >>
				    (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
			else
				time_adj += ltemp >>
				    (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);

			/*
			 * When the CPU clock oscillator frequency is not a
			 * power of two in Hz, the SHIFT_HZ is only an
			 * approximate scale factor.  In the SunOS kernel, this
			 * results in a PLL gain factor of 1/1.28 = 0.78 of
			 * what it should be.  In the following code the
			 * overall gain is increased by a factor of 1.25, which
			 * results in a residual error less than 3 percent.
			 */
			/* Same thing applies for FreeBSD --GAW */
			if (hz == 100) {
				if (time_adj < 0)
					time_adj -= -time_adj >> 2;
				else
					time_adj += time_adj >> 2;
			}

			/*
			 * XXX - this is really bogus, but can't be fixed until
			 * xntpd's idea of the system clock is fixed to know
			 * how the user wants leap seconds handled; in the
			 * meantime, we assume that users of NTP are running
			 * without proper leap second support (this is now the
			 * default anyway)
			 */
			/*
			 * Leap second processing.  If in leap-insert state at
			 * the end of the day, the system clock is set back one
			 * second; if in leap-delete state, the system clock is
			 * set ahead one second.  The microtime() routine or
			 * external clock driver will ensure that reported time
			 * is always monotonic.  The ugly divides should be
			 * replaced.
			 */
			switch (time_state) {

			case TIME_OK:
				if (time_status & STA_INS)
					time_state = TIME_INS;
				else if (time_status & STA_DEL)
					time_state = TIME_DEL;
				break;

			case TIME_INS:
				if (newtime.tv_sec % 86400 == 0) {
					newtime.tv_sec--;
					time_state = TIME_OOP;
				}
				break;

			case TIME_DEL:
				if ((newtime.tv_sec + 1) % 86400 == 0) {
					newtime.tv_sec++;
					time_state = TIME_WAIT;
				}
				break;

			case TIME_OOP:
				time_state = TIME_WAIT;
				break;

			case TIME_WAIT:
				if (!(time_status & (STA_INS | STA_DEL)))
					time_state = TIME_OK;
			}
		}
		CPU_CLOCKUPDATE(&time, &newtime);
	}

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	if (needsoft) {
		if (CLKF_BASEPRI(frame)) {
			/*
			 * Save the overhead of a software interrupt;
			 * it will happen as soon as we return, so do it now.
			 */
			(void)splsoftclock();
			softclock();
		} else
			setsoftclock();
	}
}

/*
 * Software (low priority) clock interrupt.
 * Run periodic events from timeout queue.
 */
/*ARGSUSED*/
void
softclock()
{
	register struct callout *c;
	register void *arg;
	register void (*func) __P((void *));
	register int s;

	s = splhigh();
	while ((c = calltodo.c_next) != NULL && c->c_time <= 0) {
		func = c->c_func;
		arg = c->c_arg;
		calltodo.c_next = c->c_next;
		c->c_next = callfree;
		callfree = c;
		splx(s);
		(*func)(arg);
		(void) splhigh();
	}
	splx(s);
}
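
/*
 * A note on the spl protocol in softclock(): each entry is unlinked at
 * splhigh(), but the handler itself runs after splx(s) has restored the
 * previous priority, so clock and device interrupts are not held off
 * for the duration of arbitrary callback work.
 */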

/*
 * timeout --
 *	Execute a function after a specified length of time.
 *
 * untimeout --
 *	Cancel previous timeout function call.
 *
 * See AT&T BCI Driver Reference Manual for specification.  This
 * implementation differs from that one in that no identification
 * value is returned from timeout, rather, the original arguments
 * to timeout are used to identify entries for untimeout.
 */
void
timeout(ftn, arg, ticks)
	timeout_t ftn;
	void *arg;
	register int ticks;
{
	register struct callout *new, *p, *t;
	register int s;

	if (ticks <= 0)
		ticks = 1;

	/* Lock out the clock. */
	s = splhigh();

	/* Fill in the next free callout structure. */
	if (callfree == NULL)
		panic("timeout table full");
	new = callfree;
	callfree = new->c_next;
	new->c_arg = arg;
	new->c_func = ftn;

	/*
	 * The time for each event is stored as a difference from the time
	 * of the previous event on the queue.  Walk the queue, correcting
	 * the ticks argument for queue entries passed.  Correct the ticks
	 * value for the queue entry immediately after the insertion point
	 * as well.  Watch out for negative c_time values; these represent
	 * overdue events.
	 */
	for (p = &calltodo;
	    (t = p->c_next) != NULL && ticks > t->c_time; p = t)
		if (t->c_time > 0)
			ticks -= t->c_time;
	new->c_time = ticks;
	if (t != NULL)
		t->c_time -= ticks;

	/* Insert the new entry into the queue. */
	p->c_next = new;
	new->c_next = t;
	splx(s);
}

void
untimeout(ftn, arg)
	timeout_t ftn;
	void *arg;
{
	register struct callout *p, *t;
	register int s;

	s = splhigh();
	for (p = &calltodo; (t = p->c_next) != NULL; p = t)
		if (t->c_func == ftn && t->c_arg == arg) {
			/* Increment next entry's tick count. */
			if (t->c_next && t->c_time > 0)
				t->c_next->c_time += t->c_time;

			/* Move entry from callout queue to callfree queue. */
			p->c_next = t->c_next;
			t->c_next = callfree;
			callfree = t;
			break;
		}
	splx(s);
}

/*
 * Compute number of hz until specified time.  Used to
 * compute third argument to timeout() from an absolute time.
 */
int
hzto(tv)
	struct timeval *tv;
{
	register long ticks, sec;
	int s;

	/*
	 * If number of milliseconds will fit in 32 bit arithmetic,
	 * then compute number of milliseconds to time and scale to
	 * ticks.  Otherwise just compute number of hz in time, rounding
	 * times greater than representable to maximum value.
	 *
	 * Delta times less than 25 days can be computed ``exactly''.
	 * Maximum value for any timeout in 10ms ticks is 250 days.
	 */
	s = splhigh();
	sec = tv->tv_sec - time.tv_sec;
	if (sec <= 0x7fffffff / 1000 - 1000)
		ticks = ((tv->tv_sec - time.tv_sec) * 1000 +
		    (tv->tv_usec - time.tv_usec) / 1000) / (tick / 1000);
	else if (sec <= 0x7fffffff / hz)
		ticks = sec * hz;
	else
		ticks = 0x7fffffff;
	splx(s);
	return (ticks);
}
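
/*
 * Usage sketch (hypothetical driver fragment; foo_tick, sc and tv are
 * placeholder names, not identifiers from this file):
 *
 *	timeout(foo_tick, sc, hz);	    call foo_tick(sc) in ~1 second
 *	untimeout(foo_tick, sc);	    cancel, matched on (func, arg)
 *
 *	tv.tv_sec = time.tv_sec + 5;	    or, for an absolute deadline,
 *	tv.tv_usec = 0;			    convert it to a tick count
 *	timeout(foo_tick, sc, hzto(&tv));   with hzto()
 */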

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(p)
	register struct proc *p;
{
	int s;

	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		if (++profprocs == 1 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = psratio;
			setstatclockrate(profhz);
			splx(s);
		}
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(p)
	register struct proc *p;
{
	int s;

	if (p->p_flag & P_PROFIL) {
		p->p_flag &= ~P_PROFIL;
		if (--profprocs == 0 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = 1;
			setstatclockrate(stathz);
			splx(s);
		}
	}
}

/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
 */
void
statclock(frame)
	register struct clockframe *frame;
{
#ifdef GPROF
	register struct gmonparam *g;
#endif
	register struct proc *p = curproc;
	register int i;

	if (p) {
		struct pstats *pstats;
		struct rusage *ru;
		struct vmspace *vm;

		/* bump the resource usage of integral space use */
		if ((pstats = p->p_stats) && (ru = &pstats->p_ru) &&
		    (vm = p->p_vmspace)) {
			ru->ru_ixrss += vm->vm_tsize * PAGE_SIZE / 1024;
			ru->ru_idrss += vm->vm_dsize * PAGE_SIZE / 1024;
			ru->ru_isrss += vm->vm_ssize * PAGE_SIZE / 1024;
			if ((vm->vm_pmap.pm_stats.resident_count *
			    PAGE_SIZE / 1024) > ru->ru_maxrss) {
				ru->ru_maxrss =
				    vm->vm_pmap.pm_stats.resident_count *
				    PAGE_SIZE / 1024;
			}
		}
	}

	if (CLKF_USERMODE(frame)) {
		if (p->p_flag & P_PROFIL)
			addupc_intr(p, CLKF_PC(frame), 1);
		if (--pscnt > 0)
			return;
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled record the tick.
		 */
		p->p_uticks++;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE]++;
		else
			cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
		if (--pscnt > 0)
			return;
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if (CLKF_INTR(frame)) {
			if (p != NULL)
				p->p_iticks++;
			cp_time[CP_INTR]++;
		} else if (p != NULL) {
			p->p_sticks++;
			cp_time[CP_SYS]++;
		} else
			cp_time[CP_IDLE]++;
	}
	pscnt = psdiv;

	/*
	 * We maintain statistics shown by user-level statistics
	 * programs:  the amount of time in each cpu state, and
	 * the amount of time each of DK_NDRIVE ``drives'' is busy.
	 *
	 * XXX should either run linked list of drives, or (better)
	 * grab timestamps in the start & done code.
	 */
	for (i = 0; i < DK_NDRIVE; i++)
		if (dk_busy & (1 << i))
			dk_time[i]++;

	/*
	 * We adjust the priority of the current process.  The priority of
	 * a process gets worse as it accumulates CPU time.  The cpu usage
	 * estimator (p_estcpu) is increased here.  The formula for computing
	 * priorities (in kern_synch.c) will compute a different value each
	 * time p_estcpu increases by 4.  The cpu usage estimator ramps up
	 * quite quickly when the process is running (linearly), and decays
	 * away exponentially, at a rate which is proportionally slower when
	 * the system is busy.  The basic principle is that the system will
	 * 90% forget that the process used a lot of CPU time in 5 * loadav
	 * seconds.  This causes the system to favor processes which haven't
	 * run much recently, and to round-robin among other processes.
	 */
	if (p != NULL) {
		p->p_cpticks++;
		if (++p->p_estcpu == 0)
			p->p_estcpu--;
		if ((p->p_estcpu & 3) == 0) {
			resetpriority(p);
			if (p->p_priority >= PUSER)
				p->p_priority = p->p_usrpri;
		}
	}
}
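
/*
 * Divider example: on a machine with stathz = 128 and profhz = 1024
 * (typical i386 values; an assumption here, not something this file
 * sets), psratio is 8.  While any process is being profiled the clock
 * runs at 1024 Hz, but the statistics above are charged only on every
 * eighth tick, when pscnt counts down to zero from psdiv.
 */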

/*
 * Return information about system clocks.
 */
int
sysctl_clockrate(where, sizep)
	register char *where;
	size_t *sizep;
{
	struct clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.hz = hz;
	clkinfo.tick = tick;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_rdstruct(where, sizep, NULL, &clkinfo,
	    sizeof(clkinfo)));
}
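
/*
 * sysctl_clockrate() backs the KERN_CLOCKRATE sysctl: user programs
 * fetch the struct clockinfo filled in above to learn hz, tick, profhz
 * and stathz rather than compiling in private copies of these values.
 */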

/*#ifdef PPS_SYNC*/
#if 0
/* This code is completely bogus; if anybody ever wants to use it, get
 * the current version from Dave Mills. */

/*
 * hardpps() - discipline CPU clock oscillator to external pps signal
 *
 * This routine is called at each PPS interrupt in order to discipline
 * the CPU clock oscillator to the PPS signal.  It integrates successive
 * phase differences between the two oscillators and calculates the
 * frequency offset.  This is used in hardclock() to discipline the CPU
 * clock oscillator so that intrinsic frequency error is cancelled out.
 * The code requires the caller to capture the time and hardware
 * counter value at the designated PPS signal transition.
 */
void
hardpps(tvp, usec)
	struct timeval *tvp;	/* time at PPS */
	long usec;		/* hardware counter at PPS */
{
	long u_usec, v_usec, bigtick;
	long cal_sec, cal_usec;

	/*
	 * During the calibration interval adjust the starting time when
	 * the tick overflows.  At the end of the interval compute the
	 * duration of the interval and the difference of the hardware
	 * counters at the beginning and end of the interval.  This code
	 * is deliciously complicated by the fact that valid differences
	 * may exceed the value of tick when using long calibration
	 * intervals and small ticks.  Note that the counter can be
	 * greater than tick if caught at just the wrong instant, but
	 * the values returned and used here are correct.
	 */
	bigtick = (long)tick << SHIFT_USEC;
	pps_usec -= ntp_pll.ybar;
	if (pps_usec >= bigtick)
		pps_usec -= bigtick;
	if (pps_usec < 0)
		pps_usec += bigtick;
	pps_time.tv_sec++;
	pps_count++;
	if (pps_count < (1 << pps_shift))
		return;
	pps_count = 0;
	ntp_pll.calcnt++;
	u_usec = usec << SHIFT_USEC;
	v_usec = pps_usec - u_usec;
	if (v_usec >= bigtick >> 1)
		v_usec -= bigtick;
	if (v_usec < -(bigtick >> 1))
		v_usec += bigtick;
	if (v_usec < 0)
		v_usec = -(-v_usec >> ntp_pll.shift);
	else
		v_usec = v_usec >> ntp_pll.shift;
	pps_usec = u_usec;
	cal_sec = tvp->tv_sec;
	cal_usec = tvp->tv_usec;
	cal_sec -= pps_time.tv_sec;
	cal_usec -= pps_time.tv_usec;
	if (cal_usec < 0) {
		cal_usec += 1000000;
		cal_sec--;
	}
	pps_time = *tvp;

	/*
	 * Check for lost interrupts, noise, excessive jitter and
	 * excessive frequency error.  The number of timer ticks during
	 * the interval may vary +-1 tick.  Add to this a margin of one
	 * tick for the PPS signal jitter and maximum frequency
	 * deviation.  If the limits are exceeded, the calibration
	 * interval is reset to the minimum and we start over.
	 */
	u_usec = (long)tick << 1;
	if (!((cal_sec == -1 && cal_usec > (1000000 - u_usec))
	    || (cal_sec == 0 && cal_usec < u_usec))
	    || v_usec > ntp_pll.tolerance || v_usec < -ntp_pll.tolerance) {
		ntp_pll.jitcnt++;
		ntp_pll.shift = NTP_PLL.SHIFT;
		pps_dispinc = PPS_DISPINC;
		ntp_pll.intcnt = 0;
		return;
	}

	/*
	 * A three-stage median filter is used to help deglitch the pps
	 * signal.  The median sample becomes the offset estimate; the
	 * difference between the other two samples becomes the
	 * dispersion estimate.
	 */
	pps_mf[2] = pps_mf[1];
	pps_mf[1] = pps_mf[0];
	pps_mf[0] = v_usec;
	if (pps_mf[0] > pps_mf[1]) {
		if (pps_mf[1] > pps_mf[2]) {
			u_usec = pps_mf[1];		/* 0 1 2 */
			v_usec = pps_mf[0] - pps_mf[2];
		} else if (pps_mf[2] > pps_mf[0]) {
			u_usec = pps_mf[0];		/* 2 0 1 */
			v_usec = pps_mf[2] - pps_mf[1];
		} else {
			u_usec = pps_mf[2];		/* 0 2 1 */
			v_usec = pps_mf[0] - pps_mf[1];
		}
	} else {
		if (pps_mf[1] < pps_mf[2]) {
			u_usec = pps_mf[1];		/* 2 1 0 */
			v_usec = pps_mf[2] - pps_mf[0];
		} else if (pps_mf[2] < pps_mf[0]) {
			u_usec = pps_mf[0];		/* 1 0 2 */
			v_usec = pps_mf[1] - pps_mf[2];
		} else {
			u_usec = pps_mf[2];		/* 1 2 0 */
			v_usec = pps_mf[1] - pps_mf[0];
		}
	}

	/*
	 * Here the dispersion average is updated.  If it is less than
	 * the threshold pps_dispmax, the frequency average is updated
	 * as well, but clamped to the tolerance.
	 */
	v_usec = (v_usec >> 1) - ntp_pll.disp;
	if (v_usec < 0)
		ntp_pll.disp -= -v_usec >> PPS_AVG;
	else
		ntp_pll.disp += v_usec >> PPS_AVG;
	if (ntp_pll.disp > pps_dispmax) {
		ntp_pll.discnt++;
		return;
	}
	if (u_usec < 0) {
		ntp_pll.ybar -= -u_usec >> PPS_AVG;
		if (ntp_pll.ybar < -ntp_pll.tolerance)
			ntp_pll.ybar = -ntp_pll.tolerance;
		u_usec = -u_usec;
	} else {
		ntp_pll.ybar += u_usec >> PPS_AVG;
		if (ntp_pll.ybar > ntp_pll.tolerance)
			ntp_pll.ybar = ntp_pll.tolerance;
	}

	/*
	 * Here the calibration interval is adjusted.  If the maximum
	 * time difference is greater than tick/4, reduce the interval
	 * by half.  If this is not the case for four consecutive
	 * intervals, double the interval.
	 */
	if (u_usec << ntp_pll.shift > bigtick >> 2) {
		ntp_pll.intcnt = 0;
		if (ntp_pll.shift > NTP_PLL.SHIFT) {
			ntp_pll.shift--;
			pps_dispinc <<= 1;
		}
	} else if (ntp_pll.intcnt >= 4) {
		ntp_pll.intcnt = 0;
		if (ntp_pll.shift < NTP_PLL.SHIFTMAX) {
			ntp_pll.shift++;
			pps_dispinc >>= 1;
		}
	} else
		ntp_pll.intcnt++;
}
#endif /* PPS_SYNC */