kern_tc.c revision 44695
/*-
 * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org>
 * Copyright (c) 1982, 1986, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_clock.c	8.5 (Berkeley) 1/21/94
 * $Id: kern_clock.c,v 1.91 1999/03/12 11:09:50 phk Exp $
 */

#include "opt_ntp.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dkstat.h>
#include <sys/callout.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/timex.h>
#include <sys/timepps.h>
#include <vm/vm.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <sys/sysctl.h>

#include <machine/cpu.h>
#include <machine/limits.h>

#ifdef GPROF
#include <sys/gmon.h>
#endif

#if defined(SMP) && defined(BETTER_CLOCK)
#include <machine/smp.h>
#endif

/*
 * Number of timecounters used to implement stable storage
 */
#ifndef NTIMECOUNTER
#define NTIMECOUNTER	5
#endif

static MALLOC_DEFINE(M_TIMECOUNTER, "timecounter",
	"Timecounter stable storage");

static void initclocks __P((void *dummy));
SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)

static void tco_forward __P((int force));
static void tco_setscales __P((struct timecounter *tc));
static __inline unsigned tco_delta __P((struct timecounter *tc));

/* Some of these don't belong here, but it's easiest to concentrate them. */
#if defined(SMP) && defined(BETTER_CLOCK)
long cp_time[CPUSTATES];
#else
static long cp_time[CPUSTATES];
#endif

long tk_cancc;
long tk_nin;
long tk_nout;
long tk_rawcc;

time_t time_second;

/*
 * Which update policy to use.
 *    0 - every tick, bad hardware may fail with "calcru negative..."
 *    1 - more resistant to the above hardware, but less efficient.
 */
static int tco_method;

/*
 * Implement a dummy timecounter which we can use until we get a real one
 * in the air.  This allows the console and other early stuff to use
 * time services.
 */

static unsigned
dummy_get_timecount(struct timecounter *tc)
{
	static unsigned now;
	return (++now);
}

static struct timecounter dummy_timecounter = {
	dummy_get_timecount,
	0,
	~0u,
	1000000,
	"dummy"
};

struct timecounter *timecounter = &dummy_timecounter;

/*
 * Clock handling routines.
 *
 * This code is written to operate with two timers that run independently of
 * each other.
 *
 * The main timer, running hz times per second, is used to trigger interval
 * timers, timeouts and rescheduling as needed.
 *
 * The second timer handles kernel and user profiling,
 * and does resource use estimation.  If the second timer is programmable,
 * it is randomized to avoid aliasing between the two clocks.  For example,
 * the randomization prevents an adversary from always giving up the cpu
 * just before its quantum expires.  Otherwise, it would never accumulate
 * cpu ticks.  The mean frequency of the second timer is stathz.
 *
 * If no second timer exists, stathz will be zero; in this case we drive
 * profiling and statistics off the main clock.  This WILL NOT be accurate;
 * do not do it unless absolutely necessary.
 *
 * The statistics clock may (or may not) be run at a higher rate while
 * profiling.  This profile clock runs at profhz.  We require that profhz
 * be an integral multiple of stathz.
 *
 * If the statistics clock is running fast, it must be divided by the ratio
 * profhz/stathz for statistics.  (For profiling, every tick counts.)
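 *
 * (Illustration with made-up rates, not values taken from this file:
 * with stathz = 128 and profhz = 1024, psratio is 8, so while any
 * process is being profiled only every 8th statclock tick is counted
 * for statistics.)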
 *
 * Time-of-day is maintained using a "timecounter", which may or may
 * not be related to the hardware generating the above mentioned
 * interrupts.
 */

int	stathz;
int	profhz;
static int profprocs;
int	ticks;
static int psdiv, pscnt;		/* prof => stat divider */
int	psratio;			/* ratio: prof / stat */

/*
 * Initialize clock frequencies and start both clocks running.
 */
/* ARGSUSED*/
static void
initclocks(dummy)
	void *dummy;
{
	register int i;

	/*
	 * Set divisors to 1 (normal case) and let the machine-specific
	 * code do its bit.
	 */
	psdiv = pscnt = 1;
	cpu_initclocks();

	/*
	 * Compute profhz/stathz, and fix profhz if needed.
	 */
	i = stathz ? stathz : hz;
	if (profhz == 0)
		profhz = i;
	psratio = profhz / i;
}

/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(frame)
	register struct clockframe *frame;
{
	register struct proc *p;

	p = curproc;
	if (p) {
		register struct pstats *pstats;

		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		pstats = p->p_stats;
		if (CLKF_USERMODE(frame) &&
		    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
			psignal(p, SIGVTALRM);
		if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
			psignal(p, SIGPROF);
	}

#if defined(SMP) && defined(BETTER_CLOCK)
	forward_hardclock(pscnt);
#endif

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);

	tco_forward(0);
	ticks++;

	/*
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	if (TAILQ_FIRST(&callwheel[ticks & callwheelmask]) != NULL) {
		if (CLKF_BASEPRI(frame)) {
			/*
			 * Save the overhead of a software interrupt;
			 * it will happen as soon as we return, so do it now.
			 */
			(void)splsoftclock();
			softclock();
		} else
			setsoftclock();
	} else if (softticks + 1 == ticks)
		++softticks;
}

/*
 * Compute number of ticks in the specified amount of time.
 */
int
tvtohz(tv)
	struct timeval *tv;
{
	register unsigned long ticks;
	register long sec, usec;

	/*
	 * If the number of usecs in the whole seconds part of the time
	 * difference fits in a long, then the total number of usecs will
	 * fit in an unsigned long.  Compute the total and convert it to
	 * ticks, rounding up and adding 1 to allow for the current tick
	 * to expire.  Rounding also depends on unsigned long arithmetic
	 * to avoid overflow.
	 *
	 * Otherwise, if the number of ticks in the whole seconds part of
	 * the time difference fits in a long, then convert the parts to
	 * ticks separately and add, using similar rounding methods and
	 * overflow avoidance.  This method would work in the previous
	 * case but it is slightly slower and assumes that hz is integral.
	 *
	 * Otherwise, round the time difference down to the maximum
	 * representable value.
	 *
	 * If ints have 32 bits, then the maximum value for any timeout in
	 * 10ms ticks is 248 days.
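	 *
	 * (Worked check: INT_MAX = 2147483647 ticks at 10 ms each is
	 * about 21474836 seconds, i.e. roughly 248.5 days.)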
	 */
	sec = tv->tv_sec;
	usec = tv->tv_usec;
	if (usec < 0) {
		sec--;
		usec += 1000000;
	}
	if (sec < 0) {
#ifdef DIAGNOSTIC
		if (usec > 0) {
			sec++;
			usec -= 1000000;
		}
		printf("tvtohz: negative time difference %ld sec %ld usec\n",
		    sec, usec);
#endif
		ticks = 1;
	} else if (sec <= LONG_MAX / 1000000)
		ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
			/ tick + 1;
	else if (sec <= LONG_MAX / hz)
		ticks = sec * hz
			+ ((unsigned long)usec + (tick - 1)) / tick + 1;
	else
		ticks = LONG_MAX;
	if (ticks > INT_MAX)
		ticks = INT_MAX;
	return ((int)ticks);
}

/*
 * Start profiling on a process.
 *
 * Kernel profiling passes proc0 which never exits and hence
 * keeps the profile clock running constantly.
 */
void
startprofclock(p)
	register struct proc *p;
{
	int s;

	if ((p->p_flag & P_PROFIL) == 0) {
		p->p_flag |= P_PROFIL;
		if (++profprocs == 1 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = psratio;
			setstatclockrate(profhz);
			splx(s);
		}
	}
}

/*
 * Stop profiling on a process.
 */
void
stopprofclock(p)
	register struct proc *p;
{
	int s;

	if (p->p_flag & P_PROFIL) {
		p->p_flag &= ~P_PROFIL;
		if (--profprocs == 0 && stathz != 0) {
			s = splstatclock();
			psdiv = pscnt = 1;
			setstatclockrate(stathz);
			splx(s);
		}
	}
}

/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
 */
void
statclock(frame)
	register struct clockframe *frame;
{
#ifdef GPROF
	register struct gmonparam *g;
	int i;
#endif
	register struct proc *p;
	struct pstats *pstats;
	long rss;
	struct rusage *ru;
	struct vmspace *vm;

	if (curproc != NULL && CLKF_USERMODE(frame)) {
		p = curproc;
		if (p->p_flag & P_PROFIL)
			addupc_intr(p, CLKF_PC(frame), 1);
#if defined(SMP) && defined(BETTER_CLOCK)
		if (stathz != 0)
			forward_statclock(pscnt);
#endif
		if (--pscnt > 0)
			return;
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled record the tick.
		 */
		p->p_uticks++;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE]++;
		else
			cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
#if defined(SMP) && defined(BETTER_CLOCK)
		if (stathz != 0)
			forward_statclock(pscnt);
#endif
		if (--pscnt > 0)
			return;
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
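		 *
		 * In summary, the cases handled just below are:
		 *	interrupt frame			-> p_iticks, CP_INTR
		 *	syscall/trap with curproc	-> p_sticks, CP_SYS
		 *	idle loop (no curproc)		-> CP_IDLE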
		 */
		p = curproc;
		if (CLKF_INTR(frame)) {
			if (p != NULL)
				p->p_iticks++;
			cp_time[CP_INTR]++;
		} else if (p != NULL) {
			p->p_sticks++;
			cp_time[CP_SYS]++;
		} else
			cp_time[CP_IDLE]++;
	}
	pscnt = psdiv;

	/*
	 * We maintain statistics shown by user-level statistics
	 * programs:  the amount of time in each cpu state.
	 */

	/*
	 * We adjust the priority of the current process.  The priority of
	 * a process gets worse as it accumulates CPU time.  The cpu usage
	 * estimator (p_estcpu) is increased here.  The formula for computing
	 * priorities (in kern_synch.c) will compute a different value each
	 * time p_estcpu increases by 4.  The cpu usage estimator ramps up
	 * quite quickly when the process is running (linearly), and decays
	 * away exponentially, at a rate which is proportionally slower when
	 * the system is busy.  The basic principle is that the system will
	 * 90% forget that the process used a lot of CPU time in 5 * loadav
	 * seconds.  This causes the system to favor processes which haven't
	 * run much recently, and to round-robin among other processes.
	 */
	if (p != NULL) {
		p->p_cpticks++;
		if (++p->p_estcpu == 0)
			p->p_estcpu--;
		if ((p->p_estcpu & 3) == 0) {
			resetpriority(p);
			if (p->p_priority >= PUSER)
				p->p_priority = p->p_usrpri;
		}

		/* Update resource usage integrals and maximums. */
		if ((pstats = p->p_stats) != NULL &&
		    (ru = &pstats->p_ru) != NULL &&
		    (vm = p->p_vmspace) != NULL) {
			ru->ru_ixrss += pgtok(vm->vm_tsize);
			ru->ru_idrss += pgtok(vm->vm_dsize);
			ru->ru_isrss += pgtok(vm->vm_ssize);
			rss = pgtok(vmspace_resident_count(vm));
			if (ru->ru_maxrss < rss)
				ru->ru_maxrss = rss;
		}
	}
}

/*
 * Return information about system clocks.
 */
static int
sysctl_kern_clockrate SYSCTL_HANDLER_ARGS
{
	struct clockinfo clkinfo;

	/*
	 * Construct clockinfo structure.
	 */
	clkinfo.hz = hz;
	clkinfo.tick = tick;
	clkinfo.tickadj = tickadj;
	clkinfo.profhz = profhz;
	clkinfo.stathz = stathz ? stathz : hz;
	return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
}

SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
	0, 0, sysctl_kern_clockrate, "S,clockinfo", "");

static __inline unsigned
tco_delta(struct timecounter *tc)
{

	return ((tc->tc_get_timecount(tc) - tc->tc_offset_count) &
	    tc->tc_counter_mask);
}

/*
 * We have eight functions for looking at the clock, four for
 * microseconds and four for nanoseconds.  For each there is a fast
 * but less precise version "get{nano|micro}[up]time" which will
 * return a time which is up to 1/HZ previous to the call, whereas
 * the raw version "{nano|micro}[up]time" will return a timestamp
 * which is as precise as possible.  The "up" variants return the
 * time relative to system boot; these are well suited for time
 * interval measurements.
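 *
 * A hypothetical interval measurement, assuming the timespecsub()
 * macro from <sys/time.h> (a usage sketch, not code from this file):
 *
 *	struct timespec t0, t1;
 *
 *	nanouptime(&t0);
 *	... work being timed ...
 *	nanouptime(&t1);
 *	timespecsub(&t1, &t0);		(t1 now holds the elapsed time)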
 */

void
getmicrotime(struct timeval *tvp)
{
	struct timecounter *tc;

	if (!tco_method) {
		tc = timecounter;
		*tvp = tc->tc_microtime;
	} else {
		microtime(tvp);
	}
}

void
getnanotime(struct timespec *tsp)
{
	struct timecounter *tc;

	if (!tco_method) {
		tc = timecounter;
		*tsp = tc->tc_nanotime;
	} else {
		nanotime(tsp);
	}
}

void
microtime(struct timeval *tv)
{
	struct timecounter *tc;

	tc = (struct timecounter *)timecounter;
	tv->tv_sec = tc->tc_offset_sec;
	tv->tv_usec = tc->tc_offset_micro;
	tv->tv_usec += ((u_int64_t)tco_delta(tc) * tc->tc_scale_micro) >> 32;
	tv->tv_usec += boottime.tv_usec;
	tv->tv_sec += boottime.tv_sec;
	while (tv->tv_usec >= 1000000) {
		tv->tv_usec -= 1000000;
		tv->tv_sec++;
	}
}

void
nanotime(struct timespec *ts)
{
	unsigned count;
	u_int64_t delta;
	struct timecounter *tc;

	tc = (struct timecounter *)timecounter;
	ts->tv_sec = tc->tc_offset_sec;
	count = tco_delta(tc);
	delta = tc->tc_offset_nano;
	delta += ((u_int64_t)count * tc->tc_scale_nano_f);
	delta >>= 32;
	delta += ((u_int64_t)count * tc->tc_scale_nano_i);
	delta += boottime.tv_usec * 1000;
	ts->tv_sec += boottime.tv_sec;
	while (delta >= 1000000000) {
		delta -= 1000000000;
		ts->tv_sec++;
	}
	ts->tv_nsec = delta;
}

void
getmicrouptime(struct timeval *tvp)
{
	struct timecounter *tc;

	if (!tco_method) {
		tc = timecounter;
		tvp->tv_sec = tc->tc_offset_sec;
		tvp->tv_usec = tc->tc_offset_micro;
	} else {
		microuptime(tvp);
	}
}

void
getnanouptime(struct timespec *tsp)
{
	struct timecounter *tc;

	if (!tco_method) {
		tc = timecounter;
		tsp->tv_sec = tc->tc_offset_sec;
		tsp->tv_nsec = tc->tc_offset_nano >> 32;
	} else {
		nanouptime(tsp);
	}
}

void
microuptime(struct timeval *tv)
{
	struct timecounter *tc;

	tc = (struct timecounter *)timecounter;
	tv->tv_sec = tc->tc_offset_sec;
	tv->tv_usec = tc->tc_offset_micro;
	tv->tv_usec += ((u_int64_t)tco_delta(tc) * tc->tc_scale_micro) >> 32;
	if (tv->tv_usec >= 1000000) {
		tv->tv_usec -= 1000000;
		tv->tv_sec++;
	}
}

void
nanouptime(struct timespec *ts)
{
	unsigned count;
	u_int64_t delta;
	struct timecounter *tc;

	tc = (struct timecounter *)timecounter;
	ts->tv_sec = tc->tc_offset_sec;
	count = tco_delta(tc);
	delta = tc->tc_offset_nano;
	delta += ((u_int64_t)count * tc->tc_scale_nano_f);
	delta >>= 32;
	delta += ((u_int64_t)count * tc->tc_scale_nano_i);
	if (delta >= 1000000000) {
		delta -= 1000000000;
		ts->tv_sec++;
	}
	ts->tv_nsec = delta;
}

static void
tco_setscales(struct timecounter *tc)
{
	u_int64_t scale;

	scale = 1000000000LL << 32;
	scale += tc->tc_adjustment;
	scale /= tc->tc_frequency;
	tc->tc_scale_micro = scale / 1000;
	tc->tc_scale_nano_f = scale & 0xffffffff;
	tc->tc_scale_nano_i = scale >> 32;
}

void
init_timecounter(struct timecounter *tc)
{
	struct timespec ts1;
	struct timecounter *t1, *t2, *t3;
	int i;

	tc->tc_adjustment = 0;
	tco_setscales(tc);
	tc->tc_offset_count = tc->tc_get_timecount(tc);
	tc->tc_tweak = tc;
	MALLOC(t1, struct timecounter *, sizeof *t1, M_TIMECOUNTER, M_WAITOK);
	*t1 = *tc;
	t2 = t1;
	for (i = 1; i < NTIMECOUNTER; i++) {
		MALLOC(t3,
		    struct timecounter *, sizeof *t3,
		    M_TIMECOUNTER, M_WAITOK);
		*t3 = *tc;
		t3->tc_other = t2;
		t2 = t3;
	}
	t1->tc_other = t3;
	tc = t1;

	printf("Timecounter \"%s\" frequency %lu Hz\n",
	    tc->tc_name, (u_long)tc->tc_frequency);

	/* XXX: For now always start using the counter. */
	tc->tc_offset_count = tc->tc_get_timecount(tc);
	nanouptime(&ts1);
	tc->tc_offset_nano = (u_int64_t)ts1.tv_nsec << 32;
	tc->tc_offset_micro = ts1.tv_nsec / 1000;
	tc->tc_offset_sec = ts1.tv_sec;
	timecounter = tc;
}

void
set_timecounter(struct timespec *ts)
{
	struct timespec ts2;

	nanouptime(&ts2);
	boottime.tv_sec = ts->tv_sec - ts2.tv_sec;
	boottime.tv_usec = (ts->tv_nsec - ts2.tv_nsec) / 1000;
	if (boottime.tv_usec < 0) {
		boottime.tv_usec += 1000000;
		boottime.tv_sec--;
	}
	/* fiddle all the little crinkly bits around the fiords... */
	tco_forward(1);
}

#if 0 /* Currently unused */
void
switch_timecounter(struct timecounter *newtc)
{
	int s;
	struct timecounter *tc;
	struct timespec ts;

	s = splclock();
	tc = timecounter;
	if (newtc == tc || newtc == tc->tc_other) {
		splx(s);
		return;
	}
	nanouptime(&ts);
	newtc->tc_offset_sec = ts.tv_sec;
	newtc->tc_offset_nano = (u_int64_t)ts.tv_nsec << 32;
	newtc->tc_offset_micro = ts.tv_nsec / 1000;
	newtc->tc_offset_count = newtc->tc_get_timecount(newtc);
	timecounter = newtc;
	splx(s);
}
#endif

static struct timecounter *
sync_other_counter(void)
{
	struct timecounter *tc, *tcn, *tco;
	unsigned delta;

	tco = timecounter;
	tc = tco->tc_other;
	tcn = tc->tc_other;
	*tc = *tco;
	tc->tc_other = tcn;
	delta = tco_delta(tc);
	tc->tc_offset_count += delta;
	tc->tc_offset_count &= tc->tc_counter_mask;
	tc->tc_offset_nano += (u_int64_t)delta * tc->tc_scale_nano_f;
	tc->tc_offset_nano += (u_int64_t)delta * tc->tc_scale_nano_i << 32;
	return (tc);
}

static void
tco_forward(int force)
{
	struct timecounter *tc, *tco;

	tco = timecounter;
	tc = sync_other_counter();
	/*
	 * We may be inducing a tiny error here: tc_poll_pps() may
	 * process a latched count which happens after the tco_delta()
	 * in sync_other_counter(), which would extend the previous
	 * counter's parameters into the domain of this new one.
	 * Since the time window for this is very small, the error is
	 * going to be only a few weenieseconds (as Dave Mills would
	 * say), so let's just not talk more about it, OK?
	 */
	if (tco->tc_poll_pps)
		tco->tc_poll_pps(tco);
	if (timedelta != 0) {
		tc->tc_offset_nano += (u_int64_t)(tickdelta * 1000) << 32;
		timedelta -= tickdelta;
		force++;
	}

	while (tc->tc_offset_nano >= 1000000000ULL << 32) {
		tc->tc_offset_nano -= 1000000000ULL << 32;
		tc->tc_offset_sec++;
		ntp_update_second(tc);	/* XXX only needed if xntpd runs */
		tco_setscales(tc);
		force++;
	}

	if (tco_method && !force)
		return;

	tc->tc_offset_micro = (tc->tc_offset_nano / 1000) >> 32;

	/* Figure out the wall-clock time */
	tc->tc_nanotime.tv_sec = tc->tc_offset_sec + boottime.tv_sec;
	tc->tc_nanotime.tv_nsec =
	    (tc->tc_offset_nano >> 32) + boottime.tv_usec * 1000;
	tc->tc_microtime.tv_usec = tc->tc_offset_micro + boottime.tv_usec;
	if (tc->tc_nanotime.tv_nsec >= 1000000000) {
		tc->tc_nanotime.tv_nsec -= 1000000000;
		tc->tc_microtime.tv_usec -= 1000000;
		tc->tc_nanotime.tv_sec++;
	}
	time_second = tc->tc_microtime.tv_sec = tc->tc_nanotime.tv_sec;

	timecounter = tc;
}

SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, "");

SYSCTL_INT(_kern_timecounter, OID_AUTO, method, CTLFLAG_RW, &tco_method, 0,
    "This variable determines the method used for updating timecounters.  "
    "If the default algorithm (0) fails with \"calcru negative...\" messages, "
    "try the alternate algorithm (1), which handles bad hardware better.");

int
pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps)
{
	pps_params_t *app;
	pps_info_t *api;

	switch (cmd) {
	case PPS_IOC_CREATE:
		return (0);
	case PPS_IOC_DESTROY:
		return (0);
	case PPS_IOC_SETPARAMS:
		app = (pps_params_t *)data;
		if (app->mode & ~pps->ppscap)
			return (EINVAL);
		pps->ppsparam = *app;
		return (0);
	case PPS_IOC_GETPARAMS:
		app = (pps_params_t *)data;
		*app = pps->ppsparam;
		return (0);
	case PPS_IOC_GETCAP:
		*(int*)data = pps->ppscap;
		return (0);
	case PPS_IOC_FETCH:
		api = (pps_info_t *)data;
		pps->ppsinfo.current_mode = pps->ppsparam.mode;
		*api = pps->ppsinfo;
		return (0);
	case PPS_IOC_WAIT:
		return (EOPNOTSUPP);
	default:
		return (ENOTTY);
	}
}

void
pps_init(struct pps_state *pps)
{
	pps->ppscap |= PPS_TSFMT_TSPEC;
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_OFFSETASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_OFFSETCLEAR;
#ifdef PPS_SYNC
	if (pps->ppscap & PPS_CAPTUREASSERT)
		pps->ppscap |= PPS_HARDPPSONASSERT;
	if (pps->ppscap & PPS_CAPTURECLEAR)
		pps->ppscap |= PPS_HARDPPSONCLEAR;
#endif
}

void
pps_event(struct pps_state *pps, struct timecounter *tc, unsigned count, int event)
{
	struct timespec ts, *tsp, *osp;
	u_int64_t delta;
	unsigned tcount, *pcount;
	int foff, fhard;
	pps_seq_t *pseq;

	/* Things would be easier with arrays...
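	 * The assert and clear branches below differ only in which
	 * ppsinfo/ppsparam fields, mode bits and ppscount slot they
	 * use.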
	 */
	if (event == PPS_CAPTUREASSERT) {
		tsp = &pps->ppsinfo.assert_timestamp;
		osp = &pps->ppsparam.assert_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETASSERT;
		fhard = pps->ppsparam.mode & PPS_HARDPPSONASSERT;
		pcount = &pps->ppscount[0];
		pseq = &pps->ppsinfo.assert_sequence;
	} else {
		tsp = &pps->ppsinfo.clear_timestamp;
		osp = &pps->ppsparam.clear_offset;
		foff = pps->ppsparam.mode & PPS_OFFSETCLEAR;
		fhard = pps->ppsparam.mode & PPS_HARDPPSONCLEAR;
		pcount = &pps->ppscount[1];
		pseq = &pps->ppsinfo.clear_sequence;
	}

	/* The timecounter changed: bail */
	if (!pps->ppstc ||
	    pps->ppstc->tc_name != tc->tc_name ||
	    tc->tc_name != timecounter->tc_name) {
		pps->ppstc = tc;
		*pcount = count;
		return;
	}

	/* Now, make sure we have the right instance */
	tc = timecounter;

	/* Nothing really happened */
	if (*pcount == count)
		return;

	*pcount = count;

	/* Convert the count to a timespec */
	ts.tv_sec = tc->tc_offset_sec;
	tcount = count - tc->tc_offset_count;
	tcount &= tc->tc_counter_mask;
	delta = tc->tc_offset_nano;
	delta += ((u_int64_t)tcount * tc->tc_scale_nano_f);
	delta >>= 32;
	delta += ((u_int64_t)tcount * tc->tc_scale_nano_i);
	delta += boottime.tv_usec * 1000;
	ts.tv_sec += boottime.tv_sec;
	while (delta >= 1000000000) {
		delta -= 1000000000;
		ts.tv_sec++;
	}
	ts.tv_nsec = delta;

	(*pseq)++;
	*tsp = ts;

	if (foff) {
		timespecadd(tsp, osp);
		if (tsp->tv_nsec < 0) {
			tsp->tv_nsec += 1000000000;
			tsp->tv_sec -= 1;
		}
	}
#ifdef PPS_SYNC
	if (fhard) {
		/* magic, at its best... */
		tcount = count - pps->ppscount[2];
		pps->ppscount[2] = count;
		tcount &= tc->tc_counter_mask;
		delta = ((u_int64_t)tcount * tc->tc_tweak->tc_scale_nano_f);
		delta >>= 32;
		delta += ((u_int64_t)tcount * tc->tc_tweak->tc_scale_nano_i);
		hardpps(tsp, delta);
	}
#endif
}
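/*
 * Hypothetical driver usage sketch (an illustration, not code from this
 * file): a driver with PPS capture support would set its capabilities
 * and call pps_init() once at attach time,
 *
 *	sc->sc_pps.ppscap = PPS_CAPTUREASSERT;
 *	pps_init(&sc->sc_pps);
 *
 * then hand each captured timecounter value to pps_event() from its
 * interrupt handler,
 *
 *	pps_event(&sc->sc_pps, tc, count, PPS_CAPTUREASSERT);
 *
 * and route the PPS ioctls through pps_ioctl().  Here "sc" and its
 * embedded struct pps_state are assumed driver state, and "tc" and
 * "count" are the timecounter and counter value latched for the event.
 */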