1static volatile int print_tci = 1; 2 3/*- 4 * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org> 5 * Copyright (c) 1982, 1986, 1991, 1993 6 * The Regents of the University of California. All rights reserved. 7 * (c) UNIX System Laboratories, Inc. 8 * All or some portions of this file are derived from material licensed 9 * to the University of California by American Telephone and Telegraph 10 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 11 * the permission of UNIX System Laboratories, Inc. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. All advertising materials mentioning features or use of this software 22 * must display the following acknowledgement: 23 * This product includes software developed by the University of 24 * California, Berkeley and its contributors. 25 * 4. Neither the name of the University nor the names of its contributors 26 * may be used to endorse or promote products derived from this software 27 * without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 32 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 39 * SUCH DAMAGE. 40 * 41 * @(#)kern_clock.c 8.5 (Berkeley) 1/21/94
| 1static volatile int print_tci = 1; 2 3/*- 4 * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org> 5 * Copyright (c) 1982, 1986, 1991, 1993 6 * The Regents of the University of California. All rights reserved. 7 * (c) UNIX System Laboratories, Inc. 8 * All or some portions of this file are derived from material licensed 9 * to the University of California by American Telephone and Telegraph 10 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 11 * the permission of UNIX System Laboratories, Inc. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. All advertising materials mentioning features or use of this software 22 * must display the following acknowledgement: 23 * This product includes software developed by the University of 24 * California, Berkeley and its contributors. 25 * 4. Neither the name of the University nor the names of its contributors 26 * may be used to endorse or promote products derived from this software 27 * without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 32 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 39 * SUCH DAMAGE. 40 * 41 * @(#)kern_clock.c 8.5 (Berkeley) 1/21/94
|
 * $Id: kern_clock.c,v 1.71 1998/06/07 08:40:41 phk Exp $
43 */ 44 45#include <sys/param.h> 46#include <sys/systm.h> 47#include <sys/dkstat.h> 48#include <sys/callout.h> 49#include <sys/kernel.h> 50#include <sys/proc.h> 51#include <sys/resourcevar.h> 52#include <sys/signalvar.h> 53#include <sys/timex.h> 54#include <vm/vm.h> 55#include <sys/lock.h> 56#include <vm/pmap.h> 57#include <vm/vm_map.h> 58#include <sys/sysctl.h> 59 60#include <machine/cpu.h> 61#include <machine/limits.h> 62 63#ifdef GPROF 64#include <sys/gmon.h> 65#endif 66 67#if defined(SMP) && defined(BETTER_CLOCK) 68#include <machine/smp.h> 69#endif 70 71static void initclocks __P((void *dummy)); 72SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL) 73 74static void tco_forward __P((void)); 75static void tco_setscales __P((struct timecounter *tc)); 76static __inline unsigned tco_getdelta __P((struct timecounter *tc)); 77 78/* Some of these don't belong here, but it's easiest to concentrate them. */ 79#if defined(SMP) && defined(BETTER_CLOCK) 80long cp_time[CPUSTATES]; 81#else 82static long cp_time[CPUSTATES]; 83#endif 84long dk_seek[DK_NDRIVE]; 85static long dk_time[DK_NDRIVE]; /* time busy (in statclock ticks) */ 86long dk_wds[DK_NDRIVE]; 87long dk_wpms[DK_NDRIVE]; 88long dk_xfer[DK_NDRIVE]; 89 90int dk_busy; 91int dk_ndrive = 0; 92char dk_names[DK_NDRIVE][DK_NAMELEN]; 93 94long tk_cancc; 95long tk_nin; 96long tk_nout; 97long tk_rawcc; 98 99struct timecounter *timecounter; 100 101time_t time_second; 102 103/* 104 * Clock handling routines. 105 * 106 * This code is written to operate with two timers that run independently of 107 * each other. 108 * 109 * The main timer, running hz times per second, is used to trigger interval 110 * timers, timeouts and rescheduling as needed. 111 * 112 * The second timer handles kernel and user profiling, 113 * and does resource use estimation. If the second timer is programmable, 114 * it is randomized to avoid aliasing between the two clocks. 
For example, 115 * the randomization prevents an adversary from always giving up the cpu 116 * just before its quantum expires. Otherwise, it would never accumulate 117 * cpu ticks. The mean frequency of the second timer is stathz. 118 * 119 * If no second timer exists, stathz will be zero; in this case we drive 120 * profiling and statistics off the main clock. This WILL NOT be accurate; 121 * do not do it unless absolutely necessary. 122 * 123 * The statistics clock may (or may not) be run at a higher rate while 124 * profiling. This profile clock runs at profhz. We require that profhz 125 * be an integral multiple of stathz. 126 * 127 * If the statistics clock is running fast, it must be divided by the ratio 128 * profhz/stathz for statistics. (For profiling, every tick counts.) 129 * 130 * Time-of-day is maintained using a "timecounter", which may or may 131 * not be related to the hardware generating the above mentioned 132 * interrupts. 133 */ 134 135int stathz; 136int profhz; 137static int profprocs; 138int ticks; 139static int psdiv, pscnt; /* prof => stat divider */ 140int psratio; /* ratio: prof / stat */ 141 142/* 143 * Initialize clock frequencies and start both clocks running. 144 */ 145/* ARGSUSED*/ 146static void 147initclocks(dummy) 148 void *dummy; 149{ 150 register int i; 151 152 /* 153 * Set divisors to 1 (normal case) and let the machine-specific 154 * code do its bit. 155 */ 156 psdiv = pscnt = 1; 157 cpu_initclocks(); 158 159 /* 160 * Compute profhz/stathz, and fix profhz if needed. 161 */ 162 i = stathz ? stathz : hz; 163 if (profhz == 0) 164 profhz = i; 165 psratio = profhz / i; 166} 167 168/* 169 * The real-time timer, interrupting hz times per second. 170 */ 171void 172hardclock(frame) 173 register struct clockframe *frame; 174{ 175 register struct proc *p; 176 177 p = curproc; 178 if (p) { 179 register struct pstats *pstats; 180 181 /* 182 * Run current process's virtual and profile time, as needed. 
183 */ 184 pstats = p->p_stats; 185 if (CLKF_USERMODE(frame) && 186 timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) && 187 itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) 188 psignal(p, SIGVTALRM); 189 if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) && 190 itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) 191 psignal(p, SIGPROF); 192 } 193 194#if defined(SMP) && defined(BETTER_CLOCK) 195 forward_hardclock(pscnt); 196#endif 197 198 /* 199 * If no separate statistics clock is available, run it from here. 200 */ 201 if (stathz == 0) 202 statclock(frame); 203 204 tco_forward(); 205 ticks++; 206 207 /* 208 * Process callouts at a very low cpu priority, so we don't keep the 209 * relatively high clock interrupt priority any longer than necessary. 210 */ 211 if (TAILQ_FIRST(&callwheel[ticks & callwheelmask]) != NULL) { 212 if (CLKF_BASEPRI(frame)) { 213 /* 214 * Save the overhead of a software interrupt; 215 * it will happen as soon as we return, so do it now. 216 */ 217 (void)splsoftclock(); 218 softclock(); 219 } else 220 setsoftclock(); 221 } else if (softticks + 1 == ticks) 222 ++softticks; 223} 224 225/* 226 * Compute number of ticks in the specified amount of time. 227 */ 228int 229tvtohz(tv) 230 struct timeval *tv; 231{ 232 register unsigned long ticks; 233 register long sec, usec; 234 235 /* 236 * If the number of usecs in the whole seconds part of the time 237 * difference fits in a long, then the total number of usecs will 238 * fit in an unsigned long. Compute the total and convert it to 239 * ticks, rounding up and adding 1 to allow for the current tick 240 * to expire. Rounding also depends on unsigned long arithmetic 241 * to avoid overflow. 242 * 243 * Otherwise, if the number of ticks in the whole seconds part of 244 * the time difference fits in a long, then convert the parts to 245 * ticks separately and add, using similar rounding methods and 246 * overflow avoidance. 
This method would work in the previous 247 * case but it is slightly slower and assumes that hz is integral. 248 * 249 * Otherwise, round the time difference down to the maximum 250 * representable value. 251 * 252 * If ints have 32 bits, then the maximum value for any timeout in 253 * 10ms ticks is 248 days. 254 */ 255 sec = tv->tv_sec; 256 usec = tv->tv_usec; 257 if (usec < 0) { 258 sec--; 259 usec += 1000000; 260 } 261 if (sec < 0) { 262#ifdef DIAGNOSTIC 263 if (usec > 0) { 264 sec++; 265 usec -= 1000000; 266 } 267 printf("tvotohz: negative time difference %ld sec %ld usec\n", 268 sec, usec); 269#endif 270 ticks = 1; 271 } else if (sec <= LONG_MAX / 1000000) 272 ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1)) 273 / tick + 1; 274 else if (sec <= LONG_MAX / hz) 275 ticks = sec * hz 276 + ((unsigned long)usec + (tick - 1)) / tick + 1; 277 else 278 ticks = LONG_MAX; 279 if (ticks > INT_MAX) 280 ticks = INT_MAX; 281 return (ticks); 282} 283 284 285/* 286 * Compute number of hz until specified time. Used to 287 * compute third argument to timeout() from an absolute time. 288 */ 289int 290hzto(tv) 291 struct timeval *tv; 292{ 293 struct timeval t2; 294 295 getmicrotime(&t2); 296 t2.tv_sec = tv->tv_sec - t2.tv_sec; 297 t2.tv_usec = tv->tv_usec - t2.tv_usec; 298 return (tvtohz(&t2)); 299} 300 301/* 302 * Start profiling on a process. 303 * 304 * Kernel profiling passes proc0 which never exits and hence 305 * keeps the profile clock running constantly. 306 */ 307void 308startprofclock(p) 309 register struct proc *p; 310{ 311 int s; 312 313 if ((p->p_flag & P_PROFIL) == 0) { 314 p->p_flag |= P_PROFIL; 315 if (++profprocs == 1 && stathz != 0) { 316 s = splstatclock(); 317 psdiv = pscnt = psratio; 318 setstatclockrate(profhz); 319 splx(s); 320 } 321 } 322} 323 324/* 325 * Stop profiling on a process. 
326 */ 327void 328stopprofclock(p) 329 register struct proc *p; 330{ 331 int s; 332 333 if (p->p_flag & P_PROFIL) { 334 p->p_flag &= ~P_PROFIL; 335 if (--profprocs == 0 && stathz != 0) { 336 s = splstatclock(); 337 psdiv = pscnt = 1; 338 setstatclockrate(stathz); 339 splx(s); 340 } 341 } 342} 343 344/* 345 * Statistics clock. Grab profile sample, and if divider reaches 0, 346 * do process and kernel statistics. 347 */ 348void 349statclock(frame) 350 register struct clockframe *frame; 351{ 352#ifdef GPROF 353 register struct gmonparam *g; 354#endif 355 register struct proc *p; 356 register int i; 357 struct pstats *pstats; 358 long rss; 359 struct rusage *ru; 360 struct vmspace *vm; 361 362 if (CLKF_USERMODE(frame)) { 363 p = curproc; 364 if (p->p_flag & P_PROFIL) 365 addupc_intr(p, CLKF_PC(frame), 1); 366#if defined(SMP) && defined(BETTER_CLOCK) 367 if (stathz != 0) 368 forward_statclock(pscnt); 369#endif 370 if (--pscnt > 0) 371 return; 372 /* 373 * Came from user mode; CPU was in user state. 374 * If this process is being profiled record the tick. 375 */ 376 p->p_uticks++; 377 if (p->p_nice > NZERO) 378 cp_time[CP_NICE]++; 379 else 380 cp_time[CP_USER]++; 381 } else { 382#ifdef GPROF 383 /* 384 * Kernel statistics are just like addupc_intr, only easier. 385 */ 386 g = &_gmonparam; 387 if (g->state == GMON_PROF_ON) { 388 i = CLKF_PC(frame) - g->lowpc; 389 if (i < g->textsize) { 390 i /= HISTFRACTION * sizeof(*g->kcount); 391 g->kcount[i]++; 392 } 393 } 394#endif 395#if defined(SMP) && defined(BETTER_CLOCK) 396 if (stathz != 0) 397 forward_statclock(pscnt); 398#endif 399 if (--pscnt > 0) 400 return; 401 /* 402 * Came from kernel mode, so we were: 403 * - handling an interrupt, 404 * - doing syscall or trap work on behalf of the current 405 * user process, or 406 * - spinning in the idle loop. 407 * Whichever it is, charge the time as appropriate. 
408 * Note that we charge interrupts to the current process, 409 * regardless of whether they are ``for'' that process, 410 * so that we know how much of its real time was spent 411 * in ``non-process'' (i.e., interrupt) work. 412 */ 413 p = curproc; 414 if (CLKF_INTR(frame)) { 415 if (p != NULL) 416 p->p_iticks++; 417 cp_time[CP_INTR]++; 418 } else if (p != NULL) { 419 p->p_sticks++; 420 cp_time[CP_SYS]++; 421 } else 422 cp_time[CP_IDLE]++; 423 } 424 pscnt = psdiv; 425 426 /* 427 * We maintain statistics shown by user-level statistics 428 * programs: the amount of time in each cpu state, and 429 * the amount of time each of DK_NDRIVE ``drives'' is busy. 430 * 431 * XXX should either run linked list of drives, or (better) 432 * grab timestamps in the start & done code. 433 */ 434 for (i = 0; i < DK_NDRIVE; i++) 435 if (dk_busy & (1 << i)) 436 dk_time[i]++; 437 438 /* 439 * We adjust the priority of the current process. The priority of 440 * a process gets worse as it accumulates CPU time. The cpu usage 441 * estimator (p_estcpu) is increased here. The formula for computing 442 * priorities (in kern_synch.c) will compute a different value each 443 * time p_estcpu increases by 4. The cpu usage estimator ramps up 444 * quite quickly when the process is running (linearly), and decays 445 * away exponentially, at a rate which is proportionally slower when 446 * the system is busy. The basic principal is that the system will 447 * 90% forget that the process used a lot of CPU time in 5 * loadav 448 * seconds. This causes the system to favor processes which haven't 449 * run much recently, and to round-robin among other processes. 450 */ 451 if (p != NULL) { 452 p->p_cpticks++; 453 if (++p->p_estcpu == 0) 454 p->p_estcpu--; 455 if ((p->p_estcpu & 3) == 0) { 456 resetpriority(p); 457 if (p->p_priority >= PUSER) 458 p->p_priority = p->p_usrpri; 459 } 460 461 /* Update resource usage integrals and maximums. 
*/ 462 if ((pstats = p->p_stats) != NULL && 463 (ru = &pstats->p_ru) != NULL && 464 (vm = p->p_vmspace) != NULL) { 465 ru->ru_ixrss += vm->vm_tsize * PAGE_SIZE / 1024; 466 ru->ru_idrss += vm->vm_dsize * PAGE_SIZE / 1024; 467 ru->ru_isrss += vm->vm_ssize * PAGE_SIZE / 1024; 468 rss = vm->vm_pmap.pm_stats.resident_count * 469 PAGE_SIZE / 1024; 470 if (ru->ru_maxrss < rss) 471 ru->ru_maxrss = rss; 472 } 473 } 474} 475 476/* 477 * Return information about system clocks. 478 */ 479static int 480sysctl_kern_clockrate SYSCTL_HANDLER_ARGS 481{ 482 struct clockinfo clkinfo; 483 /* 484 * Construct clockinfo structure. 485 */ 486 clkinfo.hz = hz; 487 clkinfo.tick = tick; 488 clkinfo.tickadj = tickadj; 489 clkinfo.profhz = profhz; 490 clkinfo.stathz = stathz ? stathz : hz; 491 return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req)); 492} 493 494SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD, 495 0, 0, sysctl_kern_clockrate, "S,clockinfo",""); 496 497static __inline unsigned 498tco_getdelta(struct timecounter *tc) 499{ 500 501 return ((tc->get_timecount(tc) - tc->offset_count) & tc->counter_mask); 502} 503 504/* 505 * We have four functions for looking at the clock, two for microseconds 506 * and two for nanoseconds. For each there is fast but less precise 507 * version "get{nano|micro}time" which will return a time which is up 508 * to 1/HZ previous to the call, whereas the raw version "{nano|micro}time" 509 * will return a timestamp which is as precise as possible. 
510 */ 511 512void 513getmicrotime(struct timeval *tvp) 514{ 515 struct timecounter *tc; 516 517 tc = timecounter; 518 *tvp = tc->microtime; 519} 520 521void 522getnanotime(struct timespec *tsp) 523{ 524 struct timecounter *tc; 525 526 tc = timecounter; 527 *tsp = tc->nanotime; 528} 529 530void 531microtime(struct timeval *tv) 532{ 533 struct timecounter *tc; 534 535 tc = (struct timecounter *)timecounter; 536 tv->tv_sec = tc->offset_sec; 537 tv->tv_usec = tc->offset_micro; 538 tv->tv_usec += ((u_int64_t)tco_getdelta(tc) * tc->scale_micro) >> 32; 539 tv->tv_usec += boottime.tv_usec; 540 tv->tv_sec += boottime.tv_sec; 541 while (tv->tv_usec >= 1000000) { 542 tv->tv_usec -= 1000000; 543 tv->tv_sec++; 544 } 545} 546 547void
| 43 */ 44 45#include <sys/param.h> 46#include <sys/systm.h> 47#include <sys/dkstat.h> 48#include <sys/callout.h> 49#include <sys/kernel.h> 50#include <sys/proc.h> 51#include <sys/resourcevar.h> 52#include <sys/signalvar.h> 53#include <sys/timex.h> 54#include <vm/vm.h> 55#include <sys/lock.h> 56#include <vm/pmap.h> 57#include <vm/vm_map.h> 58#include <sys/sysctl.h> 59 60#include <machine/cpu.h> 61#include <machine/limits.h> 62 63#ifdef GPROF 64#include <sys/gmon.h> 65#endif 66 67#if defined(SMP) && defined(BETTER_CLOCK) 68#include <machine/smp.h> 69#endif 70 71static void initclocks __P((void *dummy)); 72SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL) 73 74static void tco_forward __P((void)); 75static void tco_setscales __P((struct timecounter *tc)); 76static __inline unsigned tco_getdelta __P((struct timecounter *tc)); 77 78/* Some of these don't belong here, but it's easiest to concentrate them. */ 79#if defined(SMP) && defined(BETTER_CLOCK) 80long cp_time[CPUSTATES]; 81#else 82static long cp_time[CPUSTATES]; 83#endif 84long dk_seek[DK_NDRIVE]; 85static long dk_time[DK_NDRIVE]; /* time busy (in statclock ticks) */ 86long dk_wds[DK_NDRIVE]; 87long dk_wpms[DK_NDRIVE]; 88long dk_xfer[DK_NDRIVE]; 89 90int dk_busy; 91int dk_ndrive = 0; 92char dk_names[DK_NDRIVE][DK_NAMELEN]; 93 94long tk_cancc; 95long tk_nin; 96long tk_nout; 97long tk_rawcc; 98 99struct timecounter *timecounter; 100 101time_t time_second; 102 103/* 104 * Clock handling routines. 105 * 106 * This code is written to operate with two timers that run independently of 107 * each other. 108 * 109 * The main timer, running hz times per second, is used to trigger interval 110 * timers, timeouts and rescheduling as needed. 111 * 112 * The second timer handles kernel and user profiling, 113 * and does resource use estimation. If the second timer is programmable, 114 * it is randomized to avoid aliasing between the two clocks. 
For example, 115 * the randomization prevents an adversary from always giving up the cpu 116 * just before its quantum expires. Otherwise, it would never accumulate 117 * cpu ticks. The mean frequency of the second timer is stathz. 118 * 119 * If no second timer exists, stathz will be zero; in this case we drive 120 * profiling and statistics off the main clock. This WILL NOT be accurate; 121 * do not do it unless absolutely necessary. 122 * 123 * The statistics clock may (or may not) be run at a higher rate while 124 * profiling. This profile clock runs at profhz. We require that profhz 125 * be an integral multiple of stathz. 126 * 127 * If the statistics clock is running fast, it must be divided by the ratio 128 * profhz/stathz for statistics. (For profiling, every tick counts.) 129 * 130 * Time-of-day is maintained using a "timecounter", which may or may 131 * not be related to the hardware generating the above mentioned 132 * interrupts. 133 */ 134 135int stathz; 136int profhz; 137static int profprocs; 138int ticks; 139static int psdiv, pscnt; /* prof => stat divider */ 140int psratio; /* ratio: prof / stat */ 141 142/* 143 * Initialize clock frequencies and start both clocks running. 144 */ 145/* ARGSUSED*/ 146static void 147initclocks(dummy) 148 void *dummy; 149{ 150 register int i; 151 152 /* 153 * Set divisors to 1 (normal case) and let the machine-specific 154 * code do its bit. 155 */ 156 psdiv = pscnt = 1; 157 cpu_initclocks(); 158 159 /* 160 * Compute profhz/stathz, and fix profhz if needed. 161 */ 162 i = stathz ? stathz : hz; 163 if (profhz == 0) 164 profhz = i; 165 psratio = profhz / i; 166} 167 168/* 169 * The real-time timer, interrupting hz times per second. 170 */ 171void 172hardclock(frame) 173 register struct clockframe *frame; 174{ 175 register struct proc *p; 176 177 p = curproc; 178 if (p) { 179 register struct pstats *pstats; 180 181 /* 182 * Run current process's virtual and profile time, as needed. 
183 */ 184 pstats = p->p_stats; 185 if (CLKF_USERMODE(frame) && 186 timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) && 187 itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) 188 psignal(p, SIGVTALRM); 189 if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) && 190 itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) 191 psignal(p, SIGPROF); 192 } 193 194#if defined(SMP) && defined(BETTER_CLOCK) 195 forward_hardclock(pscnt); 196#endif 197 198 /* 199 * If no separate statistics clock is available, run it from here. 200 */ 201 if (stathz == 0) 202 statclock(frame); 203 204 tco_forward(); 205 ticks++; 206 207 /* 208 * Process callouts at a very low cpu priority, so we don't keep the 209 * relatively high clock interrupt priority any longer than necessary. 210 */ 211 if (TAILQ_FIRST(&callwheel[ticks & callwheelmask]) != NULL) { 212 if (CLKF_BASEPRI(frame)) { 213 /* 214 * Save the overhead of a software interrupt; 215 * it will happen as soon as we return, so do it now. 216 */ 217 (void)splsoftclock(); 218 softclock(); 219 } else 220 setsoftclock(); 221 } else if (softticks + 1 == ticks) 222 ++softticks; 223} 224 225/* 226 * Compute number of ticks in the specified amount of time. 227 */ 228int 229tvtohz(tv) 230 struct timeval *tv; 231{ 232 register unsigned long ticks; 233 register long sec, usec; 234 235 /* 236 * If the number of usecs in the whole seconds part of the time 237 * difference fits in a long, then the total number of usecs will 238 * fit in an unsigned long. Compute the total and convert it to 239 * ticks, rounding up and adding 1 to allow for the current tick 240 * to expire. Rounding also depends on unsigned long arithmetic 241 * to avoid overflow. 242 * 243 * Otherwise, if the number of ticks in the whole seconds part of 244 * the time difference fits in a long, then convert the parts to 245 * ticks separately and add, using similar rounding methods and 246 * overflow avoidance. 
This method would work in the previous 247 * case but it is slightly slower and assumes that hz is integral. 248 * 249 * Otherwise, round the time difference down to the maximum 250 * representable value. 251 * 252 * If ints have 32 bits, then the maximum value for any timeout in 253 * 10ms ticks is 248 days. 254 */ 255 sec = tv->tv_sec; 256 usec = tv->tv_usec; 257 if (usec < 0) { 258 sec--; 259 usec += 1000000; 260 } 261 if (sec < 0) { 262#ifdef DIAGNOSTIC 263 if (usec > 0) { 264 sec++; 265 usec -= 1000000; 266 } 267 printf("tvotohz: negative time difference %ld sec %ld usec\n", 268 sec, usec); 269#endif 270 ticks = 1; 271 } else if (sec <= LONG_MAX / 1000000) 272 ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1)) 273 / tick + 1; 274 else if (sec <= LONG_MAX / hz) 275 ticks = sec * hz 276 + ((unsigned long)usec + (tick - 1)) / tick + 1; 277 else 278 ticks = LONG_MAX; 279 if (ticks > INT_MAX) 280 ticks = INT_MAX; 281 return (ticks); 282} 283 284 285/* 286 * Compute number of hz until specified time. Used to 287 * compute third argument to timeout() from an absolute time. 288 */ 289int 290hzto(tv) 291 struct timeval *tv; 292{ 293 struct timeval t2; 294 295 getmicrotime(&t2); 296 t2.tv_sec = tv->tv_sec - t2.tv_sec; 297 t2.tv_usec = tv->tv_usec - t2.tv_usec; 298 return (tvtohz(&t2)); 299} 300 301/* 302 * Start profiling on a process. 303 * 304 * Kernel profiling passes proc0 which never exits and hence 305 * keeps the profile clock running constantly. 306 */ 307void 308startprofclock(p) 309 register struct proc *p; 310{ 311 int s; 312 313 if ((p->p_flag & P_PROFIL) == 0) { 314 p->p_flag |= P_PROFIL; 315 if (++profprocs == 1 && stathz != 0) { 316 s = splstatclock(); 317 psdiv = pscnt = psratio; 318 setstatclockrate(profhz); 319 splx(s); 320 } 321 } 322} 323 324/* 325 * Stop profiling on a process. 
326 */ 327void 328stopprofclock(p) 329 register struct proc *p; 330{ 331 int s; 332 333 if (p->p_flag & P_PROFIL) { 334 p->p_flag &= ~P_PROFIL; 335 if (--profprocs == 0 && stathz != 0) { 336 s = splstatclock(); 337 psdiv = pscnt = 1; 338 setstatclockrate(stathz); 339 splx(s); 340 } 341 } 342} 343 344/* 345 * Statistics clock. Grab profile sample, and if divider reaches 0, 346 * do process and kernel statistics. 347 */ 348void 349statclock(frame) 350 register struct clockframe *frame; 351{ 352#ifdef GPROF 353 register struct gmonparam *g; 354#endif 355 register struct proc *p; 356 register int i; 357 struct pstats *pstats; 358 long rss; 359 struct rusage *ru; 360 struct vmspace *vm; 361 362 if (CLKF_USERMODE(frame)) { 363 p = curproc; 364 if (p->p_flag & P_PROFIL) 365 addupc_intr(p, CLKF_PC(frame), 1); 366#if defined(SMP) && defined(BETTER_CLOCK) 367 if (stathz != 0) 368 forward_statclock(pscnt); 369#endif 370 if (--pscnt > 0) 371 return; 372 /* 373 * Came from user mode; CPU was in user state. 374 * If this process is being profiled record the tick. 375 */ 376 p->p_uticks++; 377 if (p->p_nice > NZERO) 378 cp_time[CP_NICE]++; 379 else 380 cp_time[CP_USER]++; 381 } else { 382#ifdef GPROF 383 /* 384 * Kernel statistics are just like addupc_intr, only easier. 385 */ 386 g = &_gmonparam; 387 if (g->state == GMON_PROF_ON) { 388 i = CLKF_PC(frame) - g->lowpc; 389 if (i < g->textsize) { 390 i /= HISTFRACTION * sizeof(*g->kcount); 391 g->kcount[i]++; 392 } 393 } 394#endif 395#if defined(SMP) && defined(BETTER_CLOCK) 396 if (stathz != 0) 397 forward_statclock(pscnt); 398#endif 399 if (--pscnt > 0) 400 return; 401 /* 402 * Came from kernel mode, so we were: 403 * - handling an interrupt, 404 * - doing syscall or trap work on behalf of the current 405 * user process, or 406 * - spinning in the idle loop. 407 * Whichever it is, charge the time as appropriate. 
408 * Note that we charge interrupts to the current process, 409 * regardless of whether they are ``for'' that process, 410 * so that we know how much of its real time was spent 411 * in ``non-process'' (i.e., interrupt) work. 412 */ 413 p = curproc; 414 if (CLKF_INTR(frame)) { 415 if (p != NULL) 416 p->p_iticks++; 417 cp_time[CP_INTR]++; 418 } else if (p != NULL) { 419 p->p_sticks++; 420 cp_time[CP_SYS]++; 421 } else 422 cp_time[CP_IDLE]++; 423 } 424 pscnt = psdiv; 425 426 /* 427 * We maintain statistics shown by user-level statistics 428 * programs: the amount of time in each cpu state, and 429 * the amount of time each of DK_NDRIVE ``drives'' is busy. 430 * 431 * XXX should either run linked list of drives, or (better) 432 * grab timestamps in the start & done code. 433 */ 434 for (i = 0; i < DK_NDRIVE; i++) 435 if (dk_busy & (1 << i)) 436 dk_time[i]++; 437 438 /* 439 * We adjust the priority of the current process. The priority of 440 * a process gets worse as it accumulates CPU time. The cpu usage 441 * estimator (p_estcpu) is increased here. The formula for computing 442 * priorities (in kern_synch.c) will compute a different value each 443 * time p_estcpu increases by 4. The cpu usage estimator ramps up 444 * quite quickly when the process is running (linearly), and decays 445 * away exponentially, at a rate which is proportionally slower when 446 * the system is busy. The basic principal is that the system will 447 * 90% forget that the process used a lot of CPU time in 5 * loadav 448 * seconds. This causes the system to favor processes which haven't 449 * run much recently, and to round-robin among other processes. 450 */ 451 if (p != NULL) { 452 p->p_cpticks++; 453 if (++p->p_estcpu == 0) 454 p->p_estcpu--; 455 if ((p->p_estcpu & 3) == 0) { 456 resetpriority(p); 457 if (p->p_priority >= PUSER) 458 p->p_priority = p->p_usrpri; 459 } 460 461 /* Update resource usage integrals and maximums. 
*/ 462 if ((pstats = p->p_stats) != NULL && 463 (ru = &pstats->p_ru) != NULL && 464 (vm = p->p_vmspace) != NULL) { 465 ru->ru_ixrss += vm->vm_tsize * PAGE_SIZE / 1024; 466 ru->ru_idrss += vm->vm_dsize * PAGE_SIZE / 1024; 467 ru->ru_isrss += vm->vm_ssize * PAGE_SIZE / 1024; 468 rss = vm->vm_pmap.pm_stats.resident_count * 469 PAGE_SIZE / 1024; 470 if (ru->ru_maxrss < rss) 471 ru->ru_maxrss = rss; 472 } 473 } 474} 475 476/* 477 * Return information about system clocks. 478 */ 479static int 480sysctl_kern_clockrate SYSCTL_HANDLER_ARGS 481{ 482 struct clockinfo clkinfo; 483 /* 484 * Construct clockinfo structure. 485 */ 486 clkinfo.hz = hz; 487 clkinfo.tick = tick; 488 clkinfo.tickadj = tickadj; 489 clkinfo.profhz = profhz; 490 clkinfo.stathz = stathz ? stathz : hz; 491 return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req)); 492} 493 494SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD, 495 0, 0, sysctl_kern_clockrate, "S,clockinfo",""); 496 497static __inline unsigned 498tco_getdelta(struct timecounter *tc) 499{ 500 501 return ((tc->get_timecount(tc) - tc->offset_count) & tc->counter_mask); 502} 503 504/* 505 * We have four functions for looking at the clock, two for microseconds 506 * and two for nanoseconds. For each there is fast but less precise 507 * version "get{nano|micro}time" which will return a time which is up 508 * to 1/HZ previous to the call, whereas the raw version "{nano|micro}time" 509 * will return a timestamp which is as precise as possible. 
510 */ 511 512void 513getmicrotime(struct timeval *tvp) 514{ 515 struct timecounter *tc; 516 517 tc = timecounter; 518 *tvp = tc->microtime; 519} 520 521void 522getnanotime(struct timespec *tsp) 523{ 524 struct timecounter *tc; 525 526 tc = timecounter; 527 *tsp = tc->nanotime; 528} 529 530void 531microtime(struct timeval *tv) 532{ 533 struct timecounter *tc; 534 535 tc = (struct timecounter *)timecounter; 536 tv->tv_sec = tc->offset_sec; 537 tv->tv_usec = tc->offset_micro; 538 tv->tv_usec += ((u_int64_t)tco_getdelta(tc) * tc->scale_micro) >> 32; 539 tv->tv_usec += boottime.tv_usec; 540 tv->tv_sec += boottime.tv_sec; 541 while (tv->tv_usec >= 1000000) { 542 tv->tv_usec -= 1000000; 543 tv->tv_sec++; 544 } 545} 546 547void
|
548nanotime(struct timespec *tv)
| 548nanotime(struct timespec *ts)
|
549{ 550 unsigned count; 551 u_int64_t delta; 552 struct timecounter *tc; 553 554 tc = (struct timecounter *)timecounter;
| 549{ 550 unsigned count; 551 u_int64_t delta; 552 struct timecounter *tc; 553 554 tc = (struct timecounter *)timecounter;
|
555 tv->tv_sec = tc->offset_sec;
| 555 ts->tv_sec = tc->offset_sec;
|
556 count = tco_getdelta(tc); 557 delta = tc->offset_nano; 558 delta += ((u_int64_t)count * tc->scale_nano_f); 559 delta >>= 32; 560 delta += ((u_int64_t)count * tc->scale_nano_i); 561 delta += boottime.tv_usec * 1000;
| 556 count = tco_getdelta(tc); 557 delta = tc->offset_nano; 558 delta += ((u_int64_t)count * tc->scale_nano_f); 559 delta >>= 32; 560 delta += ((u_int64_t)count * tc->scale_nano_i); 561 delta += boottime.tv_usec * 1000;
|
562 tv->tv_sec += boottime.tv_sec;
| 562 ts->tv_sec += boottime.tv_sec;
|
563 while (delta >= 1000000000) { 564 delta -= 1000000000;
| 563 while (delta >= 1000000000) { 564 delta -= 1000000000;
|
565 tv->tv_sec++;
| 565 ts->tv_sec++;
|
566 }
| 566 }
|
567 tv->tv_nsec = delta;
| 567 ts->tv_nsec = delta;
|
568} 569 570void
| 568} 569 570void
|
| 571timecounter_timespec(unsigned count, struct timespec *ts) 572{ 573 u_int64_t delta; 574 struct timecounter *tc; 575 576 tc = (struct timecounter *)timecounter; 577 ts->tv_sec = tc->offset_sec; 578 count -= tc->offset_count; 579 count &= tc->counter_mask; 580 delta = tc->offset_nano; 581 delta += ((u_int64_t)count * tc->scale_nano_f); 582 delta >>= 32; 583 delta += ((u_int64_t)count * tc->scale_nano_i); 584 delta += boottime.tv_usec * 1000; 585 ts->tv_sec += boottime.tv_sec; 586 while (delta >= 1000000000) { 587 delta -= 1000000000; 588 ts->tv_sec++; 589 } 590 ts->tv_nsec = delta; 591} 592 593void
|
571getmicrouptime(struct timeval *tvp) 572{ 573 struct timecounter *tc; 574 575 tc = timecounter; 576 tvp->tv_sec = tc->offset_sec; 577 tvp->tv_usec = tc->offset_micro; 578} 579 580void 581getnanouptime(struct timespec *tsp) 582{ 583 struct timecounter *tc; 584 585 tc = timecounter; 586 tsp->tv_sec = tc->offset_sec; 587 tsp->tv_nsec = tc->offset_nano >> 32; 588} 589 590void 591microuptime(struct timeval *tv) 592{ 593 struct timecounter *tc; 594 595 tc = (struct timecounter *)timecounter; 596 tv->tv_sec = tc->offset_sec; 597 tv->tv_usec = tc->offset_micro; 598 tv->tv_usec += ((u_int64_t)tco_getdelta(tc) * tc->scale_micro) >> 32; 599 if (tv->tv_usec >= 1000000) { 600 tv->tv_usec -= 1000000; 601 tv->tv_sec++; 602 } 603} 604 605void 606nanouptime(struct timespec *tv) 607{ 608 unsigned count; 609 u_int64_t delta; 610 struct timecounter *tc; 611 612 tc = (struct timecounter *)timecounter; 613 tv->tv_sec = tc->offset_sec; 614 count = tco_getdelta(tc); 615 delta = tc->offset_nano; 616 delta += ((u_int64_t)count * tc->scale_nano_f); 617 delta >>= 32; 618 delta += ((u_int64_t)count * tc->scale_nano_i); 619 if (delta >= 1000000000) { 620 delta -= 1000000000; 621 tv->tv_sec++; 622 } 623 tv->tv_nsec = delta; 624} 625 626static void 627tco_setscales(struct timecounter *tc) 628{ 629 u_int64_t scale; 630 631 scale = 1000000000LL << 32; 632 if (tc->adjustment > 0) 633 scale += (tc->adjustment * 1000LL) << 10; 634 else 635 scale -= (-tc->adjustment * 1000LL) << 10; 636 scale /= tc->frequency; 637 tc->scale_micro = scale / 1000; 638 tc->scale_nano_f = scale & 0xffffffff; 639 tc->scale_nano_i = scale >> 32; 640} 641 642void 643init_timecounter(struct timecounter *tc) 644{ 645 struct timespec ts0, ts1; 646 int i; 647 648 tc->adjustment = 0; 649 tco_setscales(tc); 650 tc->offset_count = tc->get_timecount(tc); 651 tc[0].tweak = &tc[0]; 652 tc[2] = tc[1] = tc[0]; 653 tc[1].other = &tc[2]; 654 tc[2].other = &tc[1]; 655 if (!timecounter || !strcmp(timecounter->name, "dummy")) 656 
timecounter = &tc[2]; 657 tc = &tc[1]; 658 659 /* 660 * Figure out the cost of calling this timecounter. 661 */ 662 nanotime(&ts0); 663 for (i = 0; i < 256; i ++) 664 tc->get_timecount(tc); 665 nanotime(&ts1); 666 ts1.tv_sec -= ts0.tv_sec; 667 tc->cost = ts1.tv_sec * 1000000000 + ts1.tv_nsec - ts0.tv_nsec; 668 tc->cost >>= 8; 669 if (print_tci && strcmp(tc->name, "dummy")) 670 printf("Timecounter \"%s\" frequency %lu Hz cost %u ns\n", 671 tc->name, tc->frequency, tc->cost); 672 673 /* XXX: For now always start using the counter. */ 674 tc->offset_count = tc->get_timecount(tc); 675 nanotime(&ts1); 676 tc->offset_nano = (u_int64_t)ts1.tv_nsec << 32; 677 tc->offset_micro = ts1.tv_nsec / 1000; 678 tc->offset_sec = ts1.tv_sec; 679 timecounter = tc; 680} 681 682void 683set_timecounter(struct timespec *ts) 684{ 685 struct timespec ts2; 686 687 nanouptime(&ts2); 688 boottime.tv_sec = ts->tv_sec - ts2.tv_sec; 689 boottime.tv_usec = (ts->tv_nsec - ts2.tv_nsec) / 1000; 690 if (boottime.tv_usec < 0) { 691 boottime.tv_usec += 1000000; 692 boottime.tv_sec--; 693 } 694 /* fiddle all the little crinkly bits around the fiords... */ 695 tco_forward(); 696} 697 698 699#if 0 /* Currently unused */ 700void 701switch_timecounter(struct timecounter *newtc) 702{ 703 int s; 704 struct timecounter *tc; 705 struct timespec ts; 706 707 s = splclock(); 708 tc = timecounter; 709 if (newtc == tc || newtc == tc->other) { 710 splx(s); 711 return; 712 } 713 nanotime(&ts); 714 newtc->offset_sec = ts.tv_sec; 715 newtc->offset_nano = (u_int64_t)ts.tv_nsec << 32; 716 newtc->offset_micro = ts.tv_nsec / 1000; 717 newtc->offset_count = newtc->get_timecount(newtc); 718 timecounter = newtc; 719 splx(s); 720} 721#endif 722 723static struct timecounter * 724sync_other_counter(void) 725{ 726 struct timecounter *tc, *tco; 727 unsigned delta; 728
| 594getmicrouptime(struct timeval *tvp) 595{ 596 struct timecounter *tc; 597 598 tc = timecounter; 599 tvp->tv_sec = tc->offset_sec; 600 tvp->tv_usec = tc->offset_micro; 601} 602 603void 604getnanouptime(struct timespec *tsp) 605{ 606 struct timecounter *tc; 607 608 tc = timecounter; 609 tsp->tv_sec = tc->offset_sec; 610 tsp->tv_nsec = tc->offset_nano >> 32; 611} 612 613void 614microuptime(struct timeval *tv) 615{ 616 struct timecounter *tc; 617 618 tc = (struct timecounter *)timecounter; 619 tv->tv_sec = tc->offset_sec; 620 tv->tv_usec = tc->offset_micro; 621 tv->tv_usec += ((u_int64_t)tco_getdelta(tc) * tc->scale_micro) >> 32; 622 if (tv->tv_usec >= 1000000) { 623 tv->tv_usec -= 1000000; 624 tv->tv_sec++; 625 } 626} 627 628void 629nanouptime(struct timespec *tv) 630{ 631 unsigned count; 632 u_int64_t delta; 633 struct timecounter *tc; 634 635 tc = (struct timecounter *)timecounter; 636 tv->tv_sec = tc->offset_sec; 637 count = tco_getdelta(tc); 638 delta = tc->offset_nano; 639 delta += ((u_int64_t)count * tc->scale_nano_f); 640 delta >>= 32; 641 delta += ((u_int64_t)count * tc->scale_nano_i); 642 if (delta >= 1000000000) { 643 delta -= 1000000000; 644 tv->tv_sec++; 645 } 646 tv->tv_nsec = delta; 647} 648 649static void 650tco_setscales(struct timecounter *tc) 651{ 652 u_int64_t scale; 653 654 scale = 1000000000LL << 32; 655 if (tc->adjustment > 0) 656 scale += (tc->adjustment * 1000LL) << 10; 657 else 658 scale -= (-tc->adjustment * 1000LL) << 10; 659 scale /= tc->frequency; 660 tc->scale_micro = scale / 1000; 661 tc->scale_nano_f = scale & 0xffffffff; 662 tc->scale_nano_i = scale >> 32; 663} 664 665void 666init_timecounter(struct timecounter *tc) 667{ 668 struct timespec ts0, ts1; 669 int i; 670 671 tc->adjustment = 0; 672 tco_setscales(tc); 673 tc->offset_count = tc->get_timecount(tc); 674 tc[0].tweak = &tc[0]; 675 tc[2] = tc[1] = tc[0]; 676 tc[1].other = &tc[2]; 677 tc[2].other = &tc[1]; 678 if (!timecounter || !strcmp(timecounter->name, "dummy")) 679 
timecounter = &tc[2]; 680 tc = &tc[1]; 681 682 /* 683 * Figure out the cost of calling this timecounter. 684 */ 685 nanotime(&ts0); 686 for (i = 0; i < 256; i ++) 687 tc->get_timecount(tc); 688 nanotime(&ts1); 689 ts1.tv_sec -= ts0.tv_sec; 690 tc->cost = ts1.tv_sec * 1000000000 + ts1.tv_nsec - ts0.tv_nsec; 691 tc->cost >>= 8; 692 if (print_tci && strcmp(tc->name, "dummy")) 693 printf("Timecounter \"%s\" frequency %lu Hz cost %u ns\n", 694 tc->name, tc->frequency, tc->cost); 695 696 /* XXX: For now always start using the counter. */ 697 tc->offset_count = tc->get_timecount(tc); 698 nanotime(&ts1); 699 tc->offset_nano = (u_int64_t)ts1.tv_nsec << 32; 700 tc->offset_micro = ts1.tv_nsec / 1000; 701 tc->offset_sec = ts1.tv_sec; 702 timecounter = tc; 703} 704 705void 706set_timecounter(struct timespec *ts) 707{ 708 struct timespec ts2; 709 710 nanouptime(&ts2); 711 boottime.tv_sec = ts->tv_sec - ts2.tv_sec; 712 boottime.tv_usec = (ts->tv_nsec - ts2.tv_nsec) / 1000; 713 if (boottime.tv_usec < 0) { 714 boottime.tv_usec += 1000000; 715 boottime.tv_sec--; 716 } 717 /* fiddle all the little crinkly bits around the fiords... */ 718 tco_forward(); 719} 720 721 722#if 0 /* Currently unused */ 723void 724switch_timecounter(struct timecounter *newtc) 725{ 726 int s; 727 struct timecounter *tc; 728 struct timespec ts; 729 730 s = splclock(); 731 tc = timecounter; 732 if (newtc == tc || newtc == tc->other) { 733 splx(s); 734 return; 735 } 736 nanotime(&ts); 737 newtc->offset_sec = ts.tv_sec; 738 newtc->offset_nano = (u_int64_t)ts.tv_nsec << 32; 739 newtc->offset_micro = ts.tv_nsec / 1000; 740 newtc->offset_count = newtc->get_timecount(newtc); 741 timecounter = newtc; 742 splx(s); 743} 744#endif 745 746static struct timecounter * 747sync_other_counter(void) 748{ 749 struct timecounter *tc, *tco; 750 unsigned delta; 751
|
| 752 if (timecounter->poll_pps) 753 timecounter->poll_pps(timecounter);
|
729 tc = timecounter->other; 730 tco = tc->other; 731 *tc = *timecounter; 732 tc->other = tco; 733 delta = tco_getdelta(tc); 734 tc->offset_count += delta; 735 tc->offset_count &= tc->counter_mask; 736 tc->offset_nano += (u_int64_t)delta * tc->scale_nano_f; 737 tc->offset_nano += (u_int64_t)delta * tc->scale_nano_i << 32; 738 return (tc); 739} 740 741static void 742tco_forward(void) 743{ 744 struct timecounter *tc; 745 746 tc = sync_other_counter(); 747 if (timedelta != 0) { 748 tc->offset_nano += (u_int64_t)(tickdelta * 1000) << 32; 749 timedelta -= tickdelta; 750 } 751 752 while (tc->offset_nano >= 1000000000ULL << 32) { 753 tc->offset_nano -= 1000000000ULL << 32; 754 tc->offset_sec++; 755 tc->frequency = tc->tweak->frequency; 756 tc->adjustment = tc->tweak->adjustment; 757 ntp_update_second(tc); /* XXX only needed if xntpd runs */ 758 tco_setscales(tc); 759 } 760 761 tc->offset_micro = (tc->offset_nano / 1000) >> 32; 762 763 /* Figure out the wall-clock time */ 764 tc->nanotime.tv_sec = tc->offset_sec + boottime.tv_sec; 765 tc->nanotime.tv_nsec = (tc->offset_nano >> 32) + boottime.tv_usec * 1000; 766 tc->microtime.tv_usec = tc->offset_micro + boottime.tv_usec; 767 if (tc->nanotime.tv_nsec >= 1000000000) { 768 tc->nanotime.tv_nsec -= 1000000000; 769 tc->microtime.tv_usec -= 1000000; 770 tc->nanotime.tv_sec++; 771 } 772 time_second = tc->microtime.tv_sec = tc->nanotime.tv_sec; 773 774 timecounter = tc; 775} 776 777static int 778sysctl_kern_timecounter_frequency SYSCTL_HANDLER_ARGS 779{ 780 781 return (sysctl_handle_opaque(oidp, &timecounter->tweak->frequency, 782 sizeof(timecounter->tweak->frequency), req)); 783} 784 785static int 786sysctl_kern_timecounter_adjustment SYSCTL_HANDLER_ARGS 787{ 788 789 return (sysctl_handle_opaque(oidp, &timecounter->tweak->adjustment, 790 sizeof(timecounter->tweak->adjustment), req)); 791} 792 793SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, ""); 794 795SYSCTL_PROC(_kern_timecounter, OID_AUTO, frequency, CTLTYPE_INT | 
CTLFLAG_RW, 796 0, sizeof(u_int), sysctl_kern_timecounter_frequency, "I", ""); 797 798SYSCTL_PROC(_kern_timecounter, OID_AUTO, adjustment, CTLTYPE_INT | CTLFLAG_RW, 799 0, sizeof(int), sysctl_kern_timecounter_adjustment, "I", ""); 800 801/* 802 * Implement a dummy timecounter which we can use until we get a real one 803 * in the air. This allows the console and other early stuff to use 804 * timeservices. 805 */ 806 807static unsigned 808dummy_get_timecount(void *tc) 809{ 810 static unsigned now; 811 return (++now); 812} 813 814static struct timecounter dummy_timecounter[3] = { 815 { 816 dummy_get_timecount,
| 754 tc = timecounter->other; 755 tco = tc->other; 756 *tc = *timecounter; 757 tc->other = tco; 758 delta = tco_getdelta(tc); 759 tc->offset_count += delta; 760 tc->offset_count &= tc->counter_mask; 761 tc->offset_nano += (u_int64_t)delta * tc->scale_nano_f; 762 tc->offset_nano += (u_int64_t)delta * tc->scale_nano_i << 32; 763 return (tc); 764} 765 766static void 767tco_forward(void) 768{ 769 struct timecounter *tc; 770 771 tc = sync_other_counter(); 772 if (timedelta != 0) { 773 tc->offset_nano += (u_int64_t)(tickdelta * 1000) << 32; 774 timedelta -= tickdelta; 775 } 776 777 while (tc->offset_nano >= 1000000000ULL << 32) { 778 tc->offset_nano -= 1000000000ULL << 32; 779 tc->offset_sec++; 780 tc->frequency = tc->tweak->frequency; 781 tc->adjustment = tc->tweak->adjustment; 782 ntp_update_second(tc); /* XXX only needed if xntpd runs */ 783 tco_setscales(tc); 784 } 785 786 tc->offset_micro = (tc->offset_nano / 1000) >> 32; 787 788 /* Figure out the wall-clock time */ 789 tc->nanotime.tv_sec = tc->offset_sec + boottime.tv_sec; 790 tc->nanotime.tv_nsec = (tc->offset_nano >> 32) + boottime.tv_usec * 1000; 791 tc->microtime.tv_usec = tc->offset_micro + boottime.tv_usec; 792 if (tc->nanotime.tv_nsec >= 1000000000) { 793 tc->nanotime.tv_nsec -= 1000000000; 794 tc->microtime.tv_usec -= 1000000; 795 tc->nanotime.tv_sec++; 796 } 797 time_second = tc->microtime.tv_sec = tc->nanotime.tv_sec; 798 799 timecounter = tc; 800} 801 802static int 803sysctl_kern_timecounter_frequency SYSCTL_HANDLER_ARGS 804{ 805 806 return (sysctl_handle_opaque(oidp, &timecounter->tweak->frequency, 807 sizeof(timecounter->tweak->frequency), req)); 808} 809 810static int 811sysctl_kern_timecounter_adjustment SYSCTL_HANDLER_ARGS 812{ 813 814 return (sysctl_handle_opaque(oidp, &timecounter->tweak->adjustment, 815 sizeof(timecounter->tweak->adjustment), req)); 816} 817 818SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, ""); 819 820SYSCTL_PROC(_kern_timecounter, OID_AUTO, frequency, CTLTYPE_INT | 
CTLFLAG_RW, 821 0, sizeof(u_int), sysctl_kern_timecounter_frequency, "I", ""); 822 823SYSCTL_PROC(_kern_timecounter, OID_AUTO, adjustment, CTLTYPE_INT | CTLFLAG_RW, 824 0, sizeof(int), sysctl_kern_timecounter_adjustment, "I", ""); 825 826/* 827 * Implement a dummy timecounter which we can use until we get a real one 828 * in the air. This allows the console and other early stuff to use 829 * timeservices. 830 */ 831 832static unsigned 833dummy_get_timecount(void *tc) 834{ 835 static unsigned now; 836 return (++now); 837} 838 839static struct timecounter dummy_timecounter[3] = { 840 { 841 dummy_get_timecount,
|
| 842 0,
|
817 ~0u, 818 1000000, 819 "dummy" 820 } 821}; 822 823static void 824initdummytimecounter(void *dummy) 825{ 826 init_timecounter(dummy_timecounter); 827} 828 829SYSINIT(dummytc, SI_SUB_CONSOLE, SI_ORDER_FIRST, initdummytimecounter, NULL)
| 843 ~0u, 844 1000000, 845 "dummy" 846 } 847}; 848 849static void 850initdummytimecounter(void *dummy) 851{ 852 init_timecounter(dummy_timecounter); 853} 854 855SYSINIT(dummytc, SI_SUB_CONSOLE, SI_ORDER_FIRST, initdummytimecounter, NULL)
|