kern_tc.c revision 34618
1static volatile int print_tci = 1; 2 3/*- 4 * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org> 5 * Copyright (c) 1982, 1986, 1991, 1993 6 * The Regents of the University of California. All rights reserved. 7 * (c) UNIX System Laboratories, Inc. 8 * All or some portions of this file are derived from material licensed 9 * to the University of California by American Telephone and Telegraph 10 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 11 * the permission of UNIX System Laboratories, Inc. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 3. All advertising materials mentioning features or use of this software 22 * must display the following acknowledgement: 23 * This product includes software developed by the University of 24 * California, Berkeley and its contributors. 25 * 4. Neither the name of the University nor the names of its contributors 26 * may be used to endorse or promote products derived from this software 27 * without specific prior written permission. 28 * 29 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 32 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 39 * SUCH DAMAGE. 40 * 41 * @(#)kern_clock.c 8.5 (Berkeley) 1/21/94 42 * $Id: kern_clock.c,v 1.57 1998/02/20 16:35:49 phk Exp $ 43 */ 44 45#include <sys/param.h> 46#include <sys/systm.h> 47#include <sys/dkstat.h> 48#include <sys/callout.h> 49#include <sys/kernel.h> 50#include <sys/proc.h> 51#include <sys/resourcevar.h> 52#include <sys/signalvar.h> 53#include <sys/timex.h> 54#include <vm/vm.h> 55#include <sys/lock.h> 56#include <vm/pmap.h> 57#include <vm/vm_map.h> 58#include <sys/sysctl.h> 59 60#include <machine/cpu.h> 61#include <machine/limits.h> 62 63#ifdef GPROF 64#include <sys/gmon.h> 65#endif 66 67#if defined(SMP) && defined(BETTER_CLOCK) 68#include <machine/smp.h> 69#endif 70 71static void initclocks __P((void *dummy)); 72SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL) 73 74static void tco_forward __P((void)); 75static void tco_setscales __P((struct timecounter *tc)); 76 77/* Some of these don't belong here, but it's easiest to concentrate them. */ 78#if defined(SMP) && defined(BETTER_CLOCK) 79long cp_time[CPUSTATES]; 80#else 81static long cp_time[CPUSTATES]; 82#endif 83long dk_seek[DK_NDRIVE]; 84static long dk_time[DK_NDRIVE]; /* time busy (in statclock ticks) */ 85long dk_wds[DK_NDRIVE]; 86long dk_wpms[DK_NDRIVE]; 87long dk_xfer[DK_NDRIVE]; 88 89int dk_busy; 90int dk_ndrive = 0; 91char dk_names[DK_NDRIVE][DK_NAMELEN]; 92 93long tk_cancc; 94long tk_nin; 95long tk_nout; 96long tk_rawcc; 97 98struct timecounter *timecounter; 99 100/* 101 * Clock handling routines. 102 * 103 * This code is written to operate with two timers that run independently of 104 * each other. 105 * 106 * The main timer, running hz times per second, is used to trigger interval 107 * timers, timeouts and rescheduling as needed. 108 * 109 * The second timer handles kernel and user profiling, 110 * and does resource use estimation. If the second timer is programmable, 111 * it is randomized to avoid aliasing between the two clocks. For example, 112 * the randomization prevents an adversary from always giving up the cpu 113 * just before its quantum expires. Otherwise, it would never accumulate 114 * cpu ticks. The mean frequency of the second timer is stathz. 115 * 116 * If no second timer exists, stathz will be zero; in this case we drive 117 * profiling and statistics off the main clock. This WILL NOT be accurate; 118 * do not do it unless absolutely necessary. 119 * 120 * The statistics clock may (or may not) be run at a higher rate while 121 * profiling. This profile clock runs at profhz. We require that profhz 122 * be an integral multiple of stathz. 123 * 124 * If the statistics clock is running fast, it must be divided by the ratio 125 * profhz/stathz for statistics. (For profiling, every tick counts.) 126 * 127 * Time-of-day is maintained using a "timecounter", which may or may 128 * not be related to the hardware generating the above mentioned 129 * interrupts. 130 */ 131 132int stathz; 133int profhz; 134static int profprocs; 135int ticks; 136static int psdiv, pscnt; /* prof => stat divider */ 137int psratio; /* ratio: prof / stat */ 138 139struct timeval time; 140volatile struct timeval mono_time; 141 142/* 143 * Initialize clock frequencies and start both clocks running. 144 */ 145/* ARGSUSED*/ 146static void 147initclocks(dummy) 148 void *dummy; 149{ 150 register int i; 151 152 /* 153 * Set divisors to 1 (normal case) and let the machine-specific 154 * code do its bit. 155 */ 156 psdiv = pscnt = 1; 157 cpu_initclocks(); 158 159 /* 160 * Compute profhz/stathz, and fix profhz if needed. 161 */ 162 i = stathz ? stathz : hz; 163 if (profhz == 0) 164 profhz = i; 165 psratio = profhz / i; 166} 167 168/* 169 * The real-time timer, interrupting hz times per second. 170 */ 171void 172hardclock(frame) 173 register struct clockframe *frame; 174{ 175 register struct proc *p; 176 177 p = curproc; 178 if (p) { 179 register struct pstats *pstats; 180 181 /* 182 * Run current process's virtual and profile time, as needed. 183 */ 184 pstats = p->p_stats; 185 if (CLKF_USERMODE(frame) && 186 timerisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) && 187 itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) 188 psignal(p, SIGVTALRM); 189 if (timerisset(&pstats->p_timer[ITIMER_PROF].it_value) && 190 itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) 191 psignal(p, SIGPROF); 192 } 193 194#if defined(SMP) && defined(BETTER_CLOCK) 195 forward_hardclock(pscnt); 196#endif 197 198 /* 199 * If no separate statistics clock is available, run it from here. 200 */ 201 if (stathz == 0) 202 statclock(frame); 203 204 tco_forward(); 205 ticks++; 206 207 /* 208 * Process callouts at a very low cpu priority, so we don't keep the 209 * relatively high clock interrupt priority any longer than necessary. 210 */ 211 if (TAILQ_FIRST(&callwheel[ticks & callwheelmask]) != NULL) { 212 if (CLKF_BASEPRI(frame)) { 213 /* 214 * Save the overhead of a software interrupt; 215 * it will happen as soon as we return, so do it now. 216 */ 217 (void)splsoftclock(); 218 softclock(); 219 } else 220 setsoftclock(); 221 } else if (softticks + 1 == ticks) 222 ++softticks; 223} 224 225void 226gettime(struct timeval *tvp) 227{ 228 int s; 229 230 s = splclock(); 231 /* XXX should use microtime() iff tv_usec is used. */ 232 *tvp = time; 233 splx(s); 234} 235 236/* 237 * Compute number of hz until specified time. Used to 238 * compute third argument to timeout() from an absolute time. 239 * XXX this interface is often inconvenient. We often just need the 240 * number of ticks in a timeval, but to use hzto() for that we have 241 * to add `time' to the timeval and do everything at splclock(). 242 */ 243int 244hzto(tv) 245 struct timeval *tv; 246{ 247 register unsigned long ticks; 248 register long sec, usec; 249 int s; 250 251 /* 252 * If the number of usecs in the whole seconds part of the time 253 * difference fits in a long, then the total number of usecs will 254 * fit in an unsigned long. Compute the total and convert it to 255 * ticks, rounding up and adding 1 to allow for the current tick 256 * to expire. Rounding also depends on unsigned long arithmetic 257 * to avoid overflow. 258 * 259 * Otherwise, if the number of ticks in the whole seconds part of 260 * the time difference fits in a long, then convert the parts to 261 * ticks separately and add, using similar rounding methods and 262 * overflow avoidance. This method would work in the previous 263 * case but it is slightly slower and assumes that hz is integral. 264 * 265 * Otherwise, round the time difference down to the maximum 266 * representable value. 267 * 268 * If ints have 32 bits, then the maximum value for any timeout in 269 * 10ms ticks is 248 days. 270 */ 271 s = splclock(); 272 sec = tv->tv_sec - time.tv_sec; 273 usec = tv->tv_usec - time.tv_usec; 274 splx(s); 275 if (usec < 0) { 276 sec--; 277 usec += 1000000; 278 } 279 if (sec < 0) { 280#ifdef DIAGNOSTIC 281 if (usec > 0) { 282 sec++; 283 usec -= 1000000; 284 } 285 printf("hzto: negative time difference %ld sec %ld usec\n", 286 sec, usec); 287#endif 288 ticks = 1; 289 } else if (sec <= LONG_MAX / 1000000) 290 ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1)) 291 / tick + 1; 292 else if (sec <= LONG_MAX / hz) 293 ticks = sec * hz 294 + ((unsigned long)usec + (tick - 1)) / tick + 1; 295 else 296 ticks = LONG_MAX; 297 if (ticks > INT_MAX) 298 ticks = INT_MAX; 299 return (ticks); 300} 301 302/* 303 * Start profiling on a process. 304 * 305 * Kernel profiling passes proc0 which never exits and hence 306 * keeps the profile clock running constantly. 307 */ 308void 309startprofclock(p) 310 register struct proc *p; 311{ 312 int s; 313 314 if ((p->p_flag & P_PROFIL) == 0) { 315 p->p_flag |= P_PROFIL; 316 if (++profprocs == 1 && stathz != 0) { 317 s = splstatclock(); 318 psdiv = pscnt = psratio; 319 setstatclockrate(profhz); 320 splx(s); 321 } 322 } 323} 324 325/* 326 * Stop profiling on a process. 327 */ 328void 329stopprofclock(p) 330 register struct proc *p; 331{ 332 int s; 333 334 if (p->p_flag & P_PROFIL) { 335 p->p_flag &= ~P_PROFIL; 336 if (--profprocs == 0 && stathz != 0) { 337 s = splstatclock(); 338 psdiv = pscnt = 1; 339 setstatclockrate(stathz); 340 splx(s); 341 } 342 } 343} 344 345/* 346 * Statistics clock. Grab profile sample, and if divider reaches 0, 347 * do process and kernel statistics. 348 */ 349void 350statclock(frame) 351 register struct clockframe *frame; 352{ 353#ifdef GPROF 354 register struct gmonparam *g; 355#endif 356 register struct proc *p; 357 register int i; 358 struct pstats *pstats; 359 long rss; 360 struct rusage *ru; 361 struct vmspace *vm; 362 363 if (CLKF_USERMODE(frame)) { 364 p = curproc; 365 if (p->p_flag & P_PROFIL) 366 addupc_intr(p, CLKF_PC(frame), 1); 367#if defined(SMP) && defined(BETTER_CLOCK) 368 if (stathz != 0) 369 forward_statclock(pscnt); 370#endif 371 if (--pscnt > 0) 372 return; 373 /* 374 * Came from user mode; CPU was in user state. 375 * If this process is being profiled record the tick. 376 */ 377 p->p_uticks++; 378 if (p->p_nice > NZERO) 379 cp_time[CP_NICE]++; 380 else 381 cp_time[CP_USER]++; 382 } else { 383#ifdef GPROF 384 /* 385 * Kernel statistics are just like addupc_intr, only easier. 386 */ 387 g = &_gmonparam; 388 if (g->state == GMON_PROF_ON) { 389 i = CLKF_PC(frame) - g->lowpc; 390 if (i < g->textsize) { 391 i /= HISTFRACTION * sizeof(*g->kcount); 392 g->kcount[i]++; 393 } 394 } 395#endif 396#if defined(SMP) && defined(BETTER_CLOCK) 397 if (stathz != 0) 398 forward_statclock(pscnt); 399#endif 400 if (--pscnt > 0) 401 return; 402 /* 403 * Came from kernel mode, so we were: 404 * - handling an interrupt, 405 * - doing syscall or trap work on behalf of the current 406 * user process, or 407 * - spinning in the idle loop. 408 * Whichever it is, charge the time as appropriate. 409 * Note that we charge interrupts to the current process, 410 * regardless of whether they are ``for'' that process, 411 * so that we know how much of its real time was spent 412 * in ``non-process'' (i.e., interrupt) work. 413 */ 414 p = curproc; 415 if (CLKF_INTR(frame)) { 416 if (p != NULL) 417 p->p_iticks++; 418 cp_time[CP_INTR]++; 419 } else if (p != NULL) { 420 p->p_sticks++; 421 cp_time[CP_SYS]++; 422 } else 423 cp_time[CP_IDLE]++; 424 } 425 pscnt = psdiv; 426 427 /* 428 * We maintain statistics shown by user-level statistics 429 * programs: the amount of time in each cpu state, and 430 * the amount of time each of DK_NDRIVE ``drives'' is busy. 431 * 432 * XXX should either run linked list of drives, or (better) 433 * grab timestamps in the start & done code. 434 */ 435 for (i = 0; i < DK_NDRIVE; i++) 436 if (dk_busy & (1 << i)) 437 dk_time[i]++; 438 439 /* 440 * We adjust the priority of the current process. The priority of 441 * a process gets worse as it accumulates CPU time. The cpu usage 442 * estimator (p_estcpu) is increased here. The formula for computing 443 * priorities (in kern_synch.c) will compute a different value each 444 * time p_estcpu increases by 4. The cpu usage estimator ramps up 445 * quite quickly when the process is running (linearly), and decays 446 * away exponentially, at a rate which is proportionally slower when 447 * the system is busy. The basic principal is that the system will 448 * 90% forget that the process used a lot of CPU time in 5 * loadav 449 * seconds. This causes the system to favor processes which haven't 450 * run much recently, and to round-robin among other processes. 451 */ 452 if (p != NULL) { 453 p->p_cpticks++; 454 if (++p->p_estcpu == 0) 455 p->p_estcpu--; 456 if ((p->p_estcpu & 3) == 0) { 457 resetpriority(p); 458 if (p->p_priority >= PUSER) 459 p->p_priority = p->p_usrpri; 460 } 461 462 /* Update resource usage integrals and maximums. */ 463 if ((pstats = p->p_stats) != NULL && 464 (ru = &pstats->p_ru) != NULL && 465 (vm = p->p_vmspace) != NULL) { 466 ru->ru_ixrss += vm->vm_tsize * PAGE_SIZE / 1024; 467 ru->ru_idrss += vm->vm_dsize * PAGE_SIZE / 1024; 468 ru->ru_isrss += vm->vm_ssize * PAGE_SIZE / 1024; 469 rss = vm->vm_pmap.pm_stats.resident_count * 470 PAGE_SIZE / 1024; 471 if (ru->ru_maxrss < rss) 472 ru->ru_maxrss = rss; 473 } 474 } 475} 476 477/* 478 * Return information about system clocks. 479 */ 480static int 481sysctl_kern_clockrate SYSCTL_HANDLER_ARGS 482{ 483 struct clockinfo clkinfo; 484 /* 485 * Construct clockinfo structure. 486 */ 487 clkinfo.hz = hz; 488 clkinfo.tick = tick; 489 clkinfo.tickadj = tickadj; 490 clkinfo.profhz = profhz; 491 clkinfo.stathz = stathz ? stathz : hz; 492 return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req)); 493} 494 495SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD, 496 0, 0, sysctl_kern_clockrate, "S,clockinfo",""); 497 498void 499microtime(struct timeval *tv) 500{ 501 struct timecounter *tc; 502 503 tc = (struct timecounter *)timecounter; 504 tv->tv_sec = tc->offset_sec; 505 tv->tv_usec = tc->offset_micro; 506 tv->tv_usec += 507 ((u_int64_t)tc->get_timedelta(tc) * tc->scale_micro) >> 32; 508 if (tv->tv_usec >= 1000000) { 509 tv->tv_usec -= 1000000; 510 tv->tv_sec++; 511 } 512} 513 514void 515nanotime(struct timespec *tv) 516{ 517 u_int count; 518 u_int64_t delta; 519 struct timecounter *tc; 520 521 tc = (struct timecounter *)timecounter; 522 tv->tv_sec = tc->offset_sec; 523 count = tc->get_timedelta(tc); 524 delta = tc->offset_nano; 525 delta += ((u_int64_t)count * tc->scale_nano_f); 526 delta >>= 32; 527 delta += ((u_int64_t)count * tc->scale_nano_i); 528 if (delta >= 1000000000) { 529 delta -= 1000000000; 530 tv->tv_sec++; 531 } 532 tv->tv_nsec = delta; 533} 534 535static void 536tco_setscales(struct timecounter *tc) 537{ 538 u_int64_t scale; 539 540 scale = 1000000000LL << 32; 541 if (tc->adjustment > 0) 542 scale += (tc->adjustment * 1000LL) << 10; 543 else 544 scale -= (-tc->adjustment * 1000LL) << 10; 545 scale /= tc->frequency; 546 tc->scale_micro = scale / 1000; 547 tc->scale_nano_f = scale & 0xffffffff; 548 tc->scale_nano_i = scale >> 32; 549} 550 551static u_int 552delta_timecounter(struct timecounter *tc) 553{ 554 555 return((tc->get_timecount() - tc->offset_count) & tc->counter_mask); 556} 557 558void 559init_timecounter(struct timecounter *tc) 560{ 561 struct timespec ts0, ts1; 562 int i; 563 564 if (!tc->get_timedelta) 565 tc->get_timedelta = delta_timecounter; 566 tc->adjustment = 0; 567 tco_setscales(tc); 568 tc->offset_count = tc->get_timecount(); 569 tc[0].tweak = &tc[0]; 570 tc[2] = tc[1] = tc[0]; 571 tc[1].other = &tc[2]; 572 tc[2].other = &tc[1]; 573 if (!timecounter) 574 timecounter = &tc[2]; 575 tc = &tc[1]; 576 577 /* 578 * Figure out the cost of calling this timecounter. 579 * XXX: The 1:15 ratio is a guess at reality. 580 */ 581 nanotime(&ts0); 582 for (i = 0; i < 16; i ++) 583 tc->get_timecount(); 584 for (i = 0; i < 240; i ++) 585 tc->get_timedelta(tc); 586 nanotime(&ts1); 587 ts1.tv_sec -= ts0.tv_sec; 588 tc->cost = ts1.tv_sec * 1000000000 + ts1.tv_nsec - ts0.tv_nsec; 589 tc->cost >>= 8; 590 if (print_tci) 591 printf("Timecounter \"%s\" frequency %lu Hz cost %u ns\n", 592 tc->name, tc->frequency, tc->cost); 593 594 /* XXX: For now always start using the counter. */ 595 tc->offset_count = tc->get_timecount(); 596 nanotime(&ts1); 597 tc->offset_nano = (u_int64_t)ts1.tv_nsec << 32; 598 tc->offset_micro = ts1.tv_nsec / 1000; 599 tc->offset_sec = ts1.tv_sec; 600 timecounter = tc; 601} 602 603void 604set_timecounter(struct timespec *ts) 605{ 606 struct timecounter *tc, *tco; 607 int s; 608 609 /* 610 * XXX we must be called at splclock() to preven *ts becoming 611 * invalid, so there is no point in spls here. 612 */ 613 s = splclock(); 614 tc = timecounter->other; 615 tco = tc->other; 616 *tc = *timecounter; 617 tc->other = tco; 618 tc->offset_sec = ts->tv_sec; 619 tc->offset_nano = (u_int64_t)ts->tv_nsec << 32; 620 tc->offset_micro = ts->tv_nsec / 1000; 621 tc->offset_count = tc->get_timecount(); 622 time.tv_sec = tc->offset_sec; 623 time.tv_usec = tc->offset_micro; 624 timecounter = tc; 625 splx(s); 626} 627 628void 629switch_timecounter(struct timecounter *newtc) 630{ 631 int s; 632 struct timecounter *tc; 633 struct timespec ts; 634 635 s = splclock(); 636 tc = timecounter; 637 if (newtc == tc || newtc == tc->other) { 638 splx(s); 639 return; 640 } 641 nanotime(&ts); 642 newtc->offset_sec = ts.tv_sec; 643 newtc->offset_nano = (u_int64_t)ts.tv_nsec << 32; 644 newtc->offset_micro = ts.tv_nsec / 1000; 645 newtc->offset_count = newtc->get_timecount(); 646 timecounter = newtc; 647 splx(s); 648} 649 650static struct timecounter * 651sync_other_counter(void) 652{ 653 struct timecounter *tc, *tco; 654 u_int delta; 655 656 tc = timecounter->other; 657 tco = tc->other; 658 *tc = *timecounter; 659 tc->other = tco; 660 delta = tc->get_timedelta(tc); 661 tc->offset_count += delta; 662 tc->offset_count &= tc->counter_mask; 663 tc->offset_nano += (u_int64_t)delta * tc->scale_nano_f; 664 tc->offset_nano += (u_int64_t)delta * tc->scale_nano_i << 32; 665 return (tc); 666} 667 668static void 669tco_forward(void) 670{ 671 struct timecounter *tc; 672 673 tc = sync_other_counter(); 674 if (timedelta != 0) { 675 tc->offset_nano += (u_int64_t)(tickdelta * 1000) << 32; 676 mono_time.tv_usec += tickdelta; 677 timedelta -= tickdelta; 678 } 679 mono_time.tv_usec += tick; 680 if (mono_time.tv_usec >= 1000000) { 681 mono_time.tv_usec -= 1000000; 682 mono_time.tv_sec++; 683 } 684 685 if (tc->offset_nano >= 1000000000ULL << 32) { 686 tc->offset_nano -= 1000000000ULL << 32; 687 tc->offset_sec++; 688 tc->frequency = tc->tweak->frequency; 689 tc->adjustment = tc->tweak->adjustment; 690 ntp_update_second(tc); /* XXX only needed if xntpd runs */ 691 tco_setscales(tc); 692 } 693 694 tc->offset_micro = (tc->offset_nano / 1000) >> 32; 695 696 time.tv_usec = tc->offset_micro; 697 time.tv_sec = tc->offset_sec; 698 timecounter = tc; 699} 700 701static int 702sysctl_kern_timecounter_frequency SYSCTL_HANDLER_ARGS 703{ 704 705 return (sysctl_handle_opaque(oidp, &timecounter->tweak->frequency, 706 sizeof(timecounter->tweak->frequency), req)); 707} 708 709static int 710sysctl_kern_timecounter_adjustment SYSCTL_HANDLER_ARGS 711{ 712 713 return (sysctl_handle_opaque(oidp, &timecounter->tweak->adjustment, 714 sizeof(timecounter->tweak->adjustment), req)); 715} 716 717SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, ""); 718 719SYSCTL_PROC(_kern_timecounter, OID_AUTO, frequency, CTLTYPE_INT | CTLFLAG_RW, 720 0, sizeof(u_int), sysctl_kern_timecounter_frequency, "I", ""); 721 722SYSCTL_PROC(_kern_timecounter, OID_AUTO, adjustment, CTLTYPE_INT | CTLFLAG_RW, 723 0, sizeof(int), sysctl_kern_timecounter_adjustment, "I", ""); 724