kern_tc.c: diff of revision 34901 (deleted lines) against revision 34961 (added lines)
1static volatile int print_tci = 1;
2
3/*-
4 * Copyright (c) 1997, 1998 Poul-Henning Kamp <phk@FreeBSD.org>
5 * Copyright (c) 1982, 1986, 1991, 1993
6 * The Regents of the University of California. All rights reserved.
7 * (c) UNIX System Laboratories, Inc.
8 * All or some portions of this file are derived from material licensed
9 * to the University of California by American Telephone and Telegraph
10 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
11 * the permission of UNIX System Laboratories, Inc.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. All advertising materials mentioning features or use of this software
22 * must display the following acknowledgement:
23 * This product includes software developed by the University of
24 * California, Berkeley and its contributors.
25 * 4. Neither the name of the University nor the names of its contributors
26 * may be used to endorse or promote products derived from this software
27 * without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * SUCH DAMAGE.
40 *
41 * @(#)kern_clock.c 8.5 (Berkeley) 1/21/94
42 * $Id: kern_clock.c,v 1.58 1998/03/16 10:19:12 phk Exp $
42 * $Id: kern_clock.c,v 1.59 1998/03/26 20:51:31 phk Exp $
43 */
44
45#include <sys/param.h>
46#include <sys/systm.h>
47#include <sys/dkstat.h>
48#include <sys/callout.h>
49#include <sys/kernel.h>
50#include <sys/proc.h>
51#include <sys/resourcevar.h>
52#include <sys/signalvar.h>
53#include <sys/timex.h>
54#include <vm/vm.h>
55#include <sys/lock.h>
56#include <vm/pmap.h>
57#include <vm/vm_map.h>
58#include <sys/sysctl.h>
59
60#include <machine/cpu.h>
61#include <machine/limits.h>
62
63#ifdef GPROF
64#include <sys/gmon.h>
65#endif
66
67#if defined(SMP) && defined(BETTER_CLOCK)
68#include <machine/smp.h>
69#endif
70
71static void initclocks __P((void *dummy));
72SYSINIT(clocks, SI_SUB_CLOCKS, SI_ORDER_FIRST, initclocks, NULL)
73
74static void tco_forward __P((void));
75static void tco_setscales __P((struct timecounter *tc));
76
77/* Some of these don't belong here, but it's easiest to concentrate them. */
78#if defined(SMP) && defined(BETTER_CLOCK)
79long cp_time[CPUSTATES];
80#else
81static long cp_time[CPUSTATES];
82#endif
83long dk_seek[DK_NDRIVE];
84static long dk_time[DK_NDRIVE]; /* time busy (in statclock ticks) */
85long dk_wds[DK_NDRIVE];
86long dk_wpms[DK_NDRIVE];
87long dk_xfer[DK_NDRIVE];
88
89int dk_busy;
90int dk_ndrive = 0;
91char dk_names[DK_NDRIVE][DK_NAMELEN];
92
93long tk_cancc;
94long tk_nin;
95long tk_nout;
96long tk_rawcc;
97
98struct timecounter *timecounter;
99
100time_t time_second;
101
100/*
101 * Clock handling routines.
102 *
103 * This code is written to operate with two timers that run independently of
104 * each other.
105 *
106 * The main timer, running hz times per second, is used to trigger interval
107 * timers, timeouts and rescheduling as needed.
108 *
109 * The second timer handles kernel and user profiling,
110 * and does resource use estimation. If the second timer is programmable,
111 * it is randomized to avoid aliasing between the two clocks. For example,
112 * the randomization prevents an adversary from always giving up the cpu
113 * just before its quantum expires. Otherwise, it would never accumulate
114 * cpu ticks. The mean frequency of the second timer is stathz.
115 *
116 * If no second timer exists, stathz will be zero; in this case we drive
117 * profiling and statistics off the main clock. This WILL NOT be accurate;
118 * do not do it unless absolutely necessary.
119 *
120 * The statistics clock may (or may not) be run at a higher rate while
121 * profiling. This profile clock runs at profhz. We require that profhz
122 * be an integral multiple of stathz.
123 *
124 * If the statistics clock is running fast, it must be divided by the ratio
125 * profhz/stathz for statistics. (For profiling, every tick counts.)
126 *
127 * Time-of-day is maintained using a "timecounter", which may or may
128 * not be related to the hardware generating the above mentioned
129 * interrupts.
130 */
131
132int stathz;
133int profhz;
134static int profprocs;
135int ticks;
136static int psdiv, pscnt; /* prof => stat divider */
137int psratio; /* ratio: prof / stat */
138
139struct timeval time;
140volatile struct timeval mono_time;
141
142/*
143 * Initialize clock frequencies and start both clocks running.
144 */
145/* ARGSUSED*/
146static void
147initclocks(dummy)
148 void *dummy;
149{
150 register int i;
151
152 /*
153 * Set divisors to 1 (normal case) and let the machine-specific
154 * code do its bit.
155 */
156 psdiv = pscnt = 1;
157 cpu_initclocks();
158
159 /*
160 * Compute profhz/stathz, and fix profhz if needed.
161 */
162 i = stathz ? stathz : hz;
163 if (profhz == 0)
164 profhz = i;
165 psratio = profhz / i;
166}
167
168/*
169 * The real-time timer, interrupting hz times per second.
170 */
171void
172hardclock(frame)
173 register struct clockframe *frame;
174{
175 register struct proc *p;
176
177 p = curproc;
178 if (p) {
179 register struct pstats *pstats;
180
181 /*
182 * Run current process's virtual and profile time, as needed.
183 */
184 pstats = p->p_stats;
185 if (CLKF_USERMODE(frame) &&
186 timerisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
187 itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0)
188 psignal(p, SIGVTALRM);
189 if (timerisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
190 itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0)
191 psignal(p, SIGPROF);
192 }
193
194#if defined(SMP) && defined(BETTER_CLOCK)
195 forward_hardclock(pscnt);
196#endif
197
198 /*
199 * If no separate statistics clock is available, run it from here.
200 */
201 if (stathz == 0)
202 statclock(frame);
203
204 tco_forward();
205 ticks++;
206
207 /*
208 * Process callouts at a very low cpu priority, so we don't keep the
209 * relatively high clock interrupt priority any longer than necessary.
210 */
211 if (TAILQ_FIRST(&callwheel[ticks & callwheelmask]) != NULL) {
212 if (CLKF_BASEPRI(frame)) {
213 /*
214 * Save the overhead of a software interrupt;
215 * it will happen as soon as we return, so do it now.
216 */
217 (void)splsoftclock();
218 softclock();
219 } else
220 setsoftclock();
221 } else if (softticks + 1 == ticks)
222 ++softticks;
223}
224
225/*
226 * Compute number of hz until specified time. Used to
227 * compute third argument to timeout() from an absolute time.
228 * XXX this interface is often inconvenient. We often just need the
229 * number of ticks in a timeval, but to use hzto() for that we have
230 * to add `time' to the timeval and do everything at splclock().
227 * Compute number of ticks in the specified amount of time.
231 */
232int
233hzto(tv)
230tvtohz(tv)
234 struct timeval *tv;
235{
236 register unsigned long ticks;
237 register long sec, usec;
238 int s;
239
240 /*
241 * If the number of usecs in the whole seconds part of the time
242 * difference fits in a long, then the total number of usecs will
243 * fit in an unsigned long. Compute the total and convert it to
244 * ticks, rounding up and adding 1 to allow for the current tick
245 * to expire. Rounding also depends on unsigned long arithmetic
246 * to avoid overflow.
247 *
248 * Otherwise, if the number of ticks in the whole seconds part of
249 * the time difference fits in a long, then convert the parts to
250 * ticks separately and add, using similar rounding methods and
251 * overflow avoidance. This method would work in the previous
252 * case but it is slightly slower and assumes that hz is integral.
253 *
254 * Otherwise, round the time difference down to the maximum
255 * representable value.
256 *
257 * If ints have 32 bits, then the maximum value for any timeout in
258 * 10ms ticks is 248 days.
259 */
260 s = splclock();
261 sec = tv->tv_sec - time.tv_sec;
262 usec = tv->tv_usec - time.tv_usec;
263 splx(s);
257 sec = tv->tv_sec;
258 usec = tv->tv_usec;
264 if (usec < 0) {
265 sec--;
266 usec += 1000000;
267 }
268 if (sec < 0) {
269#ifdef DIAGNOSTIC
270 if (usec > 0) {
271 sec++;
272 usec -= 1000000;
273 }
274 printf("hzto: negative time difference %ld sec %ld usec\n",
269 printf("tvotohz: negative time difference %ld sec %ld usec\n",
275 sec, usec);
276#endif
277 ticks = 1;
278 } else if (sec <= LONG_MAX / 1000000)
279 ticks = (sec * 1000000 + (unsigned long)usec + (tick - 1))
280 / tick + 1;
281 else if (sec <= LONG_MAX / hz)
282 ticks = sec * hz
283 + ((unsigned long)usec + (tick - 1)) / tick + 1;
284 else
285 ticks = LONG_MAX;
286 if (ticks > INT_MAX)
287 ticks = INT_MAX;
288 return (ticks);
289}
290
286
287/*
288 * Compute number of hz until specified time. Used to
289 * compute third argument to timeout() from an absolute time.
290 */
291int
292hzto(tv)
293 struct timeval *tv;
294{
295 register long sec, usec;
296 struct timeval t2;
297
298 getmicrotime(&t2);
299 t2.tv_sec = tv->tv_sec - t2.tv_sec;
300 t2.tv_usec = tv->tv_usec - t2.tv_usec;
301 return (tvtohz(&t2));
302}
303
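/*
 * Editor's illustrative sketch -- not part of kern_tc.c.  Under assumed
 * conditions it shows how a caller would now use tvtohz() for a relative
 * interval and hzto() for an absolute deadline when computing the tick
 * argument for timeout(); the function name below is hypothetical.
 */
static int
example_ticks(void)
{
	struct timeval rel, deadline;
	int n;

	rel.tv_sec = 0;			/* relative interval: 250 ms */
	rel.tv_usec = 250000;
	n = tvtohz(&rel);		/* ticks in the interval itself */

	getmicrotime(&deadline);	/* absolute time: now + 2 seconds */
	deadline.tv_sec += 2;
	n += hzto(&deadline);		/* hzto() subtracts "now" internally */

	return (n);
}
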
304/*
292 * Start profiling on a process.
293 *
294 * Kernel profiling passes proc0 which never exits and hence
295 * keeps the profile clock running constantly.
296 */
297void
298startprofclock(p)
299 register struct proc *p;
300{
301 int s;
302
303 if ((p->p_flag & P_PROFIL) == 0) {
304 p->p_flag |= P_PROFIL;
305 if (++profprocs == 1 && stathz != 0) {
306 s = splstatclock();
307 psdiv = pscnt = psratio;
308 setstatclockrate(profhz);
309 splx(s);
310 }
311 }
312}
313
314/*
315 * Stop profiling on a process.
316 */
317void
318stopprofclock(p)
319 register struct proc *p;
320{
321 int s;
322
323 if (p->p_flag & P_PROFIL) {
324 p->p_flag &= ~P_PROFIL;
325 if (--profprocs == 0 && stathz != 0) {
326 s = splstatclock();
327 psdiv = pscnt = 1;
328 setstatclockrate(stathz);
329 splx(s);
330 }
331 }
332}
333
334/*
335 * Statistics clock. Grab profile sample, and if divider reaches 0,
336 * do process and kernel statistics.
337 */
338void
339statclock(frame)
340 register struct clockframe *frame;
341{
342#ifdef GPROF
343 register struct gmonparam *g;
344#endif
345 register struct proc *p;
346 register int i;
347 struct pstats *pstats;
348 long rss;
349 struct rusage *ru;
350 struct vmspace *vm;
351
352 if (CLKF_USERMODE(frame)) {
353 p = curproc;
354 if (p->p_flag & P_PROFIL)
355 addupc_intr(p, CLKF_PC(frame), 1);
356#if defined(SMP) && defined(BETTER_CLOCK)
357 if (stathz != 0)
358 forward_statclock(pscnt);
359#endif
360 if (--pscnt > 0)
361 return;
362 /*
363 * Came from user mode; CPU was in user state.
364 * If this process is being profiled record the tick.
365 */
366 p->p_uticks++;
367 if (p->p_nice > NZERO)
368 cp_time[CP_NICE]++;
369 else
370 cp_time[CP_USER]++;
371 } else {
372#ifdef GPROF
373 /*
374 * Kernel statistics are just like addupc_intr, only easier.
375 */
376 g = &_gmonparam;
377 if (g->state == GMON_PROF_ON) {
378 i = CLKF_PC(frame) - g->lowpc;
379 if (i < g->textsize) {
380 i /= HISTFRACTION * sizeof(*g->kcount);
381 g->kcount[i]++;
382 }
383 }
384#endif
385#if defined(SMP) && defined(BETTER_CLOCK)
386 if (stathz != 0)
387 forward_statclock(pscnt);
388#endif
389 if (--pscnt > 0)
390 return;
391 /*
392 * Came from kernel mode, so we were:
393 * - handling an interrupt,
394 * - doing syscall or trap work on behalf of the current
395 * user process, or
396 * - spinning in the idle loop.
397 * Whichever it is, charge the time as appropriate.
398 * Note that we charge interrupts to the current process,
399 * regardless of whether they are ``for'' that process,
400 * so that we know how much of its real time was spent
401 * in ``non-process'' (i.e., interrupt) work.
402 */
403 p = curproc;
404 if (CLKF_INTR(frame)) {
405 if (p != NULL)
406 p->p_iticks++;
407 cp_time[CP_INTR]++;
408 } else if (p != NULL) {
409 p->p_sticks++;
410 cp_time[CP_SYS]++;
411 } else
412 cp_time[CP_IDLE]++;
413 }
414 pscnt = psdiv;
415
416 /*
417 * We maintain statistics shown by user-level statistics
418 * programs: the amount of time in each cpu state, and
419 * the amount of time each of DK_NDRIVE ``drives'' is busy.
420 *
421 * XXX should either run linked list of drives, or (better)
422 * grab timestamps in the start & done code.
423 */
424 for (i = 0; i < DK_NDRIVE; i++)
425 if (dk_busy & (1 << i))
426 dk_time[i]++;
427
428 /*
429 * We adjust the priority of the current process. The priority of
430 * a process gets worse as it accumulates CPU time. The cpu usage
431 * estimator (p_estcpu) is increased here. The formula for computing
432 * priorities (in kern_synch.c) will compute a different value each
433 * time p_estcpu increases by 4. The cpu usage estimator ramps up
434 * quite quickly when the process is running (linearly), and decays
435 * away exponentially, at a rate which is proportionally slower when
436 * the system is busy. The basic principal is that the system will
437 * 90% forget that the process used a lot of CPU time in 5 * loadav
438 * seconds. This causes the system to favor processes which haven't
439 * run much recently, and to round-robin among other processes.
440 */
441 if (p != NULL) {
442 p->p_cpticks++;
443 if (++p->p_estcpu == 0)
444 p->p_estcpu--;
445 if ((p->p_estcpu & 3) == 0) {
446 resetpriority(p);
447 if (p->p_priority >= PUSER)
448 p->p_priority = p->p_usrpri;
449 }
450
451 /* Update resource usage integrals and maximums. */
452 if ((pstats = p->p_stats) != NULL &&
453 (ru = &pstats->p_ru) != NULL &&
454 (vm = p->p_vmspace) != NULL) {
455 ru->ru_ixrss += vm->vm_tsize * PAGE_SIZE / 1024;
456 ru->ru_idrss += vm->vm_dsize * PAGE_SIZE / 1024;
457 ru->ru_isrss += vm->vm_ssize * PAGE_SIZE / 1024;
458 rss = vm->vm_pmap.pm_stats.resident_count *
459 PAGE_SIZE / 1024;
460 if (ru->ru_maxrss < rss)
461 ru->ru_maxrss = rss;
462 }
463 }
464}
465
466/*
467 * Return information about system clocks.
468 */
469static int
470sysctl_kern_clockrate SYSCTL_HANDLER_ARGS
471{
472 struct clockinfo clkinfo;
473 /*
474 * Construct clockinfo structure.
475 */
476 clkinfo.hz = hz;
477 clkinfo.tick = tick;
478 clkinfo.tickadj = tickadj;
479 clkinfo.profhz = profhz;
480 clkinfo.stathz = stathz ? stathz : hz;
481 return (sysctl_handle_opaque(oidp, &clkinfo, sizeof clkinfo, req));
482}
483
484SYSCTL_PROC(_kern, KERN_CLOCKRATE, clockrate, CTLTYPE_STRUCT|CTLFLAG_RD,
485 0, 0, sysctl_kern_clockrate, "S,clockinfo","");
486
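/*
 * Editor's illustrative sketch -- not part of kern_tc.c.  A userland program
 * can read the clockinfo structure exported by the handler above through the
 * CTL_KERN / KERN_CLOCKRATE MIB; this is a minimal standalone example.
 */
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <stdio.h>

int
main(void)
{
	int mib[2];
	struct clockinfo ci;
	size_t len = sizeof(ci);

	mib[0] = CTL_KERN;
	mib[1] = KERN_CLOCKRATE;
	if (sysctl(mib, 2, &ci, &len, NULL, 0) == -1) {
		perror("sysctl: kern.clockrate");
		return (1);
	}
	printf("hz=%d tick=%d stathz=%d profhz=%d\n",
	    ci.hz, ci.tick, ci.stathz, ci.profhz);
	return (0);
}
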
487
488/*
489 * We have four functions for looking at the clock, two for microseconds
490 * and two for nanoseconds. For each there is fast but less precise
491 * version "get{nano|micro}time" which will return a time which is up
492 * to 1/HZ previous to the call, whereas the raw version "{nano|micro}time"
493 * will return a timestamp which is as precise as possible.
494 */
495
496void
497getmicrotime(struct timeval *tvp)
498{
499 struct timecounter *tc;
500
501 tc = timecounter;
502 tvp->tv_sec = tc->offset_sec;
503 tvp->tv_usec = tc->offset_micro;
504}
505
506void
507getnanotime(struct timespec *tsp)
508{
509 struct timecounter *tc;
510
511 tc = timecounter;
512 tsp->tv_sec = tc->offset_sec;
513 tsp->tv_nsec = tc->offset_nano;
514}
515
516void
517microtime(struct timeval *tv)
518{
519 struct timecounter *tc;
520
521 tc = (struct timecounter *)timecounter;
522 tv->tv_sec = tc->offset_sec;
523 tv->tv_usec = tc->offset_micro;
524 tv->tv_usec +=
525 ((u_int64_t)tc->get_timedelta(tc) * tc->scale_micro) >> 32;
526 if (tv->tv_usec >= 1000000) {
527 tv->tv_usec -= 1000000;
528 tv->tv_sec++;
529 }
530}
531
532void
533nanotime(struct timespec *tv)
534{
535 u_int count;
536 u_int64_t delta;
537 struct timecounter *tc;
538
539 tc = (struct timecounter *)timecounter;
540 tv->tv_sec = tc->offset_sec;
541 count = tc->get_timedelta(tc);
542 delta = tc->offset_nano;
543 delta += ((u_int64_t)count * tc->scale_nano_f);
544 delta >>= 32;
545 delta += ((u_int64_t)count * tc->scale_nano_i);
546 if (delta >= 1000000000) {
547 delta -= 1000000000;
548 tv->tv_sec++;
549 }
550 tv->tv_nsec = delta;
551}
552
553static void
554tco_setscales(struct timecounter *tc)
555{
556 u_int64_t scale;
557
558 scale = 1000000000LL << 32;
559 if (tc->adjustment > 0)
560 scale += (tc->adjustment * 1000LL) << 10;
561 else
562 scale -= (-tc->adjustment * 1000LL) << 10;
563 scale /= tc->frequency;
564 tc->scale_micro = scale / 1000;
565 tc->scale_nano_f = scale & 0xffffffff;
566 tc->scale_nano_i = scale >> 32;
567}
568
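/*
 * Editor's illustrative sketch -- not part of kern_tc.c.  It walks through
 * the 32.32 fixed-point arithmetic that tco_setscales() sets up and that
 * microtime()/nanotime() apply to a raw counter delta.  The 1193182 Hz
 * frequency and the delta of 11932 counts (about 10 ms) are assumed example
 * values; the function name is hypothetical.
 */
static u_int64_t
example_scale_demo(void)
{
	u_int64_t scale, nsec;
	u_int32_t scale_micro, scale_nano_f, scale_nano_i;
	u_int delta = 11932;

	scale = (1000000000ULL << 32) / 1193182;	/* ~838.1 ns per count */
	scale_micro = scale / 1000;			/* ~0.838 us per count */
	scale_nano_f = scale & 0xffffffff;		/* fractional ns part */
	scale_nano_i = scale >> 32;			/* integer ns part: 838 */

	/* microtime(): usec = (delta * scale_micro) >> 32  ->  about 10000 us */
	/* nanotime(): sum of integer and fractional parts  ->  about 10 ms of ns */
	nsec = ((u_int64_t)delta * scale_nano_f) >> 32;
	nsec += (u_int64_t)delta * scale_nano_i;
	(void)scale_micro;
	return (nsec);
}
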
569static u_int
570delta_timecounter(struct timecounter *tc)
571{
572
573 return((tc->get_timecount() - tc->offset_count) & tc->counter_mask);
574}
575
576void
577init_timecounter(struct timecounter *tc)
578{
579 struct timespec ts0, ts1;
580 int i;
581
582 if (!tc->get_timedelta)
583 tc->get_timedelta = delta_timecounter;
584 tc->adjustment = 0;
585 tco_setscales(tc);
586 tc->offset_count = tc->get_timecount();
587 tc[0].tweak = &tc[0];
588 tc[2] = tc[1] = tc[0];
589 tc[1].other = &tc[2];
590 tc[2].other = &tc[1];
591 if (!timecounter)
592 timecounter = &tc[2];
593 tc = &tc[1];
594
595 /*
596 * Figure out the cost of calling this timecounter.
597 * XXX: The 1:15 ratio is a guess at reality.
598 */
599 nanotime(&ts0);
600 for (i = 0; i < 16; i ++)
601 tc->get_timecount();
602 for (i = 0; i < 240; i ++)
603 tc->get_timedelta(tc);
604 nanotime(&ts1);
605 ts1.tv_sec -= ts0.tv_sec;
606 tc->cost = ts1.tv_sec * 1000000000 + ts1.tv_nsec - ts0.tv_nsec;
607 tc->cost >>= 8;
608 if (print_tci)
609 printf("Timecounter \"%s\" frequency %lu Hz cost %u ns\n",
610 tc->name, tc->frequency, tc->cost);
611
612 /* XXX: For now always start using the counter. */
613 tc->offset_count = tc->get_timecount();
614 nanotime(&ts1);
615 tc->offset_nano = (u_int64_t)ts1.tv_nsec << 32;
616 tc->offset_micro = ts1.tv_nsec / 1000;
617 tc->offset_sec = ts1.tv_sec;
618 timecounter = tc;
619}
620
621void
622set_timecounter(struct timespec *ts)
623{
624 struct timecounter *tc, *tco;
625 int s;
626
627 /*
628 * XXX we must be called at splclock() to preven *ts becoming
629 * invalid, so there is no point in spls here.
630 */
631 s = splclock();
632 tc = timecounter->other;
633 tco = tc->other;
634 *tc = *timecounter;
635 tc->other = tco;
636 tc->offset_sec = ts->tv_sec;
637 tc->offset_nano = (u_int64_t)ts->tv_nsec << 32;
638 tc->offset_micro = ts->tv_nsec / 1000;
639 tc->offset_count = tc->get_timecount();
640 time.tv_sec = tc->offset_sec;
641 time.tv_usec = tc->offset_micro;
653 time_second = tc->offset_sec;
642 timecounter = tc;
643 splx(s);
644}
645
646void
647switch_timecounter(struct timecounter *newtc)
648{
649 int s;
650 struct timecounter *tc;
651 struct timespec ts;
652
653 s = splclock();
654 tc = timecounter;
655 if (newtc == tc || newtc == tc->other) {
656 splx(s);
657 return;
658 }
659 nanotime(&ts);
660 newtc->offset_sec = ts.tv_sec;
661 newtc->offset_nano = (u_int64_t)ts.tv_nsec << 32;
662 newtc->offset_micro = ts.tv_nsec / 1000;
663 newtc->offset_count = newtc->get_timecount();
664 timecounter = newtc;
665 splx(s);
666}
667
668static struct timecounter *
669sync_other_counter(void)
670{
671 struct timecounter *tc, *tco;
672 u_int delta;
673
674 tc = timecounter->other;
675 tco = tc->other;
676 *tc = *timecounter;
677 tc->other = tco;
678 delta = tc->get_timedelta(tc);
679 tc->offset_count += delta;
680 tc->offset_count &= tc->counter_mask;
681 tc->offset_nano += (u_int64_t)delta * tc->scale_nano_f;
682 tc->offset_nano += (u_int64_t)delta * tc->scale_nano_i << 32;
683 return (tc);
684}
685
686static void
687tco_forward(void)
688{
689 struct timecounter *tc;
690
691 tc = sync_other_counter();
692 if (timedelta != 0) {
693 tc->offset_nano += (u_int64_t)(tickdelta * 1000) << 32;
694 mono_time.tv_usec += tickdelta;
695 timedelta -= tickdelta;
696 }
697 mono_time.tv_usec += tick;
698 if (mono_time.tv_usec >= 1000000) {
699 mono_time.tv_usec -= 1000000;
700 mono_time.tv_sec++;
701 }
702
703 if (tc->offset_nano >= 1000000000ULL << 32) {
704 tc->offset_nano -= 1000000000ULL << 32;
705 tc->offset_sec++;
706 tc->frequency = tc->tweak->frequency;
707 tc->adjustment = tc->tweak->adjustment;
708 ntp_update_second(tc); /* XXX only needed if xntpd runs */
709 tco_setscales(tc);
710 }
711
712 tc->offset_micro = (tc->offset_nano / 1000) >> 32;
713
714 time.tv_usec = tc->offset_micro;
715 time.tv_sec = tc->offset_sec;
726 time_second = tc->offset_sec;
716 timecounter = tc;
717}
718
719static int
720sysctl_kern_timecounter_frequency SYSCTL_HANDLER_ARGS
721{
722
723 return (sysctl_handle_opaque(oidp, &timecounter->tweak->frequency,
724 sizeof(timecounter->tweak->frequency), req));
725}
726
727static int
728sysctl_kern_timecounter_adjustment SYSCTL_HANDLER_ARGS
729{
730
731 return (sysctl_handle_opaque(oidp, &timecounter->tweak->adjustment,
732 sizeof(timecounter->tweak->adjustment), req));
733}
734
735SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, "");
736
737SYSCTL_PROC(_kern_timecounter, OID_AUTO, frequency, CTLTYPE_INT | CTLFLAG_RW,
738 0, sizeof(u_int), sysctl_kern_timecounter_frequency, "I", "");
739
740SYSCTL_PROC(_kern_timecounter, OID_AUTO, adjustment, CTLTYPE_INT | CTLFLAG_RW,
741 0, sizeof(int), sysctl_kern_timecounter_adjustment, "I", "");