/*-
 * Copyright (c) 2010-2012 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_clocksource.c 247454 2013-02-28 10:46:54Z davide $");

/*
 * Common routines to manage event timer hardware.
 */

#include "opt_device_polling.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/timeet.h>
#include <sys/timetc.h>

#include <machine/atomic.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/smp.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
cyclic_clock_func_t	cyclic_clock_func = NULL;
#endif

int			cpu_can_deep_sleep = 0;	/* C3 state is available. */
int			cpu_disable_deep_sleep = 0; /* Timer dies in C3. */

static void		setuptimer(void);
static void		loadtimer(struct bintime *now, int first);
static int		doconfigtimer(void);
static void		configtimer(int start);
static int		round_freq(struct eventtimer *et, int freq);

static void		getnextcpuevent(struct bintime *event, int idle);
static void		getnextevent(struct bintime *event);
static int		handleevents(struct bintime *now, int fake);
#ifdef SMP
static void		cpu_new_callout(int cpu, int ticks);
#endif

static struct mtx	et_hw_mtx;

#define	ET_HW_LOCK(state)						\
	{								\
		if (timer->et_flags & ET_FLAGS_PERCPU)			\
			mtx_lock_spin(&(state)->et_hw_mtx);		\
		else							\
			mtx_lock_spin(&et_hw_mtx);			\
	}

#define	ET_HW_UNLOCK(state)						\
	{								\
		if (timer->et_flags & ET_FLAGS_PERCPU)			\
			mtx_unlock_spin(&(state)->et_hw_mtx);		\
		else							\
			mtx_unlock_spin(&et_hw_mtx);			\
	}
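
/*
 * ET_HW_LOCK()/ET_HW_UNLOCK() pick the lock matching the timer topology:
 * a per-CPU event timer only has to serialize against code on the same
 * CPU, so its state is protected by that CPU's spin mutex, while a
 * single global timer is serialized by the global et_hw_mtx.
 */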

static struct eventtimer *timer = NULL;
static struct bintime	timerperiod;	/* Timer period for periodic mode. */
static struct bintime	hardperiod;	/* hardclock() events period. */
static struct bintime	statperiod;	/* statclock() events period. */
static struct bintime	profperiod;	/* profclock() events period. */
static struct bintime	nexttick;	/* Next global timer tick time. */
static struct bintime	nexthard;	/* Next global hardclock() event. */
static u_int		busy = 0;	/* Reconfiguration is in progress. */
static int		profiling = 0;	/* Profiling events enabled. */

static char		timername[32];	/* Wanted timer. */
TUNABLE_STR("kern.eventtimer.timer", timername, sizeof(timername));

static int		singlemul = 0;	/* Multiplier for periodic mode. */
TUNABLE_INT("kern.eventtimer.singlemul", &singlemul);
SYSCTL_INT(_kern_eventtimer, OID_AUTO, singlemul, CTLFLAG_RW, &singlemul,
    0, "Multiplier for periodic mode");

static u_int		idletick = 0;	/* Run periodic events when idle. */
TUNABLE_INT("kern.eventtimer.idletick", &idletick);
SYSCTL_UINT(_kern_eventtimer, OID_AUTO, idletick, CTLFLAG_RW, &idletick,
    0, "Run periodic events when idle");

static u_int		activetick = 1;	/* Run all periodic events when active. */
TUNABLE_INT("kern.eventtimer.activetick", &activetick);
SYSCTL_UINT(_kern_eventtimer, OID_AUTO, activetick, CTLFLAG_RW, &activetick,
    0, "Run all periodic events when active");

static int		periodic = 0;	/* Periodic or one-shot mode. */
static int		want_periodic = 0;	/* What mode to prefer. */
TUNABLE_INT("kern.eventtimer.periodic", &want_periodic);

struct pcpu_state {
	struct mtx	et_hw_mtx;	/* Per-CPU timer mutex. */
	u_int		action;		/* Reconfiguration requests. */
	u_int		handle;		/* Immediate handle requests. */
	struct bintime	now;		/* Last tick time. */
	struct bintime	nextevent;	/* Next scheduled event on this CPU. */
	struct bintime	nexttick;	/* Next timer tick time. */
	struct bintime	nexthard;	/* Next hardclock() event. */
	struct bintime	nextstat;	/* Next statclock() event. */
	struct bintime	nextprof;	/* Next profclock() event. */
#ifdef KDTRACE_HOOKS
	struct bintime	nextcyc;	/* Next OpenSolaris cyclics event. */
#endif
	int		ipi;		/* This CPU needs IPI. */
	int		idle;		/* This CPU is in idle mode. */
};

static DPCPU_DEFINE(struct pcpu_state, timerstate);

#define	FREQ2BT(freq, bt)						\
{									\
	(bt)->sec = 0;							\
	(bt)->frac = ((uint64_t)0x8000000000000000 / (freq)) << 1;	\
}
#define	BT2FREQ(bt)							\
	(((uint64_t)0x8000000000000000 + ((bt)->frac >> 2)) /		\
	    ((bt)->frac >> 1))
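
/*
 * A bintime fraction counts units of 2^-64 s, so a frequency freq maps to
 * a period of 2^64 / freq fractional units.  FREQ2BT() computes that as
 * (2^63 / freq) << 1 to keep the dividend within 64 bits; BT2FREQ() rounds
 * the inverse to the nearest integer.  For example, FREQ2BT(1000, bt)
 * yields bt->frac ~= 2^64 / 1000, i.e. a period of one millisecond.
 */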

/*
 * Timer broadcast IPI handler.
 */
int
hardclockintr(void)
{
	struct bintime now;
	struct pcpu_state *state;
	int done;

	if (doconfigtimer() || busy)
		return (FILTER_HANDLED);
	state = DPCPU_PTR(timerstate);
	now = state->now;
	CTR4(KTR_SPARE2, "ipi at %d: now %d.%08x%08x",
	    curcpu, now.sec, (u_int)(now.frac >> 32),
	    (u_int)(now.frac & 0xffffffff));
	done = handleevents(&now, 0);
	return (done ? FILTER_HANDLED : FILTER_STRAY);
}

/*
 * Handle all events for the specified time on this CPU.  The fake
 * argument controls how much is really done: 0 means a real timer
 * interrupt, 1 runs hardclock/statclock without a trapframe (catching
 * up after idle), and 2 only advances the event times without calling
 * any handlers.
 */
static int
handleevents(struct bintime *now, int fake)
{
	struct bintime t;
	struct trapframe *frame;
	struct pcpu_state *state;
	uintfptr_t pc;
	int usermode;
	int done, runs;

	CTR4(KTR_SPARE2, "handle at %d: now %d.%08x%08x",
	    curcpu, now->sec, (u_int)(now->frac >> 32),
	    (u_int)(now->frac & 0xffffffff));
	done = 0;
	if (fake) {
		frame = NULL;
		usermode = 0;
		pc = 0;
	} else {
		frame = curthread->td_intr_frame;
		usermode = TRAPF_USERMODE(frame);
		pc = TRAPF_PC(frame);
	}

	state = DPCPU_PTR(timerstate);

	runs = 0;
	while (bintime_cmp(now, &state->nexthard, >=)) {
		bintime_addx(&state->nexthard, hardperiod.frac);
		runs++;
	}
	if (runs) {
		if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 &&
		    bintime_cmp(&state->nexthard, &nexthard, >))
			nexthard = state->nexthard;
		if (fake < 2) {
			hardclock_cnt(runs, usermode);
			done = 1;
		}
	}
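
	/*
	 * statclock and profclock below use the same catch-up pattern as
	 * hardclock above: count how many whole periods have elapsed since
	 * the last run and deliver them in a single *_cnt() call, which is
	 * what lets a CPU returning from idle skip empty ticks and still
	 * account for all of them.
	 */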
	runs = 0;
	while (bintime_cmp(now, &state->nextstat, >=)) {
		bintime_addx(&state->nextstat, statperiod.frac);
		runs++;
	}
	if (runs && fake < 2) {
		statclock_cnt(runs, usermode);
		done = 1;
	}
	if (profiling) {
		runs = 0;
		while (bintime_cmp(now, &state->nextprof, >=)) {
			bintime_addx(&state->nextprof, profperiod.frac);
			runs++;
		}
		if (runs && !fake) {
			profclock_cnt(runs, usermode, pc);
			done = 1;
		}
	} else
		state->nextprof = state->nextstat;

#ifdef KDTRACE_HOOKS
	if (fake == 0 && cyclic_clock_func != NULL &&
	    state->nextcyc.sec != -1 &&
	    bintime_cmp(now, &state->nextcyc, >=)) {
		state->nextcyc.sec = -1;
		(*cyclic_clock_func)(frame);
	}
#endif

	getnextcpuevent(&t, 0);
	if (fake == 2) {
		state->nextevent = t;
		return (done);
	}
	ET_HW_LOCK(state);
	if (!busy) {
		state->idle = 0;
		state->nextevent = t;
		loadtimer(now, 0);
	}
	ET_HW_UNLOCK(state);
	return (done);
}

/*
 * Compute the binuptime of the next scheduled event on the current CPU.
 */
static void
getnextcpuevent(struct bintime *event, int idle)
{
	struct bintime tmp;
	struct pcpu_state *state;
	int skip;

	state = DPCPU_PTR(timerstate);
	/* Handle hardclock() events. */
	*event = state->nexthard;
	if (idle || (!activetick && !profiling &&
	    (timer->et_flags & ET_FLAGS_PERCPU) == 0)) {
		skip = idle ? 4 : (stathz / 2);
		if (curcpu == CPU_FIRST() && tc_min_ticktock_freq > skip)
			skip = tc_min_ticktock_freq;
		skip = callout_tickstofirst(hz / skip) - 1;
		CTR2(KTR_SPARE2, "skip at %d: %d", curcpu, skip);
		tmp = hardperiod;
		bintime_mul(&tmp, skip);
		bintime_add(event, &tmp);
	}
	if (!idle) { /* If CPU is active, handle other types of events. */
		if (bintime_cmp(event, &state->nextstat, >))
			*event = state->nextstat;
		if (profiling && bintime_cmp(event, &state->nextprof, >))
			*event = state->nextprof;
	}
#ifdef KDTRACE_HOOKS
	if (state->nextcyc.sec != -1 && bintime_cmp(event, &state->nextcyc, >))
		*event = state->nextcyc;
#endif
}

/*
 * Compute the binuptime of the earliest event scheduled on any CPU.
 */
static void
getnextevent(struct bintime *event)
{
	struct pcpu_state *state;
#ifdef SMP
	int cpu;
#endif
	int c, nonidle;

	state = DPCPU_PTR(timerstate);
	*event = state->nextevent;
	c = curcpu;
	nonidle = !state->idle;
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) {
#ifdef SMP
		if (smp_started) {
			CPU_FOREACH(cpu) {
				if (curcpu == cpu)
					continue;
				state = DPCPU_ID_PTR(cpu, timerstate);
				nonidle += !state->idle;
				if (bintime_cmp(event, &state->nextevent, >)) {
					*event = state->nextevent;
					c = cpu;
				}
			}
		}
#endif
		if (nonidle != 0 && bintime_cmp(event, &nexthard, >))
			*event = nexthard;
	}
	CTR5(KTR_SPARE2, "next at %d: next %d.%08x%08x by %d",
	    curcpu, event->sec, (u_int)(event->frac >> 32),
	    (u_int)(event->frac & 0xffffffff), c);
}

/* Hardware timer callback function. */
static void
timercb(struct eventtimer *et, void *arg)
{
	struct bintime now;
	struct bintime *next;
	struct pcpu_state *state;
#ifdef SMP
	int cpu, bcast;
#endif

	/* Do not touch anything if somebody is reconfiguring timers. */
	if (busy)
		return;
	/* Update present and next tick times. */
	state = DPCPU_PTR(timerstate);
	if (et->et_flags & ET_FLAGS_PERCPU) {
		next = &state->nexttick;
	} else
		next = &nexttick;
	binuptime(&now);
	if (periodic) {
		*next = now;
		bintime_addx(next, timerperiod.frac);	/* Next tick in 1 period. */
	} else
		next->sec = -1;	/* Next tick is not scheduled yet. */
	state->now = now;
	CTR4(KTR_SPARE2, "intr at %d: now %d.%08x%08x",
	    curcpu, (int)(now.sec), (u_int)(now.frac >> 32),
	    (u_int)(now.frac & 0xffffffff));

#ifdef SMP
	/* Prepare broadcasting to other CPUs for non-per-CPU timers. */
	bcast = 0;
	if ((et->et_flags & ET_FLAGS_PERCPU) == 0 && smp_started) {
		CPU_FOREACH(cpu) {
			state = DPCPU_ID_PTR(cpu, timerstate);
			ET_HW_LOCK(state);
			state->now = now;
			if (bintime_cmp(&now, &state->nextevent, >=)) {
				state->nextevent.sec++;
				if (curcpu != cpu) {
					state->ipi = 1;
					bcast = 1;
				}
			}
			ET_HW_UNLOCK(state);
		}
	}
#endif

	/* Handle events for this time on this CPU. */
	handleevents(&now, 0);

#ifdef SMP
	/* Broadcast interrupt to other CPUs for non-per-CPU timers. */
	if (bcast) {
		CPU_FOREACH(cpu) {
			if (curcpu == cpu)
				continue;
			state = DPCPU_ID_PTR(cpu, timerstate);
			if (state->ipi) {
				state->ipi = 0;
				ipi_cpu(cpu, IPI_HARDCLOCK);
			}
		}
	}
#endif
}

/*
 * Load new value into hardware timer.
 */
static void
loadtimer(struct bintime *now, int start)
{
	struct pcpu_state *state;
	struct bintime new;
	struct bintime *next;
	uint64_t tmp;
	int eq;

	if (timer->et_flags & ET_FLAGS_PERCPU) {
		state = DPCPU_PTR(timerstate);
		next = &state->nexttick;
	} else
		next = &nexttick;
	if (periodic) {
		if (start) {
			/*
			 * Try to start all periodic timers aligned
			 * to period to make events synchronous.
			 */
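			/*
			 * The math below is 28.36 fixed point: the high 28
			 * bits of tmp hold whole seconds and the low 36 bits
			 * the fraction, so the modulus with the (sub-second)
			 * timer period gives how far we already are into the
			 * current period, and shifting back left by 28 turns
			 * the remainder into an ordinary bintime fraction.
			 */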
			tmp = ((uint64_t)now->sec << 36) + (now->frac >> 28);
			tmp = (tmp % (timerperiod.frac >> 28)) << 28;
			new.sec = 0;
			new.frac = timerperiod.frac - tmp;
			if (new.frac < tmp)	/* Less time left than passed. */
				bintime_addx(&new, timerperiod.frac);
			CTR5(KTR_SPARE2, "load p at %d: now %d.%08x first in %d.%08x",
			    curcpu, now->sec, (u_int)(now->frac >> 32),
			    new.sec, (u_int)(new.frac >> 32));
			*next = new;
			bintime_add(next, now);
			et_start(timer, &new, &timerperiod);
		}
	} else {
		getnextevent(&new);
		eq = bintime_cmp(&new, next, ==);
		CTR5(KTR_SPARE2, "load at %d: next %d.%08x%08x eq %d",
		    curcpu, new.sec, (u_int)(new.frac >> 32),
		    (u_int)(new.frac & 0xffffffff),
		    eq);
		if (!eq) {
			*next = new;
			bintime_sub(&new, now);
			et_start(timer, &new, NULL);
		}
	}
}

/*
 * Prepare event timer parameters after configuration changes.
 */
static void
setuptimer(void)
{
	int freq;

	if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
		periodic = 0;
	else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
		periodic = 1;
	singlemul = MIN(MAX(singlemul, 1), 20);
	freq = hz * singlemul;
	while (freq < (profiling ? profhz : stathz))
		freq += hz;
	freq = round_freq(timer, freq);
	FREQ2BT(freq, &timerperiod);
}

/*
 * Reconfigure the specified per-CPU timer on another CPU.  Called from
 * the IPI handler.
 */
static int
doconfigtimer(void)
{
	struct bintime now;
	struct pcpu_state *state;

	state = DPCPU_PTR(timerstate);
	switch (atomic_load_acq_int(&state->action)) {
	case 1:		/* Start the timer on this CPU. */
		binuptime(&now);
		ET_HW_LOCK(state);
		loadtimer(&now, 1);
		ET_HW_UNLOCK(state);
		state->handle = 0;
		atomic_store_rel_int(&state->action, 0);
		return (1);
	case 2:		/* Stop the timer on this CPU. */
		ET_HW_LOCK(state);
		et_stop(timer);
		ET_HW_UNLOCK(state);
		state->handle = 0;
		atomic_store_rel_int(&state->action, 0);
		return (1);
	}
	if (atomic_readandclear_int(&state->handle) && !busy) {
		binuptime(&now);
		handleevents(&now, 0);
		return (1);
	}
	return (0);
}

/*
 * Reconfigure the specified timer.
 * For per-CPU timers use an IPI to make the other CPUs reconfigure theirs.
 */
static void
configtimer(int start)
{
	struct bintime now, next;
	struct pcpu_state *state;
	int cpu;

	if (start) {
		setuptimer();
		binuptime(&now);
	}
	critical_enter();
	ET_HW_LOCK(DPCPU_PTR(timerstate));
	if (start) {
		/* Initialize time machine parameters. */
		next = now;
		bintime_addx(&next, timerperiod.frac);
		if (periodic)
			nexttick = next;
		else
			nexttick.sec = -1;
		CPU_FOREACH(cpu) {
			state = DPCPU_ID_PTR(cpu, timerstate);
			state->now = now;
			state->nextevent = next;
			if (periodic)
				state->nexttick = next;
			else
				state->nexttick.sec = -1;
			state->nexthard = next;
			state->nextstat = next;
			state->nextprof = next;
			hardclock_sync(cpu);
		}
		busy = 0;
		/* Start global timer or per-CPU timer of this CPU. */
		loadtimer(&now, 1);
	} else {
		busy = 1;
		/* Stop global timer or per-CPU timer of this CPU. */
		et_stop(timer);
	}
	ET_HW_UNLOCK(DPCPU_PTR(timerstate));
#ifdef SMP
	/* If the timer is global or other CPUs are not yet up, we are done. */
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || !smp_started) {
		critical_exit();
		return;
	}
	/* Set reconfigure flags for other CPUs. */
	CPU_FOREACH(cpu) {
		state = DPCPU_ID_PTR(cpu, timerstate);
		atomic_store_rel_int(&state->action,
		    (cpu == curcpu) ? 0 : ( start ? 1 : 2));
	}
	/* Broadcast reconfigure IPI. */
	ipi_all_but_self(IPI_HARDCLOCK);
	/* Wait for the reconfiguration to complete. */
restart:
	cpu_spinwait();
	CPU_FOREACH(cpu) {
		if (cpu == curcpu)
			continue;
		state = DPCPU_ID_PTR(cpu, timerstate);
		if (atomic_load_acq_int(&state->action))
			goto restart;
	}
#endif
	critical_exit();
}

/*
 * Calculate the nearest frequency supported by the hardware timer.
 */
static int
round_freq(struct eventtimer *et, int freq)
{
	uint64_t div;

	if (et->et_frequency != 0) {
		div = lmax((et->et_frequency + freq / 2) / freq, 1);
		if (et->et_flags & ET_FLAGS_POW2DIV)
			div = 1 << (flsl(div + div / 2) - 1);
		freq = (et->et_frequency + div / 2) / div;
	}
	if (et->et_min_period.sec > 0)
		panic("Event timer \"%s\" doesn't support sub-second periods!",
		    et->et_name);
	else if (et->et_min_period.frac != 0)
		freq = min(freq, BT2FREQ(&et->et_min_period));
	if (et->et_max_period.sec == 0 && et->et_max_period.frac != 0)
		freq = max(freq, BT2FREQ(&et->et_max_period));
	return (freq);
}
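
/*
 * For example (illustrative numbers): an i8254-style timer with
 * et_frequency = 1193182 asked for 1000 Hz gets div = 1193 and a rounded
 * result of 1000 Hz, while a timer with ET_FLAGS_POW2DIV is restricted to
 * power-of-two divisors of its base frequency.
 */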

/*
 * Configure and start event timers (BSP part).
 */
void
cpu_initclocks_bsp(void)
{
	struct pcpu_state *state;
	int base, div, cpu;

	mtx_init(&et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
	CPU_FOREACH(cpu) {
		state = DPCPU_ID_PTR(cpu, timerstate);
		mtx_init(&state->et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
#ifdef KDTRACE_HOOKS
		state->nextcyc.sec = -1;
#endif
	}
#ifdef SMP
	callout_new_inserted = cpu_new_callout;
#endif
	periodic = want_periodic;
	/* Grab the requested timer or the best one available. */
	if (timername[0])
		timer = et_find(timername, 0, 0);
	if (timer == NULL && periodic) {
		timer = et_find(NULL,
		    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
	}
	if (timer == NULL) {
		timer = et_find(NULL,
		    ET_FLAGS_ONESHOT, ET_FLAGS_ONESHOT);
	}
	if (timer == NULL && !periodic) {
		timer = et_find(NULL,
		    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
	}
	if (timer == NULL)
		panic("No usable event timer found!");
	et_init(timer, timercb, NULL, NULL);

	/* Adapt to timer capabilities. */
	if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
		periodic = 0;
	else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
		periodic = 1;
	if (timer->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_deep_sleep++;

	/*
	 * We honor the requested 'hz' value.
	 * We want to run stathz in the neighborhood of 128hz.
	 * We would like profhz to run as often as possible.
	 */
	if (singlemul <= 0 || singlemul > 20) {
		if (hz >= 1500 || (hz % 128) == 0)
			singlemul = 1;
		else if (hz >= 750)
			singlemul = 2;
		else
			singlemul = 4;
	}
	if (periodic) {
		base = round_freq(timer, hz * singlemul);
		singlemul = max((base + hz / 2) / hz, 1);
		hz = (base + singlemul / 2) / singlemul;
		if (base <= 128)
			stathz = base;
		else {
			div = base / 128;
			if (div >= singlemul && (div % singlemul) == 0)
				div++;
			stathz = base / div;
		}
		profhz = stathz;
		while ((profhz + stathz) <= 128 * 64)
			profhz += stathz;
		profhz = round_freq(timer, profhz);
	} else {
		hz = round_freq(timer, hz);
		stathz = round_freq(timer, 127);
		profhz = round_freq(timer, stathz * 64);
	}
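
	/*
	 * Illustrative example (actual values depend on the hardware): in
	 * one-shot mode with hz = 1000 the code above yields stathz near 127
	 * and profhz near 127 * 64 = 8128, both rounded by round_freq() to
	 * frequencies the timer can actually produce.
	 */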
	tick = 1000000 / hz;
	FREQ2BT(hz, &hardperiod);
	FREQ2BT(stathz, &statperiod);
	FREQ2BT(profhz, &profperiod);
	ET_LOCK();
	configtimer(1);
	ET_UNLOCK();
}

/*
 * Start per-CPU event timers on APs.
 */
void
cpu_initclocks_ap(void)
{
	struct bintime now;
	struct pcpu_state *state;

	state = DPCPU_PTR(timerstate);
	binuptime(&now);
	ET_HW_LOCK(state);
	state->now = now;
	hardclock_sync(curcpu);
	handleevents(&state->now, 2);
	if (timer->et_flags & ET_FLAGS_PERCPU)
		loadtimer(&now, 1);
	ET_HW_UNLOCK(state);
}

/*
 * Switch to profiling clock rates.
 */
void
cpu_startprofclock(void)
{

	ET_LOCK();
	if (profiling == 0) {
		if (periodic) {
			configtimer(0);
			profiling = 1;
			configtimer(1);
		} else
			profiling = 1;
	} else
		profiling++;
	ET_UNLOCK();
}

/*
 * Switch to regular clock rates.
 */
void
cpu_stopprofclock(void)
{

	ET_LOCK();
	if (profiling == 1) {
		if (periodic) {
			configtimer(0);
			profiling = 0;
			configtimer(1);
		} else
			profiling = 0;
	} else
		profiling--;
	ET_UNLOCK();
}

/*
 * Switch to idle mode (all ticks handled).  Returns the time until the
 * next scheduled event as an sbintime_t, or -1 if the hardware must
 * keep ticking (e.g. a per-CPU timer in periodic mode).
 */
sbintime_t
cpu_idleclock(void)
{
	struct bintime now, t;
	struct pcpu_state *state;

	if (idletick || busy ||
	    (periodic && (timer->et_flags & ET_FLAGS_PERCPU))
#ifdef DEVICE_POLLING
	    || curcpu == CPU_FIRST()
#endif
	    )
		return (-1);
	state = DPCPU_PTR(timerstate);
	if (periodic)
		now = state->now;
	else
		binuptime(&now);
	CTR4(KTR_SPARE2, "idle at %d: now %d.%08x%08x",
	    curcpu, now.sec, (u_int)(now.frac >> 32),
	    (u_int)(now.frac & 0xffffffff));
	getnextcpuevent(&t, 1);
	ET_HW_LOCK(state);
	state->idle = 1;
	state->nextevent = t;
	if (!periodic)
		loadtimer(&now, 0);
	ET_HW_UNLOCK(state);
	bintime_sub(&t, &now);
	return (MAX(bttosbt(t), 0));
}

/*
 * Switch to active mode (skip empty ticks).
 */
void
cpu_activeclock(void)
{
	struct bintime now;
	struct pcpu_state *state;
	struct thread *td;

	state = DPCPU_PTR(timerstate);
	if (state->idle == 0 || busy)
		return;
	if (periodic)
		now = state->now;
	else
		binuptime(&now);
	CTR4(KTR_SPARE2, "active at %d: now %d.%08x%08x",
	    curcpu, now.sec, (u_int)(now.frac >> 32),
	    (u_int)(now.frac & 0xffffffff));
	spinlock_enter();
	td = curthread;
	td->td_intr_nesting_level++;
	handleevents(&now, 1);
	td->td_intr_nesting_level--;
	spinlock_exit();
}

#ifdef KDTRACE_HOOKS
void
clocksource_cyc_set(const struct bintime *t)
{
	struct bintime now;
	struct pcpu_state *state;

	state = DPCPU_PTR(timerstate);
	if (periodic)
		now = state->now;
	else
		binuptime(&now);

	CTR4(KTR_SPARE2, "set_cyc at %d: now %d.%08x%08x",
	    curcpu, now.sec, (u_int)(now.frac >> 32),
	    (u_int)(now.frac & 0xffffffff));
	CTR4(KTR_SPARE2, "set_cyc at %d: t %d.%08x%08x",
	    curcpu, t->sec, (u_int)(t->frac >> 32),
	    (u_int)(t->frac & 0xffffffff));

	ET_HW_LOCK(state);
	if (bintime_cmp(t, &state->nextcyc, ==)) {
		ET_HW_UNLOCK(state);
		return;
	}
	state->nextcyc = *t;
	if (bintime_cmp(&state->nextcyc, &state->nextevent, >=)) {
		ET_HW_UNLOCK(state);
		return;
	}
	state->nextevent = state->nextcyc;
	if (!periodic)
		loadtimer(&now, 0);
	ET_HW_UNLOCK(state);
}
#endif

#ifdef SMP
static void
cpu_new_callout(int cpu, int ticks)
{
	struct bintime tmp;
	struct pcpu_state *state;

	CTR3(KTR_SPARE2, "new co at %d: on %d in %d",
	    curcpu, cpu, ticks);
	state = DPCPU_ID_PTR(cpu, timerstate);
	ET_HW_LOCK(state);
	if (state->idle == 0 || busy) {
		ET_HW_UNLOCK(state);
		return;
	}
	/*
	 * If the timer is periodic, just update the next event time for
	 * the target CPU.  If the timer is global, there is a chance it
	 * is already programmed early enough.
	 */
	if (periodic || (timer->et_flags & ET_FLAGS_PERCPU) == 0) {
		tmp = hardperiod;
		bintime_mul(&tmp, ticks - 1);
		bintime_add(&tmp, &state->nexthard);
		if (bintime_cmp(&tmp, &state->nextevent, <))
			state->nextevent = tmp;
		if (periodic ||
		    bintime_cmp(&state->nextevent, &nexttick, >=)) {
			ET_HW_UNLOCK(state);
			return;
		}
	}
	/*
	 * Otherwise we have to wake that CPU up, as we can't get the present
	 * bintime to reprogram the global timer from here.  If the timer is
	 * per-CPU, we by definition can't do it from here.
	 */
	ET_HW_UNLOCK(state);
	if (timer->et_flags & ET_FLAGS_PERCPU) {
		state->handle = 1;
		ipi_cpu(cpu, IPI_HARDCLOCK);
	} else {
		if (!cpu_idle_wakeup(cpu))
			ipi_cpu(cpu, IPI_AST);
	}
}
#endif

/*
 * Report or change the active event timer hardware.
 */
static int
sysctl_kern_eventtimer_timer(SYSCTL_HANDLER_ARGS)
{
	char buf[32];
	struct eventtimer *et;
	int error;

	ET_LOCK();
	et = timer;
	snprintf(buf, sizeof(buf), "%s", et->et_name);
	ET_UNLOCK();
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	ET_LOCK();
	et = timer;
	if (error != 0 || req->newptr == NULL ||
	    strcasecmp(buf, et->et_name) == 0) {
		ET_UNLOCK();
		return (error);
	}
	et = et_find(buf, 0, 0);
	if (et == NULL) {
		ET_UNLOCK();
		return (ENOENT);
	}
	configtimer(0);
	et_free(timer);
	if (et->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_deep_sleep++;
	if (timer->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_deep_sleep--;
	periodic = want_periodic;
	timer = et;
	et_init(timer, timercb, NULL, NULL);
	configtimer(1);
	ET_UNLOCK();
	return (error);
}
SYSCTL_PROC(_kern_eventtimer, OID_AUTO, timer,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_eventtimer_timer, "A", "Chosen event timer");
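
/*
 * The active timer can be inspected or changed at runtime, for example:
 *	sysctl kern.eventtimer.timer
 *	sysctl kern.eventtimer.timer=HPET
 * (HPET is just an example here; the timers actually available on a given
 * machine are listed by the kern.eventtimer.choice sysctl.)
 */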

/*
 * Report or change the active event timer periodicity.
 */
static int
sysctl_kern_eventtimer_periodic(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = periodic;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	ET_LOCK();
	configtimer(0);
	periodic = want_periodic = val;
	configtimer(1);
	ET_UNLOCK();
	return (error);
}
SYSCTL_PROC(_kern_eventtimer, OID_AUTO, periodic,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_eventtimer_periodic, "I", "Enable event timer periodic mode");