kern_clocksource.c revision 212967
1/*- 2 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification, immediately at the beginning of the file. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27#include <sys/cdefs.h> 28__FBSDID("$FreeBSD: head/sys/kern/kern_clocksource.c 212967 2010-09-21 16:50:24Z mav $"); 29 30/* 31 * Common routines to manage event timers hardware. 32 */ 33 34/* XEN has own timer routines now. 
 */
#ifndef XEN

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/timeet.h>
#include <sys/timetc.h>

#include <machine/atomic.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/smp.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
/* Per-CPU DTrace cyclic callback; invoked from handleevents() when set. */
cyclic_clock_func_t	cyclic_clock_func[MAXCPU];
#endif

/* Non-zero when the chosen timer stops in C3 (see ET_FLAGS_C3STOP below). */
int			cpu_disable_deep_sleep = 0; /* Timer dies in C3. */

static void		setuptimer(void);
static void		loadtimer(struct bintime *now, int first);
static int		doconfigtimer(void);
static void		configtimer(int start);
static int		round_freq(struct eventtimer *et, int freq);

static void		getnextcpuevent(struct bintime *event, int idle);
static void		getnextevent(struct bintime *event);
static int		handleevents(struct bintime *now, int fake);
#ifdef SMP
static void		cpu_new_callout(int cpu, int ticks);
#endif

static struct mtx	et_hw_mtx;	/* Protects the global timer state. */

/*
 * Lock/unlock the event timer hardware state.  A per-CPU timer is guarded
 * by the per-CPU mutex embedded in "state"; a single global timer is
 * guarded by et_hw_mtx.  Both are spin mutexes (used from interrupt path).
 */
#define	ET_HW_LOCK(state)						\
	{								\
		if (timer->et_flags & ET_FLAGS_PERCPU)			\
			mtx_lock_spin(&(state)->et_hw_mtx);		\
		else							\
			mtx_lock_spin(&et_hw_mtx);			\
	}

#define	ET_HW_UNLOCK(state)						\
	{								\
		if (timer->et_flags & ET_FLAGS_PERCPU)			\
			mtx_unlock_spin(&(state)->et_hw_mtx);		\
		else							\
			mtx_unlock_spin(&et_hw_mtx);			\
	}

static struct eventtimer *timer = NULL;	/* Currently active event timer. */
static struct bintime	timerperiod;	/* Timer period for periodic mode. */
static struct bintime	hardperiod;	/* hardclock() events period. */
static struct bintime	statperiod;	/* statclock() events period. */
static struct bintime	profperiod;	/* profclock() events period. */
static struct bintime	nexttick;	/* Next global timer tick time. */
static u_int		busy = 0;	/* Reconfiguration is in progress. */
static int		profiling = 0;	/* Profiling events enabled. */

static char		timername[32];	/* Wanted timer. */
TUNABLE_STR("kern.eventtimer.timer", timername, sizeof(timername));

static int		singlemul = 0;	/* Multiplier for periodic mode. */
TUNABLE_INT("kern.eventtimer.singlemul", &singlemul);
SYSCTL_INT(_kern_eventtimer, OID_AUTO, singlemul, CTLFLAG_RW, &singlemul,
    0, "Multiplier for periodic mode");

static u_int		idletick = 0;	/* Idle mode allowed. */
TUNABLE_INT("kern.eventtimer.idletick", &idletick);
SYSCTL_INT(_kern_eventtimer, OID_AUTO, idletick, CTLFLAG_RW, &idletick,
    0, "Run periodic events when idle");

static int		periodic = 0;	/* Periodic or one-shot mode. */
static int		want_periodic = 0; /* What mode to prefer. */
TUNABLE_INT("kern.eventtimer.periodic", &want_periodic);

/* Per-CPU timer bookkeeping; one instance per CPU via DPCPU_DEFINE below. */
struct pcpu_state {
	struct mtx	et_hw_mtx;	/* Per-CPU timer mutex. */
	u_int		action;		/* Reconfiguration requests. */
	u_int		handle;		/* Immediate handle requests. */
	struct bintime	now;		/* Last tick time. */
	struct bintime	nextevent;	/* Next scheduled event on this CPU. */
	struct bintime	nexttick;	/* Next timer tick time. */
	struct bintime	nexthard;	/* Next hardclock() event. */
	struct bintime	nextstat;	/* Next statclock() event. */
	struct bintime	nextprof;	/* Next profclock() event. */
	int		ipi;		/* This CPU needs IPI. */
	int		idle;		/* This CPU is in idle mode. */
};

static DPCPU_DEFINE(struct pcpu_state, timerstate);

/* Convert a frequency in Hz to a bintime period (1 / freq). */
#define	FREQ2BT(freq, bt)						\
{									\
	(bt)->sec = 0;							\
	(bt)->frac = ((uint64_t)0x8000000000000000 / (freq)) << 1;	\
}
/* Convert a bintime period back to a rounded frequency in Hz. */
#define	BT2FREQ(bt)							\
	(((uint64_t)0x8000000000000000 + ((bt)->frac >> 2)) /		\
	    ((bt)->frac >> 1))

/*
 * Timer broadcast IPI handler.
 */
int
hardclockintr(void)
{
	struct bintime now;
	struct pcpu_state *state;
	int done;

	/*
	 * Ignore the IPI while timers are being reconfigured; doconfigtimer()
	 * may itself consume the interrupt to apply a pending action.
	 */
	if (doconfigtimer() || busy)
		return (FILTER_HANDLED);
	state = DPCPU_PTR(timerstate);
	now = state->now;
	CTR4(KTR_SPARE2, "ipi at %d: now %d.%08x%08x",
	    curcpu, now.sec, (unsigned int)(now.frac >> 32),
	    (unsigned int)(now.frac & 0xffffffff));
	done = handleevents(&now, 0);
	return (done ? FILTER_HANDLED : FILTER_STRAY);
}

/*
 * Handle all events for specified time on this CPU.
 *
 * Runs every hardclock()/statclock()/profclock() tick that is due by
 * "*now", then reprograms the timer for the next per-CPU event.  With
 * "fake" set (called from cpu_activeclock() to catch up after idle) no
 * trapframe is available, so profclock() and the DTrace callback are
 * skipped.  Returns non-zero if any event was processed.
 */
static int
handleevents(struct bintime *now, int fake)
{
	struct bintime t;
	struct trapframe *frame;
	struct pcpu_state *state;
	uintfptr_t pc;
	int usermode;
	int done, runs;

	CTR4(KTR_SPARE2, "handle at %d: now %d.%08x%08x",
	    curcpu, now->sec, (unsigned int)(now->frac >> 32),
	    (unsigned int)(now->frac & 0xffffffff));
	done = 0;
	if (fake) {
		frame = NULL;
		usermode = 0;
		pc = 0;
	} else {
		frame = curthread->td_intr_frame;
		usermode = TRAPF_USERMODE(frame);
		pc = TRAPF_PC(frame);
	}
#ifdef KDTRACE_HOOKS
	/*
	 * If the DTrace hooks are configured and a callback function
	 * has been registered, then call it to process the high speed
	 * timers.
	 */
	if (!fake && cyclic_clock_func[curcpu] != NULL)
		(*cyclic_clock_func[curcpu])(frame);
#endif
	/* Count due hardclock() ticks and deliver them in one batch. */
	runs = 0;
	state = DPCPU_PTR(timerstate);
	while (bintime_cmp(now, &state->nexthard, >=)) {
		bintime_add(&state->nexthard, &hardperiod);
		runs++;
	}
	if (runs) {
		hardclock_anycpu(runs, usermode);
		done = 1;
	}
	while (bintime_cmp(now, &state->nextstat, >=)) {
		statclock(usermode);
		bintime_add(&state->nextstat, &statperiod);
		done = 1;
	}
	if (profiling) {
		while (bintime_cmp(now, &state->nextprof, >=)) {
			if (!fake)
				profclock(usermode, pc);
			bintime_add(&state->nextprof, &profperiod);
			done = 1;
		}
	} else
		state->nextprof = state->nextstat;
	/* Schedule and load the next event, unless reconfiguration is on. */
	getnextcpuevent(&t, 0);
	ET_HW_LOCK(state);
	if (!busy) {
		state->idle = 0;
		state->nextevent = t;
		loadtimer(now, 0);
	}
	ET_HW_UNLOCK(state);
	return (done);
}

/*
 * Schedule binuptime of the next event on current CPU.
 *
 * With "idle" set, the callout subsystem is consulted for how many
 * hardclock() ticks may safely be skipped; otherwise the earliest of the
 * pending hardclock()/statclock()/profclock() times is chosen.
 */
static void
getnextcpuevent(struct bintime *event, int idle)
{
	struct bintime tmp;
	struct pcpu_state *state;
	int skip;

	state = DPCPU_PTR(timerstate);
	*event = state->nexthard;
	if (idle) { /* If CPU is idle - ask callouts for how long. */
		skip = 4;
		/*
		 * The first CPU must still wake often enough to drive the
		 * timecounter (tc_min_ticktock_freq).
		 */
		if (curcpu == CPU_FIRST() && tc_min_ticktock_freq > skip)
			skip = tc_min_ticktock_freq;
		skip = callout_tickstofirst(hz / skip) - 1;
		CTR2(KTR_SPARE2, "skip at %d: %d", curcpu, skip);
		tmp = hardperiod;
		bintime_mul(&tmp, skip);
		bintime_add(event, &tmp);
	} else { /* If CPU is active - handle all types of events. */
		if (bintime_cmp(event, &state->nextstat, >))
			*event = state->nextstat;
		if (profiling &&
		    bintime_cmp(event, &state->nextprof, >))
			*event = state->nextprof;
	}
}

/*
 * Schedule binuptime of the next event on all CPUs.
 */
static void
getnextevent(struct bintime *event)
{
	struct pcpu_state *state;
#ifdef SMP
	int cpu;
#endif
	int c;

	state = DPCPU_PTR(timerstate);
	*event = state->nextevent;
	c = curcpu;
#ifdef SMP
	/* A global timer must fire at the earliest event of ANY CPU. */
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) {
		CPU_FOREACH(cpu) {
			if (curcpu == cpu)
				continue;
			state = DPCPU_ID_PTR(cpu, timerstate);
			if (bintime_cmp(event, &state->nextevent, >)) {
				*event = state->nextevent;
				c = cpu;
			}
		}
	}
#endif
	CTR5(KTR_SPARE2, "next at %d: next %d.%08x%08x by %d",
	    curcpu, event->sec, (unsigned int)(event->frac >> 32),
	    (unsigned int)(event->frac & 0xffffffff), c);
}

/*
 * Hardware timer callback function.  Registered via et_init(); runs
 * events for this CPU and, for a global timer, IPIs the other CPUs
 * whose events are due.
 */
static void
timercb(struct eventtimer *et, void *arg)
{
	struct bintime now;
	struct bintime *next;
	struct pcpu_state *state;
#ifdef SMP
	int cpu, bcast;
#endif

	/* Do not touch anything if somebody reconfiguring timers. */
	if (busy)
		return;
	/* Update present and next tick times. */
	state = DPCPU_PTR(timerstate);
	if (et->et_flags & ET_FLAGS_PERCPU) {
		next = &state->nexttick;
	} else
		next = &nexttick;
	if (periodic) {
		now = *next;	/* Ex-next tick time becomes present time. */
		bintime_add(next, &timerperiod); /* Next tick in 1 period. */
	} else {
		binuptime(&now);	/* Get present time from hardware. */
		next->sec = -1;		/* Next tick is not scheduled yet. */
	}
	state->now = now;
	CTR4(KTR_SPARE2, "intr at %d: now %d.%08x%08x",
	    curcpu, now.sec, (unsigned int)(now.frac >> 32),
	    (unsigned int)(now.frac & 0xffffffff));

#ifdef SMP
	/* Prepare broadcasting to other CPUs for non-per-CPU timers. */
	bcast = 0;
	if ((et->et_flags & ET_FLAGS_PERCPU) == 0 && smp_started) {
		CPU_FOREACH(cpu) {
			state = DPCPU_ID_PTR(cpu, timerstate);
			ET_HW_LOCK(state);
			state->now = now;
			if (bintime_cmp(&now, &state->nextevent, >=)) {
				/*
				 * Push nextevent forward so the event is not
				 * claimed again before the IPI is handled.
				 */
				state->nextevent.sec++;
				if (curcpu != cpu) {
					state->ipi = 1;
					bcast = 1;
				}
			}
			ET_HW_UNLOCK(state);
		}
	}
#endif

	/* Handle events for this time on this CPU. */
	handleevents(&now, 0);

#ifdef SMP
	/* Broadcast interrupt to other CPUs for non-per-CPU timers. */
	if (bcast) {
		CPU_FOREACH(cpu) {
			if (curcpu == cpu)
				continue;
			state = DPCPU_ID_PTR(cpu, timerstate);
			if (state->ipi) {
				state->ipi = 0;
				ipi_cpu(cpu, IPI_HARDCLOCK);
			}
		}
	}
#endif
}

/*
 * Load new value into hardware timer.
 *
 * Periodic mode: on "start", program the timer so that its ticks are
 * phase-aligned to a multiple of the period (keeps CPUs synchronous).
 * One-shot mode: program the time until the globally next event, but
 * only if it differs from what is already programmed.
 */
static void
loadtimer(struct bintime *now, int start)
{
	struct pcpu_state *state;
	struct bintime new;
	struct bintime *next;
	uint64_t tmp;
	int eq;

	if (periodic) {
		if (start) {
			/*
			 * Try to start all periodic timers aligned
			 * to period to make events synchronous.
			 * Fixed-point trick: sec<<36 | frac>>28 expresses
			 * "now" in 2^-36 s units; the modulo by the period
			 * (in the same units) is the current phase, and
			 * period - phase is the delay to the next aligned
			 * boundary (shifted back to 2^-64 units).
			 */
			tmp = ((uint64_t)now->sec << 36) + (now->frac >> 28);
			tmp = (tmp % (timerperiod.frac >> 28)) << 28;
			tmp = timerperiod.frac - tmp;
			new = timerperiod;
			bintime_addx(&new, tmp);
			CTR5(KTR_SPARE2, "load p at %d: now %d.%08x first in %d.%08x",
			    curcpu, now->sec, (unsigned int)(now->frac >> 32),
			    new.sec, (unsigned int)(new.frac >> 32));
			et_start(timer, &new, &timerperiod);
		}
	} else {
		if (timer->et_flags & ET_FLAGS_PERCPU) {
			state = DPCPU_PTR(timerstate);
			next = &state->nexttick;
		} else
			next = &nexttick;
		getnextevent(&new);
		eq = bintime_cmp(&new, next, ==);
		CTR5(KTR_SPARE2, "load at %d: next %d.%08x%08x eq %d",
		    curcpu, new.sec, (unsigned int)(new.frac >> 32),
		    (unsigned int)(new.frac & 0xffffffff),
		    eq);
		if (!eq) {
			*next = new;
			bintime_sub(&new, now);	/* Convert to relative delay. */
			et_start(timer, &new, NULL);
		}
	}
}

/*
 * Prepare event timer parameters after configuration changes.
 * Falls back between periodic and one-shot mode depending on hardware
 * capabilities, then computes the base tick period.
 */
static void
setuptimer(void)
{
	int freq;

	if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
		periodic = 0;
	else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
		periodic = 1;
	singlemul = MIN(MAX(singlemul, 1), 20);
	freq = hz * singlemul;
	/* The tick rate must be able to drive stat/prof clocks too. */
	while (freq < (profiling ? profhz : stathz))
		freq += hz;
	freq = round_freq(timer, freq);
	FREQ2BT(freq, &timerperiod);
}

/*
 * Reconfigure specified per-CPU timer on other CPU. Called from IPI handler.
 */
static int
doconfigtimer(void)
{
	struct bintime now;
	struct pcpu_state *state;

	state = DPCPU_PTR(timerstate);
	/* action: 1 = (re)start this CPU's timer, 2 = stop it (see configtimer()). */
	switch (atomic_load_acq_int(&state->action)) {
	case 1:
		binuptime(&now);
		ET_HW_LOCK(state);
		loadtimer(&now, 1);
		ET_HW_UNLOCK(state);
		state->handle = 0;
		atomic_store_rel_int(&state->action, 0);
		return (1);
	case 2:
		ET_HW_LOCK(state);
		et_stop(timer);
		ET_HW_UNLOCK(state);
		state->handle = 0;
		atomic_store_rel_int(&state->action, 0);
		return (1);
	}
	/* No reconfiguration pending: maybe a deferred event-handling request. */
	if (atomic_readandclear_int(&state->handle) && !busy) {
		binuptime(&now);
		handleevents(&now, 0);
		return (1);
	}
	return (0);
}

/*
 * Reconfigure specified timer.
 * For per-CPU timers use IPI to make other CPUs to reconfigure.
 */
static void
configtimer(int start)
{
	struct bintime now, next;
	struct pcpu_state *state;
	int cpu;

	if (start) {
		setuptimer();
		binuptime(&now);
	}
	critical_enter();
	ET_HW_LOCK(DPCPU_PTR(timerstate));
	if (start) {
		/* Initialize time machine parameters. */
		next = now;
		bintime_add(&next, &timerperiod);
		if (periodic)
			nexttick = next;
		else
			nexttick.sec = -1;
		CPU_FOREACH(cpu) {
			state = DPCPU_ID_PTR(cpu, timerstate);
			state->now = now;
			state->nextevent = next;
			if (periodic)
				state->nexttick = next;
			else
				state->nexttick.sec = -1;
			state->nexthard = next;
			state->nextstat = next;
			state->nextprof = next;
			hardclock_sync(cpu);
		}
		busy = 0;
		/* Start global timer or per-CPU timer of this CPU. */
		loadtimer(&now, 1);
	} else {
		busy = 1;
		/* Stop global timer or per-CPU timer of this CPU. */
		et_stop(timer);
	}
	ET_HW_UNLOCK(DPCPU_PTR(timerstate));
#ifdef SMP
	/* If timer is global or there is no other CPUs yet - we are done. */
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || !smp_started) {
		critical_exit();
		return;
	}
	/* Set reconfigure flags for other CPUs. */
	CPU_FOREACH(cpu) {
		state = DPCPU_ID_PTR(cpu, timerstate);
		atomic_store_rel_int(&state->action,
		    (cpu == curcpu) ? 0 : ( start ? 1 : 2));
	}
	/* Broadcast reconfigure IPI. */
	ipi_all_but_self(IPI_HARDCLOCK);
	/* Wait for reconfiguration completed. */
restart:
	cpu_spinwait();
	CPU_FOREACH(cpu) {
		if (cpu == curcpu)
			continue;
		state = DPCPU_ID_PTR(cpu, timerstate);
		if (atomic_load_acq_int(&state->action))
			goto restart;
	}
#endif
	critical_exit();
}

/*
 * Calculate nearest frequency supported by hardware timer.
 * Rounds via the hardware divisor when et_frequency is known, then
 * clamps to the timer's minimal/maximal period limits.
 */
static int
round_freq(struct eventtimer *et, int freq)
{
	uint64_t div;

	if (et->et_frequency != 0) {
		div = lmax((et->et_frequency + freq / 2) / freq, 1);
		if (et->et_flags & ET_FLAGS_POW2DIV)
			div = 1 << (flsl(div + div / 2) - 1);
		freq = (et->et_frequency + div / 2) / div;
	}
	if (et->et_min_period.sec > 0)
		freq = 0;	/* Period over a second: nothing reasonable. */
	else if (et->et_min_period.frac != 0)
		freq = min(freq, BT2FREQ(&et->et_min_period));
	if (et->et_max_period.sec == 0 && et->et_max_period.frac != 0)
		freq = max(freq, BT2FREQ(&et->et_max_period));
	return (freq);
}

/*
 * Configure and start event timers (BSP part).
 */
void
cpu_initclocks_bsp(void)
{
	struct pcpu_state *state;
	int base, div, cpu;

	mtx_init(&et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
	CPU_FOREACH(cpu) {
		state = DPCPU_ID_PTR(cpu, timerstate);
		mtx_init(&state->et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
	}
#ifdef SMP
	callout_new_inserted = cpu_new_callout;
#endif
	periodic = want_periodic;
	/* Grab requested timer or the best of present. */
	if (timername[0])
		timer = et_find(timername, 0, 0);
	if (timer == NULL && periodic) {
		timer = et_find(NULL,
		    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
	}
	if (timer == NULL) {
		timer = et_find(NULL,
		    ET_FLAGS_ONESHOT, ET_FLAGS_ONESHOT);
	}
	if (timer == NULL && !periodic) {
		timer = et_find(NULL,
		    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
	}
	if (timer == NULL)
		panic("No usable event timer found!");
	et_init(timer, timercb, NULL, NULL);

	/* Adapt to timer capabilities. */
	if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
		periodic = 0;
	else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
		periodic = 1;
	if (timer->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_deep_sleep++;

	/*
	 * We honor the requested 'hz' value.
	 * We want to run stathz in the neighborhood of 128hz.
	 * We would like profhz to run as often as possible.
	 */
	if (singlemul <= 0 || singlemul > 20) {
		if (hz >= 1500 || (hz % 128) == 0)
			singlemul = 1;
		else if (hz >= 750)
			singlemul = 2;
		else
			singlemul = 4;
	}
	if (periodic) {
		base = round_freq(timer, hz * singlemul);
		singlemul = max((base + hz / 2) / hz, 1);
		hz = (base + singlemul / 2) / singlemul;
		if (base <= 128)
			stathz = base;
		else {
			div = base / 128;
			if (div >= singlemul && (div % singlemul) == 0)
				div++;
			stathz = base / div;
		}
		profhz = stathz;
		while ((profhz + stathz) <= 128 * 64)
			profhz += stathz;
		profhz = round_freq(timer, profhz);
	} else {
		hz = round_freq(timer, hz);
		stathz = round_freq(timer, 127);
		profhz = round_freq(timer, stathz * 64);
	}
	tick = 1000000 / hz;
	FREQ2BT(hz, &hardperiod);
	FREQ2BT(stathz, &statperiod);
	FREQ2BT(profhz, &profperiod);
	ET_LOCK();
	configtimer(1);
	ET_UNLOCK();
}

/*
 * Start per-CPU event timers on APs.
 */
void
cpu_initclocks_ap(void)
{
	struct bintime now;
	struct pcpu_state *state;

	/* A global timer was already started by the BSP; nothing to do here. */
	if (timer->et_flags & ET_FLAGS_PERCPU) {
		state = DPCPU_PTR(timerstate);
		binuptime(&now);
		ET_HW_LOCK(state);
		loadtimer(&now, 1);
		ET_HW_UNLOCK(state);
	}
}

/*
 * Switch to profiling clock rates.
 * In periodic mode the timer must be reprogrammed for the higher rate.
 */
void
cpu_startprofclock(void)
{

	ET_LOCK();
	if (periodic) {
		configtimer(0);
		profiling = 1;
		configtimer(1);
	} else
		profiling = 1;
	ET_UNLOCK();
}

/*
 * Switch to regular clock rates.
 */
void
cpu_stopprofclock(void)
{

	ET_LOCK();
	if (periodic) {
		configtimer(0);
		profiling = 0;
		configtimer(1);
	} else
		profiling = 0;
	ET_UNLOCK();
}

/*
 * Switch to idle mode (all ticks handled).
 * Called from the idle path; schedules the wakeup as far out as the
 * callout subsystem permits (see getnextcpuevent() with idle != 0).
 */
void
cpu_idleclock(void)
{
	struct bintime now, t;
	struct pcpu_state *state;

	/* Skipping ticks is pointless/impossible in these configurations. */
	if (idletick || busy ||
	    (periodic && (timer->et_flags & ET_FLAGS_PERCPU)))
		return;
	state = DPCPU_PTR(timerstate);
	if (periodic)
		now = state->now;
	else
		binuptime(&now);
	CTR4(KTR_SPARE2, "idle at %d: now %d.%08x%08x",
	    curcpu, now.sec, (unsigned int)(now.frac >> 32),
	    (unsigned int)(now.frac & 0xffffffff));
	getnextcpuevent(&t, 1);
	ET_HW_LOCK(state);
	state->idle = 1;
	state->nextevent = t;
	if (!periodic)
		loadtimer(&now, 0);
	ET_HW_UNLOCK(state);
}

/*
 * Switch to active mode (skip empty ticks).
 * Runs handleevents() in "fake" mode to catch up on ticks skipped while
 * idle; the intr_nesting_level bump makes the clock code treat this as
 * interrupt context.
 */
void
cpu_activeclock(void)
{
	struct bintime now;
	struct pcpu_state *state;
	struct thread *td;

	state = DPCPU_PTR(timerstate);
	if (state->idle == 0 || busy)
		return;
	if (periodic)
		now = state->now;
	else
		binuptime(&now);
	CTR4(KTR_SPARE2, "active at %d: now %d.%08x%08x",
	    curcpu, now.sec, (unsigned int)(now.frac >> 32),
	    (unsigned int)(now.frac & 0xffffffff));
	spinlock_enter();
	td = curthread;
	td->td_intr_nesting_level++;
	handleevents(&now, 1);
	td->td_intr_nesting_level--;
	spinlock_exit();
}

#ifdef SMP
/*
 * A new callout was inserted for an idle CPU "ticks" ticks from now;
 * make sure that CPU wakes up in time.  Hooked via callout_new_inserted.
 */
static void
cpu_new_callout(int cpu, int ticks)
{
	struct bintime tmp;
	struct pcpu_state *state;

	CTR3(KTR_SPARE2, "new co at %d: on %d in %d",
	    curcpu, cpu, ticks);
	state = DPCPU_ID_PTR(cpu, timerstate);
	ET_HW_LOCK(state);
	if (state->idle == 0 || busy) {
		ET_HW_UNLOCK(state);
		return;
	}
	/*
	 * If timer is periodic - just update next event time for target CPU.
	 */
	if (periodic) {
		state->nextevent = state->nexthard;
		tmp = hardperiod;
		bintime_mul(&tmp, ticks - 1);
		bintime_add(&state->nextevent, &tmp);
		ET_HW_UNLOCK(state);
		return;
	}
	/*
	 * Otherwise we have to wake that CPU up, as we can't get present
	 * bintime to reprogram global timer from here. If timer is per-CPU,
	 * we by definition can't do it from here.
	 */
	ET_HW_UNLOCK(state);
	if (timer->et_flags & ET_FLAGS_PERCPU) {
		state->handle = 1;	/* Picked up by doconfigtimer(). */
		ipi_cpu(cpu, IPI_HARDCLOCK);
	} else {
		if (!cpu_idle_wakeup(cpu))
			ipi_cpu(cpu, IPI_AST);
	}
}
#endif

/*
 * Report or change the active event timers hardware.
815 */ 816static int 817sysctl_kern_eventtimer_timer(SYSCTL_HANDLER_ARGS) 818{ 819 char buf[32]; 820 struct eventtimer *et; 821 int error; 822 823 ET_LOCK(); 824 et = timer; 825 snprintf(buf, sizeof(buf), "%s", et->et_name); 826 ET_UNLOCK(); 827 error = sysctl_handle_string(oidp, buf, sizeof(buf), req); 828 ET_LOCK(); 829 et = timer; 830 if (error != 0 || req->newptr == NULL || 831 strcasecmp(buf, et->et_name) == 0) { 832 ET_UNLOCK(); 833 return (error); 834 } 835 et = et_find(buf, 0, 0); 836 if (et == NULL) { 837 ET_UNLOCK(); 838 return (ENOENT); 839 } 840 configtimer(0); 841 et_free(timer); 842 if (et->et_flags & ET_FLAGS_C3STOP) 843 cpu_disable_deep_sleep++; 844 if (timer->et_flags & ET_FLAGS_C3STOP) 845 cpu_disable_deep_sleep--; 846 periodic = want_periodic; 847 timer = et; 848 et_init(timer, timercb, NULL, NULL); 849 configtimer(1); 850 ET_UNLOCK(); 851 return (error); 852} 853SYSCTL_PROC(_kern_eventtimer, OID_AUTO, timer, 854 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 855 0, 0, sysctl_kern_eventtimer_timer, "A", "Chosen event timer"); 856 857/* 858 * Report or change the active event timer periodicity. 859 */ 860static int 861sysctl_kern_eventtimer_periodic(SYSCTL_HANDLER_ARGS) 862{ 863 int error, val; 864 865 val = periodic; 866 error = sysctl_handle_int(oidp, &val, 0, req); 867 if (error != 0 || req->newptr == NULL) 868 return (error); 869 ET_LOCK(); 870 configtimer(0); 871 periodic = want_periodic = val; 872 configtimer(1); 873 ET_UNLOCK(); 874 return (error); 875} 876SYSCTL_PROC(_kern_eventtimer, OID_AUTO, periodic, 877 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, 878 0, 0, sysctl_kern_eventtimer_periodic, "I", "Enable event timer periodic mode"); 879 880#endif 881