kern_clocksource.c revision 212603
/*-
 * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/kern/kern_clocksource.c 212603 2010-09-14 08:48:06Z mav $");

/*
 * Common routines to manage event timer hardware.
 */

/* XEN has its own timer routines now. */
#ifndef XEN

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/timeet.h>
#include <sys/timetc.h>

#include <machine/atomic.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/smp.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
cyclic_clock_func_t cyclic_clock_func[MAXCPU];
#endif

int cpu_disable_deep_sleep = 0; /* Timer dies in C3. */

static void setuptimer(void);
static void loadtimer(struct bintime *now, int first);
static int doconfigtimer(void);
static void configtimer(int start);
static int round_freq(struct eventtimer *et, int freq);

static void getnextcpuevent(struct bintime *event, int idle);
static void getnextevent(struct bintime *event);
static int handleevents(struct bintime *now, int fake);
#ifdef SMP
static void cpu_new_callout(int cpu, int ticks);
#endif

static struct mtx et_hw_mtx;

#define ET_HW_LOCK(state)                                               \
        {                                                               \
                if (timer->et_flags & ET_FLAGS_PERCPU)                  \
                        mtx_lock_spin(&(state)->et_hw_mtx);             \
                else                                                    \
                        mtx_lock_spin(&et_hw_mtx);                      \
        }

#define ET_HW_UNLOCK(state)                                             \
        {                                                               \
                if (timer->et_flags & ET_FLAGS_PERCPU)                  \
                        mtx_unlock_spin(&(state)->et_hw_mtx);           \
                else                                                    \
                        mtx_unlock_spin(&et_hw_mtx);                    \
        }
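/*
 * A note on the locking model (descriptive only): with a per-CPU event
 * timer (ET_FLAGS_PERCPU) each CPU serializes access to its own hardware
 * through the spin mutex embedded in its pcpu_state, while a single
 * global timer funnels all CPUs through et_hw_mtx above.  The pattern
 * used throughout this file is:
 *
 *      state = DPCPU_PTR(timerstate);
 *      ET_HW_LOCK(state);
 *      ...touch per-CPU timer state, reprogram hardware...
 *      ET_HW_UNLOCK(state);
 */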
static struct eventtimer *timer = NULL;
static struct bintime timerperiod;      /* Timer period for periodic mode. */
static struct bintime hardperiod;       /* hardclock() events period. */
static struct bintime statperiod;       /* statclock() events period. */
static struct bintime profperiod;       /* profclock() events period. */
static struct bintime nexttick;         /* Next global timer tick time. */
static u_int busy = 0;                  /* Reconfiguration is in progress. */
static int profiling = 0;               /* Profiling events enabled. */

static char timername[32];              /* Wanted timer. */
TUNABLE_STR("kern.eventtimer.timer", timername, sizeof(timername));

static int singlemul = 0;               /* Multiplier for periodic mode. */
TUNABLE_INT("kern.eventtimer.singlemul", &singlemul);
SYSCTL_INT(_kern_eventtimer, OID_AUTO, singlemul, CTLFLAG_RW, &singlemul,
    0, "Multiplier for periodic mode");

static u_int idletick = 0;              /* Idle mode allowed. */
TUNABLE_INT("kern.eventtimer.idletick", &idletick);
SYSCTL_INT(_kern_eventtimer, OID_AUTO, idletick, CTLFLAG_RW, &idletick,
    0, "Run periodic events when idle");

static int periodic = 0;                /* Periodic or one-shot mode. */
TUNABLE_INT("kern.eventtimer.periodic", &periodic);

struct pcpu_state {
        struct mtx et_hw_mtx;           /* Per-CPU timer mutex. */
        u_int action;                   /* Reconfiguration requests. */
        u_int handle;                   /* Immediate handle requests. */
        struct bintime now;             /* Last tick time. */
        struct bintime nextevent;       /* Next scheduled event on this CPU. */
        struct bintime nexttick;        /* Next timer tick time. */
        struct bintime nexthard;        /* Next hardclock() event. */
        struct bintime nextstat;        /* Next statclock() event. */
        struct bintime nextprof;        /* Next profclock() event. */
        int ipi;                        /* This CPU needs IPI. */
        int idle;                       /* This CPU is in idle mode. */
};

static DPCPU_DEFINE(struct pcpu_state, timerstate);

#define FREQ2BT(freq, bt)                                               \
{                                                                       \
        (bt)->sec = 0;                                                  \
        (bt)->frac = ((uint64_t)0x8000000000000000 / (freq)) << 1;      \
}
#define BT2FREQ(bt)                                                     \
        (((uint64_t)0x8000000000000000 + ((bt)->frac >> 2)) /           \
            ((bt)->frac >> 1))
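/*
 * Worked example (illustrative): struct bintime keeps whole seconds plus
 * a 64-bit binary fraction, so "frac" counts units of 2^-64 s.  FREQ2BT
 * computes the period of a freq Hz clock as (2^63 / freq) << 1, which
 * approximates 2^64 / freq while staying inside 64-bit arithmetic; for
 * freq = 1000 that is a frac of about one millisecond.  BT2FREQ is the
 * inverse, rounded to the nearest integer frequency.
 */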
/*
 * Timer broadcast IPI handler.
 */
int
hardclockintr(void)
{
        struct bintime now;
        struct pcpu_state *state;
        int done;

        if (doconfigtimer() || busy)
                return (FILTER_HANDLED);
        state = DPCPU_PTR(timerstate);
        now = state->now;
        CTR4(KTR_SPARE2, "ipi at %d: now %d.%08x%08x",
            curcpu, now.sec, (unsigned int)(now.frac >> 32),
            (unsigned int)(now.frac & 0xffffffff));
        done = handleevents(&now, 0);
        return (done ? FILTER_HANDLED : FILTER_STRAY);
}

/*
 * Handle all events for the specified time on this CPU.
 */
static int
handleevents(struct bintime *now, int fake)
{
        struct bintime t;
        struct trapframe *frame;
        struct pcpu_state *state;
        uintfptr_t pc;
        int usermode;
        int done, runs;

        CTR4(KTR_SPARE2, "handle at %d: now %d.%08x%08x",
            curcpu, now->sec, (unsigned int)(now->frac >> 32),
            (unsigned int)(now->frac & 0xffffffff));
        done = 0;
        if (fake) {
                frame = NULL;
                usermode = 0;
                pc = 0;
        } else {
                frame = curthread->td_intr_frame;
                usermode = TRAPF_USERMODE(frame);
                pc = TRAPF_PC(frame);
        }
#ifdef KDTRACE_HOOKS
        /*
         * If the DTrace hooks are configured and a callback function
         * has been registered, then call it to process the high speed
         * timers.
         */
        if (!fake && cyclic_clock_func[curcpu] != NULL)
                (*cyclic_clock_func[curcpu])(frame);
#endif
        runs = 0;
        state = DPCPU_PTR(timerstate);
        while (bintime_cmp(now, &state->nexthard, >=)) {
                bintime_add(&state->nexthard, &hardperiod);
                runs++;
        }
        if (runs) {
                hardclock_anycpu(runs, usermode);
                done = 1;
        }
        while (bintime_cmp(now, &state->nextstat, >=)) {
                statclock(usermode);
                bintime_add(&state->nextstat, &statperiod);
                done = 1;
        }
        if (profiling) {
                while (bintime_cmp(now, &state->nextprof, >=)) {
                        if (!fake)
                                profclock(usermode, pc);
                        bintime_add(&state->nextprof, &profperiod);
                        done = 1;
                }
        } else
                state->nextprof = state->nextstat;
        getnextcpuevent(&t, 0);
        ET_HW_LOCK(state);
        if (!busy) {
                state->idle = 0;
                state->nextevent = t;
                loadtimer(now, 0);
        }
        ET_HW_UNLOCK(state);
        return (done);
}

/*
 * Schedule binuptime of the next event on the current CPU.
 */
static void
getnextcpuevent(struct bintime *event, int idle)
{
        struct bintime tmp;
        struct pcpu_state *state;
        int skip;

        state = DPCPU_PTR(timerstate);
        *event = state->nexthard;
        if (idle) { /* If CPU is idle - ask callouts for how long. */
                skip = 4;
                if (curcpu == CPU_FIRST() && tc_min_ticktock_freq > skip)
                        skip = tc_min_ticktock_freq;
                skip = callout_tickstofirst(hz / skip) - 1;
                CTR2(KTR_SPARE2, "skip at %d: %d", curcpu, skip);
                tmp = hardperiod;
                bintime_mul(&tmp, skip);
                bintime_add(event, &tmp);
        } else { /* If CPU is active - handle all types of events. */
                if (bintime_cmp(event, &state->nextstat, >))
                        *event = state->nextstat;
                if (profiling &&
                    bintime_cmp(event, &state->nextprof, >))
                        *event = state->nextprof;
        }
}

/*
 * Schedule binuptime of the next event on all CPUs.
 */
static void
getnextevent(struct bintime *event)
{
        struct pcpu_state *state;
#ifdef SMP
        int cpu;
#endif
        int c;

        state = DPCPU_PTR(timerstate);
        *event = state->nextevent;
        c = curcpu;
#ifdef SMP
        if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) {
                CPU_FOREACH(cpu) {
                        if (curcpu == cpu)
                                continue;
                        state = DPCPU_ID_PTR(cpu, timerstate);
                        if (bintime_cmp(event, &state->nextevent, >)) {
                                *event = state->nextevent;
                                c = cpu;
                        }
                }
        }
#endif
        CTR5(KTR_SPARE2, "next at %d: next %d.%08x%08x by %d",
            curcpu, event->sec, (unsigned int)(event->frac >> 32),
            (unsigned int)(event->frac & 0xffffffff), c);
}
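/*
 * Illustrative example of the idle path in getnextcpuevent() above
 * (numbers assumed): with hz = 1000 and callout_tickstofirst() reporting
 * the first pending callout 50 ticks away, skip becomes 49 and the next
 * event is scheduled 49 hardclock periods past state->nexthard, so an
 * idle CPU can sleep through the empty ticks instead of waking for each.
 */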
/* Hardware timer callback function. */
static void
timercb(struct eventtimer *et, void *arg)
{
        struct bintime now;
        struct bintime *next;
        struct pcpu_state *state;
#ifdef SMP
        int cpu, bcast;
#endif

        /* Do not touch anything if somebody is reconfiguring timers. */
        if (busy)
                return;
        /* Update present and next tick times. */
        state = DPCPU_PTR(timerstate);
        if (et->et_flags & ET_FLAGS_PERCPU)
                next = &state->nexttick;
        else
                next = &nexttick;
        if (periodic) {
                now = *next;    /* Ex-next tick time becomes present time. */
                bintime_add(next, &timerperiod); /* Next tick in 1 period. */
        } else {
                binuptime(&now);        /* Get present time from hardware. */
                next->sec = -1;         /* Next tick is not scheduled yet. */
        }
        state->now = now;
        CTR4(KTR_SPARE2, "intr at %d: now %d.%08x%08x",
            curcpu, now.sec, (unsigned int)(now.frac >> 32),
            (unsigned int)(now.frac & 0xffffffff));

#ifdef SMP
        /* Prepare broadcasting to other CPUs for non-per-CPU timers. */
        bcast = 0;
        if ((et->et_flags & ET_FLAGS_PERCPU) == 0 && smp_started) {
                CPU_FOREACH(cpu) {
                        if (curcpu == cpu)
                                continue;
                        state = DPCPU_ID_PTR(cpu, timerstate);
                        ET_HW_LOCK(state);
                        state->now = now;
                        if (bintime_cmp(&now, &state->nextevent, >=)) {
                                state->nextevent.sec++;
                                state->ipi = 1;
                                bcast = 1;
                        }
                        ET_HW_UNLOCK(state);
                }
        }
#endif

        /* Handle events for this time on this CPU. */
        handleevents(&now, 0);

#ifdef SMP
        /* Broadcast interrupt to other CPUs for non-per-CPU timers. */
        if (bcast) {
                CPU_FOREACH(cpu) {
                        if (curcpu == cpu)
                                continue;
                        state = DPCPU_ID_PTR(cpu, timerstate);
                        if (state->ipi) {
                                state->ipi = 0;
                                ipi_cpu(cpu, IPI_HARDCLOCK);
                        }
                }
        }
#endif
}

/*
 * Load new value into hardware timer.
 */
static void
loadtimer(struct bintime *now, int start)
{
        struct pcpu_state *state;
        struct bintime new;
        struct bintime *next;
        uint64_t tmp;
        int eq;

        if (periodic) {
                if (start) {
                        /*
                         * Try to start all periodic timers aligned
                         * to period to make events synchronous.
                         */
                        tmp = ((uint64_t)now->sec << 36) + (now->frac >> 28);
                        tmp = (tmp % (timerperiod.frac >> 28)) << 28;
                        tmp = timerperiod.frac - tmp;
                        new = timerperiod;
                        bintime_addx(&new, tmp);
                        CTR5(KTR_SPARE2, "load p at %d: now %d.%08x first in %d.%08x",
                            curcpu, now->sec, (unsigned int)(now->frac >> 32),
                            new.sec, (unsigned int)(new.frac >> 32));
                        et_start(timer, &new, &timerperiod);
                }
        } else {
                if (timer->et_flags & ET_FLAGS_PERCPU) {
                        state = DPCPU_PTR(timerstate);
                        next = &state->nexttick;
                } else
                        next = &nexttick;
                getnextevent(&new);
                eq = bintime_cmp(&new, next, ==);
                CTR5(KTR_SPARE2, "load at %d: next %d.%08x%08x eq %d",
                    curcpu, new.sec, (unsigned int)(new.frac >> 32),
                    (unsigned int)(new.frac & 0xffffffff),
                    eq);
                if (!eq) {
                        *next = new;
                        bintime_sub(&new, now);
                        et_start(timer, &new, NULL);
                }
        }
}
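/*
 * Worked example for the alignment math in loadtimer() above (values
 * assumed): "now" is repacked as (sec << 36) + (frac >> 28), a fixed-point
 * count of 2^-36 s units (28 bits of seconds, 36 bits of fraction), so
 * taking it modulo the similarly scaled period fits in 64 bits.  The
 * remainder, shifted back by 28, is the phase past the last period
 * boundary; the first programmed interval is then one full period plus
 * the distance to the next boundary.  E.g. with a 1 ms period and "now"
 * 0.3 ms past a boundary, the first interval is 1.7 ms, ending exactly
 * on a boundary, so all CPUs started this way tick in phase.
 */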
/*
 * Prepare event timer parameters after configuration changes.
 */
static void
setuptimer(void)
{
        int freq;

        if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
                periodic = 0;
        else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
                periodic = 1;
        singlemul = MIN(MAX(singlemul, 1), 20);
        freq = hz * singlemul;
        while (freq < (profiling ? profhz : stathz))
                freq += hz;
        freq = round_freq(timer, freq);
        FREQ2BT(freq, &timerperiod);
}

/*
 * Reconfigure the specified per-CPU timer on another CPU.  Called from
 * an IPI handler.
 */
static int
doconfigtimer(void)
{
        struct bintime now;
        struct pcpu_state *state;

        state = DPCPU_PTR(timerstate);
        switch (atomic_load_acq_int(&state->action)) {
        case 1:
                binuptime(&now);
                ET_HW_LOCK(state);
                loadtimer(&now, 1);
                ET_HW_UNLOCK(state);
                state->handle = 0;
                atomic_store_rel_int(&state->action, 0);
                return (1);
        case 2:
                ET_HW_LOCK(state);
                et_stop(timer);
                ET_HW_UNLOCK(state);
                state->handle = 0;
                atomic_store_rel_int(&state->action, 0);
                return (1);
        }
        if (atomic_readandclear_int(&state->handle) && !busy) {
                binuptime(&now);
                handleevents(&now, 0);
                return (1);
        }
        return (0);
}

/*
 * Reconfigure the specified timer.
 * For per-CPU timers use an IPI to make the other CPUs reconfigure.
 */
static void
configtimer(int start)
{
        struct bintime now, next;
        struct pcpu_state *state;
        int cpu;

        if (start) {
                setuptimer();
                binuptime(&now);
        }
        critical_enter();
        ET_HW_LOCK(DPCPU_PTR(timerstate));
        if (start) {
                /* Initialize time machine parameters. */
                next = now;
                bintime_add(&next, &timerperiod);
                if (periodic)
                        nexttick = next;
                else
                        nexttick.sec = -1;
                CPU_FOREACH(cpu) {
                        state = DPCPU_ID_PTR(cpu, timerstate);
                        state->now = now;
                        state->nextevent = next;
                        if (periodic)
                                state->nexttick = next;
                        else
                                state->nexttick.sec = -1;
                        state->nexthard = next;
                        state->nextstat = next;
                        state->nextprof = next;
                        hardclock_sync(cpu);
                }
                busy = 0;
                /* Start global timer or per-CPU timer of this CPU. */
                loadtimer(&now, 1);
        } else {
                busy = 1;
                /* Stop global timer or per-CPU timer of this CPU. */
                et_stop(timer);
        }
        ET_HW_UNLOCK(DPCPU_PTR(timerstate));
#ifdef SMP
        /* If the timer is global or there are no other CPUs yet, we are done. */
        if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || !smp_started) {
                critical_exit();
                return;
        }
        /* Set reconfigure flags for other CPUs. */
        CPU_FOREACH(cpu) {
                state = DPCPU_ID_PTR(cpu, timerstate);
                atomic_store_rel_int(&state->action,
                    (cpu == curcpu) ? 0 : (start ? 1 : 2));
        }
        /* Broadcast reconfigure IPI. */
        ipi_all_but_self(IPI_HARDCLOCK);
        /* Wait for reconfiguration to complete. */
restart:
        cpu_spinwait();
        CPU_FOREACH(cpu) {
                if (cpu == curcpu)
                        continue;
                state = DPCPU_ID_PTR(cpu, timerstate);
                if (atomic_load_acq_int(&state->action))
                        goto restart;
        }
#endif
        critical_exit();
}

/*
 * Calculate the nearest frequency supported by the hardware timer.
 */
static int
round_freq(struct eventtimer *et, int freq)
{
        uint64_t div;

        if (et->et_frequency != 0) {
                div = lmax((et->et_frequency + freq / 2) / freq, 1);
                if (et->et_flags & ET_FLAGS_POW2DIV)
                        div = 1 << (flsl(div + div / 2) - 1);
                freq = (et->et_frequency + div / 2) / div;
        }
        if (et->et_min_period.sec > 0)
                freq = 0;
        else if (et->et_min_period.frac != 0)
                freq = min(freq, BT2FREQ(&et->et_min_period));
        if (et->et_max_period.sec == 0 && et->et_max_period.frac != 0)
                freq = max(freq, BT2FREQ(&et->et_max_period));
        return (freq);
}
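/*
 * Illustrative example (timer parameters assumed): for an RTC-like timer
 * with et_frequency = 32768 and ET_FLAGS_POW2DIV, round_freq(et, 1000)
 * picks div = (32768 + 500) / 1000 = 33, rounds it to a power of two as
 * 1 << (flsl(33 + 16) - 1) = 32, and returns (32768 + 16) / 32 = 1024 Hz,
 * the nearest rate the power-of-two divider can actually produce.
 */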
/*
 * Configure and start event timers (BSP part).
 */
void
cpu_initclocks_bsp(void)
{
        struct pcpu_state *state;
        int base, div, cpu;

        mtx_init(&et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
        CPU_FOREACH(cpu) {
                state = DPCPU_ID_PTR(cpu, timerstate);
                mtx_init(&state->et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
        }
#ifdef SMP
        callout_new_inserted = cpu_new_callout;
#endif
        /* Grab the requested timer or the best one available. */
        if (timername[0])
                timer = et_find(timername, 0, 0);
        if (timer == NULL && periodic) {
                timer = et_find(NULL,
                    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
        }
        if (timer == NULL) {
                timer = et_find(NULL,
                    ET_FLAGS_ONESHOT, ET_FLAGS_ONESHOT);
        }
        if (timer == NULL && !periodic) {
                timer = et_find(NULL,
                    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
        }
        if (timer == NULL)
                panic("No usable event timer found!");
        et_init(timer, timercb, NULL, NULL);

        /* Adapt to timer capabilities. */
        if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
                periodic = 0;
        else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
                periodic = 1;
        if (timer->et_flags & ET_FLAGS_C3STOP)
                cpu_disable_deep_sleep++;

        /*
         * We honor the requested 'hz' value.
         * We want to run stathz in the neighborhood of 128hz.
         * We would like profhz to run as often as possible.
         */
        if (singlemul <= 0 || singlemul > 20) {
                if (hz >= 1500 || (hz % 128) == 0)
                        singlemul = 1;
                else if (hz >= 750)
                        singlemul = 2;
                else
                        singlemul = 4;
        }
        if (periodic) {
                base = round_freq(timer, hz * singlemul);
                singlemul = max((base + hz / 2) / hz, 1);
                hz = (base + singlemul / 2) / singlemul;
                if (base <= 128)
                        stathz = base;
                else {
                        div = base / 128;
                        if (div >= singlemul && (div % singlemul) == 0)
                                div++;
                        stathz = base / div;
                }
                profhz = stathz;
                while ((profhz + stathz) <= 128 * 64)
                        profhz += stathz;
                profhz = round_freq(timer, profhz);
        } else {
                hz = round_freq(timer, hz);
                stathz = round_freq(timer, 127);
                profhz = round_freq(timer, stathz * 64);
        }
        tick = 1000000 / hz;
        FREQ2BT(hz, &hardperiod);
        FREQ2BT(stathz, &statperiod);
        FREQ2BT(profhz, &profperiod);
        ET_LOCK();
        configtimer(1);
        ET_UNLOCK();
}

/*
 * Start per-CPU event timers on APs.
 */
void
cpu_initclocks_ap(void)
{
        struct bintime now;
        struct pcpu_state *state;

        if (timer->et_flags & ET_FLAGS_PERCPU) {
                state = DPCPU_PTR(timerstate);
                binuptime(&now);
                ET_HW_LOCK(state);
                loadtimer(&now, 1);
                ET_HW_UNLOCK(state);
        }
}

/*
 * Switch to profiling clock rates.
 */
void
cpu_startprofclock(void)
{

        ET_LOCK();
        if (periodic) {
                configtimer(0);
                profiling = 1;
                configtimer(1);
        } else
                profiling = 1;
        ET_UNLOCK();
}

/*
 * Switch to regular clock rates.
 */
void
cpu_stopprofclock(void)
{

        ET_LOCK();
        if (periodic) {
                configtimer(0);
                profiling = 0;
                configtimer(1);
        } else
                profiling = 0;
        ET_UNLOCK();
}
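/*
 * Worked example of the periodic-mode math in cpu_initclocks_bsp() above
 * (assuming hz = 1000 and a timer that can run at exactly 2000 Hz):
 * hz is neither >= 1500 nor a multiple of 128, so singlemul = 2 and
 * base = 2000.  div = 2000 / 128 = 15, which is not a multiple of
 * singlemul, so stathz = 2000 / 15 = 133.  profhz then grows in stathz
 * steps to 8113, the largest multiple of 133 that keeps profhz + stathz
 * within 128 * 64 = 8192, before being rounded by round_freq().
 */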
/*
 * Switch to idle mode (all ticks handled).
 */
void
cpu_idleclock(void)
{
        struct bintime now, t;
        struct pcpu_state *state;

        if (idletick || busy ||
            (periodic && (timer->et_flags & ET_FLAGS_PERCPU)))
                return;
        state = DPCPU_PTR(timerstate);
        if (periodic)
                now = state->now;
        else
                binuptime(&now);
        CTR4(KTR_SPARE2, "idle at %d: now %d.%08x%08x",
            curcpu, now.sec, (unsigned int)(now.frac >> 32),
            (unsigned int)(now.frac & 0xffffffff));
        getnextcpuevent(&t, 1);
        ET_HW_LOCK(state);
        state->idle = 1;
        state->nextevent = t;
        if (!periodic)
                loadtimer(&now, 0);
        ET_HW_UNLOCK(state);
}

/*
 * Switch to active mode (skip empty ticks).
 */
void
cpu_activeclock(void)
{
        struct bintime now;
        struct pcpu_state *state;
        struct thread *td;

        state = DPCPU_PTR(timerstate);
        if (state->idle == 0 || busy)
                return;
        if (periodic)
                now = state->now;
        else
                binuptime(&now);
        CTR4(KTR_SPARE2, "active at %d: now %d.%08x%08x",
            curcpu, now.sec, (unsigned int)(now.frac >> 32),
            (unsigned int)(now.frac & 0xffffffff));
        spinlock_enter();
        td = curthread;
        td->td_intr_nesting_level++;
        handleevents(&now, 1);
        td->td_intr_nesting_level--;
        spinlock_exit();
}

#ifdef SMP
static void
cpu_new_callout(int cpu, int ticks)
{
        struct bintime tmp;
        struct pcpu_state *state;

        CTR3(KTR_SPARE2, "new co at %d: on %d in %d",
            curcpu, cpu, ticks);
        state = DPCPU_ID_PTR(cpu, timerstate);
        ET_HW_LOCK(state);
        if (state->idle == 0 || busy) {
                ET_HW_UNLOCK(state);
                return;
        }
        /*
         * If the timer is periodic, just update the next event time for
         * the target CPU.
         */
        if (periodic) {
                state->nextevent = state->nexthard;
                tmp = hardperiod;
                bintime_mul(&tmp, ticks - 1);
                bintime_add(&state->nextevent, &tmp);
                ET_HW_UNLOCK(state);
                return;
        }
        /*
         * Otherwise we have to wake that CPU up, as we can't get the
         * present bintime to reprogram the global timer from here.  If
         * the timer is per-CPU, we by definition can't do it from here.
         */
        ET_HW_UNLOCK(state);
        if (timer->et_flags & ET_FLAGS_PERCPU) {
                state->handle = 1;
                ipi_cpu(cpu, IPI_HARDCLOCK);
        } else {
                if (!cpu_idle_wakeup(cpu))
                        ipi_cpu(cpu, IPI_AST);
        }
}
#endif

/*
 * Report or change the active event timer hardware.
 */
static int
sysctl_kern_eventtimer_timer(SYSCTL_HANDLER_ARGS)
{
        char buf[32];
        struct eventtimer *et;
        int error;

        ET_LOCK();
        et = timer;
        snprintf(buf, sizeof(buf), "%s", et->et_name);
        ET_UNLOCK();
        error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
        ET_LOCK();
        et = timer;
        if (error != 0 || req->newptr == NULL ||
            strcasecmp(buf, et->et_name) == 0) {
                ET_UNLOCK();
                return (error);
        }
        et = et_find(buf, 0, 0);
        if (et == NULL) {
                ET_UNLOCK();
                return (ENOENT);
        }
        configtimer(0);
        et_free(timer);
        if (et->et_flags & ET_FLAGS_C3STOP)
                cpu_disable_deep_sleep++;
        if (timer->et_flags & ET_FLAGS_C3STOP)
                cpu_disable_deep_sleep--;
        timer = et;
        et_init(timer, timercb, NULL, NULL);
        configtimer(1);
        ET_UNLOCK();
        return (error);
}
SYSCTL_PROC(_kern_eventtimer, OID_AUTO, timer,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_eventtimer_timer, "A", "Chosen event timer");
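/*
 * Illustrative usage (available timer names are machine-dependent):
 *
 *      sysctl kern.eventtimer.timer            # report the active timer
 *      sysctl kern.eventtimer.timer=HPET       # switch to another one
 *
 * The same name may be set at boot time via the kern.eventtimer.timer
 * loader tunable declared above.
 */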
/*
 * Report or change the active event timer periodicity.
 */
static int
sysctl_kern_eventtimer_periodic(SYSCTL_HANDLER_ARGS)
{
        int error, val;

        val = periodic;
        error = sysctl_handle_int(oidp, &val, 0, req);
        if (error != 0 || req->newptr == NULL)
                return (error);
        ET_LOCK();
        configtimer(0);
        periodic = val;
        configtimer(1);
        ET_UNLOCK();
        return (error);
}
SYSCTL_PROC(_kern_eventtimer, OID_AUTO, periodic,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_eventtimer_periodic, "I", "Enable event timer periodic mode");

#endif
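/*
 * Illustrative usage: "sysctl kern.eventtimer.periodic=1" switches to
 * periodic mode at runtime (0 selects one-shot mode); the handler above
 * stops and restarts the timers through configtimer() under ET_LOCK().
 * The kern.eventtimer.periodic loader tunable sets the initial mode.
 */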