/*-
 * Copyright (c) 2010-2013 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/kern/kern_clocksource.c 360500 2020-04-30 17:51:26Z mav $");

/*
 * Common routines to manage event timer hardware.
 */

#include "opt_device_polling.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/kdb.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/timeet.h>
#include <sys/timetc.h>

#include <machine/atomic.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/smp.h>

int			cpu_disable_c2_sleep = 0; /* Timer dies in C2. */
int			cpu_disable_c3_sleep = 0; /* Timer dies in C3. */

static void		setuptimer(void);
static void		loadtimer(sbintime_t now, int first);
static int		doconfigtimer(void);
static void		configtimer(int start);
static int		round_freq(struct eventtimer *et, int freq);

static sbintime_t	getnextcpuevent(int idle);
static sbintime_t	getnextevent(void);
static int		handleevents(sbintime_t now, int fake);

static struct mtx	et_hw_mtx;

#define	ET_HW_LOCK(state)						\
	{								\
		if (timer->et_flags & ET_FLAGS_PERCPU)			\
			mtx_lock_spin(&(state)->et_hw_mtx);		\
		else							\
			mtx_lock_spin(&et_hw_mtx);			\
	}

#define	ET_HW_UNLOCK(state)						\
	{								\
		if (timer->et_flags & ET_FLAGS_PERCPU)			\
			mtx_unlock_spin(&(state)->et_hw_mtx);		\
		else							\
			mtx_unlock_spin(&et_hw_mtx);			\
	}

static struct eventtimer *timer = NULL;
static sbintime_t	timerperiod;	/* Timer period for periodic mode. */
static sbintime_t	statperiod;	/* statclock() events period. */
static sbintime_t	profperiod;	/* profclock() events period. */
static sbintime_t	nexttick;	/* Next global timer tick time. */
static u_int		busy = 1;	/* Reconfiguration is in progress. */
static int		profiling;	/* Profiling events enabled. */

static char		timername[32];	/* Wanted timer. */
TUNABLE_STR("kern.eventtimer.timer", timername, sizeof(timername));

static int		singlemul;	/* Multiplier for periodic mode. */
SYSCTL_INT(_kern_eventtimer, OID_AUTO, singlemul, CTLFLAG_RWTUN, &singlemul,
    0, "Multiplier for periodic mode");

static u_int		idletick;	/* Run periodic events when idle. */
SYSCTL_UINT(_kern_eventtimer, OID_AUTO, idletick, CTLFLAG_RWTUN, &idletick,
    0, "Run periodic events when idle");

static int		periodic;	/* Periodic or one-shot mode. */
static int		want_periodic;	/* What mode to prefer. */
TUNABLE_INT("kern.eventtimer.periodic", &want_periodic);

struct pcpu_state {
	struct mtx	et_hw_mtx;	/* Per-CPU timer mutex. */
	u_int		action;		/* Reconfiguration requests. */
	u_int		handle;		/* Immediate handle requests. */
	sbintime_t	now;		/* Last tick time. */
	sbintime_t	nextevent;	/* Next scheduled event on this CPU. */
	sbintime_t	nexttick;	/* Next timer tick time. */
	sbintime_t	nexthard;	/* Next hardclock() event. */
	sbintime_t	nextstat;	/* Next statclock() event. */
	sbintime_t	nextprof;	/* Next profclock() event. */
	sbintime_t	nextcall;	/* Next callout event. */
	sbintime_t	nextcallopt;	/* Next optional callout event. */
	int		ipi;		/* This CPU needs IPI. */
	int		idle;		/* This CPU is in idle mode. */
};

static DPCPU_DEFINE(struct pcpu_state, timerstate);
DPCPU_DEFINE(sbintime_t, hardclocktime);

/*
 * Timer broadcast IPI handler.
 */
int
hardclockintr(void)
{
	sbintime_t now;
	struct pcpu_state *state;
	int done;

	if (doconfigtimer() || busy)
		return (FILTER_HANDLED);
	state = DPCPU_PTR(timerstate);
	now = state->now;
	CTR3(KTR_SPARE2, "ipi at %d: now %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
	done = handleevents(now, 0);
	return (done ? FILTER_HANDLED : FILTER_STRAY);
}

/*
 * Handle all events for the specified time on this CPU.
 */
static int
handleevents(sbintime_t now, int fake)
{
	sbintime_t t, *hct;
	struct trapframe *frame;
	struct pcpu_state *state;
	int usermode;
	int done, runs;

	CTR3(KTR_SPARE2, "handle at %d: now %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
	done = 0;
	if (fake) {
		frame = NULL;
		usermode = 0;
	} else {
		frame = curthread->td_intr_frame;
		usermode = TRAPF_USERMODE(frame);
	}

	state = DPCPU_PTR(timerstate);

	runs = 0;
	while (now >= state->nexthard) {
		state->nexthard += tick_sbt;
		runs++;
	}
	if (runs) {
		hct = DPCPU_PTR(hardclocktime);
		*hct = state->nexthard - tick_sbt;
		if (fake < 2) {
			hardclock_cnt(runs, usermode);
			done = 1;
		}
	}
	runs = 0;
	while (now >= state->nextstat) {
		state->nextstat += statperiod;
		runs++;
	}
	if (runs && fake < 2) {
		statclock_cnt(runs, usermode);
		done = 1;
	}
	if (profiling) {
		runs = 0;
		while (now >= state->nextprof) {
			state->nextprof += profperiod;
			runs++;
		}
		if (runs && !fake) {
			profclock_cnt(runs, usermode, TRAPF_PC(frame));
			done = 1;
		}
	} else
		state->nextprof = state->nextstat;
	if (now >= state->nextcallopt || now >= state->nextcall) {
		state->nextcall = state->nextcallopt = SBT_MAX;
		callout_process(now);
	}

	t = getnextcpuevent(0);
	ET_HW_LOCK(state);
	if (!busy) {
		state->idle = 0;
		state->nextevent = t;
		loadtimer(now, (fake == 2) &&
		    (timer->et_flags & ET_FLAGS_PERCPU));
	}
	ET_HW_UNLOCK(state);
	return (done);
}

/*
 * Compute the time (binuptime) of the next event on the current CPU.
 */
static sbintime_t
getnextcpuevent(int idle)
{
	sbintime_t event;
	struct pcpu_state *state;
	u_int hardfreq;

	state = DPCPU_PTR(timerstate);
	/* Handle hardclock() events, skipping some if CPU is idle. */
	event = state->nexthard;
	if (idle) {
		hardfreq = (u_int)hz / 2;
		if (tc_min_ticktock_freq > 2
#ifdef SMP
		    && curcpu == CPU_FIRST()
#endif
		    )
			hardfreq = hz / tc_min_ticktock_freq;
		if (hardfreq > 1)
			event += tick_sbt * (hardfreq - 1);
	}
	/* Handle callout events. */
	if (event > state->nextcall)
		event = state->nextcall;
	if (!idle) {	/* If CPU is active, handle other types of events. */
		if (event > state->nextstat)
			event = state->nextstat;
		if (profiling && event > state->nextprof)
			event = state->nextprof;
	}
	return (event);
}

/*
 * Compute the time (binuptime) of the next event across all CPUs.
 */
static sbintime_t
getnextevent(void)
{
	struct pcpu_state *state;
	sbintime_t event;
#ifdef SMP
	int cpu;
#endif
#ifdef KTR
	int c;

	c = -1;
#endif
	state = DPCPU_PTR(timerstate);
	event = state->nextevent;
#ifdef SMP
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) {
		CPU_FOREACH(cpu) {
			state = DPCPU_ID_PTR(cpu, timerstate);
			if (event > state->nextevent) {
				event = state->nextevent;
#ifdef KTR
				c = cpu;
#endif
			}
		}
	}
#endif
	CTR4(KTR_SPARE2, "next at %d: next %d.%08x by %d",
	    curcpu, (int)(event >> 32), (u_int)(event & 0xffffffff), c);
	return (event);
}

/* Hardware timer callback function. */
static void
timercb(struct eventtimer *et, void *arg)
{
	sbintime_t now;
	sbintime_t *next;
	struct pcpu_state *state;
#ifdef SMP
	int cpu, bcast;
#endif

	/* Do not touch anything if somebody is reconfiguring timers. */
	if (busy)
		return;
	/* Update present and next tick times. */
	state = DPCPU_PTR(timerstate);
	if (et->et_flags & ET_FLAGS_PERCPU) {
		next = &state->nexttick;
	} else
		next = &nexttick;
	now = sbinuptime();
	if (periodic)
		*next = now + timerperiod;
	else
		*next = -1;	/* Next tick is not scheduled yet. */
	state->now = now;
	CTR3(KTR_SPARE2, "intr at %d: now %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));

#ifdef SMP
#ifdef EARLY_AP_STARTUP
	MPASS(mp_ncpus == 1 || smp_started);
#endif
	/* Prepare broadcasting to other CPUs for non-per-CPU timers. */
	bcast = 0;
#ifdef EARLY_AP_STARTUP
	if ((et->et_flags & ET_FLAGS_PERCPU) == 0) {
#else
	if ((et->et_flags & ET_FLAGS_PERCPU) == 0 && smp_started) {
#endif
		CPU_FOREACH(cpu) {
			state = DPCPU_ID_PTR(cpu, timerstate);
			ET_HW_LOCK(state);
			state->now = now;
			if (now >= state->nextevent) {
				state->nextevent += SBT_1S;
				if (curcpu != cpu) {
					state->ipi = 1;
					bcast = 1;
				}
			}
			ET_HW_UNLOCK(state);
		}
	}
#endif

	/* Handle events for this time on this CPU. */
	handleevents(now, 0);

#ifdef SMP
	/* Broadcast interrupt to other CPUs for non-per-CPU timers. */
	if (bcast) {
		CPU_FOREACH(cpu) {
			if (curcpu == cpu)
				continue;
			state = DPCPU_ID_PTR(cpu, timerstate);
			if (state->ipi) {
				state->ipi = 0;
				ipi_cpu(cpu, IPI_HARDCLOCK);
			}
		}
	}
#endif
}

/*
 * Load new value into hardware timer.
 */
static void
loadtimer(sbintime_t now, int start)
{
	struct pcpu_state *state;
	sbintime_t new;
	sbintime_t *next;
	uint64_t tmp;
	int eq;

	if (timer->et_flags & ET_FLAGS_PERCPU) {
		state = DPCPU_PTR(timerstate);
		next = &state->nexttick;
	} else
		next = &nexttick;
	if (periodic) {
		if (start) {
			/*
			 * Try to start all periodic timers aligned
			 * to period to make events synchronous.
			 */
			tmp = now % timerperiod;
			new = timerperiod - tmp;
			if (new < tmp)		/* Left less than passed. */
				new += timerperiod;
			CTR5(KTR_SPARE2, "load p at %d: now %d.%08x first in %d.%08x",
			    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff),
			    (int)(new >> 32), (u_int)(new & 0xffffffff));
			*next = new + now;
			et_start(timer, new, timerperiod);
		}
	} else {
		new = getnextevent();
		eq = (new == *next);
		CTR4(KTR_SPARE2, "load at %d: next %d.%08x eq %d",
		    curcpu, (int)(new >> 32), (u_int)(new & 0xffffffff), eq);
		if (!eq) {
			*next = new;
			et_start(timer, new - now, 0);
		}
	}
}

/*
 * Prepare event timer parameters after configuration changes.
 */
static void
setuptimer(void)
{
	int freq;

	if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
		periodic = 0;
	else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
		periodic = 1;
	singlemul = MIN(MAX(singlemul, 1), 20);
	freq = hz * singlemul;
	while (freq < (profiling ? profhz : stathz))
		freq += hz;
	freq = round_freq(timer, freq);
	timerperiod = SBT_1S / freq;
}

/*
 * Reconfigure specified per-CPU timer on other CPU.  Called from IPI handler.
 */
static int
doconfigtimer(void)
{
	sbintime_t now;
	struct pcpu_state *state;

	state = DPCPU_PTR(timerstate);
	switch (atomic_load_acq_int(&state->action)) {
	case 1:
		now = sbinuptime();
		ET_HW_LOCK(state);
		loadtimer(now, 1);
		ET_HW_UNLOCK(state);
		state->handle = 0;
		atomic_store_rel_int(&state->action, 0);
		return (1);
	case 2:
		ET_HW_LOCK(state);
		et_stop(timer);
		ET_HW_UNLOCK(state);
		state->handle = 0;
		atomic_store_rel_int(&state->action, 0);
		return (1);
	}
	if (atomic_readandclear_int(&state->handle) && !busy) {
		now = sbinuptime();
		handleevents(now, 0);
		return (1);
	}
	return (0);
}

/*
 * Reconfigure specified timer.
 * For per-CPU timers use IPI to make other CPUs reconfigure.
 */
static void
configtimer(int start)
{
	sbintime_t now, next;
	struct pcpu_state *state;
	int cpu;

	if (start) {
		setuptimer();
		now = sbinuptime();
	} else
		now = 0;
	critical_enter();
	ET_HW_LOCK(DPCPU_PTR(timerstate));
	if (start) {
		/* Initialize time machine parameters. */
		next = now + timerperiod;
		if (periodic)
			nexttick = next;
		else
			nexttick = -1;
#ifdef EARLY_AP_STARTUP
		MPASS(mp_ncpus == 1 || smp_started);
#endif
		CPU_FOREACH(cpu) {
			state = DPCPU_ID_PTR(cpu, timerstate);
			state->now = now;
#ifndef EARLY_AP_STARTUP
			if (!smp_started && cpu != CPU_FIRST())
				state->nextevent = SBT_MAX;
			else
#endif
				state->nextevent = next;
			if (periodic)
				state->nexttick = next;
			else
				state->nexttick = -1;
			state->nexthard = next;
			state->nextstat = next;
			state->nextprof = next;
			state->nextcall = next;
			state->nextcallopt = next;
			hardclock_sync(cpu);
		}
		busy = 0;
		/* Start global timer or per-CPU timer of this CPU. */
		loadtimer(now, 1);
	} else {
		busy = 1;
		/* Stop global timer or per-CPU timer of this CPU. */
		et_stop(timer);
	}
	ET_HW_UNLOCK(DPCPU_PTR(timerstate));
#ifdef SMP
#ifdef EARLY_AP_STARTUP
	/* If timer is global, we are done. */
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0) {
#else
	/* If timer is global or there are no other CPUs yet, we are done. */
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || !smp_started) {
#endif
		critical_exit();
		return;
	}
	/* Set reconfigure flags for other CPUs. */
	CPU_FOREACH(cpu) {
		state = DPCPU_ID_PTR(cpu, timerstate);
		atomic_store_rel_int(&state->action,
		    (cpu == curcpu) ? 0 : (start ? 1 : 2));
	}
	/* Broadcast reconfigure IPI. */
	ipi_all_but_self(IPI_HARDCLOCK);
	/* Wait for reconfiguration to complete. */
restart:
	cpu_spinwait();
	CPU_FOREACH(cpu) {
		if (cpu == curcpu)
			continue;
		state = DPCPU_ID_PTR(cpu, timerstate);
		if (atomic_load_acq_int(&state->action))
			goto restart;
	}
#endif
	critical_exit();
}

/*
 * Calculate nearest frequency supported by hardware timer.
 */
static int
round_freq(struct eventtimer *et, int freq)
{
	uint64_t div;

	if (et->et_frequency != 0) {
		div = lmax((et->et_frequency + freq / 2) / freq, 1);
		if (et->et_flags & ET_FLAGS_POW2DIV)
			div = 1 << (flsl(div + div / 2) - 1);
		freq = (et->et_frequency + div / 2) / div;
	}
	if (et->et_min_period > SBT_1S)
		panic("Event timer \"%s\" doesn't support sub-second periods!",
		    et->et_name);
	else if (et->et_min_period != 0)
		freq = min(freq, SBT2FREQ(et->et_min_period));
	if (et->et_max_period < SBT_1S && et->et_max_period != 0)
		freq = max(freq, SBT2FREQ(et->et_max_period));
	return (freq);
}

/*
 * Configure and start event timers (BSP part).
 */
void
cpu_initclocks_bsp(void)
{
	struct pcpu_state *state;
	int base, div, cpu;

	mtx_init(&et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
	CPU_FOREACH(cpu) {
		state = DPCPU_ID_PTR(cpu, timerstate);
		mtx_init(&state->et_hw_mtx, "et_hw_mtx", NULL, MTX_SPIN);
		state->nextcall = SBT_MAX;
		state->nextcallopt = SBT_MAX;
	}
	periodic = want_periodic;
	/* Grab the requested timer or the best one present. */
	if (timername[0])
		timer = et_find(timername, 0, 0);
	if (timer == NULL && periodic) {
		timer = et_find(NULL,
		    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
	}
	if (timer == NULL) {
		timer = et_find(NULL,
		    ET_FLAGS_ONESHOT, ET_FLAGS_ONESHOT);
	}
	if (timer == NULL && !periodic) {
		timer = et_find(NULL,
		    ET_FLAGS_PERIODIC, ET_FLAGS_PERIODIC);
	}
	if (timer == NULL)
		panic("No usable event timer found!");
	et_init(timer, timercb, NULL, NULL);

	/* Adapt to timer capabilities. */
	if (periodic && (timer->et_flags & ET_FLAGS_PERIODIC) == 0)
		periodic = 0;
	else if (!periodic && (timer->et_flags & ET_FLAGS_ONESHOT) == 0)
		periodic = 1;
	if (timer->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_c3_sleep++;

	/*
	 * We honor the requested 'hz' value.
	 * We want to run stathz in the neighborhood of 128hz.
	 * We would like profhz to run as often as possible.
	 */
	if (singlemul <= 0 || singlemul > 20) {
		if (hz >= 1500 || (hz % 128) == 0)
			singlemul = 1;
		else if (hz >= 750)
			singlemul = 2;
		else
			singlemul = 4;
	}
	if (periodic) {
		base = round_freq(timer, hz * singlemul);
		singlemul = max((base + hz / 2) / hz, 1);
		hz = (base + singlemul / 2) / singlemul;
		if (base <= 128)
			stathz = base;
		else {
			div = base / 128;
			if (div >= singlemul && (div % singlemul) == 0)
				div++;
			stathz = base / div;
		}
		profhz = stathz;
		while ((profhz + stathz) <= 128 * 64)
			profhz += stathz;
		profhz = round_freq(timer, profhz);
	} else {
		hz = round_freq(timer, hz);
		stathz = round_freq(timer, 127);
		profhz = round_freq(timer, stathz * 64);
	}
	tick = 1000000 / hz;
	tick_sbt = SBT_1S / hz;
	tick_bt = sbttobt(tick_sbt);
	statperiod = SBT_1S / stathz;
	profperiod = SBT_1S / profhz;
	ET_LOCK();
	configtimer(1);
	ET_UNLOCK();
}

/*
 * Start per-CPU event timers on APs.
 */
void
cpu_initclocks_ap(void)
{
	sbintime_t now;
	struct pcpu_state *state;
	struct thread *td;

	state = DPCPU_PTR(timerstate);
	now = sbinuptime();
	ET_HW_LOCK(state);
	state->now = now;
	hardclock_sync(curcpu);
	spinlock_enter();
	ET_HW_UNLOCK(state);
	td = curthread;
	td->td_intr_nesting_level++;
	handleevents(state->now, 2);
	td->td_intr_nesting_level--;
	spinlock_exit();
}

/*
 * Stop the active event timer(s) before suspending the system.
 */
void
suspendclock(void)
{
	ET_LOCK();
	configtimer(0);
	ET_UNLOCK();
}

/*
 * Restart the active event timer(s) after the system resumes.
 */
void
resumeclock(void)
{
	ET_LOCK();
	configtimer(1);
	ET_UNLOCK();
}

/*
 * Switch to profiling clock rates.
 */
void
cpu_startprofclock(void)
{

	ET_LOCK();
	if (profiling == 0) {
		if (periodic) {
			configtimer(0);
			profiling = 1;
			configtimer(1);
		} else
			profiling = 1;
	} else
		profiling++;
	ET_UNLOCK();
}

/*
 * Switch to regular clock rates.
 */
void
cpu_stopprofclock(void)
{

	ET_LOCK();
	if (profiling == 1) {
		if (periodic) {
			configtimer(0);
			profiling = 0;
			configtimer(1);
		} else
			profiling = 0;
	} else
		profiling--;
	ET_UNLOCK();
}

/*
 * Switch to idle mode (all ticks handled).
 */
sbintime_t
cpu_idleclock(void)
{
	sbintime_t now, t;
	struct pcpu_state *state;

	if (idletick || busy ||
	    (periodic && (timer->et_flags & ET_FLAGS_PERCPU))
#ifdef DEVICE_POLLING
	    || curcpu == CPU_FIRST()
#endif
	    )
		return (-1);
	state = DPCPU_PTR(timerstate);
	if (periodic)
		now = state->now;
	else
		now = sbinuptime();
	CTR3(KTR_SPARE2, "idle at %d: now %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
	t = getnextcpuevent(1);
	ET_HW_LOCK(state);
	state->idle = 1;
	state->nextevent = t;
	if (!periodic)
		loadtimer(now, 0);
	ET_HW_UNLOCK(state);
	return (MAX(t - now, 0));
}

/*
 * Switch to active mode (skip empty ticks).
 */
void
cpu_activeclock(void)
{
	sbintime_t now;
	struct pcpu_state *state;
	struct thread *td;

	state = DPCPU_PTR(timerstate);
	if (state->idle == 0 || busy)
		return;
	if (periodic)
		now = state->now;
	else
		now = sbinuptime();
	CTR3(KTR_SPARE2, "active at %d: now %d.%08x",
	    curcpu, (int)(now >> 32), (u_int)(now & 0xffffffff));
	spinlock_enter();
	td = curthread;
	td->td_intr_nesting_level++;
	handleevents(now, 1);
	td->td_intr_nesting_level--;
	spinlock_exit();
}

/*
 * Change the frequency of the given timer.  This changes et->et_frequency and,
 * if et is the active timer, it reconfigures the timer on all CPUs.  This is
 * intended to be a private interface for the use of et_change_frequency() only.
 */
void
cpu_et_frequency(struct eventtimer *et, uint64_t newfreq)
{

	ET_LOCK();
	if (et == timer) {
		configtimer(0);
		et->et_frequency = newfreq;
		configtimer(1);
	} else
		et->et_frequency = newfreq;
	ET_UNLOCK();
}

/*
 * Register a new earliest callout time for the given CPU and reprogram
 * the event timer if that time precedes the next scheduled event.
 */
void
cpu_new_callout(int cpu, sbintime_t bt, sbintime_t bt_opt)
{
	struct pcpu_state *state;

	/* Do not touch anything if somebody is reconfiguring timers. */
	if (busy)
		return;
	CTR6(KTR_SPARE2, "new co at %d: on %d at %d.%08x - %d.%08x",
	    curcpu, cpu, (int)(bt_opt >> 32), (u_int)(bt_opt & 0xffffffff),
	    (int)(bt >> 32), (u_int)(bt & 0xffffffff));
	state = DPCPU_ID_PTR(cpu, timerstate);
	ET_HW_LOCK(state);

	/*
	 * If there is a callout time already set earlier -- do nothing.
	 * This check may appear redundant because we check already in
	 * callout_process() but this double check guarantees we're safe
	 * with respect to race conditions between interrupts execution
	 * and scheduling.
	 */
	state->nextcallopt = bt_opt;
	if (bt >= state->nextcall)
		goto done;
	state->nextcall = bt;
	/* If there is some other event set earlier -- do nothing. */
	if (bt >= state->nextevent)
		goto done;
	state->nextevent = bt;
	/* If timer is periodic -- there is nothing to reprogram. */
	if (periodic)
		goto done;
	/* If timer is global or belongs to the current CPU -- reprogram it. */
	if ((timer->et_flags & ET_FLAGS_PERCPU) == 0 || cpu == curcpu) {
		loadtimer(sbinuptime(), 0);
done:
		ET_HW_UNLOCK(state);
		return;
	}
	/* Otherwise make the other CPU reprogram it. */
	state->handle = 1;
	ET_HW_UNLOCK(state);
#ifdef SMP
	ipi_cpu(cpu, IPI_HARDCLOCK);
#endif
}

/*
 * Report or change the active event timer hardware.
 */
static int
sysctl_kern_eventtimer_timer(SYSCTL_HANDLER_ARGS)
{
	char buf[32];
	struct eventtimer *et;
	int error;

	ET_LOCK();
	et = timer;
	snprintf(buf, sizeof(buf), "%s", et->et_name);
	ET_UNLOCK();
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	ET_LOCK();
	et = timer;
	if (error != 0 || req->newptr == NULL ||
	    strcasecmp(buf, et->et_name) == 0) {
		ET_UNLOCK();
		return (error);
	}
	et = et_find(buf, 0, 0);
	if (et == NULL) {
		ET_UNLOCK();
		return (ENOENT);
	}
	configtimer(0);
	et_free(timer);
	if (et->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_c3_sleep++;
	if (timer->et_flags & ET_FLAGS_C3STOP)
		cpu_disable_c3_sleep--;
	periodic = want_periodic;
	timer = et;
	et_init(timer, timercb, NULL, NULL);
	configtimer(1);
	ET_UNLOCK();
	return (error);
}
SYSCTL_PROC(_kern_eventtimer, OID_AUTO, timer,
    CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_eventtimer_timer, "A", "Chosen event timer");

/*
 * Report or change the active event timer periodicity.
 */
static int
sysctl_kern_eventtimer_periodic(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = periodic;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	ET_LOCK();
	configtimer(0);
	periodic = want_periodic = val;
	configtimer(1);
	ET_UNLOCK();
	return (error);
}
SYSCTL_PROC(_kern_eventtimer, OID_AUTO, periodic,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
    0, 0, sysctl_kern_eventtimer_periodic, "I", "Enable event timer periodic mode");

#include "opt_ddb.h"

#ifdef DDB
#include <ddb/ddb.h>

DB_SHOW_COMMAND(clocksource, db_show_clocksource)
{
	struct pcpu_state *st;
	int c;

	CPU_FOREACH(c) {
		st = DPCPU_ID_PTR(c, timerstate);
		db_printf(
		    "CPU %2d: action %d handle %d ipi %d idle %d\n"
		    "        now %#jx nevent %#jx (%jd)\n"
		    "        ntick %#jx (%jd) nhard %#jx (%jd)\n"
		    "        nstat %#jx (%jd) nprof %#jx (%jd)\n"
		    "        ncall %#jx (%jd) ncallopt %#jx (%jd)\n",
		    c, st->action, st->handle, st->ipi, st->idle,
		    (uintmax_t)st->now,
		    (uintmax_t)st->nextevent,
		    (uintmax_t)(st->nextevent - st->now) / tick_sbt,
		    (uintmax_t)st->nexttick,
		    (uintmax_t)(st->nexttick - st->now) / tick_sbt,
		    (uintmax_t)st->nexthard,
		    (uintmax_t)(st->nexthard - st->now) / tick_sbt,
		    (uintmax_t)st->nextstat,
		    (uintmax_t)(st->nextstat - st->now) / tick_sbt,
		    (uintmax_t)st->nextprof,
		    (uintmax_t)(st->nextprof - st->now) / tick_sbt,
		    (uintmax_t)st->nextcall,
		    (uintmax_t)(st->nextcall - st->now) / tick_sbt,
		    (uintmax_t)st->nextcallopt,
		    (uintmax_t)(st->nextcallopt - st->now) / tick_sbt);
	}
}

#endif