1// Copyright 2016 The Fuchsia Authors 2// Copyright (c) 2009 Corey Tabaka 3// 4// Use of this source code is governed by a MIT-style 5// license that can be found in the LICENSE file or at 6// https://opensource.org/licenses/MIT 7 8#include <sys/types.h> 9 10#include <assert.h> 11#include <debug.h> 12#include <err.h> 13#include <inttypes.h> 14#include <reg.h> 15#include <trace.h> 16 17#include <arch/x86.h> 18#include <arch/x86/apic.h> 19#include <arch/x86/feature.h> 20#include <arch/x86/pvclock.h> 21#include <arch/x86/timer_freq.h> 22#include <dev/interrupt.h> 23#include <fbl/algorithm.h> 24#include <kernel/cmdline.h> 25#include <kernel/spinlock.h> 26#include <kernel/thread.h> 27#include <lib/fixed_point.h> 28#include <lk/init.h> 29#include <platform.h> 30#include <platform/console.h> 31#include <platform/pc.h> 32#include <platform/pc/acpi.h> 33#include <platform/pc/hpet.h> 34#include <platform/pc/timer.h> 35#include <platform/timer.h> 36#include <pow2.h> 37#include <zircon/time.h> 38#include <zircon/types.h> 39 40#include "platform_p.h" 41 42// Current timer scheme: 43// The HPET is used to calibrate the local APIC timers and the TSC. If the 44// HPET is not present, we will fallback to calibrating using the PIT. 45// 46// For wall-time, we use the following mechanisms, in order of highest 47// preference to least: 48// 1) TSC: If the CPU advertises an invariant TSC, then we will use the TSC for 49// tracking wall time in a tickless manner. 50// 2) HPET: If there is an HPET present, we will use its count to track wall 51// time in a tickless manner. 52// 3) PIT: We will use periodic interrupts to update wall time. 53// 54// The local APICs are responsible for handling timer callbacks 55// sent from the scheduler. 
// Clock sources this platform code can use, ordered by preference elsewhere
// in this file (TSC > HPET > PIT for wall time).
enum clock_source {
    CLOCK_PIT,
    CLOCK_HPET,
    CLOCK_TSC,

    CLOCK_COUNT
};

// Human-readable names, indexed by clock_source, used in boot-time logs.
const char* clock_name[] = {
    [CLOCK_PIT] = "PIT",
    [CLOCK_HPET] = "HPET",
    [CLOCK_TSC] = "TSC",
};
static_assert(fbl::count_of(clock_name) == CLOCK_COUNT, "");

// PIT time accounting info
static struct fp_32_64 us_per_pit;       // microseconds per PIT tick (32.64 fixed point)
static volatile uint64_t pit_ticks;      // incremented by pit_timer_tick() on each PIT IRQ
static uint16_t pit_divisor;             // divisor currently programmed into PIT channel 0
static uint32_t ns_per_pit_rounded_up;   // ceil of one PIT tick in ns (for deadline rounding)

// Whether or not we have an Invariant TSC (controls whether we use the PIT or
// not after initialization). The Invariant TSC is rate-invariant under P-, C-,
// and T-state transitions.
static bool invariant_tsc;
// Whether or not we have a Constant TSC (controls whether we bother calibrating
// the TSC). Constant TSC predates the Invariant TSC. The Constant TSC is
// rate-invariant under P-state transitions.
static bool constant_tsc;

// Selected at init: which source backs current_time(), and which source is
// used as the reference while calibrating the APIC timer and TSC.
static enum clock_source wall_clock;
static enum clock_source calibration_clock;

// APIC timer calibration values
static bool use_tsc_deadline;            // true if we program the APIC in TSC-deadline mode
static uint32_t apic_ticks_per_ms = 0;
static struct fp_32_64 apic_ticks_per_ns;
static uint8_t apic_divisor = 0;         // 0 means "calibration failed"; asserted nonzero

// TSC timer calibration values
static uint64_t tsc_ticks_per_ms;
static struct fp_32_64 ns_per_tsc;
static struct fp_32_64 tsc_per_ns;
static uint32_t ns_per_tsc_rounded_up;   // ceil of one TSC tick in ns (for deadline rounding)

// HPET calibration values
static struct fp_32_64 ns_per_hpet;
static uint32_t ns_per_hpet_rounded_up;  // ceil of one HPET tick in ns (for deadline rounding)

// The PIT's input oscillator frequency in Hz, and 3x that value. The 3x value
// is used below to get an extra bit and a half of precision when computing
// divisors (see set_pit_frequency).
#define INTERNAL_FREQ 1193182U
#define INTERNAL_FREQ_3X 3579546U

#define INTERNAL_FREQ_TICKS_PER_MS (INTERNAL_FREQ / 1000)

/* Maximum amount of time that can be programmed on the timer to schedule the next
 * interrupt, in milliseconds */
#define MAX_TIMER_INTERVAL ZX_MSEC(55)

#define LOCAL_TRACE 0

// Returns the current wall time in nanoseconds, derived from whichever clock
// source was selected as wall_clock at init (TSC, HPET, or PIT tick count).
zx_time_t current_time(void) {
    zx_time_t time;

    switch (wall_clock) {
        case CLOCK_TSC: {
            uint64_t tsc = rdtsc();
            time = ticks_to_nanos(tsc);
            break;
        }
        case CLOCK_HPET: {
            uint64_t counter = hpet_get_value();
            time = u64_mul_u64_fp32_64(counter, ns_per_hpet);
            break;
        }
        case CLOCK_PIT: {
            // pit_ticks counts PIT interrupts; us_per_pit converts ticks to
            // microseconds, then scale to nanoseconds.
            time = u64_mul_u64_fp32_64(pit_ticks, us_per_pit) * 1000;
            break;
        }
        default:
            panic("Invalid wall clock source\n");
    }

    return time;
}

// Round up t to a clock tick, so that when the APIC timer fires, the wall time
// will have elapsed.
static zx_time_t discrete_time_roundup(zx_time_t t) {
    zx_duration_t value;
    switch (wall_clock) {
        case CLOCK_TSC: {
            value = ns_per_tsc_rounded_up;
            break;
        }
        case CLOCK_HPET: {
            value = ns_per_hpet_rounded_up;
            break;
        }
        case CLOCK_PIT: {
            value = ns_per_pit_rounded_up;
            break;
        }
        default:
            panic("Invalid wall clock source\n");
    }

    return zx_time_add_duration(t, value);
}

// Rate of the tick counter returned by current_ticks(), in ticks per second.
// NOTE(review): this reports the TSC rate even when wall_clock is not the TSC;
// callers appear to pair it with current_ticks(), which is always rdtsc().
zx_ticks_t ticks_per_second(void) {
    return tsc_ticks_per_ms * 1000;
}

// Raw monotonic tick counter; always the TSC on this platform.
zx_ticks_t current_ticks(void) {
    return rdtsc();
}

// Convert TSC ticks to nanoseconds using the calibrated ns_per_tsc ratio.
zx_time_t ticks_to_nanos(zx_ticks_t ticks) {
    return u64_mul_u64_fp32_64(ticks, ns_per_tsc);
}

// The PIT timer will keep track of wall time if we aren't using the TSC
static void pit_timer_tick(void* arg) {
    pit_ticks += 1;
}

// The APIC timers will call this when they fire
void platform_handle_apic_timer_tick(void) {
    timer_tick(current_time());
}

// Program PIT channel 0 for a periodic interrupt at (approximately) the given
// frequency in Hz, and record the resulting tick period for time accounting.
static void set_pit_frequency(uint32_t frequency) {
    uint32_t count, remainder;

    /* figure out the correct pit_divisor for the desired frequency */
    if (frequency <= 18) {
        // 18.2 Hz is the slowest the 16-bit divisor allows; clamp to max.
        count = 0xffff;
    } else if (frequency >= INTERNAL_FREQ) {
        count = 1;
    } else {
        // Compute the divisor at 3x frequency, rounding to nearest, then
        // divide back down by 3 (again rounding) for extra precision.
        count = INTERNAL_FREQ_3X / frequency;
        remainder = INTERNAL_FREQ_3X % frequency;

        if (remainder >= INTERNAL_FREQ_3X / 2) {
            count += 1;
        }

        count /= 3;
        remainder = count % 3;

        if (remainder >= 1) {
            count += 1;
        }
    }

    pit_divisor = count & 0xffff;

    /*
     * funky math that i don't feel like explaining. essentially 32.32 fixed
     * point representation of the configured timer delta.
     */
    fp_32_64_div_32_32(&us_per_pit, 1000 * 1000 * 3 * count, INTERNAL_FREQ_3X);

    // Add 1us to the PIT tick rate to deal with rounding
    ns_per_pit_rounded_up = (u32_mul_u64_fp32_64(1, us_per_pit) + 1) * 1000;

    //dprintf(DEBUG, "set_pit_frequency: pit_divisor=%04x\n", pit_divisor);

    /*
     * setup the Programmable Interval Timer
     * timer 0, mode 2, binary counter, LSB followed by MSB
     */
    outp(I8253_CONTROL_REG, 0x34);
    outp(I8253_DATA_REG, static_cast<uint8_t>(pit_divisor)); // LSB
    outp(I8253_DATA_REG, static_cast<uint8_t>(pit_divisor >> 8)); // MSB
}

// First half of a PIT-based calibration cycle: configure the PIT to count down
// for ms milliseconds. Only the LSB of the count is written here; the MSB
// write in pit_calibration_cycle() completes the load and starts the cycle.
static inline void pit_calibration_cycle_preamble(uint16_t ms) {
    // Make the PIT count for ms milliseconds.
    const uint16_t init_pic_count = static_cast<uint16_t>(INTERNAL_FREQ_TICKS_PER_MS * ms);
    // Program PIT in the interrupt on terminal count configuration,
    // this makes it count down and set the output high when it hits 0.
    outp(I8253_CONTROL_REG, 0x30);
    outp(I8253_DATA_REG, static_cast<uint8_t>(init_pic_count)); // LSB
}

// Second half of a PIT calibration cycle: write the MSB (which starts the
// countdown) and spin until the PIT signals terminal count.
static inline void pit_calibration_cycle(uint16_t ms) {
    // Make the PIT run for ms millis, see comments in the preamble
    const uint16_t init_pic_count = static_cast<uint16_t>(INTERNAL_FREQ_TICKS_PER_MS * ms);
    outp(I8253_DATA_REG, static_cast<uint8_t>(init_pic_count >> 8)); // MSB

    uint8_t status = 0;
    do {
        // Send a read-back command that latches the status of ch0
        outp(I8253_CONTROL_REG, 0xe2);
        status = inp(I8253_DATA_REG);
        // Wait for bit 7 (output) to go high and for bit 6 (null count) to go low
    } while ((status & 0xc0) != 0x80);
}

static inline void pit_calibration_cycle_cleanup(void) {
    // Stop the PIT by starting a mode change but not writing a counter
    outp(I8253_CONTROL_REG, 0x38);
}

// HPET equivalents of the PIT calibration-cycle helpers above.
static inline void hpet_calibration_cycle_preamble(void) {
    hpet_enable();
}

static inline void hpet_calibration_cycle(uint16_t ms) {
    hpet_wait_ms(ms);
}

static inline void hpet_calibration_cycle_cleanup(void) {
    hpet_disable();
}

// Determine the local APIC timer frequency (apic_ticks_per_ms/ns and
// apic_divisor). Uses the CPU-reported core crystal frequency when available;
// otherwise measures the APIC timer against the calibration_clock (HPET or
// PIT) by timing two different durations and taking the slope, which cancels
// the fixed start/stop overhead. Doubles apic_divisor whenever the 32-bit
// APIC count underflows during a trial.
static void calibrate_apic_timer(void) {
    ASSERT(arch_ints_disabled());

    const uint64_t apic_freq = x86_lookup_core_crystal_freq();
    if (apic_freq != 0) {
        ASSERT(apic_freq / 1000 <= UINT32_MAX);
        apic_ticks_per_ms = static_cast<uint32_t>(apic_freq / 1000);
        apic_divisor = 1;
        fp_32_64_div_32_32(&apic_ticks_per_ns, apic_ticks_per_ms, 1000 * 1000);
        printf("APIC frequency: %" PRIu32 " ticks/ms\n", apic_ticks_per_ms);
        return;
    }

    printf("Could not find APIC frequency: Calibrating APIC with %s\n",
           clock_name[calibration_clock]);

    apic_divisor = 1;
outer:
    while (apic_divisor != 0) {
        uint32_t best_time[2] = {UINT32_MAX, UINT32_MAX};
        const uint16_t duration_ms[2] = {2, 4};
        for (int trial = 0; trial < 2; ++trial) {
            for (int tries = 0; tries < 3; ++tries) {
                switch (calibration_clock) {
                    case CLOCK_HPET:
                        hpet_calibration_cycle_preamble();
                        break;
                    case CLOCK_PIT:
                        pit_calibration_cycle_preamble(duration_ms[trial]);
                        break;
                    default:
                        PANIC_UNIMPLEMENTED;
                }

                // Setup APIC timer to count down with interrupt masked
                zx_status_t status = apic_timer_set_oneshot(
                        UINT32_MAX,
                        apic_divisor,
                        true);
                ASSERT(status == ZX_OK);

                switch (calibration_clock) {
                    case CLOCK_HPET:
                        hpet_calibration_cycle(duration_ms[trial]);
                        break;
                    case CLOCK_PIT:
                        pit_calibration_cycle(duration_ms[trial]);
                        break;
                    default:
                        PANIC_UNIMPLEMENTED;
                }

                // Elapsed APIC ticks = how far the countdown got from its
                // UINT32_MAX starting value. Keep the minimum over the tries,
                // since interference can only inflate the measurement.
                uint32_t apic_ticks = UINT32_MAX - apic_timer_current_count();
                if (apic_ticks < best_time[trial]) {
                    best_time[trial] = apic_ticks;
                }
                LTRACEF("Calibration trial %d found %u ticks/ms\n",
                        tries, apic_ticks);

                switch (calibration_clock) {
                    case CLOCK_HPET:
                        hpet_calibration_cycle_cleanup();
                        break;
                    case CLOCK_PIT:
                        pit_calibration_cycle_cleanup();
                        break;
                    default:
                        PANIC_UNIMPLEMENTED;
                }
            }

            // If the APIC ran out of time every time, try again with a higher
            // divisor
            if (best_time[trial] == UINT32_MAX) {
                apic_divisor = static_cast<uint8_t>(apic_divisor * 2);
                goto outer;
            }
        }
        // Slope between the two durations cancels constant measurement
        // overhead.
        apic_ticks_per_ms = (best_time[1] - best_time[0]) / (duration_ms[1] - duration_ms[0]);
        fp_32_64_div_32_32(&apic_ticks_per_ns, apic_ticks_per_ms, 1000 * 1000);
        break;
    }
    // apic_divisor wraps to 0 after 128*2; that would mean even the largest
    // divisor underflowed, which should be impossible.
    ASSERT(apic_divisor != 0);

    printf("APIC timer calibrated: %" PRIu32 " ticks/ms, divisor %d\n",
           apic_ticks_per_ms, apic_divisor);
}

// Measure how many TSC ticks elapse over duration_ms of the calibration
// clock. Takes the best (minimum) of 3 tries; CPUID is used as a serializing
// fence so the RDTSCs aren't reordered around the timed region.
static uint64_t calibrate_tsc_count(uint16_t duration_ms) {
    uint64_t best_time = UINT64_MAX;

    for (int tries = 0; tries < 3; ++tries) {
        switch (calibration_clock) {
            case CLOCK_HPET:
                hpet_calibration_cycle_preamble();
                break;
            case CLOCK_PIT:
                pit_calibration_cycle_preamble(duration_ms);
                break;
            default:
                PANIC_UNIMPLEMENTED;
        }

        // Use CPUID to serialize the instruction stream
        uint32_t _ignored;
        cpuid(0, &_ignored, &_ignored, &_ignored, &_ignored);
        uint64_t start = rdtsc();
        cpuid(0, &_ignored, &_ignored, &_ignored, &_ignored);

        switch (calibration_clock) {
            case CLOCK_HPET:
                hpet_calibration_cycle(duration_ms);
                break;
            case CLOCK_PIT:
                pit_calibration_cycle(duration_ms);
                break;
            default:
                PANIC_UNIMPLEMENTED;
        }

        cpuid(0, &_ignored, &_ignored, &_ignored, &_ignored);
        zx_ticks_t end = rdtsc();
        cpuid(0, &_ignored, &_ignored, &_ignored, &_ignored);

        zx_ticks_t tsc_ticks = end - start;
        if (tsc_ticks < best_time) {
            best_time = tsc_ticks;
        }
        LTRACEF("Calibration trial %d found %" PRIu64 " ticks/ms\n",
                tries, tsc_ticks);
        switch (calibration_clock) {
            case CLOCK_HPET:
                hpet_calibration_cycle_cleanup();
                break;
            case CLOCK_PIT:
                pit_calibration_cycle_cleanup();
                break;
            default:
                PANIC_UNIMPLEMENTED;
        }
    }

    return best_time;
}

// Determine tsc_ticks_per_ms and derive the ns<->tsc conversion ratios.
// Prefers a directly-reported frequency (pvclock under a hypervisor, or
// CPUID/MSR lookup); otherwise measures against the calibration clock using
// the two-duration slope method to cancel fixed overhead.
static void calibrate_tsc(bool has_pvclock) {
    ASSERT(arch_ints_disabled());

    const uint64_t tsc_freq = has_pvclock ?
            pvclock_get_tsc_freq() : x86_lookup_tsc_freq();
    if (tsc_freq != 0) {
        tsc_ticks_per_ms = tsc_freq / 1000;
        printf("TSC frequency: %" PRIu64 " ticks/ms\n", tsc_ticks_per_ms);
    } else {
        printf("Could not find TSC frequency: Calibrating TSC with %s\n",
               clock_name[calibration_clock]);

        uint32_t duration_ms[2] = {2, 4};
        uint64_t best_time[2] = {
            calibrate_tsc_count(static_cast<uint16_t>(duration_ms[0])),
            calibrate_tsc_count(static_cast<uint16_t>(duration_ms[1]))};

        // The longer duration must observe more ticks than the shorter one;
        // if not, retry with progressively longer windows.
        while (best_time[0] >= best_time[1] && 2 * duration_ms[1] < MAX_TIMER_INTERVAL) {
            duration_ms[0] = duration_ms[1];
            duration_ms[1] *= 2;
            best_time[0] = best_time[1];
            best_time[1] = calibrate_tsc_count(static_cast<uint16_t>(duration_ms[1]));
        }

        ASSERT(best_time[0] < best_time[1]);

        // Slope between the two windows cancels constant overhead.
        tsc_ticks_per_ms = (best_time[1] - best_time[0]) / (duration_ms[1] - duration_ms[0]);

        printf("TSC calibrated: %" PRIu64 " ticks/ms\n", tsc_ticks_per_ms);
    }

    ASSERT(tsc_ticks_per_ms <= UINT32_MAX);
    fp_32_64_div_32_32(&ns_per_tsc, 1000 * 1000, static_cast<uint32_t>(tsc_ticks_per_ms));
    fp_32_64_div_32_32(&tsc_per_ns, static_cast<uint32_t>(tsc_ticks_per_ms), 1000 * 1000);
    // Add 1ns to conservatively deal with rounding
    ns_per_tsc_rounded_up = u32_mul_u64_fp32_64(1, ns_per_tsc) + 1;

    LTRACEF("ns_per_tsc: %08x.%08x%08x\n", ns_per_tsc.l0, ns_per_tsc.l32, ns_per_tsc.l64);
}

// Boot-time timer init (LK init hook, level VM+3): detect TSC capabilities and
// pvclock, pick the calibration clock (HPET if present, else PIT), calibrate
// the APIC timer and TSC as needed, then select and start the wall clock,
// honoring a "kernel.wallclock" command-line override if one was given.
static void pc_init_timer(uint level) {
    const struct x86_model_info* cpu_model = x86_get_model();

    constant_tsc = false;
    if (x86_vendor == X86_VENDOR_INTEL) {
        /* This condition taken from Intel 3B 17.15 (Time-Stamp Counter). This
         * is the negation of the non-Constant TSC section, since the Constant
         * TSC section is incomplete (the behavior is architectural going
         * forward, and modern CPUs are not on the list).
         */
        constant_tsc = !((cpu_model->family == 0x6 && cpu_model->model == 0x9) ||
                         (cpu_model->family == 0x6 && cpu_model->model == 0xd) ||
                         (cpu_model->family == 0xf && cpu_model->model < 0x3));
    }
    invariant_tsc = x86_feature_test(X86_FEATURE_INVAR_TSC);

    bool has_pvclock = pvclock_is_present();
    if (has_pvclock) {
        zx_status_t status = pvclock_init();
        if (status == ZX_OK) {
            // Under a hypervisor, trust pvclock's stability bit over CPUID.
            invariant_tsc = pvclock_is_stable();
        } else {
            has_pvclock = false;
        }
    }

    bool has_hpet = hpet_is_present();
    if (has_hpet) {
        calibration_clock = CLOCK_HPET;
        const uint64_t hpet_ms_rate = hpet_ticks_per_ms();
        ASSERT(hpet_ms_rate <= UINT32_MAX);
        printf("HPET frequency: %" PRIu64 " ticks/ms\n", hpet_ms_rate);
        fp_32_64_div_32_32(&ns_per_hpet, 1000 * 1000, static_cast<uint32_t>(hpet_ms_rate));
        // Add 1ns to conservatively deal with rounding
        ns_per_hpet_rounded_up = u32_mul_u64_fp32_64(1, ns_per_hpet) + 1;
    } else {
        calibration_clock = CLOCK_PIT;
    }

    const char* force_wallclock = cmdline_get("kernel.wallclock");
    bool use_invariant_tsc = invariant_tsc && (!force_wallclock || !strcmp(force_wallclock, "tsc"));

    use_tsc_deadline = use_invariant_tsc &&
            x86_feature_test(X86_FEATURE_TSC_DEADLINE);
    if (!use_tsc_deadline) {
        // No TSC-deadline mode: the APIC timer will run in oneshot count-down
        // mode, which requires knowing its tick rate.
        calibrate_apic_timer();
    }

    if (use_invariant_tsc) {
        calibrate_tsc(has_pvclock);

        // Program PIT in the software strobe configuration, but do not load
        // the count. This will pause the PIT.
        outp(I8253_CONTROL_REG, 0x38);
        wall_clock = CLOCK_TSC;
    } else {
        if (constant_tsc || invariant_tsc) {
            // Calibrate the TSC even though it's not as good as we want, so we
            // can still let folks still use it for cheap timing.
            calibrate_tsc(has_pvclock);
        }

        if (has_hpet && (!force_wallclock || !strcmp(force_wallclock, "hpet"))) {
            wall_clock = CLOCK_HPET;
            hpet_set_value(0);
            hpet_enable();
        } else {
            // PIT is the last resort; fail loudly if the user forced
            // something else we couldn't provide.
            if (force_wallclock && strcmp(force_wallclock, "pit")) {
                panic("Could not satisfy kernel.wallclock choice\n");
            }

            wall_clock = CLOCK_PIT;

            set_pit_frequency(1000); // ~1ms granularity

            uint32_t irq = apic_io_isa_to_global(ISA_IRQ_PIT);
            zx_status_t status = register_int_handler(irq, &pit_timer_tick, NULL);
            DEBUG_ASSERT(status == ZX_OK);
            unmask_interrupt(irq);
        }
    }

    printf("timer features: constant_tsc %d invariant_tsc %d tsc_deadline %d\n",
           constant_tsc, invariant_tsc, use_tsc_deadline);
    printf("Using %s as wallclock\n", clock_name[wall_clock]);
}
LK_INIT_HOOK(timer, &pc_init_timer, LK_INIT_LEVEL_VM + 3);

// Arm the local APIC timer to fire at the given absolute deadline (ns).
// Uses TSC-deadline mode when available; otherwise converts the relative
// interval to an APIC oneshot count, picking the smallest divisor that fits
// the 32-bit count register and saturating at the maximum timeout (the
// scheduler will simply re-arm when a saturated timer fires early).
zx_status_t platform_set_oneshot_timer(zx_time_t deadline) {
    DEBUG_ASSERT(arch_ints_disabled());

    if (deadline < 0) {
        deadline = 0;
    }
    // Round up to a whole wall-clock tick so the wall time has really elapsed
    // when the interrupt fires.
    deadline = discrete_time_roundup(deadline);
    DEBUG_ASSERT(deadline > 0);

    if (use_tsc_deadline) {
        // Check if the deadline would overflow the TSC.
        const uint64_t tsc_ticks_per_ns = tsc_ticks_per_ms / ZX_MSEC(1);
        if (UINT64_MAX / deadline < tsc_ticks_per_ns) {
            return ZX_ERR_INVALID_ARGS;
        }

        // We rounded up to the tick after above.
        const uint64_t tsc_deadline = u64_mul_u64_fp32_64(deadline, tsc_per_ns);
        LTRACEF("Scheduling oneshot timer: %" PRIu64 " deadline\n", tsc_deadline);
        apic_timer_set_tsc_deadline(tsc_deadline, false /* unmasked */);
        return ZX_OK;
    }

    const zx_time_t now = current_time();
    if (now >= deadline) {
        // Deadline has already passed. We still need to schedule a timer so that
        // the interrupt fires.
        LTRACEF("Scheduling oneshot timer for min duration\n");
        return apic_timer_set_oneshot(1, 1, false /* unmasked */);
    }
    const zx_duration_t interval = zx_time_sub_time(deadline, now);
    DEBUG_ASSERT(interval > 0);

    uint64_t apic_ticks_needed = u64_mul_u64_fp32_64(interval, apic_ticks_per_ns);
    if (apic_ticks_needed == 0) {
        apic_ticks_needed = 1;
    }

    // Find the shift needed for this timeout, since count is 32-bit.
    const uint highest_set_bit = log2_ulong_floor(apic_ticks_needed);
    uint8_t extra_shift = (highest_set_bit <= 31) ? 0 : static_cast<uint8_t>(highest_set_bit - 31);
    if (extra_shift > 8) {
        extra_shift = 8;
    }

    uint32_t divisor = apic_divisor << extra_shift;
    uint32_t count;
    // If the divisor is too large, we're at our maximum timeout. Saturate the
    // timer. It'll fire earlier than requested, but the scheduler will notice
    // and ask us to set the timer up again.
    if (divisor <= 128) {
        count = (uint32_t)(apic_ticks_needed >> extra_shift);
        DEBUG_ASSERT((apic_ticks_needed >> extra_shift) <= UINT32_MAX);
    } else {
        divisor = 128;
        count = UINT32_MAX;
    }

    // Make sure we're not underflowing
    if (count == 0) {
        DEBUG_ASSERT(divisor == 1);
        count = 1;
    }

    LTRACEF("Scheduling oneshot timer: %u count, %u div\n", count, divisor);
    return apic_timer_set_oneshot(count, static_cast<uint8_t>(divisor), false /* unmasked */);
}

void platform_stop_timer(void) {
    /* Enable interrupt mode that will stop the decreasing counter of the PIT */
    //outp(I8253_CONTROL_REG, 0x30);
    apic_timer_stop();
}

void platform_shutdown_timer(void) {
    DEBUG_ASSERT(arch_ints_disabled());

    // TODO(maniscalco): What should we do here? Anything?
}

// HPET counter value captured at suspend so pc_resume_timer() can restore
// wall-time continuity across a suspend/resume cycle.
static uint64_t saved_hpet_val;
void pc_prep_suspend_timer(void) {
    if (hpet_is_present()) {
        saved_hpet_val = hpet_get_value();
    }
}

// Re-start the wall clock after resume: restore the HPET count saved by
// pc_prep_suspend_timer(), or reprogram and unmask the PIT.
void pc_resume_timer(void) {
    switch (wall_clock) {
        case CLOCK_HPET:
            hpet_set_value(saved_hpet_val);
            hpet_enable();
            break;
        case CLOCK_PIT: {
            set_pit_frequency(1000); // ~1ms granularity

            uint32_t irq = apic_io_isa_to_global(ISA_IRQ_PIT);
            unmask_interrupt(irq);
            break;
        }
        default:
            // CLOCK_TSC needs no restoration; the TSC keeps counting.
            break;
    }
}