// Copyright 2016 The Fuchsia Authors
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <assert.h>
#include <stdio.h>
#include <string.h>

#include <arch/ops.h>
#include <arch/spinlock.h>
#include <arch/x86.h>
#include <arch/x86/apic.h>
#include <arch/x86/feature.h>
#include <arch/x86/interrupts.h>
#include <arch/x86/mp.h>
#include <debug.h>
#include <dev/interrupt.h>
#include <err.h>
#include <vm/vm_aspace.h>
#include <zircon/types.h>

#include <lib/console.h>

// We currently only implement support for the xAPIC

// Virtual address of the local APIC's MMIO registers.  Only mapped/used in
// legacy xAPIC mode; in x2APIC mode registers are accessed via MSRs instead.
static void* apic_virt_base;
static bool x2apic_enabled = false;

// APIC ID of the bootstrap processor, recorded by apic_local_init() when it
// runs on the BSP.  bsp_apic_id is only meaningful once bsp_apic_id_valid
// has been set.
static uint8_t bsp_apic_id;
static bool bsp_apic_id_valid;

// local apic registers
// set as an offset into the mmio region here
// x2APIC msr offsets are these >> 4
#define LAPIC_REG_ID (0x020)
#define LAPIC_REG_VERSION (0x030)
#define LAPIC_REG_TASK_PRIORITY (0x080)
#define LAPIC_REG_PROCESSOR_PRIORITY (0x0A0)
#define LAPIC_REG_EOI (0x0B0)
#define LAPIC_REG_LOGICAL_DST (0x0D0)
#define LAPIC_REG_SPURIOUS_IRQ (0x0F0)
#define LAPIC_REG_IN_SERVICE(x) (0x100 + ((x) << 4))
#define LAPIC_REG_TRIGGER_MODE(x) (0x180 + ((x) << 4))
#define LAPIC_REG_IRQ_REQUEST(x) (0x200 + ((x) << 4))
#define LAPIC_REG_ERROR_STATUS (0x280)
#define LAPIC_REG_LVT_CMCI (0x2F0)
#define LAPIC_REG_IRQ_CMD_LOW (0x300)
#define LAPIC_REG_IRQ_CMD_HIGH (0x310)
#define LAPIC_REG_LVT_TIMER (0x320)
#define LAPIC_REG_LVT_THERMAL (0x330)
#define LAPIC_REG_LVT_PERF (0x340)
#define LAPIC_REG_LVT_LINT0 (0x350)
#define LAPIC_REG_LVT_LINT1 (0x360)
#define LAPIC_REG_LVT_ERROR (0x370)
#define LAPIC_REG_INIT_COUNT (0x380)
#define LAPIC_REG_CURRENT_COUNT (0x390)
#define LAPIC_REG_DIVIDE_CONF (0x3E0)

// x2APIC MSR addresses.  The ICR is a single 64-bit MSR here (no separate
// high/low halves as in xAPIC mode), and SELF_IPI has no MMIO equivalent.
#define LAPIC_X2APIC_MSR_BASE (0x800)
#define LAPIC_X2APIC_MSR_ICR (0x830)
#define LAPIC_X2APIC_MSR_SELF_IPI (0x83f)

// Spurious IRQ bitmasks
#define SVR_APIC_ENABLE (1 << 8)
#define SVR_SPURIOUS_VECTOR(x) (x)

// Interrupt Command bitmasks
#define ICR_VECTOR(x) (x)
#define ICR_DELIVERY_PENDING (1 << 12)
#define ICR_LEVEL_ASSERT (1 << 14)
#define ICR_DST(x) (((uint32_t)(x)) << 24)
#define ICR_DST_BROADCAST ICR_DST(0xff)
#define ICR_DELIVERY_MODE(x) (((uint32_t)(x)) << 8)
#define ICR_DST_SHORTHAND(x) (((uint32_t)(x)) << 18)
#define ICR_DST_SELF ICR_DST_SHORTHAND(1)
#define ICR_DST_ALL ICR_DST_SHORTHAND(2)
#define ICR_DST_ALL_MINUS_SELF ICR_DST_SHORTHAND(3)

// In x2APIC mode the ICR destination field occupies the upper 32 bits of
// the 64-bit ICR MSR.
#define X2_ICR_DST(x) ((uint64_t)(x) << 32)
#define X2_ICR_BROADCAST ((uint64_t)(0xffffffff) << 32)

// Common LVT bitmasks
#define LVT_VECTOR(x) (x)
#define LVT_DELIVERY_MODE(x) (((uint32_t)(x)) << 8)
#define LVT_DELIVERY_PENDING (1 << 12)

static void apic_error_init(void);
static void apic_timer_init(void);
static void apic_pmi_init(void);

// Read a local APIC register.  |offset| is the xAPIC MMIO offset; in x2APIC
// mode the corresponding MSR index is (offset >> 4) from the MSR base.
static uint32_t lapic_reg_read(size_t offset) {
    if (x2apic_enabled) {
        return read_msr32(LAPIC_X2APIC_MSR_BASE + (uint32_t)(offset >> 4));
    } else {
        return *((volatile uint32_t*)((uintptr_t)apic_virt_base + offset));
    }
}

// Write a local APIC register; see lapic_reg_read() for offset conventions.
static void lapic_reg_write(size_t offset, uint32_t val) {
    if (x2apic_enabled) {
        write_msr(LAPIC_X2APIC_MSR_BASE + (uint32_t)(offset >> 4), val);
    } else {
        *((volatile uint32_t*)((uintptr_t)apic_virt_base + offset)) = val;
    }
}

// Read-modify-write helpers.  These are not atomic by themselves; the
// callers in this file mask interrupts around them.
static void lapic_reg_or(size_t offset, uint32_t bits) {
    lapic_reg_write(offset, lapic_reg_read(offset) | bits);
}

static void lapic_reg_and(size_t offset, uint32_t bits) {
    lapic_reg_write(offset, lapic_reg_read(offset) & bits);
}

// This function must be called once on the kernel address space.  It maps
// the xAPIC MMIO aperture; in x2APIC mode there is nothing to map.
void apic_vm_init(void) {
    // only memory map the aperture if we're using the legacy mmio interface
    if (!x2apic_enabled) {
        ASSERT(apic_virt_base == nullptr);
        // Create a mapping for the page of MMIO registers
        zx_status_t res = VmAspace::kernel_aspace()->AllocPhysical(
            "lapic",
            PAGE_SIZE,        // size
            &apic_virt_base,  // returned virtual address
            PAGE_SIZE_SHIFT,  // alignment log2
            APIC_PHYS_BASE,   // physical address
            0,                // vmm flags
            ARCH_MMU_FLAG_PERM_READ | ARCH_MMU_FLAG_PERM_WRITE |
                ARCH_MMU_FLAG_UNCACHED_DEVICE); // arch mmu flags
        if (res != ZX_OK) {
            panic("Could not allocate APIC management page: %d\n", res);
        }
        ASSERT(apic_virt_base != nullptr);
    }
}

// Initializes the current processor's local APIC. Should be called after
// apic_vm_init has been called.  Must run with interrupts disabled.
void apic_local_init(void) {
    DEBUG_ASSERT(arch_ints_disabled());

    uint64_t v = read_msr(X86_MSR_IA32_APIC_BASE);

    // if were the boot processor, test and cache x2apic ability
    if (v & IA32_APIC_BASE_BSP) {
        if (x86_feature_test(X86_FEATURE_X2APIC)) {
            dprintf(SPEW, "x2APIC enabled\n");
            x2apic_enabled = true;
        }
    }

    // Enter xAPIC or x2APIC mode and set the base address
    v |= IA32_APIC_BASE_XAPIC_ENABLE;
    v |= x2apic_enabled ? IA32_APIC_BASE_X2APIC_ENABLE : 0;
    write_msr(X86_MSR_IA32_APIC_BASE, v);

    // If this is the bootstrap processor, we should record our APIC ID now
    // that we know it.
    if (v & IA32_APIC_BASE_BSP) {
        uint8_t id = apic_local_id();

        bsp_apic_id = id;
        bsp_apic_id_valid = true;
        x86_set_local_apic_id(id);
    }

    // Specify the spurious interrupt vector and enable the local APIC
    uint32_t svr = SVR_SPURIOUS_VECTOR(X86_INT_APIC_SPURIOUS) | SVR_APIC_ENABLE;
    lapic_reg_write(LAPIC_REG_SPURIOUS_IRQ, svr);

    apic_error_init();
    apic_timer_init();
    apic_pmi_init();
}

// Returns the APIC ID of the processor this is executed on.
uint8_t apic_local_id(void) {
    uint32_t id = lapic_reg_read(LAPIC_REG_ID);

    // legacy apic stores the id in the top 8 bits of the register
    if (!x2apic_enabled)
        id >>= 24;

    // we can only deal with 8 bit apic ids right now
    DEBUG_ASSERT(id < 256);

    return (uint8_t)id;
}

// Returns the bootstrap processor's APIC ID.  Only valid after
// apic_local_init() has run on the BSP (asserted via bsp_apic_id_valid).
uint8_t apic_bsp_id(void) {
    DEBUG_ASSERT(bsp_apic_id_valid);
    return bsp_apic_id;
}

// Spin until the local APIC clears the delivery-pending bit, indicating the
// previously issued ICR command has been accepted.  xAPIC (MMIO) mode only;
// the x2APIC ICR MSR write paths do not need this.
static inline void apic_wait_for_ipi_send(void) {
    while (lapic_reg_read(LAPIC_REG_IRQ_CMD_LOW) & ICR_DELIVERY_PENDING)
        ;
}

// We only support physical destination modes for now

// Send an IPI with vector |vector| and delivery mode |dm| to the processor
// whose APIC ID is |dst_apic_id|.
void apic_send_ipi(
    uint8_t vector,
    uint32_t dst_apic_id,
    enum apic_interrupt_delivery_mode dm) {
    // we only support 8 bit apic ids
    // NOTE(review): this rejects id 255 as well; presumably intentional since
    // 0xff is the xAPIC broadcast destination — confirm before relaxing.
    DEBUG_ASSERT(dst_apic_id < UINT8_MAX);

    uint32_t request = ICR_VECTOR(vector) | ICR_LEVEL_ASSERT;
    request |= ICR_DELIVERY_MODE(dm);

    // Mask interrupts so the two-register ICR write sequence (xAPIC mode)
    // cannot be interleaved with another send on this CPU.
    spin_lock_saved_state_t state;
    arch_interrupt_save(&state, 0);
    if (x2apic_enabled) {
        write_msr(LAPIC_X2APIC_MSR_ICR, X2_ICR_DST(dst_apic_id) | request);
    } else {
        lapic_reg_write(LAPIC_REG_IRQ_CMD_HIGH, ICR_DST(dst_apic_id));
        lapic_reg_write(LAPIC_REG_IRQ_CMD_LOW, request);
        apic_wait_for_ipi_send();
    }
    arch_interrupt_restore(state, 0);
}

// Send an IPI with vector |vector| to the calling processor itself.
void apic_send_self_ipi(uint8_t vector, enum apic_interrupt_delivery_mode dm) {
    uint32_t request = ICR_VECTOR(vector) | ICR_LEVEL_ASSERT;
    request |= ICR_DELIVERY_MODE(dm) | ICR_DST_SELF;

    spin_lock_saved_state_t state;
    arch_interrupt_save(&state, 0);
    if (x2apic_enabled) {
        // special register for triggering self ipis
        // NOTE(review): only the vector is written here, so |dm| is ignored
        // on the x2APIC path (the SELF_IPI MSR takes just a vector).
        write_msr(LAPIC_X2APIC_MSR_SELF_IPI, vector);
    } else {
        lapic_reg_write(LAPIC_REG_IRQ_CMD_LOW, request);
        apic_wait_for_ipi_send();
    }
    arch_interrupt_restore(state, 0);
}

// Broadcast to everyone including self
void apic_send_broadcast_self_ipi(
    uint8_t vector,
    enum apic_interrupt_delivery_mode dm) {
    uint32_t request = ICR_VECTOR(vector) | ICR_LEVEL_ASSERT;
    request |= ICR_DELIVERY_MODE(dm) | ICR_DST_ALL;

    spin_lock_saved_state_t state;
    arch_interrupt_save(&state, 0);
    if (x2apic_enabled) {
        write_msr(LAPIC_X2APIC_MSR_ICR, X2_ICR_BROADCAST | request);
    } else {
        lapic_reg_write(LAPIC_REG_IRQ_CMD_HIGH, ICR_DST_BROADCAST);
        lapic_reg_write(LAPIC_REG_IRQ_CMD_LOW, request);
        apic_wait_for_ipi_send();
    }
    arch_interrupt_restore(state, 0);
}

// Broadcast to everyone excluding self
void apic_send_broadcast_ipi(
    uint8_t vector,
    enum apic_interrupt_delivery_mode dm) {
    uint32_t request = ICR_VECTOR(vector) | ICR_LEVEL_ASSERT;
    request |= ICR_DELIVERY_MODE(dm) | ICR_DST_ALL_MINUS_SELF;

    spin_lock_saved_state_t state;
    arch_interrupt_save(&state, 0);
    if (x2apic_enabled) {
        write_msr(LAPIC_X2APIC_MSR_ICR, X2_ICR_BROADCAST | request);
    } else {
        lapic_reg_write(LAPIC_REG_IRQ_CMD_HIGH, ICR_DST_BROADCAST);
        lapic_reg_write(LAPIC_REG_IRQ_CMD_LOW, request);
        apic_wait_for_ipi_send();
    }
    arch_interrupt_restore(state, 0);
}

// Signal end-of-interrupt for the interrupt currently being serviced.
void apic_issue_eoi(void) {
    // Write 0 to the EOI address to issue an EOI
    lapic_reg_write(LAPIC_REG_EOI, 0);
}

// If this function returns an error, timer state will not have
// been changed.
// Program the timer divide-configuration register from a power-of-two
// divisor in [1, 128].  Returns ZX_ERR_INVALID_ARGS (leaving the hardware
// untouched) for any other value.
static zx_status_t apic_timer_set_divide_value(uint8_t v) {
    uint32_t new_value = 0;
    // Map the divisor to the hardware's non-contiguous encoding.
    switch (v) {
        case 1:
            new_value = 0xb;
            break;
        case 2:
            new_value = 0x0;
            break;
        case 4:
            new_value = 0x1;
            break;
        case 8:
            new_value = 0x2;
            break;
        case 16:
            new_value = 0x3;
            break;
        case 32:
            new_value = 0x8;
            break;
        case 64:
            new_value = 0x9;
            break;
        case 128:
            new_value = 0xa;
            break;
        default:
            return ZX_ERR_INVALID_ARGS;
    }
    lapic_reg_write(LAPIC_REG_DIVIDE_CONF, new_value);
    return ZX_OK;
}

// Program the timer LVT entry with our vector, masked so no interrupt fires
// until a caller explicitly arms and unmasks the timer.
static void apic_timer_init(void) {
    lapic_reg_write(LAPIC_REG_LVT_TIMER, LVT_VECTOR(X86_INT_APIC_TIMER) | LVT_MASKED);
}

// Racy; primarily useful for calibrating the timer.
uint32_t apic_timer_current_count(void) {
    return lapic_reg_read(LAPIC_REG_CURRENT_COUNT);
}

// Mask the timer LVT entry so timer interrupts are suppressed.
void apic_timer_mask(void) {
    spin_lock_saved_state_t state;
    // Interrupts are masked around the non-atomic read-modify-write.
    arch_interrupt_save(&state, 0);
    lapic_reg_or(LAPIC_REG_LVT_TIMER, LVT_MASKED);
    arch_interrupt_restore(state, 0);
}

// Unmask the timer LVT entry so timer interrupts can be delivered.
void apic_timer_unmask(void) {
    spin_lock_saved_state_t state;
    arch_interrupt_save(&state, 0);
    lapic_reg_and(LAPIC_REG_LVT_TIMER, ~LVT_MASKED);
    arch_interrupt_restore(state, 0);
}

// Stop the timer by zeroing the initial count, and clear any pending TSC
// deadline if the processor supports deadline mode.
void apic_timer_stop(void) {
    spin_lock_saved_state_t state;
    arch_interrupt_save(&state, 0);
    lapic_reg_write(LAPIC_REG_INIT_COUNT, 0);
    if (x86_feature_test(X86_FEATURE_TSC_DEADLINE)) {
        write_msr(X86_MSR_IA32_TSC_DEADLINE, 0);
    }
    arch_interrupt_restore(state, 0);
}

// Arm the timer in one-shot mode: |count| ticks at 1/|divisor| of the bus
// clock.  If |masked| is set the LVT entry is programmed masked so no
// interrupt fires when the count expires.  On error (bad divisor), timer
// state is left unchanged.
zx_status_t apic_timer_set_oneshot(uint32_t count, uint8_t divisor, bool masked) {
    zx_status_t status = ZX_OK;
    uint32_t timer_config = LVT_VECTOR(X86_INT_APIC_TIMER) |
                            LVT_TIMER_MODE_ONESHOT;
    if (masked) {
        timer_config |= LVT_MASKED;
    }

    spin_lock_saved_state_t state;
    arch_interrupt_save(&state, 0);

    status = apic_timer_set_divide_value(divisor);
    if (status != ZX_OK) {
        goto cleanup;
    }
    lapic_reg_write(LAPIC_REG_LVT_TIMER, timer_config);
    lapic_reg_write(LAPIC_REG_INIT_COUNT, count);
cleanup:
    arch_interrupt_restore(state, 0);
    return status;
}

// Arm the timer in TSC-deadline mode to fire when the TSC reaches
// |deadline|.  Requires the TSC_DEADLINE feature (asserted below).
void apic_timer_set_tsc_deadline(uint64_t deadline, bool masked) {
    DEBUG_ASSERT(x86_feature_test(X86_FEATURE_TSC_DEADLINE));

    uint32_t timer_config = LVT_VECTOR(X86_INT_APIC_TIMER) |
                            LVT_TIMER_MODE_TSC_DEADLINE;
    if (masked) {
        timer_config |= LVT_MASKED;
    }

    spin_lock_saved_state_t state;
    arch_interrupt_save(&state, 0);

    lapic_reg_write(LAPIC_REG_LVT_TIMER, timer_config);
    // Intel recommends using an MFENCE to ensure the LVT_TIMER_ADDR write
    // takes before the write_msr(), since writes to this MSR are ignored if the
    // time mode is not DEADLINE.
    mb();
    write_msr(X86_MSR_IA32_TSC_DEADLINE, deadline);

    arch_interrupt_restore(state, 0);
}

// Arm the timer in periodic mode: fire every |count| ticks at the given
// |divisor|.  On error (bad divisor), timer state is left unchanged.
zx_status_t apic_timer_set_periodic(uint32_t count, uint8_t divisor) {
    zx_status_t status = ZX_OK;
    spin_lock_saved_state_t state;
    arch_interrupt_save(&state, 0);

    status = apic_timer_set_divide_value(divisor);
    if (status != ZX_OK) {
        goto cleanup;
    }
    lapic_reg_write(LAPIC_REG_LVT_TIMER, LVT_VECTOR(X86_INT_APIC_TIMER) | LVT_TIMER_MODE_PERIODIC);
    lapic_reg_write(LAPIC_REG_INIT_COUNT, count);
cleanup:
    arch_interrupt_restore(state, 0);
    return status;
}

// Timer interrupt entry point: forward the tick to the platform layer.
void apic_timer_interrupt_handler(void) {
    platform_handle_apic_timer_tick();
}

// Program the error LVT entry with our vector (unmasked).
static void apic_error_init(void) {
    lapic_reg_write(LAPIC_REG_LVT_ERROR, LVT_VECTOR(X86_INT_APIC_ERROR));
    // Re-arm the error interrupt triggering mechanism
    lapic_reg_write(LAPIC_REG_ERROR_STATUS, 0);
}

// APIC error interrupt entry point: read the error status and panic with it.
void apic_error_interrupt_handler(void) {
    DEBUG_ASSERT(arch_ints_disabled());

    // This write doesn't affect the subsequent read, but is required prior to
    // reading.
    lapic_reg_write(LAPIC_REG_ERROR_STATUS, 0);
    panic("APIC error detected: %u\n", lapic_reg_read(LAPIC_REG_ERROR_STATUS));
}

// Program the performance-monitor LVT entry with our vector, masked until a
// caller unmasks it via apic_pmi_unmask().
static void apic_pmi_init(void) {
    lapic_reg_write(LAPIC_REG_LVT_PERF, LVT_VECTOR(X86_INT_APIC_PMI) | LVT_MASKED);
}

// Mask the performance-monitor LVT entry.
void apic_pmi_mask(void) {
    spin_lock_saved_state_t state;
    arch_interrupt_save(&state, 0);
    lapic_reg_or(LAPIC_REG_LVT_PERF, LVT_MASKED);
    arch_interrupt_restore(state, 0);
}

// Unmask the performance-monitor LVT entry.
void apic_pmi_unmask(void) {
    spin_lock_saved_state_t state;
    arch_interrupt_save(&state, 0);
    lapic_reg_and(LAPIC_REG_LVT_PERF, ~LVT_MASKED);
    arch_interrupt_restore(state, 0);
}

// Kernel console "apic" command: dump local/IO APIC state, or fire a test
// IPI (broadcast or self) with a caller-supplied vector.
static int cmd_apic(int argc, const cmd_args* argv, uint32_t flags) {
    if (argc < 2) {
    notenoughargs:
        printf("not enough arguments\n");
    usage:
        printf("usage:\n");
        printf("%s dump io\n", argv[0].str);
        printf("%s dump local\n", argv[0].str);
        printf("%s broadcast <vec>\n", argv[0].str);
        printf("%s self <vec>\n", argv[0].str);
        return ZX_ERR_INTERNAL;
    }

    if (!strcmp(argv[1].str, "broadcast")) {
        if (argc < 3)
            goto notenoughargs;
        uint8_t vec = (uint8_t)argv[2].u;
        apic_send_broadcast_ipi(vec, DELIVERY_MODE_FIXED);
        // Each IRR/ISR register covers 32 vectors, hence vec / 32.
        printf("irr: %x\n", lapic_reg_read(LAPIC_REG_IRQ_REQUEST(vec / 32)));
        printf("isr: %x\n", lapic_reg_read(LAPIC_REG_IN_SERVICE(vec / 32)));
        printf("icr: %x\n", lapic_reg_read(LAPIC_REG_IRQ_CMD_LOW));
    } else if (!strcmp(argv[1].str, "self")) {
        if (argc < 3)
            goto notenoughargs;
        uint8_t vec = (uint8_t)argv[2].u;
        apic_send_self_ipi(vec, DELIVERY_MODE_FIXED);
        printf("irr: %x\n", lapic_reg_read(LAPIC_REG_IRQ_REQUEST(vec / 32)));
        printf("isr: %x\n", lapic_reg_read(LAPIC_REG_IN_SERVICE(vec / 32)));
        printf("icr: %x\n", lapic_reg_read(LAPIC_REG_IRQ_CMD_LOW));
    } else if (!strcmp(argv[1].str, "dump")) {
        if (argc < 3)
            goto notenoughargs;
        if (!strcmp(argv[2].str, "local")) {
            printf("Caution: this is only for one CPU\n");
            apic_local_debug();
        } else if (!strcmp(argv[2].str, "io")) {
            apic_io_debug();
        } else {
            printf("unknown subcommand\n");
            goto usage;
        }
    } else {
        printf("unknown command\n");
        goto usage;
    }

    return ZX_OK;
}

// Dump the calling CPU's local APIC state (id, version, priorities, and the
// full IRR/ISR register banks) to the console.
void apic_local_debug(void) {
    spin_lock_saved_state_t state;
    arch_interrupt_save(&state, 0);

    printf("apic %02x:\n", apic_local_id());
    printf("  version: %08x:\n", lapic_reg_read(LAPIC_REG_VERSION));
    printf("  logical_dst: %08x\n", lapic_reg_read(LAPIC_REG_LOGICAL_DST));
    printf("  spurious_irq: %08x\n", lapic_reg_read(LAPIC_REG_SPURIOUS_IRQ));
    printf("  tpr: %02x\n", (uint8_t)lapic_reg_read(LAPIC_REG_TASK_PRIORITY));
    printf("  ppr: %02x\n", (uint8_t)lapic_reg_read(LAPIC_REG_PROCESSOR_PRIORITY));
    // 8 x 32-bit registers cover the 256 possible vectors.
    for (int i = 0; i < 8; ++i)
        printf("  irr %d: %08x\n", i, lapic_reg_read(LAPIC_REG_IRQ_REQUEST(i)));
    for (int i = 0; i < 8; ++i)
        printf("  isr %d: %08x\n", i, lapic_reg_read(LAPIC_REG_IN_SERVICE(i)));

    arch_interrupt_restore(state, 0);
}

STATIC_COMMAND_START
#if LK_DEBUGLEVEL > 0
STATIC_COMMAND("apic", "apic commands", &cmd_apic)
#endif
STATIC_COMMAND_END(apic);