/* mp_x86.c revision 117928 */
1/* 2 * Copyright (c) 1996, by Steve Passe 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. The name of the developer may NOT be used to endorse or promote products 11 * derived from this software without specific prior written permission. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/mp_machdep.c 117928 2003-07-23 19:04:28Z jhb $");

#include "opt_cpu.h"
#include "opt_kstack_pages.h"

#ifdef SMP
#include <machine/smptests.h>
#else
#if !defined(lint)
#error
#endif
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/cons.h>	/* cngetc() */
#ifdef GPROF
#include <sys/gmon.h>
#endif
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>

#include <machine/apic.h>
#include <machine/atomic.h>
#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/mpapic.h>
#include <machine/psl.h>
#include <machine/segments.h>
#include <machine/smp.h>
#include <machine/smptests.h>	/** TEST_DEFAULT_CONFIG, TEST_TEST1 */
#include <machine/tss.h>
#include <machine/specialreg.h>
#include <machine/privatespace.h>

#if defined(APIC_IO)
#include <machine/md_var.h>		/* setidt() */
#include <i386/isa/icu.h>		/* IPIs */
#include <i386/isa/intr_machdep.h>	/* IPIs */
#endif	/* APIC_IO */

/*
 * MPFPS_MPFB1 is the MP spec "default configuration type" byte; it can be
 * overridden at compile time with TEST_DEFAULT_CONFIG for testing.
 */
#if defined(TEST_DEFAULT_CONFIG)
#define MPFPS_MPFB1	TEST_DEFAULT_CONFIG
#else
#define MPFPS_MPFB1	mpfps->mpfb1
#endif	/* TEST_DEFAULT_CONFIG */

/* BIOS warm-boot vector locations (in the kernel's mapping of low memory) */
#define WARMBOOT_TARGET	0
#define WARMBOOT_OFF	(KERNBASE + 0x0467)
#define WARMBOOT_SEG	(KERNBASE + 0x0469)

/* physical range searched for the MP floating pointer signature */
#ifdef PC98
#define BIOS_BASE	(0xe8000)
#define BIOS_SIZE	(0x18000)
#else
#define BIOS_BASE	(0xf0000)
#define BIOS_SIZE	(0x10000)
#endif
#define BIOS_COUNT	(BIOS_SIZE/4)	/* size in 32-bit words */

/* CMOS index port; the data port and reset codes follow below */
#define CMOS_REG	(0x70)
#define CMOS_DATA	(0x71)
#define BIOS_RESET	(0x0f)	/* CMOS shutdown-status register */
#define BIOS_WARM	(0x0a)	/* shutdown code: jump via 40:67 vector */

/* flag bits in the MP table processor / IO APIC entries */
#define PROCENTRY_FLAG_EN	0x01
#define PROCENTRY_FLAG_BP	0x02
#define IOAPICENTRY_FLAG_EN	0x01


/* MP Floating Pointer Structure */
typedef struct MPFPS {
	char	signature[4];
	void	*pap;		/* physical address of the MP config table */
	u_char	length;
	u_char	spec_rev;
	u_char	checksum;
	u_char	mpfb1;		/* default configuration type (0 == none) */
	u_char	mpfb2;		/* bit 7 set == PIC (IMCR) mode */
	u_char	mpfb3;
	u_char	mpfb4;
	u_char	mpfb5;
}      *mpfps_t;

/* MP Configuration Table Header */
typedef struct MPCTH {
	char	signature[4];
	u_short	base_table_length;
	u_char	spec_rev;
	u_char	checksum;
	u_char	oem_id[8];
	u_char	product_id[12];
	void	*oem_table_pointer;
	u_short	oem_table_size;
	u_short	entry_count;	/* number of base-table entries that follow */
	void	*apic_address;	/* physical address of the local APIC */
	u_short	extended_table_length;
	u_char	extended_table_checksum;
	u_char	reserved;
}      *mpcth_t;


/* base-table entry, type 0: processor */
typedef struct PROCENTRY {
	u_char	type;
	u_char	apic_id;
	u_char	apic_version;
	u_char	cpu_flags;	/* PROCENTRY_FLAG_EN / _BP */
	u_long	cpu_signature;
	u_long	feature_flags;
	u_long	reserved1;
	u_long	reserved2;
}      *proc_entry_ptr;

/* base-table entry, type 1: bus */
typedef struct BUSENTRY {
	u_char	type;
	u_char	bus_id;
	char	bus_type[6];	/* ASCII name, e.g. "PCI", "ISA" */
}      *bus_entry_ptr;

/* base-table entry, type 2: IO APIC */
typedef struct IOAPICENTRY {
	u_char	type;
	u_char	apic_id;
	u_char	apic_version;
	u_char	apic_flags;	/* IOAPICENTRY_FLAG_EN */
	void	*apic_address;	/* physical address of this IO APIC */
}      *io_apic_entry_ptr;

/* base-table entry, types 3 and 4: IO / local interrupt assignment */
typedef struct INTENTRY {
	u_char	type;
	u_char	int_type;
	u_short	int_flags;
	u_char	src_bus_id;
	u_char	src_bus_irq;
	u_char	dst_apic_id;
	u_char	dst_apic_int;
}      *int_entry_ptr;

/* descriptions of MP basetable entries */
typedef struct BASETABLE_ENTRY {
	u_char	type;
	u_char	length;		/* size in bytes, used to step the table walk */
	char	name[16];
}       basetable_entry;

/*
 * this code MUST be enabled here and in mpboot.s.
 * it follows the very early stages of AP boot by placing values in CMOS ram.
 * it NORMALLY will never be needed and thus the primitive method for enabling.
 *
#define CHECK_POINTS
 */

#if defined(CHECK_POINTS) && !defined(PC98)
#define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
#define CHECK_WRITE(A,D)  (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))

#define CHECK_INIT(D);				\
	CHECK_WRITE(0x34, (D));			\
	CHECK_WRITE(0x35, (D));			\
	CHECK_WRITE(0x36, (D));			\
	CHECK_WRITE(0x37, (D));			\
	CHECK_WRITE(0x38, (D));			\
	CHECK_WRITE(0x39, (D));

#define CHECK_PRINT(S);				\
	printf("%s: %d, %d, %d, %d, %d, %d\n",	\
	   (S),					\
	   CHECK_READ(0x34),			\
	   CHECK_READ(0x35),			\
	   CHECK_READ(0x36),			\
	   CHECK_READ(0x37),			\
	   CHECK_READ(0x38),			\
	   CHECK_READ(0x39));

#else				/* CHECK_POINTS */

#define CHECK_INIT(D)
#define CHECK_PRINT(S)

#endif				/* CHECK_POINTS */

/*
 * Values to send to the POST hardware.
 */
#define MP_BOOTADDRESS_POST	0x10
#define MP_PROBE_POST		0x11
#define MPTABLE_PASS1_POST	0x12

#define MP_START_POST		0x13
#define MP_ENABLE_POST		0x14
#define MPTABLE_PASS2_POST	0x15

#define START_ALL_APS_POST	0x16
#define INSTALL_AP_TRAMP_POST	0x17
#define START_AP_POST		0x18

#define MP_ANNOUNCE_POST	0x19

/* HyperThreading fixup state (see mptable_hyperthread_fixup()) */
static int need_hyperthreading_fixup;
static u_int logical_cpus;	/* logical CPUs per physical package */
static u_int logical_cpus_mask;

/* used to hold the AP's until we are ready to release them */
static struct mtx ap_boot_mtx;

/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
int	current_postcode;

/** XXX FIXME: what system files declare these???
 */
extern struct region_descriptor r_gdt, r_idt;

int	bsp_apic_ready = 0;	/* flags useability of BSP apic */
int	mp_naps;		/* # of Applications processors */
int	mp_nbusses;		/* # of busses */
int	mp_napics;		/* # of IO APICs */
int	boot_cpu_id;		/* designated BSP */
vm_offset_t cpu_apic_address;
vm_offset_t io_apic_address[NAPICID];	/* NAPICID is more than enough */
extern	int nkpt;

u_int32_t cpu_apic_versions[MAXCPU];
u_int32_t *io_apic_versions;	/* allocated in mptable_pass2() */

#ifdef APIC_INTR_REORDER
struct {
	volatile int *location;
	int bit;
} apic_isrbit_location[32];
#endif

struct apic_intmapinfo int_to_apicintpin[APIC_INTMAPSIZE];

/*
 * APIC ID logical/physical mapping structures.
 * We oversize these to simplify boot-time config.
 */
int	cpu_num_to_apic_id[NAPICID];
int	io_num_to_apic_id[NAPICID];
int	apic_id_to_logical[NAPICID];

/*
 * CPU topology map datastructures for HTT.
 */
struct cpu_group mp_groups[NAPICID];
struct cpu_top mp_top;
struct cpu_top *smp_topology;


/* AP uses this during bootstrap. Do not staticize.  */
char *bootSTK;
static int bootAP;		/* logical id of the AP currently being booted */

/* Hotwire a 0->4MB V==P mapping */
extern pt_entry_t *KPTphys;

/* SMP page table page */
extern pt_entry_t *SMPpt;

struct pcb stoppcbs[MAXCPU];

#ifdef APIC_IO
/* Variables needed for SMP tlb shootdown. */
vm_offset_t smp_tlb_addr1;
vm_offset_t smp_tlb_addr2;
volatile int smp_tlb_wait;
static struct mtx smp_tlb_mtx;
#endif

/*
 * Local data and functions.
 */

/* Set to 1 once we're ready to let the APs out of the pen.
 */
static volatile int aps_ready = 0;

static int mp_capable;		/* nonzero once an MP table has been found */
static u_int boot_address;	/* low-memory address of the AP trampoline */
static u_int base_memory;	/* size of base memory, in bytes */

static int picmode;		/* 0: virtual wire mode, 1: PIC mode */
static mpfps_t mpfps;
static int search_for_sig(u_int32_t target, int count);
static void mp_enable(u_int boot_addr);

static void mptable_hyperthread_fixup(u_int id_mask);
static void mptable_pass1(void);
static int mptable_pass2(void);
static void default_mp_table(int type);
static void fix_mp_table(void);
static void setup_apic_irq_mapping(void);
static void init_locks(void);
static int start_all_aps(u_int boot_addr);
static void install_ap_tramp(u_int boot_addr);
static int start_ap(int logicalCpu, u_int boot_addr);
void ap_init(void);
static int apic_int_is_bus_type(int intr, int bus_type);
static void release_aps(void *dummy);

/*
 * initialize all the SMP locks
 */

/* lock region used by kernel profiling */
int	mcount_lock;

#ifdef USE_COMLOCK
/* locks com (tty) data/hardware accesses: a FASTINTR() */
struct mtx com_mtx;
#endif /* USE_COMLOCK */

/*
 * Initialize the spin mutexes used by the SMP code.
 */
static void
init_locks(void)
{

#ifdef USE_COMLOCK
	mtx_init(&com_mtx, "com", NULL, MTX_SPIN);
#endif /* USE_COMLOCK */
#ifdef APIC_IO
	mtx_init(&smp_tlb_mtx, "tlb", NULL, MTX_SPIN);
#endif
}

/*
 * Calculate usable address in base memory for AP trampoline code.
 * Returns the chosen (page-aligned) physical address and records it
 * in boot_address; also records base memory size in base_memory.
 */
u_int
mp_bootaddress(u_int basemem)
{
	POSTCODE(MP_BOOTADDRESS_POST);

	base_memory = basemem * 1024;	/* convert to bytes */

	boot_address = base_memory & ~0xfff;	/* round down to 4k boundary */
	if ((base_memory - boot_address) < bootMP_size)
		boot_address -= 4096;	/* not enough, lower by 4k */

	return boot_address;
}


/*
 * Look for an Intel MP spec table (ie, SMP capable hardware).
 */
void
i386_mp_probe(void)
{
	int     x;
	u_long  segment;
	u_int32_t target;

	POSTCODE(MP_PROBE_POST);

	/*
	 * The MP spec says the floating pointer structure lives in one of:
	 * the first 1K of the EBDA, the last 1K of base memory, or the
	 * BIOS ROM area.  Search in that order.
	 */
	/* see if EBDA exists */
	if ((segment = (u_long) * (u_short *) (KERNBASE + 0x40e)) != 0) {
		/* search first 1K of EBDA */
		target = (u_int32_t) (segment << 4);
		if ((x = search_for_sig(target, 1024 / 4)) >= 0)
			goto found;
	} else {
		/* last 1K of base memory, effective 'top of base' passed in */
		target = (u_int32_t) (base_memory - 0x400);
		if ((x = search_for_sig(target, 1024 / 4)) >= 0)
			goto found;
	}

	/* search the BIOS */
	target = (u_int32_t) BIOS_BASE;
	if ((x = search_for_sig(target, BIOS_COUNT)) >= 0)
		goto found;

	/* nothing found */
	mpfps = (mpfps_t)0;
	mp_capable = 0;
	return;

found:
	/* calculate needed resources */
	mpfps = (mpfps_t)x;
	mptable_pass1();

	/* flag fact that we are running multiple processors */
	mp_capable = 1;
}

/*
 * MI hook: report whether this machine is MP capable; also seeds
 * all_cpus with the BSP so early MBUF init sees at least one CPU.
 */
int
cpu_mp_probe(void)
{
	/*
	 * Record BSP in CPU map
	 * This is done here so that MBUF init code works correctly.
	 */
	all_cpus = 1;

	return (mp_capable);
}

/*
 * Initialize the SMP hardware and the APIC and start up the AP's.
 */
void
cpu_mp_start(void)
{
	POSTCODE(MP_START_POST);

	/* look for MP capable motherboard */
	if (mp_capable)
		mp_enable(boot_address);
	else
		panic("MP hardware not found!");

	cpu_setregs();
}


/*
 * Print various information about the SMP system hardware and setup.
 */
void
cpu_mp_announce(void)
{
	int     x;

	POSTCODE(MP_ANNOUNCE_POST);

	printf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0));
	printf(", version: 0x%08x", cpu_apic_versions[0]);
	printf(", at 0x%08x\n", cpu_apic_address);
	for (x = 1; x <= mp_naps; ++x) {
		printf(" cpu%d (AP): apic id: %2d", x, CPU_TO_ID(x));
		printf(", version: 0x%08x", cpu_apic_versions[x]);
		printf(", at 0x%08x\n", cpu_apic_address);
	}

#if defined(APIC_IO)
	for (x = 0; x < mp_napics; ++x) {
		printf(" io%d (APIC): apic id: %2d", x, IO_TO_ID(x));
		printf(", version: 0x%08x", io_apic_versions[x]);
		printf(", at 0x%08x\n", io_apic_address[x]);
	}
#else
	printf(" Warning: APIC I/O disabled\n");
#endif	/* APIC_IO */
}

/*
 * AP cpu's call this to sync up protected mode.
 *
 * Builds this AP's GDT/TSS in its per-cpu private space (indexed by
 * bootAP, set by the BSP before the AP was started), loads the
 * descriptor tables, and brings CR0 to a known state.
 */
void
init_secondary(void)
{
	int	gsel_tss;
	int	x, myid = bootAP;
	u_int	cr0;

	gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[myid];
	gdt_segs[GPROC0_SEL].ssd_base =
		(int) &SMP_prvspace[myid].pcpu.pc_common_tss;
	SMP_prvspace[myid].pcpu.pc_prvspace =
		&SMP_prvspace[myid].pcpu;

	/* each CPU gets its own copy of the GDT */
	for (x = 0; x < NGDT; x++) {
		ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
	}

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (int) &gdt[myid * NGDT];
	lgdt(&r_gdt);		/* does magic intra-segment return */

	lidt(&r_idt);

	lldt(_default_ldt);
	PCPU_SET(currentldt, _default_ldt);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
	PCPU_SET(common_tss.tss_esp0, 0);	/* not used until after switch */
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
	PCPU_SET(tss_gdt, &gdt[myid * NGDT + GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s:
 *	CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);	/* enable caches, FPU traps */
	load_cr0(cr0);

	pmap_set_opt();
}


#if defined(APIC_IO)
/*
 * Final configuration of the BSP's local APIC:
 *  - disable 'pic mode'.
 *  - disable 'virtual wire mode'.
 *  - enable NMI.
 */
void
bsp_apic_configure(void)
{
	u_char  byte;
	u_int32_t temp;

	/* leave 'pic mode' if necessary */
	if (picmode) {
		outb(0x22, 0x70);	/* select IMCR */
		byte = inb(0x23);	/* current contents */
		byte |= 0x01;		/* mask external INTR */
		outb(0x23, byte);	/* disconnect 8259s/NMI */
	}

	/* mask lint0 (the 8259 'virtual wire' connection) */
	temp = lapic.lvt_lint0;
	temp |= APIC_LVT_M;		/* set the mask */
	lapic.lvt_lint0 = temp;

	/* setup lint1 to handle NMI */
	temp = lapic.lvt_lint1;
	temp &= ~APIC_LVT_M;		/* clear the mask */
	lapic.lvt_lint1 = temp;

	if (bootverbose)
		apic_dump("bsp_apic_configure()");
}
#endif  /* APIC_IO */


/*******************************************************************
 * local functions and data
 */

/*
 * start the SMP system
 *
 * Parses the MP configuration table (both passes), programs the
 * IO APICs, installs the IPI vectors and finally starts the APs.
 */
static void
mp_enable(u_int boot_addr)
{
	int     x;
#if defined(APIC_IO)
	int     apic;
	u_int   ux;
#endif	/* APIC_IO */

	POSTCODE(MP_ENABLE_POST);

	/* turn on 4MB of V == P addressing so we can get to MP table */
	*(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
	invltlb();

	/* examine the MP table for needed info, uses physical addresses */
	x = mptable_pass2();

	/* tear the identity mapping back down */
	*(int *)PTD = 0;
	invltlb();

	/* can't process default configs till the CPU APIC is pmapped */
	if (x)
		default_mp_table(x);

	/* post scan cleanup */
	fix_mp_table();
	setup_apic_irq_mapping();

#if defined(APIC_IO)

	/* fill the LOGICAL io_apic_versions table
 */
	for (apic = 0; apic < mp_napics; ++apic) {
		ux = io_apic_read(apic, IOAPIC_VER);
		io_apic_versions[apic] = ux;
		io_apic_set_id(apic, IO_TO_ID(apic));
	}

	/* program each IO APIC in the system */
	for (apic = 0; apic < mp_napics; ++apic)
		if (io_apic_setup(apic) < 0)
			panic("IO APIC setup failure");

	/* install a 'Spurious INTerrupt' vector */
	setidt(XSPURIOUSINT_OFFSET, Xspuriousint,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* install an inter-CPU IPI for TLB invalidation */
	setidt(XINVLTLB_OFFSET, Xinvltlb,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(XINVLPG_OFFSET, Xinvlpg,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(XINVLRNG_OFFSET, Xinvlrng,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* install an inter-CPU IPI for forwarding hardclock() */
	setidt(XHARDCLOCK_OFFSET, Xhardclock,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* install an inter-CPU IPI for forwarding statclock() */
	setidt(XSTATCLOCK_OFFSET, Xstatclock,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* install an inter-CPU IPI for lazy pmap release */
	setidt(XLAZYPMAP_OFFSET, Xlazypmap,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* install an inter-CPU IPI for all-CPU rendezvous */
	setidt(XRENDEZVOUS_OFFSET, Xrendezvous,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* install an inter-CPU IPI for forcing an additional software trap */
	setidt(XCPUAST_OFFSET, Xcpuast,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* install an inter-CPU IPI for CPU stop/restart */
	setidt(XCPUSTOP_OFFSET, Xcpustop,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

#if defined(TEST_TEST1)
	/* install a "fake hardware INTerrupt" vector */
	setidt(XTEST1_OFFSET, Xtest1,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
#endif	/** TEST_TEST1 */

#endif	/* APIC_IO */

	/* initialize all SMP locks */
	init_locks();

	/* start each Application Processor */
	start_all_aps(boot_addr);
}


/*
 * look for the MP spec signature
 */

/* string defined by the Intel MP Spec as identifying the MP table */
#define MP_SIG		0x5f504d5f	/* _MP_ */
#define NEXT(X)		((X) += 4)

/*
 * Scan 'count' 32-bit words starting at physical address 'target'
 * (accessed through the KERNBASE mapping) for the _MP_ signature.
 * Returns the physical byte offset of the signature, or -1.
 */
static int
search_for_sig(u_int32_t target, int count)
{
	int     x;
	u_int32_t *addr = (u_int32_t *) (KERNBASE + target);

	for (x = 0; x < count; NEXT(x))
		if (addr[x] == MP_SIG)
			/* make array index a byte index */
			return (target + (x * sizeof(u_int32_t)));

	return -1;
}


/* type/length/name of each MP base-table entry kind, indexed by type */
static basetable_entry basetable_entry_types[] =
{
	{0, 20, "Processor"},
	{1, 8, "Bus"},
	{2, 8, "I/O APIC"},
	{3, 8, "I/O INT"},
	{4, 8, "Local INT"}
};

typedef struct BUSDATA {
	u_char  bus_id;
	enum busTypes bus_type;
}       bus_datum;

typedef struct INTDATA {
	u_char  int_type;
	u_short int_flags;
	u_char  src_bus_id;
	u_char  src_bus_irq;
	u_char  dst_apic_id;
	u_char  dst_apic_int;
	u_char  int_vector;	/* 0xff == unassigned */
}       io_int, local_int;

typedef struct BUSTYPENAME {
	u_char  type;
	char    name[7];
}       bus_type_name;

static bus_type_name bus_type_table[] =
{
	{CBUS, "CBUS"},
	{CBUSII, "CBUSII"},
	{EISA, "EISA"},
	{MCA, "MCA"},
	{UNKNOWN_BUSTYPE, "---"},
	{ISA, "ISA"},
	{MCA, "MCA"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{PCI, "PCI"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{XPRESS, "XPRESS"},
	{UNKNOWN_BUSTYPE, "---"}
};
/* from MP spec v1.4, table 5-1 */
static int default_data[7][5] =
{
/*   nbus, id0, type0, id1, type1 */
	{1, 0, ISA, 255, 255},
	{1, 0, EISA, 255, 255},
	{1,
 0, EISA, 255, 255},
	{1, 0, MCA, 255, 255},
	{2, 0, ISA, 1, PCI},
	{2, 0, EISA, 1, PCI},
	{2, 0, MCA, 1, PCI}
};


/* the bus data */
static bus_datum *bus_data;

/* the IO INT data, one entry per possible APIC INTerrupt */
static io_int  *io_apic_ints;

static int nintrs;

static int processor_entry(proc_entry_ptr entry, int cpu);
static int bus_entry(bus_entry_ptr entry, int bus);
static int io_apic_entry(io_apic_entry_ptr entry, int apic);
static int int_entry(int_entry_ptr entry, int intr);
static int lookup_bus_type(char *name);


/*
 * 1st pass on motherboard's Intel MP specification table.
 *
 * initializes:
 *	mp_ncpus = 1
 *
 * determines:
 *	cpu_apic_address (common to all CPUs)
 *	io_apic_address[N]
 *	mp_naps
 *	mp_nbusses
 *	mp_napics
 *	nintrs
 */
static void
mptable_pass1(void)
{
	int	x;
	mpcth_t	cth;
	int	totalSize;
	void*	position;
	int	count;
	int	type;
	u_int	id_mask;

	POSTCODE(MPTABLE_PASS1_POST);

	/* clear various tables */
	for (x = 0; x < NAPICID; ++x) {
		io_apic_address[x] = ~0;	/* IO APIC address table */
	}

	/* init everything to empty */
	mp_naps = 0;
	mp_nbusses = 0;
	mp_napics = 0;
	nintrs = 0;
	id_mask = 0;

	/* check for use of 'default' configuration */
	if (MPFPS_MPFB1 != 0) {
		/* use default addresses */
		cpu_apic_address = DEFAULT_APIC_BASE;
		io_apic_address[0] = DEFAULT_IO_APIC_BASE;

		/* fill in with defaults */
		mp_naps = 2;		/* includes BSP */
		mp_maxid = 1;
		mp_nbusses = default_data[MPFPS_MPFB1 - 1][0];
#if defined(APIC_IO)
		mp_napics = 1;
		nintrs = 16;
#endif	/* APIC_IO */
	}
	else {
		if ((cth = mpfps->pap) == 0)
			panic("MP Configuration Table Header MISSING!");

		cpu_apic_address = (vm_offset_t) cth->apic_address;

		/* walk the table, recording info of interest */
		totalSize = cth->base_table_length - sizeof(struct MPCTH);
		position = (u_char *) cth + sizeof(struct MPCTH);
		count = cth->entry_count;

		/* just count entries of each type on this pass */
		while (count--) {
			switch (type = *(u_char *) position) {
			case 0: /* processor_entry */
				if (((proc_entry_ptr)position)->cpu_flags
				    & PROCENTRY_FLAG_EN) {
					++mp_naps;
					mp_maxid++;
					id_mask |= 1 <<
					    ((proc_entry_ptr)position)->apic_id;
				}
				break;
			case 1: /* bus_entry */
				++mp_nbusses;
				break;
			case 2: /* io_apic_entry */
				if (((io_apic_entry_ptr)position)->apic_flags
					& IOAPICENTRY_FLAG_EN)
					io_apic_address[mp_napics++] =
					    (vm_offset_t)((io_apic_entry_ptr)
						position)->apic_address;
				break;
			case 3: /* int_entry */
				++nintrs;
				break;
			case 4:	/* int_entry */
				break;
			default:
				panic("mpfps Base Table HOSED!");
				/* NOTREACHED */
			}

			/* step to the next entry using its spec'd length */
			totalSize -= basetable_entry_types[type].length;
			(u_char*)position += basetable_entry_types[type].length;
		}
	}

	/* qualify the numbers */
	if (mp_naps > MAXCPU) {
		printf("Warning: only using %d of %d available CPUs!\n",
			MAXCPU, mp_naps);
		mp_naps = MAXCPU;
	}

	/* See if we need to fixup HT logical CPUs. */
	mptable_hyperthread_fixup(id_mask);

	/*
	 * Count the BSP.
	 * This is also used as a counter while starting the APs.
	 */
	mp_ncpus = 1;

	--mp_naps;	/* subtract the BSP */
}


/*
 * 2nd pass on motherboard's Intel MP specification table.
 *
 * sets:
 *	boot_cpu_id
 *	ID_TO_IO(N), phy APIC ID to log CPU/IO table
 *	CPU_TO_ID(N), logical CPU to APIC ID table
 *	IO_TO_ID(N), logical IO to APIC ID table
 *	bus_data[N]
 *	io_apic_ints[N]
 *
 * Returns the default configuration type (nonzero) if the table
 * specifies one, else 0 after fully parsing the base table.
 */
static int
mptable_pass2(void)
{
	struct PROCENTRY proc;
	int     x;
	mpcth_t cth;
	int     totalSize;
	void*   position;
	int     count;
	int     type;
	int     apic, bus, cpu, intr;
	int	i, j;
	int	pgeflag;

	POSTCODE(MPTABLE_PASS2_POST);

	/* Initialize fake proc entry for use with HT fixup. */
	bzero(&proc, sizeof(proc));
	proc.type = 0;
	proc.cpu_flags = PROCENTRY_FLAG_EN;

	pgeflag = 0;		/* XXX - Not used under SMP yet. */

	/* sized from the counts gathered by mptable_pass1() */
	MALLOC(io_apic_versions, u_int32_t *, sizeof(u_int32_t) * mp_napics,
	    M_DEVBUF, M_WAITOK);
	MALLOC(ioapic, volatile ioapic_t **, sizeof(ioapic_t *) * mp_napics,
	    M_DEVBUF, M_WAITOK);
	MALLOC(io_apic_ints, io_int *, sizeof(io_int) * (nintrs + 1),
	    M_DEVBUF, M_WAITOK);
	MALLOC(bus_data, bus_datum *, sizeof(bus_datum) * mp_nbusses,
	    M_DEVBUF, M_WAITOK);

	bzero(ioapic, sizeof(ioapic_t *) * mp_napics);

	/* map each IO APIC into a slot at the top of the SMP page table */
	for (i = 0; i < mp_napics; i++) {
		for (j = 0; j < mp_napics; j++) {
			/* same page frame as a previous IO apic?
 */
			if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) ==
			    (io_apic_address[i] & PG_FRAME)) {
				ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace
					+ (NPTEPG-2-j) * PAGE_SIZE
					+ (io_apic_address[i] & PAGE_MASK));
				break;
			}
			/* use this slot if available */
			if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) == 0) {
				SMPpt[NPTEPG-2-j] = (pt_entry_t)(PG_V | PG_RW |
				    pgeflag | (io_apic_address[i] & PG_FRAME));
				ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace
					+ (NPTEPG-2-j) * PAGE_SIZE
					+ (io_apic_address[i] & PAGE_MASK));
				break;
			}
		}
	}

	/* clear various tables */
	for (x = 0; x < NAPICID; ++x) {
		ID_TO_IO(x) = -1;	/* phy APIC ID to log CPU/IO table */
		CPU_TO_ID(x) = -1;	/* logical CPU to APIC ID table */
		IO_TO_ID(x) = -1;	/* logical IO to APIC ID table */
	}

	/* clear bus data table */
	for (x = 0; x < mp_nbusses; ++x)
		bus_data[x].bus_id = 0xff;

	/* clear IO APIC INT table */
	for (x = 0; x < (nintrs + 1); ++x) {
		io_apic_ints[x].int_type = 0xff;
		io_apic_ints[x].int_vector = 0xff;
	}

	/* setup the cpu/apic mapping arrays */
	boot_cpu_id = -1;

	/* record whether PIC or virtual-wire mode */
	picmode = (mpfps->mpfb2 & 0x80) ? 1 : 0;

	/* check for use of 'default' configuration */
	if (MPFPS_MPFB1 != 0)
		return MPFPS_MPFB1;	/* return default configuration type */

	if ((cth = mpfps->pap) == 0)
		panic("MP Configuration Table Header MISSING!");

	/* walk the table, recording info of interest */
	totalSize = cth->base_table_length - sizeof(struct MPCTH);
	position = (u_char *) cth + sizeof(struct MPCTH);
	count = cth->entry_count;
	apic = bus = intr = 0;
	cpu = 1;				/* pre-count the BSP */

	while (count--) {
		switch (type = *(u_char *) position) {
		case 0:
			if (processor_entry(position, cpu)) {
				if (logical_cpus != 0 &&
				    cpu % logical_cpus != 0)
					logical_cpus_mask |= (1 << cpu);
				++cpu;
			}
			if (need_hyperthreading_fixup) {
				/*
				 * Create fake mptable processor entries
				 * and feed them to processor_entry() to
				 * enumerate the logical CPUs.
				 */
				proc.apic_id = ((proc_entry_ptr)position)->apic_id;
				for (i = 1; i < logical_cpus; i++) {
					proc.apic_id++;
					(void)processor_entry(&proc, cpu);
					logical_cpus_mask |= (1 << cpu);
					cpu++;
				}
			}
			break;
		case 1:
			if (bus_entry(position, bus))
				++bus;
			break;
		case 2:
			if (io_apic_entry(position, apic))
				++apic;
			break;
		case 3:
			if (int_entry(position, intr))
				++intr;
			break;
		case 4:
			/* int_entry(position); */
			break;
		default:
			panic("mpfps Base Table HOSED!");
			/* NOTREACHED */
		}

		totalSize -= basetable_entry_types[type].length;
		(u_char *) position += basetable_entry_types[type].length;
	}

	if (boot_cpu_id == -1)
		panic("NO BSP found!");

	/* report fact that its NOT a default configuration */
	return 0;
}

/*
 * Check if we should perform a hyperthreading "fix-up" to
 * enumerate any logical CPU's that aren't already listed
 * in the table.
 *
 * XXX: We assume that all of the physical CPUs in the
 * system have the same number of logical CPUs.
 *
 * XXX: We assume that APIC ID's are allocated such that
 * the APIC ID's for a physical processor are aligned
 * with the number of logical CPU's in the processor.
 */
static void
mptable_hyperthread_fixup(u_int id_mask)
{
	u_int i, id;
	int logical;

	/* Nothing to do if there is no HTT support. */
	if ((cpu_feature & CPUID_HTT) == 0)
		return;
	logical_cpus = (cpu_procinfo & CPUID_HTT_CORES) >> 16;
	if (logical_cpus <= 1)
		return;

	/*
	 * For each APIC ID of a CPU that is set in the mask,
	 * scan the other candidate APIC ID's for this
	 * physical processor.  If any of those ID's are
	 * already in the table, then kill the fixup.
	 *
	 * NOTE(review): with id == MAXCPU the '1 << id' shift below is
	 * undefined when MAXCPU >= 32 (shift >= width of u_int) — verify
	 * MAXCPU's value in this configuration.
	 */
	for (id = 0; id <= MAXCPU; id++) {
		if ((id_mask & 1 << id) == 0)
			continue;
		/* First, make sure we are on a logical_cpus boundary. */
		if (id % logical_cpus != 0)
			return;
		for (i = id + 1; i < id + logical_cpus; i++)
			if ((id_mask & 1 << i) != 0)
				return;
	}

	/*
	 * Ok, the ID's checked out, so enable the fixup.  We have to fixup
	 * mp_naps and mp_maxid right now.
	 */
	need_hyperthreading_fixup = 1;
	mp_maxid *= logical_cpus;
	mp_naps *= logical_cpus;

	/*
	 * Now setup the cpu topology map.
	 */
	mp_top.ct_count = mp_naps / logical_cpus;
	mp_top.ct_group = mp_groups;

	/*
	 * The first logical id is directly after the last valid physical id.
	 */
	logical = mp_top.ct_count + 1;

	/* one group per physical package, containing its logical CPUs */
	for (i = 0; i < mp_top.ct_count; i++) {
		int j;

		mp_groups[i].cg_mask = (1 << i);
		for (j = 1; j < logical_cpus; j++)
			mp_groups[i].cg_mask |= (1 << logical++);
		mp_groups[i].cg_count = logical_cpus;
		mp_groups[i].cg_children = 0;
	}

	smp_topology = &mp_top;
}

/*
 * Bind IRQ 'irq' to interrupt pin 'intpin' of logical IO APIC 'apic',
 * recording the mapping in int_to_apicintpin[] and tagging any matching
 * INT/ExtInt entries in io_apic_ints[] with the vector.
 */
void
assign_apic_irq(int apic, int intpin, int irq)
{
	int x;

	if (int_to_apicintpin[irq].ioapic != -1)
		panic("assign_apic_irq: inconsistent table");

	int_to_apicintpin[irq].ioapic = apic;
	int_to_apicintpin[irq].int_pin = intpin;
	int_to_apicintpin[irq].apic_address = ioapic[apic];
	int_to_apicintpin[irq].redirindex = IOAPIC_REDTBL + 2 * intpin;

	for (x = 0; x < nintrs; x++) {
		if ((io_apic_ints[x].int_type == 0 ||
		     io_apic_ints[x].int_type == 3) &&
		    io_apic_ints[x].int_vector == 0xff &&
		    io_apic_ints[x].dst_apic_id == IO_TO_ID(apic) &&
		    io_apic_ints[x].dst_apic_int == intpin)
			io_apic_ints[x].int_vector = irq;
	}
}

/*
 * Undo assign_apic_irq(): free the IRQ's intpin binding and mark the
 * matching io_apic_ints[] entries unassigned (vector 0xff) again.
 */
void
revoke_apic_irq(int irq)
{
	int x;
	int oldapic;
	int oldintpin;

	if (int_to_apicintpin[irq].ioapic == -1)
		panic("revoke_apic_irq: inconsistent table");

	oldapic = int_to_apicintpin[irq].ioapic;
	oldintpin = int_to_apicintpin[irq].int_pin;

	int_to_apicintpin[irq].ioapic = -1;
	int_to_apicintpin[irq].int_pin = 0;
	int_to_apicintpin[irq].apic_address = NULL;
	int_to_apicintpin[irq].redirindex = 0;

	for (x = 0; x < nintrs; x++) {
		if ((io_apic_ints[x].int_type == 0 ||
		     io_apic_ints[x].int_type == 3) &&
		    io_apic_ints[x].int_vector != 0xff &&
		    io_apic_ints[x].dst_apic_id == IO_TO_ID(oldapic) &&
		    io_apic_ints[x].dst_apic_int == oldintpin)
			io_apic_ints[x].int_vector = 0xff;
	}
}


/*
 * Allocate a free IRQ slot for interrupt entry 'intr' and program
 * its IO APIC intpin; silently returns if none is needed or free.
 */
static void
allocate_apic_irq(int intr)
{
	int apic;
	int intpin;
	int irq;

	if (io_apic_ints[intr].int_vector != 0xff)
		return;		/* Interrupt handler already assigned */

	if (io_apic_ints[intr].int_type != 0 &&
	    (io_apic_ints[intr].int_type != 3 ||
	     (io_apic_ints[intr].dst_apic_id == IO_TO_ID(0) &&
	      io_apic_ints[intr].dst_apic_int == 0)))
		return;		/* Not INT or ExtInt on != (0, 0) */

	/* find the first unused IRQ slot */
	irq = 0;
	while (irq < APIC_INTMAPSIZE &&
	       int_to_apicintpin[irq].ioapic != -1)
		irq++;

	if (irq >= APIC_INTMAPSIZE)
		return;		/* No free interrupt handlers */

	apic = ID_TO_IO(io_apic_ints[intr].dst_apic_id);
	intpin = io_apic_ints[intr].dst_apic_int;

	assign_apic_irq(apic, intpin, irq);
	io_apic_setup_intpin(apic, intpin);
}


/*
 * Change the physical APIC ID of logical IO APIC 'apic' from 'oldid'
 * to 'newid', updating interrupt entries and IO_TO_ID mappings; if
 * another IO APIC already held 'newid' the two IDs are swapped.
 */
static void
swap_apic_id(int apic, int oldid, int newid)
{
	int x;
	int oapic;


	if (oldid == newid)
		return;			/* Nothing to do */

	printf("Changing APIC ID for IO APIC #%d from %d to %d in MP table\n",
	       apic, oldid, newid);

	/* Swap physical APIC IDs in interrupt entries */
	for (x = 0; x < nintrs; x++) {
		if (io_apic_ints[x].dst_apic_id == oldid)
			io_apic_ints[x].dst_apic_id = newid;
		else if (io_apic_ints[x].dst_apic_id == newid)
			io_apic_ints[x].dst_apic_id = oldid;
	}

	/* Swap physical APIC IDs in IO_TO_ID mappings */
	for (oapic = 0; oapic < mp_napics; oapic++)
		if (IO_TO_ID(oapic) == newid)
			break;

	if (oapic < mp_napics) {
		/* some other IO APIC held 'newid'; give it 'oldid' */
		printf("Changing APIC ID for IO APIC #%d from "
		       "%d to %d in MP table\n",
		       oapic, newid, oldid);
		IO_TO_ID(oapic) = oldid;
	}
	IO_TO_ID(apic) = newid;
}


/*
 * Rebuild the physical-ID -> logical-number table (ID_TO_IO) from the
 * authoritative CPU_TO_ID and IO_TO_ID mappings.
 */
static void
fix_id_to_io_mapping(void)
{
	int x;

	for (x = 0; x < NAPICID; x++)
		ID_TO_IO(x) = -1;

	for (x = 0; x <= mp_naps; x++)
		if (CPU_TO_ID(x) < NAPICID)
			ID_TO_IO(CPU_TO_ID(x)) = x;

	for (x = 0; x < mp_napics; x++)
		if (IO_TO_ID(x) <
NAPICID) 1270 ID_TO_IO(IO_TO_ID(x)) = x; 1271} 1272 1273 1274static int 1275first_free_apic_id(void) 1276{ 1277 int freeid, x; 1278 1279 for (freeid = 0; freeid < NAPICID; freeid++) { 1280 for (x = 0; x <= mp_naps; x++) 1281 if (CPU_TO_ID(x) == freeid) 1282 break; 1283 if (x <= mp_naps) 1284 continue; 1285 for (x = 0; x < mp_napics; x++) 1286 if (IO_TO_ID(x) == freeid) 1287 break; 1288 if (x < mp_napics) 1289 continue; 1290 return freeid; 1291 } 1292 return freeid; 1293} 1294 1295 1296static int 1297io_apic_id_acceptable(int apic, int id) 1298{ 1299 int cpu; /* Logical CPU number */ 1300 int oapic; /* Logical IO APIC number for other IO APIC */ 1301 1302 if (id >= NAPICID) 1303 return 0; /* Out of range */ 1304 1305 for (cpu = 0; cpu <= mp_naps; cpu++) 1306 if (CPU_TO_ID(cpu) == id) 1307 return 0; /* Conflict with CPU */ 1308 1309 for (oapic = 0; oapic < mp_napics && oapic < apic; oapic++) 1310 if (IO_TO_ID(oapic) == id) 1311 return 0; /* Conflict with other APIC */ 1312 1313 return 1; /* ID is acceptable for IO APIC */ 1314} 1315 1316 1317/* 1318 * parse an Intel MP specification table 1319 */ 1320static void 1321fix_mp_table(void) 1322{ 1323 int x; 1324 int id; 1325 int bus_0 = 0; /* Stop GCC warning */ 1326 int bus_pci = 0; /* Stop GCC warning */ 1327 int num_pci_bus; 1328 int apic; /* IO APIC unit number */ 1329 int freeid; /* Free physical APIC ID */ 1330 int physid; /* Current physical IO APIC ID */ 1331 1332 /* 1333 * Fix mis-numbering of the PCI bus and its INT entries if the BIOS 1334 * did it wrong. The MP spec says that when more than 1 PCI bus 1335 * exists the BIOS must begin with bus entries for the PCI bus and use 1336 * actual PCI bus numbering. This implies that when only 1 PCI bus 1337 * exists the BIOS can choose to ignore this ordering, and indeed many 1338 * MP motherboards do ignore it. This causes a problem when the PCI 1339 * sub-system makes requests of the MP sub-system based on PCI bus 1340 * numbers. 
So here we look for the situation and renumber the 1341 * busses and associated INTs in an effort to "make it right". 1342 */ 1343 1344 /* find bus 0, PCI bus, count the number of PCI busses */ 1345 for (num_pci_bus = 0, x = 0; x < mp_nbusses; ++x) { 1346 if (bus_data[x].bus_id == 0) { 1347 bus_0 = x; 1348 } 1349 if (bus_data[x].bus_type == PCI) { 1350 ++num_pci_bus; 1351 bus_pci = x; 1352 } 1353 } 1354 /* 1355 * bus_0 == slot of bus with ID of 0 1356 * bus_pci == slot of last PCI bus encountered 1357 */ 1358 1359 /* check the 1 PCI bus case for sanity */ 1360 /* if it is number 0 all is well */ 1361 if (num_pci_bus == 1 && 1362 bus_data[bus_pci].bus_id != 0) { 1363 1364 /* mis-numbered, swap with whichever bus uses slot 0 */ 1365 1366 /* swap the bus entry types */ 1367 bus_data[bus_pci].bus_type = bus_data[bus_0].bus_type; 1368 bus_data[bus_0].bus_type = PCI; 1369 1370 /* swap each relavant INTerrupt entry */ 1371 id = bus_data[bus_pci].bus_id; 1372 for (x = 0; x < nintrs; ++x) { 1373 if (io_apic_ints[x].src_bus_id == id) { 1374 io_apic_ints[x].src_bus_id = 0; 1375 } 1376 else if (io_apic_ints[x].src_bus_id == 0) { 1377 io_apic_ints[x].src_bus_id = id; 1378 } 1379 } 1380 } 1381 1382 /* Assign IO APIC IDs. 1383 * 1384 * First try the existing ID. If a conflict is detected, try 1385 * the ID in the MP table. If a conflict is still detected, find 1386 * a free id. 1387 * 1388 * We cannot use the ID_TO_IO table before all conflicts has been 1389 * resolved and the table has been corrected. 
1390 */ 1391 for (apic = 0; apic < mp_napics; ++apic) { /* For all IO APICs */ 1392 1393 /* First try to use the value set by the BIOS */ 1394 physid = io_apic_get_id(apic); 1395 if (io_apic_id_acceptable(apic, physid)) { 1396 if (IO_TO_ID(apic) != physid) 1397 swap_apic_id(apic, IO_TO_ID(apic), physid); 1398 continue; 1399 } 1400 1401 /* Then check if the value in the MP table is acceptable */ 1402 if (io_apic_id_acceptable(apic, IO_TO_ID(apic))) 1403 continue; 1404 1405 /* Last resort, find a free APIC ID and use it */ 1406 freeid = first_free_apic_id(); 1407 if (freeid >= NAPICID) 1408 panic("No free physical APIC IDs found"); 1409 1410 if (io_apic_id_acceptable(apic, freeid)) { 1411 swap_apic_id(apic, IO_TO_ID(apic), freeid); 1412 continue; 1413 } 1414 panic("Free physical APIC ID not usable"); 1415 } 1416 fix_id_to_io_mapping(); 1417 1418 /* detect and fix broken Compaq MP table */ 1419 if (apic_int_type(0, 0) == -1) { 1420 printf("APIC_IO: MP table broken: 8259->APIC entry missing!\n"); 1421 io_apic_ints[nintrs].int_type = 3; /* ExtInt */ 1422 io_apic_ints[nintrs].int_vector = 0xff; /* Unassigned */ 1423 /* XXX fixme, set src bus id etc, but it doesn't seem to hurt */ 1424 io_apic_ints[nintrs].dst_apic_id = IO_TO_ID(0); 1425 io_apic_ints[nintrs].dst_apic_int = 0; /* Pin 0 */ 1426 nintrs++; 1427 } 1428} 1429 1430 1431/* Assign low level interrupt handlers */ 1432static void 1433setup_apic_irq_mapping(void) 1434{ 1435 int x; 1436 int int_vector; 1437 1438 /* Clear array */ 1439 for (x = 0; x < APIC_INTMAPSIZE; x++) { 1440 int_to_apicintpin[x].ioapic = -1; 1441 int_to_apicintpin[x].int_pin = 0; 1442 int_to_apicintpin[x].apic_address = NULL; 1443 int_to_apicintpin[x].redirindex = 0; 1444 } 1445 1446 /* First assign ISA/EISA interrupts */ 1447 for (x = 0; x < nintrs; x++) { 1448 int_vector = io_apic_ints[x].src_bus_irq; 1449 if (int_vector < APIC_INTMAPSIZE && 1450 io_apic_ints[x].int_vector == 0xff && 1451 int_to_apicintpin[int_vector].ioapic == -1 && 1452 
(apic_int_is_bus_type(x, ISA) || 1453 apic_int_is_bus_type(x, EISA)) && 1454 io_apic_ints[x].int_type == 0) { 1455 assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id), 1456 io_apic_ints[x].dst_apic_int, 1457 int_vector); 1458 } 1459 } 1460 1461 /* Assign ExtInt entry if no ISA/EISA interrupt 0 entry */ 1462 for (x = 0; x < nintrs; x++) { 1463 if (io_apic_ints[x].dst_apic_int == 0 && 1464 io_apic_ints[x].dst_apic_id == IO_TO_ID(0) && 1465 io_apic_ints[x].int_vector == 0xff && 1466 int_to_apicintpin[0].ioapic == -1 && 1467 io_apic_ints[x].int_type == 3) { 1468 assign_apic_irq(0, 0, 0); 1469 break; 1470 } 1471 } 1472 /* PCI interrupt assignment is deferred */ 1473} 1474 1475 1476static int 1477processor_entry(proc_entry_ptr entry, int cpu) 1478{ 1479 /* check for usability */ 1480 if (!(entry->cpu_flags & PROCENTRY_FLAG_EN)) 1481 return 0; 1482 1483 if(entry->apic_id >= NAPICID) 1484 panic("CPU APIC ID out of range (0..%d)", NAPICID - 1); 1485 /* check for BSP flag */ 1486 if (entry->cpu_flags & PROCENTRY_FLAG_BP) { 1487 boot_cpu_id = entry->apic_id; 1488 CPU_TO_ID(0) = entry->apic_id; 1489 ID_TO_CPU(entry->apic_id) = 0; 1490 return 0; /* its already been counted */ 1491 } 1492 1493 /* add another AP to list, if less than max number of CPUs */ 1494 else if (cpu < MAXCPU) { 1495 CPU_TO_ID(cpu) = entry->apic_id; 1496 ID_TO_CPU(entry->apic_id) = cpu; 1497 return 1; 1498 } 1499 1500 return 0; 1501} 1502 1503 1504static int 1505bus_entry(bus_entry_ptr entry, int bus) 1506{ 1507 int x; 1508 char c, name[8]; 1509 1510 /* encode the name into an index */ 1511 for (x = 0; x < 6; ++x) { 1512 if ((c = entry->bus_type[x]) == ' ') 1513 break; 1514 name[x] = c; 1515 } 1516 name[x] = '\0'; 1517 1518 if ((x = lookup_bus_type(name)) == UNKNOWN_BUSTYPE) 1519 panic("unknown bus type: '%s'", name); 1520 1521 bus_data[bus].bus_id = entry->bus_id; 1522 bus_data[bus].bus_type = x; 1523 1524 return 1; 1525} 1526 1527 1528static int 1529io_apic_entry(io_apic_entry_ptr entry, int apic) 1530{ 
1531 if (!(entry->apic_flags & IOAPICENTRY_FLAG_EN)) 1532 return 0; 1533 1534 IO_TO_ID(apic) = entry->apic_id; 1535 if (entry->apic_id < NAPICID) 1536 ID_TO_IO(entry->apic_id) = apic; 1537 1538 return 1; 1539} 1540 1541 1542static int 1543lookup_bus_type(char *name) 1544{ 1545 int x; 1546 1547 for (x = 0; x < MAX_BUSTYPE; ++x) 1548 if (strcmp(bus_type_table[x].name, name) == 0) 1549 return bus_type_table[x].type; 1550 1551 return UNKNOWN_BUSTYPE; 1552} 1553 1554 1555static int 1556int_entry(int_entry_ptr entry, int intr) 1557{ 1558 int apic; 1559 1560 io_apic_ints[intr].int_type = entry->int_type; 1561 io_apic_ints[intr].int_flags = entry->int_flags; 1562 io_apic_ints[intr].src_bus_id = entry->src_bus_id; 1563 io_apic_ints[intr].src_bus_irq = entry->src_bus_irq; 1564 if (entry->dst_apic_id == 255) { 1565 /* This signal goes to all IO APICS. Select an IO APIC 1566 with sufficient number of interrupt pins */ 1567 for (apic = 0; apic < mp_napics; apic++) 1568 if (((io_apic_read(apic, IOAPIC_VER) & 1569 IOART_VER_MAXREDIR) >> MAXREDIRSHIFT) >= 1570 entry->dst_apic_int) 1571 break; 1572 if (apic < mp_napics) 1573 io_apic_ints[intr].dst_apic_id = IO_TO_ID(apic); 1574 else 1575 io_apic_ints[intr].dst_apic_id = entry->dst_apic_id; 1576 } else 1577 io_apic_ints[intr].dst_apic_id = entry->dst_apic_id; 1578 io_apic_ints[intr].dst_apic_int = entry->dst_apic_int; 1579 1580 return 1; 1581} 1582 1583 1584static int 1585apic_int_is_bus_type(int intr, int bus_type) 1586{ 1587 int bus; 1588 1589 for (bus = 0; bus < mp_nbusses; ++bus) 1590 if ((bus_data[bus].bus_id == io_apic_ints[intr].src_bus_id) 1591 && ((int) bus_data[bus].bus_type == bus_type)) 1592 return 1; 1593 1594 return 0; 1595} 1596 1597 1598/* 1599 * Given a traditional ISA INT mask, return an APIC mask. 
1600 */ 1601u_int 1602isa_apic_mask(u_int isa_mask) 1603{ 1604 int isa_irq; 1605 int apic_pin; 1606 1607#if defined(SKIP_IRQ15_REDIRECT) 1608 if (isa_mask == (1 << 15)) { 1609 printf("skipping ISA IRQ15 redirect\n"); 1610 return isa_mask; 1611 } 1612#endif /* SKIP_IRQ15_REDIRECT */ 1613 1614 isa_irq = ffs(isa_mask); /* find its bit position */ 1615 if (isa_irq == 0) /* doesn't exist */ 1616 return 0; 1617 --isa_irq; /* make it zero based */ 1618 1619 apic_pin = isa_apic_irq(isa_irq); /* look for APIC connection */ 1620 if (apic_pin == -1) 1621 return 0; 1622 1623 return (1 << apic_pin); /* convert pin# to a mask */ 1624} 1625 1626 1627/* 1628 * Determine which APIC pin an ISA/EISA INT is attached to. 1629 */ 1630#define INTTYPE(I) (io_apic_ints[(I)].int_type) 1631#define INTPIN(I) (io_apic_ints[(I)].dst_apic_int) 1632#define INTIRQ(I) (io_apic_ints[(I)].int_vector) 1633#define INTAPIC(I) (ID_TO_IO(io_apic_ints[(I)].dst_apic_id)) 1634 1635#define SRCBUSIRQ(I) (io_apic_ints[(I)].src_bus_irq) 1636int 1637isa_apic_irq(int isa_irq) 1638{ 1639 int intr; 1640 1641 for (intr = 0; intr < nintrs; ++intr) { /* check each record */ 1642 if (INTTYPE(intr) == 0) { /* standard INT */ 1643 if (SRCBUSIRQ(intr) == isa_irq) { 1644 if (apic_int_is_bus_type(intr, ISA) || 1645 apic_int_is_bus_type(intr, EISA)) { 1646 if (INTIRQ(intr) == 0xff) 1647 return -1; /* unassigned */ 1648 return INTIRQ(intr); /* found */ 1649 } 1650 } 1651 } 1652 } 1653 return -1; /* NOT found */ 1654} 1655 1656 1657/* 1658 * Determine which APIC pin a PCI INT is attached to. 
1659 */ 1660#define SRCBUSID(I) (io_apic_ints[(I)].src_bus_id) 1661#define SRCBUSDEVICE(I) ((io_apic_ints[(I)].src_bus_irq >> 2) & 0x1f) 1662#define SRCBUSLINE(I) (io_apic_ints[(I)].src_bus_irq & 0x03) 1663int 1664pci_apic_irq(int pciBus, int pciDevice, int pciInt) 1665{ 1666 int intr; 1667 1668 --pciInt; /* zero based */ 1669 1670 for (intr = 0; intr < nintrs; ++intr) /* check each record */ 1671 if ((INTTYPE(intr) == 0) /* standard INT */ 1672 && (SRCBUSID(intr) == pciBus) 1673 && (SRCBUSDEVICE(intr) == pciDevice) 1674 && (SRCBUSLINE(intr) == pciInt)) /* a candidate IRQ */ 1675 if (apic_int_is_bus_type(intr, PCI)) { 1676 if (INTIRQ(intr) == 0xff) 1677 allocate_apic_irq(intr); 1678 if (INTIRQ(intr) == 0xff) 1679 return -1; /* unassigned */ 1680 return INTIRQ(intr); /* exact match */ 1681 } 1682 1683 return -1; /* NOT found */ 1684} 1685 1686int 1687next_apic_irq(int irq) 1688{ 1689 int intr, ointr; 1690 int bus, bustype; 1691 1692 bus = 0; 1693 bustype = 0; 1694 for (intr = 0; intr < nintrs; intr++) { 1695 if (INTIRQ(intr) != irq || INTTYPE(intr) != 0) 1696 continue; 1697 bus = SRCBUSID(intr); 1698 bustype = apic_bus_type(bus); 1699 if (bustype != ISA && 1700 bustype != EISA && 1701 bustype != PCI) 1702 continue; 1703 break; 1704 } 1705 if (intr >= nintrs) { 1706 return -1; 1707 } 1708 for (ointr = intr + 1; ointr < nintrs; ointr++) { 1709 if (INTTYPE(ointr) != 0) 1710 continue; 1711 if (bus != SRCBUSID(ointr)) 1712 continue; 1713 if (bustype == PCI) { 1714 if (SRCBUSDEVICE(intr) != SRCBUSDEVICE(ointr)) 1715 continue; 1716 if (SRCBUSLINE(intr) != SRCBUSLINE(ointr)) 1717 continue; 1718 } 1719 if (bustype == ISA || bustype == EISA) { 1720 if (SRCBUSIRQ(intr) != SRCBUSIRQ(ointr)) 1721 continue; 1722 } 1723 if (INTPIN(intr) == INTPIN(ointr)) 1724 continue; 1725 break; 1726 } 1727 if (ointr >= nintrs) { 1728 return -1; 1729 } 1730 return INTIRQ(ointr); 1731} 1732#undef SRCBUSLINE 1733#undef SRCBUSDEVICE 1734#undef SRCBUSID 1735#undef SRCBUSIRQ 1736 1737#undef INTPIN 
1738#undef INTIRQ 1739#undef INTAPIC 1740#undef INTTYPE 1741 1742 1743/* 1744 * Reprogram the MB chipset to NOT redirect an ISA INTerrupt. 1745 * 1746 * XXX FIXME: 1747 * Exactly what this means is unclear at this point. It is a solution 1748 * for motherboards that redirect the MBIRQ0 pin. Generically a motherboard 1749 * could route any of the ISA INTs to upper (>15) IRQ values. But most would 1750 * NOT be redirected via MBIRQ0, thus "undirect()ing" them would NOT be an 1751 * option. 1752 */ 1753int 1754undirect_isa_irq(int rirq) 1755{ 1756#if defined(READY) 1757 if (bootverbose) 1758 printf("Freeing redirected ISA irq %d.\n", rirq); 1759 /** FIXME: tickle the MB redirector chip */ 1760 return -1; 1761#else 1762 if (bootverbose) 1763 printf("Freeing (NOT implemented) redirected ISA irq %d.\n", rirq); 1764 return 0; 1765#endif /* READY */ 1766} 1767 1768 1769/* 1770 * Reprogram the MB chipset to NOT redirect a PCI INTerrupt 1771 */ 1772int 1773undirect_pci_irq(int rirq) 1774{ 1775#if defined(READY) 1776 if (bootverbose) 1777 printf("Freeing redirected PCI irq %d.\n", rirq); 1778 1779 /** FIXME: tickle the MB redirector chip */ 1780 return -1; 1781#else 1782 if (bootverbose) 1783 printf("Freeing (NOT implemented) redirected PCI irq %d.\n", 1784 rirq); 1785 return 0; 1786#endif /* READY */ 1787} 1788 1789 1790/* 1791 * given a bus ID, return: 1792 * the bus type if found 1793 * -1 if NOT found 1794 */ 1795int 1796apic_bus_type(int id) 1797{ 1798 int x; 1799 1800 for (x = 0; x < mp_nbusses; ++x) 1801 if (bus_data[x].bus_id == id) 1802 return bus_data[x].bus_type; 1803 1804 return -1; 1805} 1806 1807 1808/* 1809 * given a LOGICAL APIC# and pin#, return: 1810 * the associated src bus ID if found 1811 * -1 if NOT found 1812 */ 1813int 1814apic_src_bus_id(int apic, int pin) 1815{ 1816 int x; 1817 1818 /* search each of the possible INTerrupt sources */ 1819 for (x = 0; x < nintrs; ++x) 1820 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1821 (pin == 
io_apic_ints[x].dst_apic_int)) 1822 return (io_apic_ints[x].src_bus_id); 1823 1824 return -1; /* NOT found */ 1825} 1826 1827 1828/* 1829 * given a LOGICAL APIC# and pin#, return: 1830 * the associated src bus IRQ if found 1831 * -1 if NOT found 1832 */ 1833int 1834apic_src_bus_irq(int apic, int pin) 1835{ 1836 int x; 1837 1838 for (x = 0; x < nintrs; x++) 1839 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1840 (pin == io_apic_ints[x].dst_apic_int)) 1841 return (io_apic_ints[x].src_bus_irq); 1842 1843 return -1; /* NOT found */ 1844} 1845 1846 1847/* 1848 * given a LOGICAL APIC# and pin#, return: 1849 * the associated INTerrupt type if found 1850 * -1 if NOT found 1851 */ 1852int 1853apic_int_type(int apic, int pin) 1854{ 1855 int x; 1856 1857 /* search each of the possible INTerrupt sources */ 1858 for (x = 0; x < nintrs; ++x) 1859 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1860 (pin == io_apic_ints[x].dst_apic_int)) 1861 return (io_apic_ints[x].int_type); 1862 1863 return -1; /* NOT found */ 1864} 1865 1866int 1867apic_irq(int apic, int pin) 1868{ 1869 int x; 1870 int res; 1871 1872 for (x = 0; x < nintrs; ++x) 1873 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1874 (pin == io_apic_ints[x].dst_apic_int)) { 1875 res = io_apic_ints[x].int_vector; 1876 if (res == 0xff) 1877 return -1; 1878 if (apic != int_to_apicintpin[res].ioapic) 1879 panic("apic_irq: inconsistent table"); 1880 if (pin != int_to_apicintpin[res].int_pin) 1881 panic("apic_irq inconsistent table (2)"); 1882 return res; 1883 } 1884 return -1; 1885} 1886 1887 1888/* 1889 * given a LOGICAL APIC# and pin#, return: 1890 * the associated trigger mode if found 1891 * -1 if NOT found 1892 */ 1893int 1894apic_trigger(int apic, int pin) 1895{ 1896 int x; 1897 1898 /* search each of the possible INTerrupt sources */ 1899 for (x = 0; x < nintrs; ++x) 1900 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1901 (pin == io_apic_ints[x].dst_apic_int)) 1902 return 
((io_apic_ints[x].int_flags >> 2) & 0x03); 1903 1904 return -1; /* NOT found */ 1905} 1906 1907 1908/* 1909 * given a LOGICAL APIC# and pin#, return: 1910 * the associated 'active' level if found 1911 * -1 if NOT found 1912 */ 1913int 1914apic_polarity(int apic, int pin) 1915{ 1916 int x; 1917 1918 /* search each of the possible INTerrupt sources */ 1919 for (x = 0; x < nintrs; ++x) 1920 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1921 (pin == io_apic_ints[x].dst_apic_int)) 1922 return (io_apic_ints[x].int_flags & 0x03); 1923 1924 return -1; /* NOT found */ 1925} 1926 1927 1928/* 1929 * set data according to MP defaults 1930 * FIXME: probably not complete yet... 1931 */ 1932static void 1933default_mp_table(int type) 1934{ 1935 int ap_cpu_id; 1936#if defined(APIC_IO) 1937 int io_apic_id; 1938 int pin; 1939#endif /* APIC_IO */ 1940 1941#if 0 1942 printf(" MP default config type: %d\n", type); 1943 switch (type) { 1944 case 1: 1945 printf(" bus: ISA, APIC: 82489DX\n"); 1946 break; 1947 case 2: 1948 printf(" bus: EISA, APIC: 82489DX\n"); 1949 break; 1950 case 3: 1951 printf(" bus: EISA, APIC: 82489DX\n"); 1952 break; 1953 case 4: 1954 printf(" bus: MCA, APIC: 82489DX\n"); 1955 break; 1956 case 5: 1957 printf(" bus: ISA+PCI, APIC: Integrated\n"); 1958 break; 1959 case 6: 1960 printf(" bus: EISA+PCI, APIC: Integrated\n"); 1961 break; 1962 case 7: 1963 printf(" bus: MCA+PCI, APIC: Integrated\n"); 1964 break; 1965 default: 1966 printf(" future type\n"); 1967 break; 1968 /* NOTREACHED */ 1969 } 1970#endif /* 0 */ 1971 1972 boot_cpu_id = (lapic.id & APIC_ID_MASK) >> 24; 1973 ap_cpu_id = (boot_cpu_id == 0) ? 
1 : 0; 1974 1975 /* BSP */ 1976 CPU_TO_ID(0) = boot_cpu_id; 1977 ID_TO_CPU(boot_cpu_id) = 0; 1978 1979 /* one and only AP */ 1980 CPU_TO_ID(1) = ap_cpu_id; 1981 ID_TO_CPU(ap_cpu_id) = 1; 1982 1983#if defined(APIC_IO) 1984 /* one and only IO APIC */ 1985 io_apic_id = (io_apic_read(0, IOAPIC_ID) & APIC_ID_MASK) >> 24; 1986 1987 /* 1988 * sanity check, refer to MP spec section 3.6.6, last paragraph 1989 * necessary as some hardware isn't properly setting up the IO APIC 1990 */ 1991#if defined(REALLY_ANAL_IOAPICID_VALUE) 1992 if (io_apic_id != 2) { 1993#else 1994 if ((io_apic_id == 0) || (io_apic_id == 1) || (io_apic_id == 15)) { 1995#endif /* REALLY_ANAL_IOAPICID_VALUE */ 1996 io_apic_set_id(0, 2); 1997 io_apic_id = 2; 1998 } 1999 IO_TO_ID(0) = io_apic_id; 2000 ID_TO_IO(io_apic_id) = 0; 2001#endif /* APIC_IO */ 2002 2003 /* fill out bus entries */ 2004 switch (type) { 2005 case 1: 2006 case 2: 2007 case 3: 2008 case 4: 2009 case 5: 2010 case 6: 2011 case 7: 2012 bus_data[0].bus_id = default_data[type - 1][1]; 2013 bus_data[0].bus_type = default_data[type - 1][2]; 2014 bus_data[1].bus_id = default_data[type - 1][3]; 2015 bus_data[1].bus_type = default_data[type - 1][4]; 2016 break; 2017 2018 /* case 4: case 7: MCA NOT supported */ 2019 default: /* illegal/reserved */ 2020 panic("BAD default MP config: %d", type); 2021 /* NOTREACHED */ 2022 } 2023 2024#if defined(APIC_IO) 2025 /* general cases from MP v1.4, table 5-2 */ 2026 for (pin = 0; pin < 16; ++pin) { 2027 io_apic_ints[pin].int_type = 0; 2028 io_apic_ints[pin].int_flags = 0x05; /* edge/active-hi */ 2029 io_apic_ints[pin].src_bus_id = 0; 2030 io_apic_ints[pin].src_bus_irq = pin; /* IRQ2 caught below */ 2031 io_apic_ints[pin].dst_apic_id = io_apic_id; 2032 io_apic_ints[pin].dst_apic_int = pin; /* 1-to-1 */ 2033 } 2034 2035 /* special cases from MP v1.4, table 5-2 */ 2036 if (type == 2) { 2037 io_apic_ints[2].int_type = 0xff; /* N/C */ 2038 io_apic_ints[13].int_type = 0xff; /* N/C */ 2039#if !defined(APIC_MIXED_MODE) 
2040 /** FIXME: ??? */ 2041 panic("sorry, can't support type 2 default yet"); 2042#endif /* APIC_MIXED_MODE */ 2043 } 2044 else 2045 io_apic_ints[2].src_bus_irq = 0; /* ISA IRQ0 is on APIC INT 2 */ 2046 2047 if (type == 7) 2048 io_apic_ints[0].int_type = 0xff; /* N/C */ 2049 else 2050 io_apic_ints[0].int_type = 3; /* vectored 8259 */ 2051#endif /* APIC_IO */ 2052} 2053 2054 2055/* 2056 * start each AP in our list 2057 */ 2058static int 2059start_all_aps(u_int boot_addr) 2060{ 2061 int x, i, pg; 2062#ifndef PC98 2063 u_char mpbiosreason; 2064#endif 2065 u_long mpbioswarmvec; 2066 struct pcpu *pc; 2067 char *stack; 2068 uintptr_t kptbase; 2069 2070 POSTCODE(START_ALL_APS_POST); 2071 2072 mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN); 2073 2074 /* initialize BSP's local APIC */ 2075 apic_initialize(); 2076 bsp_apic_ready = 1; 2077 2078 /* install the AP 1st level boot code */ 2079 install_ap_tramp(boot_addr); 2080 2081 2082 /* save the current value of the warm-start vector */ 2083 mpbioswarmvec = *((u_long *) WARMBOOT_OFF); 2084#ifndef PC98 2085 outb(CMOS_REG, BIOS_RESET); 2086 mpbiosreason = inb(CMOS_DATA); 2087#endif 2088 2089 /* set up temporary P==V mapping for AP boot */ 2090 /* XXX this is a hack, we should boot the AP on its own stack/PTD */ 2091 kptbase = (uintptr_t)(void *)KPTphys; 2092 for (x = 0; x < NKPT; x++) 2093 PTD[x] = (pd_entry_t)(PG_V | PG_RW | 2094 ((kptbase + x * PAGE_SIZE) & PG_FRAME)); 2095 invltlb(); 2096 2097 /* start each AP */ 2098 for (x = 1; x <= mp_naps; ++x) { 2099 2100 /* This is a bit verbose, it will go away soon. 
*/ 2101 2102 /* first page of AP's private space */ 2103 pg = x * i386_btop(sizeof(struct privatespace)); 2104 2105 /* allocate a new private data page */ 2106 pc = (struct pcpu *)kmem_alloc(kernel_map, PAGE_SIZE); 2107 2108 /* wire it into the private page table page */ 2109 SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(pc)); 2110 2111 /* allocate and set up an idle stack data page */ 2112 stack = (char *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE); /* XXXKSE */ 2113 for (i = 0; i < KSTACK_PAGES; i++) 2114 SMPpt[pg + 1 + i] = (pt_entry_t) 2115 (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack)); 2116 2117 /* prime data page for it to use */ 2118 pcpu_init(pc, x, sizeof(struct pcpu)); 2119 2120 /* setup a vector to our boot code */ 2121 *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET; 2122 *((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4); 2123#ifndef PC98 2124 outb(CMOS_REG, BIOS_RESET); 2125 outb(CMOS_DATA, BIOS_WARM); /* 'warm-start' */ 2126#endif 2127 2128 bootSTK = &SMP_prvspace[x].idlekstack[KSTACK_PAGES * PAGE_SIZE]; 2129 bootAP = x; 2130 2131 /* attempt to start the Application Processor */ 2132 CHECK_INIT(99); /* setup checkpoints */ 2133 if (!start_ap(x, boot_addr)) { 2134 printf("AP #%d (PHY# %d) failed!\n", x, CPU_TO_ID(x)); 2135 CHECK_PRINT("trace"); /* show checkpoints */ 2136 /* better panic as the AP may be running loose */ 2137 printf("panic y/n? 
[y] "); 2138 if (cngetc() != 'n') 2139 panic("bye-bye"); 2140 } 2141 CHECK_PRINT("trace"); /* show checkpoints */ 2142 2143 /* record its version info */ 2144 cpu_apic_versions[x] = cpu_apic_versions[0]; 2145 2146 all_cpus |= (1 << x); /* record AP in CPU map */ 2147 } 2148 2149 /* build our map of 'other' CPUs */ 2150 PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask)); 2151 2152 /* fill in our (BSP) APIC version */ 2153 cpu_apic_versions[0] = lapic.version; 2154 2155 /* restore the warmstart vector */ 2156 *(u_long *) WARMBOOT_OFF = mpbioswarmvec; 2157#ifndef PC98 2158 outb(CMOS_REG, BIOS_RESET); 2159 outb(CMOS_DATA, mpbiosreason); 2160#endif 2161 2162 /* 2163 * Set up the idle context for the BSP. Similar to above except 2164 * that some was done by locore, some by pmap.c and some is implicit 2165 * because the BSP is cpu#0 and the page is initially zero, and also 2166 * because we can refer to variables by name on the BSP.. 2167 */ 2168 2169 /* Allocate and setup BSP idle stack */ 2170 stack = (char *)kmem_alloc(kernel_map, KSTACK_PAGES * PAGE_SIZE); 2171 for (i = 0; i < KSTACK_PAGES; i++) 2172 SMPpt[1 + i] = (pt_entry_t) 2173 (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack)); 2174 2175 for (x = 0; x < NKPT; x++) 2176 PTD[x] = 0; 2177 pmap_set_opt(); 2178 2179 /* number of APs actually started */ 2180 return mp_ncpus - 1; 2181} 2182 2183 2184/* 2185 * load the 1st level AP boot code into base memory. 
2186 */ 2187 2188/* targets for relocation */ 2189extern void bigJump(void); 2190extern void bootCodeSeg(void); 2191extern void bootDataSeg(void); 2192extern void MPentry(void); 2193extern u_int MP_GDT; 2194extern u_int mp_gdtbase; 2195 2196static void 2197install_ap_tramp(u_int boot_addr) 2198{ 2199 int x; 2200 int size = *(int *) ((u_long) & bootMP_size); 2201 u_char *src = (u_char *) ((u_long) bootMP); 2202 u_char *dst = (u_char *) boot_addr + KERNBASE; 2203 u_int boot_base = (u_int) bootMP; 2204 u_int8_t *dst8; 2205 u_int16_t *dst16; 2206 u_int32_t *dst32; 2207 2208 POSTCODE(INSTALL_AP_TRAMP_POST); 2209 2210 for (x = 0; x < size; ++x) 2211 *dst++ = *src++; 2212 2213 /* 2214 * modify addresses in code we just moved to basemem. unfortunately we 2215 * need fairly detailed info about mpboot.s for this to work. changes 2216 * to mpboot.s might require changes here. 2217 */ 2218 2219 /* boot code is located in KERNEL space */ 2220 dst = (u_char *) boot_addr + KERNBASE; 2221 2222 /* modify the lgdt arg */ 2223 dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base)); 2224 *dst32 = boot_addr + ((u_int) & MP_GDT - boot_base); 2225 2226 /* modify the ljmp target for MPentry() */ 2227 dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1); 2228 *dst32 = ((u_int) MPentry - KERNBASE); 2229 2230 /* modify the target for boot code segment */ 2231 dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base)); 2232 dst8 = (u_int8_t *) (dst16 + 1); 2233 *dst16 = (u_int) boot_addr & 0xffff; 2234 *dst8 = ((u_int) boot_addr >> 16) & 0xff; 2235 2236 /* modify the target for boot data segment */ 2237 dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base)); 2238 dst8 = (u_int8_t *) (dst16 + 1); 2239 *dst16 = (u_int) boot_addr & 0xffff; 2240 *dst8 = ((u_int) boot_addr >> 16) & 0xff; 2241} 2242 2243 2244/* 2245 * this function starts the AP (application processor) identified 2246 * by the APIC ID 'physicalCpu'. 
It does quite a "song and dance" 2247 * to accomplish this. This is necessary because of the nuances 2248 * of the different hardware we might encounter. It ain't pretty, 2249 * but it seems to work. 2250 */ 2251static int 2252start_ap(int logical_cpu, u_int boot_addr) 2253{ 2254 int physical_cpu; 2255 int vector; 2256 int cpus; 2257 u_long icr_lo, icr_hi; 2258 2259 POSTCODE(START_AP_POST); 2260 2261 /* get the PHYSICAL APIC ID# */ 2262 physical_cpu = CPU_TO_ID(logical_cpu); 2263 2264 /* calculate the vector */ 2265 vector = (boot_addr >> 12) & 0xff; 2266 2267 /* used as a watchpoint to signal AP startup */ 2268 cpus = mp_ncpus; 2269 2270 /* 2271 * first we do an INIT/RESET IPI this INIT IPI might be run, reseting 2272 * and running the target CPU. OR this INIT IPI might be latched (P5 2273 * bug), CPU waiting for STARTUP IPI. OR this INIT IPI might be 2274 * ignored. 2275 */ 2276 2277 /* setup the address for the target AP */ 2278 icr_hi = lapic.icr_hi & ~APIC_ID_MASK; 2279 icr_hi |= (physical_cpu << 24); 2280 lapic.icr_hi = icr_hi; 2281 2282 /* setup common fields for subsequent IPIs */ 2283 icr_lo = lapic.icr_lo & APIC_ICRLO_RESV_MASK; 2284 icr_lo |= APIC_DESTMODE_PHY; 2285 2286 /* do an INIT IPI: assert RESET */ 2287 lapic.icr_lo = icr_lo | APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE | 2288 APIC_LEVEL_ASSERT | APIC_DELMODE_INIT; 2289 2290 /* wait for pending status end */ 2291 while (lapic.icr_lo & APIC_DELSTAT_MASK) 2292 /* spin */ ; 2293 2294 /* do an INIT IPI: deassert RESET */ 2295 lapic.icr_lo = icr_lo | APIC_DEST_ALLESELF | APIC_TRIGMOD_LEVEL | 2296 APIC_LEVEL_DEASSERT | APIC_DELMODE_INIT; 2297 2298 /* wait for pending status end */ 2299 u_sleep(10000); /* wait ~10mS */ 2300 while (lapic.icr_lo & APIC_DELSTAT_MASK) 2301 /* spin */ ; 2302 2303 /* 2304 * next we do a STARTUP IPI: the previous INIT IPI might still be 2305 * latched, (P5 bug) this 1st STARTUP would then terminate 2306 * immediately, and the previously started INIT IPI would continue. 
OR 2307 * the previous INIT IPI has already run. and this STARTUP IPI will 2308 * run. OR the previous INIT IPI was ignored. and this STARTUP IPI 2309 * will run. 2310 */ 2311 2312 /* do a STARTUP IPI */ 2313 lapic.icr_lo = icr_lo | APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE | 2314 APIC_LEVEL_DEASSERT | APIC_DELMODE_STARTUP | vector; 2315 while (lapic.icr_lo & APIC_DELSTAT_MASK) 2316 /* spin */ ; 2317 u_sleep(200); /* wait ~200uS */ 2318 2319 /* 2320 * finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF 2321 * the previous STARTUP IPI was cancelled by a latched INIT IPI. OR 2322 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is 2323 * recognized after hardware RESET or INIT IPI. 2324 */ 2325 2326 lapic.icr_lo = icr_lo | APIC_DEST_DESTFLD | APIC_TRIGMOD_EDGE | 2327 APIC_LEVEL_DEASSERT | APIC_DELMODE_STARTUP | vector; 2328 while (lapic.icr_lo & APIC_DELSTAT_MASK) 2329 /* spin */ ; 2330 u_sleep(200); /* wait ~200uS */ 2331 2332 /* wait for it to start */ 2333 set_apic_timer(5000000);/* == 5 seconds */ 2334 while (read_apic_timer()) 2335 if (mp_ncpus > cpus) 2336 return 1; /* return SUCCESS */ 2337 2338 return 0; /* return FAILURE */ 2339} 2340 2341#if defined(APIC_IO) 2342 2343#ifdef COUNT_XINVLTLB_HITS 2344u_int xhits_gbl[MAXCPU]; 2345u_int xhits_pg[MAXCPU]; 2346u_int xhits_rng[MAXCPU]; 2347SYSCTL_NODE(_debug, OID_AUTO, xhits, CTLFLAG_RW, 0, ""); 2348SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, global, CTLFLAG_RW, &xhits_gbl, 2349 sizeof(xhits_gbl), "IU", ""); 2350SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, page, CTLFLAG_RW, &xhits_pg, 2351 sizeof(xhits_pg), "IU", ""); 2352SYSCTL_OPAQUE(_debug_xhits, OID_AUTO, range, CTLFLAG_RW, &xhits_rng, 2353 sizeof(xhits_rng), "IU", ""); 2354 2355u_int ipi_global; 2356u_int ipi_page; 2357u_int ipi_range; 2358u_int ipi_range_size; 2359SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_global, CTLFLAG_RW, &ipi_global, 0, ""); 2360SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_page, CTLFLAG_RW, &ipi_page, 0, ""); 2361SYSCTL_INT(_debug_xhits, 
    OID_AUTO, ipi_range, CTLFLAG_RW, &ipi_range, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_range_size, CTLFLAG_RW, &ipi_range_size,
    0, "");

u_int ipi_masked_global;
u_int ipi_masked_page;
u_int ipi_masked_range;
u_int ipi_masked_range_size;
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_global, CTLFLAG_RW,
    &ipi_masked_global, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_page, CTLFLAG_RW,
    &ipi_masked_page, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_range, CTLFLAG_RW,
    &ipi_masked_range, 0, "");
SYSCTL_INT(_debug_xhits, OID_AUTO, ipi_masked_range_size, CTLFLAG_RW,
    &ipi_masked_range_size, 0, "");
#endif

/*
 * Flush the TLB on all other CPU's
 *
 * Publishes addr1/addr2 in the smp_tlb_addr* globals under smp_tlb_mtx,
 * sends 'vector' to every other CPU, then spins until smp_tlb_wait
 * (presumably incremented by the IPI handlers — they are not visible in
 * this file region) reaches the number of other CPUs.  Interrupts must
 * be enabled on entry or we would spin forever, hence the PSL_I panic.
 */
static void
smp_tlb_shootdown(u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	u_int ncpu;
	register_t eflags;

	ncpu = mp_ncpus - 1;	/* does not shootdown self */
	if (ncpu < 1)
		return;		/* no other cpus */
	eflags = read_eflags();
	if ((eflags & PSL_I) == 0)
		/* NOTE(review): message says "smp_ipi_shootdown", not this function's name */
		panic("absolutely cannot call smp_ipi_shootdown with interrupts already disabled");
	mtx_lock_spin(&smp_tlb_mtx);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	ipi_all_but_self(vector);
	/* wait for all the other CPUs to acknowledge the shootdown */
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	mtx_unlock_spin(&smp_tlb_mtx);
}

/*
 * This is about as magic as it gets.  fortune(1) has got similar code
 * for reversing bits in a word.  Who thinks up this stuff??
 *
 * Yes, it does appear to be consistently faster than:
 * while (i = ffs(m)) {
 *	m >>= i;
 *	bits++;
 * }
 * and
 * while (lsb = (m & -m)) {	// This is magic too
 *	m &= ~lsb;		// or: m ^= lsb
 *	bits++;
 * }
 * Both of these latter forms do some very strange things on gcc-3.1 with
 * -mcpu=pentiumpro and/or -march=pentiumpro and/or -O or -O2.
 * There is probably an SSE or MMX popcnt instruction.
 *
 * I wonder if this should be in libkern?
 *
 * XXX Stop the presses!  Another one:
 * static __inline u_int32_t
 * popcnt1(u_int32_t v)
 * {
 *	v -= ((v >> 1) & 0x55555555);
 *	v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
 *	v = (v + (v >> 4)) & 0x0F0F0F0F;
 *	return (v * 0x01010101) >> 24;
 * }
 * The downside is that it has a multiply.  With a pentium3 with
 * -mcpu=pentiumpro and -march=pentiumpro then gcc-3.1 will use
 * an imull, and in that case it is faster.  In most other cases
 * it appears slightly slower.
 */
/*
 * Return the number of set bits in m, by parallel pairwise summation:
 * each step sums adjacent fields of twice the previous width.
 */
static __inline u_int32_t
popcnt(u_int32_t m)
{

	m = (m & 0x55555555) + ((m & 0xaaaaaaaa) >> 1);
	m = (m & 0x33333333) + ((m & 0xcccccccc) >> 2);
	m = (m & 0x0f0f0f0f) + ((m & 0xf0f0f0f0) >> 4);
	m = (m & 0x00ff00ff) + ((m & 0xff00ff00) >> 8);
	m = (m & 0x0000ffff) + ((m & 0xffff0000) >> 16);
	return m;
}

/*
 * As smp_tlb_shootdown(), but interrupt only the CPUs named in 'mask';
 * mask == (u_int)-1 means "all but self".  The calling CPU's own bit is
 * always stripped from the mask first.  Must be called with interrupts
 * enabled (PSL_I set) since we spin waiting for the targets.
 */
static void
smp_targeted_tlb_shootdown(u_int mask, u_int vector, vm_offset_t addr1, vm_offset_t addr2)
{
	int ncpu, othercpus;
	register_t eflags;

	othercpus = mp_ncpus - 1;
	if (mask == (u_int)-1) {
		ncpu = othercpus;
		if (ncpu < 1)
			return;
	} else {
		/* XXX there should be a pcpu self mask */
		mask &= ~(1 << PCPU_GET(cpuid));
		if (mask == 0)
			return;
		ncpu = popcnt(mask);
		if (ncpu > othercpus) {
			/* XXX this should be a panic offence */
			printf("SMP: tlb shootdown to %d other cpus (only have %d)\n",
			    ncpu, othercpus);
			ncpu = othercpus;
		}
		/* XXX should be a panic, implied by mask == 0 above */
		if (ncpu < 1)
			return;
	}
	eflags = read_eflags();
	if ((eflags & PSL_I) == 0)
		/* NOTE(review): message says "smp_targeted_ipi_shootdown", not this function's name */
		panic("absolutely cannot call smp_targeted_ipi_shootdown with interrupts already disabled");
	mtx_lock_spin(&smp_tlb_mtx);
	smp_tlb_addr1 = addr1;
	smp_tlb_addr2 = addr2;
	atomic_store_rel_int(&smp_tlb_wait, 0);
	if (mask == (u_int)-1)
		ipi_all_but_self(vector);
	else
		ipi_selected(mask, vector);
	/* wait for all the targeted CPUs to acknowledge the shootdown */
	while (smp_tlb_wait < ncpu)
		ia32_pause();
	mtx_unlock_spin(&smp_tlb_mtx);
}
#endif

/*
 * Flush the whole TLB on all other CPUs.  A no-op before SMP startup
 * completes, or when compiled without APIC_IO.
 */
void
smp_invltlb(void)
{
#if defined(APIC_IO)
	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLTLB, 0, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_global++;
#endif
	}
#endif  /* APIC_IO */
}

/*
 * Invalidate the TLB entry for a single page 'addr' on all other CPUs.
 */
void
smp_invlpg(vm_offset_t addr)
{
#if defined(APIC_IO)
	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLPG, addr, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_page++;
#endif
	}
#endif  /* APIC_IO */
}

/*
 * Invalidate the TLB entries for the range [addr1, addr2) on all other
 * CPUs.
 */
void
smp_invlpg_range(vm_offset_t addr1, vm_offset_t addr2)
{
#if defined(APIC_IO)
	if (smp_started) {
		smp_tlb_shootdown(IPI_INVLRNG, addr1, addr2);
#ifdef COUNT_XINVLTLB_HITS
		ipi_range++;
		ipi_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
	}
#endif  /* APIC_IO */
}

/*
 * As smp_invltlb(), but only on the CPUs in 'mask'.
 */
void
smp_masked_invltlb(u_int mask)
{
#if defined(APIC_IO)
	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLTLB, 0, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_masked_global++;
#endif
	}
#endif  /* APIC_IO */
}

/*
 * As smp_invlpg(), but only on the CPUs in 'mask'.
 */
void
smp_masked_invlpg(u_int mask, vm_offset_t addr)
{
#if defined(APIC_IO)
	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLPG, addr, 0);
#ifdef COUNT_XINVLTLB_HITS
		ipi_masked_page++;
#endif
	}
#endif  /* APIC_IO */
}

/*
 * As smp_invlpg_range(), but only on the CPUs in 'mask'.
 */
void
smp_masked_invlpg_range(u_int mask, vm_offset_t addr1, vm_offset_t addr2)
{
#if defined(APIC_IO)
	if (smp_started) {
		smp_targeted_tlb_shootdown(mask, IPI_INVLRNG, addr1, addr2);
#ifdef COUNT_XINVLTLB_HITS
		ipi_masked_range++;
		ipi_masked_range_size += (addr2 - addr1) / PAGE_SIZE;
#endif
	}
#endif  /* APIC_IO */
}


/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the AP's out of the pen.
 *
 * Runs on each AP: waits for the BSP to set aps_ready, initializes the
 * per-CPU hardware state, announces itself under ap_boot_mtx, and once
 * every CPU has checked in (smp_cpus == mp_ncpus) sets smp_started and
 * enters the scheduler via cpu_throw().  Never returns.
 */
void
ap_init(void)
{
	u_int apic_id;

	/* spin until all the AP's are ready */
	while (!aps_ready)
		ia32_pause();

	/* BSP may have changed PTD while we were waiting */
	invltlb();

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
	lidt(&r_idt);
#endif

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up FPU state on the AP */
	npxinit(__INITIAL_NPXCW__);

	/* set up SSE registers */
	enable_sse();

	/*
	 * A quick check from sanity claus: the logical id derived from
	 * bits 24-27 of the local APIC id register must match our cpuid.
	 */
	apic_id = (apic_id_to_logical[(lapic.id & 0x0f000000) >> 24]);
	if (PCPU_GET(cpuid) != apic_id) {
		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
		printf("SMP: apic_id = %d\n", apic_id);
		printf("PTD[MPPTDI] = %#jx\n", (uintmax_t)PTD[MPPTDI]);
		panic("cpuid mismatch! boom!!");
	}

	/* Init local apic for irq's */
	apic_initialize();

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	mtx_lock_spin(&ap_boot_mtx);

	smp_cpus++;

	CTR1(KTR_SMP, "SMP: AP CPU #%d Launched", PCPU_GET(cpuid));
	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~PCPU_GET(cpumask));

	if (bootverbose)
		apic_dump("ap_init()");

	/* the last AP to arrive opens the gates for everyone */
	if (smp_cpus == mp_ncpus) {
		/* enable IPI's, tlb shootdown, freezes etc */
		atomic_store_rel_int(&smp_started, 1);
		smp_active = 1;	 /* historic */
	}

	mtx_unlock_spin(&ap_boot_mtx);

	/* wait until all the AP's are up */
	while (smp_started == 0)
		ia32_pause();

	/* ok, now grab sched_lock and enter the scheduler */
	mtx_lock_spin(&sched_lock);

	/* record switch time/ticks before handing control to a thread */
	binuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	cpu_throw(NULL, choosethread());	/* doesn't return */

	panic("scheduler returned us to %s", __func__);
}

/*
 * For statclock, we send an IPI to all CPU's to have them call this
 * function.
 *
 * WARNING! unpend() will call statclock() directly and skip this
 * routine.
 */
void
forwarded_statclock(struct clockframe frame)
{

	/* mirror the profiling/statistics work done by the clock CPU */
	if (profprocs != 0)
		profclock(&frame);
	if (pscnt == psdiv)
		statclock(&frame);
}

/*
 * Sender side: IPI every other running (not stopped) CPU so that it
 * executes forwarded_statclock().  No-op before SMP start, while cold,
 * or during panic.
 */
void
forward_statclock(void)
{
	int map;

	CTR0(KTR_SMP, "forward_statclock");

	if (!smp_started || cold || panicstr)
		return;

	map = PCPU_GET(other_cpus) & ~stopped_cpus ;
	if (map != 0)
		ipi_selected(map, IPI_STATCLOCK);
}

/*
 * For each hardclock(), we send an IPI to all other CPU's to have them
 * execute this function.  It would be nice to reduce contention on
 * sched_lock if we could simply peek at the CPU to determine the user/kernel
 * state and call hardclock_process() on the CPU receiving the clock interrupt
 * and then just use a simple IPI to handle any ast's if needed.
 *
 * WARNING! unpend() will call hardclock_process() directly and skip this
 * routine.
 */
void
forwarded_hardclock(struct clockframe frame)
{

	hardclock_process(&frame);
}

/*
 * Sender side: IPI every other running (not stopped) CPU so that it
 * executes forwarded_hardclock().  No-op before SMP start, while cold,
 * or during panic.
 */
void
forward_hardclock(void)
{
	u_int map;

	CTR0(KTR_SMP, "forward_hardclock");

	if (!smp_started || cold || panicstr)
		return;

	map = PCPU_GET(other_cpus) & ~stopped_cpus ;
	if (map != 0)
		ipi_selected(map, IPI_HARDCLOCK);
}

#ifdef APIC_INTR_REORDER
/*
 * Maintain mapping from softintr vector to isr bit in local apic.
 *
 * The local APIC ISR is a set of 32-bit registers spaced 16 bytes
 * apart; (vector>>5)<<2 selects the register (in 4-byte units from
 * isr0) and (vector & 31) the bit within it.
 */
void
set_lapic_isrloc(int intr, int vector)
{
	/*
	 * NOTE(review): bound allows intr == 32; if apic_isrbit_location[]
	 * has only 32 entries this is off by one — verify the array size.
	 */
	if (intr < 0 || intr > 32)
		panic("set_apic_isrloc: bad intr argument: %d",intr);
	if (vector < ICU_OFFSET || vector > 255)
		panic("set_apic_isrloc: bad vector argument: %d",vector);
	apic_isrbit_location[intr].location = &lapic.isr0 + ((vector>>5)<<2);
	apic_isrbit_location[intr].bit = (1<<(vector & 31));
}
#endif

/*
 * send an IPI to a set of cpus.
 */
void
ipi_selected(u_int32_t cpus, u_int ipi)
{

	CTR3(KTR_SMP, "%s: cpus: %x ipi: %x", __func__, cpus, ipi);
	selected_apic_ipi(cpus, ipi, APIC_DELMODE_FIXED);
}

/*
 * send an IPI INTerrupt containing 'vector' to all CPUs, including myself
 */
void
ipi_all(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	apic_ipi(APIC_DEST_ALLISELF, ipi, APIC_DELMODE_FIXED);
}

/*
 * send an IPI to all CPUs EXCEPT myself
 */
void
ipi_all_but_self(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	apic_ipi(APIC_DEST_ALLESELF, ipi, APIC_DELMODE_FIXED);
}

/*
 * send an IPI to myself
 */
void
ipi_self(u_int ipi)
{

	CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi);
	apic_ipi(APIC_DEST_SELF, ipi, APIC_DELMODE_FIXED);
}

/*
 * SYSINIT hook: release the APs spinning in ap_init() (by setting
 * aps_ready) and wait until they have all come up.
 */
static void
release_aps(void *dummy __unused)
{

	if (mp_ncpus == 1)
		return;		/* uniprocessor: nothing to release */
	mtx_lock_spin(&sched_lock);
	/* let the APs spinning on aps_ready in ap_init() proceed */
	atomic_store_rel_int(&aps_ready, 1);
	while (smp_started == 0)
		ia32_pause();
	mtx_unlock_spin(&sched_lock);
}

SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);

/*
 * Mask of CPUs that should sit halted (see mp_grab_cpu_hlt()); the
 * sysctl handlers below never let it cover every CPU — CPU 0's bit is
 * cleared whenever the mask would include all of all_cpus.
 */
static int hlt_cpus_mask;
static int hlt_logical_cpus = 1;
static struct sysctl_ctx_list logical_cpu_clist;

/*
 * Sysctl handler for machdep.hlt_cpus: get/set hlt_cpus_mask.  Keeps
 * hlt_logical_cpus in sync (set iff the new mask covers the whole
 * logical_cpus_mask) and refuses to halt every CPU at once.
 */
static int
sysctl_hlt_cpus(SYSCTL_HANDLER_ARGS)
{
	u_int mask;
	int error;

	mask = hlt_cpus_mask;
	error = sysctl_handle_int(oidp, &mask, 0, req);
	if (error || !req->newptr)
		return (error);		/* read-only request, or error */

	if (logical_cpus_mask != 0 &&
	    (mask & logical_cpus_mask) == logical_cpus_mask)
		hlt_logical_cpus = 1;
	else
		hlt_logical_cpus = 0;

	/* never allow all CPUs to halt: keep CPU 0 running */
	if ((mask & all_cpus) == all_cpus)
		mask &= ~(1<<0);
	hlt_cpus_mask = mask;
	return (error);
}
SYSCTL_PROC(_machdep, OID_AUTO, hlt_cpus, CTLTYPE_INT|CTLFLAG_RW,
    0, 0, sysctl_hlt_cpus, "IU", "");

/*
 * Sysctl handler for machdep.hlt_logical_cpus: halt (1) or resume (0)
 * all the CPUs in logical_cpus_mask as a group, by editing
 * hlt_cpus_mask accordingly.
 */
static int
sysctl_hlt_logical_cpus(SYSCTL_HANDLER_ARGS)
{
	int disable, error;

	disable = hlt_logical_cpus;
	error = sysctl_handle_int(oidp, &disable, 0, req);
	if (error || !req->newptr)
		return (error);		/* read-only request, or error */

	if (disable)
		hlt_cpus_mask |= logical_cpus_mask;
	else
		hlt_cpus_mask &= ~logical_cpus_mask;

	/* never allow all CPUs to halt: keep CPU 0 running */
	if ((hlt_cpus_mask & all_cpus) == all_cpus)
		hlt_cpus_mask &= ~(1<<0);

	hlt_logical_cpus = disable;
	return (error);
}

/*
 * SYSINIT hook: if any logical CPUs exist (logical_cpus_mask != 0),
 * honour the machdep.hlt_logical_cpus loader tunable and register the
 * machdep sysctls that control halting them.
 */
static void
cpu_hlt_setup(void *dummy __unused)
{

	if (logical_cpus_mask != 0) {
		TUNABLE_INT_FETCH("machdep.hlt_logical_cpus",
		    &hlt_logical_cpus);
		sysctl_ctx_init(&logical_cpu_clist);
		SYSCTL_ADD_PROC(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "hlt_logical_cpus", CTLTYPE_INT|CTLFLAG_RW, 0, 0,
		    sysctl_hlt_logical_cpus, "IU", "");
		SYSCTL_ADD_UINT(&logical_cpu_clist,
		    SYSCTL_STATIC_CHILDREN(_machdep), OID_AUTO,
		    "logical_cpus_mask", CTLTYPE_INT|CTLFLAG_RD,
		    &logical_cpus_mask, 0, "");

		if (hlt_logical_cpus)
			hlt_cpus_mask |= logical_cpus_mask;
	}
}
SYSINIT(cpu_hlt, SI_SUB_SMP, SI_ORDER_ANY, cpu_hlt_setup, NULL);

/*
 * If this CPU's bit is set in hlt_cpus_mask, sit in a sti;hlt loop
 * until the bit is cleared.  Returns non-zero iff we were halted on
 * entry (i.e. our bit was set when called).
 */
int
mp_grab_cpu_hlt(void)
{
	u_int mask = PCPU_GET(cpumask);
	int retval;

	retval = mask & hlt_cpus_mask;
	while (mask & hlt_cpus_mask)
		__asm __volatile("sti; hlt" : : : "memory");
	return (retval);
}