/* mp_machdep.c revision 71727 */
1/* 2 * Copyright (c) 1996, by Steve Passe 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. The name of the developer may NOT be used to endorse or promote products 11 * derived from this software without specific prior written permission. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 
24 * 25 * $FreeBSD: head/sys/i386/i386/mp_machdep.c 71727 2001-01-28 01:07:54Z tegge $ 26 */ 27 28#include "opt_cpu.h" 29#include "opt_user_ldt.h" 30 31#ifdef SMP 32#include <machine/smptests.h> 33#else 34#error 35#endif 36 37#include <sys/param.h> 38#include <sys/bus.h> 39#include <sys/systm.h> 40#include <sys/kernel.h> 41#include <sys/proc.h> 42#include <sys/sysctl.h> 43#include <sys/malloc.h> 44#include <sys/memrange.h> 45#include <sys/mutex.h> 46#ifdef BETTER_CLOCK 47#include <sys/dkstat.h> 48#endif 49#include <sys/cons.h> /* cngetc() */ 50 51#include <vm/vm.h> 52#include <vm/vm_param.h> 53#include <vm/pmap.h> 54#include <vm/vm_kern.h> 55#include <vm/vm_extern.h> 56#ifdef BETTER_CLOCK 57#include <sys/lock.h> 58#include <vm/vm_map.h> 59#include <sys/user.h> 60#ifdef GPROF 61#include <sys/gmon.h> 62#endif 63#endif 64 65#include <machine/smp.h> 66#include <machine/apic.h> 67#include <machine/atomic.h> 68#include <machine/cpufunc.h> 69#include <machine/mpapic.h> 70#include <machine/psl.h> 71#include <machine/segments.h> 72#include <machine/smptests.h> /** TEST_DEFAULT_CONFIG, TEST_TEST1 */ 73#include <machine/tss.h> 74#include <machine/specialreg.h> 75#include <machine/globaldata.h> 76 77#if defined(APIC_IO) 78#include <machine/md_var.h> /* setidt() */ 79#include <i386/isa/icu.h> /* IPIs */ 80#include <i386/isa/intr_machdep.h> /* IPIs */ 81#endif /* APIC_IO */ 82 83#if defined(TEST_DEFAULT_CONFIG) 84#define MPFPS_MPFB1 TEST_DEFAULT_CONFIG 85#else 86#define MPFPS_MPFB1 mpfps->mpfb1 87#endif /* TEST_DEFAULT_CONFIG */ 88 89#define WARMBOOT_TARGET 0 90#define WARMBOOT_OFF (KERNBASE + 0x0467) 91#define WARMBOOT_SEG (KERNBASE + 0x0469) 92 93#ifdef PC98 94#define BIOS_BASE (0xe8000) 95#define BIOS_SIZE (0x18000) 96#else 97#define BIOS_BASE (0xf0000) 98#define BIOS_SIZE (0x10000) 99#endif 100#define BIOS_COUNT (BIOS_SIZE/4) 101 102#define CMOS_REG (0x70) 103#define CMOS_DATA (0x71) 104#define BIOS_RESET (0x0f) 105#define BIOS_WARM (0x0a) 106 107#define PROCENTRY_FLAG_EN 
0x01 108#define PROCENTRY_FLAG_BP 0x02 109#define IOAPICENTRY_FLAG_EN 0x01 110 111 112/* MP Floating Pointer Structure */ 113typedef struct MPFPS { 114 char signature[4]; 115 void *pap; 116 u_char length; 117 u_char spec_rev; 118 u_char checksum; 119 u_char mpfb1; 120 u_char mpfb2; 121 u_char mpfb3; 122 u_char mpfb4; 123 u_char mpfb5; 124} *mpfps_t; 125 126/* MP Configuration Table Header */ 127typedef struct MPCTH { 128 char signature[4]; 129 u_short base_table_length; 130 u_char spec_rev; 131 u_char checksum; 132 u_char oem_id[8]; 133 u_char product_id[12]; 134 void *oem_table_pointer; 135 u_short oem_table_size; 136 u_short entry_count; 137 void *apic_address; 138 u_short extended_table_length; 139 u_char extended_table_checksum; 140 u_char reserved; 141} *mpcth_t; 142 143 144typedef struct PROCENTRY { 145 u_char type; 146 u_char apic_id; 147 u_char apic_version; 148 u_char cpu_flags; 149 u_long cpu_signature; 150 u_long feature_flags; 151 u_long reserved1; 152 u_long reserved2; 153} *proc_entry_ptr; 154 155typedef struct BUSENTRY { 156 u_char type; 157 u_char bus_id; 158 char bus_type[6]; 159} *bus_entry_ptr; 160 161typedef struct IOAPICENTRY { 162 u_char type; 163 u_char apic_id; 164 u_char apic_version; 165 u_char apic_flags; 166 void *apic_address; 167} *io_apic_entry_ptr; 168 169typedef struct INTENTRY { 170 u_char type; 171 u_char int_type; 172 u_short int_flags; 173 u_char src_bus_id; 174 u_char src_bus_irq; 175 u_char dst_apic_id; 176 u_char dst_apic_int; 177} *int_entry_ptr; 178 179/* descriptions of MP basetable entries */ 180typedef struct BASETABLE_ENTRY { 181 u_char type; 182 u_char length; 183 char name[16]; 184} basetable_entry; 185 186/* 187 * this code MUST be enabled here and in mpboot.s. 188 * it follows the very early stages of AP boot by placing values in CMOS ram. 189 * it NORMALLY will never be needed and thus the primitive method for enabling. 
190 * 191#define CHECK_POINTS 192 */ 193 194#if defined(CHECK_POINTS) && !defined(PC98) 195#define CHECK_READ(A) (outb(CMOS_REG, (A)), inb(CMOS_DATA)) 196#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D))) 197 198#define CHECK_INIT(D); \ 199 CHECK_WRITE(0x34, (D)); \ 200 CHECK_WRITE(0x35, (D)); \ 201 CHECK_WRITE(0x36, (D)); \ 202 CHECK_WRITE(0x37, (D)); \ 203 CHECK_WRITE(0x38, (D)); \ 204 CHECK_WRITE(0x39, (D)); 205 206#define CHECK_PRINT(S); \ 207 printf("%s: %d, %d, %d, %d, %d, %d\n", \ 208 (S), \ 209 CHECK_READ(0x34), \ 210 CHECK_READ(0x35), \ 211 CHECK_READ(0x36), \ 212 CHECK_READ(0x37), \ 213 CHECK_READ(0x38), \ 214 CHECK_READ(0x39)); 215 216#else /* CHECK_POINTS */ 217 218#define CHECK_INIT(D) 219#define CHECK_PRINT(S) 220 221#endif /* CHECK_POINTS */ 222 223/* 224 * Values to send to the POST hardware. 225 */ 226#define MP_BOOTADDRESS_POST 0x10 227#define MP_PROBE_POST 0x11 228#define MPTABLE_PASS1_POST 0x12 229 230#define MP_START_POST 0x13 231#define MP_ENABLE_POST 0x14 232#define MPTABLE_PASS2_POST 0x15 233 234#define START_ALL_APS_POST 0x16 235#define INSTALL_AP_TRAMP_POST 0x17 236#define START_AP_POST 0x18 237 238#define MP_ANNOUNCE_POST 0x19 239 240/* used to hold the AP's until we are ready to release them */ 241struct mtx ap_boot_mtx; 242 243/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */ 244int current_postcode; 245 246/** XXX FIXME: what system files declare these??? 
*/ 247extern struct region_descriptor r_gdt, r_idt; 248 249int bsp_apic_ready = 0; /* flags useability of BSP apic */ 250int mp_ncpus; /* # of CPUs, including BSP */ 251int mp_naps; /* # of Applications processors */ 252int mp_nbusses; /* # of busses */ 253int mp_napics; /* # of IO APICs */ 254int boot_cpu_id; /* designated BSP */ 255vm_offset_t cpu_apic_address; 256vm_offset_t io_apic_address[NAPICID]; /* NAPICID is more than enough */ 257extern int nkpt; 258 259u_int32_t cpu_apic_versions[MAXCPU]; 260u_int32_t *io_apic_versions; 261 262#ifdef APIC_INTR_REORDER 263struct { 264 volatile int *location; 265 int bit; 266} apic_isrbit_location[32]; 267#endif 268 269struct apic_intmapinfo int_to_apicintpin[APIC_INTMAPSIZE]; 270 271/* 272 * APIC ID logical/physical mapping structures. 273 * We oversize these to simplify boot-time config. 274 */ 275int cpu_num_to_apic_id[NAPICID]; 276int io_num_to_apic_id[NAPICID]; 277int apic_id_to_logical[NAPICID]; 278 279 280/* Bitmap of all available CPUs */ 281u_int all_cpus; 282 283/* AP uses this during bootstrap. Do not staticize. */ 284char *bootSTK; 285static int bootAP; 286 287/* Hotwire a 0->4MB V==P mapping */ 288extern pt_entry_t *KPTphys; 289 290/* SMP page table page */ 291extern pt_entry_t *SMPpt; 292 293struct pcb stoppcbs[MAXCPU]; 294 295int smp_started; /* has the system started? */ 296int smp_active = 0; /* are the APs allowed to run? 
*/ 297SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RW, &smp_active, 0, ""); 298 299/* XXX maybe should be hw.ncpu */ 300static int smp_cpus = 1; /* how many cpu's running */ 301SYSCTL_INT(_machdep, OID_AUTO, smp_cpus, CTLFLAG_RD, &smp_cpus, 0, ""); 302 303int invltlb_ok = 0; /* throttle smp_invltlb() till safe */ 304SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, ""); 305 306/* Enable forwarding of a signal to a process running on a different CPU */ 307static int forward_signal_enabled = 1; 308SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW, 309 &forward_signal_enabled, 0, ""); 310 311/* Enable forwarding of roundrobin to all other cpus */ 312static int forward_roundrobin_enabled = 1; 313SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW, 314 &forward_roundrobin_enabled, 0, ""); 315 316 317/* 318 * Local data and functions. 319 */ 320 321/* Set to 1 once we're ready to let the APs out of the pen. */ 322static volatile int aps_ready = 0; 323 324static int mp_capable; 325static u_int boot_address; 326static u_int base_memory; 327 328static int picmode; /* 0: virtual wire mode, 1: PIC mode */ 329static mpfps_t mpfps; 330static int search_for_sig(u_int32_t target, int count); 331static void mp_enable(u_int boot_addr); 332 333static void mptable_pass1(void); 334static int mptable_pass2(void); 335static void default_mp_table(int type); 336static void fix_mp_table(void); 337static void setup_apic_irq_mapping(void); 338static void init_locks(void); 339static int start_all_aps(u_int boot_addr); 340static void install_ap_tramp(u_int boot_addr); 341static int start_ap(int logicalCpu, u_int boot_addr); 342void ap_init(void); 343static int apic_int_is_bus_type(int intr, int bus_type); 344static void release_aps(void *dummy); 345 346/* 347 * initialize all the SMP locks 348 */ 349 350/* critical region around IO APIC, apic_imen */ 351struct mtx imen_mtx; 352 353/* lock region used by kernel profiling */ 354struct 
mtx mcount_mtx; 355 356#ifdef USE_COMLOCK 357/* locks com (tty) data/hardware accesses: a FASTINTR() */ 358struct mtx com_mtx; 359#endif /* USE_COMLOCK */ 360 361/* lock around the MP rendezvous */ 362static struct mtx smp_rv_mtx; 363 364/* only 1 CPU can panic at a time :) */ 365struct mtx panic_mtx; 366 367static void 368init_locks(void) 369{ 370 /* 371 * XXX The mcount mutex probably needs to be statically initialized, 372 * since it will be used even in the function calls that get us to this 373 * point. 374 */ 375 mtx_init(&mcount_mtx, "mcount", MTX_DEF); 376 377 mtx_init(&smp_rv_mtx, "smp rendezvous", MTX_SPIN); 378 mtx_init(&panic_mtx, "panic", MTX_DEF); 379 380#ifdef USE_COMLOCK 381 mtx_init(&com_mtx, "com", MTX_SPIN); 382#endif /* USE_COMLOCK */ 383 384 mtx_init(&ap_boot_mtx, "ap boot", MTX_SPIN); 385} 386 387/* 388 * Calculate usable address in base memory for AP trampoline code. 389 */ 390u_int 391mp_bootaddress(u_int basemem) 392{ 393 POSTCODE(MP_BOOTADDRESS_POST); 394 395 base_memory = basemem * 1024; /* convert to bytes */ 396 397 boot_address = base_memory & ~0xfff; /* round down to 4k boundary */ 398 if ((base_memory - boot_address) < bootMP_size) 399 boot_address -= 4096; /* not enough, lower by 4k */ 400 401 return boot_address; 402} 403 404 405/* 406 * Look for an Intel MP spec table (ie, SMP capable hardware). 
407 */ 408int 409mp_probe(void) 410{ 411 int x; 412 u_long segment; 413 u_int32_t target; 414 415 POSTCODE(MP_PROBE_POST); 416 417 /* see if EBDA exists */ 418 if ((segment = (u_long) * (u_short *) (KERNBASE + 0x40e)) != 0) { 419 /* search first 1K of EBDA */ 420 target = (u_int32_t) (segment << 4); 421 if ((x = search_for_sig(target, 1024 / 4)) >= 0) 422 goto found; 423 } else { 424 /* last 1K of base memory, effective 'top of base' passed in */ 425 target = (u_int32_t) (base_memory - 0x400); 426 if ((x = search_for_sig(target, 1024 / 4)) >= 0) 427 goto found; 428 } 429 430 /* search the BIOS */ 431 target = (u_int32_t) BIOS_BASE; 432 if ((x = search_for_sig(target, BIOS_COUNT)) >= 0) 433 goto found; 434 435 /* nothing found */ 436 mpfps = (mpfps_t)0; 437 mp_capable = 0; 438 return 0; 439 440found: 441 /* calculate needed resources */ 442 mpfps = (mpfps_t)x; 443 mptable_pass1(); 444 445 /* flag fact that we are running multiple processors */ 446 mp_capable = 1; 447 return 1; 448} 449 450 451/* 452 * Initialize the SMP hardware and the APIC and start up the AP's. 453 */ 454void 455mp_start(void) 456{ 457 POSTCODE(MP_START_POST); 458 459 /* look for MP capable motherboard */ 460 if (mp_capable) 461 mp_enable(boot_address); 462 else 463 panic("MP hardware not found!"); 464} 465 466 467/* 468 * Print various information about the SMP system hardware and setup. 
469 */ 470void 471mp_announce(void) 472{ 473 int x; 474 475 POSTCODE(MP_ANNOUNCE_POST); 476 477 printf("FreeBSD/SMP: Multiprocessor motherboard\n"); 478 printf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0)); 479 printf(", version: 0x%08x", cpu_apic_versions[0]); 480 printf(", at 0x%08x\n", cpu_apic_address); 481 for (x = 1; x <= mp_naps; ++x) { 482 printf(" cpu%d (AP): apic id: %2d", x, CPU_TO_ID(x)); 483 printf(", version: 0x%08x", cpu_apic_versions[x]); 484 printf(", at 0x%08x\n", cpu_apic_address); 485 } 486 487#if defined(APIC_IO) 488 for (x = 0; x < mp_napics; ++x) { 489 printf(" io%d (APIC): apic id: %2d", x, IO_TO_ID(x)); 490 printf(", version: 0x%08x", io_apic_versions[x]); 491 printf(", at 0x%08x\n", io_apic_address[x]); 492 } 493#else 494 printf(" Warning: APIC I/O disabled\n"); 495#endif /* APIC_IO */ 496} 497 498/* 499 * AP cpu's call this to sync up protected mode. 500 */ 501void 502init_secondary(void) 503{ 504 int gsel_tss; 505 int x, myid = bootAP; 506 507 gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[myid]; 508 gdt_segs[GPROC0_SEL].ssd_base = 509 (int) &SMP_prvspace[myid].globaldata.gd_common_tss; 510 SMP_prvspace[myid].globaldata.gd_prvspace = 511 &SMP_prvspace[myid].globaldata; 512 513 for (x = 0; x < NGDT; x++) { 514 ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd); 515 } 516 517 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1; 518 r_gdt.rd_base = (int) &gdt[myid * NGDT]; 519 lgdt(&r_gdt); /* does magic intra-segment return */ 520 521 lidt(&r_idt); 522 523 lldt(_default_ldt); 524#ifdef USER_LDT 525 PCPU_SET(currentldt, _default_ldt); 526#endif 527 528 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL); 529 gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS; 530 PCPU_SET(common_tss.tss_esp0, 0); /* not used until after switch */ 531 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL)); 532 PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16); 533 PCPU_SET(tss_gdt, &gdt[myid * NGDT + GPROC0_SEL].sd); 534 PCPU_SET(common_tssd, *PCPU_GET(tss_gdt)); 
535 ltr(gsel_tss); 536 537 pmap_set_opt(); 538} 539 540 541#if defined(APIC_IO) 542/* 543 * Final configuration of the BSP's local APIC: 544 * - disable 'pic mode'. 545 * - disable 'virtual wire mode'. 546 * - enable NMI. 547 */ 548void 549bsp_apic_configure(void) 550{ 551 u_char byte; 552 u_int32_t temp; 553 554 /* leave 'pic mode' if necessary */ 555 if (picmode) { 556 outb(0x22, 0x70); /* select IMCR */ 557 byte = inb(0x23); /* current contents */ 558 byte |= 0x01; /* mask external INTR */ 559 outb(0x23, byte); /* disconnect 8259s/NMI */ 560 } 561 562 /* mask lint0 (the 8259 'virtual wire' connection) */ 563 temp = lapic.lvt_lint0; 564 temp |= APIC_LVT_M; /* set the mask */ 565 lapic.lvt_lint0 = temp; 566 567 /* setup lint1 to handle NMI */ 568 temp = lapic.lvt_lint1; 569 temp &= ~APIC_LVT_M; /* clear the mask */ 570 lapic.lvt_lint1 = temp; 571 572 if (bootverbose) 573 apic_dump("bsp_apic_configure()"); 574} 575#endif /* APIC_IO */ 576 577 578/******************************************************************* 579 * local functions and data 580 */ 581 582/* 583 * start the SMP system 584 */ 585static void 586mp_enable(u_int boot_addr) 587{ 588 int x; 589#if defined(APIC_IO) 590 int apic; 591 u_int ux; 592#endif /* APIC_IO */ 593 594 POSTCODE(MP_ENABLE_POST); 595 596 /* turn on 4MB of V == P addressing so we can get to MP table */ 597 *(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME); 598 invltlb(); 599 600 /* examine the MP table for needed info, uses physical addresses */ 601 x = mptable_pass2(); 602 603 *(int *)PTD = 0; 604 invltlb(); 605 606 /* can't process default configs till the CPU APIC is pmapped */ 607 if (x) 608 default_mp_table(x); 609 610 /* post scan cleanup */ 611 fix_mp_table(); 612 setup_apic_irq_mapping(); 613 614#if defined(APIC_IO) 615 616 /* fill the LOGICAL io_apic_versions table */ 617 for (apic = 0; apic < mp_napics; ++apic) { 618 ux = io_apic_read(apic, IOAPIC_VER); 619 io_apic_versions[apic] = ux; 620 
io_apic_set_id(apic, IO_TO_ID(apic)); 621 } 622 623 /* program each IO APIC in the system */ 624 for (apic = 0; apic < mp_napics; ++apic) 625 if (io_apic_setup(apic) < 0) 626 panic("IO APIC setup failure"); 627 628 /* install a 'Spurious INTerrupt' vector */ 629 setidt(XSPURIOUSINT_OFFSET, Xspuriousint, 630 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 631 632 /* install an inter-CPU IPI for TLB invalidation */ 633 setidt(XINVLTLB_OFFSET, Xinvltlb, 634 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 635 636#ifdef BETTER_CLOCK 637 /* install an inter-CPU IPI for reading processor state */ 638 setidt(XCPUCHECKSTATE_OFFSET, Xcpucheckstate, 639 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 640#endif 641 642 /* install an inter-CPU IPI for all-CPU rendezvous */ 643 setidt(XRENDEZVOUS_OFFSET, Xrendezvous, 644 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 645 646 /* install an inter-CPU IPI for forcing an additional software trap */ 647 setidt(XCPUAST_OFFSET, Xcpuast, 648 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 649 650 /* install an inter-CPU IPI for CPU stop/restart */ 651 setidt(XCPUSTOP_OFFSET, Xcpustop, 652 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 653 654#if defined(TEST_TEST1) 655 /* install a "fake hardware INTerrupt" vector */ 656 setidt(XTEST1_OFFSET, Xtest1, 657 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 658#endif /** TEST_TEST1 */ 659 660#endif /* APIC_IO */ 661 662 /* initialize all SMP locks */ 663 init_locks(); 664 665 /* start each Application Processor */ 666 start_all_aps(boot_addr); 667} 668 669 670/* 671 * look for the MP spec signature 672 */ 673 674/* string defined by the Intel MP Spec as identifying the MP table */ 675#define MP_SIG 0x5f504d5f /* _MP_ */ 676#define NEXT(X) ((X) += 4) 677static int 678search_for_sig(u_int32_t target, int count) 679{ 680 int x; 681 u_int32_t *addr = (u_int32_t *) (KERNBASE + target); 682 683 for (x = 0; x < count; NEXT(x)) 684 if (addr[x] == MP_SIG) 685 /* make array index 
a byte index */ 686 return (target + (x * sizeof(u_int32_t))); 687 688 return -1; 689} 690 691 692static basetable_entry basetable_entry_types[] = 693{ 694 {0, 20, "Processor"}, 695 {1, 8, "Bus"}, 696 {2, 8, "I/O APIC"}, 697 {3, 8, "I/O INT"}, 698 {4, 8, "Local INT"} 699}; 700 701typedef struct BUSDATA { 702 u_char bus_id; 703 enum busTypes bus_type; 704} bus_datum; 705 706typedef struct INTDATA { 707 u_char int_type; 708 u_short int_flags; 709 u_char src_bus_id; 710 u_char src_bus_irq; 711 u_char dst_apic_id; 712 u_char dst_apic_int; 713 u_char int_vector; 714} io_int, local_int; 715 716typedef struct BUSTYPENAME { 717 u_char type; 718 char name[7]; 719} bus_type_name; 720 721static bus_type_name bus_type_table[] = 722{ 723 {CBUS, "CBUS"}, 724 {CBUSII, "CBUSII"}, 725 {EISA, "EISA"}, 726 {MCA, "MCA"}, 727 {UNKNOWN_BUSTYPE, "---"}, 728 {ISA, "ISA"}, 729 {MCA, "MCA"}, 730 {UNKNOWN_BUSTYPE, "---"}, 731 {UNKNOWN_BUSTYPE, "---"}, 732 {UNKNOWN_BUSTYPE, "---"}, 733 {UNKNOWN_BUSTYPE, "---"}, 734 {UNKNOWN_BUSTYPE, "---"}, 735 {PCI, "PCI"}, 736 {UNKNOWN_BUSTYPE, "---"}, 737 {UNKNOWN_BUSTYPE, "---"}, 738 {UNKNOWN_BUSTYPE, "---"}, 739 {UNKNOWN_BUSTYPE, "---"}, 740 {XPRESS, "XPRESS"}, 741 {UNKNOWN_BUSTYPE, "---"} 742}; 743/* from MP spec v1.4, table 5-1 */ 744static int default_data[7][5] = 745{ 746/* nbus, id0, type0, id1, type1 */ 747 {1, 0, ISA, 255, 255}, 748 {1, 0, EISA, 255, 255}, 749 {1, 0, EISA, 255, 255}, 750 {1, 0, MCA, 255, 255}, 751 {2, 0, ISA, 1, PCI}, 752 {2, 0, EISA, 1, PCI}, 753 {2, 0, MCA, 1, PCI} 754}; 755 756 757/* the bus data */ 758static bus_datum *bus_data; 759 760/* the IO INT data, one entry per possible APIC INTerrupt */ 761static io_int *io_apic_ints; 762 763static int nintrs; 764 765static int processor_entry __P((proc_entry_ptr entry, int cpu)); 766static int bus_entry __P((bus_entry_ptr entry, int bus)); 767static int io_apic_entry __P((io_apic_entry_ptr entry, int apic)); 768static int int_entry __P((int_entry_ptr entry, int intr)); 769static int 
lookup_bus_type __P((char *name)); 770 771 772/* 773 * 1st pass on motherboard's Intel MP specification table. 774 * 775 * initializes: 776 * mp_ncpus = 1 777 * 778 * determines: 779 * cpu_apic_address (common to all CPUs) 780 * io_apic_address[N] 781 * mp_naps 782 * mp_nbusses 783 * mp_napics 784 * nintrs 785 */ 786static void 787mptable_pass1(void) 788{ 789 int x; 790 mpcth_t cth; 791 int totalSize; 792 void* position; 793 int count; 794 int type; 795 796 POSTCODE(MPTABLE_PASS1_POST); 797 798 /* clear various tables */ 799 for (x = 0; x < NAPICID; ++x) { 800 io_apic_address[x] = ~0; /* IO APIC address table */ 801 } 802 803 /* init everything to empty */ 804 mp_naps = 0; 805 mp_nbusses = 0; 806 mp_napics = 0; 807 nintrs = 0; 808 809 /* check for use of 'default' configuration */ 810 if (MPFPS_MPFB1 != 0) { 811 /* use default addresses */ 812 cpu_apic_address = DEFAULT_APIC_BASE; 813 io_apic_address[0] = DEFAULT_IO_APIC_BASE; 814 815 /* fill in with defaults */ 816 mp_naps = 2; /* includes BSP */ 817 mp_nbusses = default_data[MPFPS_MPFB1 - 1][0]; 818#if defined(APIC_IO) 819 mp_napics = 1; 820 nintrs = 16; 821#endif /* APIC_IO */ 822 } 823 else { 824 if ((cth = mpfps->pap) == 0) 825 panic("MP Configuration Table Header MISSING!"); 826 827 cpu_apic_address = (vm_offset_t) cth->apic_address; 828 829 /* walk the table, recording info of interest */ 830 totalSize = cth->base_table_length - sizeof(struct MPCTH); 831 position = (u_char *) cth + sizeof(struct MPCTH); 832 count = cth->entry_count; 833 834 while (count--) { 835 switch (type = *(u_char *) position) { 836 case 0: /* processor_entry */ 837 if (((proc_entry_ptr)position)->cpu_flags 838 & PROCENTRY_FLAG_EN) 839 ++mp_naps; 840 break; 841 case 1: /* bus_entry */ 842 ++mp_nbusses; 843 break; 844 case 2: /* io_apic_entry */ 845 if (((io_apic_entry_ptr)position)->apic_flags 846 & IOAPICENTRY_FLAG_EN) 847 io_apic_address[mp_napics++] = 848 (vm_offset_t)((io_apic_entry_ptr) 849 position)->apic_address; 850 break; 851 
case 3: /* int_entry */ 852 ++nintrs; 853 break; 854 case 4: /* int_entry */ 855 break; 856 default: 857 panic("mpfps Base Table HOSED!"); 858 /* NOTREACHED */ 859 } 860 861 totalSize -= basetable_entry_types[type].length; 862 (u_char*)position += basetable_entry_types[type].length; 863 } 864 } 865 866 /* qualify the numbers */ 867 if (mp_naps > MAXCPU) { 868 printf("Warning: only using %d of %d available CPUs!\n", 869 MAXCPU, mp_naps); 870 mp_naps = MAXCPU; 871 } 872 873 /* 874 * Count the BSP. 875 * This is also used as a counter while starting the APs. 876 */ 877 mp_ncpus = 1; 878 879 --mp_naps; /* subtract the BSP */ 880} 881 882 883/* 884 * 2nd pass on motherboard's Intel MP specification table. 885 * 886 * sets: 887 * boot_cpu_id 888 * ID_TO_IO(N), phy APIC ID to log CPU/IO table 889 * CPU_TO_ID(N), logical CPU to APIC ID table 890 * IO_TO_ID(N), logical IO to APIC ID table 891 * bus_data[N] 892 * io_apic_ints[N] 893 */ 894static int 895mptable_pass2(void) 896{ 897 int x; 898 mpcth_t cth; 899 int totalSize; 900 void* position; 901 int count; 902 int type; 903 int apic, bus, cpu, intr; 904 int i, j; 905 int pgeflag; 906 907 POSTCODE(MPTABLE_PASS2_POST); 908 909 pgeflag = 0; /* XXX - Not used under SMP yet. */ 910 911 MALLOC(io_apic_versions, u_int32_t *, sizeof(u_int32_t) * mp_napics, 912 M_DEVBUF, M_WAITOK); 913 MALLOC(ioapic, volatile ioapic_t **, sizeof(ioapic_t *) * mp_napics, 914 M_DEVBUF, M_WAITOK); 915 MALLOC(io_apic_ints, io_int *, sizeof(io_int) * (nintrs + 1), 916 M_DEVBUF, M_WAITOK); 917 MALLOC(bus_data, bus_datum *, sizeof(bus_datum) * mp_nbusses, 918 M_DEVBUF, M_WAITOK); 919 920 bzero(ioapic, sizeof(ioapic_t *) * mp_napics); 921 922 for (i = 0; i < mp_napics; i++) { 923 for (j = 0; j < mp_napics; j++) { 924 /* same page frame as a previous IO apic? 
*/ 925 if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) == 926 (io_apic_address[i] & PG_FRAME)) { 927 ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace 928 + (NPTEPG-2-j) * PAGE_SIZE 929 + (io_apic_address[i] & PAGE_MASK)); 930 break; 931 } 932 /* use this slot if available */ 933 if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) == 0) { 934 SMPpt[NPTEPG-2-j] = (pt_entry_t)(PG_V | PG_RW | 935 pgeflag | (io_apic_address[i] & PG_FRAME)); 936 ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace 937 + (NPTEPG-2-j) * PAGE_SIZE 938 + (io_apic_address[i] & PAGE_MASK)); 939 break; 940 } 941 } 942 } 943 944 /* clear various tables */ 945 for (x = 0; x < NAPICID; ++x) { 946 ID_TO_IO(x) = -1; /* phy APIC ID to log CPU/IO table */ 947 CPU_TO_ID(x) = -1; /* logical CPU to APIC ID table */ 948 IO_TO_ID(x) = -1; /* logical IO to APIC ID table */ 949 } 950 951 /* clear bus data table */ 952 for (x = 0; x < mp_nbusses; ++x) 953 bus_data[x].bus_id = 0xff; 954 955 /* clear IO APIC INT table */ 956 for (x = 0; x < (nintrs + 1); ++x) { 957 io_apic_ints[x].int_type = 0xff; 958 io_apic_ints[x].int_vector = 0xff; 959 } 960 961 /* setup the cpu/apic mapping arrays */ 962 boot_cpu_id = -1; 963 964 /* record whether PIC or virtual-wire mode */ 965 picmode = (mpfps->mpfb2 & 0x80) ? 
1 : 0; 966 967 /* check for use of 'default' configuration */ 968 if (MPFPS_MPFB1 != 0) 969 return MPFPS_MPFB1; /* return default configuration type */ 970 971 if ((cth = mpfps->pap) == 0) 972 panic("MP Configuration Table Header MISSING!"); 973 974 /* walk the table, recording info of interest */ 975 totalSize = cth->base_table_length - sizeof(struct MPCTH); 976 position = (u_char *) cth + sizeof(struct MPCTH); 977 count = cth->entry_count; 978 apic = bus = intr = 0; 979 cpu = 1; /* pre-count the BSP */ 980 981 while (count--) { 982 switch (type = *(u_char *) position) { 983 case 0: 984 if (processor_entry(position, cpu)) 985 ++cpu; 986 break; 987 case 1: 988 if (bus_entry(position, bus)) 989 ++bus; 990 break; 991 case 2: 992 if (io_apic_entry(position, apic)) 993 ++apic; 994 break; 995 case 3: 996 if (int_entry(position, intr)) 997 ++intr; 998 break; 999 case 4: 1000 /* int_entry(position); */ 1001 break; 1002 default: 1003 panic("mpfps Base Table HOSED!"); 1004 /* NOTREACHED */ 1005 } 1006 1007 totalSize -= basetable_entry_types[type].length; 1008 (u_char *) position += basetable_entry_types[type].length; 1009 } 1010 1011 if (boot_cpu_id == -1) 1012 panic("NO BSP found!"); 1013 1014 /* report fact that its NOT a default configuration */ 1015 return 0; 1016} 1017 1018 1019void 1020assign_apic_irq(int apic, int intpin, int irq) 1021{ 1022 int x; 1023 1024 if (int_to_apicintpin[irq].ioapic != -1) 1025 panic("assign_apic_irq: inconsistent table"); 1026 1027 int_to_apicintpin[irq].ioapic = apic; 1028 int_to_apicintpin[irq].int_pin = intpin; 1029 int_to_apicintpin[irq].apic_address = ioapic[apic]; 1030 int_to_apicintpin[irq].redirindex = IOAPIC_REDTBL + 2 * intpin; 1031 1032 for (x = 0; x < nintrs; x++) { 1033 if ((io_apic_ints[x].int_type == 0 || 1034 io_apic_ints[x].int_type == 3) && 1035 io_apic_ints[x].int_vector == 0xff && 1036 io_apic_ints[x].dst_apic_id == IO_TO_ID(apic) && 1037 io_apic_ints[x].dst_apic_int == intpin) 1038 io_apic_ints[x].int_vector = irq; 1039 
} 1040} 1041 1042void 1043revoke_apic_irq(int irq) 1044{ 1045 int x; 1046 int oldapic; 1047 int oldintpin; 1048 1049 if (int_to_apicintpin[irq].ioapic == -1) 1050 panic("assign_apic_irq: inconsistent table"); 1051 1052 oldapic = int_to_apicintpin[irq].ioapic; 1053 oldintpin = int_to_apicintpin[irq].int_pin; 1054 1055 int_to_apicintpin[irq].ioapic = -1; 1056 int_to_apicintpin[irq].int_pin = 0; 1057 int_to_apicintpin[irq].apic_address = NULL; 1058 int_to_apicintpin[irq].redirindex = 0; 1059 1060 for (x = 0; x < nintrs; x++) { 1061 if ((io_apic_ints[x].int_type == 0 || 1062 io_apic_ints[x].int_type == 3) && 1063 io_apic_ints[x].int_vector == 0xff && 1064 io_apic_ints[x].dst_apic_id == IO_TO_ID(oldapic) && 1065 io_apic_ints[x].dst_apic_int == oldintpin) 1066 io_apic_ints[x].int_vector = 0xff; 1067 } 1068} 1069 1070 1071static void 1072allocate_apic_irq(int intr) 1073{ 1074 int apic; 1075 int intpin; 1076 int irq; 1077 1078 if (io_apic_ints[intr].int_vector != 0xff) 1079 return; /* Interrupt handler already assigned */ 1080 1081 if (io_apic_ints[intr].int_type != 0 && 1082 (io_apic_ints[intr].int_type != 3 || 1083 (io_apic_ints[intr].dst_apic_id == IO_TO_ID(0) && 1084 io_apic_ints[intr].dst_apic_int == 0))) 1085 return; /* Not INT or ExtInt on != (0, 0) */ 1086 1087 irq = 0; 1088 while (irq < APIC_INTMAPSIZE && 1089 int_to_apicintpin[irq].ioapic != -1) 1090 irq++; 1091 1092 if (irq >= APIC_INTMAPSIZE) 1093 return; /* No free interrupt handlers */ 1094 1095 apic = ID_TO_IO(io_apic_ints[intr].dst_apic_id); 1096 intpin = io_apic_ints[intr].dst_apic_int; 1097 1098 assign_apic_irq(apic, intpin, irq); 1099 io_apic_setup_intpin(apic, intpin); 1100} 1101 1102 1103static void 1104swap_apic_id(int apic, int oldid, int newid) 1105{ 1106 int x; 1107 int oapic; 1108 1109 1110 if (oldid == newid) 1111 return; /* Nothing to do */ 1112 1113 printf("Changing APIC ID for IO APIC #%d from %d to %d in MP table\n", 1114 apic, oldid, newid); 1115 1116 /* Swap physical APIC IDs in interrupt 
entries */ 1117 for (x = 0; x < nintrs; x++) { 1118 if (io_apic_ints[x].dst_apic_id == oldid) 1119 io_apic_ints[x].dst_apic_id = newid; 1120 else if (io_apic_ints[x].dst_apic_id == newid) 1121 io_apic_ints[x].dst_apic_id = oldid; 1122 } 1123 1124 /* Swap physical APIC IDs in IO_TO_ID mappings */ 1125 for (oapic = 0; oapic < mp_napics; oapic++) 1126 if (IO_TO_ID(oapic) == newid) 1127 break; 1128 1129 if (oapic < mp_napics) { 1130 printf("Changing APIC ID for IO APIC #%d from " 1131 "%d to %d in MP table\n", 1132 oapic, newid, oldid); 1133 IO_TO_ID(oapic) = oldid; 1134 } 1135 IO_TO_ID(apic) = newid; 1136} 1137 1138 1139static void 1140fix_id_to_io_mapping(void) 1141{ 1142 int x; 1143 1144 for (x = 0; x < NAPICID; x++) 1145 ID_TO_IO(x) = -1; 1146 1147 for (x = 0; x <= mp_naps; x++) 1148 if (CPU_TO_ID(x) < NAPICID) 1149 ID_TO_IO(CPU_TO_ID(x)) = x; 1150 1151 for (x = 0; x < mp_napics; x++) 1152 if (IO_TO_ID(x) < NAPICID) 1153 ID_TO_IO(IO_TO_ID(x)) = x; 1154} 1155 1156 1157static int 1158first_free_apic_id(void) 1159{ 1160 int freeid, x; 1161 1162 for (freeid = 0; freeid < NAPICID; freeid++) { 1163 for (x = 0; x <= mp_naps; x++) 1164 if (CPU_TO_ID(x) == freeid) 1165 break; 1166 if (x <= mp_naps) 1167 continue; 1168 for (x = 0; x < mp_napics; x++) 1169 if (IO_TO_ID(x) == freeid) 1170 break; 1171 if (x < mp_napics) 1172 continue; 1173 return freeid; 1174 } 1175 return freeid; 1176} 1177 1178 1179static int 1180io_apic_id_acceptable(int apic, int id) 1181{ 1182 int cpu; /* Logical CPU number */ 1183 int oapic; /* Logical IO APIC number for other IO APIC */ 1184 1185 if (id >= NAPICID) 1186 return 0; /* Out of range */ 1187 1188 for (cpu = 0; cpu <= mp_naps; cpu++) 1189 if (CPU_TO_ID(cpu) == id) 1190 return 0; /* Conflict with CPU */ 1191 1192 for (oapic = 0; oapic < mp_napics && oapic < apic; oapic++) 1193 if (IO_TO_ID(oapic) == id) 1194 return 0; /* Conflict with other APIC */ 1195 1196 return 1; /* ID is acceptable for IO APIC */ 1197} 1198 1199 1200/* 1201 * parse an 
Intel MP specification table 1202 */ 1203static void 1204fix_mp_table(void) 1205{ 1206 int x; 1207 int id; 1208 int bus_0 = 0; /* Stop GCC warning */ 1209 int bus_pci = 0; /* Stop GCC warning */ 1210 int num_pci_bus; 1211 int apic; /* IO APIC unit number */ 1212 int freeid; /* Free physical APIC ID */ 1213 int physid; /* Current physical IO APIC ID */ 1214 1215 /* 1216 * Fix mis-numbering of the PCI bus and its INT entries if the BIOS 1217 * did it wrong. The MP spec says that when more than 1 PCI bus 1218 * exists the BIOS must begin with bus entries for the PCI bus and use 1219 * actual PCI bus numbering. This implies that when only 1 PCI bus 1220 * exists the BIOS can choose to ignore this ordering, and indeed many 1221 * MP motherboards do ignore it. This causes a problem when the PCI 1222 * sub-system makes requests of the MP sub-system based on PCI bus 1223 * numbers. So here we look for the situation and renumber the 1224 * busses and associated INTs in an effort to "make it right". 
1225 */ 1226 1227 /* find bus 0, PCI bus, count the number of PCI busses */ 1228 for (num_pci_bus = 0, x = 0; x < mp_nbusses; ++x) { 1229 if (bus_data[x].bus_id == 0) { 1230 bus_0 = x; 1231 } 1232 if (bus_data[x].bus_type == PCI) { 1233 ++num_pci_bus; 1234 bus_pci = x; 1235 } 1236 } 1237 /* 1238 * bus_0 == slot of bus with ID of 0 1239 * bus_pci == slot of last PCI bus encountered 1240 */ 1241 1242 /* check the 1 PCI bus case for sanity */ 1243 /* if it is number 0 all is well */ 1244 if (num_pci_bus == 1 && 1245 bus_data[bus_pci].bus_id != 0) { 1246 1247 /* mis-numbered, swap with whichever bus uses slot 0 */ 1248 1249 /* swap the bus entry types */ 1250 bus_data[bus_pci].bus_type = bus_data[bus_0].bus_type; 1251 bus_data[bus_0].bus_type = PCI; 1252 1253 /* swap each relavant INTerrupt entry */ 1254 id = bus_data[bus_pci].bus_id; 1255 for (x = 0; x < nintrs; ++x) { 1256 if (io_apic_ints[x].src_bus_id == id) { 1257 io_apic_ints[x].src_bus_id = 0; 1258 } 1259 else if (io_apic_ints[x].src_bus_id == 0) { 1260 io_apic_ints[x].src_bus_id = id; 1261 } 1262 } 1263 } 1264 1265 /* Assign IO APIC IDs. 1266 * 1267 * First try the existing ID. If a conflict is detected, try 1268 * the ID in the MP table. If a conflict is still detected, find 1269 * a free id. 1270 * 1271 * We cannot use the ID_TO_IO table before all conflicts has been 1272 * resolved and the table has been corrected. 
1273 */ 1274 for (apic = 0; apic < mp_napics; ++apic) { /* For all IO APICs */ 1275 1276 /* First try to use the value set by the BIOS */ 1277 physid = io_apic_get_id(apic); 1278 if (io_apic_id_acceptable(apic, physid)) { 1279 if (IO_TO_ID(apic) != physid) 1280 swap_apic_id(apic, IO_TO_ID(apic), physid); 1281 continue; 1282 } 1283 1284 /* Then check if the value in the MP table is acceptable */ 1285 if (io_apic_id_acceptable(apic, IO_TO_ID(apic))) 1286 continue; 1287 1288 /* Last resort, find a free APIC ID and use it */ 1289 freeid = first_free_apic_id(); 1290 if (freeid >= NAPICID) 1291 panic("No free physical APIC IDs found"); 1292 1293 if (io_apic_id_acceptable(apic, freeid)) { 1294 swap_apic_id(apic, IO_TO_ID(apic), freeid); 1295 continue; 1296 } 1297 panic("Free physical APIC ID not usable"); 1298 } 1299 fix_id_to_io_mapping(); 1300 1301 /* detect and fix broken Compaq MP table */ 1302 if (apic_int_type(0, 0) == -1) { 1303 printf("APIC_IO: MP table broken: 8259->APIC entry missing!\n"); 1304 io_apic_ints[nintrs].int_type = 3; /* ExtInt */ 1305 io_apic_ints[nintrs].int_vector = 0xff; /* Unassigned */ 1306 /* XXX fixme, set src bus id etc, but it doesn't seem to hurt */ 1307 io_apic_ints[nintrs].dst_apic_id = IO_TO_ID(0); 1308 io_apic_ints[nintrs].dst_apic_int = 0; /* Pin 0 */ 1309 nintrs++; 1310 } 1311} 1312 1313 1314/* Assign low level interrupt handlers */ 1315static void 1316setup_apic_irq_mapping(void) 1317{ 1318 int x; 1319 int int_vector; 1320 1321 /* Clear array */ 1322 for (x = 0; x < APIC_INTMAPSIZE; x++) { 1323 int_to_apicintpin[x].ioapic = -1; 1324 int_to_apicintpin[x].int_pin = 0; 1325 int_to_apicintpin[x].apic_address = NULL; 1326 int_to_apicintpin[x].redirindex = 0; 1327 } 1328 1329 /* First assign ISA/EISA interrupts */ 1330 for (x = 0; x < nintrs; x++) { 1331 int_vector = io_apic_ints[x].src_bus_irq; 1332 if (int_vector < APIC_INTMAPSIZE && 1333 io_apic_ints[x].int_vector == 0xff && 1334 int_to_apicintpin[int_vector].ioapic == -1 && 1335 
(apic_int_is_bus_type(x, ISA) || 1336 apic_int_is_bus_type(x, EISA)) && 1337 io_apic_ints[x].int_type == 0) { 1338 assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id), 1339 io_apic_ints[x].dst_apic_int, 1340 int_vector); 1341 } 1342 } 1343 1344 /* Assign ExtInt entry if no ISA/EISA interrupt 0 entry */ 1345 for (x = 0; x < nintrs; x++) { 1346 if (io_apic_ints[x].dst_apic_int == 0 && 1347 io_apic_ints[x].dst_apic_id == IO_TO_ID(0) && 1348 io_apic_ints[x].int_vector == 0xff && 1349 int_to_apicintpin[0].ioapic == -1 && 1350 io_apic_ints[x].int_type == 3) { 1351 assign_apic_irq(0, 0, 0); 1352 break; 1353 } 1354 } 1355 /* PCI interrupt assignment is deferred */ 1356} 1357 1358 1359static int 1360processor_entry(proc_entry_ptr entry, int cpu) 1361{ 1362 /* check for usability */ 1363 if (!(entry->cpu_flags & PROCENTRY_FLAG_EN)) 1364 return 0; 1365 1366 if(entry->apic_id >= NAPICID) 1367 panic("CPU APIC ID out of range (0..%d)", NAPICID - 1); 1368 /* check for BSP flag */ 1369 if (entry->cpu_flags & PROCENTRY_FLAG_BP) { 1370 boot_cpu_id = entry->apic_id; 1371 CPU_TO_ID(0) = entry->apic_id; 1372 ID_TO_CPU(entry->apic_id) = 0; 1373 return 0; /* its already been counted */ 1374 } 1375 1376 /* add another AP to list, if less than max number of CPUs */ 1377 else if (cpu < MAXCPU) { 1378 CPU_TO_ID(cpu) = entry->apic_id; 1379 ID_TO_CPU(entry->apic_id) = cpu; 1380 return 1; 1381 } 1382 1383 return 0; 1384} 1385 1386 1387static int 1388bus_entry(bus_entry_ptr entry, int bus) 1389{ 1390 int x; 1391 char c, name[8]; 1392 1393 /* encode the name into an index */ 1394 for (x = 0; x < 6; ++x) { 1395 if ((c = entry->bus_type[x]) == ' ') 1396 break; 1397 name[x] = c; 1398 } 1399 name[x] = '\0'; 1400 1401 if ((x = lookup_bus_type(name)) == UNKNOWN_BUSTYPE) 1402 panic("unknown bus type: '%s'", name); 1403 1404 bus_data[bus].bus_id = entry->bus_id; 1405 bus_data[bus].bus_type = x; 1406 1407 return 1; 1408} 1409 1410 1411static int 1412io_apic_entry(io_apic_entry_ptr entry, int apic) 1413{ 
1414 if (!(entry->apic_flags & IOAPICENTRY_FLAG_EN)) 1415 return 0; 1416 1417 IO_TO_ID(apic) = entry->apic_id; 1418 if (entry->apic_id < NAPICID) 1419 ID_TO_IO(entry->apic_id) = apic; 1420 1421 return 1; 1422} 1423 1424 1425static int 1426lookup_bus_type(char *name) 1427{ 1428 int x; 1429 1430 for (x = 0; x < MAX_BUSTYPE; ++x) 1431 if (strcmp(bus_type_table[x].name, name) == 0) 1432 return bus_type_table[x].type; 1433 1434 return UNKNOWN_BUSTYPE; 1435} 1436 1437 1438static int 1439int_entry(int_entry_ptr entry, int intr) 1440{ 1441 int apic; 1442 1443 io_apic_ints[intr].int_type = entry->int_type; 1444 io_apic_ints[intr].int_flags = entry->int_flags; 1445 io_apic_ints[intr].src_bus_id = entry->src_bus_id; 1446 io_apic_ints[intr].src_bus_irq = entry->src_bus_irq; 1447 if (entry->dst_apic_id == 255) { 1448 /* This signal goes to all IO APICS. Select an IO APIC 1449 with sufficient number of interrupt pins */ 1450 for (apic = 0; apic < mp_napics; apic++) 1451 if (((io_apic_read(apic, IOAPIC_VER) & 1452 IOART_VER_MAXREDIR) >> MAXREDIRSHIFT) >= 1453 entry->dst_apic_int) 1454 break; 1455 if (apic < mp_napics) 1456 io_apic_ints[intr].dst_apic_id = IO_TO_ID(apic); 1457 else 1458 io_apic_ints[intr].dst_apic_id = entry->dst_apic_id; 1459 } else 1460 io_apic_ints[intr].dst_apic_id = entry->dst_apic_id; 1461 io_apic_ints[intr].dst_apic_int = entry->dst_apic_int; 1462 1463 return 1; 1464} 1465 1466 1467static int 1468apic_int_is_bus_type(int intr, int bus_type) 1469{ 1470 int bus; 1471 1472 for (bus = 0; bus < mp_nbusses; ++bus) 1473 if ((bus_data[bus].bus_id == io_apic_ints[intr].src_bus_id) 1474 && ((int) bus_data[bus].bus_type == bus_type)) 1475 return 1; 1476 1477 return 0; 1478} 1479 1480 1481/* 1482 * Given a traditional ISA INT mask, return an APIC mask. 
1483 */ 1484u_int 1485isa_apic_mask(u_int isa_mask) 1486{ 1487 int isa_irq; 1488 int apic_pin; 1489 1490#if defined(SKIP_IRQ15_REDIRECT) 1491 if (isa_mask == (1 << 15)) { 1492 printf("skipping ISA IRQ15 redirect\n"); 1493 return isa_mask; 1494 } 1495#endif /* SKIP_IRQ15_REDIRECT */ 1496 1497 isa_irq = ffs(isa_mask); /* find its bit position */ 1498 if (isa_irq == 0) /* doesn't exist */ 1499 return 0; 1500 --isa_irq; /* make it zero based */ 1501 1502 apic_pin = isa_apic_irq(isa_irq); /* look for APIC connection */ 1503 if (apic_pin == -1) 1504 return 0; 1505 1506 return (1 << apic_pin); /* convert pin# to a mask */ 1507} 1508 1509 1510/* 1511 * Determine which APIC pin an ISA/EISA INT is attached to. 1512 */ 1513#define INTTYPE(I) (io_apic_ints[(I)].int_type) 1514#define INTPIN(I) (io_apic_ints[(I)].dst_apic_int) 1515#define INTIRQ(I) (io_apic_ints[(I)].int_vector) 1516#define INTAPIC(I) (ID_TO_IO(io_apic_ints[(I)].dst_apic_id)) 1517 1518#define SRCBUSIRQ(I) (io_apic_ints[(I)].src_bus_irq) 1519int 1520isa_apic_irq(int isa_irq) 1521{ 1522 int intr; 1523 1524 for (intr = 0; intr < nintrs; ++intr) { /* check each record */ 1525 if (INTTYPE(intr) == 0) { /* standard INT */ 1526 if (SRCBUSIRQ(intr) == isa_irq) { 1527 if (apic_int_is_bus_type(intr, ISA) || 1528 apic_int_is_bus_type(intr, EISA)) { 1529 if (INTIRQ(intr) == 0xff) 1530 return -1; /* unassigned */ 1531 return INTIRQ(intr); /* found */ 1532 } 1533 } 1534 } 1535 } 1536 return -1; /* NOT found */ 1537} 1538 1539 1540/* 1541 * Determine which APIC pin a PCI INT is attached to. 
1542 */ 1543#define SRCBUSID(I) (io_apic_ints[(I)].src_bus_id) 1544#define SRCBUSDEVICE(I) ((io_apic_ints[(I)].src_bus_irq >> 2) & 0x1f) 1545#define SRCBUSLINE(I) (io_apic_ints[(I)].src_bus_irq & 0x03) 1546int 1547pci_apic_irq(int pciBus, int pciDevice, int pciInt) 1548{ 1549 int intr; 1550 1551 --pciInt; /* zero based */ 1552 1553 for (intr = 0; intr < nintrs; ++intr) /* check each record */ 1554 if ((INTTYPE(intr) == 0) /* standard INT */ 1555 && (SRCBUSID(intr) == pciBus) 1556 && (SRCBUSDEVICE(intr) == pciDevice) 1557 && (SRCBUSLINE(intr) == pciInt)) /* a candidate IRQ */ 1558 if (apic_int_is_bus_type(intr, PCI)) { 1559 if (INTIRQ(intr) == 0xff) 1560 allocate_apic_irq(intr); 1561 if (INTIRQ(intr) == 0xff) 1562 return -1; /* unassigned */ 1563 return INTIRQ(intr); /* exact match */ 1564 } 1565 1566 return -1; /* NOT found */ 1567} 1568 1569int 1570next_apic_irq(int irq) 1571{ 1572 int intr, ointr; 1573 int bus, bustype; 1574 1575 bus = 0; 1576 bustype = 0; 1577 for (intr = 0; intr < nintrs; intr++) { 1578 if (INTIRQ(intr) != irq || INTTYPE(intr) != 0) 1579 continue; 1580 bus = SRCBUSID(intr); 1581 bustype = apic_bus_type(bus); 1582 if (bustype != ISA && 1583 bustype != EISA && 1584 bustype != PCI) 1585 continue; 1586 break; 1587 } 1588 if (intr >= nintrs) { 1589 return -1; 1590 } 1591 for (ointr = intr + 1; ointr < nintrs; ointr++) { 1592 if (INTTYPE(ointr) != 0) 1593 continue; 1594 if (bus != SRCBUSID(ointr)) 1595 continue; 1596 if (bustype == PCI) { 1597 if (SRCBUSDEVICE(intr) != SRCBUSDEVICE(ointr)) 1598 continue; 1599 if (SRCBUSLINE(intr) != SRCBUSLINE(ointr)) 1600 continue; 1601 } 1602 if (bustype == ISA || bustype == EISA) { 1603 if (SRCBUSIRQ(intr) != SRCBUSIRQ(ointr)) 1604 continue; 1605 } 1606 if (INTPIN(intr) == INTPIN(ointr)) 1607 continue; 1608 break; 1609 } 1610 if (ointr >= nintrs) { 1611 return -1; 1612 } 1613 return INTIRQ(ointr); 1614} 1615#undef SRCBUSLINE 1616#undef SRCBUSDEVICE 1617#undef SRCBUSID 1618#undef SRCBUSIRQ 1619 1620#undef INTPIN 
1621#undef INTIRQ 1622#undef INTAPIC 1623#undef INTTYPE 1624 1625 1626/* 1627 * Reprogram the MB chipset to NOT redirect an ISA INTerrupt. 1628 * 1629 * XXX FIXME: 1630 * Exactly what this means is unclear at this point. It is a solution 1631 * for motherboards that redirect the MBIRQ0 pin. Generically a motherboard 1632 * could route any of the ISA INTs to upper (>15) IRQ values. But most would 1633 * NOT be redirected via MBIRQ0, thus "undirect()ing" them would NOT be an 1634 * option. 1635 */ 1636int 1637undirect_isa_irq(int rirq) 1638{ 1639#if defined(READY) 1640 if (bootverbose) 1641 printf("Freeing redirected ISA irq %d.\n", rirq); 1642 /** FIXME: tickle the MB redirector chip */ 1643 return -1; 1644#else 1645 if (bootverbose) 1646 printf("Freeing (NOT implemented) redirected ISA irq %d.\n", rirq); 1647 return 0; 1648#endif /* READY */ 1649} 1650 1651 1652/* 1653 * Reprogram the MB chipset to NOT redirect a PCI INTerrupt 1654 */ 1655int 1656undirect_pci_irq(int rirq) 1657{ 1658#if defined(READY) 1659 if (bootverbose) 1660 printf("Freeing redirected PCI irq %d.\n", rirq); 1661 1662 /** FIXME: tickle the MB redirector chip */ 1663 return -1; 1664#else 1665 if (bootverbose) 1666 printf("Freeing (NOT implemented) redirected PCI irq %d.\n", 1667 rirq); 1668 return 0; 1669#endif /* READY */ 1670} 1671 1672 1673/* 1674 * given a bus ID, return: 1675 * the bus type if found 1676 * -1 if NOT found 1677 */ 1678int 1679apic_bus_type(int id) 1680{ 1681 int x; 1682 1683 for (x = 0; x < mp_nbusses; ++x) 1684 if (bus_data[x].bus_id == id) 1685 return bus_data[x].bus_type; 1686 1687 return -1; 1688} 1689 1690 1691/* 1692 * given a LOGICAL APIC# and pin#, return: 1693 * the associated src bus ID if found 1694 * -1 if NOT found 1695 */ 1696int 1697apic_src_bus_id(int apic, int pin) 1698{ 1699 int x; 1700 1701 /* search each of the possible INTerrupt sources */ 1702 for (x = 0; x < nintrs; ++x) 1703 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1704 (pin == 
io_apic_ints[x].dst_apic_int)) 1705 return (io_apic_ints[x].src_bus_id); 1706 1707 return -1; /* NOT found */ 1708} 1709 1710 1711/* 1712 * given a LOGICAL APIC# and pin#, return: 1713 * the associated src bus IRQ if found 1714 * -1 if NOT found 1715 */ 1716int 1717apic_src_bus_irq(int apic, int pin) 1718{ 1719 int x; 1720 1721 for (x = 0; x < nintrs; x++) 1722 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1723 (pin == io_apic_ints[x].dst_apic_int)) 1724 return (io_apic_ints[x].src_bus_irq); 1725 1726 return -1; /* NOT found */ 1727} 1728 1729 1730/* 1731 * given a LOGICAL APIC# and pin#, return: 1732 * the associated INTerrupt type if found 1733 * -1 if NOT found 1734 */ 1735int 1736apic_int_type(int apic, int pin) 1737{ 1738 int x; 1739 1740 /* search each of the possible INTerrupt sources */ 1741 for (x = 0; x < nintrs; ++x) 1742 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1743 (pin == io_apic_ints[x].dst_apic_int)) 1744 return (io_apic_ints[x].int_type); 1745 1746 return -1; /* NOT found */ 1747} 1748 1749int 1750apic_irq(int apic, int pin) 1751{ 1752 int x; 1753 int res; 1754 1755 for (x = 0; x < nintrs; ++x) 1756 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1757 (pin == io_apic_ints[x].dst_apic_int)) { 1758 res = io_apic_ints[x].int_vector; 1759 if (res == 0xff) 1760 return -1; 1761 if (apic != int_to_apicintpin[res].ioapic) 1762 panic("apic_irq: inconsistent table"); 1763 if (pin != int_to_apicintpin[res].int_pin) 1764 panic("apic_irq inconsistent table (2)"); 1765 return res; 1766 } 1767 return -1; 1768} 1769 1770 1771/* 1772 * given a LOGICAL APIC# and pin#, return: 1773 * the associated trigger mode if found 1774 * -1 if NOT found 1775 */ 1776int 1777apic_trigger(int apic, int pin) 1778{ 1779 int x; 1780 1781 /* search each of the possible INTerrupt sources */ 1782 for (x = 0; x < nintrs; ++x) 1783 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1784 (pin == io_apic_ints[x].dst_apic_int)) 1785 return 
((io_apic_ints[x].int_flags >> 2) & 0x03); 1786 1787 return -1; /* NOT found */ 1788} 1789 1790 1791/* 1792 * given a LOGICAL APIC# and pin#, return: 1793 * the associated 'active' level if found 1794 * -1 if NOT found 1795 */ 1796int 1797apic_polarity(int apic, int pin) 1798{ 1799 int x; 1800 1801 /* search each of the possible INTerrupt sources */ 1802 for (x = 0; x < nintrs; ++x) 1803 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1804 (pin == io_apic_ints[x].dst_apic_int)) 1805 return (io_apic_ints[x].int_flags & 0x03); 1806 1807 return -1; /* NOT found */ 1808} 1809 1810 1811/* 1812 * set data according to MP defaults 1813 * FIXME: probably not complete yet... 1814 */ 1815static void 1816default_mp_table(int type) 1817{ 1818 int ap_cpu_id; 1819#if defined(APIC_IO) 1820 int io_apic_id; 1821 int pin; 1822#endif /* APIC_IO */ 1823 1824#if 0 1825 printf(" MP default config type: %d\n", type); 1826 switch (type) { 1827 case 1: 1828 printf(" bus: ISA, APIC: 82489DX\n"); 1829 break; 1830 case 2: 1831 printf(" bus: EISA, APIC: 82489DX\n"); 1832 break; 1833 case 3: 1834 printf(" bus: EISA, APIC: 82489DX\n"); 1835 break; 1836 case 4: 1837 printf(" bus: MCA, APIC: 82489DX\n"); 1838 break; 1839 case 5: 1840 printf(" bus: ISA+PCI, APIC: Integrated\n"); 1841 break; 1842 case 6: 1843 printf(" bus: EISA+PCI, APIC: Integrated\n"); 1844 break; 1845 case 7: 1846 printf(" bus: MCA+PCI, APIC: Integrated\n"); 1847 break; 1848 default: 1849 printf(" future type\n"); 1850 break; 1851 /* NOTREACHED */ 1852 } 1853#endif /* 0 */ 1854 1855 boot_cpu_id = (lapic.id & APIC_ID_MASK) >> 24; 1856 ap_cpu_id = (boot_cpu_id == 0) ? 
1 : 0; 1857 1858 /* BSP */ 1859 CPU_TO_ID(0) = boot_cpu_id; 1860 ID_TO_CPU(boot_cpu_id) = 0; 1861 1862 /* one and only AP */ 1863 CPU_TO_ID(1) = ap_cpu_id; 1864 ID_TO_CPU(ap_cpu_id) = 1; 1865 1866#if defined(APIC_IO) 1867 /* one and only IO APIC */ 1868 io_apic_id = (io_apic_read(0, IOAPIC_ID) & APIC_ID_MASK) >> 24; 1869 1870 /* 1871 * sanity check, refer to MP spec section 3.6.6, last paragraph 1872 * necessary as some hardware isn't properly setting up the IO APIC 1873 */ 1874#if defined(REALLY_ANAL_IOAPICID_VALUE) 1875 if (io_apic_id != 2) { 1876#else 1877 if ((io_apic_id == 0) || (io_apic_id == 1) || (io_apic_id == 15)) { 1878#endif /* REALLY_ANAL_IOAPICID_VALUE */ 1879 io_apic_set_id(0, 2); 1880 io_apic_id = 2; 1881 } 1882 IO_TO_ID(0) = io_apic_id; 1883 ID_TO_IO(io_apic_id) = 0; 1884#endif /* APIC_IO */ 1885 1886 /* fill out bus entries */ 1887 switch (type) { 1888 case 1: 1889 case 2: 1890 case 3: 1891 case 4: 1892 case 5: 1893 case 6: 1894 case 7: 1895 bus_data[0].bus_id = default_data[type - 1][1]; 1896 bus_data[0].bus_type = default_data[type - 1][2]; 1897 bus_data[1].bus_id = default_data[type - 1][3]; 1898 bus_data[1].bus_type = default_data[type - 1][4]; 1899 break; 1900 1901 /* case 4: case 7: MCA NOT supported */ 1902 default: /* illegal/reserved */ 1903 panic("BAD default MP config: %d", type); 1904 /* NOTREACHED */ 1905 } 1906 1907#if defined(APIC_IO) 1908 /* general cases from MP v1.4, table 5-2 */ 1909 for (pin = 0; pin < 16; ++pin) { 1910 io_apic_ints[pin].int_type = 0; 1911 io_apic_ints[pin].int_flags = 0x05; /* edge/active-hi */ 1912 io_apic_ints[pin].src_bus_id = 0; 1913 io_apic_ints[pin].src_bus_irq = pin; /* IRQ2 caught below */ 1914 io_apic_ints[pin].dst_apic_id = io_apic_id; 1915 io_apic_ints[pin].dst_apic_int = pin; /* 1-to-1 */ 1916 } 1917 1918 /* special cases from MP v1.4, table 5-2 */ 1919 if (type == 2) { 1920 io_apic_ints[2].int_type = 0xff; /* N/C */ 1921 io_apic_ints[13].int_type = 0xff; /* N/C */ 1922#if !defined(APIC_MIXED_MODE) 
1923 /** FIXME: ??? */ 1924 panic("sorry, can't support type 2 default yet"); 1925#endif /* APIC_MIXED_MODE */ 1926 } 1927 else 1928 io_apic_ints[2].src_bus_irq = 0; /* ISA IRQ0 is on APIC INT 2 */ 1929 1930 if (type == 7) 1931 io_apic_ints[0].int_type = 0xff; /* N/C */ 1932 else 1933 io_apic_ints[0].int_type = 3; /* vectored 8259 */ 1934#endif /* APIC_IO */ 1935} 1936 1937 1938/* 1939 * start each AP in our list 1940 */ 1941static int 1942start_all_aps(u_int boot_addr) 1943{ 1944 int x, i, pg; 1945 u_char mpbiosreason; 1946 u_long mpbioswarmvec; 1947 struct globaldata *gd; 1948 char *stack; 1949 1950 POSTCODE(START_ALL_APS_POST); 1951 1952 /* initialize BSP's local APIC */ 1953 apic_initialize(); 1954 bsp_apic_ready = 1; 1955 1956 /* install the AP 1st level boot code */ 1957 install_ap_tramp(boot_addr); 1958 1959 1960 /* save the current value of the warm-start vector */ 1961 mpbioswarmvec = *((u_long *) WARMBOOT_OFF); 1962#ifndef PC98 1963 outb(CMOS_REG, BIOS_RESET); 1964 mpbiosreason = inb(CMOS_DATA); 1965#endif 1966 1967 /* record BSP in CPU map */ 1968 all_cpus = 1; 1969 1970 /* set up 0 -> 4MB P==V mapping for AP boot */ 1971 *(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME); 1972 invltlb(); 1973 1974 /* start each AP */ 1975 for (x = 1; x <= mp_naps; ++x) { 1976 1977 /* This is a bit verbose, it will go away soon. 
*/ 1978 1979 /* first page of AP's private space */ 1980 pg = x * i386_btop(sizeof(struct privatespace)); 1981 1982 /* allocate a new private data page */ 1983 gd = (struct globaldata *)kmem_alloc(kernel_map, PAGE_SIZE); 1984 1985 /* wire it into the private page table page */ 1986 SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(gd)); 1987 1988 /* allocate and set up an idle stack data page */ 1989 stack = (char *)kmem_alloc(kernel_map, UPAGES*PAGE_SIZE); 1990 for (i = 0; i < UPAGES; i++) 1991 SMPpt[pg + 5 + i] = (pt_entry_t) 1992 (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack)); 1993 1994 SMPpt[pg + 1] = 0; /* *prv_CMAP1 */ 1995 SMPpt[pg + 2] = 0; /* *prv_CMAP2 */ 1996 SMPpt[pg + 3] = 0; /* *prv_CMAP3 */ 1997 SMPpt[pg + 4] = 0; /* *prv_PMAP1 */ 1998 1999 /* prime data page for it to use */ 2000 SLIST_INSERT_HEAD(&cpuhead, gd, gd_allcpu); 2001 gd->gd_cpuid = x; 2002 gd->gd_cpu_lockid = x << 24; 2003 2004 /* setup a vector to our boot code */ 2005 *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET; 2006 *((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4); 2007#ifndef PC98 2008 outb(CMOS_REG, BIOS_RESET); 2009 outb(CMOS_DATA, BIOS_WARM); /* 'warm-start' */ 2010#endif 2011 2012 bootSTK = &SMP_prvspace[x].idlestack[UPAGES*PAGE_SIZE]; 2013 bootAP = x; 2014 2015 /* attempt to start the Application Processor */ 2016 CHECK_INIT(99); /* setup checkpoints */ 2017 if (!start_ap(x, boot_addr)) { 2018 printf("AP #%d (PHY# %d) failed!\n", x, CPU_TO_ID(x)); 2019 CHECK_PRINT("trace"); /* show checkpoints */ 2020 /* better panic as the AP may be running loose */ 2021 printf("panic y/n? 
[y] "); 2022 if (cngetc() != 'n') 2023 panic("bye-bye"); 2024 } 2025 CHECK_PRINT("trace"); /* show checkpoints */ 2026 2027 /* record its version info */ 2028 cpu_apic_versions[x] = cpu_apic_versions[0]; 2029 2030 all_cpus |= (1 << x); /* record AP in CPU map */ 2031 } 2032 2033 /* build our map of 'other' CPUs */ 2034 PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid))); 2035 2036 /* fill in our (BSP) APIC version */ 2037 cpu_apic_versions[0] = lapic.version; 2038 2039 /* restore the warmstart vector */ 2040 *(u_long *) WARMBOOT_OFF = mpbioswarmvec; 2041#ifndef PC98 2042 outb(CMOS_REG, BIOS_RESET); 2043 outb(CMOS_DATA, mpbiosreason); 2044#endif 2045 2046 /* 2047 * Set up the idle context for the BSP. Similar to above except 2048 * that some was done by locore, some by pmap.c and some is implicit 2049 * because the BSP is cpu#0 and the page is initially zero, and also 2050 * because we can refer to variables by name on the BSP.. 2051 */ 2052 2053 /* Allocate and setup BSP idle stack */ 2054 stack = (char *)kmem_alloc(kernel_map, UPAGES * PAGE_SIZE); 2055 for (i = 0; i < UPAGES; i++) 2056 SMPpt[5 + i] = (pt_entry_t) 2057 (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack)); 2058 2059 *(int *)PTD = 0; 2060 pmap_set_opt(); 2061 2062 /* number of APs actually started */ 2063 return mp_ncpus - 1; 2064} 2065 2066 2067/* 2068 * load the 1st level AP boot code into base memory. 
2069 */ 2070 2071/* targets for relocation */ 2072extern void bigJump(void); 2073extern void bootCodeSeg(void); 2074extern void bootDataSeg(void); 2075extern void MPentry(void); 2076extern u_int MP_GDT; 2077extern u_int mp_gdtbase; 2078 2079static void 2080install_ap_tramp(u_int boot_addr) 2081{ 2082 int x; 2083 int size = *(int *) ((u_long) & bootMP_size); 2084 u_char *src = (u_char *) ((u_long) bootMP); 2085 u_char *dst = (u_char *) boot_addr + KERNBASE; 2086 u_int boot_base = (u_int) bootMP; 2087 u_int8_t *dst8; 2088 u_int16_t *dst16; 2089 u_int32_t *dst32; 2090 2091 POSTCODE(INSTALL_AP_TRAMP_POST); 2092 2093 for (x = 0; x < size; ++x) 2094 *dst++ = *src++; 2095 2096 /* 2097 * modify addresses in code we just moved to basemem. unfortunately we 2098 * need fairly detailed info about mpboot.s for this to work. changes 2099 * to mpboot.s might require changes here. 2100 */ 2101 2102 /* boot code is located in KERNEL space */ 2103 dst = (u_char *) boot_addr + KERNBASE; 2104 2105 /* modify the lgdt arg */ 2106 dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base)); 2107 *dst32 = boot_addr + ((u_int) & MP_GDT - boot_base); 2108 2109 /* modify the ljmp target for MPentry() */ 2110 dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1); 2111 *dst32 = ((u_int) MPentry - KERNBASE); 2112 2113 /* modify the target for boot code segment */ 2114 dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base)); 2115 dst8 = (u_int8_t *) (dst16 + 1); 2116 *dst16 = (u_int) boot_addr & 0xffff; 2117 *dst8 = ((u_int) boot_addr >> 16) & 0xff; 2118 2119 /* modify the target for boot data segment */ 2120 dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base)); 2121 dst8 = (u_int8_t *) (dst16 + 1); 2122 *dst16 = (u_int) boot_addr & 0xffff; 2123 *dst8 = ((u_int) boot_addr >> 16) & 0xff; 2124} 2125 2126 2127/* 2128 * this function starts the AP (application processor) identified 2129 * by the APIC ID 'physicalCpu'. 
It does quite a "song and dance" 2130 * to accomplish this. This is necessary because of the nuances 2131 * of the different hardware we might encounter. It ain't pretty, 2132 * but it seems to work. 2133 */ 2134static int 2135start_ap(int logical_cpu, u_int boot_addr) 2136{ 2137 int physical_cpu; 2138 int vector; 2139 int cpus; 2140 u_long icr_lo, icr_hi; 2141 2142 POSTCODE(START_AP_POST); 2143 2144 /* get the PHYSICAL APIC ID# */ 2145 physical_cpu = CPU_TO_ID(logical_cpu); 2146 2147 /* calculate the vector */ 2148 vector = (boot_addr >> 12) & 0xff; 2149 2150 /* used as a watchpoint to signal AP startup */ 2151 cpus = mp_ncpus; 2152 2153 /* 2154 * first we do an INIT/RESET IPI this INIT IPI might be run, reseting 2155 * and running the target CPU. OR this INIT IPI might be latched (P5 2156 * bug), CPU waiting for STARTUP IPI. OR this INIT IPI might be 2157 * ignored. 2158 */ 2159 2160 /* setup the address for the target AP */ 2161 icr_hi = lapic.icr_hi & ~APIC_ID_MASK; 2162 icr_hi |= (physical_cpu << 24); 2163 lapic.icr_hi = icr_hi; 2164 2165 /* do an INIT IPI: assert RESET */ 2166 icr_lo = lapic.icr_lo & 0xfff00000; 2167 lapic.icr_lo = icr_lo | 0x0000c500; 2168 2169 /* wait for pending status end */ 2170 while (lapic.icr_lo & APIC_DELSTAT_MASK) 2171 /* spin */ ; 2172 2173 /* do an INIT IPI: deassert RESET */ 2174 lapic.icr_lo = icr_lo | 0x00008500; 2175 2176 /* wait for pending status end */ 2177 u_sleep(10000); /* wait ~10mS */ 2178 while (lapic.icr_lo & APIC_DELSTAT_MASK) 2179 /* spin */ ; 2180 2181 /* 2182 * next we do a STARTUP IPI: the previous INIT IPI might still be 2183 * latched, (P5 bug) this 1st STARTUP would then terminate 2184 * immediately, and the previously started INIT IPI would continue. OR 2185 * the previous INIT IPI has already run. and this STARTUP IPI will 2186 * run. OR the previous INIT IPI was ignored. and this STARTUP IPI 2187 * will run. 
2188 */ 2189 2190 /* do a STARTUP IPI */ 2191 lapic.icr_lo = icr_lo | 0x00000600 | vector; 2192 while (lapic.icr_lo & APIC_DELSTAT_MASK) 2193 /* spin */ ; 2194 u_sleep(200); /* wait ~200uS */ 2195 2196 /* 2197 * finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF 2198 * the previous STARTUP IPI was cancelled by a latched INIT IPI. OR 2199 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is 2200 * recognized after hardware RESET or INIT IPI. 2201 */ 2202 2203 lapic.icr_lo = icr_lo | 0x00000600 | vector; 2204 while (lapic.icr_lo & APIC_DELSTAT_MASK) 2205 /* spin */ ; 2206 u_sleep(200); /* wait ~200uS */ 2207 2208 /* wait for it to start */ 2209 set_apic_timer(5000000);/* == 5 seconds */ 2210 while (read_apic_timer()) 2211 if (mp_ncpus > cpus) 2212 return 1; /* return SUCCESS */ 2213 2214 return 0; /* return FAILURE */ 2215} 2216 2217/* 2218 * Flush the TLB on all other CPU's 2219 * 2220 * XXX: Needs to handshake and wait for completion before proceding. 2221 */ 2222void 2223smp_invltlb(void) 2224{ 2225#if defined(APIC_IO) 2226 if (smp_started && invltlb_ok) 2227 all_but_self_ipi(XINVLTLB_OFFSET); 2228#endif /* APIC_IO */ 2229} 2230 2231void 2232invlpg(u_int addr) 2233{ 2234 __asm __volatile("invlpg (%0)"::"r"(addr):"memory"); 2235 2236 /* send a message to the other CPUs */ 2237 smp_invltlb(); 2238} 2239 2240void 2241invltlb(void) 2242{ 2243 u_long temp; 2244 2245 /* 2246 * This should be implemented as load_cr3(rcr3()) when load_cr3() is 2247 * inlined. 2248 */ 2249 __asm __volatile("movl %%cr3, %0; movl %0, %%cr3":"=r"(temp) :: "memory"); 2250 2251 /* send a message to the other CPUs */ 2252 smp_invltlb(); 2253} 2254 2255 2256/* 2257 * This is called once the rest of the system is up and running and we're 2258 * ready to let the AP's out of the pen. 
 */
/*
 * ap_init() - late-stage bootstrap for an Application Processor.
 *
 * Spins until the BSP releases the APs (see release_aps()), registers
 * this CPU, cross-checks its local APIC id against its assigned cpuid,
 * and finally enters the scheduler via cpu_throw().  Never returns.
 */
void
ap_init(void)
{
	u_int apic_id;

	/* spin until all the AP's are ready */
	while (!aps_ready)
		/* spin */ ;

	/* lock against other AP's that are waking up */
	mtx_enter(&ap_boot_mtx, MTX_SPIN);

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	smp_cpus++;

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
	/* Load the IDT set up for the Pentium F00F workaround. */
	lidt(&r_idt);
#endif

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));

	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up FPU state on the AP */
	npxinit(__INITIAL_NPXCW__);

	/*
	 * A quick check from sanity claus: the logical cpu number derived
	 * from our local APIC id must match the cpuid we were assigned.
	 */
	apic_id = (apic_id_to_logical[(lapic.id & 0x0f000000) >> 24]);
	if (PCPU_GET(cpuid) != apic_id) {
		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
		printf("SMP: apic_id = %d\n", apic_id);
		printf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
		panic("cpuid mismatch! boom!!");
	}

	/* Init local apic for irq's */
	apic_initialize();

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	/*
	 * Activate smp_invltlb, although strictly speaking, this isn't
	 * quite correct yet.  We should have a bitfield for cpus willing
	 * to accept TLB flush IPI's or something and sync them.
	 */
	if (smp_cpus == mp_ncpus) {
		invltlb_ok = 1;
		smp_started = 1; /* enable IPI's, tlb shootdown, freezes etc */
		smp_active = 1;	 /* historic */
	}

	/* let other AP's wake up now */
	mtx_exit(&ap_boot_mtx, MTX_SPIN);

	/* wait until all the AP's are up */
	while (smp_started == 0)
		; /* nothing */

	/*
	 * Set curproc to our per-cpu idleproc so that mutexes have
	 * something unique to lock with.
	 */
	PCPU_SET(curproc, PCPU_GET(idleproc));

	microuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	/* ok, now grab sched_lock and enter the scheduler */
	enable_intr();
	mtx_enter(&sched_lock, MTX_SPIN);
	cpu_throw();	/* doesn't return */

	panic("scheduler returned us to ap_init");
}

#ifdef BETTER_CLOCK

/* Processor state snapshot codes deposited in checkstate_cpustate[]. */
#define CHECKSTATE_USER	0
#define CHECKSTATE_SYS	1
#define CHECKSTATE_INTR	2

/*
 * Per-CPU state snapshots filled in by the XCPUCHECKSTATE IPI handler.
 * Do not staticize.  Used from apic_vector.s
 */
struct proc*	checkstate_curproc[MAXCPU];	/* proc running on each CPU */
int		checkstate_cpustate[MAXCPU];	/* CHECKSTATE_* code */
u_long		checkstate_pc[MAXCPU];		/* interrupted pc */

/* Map a pc into a profiling-buffer index (mirrors addupc_intr()). */
#define PC_TO_INDEX(pc, prof)				\
        ((int)(((u_quad_t)((pc) - (prof)->pr_off) *	\
            (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)

/*
 * Record a user-mode profiling hit for the process running on remote
 * CPU 'id', using the pc that CPU deposited in checkstate_pc[].  If the
 * hit lands in the profile buffer, flag PS_OWEUPC on the process and
 * mark CPU 'id' in *astmap so it gets an AST to collect the tick.
 * Caller must hold sched_lock (see mtx_assert below).
 */
static void
addupc_intr_forwarded(struct proc *p, int id, int *astmap)
{
	int i;
	struct uprof *prof;
	u_long pc;

	pc = checkstate_pc[id];
	prof = &p->p_stats->p_prof;
	if (pc >= prof->pr_off &&
	    (i = PC_TO_INDEX(pc, prof)) < prof->pr_size) {
		mtx_assert(&sched_lock, MA_OWNED);
		if ((p->p_sflag & PS_OWEUPC) == 0) {
			prof->pr_addr = pc;
			prof->pr_ticks = 1;
			p->p_sflag |= PS_OWEUPC;
		}
		*astmap |= (1 << id);
	}
}

/*
 * Account one statclock tick to the process running on remote CPU 'id',
 * driven entirely from the checkstate_* snapshot that CPU deposited.
 * Called with sched_lock held on the CPU that fielded the clock
 * interrupt.  Sets CPU id's bit in *astmap when that CPU needs an AST.
 * Ticks are only charged on the first statclock pass (pscnt <= 1).
 */
static void
forwarded_statclock(int id, int pscnt, int *astmap)
{
	struct pstats *pstats;
	long rss;
	struct rusage *ru;
	struct vmspace *vm;
	int cpustate;
	struct proc *p;
#ifdef GPROF
	register struct gmonparam *g;
	int i;
#endif

	mtx_assert(&sched_lock, MA_OWNED);
	p = checkstate_curproc[id];
	cpustate = checkstate_cpustate[id];

	/*
	 * XXX - reclassify: an interrupt thread counts as interrupt time,
	 * the idle proc counts as system time.
	 */
	if (p->p_ithd)
		cpustate = CHECKSTATE_INTR;
	else if (p == SMP_prvspace[id].globaldata.gd_idleproc)
		cpustate = CHECKSTATE_SYS;

	switch (cpustate) {
	case CHECKSTATE_USER:
		if (p->p_sflag & PS_PROFIL)
			addupc_intr_forwarded(p, id, astmap);
		if (pscnt > 1)
			return;
		p->p_uticks++;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE]++;
		else
			cp_time[CP_USER]++;
		break;
	case CHECKSTATE_SYS:
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = checkstate_pc[id] - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
		if (pscnt > 1)
			return;

		p->p_sticks++;
		if (p == SMP_prvspace[id].globaldata.gd_idleproc)
			cp_time[CP_IDLE]++;
		else
			cp_time[CP_SYS]++;
		break;
	case CHECKSTATE_INTR:
	default:
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = checkstate_pc[id] - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
		if (pscnt > 1)
			return;
		KASSERT(p != NULL, ("NULL process in interrupt state"));
		p->p_iticks++;
		cp_time[CP_INTR]++;
	}

	schedclock(p);

	/* Update resource usage integrals and maximums. */
	if ((pstats = p->p_stats) != NULL &&
	    (ru = &pstats->p_ru) != NULL &&
	    (vm = p->p_vmspace) != NULL) {
		ru->ru_ixrss += pgtok(vm->vm_tsize);
		ru->ru_idrss += pgtok(vm->vm_dsize);
		ru->ru_isrss += pgtok(vm->vm_ssize);
		rss = pgtok(vmspace_resident_count(vm));
		if (ru->ru_maxrss < rss)
			ru->ru_maxrss = rss;
	}
}

/*
 * Forward statclock work for all other (non-stopped) CPUs: IPI them to
 * snapshot their state, account their ticks locally via
 * forwarded_statclock(), then IPI ASTs to any CPU that needs one.
 * Both spin waits are bounded, so a wedged CPU cannot hang us.
 */
void
forward_statclock(int pscnt)
{
	int map;
	int id;
	int i;

	/* Kludge. We don't yet have separate locks for the interrupts
	 * and the kernel. This means that we cannot let the other processors
	 * handle complex interrupts while inhibiting them from entering
	 * the kernel in a non-interrupt context.
	 *
	 * What we can do, without changing the locking mechanisms yet,
	 * is letting the other processors handle a very simple interrupt
	 * (which determines the processor states), and do the main
	 * work ourself.
	 */

	CTR1(KTR_SMP, "forward_statclock(%d)", pscnt);

	if (!smp_started || !invltlb_ok || cold || panicstr)
		return;

	/* Step 1: Probe state (user, cpu, interrupt, spinlock, idle ) */

	map = PCPU_GET(other_cpus) & ~stopped_cpus;
	checkstate_probed_cpus = 0;
	if (map != 0)
		selected_apic_ipi(map,
				  XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);

	/* Bounded spin until every probed CPU has answered. */
	i = 0;
	while (checkstate_probed_cpus != map) {
		/* spin */
		i++;
		if (i == 100000) {
#ifdef BETTER_CLOCK_DIAGNOSTIC
			printf("forward_statclock: checkstate %x\n",
			       checkstate_probed_cpus);
#endif
			break;
		}
	}

	/*
	 * Step 2: walk through other processors processes, update ticks and
	 * profiling info.
	 */

	map = 0;
	for (id = 0; id < mp_ncpus; id++) {
		if (id == PCPU_GET(cpuid))
			continue;
		if (((1 << id) & checkstate_probed_cpus) == 0)
			continue;
		forwarded_statclock(id, pscnt, &map);
	}
	if (map != 0) {
		/* Deliver ASTs and (bounded) wait for each to be taken. */
		checkstate_need_ast |= map;
		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
		i = 0;
		while ((checkstate_need_ast & map) != 0) {
			/* spin */
			i++;
			if (i > 100000) {
#ifdef BETTER_CLOCK_DIAGNOSTIC
				printf("forward_statclock: dropped ast 0x%x\n",
				       checkstate_need_ast & map);
#endif
				break;
			}
		}
	}
}

/*
 * Forward hardclock work for all other (non-stopped) CPUs: probe their
 * state via IPI, decrement virtual/profiling interval timers for the
 * processes running there, and (when stathz == 0) also do statclock
 * accounting.  CPUs with newly pending timer signals get an AST IPI.
 */
void
forward_hardclock(int pscnt)
{
	int map;
	int id;
	struct proc *p;
	struct pstats *pstats;
	int i;

	/* Kludge. We don't yet have separate locks for the interrupts
	 * and the kernel. This means that we cannot let the other processors
	 * handle complex interrupts while inhibiting them from entering
	 * the kernel in a non-interrupt context.
	 *
	 * What we can do, without changing the locking mechanisms yet,
	 * is letting the other processors handle a very simple interrupt
	 * (which determines the processor states), and do the main
	 * work ourself.
	 */

	CTR1(KTR_SMP, "forward_hardclock(%d)", pscnt);

	if (!smp_started || !invltlb_ok || cold || panicstr)
		return;

	/* Step 1: Probe state (user, cpu, interrupt, spinlock, idle) */

	map = PCPU_GET(other_cpus) & ~stopped_cpus;
	checkstate_probed_cpus = 0;
	if (map != 0)
		selected_apic_ipi(map,
				  XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);

	/* Bounded spin until every probed CPU has answered. */
	i = 0;
	while (checkstate_probed_cpus != map) {
		/* spin */
		i++;
		if (i == 100000) {
#ifdef BETTER_CLOCK_DIAGNOSTIC
			printf("forward_hardclock: checkstate %x\n",
			       checkstate_probed_cpus);
#endif
			break;
		}
	}

	/*
	 * Step 2: walk through other processors processes, update virtual
	 * timer and profiling timer. If stathz == 0, also update ticks and
	 * profiling info.
	 */

	map = 0;
	for (id = 0; id < mp_ncpus; id++) {
		if (id == PCPU_GET(cpuid))
			continue;
		if (((1 << id) & checkstate_probed_cpus) == 0)
			continue;
		p = checkstate_curproc[id];
		if (p) {
			pstats = p->p_stats;
			/* ITIMER_VIRTUAL only runs down in user mode. */
			if (checkstate_cpustate[id] == CHECKSTATE_USER &&
			    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
			    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
				p->p_sflag |= PS_ALRMPEND;
				map |= (1 << id);
			}
			if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
			    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
				p->p_sflag |= PS_PROFPEND;
				map |= (1 << id);
			}
		}
		if (stathz == 0) {
			forwarded_statclock(id, pscnt, &map);
		}
	}
	if (map != 0) {
		/* Deliver ASTs and (bounded) wait for each to be taken. */
		checkstate_need_ast |= map;
		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
		i = 0;
		while ((checkstate_need_ast & map) != 0) {
			/* spin */
			i++;
			if (i > 100000) {
#ifdef BETTER_CLOCK_DIAGNOSTIC
				printf("forward_hardclock: dropped ast 0x%x\n",
				       checkstate_need_ast & map);
#endif
				break;
			}
		}
	}
}

#endif /* BETTER_CLOCK */

/*
 * Nudge the CPU on which process 'p' is running so it notices a newly
 * posted signal: send it an AST IPI and (bounded) wait for delivery.
 * Loops because the process may migrate to another CPU between the
 * time we sample p_oncpu and the time the AST is taken.
 */
void
forward_signal(struct proc *p)
{
	int map;
	int id;
	int i;

	/* Kludge. We don't yet have separate locks for the interrupts
	 * and the kernel. This means that we cannot let the other processors
	 * handle complex interrupts while inhibiting them from entering
	 * the kernel in a non-interrupt context.
	 *
	 * What we can do, without changing the locking mechanisms yet,
	 * is letting the other processors handle a very simple interrupt
	 * (which determines the processor states), and do the main
	 * work ourself.
	 */

	CTR1(KTR_SMP, "forward_signal(%p)", p);

	if (!smp_started || !invltlb_ok || cold || panicstr)
		return;
	if (!forward_signal_enabled)
		return;
	mtx_enter(&sched_lock, MTX_SPIN);
	while (1) {
		if (p->p_stat != SRUN) {
			mtx_exit(&sched_lock, MTX_SPIN);
			return;
		}
		id = p->p_oncpu;
		mtx_exit(&sched_lock, MTX_SPIN);
		if (id == 0xff)		/* 0xff: not currently on any CPU */
			return;
		map = (1<<id);
		checkstate_need_ast |= map;
		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
		i = 0;
		while ((checkstate_need_ast & map) != 0) {
			/* spin */
			i++;
			if (i > 100000) {
#if 0
				printf("forward_signal: dropped ast 0x%x\n",
				       checkstate_need_ast & map);
#endif
				break;
			}
		}
		mtx_enter(&sched_lock, MTX_SPIN);
		/* Still on the same CPU: the AST reached the right place. */
		if (id == p->p_oncpu) {
			mtx_exit(&sched_lock, MTX_SPIN);
			return;
		}
		/* Process migrated; retry against its new CPU. */
	}
}

/*
 * Ask all other CPUs to reschedule: mark them in resched_cpus and send
 * an AST IPI to every one that is not stopped.
 *
 * NOTE(review): unlike forward_signal(), this never sets bits in
 * checkstate_need_ast before spinning on it, so the wait below normally
 * exits immediately (it only waits out ASTs some other forwarder has
 * outstanding) — verify whether that is intentional.
 */
void
forward_roundrobin(void)
{
	u_int map;
	int i;

	CTR0(KTR_SMP, "forward_roundrobin()");

	if (!smp_started || !invltlb_ok || cold || panicstr)
		return;
	if (!forward_roundrobin_enabled)
		return;
	resched_cpus |= PCPU_GET(other_cpus);
	map = PCPU_GET(other_cpus) & ~stopped_cpus;
#if 1
	selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
#else
	(void) all_but_self_ipi(XCPUAST_OFFSET);
#endif
	i = 0;
	while ((checkstate_need_ast & map) != 0) {
		/* spin */
		i++;
		if (i > 100000) {
#if 0
			printf("forward_roundrobin: dropped ast 0x%x\n",
			       checkstate_need_ast & map);
#endif
			break;
		}
	}
}

/*
 * When called the executing CPU will send an IPI to all other CPUs
 * requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 * - Signals all CPUs in map to stop.
 * - Waits for each to stop.
2744 * 2745 * Returns: 2746 * -1: error 2747 * 0: NA 2748 * 1: ok 2749 * 2750 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs 2751 * from executing at same time. 2752 */ 2753int 2754stop_cpus(u_int map) 2755{ 2756 int count = 0; 2757 2758 if (!smp_started) 2759 return 0; 2760 2761 /* send the Xcpustop IPI to all CPUs in map */ 2762 selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED); 2763 2764 while (count++ < 100000 && (stopped_cpus & map) != map) 2765 /* spin */ ; 2766 2767#ifdef DIAGNOSTIC 2768 if ((stopped_cpus & map) != map) 2769 printf("Warning: CPUs 0x%x did not stop!\n", 2770 (~(stopped_cpus & map)) & map); 2771#endif 2772 2773 return 1; 2774} 2775 2776 2777/* 2778 * Called by a CPU to restart stopped CPUs. 2779 * 2780 * Usually (but not necessarily) called with 'stopped_cpus' as its arg. 2781 * 2782 * - Signals all CPUs in map to restart. 2783 * - Waits for each to restart. 2784 * 2785 * Returns: 2786 * -1: error 2787 * 0: NA 2788 * 1: ok 2789 */ 2790int 2791restart_cpus(u_int map) 2792{ 2793 int count = 0; 2794 2795 if (!smp_started) 2796 return 0; 2797 2798 started_cpus = map; /* signal other cpus to restart */ 2799 2800 /* wait for each to clear its bit */ 2801 while (count++ < 100000 && (stopped_cpus & map) != 0) 2802 /* spin */ ; 2803 2804#ifdef DIAGNOSTIC 2805 if ((stopped_cpus & map) != 0) 2806 printf("Warning: CPUs 0x%x did not restart!\n", 2807 (~(stopped_cpus & map)) & map); 2808#endif 2809 2810 return 1; 2811} 2812 2813 2814#ifdef APIC_INTR_REORDER 2815/* 2816 * Maintain mapping from softintr vector to isr bit in local apic. 
 */
/*
 * Record, for interrupt number 'intr', the address and bit of the local
 * APIC ISR register that corresponds to IDT vector 'vector'.  The
 * pointer arithmetic treats the ISR block as 32-bit words spaced 16
 * bytes (4 u_ints) apart: register index = vector>>5, bit = vector&31.
 */
void
set_lapic_isrloc(int intr, int vector)
{
	/*
	 * NOTE(review): 'intr > 32' admits intr == 32; if
	 * apic_isrbit_location[] (declared elsewhere) has only 32
	 * entries this is an off-by-one — confirm the array bound.
	 */
	if (intr < 0 || intr > 32)
		panic("set_apic_isrloc: bad intr argument: %d",intr);
	if (vector < ICU_OFFSET || vector > 255)
		panic("set_apic_isrloc: bad vector argument: %d",vector);
	apic_isrbit_location[intr].location = &lapic.isr0 + ((vector>>5)<<2);
	apic_isrbit_location[intr].bit = (1<<(vector & 31));
}
#endif

/*
 * All-CPU rendezvous. CPUs are signalled, all execute the setup function
 * (if specified), rendezvous, execute the action function (if specified),
 * rendezvous again, execute the teardown function (if specified), and then
 * resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */
static void (*smp_rv_setup_func)(void *arg);
static void (*smp_rv_action_func)(void *arg);
static void (*smp_rv_teardown_func)(void *arg);
static void *smp_rv_func_arg;
/* Barrier counters: [0] = CPUs at entry rendezvous, [1] = at exit. */
static volatile int smp_rv_waiters[2];

/*
 * Per-CPU half of the rendezvous: executed by every CPU — the initiator
 * calls it directly from smp_rendezvous(), the others from the
 * XRENDEZVOUS IPI handler.  The two spin barriers ensure all CPUs run
 * the action phase together.  (No explicit memory barriers here beyond
 * atomic_add_int — presumably relying on x86 ordering; NOTE(review).)
 */
void
smp_rendezvous_action(void)
{
	/* setup function */
	if (smp_rv_setup_func != NULL)
		smp_rv_setup_func(smp_rv_func_arg);
	/* spin on entry rendezvous */
	atomic_add_int(&smp_rv_waiters[0], 1);
	while (smp_rv_waiters[0] < mp_ncpus)
		;
	/* action function */
	if (smp_rv_action_func != NULL)
		smp_rv_action_func(smp_rv_func_arg);
	/* spin on exit rendezvous */
	atomic_add_int(&smp_rv_waiters[1], 1);
	while (smp_rv_waiters[1] < mp_ncpus)
		;
	/* teardown function */
	if (smp_rv_teardown_func != NULL)
		smp_rv_teardown_func(smp_rv_func_arg);
}

/*
 * Initiate an all-CPU rendezvous (see block comment above).  The
 * smp_rv_mtx spin lock serializes concurrent initiators and protects
 * the static parameter slots; the function pointers and the cleared
 * waiter counts must be published before the IPI goes out.
 */
void
smp_rendezvous(void (* setup_func)(void *),
	       void (* action_func)(void *),
	       void (* teardown_func)(void *),
	       void *arg)
{

	/* obtain rendezvous lock */
	mtx_enter(&smp_rv_mtx, MTX_SPIN);

	/* set static function pointers */
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[0] = 0;
	smp_rv_waiters[1] = 0;

	/*
	 * signal other processors, which will enter the IPI with interrupts off
	 */
	all_but_self_ipi(XRENDEZVOUS_OFFSET);

	/* call executor function (participate in our own rendezvous) */
	smp_rendezvous_action();

	/* release lock */
	mtx_exit(&smp_rv_mtx, MTX_SPIN);
}

/*
 * SYSINIT hook: release the APs spinning on aps_ready in ap_init().
 * Uses a store-with-release so prior initialization is visible first.
 */
void
release_aps(void *dummy __unused)
{
	atomic_store_rel_int(&aps_ready, 1);
}

SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);