/* mp_x86.c revision 71321 */
1/* 2 * Copyright (c) 1996, by Steve Passe 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. The name of the developer may NOT be used to endorse or promote products 11 * derived from this software without specific prior written permission. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 
24 * 25 * $FreeBSD: head/sys/i386/i386/mp_machdep.c 71321 2001-01-21 07:54:10Z peter $ 26 */ 27 28#include "opt_cpu.h" 29#include "opt_user_ldt.h" 30 31#ifdef SMP 32#include <machine/smptests.h> 33#else 34#error 35#endif 36 37#include <sys/param.h> 38#include <sys/bus.h> 39#include <sys/systm.h> 40#include <sys/kernel.h> 41#include <sys/proc.h> 42#include <sys/sysctl.h> 43#include <sys/malloc.h> 44#include <sys/memrange.h> 45#include <sys/mutex.h> 46#ifdef BETTER_CLOCK 47#include <sys/dkstat.h> 48#endif 49#include <sys/cons.h> /* cngetc() */ 50 51#include <vm/vm.h> 52#include <vm/vm_param.h> 53#include <vm/pmap.h> 54#include <vm/vm_kern.h> 55#include <vm/vm_extern.h> 56#ifdef BETTER_CLOCK 57#include <sys/lock.h> 58#include <vm/vm_map.h> 59#include <sys/user.h> 60#ifdef GPROF 61#include <sys/gmon.h> 62#endif 63#endif 64 65#include <machine/smp.h> 66#include <machine/apic.h> 67#include <machine/atomic.h> 68#include <machine/cpufunc.h> 69#include <machine/mpapic.h> 70#include <machine/psl.h> 71#include <machine/segments.h> 72#include <machine/smptests.h> /** TEST_DEFAULT_CONFIG, TEST_TEST1 */ 73#include <machine/tss.h> 74#include <machine/specialreg.h> 75#include <machine/globaldata.h> 76 77#if defined(APIC_IO) 78#include <machine/md_var.h> /* setidt() */ 79#include <i386/isa/icu.h> /* IPIs */ 80#include <i386/isa/intr_machdep.h> /* IPIs */ 81#endif /* APIC_IO */ 82 83#if defined(TEST_DEFAULT_CONFIG) 84#define MPFPS_MPFB1 TEST_DEFAULT_CONFIG 85#else 86#define MPFPS_MPFB1 mpfps->mpfb1 87#endif /* TEST_DEFAULT_CONFIG */ 88 89#define WARMBOOT_TARGET 0 90#define WARMBOOT_OFF (KERNBASE + 0x0467) 91#define WARMBOOT_SEG (KERNBASE + 0x0469) 92 93#ifdef PC98 94#define BIOS_BASE (0xe8000) 95#define BIOS_SIZE (0x18000) 96#else 97#define BIOS_BASE (0xf0000) 98#define BIOS_SIZE (0x10000) 99#endif 100#define BIOS_COUNT (BIOS_SIZE/4) 101 102#define CMOS_REG (0x70) 103#define CMOS_DATA (0x71) 104#define BIOS_RESET (0x0f) 105#define BIOS_WARM (0x0a) 106 107#define PROCENTRY_FLAG_EN 
0x01
#define PROCENTRY_FLAG_BP	0x02
#define IOAPICENTRY_FLAG_EN	0x01


/*
 * MP Floating Pointer Structure.
 * Located by mp_probe().  A non-zero mpfb1 selects one of the MP spec
 * "default" configurations; otherwise pap points at the full MP
 * configuration table.
 */
typedef struct MPFPS {
	char    signature[4];	/* "_MP_" */
	void   *pap;		/* physical address of MP config table */
	u_char  length;
	u_char  spec_rev;
	u_char  checksum;
	u_char  mpfb1;		/* default config type, 0 == table present */
	u_char  mpfb2;		/* bit 7 set => IMCR present (PIC mode) */
	u_char  mpfb3;
	u_char  mpfb4;
	u_char  mpfb5;
}      *mpfps_t;

/* MP Configuration Table Header */
typedef struct MPCTH {
	char    signature[4];
	u_short base_table_length;
	u_char  spec_rev;
	u_char  checksum;
	u_char  oem_id[8];
	u_char  product_id[12];
	void   *oem_table_pointer;
	u_short oem_table_size;
	u_short entry_count;	/* # of base table entries that follow */
	void   *apic_address;	/* physical address of the local APIC */
	u_short extended_table_length;
	u_char  extended_table_checksum;
	u_char  reserved;
}      *mpcth_t;


/* base table entry, type 0: processor */
typedef struct PROCENTRY {
	u_char  type;
	u_char  apic_id;
	u_char  apic_version;
	u_char  cpu_flags;	/* PROCENTRY_FLAG_EN / PROCENTRY_FLAG_BP */
	u_long  cpu_signature;
	u_long  feature_flags;
	u_long  reserved1;
	u_long  reserved2;
}      *proc_entry_ptr;

/* base table entry, type 1: bus */
typedef struct BUSENTRY {
	u_char  type;
	u_char  bus_id;
	char    bus_type[6];	/* blank-padded name, e.g. "PCI" */
}      *bus_entry_ptr;

/* base table entry, type 2: IO APIC */
typedef struct IOAPICENTRY {
	u_char  type;
	u_char  apic_id;
	u_char  apic_version;
	u_char  apic_flags;	/* IOAPICENTRY_FLAG_EN */
	void   *apic_address;	/* physical address of this IO APIC */
}      *io_apic_entry_ptr;

/* base table entries, types 3 (I/O INT) and 4 (local INT) */
typedef struct INTENTRY {
	u_char  type;
	u_char  int_type;
	u_short int_flags;
	u_char  src_bus_id;
	u_char  src_bus_irq;
	u_char  dst_apic_id;
	u_char  dst_apic_int;
}      *int_entry_ptr;

/* descriptions of MP basetable entries */
typedef struct BASETABLE_ENTRY {
	u_char  type;
	u_char  length;		/* entry size in bytes; used to walk the table */
	char    name[16];
}       basetable_entry;

/*
 * this code MUST be enabled here and in mpboot.s.
 * it follows the very early stages of AP boot by placing values in CMOS ram.
 * it NORMALLY will never be needed and thus the primitive method for enabling.
 *
#define CHECK_POINTS
 */

#if defined(CHECK_POINTS) && !defined(PC98)
#define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))

#define CHECK_INIT(D);				\
	CHECK_WRITE(0x34, (D));			\
	CHECK_WRITE(0x35, (D));			\
	CHECK_WRITE(0x36, (D));			\
	CHECK_WRITE(0x37, (D));			\
	CHECK_WRITE(0x38, (D));			\
	CHECK_WRITE(0x39, (D));

#define CHECK_PRINT(S);				\
	printf("%s: %d, %d, %d, %d, %d, %d\n",	\
	   (S),					\
	   CHECK_READ(0x34),			\
	   CHECK_READ(0x35),			\
	   CHECK_READ(0x36),			\
	   CHECK_READ(0x37),			\
	   CHECK_READ(0x38),			\
	   CHECK_READ(0x39));

#else				/* CHECK_POINTS */

#define CHECK_INIT(D)
#define CHECK_PRINT(S)

#endif				/* CHECK_POINTS */

/*
 * Values to send to the POST hardware.
 */
#define MP_BOOTADDRESS_POST	0x10
#define MP_PROBE_POST		0x11
#define MPTABLE_PASS1_POST	0x12

#define MP_START_POST		0x13
#define MP_ENABLE_POST		0x14
#define MPTABLE_PASS2_POST	0x15

#define START_ALL_APS_POST	0x16
#define INSTALL_AP_TRAMP_POST	0x17
#define START_AP_POST		0x18

#define MP_ANNOUNCE_POST	0x19

/* used to hold the AP's until we are ready to release them */
struct simplelock ap_boot_lock;

/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
int	current_postcode;

/** XXX FIXME: what system files declare these???
 */
extern struct region_descriptor r_gdt, r_idt;

int	bsp_apic_ready = 0;	/* flags useability of BSP apic */
int	mp_ncpus;		/* # of CPUs, including BSP */
int	mp_naps;		/* # of Applications processors */
int	mp_nbusses;		/* # of busses */
int	mp_napics;		/* # of IO APICs */
int	boot_cpu_id;		/* designated BSP */
vm_offset_t cpu_apic_address;
vm_offset_t io_apic_address[NAPICID];	/* NAPICID is more than enough */
extern	int nkpt;

u_int32_t cpu_apic_versions[MAXCPU];
u_int32_t *io_apic_versions;	/* allocated in mptable_pass2() */

#ifdef APIC_INTR_REORDER
struct {
	volatile int *location;
	int bit;
} apic_isrbit_location[32];
#endif

struct apic_intmapinfo	int_to_apicintpin[APIC_INTMAPSIZE];

/*
 * APIC ID logical/physical mapping structures.
 * We oversize these to simplify boot-time config.
 */
int	cpu_num_to_apic_id[NAPICID];
int	io_num_to_apic_id[NAPICID];
int	apic_id_to_logical[NAPICID];


/* Bitmap of all available CPUs */
u_int	all_cpus;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* Hotwire a 0->4MB V==P mapping */
extern pt_entry_t *KPTphys;

/* SMP page table page */
extern pt_entry_t *SMPpt;

struct pcb stoppcbs[MAXCPU];

int smp_started;		/* has the system started? */

/*
 * Local data and functions.
 */

static int	mp_capable;	/* set by mp_probe(), tested by mp_start() */
static u_int	boot_address;	/* AP trampoline address, see mp_bootaddress() */
static u_int	base_memory;	/* size of base memory in bytes */

static int	picmode;		/* 0: virtual wire mode, 1: PIC mode */
static mpfps_t	mpfps;
static int	search_for_sig(u_int32_t target, int count);
static void	mp_enable(u_int boot_addr);

static void	mptable_pass1(void);
static int	mptable_pass2(void);
static void	default_mp_table(int type);
static void	fix_mp_table(void);
static void	setup_apic_irq_mapping(void);
static void	init_locks(void);
static int	start_all_aps(u_int boot_addr);
static void	install_ap_tramp(u_int boot_addr);
static int	start_ap(int logicalCpu, u_int boot_addr);
static int	apic_int_is_bus_type(int intr, int bus_type);
static void	release_aps(void *dummy);

/*
 * Calculate usable address in base memory for AP trampoline code.
 * basemem is the size of base memory in KB.  Returns a 4K-aligned
 * physical address with at least bootMP_size bytes below top-of-base;
 * the result is also cached in boot_address for mp_start()/mp_enable().
 */
u_int
mp_bootaddress(u_int basemem)
{
	POSTCODE(MP_BOOTADDRESS_POST);

	base_memory = basemem * 1024;	/* convert to bytes */

	boot_address = base_memory & ~0xfff;	/* round down to 4k boundary */
	if ((base_memory - boot_address) < bootMP_size)
		boot_address -= 4096;	/* not enough, lower by 4k */

	return boot_address;
}


/*
 * Look for an Intel MP spec table (ie, SMP capable hardware).
 * Returns 1 (and sets mpfps/mp_capable, runs mptable_pass1()) on success,
 * 0 if no MP floating pointer structure was found.
 */
int
mp_probe(void)
{
	int     x;
	u_long  segment;
	u_int32_t target;

	POSTCODE(MP_PROBE_POST);

	/* see if EBDA exists */
	if ((segment = (u_long) * (u_short *) (KERNBASE + 0x40e)) != 0) {
		/* search first 1K of EBDA */
		target = (u_int32_t) (segment << 4);
		if ((x = search_for_sig(target, 1024 / 4)) >= 0)
			goto found;
	} else {
		/* last 1K of base memory, effective 'top of base' passed in */
		target = (u_int32_t) (base_memory - 0x400);
		if ((x = search_for_sig(target, 1024 / 4)) >= 0)
			goto found;
	}

	/* search the BIOS */
	target = (u_int32_t) BIOS_BASE;
	if ((x = search_for_sig(target, BIOS_COUNT)) >= 0)
		goto found;

	/* nothing found */
	mpfps = (mpfps_t)0;
	mp_capable = 0;
	return 0;

found:
	/* calculate needed resources */
	mpfps = (mpfps_t)x;
	mptable_pass1();

	/* flag fact that we are running multiple processors */
	mp_capable = 1;
	return 1;
}


/*
 * Initialize the SMP hardware and the APIC and start up the AP's.
 * Panics if mp_probe() did not previously find MP-capable hardware.
 */
void
mp_start(void)
{
	POSTCODE(MP_START_POST);

	/* look for MP capable motherboard */
	if (mp_capable)
		mp_enable(boot_address);
	else
		panic("MP hardware not found!");
}


/*
 * Print various information about the SMP system hardware and setup.
 */
void
mp_announce(void)
{
	int     x;

	POSTCODE(MP_ANNOUNCE_POST);

	printf("FreeBSD/SMP: Multiprocessor motherboard\n");
	printf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0));
	printf(", version: 0x%08x", cpu_apic_versions[0]);
	printf(", at 0x%08x\n", cpu_apic_address);
	for (x = 1; x <= mp_naps; ++x) {
		printf(" cpu%d (AP): apic id: %2d", x, CPU_TO_ID(x));
		printf(", version: 0x%08x", cpu_apic_versions[x]);
		printf(", at 0x%08x\n", cpu_apic_address);
	}

#if defined(APIC_IO)
	for (x = 0; x < mp_napics; ++x) {
		printf(" io%d (APIC): apic id: %2d", x, IO_TO_ID(x));
		printf(", version: 0x%08x", io_apic_versions[x]);
		printf(", at 0x%08x\n", io_apic_address[x]);
	}
#else
	printf(" Warning: APIC I/O disabled\n");
#endif	/* APIC_IO */
}

/*
 * AP cpu's call this to sync up protected mode.
 * Loads per-CPU GDT/IDT/LDT/TSS from SMP_prvspace[bootAP]; the global
 * bootAP selects which AP's private space to use.
 */
void
init_secondary(void)
{
	int	gsel_tss;
	int	x, myid = bootAP;

	/* point this AP's GDT entries at its private space and TSS */
	gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[myid];
	gdt_segs[GPROC0_SEL].ssd_base =
		(int) &SMP_prvspace[myid].globaldata.gd_common_tss;
	SMP_prvspace[myid].globaldata.gd_prvspace =
		&SMP_prvspace[myid].globaldata;

	for (x = 0; x < NGDT; x++) {
		ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
	}

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (int) &gdt[myid * NGDT];
	lgdt(&r_gdt);		/* does magic intra-segment return */

	lidt(&r_idt);

	lldt(_default_ldt);
#ifdef USER_LDT
	PCPU_SET(currentldt, _default_ldt);
#endif

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
	PCPU_SET(common_tss.tss_esp0, 0);	/* not used until after switch */
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
	PCPU_SET(tss_gdt, &gdt[myid * NGDT + GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	ltr(gsel_tss);

	pmap_set_opt();
}


#if defined(APIC_IO)
/*
 * Final configuration of the BSP's local APIC:
 *  - disable 'pic mode'.
 *  - disable 'virtual wire mode'.
 *  - enable NMI.
 */
void
bsp_apic_configure(void)
{
	u_char	byte;
	u_int32_t temp;

	/* leave 'pic mode' if necessary */
	if (picmode) {
		outb(0x22, 0x70);	/* select IMCR */
		byte = inb(0x23);	/* current contents */
		byte |= 0x01;		/* mask external INTR */
		outb(0x23, byte);	/* disconnect 8259s/NMI */
	}

	/* mask lint0 (the 8259 'virtual wire' connection) */
	temp = lapic.lvt_lint0;
	temp |= APIC_LVT_M;		/* set the mask */
	lapic.lvt_lint0 = temp;

	/* setup lint1 to handle NMI */
	temp = lapic.lvt_lint1;
	temp &= ~APIC_LVT_M;		/* clear the mask */
	lapic.lvt_lint1 = temp;

	if (bootverbose)
		apic_dump("bsp_apic_configure()");
}
#endif  /* APIC_IO */


/*******************************************************************
 * local functions and data
 */

/*
 * start the SMP system:
 * parse the MP table (pass 2), program the IO APICs, install the IPI
 * vectors, take the ap_boot_lock and kick off each AP at boot_addr.
 */
static void
mp_enable(u_int boot_addr)
{
	int     x;
#if defined(APIC_IO)
	int     apic;
	u_int   ux;
#endif	/* APIC_IO */

	POSTCODE(MP_ENABLE_POST);

	/* turn on 4MB of V == P addressing so we can get to MP table */
	*(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
	invltlb();

	/* examine the MP table for needed info, uses physical addresses */
	x = mptable_pass2();

	/* tear the temporary identity mapping back down */
	*(int *)PTD = 0;
	invltlb();

	/* can't process default configs till the CPU APIC is pmapped */
	if (x)
		default_mp_table(x);

	/* post scan cleanup */
	fix_mp_table();
	setup_apic_irq_mapping();

#if defined(APIC_IO)

	/* fill the LOGICAL io_apic_versions table */
	for (apic = 0; apic < mp_napics; ++apic) {
		ux = io_apic_read(apic, IOAPIC_VER);
		io_apic_versions[apic] = ux;
		io_apic_set_id(apic, IO_TO_ID(apic));
	}

	/* program each IO APIC in the system */
	for (apic = 0; apic < mp_napics; ++apic)
		if (io_apic_setup(apic) < 0)
			panic("IO APIC setup failure");

	/* install a 'Spurious INTerrupt' vector */
	setidt(XSPURIOUSINT_OFFSET, Xspuriousint,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* install an inter-CPU IPI for TLB invalidation */
	setidt(XINVLTLB_OFFSET, Xinvltlb,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

#ifdef BETTER_CLOCK
	/* install an inter-CPU IPI for reading processor state */
	setidt(XCPUCHECKSTATE_OFFSET, Xcpucheckstate,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
#endif

	/* install an inter-CPU IPI for all-CPU rendezvous */
	setidt(XRENDEZVOUS_OFFSET, Xrendezvous,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* install an inter-CPU IPI for forcing an additional software trap */
	setidt(XCPUAST_OFFSET, Xcpuast,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* install an inter-CPU IPI for interrupt forwarding */
	setidt(XFORWARD_IRQ_OFFSET, Xforward_irq,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* install an inter-CPU IPI for CPU stop/restart */
	setidt(XCPUSTOP_OFFSET, Xcpustop,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

#if defined(TEST_TEST1)
	/* install a "fake hardware INTerrupt" vector */
	setidt(XTEST1_OFFSET, Xtest1,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
#endif	/** TEST_TEST1 */

#endif	/* APIC_IO */

	/* initialize all SMP locks */
	init_locks();

	/* obtain the ap_boot_lock */
	s_lock(&ap_boot_lock);

	/* start each Application Processor */
	start_all_aps(boot_addr);
}


/*
 * look for the MP spec signature
 */

/* string defined by the Intel MP Spec as identifying the MP table */
#define MP_SIG		0x5f504d5f	/* _MP_ */
#define NEXT(X)		((X) += 4)

/*
 * Scan `count' dwords of physical memory starting at `target' for the
 * MP_SIG dword.  NEXT() steps 4 dwords (16 bytes) per iteration.
 * Returns the physical byte address of the signature, or -1.
 */
static int
search_for_sig(u_int32_t target, int count)
{
	int     x;
	u_int32_t *addr = (u_int32_t *) (KERNBASE + target);

	for (x = 0; x < count; NEXT(x))
		if (addr[x] == MP_SIG)
			/* make array index a byte index */
			return (target + (x * sizeof(u_int32_t)));

	return -1;
}


static basetable_entry basetable_entry_types[] =
{
	{0, 20, "Processor"},
	{1, 8, "Bus"},
	{2, 8, "I/O APIC"},
	{3, 8, "I/O INT"},
	{4, 8, "Local INT"}
};

typedef struct BUSDATA {
	u_char  bus_id;
	enum busTypes bus_type;
}       bus_datum;

typedef struct INTDATA {
	u_char  int_type;
	u_short int_flags;
	u_char  src_bus_id;
	u_char  src_bus_irq;
	u_char  dst_apic_id;
	u_char  dst_apic_int;
	u_char	int_vector;
}       io_int, local_int;

typedef struct BUSTYPENAME {
	u_char  type;
	char    name[7];
}       bus_type_name;

/* indexed by MP-spec bus type code; unknown slots are placeholders */
static bus_type_name bus_type_table[] =
{
	{CBUS, "CBUS"},
	{CBUSII, "CBUSII"},
	{EISA, "EISA"},
	{MCA, "MCA"},
	{UNKNOWN_BUSTYPE, "---"},
	{ISA, "ISA"},
	{MCA, "MCA"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{PCI, "PCI"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{XPRESS, "XPRESS"},
	{UNKNOWN_BUSTYPE, "---"}
};
/* from MP spec v1.4, table 5-1 */
static int default_data[7][5] =
{
/*   nbus, id0, type0, id1, type1 */
	{1, 0, ISA, 255, 255},
	{1, 0, EISA, 255, 255},
	{1, 0, EISA, 255, 255},
	{1, 0, MCA, 255, 255},
	{2, 0, ISA, 1, PCI},
	{2, 0, EISA, 1, PCI},
	{2, 0, MCA, 1, PCI}
};


/* the bus data */
static bus_datum *bus_data;

/* the IO INT data, one entry per possible APIC INTerrupt */
static io_int  *io_apic_ints;

static int nintrs;

static int	processor_entry
__P((proc_entry_ptr entry, int cpu));
static int bus_entry	__P((bus_entry_ptr entry, int bus));
static int io_apic_entry	__P((io_apic_entry_ptr entry, int apic));
static int int_entry	__P((int_entry_ptr entry, int intr));
static int lookup_bus_type	__P((char *name));


/*
 * 1st pass on motherboard's Intel MP specification table.
 *
 * initializes:
 *	mp_ncpus = 1
 *
 * determines:
 *	cpu_apic_address (common to all CPUs)
 *	io_apic_address[N]
 *	mp_naps
 *	mp_nbusses
 *	mp_napics
 *	nintrs
 */
static void
mptable_pass1(void)
{
	int	x;
	mpcth_t	cth;
	int	totalSize;
	void*	position;
	int	count;
	int	type;

	POSTCODE(MPTABLE_PASS1_POST);

	/* clear various tables */
	for (x = 0; x < NAPICID; ++x) {
		io_apic_address[x] = ~0;	/* IO APIC address table */
	}

	/* init everything to empty */
	mp_naps = 0;
	mp_nbusses = 0;
	mp_napics = 0;
	nintrs = 0;

	/* check for use of 'default' configuration */
	if (MPFPS_MPFB1 != 0) {
		/* use default addresses */
		cpu_apic_address = DEFAULT_APIC_BASE;
		io_apic_address[0] = DEFAULT_IO_APIC_BASE;

		/* fill in with defaults */
		mp_naps = 2;		/* includes BSP */
		mp_nbusses = default_data[MPFPS_MPFB1 - 1][0];
#if defined(APIC_IO)
		mp_napics = 1;
		nintrs = 16;
#endif	/* APIC_IO */
	}
	else {
		if ((cth = mpfps->pap) == 0)
			panic("MP Configuration Table Header MISSING!");

		cpu_apic_address = (vm_offset_t) cth->apic_address;

		/* walk the table, recording info of interest */
		totalSize = cth->base_table_length - sizeof(struct MPCTH);
		position = (u_char *) cth + sizeof(struct MPCTH);
		count = cth->entry_count;

		while (count--) {
			switch (type = *(u_char *) position) {
			case 0: /* processor_entry */
				/* only count enabled processors */
				if (((proc_entry_ptr)position)->cpu_flags
					& PROCENTRY_FLAG_EN)
					++mp_naps;
				break;
			case 1: /* bus_entry */
				++mp_nbusses;
				break;
			case 2: /* io_apic_entry */
				/* record address of each enabled IO APIC */
				if (((io_apic_entry_ptr)position)->apic_flags
					& IOAPICENTRY_FLAG_EN)
					io_apic_address[mp_napics++] =
					    (vm_offset_t)((io_apic_entry_ptr)
						position)->apic_address;
				break;
			case 3: /* int_entry */
				++nintrs;
				break;
			case 4:	/* int_entry */
				break;
			default:
				panic("mpfps Base Table HOSED!");
				/* NOTREACHED */
			}

			totalSize -= basetable_entry_types[type].length;
			(u_char*)position += basetable_entry_types[type].length;
		}
	}

	/* qualify the numbers */
	if (mp_naps > MAXCPU) {
		printf("Warning: only using %d of %d available CPUs!\n",
			MAXCPU, mp_naps);
		mp_naps = MAXCPU;
	}

	/*
	 * Count the BSP.
	 * This is also used as a counter while starting the APs.
	 */
	mp_ncpus = 1;

	--mp_naps;	/* subtract the BSP */
}


/*
 * 2nd pass on motherboard's Intel MP specification table.
 *
 * sets:
 *	boot_cpu_id
 *	ID_TO_IO(N), phy APIC ID to log CPU/IO table
 *	CPU_TO_ID(N), logical CPU to APIC ID table
 *	IO_TO_ID(N), logical IO to APIC ID table
 *	bus_data[N]
 *	io_apic_ints[N]
 *
 * Returns the default configuration type (non-zero) when the board uses
 * one of the MP spec defaults, otherwise 0 after walking the full table.
 */
static int
mptable_pass2(void)
{
	int     x;
	mpcth_t cth;
	int     totalSize;
	void*   position;
	int     count;
	int     type;
	int     apic, bus, cpu, intr;
	int	i, j;
	int	pgeflag;

	POSTCODE(MPTABLE_PASS2_POST);

	pgeflag = 0;	/* XXX - Not used under SMP yet.  */

	/* sized from the counts gathered by mptable_pass1() */
	MALLOC(io_apic_versions, u_int32_t *, sizeof(u_int32_t) * mp_napics,
	    M_DEVBUF, M_WAITOK);
	MALLOC(ioapic, volatile ioapic_t **, sizeof(ioapic_t *) * mp_napics,
	    M_DEVBUF, M_WAITOK);
	MALLOC(io_apic_ints, io_int *, sizeof(io_int) * (nintrs + 1),
	    M_DEVBUF, M_WAITOK);
	MALLOC(bus_data, bus_datum *, sizeof(bus_datum) * mp_nbusses,
	    M_DEVBUF, M_WAITOK);

	bzero(ioapic, sizeof(ioapic_t *) * mp_napics);

	/*
	 * Map each IO APIC register page via the SMP page table, sharing a
	 * PTE slot when two APICs live in the same page frame.
	 */
	for (i = 0; i < mp_napics; i++) {
		for (j = 0; j < mp_napics; j++) {
			/* same page frame as a previous IO apic? */
			if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) ==
			    (io_apic_address[i] & PG_FRAME)) {
				ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace
					+ (NPTEPG-2-j) * PAGE_SIZE
					+ (io_apic_address[i] & PAGE_MASK));
				break;
			}
			/* use this slot if available */
			if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) == 0) {
				SMPpt[NPTEPG-2-j] = (pt_entry_t)(PG_V | PG_RW |
				    pgeflag | (io_apic_address[i] & PG_FRAME));
				ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace
					+ (NPTEPG-2-j) * PAGE_SIZE
					+ (io_apic_address[i] & PAGE_MASK));
				break;
			}
		}
	}

	/* clear various tables */
	for (x = 0; x < NAPICID; ++x) {
		ID_TO_IO(x) = -1;	/* phy APIC ID to log CPU/IO table */
		CPU_TO_ID(x) = -1;	/* logical CPU to APIC ID table */
		IO_TO_ID(x) = -1;	/* logical IO to APIC ID table */
	}

	/* clear bus data table */
	for (x = 0; x < mp_nbusses; ++x)
		bus_data[x].bus_id = 0xff;

	/* clear IO APIC INT table */
	for (x = 0; x < (nintrs + 1); ++x) {
		io_apic_ints[x].int_type = 0xff;
		io_apic_ints[x].int_vector = 0xff;
	}

	/* setup the cpu/apic mapping arrays */
	boot_cpu_id = -1;

	/* record whether PIC or virtual-wire mode */
	picmode = (mpfps->mpfb2 & 0x80) ? 1 : 0;

	/* check for use of 'default' configuration */
	if (MPFPS_MPFB1 != 0)
		return MPFPS_MPFB1;	/* return default configuration type */

	if ((cth = mpfps->pap) == 0)
		panic("MP Configuration Table Header MISSING!");

	/* walk the table, recording info of interest */
	totalSize = cth->base_table_length - sizeof(struct MPCTH);
	position = (u_char *) cth + sizeof(struct MPCTH);
	count = cth->entry_count;
	apic = bus = intr = 0;
	cpu = 1;				/* pre-count the BSP */

	while (count--) {
		switch (type = *(u_char *) position) {
		case 0:
			if (processor_entry(position, cpu))
				++cpu;
			break;
		case 1:
			if (bus_entry(position, bus))
				++bus;
			break;
		case 2:
			if (io_apic_entry(position, apic))
				++apic;
			break;
		case 3:
			if (int_entry(position, intr))
				++intr;
			break;
		case 4:
			/* int_entry(position); */
			break;
		default:
			panic("mpfps Base Table HOSED!");
			/* NOTREACHED */
		}

		totalSize -= basetable_entry_types[type].length;
		(u_char *) position += basetable_entry_types[type].length;
	}

	if (boot_cpu_id == -1)
		panic("NO BSP found!");

	/* report fact that its NOT a default configuration */
	return 0;
}


/*
 * Bind irq to IO APIC `apic' pin `intpin' in int_to_apicintpin[] and
 * stamp the matching unassigned io_apic_ints[] entries with the irq.
 * Panics if irq is already bound.
 */
void
assign_apic_irq(int apic, int intpin, int irq)
{
	int x;

	if (int_to_apicintpin[irq].ioapic != -1)
		panic("assign_apic_irq: inconsistent table");

	int_to_apicintpin[irq].ioapic = apic;
	int_to_apicintpin[irq].int_pin = intpin;
	int_to_apicintpin[irq].apic_address = ioapic[apic];
	int_to_apicintpin[irq].redirindex = IOAPIC_REDTBL + 2 * intpin;

	for (x = 0; x < nintrs; x++) {
		if ((io_apic_ints[x].int_type == 0 ||
		     io_apic_ints[x].int_type == 3) &&
		    io_apic_ints[x].int_vector == 0xff &&
		    io_apic_ints[x].dst_apic_id == IO_TO_ID(apic) &&
		    io_apic_ints[x].dst_apic_int == intpin)
			io_apic_ints[x].int_vector = irq;
	}
}

void
985revoke_apic_irq(int irq) 986{ 987 int x; 988 int oldapic; 989 int oldintpin; 990 991 if (int_to_apicintpin[irq].ioapic == -1) 992 panic("assign_apic_irq: inconsistent table"); 993 994 oldapic = int_to_apicintpin[irq].ioapic; 995 oldintpin = int_to_apicintpin[irq].int_pin; 996 997 int_to_apicintpin[irq].ioapic = -1; 998 int_to_apicintpin[irq].int_pin = 0; 999 int_to_apicintpin[irq].apic_address = NULL; 1000 int_to_apicintpin[irq].redirindex = 0; 1001 1002 for (x = 0; x < nintrs; x++) { 1003 if ((io_apic_ints[x].int_type == 0 || 1004 io_apic_ints[x].int_type == 3) && 1005 io_apic_ints[x].int_vector == 0xff && 1006 io_apic_ints[x].dst_apic_id == IO_TO_ID(oldapic) && 1007 io_apic_ints[x].dst_apic_int == oldintpin) 1008 io_apic_ints[x].int_vector = 0xff; 1009 } 1010} 1011 1012 1013 1014static void 1015swap_apic_id(int apic, int oldid, int newid) 1016{ 1017 int x; 1018 int oapic; 1019 1020 1021 if (oldid == newid) 1022 return; /* Nothing to do */ 1023 1024 printf("Changing APIC ID for IO APIC #%d from %d to %d in MP table\n", 1025 apic, oldid, newid); 1026 1027 /* Swap physical APIC IDs in interrupt entries */ 1028 for (x = 0; x < nintrs; x++) { 1029 if (io_apic_ints[x].dst_apic_id == oldid) 1030 io_apic_ints[x].dst_apic_id = newid; 1031 else if (io_apic_ints[x].dst_apic_id == newid) 1032 io_apic_ints[x].dst_apic_id = oldid; 1033 } 1034 1035 /* Swap physical APIC IDs in IO_TO_ID mappings */ 1036 for (oapic = 0; oapic < mp_napics; oapic++) 1037 if (IO_TO_ID(oapic) == newid) 1038 break; 1039 1040 if (oapic < mp_napics) { 1041 printf("Changing APIC ID for IO APIC #%d from " 1042 "%d to %d in MP table\n", 1043 oapic, newid, oldid); 1044 IO_TO_ID(oapic) = oldid; 1045 } 1046 IO_TO_ID(apic) = newid; 1047} 1048 1049 1050static void 1051fix_id_to_io_mapping(void) 1052{ 1053 int x; 1054 1055 for (x = 0; x < NAPICID; x++) 1056 ID_TO_IO(x) = -1; 1057 1058 for (x = 0; x <= mp_naps; x++) 1059 if (CPU_TO_ID(x) < NAPICID) 1060 ID_TO_IO(CPU_TO_ID(x)) = x; 1061 1062 for (x = 0; x < 
mp_napics; x++) 1063 if (IO_TO_ID(x) < NAPICID) 1064 ID_TO_IO(IO_TO_ID(x)) = x; 1065} 1066 1067 1068static int 1069first_free_apic_id(void) 1070{ 1071 int freeid, x; 1072 1073 for (freeid = 0; freeid < NAPICID; freeid++) { 1074 for (x = 0; x <= mp_naps; x++) 1075 if (CPU_TO_ID(x) == freeid) 1076 break; 1077 if (x <= mp_naps) 1078 continue; 1079 for (x = 0; x < mp_napics; x++) 1080 if (IO_TO_ID(x) == freeid) 1081 break; 1082 if (x < mp_napics) 1083 continue; 1084 return freeid; 1085 } 1086 return freeid; 1087} 1088 1089 1090static int 1091io_apic_id_acceptable(int apic, int id) 1092{ 1093 int cpu; /* Logical CPU number */ 1094 int oapic; /* Logical IO APIC number for other IO APIC */ 1095 1096 if (id >= NAPICID) 1097 return 0; /* Out of range */ 1098 1099 for (cpu = 0; cpu <= mp_naps; cpu++) 1100 if (CPU_TO_ID(cpu) == id) 1101 return 0; /* Conflict with CPU */ 1102 1103 for (oapic = 0; oapic < mp_napics && oapic < apic; oapic++) 1104 if (IO_TO_ID(oapic) == id) 1105 return 0; /* Conflict with other APIC */ 1106 1107 return 1; /* ID is acceptable for IO APIC */ 1108} 1109 1110 1111/* 1112 * parse an Intel MP specification table 1113 */ 1114static void 1115fix_mp_table(void) 1116{ 1117 int x; 1118 int id; 1119 int bus_0 = 0; /* Stop GCC warning */ 1120 int bus_pci = 0; /* Stop GCC warning */ 1121 int num_pci_bus; 1122 int apic; /* IO APIC unit number */ 1123 int freeid; /* Free physical APIC ID */ 1124 int physid; /* Current physical IO APIC ID */ 1125 1126 /* 1127 * Fix mis-numbering of the PCI bus and its INT entries if the BIOS 1128 * did it wrong. The MP spec says that when more than 1 PCI bus 1129 * exists the BIOS must begin with bus entries for the PCI bus and use 1130 * actual PCI bus numbering. This implies that when only 1 PCI bus 1131 * exists the BIOS can choose to ignore this ordering, and indeed many 1132 * MP motherboards do ignore it. 
This causes a problem when the PCI
	 * sub-system makes requests of the MP sub-system based on PCI bus
	 * numbers.  So here we look for the situation and renumber the
	 * busses and associated INTs in an effort to "make it right".
	 */

	/* find bus 0, PCI bus, count the number of PCI busses */
	for (num_pci_bus = 0, x = 0; x < mp_nbusses; ++x) {
		if (bus_data[x].bus_id == 0) {
			bus_0 = x;
		}
		if (bus_data[x].bus_type == PCI) {
			++num_pci_bus;
			bus_pci = x;
		}
	}
	/*
	 * bus_0 == slot of bus with ID of 0
	 * bus_pci == slot of last PCI bus encountered
	 */

	/* check the 1 PCI bus case for sanity */
	/* if it is number 0 all is well */
	if (num_pci_bus == 1 &&
	    bus_data[bus_pci].bus_id != 0) {

		/* mis-numbered, swap with whichever bus uses slot 0 */

		/* swap the bus entry types */
		bus_data[bus_pci].bus_type = bus_data[bus_0].bus_type;
		bus_data[bus_0].bus_type = PCI;

		/* swap each relevant INTerrupt entry */
		id = bus_data[bus_pci].bus_id;
		for (x = 0; x < nintrs; ++x) {
			if (io_apic_ints[x].src_bus_id == id) {
				io_apic_ints[x].src_bus_id = 0;
			}
			else if (io_apic_ints[x].src_bus_id == 0) {
				io_apic_ints[x].src_bus_id = id;
			}
		}
	}

	/* Assign IO APIC IDs.
	 *
	 * First try the existing ID. If a conflict is detected, try
	 * the ID in the MP table.  If a conflict is still detected, find
	 * a free id.
	 *
	 * We cannot use the ID_TO_IO table before all conflicts has been
	 * resolved and the table has been corrected.
	 */
	for (apic = 0; apic < mp_napics; ++apic) { /* For all IO APICs */

		/* First try to use the value set by the BIOS */
		physid = io_apic_get_id(apic);
		if (io_apic_id_acceptable(apic, physid)) {
			if (IO_TO_ID(apic) != physid)
				swap_apic_id(apic, IO_TO_ID(apic), physid);
			continue;
		}

		/* Then check if the value in the MP table is acceptable */
		if (io_apic_id_acceptable(apic, IO_TO_ID(apic)))
			continue;

		/* Last resort, find a free APIC ID and use it */
		freeid = first_free_apic_id();
		if (freeid >= NAPICID)
			panic("No free physical APIC IDs found");

		if (io_apic_id_acceptable(apic, freeid)) {
			swap_apic_id(apic, IO_TO_ID(apic), freeid);
			continue;
		}
		panic("Free physical APIC ID not usable");
	}
	fix_id_to_io_mapping();

	/*
	 * detect and fix broken Compaq MP table: synthesize the missing
	 * 8259 ExtInt entry on IO APIC #0 pin 0 so legacy interrupts work
	 */
	if (apic_int_type(0, 0) == -1) {
		printf("APIC_IO: MP table broken: 8259->APIC entry missing!\n");
		io_apic_ints[nintrs].int_type = 3;	/* ExtInt */
		io_apic_ints[nintrs].int_vector = 0xff;	/* Unassigned */
		/* XXX fixme, set src bus id etc, but it doesn't seem to hurt */
		io_apic_ints[nintrs].dst_apic_id = IO_TO_ID(0);
		io_apic_ints[nintrs].dst_apic_int = 0;	/* Pin 0 */
		nintrs++;
	}
}


/*
 * Assign low level interrupt handlers.
 *
 * Walks the parsed io_apic_ints[] table and binds IRQ numbers
 * (int_vector slots in int_to_apicintpin[]) to IO APIC intpins, in
 * three passes: ISA/EISA interrupts on their traditional IRQ numbers
 * first, then remaining intpins on IO APIC #0, then everything else
 * into whatever slots are still free.
 */
static void
setup_apic_irq_mapping(void)
{
	int x;
	int int_vector;

	/* Clear array */
	for (x = 0; x < APIC_INTMAPSIZE; x++) {
		int_to_apicintpin[x].ioapic = -1;
		int_to_apicintpin[x].int_pin = 0;
		int_to_apicintpin[x].apic_address = NULL;
		int_to_apicintpin[x].redirindex = 0;
	}

	/* First assign ISA/EISA interrupts */
	for (x = 0; x < nintrs; x++) {
		int_vector = io_apic_ints[x].src_bus_irq;
		if (int_vector < APIC_INTMAPSIZE &&
		    io_apic_ints[x].int_vector == 0xff &&
		    int_to_apicintpin[int_vector].ioapic == -1 &&
		    (apic_int_is_bus_type(x, ISA) ||
		     apic_int_is_bus_type(x, EISA)) &&
		    io_apic_ints[x].int_type == 0) {
			assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
					io_apic_ints[x].dst_apic_int,
					int_vector);
		}
	}

	/* Assign first set of interrupts to intpins on IOAPIC #0 */
	for (x = 0; x < nintrs; x++) {
		int_vector = io_apic_ints[x].dst_apic_int;
		if (int_vector < APIC_INTMAPSIZE &&
		    io_apic_ints[x].dst_apic_id == IO_TO_ID(0) &&
		    io_apic_ints[x].int_vector == 0xff &&
		    int_to_apicintpin[int_vector].ioapic == -1 &&
		    (io_apic_ints[x].int_type == 0 ||
		     io_apic_ints[x].int_type == 3)) {
			assign_apic_irq(0,
					io_apic_ints[x].dst_apic_int,
					int_vector);
		}
	}
	/*
	 * Assign interrupts for remaining intpins.
	 * Skip IOAPIC #0 intpin 0 if the type is ExtInt, since this indicates
	 * that an entry for ISA/EISA irq 0 exist, and a fallback to mixed mode
	 * due to 8254 interrupts not being delivered can reuse that low level
	 * interrupt handler.
	 */
	int_vector = 0;
	while (int_vector < APIC_INTMAPSIZE &&
	       int_to_apicintpin[int_vector].ioapic != -1)
		int_vector++;
	for (x = 0; x < nintrs && int_vector < APIC_INTMAPSIZE; x++) {
		if ((io_apic_ints[x].int_type == 0 ||
		     (io_apic_ints[x].int_type == 3 &&
		      (io_apic_ints[x].dst_apic_id != IO_TO_ID(0) ||
		       io_apic_ints[x].dst_apic_int != 0))) &&
		    io_apic_ints[x].int_vector == 0xff) {
			assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
					io_apic_ints[x].dst_apic_int,
					int_vector);
			int_vector++;
			/* advance to the next free IRQ slot */
			while (int_vector < APIC_INTMAPSIZE &&
			       int_to_apicintpin[int_vector].ioapic != -1)
				int_vector++;
		}
	}
}


/*
 * Parse one MP-table processor entry.
 *
 * Returns 1 if the entry added a usable AP to the CPU maps, 0 otherwise
 * (entry disabled, entry is the BSP itself, or MAXCPU already reached).
 */
static int
processor_entry(proc_entry_ptr entry, int cpu)
{
	/* check for usability */
	if (!(entry->cpu_flags & PROCENTRY_FLAG_EN))
		return 0;

	if(entry->apic_id >= NAPICID)
		panic("CPU APIC ID out of range (0..%d)", NAPICID - 1);
	/* check for BSP flag */
	if (entry->cpu_flags & PROCENTRY_FLAG_BP) {
		boot_cpu_id = entry->apic_id;
		CPU_TO_ID(0) = entry->apic_id;
		ID_TO_CPU(entry->apic_id) = 0;
		return 0;	/* its already been counted */
	}

	/* add another AP to list, if less than max number of CPUs */
	else if (cpu < MAXCPU) {
		CPU_TO_ID(cpu) = entry->apic_id;
		ID_TO_CPU(entry->apic_id) = cpu;
		return 1;
	}

	return 0;
}


/*
 * Parse one MP-table bus entry: decode the 6-character, space-padded
 * bus-type name and record the bus id/type in bus_data[bus].
 * Panics on an unrecognized bus type.  Always returns 1.
 */
static int
bus_entry(bus_entry_ptr entry, int bus)
{
	int     x;
	char    c, name[8];

	/* encode the name into an index */
	for (x = 0; x < 6; ++x) {
		if ((c = entry->bus_type[x]) == ' ')
			break;
		name[x] = c;
	}
	name[x] = '\0';

	if ((x = lookup_bus_type(name)) == UNKNOWN_BUSTYPE)
		panic("unknown bus type: '%s'", name);

	bus_data[bus].bus_id = entry->bus_id;
	bus_data[bus].bus_type = x;

	return 1;
}


/*
 * Parse one MP-table I/O APIC entry; entries not flagged enabled
 * are ignored.  Returns 1 if the APIC was recorded, 0 if skipped.
 */
static int
io_apic_entry(io_apic_entry_ptr
entry, int apic) 1352{ 1353 if (!(entry->apic_flags & IOAPICENTRY_FLAG_EN)) 1354 return 0; 1355 1356 IO_TO_ID(apic) = entry->apic_id; 1357 if (entry->apic_id < NAPICID) 1358 ID_TO_IO(entry->apic_id) = apic; 1359 1360 return 1; 1361} 1362 1363 1364static int 1365lookup_bus_type(char *name) 1366{ 1367 int x; 1368 1369 for (x = 0; x < MAX_BUSTYPE; ++x) 1370 if (strcmp(bus_type_table[x].name, name) == 0) 1371 return bus_type_table[x].type; 1372 1373 return UNKNOWN_BUSTYPE; 1374} 1375 1376 1377static int 1378int_entry(int_entry_ptr entry, int intr) 1379{ 1380 int apic; 1381 1382 io_apic_ints[intr].int_type = entry->int_type; 1383 io_apic_ints[intr].int_flags = entry->int_flags; 1384 io_apic_ints[intr].src_bus_id = entry->src_bus_id; 1385 io_apic_ints[intr].src_bus_irq = entry->src_bus_irq; 1386 if (entry->dst_apic_id == 255) { 1387 /* This signal goes to all IO APICS. Select an IO APIC 1388 with sufficient number of interrupt pins */ 1389 for (apic = 0; apic < mp_napics; apic++) 1390 if (((io_apic_read(apic, IOAPIC_VER) & 1391 IOART_VER_MAXREDIR) >> MAXREDIRSHIFT) >= 1392 entry->dst_apic_int) 1393 break; 1394 if (apic < mp_napics) 1395 io_apic_ints[intr].dst_apic_id = IO_TO_ID(apic); 1396 else 1397 io_apic_ints[intr].dst_apic_id = entry->dst_apic_id; 1398 } else 1399 io_apic_ints[intr].dst_apic_id = entry->dst_apic_id; 1400 io_apic_ints[intr].dst_apic_int = entry->dst_apic_int; 1401 1402 return 1; 1403} 1404 1405 1406static int 1407apic_int_is_bus_type(int intr, int bus_type) 1408{ 1409 int bus; 1410 1411 for (bus = 0; bus < mp_nbusses; ++bus) 1412 if ((bus_data[bus].bus_id == io_apic_ints[intr].src_bus_id) 1413 && ((int) bus_data[bus].bus_type == bus_type)) 1414 return 1; 1415 1416 return 0; 1417} 1418 1419 1420/* 1421 * Given a traditional ISA INT mask, return an APIC mask. 
1422 */ 1423u_int 1424isa_apic_mask(u_int isa_mask) 1425{ 1426 int isa_irq; 1427 int apic_pin; 1428 1429#if defined(SKIP_IRQ15_REDIRECT) 1430 if (isa_mask == (1 << 15)) { 1431 printf("skipping ISA IRQ15 redirect\n"); 1432 return isa_mask; 1433 } 1434#endif /* SKIP_IRQ15_REDIRECT */ 1435 1436 isa_irq = ffs(isa_mask); /* find its bit position */ 1437 if (isa_irq == 0) /* doesn't exist */ 1438 return 0; 1439 --isa_irq; /* make it zero based */ 1440 1441 apic_pin = isa_apic_irq(isa_irq); /* look for APIC connection */ 1442 if (apic_pin == -1) 1443 return 0; 1444 1445 return (1 << apic_pin); /* convert pin# to a mask */ 1446} 1447 1448 1449/* 1450 * Determine which APIC pin an ISA/EISA INT is attached to. 1451 */ 1452#define INTTYPE(I) (io_apic_ints[(I)].int_type) 1453#define INTPIN(I) (io_apic_ints[(I)].dst_apic_int) 1454#define INTIRQ(I) (io_apic_ints[(I)].int_vector) 1455#define INTAPIC(I) (ID_TO_IO(io_apic_ints[(I)].dst_apic_id)) 1456 1457#define SRCBUSIRQ(I) (io_apic_ints[(I)].src_bus_irq) 1458int 1459isa_apic_irq(int isa_irq) 1460{ 1461 int intr; 1462 1463 for (intr = 0; intr < nintrs; ++intr) { /* check each record */ 1464 if (INTTYPE(intr) == 0) { /* standard INT */ 1465 if (SRCBUSIRQ(intr) == isa_irq) { 1466 if (apic_int_is_bus_type(intr, ISA) || 1467 apic_int_is_bus_type(intr, EISA)) 1468 return INTIRQ(intr); /* found */ 1469 } 1470 } 1471 } 1472 return -1; /* NOT found */ 1473} 1474 1475 1476/* 1477 * Determine which APIC pin a PCI INT is attached to. 
1478 */ 1479#define SRCBUSID(I) (io_apic_ints[(I)].src_bus_id) 1480#define SRCBUSDEVICE(I) ((io_apic_ints[(I)].src_bus_irq >> 2) & 0x1f) 1481#define SRCBUSLINE(I) (io_apic_ints[(I)].src_bus_irq & 0x03) 1482int 1483pci_apic_irq(int pciBus, int pciDevice, int pciInt) 1484{ 1485 int intr; 1486 1487 --pciInt; /* zero based */ 1488 1489 for (intr = 0; intr < nintrs; ++intr) /* check each record */ 1490 if ((INTTYPE(intr) == 0) /* standard INT */ 1491 && (SRCBUSID(intr) == pciBus) 1492 && (SRCBUSDEVICE(intr) == pciDevice) 1493 && (SRCBUSLINE(intr) == pciInt)) /* a candidate IRQ */ 1494 if (apic_int_is_bus_type(intr, PCI)) 1495 return INTIRQ(intr); /* exact match */ 1496 1497 return -1; /* NOT found */ 1498} 1499 1500int 1501next_apic_irq(int irq) 1502{ 1503 int intr, ointr; 1504 int bus, bustype; 1505 1506 bus = 0; 1507 bustype = 0; 1508 for (intr = 0; intr < nintrs; intr++) { 1509 if (INTIRQ(intr) != irq || INTTYPE(intr) != 0) 1510 continue; 1511 bus = SRCBUSID(intr); 1512 bustype = apic_bus_type(bus); 1513 if (bustype != ISA && 1514 bustype != EISA && 1515 bustype != PCI) 1516 continue; 1517 break; 1518 } 1519 if (intr >= nintrs) { 1520 return -1; 1521 } 1522 for (ointr = intr + 1; ointr < nintrs; ointr++) { 1523 if (INTTYPE(ointr) != 0) 1524 continue; 1525 if (bus != SRCBUSID(ointr)) 1526 continue; 1527 if (bustype == PCI) { 1528 if (SRCBUSDEVICE(intr) != SRCBUSDEVICE(ointr)) 1529 continue; 1530 if (SRCBUSLINE(intr) != SRCBUSLINE(ointr)) 1531 continue; 1532 } 1533 if (bustype == ISA || bustype == EISA) { 1534 if (SRCBUSIRQ(intr) != SRCBUSIRQ(ointr)) 1535 continue; 1536 } 1537 if (INTPIN(intr) == INTPIN(ointr)) 1538 continue; 1539 break; 1540 } 1541 if (ointr >= nintrs) { 1542 return -1; 1543 } 1544 return INTIRQ(ointr); 1545} 1546#undef SRCBUSLINE 1547#undef SRCBUSDEVICE 1548#undef SRCBUSID 1549#undef SRCBUSIRQ 1550 1551#undef INTPIN 1552#undef INTIRQ 1553#undef INTAPIC 1554#undef INTTYPE 1555 1556 1557/* 1558 * Reprogram the MB chipset to NOT redirect an ISA 
INTerrupt. 1559 * 1560 * XXX FIXME: 1561 * Exactly what this means is unclear at this point. It is a solution 1562 * for motherboards that redirect the MBIRQ0 pin. Generically a motherboard 1563 * could route any of the ISA INTs to upper (>15) IRQ values. But most would 1564 * NOT be redirected via MBIRQ0, thus "undirect()ing" them would NOT be an 1565 * option. 1566 */ 1567int 1568undirect_isa_irq(int rirq) 1569{ 1570#if defined(READY) 1571 if (bootverbose) 1572 printf("Freeing redirected ISA irq %d.\n", rirq); 1573 /** FIXME: tickle the MB redirector chip */ 1574 return -1; 1575#else 1576 if (bootverbose) 1577 printf("Freeing (NOT implemented) redirected ISA irq %d.\n", rirq); 1578 return 0; 1579#endif /* READY */ 1580} 1581 1582 1583/* 1584 * Reprogram the MB chipset to NOT redirect a PCI INTerrupt 1585 */ 1586int 1587undirect_pci_irq(int rirq) 1588{ 1589#if defined(READY) 1590 if (bootverbose) 1591 printf("Freeing redirected PCI irq %d.\n", rirq); 1592 1593 /** FIXME: tickle the MB redirector chip */ 1594 return -1; 1595#else 1596 if (bootverbose) 1597 printf("Freeing (NOT implemented) redirected PCI irq %d.\n", 1598 rirq); 1599 return 0; 1600#endif /* READY */ 1601} 1602 1603 1604/* 1605 * given a bus ID, return: 1606 * the bus type if found 1607 * -1 if NOT found 1608 */ 1609int 1610apic_bus_type(int id) 1611{ 1612 int x; 1613 1614 for (x = 0; x < mp_nbusses; ++x) 1615 if (bus_data[x].bus_id == id) 1616 return bus_data[x].bus_type; 1617 1618 return -1; 1619} 1620 1621 1622/* 1623 * given a LOGICAL APIC# and pin#, return: 1624 * the associated src bus ID if found 1625 * -1 if NOT found 1626 */ 1627int 1628apic_src_bus_id(int apic, int pin) 1629{ 1630 int x; 1631 1632 /* search each of the possible INTerrupt sources */ 1633 for (x = 0; x < nintrs; ++x) 1634 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1635 (pin == io_apic_ints[x].dst_apic_int)) 1636 return (io_apic_ints[x].src_bus_id); 1637 1638 return -1; /* NOT found */ 1639} 1640 1641 1642/* 1643 * 
given a LOGICAL APIC# and pin#, return: 1644 * the associated src bus IRQ if found 1645 * -1 if NOT found 1646 */ 1647int 1648apic_src_bus_irq(int apic, int pin) 1649{ 1650 int x; 1651 1652 for (x = 0; x < nintrs; x++) 1653 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1654 (pin == io_apic_ints[x].dst_apic_int)) 1655 return (io_apic_ints[x].src_bus_irq); 1656 1657 return -1; /* NOT found */ 1658} 1659 1660 1661/* 1662 * given a LOGICAL APIC# and pin#, return: 1663 * the associated INTerrupt type if found 1664 * -1 if NOT found 1665 */ 1666int 1667apic_int_type(int apic, int pin) 1668{ 1669 int x; 1670 1671 /* search each of the possible INTerrupt sources */ 1672 for (x = 0; x < nintrs; ++x) 1673 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1674 (pin == io_apic_ints[x].dst_apic_int)) 1675 return (io_apic_ints[x].int_type); 1676 1677 return -1; /* NOT found */ 1678} 1679 1680int 1681apic_irq(int apic, int pin) 1682{ 1683 int x; 1684 int res; 1685 1686 for (x = 0; x < nintrs; ++x) 1687 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1688 (pin == io_apic_ints[x].dst_apic_int)) { 1689 res = io_apic_ints[x].int_vector; 1690 if (res == 0xff) 1691 return -1; 1692 if (apic != int_to_apicintpin[res].ioapic) 1693 panic("apic_irq: inconsistent table"); 1694 if (pin != int_to_apicintpin[res].int_pin) 1695 panic("apic_irq inconsistent table (2)"); 1696 return res; 1697 } 1698 return -1; 1699} 1700 1701 1702/* 1703 * given a LOGICAL APIC# and pin#, return: 1704 * the associated trigger mode if found 1705 * -1 if NOT found 1706 */ 1707int 1708apic_trigger(int apic, int pin) 1709{ 1710 int x; 1711 1712 /* search each of the possible INTerrupt sources */ 1713 for (x = 0; x < nintrs; ++x) 1714 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1715 (pin == io_apic_ints[x].dst_apic_int)) 1716 return ((io_apic_ints[x].int_flags >> 2) & 0x03); 1717 1718 return -1; /* NOT found */ 1719} 1720 1721 1722/* 1723 * given a LOGICAL APIC# and pin#, return: 1724 * 
the associated 'active' level if found
 *  -1 if NOT found
 */
int
apic_polarity(int apic, int pin)
{
	int     x;

	/* search each of the possible INTerrupt sources */
	for (x = 0; x < nintrs; ++x)
		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
		    (pin == io_apic_ints[x].dst_apic_int))
			/* polarity lives in int_flags bits 1:0 */
			return (io_apic_ints[x].int_flags & 0x03);

	return -1;		/* NOT found */
}


/*
 * set data according to MP defaults
 * FIXME: probably not complete yet...
 *
 * Used when the MP table only names one of the seven default
 * two-CPU configurations (MP spec chapter 5) instead of providing
 * explicit entries.
 */
static void
default_mp_table(int type)
{
	int     ap_cpu_id;
#if defined(APIC_IO)
	int     io_apic_id;
	int     pin;
#endif	/* APIC_IO */

#if 0
	printf("  MP default config type: %d\n", type);
	switch (type) {
	case 1:
		printf("   bus: ISA, APIC: 82489DX\n");
		break;
	case 2:
		printf("   bus: EISA, APIC: 82489DX\n");
		break;
	case 3:
		printf("   bus: EISA, APIC: 82489DX\n");
		break;
	case 4:
		printf("   bus: MCA, APIC: 82489DX\n");
		break;
	case 5:
		printf("   bus: ISA+PCI, APIC: Integrated\n");
		break;
	case 6:
		printf("   bus: EISA+PCI, APIC: Integrated\n");
		break;
	case 7:
		printf("   bus: MCA+PCI, APIC: Integrated\n");
		break;
	default:
		printf("   future type\n");
		break;
		/* NOTREACHED */
	}
#endif	/* 0 */

	boot_cpu_id = (lapic.id & APIC_ID_MASK) >> 24;
	/* the one AP gets whichever of ID 0/1 the BSP doesn't use */
	ap_cpu_id = (boot_cpu_id == 0) ? 1 : 0;

	/* BSP */
	CPU_TO_ID(0) = boot_cpu_id;
	ID_TO_CPU(boot_cpu_id) = 0;

	/* one and only AP */
	CPU_TO_ID(1) = ap_cpu_id;
	ID_TO_CPU(ap_cpu_id) = 1;

#if defined(APIC_IO)
	/* one and only IO APIC */
	io_apic_id = (io_apic_read(0, IOAPIC_ID) & APIC_ID_MASK) >> 24;

	/*
	 * sanity check, refer to MP spec section 3.6.6, last paragraph
	 * necessary as some hardware isn't properly setting up the IO APIC
	 */
#if defined(REALLY_ANAL_IOAPICID_VALUE)
	if (io_apic_id != 2) {
#else
	if ((io_apic_id == 0) || (io_apic_id == 1) || (io_apic_id == 15)) {
#endif	/* REALLY_ANAL_IOAPICID_VALUE */
		io_apic_set_id(0, 2);
		io_apic_id = 2;
	}
	IO_TO_ID(0) = io_apic_id;
	ID_TO_IO(io_apic_id) = 0;
#endif	/* APIC_IO */

	/* fill out bus entries */
	switch (type) {
	case 1:
	case 2:
	case 3:
	case 4:
	case 5:
	case 6:
	case 7:
		bus_data[0].bus_id = default_data[type - 1][1];
		bus_data[0].bus_type = default_data[type - 1][2];
		bus_data[1].bus_id = default_data[type - 1][3];
		bus_data[1].bus_type = default_data[type - 1][4];
		break;

	/* case 4: case 7:		   MCA NOT supported */
	default:		/* illegal/reserved */
		panic("BAD default MP config: %d", type);
		/* NOTREACHED */
	}

#if defined(APIC_IO)
	/* general cases from MP v1.4, table 5-2 */
	for (pin = 0; pin < 16; ++pin) {
		io_apic_ints[pin].int_type = 0;
		io_apic_ints[pin].int_flags = 0x05;	/* edge/active-hi */
		io_apic_ints[pin].src_bus_id = 0;
		io_apic_ints[pin].src_bus_irq = pin;	/* IRQ2 caught below */
		io_apic_ints[pin].dst_apic_id = io_apic_id;
		io_apic_ints[pin].dst_apic_int = pin;	/* 1-to-1 */
	}

	/* special cases from MP v1.4, table 5-2 */
	if (type == 2) {
		io_apic_ints[2].int_type = 0xff;	/* N/C */
		io_apic_ints[13].int_type = 0xff;	/* N/C */
#if !defined(APIC_MIXED_MODE)
		/** FIXME: ??? */
		panic("sorry, can't support type 2 default yet");
#endif	/* APIC_MIXED_MODE */
	}
	else
		io_apic_ints[2].src_bus_irq = 0;	/* ISA IRQ0 is on APIC INT 2 */

	if (type == 7)
		io_apic_ints[0].int_type = 0xff;	/* N/C */
	else
		io_apic_ints[0].int_type = 3;	/* vectored 8259 */
#endif	/* APIC_IO */
}


/*
 * initialize all the SMP locks
 */

/* critical region around IO APIC, apic_imen */
struct simplelock	imen_lock;

/* critical region around splxx(), cpl, cml, cil, ipending */
struct simplelock	cpl_lock;

/* Make FAST_INTR() routines sequential */
struct simplelock	fast_intr_lock;

/* critical region around INTR() routines */
struct simplelock	intr_lock;

/* lock region used by kernel profiling */
struct simplelock	mcount_lock;

#ifdef USE_COMLOCK
/* locks com (tty) data/hardware accesses: a FASTINTR() */
struct simplelock	com_lock;
#endif /* USE_COMLOCK */

/* lock around the MP rendezvous */
static struct simplelock smp_rv_lock;

/* only 1 CPU can panic at a time :) */
struct simplelock panic_lock;

/*
 * One-time initialization of every simplelock declared above, plus
 * ap_boot_lock (declared elsewhere in this file).
 */
static void
init_locks(void)
{
	s_lock_init((struct simplelock*)&mcount_lock);

	s_lock_init((struct simplelock*)&fast_intr_lock);
	s_lock_init((struct simplelock*)&intr_lock);
	s_lock_init((struct simplelock*)&imen_lock);
	s_lock_init((struct simplelock*)&cpl_lock);
	s_lock_init(&smp_rv_lock);
	s_lock_init(&panic_lock);

#ifdef USE_COMLOCK
	s_lock_init((struct simplelock*)&com_lock);
#endif /* USE_COMLOCK */

	s_lock_init(&ap_boot_lock);
}

/*
 * start each AP in our list
 *
 * Returns the number of APs started (mp_ncpus - 1).
 */
static int
start_all_aps(u_int boot_addr)
{
	int     x, i, pg;
	u_char  mpbiosreason;
	u_long  mpbioswarmvec;
	struct globaldata *gd;
	char   *stack;

	POSTCODE(START_ALL_APS_POST);

	/* initialize BSP's local APIC */
	apic_initialize();
	bsp_apic_ready = 1;

	/* install the AP 1st level boot code */
	install_ap_tramp(boot_addr);


	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_long *) WARMBOOT_OFF);
#ifndef PC98
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);
#endif

	/* record BSP in CPU map */
	all_cpus = 1;

	/* set up 0 -> 4MB P==V mapping for AP boot */
	*(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
	invltlb();

	/* start each AP */
	for (x = 1; x <= mp_naps; ++x) {

		/* This is a bit verbose, it will go away soon.  */

		/* first page of AP's private space */
		pg = x * i386_btop(sizeof(struct privatespace));

		/* allocate a new private data page */
		gd = (struct globaldata *)kmem_alloc(kernel_map, PAGE_SIZE);

		/* wire it into the private page table page */
		SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(gd));

		/* allocate and set up an idle stack data page */
		stack = (char *)kmem_alloc(kernel_map, UPAGES*PAGE_SIZE);
		for (i = 0; i < UPAGES; i++)
			SMPpt[pg + 5 + i] = (pt_entry_t)
			    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));

		SMPpt[pg + 1] = 0;		/* *prv_CMAP1 */
		SMPpt[pg + 2] = 0;		/* *prv_CMAP2 */
		SMPpt[pg + 3] = 0;		/* *prv_CMAP3 */
		SMPpt[pg + 4] = 0;		/* *prv_PMAP1 */

		/* prime data page for it to use */
		SLIST_INSERT_HEAD(&cpuhead, gd, gd_allcpu);
		gd->gd_cpuid = x;
		gd->gd_cpu_lockid = x << 24;

		/* setup a vector to our boot code */
		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
		*((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
#ifndef PC98
		outb(CMOS_REG, BIOS_RESET);
		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
#endif

		bootSTK = &SMP_prvspace[x].idlestack[UPAGES*PAGE_SIZE];
		bootAP = x;

		/* attempt to start the Application Processor */
		CHECK_INIT(99);	/* setup checkpoints */
		if (!start_ap(x, boot_addr)) {
			printf("AP #%d (PHY# %d) failed!\n", x, CPU_TO_ID(x));
			CHECK_PRINT("trace");	/* show checkpoints */
			/* better panic as the AP may be running loose */
			printf("panic y/n? [y] ");
			if (cngetc() != 'n')
				panic("bye-bye");
		}
		CHECK_PRINT("trace");	/* show checkpoints */

		/* record its version info */
		cpu_apic_versions[x] = cpu_apic_versions[0];

		all_cpus |= (1 << x);	/* record AP in CPU map */
	}

	/* build our map of 'other' CPUs */
	PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));

	/* fill in our (BSP) APIC version */
	cpu_apic_versions[0] = lapic.version;

	/* restore the warmstart vector */
	*(u_long *) WARMBOOT_OFF = mpbioswarmvec;
#ifndef PC98
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);
#endif

	/*
	 * Set up the idle context for the BSP.  Similar to above except
	 * that some was done by locore, some by pmap.c and some is implicit
	 * because the BSP is cpu#0 and the page is initially zero, and also
	 * because we can refer to variables by name on the BSP..
	 */

	/* Allocate and setup BSP idle stack */
	stack = (char *)kmem_alloc(kernel_map, UPAGES * PAGE_SIZE);
	for (i = 0; i < UPAGES; i++)
		SMPpt[5 + i] = (pt_entry_t)
		    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));

	/* tear down the temporary 0 -> 4MB P==V mapping again */
	*(int *)PTD = 0;
	pmap_set_opt();

	/* number of APs actually started */
	return mp_ncpus - 1;
}


/*
 * load the 1st level AP boot code into base memory.
2049 */ 2050 2051/* targets for relocation */ 2052extern void bigJump(void); 2053extern void bootCodeSeg(void); 2054extern void bootDataSeg(void); 2055extern void MPentry(void); 2056extern u_int MP_GDT; 2057extern u_int mp_gdtbase; 2058 2059static void 2060install_ap_tramp(u_int boot_addr) 2061{ 2062 int x; 2063 int size = *(int *) ((u_long) & bootMP_size); 2064 u_char *src = (u_char *) ((u_long) bootMP); 2065 u_char *dst = (u_char *) boot_addr + KERNBASE; 2066 u_int boot_base = (u_int) bootMP; 2067 u_int8_t *dst8; 2068 u_int16_t *dst16; 2069 u_int32_t *dst32; 2070 2071 POSTCODE(INSTALL_AP_TRAMP_POST); 2072 2073 for (x = 0; x < size; ++x) 2074 *dst++ = *src++; 2075 2076 /* 2077 * modify addresses in code we just moved to basemem. unfortunately we 2078 * need fairly detailed info about mpboot.s for this to work. changes 2079 * to mpboot.s might require changes here. 2080 */ 2081 2082 /* boot code is located in KERNEL space */ 2083 dst = (u_char *) boot_addr + KERNBASE; 2084 2085 /* modify the lgdt arg */ 2086 dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base)); 2087 *dst32 = boot_addr + ((u_int) & MP_GDT - boot_base); 2088 2089 /* modify the ljmp target for MPentry() */ 2090 dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1); 2091 *dst32 = ((u_int) MPentry - KERNBASE); 2092 2093 /* modify the target for boot code segment */ 2094 dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base)); 2095 dst8 = (u_int8_t *) (dst16 + 1); 2096 *dst16 = (u_int) boot_addr & 0xffff; 2097 *dst8 = ((u_int) boot_addr >> 16) & 0xff; 2098 2099 /* modify the target for boot data segment */ 2100 dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base)); 2101 dst8 = (u_int8_t *) (dst16 + 1); 2102 *dst16 = (u_int) boot_addr & 0xffff; 2103 *dst8 = ((u_int) boot_addr >> 16) & 0xff; 2104} 2105 2106 2107/* 2108 * this function starts the AP (application processor) identified 2109 * by the APIC ID 'physicalCpu'. 
It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It ain't pretty,
 * but it seems to work.
 *
 * Returns 1 on success (mp_ncpus was seen to advance within ~5s),
 * 0 on failure.
 */
static int
start_ap(int logical_cpu, u_int boot_addr)
{
	int     physical_cpu;
	int     vector;
	int     cpus;
	u_long  icr_lo, icr_hi;

	POSTCODE(START_AP_POST);

	/* get the PHYSICAL APIC ID# */
	physical_cpu = CPU_TO_ID(logical_cpu);

	/* calculate the vector: page number of the trampoline */
	vector = (boot_addr >> 12) & 0xff;

	/* used as a watchpoint to signal AP startup */
	cpus = mp_ncpus;

	/*
	 * first we do an INIT/RESET IPI this INIT IPI might be run, resetting
	 * and running the target CPU. OR this INIT IPI might be latched (P5
	 * bug), CPU waiting for STARTUP IPI. OR this INIT IPI might be
	 * ignored.
	 */

	/* setup the address for the target AP */
	icr_hi = lapic.icr_hi & ~APIC_ID_MASK;
	icr_hi |= (physical_cpu << 24);
	lapic.icr_hi = icr_hi;

	/* do an INIT IPI: assert RESET */
	icr_lo = lapic.icr_lo & 0xfff00000;
	lapic.icr_lo = icr_lo | 0x0000c500;

	/* wait for pending status end */
	while (lapic.icr_lo & APIC_DELSTAT_MASK)
		 /* spin */ ;

	/* do an INIT IPI: deassert RESET */
	lapic.icr_lo = icr_lo | 0x00008500;

	/* wait for pending status end */
	u_sleep(10000);		/* wait ~10mS */
	while (lapic.icr_lo & APIC_DELSTAT_MASK)
		 /* spin */ ;

	/*
	 * next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched, (P5 bug) this 1st STARTUP would then terminate
	 * immediately, and the previously started INIT IPI would continue. OR
	 * the previous INIT IPI has already run. and this STARTUP IPI will
	 * run. OR the previous INIT IPI was ignored. and this STARTUP IPI
	 * will run.
	 */

	/* do a STARTUP IPI */
	lapic.icr_lo = icr_lo | 0x00000600 | vector;
	while (lapic.icr_lo & APIC_DELSTAT_MASK)
		 /* spin */ ;
	u_sleep(200);		/* wait ~200uS */

	/*
	 * finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
	 * the previous STARTUP IPI was cancelled by a latched INIT IPI. OR
	 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
	 * recognized after hardware RESET or INIT IPI.
	 */

	lapic.icr_lo = icr_lo | 0x00000600 | vector;
	while (lapic.icr_lo & APIC_DELSTAT_MASK)
		 /* spin */ ;
	u_sleep(200);		/* wait ~200uS */

	/* wait for it to start: the AP bumps mp_ncpus when it comes up */
	set_apic_timer(5000000);/* == 5 seconds */
	while (read_apic_timer())
		if (mp_ncpus > cpus)
			return 1;	/* return SUCCESS */

	return 0;		/* return FAILURE */
}

/*
 * Flush the TLB on all other CPU's
 *
 * XXX: Needs to handshake and wait for completion before proceding.
 */
void
smp_invltlb(void)
{
#if defined(APIC_IO)
	/* no-op until APs are running and IPIs are deemed safe */
	if (smp_started && invltlb_ok)
		all_but_self_ipi(XINVLTLB_OFFSET);
#endif  /* APIC_IO */
}

/*
 * Invalidate a single page mapping locally, then broadcast a full
 * TLB shootdown to the other CPUs.
 */
void
invlpg(u_int addr)
{
	__asm   __volatile("invlpg (%0)"::"r"(addr):"memory");

	/* send a message to the other CPUs */
	smp_invltlb();
}

/*
 * Flush the local TLB (CR3 reload), then broadcast to the other CPUs.
 */
void
invltlb(void)
{
	u_long  temp;

	/*
	 * This should be implemented as load_cr3(rcr3()) when load_cr3() is
	 * inlined.
	 */
	__asm __volatile("movl %%cr3, %0; movl %0, %%cr3":"=r"(temp) :: "memory");

	/* send a message to the other CPUs */
	smp_invltlb();
}


/*
 * When called the executing CPU will send an IPI to all other CPUs
 *  requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
2244 * 2245 * Returns: 2246 * -1: error 2247 * 0: NA 2248 * 1: ok 2249 * 2250 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs 2251 * from executing at same time. 2252 */ 2253int 2254stop_cpus(u_int map) 2255{ 2256 int count = 0; 2257 2258 if (!smp_started) 2259 return 0; 2260 2261 /* send the Xcpustop IPI to all CPUs in map */ 2262 selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED); 2263 2264 while (count++ < 100000 && (stopped_cpus & map) != map) 2265 /* spin */ ; 2266 2267#ifdef DIAGNOSTIC 2268 if ((stopped_cpus & map) != map) 2269 printf("Warning: CPUs 0x%x did not stop!\n", 2270 (~(stopped_cpus & map)) & map); 2271#endif 2272 2273 return 1; 2274} 2275 2276 2277/* 2278 * Called by a CPU to restart stopped CPUs. 2279 * 2280 * Usually (but not necessarily) called with 'stopped_cpus' as its arg. 2281 * 2282 * - Signals all CPUs in map to restart. 2283 * - Waits for each to restart. 2284 * 2285 * Returns: 2286 * -1: error 2287 * 0: NA 2288 * 1: ok 2289 */ 2290int 2291restart_cpus(u_int map) 2292{ 2293 int count = 0; 2294 2295 if (!smp_started) 2296 return 0; 2297 2298 started_cpus = map; /* signal other cpus to restart */ 2299 2300 /* wait for each to clear its bit */ 2301 while (count++ < 100000 && (stopped_cpus & map) != 0) 2302 /* spin */ ; 2303 2304#ifdef DIAGNOSTIC 2305 if ((stopped_cpus & map) != 0) 2306 printf("Warning: CPUs 0x%x did not restart!\n", 2307 (~(stopped_cpus & map)) & map); 2308#endif 2309 2310 return 1; 2311} 2312 2313int smp_active = 0; /* are the APs allowed to run? */ 2314SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RW, &smp_active, 0, ""); 2315 2316/* XXX maybe should be hw.ncpu */ 2317static int smp_cpus = 1; /* how many cpu's running */ 2318SYSCTL_INT(_machdep, OID_AUTO, smp_cpus, CTLFLAG_RD, &smp_cpus, 0, ""); 2319 2320int invltlb_ok = 0; /* throttle smp_invltlb() till safe */ 2321SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, ""); 2322 2323/* Warning: Do not staticize. 
Used from swtch.s */
int do_page_zero_idle = 1; /* bzero pages for fun and profit in idleloop */
SYSCTL_INT(_machdep, OID_AUTO, do_page_zero_idle, CTLFLAG_RW,
	   &do_page_zero_idle, 0, "");

/* Is forwarding of a interrupt to the CPU holding the ISR lock enabled ? */
int forward_irq_enabled = 1;
SYSCTL_INT(_machdep, OID_AUTO, forward_irq_enabled, CTLFLAG_RW,
	   &forward_irq_enabled, 0, "");

/* Enable forwarding of a signal to a process running on a different CPU */
static int forward_signal_enabled = 1;
SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
	   &forward_signal_enabled, 0, "");

/* Enable forwarding of roundrobin to all other cpus */
static int forward_roundrobin_enabled = 1;
SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
	   &forward_roundrobin_enabled, 0, "");

/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the AP's out of the pen.
 *
 * Runs on each AP: serialized by ap_boot_lock, brings the AP's local
 * APIC, FPU and per-cpu state up, and finally enters the scheduler.
 * Never returns.
 */
void ap_init(void);

void
ap_init(void)
{
	u_int	apic_id;

	/* lock against other AP's that are waking up */
	s_lock(&ap_boot_lock);

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	smp_cpus++;

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
	lidt(&r_idt);
#endif

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));

	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up FPU state on the AP */
	npxinit(__INITIAL_NPXCW__);

	/* A quick check from sanity claus */
	apic_id = (apic_id_to_logical[(lapic.id & 0x0f000000) >> 24]);
	if (PCPU_GET(cpuid) != apic_id) {
		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
		printf("SMP: apic_id = %d\n", apic_id);
		printf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
		panic("cpuid mismatch! boom!!");
	}

	/* Init local apic for irq's */
	apic_initialize();

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	/*
	 * Activate smp_invltlb, although strictly speaking, this isn't
	 * quite correct yet.  We should have a bitfield for cpus willing
	 * to accept TLB flush IPI's or something and sync them.
	 */
	if (smp_cpus == mp_ncpus) {
		invltlb_ok = 1;
		smp_started = 1; /* enable IPI's, tlb shootdown, freezes etc */
		smp_active = 1;	 /* historic */
	}

	/* let other AP's wake up now */
	s_unlock(&ap_boot_lock);

	/* wait until all the AP's are up */
	while (smp_started == 0)
		; /* nothing */

	/*
	 * Set curproc to our per-cpu idleproc so that mutexes have
	 * something unique to lock with.
	 */
	PCPU_SET(curproc, PCPU_GET(idleproc));

	microuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	/* ok, now grab sched_lock and enter the scheduler */
	enable_intr();
	mtx_enter(&sched_lock, MTX_SPIN);
	cpu_throw();	/* doesn't return */

	panic("scheduler returned us to ap_init");
}

#ifdef BETTER_CLOCK

#define CHECKSTATE_USER	0
#define CHECKSTATE_SYS	1
#define CHECKSTATE_INTR	2

/* Do not staticize.
Used from apic_vector.s */
struct proc* checkstate_curproc[MAXCPU];	/* proc seen running on each CPU */
int checkstate_cpustate[MAXCPU];		/* CHECKSTATE_{USER,SYS,INTR} */
u_long checkstate_pc[MAXCPU];			/* pc sampled on each CPU */

/*
 * Convert a pc within the profiled range into an even profile-buffer
 * index, scaled by pr_scale (16.16 fixed point).
 */
#define PC_TO_INDEX(pc, prof) \
        ((int)(((u_quad_t)((pc) - (prof)->pr_off) * \
            (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)

/*
 * Record a user-mode profiling tick for process p as sampled on CPU 'id'.
 * The actual buffer update is deferred to an AST (P_OWEUPC); the CPU is
 * flagged in *astmap so the caller will send it an AST IPI.
 */
static void
addupc_intr_forwarded(struct proc *p, int id, int *astmap)
{
	int i;
	struct uprof *prof;
	u_long pc;

	pc = checkstate_pc[id];
	prof = &p->p_stats->p_prof;
	if (pc >= prof->pr_off &&
	    (i = PC_TO_INDEX(pc, prof)) < prof->pr_size) {
		if ((p->p_flag & P_OWEUPC) == 0) {
			prof->pr_addr = pc;
			prof->pr_ticks = 1;
			p->p_flag |= P_OWEUPC;
		}
		*astmap |= (1 << id);
	}
}

/*
 * Perform statclock() accounting on behalf of CPU 'id', using the state
 * that CPU reported into the checkstate_* arrays.  'pscnt' mirrors the
 * profiling/stat divider in statclock(): tick accounting only happens
 * when pscnt <= 1.  CPUs that accrue deferred work are OR'ed into *astmap.
 *
 * NOTE(review): p is dereferenced immediately (p->p_ithd) with no NULL
 * check, yet the CHECKSTATE_INTR branch below guards "if (p)" -- the two
 * are inconsistent.  Presumably checkstate_curproc[id] is never NULL
 * (idleproc when idle); verify against the IPI handler in apic_vector.s.
 */
static void
forwarded_statclock(int id, int pscnt, int *astmap)
{
	struct pstats *pstats;
	long rss;
	struct rusage *ru;
	struct vmspace *vm;
	int cpustate;
	struct proc *p;
#ifdef GPROF
	register struct gmonparam *g;
	int i;
#endif

	p = checkstate_curproc[id];
	cpustate = checkstate_cpustate[id];

	/* XXX refine the reported state before accounting */
	if (p->p_ithd)
		cpustate = CHECKSTATE_INTR;
	else if (p == SMP_prvspace[id].globaldata.gd_idleproc)
		cpustate = CHECKSTATE_SYS;

	switch (cpustate) {
	case CHECKSTATE_USER:
		if (p->p_flag & P_PROFIL)
			addupc_intr_forwarded(p, id, astmap);
		if (pscnt > 1)
			return;
		p->p_uticks++;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE]++;
		else
			cp_time[CP_USER]++;
		break;
	case CHECKSTATE_SYS:
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = checkstate_pc[id] - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
		if (pscnt > 1)
			return;

		p->p_sticks++;
		if (p == SMP_prvspace[id].globaldata.gd_idleproc)
			cp_time[CP_IDLE]++;
		else
			cp_time[CP_SYS]++;
		break;
	case CHECKSTATE_INTR:
	default:
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = checkstate_pc[id] - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
		if (pscnt > 1)
			return;
		if (p)
			p->p_iticks++;
		cp_time[CP_INTR]++;
	}
	schedclock(p);

	/* Update resource usage integrals and maximums. */
	if ((pstats = p->p_stats) != NULL &&
	    (ru = &pstats->p_ru) != NULL &&
	    (vm = p->p_vmspace) != NULL) {
		ru->ru_ixrss += pgtok(vm->vm_tsize);
		ru->ru_idrss += pgtok(vm->vm_dsize);
		ru->ru_isrss += pgtok(vm->vm_ssize);
		rss = pgtok(vmspace_resident_count(vm));
		if (ru->ru_maxrss < rss)
			ru->ru_maxrss = rss;
	}
}

/*
 * Do statclock() processing for every other (non-stopped) CPU from this
 * CPU: probe their states with a CHECKSTATE IPI, account the results via
 * forwarded_statclock(), and send an AST IPI to any CPU owing work.
 */
void
forward_statclock(int pscnt)
{
	int map;
	int id;
	int i;

	/* Kludge. We don't yet have separate locks for the interrupts
	 * and the kernel. This means that we cannot let the other processors
	 * handle complex interrupts while inhibiting them from entering
	 * the kernel in a non-interrupt context.
	 *
	 * What we can do, without changing the locking mechanisms yet,
	 * is letting the other processors handle a very simple interrupt
	 * (which determines the processor states), and do the main
	 * work ourself.
	 */

	if (!smp_started || !invltlb_ok || cold || panicstr)
		return;

	/* Step 1: Probe state (user, cpu, interrupt, spinlock, idle ) */

	map = PCPU_GET(other_cpus) & ~stopped_cpus ;
	checkstate_probed_cpus = 0;
	if (map != 0)
		selected_apic_ipi(map,
		    XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);

	/* Bounded spin: wait for each targeted CPU to report its state. */
	i = 0;
	while (checkstate_probed_cpus != map) {
		/* spin */
		i++;
		if (i == 100000) {
#ifdef BETTER_CLOCK_DIAGNOSTIC
			printf("forward_statclock: checkstate %x\n",
			       checkstate_probed_cpus);
#endif
			break;
		}
	}

	/*
	 * Step 2: walk through other processors processes, update ticks and
	 * profiling info.
	 */

	map = 0;
	for (id = 0; id < mp_ncpus; id++) {
		if (id == PCPU_GET(cpuid))
			continue;
		if (((1 << id) & checkstate_probed_cpus) == 0)
			continue;
		forwarded_statclock(id, pscnt, &map);
	}
	if (map != 0) {
		/* Kick CPUs that accrued deferred work; wait (bounded) for
		 * them to take the AST. */
		checkstate_need_ast |= map;
		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
		i = 0;
		while ((checkstate_need_ast & map) != 0) {
			/* spin */
			i++;
			if (i > 100000) {
#ifdef BETTER_CLOCK_DIAGNOSTIC
				printf("forward_statclock: dropped ast 0x%x\n",
				       checkstate_need_ast & map);
#endif
				break;
			}
		}
	}
}

/*
 * Do hardclock() processing for every other (non-stopped) CPU: probe
 * their states, deliver expired virtual/profiling interval timers for
 * the processes seen running there, and -- when stathz == 0, i.e. the
 * stat clock piggybacks on hardclock -- do statclock accounting too.
 * ASTs are then pushed to CPUs with pending work.
 */
void
forward_hardclock(int pscnt)
{
	int map;
	int id;
	struct proc *p;
	struct pstats *pstats;
	int i;

	/* Kludge. We don't yet have separate locks for the interrupts
	 * and the kernel. This means that we cannot let the other processors
	 * handle complex interrupts while inhibiting them from entering
	 * the kernel in a non-interrupt context.
	 *
	 * What we can do, without changing the locking mechanisms yet,
	 * is letting the other processors handle a very simple interrupt
	 * (which determines the processor states), and do the main
	 * work ourself.
	 */

	if (!smp_started || !invltlb_ok || cold || panicstr)
		return;

	/* Step 1: Probe state (user, cpu, interrupt, spinlock, idle) */

	map = PCPU_GET(other_cpus) & ~stopped_cpus ;
	checkstate_probed_cpus = 0;
	if (map != 0)
		selected_apic_ipi(map,
		    XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);

	/* Bounded spin: wait for each targeted CPU to report its state. */
	i = 0;
	while (checkstate_probed_cpus != map) {
		/* spin */
		i++;
		if (i == 100000) {
#ifdef BETTER_CLOCK_DIAGNOSTIC
			printf("forward_hardclock: checkstate %x\n",
			       checkstate_probed_cpus);
#endif
			break;
		}
	}

	/*
	 * Step 2: walk through other processors processes, update virtual
	 * timer and profiling timer. If stathz == 0, also update ticks and
	 * profiling info.
	 */

	map = 0;
	for (id = 0; id < mp_ncpus; id++) {
		if (id == PCPU_GET(cpuid))
			continue;
		if (((1 << id) & checkstate_probed_cpus) == 0)
			continue;
		p = checkstate_curproc[id];
		if (p) {
			pstats = p->p_stats;
			/* ITIMER_VIRTUAL only runs down in user mode. */
			if (checkstate_cpustate[id] == CHECKSTATE_USER &&
			    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
			    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
				psignal(p, SIGVTALRM);
				map |= (1 << id);
			}
			if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
			    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
				psignal(p, SIGPROF);
				map |= (1 << id);
			}
		}
		if (stathz == 0) {
			forwarded_statclock( id, pscnt, &map);
		}
	}
	if (map != 0) {
		/* Kick CPUs that accrued deferred work; wait (bounded) for
		 * them to take the AST. */
		checkstate_need_ast |= map;
		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
		i = 0;
		while ((checkstate_need_ast & map) != 0) {
			/* spin */
			i++;
			if (i > 100000) {
#ifdef BETTER_CLOCK_DIAGNOSTIC
				printf("forward_hardclock: dropped ast 0x%x\n",
				       checkstate_need_ast & map);
#endif
				break;
			}
		}
	}
}

#endif /* BETTER_CLOCK */

/*
 * Make process p, believed to be running on another CPU, notice its
 * pending signal as soon as possible by sending that CPU an AST IPI.
 * Retries until p is no longer SRUN, is off-CPU (p_oncpu == 0xff), or
 * is observed still on the same CPU after the AST was taken.
 */
void
forward_signal(struct proc *p)
{
	int map;
	int id;
	int i;

	/* Kludge. We don't yet have separate locks for the interrupts
	 * and the kernel. This means that we cannot let the other processors
	 * handle complex interrupts while inhibiting them from entering
	 * the kernel in a non-interrupt context.
	 *
	 * What we can do, without changing the locking mechanisms yet,
	 * is letting the other processors handle a very simple interrupt
	 * (which determines the processor states), and do the main
	 * work ourself.
	 */

	if (!smp_started || !invltlb_ok || cold || panicstr)
		return;
	if (!forward_signal_enabled)
		return;
	/* Read p's run state and CPU under sched_lock; drop the lock
	 * before sending/spinning on the IPI. */
	mtx_enter(&sched_lock, MTX_SPIN);
	while (1) {
		if (p->p_stat != SRUN) {
			mtx_exit(&sched_lock, MTX_SPIN);
			return;
		}
		id = p->p_oncpu;
		mtx_exit(&sched_lock, MTX_SPIN);
		if (id == 0xff)		/* not currently on any CPU */
			return;
		map = (1<<id);
		checkstate_need_ast |= map;
		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
		i = 0;
		while ((checkstate_need_ast & map) != 0) {
			/* spin until the target clears its AST bit */
			i++;
			if (i > 100000) {
#if 0
				printf("forward_signal: dropped ast 0x%x\n",
				       checkstate_need_ast & map);
#endif
				break;
			}
		}
		mtx_enter(&sched_lock, MTX_SPIN);
		if (id == p->p_oncpu) {
			mtx_exit(&sched_lock, MTX_SPIN);
			return;
		}
		/* p migrated to another CPU meanwhile; retry there. */
	}
}

/*
 * Ask every other (non-stopped) CPU to reschedule: flag them all in
 * resched_cpus, then send an AST IPI.
 *
 * NOTE(review): unlike forward_statclock()/forward_hardclock()/
 * forward_signal(), this never sets checkstate_need_ast |= map before
 * spinning on it, so the wait loop below normally falls through
 * immediately.  Verify against the XCPUAST handler whether that is
 * intentional (resched_cpus carrying the request) or a lost-AST bug.
 */
void
forward_roundrobin(void)
{
	u_int map;
	int i;

	if (!smp_started || !invltlb_ok || cold || panicstr)
		return;
	if (!forward_roundrobin_enabled)
		return;
	resched_cpus |= PCPU_GET(other_cpus);
	map = PCPU_GET(other_cpus) & ~stopped_cpus ;
#if 1
	selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
#else
	(void) all_but_self_ipi(XCPUAST_OFFSET);
#endif
	i = 0;
	while ((checkstate_need_ast & map) != 0) {
		/* spin */
		i++;
		if (i > 100000) {
#if 0
			printf("forward_roundrobin: dropped ast 0x%x\n",
			       checkstate_need_ast & map);
#endif
			break;
		}
	}
}


#ifdef APIC_INTR_REORDER
/*
 * Maintain mapping from softintr vector to isr bit in local apic.
 * Stores, for interrupt 'intr', the address of the local APIC ISR word
 * containing 'vector''s bit plus the bit mask within that word.  The
 * (vector>>5)<<2 pointer offset presumably matches the 16-byte spacing
 * of the lapic ISR registers -- confirm against the lapic layout.
 */
void
set_lapic_isrloc(int intr, int vector)
{
	/*
	 * NOTE(review): the range check admits intr == 32; if
	 * apic_isrbit_location[] has exactly 32 entries this is an
	 * off-by-one.  Verify the array's declared size.
	 */
	if (intr < 0 || intr > 32)
		panic("set_apic_isrloc: bad intr argument: %d",intr);
	if (vector < ICU_OFFSET || vector > 255)
		panic("set_apic_isrloc: bad vector argument: %d",vector);
	apic_isrbit_location[intr].location = &lapic.isr0 + ((vector>>5)<<2);
	apic_isrbit_location[intr].bit = (1<<(vector & 31));
}
#endif

/*
 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
 * (if specified), rendezvous, execute the action function (if specified),
 * rendezvous again, execute the teardown function (if specified), and then
 * resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */
/* Rendezvous parameters, valid only while smp_rv_lock is held. */
static void (*smp_rv_setup_func)(void *arg);
static void (*smp_rv_action_func)(void *arg);
static void (*smp_rv_teardown_func)(void *arg);
static void *smp_rv_func_arg;
/* Entry/exit barrier counters; each CPU increments each one exactly once. */
static volatile int smp_rv_waiters[2];

/*
 * Per-CPU half of the rendezvous: run setup, barrier, action, barrier,
 * teardown.  Executed by the initiating CPU directly from
 * smp_rendezvous() and by every other CPU from the XRENDEZVOUS IPI
 * handler; the barriers release once all mp_ncpus CPUs have arrived.
 */
void
smp_rendezvous_action(void)
{
	/* setup function */
	if (smp_rv_setup_func != NULL)
		smp_rv_setup_func(smp_rv_func_arg);
	/* spin on entry rendezvous */
	atomic_add_int(&smp_rv_waiters[0], 1);
	while (smp_rv_waiters[0] < mp_ncpus)
		;
	/* action function */
	if (smp_rv_action_func != NULL)
		smp_rv_action_func(smp_rv_func_arg);
	/* spin on exit rendezvous */
	atomic_add_int(&smp_rv_waiters[1], 1);
	while (smp_rv_waiters[1] < mp_ncpus)
		;
	/* teardown function */
	if (smp_rv_teardown_func != NULL)
		smp_rv_teardown_func(smp_rv_func_arg);
}

/*
 * Initiate a rendezvous: publish the callbacks, disable interrupts
 * locally, IPI all other CPUs, and take part in the rendezvous
 * ourselves.  Serialized by smp_rv_lock; any callback may be NULL.
 */
void
smp_rendezvous(void (* setup_func)(void *),
	       void (* action_func)(void *),
	       void (* teardown_func)(void *),
	       void *arg)
{
	u_int efl;

	/* obtain rendezvous lock */
	s_lock(&smp_rv_lock);	/* XXX sleep here? NOWAIT flag? */

	/* set static function pointers */
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[0] = 0;
	smp_rv_waiters[1] = 0;

	/* disable interrupts on this CPU, save interrupt status */
	efl = read_eflags();
	write_eflags(efl & ~PSL_I);

	/* signal other processors, which will enter the IPI with interrupts off */
	all_but_self_ipi(XRENDEZVOUS_OFFSET);

	/* call executor function */
	smp_rendezvous_action();

	/* restore interrupt flag */
	write_eflags(efl);

	/* release lock */
	s_unlock(&smp_rv_lock);
}

/*
 * SYSINIT hook: release ap_boot_lock so the APs parked at the top of
 * ap_init() may finish booting.  Runs first in the SI_SUB_SMP stage,
 * i.e. once the rest of the system is up.
 */
void
release_aps(void *dummy __unused)
{
	s_unlock(&ap_boot_lock);
}

SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);