mptable.c revision 71337
1/* 2 * Copyright (c) 1996, by Steve Passe 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. The name of the developer may NOT be used to endorse or promote products 11 * derived from this software without specific prior written permission. 12 * 13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 14 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 16 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 17 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 18 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 19 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 20 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 21 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 22 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 23 * SUCH DAMAGE. 
24 * 25 * $FreeBSD: head/sys/i386/i386/mptable.c 71337 2001-01-21 19:25:07Z jake $ 26 */ 27 28#include "opt_cpu.h" 29#include "opt_user_ldt.h" 30 31#ifdef SMP 32#include <machine/smptests.h> 33#else 34#error 35#endif 36 37#include <sys/param.h> 38#include <sys/bus.h> 39#include <sys/systm.h> 40#include <sys/kernel.h> 41#include <sys/proc.h> 42#include <sys/sysctl.h> 43#include <sys/malloc.h> 44#include <sys/memrange.h> 45#include <sys/mutex.h> 46#ifdef BETTER_CLOCK 47#include <sys/dkstat.h> 48#endif 49#include <sys/cons.h> /* cngetc() */ 50 51#include <vm/vm.h> 52#include <vm/vm_param.h> 53#include <vm/pmap.h> 54#include <vm/vm_kern.h> 55#include <vm/vm_extern.h> 56#ifdef BETTER_CLOCK 57#include <sys/lock.h> 58#include <vm/vm_map.h> 59#include <sys/user.h> 60#ifdef GPROF 61#include <sys/gmon.h> 62#endif 63#endif 64 65#include <machine/smp.h> 66#include <machine/apic.h> 67#include <machine/atomic.h> 68#include <machine/cpufunc.h> 69#include <machine/mpapic.h> 70#include <machine/psl.h> 71#include <machine/segments.h> 72#include <machine/smptests.h> /** TEST_DEFAULT_CONFIG, TEST_TEST1 */ 73#include <machine/tss.h> 74#include <machine/specialreg.h> 75#include <machine/globaldata.h> 76 77#if defined(APIC_IO) 78#include <machine/md_var.h> /* setidt() */ 79#include <i386/isa/icu.h> /* IPIs */ 80#include <i386/isa/intr_machdep.h> /* IPIs */ 81#endif /* APIC_IO */ 82 83#if defined(TEST_DEFAULT_CONFIG) 84#define MPFPS_MPFB1 TEST_DEFAULT_CONFIG 85#else 86#define MPFPS_MPFB1 mpfps->mpfb1 87#endif /* TEST_DEFAULT_CONFIG */ 88 89#define WARMBOOT_TARGET 0 90#define WARMBOOT_OFF (KERNBASE + 0x0467) 91#define WARMBOOT_SEG (KERNBASE + 0x0469) 92 93#ifdef PC98 94#define BIOS_BASE (0xe8000) 95#define BIOS_SIZE (0x18000) 96#else 97#define BIOS_BASE (0xf0000) 98#define BIOS_SIZE (0x10000) 99#endif 100#define BIOS_COUNT (BIOS_SIZE/4) 101 102#define CMOS_REG (0x70) 103#define CMOS_DATA (0x71) 104#define BIOS_RESET (0x0f) 105#define BIOS_WARM (0x0a) 106 107#define PROCENTRY_FLAG_EN 0x01 
#define PROCENTRY_FLAG_BP	0x02	/* processor entry: this CPU is the BSP */
#define IOAPICENTRY_FLAG_EN	0x01	/* IO APIC entry: APIC is usable */


/*
 * MP Floating Pointer Structure.
 * Located by mp_probe() via its "_MP_" signature (see MP_SIG below).
 */
typedef struct MPFPS {
	char    signature[4];	/* "_MP_" */
	void   *pap;		/* physical addr of the MP config table (MPCTH) */
	u_char  length;
	u_char  spec_rev;
	u_char  checksum;
	u_char  mpfb1;		/* non-zero: index of a 'default' configuration */
	u_char  mpfb2;		/* bit 7 set: IMCR present, PIC mode (see picmode) */
	u_char  mpfb3;
	u_char  mpfb4;
	u_char  mpfb5;
}      *mpfps_t;

/*
 * MP Configuration Table Header.
 * The base table entries (PROCENTRY etc.) follow this header in memory;
 * both mptable_pass1() and mptable_pass2() walk entry_count entries.
 */
typedef struct MPCTH {
	char    signature[4];
	u_short base_table_length;	/* header + base entries, in bytes */
	u_char  spec_rev;
	u_char  checksum;
	u_char  oem_id[8];
	u_char  product_id[12];
	void   *oem_table_pointer;
	u_short oem_table_size;
	u_short entry_count;		/* # of base table entries */
	void   *apic_address;		/* physical addr of local APIC */
	u_short extended_table_length;
	u_char  extended_table_checksum;
	u_char  reserved;
}      *mpcth_t;


/* base table entry, type 0: processor */
typedef struct PROCENTRY {
	u_char  type;
	u_char  apic_id;
	u_char  apic_version;
	u_char  cpu_flags;	/* PROCENTRY_FLAG_EN / PROCENTRY_FLAG_BP */
	u_long  cpu_signature;
	u_long  feature_flags;
	u_long  reserved1;
	u_long  reserved2;
}      *proc_entry_ptr;

/* base table entry, type 1: bus */
typedef struct BUSENTRY {
	u_char  type;
	u_char  bus_id;
	char    bus_type[6];	/* space-padded name, decoded by bus_entry() */
}      *bus_entry_ptr;

/* base table entry, type 2: IO APIC */
typedef struct IOAPICENTRY {
	u_char  type;
	u_char  apic_id;
	u_char  apic_version;
	u_char  apic_flags;	/* IOAPICENTRY_FLAG_EN */
	void   *apic_address;	/* physical addr of this IO APIC */
}      *io_apic_entry_ptr;

/* base table entry, types 3 (IO INT) and 4 (local INT) */
typedef struct INTENTRY {
	u_char  type;
	u_char  int_type;
	u_short int_flags;
	u_char  src_bus_id;
	u_char  src_bus_irq;
	u_char  dst_apic_id;
	u_char  dst_apic_int;
}      *int_entry_ptr;

/* descriptions of MP basetable entries */
typedef struct BASETABLE_ENTRY {
	u_char  type;
	u_char  length;		/* size of the entry, used to step the walk */
	char    name[16];
}       basetable_entry;

/*
 * this code MUST be enabled here and in mpboot.s.
 * it follows the very early stages of AP boot by placing values in CMOS ram.
 * it NORMALLY will never be needed and thus the primitive method for enabling.
 *
#define CHECK_POINTS
 */

#if defined(CHECK_POINTS) && !defined(PC98)
/* read/write a CMOS RAM byte through the index/data port pair */
#define CHECK_READ(A)	 (outb(CMOS_REG, (A)), inb(CMOS_DATA))
#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D)))

/* seed six CMOS check-point bytes with a known value */
#define CHECK_INIT(D);				\
	CHECK_WRITE(0x34, (D));			\
	CHECK_WRITE(0x35, (D));			\
	CHECK_WRITE(0x36, (D));			\
	CHECK_WRITE(0x37, (D));			\
	CHECK_WRITE(0x38, (D));			\
	CHECK_WRITE(0x39, (D));

/* dump the six check-point bytes, tagged with string S */
#define CHECK_PRINT(S);				\
	printf("%s: %d, %d, %d, %d, %d, %d\n",	\
	   (S),					\
	   CHECK_READ(0x34),			\
	   CHECK_READ(0x35),			\
	   CHECK_READ(0x36),			\
	   CHECK_READ(0x37),			\
	   CHECK_READ(0x38),			\
	   CHECK_READ(0x39));

#else				/* CHECK_POINTS */

#define CHECK_INIT(D)
#define CHECK_PRINT(S)

#endif				/* CHECK_POINTS */

/*
 * Values to send to the POST hardware.
 * One code per phase of SMP startup; see the POSTCODE() calls below.
 */
#define MP_BOOTADDRESS_POST	0x10
#define MP_PROBE_POST		0x11
#define MPTABLE_PASS1_POST	0x12

#define MP_START_POST		0x13
#define MP_ENABLE_POST		0x14
#define MPTABLE_PASS2_POST	0x15

#define START_ALL_APS_POST	0x16
#define INSTALL_AP_TRAMP_POST	0x17
#define START_AP_POST		0x18

#define MP_ANNOUNCE_POST	0x19

/* used to hold the AP's until we are ready to release them */
struct simplelock ap_boot_lock;

/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */
int	current_postcode;

/** XXX FIXME: what system files declare these???
 */
extern struct region_descriptor r_gdt, r_idt;

int	bsp_apic_ready = 0;	/* flags useability of BSP apic */
int	mp_ncpus;		/* # of CPUs, including BSP */
int	mp_naps;		/* # of Applications processors */
int	mp_nbusses;		/* # of busses */
int	mp_napics;		/* # of IO APICs */
int	boot_cpu_id;		/* designated BSP */
vm_offset_t cpu_apic_address;	/* local APIC physical address (common to all CPUs) */
vm_offset_t io_apic_address[NAPICID];	/* NAPICID is more than enough */
extern	int nkpt;

u_int32_t cpu_apic_versions[MAXCPU];
u_int32_t *io_apic_versions;	/* MALLOCed in mptable_pass2() */

#ifdef APIC_INTR_REORDER
struct {
	volatile int *location;
	int bit;
} apic_isrbit_location[32];
#endif

/* IRQ -> (IO APIC, pin) mapping, maintained by assign/revoke_apic_irq() */
struct apic_intmapinfo int_to_apicintpin[APIC_INTMAPSIZE];

/*
 * APIC ID logical/physical mapping structures.
 * We oversize these to simplify boot-time config.
 */
int	cpu_num_to_apic_id[NAPICID];	/* accessed via CPU_TO_ID() */
int	io_num_to_apic_id[NAPICID];	/* accessed via IO_TO_ID() */
int	apic_id_to_logical[NAPICID];	/* accessed via ID_TO_CPU()/ID_TO_IO() */


/* Bitmap of all available CPUs */
u_int	all_cpus;

/* AP uses this during bootstrap.  Do not staticize.  */
char *bootSTK;
static int bootAP;

/* Hotwire a 0->4MB V==P mapping */
extern pt_entry_t *KPTphys;

/* SMP page table page */
extern pt_entry_t *SMPpt;

struct pcb stoppcbs[MAXCPU];

int smp_started;		/* has the system started? */

/*
 * Local data and functions.
 */

static int	mp_capable;	/* set by mp_probe() when an MP table is found */
static u_int	boot_address;	/* AP trampoline target, set by mp_bootaddress() */
static u_int	base_memory;	/* size of base memory, in bytes */

static int	picmode;	/* 0: virtual wire mode, 1: PIC mode */
static mpfps_t	mpfps;		/* located MP Floating Pointer Structure */
static int	search_for_sig(u_int32_t target, int count);
static void	mp_enable(u_int boot_addr);

static void	mptable_pass1(void);
static int	mptable_pass2(void);
static void	default_mp_table(int type);
static void	fix_mp_table(void);
static void	setup_apic_irq_mapping(void);
static void	init_locks(void);
static int	start_all_aps(u_int boot_addr);
static void	install_ap_tramp(u_int boot_addr);
static int	start_ap(int logicalCpu, u_int boot_addr);
static int	apic_int_is_bus_type(int intr, int bus_type);
static void	release_aps(void *dummy);

/*
 * Calculate usable address in base memory for AP trampoline code.
 *
 * 'basemem' is the base memory size in KB.  Returns the highest 4K-aligned
 * address below the top of base memory with room for bootMP (the trampoline
 * image, whose size is the linker symbol bootMP_size).  Also records
 * base_memory and boot_address for later use by mp_probe()/mp_start().
 */
u_int
mp_bootaddress(u_int basemem)
{
	POSTCODE(MP_BOOTADDRESS_POST);

	base_memory = basemem * 1024;	/* convert to bytes */

	boot_address = base_memory & ~0xfff;	/* round down to 4k boundary */
	if ((base_memory - boot_address) < bootMP_size)
		boot_address -= 4096;	/* not enough, lower by 4k */

	return boot_address;
}


/*
 * Look for an Intel MP spec table (ie, SMP capable hardware).
342 */ 343int 344mp_probe(void) 345{ 346 int x; 347 u_long segment; 348 u_int32_t target; 349 350 POSTCODE(MP_PROBE_POST); 351 352 /* see if EBDA exists */ 353 if ((segment = (u_long) * (u_short *) (KERNBASE + 0x40e)) != 0) { 354 /* search first 1K of EBDA */ 355 target = (u_int32_t) (segment << 4); 356 if ((x = search_for_sig(target, 1024 / 4)) >= 0) 357 goto found; 358 } else { 359 /* last 1K of base memory, effective 'top of base' passed in */ 360 target = (u_int32_t) (base_memory - 0x400); 361 if ((x = search_for_sig(target, 1024 / 4)) >= 0) 362 goto found; 363 } 364 365 /* search the BIOS */ 366 target = (u_int32_t) BIOS_BASE; 367 if ((x = search_for_sig(target, BIOS_COUNT)) >= 0) 368 goto found; 369 370 /* nothing found */ 371 mpfps = (mpfps_t)0; 372 mp_capable = 0; 373 return 0; 374 375found: 376 /* calculate needed resources */ 377 mpfps = (mpfps_t)x; 378 mptable_pass1(); 379 380 /* flag fact that we are running multiple processors */ 381 mp_capable = 1; 382 return 1; 383} 384 385 386/* 387 * Initialize the SMP hardware and the APIC and start up the AP's. 388 */ 389void 390mp_start(void) 391{ 392 POSTCODE(MP_START_POST); 393 394 /* look for MP capable motherboard */ 395 if (mp_capable) 396 mp_enable(boot_address); 397 else 398 panic("MP hardware not found!"); 399} 400 401 402/* 403 * Print various information about the SMP system hardware and setup. 
 */
void
mp_announce(void)
{
	int     x;

	POSTCODE(MP_ANNOUNCE_POST);

	printf("FreeBSD/SMP: Multiprocessor motherboard\n");
	printf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0));
	printf(", version: 0x%08x", cpu_apic_versions[0]);
	printf(", at 0x%08x\n", cpu_apic_address);
	for (x = 1; x <= mp_naps; ++x) {
		printf(" cpu%d (AP): apic id: %2d", x, CPU_TO_ID(x));
		printf(", version: 0x%08x", cpu_apic_versions[x]);
		printf(", at 0x%08x\n", cpu_apic_address);
	}

#if defined(APIC_IO)
	for (x = 0; x < mp_napics; ++x) {
		printf(" io%d (APIC): apic id: %2d", x, IO_TO_ID(x));
		printf(", version: 0x%08x", io_apic_versions[x]);
		printf(", at 0x%08x\n", io_apic_address[x]);
	}
#else
	printf(" Warning: APIC I/O disabled\n");
#endif	/* APIC_IO */
}

/*
 * AP cpu's call this to sync up protected mode.
 *
 * Builds this AP's private GDT/TSS from the per-CPU SMP_prvspace area
 * (indexed by bootAP, set by the BSP before releasing this AP) and loads
 * the descriptor tables.  The lgdt/lidt/lldt/ltr ordering is significant.
 */
void
init_secondary(void)
{
	int	gsel_tss;
	int	x, myid = bootAP;

	/* point this AP's private segment and TSS at its prvspace slot */
	gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[myid];
	gdt_segs[GPROC0_SEL].ssd_base =
		(int) &SMP_prvspace[myid].globaldata.gd_common_tss;
	SMP_prvspace[myid].globaldata.gd_prvspace =
		&SMP_prvspace[myid].globaldata;

	/* build this AP's copy of the GDT */
	for (x = 0; x < NGDT; x++) {
		ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd);
	}

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (int) &gdt[myid * NGDT];
	lgdt(&r_gdt);		/* does magic intra-segment return */

	lidt(&r_idt);

	lldt(_default_ldt);
#ifdef USER_LDT
	PCPU_SET(currentldt, _default_ldt);
#endif

	/* initialize and load this AP's TSS */
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS;
	PCPU_SET(common_tss.tss_esp0, 0);	/* not used until after switch */
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
	PCPU_SET(tss_gdt, &gdt[myid * NGDT + GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	ltr(gsel_tss);

	pmap_set_opt();
}


#if defined(APIC_IO)
/*
 * Final configuration of the BSP's local APIC:
 *  - disable 'pic mode'.
 *  - disable 'virtual wire mode'.
 *  - enable NMI.
 */
void
bsp_apic_configure(void)
{
	u_char		byte;
	u_int32_t	temp;

	/* leave 'pic mode' if necessary */
	if (picmode) {
		outb(0x22, 0x70);	/* select IMCR */
		byte = inb(0x23);	/* current contents */
		byte |= 0x01;		/* mask external INTR */
		outb(0x23, byte);	/* disconnect 8259s/NMI */
	}

	/* mask lint0 (the 8259 'virtual wire' connection) */
	temp = lapic.lvt_lint0;
	temp |= APIC_LVT_M;		/* set the mask */
	lapic.lvt_lint0 = temp;

	/* setup lint1 to handle NMI */
	temp = lapic.lvt_lint1;
	temp &= ~APIC_LVT_M;		/* clear the mask */
	lapic.lvt_lint1 = temp;

	if (bootverbose)
		apic_dump("bsp_apic_configure()");
}
#endif	/* APIC_IO */


/*******************************************************************
 * local functions and data
 */

/*
 * start the SMP system:
 * parse the MP table, repair it, program the IO APICs, install the
 * IPI vectors, then boot the APs (which spin on ap_boot_lock).
 */
static void
mp_enable(u_int boot_addr)
{
	int     x;
#if defined(APIC_IO)
	int     apic;
	u_int   ux;
#endif	/* APIC_IO */

	POSTCODE(MP_ENABLE_POST);

	/* turn on 4MB of V == P addressing so we can get to MP table */
	*(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
	invltlb();

	/* examine the MP table for needed info, uses physical addresses */
	x = mptable_pass2();

	/* tear the identity mapping back down */
	*(int *)PTD = 0;
	invltlb();

	/* can't process default configs till the CPU APIC is pmapped */
	if (x)
		default_mp_table(x);

	/* post scan cleanup */
	fix_mp_table();
	setup_apic_irq_mapping();

#if defined(APIC_IO)

	/* fill the LOGICAL io_apic_versions table */
	for (apic = 0; apic < mp_napics; ++apic) {
		ux = io_apic_read(apic, IOAPIC_VER);
		io_apic_versions[apic] = ux;
		io_apic_set_id(apic, IO_TO_ID(apic));
	}

	/* program each IO APIC in the system */
	for (apic = 0; apic < mp_napics; ++apic)
		if (io_apic_setup(apic) < 0)
			panic("IO APIC setup failure");

	/* install a 'Spurious INTerrupt' vector */
	setidt(XSPURIOUSINT_OFFSET, Xspuriousint,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* install an inter-CPU IPI for TLB invalidation */
	setidt(XINVLTLB_OFFSET, Xinvltlb,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

#ifdef BETTER_CLOCK
	/* install an inter-CPU IPI for reading processor state */
	setidt(XCPUCHECKSTATE_OFFSET, Xcpucheckstate,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
#endif

	/* install an inter-CPU IPI for all-CPU rendezvous */
	setidt(XRENDEZVOUS_OFFSET, Xrendezvous,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

	/* install an inter-CPU IPI for forcing an additional software trap */
	setidt(XCPUAST_OFFSET, Xcpuast,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

#if 0
	/* install an inter-CPU IPI for interrupt forwarding */
	setidt(XFORWARD_IRQ_OFFSET, Xforward_irq,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
#endif

	/* install an inter-CPU IPI for CPU stop/restart */
	setidt(XCPUSTOP_OFFSET, Xcpustop,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));

#if defined(TEST_TEST1)
	/* install a "fake hardware INTerrupt" vector */
	setidt(XTEST1_OFFSET, Xtest1,
	       SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
#endif	/** TEST_TEST1 */

#endif	/* APIC_IO */

	/* initialize all SMP locks */
	init_locks();

	/* obtain the ap_boot_lock; released once the APs may proceed */
	s_lock(&ap_boot_lock);

	/* start each Application Processor */
	start_all_aps(boot_addr);
}


/*
 * look for the MP spec signature
 */

/* string defined by the Intel MP Spec as identifying the MP table */
#define MP_SIG		0x5f504d5f	/* _MP_ */
#define NEXT(X)		((X) += 4)	/* step 4 dwords = 16 bytes, the FPS alignment */

/*
 * Scan 'count' dwords of physical memory starting at 'target' for the
 * "_MP_" signature.  Returns the physical byte offset of the signature,
 * or -1 if not found.  The signature is only looked for on 16-byte
 * boundaries (NEXT steps by 4 dwords).
 */
static int
search_for_sig(u_int32_t target, int count)
{
	int     x;
	u_int32_t *addr = (u_int32_t *) (KERNBASE + target);

	for (x = 0; x < count; NEXT(x))
		if (addr[x] == MP_SIG)
			/* make array index a byte index */
			return (target + (x * sizeof(u_int32_t)));

	return -1;
}


/* type/length of each MP base table entry kind, indexed by entry type */
static basetable_entry basetable_entry_types[] =
{
	{0, 20, "Processor"},
	{1, 8, "Bus"},
	{2, 8, "I/O APIC"},
	{3, 8, "I/O INT"},
	{4, 8, "Local INT"}
};

typedef struct BUSDATA {
	u_char  bus_id;
	enum busTypes bus_type;
}       bus_datum;

typedef struct INTDATA {
	u_char  int_type;
	u_short int_flags;
	u_char  src_bus_id;
	u_char  src_bus_irq;
	u_char  dst_apic_id;
	u_char  dst_apic_int;
	u_char	int_vector;	/* assigned IRQ, 0xff while unassigned */
}       io_int, local_int;

typedef struct BUSTYPENAME {
	u_char  type;
	char    name[7];
}       bus_type_name;

/* bus name -> enum busTypes lookup table; "---" slots are unassigned */
static bus_type_name bus_type_table[] =
{
	{CBUS, "CBUS"},
	{CBUSII, "CBUSII"},
	{EISA, "EISA"},
	{MCA, "MCA"},
	{UNKNOWN_BUSTYPE, "---"},
	{ISA, "ISA"},
	{MCA, "MCA"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{PCI, "PCI"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{UNKNOWN_BUSTYPE, "---"},
	{XPRESS, "XPRESS"},
	{UNKNOWN_BUSTYPE, "---"}
};
/* from MP spec v1.4, table 5-1 */
static int default_data[7][5] =
{
/*   nbus, id0, type0, id1, type1 */
	{1, 0, ISA, 255, 255},
	{1, 0, EISA, 255, 255},
	{1, 0, EISA, 255, 255},
	{1, 0, MCA, 255, 255},
	{2, 0, ISA, 1, PCI},
	{2, 0, EISA, 1, PCI},
	{2, 0, MCA, 1, PCI}
};


/* the bus data */
static bus_datum *bus_data;

/* the IO INT data, one entry per possible APIC INTerrupt */
static io_int  *io_apic_ints;

static int nintrs;

static int processor_entry	__P((proc_entry_ptr entry, int cpu));
static int bus_entry		__P((bus_entry_ptr entry, int bus));
static int io_apic_entry	__P((io_apic_entry_ptr entry, int apic));
static int int_entry		__P((int_entry_ptr entry, int intr));
static int lookup_bus_type	__P((char *name));


/*
 * 1st pass on motherboard's Intel MP specification table.
 *
 * initializes:
 *	mp_ncpus = 1
 *
 * determines:
 *	cpu_apic_address (common to all CPUs)
 *	io_apic_address[N]
 *	mp_naps
 *	mp_nbusses
 *	mp_napics
 *	nintrs
 */
static void
mptable_pass1(void)
{
	int	x;
	mpcth_t	cth;
	int	totalSize;
	void*	position;
	int	count;
	int	type;

	POSTCODE(MPTABLE_PASS1_POST);

	/* clear various tables */
	for (x = 0; x < NAPICID; ++x) {
		io_apic_address[x] = ~0;	/* IO APIC address table */
	}

	/* init everything to empty */
	mp_naps = 0;
	mp_nbusses = 0;
	mp_napics = 0;
	nintrs = 0;

	/* check for use of 'default' configuration */
	if (MPFPS_MPFB1 != 0) {
		/* use default addresses */
		cpu_apic_address = DEFAULT_APIC_BASE;
		io_apic_address[0] = DEFAULT_IO_APIC_BASE;

		/* fill in with defaults */
		mp_naps = 2;		/* includes BSP */
		mp_nbusses = default_data[MPFPS_MPFB1 - 1][0];
#if defined(APIC_IO)
		mp_napics = 1;
		nintrs = 16;
#endif	/* APIC_IO */
	}
	else {
		if ((cth = mpfps->pap) == 0)
			panic("MP Configuration Table Header MISSING!");

		cpu_apic_address = (vm_offset_t) cth->apic_address;

		/* walk the table, recording info of interest */
		totalSize = cth->base_table_length - sizeof(struct MPCTH);
		position = (u_char *) cth + sizeof(struct MPCTH);
		count = cth->entry_count;

		while (count--) {
			switch (type = *(u_char *) position) {
			case 0: /* processor_entry */
				if (((proc_entry_ptr)position)->cpu_flags
					& PROCENTRY_FLAG_EN)
					++mp_naps;
				break;
			case 1: /*
bus_entry */
				++mp_nbusses;
				break;
			case 2: /* io_apic_entry */
				if (((io_apic_entry_ptr)position)->apic_flags
					& IOAPICENTRY_FLAG_EN)
					io_apic_address[mp_napics++] =
					    (vm_offset_t)((io_apic_entry_ptr)
						position)->apic_address;
				break;
			case 3: /* int_entry */
				++nintrs;
				break;
			case 4:	/* local_int entry, not counted */
				break;
			default:
				panic("mpfps Base Table HOSED!");
				/* NOTREACHED */
			}

			totalSize -= basetable_entry_types[type].length;
			(u_char*)position += basetable_entry_types[type].length;
		}
	}

	/* qualify the numbers */
	if (mp_naps > MAXCPU) {
		printf("Warning: only using %d of %d available CPUs!\n",
			MAXCPU, mp_naps);
		mp_naps = MAXCPU;
	}

	/*
	 * Count the BSP.
	 * This is also used as a counter while starting the APs.
	 */
	mp_ncpus = 1;

	--mp_naps;	/* subtract the BSP */
}


/*
 * 2nd pass on motherboard's Intel MP specification table.
 *
 * sets:
 *	boot_cpu_id
 *	ID_TO_IO(N), phy APIC ID to log CPU/IO table
 *	CPU_TO_ID(N), logical CPU to APIC ID table
 *	IO_TO_ID(N), logical IO to APIC ID table
 *	bus_data[N]
 *	io_apic_ints[N]
 *
 * Returns the default-configuration index (non-zero) when the FPS says a
 * default config is in use, else 0 after walking the full table.
 */
static int
mptable_pass2(void)
{
	int     x;
	mpcth_t cth;
	int     totalSize;
	void*   position;
	int     count;
	int     type;
	int     apic, bus, cpu, intr;
	int	i, j;
	int	pgeflag;

	POSTCODE(MPTABLE_PASS2_POST);

	pgeflag = 0;		/* XXX - Not used under SMP yet.  */

	/* allocate the per-count tables sized by mptable_pass1() */
	MALLOC(io_apic_versions, u_int32_t *, sizeof(u_int32_t) * mp_napics,
	    M_DEVBUF, M_WAITOK);
	MALLOC(ioapic, volatile ioapic_t **, sizeof(ioapic_t *) * mp_napics,
	    M_DEVBUF, M_WAITOK);
	MALLOC(io_apic_ints, io_int *, sizeof(io_int) * (nintrs + 1),
	    M_DEVBUF, M_WAITOK);	/* +1 slot for the Compaq fixup in fix_mp_table() */
	MALLOC(bus_data, bus_datum *, sizeof(bus_datum) * mp_nbusses,
	    M_DEVBUF, M_WAITOK);

	bzero(ioapic, sizeof(ioapic_t *) * mp_napics);

	/*
	 * Map each IO APIC's register page into the SMP page table,
	 * sharing a slot when two APICs live in the same page frame.
	 */
	for (i = 0; i < mp_napics; i++) {
		for (j = 0; j < mp_napics; j++) {
			/* same page frame as a previous IO apic? */
			if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) ==
			    (io_apic_address[i] & PG_FRAME)) {
				ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace
					+ (NPTEPG-2-j) * PAGE_SIZE
					+ (io_apic_address[i] & PAGE_MASK));
				break;
			}
			/* use this slot if available */
			if (((vm_offset_t)SMPpt[NPTEPG-2-j] & PG_FRAME) == 0) {
				SMPpt[NPTEPG-2-j] = (pt_entry_t)(PG_V | PG_RW |
				    pgeflag | (io_apic_address[i] & PG_FRAME));
				ioapic[i] = (ioapic_t *)((u_int)SMP_prvspace
					+ (NPTEPG-2-j) * PAGE_SIZE
					+ (io_apic_address[i] & PAGE_MASK));
				break;
			}
		}
	}

	/* clear various tables */
	for (x = 0; x < NAPICID; ++x) {
		ID_TO_IO(x) = -1;	/* phy APIC ID to log CPU/IO table */
		CPU_TO_ID(x) = -1;	/* logical CPU to APIC ID table */
		IO_TO_ID(x) = -1;	/* logical IO to APIC ID table */
	}

	/* clear bus data table */
	for (x = 0; x < mp_nbusses; ++x)
		bus_data[x].bus_id = 0xff;

	/* clear IO APIC INT table */
	for (x = 0; x < (nintrs + 1); ++x) {
		io_apic_ints[x].int_type = 0xff;
		io_apic_ints[x].int_vector = 0xff;
	}

	/* setup the cpu/apic mapping arrays */
	boot_cpu_id = -1;

	/* record whether PIC or virtual-wire mode */
	picmode = (mpfps->mpfb2 & 0x80) ? 1 : 0;

	/* check for use of 'default' configuration */
	if (MPFPS_MPFB1 != 0)
		return MPFPS_MPFB1;	/* return default configuration type */

	if ((cth = mpfps->pap) == 0)
		panic("MP Configuration Table Header MISSING!");

	/* walk the table, recording info of interest */
	totalSize = cth->base_table_length - sizeof(struct MPCTH);
	position = (u_char *) cth + sizeof(struct MPCTH);
	count = cth->entry_count;
	apic = bus = intr = 0;
	cpu = 1;		/* pre-count the BSP */

	while (count--) {
		switch (type = *(u_char *) position) {
		case 0:
			if (processor_entry(position, cpu))
				++cpu;
			break;
		case 1:
			if (bus_entry(position, bus))
				++bus;
			break;
		case 2:
			if (io_apic_entry(position, apic))
				++apic;
			break;
		case 3:
			if (int_entry(position, intr))
				++intr;
			break;
		case 4:
			/* int_entry(position); */
			break;
		default:
			panic("mpfps Base Table HOSED!");
			/* NOTREACHED */
		}

		totalSize -= basetable_entry_types[type].length;
		(u_char *) position += basetable_entry_types[type].length;
	}

	if (boot_cpu_id == -1)
		panic("NO BSP found!");

	/* report fact that its NOT a default configuration */
	return 0;
}


/*
 * Record that 'irq' is serviced by the given IO APIC intpin, and stamp
 * that vector into every matching unassigned io_apic_ints[] entry.
 * Panics if the irq slot is already in use.
 */
void
assign_apic_irq(int apic, int intpin, int irq)
{
	int x;

	if (int_to_apicintpin[irq].ioapic != -1)
		panic("assign_apic_irq: inconsistent table");

	int_to_apicintpin[irq].ioapic = apic;
	int_to_apicintpin[irq].int_pin = intpin;
	int_to_apicintpin[irq].apic_address = ioapic[apic];
	int_to_apicintpin[irq].redirindex = IOAPIC_REDTBL + 2 * intpin;

	/* mark every unassigned INT entry routed through this pin */
	for (x = 0; x < nintrs; x++) {
		if ((io_apic_ints[x].int_type == 0 ||
		     io_apic_ints[x].int_type == 3) &&
		    io_apic_ints[x].int_vector == 0xff &&
		    io_apic_ints[x].dst_apic_id == IO_TO_ID(apic) &&
		    io_apic_ints[x].dst_apic_int == intpin)
			io_apic_ints[x].int_vector = irq;
	}
}

void
987revoke_apic_irq(int irq) 988{ 989 int x; 990 int oldapic; 991 int oldintpin; 992 993 if (int_to_apicintpin[irq].ioapic == -1) 994 panic("assign_apic_irq: inconsistent table"); 995 996 oldapic = int_to_apicintpin[irq].ioapic; 997 oldintpin = int_to_apicintpin[irq].int_pin; 998 999 int_to_apicintpin[irq].ioapic = -1; 1000 int_to_apicintpin[irq].int_pin = 0; 1001 int_to_apicintpin[irq].apic_address = NULL; 1002 int_to_apicintpin[irq].redirindex = 0; 1003 1004 for (x = 0; x < nintrs; x++) { 1005 if ((io_apic_ints[x].int_type == 0 || 1006 io_apic_ints[x].int_type == 3) && 1007 io_apic_ints[x].int_vector == 0xff && 1008 io_apic_ints[x].dst_apic_id == IO_TO_ID(oldapic) && 1009 io_apic_ints[x].dst_apic_int == oldintpin) 1010 io_apic_ints[x].int_vector = 0xff; 1011 } 1012} 1013 1014 1015 1016static void 1017swap_apic_id(int apic, int oldid, int newid) 1018{ 1019 int x; 1020 int oapic; 1021 1022 1023 if (oldid == newid) 1024 return; /* Nothing to do */ 1025 1026 printf("Changing APIC ID for IO APIC #%d from %d to %d in MP table\n", 1027 apic, oldid, newid); 1028 1029 /* Swap physical APIC IDs in interrupt entries */ 1030 for (x = 0; x < nintrs; x++) { 1031 if (io_apic_ints[x].dst_apic_id == oldid) 1032 io_apic_ints[x].dst_apic_id = newid; 1033 else if (io_apic_ints[x].dst_apic_id == newid) 1034 io_apic_ints[x].dst_apic_id = oldid; 1035 } 1036 1037 /* Swap physical APIC IDs in IO_TO_ID mappings */ 1038 for (oapic = 0; oapic < mp_napics; oapic++) 1039 if (IO_TO_ID(oapic) == newid) 1040 break; 1041 1042 if (oapic < mp_napics) { 1043 printf("Changing APIC ID for IO APIC #%d from " 1044 "%d to %d in MP table\n", 1045 oapic, newid, oldid); 1046 IO_TO_ID(oapic) = oldid; 1047 } 1048 IO_TO_ID(apic) = newid; 1049} 1050 1051 1052static void 1053fix_id_to_io_mapping(void) 1054{ 1055 int x; 1056 1057 for (x = 0; x < NAPICID; x++) 1058 ID_TO_IO(x) = -1; 1059 1060 for (x = 0; x <= mp_naps; x++) 1061 if (CPU_TO_ID(x) < NAPICID) 1062 ID_TO_IO(CPU_TO_ID(x)) = x; 1063 1064 for (x = 0; x < 
mp_napics; x++)
		if (IO_TO_ID(x) < NAPICID)
			ID_TO_IO(IO_TO_ID(x)) = x;
}


/*
 * Return the lowest physical APIC ID not used by any CPU or IO APIC;
 * returns NAPICID when none is free (caller must check).
 */
static int
first_free_apic_id(void)
{
	int freeid, x;

	for (freeid = 0; freeid < NAPICID; freeid++) {
		for (x = 0; x <= mp_naps; x++)
			if (CPU_TO_ID(x) == freeid)
				break;
		if (x <= mp_naps)
			continue;	/* in use by a CPU */
		for (x = 0; x < mp_napics; x++)
			if (IO_TO_ID(x) == freeid)
				break;
		if (x < mp_napics)
			continue;	/* in use by an IO APIC */
		return freeid;
	}
	return freeid;
}


/*
 * Can IO APIC 'apic' use physical id 'id' without colliding with a CPU
 * or an already-assigned (lower-numbered) IO APIC?
 */
static int
io_apic_id_acceptable(int apic, int id)
{
	int cpu;		/* Logical CPU number */
	int oapic;		/* Logical IO APIC number for other IO APIC */

	if (id >= NAPICID)
		return 0;	/* Out of range */

	for (cpu = 0; cpu <= mp_naps; cpu++)
		if (CPU_TO_ID(cpu) == id)
			return 0;	/* Conflict with CPU */

	for (oapic = 0; oapic < mp_napics && oapic < apic; oapic++)
		if (IO_TO_ID(oapic) == id)
			return 0;	/* Conflict with other APIC */

	return 1;		/* ID is acceptable for IO APIC */
}


/*
 * Repair common BIOS defects in the recorded MP table data:
 * PCI bus mis-numbering, conflicting IO APIC IDs, and a missing
 * 8259 ExtInt entry (broken Compaq tables).
 */
static void
fix_mp_table(void)
{
	int	x;
	int	id;
	int	bus_0 = 0;	/* Stop GCC warning */
	int	bus_pci = 0;	/* Stop GCC warning */
	int	num_pci_bus;
	int	apic;		/* IO APIC unit number */
	int	freeid;		/* Free physical APIC ID */
	int	physid;		/* Current physical IO APIC ID */

	/*
	 * Fix mis-numbering of the PCI bus and its INT entries if the BIOS
	 * did it wrong.  The MP spec says that when more than 1 PCI bus
	 * exists the BIOS must begin with bus entries for the PCI bus and use
	 * actual PCI bus numbering.  This implies that when only 1 PCI bus
	 * exists the BIOS can choose to ignore this ordering, and indeed many
	 * MP motherboards do ignore it.  This causes a problem when the PCI
	 * sub-system makes requests of the MP sub-system based on PCI bus
	 * numbers.  So here we look for the situation and renumber the
	 * busses and associated INTs in an effort to "make it right".
	 */

	/* find bus 0, PCI bus, count the number of PCI busses */
	for (num_pci_bus = 0, x = 0; x < mp_nbusses; ++x) {
		if (bus_data[x].bus_id == 0) {
			bus_0 = x;
		}
		if (bus_data[x].bus_type == PCI) {
			++num_pci_bus;
			bus_pci = x;
		}
	}
	/*
	 * bus_0 == slot of bus with ID of 0
	 * bus_pci == slot of last PCI bus encountered
	 */

	/* check the 1 PCI bus case for sanity */
	/* if it is number 0 all is well */
	if (num_pci_bus == 1 &&
	    bus_data[bus_pci].bus_id != 0) {

		/* mis-numbered, swap with whichever bus uses slot 0 */

		/* swap the bus entry types */
		bus_data[bus_pci].bus_type = bus_data[bus_0].bus_type;
		bus_data[bus_0].bus_type = PCI;

		/* swap each relavant INTerrupt entry */
		id = bus_data[bus_pci].bus_id;
		for (x = 0; x < nintrs; ++x) {
			if (io_apic_ints[x].src_bus_id == id) {
				io_apic_ints[x].src_bus_id = 0;
			}
			else if (io_apic_ints[x].src_bus_id == 0) {
				io_apic_ints[x].src_bus_id = id;
			}
		}
	}

	/* Assign IO APIC IDs.
	 *
	 * First try the existing ID. If a conflict is detected, try
	 * the ID in the MP table.  If a conflict is still detected, find
	 * a free id.
	 *
	 * We cannot use the ID_TO_IO table before all conflicts has been
	 * resolved and the table has been corrected.
	 */
	for (apic = 0; apic < mp_napics; ++apic) { /* For all IO APICs */

		/* First try to use the value set by the BIOS */
		physid = io_apic_get_id(apic);
		if (io_apic_id_acceptable(apic, physid)) {
			if (IO_TO_ID(apic) != physid)
				swap_apic_id(apic, IO_TO_ID(apic), physid);
			continue;
		}

		/* Then check if the value in the MP table is acceptable */
		if (io_apic_id_acceptable(apic, IO_TO_ID(apic)))
			continue;

		/* Last resort, find a free APIC ID and use it */
		freeid = first_free_apic_id();
		if (freeid >= NAPICID)
			panic("No free physical APIC IDs found");

		if (io_apic_id_acceptable(apic, freeid)) {
			swap_apic_id(apic, IO_TO_ID(apic), freeid);
			continue;
		}
		panic("Free physical APIC ID not usable");
	}
	fix_id_to_io_mapping();

	/* detect and fix broken Compaq MP table */
	if (apic_int_type(0, 0) == -1) {
		printf("APIC_IO: MP table broken: 8259->APIC entry missing!\n");
		/* uses the spare (nintrs + 1)th slot MALLOCed in pass 2 */
		io_apic_ints[nintrs].int_type = 3;	/* ExtInt */
		io_apic_ints[nintrs].int_vector = 0xff;	/* Unassigned */
		/* XXX fixme, set src bus id etc, but it doesn't seem to hurt */
		io_apic_ints[nintrs].dst_apic_id = IO_TO_ID(0);
		io_apic_ints[nintrs].dst_apic_int = 0;	/* Pin 0 */
		nintrs++;
	}
}


/* Assign low level interrupt handlers */
static void
setup_apic_irq_mapping(void)
{
	int	x;
	int	int_vector;

	/* Clear array */
	for (x = 0; x < APIC_INTMAPSIZE; x++) {
		int_to_apicintpin[x].ioapic = -1;
		int_to_apicintpin[x].int_pin = 0;
		int_to_apicintpin[x].apic_address = NULL;
		int_to_apicintpin[x].redirindex = 0;
	}

	/* First assign ISA/EISA interrupts: keep their legacy IRQ numbers */
	for (x = 0; x < nintrs; x++) {
		int_vector = io_apic_ints[x].src_bus_irq;
		if (int_vector < APIC_INTMAPSIZE &&
		    io_apic_ints[x].int_vector == 0xff &&
		    int_to_apicintpin[int_vector].ioapic == -1 &&
		    (apic_int_is_bus_type(x, ISA) ||
		     apic_int_is_bus_type(x, EISA)) &&
		    io_apic_ints[x].int_type == 0) {
			assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
					io_apic_ints[x].dst_apic_int,
					int_vector);
		}
	}

	/* Assign first set of interrupts to intpins on IOAPIC #0 */
	for (x = 0; x < nintrs; x++) {
		int_vector = io_apic_ints[x].dst_apic_int;
		if (int_vector < APIC_INTMAPSIZE &&
		    io_apic_ints[x].dst_apic_id == IO_TO_ID(0) &&
		    io_apic_ints[x].int_vector == 0xff &&
		    int_to_apicintpin[int_vector].ioapic == -1 &&
		    (io_apic_ints[x].int_type == 0 ||
		     io_apic_ints[x].int_type == 3)) {
			assign_apic_irq(0,
					io_apic_ints[x].dst_apic_int,
					int_vector);
		}
	}
	/*
	 * Assign interrupts for remaining intpins.
	 * Skip IOAPIC #0 intpin 0 if the type is ExtInt, since this indicates
	 * that an entry for ISA/EISA irq 0 exist, and a fallback to mixed mode
	 * due to 8254 interrupts not being delivered can reuse that low level
	 * interrupt handler.
	 */
	int_vector = 0;
	while (int_vector < APIC_INTMAPSIZE &&
	       int_to_apicintpin[int_vector].ioapic != -1)
		int_vector++;
	for (x = 0; x < nintrs && int_vector < APIC_INTMAPSIZE; x++) {
		if ((io_apic_ints[x].int_type == 0 ||
		     (io_apic_ints[x].int_type == 3 &&
		      (io_apic_ints[x].dst_apic_id != IO_TO_ID(0) ||
		       io_apic_ints[x].dst_apic_int != 0))) &&
		    io_apic_ints[x].int_vector == 0xff) {
			assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id),
					io_apic_ints[x].dst_apic_int,
					int_vector);
			int_vector++;
			/* advance to the next free IRQ slot */
			while (int_vector < APIC_INTMAPSIZE &&
			       int_to_apicintpin[int_vector].ioapic != -1)
				int_vector++;
		}
	}
}


/*
 * Record one MP-table processor entry.  Returns 1 when an AP was added
 * (caller advances its logical CPU counter), 0 for the BSP, a disabled
 * CPU, or overflow past MAXCPU.
 */
static int
processor_entry(proc_entry_ptr entry, int cpu)
{
	/* check for usability */
	if (!(entry->cpu_flags & PROCENTRY_FLAG_EN))
		return 0;

	if(entry->apic_id >= NAPICID)
		panic("CPU APIC ID out of range (0..%d)", NAPICID - 1);
	/* check for BSP flag */
	if (entry->cpu_flags & PROCENTRY_FLAG_BP) {
		boot_cpu_id = entry->apic_id;
		CPU_TO_ID(0) = entry->apic_id;
		ID_TO_CPU(entry->apic_id) = 0;
		return 0;	/* its already been counted */
	}

	/* add another AP to list, if less than max number of CPUs */
	else if (cpu < MAXCPU) {
		CPU_TO_ID(cpu) = entry->apic_id;
		ID_TO_CPU(entry->apic_id) = cpu;
		return 1;
	}

	return 0;
}


/*
 * Record one MP-table bus entry into bus_data[].  Always returns 1;
 * panics on an unrecognized bus type name.
 */
static int
bus_entry(bus_entry_ptr entry, int bus)
{
	int     x;
	char    c, name[8];

	/* encode the name into an index */
	for (x = 0; x < 6; ++x) {
		if ((c = entry->bus_type[x]) == ' ')
			break;
		name[x] = c;
	}
	name[x] = '\0';

	if ((x = lookup_bus_type(name)) == UNKNOWN_BUSTYPE)
		panic("unknown bus type: '%s'", name);

	bus_data[bus].bus_id = entry->bus_id;
	bus_data[bus].bus_type = x;

	return 1;
}


static int
io_apic_entry(io_apic_entry_ptr
entry, int apic)
{
	if (!(entry->apic_flags & IOAPICENTRY_FLAG_EN))
		return 0;

	IO_TO_ID(apic) = entry->apic_id;
	if (entry->apic_id < NAPICID)
		ID_TO_IO(entry->apic_id) = apic;

	return 1;
}


/*
 * Translate a bus-type name (e.g. "PCI", "ISA") into its type code,
 * or UNKNOWN_BUSTYPE if the name is not in bus_type_table[].
 */
static int
lookup_bus_type(char *name)
{
	int     x;

	for (x = 0; x < MAX_BUSTYPE; ++x)
		if (strcmp(bus_type_table[x].name, name) == 0)
			return bus_type_table[x].type;

	return UNKNOWN_BUSTYPE;
}


/*
 * Record one MP-table interrupt entry into io_apic_ints[intr].
 * A destination APIC ID of 255 means "all IO APICs"; in that case pick
 * the first IO APIC with enough redirection entries for the given pin.
 * Always returns 1.
 */
static int
int_entry(int_entry_ptr entry, int intr)
{
	int apic;

	io_apic_ints[intr].int_type = entry->int_type;
	io_apic_ints[intr].int_flags = entry->int_flags;
	io_apic_ints[intr].src_bus_id = entry->src_bus_id;
	io_apic_ints[intr].src_bus_irq = entry->src_bus_irq;
	if (entry->dst_apic_id == 255) {
		/* This signal goes to all IO APICS.  Select an IO APIC
		   with sufficient number of interrupt pins */
		for (apic = 0; apic < mp_napics; apic++)
			if (((io_apic_read(apic, IOAPIC_VER) &
			      IOART_VER_MAXREDIR) >> MAXREDIRSHIFT) >=
			    entry->dst_apic_int)
				break;
		if (apic < mp_napics)
			io_apic_ints[intr].dst_apic_id = IO_TO_ID(apic);
		else
			/* no IO APIC qualified; keep the wildcard id */
			io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
	} else
		io_apic_ints[intr].dst_apic_id = entry->dst_apic_id;
	io_apic_ints[intr].dst_apic_int = entry->dst_apic_int;

	return 1;
}


/*
 * Return 1 if interrupt record 'intr' originates from a bus of the
 * given type (looked up via its source bus ID), 0 otherwise.
 */
static int
apic_int_is_bus_type(int intr, int bus_type)
{
	int     bus;

	for (bus = 0; bus < mp_nbusses; ++bus)
		if ((bus_data[bus].bus_id == io_apic_ints[intr].src_bus_id)
		    && ((int) bus_data[bus].bus_type == bus_type))
			return 1;

	return 0;
}


/*
 * Given a traditional ISA INT mask, return an APIC mask.
 */
u_int
isa_apic_mask(u_int isa_mask)
{
	int isa_irq;
	int apic_pin;

#if defined(SKIP_IRQ15_REDIRECT)
	if (isa_mask == (1 << 15)) {
		printf("skipping ISA IRQ15 redirect\n");
		return isa_mask;
	}
#endif  /* SKIP_IRQ15_REDIRECT */

	/* only the lowest set bit of isa_mask is translated */
	isa_irq = ffs(isa_mask);		/* find its bit position */
	if (isa_irq == 0)			/* doesn't exist */
		return 0;
	--isa_irq;				/* make it zero based */

	apic_pin = isa_apic_irq(isa_irq);	/* look for APIC connection */
	if (apic_pin == -1)
		return 0;

	return (1 << apic_pin);			/* convert pin# to a mask */
}


/*
 * Determine which APIC pin an ISA/EISA INT is attached to.
 *
 * The INT*()/SRCBUS*() accessor macros below are shared by several
 * lookup routines and #undef'd after next_apic_irq().
 */
#define INTTYPE(I)	(io_apic_ints[(I)].int_type)
#define INTPIN(I)	(io_apic_ints[(I)].dst_apic_int)
#define INTIRQ(I)	(io_apic_ints[(I)].int_vector)
#define INTAPIC(I)	(ID_TO_IO(io_apic_ints[(I)].dst_apic_id))

#define SRCBUSIRQ(I)	(io_apic_ints[(I)].src_bus_irq)
int
isa_apic_irq(int isa_irq)
{
	int     intr;

	for (intr = 0; intr < nintrs; ++intr) {		/* check each record */
		if (INTTYPE(intr) == 0) {		/* standard INT */
			if (SRCBUSIRQ(intr) == isa_irq) {
				if (apic_int_is_bus_type(intr, ISA) ||
				    apic_int_is_bus_type(intr, EISA))
					return INTIRQ(intr);	/* found */
			}
		}
	}
	return -1;					/* NOT found */
}


/*
 * Determine which APIC pin a PCI INT is attached to.
 */
/* PCI src_bus_irq encodes device in bits 6..2 and INT line (A-D) in 1..0 */
#define SRCBUSID(I)	(io_apic_ints[(I)].src_bus_id)
#define SRCBUSDEVICE(I)	((io_apic_ints[(I)].src_bus_irq >> 2) & 0x1f)
#define SRCBUSLINE(I)	(io_apic_ints[(I)].src_bus_irq & 0x03)
int
pci_apic_irq(int pciBus, int pciDevice, int pciInt)
{
	int     intr;

	--pciInt;					/* zero based (INTA# == 1 on entry) */

	for (intr = 0; intr < nintrs; ++intr)		/* check each record */
		if ((INTTYPE(intr) == 0)		/* standard INT */
		    && (SRCBUSID(intr) == pciBus)
		    && (SRCBUSDEVICE(intr) == pciDevice)
		    && (SRCBUSLINE(intr) == pciInt))	/* a candidate IRQ */
			if (apic_int_is_bus_type(intr, PCI))
				return INTIRQ(intr);	/* exact match */

	return -1;					/* NOT found */
}


/*
 * Given an IRQ, find another interrupt record wired to the same source
 * (same bus, and for PCI the same device/INT line; for ISA/EISA the same
 * bus IRQ) but routed to a different APIC pin, and return that record's
 * IRQ.  Returns -1 if no such sibling routing exists.
 */
int
next_apic_irq(int irq)
{
	int intr, ointr;
	int bus, bustype;

	bus = 0;
	bustype = 0;
	/* find the first record assigned to 'irq' on a known bus type */
	for (intr = 0; intr < nintrs; intr++) {
		if (INTIRQ(intr) != irq || INTTYPE(intr) != 0)
			continue;
		bus = SRCBUSID(intr);
		bustype = apic_bus_type(bus);
		if (bustype != ISA &&
		    bustype != EISA &&
		    bustype != PCI)
			continue;
		break;
	}
	if (intr >= nintrs) {
		return -1;
	}
	/* look for a later record from the same source on a different pin */
	for (ointr = intr + 1; ointr < nintrs; ointr++) {
		if (INTTYPE(ointr) != 0)
			continue;
		if (bus != SRCBUSID(ointr))
			continue;
		if (bustype == PCI) {
			if (SRCBUSDEVICE(intr) != SRCBUSDEVICE(ointr))
				continue;
			if (SRCBUSLINE(intr) != SRCBUSLINE(ointr))
				continue;
		}
		if (bustype == ISA || bustype == EISA) {
			if (SRCBUSIRQ(intr) != SRCBUSIRQ(ointr))
				continue;
		}
		if (INTPIN(intr) == INTPIN(ointr))
			continue;
		break;
	}
	if (ointr >= nintrs) {
		return -1;
	}
	return INTIRQ(ointr);
}
#undef SRCBUSLINE
#undef SRCBUSDEVICE
#undef SRCBUSID
#undef SRCBUSIRQ

#undef INTPIN
#undef INTIRQ
#undef INTAPIC
#undef INTTYPE


/*
 * Reprogram the MB chipset to NOT redirect an ISA
 INTerrupt.
 *
 * XXX FIXME:
 *  Exactly what this means is unclear at this point.  It is a solution
 *  for motherboards that redirect the MBIRQ0 pin.  Generically a motherboard
 *  could route any of the ISA INTs to upper (>15) IRQ values.  But most would
 *  NOT be redirected via MBIRQ0, thus "undirect()ing" them would NOT be an
 *  option.
 */
int
undirect_isa_irq(int rirq)
{
#if defined(READY)
	if (bootverbose)
	    printf("Freeing redirected ISA irq %d.\n", rirq);
	/** FIXME: tickle the MB redirector chip */
	return -1;
#else
	/* not implemented: report and claim success */
	if (bootverbose)
	    printf("Freeing (NOT implemented) redirected ISA irq %d.\n", rirq);
	return 0;
#endif  /* READY */
}


/*
 * Reprogram the MB chipset to NOT redirect a PCI INTerrupt
 */
int
undirect_pci_irq(int rirq)
{
#if defined(READY)
	if (bootverbose)
		printf("Freeing redirected PCI irq %d.\n", rirq);

	/** FIXME: tickle the MB redirector chip */
	return -1;
#else
	/* not implemented: report and claim success */
	if (bootverbose)
		printf("Freeing (NOT implemented) redirected PCI irq %d.\n",
		       rirq);
	return 0;
#endif  /* READY */
}


/*
 * given a bus ID, return:
 *  the bus type if found
 *  -1 if NOT found
 */
int
apic_bus_type(int id)
{
	int     x;

	for (x = 0; x < mp_nbusses; ++x)
		if (bus_data[x].bus_id == id)
			return bus_data[x].bus_type;

	return -1;
}


/*
 * given a LOGICAL APIC# and pin#, return:
 *  the associated src bus ID if found
 *  -1 if NOT found
 */
int
apic_src_bus_id(int apic, int pin)
{
	int     x;

	/* search each of the possible INTerrupt sources */
	for (x = 0; x < nintrs; ++x)
		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
		    (pin == io_apic_ints[x].dst_apic_int))
			return (io_apic_ints[x].src_bus_id);

	return -1;		/* NOT found */
}


/*
 *
 * given a LOGICAL APIC# and pin#, return:
 *  the associated src bus IRQ if found
 *  -1 if NOT found
 */
int
apic_src_bus_irq(int apic, int pin)
{
	int     x;

	for (x = 0; x < nintrs; x++)
		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
		    (pin == io_apic_ints[x].dst_apic_int))
			return (io_apic_ints[x].src_bus_irq);

	return -1;		/* NOT found */
}


/*
 * given a LOGICAL APIC# and pin#, return:
 *  the associated INTerrupt type if found
 *  -1 if NOT found
 */
int
apic_int_type(int apic, int pin)
{
	int     x;

	/* search each of the possible INTerrupt sources */
	for (x = 0; x < nintrs; ++x)
		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
		    (pin == io_apic_ints[x].dst_apic_int))
			return (io_apic_ints[x].int_type);

	return -1;		/* NOT found */
}

/*
 * Given a LOGICAL APIC# and pin#, return the assigned IRQ (int_vector),
 * or -1 if the pin has no assignment (0xff).  Cross-checks the
 * int_to_apicintpin[] reverse map and panics on inconsistency.
 */
int
apic_irq(int apic, int pin)
{
	int x;
	int res;

	for (x = 0; x < nintrs; ++x)
		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
		    (pin == io_apic_ints[x].dst_apic_int)) {
			res = io_apic_ints[x].int_vector;
			if (res == 0xff)
				return -1;
			/* sanity: forward and reverse maps must agree */
			if (apic != int_to_apicintpin[res].ioapic)
				panic("apic_irq: inconsistent table");
			if (pin != int_to_apicintpin[res].int_pin)
				panic("apic_irq inconsistent table (2)");
			return res;
		}
	return -1;
}


/*
 * given a LOGICAL APIC# and pin#, return:
 *  the associated trigger mode if found (MP-table int_flags bits 3:2)
 *  -1 if NOT found
 */
int
apic_trigger(int apic, int pin)
{
	int     x;

	/* search each of the possible INTerrupt sources */
	for (x = 0; x < nintrs; ++x)
		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
		    (pin == io_apic_ints[x].dst_apic_int))
			return ((io_apic_ints[x].int_flags >> 2) & 0x03);

	return -1;		/* NOT found */
}


/*
 * given a LOGICAL APIC# and pin#, return:
 *  the associated 'active' level if found (MP-table int_flags bits 1:0)
 *  -1 if NOT found
 */
int
apic_polarity(int apic, int pin)
{
	int     x;

	/* search each of the possible INTerrupt sources */
	for (x = 0; x < nintrs; ++x)
		if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) &&
		    (pin == io_apic_ints[x].dst_apic_int))
			return (io_apic_ints[x].int_flags & 0x03);

	return -1;		/* NOT found */
}


/*
 * set data according to MP defaults (MP spec chapter 5 default
 * configurations, used when no MP configuration table is present;
 * always exactly 2 CPUs and, with APIC_IO, one IO APIC)
 * FIXME: probably not complete yet...
 */
static void
default_mp_table(int type)
{
	int     ap_cpu_id;
#if defined(APIC_IO)
	int     io_apic_id;
	int     pin;
#endif	/* APIC_IO */

#if 0
	printf("  MP default config type: %d\n", type);
	switch (type) {
	case 1:
		printf("   bus: ISA, APIC: 82489DX\n");
		break;
	case 2:
		printf("   bus: EISA, APIC: 82489DX\n");
		break;
	case 3:
		printf("   bus: EISA, APIC: 82489DX\n");
		break;
	case 4:
		printf("   bus: MCA, APIC: 82489DX\n");
		break;
	case 5:
		printf("   bus: ISA+PCI, APIC: Integrated\n");
		break;
	case 6:
		printf("   bus: EISA+PCI, APIC: Integrated\n");
		break;
	case 7:
		printf("   bus: MCA+PCI, APIC: Integrated\n");
		break;
	default:
		printf("   future type\n");
		break;
		/* NOTREACHED */
	}
#endif	/* 0 */

	boot_cpu_id = (lapic.id & APIC_ID_MASK) >> 24;
	/* the single AP gets whichever of IDs 0/1 the BSP doesn't have */
	ap_cpu_id = (boot_cpu_id == 0) ? 1 : 0;

	/* BSP */
	CPU_TO_ID(0) = boot_cpu_id;
	ID_TO_CPU(boot_cpu_id) = 0;

	/* one and only AP */
	CPU_TO_ID(1) = ap_cpu_id;
	ID_TO_CPU(ap_cpu_id) = 1;

#if defined(APIC_IO)
	/* one and only IO APIC */
	io_apic_id = (io_apic_read(0, IOAPIC_ID) & APIC_ID_MASK) >> 24;

	/*
	 * sanity check, refer to MP spec section 3.6.6, last paragraph
	 * necessary as some hardware isn't properly setting up the IO APIC
	 */
#if defined(REALLY_ANAL_IOAPICID_VALUE)
	if (io_apic_id != 2) {
#else
	if ((io_apic_id == 0) || (io_apic_id == 1) || (io_apic_id == 15)) {
#endif	/* REALLY_ANAL_IOAPICID_VALUE */
		io_apic_set_id(0, 2);
		io_apic_id = 2;
	}
	IO_TO_ID(0) = io_apic_id;
	ID_TO_IO(io_apic_id) = 0;
#endif	/* APIC_IO */

	/*
	 * fill out bus entries from the default_data table
	 * (NOTE(review): default_data layout — [1]/[3] bus ids, [2]/[4]
	 * bus types — is declared elsewhere in this file; verify there.)
	 */
	switch (type) {
	case 1:
	case 2:
	case 3:
	case 4:
	case 5:
	case 6:
	case 7:
		bus_data[0].bus_id = default_data[type - 1][1];
		bus_data[0].bus_type = default_data[type - 1][2];
		bus_data[1].bus_id = default_data[type - 1][3];
		bus_data[1].bus_type = default_data[type - 1][4];
		break;

	/* case 4: case 7:		   MCA NOT supported */
	default:		/* illegal/reserved */
		panic("BAD default MP config: %d", type);
		/* NOTREACHED */
	}

#if defined(APIC_IO)
	/* general cases from MP v1.4, table 5-2 */
	for (pin = 0; pin < 16; ++pin) {
		io_apic_ints[pin].int_type = 0;
		io_apic_ints[pin].int_flags = 0x05;	/* edge/active-hi */
		io_apic_ints[pin].src_bus_id = 0;
		io_apic_ints[pin].src_bus_irq = pin;	/* IRQ2 caught below */
		io_apic_ints[pin].dst_apic_id = io_apic_id;
		io_apic_ints[pin].dst_apic_int = pin;	/* 1-to-1 */
	}

	/* special cases from MP v1.4, table 5-2 */
	if (type == 2) {
		io_apic_ints[2].int_type = 0xff;	/* N/C */
		io_apic_ints[13].int_type = 0xff;	/* N/C */
#if !defined(APIC_MIXED_MODE)
		/** FIXME: ??? */
		panic("sorry, can't support type 2 default yet");
#endif	/* APIC_MIXED_MODE */
	}
	else
		io_apic_ints[2].src_bus_irq = 0;	/* ISA IRQ0 is on APIC INT 2 */

	if (type == 7)
		io_apic_ints[0].int_type = 0xff;	/* N/C */
	else
		io_apic_ints[0].int_type = 3;	/* vectored 8259 */
#endif	/* APIC_IO */
}


/*
 * initialize all the SMP locks
 */

/* critical region around IO APIC, apic_imen */
struct simplelock	imen_lock;

/* critical region around splxx(), cpl, cml, cil, ipending */
struct simplelock	cpl_lock;

/* Make FAST_INTR() routines sequential */
struct simplelock	fast_intr_lock;

/* critical region around INTR() routines */
struct simplelock	intr_lock;

/* lock region used by kernel profiling */
struct simplelock	mcount_lock;

#ifdef USE_COMLOCK
/* locks com (tty) data/hardware accesses: a FASTINTR() */
struct simplelock	com_lock;
#endif /* USE_COMLOCK */

/* lock around the MP rendezvous */
static struct simplelock smp_rv_lock;

/* only 1 CPU can panic at a time :) */
struct simplelock panic_lock;

static void
init_locks(void)
{
	s_lock_init((struct simplelock*)&mcount_lock);

	s_lock_init((struct simplelock*)&fast_intr_lock);
	s_lock_init((struct simplelock*)&intr_lock);
	s_lock_init((struct simplelock*)&imen_lock);
	s_lock_init((struct simplelock*)&cpl_lock);
	s_lock_init(&smp_rv_lock);
	s_lock_init(&panic_lock);

#ifdef USE_COMLOCK
	s_lock_init((struct simplelock*)&com_lock);
#endif /* USE_COMLOCK */

	s_lock_init(&ap_boot_lock);
}

/*
 * start each AP in our list (APs are numbered 1..mp_naps; the BSP is 0).
 * Returns the number of APs actually started.
 */
static int
start_all_aps(u_int boot_addr)
{
	int     x, i, pg;
	u_char  mpbiosreason;
	u_long  mpbioswarmvec;
	struct globaldata *gd;
	char    *stack;

	POSTCODE(START_ALL_APS_POST);
	/* initialize BSP's local APIC */
	apic_initialize();
	bsp_apic_ready = 1;

	/* install the AP 1st level boot code */
	install_ap_tramp(boot_addr);


	/* save the current value of the warm-start vector */
	mpbioswarmvec = *((u_long *) WARMBOOT_OFF);
#ifndef PC98
	outb(CMOS_REG, BIOS_RESET);
	mpbiosreason = inb(CMOS_DATA);
#endif

	/* record BSP in CPU map */
	all_cpus = 1;

	/* set up 0 -> 4MB P==V mapping for AP boot */
	*(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME);
	invltlb();

	/* start each AP */
	for (x = 1; x <= mp_naps; ++x) {

		/* This is a bit verbose, it will go away soon.  */

		/* first page of AP's private space */
		pg = x * i386_btop(sizeof(struct privatespace));

		/* allocate a new private data page */
		gd = (struct globaldata *)kmem_alloc(kernel_map, PAGE_SIZE);

		/* wire it into the private page table page */
		SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(gd));

		/* allocate and set up an idle stack data page */
		stack = (char *)kmem_alloc(kernel_map, UPAGES*PAGE_SIZE);
		/* idle stack pages live at slots pg+5 .. pg+5+UPAGES-1 */
		for (i = 0; i < UPAGES; i++)
			SMPpt[pg + 5 + i] = (pt_entry_t)
			    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));

		SMPpt[pg + 1] = 0;		/* *prv_CMAP1 */
		SMPpt[pg + 2] = 0;		/* *prv_CMAP2 */
		SMPpt[pg + 3] = 0;		/* *prv_CMAP3 */
		SMPpt[pg + 4] = 0;		/* *prv_PMAP1 */

		/* prime data page for it to use */
		SLIST_INSERT_HEAD(&cpuhead, gd, gd_allcpu);
		gd->gd_cpuid = x;
		gd->gd_cpu_lockid = x << 24;

		/* setup a vector to our boot code */
		*((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET;
		*((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4);
#ifndef PC98
		outb(CMOS_REG, BIOS_RESET);
		outb(CMOS_DATA, BIOS_WARM);	/* 'warm-start' */
#endif

		bootSTK = &SMP_prvspace[x].idlestack[UPAGES*PAGE_SIZE];
		bootAP = x;

		/* attempt to start the Application Processor */
		CHECK_INIT(99);	/* setup checkpoints */
		if (!start_ap(x, boot_addr)) {
			printf("AP #%d (PHY# %d) failed!\n", x, CPU_TO_ID(x));
			CHECK_PRINT("trace");	/* show checkpoints */
			/* better panic as the AP may be running loose */
			printf("panic y/n? [y] ");
			if (cngetc() != 'n')
				panic("bye-bye");
		}
		CHECK_PRINT("trace");	/* show checkpoints */

		/* record its version info */
		cpu_apic_versions[x] = cpu_apic_versions[0];

		all_cpus |= (1 << x);	/* record AP in CPU map */
	}

	/* build our map of 'other' CPUs */
	PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));

	/* fill in our (BSP) APIC version */
	cpu_apic_versions[0] = lapic.version;

	/* restore the warmstart vector */
	*(u_long *) WARMBOOT_OFF = mpbioswarmvec;
#ifndef PC98
	outb(CMOS_REG, BIOS_RESET);
	outb(CMOS_DATA, mpbiosreason);
#endif

	/*
	 * Set up the idle context for the BSP.  Similar to above except
	 * that some was done by locore, some by pmap.c and some is implicit
	 * because the BSP is cpu#0 and the page is initially zero, and also
	 * because we can refer to variables by name on the BSP..
	 */

	/* Allocate and setup BSP idle stack */
	stack = (char *)kmem_alloc(kernel_map, UPAGES * PAGE_SIZE);
	for (i = 0; i < UPAGES; i++)
		SMPpt[5 + i] = (pt_entry_t)
		    (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack));

	/* tear down the temporary 0 -> 4MB P==V mapping */
	*(int *)PTD = 0;
	pmap_set_opt();

	/* number of APs actually started */
	return mp_ncpus - 1;
}


/*
 * load the 1st level AP boot code into base memory.
 */

/* targets for relocation */
extern void bigJump(void);
extern void bootCodeSeg(void);
extern void bootDataSeg(void);
extern void MPentry(void);
extern u_int MP_GDT;
extern u_int mp_gdtbase;

/*
 * Copy the real-mode AP trampoline (mpboot.s) to 'boot_addr' in base
 * memory and patch its embedded addresses (lgdt argument, ljmp target,
 * code/data segment values) for that location.
 */
static void
install_ap_tramp(u_int boot_addr)
{
	int     x;
	int     size = *(int *) ((u_long) & bootMP_size);
	u_char *src = (u_char *) ((u_long) bootMP);
	u_char *dst = (u_char *) boot_addr + KERNBASE;
	u_int   boot_base = (u_int) bootMP;
	u_int8_t *dst8;
	u_int16_t *dst16;
	u_int32_t *dst32;

	POSTCODE(INSTALL_AP_TRAMP_POST);

	/* copy the trampoline byte-for-byte into base memory */
	for (x = 0; x < size; ++x)
		*dst++ = *src++;

	/*
	 * modify addresses in code we just moved to basemem. unfortunately we
	 * need fairly detailed info about mpboot.s for this to work.  changes
	 * to mpboot.s might require changes here.
	 */

	/* boot code is located in KERNEL space */
	dst = (u_char *) boot_addr + KERNBASE;

	/* modify the lgdt arg */
	dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base));
	*dst32 = boot_addr + ((u_int) & MP_GDT - boot_base);

	/* modify the ljmp target for MPentry() (+1 skips the opcode byte) */
	dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1);
	*dst32 = ((u_int) MPentry - KERNBASE);

	/* modify the target for boot code segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_addr & 0xffff;
	*dst8 = ((u_int) boot_addr >> 16) & 0xff;

	/* modify the target for boot data segment */
	dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base));
	dst8 = (u_int8_t *) (dst16 + 1);
	*dst16 = (u_int) boot_addr & 0xffff;
	*dst8 = ((u_int) boot_addr >> 16) & 0xff;
}


/*
 * this function starts the AP (application processor) identified
 * by the APIC ID 'physicalCpu'.
 It does quite a "song and dance"
 * to accomplish this.  This is necessary because of the nuances
 * of the different hardware we might encounter.  It ain't pretty,
 * but it seems to work.
 *
 * Returns 1 on success (the AP incremented mp_ncpus within the timeout),
 * 0 on failure.  Implements the INIT / INIT-deassert / STARTUP / STARTUP
 * IPI sequence via the local APIC ICR.
 */
static int
start_ap(int logical_cpu, u_int boot_addr)
{
	int     physical_cpu;
	int     vector;
	int     cpus;
	u_long  icr_lo, icr_hi;

	POSTCODE(START_AP_POST);

	/* get the PHYSICAL APIC ID# */
	physical_cpu = CPU_TO_ID(logical_cpu);

	/* calculate the vector (STARTUP vector = page number of boot code) */
	vector = (boot_addr >> 12) & 0xff;

	/* used as a watchpoint to signal AP startup */
	cpus = mp_ncpus;

	/*
	 * first we do an INIT/RESET IPI this INIT IPI might be run, reseting
	 * and running the target CPU. OR this INIT IPI might be latched (P5
	 * bug), CPU waiting for STARTUP IPI. OR this INIT IPI might be
	 * ignored.
	 */

	/* setup the address for the target AP */
	icr_hi = lapic.icr_hi & ~APIC_ID_MASK;
	icr_hi |= (physical_cpu << 24);
	lapic.icr_hi = icr_hi;

	/* do an INIT IPI: assert RESET */
	/* 0x0000c500: level-assert, level trigger, INIT delivery mode */
	icr_lo = lapic.icr_lo & 0xfff00000;
	lapic.icr_lo = icr_lo | 0x0000c500;

	/* wait for pending status end */
	while (lapic.icr_lo & APIC_DELSTAT_MASK)
		 /* spin */ ;

	/* do an INIT IPI: deassert RESET */
	lapic.icr_lo = icr_lo | 0x00008500;

	/* wait for pending status end */
	u_sleep(10000);		/* wait ~10mS */
	while (lapic.icr_lo & APIC_DELSTAT_MASK)
		 /* spin */ ;

	/*
	 * next we do a STARTUP IPI: the previous INIT IPI might still be
	 * latched, (P5 bug) this 1st STARTUP would then terminate
	 * immediately, and the previously started INIT IPI would continue. OR
	 * the previous INIT IPI has already run. and this STARTUP IPI will
	 * run. OR the previous INIT IPI was ignored. and this STARTUP IPI
	 * will run.
	 */

	/* do a STARTUP IPI (0x600: STARTUP delivery mode + vector) */
	lapic.icr_lo = icr_lo | 0x00000600 | vector;
	while (lapic.icr_lo & APIC_DELSTAT_MASK)
		 /* spin */ ;
	u_sleep(200);		/* wait ~200uS */

	/*
	 * finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF
	 * the previous STARTUP IPI was cancelled by a latched INIT IPI. OR
	 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is
	 * recognized after hardware RESET or INIT IPI.
	 */

	lapic.icr_lo = icr_lo | 0x00000600 | vector;
	while (lapic.icr_lo & APIC_DELSTAT_MASK)
		 /* spin */ ;
	u_sleep(200);		/* wait ~200uS */

	/* wait for it to start */
	set_apic_timer(5000000);/* == 5 seconds */
	while (read_apic_timer())
		if (mp_ncpus > cpus)
			return 1;	/* return SUCCESS */

	return 0;		/* return FAILURE */
}

/*
 * Flush the TLB on all other CPU's
 *
 * XXX: Needs to handshake and wait for completion before proceding.
 */
void
smp_invltlb(void)
{
#if defined(APIC_IO)
	if (smp_started && invltlb_ok)
		all_but_self_ipi(XINVLTLB_OFFSET);
#endif  /* APIC_IO */
}

/*
 * Invalidate one page's TLB entry locally, then broadcast a full
 * TLB shootdown to the other CPUs.
 */
void
invlpg(u_int addr)
{
	__asm   __volatile("invlpg (%0)"::"r"(addr):"memory");

	/* send a message to the other CPUs */
	smp_invltlb();
}

/*
 * Flush the local TLB (CR3 reload), then broadcast a TLB shootdown
 * to the other CPUs.
 */
void
invltlb(void)
{
	u_long  temp;

	/*
	 * This should be implemented as load_cr3(rcr3()) when load_cr3() is
	 * inlined.
	 */
	__asm __volatile("movl %%cr3, %0; movl %0, %%cr3":"=r"(temp) :: "memory");

	/* send a message to the other CPUs */
	smp_invltlb();
}


/*
 * When called the executing CPU will send an IPI to all other CPUs
 *  requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 *  - Signals all CPUs in map to stop.
 *  - Waits for each to stop.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 *
 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
 *            from executing at same time.
 */
int
stop_cpus(u_int map)
{
	int count = 0;

	if (!smp_started)
		return 0;

	/* send the Xcpustop IPI to all CPUs in map */
	selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED);

	/* NOTE(review): bounded spin, 100000 iterations is arbitrary */
	while (count++ < 100000 && (stopped_cpus & map) != map)
		/* spin */ ;

#ifdef DIAGNOSTIC
	if ((stopped_cpus & map) != map)
		printf("Warning: CPUs 0x%x did not stop!\n",
		    (~(stopped_cpus & map)) & map);
#endif

	return 1;
}


/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 *  - Signals all CPUs in map to restart.
 *  - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
int
restart_cpus(u_int map)
{
	int count = 0;

	if (!smp_started)
		return 0;

	started_cpus = map;		/* signal other cpus to restart */

	/* wait for each to clear its bit (bounded spin, see stop_cpus) */
	while (count++ < 100000 && (stopped_cpus & map) != 0)
		/* spin */ ;

#ifdef DIAGNOSTIC
	if ((stopped_cpus & map) != 0)
		printf("Warning: CPUs 0x%x did not restart!\n",
		    (~(stopped_cpus & map)) & map);
#endif

	return 1;
}

int smp_active = 0;	/* are the APs allowed to run? */
SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RW, &smp_active, 0, "");

/* XXX maybe should be hw.ncpu */
static int smp_cpus = 1;	/* how many cpu's running */
SYSCTL_INT(_machdep, OID_AUTO, smp_cpus, CTLFLAG_RD, &smp_cpus, 0, "");

int invltlb_ok = 0;	/* throttle smp_invltlb() till safe */
SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, "");

/* Warning: Do not staticize.
 Used from swtch.s */
int do_page_zero_idle = 1; /* bzero pages for fun and profit in idleloop */
SYSCTL_INT(_machdep, OID_AUTO, do_page_zero_idle, CTLFLAG_RW,
	   &do_page_zero_idle, 0, "");

/* Is forwarding of a interrupt to the CPU holding the ISR lock enabled ? */
int forward_irq_enabled = 1;
SYSCTL_INT(_machdep, OID_AUTO, forward_irq_enabled, CTLFLAG_RW,
	   &forward_irq_enabled, 0, "");

/* Enable forwarding of a signal to a process running on a different CPU */
static int forward_signal_enabled = 1;
SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
	   &forward_signal_enabled, 0, "");

/* Enable forwarding of roundrobin to all other cpus */
static int forward_roundrobin_enabled = 1;
SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
	   &forward_roundrobin_enabled, 0, "");

/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the AP's out of the pen.  Runs on each AP; does not return
 * (ends by entering the scheduler via cpu_throw()).
 */
void ap_init(void);

void
ap_init(void)
{
	u_int	apic_id;

	/* lock against other AP's that are waking up */
	s_lock(&ap_boot_lock);

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	smp_cpus++;

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
	lidt(&r_idt);
#endif

	/* Build our map of 'other' CPUs. */
	PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));

	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up FPU state on the AP */
	npxinit(__INITIAL_NPXCW__);

	/* A quick check from sanity claus */
	apic_id = (apic_id_to_logical[(lapic.id & 0x0f000000) >> 24]);
	if (PCPU_GET(cpuid) != apic_id) {
		printf("SMP: cpuid = %d\n", PCPU_GET(cpuid));
		printf("SMP: apic_id = %d\n", apic_id);
		printf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
		panic("cpuid mismatch! boom!!");
	}

	/* Init local apic for irq's */
	apic_initialize();

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	/*
	 * Activate smp_invltlb, although strictly speaking, this isn't
	 * quite correct yet.  We should have a bitfield for cpus willing
	 * to accept TLB flush IPI's or something and sync them.
	 */
	if (smp_cpus == mp_ncpus) {
		/* last AP up: open the floodgates */
		invltlb_ok = 1;
		smp_started = 1; /* enable IPI's, tlb shootdown, freezes etc */
		smp_active = 1;	 /* historic */
	}

	/* let other AP's wake up now */
	s_unlock(&ap_boot_lock);

	/* wait until all the AP's are up */
	while (smp_started == 0)
		; /* nothing */

	/*
	 * Set curproc to our per-cpu idleproc so that mutexes have
	 * something unique to lock with.
	 */
	PCPU_SET(curproc, PCPU_GET(idleproc));

	microuptime(PCPU_PTR(switchtime));
	PCPU_SET(switchticks, ticks);

	/* ok, now grab sched_lock and enter the scheduler */
	enable_intr();
	mtx_enter(&sched_lock, MTX_SPIN);
	cpu_throw();	/* doesn't return */

	panic("scheduler returned us to ap_init");
}

#ifdef BETTER_CLOCK

#define CHECKSTATE_USER	0
#define CHECKSTATE_SYS	1
#define CHECKSTATE_INTR	2

/* Do not staticize.
 Used from apic_vector.s */
struct proc*	checkstate_curproc[MAXCPU];	/* proc each CPU was running at probe */
int		checkstate_cpustate[MAXCPU];	/* CHECKSTATE_{USER,SYS,INTR} per CPU */
u_long		checkstate_pc[MAXCPU];		/* interrupted pc per CPU */

/*
 * Scale a pc offset into a profiling-buffer index using the process's
 * pr_scale (16.16 fixed point — presumably; mirrors addupc_intr, TODO
 * confirm).  The low bit is forced clear by the final "& ~1".
 */
#define PC_TO_INDEX(pc, prof) \
        ((int)(((u_quad_t)((pc) - (prof)->pr_off) * \
            (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)

/*
 * Charge a user-profiling tick to process p, which was running on remote
 * CPU `id'.  If the saved pc falls inside p's profiled range, record the
 * pc, mark the process P_OWEUPC so the owed buffer update happens later,
 * and set CPU id's bit in *astmap so the caller sends it an AST IPI.
 */
static void
addupc_intr_forwarded(struct proc *p, int id, int *astmap)
{
	int i;
	struct uprof *prof;
	u_long pc;

	pc = checkstate_pc[id];
	prof = &p->p_stats->p_prof;
	if (pc >= prof->pr_off &&
	    (i = PC_TO_INDEX(pc, prof)) < prof->pr_size) {
		if ((p->p_flag & P_OWEUPC) == 0) {
			prof->pr_addr = pc;
			prof->pr_ticks = 1;
			p->p_flag |= P_OWEUPC;
		}
		*astmap |= (1 << id);
	}
}

/*
 * Do the statclock() bookkeeping for remote CPU `id' on its behalf:
 * charge a tick to the process it was running when it answered the
 * checkstate probe, update cp_time[], optional GPROF kernel histogram,
 * call schedclock(), and update rusage integrals.  Bits are set in
 * *astmap for CPUs that need an AST IPI.  When pscnt > 1 only the
 * profiling updates run — presumably to avoid double-charging ticks on
 * nested clock interrupts; confirm against statclock().
 *
 * NOTE(review): p is dereferenced (p->p_ithd, and p->p_stats at the end)
 * without a NULL check, yet the CHECKSTATE_INTR arm guards "if (p)" —
 * inconsistent; verify checkstate_curproc[] can never be NULL here.
 */
static void
forwarded_statclock(int id, int pscnt, int *astmap)
{
	struct pstats *pstats;
	long rss;
	struct rusage *ru;
	struct vmspace *vm;
	int cpustate;
	struct proc *p;
#ifdef GPROF
	register struct gmonparam *g;
	int i;
#endif

	p = checkstate_curproc[id];
	cpustate = checkstate_cpustate[id];

	/* XXX override the probed state for ithreads and the idle proc */
	if (p->p_ithd)
		cpustate = CHECKSTATE_INTR;
	else if (p == SMP_prvspace[id].globaldata.gd_idleproc)
		cpustate = CHECKSTATE_SYS;

	switch (cpustate) {
	case CHECKSTATE_USER:
		if (p->p_flag & P_PROFIL)
			addupc_intr_forwarded(p, id, astmap);
		if (pscnt > 1)
			return;
		p->p_uticks++;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE]++;
		else
			cp_time[CP_USER]++;
		break;
	case CHECKSTATE_SYS:
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = checkstate_pc[id] - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
		if (pscnt > 1)
			return;

		p->p_sticks++;
		if (p == SMP_prvspace[id].globaldata.gd_idleproc)
			cp_time[CP_IDLE]++;
		else
			cp_time[CP_SYS]++;
		break;
	case CHECKSTATE_INTR:
	default:
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = checkstate_pc[id] - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
		if (pscnt > 1)
			return;
		if (p)
			p->p_iticks++;
		cp_time[CP_INTR]++;
	}
	/* feed the scheduler's cpu-usage estimator */
	schedclock(p);

	/* Update resource usage integrals and maximums. */
	if ((pstats = p->p_stats) != NULL &&
	    (ru = &pstats->p_ru) != NULL &&
	    (vm = p->p_vmspace) != NULL) {
		ru->ru_ixrss += pgtok(vm->vm_tsize);
		ru->ru_idrss += pgtok(vm->vm_dsize);
		ru->ru_isrss += pgtok(vm->vm_ssize);
		rss = pgtok(vmspace_resident_count(vm));
		if (ru->ru_maxrss < rss)
			ru->ru_maxrss = rss;
	}
}

/*
 * Forward statclock() processing to the other CPUs: IPI them to record
 * what they are running (step 1), then do their tick accounting from
 * this CPU (step 2) and AST any CPU that needs follow-up work.
 */
void
forward_statclock(int pscnt)
{
	int map;
	int id;
	int i;

	/* Kludge. We don't yet have separate locks for the interrupts
	 * and the kernel. This means that we cannot let the other processors
	 * handle complex interrupts while inhibiting them from entering
	 * the kernel in a non-interrupt context.
	 *
	 * What we can do, without changing the locking mechanisms yet,
	 * is letting the other processors handle a very simple interrupt
	 * (which determines the processor states), and do the main
	 * work ourself.
	 */

	/* don't touch remote CPUs before SMP is really up */
	if (!smp_started || !invltlb_ok || cold || panicstr)
		return;

	/* Step 1: Probe state (user, cpu, interrupt, spinlock, idle ) */

	map = PCPU_GET(other_cpus) & ~stopped_cpus;
	checkstate_probed_cpus = 0;
	if (map != 0)
		selected_apic_ipi(map,
		    XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);

	/* bounded spin until every probed CPU has reported its state */
	i = 0;
	while (checkstate_probed_cpus != map) {
		/* spin */
		i++;
		if (i == 100000) {
#ifdef BETTER_CLOCK_DIAGNOSTIC
			printf("forward_statclock: checkstate %x\n",
			    checkstate_probed_cpus);
#endif
			break;
		}
	}

	/*
	 * Step 2: walk through other processors processes, update ticks and
	 * profiling info.
	 */

	map = 0;
	for (id = 0; id < mp_ncpus; id++) {
		if (id == PCPU_GET(cpuid))
			continue;
		if (((1 << id) & checkstate_probed_cpus) == 0)
			continue;
		forwarded_statclock(id, pscnt, &map);
	}
	/* AST the CPUs that owe follow-up work (profiling etc.) */
	if (map != 0) {
		checkstate_need_ast |= map;
		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
		i = 0;
		while ((checkstate_need_ast & map) != 0) {
			/* spin until each CPU clears its bit, with a bound */
			i++;
			if (i > 100000) {
#ifdef BETTER_CLOCK_DIAGNOSTIC
				printf("forward_statclock: dropped ast 0x%x\n",
				    checkstate_need_ast & map);
#endif
				break;
			}
		}
	}
}

/*
 * Forward hardclock() processing to the other CPUs, same two-step scheme
 * as forward_statclock(): probe remote state via IPI, then decrement the
 * remote processes' virtual/profiling interval timers (posting SIGVTALRM
 * / SIGPROF) from this CPU.  If stathz == 0, hardclock also drives the
 * statistics clock, so forwarded_statclock() is run here as well.
 */
void
forward_hardclock(int pscnt)
{
	int map;
	int id;
	struct proc *p;
	struct pstats *pstats;
	int i;

	/* Kludge. We don't yet have separate locks for the interrupts
	 * and the kernel. This means that we cannot let the other processors
	 * handle complex interrupts while inhibiting them from entering
	 * the kernel in a non-interrupt context.
	 *
	 * What we can do, without changing the locking mechanisms yet,
	 * is letting the other processors handle a very simple interrupt
	 * (which determines the processor states), and do the main
	 * work ourself.
	 */

	if (!smp_started || !invltlb_ok || cold || panicstr)
		return;

	/* Step 1: Probe state (user, cpu, interrupt, spinlock, idle) */

	map = PCPU_GET(other_cpus) & ~stopped_cpus;
	checkstate_probed_cpus = 0;
	if (map != 0)
		selected_apic_ipi(map,
		    XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);

	/* bounded spin until every probed CPU has reported its state */
	i = 0;
	while (checkstate_probed_cpus != map) {
		/* spin */
		i++;
		if (i == 100000) {
#ifdef BETTER_CLOCK_DIAGNOSTIC
			printf("forward_hardclock: checkstate %x\n",
			    checkstate_probed_cpus);
#endif
			break;
		}
	}

	/*
	 * Step 2: walk through other processors processes, update virtual
	 * timer and profiling timer. If stathz == 0, also update ticks and
	 * profiling info.
	 */

	map = 0;
	for (id = 0; id < mp_ncpus; id++) {
		if (id == PCPU_GET(cpuid))
			continue;
		if (((1 << id) & checkstate_probed_cpus) == 0)
			continue;
		p = checkstate_curproc[id];
		if (p) {
			pstats = p->p_stats;
			/* ITIMER_VIRTUAL only runs down while in user mode */
			if (checkstate_cpustate[id] == CHECKSTATE_USER &&
			    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
			    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
				psignal(p, SIGVTALRM);
				map |= (1 << id);
			}
			if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
			    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
				psignal(p, SIGPROF);
				map |= (1 << id);
			}
		}
		if (stathz == 0) {
			forwarded_statclock( id, pscnt, &map);
		}
	}
	/* AST the CPUs that owe follow-up work (signal delivery etc.) */
	if (map != 0) {
		checkstate_need_ast |= map;
		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
		i = 0;
		while ((checkstate_need_ast & map) != 0) {
			/* spin until each CPU clears its bit, with a bound */
			i++;
			if (i > 100000) {
#ifdef BETTER_CLOCK_DIAGNOSTIC
				printf("forward_hardclock: dropped ast 0x%x\n",
				    checkstate_need_ast & map);
#endif
				break;
			}
		}
	}
}

#endif /* BETTER_CLOCK */

/*
 * Make a process running on another CPU notice a freshly-posted signal:
 * find the CPU it is on, mark that CPU in checkstate_need_ast, and IPI
 * it.  Loops because the process may migrate while we wait; returns once
 * the process is not SRUN, not on any CPU (p_oncpu == 0xff), or is still
 * on the same CPU after the AST was taken.
 */
void
forward_signal(struct proc *p)
{
	int map;
	int id;
	int i;

	/* Kludge. We don't yet have separate locks for the interrupts
	 * and the kernel. This means that we cannot let the other processors
	 * handle complex interrupts while inhibiting them from entering
	 * the kernel in a non-interrupt context.
	 *
	 * What we can do, without changing the locking mechanisms yet,
	 * is letting the other processors handle a very simple interrupt
	 * (which determines the processor states), and do the main
	 * work ourself.
	 */

	if (!smp_started || !invltlb_ok || cold || panicstr)
		return;
	if (!forward_signal_enabled)
		return;
	/* sched_lock protects p_stat and p_oncpu */
	mtx_enter(&sched_lock, MTX_SPIN);
	while (1) {
		if (p->p_stat != SRUN) {
			mtx_exit(&sched_lock, MTX_SPIN);
			return;
		}
		id = p->p_oncpu;
		mtx_exit(&sched_lock, MTX_SPIN);
		if (id == 0xff)		/* not currently on any CPU */
			return;
		map = (1<<id);
		checkstate_need_ast |= map;
		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
		i = 0;
		while ((checkstate_need_ast & map) != 0) {
			/* bounded spin until the target clears its AST bit */
			i++;
			if (i > 100000) {
#if 0
				printf("forward_signal: dropped ast 0x%x\n",
				    checkstate_need_ast & map);
#endif
				break;
			}
		}
		mtx_enter(&sched_lock, MTX_SPIN);
		/* still on the same CPU? then the AST reached it — done.
		 * Otherwise it migrated; retry on its new CPU. */
		if (id == p->p_oncpu) {
			mtx_exit(&sched_lock, MTX_SPIN);
			return;
		}
	}
}

/*
 * Ask every other CPU to reschedule (round-robin): set their bits in
 * resched_cpus and send them an AST IPI.
 *
 * NOTE(review): unlike forward_signal(), this does NOT OR `map' into
 * checkstate_need_ast before spinning on it, so the wait loop only does
 * anything if bits happen to be set already — confirm this is intended.
 */
void
forward_roundrobin(void)
{
	u_int map;
	int i;

	if (!smp_started || !invltlb_ok || cold || panicstr)
		return;
	if (!forward_roundrobin_enabled)
		return;
	resched_cpus |= PCPU_GET(other_cpus);
	map = PCPU_GET(other_cpus) & ~stopped_cpus;
#if 1
	selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
#else
	(void) all_but_self_ipi(XCPUAST_OFFSET);
#endif
	i = 0;
	while ((checkstate_need_ast & map) != 0) {
		/* bounded spin; see NOTE above */
		i++;
		if (i > 100000) {
#if 0
			printf("forward_roundrobin: dropped ast 0x%x\n",
			    checkstate_need_ast & map);
#endif
			break;
		}
	}
}


#ifdef APIC_INTR_REORDER
/*
 * Maintain mapping from softintr vector to isr bit in local apic.
 */
/*
 * Record, for interrupt `intr', the address of the local APIC ISR
 * register word containing `vector''s in-service bit and the bit mask
 * within that word (ISR words are spaced 16 bytes apart, 32 bits each).
 *
 * NOTE(review): the bound check accepts intr == 32; if
 * apic_isrbit_location[] has only 32 entries (declared elsewhere) that
 * is an off-by-one — confirm the array size.
 */
void
set_lapic_isrloc(int intr, int vector)
{
	if (intr < 0 || intr > 32)
		panic("set_apic_isrloc: bad intr argument: %d",intr);
	if (vector < ICU_OFFSET || vector > 255)
		panic("set_apic_isrloc: bad vector argument: %d",vector);
	apic_isrbit_location[intr].location = &lapic.isr0 + ((vector>>5)<<2);
	apic_isrbit_location[intr].bit = (1<<(vector & 31));
}
#endif

/*
 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
 * (if specified), rendezvous, execute the action function (if specified),
 * rendezvous again, execute the teardown function (if specified), and then
 * resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */
/* rendezvous parameters; valid only while smp_rv_lock is held */
static void (*smp_rv_setup_func)(void *arg);
static void (*smp_rv_action_func)(void *arg);
static void (*smp_rv_teardown_func)(void *arg);
static void *smp_rv_func_arg;
static volatile int smp_rv_waiters[2];	/* entry [0] / exit [1] barrier counts */

/*
 * Body executed on every CPU — by the initiator directly from
 * smp_rendezvous(), and by the others from the rendezvous IPI handler:
 * run setup, barrier until all mp_ncpus CPUs have arrived, run action,
 * barrier again, then run teardown.
 */
void
smp_rendezvous_action(void)
{
	/* setup function */
	if (smp_rv_setup_func != NULL)
		smp_rv_setup_func(smp_rv_func_arg);
	/* spin on entry rendezvous */
	atomic_add_int(&smp_rv_waiters[0], 1);
	while (smp_rv_waiters[0] < mp_ncpus)
		;
	/* action function */
	if (smp_rv_action_func != NULL)
		smp_rv_action_func(smp_rv_func_arg);
	/* spin on exit rendezvous */
	atomic_add_int(&smp_rv_waiters[1], 1);
	while (smp_rv_waiters[1] < mp_ncpus)
		;
	/* teardown function */
	if (smp_rv_teardown_func != NULL)
		smp_rv_teardown_func(smp_rv_func_arg);
}

/*
 * Initiate an all-CPU rendezvous: publish the three callbacks and their
 * argument, reset the barrier counters, IPI every other CPU, and take
 * part in the rendezvous ourselves.  Interrupts are disabled on this CPU
 * for the duration and restored afterwards.  Any of the function
 * pointers may be NULL to skip that phase.  Serialized by smp_rv_lock.
 */
void
smp_rendezvous(void (* setup_func)(void *),
	       void (* action_func)(void *),
	       void (* teardown_func)(void *),
	       void *arg)
{
	u_int efl;

	/* obtain rendezvous lock */
	s_lock(&smp_rv_lock);		/* XXX sleep here? NOWAIT flag? */

	/* set static function pointers */
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[0] = 0;
	smp_rv_waiters[1] = 0;

	/* disable interrupts on this CPU, save interrupt status */
	efl = read_eflags();
	write_eflags(efl & ~PSL_I);

	/* signal other processors, which will enter the IPI with interrupts off */
	all_but_self_ipi(XRENDEZVOUS_OFFSET);

	/* call executor function */
	smp_rendezvous_action();

	/* restore interrupt flag */
	write_eflags(efl);

	/* release lock */
	s_unlock(&smp_rv_lock);
}

/*
 * SYSINIT hook run at SI_SUB_SMP: drop ap_boot_lock so the APs blocked
 * at the top of ap_init() may proceed (the lock is presumably held from
 * AP startup — confirm where it is first acquired).
 */
void
release_aps(void *dummy __unused)
{
	s_unlock(&ap_boot_lock);
}

SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL);