/* mptable.c — FreeBSD revision 55891 */
/*
 * Copyright (c) 1996, by Steve Passe
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the developer may NOT be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
2490075Sobrien * 2590075Sobrien * $FreeBSD: head/sys/i386/i386/mptable.c 55891 2000-01-13 09:09:02Z mdodd $ 26132718Skan */ 27132718Skan 2890075Sobrien#include "opt_smp.h" 2990075Sobrien#include "opt_cpu.h" 3090075Sobrien#include "opt_user_ldt.h" 3190075Sobrien 3290075Sobrien#ifdef SMP 3390075Sobrien#include <machine/smptests.h> 3490075Sobrien#else 3590075Sobrien#error 3690075Sobrien#endif 3790075Sobrien 3890075Sobrien#include <sys/param.h> 3990075Sobrien#include <sys/systm.h> 4090075Sobrien#include <sys/kernel.h> 4190075Sobrien#include <sys/proc.h> 42117395Skan#include <sys/sysctl.h> 43132718Skan#include <sys/malloc.h> 4490075Sobrien#include <sys/memrange.h> 4590075Sobrien#ifdef BETTER_CLOCK 46132718Skan#include <sys/dkstat.h> 47132718Skan#endif 4890075Sobrien#include <sys/cons.h> /* cngetc() */ 4990075Sobrien 5090075Sobrien#include <vm/vm.h> 5190075Sobrien#include <vm/vm_param.h> 5290075Sobrien#include <vm/pmap.h> 5390075Sobrien#include <vm/vm_kern.h> 5490075Sobrien#include <vm/vm_extern.h> 5590075Sobrien#ifdef BETTER_CLOCK 5690075Sobrien#include <sys/lock.h> 5790075Sobrien#include <vm/vm_map.h> 5890075Sobrien#include <sys/user.h> 59132718Skan#ifdef GPROF 6090075Sobrien#include <sys/gmon.h> 6190075Sobrien#endif 6290075Sobrien#endif 6390075Sobrien 64169689Skan#include <machine/smp.h> 6590075Sobrien#include <machine/apic.h> 6690075Sobrien#include <machine/atomic.h> 6790075Sobrien#include <machine/cpufunc.h> 6890075Sobrien#include <machine/mpapic.h> 6990075Sobrien#include <machine/psl.h> 70169689Skan#include <machine/segments.h> 71169689Skan#include <machine/smptests.h> /** TEST_DEFAULT_CONFIG, TEST_TEST1 */ 72169689Skan#include <machine/tss.h> 73169689Skan#include <machine/specialreg.h> 74169689Skan#include <machine/globaldata.h> 75169689Skan 76169689Skan#if defined(APIC_IO) 77169689Skan#include <machine/md_var.h> /* setidt() */ 78169689Skan#include <i386/isa/icu.h> /* IPIs */ 79169689Skan#include <i386/isa/intr_machdep.h> /* IPIs */ 80169689Skan#endif /* APIC_IO 
*/ 8190075Sobrien 8290075Sobrien#if defined(TEST_DEFAULT_CONFIG) 8390075Sobrien#define MPFPS_MPFB1 TEST_DEFAULT_CONFIG 8490075Sobrien#else 85169689Skan#define MPFPS_MPFB1 mpfps->mpfb1 8690075Sobrien#endif /* TEST_DEFAULT_CONFIG */ 87 88#define WARMBOOT_TARGET 0 89#define WARMBOOT_OFF (KERNBASE + 0x0467) 90#define WARMBOOT_SEG (KERNBASE + 0x0469) 91 92#ifdef PC98 93#define BIOS_BASE (0xe8000) 94#define BIOS_SIZE (0x18000) 95#else 96#define BIOS_BASE (0xf0000) 97#define BIOS_SIZE (0x10000) 98#endif 99#define BIOS_COUNT (BIOS_SIZE/4) 100 101#define CMOS_REG (0x70) 102#define CMOS_DATA (0x71) 103#define BIOS_RESET (0x0f) 104#define BIOS_WARM (0x0a) 105 106#define PROCENTRY_FLAG_EN 0x01 107#define PROCENTRY_FLAG_BP 0x02 108#define IOAPICENTRY_FLAG_EN 0x01 109 110 111/* MP Floating Pointer Structure */ 112typedef struct MPFPS { 113 char signature[4]; 114 void *pap; 115 u_char length; 116 u_char spec_rev; 117 u_char checksum; 118 u_char mpfb1; 119 u_char mpfb2; 120 u_char mpfb3; 121 u_char mpfb4; 122 u_char mpfb5; 123} *mpfps_t; 124 125/* MP Configuration Table Header */ 126typedef struct MPCTH { 127 char signature[4]; 128 u_short base_table_length; 129 u_char spec_rev; 130 u_char checksum; 131 u_char oem_id[8]; 132 u_char product_id[12]; 133 void *oem_table_pointer; 134 u_short oem_table_size; 135 u_short entry_count; 136 void *apic_address; 137 u_short extended_table_length; 138 u_char extended_table_checksum; 139 u_char reserved; 140} *mpcth_t; 141 142 143typedef struct PROCENTRY { 144 u_char type; 145 u_char apic_id; 146 u_char apic_version; 147 u_char cpu_flags; 148 u_long cpu_signature; 149 u_long feature_flags; 150 u_long reserved1; 151 u_long reserved2; 152} *proc_entry_ptr; 153 154typedef struct BUSENTRY { 155 u_char type; 156 u_char bus_id; 157 char bus_type[6]; 158} *bus_entry_ptr; 159 160typedef struct IOAPICENTRY { 161 u_char type; 162 u_char apic_id; 163 u_char apic_version; 164 u_char apic_flags; 165 void *apic_address; 166} *io_apic_entry_ptr; 167 
168typedef struct INTENTRY { 169 u_char type; 170 u_char int_type; 171 u_short int_flags; 172 u_char src_bus_id; 173 u_char src_bus_irq; 174 u_char dst_apic_id; 175 u_char dst_apic_int; 176} *int_entry_ptr; 177 178/* descriptions of MP basetable entries */ 179typedef struct BASETABLE_ENTRY { 180 u_char type; 181 u_char length; 182 char name[16]; 183} basetable_entry; 184 185/* 186 * this code MUST be enabled here and in mpboot.s. 187 * it follows the very early stages of AP boot by placing values in CMOS ram. 188 * it NORMALLY will never be needed and thus the primitive method for enabling. 189 * 190#define CHECK_POINTS 191 */ 192 193#if defined(CHECK_POINTS) && !defined(PC98) 194#define CHECK_READ(A) (outb(CMOS_REG, (A)), inb(CMOS_DATA)) 195#define CHECK_WRITE(A,D) (outb(CMOS_REG, (A)), outb(CMOS_DATA, (D))) 196 197#define CHECK_INIT(D); \ 198 CHECK_WRITE(0x34, (D)); \ 199 CHECK_WRITE(0x35, (D)); \ 200 CHECK_WRITE(0x36, (D)); \ 201 CHECK_WRITE(0x37, (D)); \ 202 CHECK_WRITE(0x38, (D)); \ 203 CHECK_WRITE(0x39, (D)); 204 205#define CHECK_PRINT(S); \ 206 printf("%s: %d, %d, %d, %d, %d, %d\n", \ 207 (S), \ 208 CHECK_READ(0x34), \ 209 CHECK_READ(0x35), \ 210 CHECK_READ(0x36), \ 211 CHECK_READ(0x37), \ 212 CHECK_READ(0x38), \ 213 CHECK_READ(0x39)); 214 215#else /* CHECK_POINTS */ 216 217#define CHECK_INIT(D) 218#define CHECK_PRINT(S) 219 220#endif /* CHECK_POINTS */ 221 222/* 223 * Values to send to the POST hardware. 224 */ 225#define MP_BOOTADDRESS_POST 0x10 226#define MP_PROBE_POST 0x11 227#define MPTABLE_PASS1_POST 0x12 228 229#define MP_START_POST 0x13 230#define MP_ENABLE_POST 0x14 231#define MPTABLE_PASS2_POST 0x15 232 233#define START_ALL_APS_POST 0x16 234#define INSTALL_AP_TRAMP_POST 0x17 235#define START_AP_POST 0x18 236 237#define MP_ANNOUNCE_POST 0x19 238 239 240/** XXX FIXME: where does this really belong, isa.h/isa.c perhaps? */ 241int current_postcode; 242 243/** XXX FIXME: what system files declare these??? 
*/ 244extern struct region_descriptor r_gdt, r_idt; 245 246int bsp_apic_ready = 0; /* flags useability of BSP apic */ 247int mp_ncpus; /* # of CPUs, including BSP */ 248int mp_naps; /* # of Applications processors */ 249int mp_nbusses; /* # of busses */ 250int mp_napics; /* # of IO APICs */ 251int boot_cpu_id; /* designated BSP */ 252vm_offset_t cpu_apic_address; 253vm_offset_t io_apic_address[NAPICID]; /* NAPICID is more than enough */ 254extern int nkpt; 255 256u_int32_t cpu_apic_versions[NCPU]; 257u_int32_t io_apic_versions[NAPIC]; 258 259#ifdef APIC_INTR_DIAGNOSTIC 260int apic_itrace_enter[32]; 261int apic_itrace_tryisrlock[32]; 262int apic_itrace_gotisrlock[32]; 263int apic_itrace_active[32]; 264int apic_itrace_masked[32]; 265int apic_itrace_noisrlock[32]; 266int apic_itrace_masked2[32]; 267int apic_itrace_unmask[32]; 268int apic_itrace_noforward[32]; 269int apic_itrace_leave[32]; 270int apic_itrace_enter2[32]; 271int apic_itrace_doreti[32]; 272int apic_itrace_splz[32]; 273int apic_itrace_eoi[32]; 274#ifdef APIC_INTR_DIAGNOSTIC_IRQ 275unsigned short apic_itrace_debugbuffer[32768]; 276int apic_itrace_debugbuffer_idx; 277struct simplelock apic_itrace_debuglock; 278#endif 279#endif 280 281#ifdef APIC_INTR_REORDER 282struct { 283 volatile int *location; 284 int bit; 285} apic_isrbit_location[32]; 286#endif 287 288struct apic_intmapinfo int_to_apicintpin[APIC_INTMAPSIZE]; 289 290/* 291 * APIC ID logical/physical mapping structures. 292 * We oversize these to simplify boot-time config. 293 */ 294int cpu_num_to_apic_id[NAPICID]; 295int io_num_to_apic_id[NAPICID]; 296int apic_id_to_logical[NAPICID]; 297 298 299/* Bitmap of all available CPUs */ 300u_int all_cpus; 301 302/* AP uses this during bootstrap. Do not staticize. 
*/ 303char *bootSTK; 304static int bootAP; 305 306/* Hotwire a 0->4MB V==P mapping */ 307extern pt_entry_t *KPTphys; 308 309/* SMP page table page */ 310extern pt_entry_t *SMPpt; 311 312struct pcb stoppcbs[NCPU]; 313 314int smp_started; /* has the system started? */ 315 316/* 317 * Local data and functions. 318 */ 319 320static int mp_capable; 321static u_int boot_address; 322static u_int base_memory; 323 324static int picmode; /* 0: virtual wire mode, 1: PIC mode */ 325static mpfps_t mpfps; 326static int search_for_sig(u_int32_t target, int count); 327static void mp_enable(u_int boot_addr); 328 329static int mptable_pass1(void); 330static int mptable_pass2(void); 331static void default_mp_table(int type); 332static void fix_mp_table(void); 333static void setup_apic_irq_mapping(void); 334static void init_locks(void); 335static int start_all_aps(u_int boot_addr); 336static void install_ap_tramp(u_int boot_addr); 337static int start_ap(int logicalCpu, u_int boot_addr); 338static int apic_int_is_bus_type(int intr, int bus_type); 339 340/* 341 * Calculate usable address in base memory for AP trampoline code. 342 */ 343u_int 344mp_bootaddress(u_int basemem) 345{ 346 POSTCODE(MP_BOOTADDRESS_POST); 347 348 base_memory = basemem * 1024; /* convert to bytes */ 349 350 boot_address = base_memory & ~0xfff; /* round down to 4k boundary */ 351 if ((base_memory - boot_address) < bootMP_size) 352 boot_address -= 4096; /* not enough, lower by 4k */ 353 354 return boot_address; 355} 356 357 358/* 359 * Look for an Intel MP spec table (ie, SMP capable hardware). 
360 */ 361int 362mp_probe(void) 363{ 364 int x; 365 u_long segment; 366 u_int32_t target; 367 368 POSTCODE(MP_PROBE_POST); 369 370 /* see if EBDA exists */ 371 if ((segment = (u_long) * (u_short *) (KERNBASE + 0x40e)) != 0) { 372 /* search first 1K of EBDA */ 373 target = (u_int32_t) (segment << 4); 374 if ((x = search_for_sig(target, 1024 / 4)) >= 0) 375 goto found; 376 } else { 377 /* last 1K of base memory, effective 'top of base' passed in */ 378 target = (u_int32_t) (base_memory - 0x400); 379 if ((x = search_for_sig(target, 1024 / 4)) >= 0) 380 goto found; 381 } 382 383 /* search the BIOS */ 384 target = (u_int32_t) BIOS_BASE; 385 if ((x = search_for_sig(target, BIOS_COUNT)) >= 0) 386 goto found; 387 388 /* nothing found */ 389 mpfps = (mpfps_t)0; 390 mp_capable = 0; 391 return 0; 392 393found: 394 /* calculate needed resources */ 395 mpfps = (mpfps_t)x; 396 if (mptable_pass1()) 397 panic("you must reconfigure your kernel"); 398 399 /* flag fact that we are running multiple processors */ 400 mp_capable = 1; 401 return 1; 402} 403 404 405/* 406 * Startup the SMP processors. 407 */ 408void 409mp_start(void) 410{ 411 POSTCODE(MP_START_POST); 412 413 /* look for MP capable motherboard */ 414 if (mp_capable) 415 mp_enable(boot_address); 416 else 417 panic("MP hardware not found!"); 418} 419 420 421/* 422 * Print various information about the SMP system hardware and setup. 
423 */ 424void 425mp_announce(void) 426{ 427 int x; 428 429 POSTCODE(MP_ANNOUNCE_POST); 430 431 printf("FreeBSD/SMP: Multiprocessor motherboard\n"); 432 printf(" cpu0 (BSP): apic id: %2d", CPU_TO_ID(0)); 433 printf(", version: 0x%08x", cpu_apic_versions[0]); 434 printf(", at 0x%08x\n", cpu_apic_address); 435 for (x = 1; x <= mp_naps; ++x) { 436 printf(" cpu%d (AP): apic id: %2d", x, CPU_TO_ID(x)); 437 printf(", version: 0x%08x", cpu_apic_versions[x]); 438 printf(", at 0x%08x\n", cpu_apic_address); 439 } 440 441#if defined(APIC_IO) 442 for (x = 0; x < mp_napics; ++x) { 443 printf(" io%d (APIC): apic id: %2d", x, IO_TO_ID(x)); 444 printf(", version: 0x%08x", io_apic_versions[x]); 445 printf(", at 0x%08x\n", io_apic_address[x]); 446 } 447#else 448 printf(" Warning: APIC I/O disabled\n"); 449#endif /* APIC_IO */ 450} 451 452/* 453 * AP cpu's call this to sync up protected mode. 454 */ 455void 456init_secondary(void) 457{ 458 int gsel_tss; 459 int x, myid = bootAP; 460 461 gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[myid]; 462 gdt_segs[GPROC0_SEL].ssd_base = 463 (int) &SMP_prvspace[myid].globaldata.gd_common_tss; 464 SMP_prvspace[myid].globaldata.gd_prvspace = &SMP_prvspace[myid]; 465 466 for (x = 0; x < NGDT; x++) { 467 ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x].sd); 468 } 469 470 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1; 471 r_gdt.rd_base = (int) &gdt[myid * NGDT]; 472 lgdt(&r_gdt); /* does magic intra-segment return */ 473 474 lidt(&r_idt); 475 476 lldt(_default_ldt); 477#ifdef USER_LDT 478 currentldt = _default_ldt; 479#endif 480 481 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL); 482 gdt[myid * NGDT + GPROC0_SEL].sd.sd_type = SDT_SYS386TSS; 483 common_tss.tss_esp0 = 0; /* not used until after switch */ 484 common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL); 485 common_tss.tss_ioopt = (sizeof common_tss) << 16; 486 tss_gdt = &gdt[myid * NGDT + GPROC0_SEL].sd; 487 common_tssd = *tss_gdt; 488 ltr(gsel_tss); 489 490 load_cr0(0x8005003b); /* XXX! 
*/ 491 492 pmap_set_opt(); 493} 494 495 496#if defined(APIC_IO) 497/* 498 * Final configuration of the BSP's local APIC: 499 * - disable 'pic mode'. 500 * - disable 'virtual wire mode'. 501 * - enable NMI. 502 */ 503void 504bsp_apic_configure(void) 505{ 506 u_char byte; 507 u_int32_t temp; 508 509 /* leave 'pic mode' if necessary */ 510 if (picmode) { 511 outb(0x22, 0x70); /* select IMCR */ 512 byte = inb(0x23); /* current contents */ 513 byte |= 0x01; /* mask external INTR */ 514 outb(0x23, byte); /* disconnect 8259s/NMI */ 515 } 516 517 /* mask lint0 (the 8259 'virtual wire' connection) */ 518 temp = lapic.lvt_lint0; 519 temp |= APIC_LVT_M; /* set the mask */ 520 lapic.lvt_lint0 = temp; 521 522 /* setup lint1 to handle NMI */ 523 temp = lapic.lvt_lint1; 524 temp &= ~APIC_LVT_M; /* clear the mask */ 525 lapic.lvt_lint1 = temp; 526 527 if (bootverbose) 528 apic_dump("bsp_apic_configure()"); 529} 530#endif /* APIC_IO */ 531 532 533/******************************************************************* 534 * local functions and data 535 */ 536 537/* 538 * start the SMP system 539 */ 540static void 541mp_enable(u_int boot_addr) 542{ 543 int x; 544#if defined(APIC_IO) 545 int apic; 546 u_int ux; 547#endif /* APIC_IO */ 548 549 POSTCODE(MP_ENABLE_POST); 550 551 /* turn on 4MB of V == P addressing so we can get to MP table */ 552 *(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME); 553 invltlb(); 554 555 /* examine the MP table for needed info, uses physical addresses */ 556 x = mptable_pass2(); 557 558 *(int *)PTD = 0; 559 invltlb(); 560 561 /* can't process default configs till the CPU APIC is pmapped */ 562 if (x) 563 default_mp_table(x); 564 565 /* post scan cleanup */ 566 fix_mp_table(); 567 setup_apic_irq_mapping(); 568 569#if defined(APIC_IO) 570 571 /* fill the LOGICAL io_apic_versions table */ 572 for (apic = 0; apic < mp_napics; ++apic) { 573 ux = io_apic_read(apic, IOAPIC_VER); 574 io_apic_versions[apic] = ux; 575 } 576 577 /* program each IO 
APIC in the system */ 578 for (apic = 0; apic < mp_napics; ++apic) 579 if (io_apic_setup(apic) < 0) 580 panic("IO APIC setup failure"); 581 582 /* install a 'Spurious INTerrupt' vector */ 583 setidt(XSPURIOUSINT_OFFSET, Xspuriousint, 584 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 585 586 /* install an inter-CPU IPI for TLB invalidation */ 587 setidt(XINVLTLB_OFFSET, Xinvltlb, 588 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 589 590#ifdef BETTER_CLOCK 591 /* install an inter-CPU IPI for reading processor state */ 592 setidt(XCPUCHECKSTATE_OFFSET, Xcpucheckstate, 593 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 594#endif 595 596 /* install an inter-CPU IPI for all-CPU rendezvous */ 597 setidt(XRENDEZVOUS_OFFSET, Xrendezvous, 598 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 599 600 /* install an inter-CPU IPI for forcing an additional software trap */ 601 setidt(XCPUAST_OFFSET, Xcpuast, 602 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 603 604 /* install an inter-CPU IPI for interrupt forwarding */ 605 setidt(XFORWARD_IRQ_OFFSET, Xforward_irq, 606 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 607 608 /* install an inter-CPU IPI for CPU stop/restart */ 609 setidt(XCPUSTOP_OFFSET, Xcpustop, 610 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 611 612#if defined(TEST_TEST1) 613 /* install a "fake hardware INTerrupt" vector */ 614 setidt(XTEST1_OFFSET, Xtest1, 615 SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 616#endif /** TEST_TEST1 */ 617 618#endif /* APIC_IO */ 619 620 /* initialize all SMP locks */ 621 init_locks(); 622 623 /* start each Application Processor */ 624 start_all_aps(boot_addr); 625 626 /* 627 * The init process might be started on a different CPU now, 628 * and the boot CPU might not call prepare_usermode to get 629 * cr0 correctly configured. Thus we initialize cr0 here. 
630 */ 631 load_cr0(rcr0() | CR0_WP | CR0_AM); 632} 633 634 635/* 636 * look for the MP spec signature 637 */ 638 639/* string defined by the Intel MP Spec as identifying the MP table */ 640#define MP_SIG 0x5f504d5f /* _MP_ */ 641#define NEXT(X) ((X) += 4) 642static int 643search_for_sig(u_int32_t target, int count) 644{ 645 int x; 646 u_int32_t *addr = (u_int32_t *) (KERNBASE + target); 647 648 for (x = 0; x < count; NEXT(x)) 649 if (addr[x] == MP_SIG) 650 /* make array index a byte index */ 651 return (target + (x * sizeof(u_int32_t))); 652 653 return -1; 654} 655 656 657static basetable_entry basetable_entry_types[] = 658{ 659 {0, 20, "Processor"}, 660 {1, 8, "Bus"}, 661 {2, 8, "I/O APIC"}, 662 {3, 8, "I/O INT"}, 663 {4, 8, "Local INT"} 664}; 665 666typedef struct BUSDATA { 667 u_char bus_id; 668 enum busTypes bus_type; 669} bus_datum; 670 671typedef struct INTDATA { 672 u_char int_type; 673 u_short int_flags; 674 u_char src_bus_id; 675 u_char src_bus_irq; 676 u_char dst_apic_id; 677 u_char dst_apic_int; 678 u_char int_vector; 679} io_int, local_int; 680 681typedef struct BUSTYPENAME { 682 u_char type; 683 char name[7]; 684} bus_type_name; 685 686static bus_type_name bus_type_table[] = 687{ 688 {CBUS, "CBUS"}, 689 {CBUSII, "CBUSII"}, 690 {EISA, "EISA"}, 691 {MCA, "MCA"}, 692 {UNKNOWN_BUSTYPE, "---"}, 693 {ISA, "ISA"}, 694 {MCA, "MCA"}, 695 {UNKNOWN_BUSTYPE, "---"}, 696 {UNKNOWN_BUSTYPE, "---"}, 697 {UNKNOWN_BUSTYPE, "---"}, 698 {UNKNOWN_BUSTYPE, "---"}, 699 {UNKNOWN_BUSTYPE, "---"}, 700 {PCI, "PCI"}, 701 {UNKNOWN_BUSTYPE, "---"}, 702 {UNKNOWN_BUSTYPE, "---"}, 703 {UNKNOWN_BUSTYPE, "---"}, 704 {UNKNOWN_BUSTYPE, "---"}, 705 {XPRESS, "XPRESS"}, 706 {UNKNOWN_BUSTYPE, "---"} 707}; 708/* from MP spec v1.4, table 5-1 */ 709static int default_data[7][5] = 710{ 711/* nbus, id0, type0, id1, type1 */ 712 {1, 0, ISA, 255, 255}, 713 {1, 0, EISA, 255, 255}, 714 {1, 0, EISA, 255, 255}, 715 {1, 0, MCA, 255, 255}, 716 {2, 0, ISA, 1, PCI}, 717 {2, 0, EISA, 1, PCI}, 718 {2, 0, 
MCA, 1, PCI} 719}; 720 721 722/* the bus data */ 723static bus_datum bus_data[NBUS]; 724 725/* the IO INT data, one entry per possible APIC INTerrupt */ 726static io_int io_apic_ints[NINTR]; 727 728static int nintrs; 729 730static int processor_entry __P((proc_entry_ptr entry, int cpu)); 731static int bus_entry __P((bus_entry_ptr entry, int bus)); 732static int io_apic_entry __P((io_apic_entry_ptr entry, int apic)); 733static int int_entry __P((int_entry_ptr entry, int intr)); 734static int lookup_bus_type __P((char *name)); 735 736 737/* 738 * 1st pass on motherboard's Intel MP specification table. 739 * 740 * initializes: 741 * mp_ncpus = 1 742 * 743 * determines: 744 * cpu_apic_address (common to all CPUs) 745 * io_apic_address[N] 746 * mp_naps 747 * mp_nbusses 748 * mp_napics 749 * nintrs 750 */ 751static int 752mptable_pass1(void) 753{ 754 int x; 755 mpcth_t cth; 756 int totalSize; 757 void* position; 758 int count; 759 int type; 760 int mustpanic; 761 762 POSTCODE(MPTABLE_PASS1_POST); 763 764 mustpanic = 0; 765 766 /* clear various tables */ 767 for (x = 0; x < NAPICID; ++x) { 768 io_apic_address[x] = ~0; /* IO APIC address table */ 769 } 770 771 /* init everything to empty */ 772 mp_naps = 0; 773 mp_nbusses = 0; 774 mp_napics = 0; 775 nintrs = 0; 776 777 /* check for use of 'default' configuration */ 778 if (MPFPS_MPFB1 != 0) { 779 /* use default addresses */ 780 cpu_apic_address = DEFAULT_APIC_BASE; 781 io_apic_address[0] = DEFAULT_IO_APIC_BASE; 782 783 /* fill in with defaults */ 784 mp_naps = 2; /* includes BSP */ 785 mp_nbusses = default_data[MPFPS_MPFB1 - 1][0]; 786#if defined(APIC_IO) 787 mp_napics = 1; 788 nintrs = 16; 789#endif /* APIC_IO */ 790 } 791 else { 792 if ((cth = mpfps->pap) == 0) 793 panic("MP Configuration Table Header MISSING!"); 794 795 cpu_apic_address = (vm_offset_t) cth->apic_address; 796 797 /* walk the table, recording info of interest */ 798 totalSize = cth->base_table_length - sizeof(struct MPCTH); 799 position = (u_char *) cth + 
sizeof(struct MPCTH); 800 count = cth->entry_count; 801 802 while (count--) { 803 switch (type = *(u_char *) position) { 804 case 0: /* processor_entry */ 805 if (((proc_entry_ptr)position)->cpu_flags 806 & PROCENTRY_FLAG_EN) 807 ++mp_naps; 808 break; 809 case 1: /* bus_entry */ 810 ++mp_nbusses; 811 break; 812 case 2: /* io_apic_entry */ 813 if (((io_apic_entry_ptr)position)->apic_flags 814 & IOAPICENTRY_FLAG_EN) 815 io_apic_address[mp_napics++] = 816 (vm_offset_t)((io_apic_entry_ptr) 817 position)->apic_address; 818 break; 819 case 3: /* int_entry */ 820 ++nintrs; 821 break; 822 case 4: /* int_entry */ 823 break; 824 default: 825 panic("mpfps Base Table HOSED!"); 826 /* NOTREACHED */ 827 } 828 829 totalSize -= basetable_entry_types[type].length; 830 (u_char*)position += basetable_entry_types[type].length; 831 } 832 } 833 834 /* qualify the numbers */ 835 if (mp_naps > NCPU) { 836 printf("Warning: only using %d of %d available CPUs!\n", 837 NCPU, mp_naps); 838 mp_naps = NCPU; 839 } 840 if (mp_nbusses > NBUS) { 841 printf("found %d busses, increase NBUS\n", mp_nbusses); 842 mustpanic = 1; 843 } 844 if (mp_napics > NAPIC) { 845 printf("found %d apics, increase NAPIC\n", mp_napics); 846 mustpanic = 1; 847 } 848 if (nintrs > NINTR) { 849 printf("found %d intrs, increase NINTR\n", nintrs); 850 mustpanic = 1; 851 } 852 853 /* 854 * Count the BSP. 855 * This is also used as a counter while starting the APs. 856 */ 857 mp_ncpus = 1; 858 859 --mp_naps; /* subtract the BSP */ 860 861 return mustpanic; 862} 863 864 865/* 866 * 2nd pass on motherboard's Intel MP specification table. 
867 * 868 * sets: 869 * boot_cpu_id 870 * ID_TO_IO(N), phy APIC ID to log CPU/IO table 871 * CPU_TO_ID(N), logical CPU to APIC ID table 872 * IO_TO_ID(N), logical IO to APIC ID table 873 * bus_data[N] 874 * io_apic_ints[N] 875 */ 876static int 877mptable_pass2(void) 878{ 879 int x; 880 mpcth_t cth; 881 int totalSize; 882 void* position; 883 int count; 884 int type; 885 int apic, bus, cpu, intr; 886 887 POSTCODE(MPTABLE_PASS2_POST); 888 889 /* clear various tables */ 890 for (x = 0; x < NAPICID; ++x) { 891 ID_TO_IO(x) = -1; /* phy APIC ID to log CPU/IO table */ 892 CPU_TO_ID(x) = -1; /* logical CPU to APIC ID table */ 893 IO_TO_ID(x) = -1; /* logical IO to APIC ID table */ 894 } 895 896 /* clear bus data table */ 897 for (x = 0; x < NBUS; ++x) 898 bus_data[x].bus_id = 0xff; 899 900 /* clear IO APIC INT table */ 901 for (x = 0; x < NINTR; ++x) { 902 io_apic_ints[x].int_type = 0xff; 903 io_apic_ints[x].int_vector = 0xff; 904 } 905 906 /* setup the cpu/apic mapping arrays */ 907 boot_cpu_id = -1; 908 909 /* record whether PIC or virtual-wire mode */ 910 picmode = (mpfps->mpfb2 & 0x80) ? 
1 : 0; 911 912 /* check for use of 'default' configuration */ 913 if (MPFPS_MPFB1 != 0) 914 return MPFPS_MPFB1; /* return default configuration type */ 915 916 if ((cth = mpfps->pap) == 0) 917 panic("MP Configuration Table Header MISSING!"); 918 919 /* walk the table, recording info of interest */ 920 totalSize = cth->base_table_length - sizeof(struct MPCTH); 921 position = (u_char *) cth + sizeof(struct MPCTH); 922 count = cth->entry_count; 923 apic = bus = intr = 0; 924 cpu = 1; /* pre-count the BSP */ 925 926 while (count--) { 927 switch (type = *(u_char *) position) { 928 case 0: 929 if (processor_entry(position, cpu)) 930 ++cpu; 931 break; 932 case 1: 933 if (bus_entry(position, bus)) 934 ++bus; 935 break; 936 case 2: 937 if (io_apic_entry(position, apic)) 938 ++apic; 939 break; 940 case 3: 941 if (int_entry(position, intr)) 942 ++intr; 943 break; 944 case 4: 945 /* int_entry(position); */ 946 break; 947 default: 948 panic("mpfps Base Table HOSED!"); 949 /* NOTREACHED */ 950 } 951 952 totalSize -= basetable_entry_types[type].length; 953 (u_char *) position += basetable_entry_types[type].length; 954 } 955 956 if (boot_cpu_id == -1) 957 panic("NO BSP found!"); 958 959 /* report fact that its NOT a default configuration */ 960 return 0; 961} 962 963 964void 965assign_apic_irq(int apic, int intpin, int irq) 966{ 967 int x; 968 969 if (int_to_apicintpin[irq].ioapic != -1) 970 panic("assign_apic_irq: inconsistent table"); 971 972 int_to_apicintpin[irq].ioapic = apic; 973 int_to_apicintpin[irq].int_pin = intpin; 974 int_to_apicintpin[irq].apic_address = ioapic[apic]; 975 int_to_apicintpin[irq].redirindex = IOAPIC_REDTBL + 2 * intpin; 976 977 for (x = 0; x < nintrs; x++) { 978 if ((io_apic_ints[x].int_type == 0 || 979 io_apic_ints[x].int_type == 3) && 980 io_apic_ints[x].int_vector == 0xff && 981 io_apic_ints[x].dst_apic_id == IO_TO_ID(apic) && 982 io_apic_ints[x].dst_apic_int == intpin) 983 io_apic_ints[x].int_vector = irq; 984 } 985} 986 987void 
988revoke_apic_irq(int irq) 989{ 990 int x; 991 int oldapic; 992 int oldintpin; 993 994 if (int_to_apicintpin[irq].ioapic == -1) 995 panic("assign_apic_irq: inconsistent table"); 996 997 oldapic = int_to_apicintpin[irq].ioapic; 998 oldintpin = int_to_apicintpin[irq].int_pin; 999 1000 int_to_apicintpin[irq].ioapic = -1; 1001 int_to_apicintpin[irq].int_pin = 0; 1002 int_to_apicintpin[irq].apic_address = NULL; 1003 int_to_apicintpin[irq].redirindex = 0; 1004 1005 for (x = 0; x < nintrs; x++) { 1006 if ((io_apic_ints[x].int_type == 0 || 1007 io_apic_ints[x].int_type == 3) && 1008 io_apic_ints[x].int_vector == 0xff && 1009 io_apic_ints[x].dst_apic_id == IO_TO_ID(oldapic) && 1010 io_apic_ints[x].dst_apic_int == oldintpin) 1011 io_apic_ints[x].int_vector = 0xff; 1012 } 1013} 1014 1015/* 1016 * parse an Intel MP specification table 1017 */ 1018static void 1019fix_mp_table(void) 1020{ 1021 int x; 1022 int id; 1023 int bus_0 = 0; /* Stop GCC warning */ 1024 int bus_pci = 0; /* Stop GCC warning */ 1025 int num_pci_bus; 1026 1027 /* 1028 * Fix mis-numbering of the PCI bus and its INT entries if the BIOS 1029 * did it wrong. The MP spec says that when more than 1 PCI bus 1030 * exists the BIOS must begin with bus entries for the PCI bus and use 1031 * actual PCI bus numbering. This implies that when only 1 PCI bus 1032 * exists the BIOS can choose to ignore this ordering, and indeed many 1033 * MP motherboards do ignore it. This causes a problem when the PCI 1034 * sub-system makes requests of the MP sub-system based on PCI bus 1035 * numbers. So here we look for the situation and renumber the 1036 * busses and associated INTs in an effort to "make it right". 
1037 */ 1038 1039 /* find bus 0, PCI bus, count the number of PCI busses */ 1040 for (num_pci_bus = 0, x = 0; x < mp_nbusses; ++x) { 1041 if (bus_data[x].bus_id == 0) { 1042 bus_0 = x; 1043 } 1044 if (bus_data[x].bus_type == PCI) { 1045 ++num_pci_bus; 1046 bus_pci = x; 1047 } 1048 } 1049 /* 1050 * bus_0 == slot of bus with ID of 0 1051 * bus_pci == slot of last PCI bus encountered 1052 */ 1053 1054 /* check the 1 PCI bus case for sanity */ 1055 if (num_pci_bus == 1) { 1056 1057 /* if it is number 0 all is well */ 1058 if (bus_data[bus_pci].bus_id == 0) 1059 return; 1060 1061 /* mis-numbered, swap with whichever bus uses slot 0 */ 1062 1063 /* swap the bus entry types */ 1064 bus_data[bus_pci].bus_type = bus_data[bus_0].bus_type; 1065 bus_data[bus_0].bus_type = PCI; 1066 1067 /* swap each relavant INTerrupt entry */ 1068 id = bus_data[bus_pci].bus_id; 1069 for (x = 0; x < nintrs; ++x) { 1070 if (io_apic_ints[x].src_bus_id == id) { 1071 io_apic_ints[x].src_bus_id = 0; 1072 } 1073 else if (io_apic_ints[x].src_bus_id == 0) { 1074 io_apic_ints[x].src_bus_id = id; 1075 } 1076 } 1077 } 1078} 1079 1080 1081/* Assign low level interrupt handlers */ 1082static void 1083setup_apic_irq_mapping(void) 1084{ 1085 int x; 1086 int int_vector; 1087 1088 /* Clear array */ 1089 for (x = 0; x < APIC_INTMAPSIZE; x++) { 1090 int_to_apicintpin[x].ioapic = -1; 1091 int_to_apicintpin[x].int_pin = 0; 1092 int_to_apicintpin[x].apic_address = NULL; 1093 int_to_apicintpin[x].redirindex = 0; 1094 } 1095 1096 /* First assign ISA/EISA interrupts */ 1097 for (x = 0; x < nintrs; x++) { 1098 int_vector = io_apic_ints[x].src_bus_irq; 1099 if (int_vector < APIC_INTMAPSIZE && 1100 io_apic_ints[x].int_vector == 0xff && 1101 int_to_apicintpin[int_vector].ioapic == -1 && 1102 (apic_int_is_bus_type(x, ISA) || 1103 apic_int_is_bus_type(x, EISA)) && 1104 io_apic_ints[x].int_type == 0) { 1105 assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id), 1106 io_apic_ints[x].dst_apic_int, 1107 int_vector); 1108 } 1109 
} 1110 1111 /* Assign interrupts on first 24 intpins on IOAPIC #0 */ 1112 for (x = 0; x < nintrs; x++) { 1113 int_vector = io_apic_ints[x].dst_apic_int; 1114 if (int_vector < APIC_INTMAPSIZE && 1115 io_apic_ints[x].dst_apic_id == IO_TO_ID(0) && 1116 io_apic_ints[x].int_vector == 0xff && 1117 int_to_apicintpin[int_vector].ioapic == -1 && 1118 (io_apic_ints[x].int_type == 0 || 1119 io_apic_ints[x].int_type == 3)) { 1120 assign_apic_irq(0, 1121 io_apic_ints[x].dst_apic_int, 1122 int_vector); 1123 } 1124 } 1125 /* 1126 * Assign interrupts for remaining intpins. 1127 * Skip IOAPIC #0 intpin 0 if the type is ExtInt, since this indicates 1128 * that an entry for ISA/EISA irq 0 exist, and a fallback to mixed mode 1129 * due to 8254 interrupts not being delivered can reuse that low level 1130 * interrupt handler. 1131 */ 1132 int_vector = 0; 1133 while (int_vector < APIC_INTMAPSIZE && 1134 int_to_apicintpin[int_vector].ioapic != -1) 1135 int_vector++; 1136 for (x = 0; x < nintrs && int_vector < APIC_INTMAPSIZE; x++) { 1137 if ((io_apic_ints[x].int_type == 0 || 1138 (io_apic_ints[x].int_type == 3 && 1139 (io_apic_ints[x].dst_apic_id != IO_TO_ID(0) || 1140 io_apic_ints[x].dst_apic_int != 0))) && 1141 io_apic_ints[x].int_vector == 0xff) { 1142 assign_apic_irq(ID_TO_IO(io_apic_ints[x].dst_apic_id), 1143 io_apic_ints[x].dst_apic_int, 1144 int_vector); 1145 int_vector++; 1146 while (int_vector < APIC_INTMAPSIZE && 1147 int_to_apicintpin[int_vector].ioapic != -1) 1148 int_vector++; 1149 } 1150 } 1151} 1152 1153 1154static int 1155processor_entry(proc_entry_ptr entry, int cpu) 1156{ 1157 /* check for usability */ 1158 if (!(entry->cpu_flags & PROCENTRY_FLAG_EN)) 1159 return 0; 1160 1161 /* check for BSP flag */ 1162 if (entry->cpu_flags & PROCENTRY_FLAG_BP) { 1163 boot_cpu_id = entry->apic_id; 1164 CPU_TO_ID(0) = entry->apic_id; 1165 ID_TO_CPU(entry->apic_id) = 0; 1166 return 0; /* its already been counted */ 1167 } 1168 1169 /* add another AP to list, if less than max number of 
CPUs */ 1170 else if (cpu < NCPU) { 1171 CPU_TO_ID(cpu) = entry->apic_id; 1172 ID_TO_CPU(entry->apic_id) = cpu; 1173 return 1; 1174 } 1175 1176 return 0; 1177} 1178 1179 1180static int 1181bus_entry(bus_entry_ptr entry, int bus) 1182{ 1183 int x; 1184 char c, name[8]; 1185 1186 /* encode the name into an index */ 1187 for (x = 0; x < 6; ++x) { 1188 if ((c = entry->bus_type[x]) == ' ') 1189 break; 1190 name[x] = c; 1191 } 1192 name[x] = '\0'; 1193 1194 if ((x = lookup_bus_type(name)) == UNKNOWN_BUSTYPE) 1195 panic("unknown bus type: '%s'", name); 1196 1197 bus_data[bus].bus_id = entry->bus_id; 1198 bus_data[bus].bus_type = x; 1199 1200 return 1; 1201} 1202 1203 1204static int 1205io_apic_entry(io_apic_entry_ptr entry, int apic) 1206{ 1207 if (!(entry->apic_flags & IOAPICENTRY_FLAG_EN)) 1208 return 0; 1209 1210 IO_TO_ID(apic) = entry->apic_id; 1211 ID_TO_IO(entry->apic_id) = apic; 1212 1213 return 1; 1214} 1215 1216 1217static int 1218lookup_bus_type(char *name) 1219{ 1220 int x; 1221 1222 for (x = 0; x < MAX_BUSTYPE; ++x) 1223 if (strcmp(bus_type_table[x].name, name) == 0) 1224 return bus_type_table[x].type; 1225 1226 return UNKNOWN_BUSTYPE; 1227} 1228 1229 1230static int 1231int_entry(int_entry_ptr entry, int intr) 1232{ 1233 int apic; 1234 1235 io_apic_ints[intr].int_type = entry->int_type; 1236 io_apic_ints[intr].int_flags = entry->int_flags; 1237 io_apic_ints[intr].src_bus_id = entry->src_bus_id; 1238 io_apic_ints[intr].src_bus_irq = entry->src_bus_irq; 1239 if (entry->dst_apic_id == 255) { 1240 /* This signal goes to all IO APICS. 
Select an IO APIC 1241 with sufficient number of interrupt pins */ 1242 for (apic = 0; apic < mp_napics; apic++) 1243 if (((io_apic_read(apic, IOAPIC_VER) & 1244 IOART_VER_MAXREDIR) >> MAXREDIRSHIFT) >= 1245 entry->dst_apic_int) 1246 break; 1247 if (apic < mp_napics) 1248 io_apic_ints[intr].dst_apic_id = IO_TO_ID(apic); 1249 else 1250 io_apic_ints[intr].dst_apic_id = entry->dst_apic_id; 1251 } else 1252 io_apic_ints[intr].dst_apic_id = entry->dst_apic_id; 1253 io_apic_ints[intr].dst_apic_int = entry->dst_apic_int; 1254 1255 return 1; 1256} 1257 1258 1259static int 1260apic_int_is_bus_type(int intr, int bus_type) 1261{ 1262 int bus; 1263 1264 for (bus = 0; bus < mp_nbusses; ++bus) 1265 if ((bus_data[bus].bus_id == io_apic_ints[intr].src_bus_id) 1266 && ((int) bus_data[bus].bus_type == bus_type)) 1267 return 1; 1268 1269 return 0; 1270} 1271 1272 1273/* 1274 * Given a traditional ISA INT mask, return an APIC mask. 1275 */ 1276u_int 1277isa_apic_mask(u_int isa_mask) 1278{ 1279 int isa_irq; 1280 int apic_pin; 1281 1282#if defined(SKIP_IRQ15_REDIRECT) 1283 if (isa_mask == (1 << 15)) { 1284 printf("skipping ISA IRQ15 redirect\n"); 1285 return isa_mask; 1286 } 1287#endif /* SKIP_IRQ15_REDIRECT */ 1288 1289 isa_irq = ffs(isa_mask); /* find its bit position */ 1290 if (isa_irq == 0) /* doesn't exist */ 1291 return 0; 1292 --isa_irq; /* make it zero based */ 1293 1294 apic_pin = isa_apic_irq(isa_irq); /* look for APIC connection */ 1295 if (apic_pin == -1) 1296 return 0; 1297 1298 return (1 << apic_pin); /* convert pin# to a mask */ 1299} 1300 1301 1302/* 1303 * Determine which APIC pin an ISA/EISA INT is attached to. 
1304 */ 1305#define INTTYPE(I) (io_apic_ints[(I)].int_type) 1306#define INTPIN(I) (io_apic_ints[(I)].dst_apic_int) 1307#define INTIRQ(I) (io_apic_ints[(I)].int_vector) 1308#define INTAPIC(I) (ID_TO_IO(io_apic_ints[(I)].dst_apic_id)) 1309 1310#define SRCBUSIRQ(I) (io_apic_ints[(I)].src_bus_irq) 1311int 1312isa_apic_irq(int isa_irq) 1313{ 1314 int intr; 1315 1316 for (intr = 0; intr < nintrs; ++intr) { /* check each record */ 1317 if (INTTYPE(intr) == 0) { /* standard INT */ 1318 if (SRCBUSIRQ(intr) == isa_irq) { 1319 if (apic_int_is_bus_type(intr, ISA) || 1320 apic_int_is_bus_type(intr, EISA)) 1321 return INTIRQ(intr); /* found */ 1322 } 1323 } 1324 } 1325 return -1; /* NOT found */ 1326} 1327 1328 1329/* 1330 * Determine which APIC pin a PCI INT is attached to. 1331 */ 1332#define SRCBUSID(I) (io_apic_ints[(I)].src_bus_id) 1333#define SRCBUSDEVICE(I) ((io_apic_ints[(I)].src_bus_irq >> 2) & 0x1f) 1334#define SRCBUSLINE(I) (io_apic_ints[(I)].src_bus_irq & 0x03) 1335int 1336pci_apic_irq(int pciBus, int pciDevice, int pciInt) 1337{ 1338 int intr; 1339 1340 --pciInt; /* zero based */ 1341 1342 for (intr = 0; intr < nintrs; ++intr) /* check each record */ 1343 if ((INTTYPE(intr) == 0) /* standard INT */ 1344 && (SRCBUSID(intr) == pciBus) 1345 && (SRCBUSDEVICE(intr) == pciDevice) 1346 && (SRCBUSLINE(intr) == pciInt)) /* a candidate IRQ */ 1347 if (apic_int_is_bus_type(intr, PCI)) 1348 return INTIRQ(intr); /* exact match */ 1349 1350 return -1; /* NOT found */ 1351} 1352 1353int 1354next_apic_irq(int irq) 1355{ 1356 int intr, ointr; 1357 int bus, bustype; 1358 1359 bus = 0; 1360 bustype = 0; 1361 for (intr = 0; intr < nintrs; intr++) { 1362 if (INTIRQ(intr) != irq || INTTYPE(intr) != 0) 1363 continue; 1364 bus = SRCBUSID(intr); 1365 bustype = apic_bus_type(bus); 1366 if (bustype != ISA && 1367 bustype != EISA && 1368 bustype != PCI) 1369 continue; 1370 break; 1371 } 1372 if (intr >= nintrs) { 1373 return -1; 1374 } 1375 for (ointr = intr + 1; ointr < nintrs; ointr++) { 
1376 if (INTTYPE(ointr) != 0) 1377 continue; 1378 if (bus != SRCBUSID(ointr)) 1379 continue; 1380 if (bustype == PCI) { 1381 if (SRCBUSDEVICE(intr) != SRCBUSDEVICE(ointr)) 1382 continue; 1383 if (SRCBUSLINE(intr) != SRCBUSLINE(ointr)) 1384 continue; 1385 } 1386 if (bustype == ISA || bustype == EISA) { 1387 if (SRCBUSIRQ(intr) != SRCBUSIRQ(ointr)) 1388 continue; 1389 } 1390 if (INTPIN(intr) == INTPIN(ointr)) 1391 continue; 1392 break; 1393 } 1394 if (ointr >= nintrs) { 1395 return -1; 1396 } 1397 return INTIRQ(ointr); 1398} 1399#undef SRCBUSLINE 1400#undef SRCBUSDEVICE 1401#undef SRCBUSID 1402#undef SRCBUSIRQ 1403 1404#undef INTPIN 1405#undef INTIRQ 1406#undef INTAPIC 1407#undef INTTYPE 1408 1409 1410/* 1411 * Reprogram the MB chipset to NOT redirect an ISA INTerrupt. 1412 * 1413 * XXX FIXME: 1414 * Exactly what this means is unclear at this point. It is a solution 1415 * for motherboards that redirect the MBIRQ0 pin. Generically a motherboard 1416 * could route any of the ISA INTs to upper (>15) IRQ values. But most would 1417 * NOT be redirected via MBIRQ0, thus "undirect()ing" them would NOT be an 1418 * option. 
1419 */ 1420int 1421undirect_isa_irq(int rirq) 1422{ 1423#if defined(READY) 1424 if (bootverbose) 1425 printf("Freeing redirected ISA irq %d.\n", rirq); 1426 /** FIXME: tickle the MB redirector chip */ 1427 return ???; 1428#else 1429 if (bootverbose) 1430 printf("Freeing (NOT implemented) redirected ISA irq %d.\n", rirq); 1431 return 0; 1432#endif /* READY */ 1433} 1434 1435 1436/* 1437 * Reprogram the MB chipset to NOT redirect a PCI INTerrupt 1438 */ 1439int 1440undirect_pci_irq(int rirq) 1441{ 1442#if defined(READY) 1443 if (bootverbose) 1444 printf("Freeing redirected PCI irq %d.\n", rirq); 1445 1446 /** FIXME: tickle the MB redirector chip */ 1447 return ???; 1448#else 1449 if (bootverbose) 1450 printf("Freeing (NOT implemented) redirected PCI irq %d.\n", 1451 rirq); 1452 return 0; 1453#endif /* READY */ 1454} 1455 1456 1457/* 1458 * given a bus ID, return: 1459 * the bus type if found 1460 * -1 if NOT found 1461 */ 1462int 1463apic_bus_type(int id) 1464{ 1465 int x; 1466 1467 for (x = 0; x < mp_nbusses; ++x) 1468 if (bus_data[x].bus_id == id) 1469 return bus_data[x].bus_type; 1470 1471 return -1; 1472} 1473 1474 1475/* 1476 * given a LOGICAL APIC# and pin#, return: 1477 * the associated src bus ID if found 1478 * -1 if NOT found 1479 */ 1480int 1481apic_src_bus_id(int apic, int pin) 1482{ 1483 int x; 1484 1485 /* search each of the possible INTerrupt sources */ 1486 for (x = 0; x < nintrs; ++x) 1487 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1488 (pin == io_apic_ints[x].dst_apic_int)) 1489 return (io_apic_ints[x].src_bus_id); 1490 1491 return -1; /* NOT found */ 1492} 1493 1494 1495/* 1496 * given a LOGICAL APIC# and pin#, return: 1497 * the associated src bus IRQ if found 1498 * -1 if NOT found 1499 */ 1500int 1501apic_src_bus_irq(int apic, int pin) 1502{ 1503 int x; 1504 1505 for (x = 0; x < nintrs; x++) 1506 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1507 (pin == io_apic_ints[x].dst_apic_int)) 1508 return 
(io_apic_ints[x].src_bus_irq); 1509 1510 return -1; /* NOT found */ 1511} 1512 1513 1514/* 1515 * given a LOGICAL APIC# and pin#, return: 1516 * the associated INTerrupt type if found 1517 * -1 if NOT found 1518 */ 1519int 1520apic_int_type(int apic, int pin) 1521{ 1522 int x; 1523 1524 /* search each of the possible INTerrupt sources */ 1525 for (x = 0; x < nintrs; ++x) 1526 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1527 (pin == io_apic_ints[x].dst_apic_int)) 1528 return (io_apic_ints[x].int_type); 1529 1530 return -1; /* NOT found */ 1531} 1532 1533int 1534apic_irq(int apic, int pin) 1535{ 1536 int x; 1537 int res; 1538 1539 for (x = 0; x < nintrs; ++x) 1540 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1541 (pin == io_apic_ints[x].dst_apic_int)) { 1542 res = io_apic_ints[x].int_vector; 1543 if (res == 0xff) 1544 return -1; 1545 if (apic != int_to_apicintpin[res].ioapic) 1546 panic("apic_irq: inconsistent table"); 1547 if (pin != int_to_apicintpin[res].int_pin) 1548 panic("apic_irq inconsistent table (2)"); 1549 return res; 1550 } 1551 return -1; 1552} 1553 1554 1555/* 1556 * given a LOGICAL APIC# and pin#, return: 1557 * the associated trigger mode if found 1558 * -1 if NOT found 1559 */ 1560int 1561apic_trigger(int apic, int pin) 1562{ 1563 int x; 1564 1565 /* search each of the possible INTerrupt sources */ 1566 for (x = 0; x < nintrs; ++x) 1567 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1568 (pin == io_apic_ints[x].dst_apic_int)) 1569 return ((io_apic_ints[x].int_flags >> 2) & 0x03); 1570 1571 return -1; /* NOT found */ 1572} 1573 1574 1575/* 1576 * given a LOGICAL APIC# and pin#, return: 1577 * the associated 'active' level if found 1578 * -1 if NOT found 1579 */ 1580int 1581apic_polarity(int apic, int pin) 1582{ 1583 int x; 1584 1585 /* search each of the possible INTerrupt sources */ 1586 for (x = 0; x < nintrs; ++x) 1587 if ((apic == ID_TO_IO(io_apic_ints[x].dst_apic_id)) && 1588 (pin == io_apic_ints[x].dst_apic_int)) 
1589 return (io_apic_ints[x].int_flags & 0x03); 1590 1591 return -1; /* NOT found */ 1592} 1593 1594 1595/* 1596 * set data according to MP defaults 1597 * FIXME: probably not complete yet... 1598 */ 1599static void 1600default_mp_table(int type) 1601{ 1602 int ap_cpu_id; 1603#if defined(APIC_IO) 1604 u_int32_t ux; 1605 int io_apic_id; 1606 int pin; 1607#endif /* APIC_IO */ 1608 1609#if 0 1610 printf(" MP default config type: %d\n", type); 1611 switch (type) { 1612 case 1: 1613 printf(" bus: ISA, APIC: 82489DX\n"); 1614 break; 1615 case 2: 1616 printf(" bus: EISA, APIC: 82489DX\n"); 1617 break; 1618 case 3: 1619 printf(" bus: EISA, APIC: 82489DX\n"); 1620 break; 1621 case 4: 1622 printf(" bus: MCA, APIC: 82489DX\n"); 1623 break; 1624 case 5: 1625 printf(" bus: ISA+PCI, APIC: Integrated\n"); 1626 break; 1627 case 6: 1628 printf(" bus: EISA+PCI, APIC: Integrated\n"); 1629 break; 1630 case 7: 1631 printf(" bus: MCA+PCI, APIC: Integrated\n"); 1632 break; 1633 default: 1634 printf(" future type\n"); 1635 break; 1636 /* NOTREACHED */ 1637 } 1638#endif /* 0 */ 1639 1640 boot_cpu_id = (lapic.id & APIC_ID_MASK) >> 24; 1641 ap_cpu_id = (boot_cpu_id == 0) ? 
1 : 0; 1642 1643 /* BSP */ 1644 CPU_TO_ID(0) = boot_cpu_id; 1645 ID_TO_CPU(boot_cpu_id) = 0; 1646 1647 /* one and only AP */ 1648 CPU_TO_ID(1) = ap_cpu_id; 1649 ID_TO_CPU(ap_cpu_id) = 1; 1650 1651#if defined(APIC_IO) 1652 /* one and only IO APIC */ 1653 io_apic_id = (io_apic_read(0, IOAPIC_ID) & APIC_ID_MASK) >> 24; 1654 1655 /* 1656 * sanity check, refer to MP spec section 3.6.6, last paragraph 1657 * necessary as some hardware isn't properly setting up the IO APIC 1658 */ 1659#if defined(REALLY_ANAL_IOAPICID_VALUE) 1660 if (io_apic_id != 2) { 1661#else 1662 if ((io_apic_id == 0) || (io_apic_id == 1) || (io_apic_id == 15)) { 1663#endif /* REALLY_ANAL_IOAPICID_VALUE */ 1664 ux = io_apic_read(0, IOAPIC_ID); /* get current contents */ 1665 ux &= ~APIC_ID_MASK; /* clear the ID field */ 1666 ux |= 0x02000000; /* set it to '2' */ 1667 io_apic_write(0, IOAPIC_ID, ux); /* write new value */ 1668 ux = io_apic_read(0, IOAPIC_ID); /* re-read && test */ 1669 if ((ux & APIC_ID_MASK) != 0x02000000) 1670 panic("can't control IO APIC ID, reg: 0x%08x", ux); 1671 io_apic_id = 2; 1672 } 1673 IO_TO_ID(0) = io_apic_id; 1674 ID_TO_IO(io_apic_id) = 0; 1675#endif /* APIC_IO */ 1676 1677 /* fill out bus entries */ 1678 switch (type) { 1679 case 1: 1680 case 2: 1681 case 3: 1682 case 4: 1683 case 5: 1684 case 6: 1685 case 7: 1686 bus_data[0].bus_id = default_data[type - 1][1]; 1687 bus_data[0].bus_type = default_data[type - 1][2]; 1688 bus_data[1].bus_id = default_data[type - 1][3]; 1689 bus_data[1].bus_type = default_data[type - 1][4]; 1690 break; 1691 1692 /* case 4: case 7: MCA NOT supported */ 1693 default: /* illegal/reserved */ 1694 panic("BAD default MP config: %d", type); 1695 /* NOTREACHED */ 1696 } 1697 1698#if defined(APIC_IO) 1699 /* general cases from MP v1.4, table 5-2 */ 1700 for (pin = 0; pin < 16; ++pin) { 1701 io_apic_ints[pin].int_type = 0; 1702 io_apic_ints[pin].int_flags = 0x05; /* edge/active-hi */ 1703 io_apic_ints[pin].src_bus_id = 0; 1704 
io_apic_ints[pin].src_bus_irq = pin; /* IRQ2 caught below */ 1705 io_apic_ints[pin].dst_apic_id = io_apic_id; 1706 io_apic_ints[pin].dst_apic_int = pin; /* 1-to-1 */ 1707 } 1708 1709 /* special cases from MP v1.4, table 5-2 */ 1710 if (type == 2) { 1711 io_apic_ints[2].int_type = 0xff; /* N/C */ 1712 io_apic_ints[13].int_type = 0xff; /* N/C */ 1713#if !defined(APIC_MIXED_MODE) 1714 /** FIXME: ??? */ 1715 panic("sorry, can't support type 2 default yet"); 1716#endif /* APIC_MIXED_MODE */ 1717 } 1718 else 1719 io_apic_ints[2].src_bus_irq = 0; /* ISA IRQ0 is on APIC INT 2 */ 1720 1721 if (type == 7) 1722 io_apic_ints[0].int_type = 0xff; /* N/C */ 1723 else 1724 io_apic_ints[0].int_type = 3; /* vectored 8259 */ 1725#endif /* APIC_IO */ 1726} 1727 1728 1729/* 1730 * initialize all the SMP locks 1731 */ 1732 1733/* critical region around IO APIC, apic_imen */ 1734struct simplelock imen_lock; 1735 1736/* critical region around splxx(), cpl, cml, cil, ipending */ 1737struct simplelock cpl_lock; 1738 1739/* Make FAST_INTR() routines sequential */ 1740struct simplelock fast_intr_lock; 1741 1742/* critical region around INTR() routines */ 1743struct simplelock intr_lock; 1744 1745/* lock regions protected in UP kernel via cli/sti */ 1746struct simplelock mpintr_lock; 1747 1748/* lock region used by kernel profiling */ 1749struct simplelock mcount_lock; 1750 1751#ifdef USE_COMLOCK 1752/* locks com (tty) data/hardware accesses: a FASTINTR() */ 1753struct simplelock com_lock; 1754#endif /* USE_COMLOCK */ 1755 1756#ifdef USE_CLOCKLOCK 1757/* lock regions around the clock hardware */ 1758struct simplelock clock_lock; 1759#endif /* USE_CLOCKLOCK */ 1760 1761/* lock around the MP rendezvous */ 1762static struct simplelock smp_rv_lock; 1763 1764static void 1765init_locks(void) 1766{ 1767 /* 1768 * Get the initial mp_lock with a count of 1 for the BSP. 1769 * This uses a LOGICAL cpu ID, ie BSP == 0. 
1770 */ 1771 mp_lock = 0x00000001; 1772 1773 /* ISR uses its own "giant lock" */ 1774 isr_lock = FREE_LOCK; 1775 1776#if defined(APIC_INTR_DIAGNOSTIC) && defined(APIC_INTR_DIAGNOSTIC_IRQ) 1777 s_lock_init((struct simplelock*)&apic_itrace_debuglock); 1778#endif 1779 1780 s_lock_init((struct simplelock*)&mpintr_lock); 1781 1782 s_lock_init((struct simplelock*)&mcount_lock); 1783 1784 s_lock_init((struct simplelock*)&fast_intr_lock); 1785 s_lock_init((struct simplelock*)&intr_lock); 1786 s_lock_init((struct simplelock*)&imen_lock); 1787 s_lock_init((struct simplelock*)&cpl_lock); 1788 s_lock_init(&smp_rv_lock); 1789 1790#ifdef USE_COMLOCK 1791 s_lock_init((struct simplelock*)&com_lock); 1792#endif /* USE_COMLOCK */ 1793#ifdef USE_CLOCKLOCK 1794 s_lock_init((struct simplelock*)&clock_lock); 1795#endif /* USE_CLOCKLOCK */ 1796} 1797 1798 1799/* Wait for all APs to be fully initialized */ 1800extern int wait_ap(unsigned int); 1801 1802/* 1803 * start each AP in our list 1804 */ 1805static int 1806start_all_aps(u_int boot_addr) 1807{ 1808 int x, i, pg; 1809 u_char mpbiosreason; 1810 u_long mpbioswarmvec; 1811 struct globaldata *gd; 1812 char *stack; 1813 1814 POSTCODE(START_ALL_APS_POST); 1815 1816 /* initialize BSP's local APIC */ 1817 apic_initialize(); 1818 bsp_apic_ready = 1; 1819 1820 /* install the AP 1st level boot code */ 1821 install_ap_tramp(boot_addr); 1822 1823 1824 /* save the current value of the warm-start vector */ 1825 mpbioswarmvec = *((u_long *) WARMBOOT_OFF); 1826#ifndef PC98 1827 outb(CMOS_REG, BIOS_RESET); 1828 mpbiosreason = inb(CMOS_DATA); 1829#endif 1830 1831 /* record BSP in CPU map */ 1832 all_cpus = 1; 1833 1834 /* set up 0 -> 4MB P==V mapping for AP boot */ 1835 *(int *)PTD = PG_V | PG_RW | ((uintptr_t)(void *)KPTphys & PG_FRAME); 1836 invltlb(); 1837 1838 /* start each AP */ 1839 for (x = 1; x <= mp_naps; ++x) { 1840 1841 /* This is a bit verbose, it will go away soon. 
*/ 1842 1843 /* first page of AP's private space */ 1844 pg = x * i386_btop(sizeof(struct privatespace)); 1845 1846 /* allocate a new private data page */ 1847 gd = (struct globaldata *)kmem_alloc(kernel_map, PAGE_SIZE); 1848 1849 /* wire it into the private page table page */ 1850 SMPpt[pg] = (pt_entry_t)(PG_V | PG_RW | vtophys(gd)); 1851 1852 /* allocate and set up an idle stack data page */ 1853 stack = (char *)kmem_alloc(kernel_map, UPAGES*PAGE_SIZE); 1854 for (i = 0; i < UPAGES; i++) 1855 SMPpt[pg + 5 + i] = (pt_entry_t) 1856 (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack)); 1857 1858 SMPpt[pg + 1] = 0; /* *prv_CMAP1 */ 1859 SMPpt[pg + 2] = 0; /* *prv_CMAP2 */ 1860 SMPpt[pg + 3] = 0; /* *prv_CMAP3 */ 1861 SMPpt[pg + 4] = 0; /* *prv_PMAP1 */ 1862 1863 /* prime data page for it to use */ 1864 gd->gd_cpuid = x; 1865 gd->gd_cpu_lockid = x << 24; 1866 gd->gd_prv_CMAP1 = &SMPpt[pg + 1]; 1867 gd->gd_prv_CMAP2 = &SMPpt[pg + 2]; 1868 gd->gd_prv_CMAP3 = &SMPpt[pg + 3]; 1869 gd->gd_prv_PMAP1 = &SMPpt[pg + 4]; 1870 gd->gd_prv_CADDR1 = SMP_prvspace[x].CPAGE1; 1871 gd->gd_prv_CADDR2 = SMP_prvspace[x].CPAGE2; 1872 gd->gd_prv_CADDR3 = SMP_prvspace[x].CPAGE3; 1873 gd->gd_prv_PADDR1 = (unsigned *)SMP_prvspace[x].PPAGE1; 1874 1875 /* setup a vector to our boot code */ 1876 *((volatile u_short *) WARMBOOT_OFF) = WARMBOOT_TARGET; 1877 *((volatile u_short *) WARMBOOT_SEG) = (boot_addr >> 4); 1878#ifndef PC98 1879 outb(CMOS_REG, BIOS_RESET); 1880 outb(CMOS_DATA, BIOS_WARM); /* 'warm-start' */ 1881#endif 1882 1883 bootSTK = &SMP_prvspace[x].idlestack[UPAGES*PAGE_SIZE]; 1884 bootAP = x; 1885 1886 /* attempt to start the Application Processor */ 1887 CHECK_INIT(99); /* setup checkpoints */ 1888 if (!start_ap(x, boot_addr)) { 1889 printf("AP #%d (PHY# %d) failed!\n", x, CPU_TO_ID(x)); 1890 CHECK_PRINT("trace"); /* show checkpoints */ 1891 /* better panic as the AP may be running loose */ 1892 printf("panic y/n? 
[y] "); 1893 if (cngetc() != 'n') 1894 panic("bye-bye"); 1895 } 1896 CHECK_PRINT("trace"); /* show checkpoints */ 1897 1898 /* record its version info */ 1899 cpu_apic_versions[x] = cpu_apic_versions[0]; 1900 1901 all_cpus |= (1 << x); /* record AP in CPU map */ 1902 } 1903 1904 /* build our map of 'other' CPUs */ 1905 other_cpus = all_cpus & ~(1 << cpuid); 1906 1907 /* fill in our (BSP) APIC version */ 1908 cpu_apic_versions[0] = lapic.version; 1909 1910 /* restore the warmstart vector */ 1911 *(u_long *) WARMBOOT_OFF = mpbioswarmvec; 1912#ifndef PC98 1913 outb(CMOS_REG, BIOS_RESET); 1914 outb(CMOS_DATA, mpbiosreason); 1915#endif 1916 1917 /* 1918 * Set up the idle context for the BSP. Similar to above except 1919 * that some was done by locore, some by pmap.c and some is implicit 1920 * because the BSP is cpu#0 and the page is initially zero, and also 1921 * because we can refer to variables by name on the BSP.. 1922 */ 1923 1924 /* Allocate and setup BSP idle stack */ 1925 stack = (char *)kmem_alloc(kernel_map, UPAGES * PAGE_SIZE); 1926 for (i = 0; i < UPAGES; i++) 1927 SMPpt[5 + i] = (pt_entry_t) 1928 (PG_V | PG_RW | vtophys(PAGE_SIZE * i + stack)); 1929 1930 *(int *)PTD = 0; 1931 pmap_set_opt(); 1932 1933 /* number of APs actually started */ 1934 return mp_ncpus - 1; 1935} 1936 1937 1938/* 1939 * load the 1st level AP boot code into base memory. 
1940 */ 1941 1942/* targets for relocation */ 1943extern void bigJump(void); 1944extern void bootCodeSeg(void); 1945extern void bootDataSeg(void); 1946extern void MPentry(void); 1947extern u_int MP_GDT; 1948extern u_int mp_gdtbase; 1949 1950static void 1951install_ap_tramp(u_int boot_addr) 1952{ 1953 int x; 1954 int size = *(int *) ((u_long) & bootMP_size); 1955 u_char *src = (u_char *) ((u_long) bootMP); 1956 u_char *dst = (u_char *) boot_addr + KERNBASE; 1957 u_int boot_base = (u_int) bootMP; 1958 u_int8_t *dst8; 1959 u_int16_t *dst16; 1960 u_int32_t *dst32; 1961 1962 POSTCODE(INSTALL_AP_TRAMP_POST); 1963 1964 for (x = 0; x < size; ++x) 1965 *dst++ = *src++; 1966 1967 /* 1968 * modify addresses in code we just moved to basemem. unfortunately we 1969 * need fairly detailed info about mpboot.s for this to work. changes 1970 * to mpboot.s might require changes here. 1971 */ 1972 1973 /* boot code is located in KERNEL space */ 1974 dst = (u_char *) boot_addr + KERNBASE; 1975 1976 /* modify the lgdt arg */ 1977 dst32 = (u_int32_t *) (dst + ((u_int) & mp_gdtbase - boot_base)); 1978 *dst32 = boot_addr + ((u_int) & MP_GDT - boot_base); 1979 1980 /* modify the ljmp target for MPentry() */ 1981 dst32 = (u_int32_t *) (dst + ((u_int) bigJump - boot_base) + 1); 1982 *dst32 = ((u_int) MPentry - KERNBASE); 1983 1984 /* modify the target for boot code segment */ 1985 dst16 = (u_int16_t *) (dst + ((u_int) bootCodeSeg - boot_base)); 1986 dst8 = (u_int8_t *) (dst16 + 1); 1987 *dst16 = (u_int) boot_addr & 0xffff; 1988 *dst8 = ((u_int) boot_addr >> 16) & 0xff; 1989 1990 /* modify the target for boot data segment */ 1991 dst16 = (u_int16_t *) (dst + ((u_int) bootDataSeg - boot_base)); 1992 dst8 = (u_int8_t *) (dst16 + 1); 1993 *dst16 = (u_int) boot_addr & 0xffff; 1994 *dst8 = ((u_int) boot_addr >> 16) & 0xff; 1995} 1996 1997 1998/* 1999 * this function starts the AP (application processor) identified 2000 * by the APIC ID 'physicalCpu'. 
It does quite a "song and dance" 2001 * to accomplish this. This is necessary because of the nuances 2002 * of the different hardware we might encounter. It ain't pretty, 2003 * but it seems to work. 2004 */ 2005static int 2006start_ap(int logical_cpu, u_int boot_addr) 2007{ 2008 int physical_cpu; 2009 int vector; 2010 int cpus; 2011 u_long icr_lo, icr_hi; 2012 2013 POSTCODE(START_AP_POST); 2014 2015 /* get the PHYSICAL APIC ID# */ 2016 physical_cpu = CPU_TO_ID(logical_cpu); 2017 2018 /* calculate the vector */ 2019 vector = (boot_addr >> 12) & 0xff; 2020 2021 /* used as a watchpoint to signal AP startup */ 2022 cpus = mp_ncpus; 2023 2024 /* 2025 * first we do an INIT/RESET IPI this INIT IPI might be run, reseting 2026 * and running the target CPU. OR this INIT IPI might be latched (P5 2027 * bug), CPU waiting for STARTUP IPI. OR this INIT IPI might be 2028 * ignored. 2029 */ 2030 2031 /* setup the address for the target AP */ 2032 icr_hi = lapic.icr_hi & ~APIC_ID_MASK; 2033 icr_hi |= (physical_cpu << 24); 2034 lapic.icr_hi = icr_hi; 2035 2036 /* do an INIT IPI: assert RESET */ 2037 icr_lo = lapic.icr_lo & 0xfff00000; 2038 lapic.icr_lo = icr_lo | 0x0000c500; 2039 2040 /* wait for pending status end */ 2041 while (lapic.icr_lo & APIC_DELSTAT_MASK) 2042 /* spin */ ; 2043 2044 /* do an INIT IPI: deassert RESET */ 2045 lapic.icr_lo = icr_lo | 0x00008500; 2046 2047 /* wait for pending status end */ 2048 u_sleep(10000); /* wait ~10mS */ 2049 while (lapic.icr_lo & APIC_DELSTAT_MASK) 2050 /* spin */ ; 2051 2052 /* 2053 * next we do a STARTUP IPI: the previous INIT IPI might still be 2054 * latched, (P5 bug) this 1st STARTUP would then terminate 2055 * immediately, and the previously started INIT IPI would continue. OR 2056 * the previous INIT IPI has already run. and this STARTUP IPI will 2057 * run. OR the previous INIT IPI was ignored. and this STARTUP IPI 2058 * will run. 
2059 */ 2060 2061 /* do a STARTUP IPI */ 2062 lapic.icr_lo = icr_lo | 0x00000600 | vector; 2063 while (lapic.icr_lo & APIC_DELSTAT_MASK) 2064 /* spin */ ; 2065 u_sleep(200); /* wait ~200uS */ 2066 2067 /* 2068 * finally we do a 2nd STARTUP IPI: this 2nd STARTUP IPI should run IF 2069 * the previous STARTUP IPI was cancelled by a latched INIT IPI. OR 2070 * this STARTUP IPI will be ignored, as only ONE STARTUP IPI is 2071 * recognized after hardware RESET or INIT IPI. 2072 */ 2073 2074 lapic.icr_lo = icr_lo | 0x00000600 | vector; 2075 while (lapic.icr_lo & APIC_DELSTAT_MASK) 2076 /* spin */ ; 2077 u_sleep(200); /* wait ~200uS */ 2078 2079 /* wait for it to start */ 2080 set_apic_timer(5000000);/* == 5 seconds */ 2081 while (read_apic_timer()) 2082 if (mp_ncpus > cpus) 2083 return 1; /* return SUCCESS */ 2084 2085 return 0; /* return FAILURE */ 2086} 2087 2088 2089/* 2090 * Flush the TLB on all other CPU's 2091 * 2092 * XXX: Needs to handshake and wait for completion before proceding. 2093 */ 2094void 2095smp_invltlb(void) 2096{ 2097#if defined(APIC_IO) 2098 if (smp_started && invltlb_ok) 2099 all_but_self_ipi(XINVLTLB_OFFSET); 2100#endif /* APIC_IO */ 2101} 2102 2103void 2104invlpg(u_int addr) 2105{ 2106 __asm __volatile("invlpg (%0)"::"r"(addr):"memory"); 2107 2108 /* send a message to the other CPUs */ 2109 smp_invltlb(); 2110} 2111 2112void 2113invltlb(void) 2114{ 2115 u_long temp; 2116 2117 /* 2118 * This should be implemented as load_cr3(rcr3()) when load_cr3() is 2119 * inlined. 2120 */ 2121 __asm __volatile("movl %%cr3, %0; movl %0, %%cr3":"=r"(temp) :: "memory"); 2122 2123 /* send a message to the other CPUs */ 2124 smp_invltlb(); 2125} 2126 2127 2128/* 2129 * When called the executing CPU will send an IPI to all other CPUs 2130 * requesting that they halt execution. 2131 * 2132 * Usually (but not necessarily) called with 'other_cpus' as its arg. 2133 * 2134 * - Signals all CPUs in map to stop. 2135 * - Waits for each to stop. 
2136 * 2137 * Returns: 2138 * -1: error 2139 * 0: NA 2140 * 1: ok 2141 * 2142 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs 2143 * from executing at same time. 2144 */ 2145int 2146stop_cpus(u_int map) 2147{ 2148 if (!smp_started) 2149 return 0; 2150 2151 /* send the Xcpustop IPI to all CPUs in map */ 2152 selected_apic_ipi(map, XCPUSTOP_OFFSET, APIC_DELMODE_FIXED); 2153 2154 while ((stopped_cpus & map) != map) 2155 /* spin */ ; 2156 2157 return 1; 2158} 2159 2160 2161/* 2162 * Called by a CPU to restart stopped CPUs. 2163 * 2164 * Usually (but not necessarily) called with 'stopped_cpus' as its arg. 2165 * 2166 * - Signals all CPUs in map to restart. 2167 * - Waits for each to restart. 2168 * 2169 * Returns: 2170 * -1: error 2171 * 0: NA 2172 * 1: ok 2173 */ 2174int 2175restart_cpus(u_int map) 2176{ 2177 if (!smp_started) 2178 return 0; 2179 2180 started_cpus = map; /* signal other cpus to restart */ 2181 2182 while ((stopped_cpus & map) != 0) /* wait for each to clear its bit */ 2183 /* spin */ ; 2184 2185 return 1; 2186} 2187 2188int smp_active = 0; /* are the APs allowed to run? */ 2189SYSCTL_INT(_machdep, OID_AUTO, smp_active, CTLFLAG_RW, &smp_active, 0, ""); 2190 2191/* XXX maybe should be hw.ncpu */ 2192static int smp_cpus = 1; /* how many cpu's running */ 2193SYSCTL_INT(_machdep, OID_AUTO, smp_cpus, CTLFLAG_RD, &smp_cpus, 0, ""); 2194 2195int invltlb_ok = 0; /* throttle smp_invltlb() till safe */ 2196SYSCTL_INT(_machdep, OID_AUTO, invltlb_ok, CTLFLAG_RW, &invltlb_ok, 0, ""); 2197 2198/* Warning: Do not staticize. Used from swtch.s */ 2199int do_page_zero_idle = 1; /* bzero pages for fun and profit in idleloop */ 2200SYSCTL_INT(_machdep, OID_AUTO, do_page_zero_idle, CTLFLAG_RW, 2201 &do_page_zero_idle, 0, ""); 2202 2203/* Is forwarding of a interrupt to the CPU holding the ISR lock enabled ? 
 */

/*
 * Enable forwarding of a hardware interrupt to another CPU.
 * NOTE(review): only the knob is defined here; the code that consults it
 * is elsewhere in this file -- confirm before changing its semantics.
 */
int forward_irq_enabled = 1;
SYSCTL_INT(_machdep, OID_AUTO, forward_irq_enabled, CTLFLAG_RW,
	   &forward_irq_enabled, 0, "");

/* Enable forwarding of a signal to a process running on a different CPU */
static int forward_signal_enabled = 1;
SYSCTL_INT(_machdep, OID_AUTO, forward_signal_enabled, CTLFLAG_RW,
	   &forward_signal_enabled, 0, "");

/* Enable forwarding of roundrobin to all other cpus */
static int forward_roundrobin_enabled = 1;
SYSCTL_INT(_machdep, OID_AUTO, forward_roundrobin_enabled, CTLFLAG_RW,
	   &forward_roundrobin_enabled, 0, "");

/*
 * This is called once the rest of the system is up and running and we're
 * ready to let the AP's out of the pen.
 *
 * Runs on each application processor: counts the CPU in (smp_cpus),
 * builds its 'other_cpus' mask, sets up the FPU and local APIC, verifies
 * that the software cpuid matches the local APIC id, and -- once the last
 * AP has arrived -- flips the global flags that enable full SMP operation
 * (invltlb_ok, smp_started, smp_active).
 */
void ap_init(void);

void
ap_init()
{
	u_int	apic_id;

	/* BSP may have changed PTD while we're waiting for the lock */
	cpu_invltlb();

	smp_cpus++;

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
	/* Load the relocated IDT used for the Pentium F00F bug workaround. */
	lidt(&r_idt);
#endif

	/* Build our map of 'other' CPUs. */
	other_cpus = all_cpus & ~(1 << cpuid);

	printf("SMP: AP CPU #%d Launched!\n", cpuid);

	/* XXX FIXME: i386 specific, and redundant: Setup the FPU. */
	load_cr0((rcr0() & ~CR0_EM) | CR0_MP | CR0_NE | CR0_TS);

	/* set up FPU state on the AP */
	npxinit(__INITIAL_NPXCW__);

	/* A quick check from sanity claus */
	apic_id = (apic_id_to_logical[(lapic.id & 0x0f000000) >> 24]);
	if (cpuid != apic_id) {
		printf("SMP: cpuid = %d\n", cpuid);
		printf("SMP: apic_id = %d\n", apic_id);
		printf("PTD[MPPTDI] = %p\n", (void *)PTD[MPPTDI]);
		panic("cpuid mismatch! boom!!");
	}

	/* Init local apic for irq's */
	apic_initialize();

	/* Set memory range attributes for this CPU to match the BSP */
	mem_range_AP_init();

	/*
	 * Activate smp_invltlb, although strictly speaking, this isn't
	 * quite correct yet.  We should have a bitfield for cpus willing
	 * to accept TLB flush IPI's or something and sync them.
	 */
	if (smp_cpus == mp_ncpus) {
		invltlb_ok = 1;
		smp_started = 1;	/* enable IPI's, tlb shootdown, freezes etc */
		smp_active = 1;		/* historic */
	}
}

#ifdef BETTER_CLOCK

/* States reported by the XCPUCHECKSTATE IPI handler for each probed CPU. */
#define CHECKSTATE_USER	0	/* CPU was executing in user mode */
#define CHECKSTATE_SYS	1	/* CPU was in the kernel, not in an interrupt */
#define CHECKSTATE_INTR	2	/* CPU was servicing an interrupt */

/* Do not staticize.  Used from apic_vector.s */
struct proc*	checkstate_curproc[NCPU];	/* curproc on each probed CPU */
int		checkstate_cpustate[NCPU];	/* CHECKSTATE_* per probed CPU */
u_long		checkstate_pc[NCPU];		/* interrupted PC per probed CPU */

extern long cp_time[CPUSTATES];

/*
 * Convert a PC into an index into a process' profiling buffer, scaled by
 * pr_scale (a 16.16 fixed-point factor); '& ~1' rounds the index down to
 * an even value.
 */
#define PC_TO_INDEX(pc, prof) \
	((int)(((u_quad_t)((pc) - (prof)->pr_off) * \
	    (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)

/*
 * Note that a profiling tick occurred at checkstate_pc[id] for process p
 * (which was running on CPU 'id').  If the PC falls inside the profiled
 * range, mark the process with P_OWEUPC so the tick is folded into the
 * profile buffer on its next return to user mode, and set CPU id's bit in
 * *astmap so the caller sends that CPU an AST IPI.
 */
static void
addupc_intr_forwarded(struct proc *p, int id, int *astmap)
{
	int i;
	struct uprof *prof;
	u_long pc;

	pc = checkstate_pc[id];
	prof = &p->p_stats->p_prof;
	if (pc >= prof->pr_off &&
	    (i = PC_TO_INDEX(pc, prof)) < prof->pr_size) {
		if ((p->p_flag & P_OWEUPC) == 0) {
			prof->pr_addr = pc;
			prof->pr_ticks = 1;
			p->p_flag |= P_OWEUPC;
		}
		*astmap |= (1 << id);
	}
}

/*
 * Perform statclock-style accounting for CPU 'id' on its behalf, using the
 * state (curproc / cpustate / pc) captured by the checkstate IPI: charge
 * user/sys/intr ticks to cp_time and to the process, update profiling
 * (user profil(2) data, and kernel GPROF histograms), call schedclock(),
 * and accumulate rusage memory integrals.
 *
 * NOTE(review): this parallels statclock() in the clock code -- presumably
 * they must be kept in sync; verify against kern_clock.c when modifying.
 * 'pscnt > 1' short-circuits the per-tick bookkeeping, mirroring the
 * statclock profhz/stathz divider -- TODO confirm.
 */
static void
forwarded_statclock(int id, int pscnt, int *astmap)
{
	struct pstats *pstats;
	long rss;
	struct rusage *ru;
	struct vmspace *vm;
	int cpustate;
	struct proc *p;
#ifdef GPROF
	register struct gmonparam *g;
	int i;
#endif

	p = checkstate_curproc[id];
	cpustate = checkstate_cpustate[id];

	switch (cpustate) {
	case CHECKSTATE_USER:
		if (p->p_flag & P_PROFIL)
			addupc_intr_forwarded(p, id, astmap);
		if (pscnt > 1)
			return;
		p->p_uticks++;
		if (p->p_nice > NZERO)
			cp_time[CP_NICE]++;
		else
			cp_time[CP_USER]++;
		break;
	case CHECKSTATE_SYS:
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = checkstate_pc[id] - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
		if (pscnt > 1)
			return;

		/* No curproc on that CPU means it was idle in the kernel. */
		if (!p)
			cp_time[CP_IDLE]++;
		else {
			p->p_sticks++;
			cp_time[CP_SYS]++;
		}
		break;
	case CHECKSTATE_INTR:
	default:
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = checkstate_pc[id] - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
		if (pscnt > 1)
			return;
		if (p)
			p->p_iticks++;
		cp_time[CP_INTR]++;
	}
	if (p != NULL) {
		schedclock(p);

		/* Update resource usage integrals and maximums. */
		if ((pstats = p->p_stats) != NULL &&
		    (ru = &pstats->p_ru) != NULL &&
		    (vm = p->p_vmspace) != NULL) {
			ru->ru_ixrss += pgtok(vm->vm_tsize);
			ru->ru_idrss += pgtok(vm->vm_dsize);
			ru->ru_isrss += pgtok(vm->vm_ssize);
			rss = pgtok(vmspace_resident_count(vm));
			if (ru->ru_maxrss < rss)
				ru->ru_maxrss = rss;
		}
	}
}

/*
 * Run statclock accounting for all other (running, probed) CPUs from this
 * CPU: broadcast a checkstate IPI, spin (bounded) until every targeted CPU
 * has reported its state, account ticks for each via forwarded_statclock(),
 * and finally send AST IPIs to the CPUs that owe profiling ticks, again
 * spinning (bounded) until they acknowledge.
 */
void
forward_statclock(int pscnt)
{
	int map;
	int id;
	int i;

	/* Kludge. We don't yet have separate locks for the interrupts
	 * and the kernel. This means that we cannot let the other processors
	 * handle complex interrupts while inhibiting them from entering
	 * the kernel in a non-interrupt context.
	 *
	 * What we can do, without changing the locking mechanisms yet,
	 * is letting the other processors handle a very simple interrupt
	 * (which determines the processor states), and do the main
	 * work ourself.
	 */

	if (!smp_started || !invltlb_ok || cold || panicstr)
		return;

	/* Step 1: Probe state (user, cpu, interrupt, spinlock, idle ) */

	map = other_cpus & ~stopped_cpus;
	checkstate_probed_cpus = 0;
	if (map != 0)
		selected_apic_ipi(map,
				  XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);

	/*
	 * Bounded spin: give up after 100000 iterations rather than hang if
	 * a CPU never answers the checkstate IPI.
	 * NOTE(review): this loop tests 'i == 100000' while the AST loops
	 * below test 'i > 100000' -- inconsistent but harmless; confirm
	 * intent before unifying.
	 */
	i = 0;
	while (checkstate_probed_cpus != map) {
		/* spin */
		i++;
		if (i == 100000) {
#ifdef BETTER_CLOCK_DIAGNOSTIC
			printf("forward_statclock: checkstate %x\n",
			       checkstate_probed_cpus);
#endif
			break;
		}
	}

	/*
	 * Step 2: walk through other processors processes, update ticks and
	 * profiling info.
	 */

	map = 0;
	for (id = 0; id < mp_ncpus; id++) {
		if (id == cpuid)
			continue;
		if (((1 << id) & checkstate_probed_cpus) == 0)
			continue;
		forwarded_statclock(id, pscnt, &map);
	}
	if (map != 0) {
		/* Deliver ASTs to CPUs whose processes owe profiling ticks. */
		checkstate_need_ast |= map;
		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
		i = 0;
		while ((checkstate_need_ast & map) != 0) {
			/* spin */
			i++;
			if (i > 100000) {
#ifdef BETTER_CLOCK_DIAGNOSTIC
				printf("forward_statclock: dropped ast 0x%x\n",
				       checkstate_need_ast & map);
#endif
				break;
			}
		}
	}
}

/*
 * Run hardclock-style accounting for all other (running, probed) CPUs from
 * this CPU: probe their states via a checkstate IPI, decrement each remote
 * process' ITIMER_VIRTUAL/ITIMER_PROF timers (posting SIGVTALRM/SIGPROF on
 * expiry), fall back to forwarded_statclock() when stathz == 0, and send
 * AST IPIs to CPUs whose processes need attention.
 */
void
forward_hardclock(int pscnt)
{
	int map;
	int id;
	struct proc *p;
	struct pstats *pstats;
	int i;

	/* Kludge. We don't yet have separate locks for the interrupts
	 * and the kernel. This means that we cannot let the other processors
	 * handle complex interrupts while inhibiting them from entering
	 * the kernel in a non-interrupt context.
	 *
	 * What we can do, without changing the locking mechanisms yet,
	 * is letting the other processors handle a very simple interrupt
	 * (which determines the processor states), and do the main
	 * work ourself.
	 */

	if (!smp_started || !invltlb_ok || cold || panicstr)
		return;

	/* Step 1: Probe state (user, cpu, interrupt, spinlock, idle) */

	map = other_cpus & ~stopped_cpus;
	checkstate_probed_cpus = 0;
	if (map != 0)
		selected_apic_ipi(map,
				  XCPUCHECKSTATE_OFFSET, APIC_DELMODE_FIXED);

	/* Bounded spin for the probe replies (see note in forward_statclock). */
	i = 0;
	while (checkstate_probed_cpus != map) {
		/* spin */
		i++;
		if (i == 100000) {
#ifdef BETTER_CLOCK_DIAGNOSTIC
			printf("forward_hardclock: checkstate %x\n",
			       checkstate_probed_cpus);
#endif
			break;
		}
	}

	/*
	 * Step 2: walk through other processors processes, update virtual
	 * timer and profiling timer. If stathz == 0, also update ticks and
	 * profiling info.
	 */

	map = 0;
	for (id = 0; id < mp_ncpus; id++) {
		if (id == cpuid)
			continue;
		if (((1 << id) & checkstate_probed_cpus) == 0)
			continue;
		p = checkstate_curproc[id];
		if (p) {
			pstats = p->p_stats;
			/* Virtual timer only runs while in user mode. */
			if (checkstate_cpustate[id] == CHECKSTATE_USER &&
			    timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) &&
			    itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) {
				psignal(p, SIGVTALRM);
				map |= (1 << id);
			}
			if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) &&
			    itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) {
				psignal(p, SIGPROF);
				map |= (1 << id);
			}
		}
		if (stathz == 0) {
			forwarded_statclock(id, pscnt, &map);
		}
	}
	if (map != 0) {
		/* Deliver ASTs to CPUs whose processes were signalled above. */
		checkstate_need_ast |= map;
		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
		i = 0;
		while ((checkstate_need_ast & map) != 0) {
			/* spin */
			i++;
			if (i > 100000) {
#ifdef BETTER_CLOCK_DIAGNOSTIC
				printf("forward_hardclock: dropped ast 0x%x\n",
				       checkstate_need_ast & map);
#endif
				break;
			}
		}
	}
}

#endif /* BETTER_CLOCK */

/*
 * Make a runnable process p notice a pending signal promptly when it is
 * currently executing on another CPU: send that CPU an AST IPI and spin
 * (bounded) for the acknowledgement.  The outer loop retries if the
 * process migrated to a different CPU while we were waiting; it returns
 * once the process is no longer SRUN, is not on any CPU (p_oncpu == 0xff),
 * or is still on the CPU we just poked.
 */
void
forward_signal(struct proc *p)
{
	int map;
	int id;
	int i;

	/* Kludge. We don't yet have separate locks for the interrupts
	 * and the kernel. This means that we cannot let the other processors
	 * handle complex interrupts while inhibiting them from entering
	 * the kernel in a non-interrupt context.
	 *
	 * What we can do, without changing the locking mechanisms yet,
	 * is letting the other processors handle a very simple interrupt
	 * (which determines the processor states), and do the main
	 * work ourself.
	 */

	if (!smp_started || !invltlb_ok || cold || panicstr)
		return;
	if (!forward_signal_enabled)
		return;
	while (1) {
		if (p->p_stat != SRUN)
			return;
		id = p->p_oncpu;
		if (id == 0xff)		/* not currently on any CPU */
			return;
		map = (1<<id);
		checkstate_need_ast |= map;
		selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
		i = 0;
		while ((checkstate_need_ast & map) != 0) {
			/* spin */
			i++;
			if (i > 100000) {
#if 0
				printf("forward_signal: dropped ast 0x%x\n",
				       checkstate_need_ast & map);
#endif
				break;
			}
		}
		/* Still on the CPU we poked: the AST will be seen; done. */
		if (id == p->p_oncpu)
			return;
	}
}

/*
 * Force a reschedule on all other running CPUs: mark them in resched_cpus
 * and send each an AST IPI, then spin (bounded) until the ASTs have been
 * acknowledged.
 */
void
forward_roundrobin(void)
{
	u_int map;
	int i;

	if (!smp_started || !invltlb_ok || cold || panicstr)
		return;
	if (!forward_roundrobin_enabled)
		return;
	resched_cpus |= other_cpus;
	map = other_cpus & ~stopped_cpus;
#if 1
	selected_apic_ipi(map, XCPUAST_OFFSET, APIC_DELMODE_FIXED);
#else
	(void) all_but_self_ipi(XCPUAST_OFFSET);
#endif
	i = 0;
	while ((checkstate_need_ast & map) != 0) {
		/* spin */
		i++;
		if (i > 100000) {
#if 0
			printf("forward_roundrobin: dropped ast 0x%x\n",
			       checkstate_need_ast & map);
#endif
			break;
		}
	}
}


#ifdef APIC_INTR_REORDER
/*
 * Maintain mapping from softintr vector to isr bit in local apic.
 */
void
set_lapic_isrloc(int intr, int vector)
{
	/*
	 * NOTE(review): 'intr > 32' admits intr == 32; if
	 * apic_isrbit_location[] has 32 entries (indices 0..31) this is an
	 * off-by-one and the test should be 'intr >= 32' -- confirm against
	 * the array's declaration before relying on this bound.
	 */
	if (intr < 0 || intr > 32)
		panic("set_apic_isrloc: bad intr argument: %d",intr);
	if (vector < ICU_OFFSET || vector > 255)
		panic("set_apic_isrloc: bad vector argument: %d",vector);
	/*
	 * (vector>>5) selects which 32-bit ISR register holds the bit;
	 * '<<2' scales by the register stride in u_ints (presumably the
	 * local APIC's 16-byte register spacing -- verify against the
	 * lapic struct layout).
	 */
	apic_isrbit_location[intr].location = &lapic.isr0 + ((vector>>5)<<2);
	apic_isrbit_location[intr].bit = (1<<(vector & 31));
}
#endif

/*
 * All-CPU rendezvous.  CPUs are signalled, all execute the setup function
 * (if specified), rendezvous, execute the action function (if specified),
 * rendezvous again, execute the teardown function (if specified), and then
 * resume.
 *
 * Note that the supplied external functions _must_ be reentrant and aware
 * that they are running in parallel and in an unknown lock context.
 */
static void (*smp_rv_setup_func)(void *arg);	/* per-rendezvous setup hook */
static void (*smp_rv_action_func)(void *arg);	/* per-rendezvous action hook */
static void (*smp_rv_teardown_func)(void *arg);	/* per-rendezvous teardown hook */
static void *smp_rv_func_arg;			/* argument passed to all hooks */
static volatile int smp_rv_waiters[2];		/* entry/exit barrier counters */

/*
 * Executed by every CPU taking part in the rendezvous (the initiator calls
 * it directly; the others reach it via the XRENDEZVOUS IPI): run the setup
 * hook, spin at the entry barrier until all mp_ncpus CPUs have arrived,
 * run the action hook, spin at the exit barrier, then run teardown.
 */
void
smp_rendezvous_action(void)
{
	/* setup function */
	if (smp_rv_setup_func != NULL)
		smp_rv_setup_func(smp_rv_func_arg);
	/* spin on entry rendezvous */
	atomic_add_int(&smp_rv_waiters[0], 1);
	while (smp_rv_waiters[0] < mp_ncpus)
		;
	/* action function */
	if (smp_rv_action_func != NULL)
		smp_rv_action_func(smp_rv_func_arg);
	/* spin on exit rendezvous */
	atomic_add_int(&smp_rv_waiters[1], 1);
	while (smp_rv_waiters[1] < mp_ncpus)
		;
	/* teardown function */
	if (smp_rv_teardown_func != NULL)
		smp_rv_teardown_func(smp_rv_func_arg);
}

/*
 * Initiate an all-CPU rendezvous: serialize initiators with smp_rv_lock,
 * publish the hook pointers and argument, reset the barrier counters,
 * disable interrupts locally, IPI every other CPU into
 * smp_rendezvous_action(), and participate ourselves.  Interrupt state is
 * restored and the lock released on the way out.  Any of the three hooks
 * may be NULL to skip that phase.
 */
void
smp_rendezvous(void (* setup_func)(void *),
	       void (* action_func)(void *),
	       void (* teardown_func)(void *),
	       void *arg)
{
	u_int	efl;

	/* obtain rendezvous lock */
	s_lock(&smp_rv_lock);		/* XXX sleep here? NOWAIT flag? */

	/* set static function pointers */
	smp_rv_setup_func = setup_func;
	smp_rv_action_func = action_func;
	smp_rv_teardown_func = teardown_func;
	smp_rv_func_arg = arg;
	smp_rv_waiters[0] = 0;
	smp_rv_waiters[1] = 0;

	/* disable interrupts on this CPU, save interrupt status */
	efl = read_eflags();
	write_eflags(efl & ~PSL_I);

	/* signal other processors, which will enter the IPI with interrupts off */
	all_but_self_ipi(XRENDEZVOUS_OFFSET);

	/* call executor function */
	smp_rendezvous_action();

	/* restore interrupt flag */
	write_eflags(efl);

	/* release lock */
	s_unlock(&smp_rv_lock);
}