/*
 * Intel Multiprocessor Specification 1.1 and 1.4
 * compliant MP-table parsing routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
 *
 * Fixes
 * Erich Boleyn : MP v1.4 and additional changes.
 * Alan Cox : Added EBDA scanning
 * Ingo Molnar : various cleanups and rewrites
 * Maciej W. Rozycki: Bits for default MP configurations
 * Paul Diefenbaugh: Added full ACPI support
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/bitops.h>

#include <asm/smp.h>
#include <asm/acpi.h>
#include <asm/mtrr.h>
#include <asm/mpspec.h>
#include <asm/io_apic.h>

#include <mach_apic.h>
#include <mach_apicdef.h>
#include <mach_mpparse.h>
#include <bios_ebda.h>

/* Have we found an MP table */
int smp_found_config;

/* Upper bound on CPUs to bring up (command-line "maxcpus="); defaults to all */
unsigned int __cpuinitdata maxcpus = NR_CPUS;

/*
 * Various Linux-internal data structures created from the
 * MP-table.
 */

/* Local APIC version per APIC id, as reported by (or fixed up from) the table */
int apic_version [MAX_APICS];

/* Bus id -> MP_BUS_{ISA,EISA,PCI,MCA} classification */
int mp_bus_id_to_type [MAX_MP_BUSSES];
int mp_bus_id_to_node [MAX_MP_BUSSES];
int mp_bus_id_to_local [MAX_MP_BUSSES];
int quad_local_to_mp_bus_id [NR_CPUS/4][4];

/* MP bus id -> logical PCI bus number; -1 means "not a PCI bus" */
int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };

/* Next logical PCI bus number to hand out while parsing bus entries */
static int mp_current_pci_id;

/* I/O APIC entries */
struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];

/* MP IRQ source entries */
struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

int nr_ioapics;

/* 1 = IMCR/PIC compatibility mode, 0 = virtual-wire mode (set in get_smp_config) */
int pic_mode;

/* Physical address of the local APIC (may be non-default per MP table/MADT) */
unsigned long mp_lapic_addr;

/* Set to 1 when >8 CPUs force the default APIC driver over to bigsmp */
unsigned int def_to_bigsmp = 0;

/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
/* Internal processor count */
unsigned int __cpuinitdata num_processors;

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;

/* CPU number -> APIC id recorded while parsing processor entries */
u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };

/*
 * Intel MP BIOS table parsing routines:
 */


/*
 * Checksum an MP configuration block.
 *
 * Returns the low byte of the sum of all bytes; a valid MP structure
 * sums to zero, so a non-zero return means "corrupt".
 */

static int __init mpf_checksum(unsigned char *mp, int len)
{
	int sum = 0;

	while (len--)
		sum += *mp++;

	return sum & 0xFF;
}

/*
 * Have to match translation table entries to main table entries by counter
 * hence the mpc_record variable .... can't see a less disgusting way of
 * doing this ....
 */

/* Index of the entry currently being parsed in smp_read_mpc()'s main loop */
static int mpc_record;
/* Per-entry NUMAQ OEM translation records, stashed by MP_translation_info() */
static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __cpuinitdata;

/*
 * Register one processor entry from the MP table (or a fabricated default
 * entry).  Skips disabled CPUs, remembers the boot CPU's APIC id, fixes up
 * a bogus zero APIC version, and accounts the CPU in phys_cpu_present_map /
 * cpu_possible_map subject to the NR_CPUS and "maxcpus=" limits.
 */
static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
{
	int ver, apicid;
	physid_mask_t phys_cpu;

	if (!(m->mpc_cpuflag & CPU_ENABLED))
		return;

	/* On NUMAQ the OEM translation record remaps the APIC id */
	apicid = mpc_apic_id(m, translation_table[mpc_record]);

	/* Decode the CPUID-style feature flags for debug output only */
	if (m->mpc_featureflag&(1<<0))
		Dprintk(" Floating point unit present.\n");
	if (m->mpc_featureflag&(1<<7))
		Dprintk(" Machine Exception supported.\n");
	if (m->mpc_featureflag&(1<<8))
		Dprintk(" 64 bit compare & exchange supported.\n");
	if (m->mpc_featureflag&(1<<9))
		Dprintk(" Internal APIC present.\n");
	if (m->mpc_featureflag&(1<<11))
		Dprintk(" SEP present.\n");
	if (m->mpc_featureflag&(1<<12))
		Dprintk(" MTRR present.\n");
	if (m->mpc_featureflag&(1<<13))
		Dprintk(" PGE present.\n");
	if (m->mpc_featureflag&(1<<14))
		Dprintk(" MCA present.\n");
	if (m->mpc_featureflag&(1<<15))
		Dprintk(" CMOV present.\n");
	if (m->mpc_featureflag&(1<<16))
		Dprintk(" PAT present.\n");
	if (m->mpc_featureflag&(1<<17))
		Dprintk(" PSE present.\n");
	if (m->mpc_featureflag&(1<<18))
		Dprintk(" PSN present.\n");
	if (m->mpc_featureflag&(1<<19))
		Dprintk(" Cache Line Flush Instruction present.\n");
	/* 20 Reserved */
	if (m->mpc_featureflag&(1<<21))
		Dprintk(" Debug Trace and EMON Store present.\n");
	if (m->mpc_featureflag&(1<<22))
		Dprintk(" ACPI Thermal Throttle Registers present.\n");
	if (m->mpc_featureflag&(1<<23))
		Dprintk(" MMX present.\n");
	if (m->mpc_featureflag&(1<<24))
		Dprintk(" FXSR present.\n");
	if (m->mpc_featureflag&(1<<25))
		Dprintk(" XMM present.\n");
	if (m->mpc_featureflag&(1<<26))
		Dprintk(" Willamette New Instructions present.\n");
	if (m->mpc_featureflag&(1<<27))
		Dprintk(" Self Snoop present.\n");
	if (m->mpc_featureflag&(1<<28))
		Dprintk(" HT present.\n");
	if (m->mpc_featureflag&(1<<29))
		Dprintk(" Thermal Monitor present.\n");
	/* 30, 31 Reserved */


	if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
		Dprintk(" Bootup CPU\n");
		boot_cpu_physical_apicid = m->mpc_apicid;
	}

	ver = m->mpc_apicver;

	/*
	 * Validate version
	 */
	if (ver == 0x0) {
		printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! "
			"fixing up to 0x10. (tell your hw vendor)\n",
			m->mpc_apicid);
		ver = 0x10;
	}
	/* NOTE(review): mpc_apicid is not range-checked against MAX_APICS here;
	 * presumably the BIOS never reports an id that large -- verify. */
	apic_version[m->mpc_apicid] = ver;

	phys_cpu = apicid_to_cpu_present(apicid);
	physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);

	if (num_processors >= NR_CPUS) {
		printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
			" Processor ignored.\n", NR_CPUS);
		return;
	}

	if (num_processors >= maxcpus) {
		printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
			" Processor ignored.\n", maxcpus);
		return;
	}

	cpu_set(num_processors, cpu_possible_map);
	num_processors++;

	/*
	 * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
	 * but we need to work other dependencies like SMP_SUSPEND etc
	 * before this can be done without some confusion.
	 * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
	 *       - Ashok Raj <ashok.raj@intel.com>
	 */
	if (num_processors > 8) {
		switch (boot_cpu_data.x86_vendor) {
		case X86_VENDOR_INTEL:
			/* Pre-xAPIC Intel parts cannot use bigsmp addressing */
			if (!APIC_XAPIC(ver)) {
				def_to_bigsmp = 0;
				break;
			}
			/* If P4 and above fall through */
		case X86_VENDOR_AMD:
			def_to_bigsmp = 1;
		}
	}
	bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
}

/*
 * Register one bus entry: classify the 6-character bus-type string into
 * MP_BUS_{ISA,EISA,PCI,MCA} and, for PCI, assign the next logical PCI bus
 * number.  Unknown bus types are logged and ignored.
 */
static void __init MP_bus_info (struct mpc_config_bus *m)
{
	char str[7];

	/* mpc_bustype is a fixed 6-byte field, not NUL-terminated */
	memcpy(str, m->mpc_bustype, 6);
	str[6] = 0;

	mpc_oem_bus_info(m, str, translation_table[mpc_record]);

#if MAX_MP_BUSSES < 256
	if (m->mpc_busid >= MAX_MP_BUSSES) {
		printk(KERN_WARNING "MP table busid value (%d) for bustype %s "
			" is too large, max. supported is %d\n",
			m->mpc_busid, str, MAX_MP_BUSSES - 1);
		return;
	}
#endif

	if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
	} else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
	} else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
		mpc_oem_pci_bus(m, translation_table[mpc_record]);
		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
		mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
		mp_current_pci_id++;
	} else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
		mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
	} else {
		printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
	}
}

/*
 * Register one I/O APIC entry into mp_ioapics[].  Unusable or
 * zero-address APICs are skipped; overflowing MAX_IO_APICS is fatal.
 */
static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
{
	if (!(m->mpc_flags & MPC_APIC_USABLE))
		return;

	printk(KERN_INFO "I/O APIC #%d Version %d at 0x%lX.\n",
		m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
	if (nr_ioapics >= MAX_IO_APICS) {
		printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n",
			MAX_IO_APICS, nr_ioapics);
		panic("Recompile kernel with bigger MAX_IO_APICS!.\n");
	}
	if (!m->mpc_apicaddr) {
		printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
			" found in MP table, skipping!\n");
		return;
	}
	mp_ioapics[nr_ioapics] = *m;
	nr_ioapics++;
}

/*
 * Append one interrupt-source entry to mp_irqs[].  Running out of
 * MAX_IRQ_SOURCES slots is fatal.
 */
static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
{
	mp_irqs [mp_irq_entries] = *m;
	Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
		" IRQ %02x, APIC ID %x, APIC INT %02x\n",
		m->mpc_irqtype, m->mpc_irqflag & 3,
		(m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
		m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
	if (++mp_irq_entries == MAX_IRQ_SOURCES)
		panic("Max # of irq sources exceeded!!\n");
}

/*
 * Local-interrupt (LINT0/LINT1) entries are only reported for debugging;
 * nothing is stored.
 */
static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
{
	Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
		" IRQ %02x, APIC ID %x, APIC LINT %02x\n",
		m->mpc_irqtype, m->mpc_irqflag & 3,
		(m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
		m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
}

#ifdef CONFIG_X86_NUMAQ
/*
 * Stash a NUMAQ OEM translation record (indexed by mpc_record so it can be
 * matched to the corresponding main-table entry later) and mark its quad
 * (node) online.
 */
static void __init MP_translation_info (struct mpc_config_translation *m)
{
	printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local);

	if (mpc_record >= MAX_MPC_ENTRY)
		printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
	else
		translation_table[mpc_record] = m; /* stash this for later */
	if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
		node_set_online(m->trans_quad);
}

/*
 * Read/parse the MPC oem tables
 */

static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable, \
	unsigned short oemsize)
{
	int count = sizeof (*oemtable); /* the header size */
	unsigned char *oemptr = ((unsigned char *)oemtable)+count;

	mpc_record = 0;
	printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", oemtable);

	/* Validate signature and checksum before trusting any entry */
	if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4))
	{
		printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
			oemtable->oem_signature[0],
			oemtable->oem_signature[1],
			oemtable->oem_signature[2],
			oemtable->oem_signature[3]);
		return;
	}
	if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length))
	{
		printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
		return;
	}
	/* Walk the variable-length entry list; only translation entries
	 * are understood, anything else aborts the scan. */
	while (count < oemtable->oem_length) {
		switch (*oemptr) {
		case MP_TRANSLATION:
		{
			struct mpc_config_translation *m=
				(struct mpc_config_translation *)oemptr;
			MP_translation_info(m);
			oemptr += sizeof(*m);
			count += sizeof(*m);
			++mpc_record;
			break;
		}
		default:
		{
			printk(KERN_WARNING "Unrecognised OEM table entry type! - %d\n", (int) *oemptr);
			return;
		}
		}
	}
}

/*
 * NUMAQ flavour of mps_oem_check: warn if the OEM id does not look like a
 * NUMA-Q box, then parse the OEM table the MPC header points to (if any).
 */
static inline void mps_oem_check(struct mp_config_table *mpc, char *oem,
		char *productid)
{
	if (strncmp(oem, "IBM NUMA", 8))
		printk("Warning! May not be a NUMA-Q system!\n");
	if (mpc->mpc_oemptr)
		smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr,
				mpc->mpc_oemsize);
}
#endif	/* CONFIG_X86_NUMAQ */

/*
 * Read/parse the MPC
 *
 * Validates the header (signature, checksum, spec revision 1.1/1.4,
 * non-null local APIC address), then dispatches every entry to the
 * MP_*_info() helpers above.  Returns the number of processors
 * registered, or 0 on a malformed table.
 */

static int __init smp_read_mpc(struct mp_config_table *mpc)
{
	char str[16];
	char oem[10];
	int count=sizeof(*mpc);
	unsigned char *mpt=((unsigned char *)mpc)+count;

	if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
		printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n",
			*(u32 *)mpc->mpc_signature);
		return 0;
	}
	if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
		printk(KERN_ERR "SMP mptable: checksum error!\n");
		return 0;
	}
	if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
		printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
			mpc->mpc_spec);
		return 0;
	}
	if (!mpc->mpc_lapic) {
		printk(KERN_ERR "SMP mptable: null local APIC address!\n");
		return 0;
	}
	memcpy(oem,mpc->mpc_oem,8);
	oem[8]=0;
	printk(KERN_INFO "OEM ID: %s ",oem);

	memcpy(str,mpc->mpc_productid,12);
	str[12]=0;
	printk("Product ID: %s ",str);

	/* Subarch hook (NUMAQ parses its OEM table here) */
	mps_oem_check(mpc, oem, str);

	printk("APIC at: 0x%lX\n",mpc->mpc_lapic);

	/*
	 * Save the local APIC address (it might be non-default) -- but only
	 * if we're not using ACPI.
	 */
	if (!acpi_lapic)
		mp_lapic_addr = mpc->mpc_lapic;

	/*
	 * Now process the configuration blocks.
	 */
	mpc_record = 0;
	while (count < mpc->mpc_length) {
		switch(*mpt) {
			case MP_PROCESSOR:
			{
				struct mpc_config_processor *m=
					(struct mpc_config_processor *)mpt;
				/* ACPI may have already provided this data */
				if (!acpi_lapic)
					MP_processor_info(m);
				mpt += sizeof(*m);
				count += sizeof(*m);
				break;
			}
			case MP_BUS:
			{
				struct mpc_config_bus *m=
					(struct mpc_config_bus *)mpt;
				MP_bus_info(m);
				mpt += sizeof(*m);
				count += sizeof(*m);
				break;
			}
			case MP_IOAPIC:
			{
				struct mpc_config_ioapic *m=
					(struct mpc_config_ioapic *)mpt;
				MP_ioapic_info(m);
				mpt+=sizeof(*m);
				count+=sizeof(*m);
				break;
			}
			case MP_INTSRC:
			{
				struct mpc_config_intsrc *m=
					(struct mpc_config_intsrc *)mpt;

				MP_intsrc_info(m);
				mpt+=sizeof(*m);
				count+=sizeof(*m);
				break;
			}
			case MP_LINTSRC:
			{
				struct mpc_config_lintsrc *m=
					(struct mpc_config_lintsrc *)mpt;
				MP_lintsrc_info(m);
				mpt+=sizeof(*m);
				count+=sizeof(*m);
				break;
			}
			default:
			{
				/* Unknown entry type: stop parsing */
				count = mpc->mpc_length;
				break;
			}
		}
		++mpc_record;
	}
	setup_apic_routing();
	if (!num_processors)
		printk(KERN_ERR "SMP mptable: no processors registered!\n");
	return num_processors;
}

/*
 * Read one IRQ's trigger mode from the chipset ELCR (edge/level control
 * registers at ports 0x4d0/0x4d1): returns 1 for level, 0 for edge.
 */
static int __init ELCR_trigger(unsigned int irq)
{
	unsigned int port;

	port = 0x4d0 + (irq >> 3);
	return (inb(port) >> (irq & 7)) & 1;
}

/*
 * Fabricate interrupt-source entries for the 16 legacy IRQs according to
 * the MPS default configuration type, plus the 8259A ExtINT entry on
 * INTIN0.  For type 5 (ISA+PCI, no IRQ entries) trigger modes may be
 * taken from the ELCR if its contents pass a sanity check.
 */
static void __init construct_default_ioirq_mptable(int mpc_default_type)
{
	struct mpc_config_intsrc intsrc;
	int i;
	int ELCR_fallback = 0;

	intsrc.mpc_type = MP_INTSRC;
	intsrc.mpc_irqflag = 0; /* conforming */
	intsrc.mpc_srcbus = 0;
	intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;

	intsrc.mpc_irqtype = mp_INT;

	/*
	 * If true, we have an ISA/PCI system with no IRQ entries
	 * in the MP table. To prevent the PCI interrupts from being set up
	 * incorrectly, we try to use the ELCR. The sanity check to see if
	 * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
	 * never be level sensitive, so we simply see if the ELCR agrees.
	 * If it does, we assume it's valid.
	 */
	if (mpc_default_type == 5) {
		printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");

		if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
			printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n");
		else {
			printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
			ELCR_fallback = 1;
		}
	}

	for (i = 0; i < 16; i++) {
		switch (mpc_default_type) {
		case 2:
			if (i == 0 || i == 13)
				continue;	/* IRQ0 & IRQ13 not connected */
			/* fall through */
		default:
			if (i == 2)
				continue;	/* IRQ2 is never connected */
		}

		if (ELCR_fallback) {
			/*
			 * If the ELCR indicates a level-sensitive interrupt, we
			 * copy that information over to the MP table in the
			 * irqflag field (level sensitive, active high polarity).
			 */
			if (ELCR_trigger(i))
				intsrc.mpc_irqflag = 13;
			else
				intsrc.mpc_irqflag = 0;
		}

		intsrc.mpc_srcbusirq = i;
		intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
		MP_intsrc_info(&intsrc);
	}

	intsrc.mpc_irqtype = mp_ExtINT;
	intsrc.mpc_srcbusirq = 0;
	intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */
	MP_intsrc_info(&intsrc);
}

/*
 * Build a complete synthetic MP configuration (2 CPUs, buses, one I/O
 * APIC, legacy IRQs, local interrupts) for the numbered MPS "default
 * configuration" types, used when the BIOS supplies a floating pointer
 * but no configuration table.
 */
static inline void __init construct_default_ISA_mptable(int mpc_default_type)
{
	struct mpc_config_processor processor;
	struct mpc_config_bus bus;
	struct mpc_config_ioapic ioapic;
	struct mpc_config_lintsrc lintsrc;
	int linttypes[2] = { mp_ExtINT, mp_NMI };
	int i;

	/*
	 * local APIC has default address
	 */
	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;

	/*
	 * 2 CPUs, numbered 0 & 1.
	 */
	processor.mpc_type = MP_PROCESSOR;
	/* Either an integrated APIC or a discrete 82489DX. */
	processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	processor.mpc_cpuflag = CPU_ENABLED;
	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
				   (boot_cpu_data.x86_model << 4) |
				   boot_cpu_data.x86_mask;
	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
	processor.mpc_reserved[0] = 0;
	processor.mpc_reserved[1] = 0;
	for (i = 0; i < 2; i++) {
		processor.mpc_apicid = i;
		MP_processor_info(&processor);
	}

	bus.mpc_type = MP_BUS;
	bus.mpc_busid = 0;
	switch (mpc_default_type) {
		default:
			printk("???\n");
			printk(KERN_ERR "Unknown standard configuration %d\n",
				mpc_default_type);
			/* fall through */
		case 1:
		case 5:
			memcpy(bus.mpc_bustype, "ISA   ", 6);
			break;
		case 2:
		case 6:
		case 3:
			memcpy(bus.mpc_bustype, "EISA  ", 6);
			break;
		case 4:
		case 7:
			memcpy(bus.mpc_bustype, "MCA   ", 6);
	}
	MP_bus_info(&bus);
	if (mpc_default_type > 4) {
		/* Types 5-7 additionally have a PCI bus as bus #1 */
		bus.mpc_busid = 1;
		memcpy(bus.mpc_bustype, "PCI   ", 6);
		MP_bus_info(&bus);
	}

	ioapic.mpc_type = MP_IOAPIC;
	ioapic.mpc_apicid = 2;
	ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
	ioapic.mpc_flags = MPC_APIC_USABLE;
	ioapic.mpc_apicaddr = 0xFEC00000;
	MP_ioapic_info(&ioapic);

	/*
	 * We set up most of the low 16 IO-APIC pins according to MPS rules.
	 */
	construct_default_ioirq_mptable(mpc_default_type);

	lintsrc.mpc_type = MP_LINTSRC;
	lintsrc.mpc_irqflag = 0;		/* conforming */
	lintsrc.mpc_srcbusid = 0;
	lintsrc.mpc_srcbusirq = 0;
	lintsrc.mpc_destapic = MP_APIC_ALL;
	for (i = 0; i < 2; i++) {
		lintsrc.mpc_irqtype = linttypes[i];
		lintsrc.mpc_destapiclint = i;
		MP_lintsrc_info(&lintsrc);
	}
}

/* MP floating pointer located by smp_scan_config(); NULL if none found */
static struct intel_mp_floating *mpf_found;

/*
 * Scan the memory blocks for an SMP configuration block.
 */
void __init get_smp_config (void)
{
	struct intel_mp_floating *mpf = mpf_found;

	/*
	 * ACPI supports both logical (e.g. Hyper-Threading) and physical
	 * processors, where MPS only supports physical.
	 */
	if (acpi_lapic && acpi_ioapic) {
		printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
		return;
	}
	else if (acpi_lapic)
		printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");

	printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
	if (mpf->mpf_feature2 & (1<<7)) {
		printk(KERN_INFO "    IMCR and PIC compatibility mode.\n");
		pic_mode = 1;
	} else {
		printk(KERN_INFO "    Virtual Wire compatibility mode.\n");
		pic_mode = 0;
	}

	/*
	 * Now see if we need to read further.
	 */
	if (mpf->mpf_feature1 != 0) {
		/* Non-zero feature1 selects one of the MPS default configs */
		printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
		construct_default_ISA_mptable(mpf->mpf_feature1);

	} else if (mpf->mpf_physptr) {

		/*
		 * Read the physical hardware table.  Anything here will
		 * override the defaults.
		 */
		if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) {
			smp_found_config = 0;
			printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
			printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
			return;
		}
		/*
		 * If there are no explicit MP IRQ entries, then we are
		 * broken. We set up most of the low 16 IO-APIC pins to
		 * ISA defaults and hope it will work.
		 */
		if (!mp_irq_entries) {
			struct mpc_config_bus bus;

			printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");

			bus.mpc_type = MP_BUS;
			bus.mpc_busid = 0;
			memcpy(bus.mpc_bustype, "ISA   ", 6);
			MP_bus_info(&bus);

			construct_default_ioirq_mptable(0);
		}

	} else
		BUG();

	printk(KERN_INFO "Processors: %d\n", num_processors);
	/*
	 * Only use the first configuration found.
	 */
}

/*
 * Scan [base, base+length) for the 16-byte MP floating pointer structure
 * ("_MP_" signature, valid checksum, spec rev 1.1 or 1.4).  On a hit the
 * structure (and the MP config table it points to, if any) is reserved in
 * the bootmem allocator and remembered in mpf_found.  Returns 1 on hit.
 */
static int __init smp_scan_config (unsigned long base, unsigned long length)
{
	unsigned long *bp = phys_to_virt(base);
	struct intel_mp_floating *mpf;

	Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
	if (sizeof(*mpf) != 16)
		printk("Error: MPF size\n");

	while (length > 0) {
		mpf = (struct intel_mp_floating *)bp;
		if ((*bp == SMP_MAGIC_IDENT) &&
			(mpf->mpf_length == 1) &&
			!mpf_checksum((unsigned char *)bp, 16) &&
			((mpf->mpf_specification == 1)
				|| (mpf->mpf_specification == 4)) ) {

			smp_found_config = 1;
			printk(KERN_INFO "found SMP MP-table at %08lx\n",
						virt_to_phys(mpf));
			reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE);
			if (mpf->mpf_physptr) {
				/*
				 * We cannot access to MPC table to compute
				 * table size yet, as only few megabytes from
				 * the bottom is mapped now.
				 * PC-9800's MPC table places on the very last
				 * of physical memory; so that simply reserving
				 * PAGE_SIZE from mpg->mpf_physptr yields BUG()
				 * in reserve_bootmem.
				 */
				unsigned long size = PAGE_SIZE;
				unsigned long end = max_low_pfn * PAGE_SIZE;
				if (mpf->mpf_physptr + size > end)
					size = end - mpf->mpf_physptr;
				reserve_bootmem(mpf->mpf_physptr, size);
			}

			mpf_found = mpf;
			return 1;
		}
		/* The structure is 16-byte aligned; bp is unsigned long*,
		 * so +4 advances 16 bytes on i386 */
		bp += 4;
		length -= 16;
	}
	return 0;
}

/*
 * Look for the MP floating pointer in the three locations the MP spec
 * allows: the first KB of conventional memory, the last KB of base memory
 * (639K), the BIOS ROM area 0xF0000-0xFFFFF, and finally the first KB of
 * the EBDA.
 */
void __init find_smp_config (void)
{
	unsigned int address;

	if (smp_scan_config(0x0,0x400) ||
		smp_scan_config(639*0x400,0x400) ||
		smp_scan_config(0xF0000,0x10000))
		return;
	/*
	 * If it is an SMP machine we should know now, unless the
	 * configuration is in an EISA/MCA bus machine with an
	 * extended bios data area.
	 *
	 * there is a real-mode segmented pointer pointing to the
	 * 4K EBDA area at 0x40E, calculate and scan it here.
	 *
	 * NOTE! There are Linux loaders that will corrupt the EBDA
	 * area, and as such this kind of SMP config may be less
	 * trustworthy, simply because the SMP table may have been
	 * stomped on during early boot. These loaders are buggy and
	 * should be fixed.
	 *
	 * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
	 */

	address = get_bios_ebda();
	if (address)
		smp_scan_config(address, 0x400);
}

/* Non-zero on ES7000 platforms (set elsewhere; consulted below) */
int es7000_plat;

/* --------------------------------------------------------------------------
                            ACPI-based MP Configuration
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI

/*
 * Record the local APIC address from the MADT, map it via the fixmap, and
 * read the boot CPU's APIC id from the hardware if not yet known.
 */
void __init mp_register_lapic_address(u64 address)
{
	mp_lapic_addr = (unsigned long) address;

	set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);

	if (boot_cpu_physical_apicid == -1U)
		boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));

	Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
}

/*
 * Register one CPU from an ACPI MADT LAPIC entry by synthesizing an
 * MP-table processor entry and feeding it to MP_processor_info().
 */
void __cpuinit mp_register_lapic (u8 id, u8 enabled)
{
	struct mpc_config_processor processor;
	int boot_cpu = 0;

	if (MAX_APICS - id <= 0) {
		printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
			id, MAX_APICS);
		return;
	}

	if (id == boot_cpu_physical_apicid)
		boot_cpu = 1;

	processor.mpc_type = MP_PROCESSOR;
	processor.mpc_apicid = id;
	/* APIC version is read from the (already mapped) boot CPU's LAPIC */
	processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
	processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
	processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
	processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
		(boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
	processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
	processor.mpc_reserved[0] = 0;
	processor.mpc_reserved[1] = 0;

	MP_processor_info(&processor);
}

#ifdef CONFIG_X86_IO_APIC

#define MP_ISA_BUS		0
#define MP_MAX_IOAPIC_PIN	127

/* GSI range and pin-programmed bitmap per registered I/O APIC */
static struct mp_ioapic_routing {
	int			apic_id;
	int			gsi_base;
	int			gsi_end;
	u32			pin_programmed[4];	/* 128-bit bitmap, one bit per pin */
} mp_ioapic_routing[MAX_IO_APICS];

/*
 * Map a GSI to the index of the I/O APIC whose [gsi_base, gsi_end] range
 * contains it; returns -1 (with an error printk) if none does.
 */
static int mp_find_ioapic (int gsi)
{
	int i = 0;

	/* Find the IOAPIC that manages this GSI. */
	for (i = 0; i < nr_ioapics; i++) {
		if ((gsi >= mp_ioapic_routing[i].gsi_base)
			&& (gsi <= mp_ioapic_routing[i].gsi_end))
			return i;
	}

	printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);

	return -1;
}

/*
 * Register an I/O APIC from an ACPI MADT entry: map it via the fixmap,
 * resolve a unique APIC id where required (pre-xAPIC Intel), read its
 * version, and record its GSI range for later gsi->(apic,pin) lookups.
 */
void __init mp_register_ioapic(u8 id, u32 address, u32 gsi_base)
{
	int idx = 0;
	int tmpid;

	if (nr_ioapics >= MAX_IO_APICS) {
		printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
			"(found %d)\n", MAX_IO_APICS, nr_ioapics);
		panic("Recompile kernel with bigger MAX_IO_APICS!\n");
	}
	if (!address) {
		printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
			" found in MADT table, skipping!\n");
		return;
	}

	idx = nr_ioapics++;

	mp_ioapics[idx].mpc_type = MP_IOAPIC;
	mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
	mp_ioapics[idx].mpc_apicaddr = address;

	set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
	if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		&& !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
		tmpid = io_apic_get_unique_id(idx, id);
	else
		tmpid = id;
	if (tmpid == -1) {
		/* Could not get a unique id: undo the registration */
		nr_ioapics--;
		return;
	}
	mp_ioapics[idx].mpc_apicid = tmpid;
	mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);

	/*
	 * Build basic GSI lookup table to facilitate gsi->io_apic lookups
	 * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
	 */
	mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
	mp_ioapic_routing[idx].gsi_base = gsi_base;
	mp_ioapic_routing[idx].gsi_end = gsi_base +
		io_apic_get_redir_entries(idx);

	printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
		"GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
		mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
		mp_ioapic_routing[idx].gsi_base,
		mp_ioapic_routing[idx].gsi_end);
}

/*
 * Record an ACPI Interrupt Source Override for a legacy ISA IRQ as a
 * synthetic MP interrupt-source entry targeting the right APIC pin.
 */
void __init
mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, u32 gsi)
{
	struct mpc_config_intsrc intsrc;
	int ioapic = -1;
	int pin = -1;

	/*
	 * Convert 'gsi' to 'ioapic.pin'.
	 */
	ioapic = mp_find_ioapic(gsi);
	if (ioapic < 0)
		return;
	pin = gsi - mp_ioapic_routing[ioapic].gsi_base;

	/*
	 * TBD: This check is for faulty timer entries, where the override
	 *      erroneously sets the trigger to level, resulting in a HUGE
	 *      increase of timer interrupts!
	 */
	if ((bus_irq == 0) && (trigger == 3))
		trigger = 1;

	intsrc.mpc_type = MP_INTSRC;
	intsrc.mpc_irqtype = mp_INT;
	intsrc.mpc_irqflag = (trigger << 2) | polarity;
	intsrc.mpc_srcbus = MP_ISA_BUS;
	intsrc.mpc_srcbusirq = bus_irq;				/* IRQ */
	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;	/* APIC ID */
	intsrc.mpc_dstirq = pin;				/* INTIN# */

	Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
		intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
		(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
		intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);

	mp_irqs[mp_irq_entries] = intsrc;
	if (++mp_irq_entries == MAX_IRQ_SOURCES)
		panic("Max # of irq sources exceeded!\n");
}

/*
 * Fabricate identity-mapped interrupt-source entries for the legacy ISA
 * IRQs 0-15, skipping any IRQ or APIC pin already claimed by a MADT
 * interrupt source override.
 */
void __init mp_config_acpi_legacy_irqs (void)
{
	struct mpc_config_intsrc intsrc;
	int i = 0;
	int ioapic = -1;

	/*
	 * Fabricate the legacy ISA bus (bus #31).
	 * NOTE(review): the "#31" in the comment above contradicts
	 * MP_ISA_BUS (0) actually used here -- looks like a leftover;
	 * confirm against other architectures' copies of this code.
	 */
	mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
	Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);

	/*
	 * Older generations of ES7000 have no legacy identity mappings
	 */
	if (es7000_plat == 1)
		return;

	/*
	 * Locate the IOAPIC that manages the ISA IRQs (0-15).
	 */
	ioapic = mp_find_ioapic(0);
	if (ioapic < 0)
		return;

	intsrc.mpc_type = MP_INTSRC;
	intsrc.mpc_irqflag = 0;					/* Conforming */
	intsrc.mpc_srcbus = MP_ISA_BUS;
	intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;

	/*
	 * Use the default configuration for the IRQs 0-15.  Unless
	 * overriden by (MADT) interrupt source override entries.
	 */
	for (i = 0; i < 16; i++) {
		int idx;

		for (idx = 0; idx < mp_irq_entries; idx++) {
			struct mpc_config_intsrc *irq = mp_irqs + idx;

			/* Do we already have a mapping for this ISA IRQ? */
			if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
				break;

			/* Do we already have a mapping for this IOAPIC pin */
			if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
				(irq->mpc_dstirq == i))
				break;
		}

		if (idx != mp_irq_entries) {
			printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
			continue;			/* IRQ already used */
		}

		intsrc.mpc_irqtype = mp_INT;
		intsrc.mpc_srcbusirq = i;		   /* Identity mapped */
		intsrc.mpc_dstirq = i;

		Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
			"%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
			(intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
			intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
			intsrc.mpc_dstirq);

		mp_irqs[mp_irq_entries] = intsrc;
		if (++mp_irq_entries == MAX_IRQ_SOURCES)
			panic("Max # of irq sources exceeded!\n");
	}
}

#define MAX_GSI_NUM	4096

/*
 * Program one I/O APIC pin for a GSI from an ACPI PRT entry and return
 * the IRQ number assigned to it.  Level-triggered (PCI) GSIs above 15
 * are renumbered compactly from 16 upward via pci_irq, skipping the SCI;
 * already-programmed pins return the previously assigned IRQ.
 */
int mp_register_gsi(u32 gsi, int triggering, int polarity)
{
	int ioapic = -1;
	int ioapic_pin = 0;
	int idx, bit = 0;
	static int pci_irq = 16;
	/*
	 * Mapping between Global System Interrups, which
	 * represent all possible interrupts, and IRQs
	 * assigned to actual devices.
	 */
	static int gsi_to_irq[MAX_GSI_NUM];

	/* Don't set up the ACPI SCI because it's already set up */
	if (acpi_gbl_FADT.sci_interrupt == gsi)
		return gsi;

	ioapic = mp_find_ioapic(gsi);
	if (ioapic < 0) {
		printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
		return gsi;
	}

	ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;

	/* Subarch hook (ES7000) may renumber the GSI */
	if (ioapic_renumber_irq)
		gsi = ioapic_renumber_irq(ioapic, gsi);

	/*
	 * Avoid pin reprogramming.  PRTs typically include entries
	 * with redundant pin->gsi mappings (but unique PCI devices);
	 * we only program the IOAPIC on the first.
	 */
	bit = ioapic_pin % 32;
	idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
	if (idx > 3) {
		printk(KERN_ERR "Invalid reference to IOAPIC pin "
			"%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
			ioapic_pin);
		return gsi;
	}
	if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
		Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
			mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
		return gsi_to_irq[gsi];
	}

	mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);

	if (triggering == ACPI_LEVEL_SENSITIVE) {
		/*
		 * For PCI devices assign IRQs in order, avoiding gaps
		 * due to unused I/O APIC pins.
		 */
		int irq = gsi;
		if (gsi < MAX_GSI_NUM) {
			/*
			 * Retain the VIA chipset work-around (gsi > 15), but
			 * avoid a problem where the 8254 timer (IRQ0) is setup
			 * via an override (so it's not on pin 0 of the ioapic),
			 * and at the same time, the pin 0 interrupt is a PCI
			 * type.  The gsi > 15 test could cause these two pins
			 * to be shared as IRQ0, and they are not shareable.
			 * So test for this condition, and if necessary, avoid
			 * the pin collision.
			 */
			if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0))
				gsi = pci_irq++;
			/*
			 * Don't assign IRQ used by ACPI SCI
			 */
			if (gsi == acpi_gbl_FADT.sci_interrupt)
				gsi = pci_irq++;
			gsi_to_irq[irq] = gsi;
		} else {
			printk(KERN_ERR "GSI %u is too high\n", gsi);
			return gsi;
		}
	}

	io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
		triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
		polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
	return gsi;
}

#endif /* CONFIG_X86_IO_APIC */
#endif /* CONFIG_ACPI */