/*
 * Common boot and setup code for both 32-bit and 64-bit.
 * Extracted from arch/powerpc/kernel/setup_64.c.
 *
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>
#include <linux/ioport.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/root_dev.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/unistd.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/debugfs.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/of_platform.h>
#include <asm/io.h>
#include <asm/paca.h>
#include <asm/prom.h>
#include <asm/processor.h>
#include <asm/vdso_datapage.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/btext.h>
#include <asm/nvram.h>
#include <asm/setup.h>
#include <asm/system.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/serial.h>
#include <asm/cache.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/xmon.h>
#include <asm/cputhreads.h>
#include <mm/mmu_decl.h>

#include "setup.h"

/* Debug trace helper: compiles away entirely unless DEBUG is defined above */
#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

/* The main machine-dep calls structure
 */
struct machdep_calls ppc_md;
EXPORT_SYMBOL(ppc_md);
/* Points at the entry in the __machine_desc table selected by probe_machine() */
struct machdep_calls *machine_id;
EXPORT_SYMBOL(machine_id);

/* End of the kernel image; memblock/bootmem start allocating above this */
unsigned long klimit = (unsigned long) _end;

char cmd_line[COMMAND_LINE_SIZE];

/*
 * This still seems to be needed... -- paulus
 */
struct screen_info screen_info = {
	.orig_x = 0,
	.orig_y = 25,
	.orig_video_cols = 80,
	.orig_video_lines = 25,
	.orig_video_isVGA = 1,
	.orig_video_points = 16
};

/* Variables required to store legacy IO irq routing */
int of_i8042_kbd_irq;
EXPORT_SYMBOL_GPL(of_i8042_kbd_irq);
int of_i8042_aux_irq;
EXPORT_SYMBOL_GPL(of_i8042_aux_irq);

#ifdef __DO_IRQ_CANON
/* Non-zero when the platform wants legacy IRQ numbers canonicalized */
int ppc_do_canonicalize_irqs;
EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
#endif

/* also used by kexec */
void machine_shutdown(void)
{
	/* Let the platform quiesce devices before a reboot/kexec, if it can */
	if (ppc_md.machine_shutdown)
		ppc_md.machine_shutdown();
}

/*
 * Reboot the machine via the platform hook.  If ppc_md.restart succeeds it
 * does not return; the code after it is the failure path.
 */
void machine_restart(char *cmd)
{
	machine_shutdown();
	if (ppc_md.restart)
		ppc_md.restart(cmd);
#ifdef CONFIG_SMP
	smp_send_stop();
#endif
	/* NOTE(review): message says "Halted" on the restart failure path
	 * too — intentional? confirm before changing the user-visible text */
	printk(KERN_EMERG "System Halted, OK to turn off power\n");
	local_irq_disable();
	while (1) ;
}

/*
 * Power the machine off via the platform hook; spin forever if the
 * hook is absent or returns.
 */
void machine_power_off(void)
{
	machine_shutdown();
	if (ppc_md.power_off)
		ppc_md.power_off();
#ifdef CONFIG_SMP
	smp_send_stop();
#endif
	printk(KERN_EMERG "System Halted, OK to turn off power\n");
	local_irq_disable();
	while (1) ;
}
/* Used by the G5 thermal driver */
EXPORT_SYMBOL_GPL(machine_power_off);

/* Generic pm_power_off hook defaults to the full power-off sequence above */
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL_GPL(pm_power_off);

/* Halt (stop without powering off) via the platform hook, then spin */
void machine_halt(void)
{
	machine_shutdown();
	if (ppc_md.halt)
		ppc_md.halt();
#ifdef CONFIG_SMP
	smp_send_stop();
#endif
	printk(KERN_EMERG "System Halted, OK to turn off power\n");
	local_irq_disable();
	while (1) ;
}


#ifdef CONFIG_TAU
extern u32
cpu_temp(unsigned long cpu);
extern u32 cpu_temp_both(unsigned long cpu);
#endif /* CONFIG_TAU */

#ifdef CONFIG_SMP
/* Per-CPU copy of the Processor Version Register, filled in at CPU bringup */
DEFINE_PER_CPU(unsigned int, cpu_pvr);
#endif

/*
 * Emit the /proc/cpuinfo lines that are common to the whole machine
 * (total bogomips, timebase, platform, model, platform extras, memory).
 * Called once, after the last online CPU has been shown.
 */
static void show_cpuinfo_summary(struct seq_file *m)
{
	struct device_node *root;
	const char *model = NULL;
#if defined(CONFIG_SMP) && defined(CONFIG_PPC32)
	unsigned long bogosum = 0;
	int i;
	for_each_online_cpu(i)
		bogosum += loops_per_jiffy;
	seq_printf(m, "total bogomips\t: %lu.%02lu\n",
		   bogosum/(500000/HZ), bogosum/(5000/HZ) % 100);
#endif /* CONFIG_SMP && CONFIG_PPC32 */
	seq_printf(m, "timebase\t: %lu\n", ppc_tb_freq);
	if (ppc_md.name)
		seq_printf(m, "platform\t: %s\n", ppc_md.name);
	/* Model string lives on the device-tree root node */
	root = of_find_node_by_path("/");
	if (root)
		model = of_get_property(root, "model", NULL);
	if (model)
		seq_printf(m, "model\t\t: %s\n", model);
	of_node_put(root);

	/* Let the platform append its own machine-wide lines */
	if (ppc_md.show_cpuinfo != NULL)
		ppc_md.show_cpuinfo(m);

#ifdef CONFIG_PPC32
	/* Display the amount of memory */
	seq_printf(m, "Memory\t\t: %d MB\n",
		   (unsigned int)(total_memory / (1024 * 1024)));
#endif
}

/*
 * seq_file .show handler: emit the per-CPU /proc/cpuinfo stanza.
 * v is the iterator cookie from c_start/c_next, which is cpu_id + 1.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	unsigned long cpu_id = (unsigned long)v - 1;
	unsigned int pvr;
	unsigned short maj;
	unsigned short min;

	/* We only show online cpus: disable preempt (overzealous, I
	 * knew) to prevent cpu going down.
*/ 210 preempt_disable(); 211 if (!cpu_online(cpu_id)) { 212 preempt_enable(); 213 return 0; 214 } 215 216#ifdef CONFIG_SMP 217 pvr = per_cpu(cpu_pvr, cpu_id); 218#else 219 pvr = mfspr(SPRN_PVR); 220#endif 221 maj = (pvr >> 8) & 0xFF; 222 min = pvr & 0xFF; 223 224 seq_printf(m, "processor\t: %lu\n", cpu_id); 225 seq_printf(m, "cpu\t\t: "); 226 227 if (cur_cpu_spec->pvr_mask) 228 seq_printf(m, "%s", cur_cpu_spec->cpu_name); 229 else 230 seq_printf(m, "unknown (%08x)", pvr); 231 232#ifdef CONFIG_ALTIVEC 233 if (cpu_has_feature(CPU_FTR_ALTIVEC)) 234 seq_printf(m, ", altivec supported"); 235#endif /* CONFIG_ALTIVEC */ 236 237 seq_printf(m, "\n"); 238 239#ifdef CONFIG_TAU 240 if (cur_cpu_spec->cpu_features & CPU_FTR_TAU) { 241#ifdef CONFIG_TAU_AVERAGE 242 /* more straightforward, but potentially misleading */ 243 seq_printf(m, "temperature \t: %u C (uncalibrated)\n", 244 cpu_temp(cpu_id)); 245#else 246 /* show the actual temp sensor range */ 247 u32 temp; 248 temp = cpu_temp_both(cpu_id); 249 seq_printf(m, "temperature \t: %u-%u C (uncalibrated)\n", 250 temp & 0xff, temp >> 16); 251#endif 252 } 253#endif /* CONFIG_TAU */ 254 255 /* 256 * Assume here that all clock rates are the same in a 257 * smp system. 
-- Cort 258 */ 259 if (ppc_proc_freq) 260 seq_printf(m, "clock\t\t: %lu.%06luMHz\n", 261 ppc_proc_freq / 1000000, ppc_proc_freq % 1000000); 262 263 if (ppc_md.show_percpuinfo != NULL) 264 ppc_md.show_percpuinfo(m, cpu_id); 265 266 /* If we are a Freescale core do a simple check so 267 * we dont have to keep adding cases in the future */ 268 if (PVR_VER(pvr) & 0x8000) { 269 switch (PVR_VER(pvr)) { 270 case 0x8000: /* 7441/7450/7451, Voyager */ 271 case 0x8001: /* 7445/7455, Apollo 6 */ 272 case 0x8002: /* 7447/7457, Apollo 7 */ 273 case 0x8003: /* 7447A, Apollo 7 PM */ 274 case 0x8004: /* 7448, Apollo 8 */ 275 case 0x800c: /* 7410, Nitro */ 276 maj = ((pvr >> 8) & 0xF); 277 min = PVR_MIN(pvr); 278 break; 279 default: /* e500/book-e */ 280 maj = PVR_MAJ(pvr); 281 min = PVR_MIN(pvr); 282 break; 283 } 284 } else { 285 switch (PVR_VER(pvr)) { 286 case 0x0020: /* 403 family */ 287 maj = PVR_MAJ(pvr) + 1; 288 min = PVR_MIN(pvr); 289 break; 290 case 0x1008: /* 740P/750P ?? */ 291 maj = ((pvr >> 8) & 0xFF) - 1; 292 min = pvr & 0xFF; 293 break; 294 default: 295 maj = (pvr >> 8) & 0xFF; 296 min = pvr & 0xFF; 297 break; 298 } 299 } 300 301 seq_printf(m, "revision\t: %hd.%hd (pvr %04x %04x)\n", 302 maj, min, PVR_VER(pvr), PVR_REV(pvr)); 303 304#ifdef CONFIG_PPC32 305 seq_printf(m, "bogomips\t: %lu.%02lu\n", 306 loops_per_jiffy / (500000/HZ), 307 (loops_per_jiffy / (5000/HZ)) % 100); 308#endif 309 310#ifdef CONFIG_SMP 311 seq_printf(m, "\n"); 312#endif 313 314 preempt_enable(); 315 316 /* If this is the last cpu, print the summary */ 317 if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids) 318 show_cpuinfo_summary(m); 319 320 return 0; 321} 322 323static void *c_start(struct seq_file *m, loff_t *pos) 324{ 325 if (*pos == 0) /* just in case, cpu 0 is not the first */ 326 *pos = cpumask_first(cpu_online_mask); 327 else 328 *pos = cpumask_next(*pos - 1, cpu_online_mask); 329 if ((*pos) < nr_cpu_ids) 330 return (void *)(unsigned long)(*pos + 1); 331 return NULL; 332} 333 
/* seq_file .next handler: advance position and re-run the start lookup */
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return c_start(m, pos);
}

/* seq_file .stop handler: nothing to release */
static void c_stop(struct seq_file *m, void *v)
{
}

/* Wired into /proc/cpuinfo by fs/proc */
const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};

/*
 * Validate the initrd range handed over by the boot loader and either
 * select it as the root device or drop it.
 */
void __init check_for_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	DBG(" -> check_for_initrd() initrd_start=0x%lx initrd_end=0x%lx\n",
	    initrd_start, initrd_end);

	/* If we were passed an initrd, set the ROOT_DEV properly if the values
	 * look sensible. If not, clear initrd reference.
	 */
	if (is_kernel_addr(initrd_start) && is_kernel_addr(initrd_end) &&
	    initrd_end > initrd_start)
		ROOT_DEV = Root_RAM0;
	else
		initrd_start = initrd_end = 0;

	if (initrd_start)
		printk("Found initrd at 0x%lx:0x%lx\n", initrd_start, initrd_end);

	DBG(" <- check_for_initrd()\n");
#endif /* CONFIG_BLK_DEV_INITRD */
}

#ifdef CONFIG_SMP

/* SMT topology: threads per core, log2 thereof, and the per-core mask */
int threads_per_core, threads_shift;
cpumask_t threads_core_mask;

/*
 * Record the SMT topology (tpc = threads per core) in the globals above.
 * BUGs if tpc is not a power of two.
 */
static void __init cpu_init_thread_core_maps(int tpc)
{
	int i;

	threads_per_core = tpc;
	threads_core_mask = CPU_MASK_NONE;

	/* This implementation only supports power of 2 number of threads
	 * for simplicity and performance
	 */
	threads_shift = ilog2(tpc);
	BUG_ON(tpc != (1 << threads_shift));

	for (i = 0; i < tpc; i++)
		cpu_set(i, threads_core_mask);

	printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n",
	       tpc, tpc > 1 ? "s" : "");
	printk(KERN_DEBUG " (thread shift is %d)\n", threads_shift);
}


/**
 * smp_setup_cpu_maps - initialize the following cpu maps:
 *                      cpu_possible_mask
 *                      cpu_present_mask
 *
 * Having the possible map set up early allows us to restrict allocations
 * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
 *
 * We do not initialize the online map here; cpus set their own bits in
 * cpu_online_mask as they come up.
 *
 * This function is valid only for Open Firmware systems. finish_device_tree
 * must be called before using this.
 *
 * While we're here, we may as well set the "physical" cpu ids in the paca.
 *
 * NOTE: This must match the parsing done in early_init_dt_scan_cpus.
 */
void __init smp_setup_cpu_maps(void)
{
	struct device_node *dn = NULL;
	int cpu = 0;
	int nthreads = 1;

	DBG("smp_setup_cpu_maps()\n");

	/* Walk every "cpu" node; each node may describe several HW threads */
	while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
		const int *intserv;
		int j, len;

		DBG(" * %s...\n", dn->full_name);

		intserv = of_get_property(dn, "ibm,ppc-interrupt-server#s",
				&len);
		if (intserv) {
			nthreads = len / sizeof(int);
			DBG(" ibm,ppc-interrupt-server#s -> %d threads\n",
			    nthreads);
		} else {
			DBG(" no ibm,ppc-interrupt-server#s -> 1 thread\n");
			intserv = of_get_property(dn, "reg", NULL);
			if (!intserv)
				intserv = &cpu;	/* assume logical == phys */
		}

		/* One logical cpu per hardware thread of this node */
		for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
			DBG("    thread %d -> cpu %d (hard id %d)\n",
			    j, cpu, intserv[j]);
			set_cpu_present(cpu, true);
			set_hard_smp_processor_id(cpu, intserv[j]);
			set_cpu_possible(cpu, true);
			cpu++;
		}
	}

	/* If no SMT supported, nthreads is forced to 1 */
	if (!cpu_has_feature(CPU_FTR_SMT)) {
		DBG("  SMT disabled ! nthreads forced to 1\n");
		nthreads = 1;
	}

#ifdef CONFIG_PPC64
	/*
	 * On pSeries LPAR, we need to know how many cpus
	 * could possibly be added to this partition.
	 */
	if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) &&
	    (dn = of_find_node_by_path("/rtas"))) {
		int num_addr_cell, num_size_cell, maxcpus;
		const unsigned int *ireg;

		num_addr_cell = of_n_addr_cells(dn);
		num_size_cell = of_n_size_cells(dn);

		ireg = of_get_property(dn, "ibm,lrdr-capacity", NULL);

		if (!ireg)
			goto out;

		/* Max-cpus cell follows the address and size cells */
		maxcpus = ireg[num_addr_cell + num_size_cell];

		/* Double maxcpus for processors which have SMT capability */
		if (cpu_has_feature(CPU_FTR_SMT))
			maxcpus *= nthreads;

		/* Clamp to what this kernel build can actually handle */
		if (maxcpus > NR_CPUS) {
			printk(KERN_WARNING
			       "Partition configured for %d cpus, "
			       "operating system maximum is %d.\n",
			       maxcpus, NR_CPUS);
			maxcpus = NR_CPUS;
		} else
			printk(KERN_INFO "Partition configured for %d cpus.\n",
			       maxcpus);

		for (cpu = 0; cpu < maxcpus; cpu++)
			set_cpu_possible(cpu, true);
	out:
		of_node_put(dn);
	}
	vdso_data->processorCount = num_present_cpus();
#endif /* CONFIG_PPC64 */

	/* Initialize CPU <=> thread mapping.
	 *
	 * WARNING: We assume that the number of threads is the same for
	 * every CPU in the system. If that is not the case, then some code
	 * here will have to be reworked
	 */
	cpu_init_thread_core_maps(nthreads);

	/* Shrink the paca array now that the real cpu count is known */
	free_unused_pacas();
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_PCSPKR_PLATFORM
/*
 * Register a "pcspkr" platform device if the device tree advertises a
 * PC-speaker node (pnpPNP,100).
 */
static __init int add_pcspkr(void)
{
	struct device_node *np;
	struct platform_device *pd;
	int ret;

	/* Only the node's existence matters, so the reference is dropped
	 * immediately; np is used purely as a found/not-found flag below */
	np = of_find_compatible_node(NULL, NULL, "pnpPNP,100");
	of_node_put(np);
	if (!np)
		return -ENODEV;

	pd = platform_device_alloc("pcspkr", -1);
	if (!pd)
		return -ENOMEM;

	ret = platform_device_add(pd);
	if (ret)
		platform_device_put(pd);

	return ret;
}
device_initcall(add_pcspkr);
#endif	/* CONFIG_PCSPKR_PLATFORM */

/*
 * Select the machdep_calls entry for the running machine: try each
 * compiled-in machine description's probe() in link order and copy the
 * first match into the global ppc_md.  Hangs if nothing matches.
 */
void probe_machine(void)
{
	extern struct machdep_calls __machine_desc_start;
	extern struct machdep_calls __machine_desc_end;

	/*
	 * Iterate all ppc_md structures until we find the proper
	 * one for the current machine type
	 */
	DBG("Probing machine type ...\n");

	for (machine_id = &__machine_desc_start;
	     machine_id < &__machine_desc_end;
	     machine_id++) {
		DBG("  %s ...", machine_id->name);
		memcpy(&ppc_md, machine_id, sizeof(struct machdep_calls));
		if (ppc_md.probe()) {
			DBG(" match !\n");
			break;
		}
		DBG("\n");
	}
	/* What can we do if we didn't find ? */
	if (machine_id >= &__machine_desc_end) {
		DBG("No suitable machine found !\n");
		for (;;);
	}

	printk(KERN_INFO "Using %s machine description\n", ppc_md.name);
}

/* Match a class of boards, not a specific device configuration.
 */
int check_legacy_ioport(unsigned long base_port)
{
	struct device_node *parent, *np = NULL;
	int ret = -ENODEV;

	/* Returns 0 when the device tree shows hardware behind the given
	 * legacy ISA port, -ENODEV otherwise */
	switch(base_port) {
	case I8042_DATA_REG:
		if (!(np = of_find_compatible_node(NULL, NULL, "pnpPNP,303")))
			np = of_find_compatible_node(NULL, NULL, "pnpPNP,f03");
		if (np) {
			parent = of_get_parent(np);

			/* Stash the kbd/aux interrupts for the i8042 driver,
			 * falling back to the legacy PC values 1 and 12 */
			of_i8042_kbd_irq = irq_of_parse_and_map(parent, 0);
			if (!of_i8042_kbd_irq)
				of_i8042_kbd_irq = 1;

			of_i8042_aux_irq = irq_of_parse_and_map(parent, 1);
			if (!of_i8042_aux_irq)
				of_i8042_aux_irq = 12;

			/* Continue the "isa" check below on the parent node */
			of_node_put(np);
			np = parent;
			break;
		}
		np = of_find_node_by_type(NULL, "8042");
		/* Pegasos has no device_type on its 8042 node, look for the
		 * name instead */
		if (!np)
			np = of_find_node_by_name(NULL, "8042");
		break;
	case FDC_BASE: /* FDC1 */
		np = of_find_node_by_type(NULL, "fdc");
		break;
#ifdef CONFIG_PPC_PREP
	case _PIDXR:
	case _PNPWRP:
	case PNPBIOS_BASE:
		/* implement me */
#endif
	default:
		/* ipmi is supposed to fail here */
		break;
	}
	if (!np)
		return ret;
	/* The device only counts as legacy IO if it sits on an ISA bus */
	parent = of_get_parent(np);
	if (parent) {
		if (strcmp(parent->type, "isa") == 0)
			ret = 0;
		of_node_put(parent);
	}
	of_node_put(np);
	return ret;
}
EXPORT_SYMBOL(check_legacy_ioport);

/* Panic-notifier callback: hand the panic string to the platform hook */
static int ppc_panic_event(struct notifier_block *this,
                             unsigned long event, void *ptr)
{
	ppc_md.panic(ptr);  /* May not return */
	return NOTIFY_DONE;
}

static struct notifier_block ppc_panic_block = {
	.notifier_call = ppc_panic_event,
	.priority = INT_MIN /* may not return; must be done last */
};

/* Register the platform panic hook on the global panic notifier chain */
void __init setup_panic(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
}

#ifdef CONFIG_CHECK_CACHE_COHERENCY
/*
 * For platforms that have configurable cache-coherency. This function
 * checks that the cache coherency setting of the kernel matches the setting
 * left by the firmware, as indicated in the device tree. Since a mismatch
 * will eventually result in DMA failures, we print an error and call
 * BUG() in that case.
 */

#ifdef CONFIG_NOT_COHERENT_CACHE
#define KERNEL_COHERENCY	0
#else
#define KERNEL_COHERENCY	1
#endif

static int __init check_cache_coherency(void)
{
	struct device_node *np;
	const void *prop;
	int devtree_coherency;

	/* Firmware marks non-coherent machines with a "coherency-off"
	 * property on the device-tree root */
	np = of_find_node_by_path("/");
	prop = of_get_property(np, "coherency-off", NULL);
	of_node_put(np);

	devtree_coherency = prop ? 0 : 1;

	if (devtree_coherency != KERNEL_COHERENCY) {
		printk(KERN_ERR
			"kernel coherency:%s != device tree_coherency:%s\n",
			KERNEL_COHERENCY ? "on" : "off",
			devtree_coherency ? "on" : "off");
		BUG();
	}

	return 0;
}

late_initcall(check_cache_coherency);
#endif /* CONFIG_CHECK_CACHE_COHERENCY */

#ifdef CONFIG_DEBUG_FS
/* Root of the arch-wide "powerpc" debugfs directory */
struct dentry *powerpc_debugfs_root;
EXPORT_SYMBOL(powerpc_debugfs_root);

static int powerpc_debugfs_init(void)
{
	powerpc_debugfs_root = debugfs_create_dir("powerpc", NULL);

	/* Non-zero (failure) when the directory could not be created */
	return powerpc_debugfs_root == NULL;
}
arch_initcall(powerpc_debugfs_init);
#endif

/* Give every new platform-bus device the direct DMA ops by default */
static int ppc_dflt_bus_notify(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct device *dev = data;

	/* We are only interested in device addition */
	if (action != BUS_NOTIFY_ADD_DEVICE)
		return 0;

	set_dma_ops(dev, &dma_direct_ops);

	return NOTIFY_DONE;
}

static struct notifier_block ppc_dflt_plat_bus_notifier = {
	.notifier_call = ppc_dflt_bus_notify,
	.priority = INT_MAX,
};

static int __init setup_bus_notifier(void)
{
	bus_register_notifier(&platform_bus_type, &ppc_dflt_plat_bus_notifier);
	return 0;
}

arch_initcall(setup_bus_notifier);