/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>

#include <asm/unified.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/traps.h>
#include <asm/unwind.h>

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
#include "compat.h"
#endif
#include "atags.h"
#include "tcm.h"

/* Default memory size used by init_tags when the bootloader passes none. */
#ifndef MEM_SIZE
#define MEM_SIZE	(16*1024*1024)
#endif

#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
/* Name of the requested FP emulator, captured from the "fpe=" option. */
char fpe_type[8];

/*
 * "fpe=" early option handler.
 * NOTE(review): this always copies exactly 8 bytes from the option
 * string, so a shorter option relies on the trailing bytes of the
 * command line being readable — confirm callers guarantee this.
 */
static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void reboot_setup(char *str);

/* CPU main ID register value, exported for loadable modules. */
unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
/* Machine/board number as passed by the boot loader. */
unsigned int __machine_arch_type;
EXPORT_SYMBOL(__machine_arch_type);
/* Cache geometry flags (CACHEID_*), set once by cacheid_init(). */
unsigned int cacheid;
EXPORT_SYMBOL(cacheid);

/* Physical address of the ATAG list, as handed over by head.S. */
unsigned int __atags_pointer __initdata;

/* Board revision / serial number, reported via /proc/cpuinfo. */
unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

/* ELF hardware-capability bits, indexed by hwcap_str[] for /proc/cpuinfo. */
unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);


/*
 * When the kernel is built for more than one CPU/TLB/user/cache
 * implementation, these hold the function tables copied from the
 * matching proc_info_list entry in setup_processor().
 */
#ifdef MULTI_CPU
struct processor processor;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Per-CPU exception-mode stacks: three words each for IRQ, abort and
 * undefined-instruction modes.  cpu_init() points each mode's SP here.
 */
struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];

static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
/*
 * ENDIANNESS reads the byte of the long that overlays c[0] on
 * little-endian ('l') or c[3] on big-endian ('b'); it is appended to
 * the machine and ELF platform names below.
 */
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel text",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

/*
 * Legacy PC-style parallel-port I/O ranges, reserved on request via the
 * lp0/lp1/lp2 aliases (see request_standard_resources()).
 */
static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

/* Human-readable names indexed by the CPU_ARCH_* value. */
static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"?(11)",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

/*
 * Decode the architecture version (CPU_ARCH_*) from the main ID
 * register.  Pre-ARM7 parts have no architecture field; ARM7 uses
 * bit 23 to distinguish v4T from v3; later parts either encode the
 * version directly in bits [19:16] or, when that field is 0xf, use
 * the "revised CPUID" scheme read from MMFR0 below.
 */
int cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) == 0x00000003 ||
		    (mmfr0 & 0x000000f0) == 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}

/*
 * Classify the cache as VIVT or VIPT (aliasing/non-aliasing) from the
 * cache type register and record the result in the global 'cacheid'.
 * Pre-v6 cores are always treated as VIVT.
 */
static void __init cacheid_init(void)
{
	unsigned int cachetype = read_cpuid_cachetype();
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			cacheid = CACHEID_VIPT_NONALIASING;
			if ((cachetype & (3 << 14)) == 1 << 14)
				cacheid |= CACHEID_ASID_TAGGED;
		} else if (cachetype & (1 << 23))
			cacheid = CACHEID_VIPT_ALIASING;
		else
			cacheid = CACHEID_VIPT_NONALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	printk("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);
extern struct machine_desc *lookup_machine_type(unsigned int);

/*
 * Work around early ARM1136 parts: clear HWCAP_TLS when the core is an
 * ARM implementer (0x41) 11xx-family part of the affected revision.
 */
static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}

/*
 * Identify the boot CPU from its main ID register, install the matching
 * per-implementation function tables, and initialise cache id and
 * processor state.  Halts (infinite loop) if the CPU is not supported
 * by this kernel build.
 */
static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		printk("CPU configuration botched (ID %08x), unable "
		       "to continue.\n", read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
	       proc_arch[cpu_architecture()], cr_alignment);

	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;
#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~HWCAP_THUMB;
#endif

	feat_v6_fixup();

	cacheid_init();
	cpu_proc_init();
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void cpu_init(void)
{
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers:
	 * switch into IRQ, abort and undefined mode in turn, point that
	 * mode's SP at the matching slot in this CPU's 'stacks' entry,
	 * then return to SVC mode.  Interrupts stay masked throughout.
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
}

/*
 * Look up the machine descriptor for machine number 'nr'; halts
 * (infinite loop) if this kernel has no support for the board.
 */
static struct machine_desc * __init setup_machine(unsigned int nr)
{
	struct machine_desc *list;

	/*
	 * locate machine in the list of supported machines.
	 */
	list = lookup_machine_type(nr);
	if (!list) {
		printk("Machine configuration botched (nr %d), unable "
		       "to continue.\n", nr);
		while (1);
	}

	printk("Machine: %s\n", list->name);

	return list;
}

/*
 * Append a page-aligned memory bank [start, start+size) to meminfo.
 * Returns -EINVAL when the bank table is full or the region rounds
 * down to nothing.
 */
static int __init arm_add_memory(unsigned long start, unsigned long size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];

	if (meminfo.nr_banks >= NR_BANKS) {
		printk(KERN_CRIT "NR_BANKS too low, "
			"ignoring memory at %#lx\n", start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	bank->start = PAGE_ALIGN(start);
	bank->size  = size & PAGE_MASK;

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}

/*
 * Pick out the memory size.
 We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	unsigned long size, start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

/*
 * Forward the bootloader's RAM-disk parameters to the block RAM-disk
 * driver; a no-op when CONFIG_BLK_DEV_RAM is not built in.
 */
static void __init
setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
{
#ifdef CONFIG_BLK_DEV_RAM
	extern int rd_size, rd_image_start, rd_prompt, rd_doload;

	rd_image_start = image_start;
	rd_prompt = prompt;
	rd_doload = doload;

	if (rd_sz)
		rd_size = rd_sz;
#endif
}

/*
 * Register the standard iomem/ioport resources: one "System RAM" entry
 * per memory bank (with kernel text/data nested inside the bank that
 * holds them), plus optional video RAM and parallel-port ranges from
 * the machine descriptor.
 */
static void __init
request_standard_resources(struct meminfo *mi, struct machine_desc *mdesc)
{
	struct resource *res;
	int i;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_data);
	kernel_data.end     = virt_to_phys(_end - 1);

	for (i = 0; i < mi->nr_banks; i++) {
		if (mi->bank[i].size == 0)
			continue;

		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = mi->bank[i].start;
		res->end   = mi->bank[i].start + mi->bank[i].size - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

/*
 * Tag parsing.
 *
 * This is the new way of passing data to the kernel at boot time.  Rather
 * than passing a fixed inflexible structure to the kernel, we pass a list
 * of variable-sized tags to the kernel.  The first tag must be a ATAG_CORE
 * tag for the list to be recognised (to distinguish the tagged list from
 * a param_struct).  The list is terminated with a zero-length tag (this tag
 * is not parsed in any way).
 */
static int __init parse_tag_core(const struct tag *tag)
{
	if (tag->hdr.size > 2) {
		if ((tag->u.core.flags & 1) == 0)
			root_mountflags &= ~MS_RDONLY;
		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
	}
	return 0;
}

__tagtable(ATAG_CORE, parse_tag_core);

/* ATAG_MEM: one memory bank per tag. */
static int __init parse_tag_mem32(const struct tag *tag)
{
	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
}

__tagtable(ATAG_MEM, parse_tag_mem32);

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};

/* ATAG_VIDEOTEXT: initial text-console geometry from the bootloader. */
static int __init parse_tag_videotext(const struct tag *tag)
{
	screen_info.orig_x            = tag->u.videotext.x;
	screen_info.orig_y            = tag->u.videotext.y;
	screen_info.orig_video_page   = tag->u.videotext.video_page;
	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
	screen_info.orig_video_points = tag->u.videotext.video_points;
	return 0;
}

__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
#endif

/* ATAG_RAMDISK: flag bit 0 = don't load, bit 1 = don't prompt. */
static int __init parse_tag_ramdisk(const struct tag *tag)
{
	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
		      (tag->u.ramdisk.flags & 2) == 0,
		      tag->u.ramdisk.start, tag->u.ramdisk.size);
	return 0;
}

__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);

static int __init parse_tag_serialnr(const struct tag *tag)
{
	system_serial_low = tag->u.serialnr.low;
	system_serial_high = tag->u.serialnr.high;
	return 0;
}

__tagtable(ATAG_SERIAL, parse_tag_serialnr);

static int __init parse_tag_revision(const struct tag *tag)
{
	system_rev = tag->u.revision.rev;
	return 0;
}

__tagtable(ATAG_REVISION, parse_tag_revision);

#ifndef CONFIG_CMDLINE_FORCE
/* ATAG_CMDLINE overrides the compiled-in default command line. */
static int __init parse_tag_cmdline(const struct tag *tag)
{
	strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE);
	return 0;
}

__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
#endif /* CONFIG_CMDLINE_FORCE */

/*
 * Scan the tag table for this tag, and call its parse function.
 * The tag table is built by the linker from all the __tagtable
 * declarations.
 */
static int __init parse_tag(const struct tag *tag)
{
	extern struct tagtable __tagtable_begin, __tagtable_end;
	struct tagtable *t;

	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
		if (tag->hdr.tag == t->tag) {
			t->parse(tag);
			break;
		}

	/* non-zero when a handler was found for this tag */
	return t < &__tagtable_end;
}

/*
 * Parse all tags in the list, checking both the global and architecture
 * specific tag tables.
 */
static void __init parse_tags(const struct tag *t)
{
	for (; t->hdr.size; t = tag_next(t))
		if (!parse_tag(t))
			printk(KERN_WARNING
				"Ignoring unrecognised tag 0x%08x\n",
				t->hdr.tag);
}

/*
 * This holds our defaults: an ATAG_CORE header, a single memory bank of
 * MEM_SIZE at the DRAM base, and the terminating zero-length tag.  Used
 * when the bootloader passes no (valid) tag list.
 */
static struct init_tags {
	struct tag_header hdr1;
	struct tag_core   core;
	struct tag_header hdr2;
	struct tag_mem32  mem;
	struct tag_header hdr3;
} init_tags __initdata = {
	{ tag_size(tag_core), ATAG_CORE },
	{ 1, PAGE_SIZE, 0xff },
	{ tag_size(tag_mem32), ATAG_MEM },
	{ MEM_SIZE, CONFIG_DRAM_BASE },
	{ 0, ATAG_NONE }
};

/* Machine-specific init hook, saved by setup_arch() and run below. */
static void (*init_machine)(void) __initdata;

static int __init customize_machine(void)
{
	/* customizes platform devices, or adds new ones */
	if (init_machine)
		init_machine();
	return 0;
}
arch_initcall(customize_machine);

#ifdef CONFIG_KEXEC
/* Total low memory in bytes, derived from the pfn range. */
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
	if (ret < 0) {
		printk(KERN_WARNING "crashkernel reservation failed - "
		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
		return;
	}

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
	       "for crashkernel (System RAM: %ldMB)\n",
	       (unsigned long)(crash_size >> 20),
	       (unsigned long)(crash_base >> 20),
	       (unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

/*
 * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
 * is_kdump_kernel() to determine if we are booting after a panic. Hence
 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
 */

#ifdef CONFIG_CRASH_DUMP
/*
 * elfcorehdr= specifies the location of elf core header stored by the crashed
 * kernel. This option will be passed by kexec loader to the capture kernel.
 */
static int __init setup_elfcorehdr(char *arg)
{
	char *end;

	if (!arg)
		return -EINVAL;

	elfcorehdr_addr = memparse(arg, &end);
	/* succeed only if memparse consumed at least one character */
	return end > arg ?
 0 : -EINVAL;
}
early_param("elfcorehdr", setup_elfcorehdr);
#endif /* CONFIG_CRASH_DUMP */

/*
 * Neutralise all ATAG_MEM tags in the list, used when a machine fixup
 * has already populated meminfo and the bootloader's banks must not be
 * applied on top.
 */
static void __init squash_mem_tags(struct tag *tag)
{
	for (; tag->hdr.size; tag = tag_next(tag))
		if (tag->hdr.tag == ATAG_MEM)
			tag->hdr.tag = ATAG_NONE;
}

/*
 * Architecture entry point for boot-time setup: identify CPU and
 * machine, locate and parse the ATAG list, establish the command line,
 * then bring up memory (memblock, paging), resources, per-CPU state and
 * the machine-specific IRQ/timer/init hooks.  Ordering here is
 * load-bearing — e.g. paging_init() must follow tag parsing and
 * parse_early_param().
 */
void __init setup_arch(char **cmdline_p)
{
	struct tag *tags = (struct tag *)&init_tags;
	struct machine_desc *mdesc;
	char *from = default_command_line;

	init_tags.mem.start = PHYS_OFFSET;

	unwind_init();

	setup_processor();
	mdesc = setup_machine(machine_arch_type);
	machine_name = mdesc->name;

	if (mdesc->soft_reboot)
		reboot_setup("s");

	/*
	 * NOTE(review): non-mainline variant — the boot_params address is
	 * derived from CONFIG_BOARD_PARAMS_PHYS relative to the DRAM base
	 * rather than taken from the machine descriptor; confirm this
	 * matches the board's bootloader handoff.
	 */
	mdesc->boot_params = PHYS_OFFSET + (CONFIG_BOARD_PARAMS_PHYS - CONFIG_DRAM_BASE);

	if (__atags_pointer)
		tags = phys_to_virt(__atags_pointer);
	else if (mdesc->boot_params)
		tags = phys_to_virt(mdesc->boot_params);

#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
	/*
	 * If we have the old style parameters, convert them to
	 * a tag list.
	 */
	if (tags->hdr.tag != ATAG_CORE)
		convert_to_tag_list(tags);
#endif
	/* fall back to the built-in defaults if the list is unrecognised */
	if (tags->hdr.tag != ATAG_CORE)
		tags = (struct tag *)&init_tags;

	if (mdesc->fixup)
		mdesc->fixup(mdesc, tags, &from, &meminfo);

	if (tags->hdr.tag == ATAG_CORE) {
		if (meminfo.nr_banks != 0)
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
	}

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* parse_early_param needs a boot_command_line */
	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(&meminfo, mdesc);

#ifdef CONFIG_SMP
	smp_init_cpus();
#endif
	reserve_crashkernel();

	cpu_init();
	tcm_init();

	/*
	 * Set up various architecture-specific pointers
	 */
	arch_nr_irqs = mdesc->nr_irqs;
	init_arch_irq = mdesc->init_irq;
	system_timer = mdesc->timer;
	init_machine = mdesc->init_machine;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	early_trap_init();
}


/* Register a sysfs CPU device for every possible CPU. */
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
/* Create the /proc/cpu directory used by the alignment handler etc. */
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if
 (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

/*
 * /proc/cpuinfo "Features" names; the array index is the bit number in
 * elf_hwcap (see the loop in c_show()).
 */
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	NULL
};

/* seq_file 'show' for /proc/cpuinfo: the whole report in one call. */
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
		   cpu_name, read_cpuid_id() & 15, elf_platform);

#if defined(CONFIG_SMP)
	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
	}
#else /* CONFIG_SMP */
	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000/HZ),
		   (loops_per_jiffy / (5000/HZ)) % 100);
#endif

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);

	/* field layout of the main ID register differs by CPU era */
	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
		/* pre-ARM7 */
		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
	} else {
		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
			/* ARM7 */
			seq_printf(m, "CPU variant\t: 0x%02x\n",
				   (read_cpuid_id() >> 16) & 127);
		} else {
			/* post-ARM7 */
			seq_printf(m, "CPU variant\t: 0x%x\n",
				   (read_cpuid_id() >> 20) & 15);
		}
		seq_printf(m, "CPU part\t: 0x%03x\n",
			   (read_cpuid_id() >> 4) & 0xfff);
	}
	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);

	seq_puts(m, "\n");

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

/* seq_file iterator: exactly one record (the whole report). */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};