/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Stephane Eranian <eranian@hpl.hp.com>
 * Copyright (C) 2000, 2004 Intel Corp
 *	Rohit Seth <rohit.seth@intel.com>
 *	Suresh Siddha <suresh.b.siddha@intel.com>
 *	Gordon Jin <gordon.jin@intel.com>
 * Copyright (C) 1999 VA Linux Systems
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 *
 * 05/28/05 Z. Menyhart	Dynamic stride size for "flush_icache_range()"
 * 12/26/04 S.Siddha, G.Jin, R.Seth
 *			Add multi-threading and multi-core detection
 * 11/12/01 D.Mosberger	Convert get_cpuinfo() to seq_file based show_cpuinfo().
 * 04/04/00 D.Mosberger	renamed cpu_initialized to cpu_online_map
 * 03/31/00 R.Seth	cpu_initialized and current->processor fixes
 * 02/04/00 D.Mosberger	some more get_cpuinfo fixes...
 * 02/01/00 R.Seth	fixed get_cpuinfo for SMP
 * 06/24/99 W.Drummond	added boot_cpu_data.
 * 01/07/99 S.Eranian	added the support for command line argument
 */
#include <linux/module.h>
#include <linux/init.h>

#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/screen_info.h>
#include <linux/dmi.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/efi.h>
#include <linux/initrd.h>
#include <linux/pm.h>
#include <linux/cpufreq.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>

#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/unistd.h>

#if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
# error "struct cpuinfo_ia64 too big!"
#endif

#ifdef CONFIG_SMP
unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif

extern void ia64_setup_printk_clock(void);

DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};
extern char _text[], _end[], _etext[];

unsigned long ia64_max_cacheline_size;

int dma_get_cache_alignment(void)
{
	return ia64_max_cacheline_size;
}
EXPORT_SYMBOL(dma_get_cache_alignment);

unsigned long ia64_iobase;	/* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
EXPORT_SYMBOL(io_space);
unsigned int num_io_spaces;

/*
 * "flush_icache_range()" needs to know what processor-dependent stride size to use
 * when it makes i-cache(s) coherent with d-caches.
 */
#define	I_CACHE_STRIDE_SHIFT	5	/* Safest way to go: 32 bytes by 32 bytes */
unsigned long ia64_i_cache_stride_shift = ~0;	/* "infinity"; minimized in get_max_cacheline_size() */

/*
 * "merge_mask" needs to be set to (max(iommu_page_size(iommu)) - 1).  This mask
 * specifies the address bits that must be 0 in order for two buffers to be
 * mergeable by the I/O MMU (i.e., the end address of the first buffer and the
 * start address of the second buffer must be aligned to (merge_mask+1) in order
 * to be mergeable).  By default, we assume there is no I/O MMU which can merge
 * physically discontiguous buffers, so we set the merge_mask to ~0UL, which
 * corresponds to an iommu page-size of 2^64.
 */
unsigned long ia64_max_iommu_merge_mask = ~0UL;
EXPORT_SYMBOL(ia64_max_iommu_merge_mask);
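/*
 * Illustrative example (not any real platform): an I/O MMU with 4 KiB pages
 * would set the mask to 0xfff, i.e. two buffers are merge candidates only
 * when the first ends and the second starts on a 4 KiB boundary:
 *
 *	ia64_max_iommu_merge_mask = 0xfff;	(hypothetical)
 *	mergeable = ((buf1_end | buf2_start) & ia64_max_iommu_merge_mask) == 0;
 *
 * With the default mask of ~0UL no boundary qualifies, so merging stays
 * effectively disabled.
 */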
/*
 * A special marker denotes the end of memory; it occupies the extra (+1) slot.
 */
struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata;
int num_rsvd_regions __initdata;


/*
 * Filter incoming memory segments based on the primitive map created from the boot
 * parameters.  Segments contained in the map are removed from the memory ranges.  A
 * caller-specified function is called with the memory ranges that remain after filtering.
 * This routine does not assume the incoming segments are sorted.
 */
int __init
filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
{
	unsigned long range_start, range_end, prev_start;
	void (*func)(unsigned long, unsigned long, int);
	int i;

#if IGNORE_PFN0
	if (start == PAGE_OFFSET) {
		printk(KERN_WARNING "warning: skipping physical page 0\n");
		start += PAGE_SIZE;
		if (start >= end)
			return 0;
	}
#endif
	/*
	 * lowest possible address (walker uses virtual)
	 */
	prev_start = PAGE_OFFSET;
	func = arg;

	for (i = 0; i < num_rsvd_regions; ++i) {
		range_start = max(start, prev_start);
		range_end   = min(end, rsvd_region[i].start);

		if (range_start < range_end)
			call_pernode_memory(__pa(range_start), range_end - range_start, func);

		/* nothing more available in this segment */
		if (range_end == end)
			return 0;

		prev_start = rsvd_region[i].end;
	}
	/* end of memory marker allows full processing inside loop body */
	return 0;
}

static void __init
sort_regions (struct rsvd_region *rsvd_region, int max)
{
	int j;

	/* simple bubble sort; fine for the handful of reserved regions we have */
	while (max--) {
		for (j = 0; j < max; ++j) {
			if (rsvd_region[j].start > rsvd_region[j+1].start) {
				struct rsvd_region tmp;
				tmp = rsvd_region[j];
				rsvd_region[j] = rsvd_region[j + 1];
				rsvd_region[j + 1] = tmp;
			}
		}
	}
}

/*
 * Request address space for all standard resources
 */
static int __init register_memory(void)
{
	code_resource.start = ia64_tpa(_text);
	code_resource.end   = ia64_tpa(_etext) - 1;
	data_resource.start = ia64_tpa(_etext);
	data_resource.end   = ia64_tpa(_end) - 1;
	efi_initialize_iomem_resources(&code_resource, &data_resource);

	return 0;
}

__initcall(register_memory);
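/*
 * Once this initcall has run, the kernel's text and data ranges are visible
 * in the iomem resource tree, e.g. via /proc/iomem (addresses made up):
 *
 *	04000000-045fffff : Kernel code
 *	04600000-04ffffff : Kernel data
 */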
/**
 * reserve_memory - setup reserved memory areas
 *
 * Setup the reserved memory areas set aside for the boot parameters,
 * initrd, etc.  There are currently %IA64_MAX_RSVD_REGIONS defined,
 * see include/asm-ia64/meminit.h if you need to define more.
 */
void __init
reserve_memory (void)
{
	int n = 0;

	/*
	 * none of the entries in this table overlap
	 */
	rsvd_region[n].start = (unsigned long) ia64_boot_param;
	rsvd_region[n].end   = rsvd_region[n].start + sizeof(*ia64_boot_param);
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap);
	rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->efi_memmap_size;
	n++;

	rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line);
	rsvd_region[n].end   = (rsvd_region[n].start
				+ strlen(__va(ia64_boot_param->command_line)) + 1);
	n++;

	rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
	rsvd_region[n].end   = (unsigned long) ia64_imva(_end);
	n++;

#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start);
		rsvd_region[n].end   = rsvd_region[n].start + ia64_boot_param->initrd_size;
		n++;
	}
#endif

#ifdef CONFIG_PROC_VMCORE
	if (reserve_elfcorehdr(&rsvd_region[n].start,
			       &rsvd_region[n].end) == 0)
		n++;
#endif

	efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
	n++;

#ifdef CONFIG_KEXEC
	/*
	 * crashkernel=size@offset specifies the size to reserve for a crash
	 * kernel.  If offset is 0, then it is determined automatically.
	 * By reserving this memory we guarantee that Linux never sets it
	 * up as a DMA target.  Useful for holding code to do something
	 * appropriate after a kernel panic.
	 */
	{
		char *from = strstr(boot_command_line, "crashkernel=");
		unsigned long base, size;
		if (from) {
			size = memparse(from + 12, &from);
			if (*from == '@')
				base = memparse(from+1, &from);
			else
				base = 0;
			if (size) {
				if (!base) {
					sort_regions(rsvd_region, n);
					base = kdump_find_rsvd_region(size,
								rsvd_region, n);
				}
				if (base != ~0UL) {
					rsvd_region[n].start =
						(unsigned long)__va(base);
					rsvd_region[n].end =
						(unsigned long)__va(base + size);
					n++;
					crashk_res.start = base;
					crashk_res.end = base + size - 1;
				}
			}
		}
		efi_memmap_res.start = ia64_boot_param->efi_memmap;
		efi_memmap_res.end = efi_memmap_res.start +
			ia64_boot_param->efi_memmap_size;
		boot_param_res.start = __pa(ia64_boot_param);
		boot_param_res.end = boot_param_res.start +
			sizeof(*ia64_boot_param);
	}
#endif
	/* end of memory marker */
	rsvd_region[n].start = ~0UL;
	rsvd_region[n].end   = ~0UL;
	n++;

	num_rsvd_regions = n;
	BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);

	sort_regions(rsvd_region, num_rsvd_regions);
}


/**
 * find_initrd - get initrd parameters from the boot parameter structure
 *
 * Grab the initrd start and end from the boot parameter struct given us by
 * the boot loader.
 */
void __init
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
		initrd_end   = initrd_start + ia64_boot_param->initrd_size;

		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
		       initrd_start, ia64_boot_param->initrd_size);
	}
#endif
}

static void __init
io_port_init (void)
{
	unsigned long phys_iobase;

	/*
	 * Set `iobase' based on the EFI memory map or, failing that, the
	 * value firmware left in ar.k0.
	 *
	 * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
	 * the port's virtual address, so ia32_load_state() loads it with a
	 * user virtual address.  But in ia64 mode, glibc uses the
	 * *physical* address in ar.k0 to mmap the appropriate area from
	 * /dev/mem, and the inX()/outX() interfaces use MMIO.  In both
	 * cases, user-mode can only use the legacy 0-64K I/O port space.
	 *
	 * ar.k0 is not involved in kernel I/O port accesses, which can use
	 * any of the I/O port spaces and are done via MMIO using the
	 * virtual mmio_base from the appropriate io_space[].
	 */
	phys_iobase = efi_get_iobase();
	if (!phys_iobase) {
		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
		printk(KERN_INFO "No I/O port range found in EFI memory map, "
			"falling back to AR.KR0 (0x%lx)\n", phys_iobase);
	}
	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

	/* setup legacy IO port space */
	io_space[0].mmio_base = ia64_iobase;
	io_space[0].sparse = 1;
	num_io_spaces = 1;
}
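/*
 * Sketch of how a legacy port access goes through the sparse space set up
 * above (per IO_SPACE_SPARSE_ENCODING() in asm/io.h; the port number is just
 * an example):
 *
 *	inb(0x1f0)  ->  MMIO read at
 *		ia64_iobase + (((0x1f0 >> 2) << 12) | (0x1f0 & 0xfff))
 *
 * Roughly: each 4-byte chunk of port space sits on its own 4K page, so the
 * chipset can recover the original port number from the address.
 */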
/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start using
 * them very early in the boot process, either right after the machine
 * vector initialization, or even before if the drivers can detect their hw.
 *
 * Returns non-zero if a console couldn't be set up.
 */
static inline int __init
early_console_setup (char *cmdline)
{
	int earlycons = 0;

#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
	{
		extern int sn_serial_console_early_setup(void);
		if (!sn_serial_console_early_setup())
			earlycons++;
	}
#endif
#ifdef CONFIG_EFI_PCDP
	if (!efi_setup_pcdp_console(cmdline))
		earlycons++;
#endif
#ifdef CONFIG_SERIAL_8250_CONSOLE
	if (!early_serial_console_init(cmdline))
		earlycons++;
#endif

	return (earlycons) ? 0 : -1;
}

static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
	/* If we register an early console, allow CPU 0 to printk */
	cpu_set(smp_processor_id(), cpu_online_map);
#endif
}

#ifdef CONFIG_SMP
static void __init
check_for_logical_procs (void)
{
	pal_logical_to_physical_t info;
	s64 status;

	status = ia64_pal_logical_to_phys(0, &info);
	if (status == -1) {
		printk(KERN_INFO "No logical to physical processor mapping "
		       "available\n");
		return;
	}
	if (status) {
		printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n",
		       status);
		return;
	}
	/*
	 * Total number of siblings that BSP has.  Though not all of them
	 * may have booted successfully.  The correct number of siblings
	 * booted is in info.overview_num_log.
	 */
	smp_num_siblings = info.overview_tpc;
	smp_num_cpucores = info.overview_cpp;
}
#endif

static __initdata int nomca;
static __init int setup_nomca(char *s)
{
	nomca = 1;
	return 0;
}
early_param("nomca", setup_nomca);
#ifdef CONFIG_PROC_VMCORE
/*
 * elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel.
 */
static int __init parse_elfcorehdr(char *arg)
{
	if (!arg)
		return -EINVAL;

	elfcorehdr_addr = memparse(arg, &arg);
	return 0;
}
early_param("elfcorehdr", parse_elfcorehdr);

int __init reserve_elfcorehdr(unsigned long *start, unsigned long *end)
{
	unsigned long length;

	/*
	 * We get the address using the kernel command line,
	 * but the size is extracted from the EFI tables.
	 * Both address and size are required for reservation
	 * to work properly.
	 */

	if (elfcorehdr_addr >= ELFCORE_ADDR_MAX)
		return -EINVAL;

	if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) {
		elfcorehdr_addr = ELFCORE_ADDR_MAX;
		return -EINVAL;
	}

	*start = (unsigned long)__va(elfcorehdr_addr);
	*end   = *start + length;
	return 0;
}

#endif /* CONFIG_PROC_VMCORE */
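/*
 * setup_arch() is the architecture-specific leg of kernel startup; it runs
 * once, on the boot CPU, early in start_kernel().
 */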
void __init
setup_arch (char **cmdline_p)
{
	unw_init();

	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);

	*cmdline_p = __va(ia64_boot_param->command_line);
	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);

	efi_init();
	io_port_init();

	parse_early_param();

#ifdef CONFIG_IA64_GENERIC
	machvec_init(NULL);
#endif

	if (early_console_setup(*cmdline_p) == 0)
		mark_bsp_online();

#ifdef CONFIG_ACPI
	/* Initialize the ACPI boot-time table parser */
	acpi_table_init();
# ifdef CONFIG_ACPI_NUMA
	acpi_numa_init();
# endif
#else
# ifdef CONFIG_SMP
	smp_build_cpu_map();	/* happens, e.g., with the Ski simulator */
# endif
#endif /* CONFIG_ACPI */

	find_memory();

	/* process SAL system table: */
	ia64_sal_init(__va(efi.sal_systab));

	ia64_setup_printk_clock();

#ifdef CONFIG_SMP
	cpu_physical_id(0) = hard_smp_processor_id();

	cpu_set(0, cpu_sibling_map[0]);
	cpu_set(0, cpu_core_map[0]);

	check_for_logical_procs();
	if (smp_num_cpucores > 1)
		printk(KERN_INFO
		       "cpu package is Multi-Core capable: number of cores=%d\n",
		       smp_num_cpucores);
	if (smp_num_siblings > 1)
		printk(KERN_INFO
		       "cpu package is Multi-Threading capable: number of siblings=%d\n",
		       smp_num_siblings);
#endif

	cpu_init();	/* initialize the bootstrap CPU */
	mmu_context_init();	/* initialize context_id bitmap */

	check_sal_cache_flush();

#ifdef CONFIG_ACPI
	acpi_boot_init();
#endif

#ifdef CONFIG_VT
	if (!conswitchp) {
# if defined(CONFIG_DUMMY_CONSOLE)
		conswitchp = &dummy_con;
# endif
# if defined(CONFIG_VGA_CONSOLE)
		/*
		 * Non-legacy systems may route legacy VGA MMIO range to system
		 * memory.  vga_con probes the MMIO hole, so memory looks like
		 * a VGA device to it.  The EFI memory map can tell us if it's
		 * memory so we can avoid this problem.
		 */
		if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY)
			conswitchp = &vga_con;
# endif
	}
#endif

	/* enable IA-64 Machine Check Abort Handling unless disabled */
	if (!nomca)
		ia64_mca_init();

	platform_setup(cmdline_p);
	paging_init();
}

/*
 * Display cpu info for all CPUs.
 */
static int
show_cpuinfo (struct seq_file *m, void *v)
{
#ifdef CONFIG_SMP
#	define lpj	c->loops_per_jiffy
#	define cpunum	c->cpu
#else
#	define lpj	loops_per_jiffy
#	define cpunum	0
#endif
	static struct {
		unsigned long mask;
		const char *feature_name;
	} feature_bits[] = {
		{ 1UL << 0, "branchlong" },
		{ 1UL << 1, "spontaneous deferral"},
		{ 1UL << 2, "16-byte atomic ops" }
	};
	char features[128], *cp, *sep;
	struct cpuinfo_ia64 *c = v;
	unsigned long mask;
	unsigned long proc_freq;
	int i, size;

	mask = c->features;

	/* build the feature string: */
	memcpy(features, "standard", 9);
	cp = features;
	size = sizeof(features);
	sep = "";
	for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) {
		if (mask & feature_bits[i].mask) {
			cp += snprintf(cp, size, "%s%s", sep,
				       feature_bits[i].feature_name);
			sep = ", ";
			mask &= ~feature_bits[i].mask;
			size = sizeof(features) - (cp - features);
		}
	}
	if (mask && size > 1) {
		/* print unknown features as a hex value */
		snprintf(cp, size, "%s0x%lx", sep, mask);
	}

	proc_freq = cpufreq_quick_get(cpunum);
	if (!proc_freq)
		proc_freq = c->proc_freq / 1000;

	seq_printf(m,
		   "processor  : %d\n"
		   "vendor     : %s\n"
		   "arch       : IA-64\n"
		   "family     : %u\n"
		   "model      : %u\n"
		   "model name : %s\n"
		   "revision   : %u\n"
		   "archrev    : %u\n"
		   "features   : %s\n"
		   "cpu number : %lu\n"
		   "cpu regs   : %u\n"
		   "cpu MHz    : %lu.%03lu\n"
		   "itc MHz    : %lu.%06lu\n"
		   "BogoMIPS   : %lu.%02lu\n",
		   cpunum, c->vendor, c->family, c->model,
		   c->model_name, c->revision, c->archrev,
		   features, c->ppn, c->number,
		   proc_freq / 1000, proc_freq % 1000,
		   c->itc_freq / 1000000, c->itc_freq % 1000000,
		   lpj*HZ/500000, (lpj*HZ/5000) % 100);
#ifdef CONFIG_SMP
	seq_printf(m, "siblings   : %u\n", cpus_weight(cpu_core_map[cpunum]));
	if (c->threads_per_core > 1 || c->cores_per_socket > 1)
		seq_printf(m,
			   "physical id: %u\n"
			   "core id    : %u\n"
			   "thread id  : %u\n",
			   c->socket_id, c->core_id, c->thread_id);
#endif
	seq_printf(m, "\n");

	return 0;
}
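/*
 * For reference, a single /proc/cpuinfo entry as formatted above (all
 * values made up):
 *
 *	processor  : 0
 *	vendor     : GenuineIntel
 *	arch       : IA-64
 *	family     : 31
 *	model      : 1
 *	model name : Madison
 *	revision   : 5
 *	archrev    : 0
 *	features   : branchlong
 *	cpu number : 0
 *	cpu regs   : 4
 *	cpu MHz    : 1500.000
 *	itc MHz    : 1500.000000
 *	BogoMIPS   : 2245.32
 */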
static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
	while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
		++*pos;
#endif
	return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
}

static void *
c_next (struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}

static void
c_stop (struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start =	c_start,
	.next =		c_next,
	.stop =		c_stop,
	.show =		show_cpuinfo
};

#define MAX_BRANDS	8
static char brandname[MAX_BRANDS][128];

static char * __cpuinit
get_model_name(__u8 family, __u8 model)
{
	static int overflow;
	char brand[128];
	int i;

	memcpy(brand, "Unknown", 8);
	if (ia64_pal_get_brand_info(brand)) {
		if (family == 0x7)
			memcpy(brand, "Merced", 7);
		else if (family == 0x1f) switch (model) {
			case 0: memcpy(brand, "McKinley", 9); break;
			case 1: memcpy(brand, "Madison", 8); break;
			case 2: memcpy(brand, "Madison up to 9M cache", 23); break;
		}
	}
	for (i = 0; i < MAX_BRANDS; i++)
		if (strcmp(brandname[i], brand) == 0)
			return brandname[i];
	for (i = 0; i < MAX_BRANDS; i++)
		if (brandname[i][0] == '\0')
			return strcpy(brandname[i], brand);
	if (overflow++ == 0)
		printk(KERN_ERR
		       "%s: Table overflow. Some processor model information will be missing\n",
		       __FUNCTION__);
	return "Unknown";
}

static void __cpuinit
identify_cpu (struct cpuinfo_ia64 *c)
{
	union {
		unsigned long bits[5];
		struct {
			/* id 0 & 1: */
			char vendor[16];

			/* id 2 */
			u64 ppn;		/* processor serial number */

			/* id 3: */
			unsigned number		:  8;
			unsigned revision	:  8;
			unsigned model		:  8;
			unsigned family		:  8;
			unsigned archrev	:  8;
			unsigned reserved	: 24;

			/* id 4: */
			u64 features;
		} field;
	} cpuid;
	pal_vm_info_1_u_t vm1;
	pal_vm_info_2_u_t vm2;
	pal_status_t status;
	unsigned long impl_va_msb = 50, phys_addr_size = 44;	/* Itanium defaults */
	int i;

	for (i = 0; i < 5; ++i)
		cpuid.bits[i] = ia64_get_cpuid(i);

	memcpy(c->vendor, cpuid.field.vendor, 16);
#ifdef CONFIG_SMP
	c->cpu = smp_processor_id();

	/*
	 * The default values below will be overwritten by identify_siblings()
	 * for Multi-Threading/Multi-Core capable CPUs.
	 */
	c->threads_per_core = c->cores_per_socket = c->num_log = 1;
	c->socket_id = -1;

	identify_siblings(c);
#endif
	c->ppn = cpuid.field.ppn;
	c->number = cpuid.field.number;
	c->revision = cpuid.field.revision;
	c->model = cpuid.field.model;
	c->family = cpuid.field.family;
	c->archrev = cpuid.field.archrev;
	c->features = cpuid.field.features;
	c->model_name = get_model_name(c->family, c->model);

	status = ia64_pal_vm_summary(&vm1, &vm2);
	if (status == PAL_STATUS_SUCCESS) {
		impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb;
		phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size;
	}
	c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1));
	c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}
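/*
 * Worked example for the two masks, using the Itanium defaults above: with
 * impl_va_msb = 50, virtual-address bits 0-50 plus the region bits 61-63
 * are implemented, so
 *
 *	unimpl_va_mask = ~((7L << 61) | ((1L << 51) - 1))	= bits 51-60
 *
 * and with phys_addr_size = 44 (bit 63 selecting the uncacheable attribute):
 *
 *	unimpl_pa_mask = ~((1L << 63) | ((1L << 44) - 1))	= bits 44-62
 *
 * An address with any of these bits set does not correspond to implemented
 * address space.
 */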
void __init
setup_per_cpu_areas (void)
{
	/* start_kernel() requires this... */
#ifdef CONFIG_ACPI_HOTPLUG_CPU
	prefill_possible_map();
#endif
}

/*
 * Calculate the max. cache line size.
 *
 * In addition, the minimum of the i-cache stride sizes is calculated for
 * "flush_icache_range()".
 */
static void __cpuinit
get_max_cacheline_size (void)
{
	unsigned long line_size, max = 1;
	u64 l, levels, unique_caches;
	pal_cache_config_info_t cci;
	s64 status;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __FUNCTION__, status);
		max = SMP_CACHE_BYTES;
		/* Safest setup for "flush_icache_range()" */
		ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
		goto out;
	}

	for (l = 0; l < levels; ++l) {
		status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
						    &cci);
		if (status != 0) {
			printk(KERN_ERR
			       "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
			       __FUNCTION__, l, status);
			max = SMP_CACHE_BYTES;
			/* The safest setup for "flush_icache_range()" */
			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			cci.pcci_unified = 1;
		}
		line_size = 1 << cci.pcci_line_size;
		if (line_size > max)
			max = line_size;
		if (!cci.pcci_unified) {
			status = ia64_pal_cache_config_info(l,
						/* cache_type (instruction)= */ 1,
						    &cci);
			if (status != 0) {
				printk(KERN_ERR
				       "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
				       __FUNCTION__, l, status);
				/* The safest setup for "flush_icache_range()" */
				cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			}
		}
		if (cci.pcci_stride < ia64_i_cache_stride_shift)
			ia64_i_cache_stride_shift = cci.pcci_stride;
	}
  out:
	if (max > ia64_max_cacheline_size)
		ia64_max_cacheline_size = max;
}
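/*
 * Example of the net effect: if PAL reports pcci_stride = 6 for every
 * i-cache level, ia64_i_cache_stride_shift ends up as 6 and
 * flush_icache_range() walks the range in 1 << 6 = 64-byte steps.  Because
 * ia64_i_cache_stride_shift starts out as ~0, the first level examined
 * always establishes the initial minimum.
 */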
/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
 */
void __cpuinit
cpu_init (void)
{
	extern void __cpuinit ia64_mmu_init (void *);
	static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG;
	unsigned long num_phys_stacked;
	pal_vm_info_2_u_t vmi;
	unsigned int max_ctx;
	struct cpuinfo_ia64 *cpu_info;
	void *cpu_data;

	cpu_data = per_cpu_init();

	/*
	 * We set ar.k3 so that assembly code in MCA handler can compute
	 * physical addresses of per cpu variables with a simple:
	 *	phys = ar.k3 + &per_cpu_var
	 */
	ia64_set_kr(IA64_KR_PER_CPU_DATA,
		    ia64_tpa(cpu_data) - (long) __per_cpu_start);

	get_max_cacheline_size();

	/*
	 * We can't pass "local_cpu_data" to identify_cpu() because we haven't called
	 * ia64_mmu_init() yet.  And we can't call ia64_mmu_init() first because it
	 * depends on the data returned by identify_cpu().  We break the dependency by
	 * accessing cpu_data() through the canonical per-CPU address.
	 */
	cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
	identify_cpu(cpu_info);

#ifdef CONFIG_MCKINLEY
	{
#	define FEATURE_SET 16
		struct ia64_pal_retval iprv;

		if (cpu_info->family == 0x1f) {
			PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0);
			if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80))
				PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES,
					      (iprv.v1 | 0x80), FEATURE_SET, 0);
		}
	}
#endif

	/* Clear the stack memory reserved for pt_regs: */
	memset(task_pt_regs(current), 0, sizeof(struct pt_regs));

	ia64_set_kr(IA64_KR_FPU_OWNER, 0);

	/*
	 * Initialize the page-table base register to a global
	 * directory with all zeroes.  This ensures that we can handle
	 * TLB-misses to user address-space even before we create the
	 * first user address-space.  This may happen, e.g., due to
	 * aggressive use of lfetch.fault.
	 */
	ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page)));

	/*
	 * Initialize default control register to defer speculative faults except
	 * for those arising from TLB misses, which are not deferred.  The
	 * kernel MUST NOT depend on a particular setting of these bits (in other words,
	 * the kernel must have recovery code for all speculative accesses).  Turn on
	 * dcr.lc as per recommendation by the architecture team.  Most IA-32 apps
	 * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll
	 * be fine).
	 */
	ia64_setreg(_IA64_REG_CR_DCR,  (  IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR
					| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();

	ia64_mmu_init(ia64_imva(cpu_data));
	ia64_mca_cpu_init(ia64_imva(cpu_data));

#ifdef CONFIG_IA32_SUPPORT
	ia32_cpu_init();
#endif

	/* Clear ITC to eliminate sched_clock() overflows in human time.  */
	ia64_set_itc(0);

	/* disable all local interrupt sources: */
	ia64_set_itv(1 << 16);
	ia64_set_lrr0(1 << 16);
	ia64_set_lrr1(1 << 16);
	ia64_setreg(_IA64_REG_CR_PMV, 1 << 16);
	ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16);

	/* clear TPR & XTP to enable all interrupt classes: */
	ia64_setreg(_IA64_REG_CR_TPR, 0);
#ifdef CONFIG_SMP
	normal_xtp();
#endif

	/* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */
	if (ia64_pal_vm_summary(NULL, &vmi) == 0)
		max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1;
	else {
		printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n");
		max_ctx = (1U << 15) - 1;	/* use architected minimum */
	}
	/* several CPUs may race to lower max_ctx; retry the cmpxchg until one wins */
	while (max_ctx < ia64_ctx.max_ctx) {
		unsigned int old = ia64_ctx.max_ctx;
		if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old)
			break;
	}

	if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) {
		printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical "
		       "stacked regs\n");
		num_phys_stacked = 96;
	}
	/* size of physical stacked register partition plus 8 bytes: */
	if (num_phys_stacked > max_num_phys_stacked) {
		ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8);
		max_num_phys_stacked = num_phys_stacked;
	}
	platform_cpu_init();
	pm_idle = default_idle;
}

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 */
void sched_cacheflush(void)
{
	ia64_sal_cache_flush(3);
}

void __init
check_bugs (void)
{
	ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles,
			       (unsigned long) __end___mckinley_e9_bundles);
}

static int __init run_dmi_scan(void)
{
	dmi_scan_machine();
	return 0;
}
core_initcall(run_dmi_scan);