/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/node.h>
#include <linux/cpu.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/pci.h>
#include <linux/initrd.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/timex.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <hv/hypervisor.h>
#include <arch/interrupts.h>

/* <linux/smp.h> doesn't provide this definition. */
#ifndef CONFIG_SMP
#define setup_max_cpus 1
#endif

static inline int ABS(int x) { return x >= 0 ? x : -x; }

/* Chip information */
char chip_model[64] __write_once;

struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

/* We only create bootmem data on node 0. */
static bootmem_data_t __initdata node0_bdata;

/* Information on the NUMA nodes that we compute early */
unsigned long __cpuinitdata node_start_pfn[MAX_NUMNODES];
unsigned long __cpuinitdata node_end_pfn[MAX_NUMNODES];
unsigned long __initdata node_memmap_pfn[MAX_NUMNODES];
unsigned long __initdata node_percpu_pfn[MAX_NUMNODES];
unsigned long __initdata node_free_pfn[MAX_NUMNODES];

#ifdef CONFIG_HIGHMEM
/* Page frame index of end of lowmem on each controller. */
unsigned long __cpuinitdata node_lowmem_end_pfn[MAX_NUMNODES];

/* Number of pages that can be mapped into lowmem. */
static unsigned long __initdata mappable_physpages;
#endif

/* Data on which physical memory controller corresponds to which NUMA node */
int node_controller[MAX_NUMNODES] = { [0 ... MAX_NUMNODES-1] = -1 };

#ifdef CONFIG_HIGHMEM
/* Map information from VAs to PAs */
unsigned long pbase_map[1 << (32 - HPAGE_SHIFT)]
	__write_once __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(pbase_map);

/* Map information from PAs to VAs */
void *vbase_map[NR_PA_HIGHBIT_VALUES]
	__write_once __attribute__((aligned(L2_CACHE_BYTES)));
EXPORT_SYMBOL(vbase_map);
#endif

/* Node number as a function of the high PA bits */
int highbits_to_node[NR_PA_HIGHBIT_VALUES] __write_once;
EXPORT_SYMBOL(highbits_to_node);

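/*
 * Note (HIGHMEM configurations): as initialized by setup_pa_va_mapping()
 * below, pbase_map[] is indexed by a lowmem VA's huge-page number
 * (va >> HPAGE_SHIFT) and holds the first PFN mapped at that huge page,
 * while vbase_map[] is indexed by a page's high PA bits (one entry per
 * memory controller region) and holds the lowmem VA at which that
 * region's mapping begins; together they support the fast lowmem
 * VA <-> PA conversion mentioned in the comments below.
 */
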
static unsigned int __initdata maxmem_pfn = -1U;
static unsigned int __initdata maxnodemem_pfn[MAX_NUMNODES] = {
	[0 ... MAX_NUMNODES-1] = -1U
};
static nodemask_t __initdata isolnodes;

#ifdef CONFIG_PCI
enum { DEFAULT_PCI_RESERVE_MB = 64 };
static unsigned int __initdata pci_reserve_mb = DEFAULT_PCI_RESERVE_MB;
unsigned long __initdata pci_reserve_start_pfn = -1U;
unsigned long __initdata pci_reserve_end_pfn = -1U;
#endif

static int __init setup_maxmem(char *str)
{
	long maxmem_mb;
	if (str == NULL || strict_strtol(str, 0, &maxmem_mb) != 0 ||
	    maxmem_mb == 0)
		return -EINVAL;

	maxmem_pfn = (maxmem_mb >> (HPAGE_SHIFT - 20)) <<
		(HPAGE_SHIFT - PAGE_SHIFT);
	pr_info("Forcing RAM used to no more than %dMB\n",
		maxmem_pfn >> (20 - PAGE_SHIFT));
	return 0;
}
early_param("maxmem", setup_maxmem);

static int __init setup_maxnodemem(char *str)
{
	char *endp;
	long maxnodemem_mb, node;

	node = str ? simple_strtoul(str, &endp, 0) : INT_MAX;
	if (node >= MAX_NUMNODES || *endp != ':' ||
	    strict_strtol(endp+1, 0, &maxnodemem_mb) != 0)
		return -EINVAL;

	maxnodemem_pfn[node] = (maxnodemem_mb >> (HPAGE_SHIFT - 20)) <<
		(HPAGE_SHIFT - PAGE_SHIFT);
	pr_info("Forcing RAM used on node %ld to no more than %dMB\n",
		node, maxnodemem_pfn[node] >> (20 - PAGE_SHIFT));
	return 0;
}
early_param("maxnodemem", setup_maxnodemem);

static int __init setup_isolnodes(char *str)
{
	char buf[MAX_NUMNODES * 5];
	if (str == NULL || nodelist_parse(str, isolnodes) != 0)
		return -EINVAL;

	nodelist_scnprintf(buf, sizeof(buf), isolnodes);
	pr_info("Set isolnodes value to '%s'\n", buf);
	return 0;
}
early_param("isolnodes", setup_isolnodes);

#ifdef CONFIG_PCI
static int __init setup_pci_reserve(char *str)
{
	unsigned long mb;

	if (str == NULL || strict_strtoul(str, 0, &mb) != 0 ||
	    mb > 3 * 1024)
		return -EINVAL;

	pci_reserve_mb = mb;
	pr_info("Reserving %dMB for PCIE root complex mappings\n",
		pci_reserve_mb);
	return 0;
}
early_param("pci_reserve", setup_pci_reserve);
#endif

#ifndef __tilegx__
/*
 * vmalloc=size forces the vmalloc area to be exactly 'size' bytes.
 * This can be used to increase (or decrease) the vmalloc area.
 */
static int __init parse_vmalloc(char *arg)
{
	if (!arg)
		return -EINVAL;

	VMALLOC_RESERVE = (memparse(arg, &arg) + PGDIR_SIZE - 1) & PGDIR_MASK;

	/* See validate_va() for more on this test. */
	if ((long)_VMALLOC_START >= 0)
		early_panic("\"vmalloc=%#lx\" value too large: maximum %#lx\n",
			    VMALLOC_RESERVE, _VMALLOC_END - 0x80000000UL);

	return 0;
}
early_param("vmalloc", parse_vmalloc);
#endif

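/*
 * Example boot arguments for the early_params above (values are
 * illustrative only): "maxmem=4096" caps total RAM use at 4096 MB;
 * "maxnodemem=1:2048" caps controller 1 at 2048 MB; "isolnodes=1-3"
 * (a standard node list) keeps nodes 1 through 3 out of the default
 * lowmem mapping and NUMA placement; "pci_reserve=128" reserves
 * 128 MB below 4 GB for PCIe mappings; and "vmalloc=256M" requests a
 * 256 MB vmalloc region (rounded up to PGDIR_SIZE granularity).
 */
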
#ifdef CONFIG_HIGHMEM
/*
 * Determine for each controller where its lowmem is mapped and how
 * much of it is mapped there.  On controller zero, the first few
 * megabytes are mapped at 0xfd000000 as code, so in principle we
 * could start our data mappings higher up, but for now we don't
 * bother, to avoid additional confusion.
 *
 * One question is whether, on systems with more than 768 Mb and
 * controllers of different sizes, to map in a proportionate amount of
 * each one, or to try to map the same amount from each controller.
 * (E.g. if we have three controllers with 256MB, 1GB, and 256MB
 * respectively, do we map 256MB from each, or do we map 128 MB, 512
 * MB, and 128 MB respectively?)  For now we use a proportionate
 * solution like the latter.
 *
 * The VA/PA mapping demands that we align our decisions at 16 MB
 * boundaries so that we can rapidly convert VA to PA.
 */
static void *__init setup_pa_va_mapping(void)
{
	unsigned long curr_pages = 0;
	unsigned long vaddr = PAGE_OFFSET;
	nodemask_t highonlynodes = isolnodes;
	int i, j;

	memset(pbase_map, -1, sizeof(pbase_map));
	memset(vbase_map, -1, sizeof(vbase_map));

	/* Node zero cannot be isolated for LOWMEM purposes. */
	node_clear(0, highonlynodes);

	/* Count up the number of pages on non-highonlynodes controllers. */
	mappable_physpages = 0;
	for_each_online_node(i) {
		if (!node_isset(i, highonlynodes))
			mappable_physpages +=
				node_end_pfn[i] - node_start_pfn[i];
	}

	for_each_online_node(i) {
		unsigned long start = node_start_pfn[i];
		unsigned long end = node_end_pfn[i];
		unsigned long size = end - start;
		unsigned long vaddr_end;

		if (node_isset(i, highonlynodes)) {
			/* Mark this controller as having no lowmem. */
			node_lowmem_end_pfn[i] = start;
			continue;
		}

		curr_pages += size;
		if (mappable_physpages > MAXMEM_PFN) {
			vaddr_end = PAGE_OFFSET +
				(((u64)curr_pages * MAXMEM_PFN /
				  mappable_physpages)
				 << PAGE_SHIFT);
		} else {
			vaddr_end = PAGE_OFFSET + (curr_pages << PAGE_SHIFT);
		}
		for (j = 0; vaddr < vaddr_end; vaddr += HPAGE_SIZE, ++j) {
			unsigned long this_pfn =
				start + (j << HUGETLB_PAGE_ORDER);
			pbase_map[vaddr >> HPAGE_SHIFT] = this_pfn;
			if (vbase_map[__pfn_to_highbits(this_pfn)] ==
			    (void *)-1)
				vbase_map[__pfn_to_highbits(this_pfn)] =
					(void *)(vaddr & HPAGE_MASK);
		}
		node_lowmem_end_pfn[i] = start + (j << HUGETLB_PAGE_ORDER);
		BUG_ON(node_lowmem_end_pfn[i] > end);
	}

	/* Return highest address of any mapped memory. */
	return (void *)vaddr;
}
#endif /* CONFIG_HIGHMEM */

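/*
 * Worked example for the proportionate mapping above: with three
 * controllers of 256 MB, 1 GB and 256 MB (1.5 GB mappable) and a
 * lowmem budget of roughly 768 MB (MAXMEM_PFN), each controller gets
 * half of its memory mapped into lowmem -- 128 MB, 512 MB and 128 MB
 * respectively -- with each mapping laid out in whole huge pages.
 */
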
/*
 * Register our most important memory mappings with the debug stub.
 *
 * This is up to 4 mappings for lowmem, one mapping per memory
 * controller, plus one for our text segment.
 */
static void __cpuinit store_permanent_mappings(void)
{
	int i;

	for_each_online_node(i) {
		HV_PhysAddr pa = ((HV_PhysAddr)node_start_pfn[i]) << PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
		HV_PhysAddr high_mapped_pa = node_lowmem_end_pfn[i];
#else
		HV_PhysAddr high_mapped_pa = node_end_pfn[i];
#endif

		unsigned long pages = high_mapped_pa - node_start_pfn[i];
		HV_VirtAddr addr = (HV_VirtAddr) __va(pa);
		hv_store_mapping(addr, pages << PAGE_SHIFT, pa);
	}

	hv_store_mapping((HV_VirtAddr)_stext,
			 (uint32_t)(_einittext - _stext), 0);
}

/*
 * Use hv_inquire_physical() to populate node_{start,end}_pfn[]
 * and node_online_map, doing suitable sanity-checking.
 * Also set min_low_pfn, max_low_pfn, and max_pfn.
 */
static void __init setup_memory(void)
{
	int i, j;
	int highbits_seen[NR_PA_HIGHBIT_VALUES] = { 0 };
#ifdef CONFIG_HIGHMEM
	long highmem_pages;
#endif
#ifndef __tilegx__
	int cap;
#endif
#if defined(CONFIG_HIGHMEM) || defined(__tilegx__)
	long lowmem_pages;
#endif

	/* We are using a char to hold the cpu_2_node[] mapping */
	BUG_ON(MAX_NUMNODES > 127);

	/* Discover the ranges of memory available to us */
	for (i = 0; ; ++i) {
		unsigned long start, size, end, highbits;
		HV_PhysAddrRange range = hv_inquire_physical(i);
		if (range.size == 0)
			break;
#ifdef CONFIG_FLATMEM
		if (i > 0) {
			pr_err("Can't use discontiguous PAs: %#llx..%#llx\n",
			       range.size, range.start + range.size);
			continue;
		}
#endif
#ifndef __tilegx__
		if ((unsigned long)range.start) {
			pr_err("Range not at 4GB multiple: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
#endif
		if ((range.start & (HPAGE_SIZE-1)) != 0 ||
		    (range.size & (HPAGE_SIZE-1)) != 0) {
			unsigned long long start_pa = range.start;
			unsigned long long orig_size = range.size;
			range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK;
			range.size -= (range.start - start_pa);
			range.size &= HPAGE_MASK;
			pr_err("Range not hugepage-aligned: %#llx..%#llx:"
			       " now %#llx-%#llx\n",
			       start_pa, start_pa + orig_size,
			       range.start, range.start + range.size);
		}
		highbits = __pa_to_highbits(range.start);
		if (highbits >= NR_PA_HIGHBIT_VALUES) {
			pr_err("PA high bits too high: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
		if (highbits_seen[highbits]) {
			pr_err("Range overlaps in high bits: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
		highbits_seen[highbits] = 1;
		if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) {
			int max_size = maxnodemem_pfn[i];
			if (max_size > 0) {
				pr_err("Maxnodemem reduced node %d to"
				       " %d pages\n", i, max_size);
				range.size = PFN_PHYS(max_size);
			} else {
				pr_err("Maxnodemem disabled node %d\n", i);
				continue;
			}
		}
		if (num_physpages + PFN_DOWN(range.size) > maxmem_pfn) {
			int max_size = maxmem_pfn - num_physpages;
			if (max_size > 0) {
				pr_err("Maxmem reduced node %d to %d pages\n",
				       i, max_size);
				range.size = PFN_PHYS(max_size);
			} else {
				pr_err("Maxmem disabled node %d\n", i);
				continue;
			}
		}
		if (i >= MAX_NUMNODES) {
			pr_err("Too many PA nodes (#%d): %#llx...%#llx\n",
			       i, range.size, range.size + range.start);
			continue;
		}

		start = range.start >> PAGE_SHIFT;
		size = range.size >> PAGE_SHIFT;
		end = start + size;

#ifndef __tilegx__
		if (((HV_PhysAddr)end << PAGE_SHIFT) !=
		    (range.start + range.size)) {
			pr_err("PAs too high to represent: %#llx..%#llx\n",
			       range.start, range.start + range.size);
			continue;
		}
#endif
#ifdef CONFIG_PCI
		/*
		 * Blocks that overlap the pci reserved region must
		 * have enough space to hold the maximum percpu data
		 * region at the top of the range.  If there isn't
		 * enough space above the reserved region, just
		 * truncate the node.
407 */ 408 if (start <= pci_reserve_start_pfn && 409 end > pci_reserve_start_pfn) { 410 unsigned int per_cpu_size = 411 __per_cpu_end - __per_cpu_start; 412 unsigned int percpu_pages = 413 NR_CPUS * (PFN_UP(per_cpu_size) >> PAGE_SHIFT); 414 if (end < pci_reserve_end_pfn + percpu_pages) { 415 end = pci_reserve_start_pfn; 416 pr_err("PCI mapping region reduced node %d to" 417 " %ld pages\n", i, end - start); 418 } 419 } 420#endif 421 422 for (j = __pfn_to_highbits(start); 423 j <= __pfn_to_highbits(end - 1); j++) 424 highbits_to_node[j] = i; 425 426 node_start_pfn[i] = start; 427 node_end_pfn[i] = end; 428 node_controller[i] = range.controller; 429 num_physpages += size; 430 max_pfn = end; 431 432 /* Mark node as online */ 433 node_set(i, node_online_map); 434 node_set(i, node_possible_map); 435 } 436 437#ifndef __tilegx__ 438 /* 439 * For 4KB pages, mem_map "struct page" data is 1% of the size 440 * of the physical memory, so can be quite big (640 MB for 441 * four 16G zones). These structures must be mapped in 442 * lowmem, and since we currently cap out at about 768 MB, 443 * it's impractical to try to use this much address space. 444 * For now, arbitrarily cap the amount of physical memory 445 * we're willing to use at 8 million pages (32GB of 4KB pages). 446 */ 447 cap = 8 * 1024 * 1024; /* 8 million pages */ 448 if (num_physpages > cap) { 449 int num_nodes = num_online_nodes(); 450 int cap_each = cap / num_nodes; 451 unsigned long dropped_pages = 0; 452 for (i = 0; i < num_nodes; ++i) { 453 int size = node_end_pfn[i] - node_start_pfn[i]; 454 if (size > cap_each) { 455 dropped_pages += (size - cap_each); 456 node_end_pfn[i] = node_start_pfn[i] + cap_each; 457 } 458 } 459 num_physpages -= dropped_pages; 460 pr_warning("Only using %ldMB memory;" 461 " ignoring %ldMB.\n", 462 num_physpages >> (20 - PAGE_SHIFT), 463 dropped_pages >> (20 - PAGE_SHIFT)); 464 pr_warning("Consider using a larger page size.\n"); 465 } 466#endif 467 468 /* Heap starts just above the last loaded address. */ 469 min_low_pfn = PFN_UP((unsigned long)_end - PAGE_OFFSET); 470 471#ifdef CONFIG_HIGHMEM 472 /* Find where we map lowmem from each controller. */ 473 high_memory = setup_pa_va_mapping(); 474 475 /* Set max_low_pfn based on what node 0 can directly address. */ 476 max_low_pfn = node_lowmem_end_pfn[0]; 477 478 lowmem_pages = (mappable_physpages > MAXMEM_PFN) ? 479 MAXMEM_PFN : mappable_physpages; 480 highmem_pages = (long) (num_physpages - lowmem_pages); 481 482 pr_notice("%ldMB HIGHMEM available.\n", 483 pages_to_mb(highmem_pages > 0 ? highmem_pages : 0)); 484 pr_notice("%ldMB LOWMEM available.\n", 485 pages_to_mb(lowmem_pages)); 486#else 487 /* Set max_low_pfn based on what node 0 can directly address. 
	max_low_pfn = node_end_pfn[0];

#ifndef __tilegx__
	if (node_end_pfn[0] > MAXMEM_PFN) {
		pr_warning("Only using %ldMB LOWMEM.\n",
			   MAXMEM>>20);
		pr_warning("Use a HIGHMEM enabled kernel.\n");
		max_low_pfn = MAXMEM_PFN;
		max_pfn = MAXMEM_PFN;
		num_physpages = MAXMEM_PFN;
		node_end_pfn[0] = MAXMEM_PFN;
	} else {
		pr_notice("%ldMB memory available.\n",
			  pages_to_mb(node_end_pfn[0]));
	}
	for (i = 1; i < MAX_NUMNODES; ++i) {
		node_start_pfn[i] = 0;
		node_end_pfn[i] = 0;
	}
	high_memory = __va(node_end_pfn[0]);
#else
	lowmem_pages = 0;
	for (i = 0; i < MAX_NUMNODES; ++i) {
		int pages = node_end_pfn[i] - node_start_pfn[i];
		lowmem_pages += pages;
		if (pages)
			high_memory = pfn_to_kaddr(node_end_pfn[i]);
	}
	pr_notice("%ldMB memory available.\n",
		  pages_to_mb(lowmem_pages));
#endif
#endif
}

static void __init setup_bootmem_allocator(void)
{
	unsigned long bootmap_size, first_alloc_pfn, last_alloc_pfn;

	/* Provide a node 0 bdata. */
	NODE_DATA(0)->bdata = &node0_bdata;

#ifdef CONFIG_PCI
	/* Don't let boot memory alias the PCI region. */
	last_alloc_pfn = min(max_low_pfn, pci_reserve_start_pfn);
#else
	last_alloc_pfn = max_low_pfn;
#endif

	/*
	 * Initialize the boot-time allocator (with low memory only):
	 * The first argument says where to put the bitmap, and the
	 * second says where the end of allocatable memory is.
	 */
	bootmap_size = init_bootmem(min_low_pfn, last_alloc_pfn);

	/*
	 * Let the bootmem allocator use all the space we've given it
	 * except for its own bitmap.
	 */
	first_alloc_pfn = min_low_pfn + PFN_UP(bootmap_size);
	if (first_alloc_pfn >= last_alloc_pfn)
		early_panic("Not enough memory on controller 0 for bootmem\n");

	free_bootmem(PFN_PHYS(first_alloc_pfn),
		     PFN_PHYS(last_alloc_pfn - first_alloc_pfn));

#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end)
		reserve_bootmem(crashk_res.start,
				crashk_res.end - crashk_res.start + 1, 0);
#endif
}

void *__init alloc_remap(int nid, unsigned long size)
{
	int pages = node_end_pfn[nid] - node_start_pfn[nid];
	void *map = pfn_to_kaddr(node_memmap_pfn[nid]);
	BUG_ON(size != pages * sizeof(struct page));
	memset(map, 0, size);
	return map;
}

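/*
 * Note: alloc_remap() is the arch hook the core VM uses when it
 * allocates each node's struct page array; we satisfy it from the
 * pages set aside at node_memmap_pfn[] by zone_sizes_init() below.
 */
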
static int __init percpu_size(void)
{
	int size = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE);
#ifdef CONFIG_MODULES
	if (size < PERCPU_ENOUGH_ROOM)
		size = PERCPU_ENOUGH_ROOM;
#endif
	/* In several places we assume the per-cpu data fits on a huge page. */
	BUG_ON(kdata_huge && size > HPAGE_SIZE);
	return size;
}

static inline unsigned long alloc_bootmem_pfn(int size, unsigned long goal)
{
	void *kva = __alloc_bootmem(size, PAGE_SIZE, goal);
	unsigned long pfn = kaddr_to_pfn(kva);
	BUG_ON(goal && PFN_PHYS(pfn) != goal);
	return pfn;
}

static void __init zone_sizes_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0 };
	unsigned long node_percpu[MAX_NUMNODES] = { 0 };
	int size = percpu_size();
	int num_cpus = smp_height * smp_width;
	int i;

	for (i = 0; i < num_cpus; ++i)
		node_percpu[cpu_to_node(i)] += size;

	for_each_online_node(i) {
		unsigned long start = node_start_pfn[i];
		unsigned long end = node_end_pfn[i];
#ifdef CONFIG_HIGHMEM
		unsigned long lowmem_end = node_lowmem_end_pfn[i];
#else
		unsigned long lowmem_end = end;
#endif
		int memmap_size = (end - start) * sizeof(struct page);
		node_free_pfn[i] = start;

		/*
		 * Set aside pages for per-cpu data and the mem_map array.
		 *
		 * Since the per-cpu data requires special homecaching,
		 * if we are in kdata_huge mode, we put it at the end of
		 * the lowmem region.  If we're not in kdata_huge mode,
		 * we take the per-cpu pages from the bottom of the
		 * controller, since that avoids fragmenting a huge page
		 * that users might want.  We always take the memmap
		 * from the bottom of the controller, since with
		 * kdata_huge that lets it be under a huge TLB entry.
		 *
		 * If the user has requested isolnodes for a controller,
		 * though, there'll be no lowmem, so we just alloc_bootmem
		 * the memmap.  There will be no percpu memory either.
		 */
		if (__pfn_to_highbits(start) == 0) {
			/* In low PAs, allocate via bootmem. */
			unsigned long goal = 0;
			node_memmap_pfn[i] =
				alloc_bootmem_pfn(memmap_size, goal);
			if (kdata_huge)
				goal = PFN_PHYS(lowmem_end) - node_percpu[i];
			if (node_percpu[i])
				node_percpu_pfn[i] =
					alloc_bootmem_pfn(node_percpu[i], goal);
		} else if (node_isset(i, isolnodes)) {
			node_memmap_pfn[i] = alloc_bootmem_pfn(memmap_size, 0);
			BUG_ON(node_percpu[i] != 0);
		} else {
			/* In high PAs, just reserve some pages. */
			node_memmap_pfn[i] = node_free_pfn[i];
			node_free_pfn[i] += PFN_UP(memmap_size);
			if (!kdata_huge) {
				node_percpu_pfn[i] = node_free_pfn[i];
				node_free_pfn[i] += PFN_UP(node_percpu[i]);
			} else {
				node_percpu_pfn[i] =
					lowmem_end - PFN_UP(node_percpu[i]);
			}
		}

#ifdef CONFIG_HIGHMEM
		if (start > lowmem_end) {
			zones_size[ZONE_NORMAL] = 0;
			zones_size[ZONE_HIGHMEM] = end - start;
		} else {
			zones_size[ZONE_NORMAL] = lowmem_end - start;
			zones_size[ZONE_HIGHMEM] = end - lowmem_end;
		}
#else
		zones_size[ZONE_NORMAL] = end - start;
#endif

		/*
		 * Everyone shares node 0's bootmem allocator, but
		 * we use alloc_remap(), above, to put the actual
		 * struct page array on the individual controllers,
		 * which is most of the data that we actually care about.
		 * We can't place bootmem allocators on the other
		 * controllers since the bootmem allocator can only
		 * operate on 32-bit physical addresses.
675 */ 676 NODE_DATA(i)->bdata = NODE_DATA(0)->bdata; 677 678 free_area_init_node(i, zones_size, start, NULL); 679 printk(KERN_DEBUG " DMA zone: %ld per-cpu pages\n", 680 PFN_UP(node_percpu[i])); 681 682 /* Track the type of memory on each node */ 683 if (zones_size[ZONE_NORMAL]) 684 node_set_state(i, N_NORMAL_MEMORY); 685#ifdef CONFIG_HIGHMEM 686 if (end != start) 687 node_set_state(i, N_HIGH_MEMORY); 688#endif 689 690 node_set_online(i); 691 } 692} 693 694#ifdef CONFIG_NUMA 695 696/* which logical CPUs are on which nodes */ 697struct cpumask node_2_cpu_mask[MAX_NUMNODES] __write_once; 698EXPORT_SYMBOL(node_2_cpu_mask); 699 700/* which node each logical CPU is on */ 701char cpu_2_node[NR_CPUS] __write_once __attribute__((aligned(L2_CACHE_BYTES))); 702EXPORT_SYMBOL(cpu_2_node); 703 704/* Return cpu_to_node() except for cpus not yet assigned, which return -1 */ 705static int __init cpu_to_bound_node(int cpu, struct cpumask* unbound_cpus) 706{ 707 if (!cpu_possible(cpu) || cpumask_test_cpu(cpu, unbound_cpus)) 708 return -1; 709 else 710 return cpu_to_node(cpu); 711} 712 713/* Return number of immediately-adjacent tiles sharing the same NUMA node. */ 714static int __init node_neighbors(int node, int cpu, 715 struct cpumask *unbound_cpus) 716{ 717 int neighbors = 0; 718 int w = smp_width; 719 int h = smp_height; 720 int x = cpu % w; 721 int y = cpu / w; 722 if (x > 0 && cpu_to_bound_node(cpu-1, unbound_cpus) == node) 723 ++neighbors; 724 if (x < w-1 && cpu_to_bound_node(cpu+1, unbound_cpus) == node) 725 ++neighbors; 726 if (y > 0 && cpu_to_bound_node(cpu-w, unbound_cpus) == node) 727 ++neighbors; 728 if (y < h-1 && cpu_to_bound_node(cpu+w, unbound_cpus) == node) 729 ++neighbors; 730 return neighbors; 731} 732 733static void __init setup_numa_mapping(void) 734{ 735 int distance[MAX_NUMNODES][NR_CPUS]; 736 HV_Coord coord; 737 int cpu, node, cpus, i, x, y; 738 int num_nodes = num_online_nodes(); 739 struct cpumask unbound_cpus; 740 nodemask_t default_nodes; 741 742 cpumask_clear(&unbound_cpus); 743 744 /* Get set of nodes we will use for defaults */ 745 nodes_andnot(default_nodes, node_online_map, isolnodes); 746 if (nodes_empty(default_nodes)) { 747 BUG_ON(!node_isset(0, node_online_map)); 748 pr_err("Forcing NUMA node zero available as a default node\n"); 749 node_set(0, default_nodes); 750 } 751 752 /* Populate the distance[] array */ 753 memset(distance, -1, sizeof(distance)); 754 cpu = 0; 755 for (coord.y = 0; coord.y < smp_height; ++coord.y) { 756 for (coord.x = 0; coord.x < smp_width; 757 ++coord.x, ++cpu) { 758 BUG_ON(cpu >= nr_cpu_ids); 759 if (!cpu_possible(cpu)) { 760 cpu_2_node[cpu] = -1; 761 continue; 762 } 763 for_each_node_mask(node, default_nodes) { 764 HV_MemoryControllerInfo info = 765 hv_inquire_memory_controller( 766 coord, node_controller[node]); 767 distance[node][cpu] = 768 ABS(info.coord.x) + ABS(info.coord.y); 769 } 770 cpumask_set_cpu(cpu, &unbound_cpus); 771 } 772 } 773 cpus = cpu; 774 775 /* 776 * Round-robin through the NUMA nodes until all the cpus are 777 * assigned. We could be more clever here (e.g. create four 778 * sorted linked lists on the same set of cpu nodes, and pull 779 * off them in round-robin sequence, removing from all four 780 * lists each time) but given the relatively small numbers 781 * involved, O(n^2) seem OK for a one-time cost. 
782 */ 783 node = first_node(default_nodes); 784 while (!cpumask_empty(&unbound_cpus)) { 785 int best_cpu = -1; 786 int best_distance = INT_MAX; 787 for (cpu = 0; cpu < cpus; ++cpu) { 788 if (cpumask_test_cpu(cpu, &unbound_cpus)) { 789 /* 790 * Compute metric, which is how much 791 * closer the cpu is to this memory 792 * controller than the others, shifted 793 * up, and then the number of 794 * neighbors already in the node as an 795 * epsilon adjustment to try to keep 796 * the nodes compact. 797 */ 798 int d = distance[node][cpu] * num_nodes; 799 for_each_node_mask(i, default_nodes) { 800 if (i != node) 801 d -= distance[i][cpu]; 802 } 803 d *= 8; /* allow space for epsilon */ 804 d -= node_neighbors(node, cpu, &unbound_cpus); 805 if (d < best_distance) { 806 best_cpu = cpu; 807 best_distance = d; 808 } 809 } 810 } 811 BUG_ON(best_cpu < 0); 812 cpumask_set_cpu(best_cpu, &node_2_cpu_mask[node]); 813 cpu_2_node[best_cpu] = node; 814 cpumask_clear_cpu(best_cpu, &unbound_cpus); 815 node = next_node(node, default_nodes); 816 if (node == MAX_NUMNODES) 817 node = first_node(default_nodes); 818 } 819 820 /* Print out node assignments and set defaults for disabled cpus */ 821 cpu = 0; 822 for (y = 0; y < smp_height; ++y) { 823 printk(KERN_DEBUG "NUMA cpu-to-node row %d:", y); 824 for (x = 0; x < smp_width; ++x, ++cpu) { 825 if (cpu_to_node(cpu) < 0) { 826 pr_cont(" -"); 827 cpu_2_node[cpu] = first_node(default_nodes); 828 } else { 829 pr_cont(" %d", cpu_to_node(cpu)); 830 } 831 } 832 pr_cont("\n"); 833 } 834} 835 836static struct cpu cpu_devices[NR_CPUS]; 837 838static int __init topology_init(void) 839{ 840 int i; 841 842 for_each_online_node(i) 843 register_one_node(i); 844 845 for_each_present_cpu(i) 846 register_cpu(&cpu_devices[i], i); 847 848 return 0; 849} 850 851subsys_initcall(topology_init); 852 853#else /* !CONFIG_NUMA */ 854 855#define setup_numa_mapping() do { } while (0) 856 857#endif /* CONFIG_NUMA */ 858 859/** 860 * setup_cpu() - Do all necessary per-cpu, tile-specific initialization. 861 * @boot: Is this the boot cpu? 862 * 863 * Called from setup_arch() on the boot cpu, or online_secondary(). 864 */ 865void __cpuinit setup_cpu(int boot) 866{ 867 /* The boot cpu sets up its permanent mappings much earlier. */ 868 if (!boot) 869 store_permanent_mappings(); 870 871 /* Allow asynchronous TLB interrupts. */ 872#if CHIP_HAS_TILE_DMA() 873 raw_local_irq_unmask(INT_DMATLB_MISS); 874 raw_local_irq_unmask(INT_DMATLB_ACCESS); 875#endif 876#if CHIP_HAS_SN_PROC() 877 raw_local_irq_unmask(INT_SNITLB_MISS); 878#endif 879 880 /* 881 * Allow user access to many generic SPRs, like the cycle 882 * counter, PASS/FAIL/DONE, INTERRUPT_CRITICAL_SECTION, etc. 883 */ 884 __insn_mtspr(SPR_MPL_WORLD_ACCESS_SET_0, 1); 885 886#if CHIP_HAS_SN() 887 /* Static network is not restricted. */ 888 __insn_mtspr(SPR_MPL_SN_ACCESS_SET_0, 1); 889#endif 890#if CHIP_HAS_SN_PROC() 891 __insn_mtspr(SPR_MPL_SN_NOTIFY_SET_0, 1); 892 __insn_mtspr(SPR_MPL_SN_CPL_SET_0, 1); 893#endif 894 895 /* 896 * Set the MPL for interrupt control 0 to user level. 897 * This includes access to the SYSTEM_SAVE and EX_CONTEXT SPRs, 898 * as well as the PL 0 interrupt mask. 899 */ 900 __insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1); 901 902 /* Initialize IRQ support for this cpu. */ 903 setup_irq_regs(); 904 905#ifdef CONFIG_HARDWALL 906 /* Reset the network state on this cpu. 
static struct cpu cpu_devices[NR_CPUS];

static int __init topology_init(void)
{
	int i;

	for_each_online_node(i)
		register_one_node(i);

	for_each_present_cpu(i)
		register_cpu(&cpu_devices[i], i);

	return 0;
}

subsys_initcall(topology_init);

#else /* !CONFIG_NUMA */

#define setup_numa_mapping() do { } while (0)

#endif /* CONFIG_NUMA */

/**
 * setup_cpu() - Do all necessary per-cpu, tile-specific initialization.
 * @boot: Is this the boot cpu?
 *
 * Called from setup_arch() on the boot cpu, or online_secondary().
 */
void __cpuinit setup_cpu(int boot)
{
	/* The boot cpu sets up its permanent mappings much earlier. */
	if (!boot)
		store_permanent_mappings();

	/* Allow asynchronous TLB interrupts. */
#if CHIP_HAS_TILE_DMA()
	raw_local_irq_unmask(INT_DMATLB_MISS);
	raw_local_irq_unmask(INT_DMATLB_ACCESS);
#endif
#if CHIP_HAS_SN_PROC()
	raw_local_irq_unmask(INT_SNITLB_MISS);
#endif

	/*
	 * Allow user access to many generic SPRs, like the cycle
	 * counter, PASS/FAIL/DONE, INTERRUPT_CRITICAL_SECTION, etc.
	 */
	__insn_mtspr(SPR_MPL_WORLD_ACCESS_SET_0, 1);

#if CHIP_HAS_SN()
	/* Static network is not restricted. */
	__insn_mtspr(SPR_MPL_SN_ACCESS_SET_0, 1);
#endif
#if CHIP_HAS_SN_PROC()
	__insn_mtspr(SPR_MPL_SN_NOTIFY_SET_0, 1);
	__insn_mtspr(SPR_MPL_SN_CPL_SET_0, 1);
#endif

	/*
	 * Set the MPL for interrupt control 0 to user level.
	 * This includes access to the SYSTEM_SAVE and EX_CONTEXT SPRs,
	 * as well as the PL 0 interrupt mask.
	 */
	__insn_mtspr(SPR_MPL_INTCTRL_0_SET_0, 1);

	/* Initialize IRQ support for this cpu. */
	setup_irq_regs();

#ifdef CONFIG_HARDWALL
	/* Reset the network state on this cpu. */
	reset_network_state();
#endif
}

static int __initdata set_initramfs_file;
static char __initdata initramfs_file[128] = "initramfs.cpio.gz";

static int __init setup_initramfs_file(char *str)
{
	if (str == NULL)
		return -EINVAL;
	strncpy(initramfs_file, str, sizeof(initramfs_file) - 1);
	set_initramfs_file = 1;

	return 0;
}
early_param("initramfs_file", setup_initramfs_file);

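/*
 * For example, booting with "initramfs_file=my-initrd.cpio.gz" makes
 * load_hv_initrd() below look for that name in the hypervisor
 * filesystem instead of the default "initramfs.cpio.gz".
 */
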
1014 */ 1015 asid_range = hv_inquire_asid(0); 1016 __get_cpu_var(current_asid) = min_asid = asid_range.start; 1017 max_asid = asid_range.start + asid_range.size - 1; 1018 1019 if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model, 1020 sizeof(chip_model)) < 0) { 1021 pr_err("Warning: HV_CONFSTR_CHIP_MODEL not available\n"); 1022 strlcpy(chip_model, "unknown", sizeof(chip_model)); 1023 } 1024} 1025 1026static void __init validate_va(void) 1027{ 1028#ifndef __tilegx__ 1029 /* 1030 * Similarly, make sure we're only using allowed VAs. 1031 * We assume we can contiguously use MEM_USER_INTRPT .. MEM_HV_INTRPT, 1032 * and 0 .. KERNEL_HIGH_VADDR. 1033 * In addition, make sure we CAN'T use the end of memory, since 1034 * we use the last chunk of each pgd for the pgd_list. 1035 */ 1036 int i, fc_fd_ok = 0; 1037 unsigned long max_va = 0; 1038 unsigned long list_va = 1039 ((PGD_LIST_OFFSET / sizeof(pgd_t)) << PGDIR_SHIFT); 1040 1041 for (i = 0; ; ++i) { 1042 HV_VirtAddrRange range = hv_inquire_virtual(i); 1043 if (range.size == 0) 1044 break; 1045 if (range.start <= MEM_USER_INTRPT && 1046 range.start + range.size >= MEM_HV_INTRPT) 1047 fc_fd_ok = 1; 1048 if (range.start == 0) 1049 max_va = range.size; 1050 BUG_ON(range.start + range.size > list_va); 1051 } 1052 if (!fc_fd_ok) 1053 early_panic("Hypervisor not configured for VAs 0xfc/0xfd\n"); 1054 if (max_va == 0) 1055 early_panic("Hypervisor not configured for low VAs\n"); 1056 if (max_va < KERNEL_HIGH_VADDR) 1057 early_panic("Hypervisor max VA %#lx smaller than %#lx\n", 1058 max_va, KERNEL_HIGH_VADDR); 1059 1060 /* Kernel PCs must have their high bit set; see intvec.S. */ 1061 if ((long)VMALLOC_START >= 0) 1062 early_panic( 1063 "Linux VMALLOC region below the 2GB line (%#lx)!\n" 1064 "Reconfigure the kernel with fewer NR_HUGE_VMAPS\n" 1065 "or smaller VMALLOC_RESERVE.\n", 1066 VMALLOC_START); 1067#endif 1068} 1069 1070/* 1071 * cpu_lotar_map lists all the cpus that are valid for the supervisor 1072 * to cache data on at a page level, i.e. what cpus can be placed in 1073 * the LOTAR field of a PTE. It is equivalent to the set of possible 1074 * cpus plus any other cpus that are willing to share their cache. 1075 * It is set by hv_inquire_tiles(HV_INQ_TILES_LOTAR). 1076 */ 1077struct cpumask __write_once cpu_lotar_map; 1078EXPORT_SYMBOL(cpu_lotar_map); 1079 1080#if CHIP_HAS_CBOX_HOME_MAP() 1081/* 1082 * hash_for_home_map lists all the tiles that hash-for-home data 1083 * will be cached on. Note that this may includes tiles that are not 1084 * valid for this supervisor to use otherwise (e.g. if a hypervisor 1085 * device is being shared between multiple supervisors). 1086 * It is set by hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE). 1087 */ 1088struct cpumask hash_for_home_map; 1089EXPORT_SYMBOL(hash_for_home_map); 1090#endif 1091 1092/* 1093 * cpu_cacheable_map lists all the cpus whose caches the hypervisor can 1094 * flush on our behalf. It is set to cpu_possible_map OR'ed with 1095 * hash_for_home_map, and it is what should be passed to 1096 * hv_flush_remote() to flush all caches. Note that if there are 1097 * dedicated hypervisor driver tiles that have authorized use of their 1098 * cache, those tiles will only appear in cpu_lotar_map, NOT in 1099 * cpu_cacheable_map, as they are a special case. 
1100 */ 1101struct cpumask __write_once cpu_cacheable_map; 1102EXPORT_SYMBOL(cpu_cacheable_map); 1103 1104static __initdata struct cpumask disabled_map; 1105 1106static int __init disabled_cpus(char *str) 1107{ 1108 int boot_cpu = smp_processor_id(); 1109 1110 if (str == NULL || cpulist_parse_crop(str, &disabled_map) != 0) 1111 return -EINVAL; 1112 if (cpumask_test_cpu(boot_cpu, &disabled_map)) { 1113 pr_err("disabled_cpus: can't disable boot cpu %d\n", boot_cpu); 1114 cpumask_clear_cpu(boot_cpu, &disabled_map); 1115 } 1116 return 0; 1117} 1118 1119early_param("disabled_cpus", disabled_cpus); 1120 1121void __init print_disabled_cpus(void) 1122{ 1123 if (!cpumask_empty(&disabled_map)) { 1124 char buf[100]; 1125 cpulist_scnprintf(buf, sizeof(buf), &disabled_map); 1126 pr_info("CPUs not available for Linux: %s\n", buf); 1127 } 1128} 1129 1130static void __init setup_cpu_maps(void) 1131{ 1132 struct cpumask hv_disabled_map, cpu_possible_init; 1133 int boot_cpu = smp_processor_id(); 1134 int cpus, i, rc; 1135 1136 /* Learn which cpus are allowed by the hypervisor. */ 1137 rc = hv_inquire_tiles(HV_INQ_TILES_AVAIL, 1138 (HV_VirtAddr) cpumask_bits(&cpu_possible_init), 1139 sizeof(cpu_cacheable_map)); 1140 if (rc < 0) 1141 early_panic("hv_inquire_tiles(AVAIL) failed: rc %d\n", rc); 1142 if (!cpumask_test_cpu(boot_cpu, &cpu_possible_init)) 1143 early_panic("Boot CPU %d disabled by hypervisor!\n", boot_cpu); 1144 1145 /* Compute the cpus disabled by the hvconfig file. */ 1146 cpumask_complement(&hv_disabled_map, &cpu_possible_init); 1147 1148 /* Include them with the cpus disabled by "disabled_cpus". */ 1149 cpumask_or(&disabled_map, &disabled_map, &hv_disabled_map); 1150 1151 /* 1152 * Disable every cpu after "setup_max_cpus". But don't mark 1153 * as disabled the cpus that are outside of our initial rectangle, 1154 * since that turns out to be confusing. 1155 */ 1156 cpus = 1; /* this cpu */ 1157 cpumask_set_cpu(boot_cpu, &disabled_map); /* ignore this cpu */ 1158 for (i = 0; cpus < setup_max_cpus; ++i) 1159 if (!cpumask_test_cpu(i, &disabled_map)) 1160 ++cpus; 1161 for (; i < smp_height * smp_width; ++i) 1162 cpumask_set_cpu(i, &disabled_map); 1163 cpumask_clear_cpu(boot_cpu, &disabled_map); /* reset this cpu */ 1164 for (i = smp_height * smp_width; i < NR_CPUS; ++i) 1165 cpumask_clear_cpu(i, &disabled_map); 1166 1167 /* 1168 * Setup cpu_possible map as every cpu allocated to us, minus 1169 * the results of any "disabled_cpus" settings. 1170 */ 1171 cpumask_andnot(&cpu_possible_init, &cpu_possible_init, &disabled_map); 1172 init_cpu_possible(&cpu_possible_init); 1173 1174 /* Learn which cpus are valid for LOTAR caching. 
static void __init setup_cpu_maps(void)
{
	struct cpumask hv_disabled_map, cpu_possible_init;
	int boot_cpu = smp_processor_id();
	int cpus, i, rc;

	/* Learn which cpus are allowed by the hypervisor. */
	rc = hv_inquire_tiles(HV_INQ_TILES_AVAIL,
			      (HV_VirtAddr) cpumask_bits(&cpu_possible_init),
			      sizeof(cpu_cacheable_map));
	if (rc < 0)
		early_panic("hv_inquire_tiles(AVAIL) failed: rc %d\n", rc);
	if (!cpumask_test_cpu(boot_cpu, &cpu_possible_init))
		early_panic("Boot CPU %d disabled by hypervisor!\n", boot_cpu);

	/* Compute the cpus disabled by the hvconfig file. */
	cpumask_complement(&hv_disabled_map, &cpu_possible_init);

	/* Include them with the cpus disabled by "disabled_cpus". */
	cpumask_or(&disabled_map, &disabled_map, &hv_disabled_map);

	/*
	 * Disable every cpu after "setup_max_cpus".  But don't mark
	 * as disabled the cpus that are outside of our initial rectangle,
	 * since that turns out to be confusing.
	 */
	cpus = 1;                                   /* this cpu */
	cpumask_set_cpu(boot_cpu, &disabled_map);   /* ignore this cpu */
	for (i = 0; cpus < setup_max_cpus; ++i)
		if (!cpumask_test_cpu(i, &disabled_map))
			++cpus;
	for (; i < smp_height * smp_width; ++i)
		cpumask_set_cpu(i, &disabled_map);
	cpumask_clear_cpu(boot_cpu, &disabled_map); /* reset this cpu */
	for (i = smp_height * smp_width; i < NR_CPUS; ++i)
		cpumask_clear_cpu(i, &disabled_map);

	/*
	 * Setup cpu_possible map as every cpu allocated to us, minus
	 * the results of any "disabled_cpus" settings.
	 */
	cpumask_andnot(&cpu_possible_init, &cpu_possible_init, &disabled_map);
	init_cpu_possible(&cpu_possible_init);

	/* Learn which cpus are valid for LOTAR caching. */
	rc = hv_inquire_tiles(HV_INQ_TILES_LOTAR,
			      (HV_VirtAddr) cpumask_bits(&cpu_lotar_map),
			      sizeof(cpu_lotar_map));
	if (rc < 0) {
		pr_err("warning: no HV_INQ_TILES_LOTAR; using AVAIL\n");
		cpu_lotar_map = cpu_possible_map;
	}

#if CHIP_HAS_CBOX_HOME_MAP()
	/* Retrieve set of CPUs used for hash-for-home caching */
	rc = hv_inquire_tiles(HV_INQ_TILES_HFH_CACHE,
			      (HV_VirtAddr) hash_for_home_map.bits,
			      sizeof(hash_for_home_map));
	if (rc < 0)
		early_panic("hv_inquire_tiles(HFH_CACHE) failed: rc %d\n", rc);
	cpumask_or(&cpu_cacheable_map, &cpu_possible_map, &hash_for_home_map);
#else
	cpu_cacheable_map = cpu_possible_map;
#endif
}


static int __init dataplane(char *str)
{
	pr_warning("WARNING: dataplane support disabled in this kernel\n");
	return 0;
}

early_param("dataplane", dataplane);

#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

void __init setup_arch(char **cmdline_p)
{
	int len;

#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
	len = hv_get_command_line((HV_VirtAddr) boot_command_line,
				  COMMAND_LINE_SIZE);
	if (boot_command_line[0])
		pr_warning("WARNING: ignoring dynamic command line \"%s\"\n",
			   boot_command_line);
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	char *hv_cmdline;
#if defined(CONFIG_CMDLINE_BOOL)
	if (builtin_cmdline[0]) {
		int builtin_len = strlcpy(boot_command_line, builtin_cmdline,
					  COMMAND_LINE_SIZE);
		if (builtin_len < COMMAND_LINE_SIZE-1)
			boot_command_line[builtin_len++] = ' ';
		hv_cmdline = &boot_command_line[builtin_len];
		len = COMMAND_LINE_SIZE - builtin_len;
	} else
#endif
	{
		hv_cmdline = boot_command_line;
		len = COMMAND_LINE_SIZE;
	}
	len = hv_get_command_line((HV_VirtAddr) hv_cmdline, len);
	if (len < 0 || len > COMMAND_LINE_SIZE)
		early_panic("hv_get_command_line failed: %d\n", len);
#endif

	*cmdline_p = boot_command_line;

	/* Set disabled_map and setup_max_cpus very early */
	parse_early_param();

	/* Make sure the kernel is compatible with the hypervisor. */
	validate_hv();
	validate_va();

	setup_cpu_maps();


#ifdef CONFIG_PCI
	/*
	 * Initialize the PCI structures.  This is done before memory
	 * setup so that we know whether or not a pci_reserve region
	 * is necessary.
	 */
	if (tile_pci_init() == 0)
		pci_reserve_mb = 0;

	/* PCI systems reserve a region just below 4GB for mapping iomem. */
	pci_reserve_end_pfn = (1 << (32 - PAGE_SHIFT));
	pci_reserve_start_pfn = pci_reserve_end_pfn -
		(pci_reserve_mb << (20 - PAGE_SHIFT));
#endif

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	setup_memory();
	store_permanent_mappings();
	setup_bootmem_allocator();

	/*
	 * NOTE: before this point _nobody_ is allowed to allocate
	 * any memory using the bootmem allocator.
	 */

	paging_init();
	setup_numa_mapping();
	zone_sizes_init();
	set_page_homes();
	setup_cpu(1);
	setup_clock();
	load_hv_initrd();
}

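/*
 * Note on the command line handling in setup_arch() above: with
 * CONFIG_CMDLINE_BOOL but not CONFIG_CMDLINE_OVERRIDE, the built-in
 * command line is used as a prefix, so (for example) a built-in
 * "earlyprintk" plus a hypervisor-supplied "root=/dev/ram0" yields
 * "earlyprintk root=/dev/ram0"; with CONFIG_CMDLINE_OVERRIDE the
 * hypervisor-supplied command line is ignored entirely.
 */
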
1294 */ 1295 1296unsigned long __per_cpu_offset[NR_CPUS] __write_once; 1297EXPORT_SYMBOL(__per_cpu_offset); 1298 1299static size_t __initdata pfn_offset[MAX_NUMNODES] = { 0 }; 1300static unsigned long __initdata percpu_pfn[NR_CPUS] = { 0 }; 1301 1302/* 1303 * As the percpu code allocates pages, we return the pages from the 1304 * end of the node for the specified cpu. 1305 */ 1306static void *__init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) 1307{ 1308 int nid = cpu_to_node(cpu); 1309 unsigned long pfn = node_percpu_pfn[nid] + pfn_offset[nid]; 1310 1311 BUG_ON(size % PAGE_SIZE != 0); 1312 pfn_offset[nid] += size / PAGE_SIZE; 1313 if (percpu_pfn[cpu] == 0) 1314 percpu_pfn[cpu] = pfn; 1315 return pfn_to_kaddr(pfn); 1316} 1317 1318/* 1319 * Pages reserved for percpu memory are not freeable, and in any case we are 1320 * on a short path to panic() in setup_per_cpu_area() at this point anyway. 1321 */ 1322static void __init pcpu_fc_free(void *ptr, size_t size) 1323{ 1324} 1325 1326/* 1327 * Set up vmalloc page tables using bootmem for the percpu code. 1328 */ 1329static void __init pcpu_fc_populate_pte(unsigned long addr) 1330{ 1331 pgd_t *pgd; 1332 pud_t *pud; 1333 pmd_t *pmd; 1334 pte_t *pte; 1335 1336 BUG_ON(pgd_addr_invalid(addr)); 1337 1338 pgd = swapper_pg_dir + pgd_index(addr); 1339 pud = pud_offset(pgd, addr); 1340 BUG_ON(!pud_present(*pud)); 1341 pmd = pmd_offset(pud, addr); 1342 if (pmd_present(*pmd)) { 1343 BUG_ON(pmd_huge_page(*pmd)); 1344 } else { 1345 pte = __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE, 1346 HV_PAGE_TABLE_ALIGN, 0); 1347 pmd_populate_kernel(&init_mm, pmd, pte); 1348 } 1349} 1350 1351void __init setup_per_cpu_areas(void) 1352{ 1353 struct page *pg; 1354 unsigned long delta, pfn, lowmem_va; 1355 unsigned long size = percpu_size(); 1356 char *ptr; 1357 int rc, cpu, i; 1358 1359 rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_fc_alloc, 1360 pcpu_fc_free, pcpu_fc_populate_pte); 1361 if (rc < 0) 1362 panic("Cannot initialize percpu area (err=%d)", rc); 1363 1364 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; 1365 for_each_possible_cpu(cpu) { 1366 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; 1367 1368 /* finv the copy out of cache so we can change homecache */ 1369 ptr = pcpu_base_addr + pcpu_unit_offsets[cpu]; 1370 __finv_buffer(ptr, size); 1371 pfn = percpu_pfn[cpu]; 1372 1373 /* Rewrite the page tables to cache on that cpu */ 1374 pg = pfn_to_page(pfn); 1375 for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) { 1376 1377 /* Update the vmalloc mapping and page home. */ 1378 pte_t *ptep = 1379 virt_to_pte(NULL, (unsigned long)ptr + i); 1380 pte_t pte = *ptep; 1381 BUG_ON(pfn != pte_pfn(pte)); 1382 pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3); 1383 pte = set_remote_cache_cpu(pte, cpu); 1384 set_pte(ptep, pte); 1385 1386 /* Update the lowmem mapping for consistency. */ 1387 lowmem_va = (unsigned long)pfn_to_kaddr(pfn); 1388 ptep = virt_to_pte(NULL, lowmem_va); 1389 if (pte_huge(*ptep)) { 1390 printk(KERN_DEBUG "early shatter of huge page" 1391 " at %#lx\n", lowmem_va); 1392 shatter_pmd((pmd_t *)ptep); 1393 ptep = virt_to_pte(NULL, lowmem_va); 1394 BUG_ON(pte_huge(*ptep)); 1395 } 1396 BUG_ON(pfn != pte_pfn(*ptep)); 1397 set_pte(ptep, pte); 1398 } 1399 } 1400 1401 /* Set our thread pointer appropriately. */ 1402 set_my_cpu_offset(__per_cpu_offset[smp_processor_id()]); 1403 1404 /* Make sure the finv's have completed. */ 1405 mb_incoherent(); 1406 1407 /* Flush the TLB so we reference it properly from here on out. 
	local_flush_tlb_all();
}

static struct resource data_resource = {
	.name	= "Kernel data",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

/*
 * We reserve all resources above 4GB so that PCI won't try to put
 * mappings above 4GB; the standard allows that for some devices but
 * the probing code truncates values to 32 bits.
 */
#ifdef CONFIG_PCI
static struct resource* __init
insert_non_bus_resource(void)
{
	struct resource *res =
		kzalloc(sizeof(struct resource), GFP_ATOMIC);
	res->name = "Non-Bus Physical Address Space";
	res->start = (1ULL << 32);
	res->end = -1LL;
	res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	if (insert_resource(&iomem_resource, res)) {
		kfree(res);
		return NULL;
	}
	return res;
}
#endif

static struct resource* __init
insert_ram_resource(u64 start_pfn, u64 end_pfn)
{
	struct resource *res =
		kzalloc(sizeof(struct resource), GFP_ATOMIC);
	res->name = "System RAM";
	res->start = start_pfn << PAGE_SHIFT;
	res->end = (end_pfn << PAGE_SHIFT) - 1;
	res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
	if (insert_resource(&iomem_resource, res)) {
		kfree(res);
		return NULL;
	}
	return res;
}

/*
 * Request address space for all standard resources
 *
 * If the system includes PCI root complex drivers, we need to create
 * a window just below 4GB where PCI BARs can be mapped.
 */
static int __init request_standard_resources(void)
{
	int i;
	enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET };

	iomem_resource.end = -1LL;
#ifdef CONFIG_PCI
	insert_non_bus_resource();
#endif

	for_each_online_node(i) {
		u64 start_pfn = node_start_pfn[i];
		u64 end_pfn = node_end_pfn[i];

#ifdef CONFIG_PCI
		if (start_pfn <= pci_reserve_start_pfn &&
		    end_pfn > pci_reserve_start_pfn) {
			if (end_pfn > pci_reserve_end_pfn)
				insert_ram_resource(pci_reserve_end_pfn,
						    end_pfn);
			end_pfn = pci_reserve_start_pfn;
		}
#endif
		insert_ram_resource(start_pfn, end_pfn);
	}

	code_resource.start = __pa(_text - CODE_DELTA);
	code_resource.end = __pa(_etext - CODE_DELTA)-1;
	data_resource.start = __pa(_sdata);
	data_resource.end = __pa(_end)-1;

	insert_resource(&iomem_resource, &code_resource);
	insert_resource(&iomem_resource, &data_resource);

#ifdef CONFIG_KEXEC
	insert_resource(&iomem_resource, &crashk_res);
#endif

	return 0;
}

subsys_initcall(request_standard_resources);