/*
 * Procedures for creating, accessing and interpreting the device tree.
 *
 * Paul Mackerras	August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *    {engebret|bergner}@us.ibm.com
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <stdarg.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/stringify.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/debugfs.h>
#include <linux/irq.h>

#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/lmb.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/pci.h>
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
#include <asm/machdep.h>
#include <asm/pSeries_reconfig.h>
#include <asm/pci-bridge.h>
#include <asm/kexec.h>

#ifdef DEBUG
#define DBG(fmt...) printk(KERN_ERR fmt)
#else
#define DBG(fmt...)
#endif


/* #address-cells / #size-cells of the device tree root, cached by
 * early_init_dt_scan_root() for use while parsing "reg" properties. */
static int __initdata dt_root_addr_cells;
static int __initdata dt_root_size_cells;

#ifdef CONFIG_PPC64
/* Set from /chosen properties; control IOMMU usage and the TCE table
 * allocation window. */
int __initdata iommu_is_off;
int __initdata iommu_force_on;
unsigned long tce_alloc_start, tce_alloc_end;
#endif

/* A single cell of the flattened device tree is a big-endian 32-bit word. */
typedef u32 cell_t;

/* Pointer to the flattened device-tree blob passed in by the firmware
 * or boot wrapper; remains valid (and is relocated if necessary by
 * move_device_tree()) until the tree is unflattened. */
struct boot_param_header *initial_boot_params;

/* Head of the global linked list of all unflattened device nodes,
 * chained through device_node->allnext. */
static struct device_node *allnodes = NULL;

/* use when traversing tree through the allnext, child, sibling,
 * or parent members of struct device_node.
 */
static DEFINE_RWLOCK(devtree_lock);

/* export that to outside world */
struct device_node *of_chosen;

/* Translate an offset into the flat tree's string block into a pointer
 * to the NUL-terminated property-name string stored there. */
static inline char *find_flat_dt_string(u32 offset)
{
	return ((char *)initial_boot_params) +
		initial_boot_params->off_dt_strings + offset;
}

/**
 * This function is used to scan the flattened device-tree, it is
 * used to extract the memory informations at boot before we can
 * unflatten the tree
 *
 * @it:	callback invoked for every node; receives the offset of the
 *	node's first property, the node's unit name (path component
 *	after the last '/'), the nesting depth, and @data.  A non-zero
 *	return aborts the scan and is passed back to the caller.
 */
int __init of_scan_flat_dt(int (*it)(unsigned long node,
				     const char *uname, int depth,
				     void *data),
			   void *data)
{
	unsigned long p = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;
	int rc = 0;
	int depth = -1;

	do {
		u32 tag = *((u32 *)p);
		char *pathp;

		p += 4;
		if (tag == OF_DT_END_NODE) {
			depth --;
			continue;
		}
		if (tag == OF_DT_NOP)
			continue;
		if (tag == OF_DT_END)
			break;
		if (tag == OF_DT_PROP) {
			u32 sz = *((u32 *)p);
			/* skip length and name-offset words */
			p += 8;
			/* pre-v16 blobs 8-byte align values of size >= 8 */
			if (initial_boot_params->version < 0x10)
				p = _ALIGN(p, sz >= 8 ? 8 : 4);
			p += sz;
			p = _ALIGN(p, 4);
			continue;
		}
		if (tag != OF_DT_BEGIN_NODE) {
			printk(KERN_WARNING "Invalid tag %x scanning flattened"
			       " device tree !\n", tag);
			return -EINVAL;
		}
		depth++;
		pathp = (char *)p;
		p = _ALIGN(p + strlen(pathp) + 1, 4);
		/* old (full-path) format: reduce to the last path component
		 * so the callback always sees a unit name */
		if ((*pathp) == '/') {
			char *lp, *np;
			for (lp = NULL, np = pathp; *np; np++)
				if ((*np) == '/')
					lp = np+1;
			if (lp != NULL)
				pathp = lp;
		}
		rc = it(p, pathp, depth, data);
		if (rc != 0)
			break;
	} while(1);

	return rc;
}

/* Return the "node" handle (offset of the first property) of the root
 * node of the flat tree, for use with of_get_flat_dt_prop(). */
unsigned long __init of_get_flat_dt_root(void)
{
	unsigned long p = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;

	while(*((u32 *)p) == OF_DT_NOP)
		p += 4;
	BUG_ON (*((u32 *)p) != OF_DT_BEGIN_NODE);
	p += 4;
	return _ALIGN(p + strlen((char *)p) + 1, 4);
}

/**
 * This function can be used within scan_flattened_dt callback to get
 * access to properties
 *
 * @node:	node handle as passed to the of_scan_flat_dt() callback
 * @name:	property name to look up
 * @size:	if non-NULL, receives the property value length in bytes
 *
 * Returns a pointer to the property value inside the blob, or NULL if
 * the node has no property of that name.
 */
void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
				 unsigned long *size)
{
	unsigned long p = node;

	do {
		u32 tag = *((u32 *)p);
		u32 sz, noff;
		const char *nstr;

		p += 4;
		if (tag == OF_DT_NOP)
			continue;
		/* properties always precede subnodes; anything else means
		 * we have run past the end of this node's property list */
		if (tag != OF_DT_PROP)
			return NULL;

		sz = *((u32 *)p);
		noff = *((u32 *)(p + 4));
		p += 8;
		if (initial_boot_params->version < 0x10)
			p = _ALIGN(p, sz >= 8 ? 8 : 4);

		nstr = find_flat_dt_string(noff);
		if (nstr == NULL) {
			printk(KERN_WARNING "Can't find property index"
			       " name !\n");
			return NULL;
		}
		if (strcmp(name, nstr) == 0) {
			if (size)
				*size = sz;
			return (void *)p;
		}
		p += sz;
		p = _ALIGN(p, 4);
	} while(1);
}

/* Return 1 if @compat matches one of the strings in the node's
 * "compatible" property (case-insensitive prefix match per entry),
 * 0 otherwise. */
int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
{
	const char* cp;
	unsigned long cplen, l;

	cp = of_get_flat_dt_prop(node, "compatible", &cplen);
	if (cp == NULL)
		return 0;
	/* "compatible" is a list of NUL-separated strings; walk them */
	while (cplen > 0) {
		if (strncasecmp(cp, compat, strlen(compat)) == 0)
			return 1;
		l = strlen(cp) + 1;
		cp += l;
		cplen -= l;
	}

	return 0;
}

/* Bump-allocator used by the two-pass unflattening: carve @size bytes
 * (aligned to @align) out of the running pointer *@mem.  During the
 * sizing pass *mem starts at 0, so the "pointer" returned is really
 * just an offset and must not be dereferenced. */
static void *__init unflatten_dt_alloc(unsigned long *mem, unsigned long size,
				       unsigned long align)
{
	void *res;

	*mem = _ALIGN(*mem, align);
	res = (void *)*mem;
	*mem += size;

	return res;
}

/**
 * Unflatten one node of the flat tree (recursively including its
 * children) into struct device_node / struct property objects.
 *
 * @mem:	current bump-allocator position (see unflatten_dt_alloc)
 * @p:		in/out cursor into the flat blob; on entry it points at
 *		the node's OF_DT_BEGIN_NODE tag, on exit just past the
 *		matching OF_DT_END_NODE
 * @dad:	parent device_node, or NULL for the root
 * @allnextpp:	tail pointer of the global allnodes list; NULL selects
 *		the first (size-only) pass where nothing is written
 * @fpsize:	accumulated full-path length so far (new-format trees)
 *
 * Returns the updated allocator position, which after the first pass
 * is the total memory needed for the whole tree.
 */
static unsigned long __init unflatten_dt_node(unsigned long mem,
					      unsigned long *p,
					      struct device_node *dad,
					      struct device_node ***allnextpp,
					      unsigned long fpsize)
{
	struct device_node *np;
	struct property *pp, **prev_pp = NULL;
	char *pathp;
	u32 tag;
	unsigned int l, allocl;
	int has_name = 0;
	int new_format = 0;

	tag = *((u32 *)(*p));
	if (tag != OF_DT_BEGIN_NODE) {
		printk("Weird tag at start of node: %x\n", tag);
		return mem;
	}
	*p += 4;
	pathp = (char *)*p;
	l = allocl = strlen(pathp) + 1;
	*p = _ALIGN(*p + l, 4);

	/* version 0x10 has a more compact unit name here instead of the full
	 * path. we accumulate the full path size using "fpsize", we'll rebuild
	 * it later. We detect this because the first character of the name is
	 * not '/'.
	 */
	if ((*pathp) != '/') {
		new_format = 1;
		if (fpsize == 0) {
			/* root node: special case. fpsize accounts for path
			 * plus terminating zero. root node only has '/', so
			 * fpsize should be 2, but we want to avoid the first
			 * level nodes to have two '/' so we use fpsize 1 here
			 */
			fpsize = 1;
			allocl = 2;
		} else {
			/* account for '/' and path size minus terminal 0
			 * already in 'l'
			 */
			fpsize += l;
			allocl = fpsize;
		}
	}


	/* node struct and its full_name are allocated together */
	np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl,
				__alignof__(struct device_node));
	if (allnextpp) {
		memset(np, 0, sizeof(*np));
		np->full_name = ((char*)np) + sizeof(struct device_node);
		if (new_format) {
			char *p = np->full_name;
			/* rebuild full path for new format */
			if (dad && dad->parent) {
				strcpy(p, dad->full_name);
#ifdef DEBUG
				if ((strlen(p) + l + 1) != allocl) {
					DBG("%s: p: %d, l: %d, a: %d\n",
					    pathp, (int)strlen(p), l, allocl);
				}
#endif
				p += strlen(p);
			}
			*(p++) = '/';
			memcpy(p, pathp, l);
		} else
			memcpy(np->full_name, pathp, l);
		prev_pp = &np->properties;
		**allnextpp = np;
		*allnextpp = &np->allnext;
		if (dad != NULL) {
			np->parent = dad;
			/* we temporarily use the next field as `last_child'*/
			if (dad->next == 0)
				dad->child = np;
			else
				dad->next->sibling = np;
			dad->next = np;
		}
		kref_init(&np->kref);
	}
	/* convert all properties of this node */
	while(1) {
		u32 sz, noff;
		char *pname;

		tag = *((u32 *)(*p));
		if (tag == OF_DT_NOP) {
			*p += 4;
			continue;
		}
		if (tag != OF_DT_PROP)
			break;
		*p += 4;
		sz = *((u32 *)(*p));
		noff = *((u32 *)((*p) + 4));
		*p += 8;
		if (initial_boot_params->version < 0x10)
			*p = _ALIGN(*p, sz >= 8 ? 8 : 4);

		pname = find_flat_dt_string(noff);
		if (pname == NULL) {
			printk("Can't find property name in list !\n");
			break;
		}
		if (strcmp(pname, "name") == 0)
			has_name = 1;
		l = strlen(pname) + 1;
		pp = unflatten_dt_alloc(&mem, sizeof(struct property),
					__alignof__(struct property));
		if (allnextpp) {
			/* phandle properties double as the node handle;
			 * "linux,phandle" wins unless "ibm,phandle" is
			 * also present */
			if (strcmp(pname, "linux,phandle") == 0) {
				np->node = *((u32 *)*p);
				if (np->linux_phandle == 0)
					np->linux_phandle = np->node;
			}
			if (strcmp(pname, "ibm,phandle") == 0)
				np->linux_phandle = *((u32 *)*p);
			pp->name = pname;
			pp->length = sz;
			/* value points straight into the flat blob */
			pp->value = (void *)*p;
			*prev_pp = pp;
			prev_pp = &pp->next;
		}
		*p = _ALIGN((*p) + sz, 4);
	}
	/* with version 0x10 we may not have the name property, recreate
	 * it here from the unit name if absent
	 */
	if (!has_name) {
		char *p = pathp, *ps = pathp, *pa = NULL;
		int sz;

		/* name is the unit name between the last '/' and the
		 * '@' unit-address separator, if any */
		while (*p) {
			if ((*p) == '@')
				pa = p;
			if ((*p) == '/')
				ps = p + 1;
			p++;
		}
		if (pa < ps)
			pa = p;
		sz = (pa - ps) + 1;
		pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz,
					__alignof__(struct property));
		if (allnextpp) {
			pp->name = "name";
			pp->length = sz;
			pp->value = pp + 1;
			*prev_pp = pp;
			prev_pp = &pp->next;
			memcpy(pp->value, ps, sz - 1);
			((char *)pp->value)[sz - 1] = 0;
			DBG("fixed up name for %s -> %s\n", pathp,
			    (char *)pp->value);
		}
	}
	if (allnextpp) {
		*prev_pp = NULL;
		np->name = of_get_property(np, "name", NULL);
		np->type = of_get_property(np, "device_type", NULL);

		if (!np->name)
			np->name = "<NULL>";
		if (!np->type)
			np->type = "<NULL>";
	}
	/* recurse into child nodes */
	while (tag == OF_DT_BEGIN_NODE) {
		mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
		tag = *((u32 *)(*p));
	}
	if (tag != OF_DT_END_NODE) {
		printk("Weird tag at end of node: %x\n", tag);
		return mem;
	}
	*p += 4;
	return mem;
}
/* Parse the "mem=" kernel parameter into memory_limit.
 * Returns 1 (error) for a missing value, 0 on success. */
static int __init early_parse_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = PAGE_ALIGN(memparse(p, &p));
	DBG("memory limit = 0x%lx\n", memory_limit);

	return 0;
}
early_param("mem", early_parse_mem);

/*
 * The device tree may be allocated below our memory limit, or inside the
 * crash kernel region for kdump. If so, move it out now.
 */
static void move_device_tree(void)
{
	unsigned long start, size;
	void *p;

	DBG("-> move_device_tree\n");

	start = __pa(initial_boot_params);
	size = initial_boot_params->totalsize;

	if ((memory_limit && (start + size) > memory_limit) ||
			overlaps_crashkernel(start, size)) {
		/* allocate within the RMO so real-mode code can still
		 * reach the blob */
		p = __va(lmb_alloc_base(size, PAGE_SIZE, lmb.rmo_size));
		memcpy(p, initial_boot_params, size);
		initial_boot_params = (struct boot_param_header *)p;
		DBG("Moved device tree to 0x%p\n", p);
	}

	DBG("<- move_device_tree\n");
}

/**
 * unflattens the device-tree passed by the firmware, creating the
 * tree of struct device_node. It also fills the "name" and "type"
 * pointers of the nodes so the normal device-tree walking functions
 * can be used (this used to be done by finish_device_tree)
 */
void __init unflatten_device_tree(void)
{
	unsigned long start, mem, size;
	struct device_node **allnextp = &allnodes;

	DBG(" -> unflatten_device_tree()\n");

	/* First pass, scan for size */
	start = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;
	size = unflatten_dt_node(0, &start, NULL, NULL, 0);
	size = (size | 3) + 1;		/* round up to a multiple of 4 */

	DBG("  size is %lx, allocating...\n", size);

	/* Allocate memory for the expanded device tree */
	mem = lmb_alloc(size + 4, __alignof__(struct device_node));
	mem = (unsigned long) __va(mem);

	/* sentinel word to detect overruns during the second pass */
	((u32 *)mem)[size / 4] = 0xdeadbeef;

	DBG("  unflattening %lx...\n", mem);

	/* Second pass, do actual unflattening */
	start = ((unsigned long)initial_boot_params) +
		initial_boot_params->off_dt_struct;
	unflatten_dt_node(mem, &start, NULL, &allnextp, 0);
	if (*((u32 *)start) != OF_DT_END)
		printk(KERN_WARNING "Weird tag at end of tree: %08x\n", *((u32 *)start));
	if (((u32 *)mem)[size / 4] != 0xdeadbeef)
		printk(KERN_WARNING "End of tree marker overwritten: %08x\n",
		       ((u32 *)mem)[size / 4] );
	*allnextp = NULL;

	/* Get pointer to OF "/chosen" node for use everywhere */
	of_chosen = of_find_node_by_path("/chosen");
	if (of_chosen == NULL)
		of_chosen = of_find_node_by_path("/chosen@0");

	DBG(" <- unflatten_device_tree()\n");
}

/*
 * ibm,pa-features is a per-cpu property that contains a string of
 * attribute descriptors, each of which has a 2 byte header plus up
 * to 254 bytes worth of processor attribute bits.  First header
 * byte specifies the number of bytes following the header.
 * Second header byte is an "attribute-specifier" type, of which
 * zero is the only currently-defined value.
 * Implementation:  Pass in the byte and bit offset for the feature
 * that we are interested in.  The function will return -1 if the
 * pa-features property is missing, or a 1/0 to indicate if the feature
 * is supported/not supported.  Note that the bit numbers are
 * big-endian to match the definition in PAPR.
 */
static struct ibm_pa_feature {
	unsigned long	cpu_features;	/* CPU_FTR_xxx bit */
	unsigned int	cpu_user_ftrs;	/* PPC_FEATURE_xxx bit */
	unsigned char	pabyte;		/* byte number in ibm,pa-features */
	unsigned char	pabit;		/* bit number (big-endian) */
	unsigned char	invert;		/* if 1, pa bit set => clear feature */
} ibm_pa_features[] __initdata = {
	{0, PPC_FEATURE_HAS_MMU, 0, 0, 0},
	{0, PPC_FEATURE_HAS_FPU, 0, 1, 0},
	{CPU_FTR_SLB, 0, 0, 2, 0},
	{CPU_FTR_CTRL, 0, 0, 3, 0},
	{CPU_FTR_NOEXECUTE, 0, 0, 6, 0},
	{CPU_FTR_NODSISRALIGN, 0, 1, 1, 1},
	{CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
};

/* Apply the feature table @fp/@ft_size against the raw attribute
 * bytes @ftrs (length @tablelen) from an ibm,pa-features-style
 * property, setting or clearing bits in cur_cpu_spec accordingly. */
static void __init scan_features(unsigned long node, unsigned char *ftrs,
				 unsigned long tablelen,
				 struct ibm_pa_feature *fp,
				 unsigned long ft_size)
{
	unsigned long i, len, bit;

	/* find descriptor with type == 0 */
	for (;;) {
		if (tablelen < 3)
			return;
		len = 2 + ftrs[0];
		if (tablelen < len)
			return;		/* descriptor 0 not found */
		if (ftrs[1] == 0)
			break;
		tablelen -= len;
		ftrs += len;
	}

	/* loop over bits we know about */
	for (i = 0; i < ft_size; ++i, ++fp) {
		/* descriptor too short to cover this feature byte */
		if (fp->pabyte >= ftrs[0])
			continue;
		bit = (ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
		if (bit ^ fp->invert) {
			cur_cpu_spec->cpu_features |= fp->cpu_features;
			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
		} else {
			cur_cpu_spec->cpu_features &= ~fp->cpu_features;
			cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
		}
	}
}

/* Read a cpu node's ibm,pa-features property (if present) and fold it
 * into the CPU feature bits via scan_features(). */
static void __init check_cpu_pa_features(unsigned long node)
{
	unsigned char *pa_ftrs;
	unsigned long tablelen;

	pa_ftrs = of_get_flat_dt_prop(node, "ibm,pa-features", &tablelen);
	if (pa_ftrs == NULL)
		return;

	scan_features(node, pa_ftrs, tablelen,
		      ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
}

/* Simple per-property CPU feature enables: if the named property exists
 * in the cpu node with value >= min_value, turn the feature bits on. */
static struct feature_property {
	const char *name;
	u32 min_value;
	unsigned long cpu_feature;
	unsigned long cpu_user_ftr;
} feature_properties[] __initdata = {
#ifdef CONFIG_ALTIVEC
	{"altivec", 0, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
	{"ibm,vmx", 1, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
	{"ibm,dfp", 1, 0, PPC_FEATURE_HAS_DFP},
	{"ibm,purr", 1, CPU_FTR_PURR, 0},
	{"ibm,spurr", 1, CPU_FTR_SPURR, 0},
#endif /* CONFIG_PPC64 */
};

/* Scan the feature_properties table for the given cpu node. */
static void __init check_cpu_feature_properties(unsigned long node)
{
	unsigned long i;
	struct feature_property *fp = feature_properties;
	const u32 *prop;

	for (i = 0; i < ARRAY_SIZE(feature_properties); ++i, ++fp) {
		prop = of_get_flat_dt_prop(node, fp->name, NULL);
		if (prop && *prop >= fp->min_value) {
			cur_cpu_spec->cpu_features |= fp->cpu_feature;
			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftr;
		}
	}
}

/* of_scan_flat_dt() callback: identify the boot CPU, record its
 * logical/hardware id mapping, and set up CPU feature bits from the
 * cpu node's properties. */
static int __init early_init_dt_scan_cpus(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	static int logical_cpuid = 0;
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const u32 *prop;
	const u32 *intserv;
	int i, nthreads;
	unsigned long len;
	int found = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Get physical cpuid */
	intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len);
	if (intserv) {
		nthreads = len / sizeof(int);
	} else {
		intserv = of_get_flat_dt_prop(node, "reg", NULL);
		nthreads = 1;
	}

	/*
	 * Now see if any of these threads match our boot cpu.
	 * NOTE: This must match the parsing done in smp_setup_cpu_maps.
	 */
	for (i = 0; i < nthreads; i++) {
		/*
		 * version 2 of the kexec param format adds the phys cpuid of
		 * booted proc.
		 */
		if (initial_boot_params && initial_boot_params->version >= 2) {
			if (intserv[i] ==
					initial_boot_params->boot_cpuid_phys) {
				found = 1;
				break;
			}
		} else {
			/*
			 * Check if it's the boot-cpu, set it's hw index now,
			 * unfortunately this format did not support booting
			 * off secondary threads.
			 */
			if (of_get_flat_dt_prop(node,
					"linux,boot-cpu", NULL) != NULL) {
				found = 1;
				break;
			}
		}

#ifdef CONFIG_SMP
		/* logical cpu id is always 0 on UP kernels */
		logical_cpuid++;
#endif
	}

	if (found) {
		DBG("boot cpu: logical %d physical %d\n", logical_cpuid,
			intserv[i]);
		boot_cpuid = logical_cpuid;
		set_hard_smp_processor_id(boot_cpuid, intserv[i]);

		/*
		 * PAPR defines "logical" PVR values for cpus that
		 * meet various levels of the architecture:
		 * 0x0f000001	Architecture version 2.04
		 * 0x0f000002	Architecture version 2.05
		 * If the cpu-version property in the cpu node contains
		 * such a value, we call identify_cpu again with the
		 * logical PVR value in order to use the cpu feature
		 * bits appropriate for the architecture level.
		 *
		 * A POWER6 partition in "POWER6 architected" mode
		 * uses the 0x0f000002 PVR value; in POWER5+ mode
		 * it uses 0x0f000001.
		 */
		prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
		if (prop && (*prop & 0xff000000) == 0x0f000000)
			identify_cpu(0, *prop);
	}

	check_cpu_feature_properties(node);
	check_cpu_pa_features(node);

#ifdef CONFIG_PPC_PSERIES
	if (nthreads > 1)
		cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
	else
		cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
#endif

	return 0;
}

#ifdef CONFIG_BLK_DEV_INITRD
/* Pick up linux,initrd-start / linux,initrd-end from /chosen; both
 * must be present for the initrd to be accepted. */
static void __init early_init_dt_check_for_initrd(unsigned long node)
{
	unsigned long l;
	u32 *prop;

	DBG("Looking for initrd properties... ");

	prop = of_get_flat_dt_prop(node, "linux,initrd-start", &l);
	if (prop) {
		initrd_start = (unsigned long)__va(of_read_ulong(prop, l/4));

		prop = of_get_flat_dt_prop(node, "linux,initrd-end", &l);
		if (prop) {
			initrd_end = (unsigned long)
					__va(of_read_ulong(prop, l/4));
			initrd_below_start_ok = 1;
		} else {
			initrd_start = 0;
		}
	}

	DBG("initrd_start=0x%lx  initrd_end=0x%lx\n", initrd_start, initrd_end);
}
#else
static inline void early_init_dt_check_for_initrd(unsigned long node)
{
}
#endif /* CONFIG_BLK_DEV_INITRD */

/* of_scan_flat_dt() callback for the /chosen node: picks up IOMMU
 * flags, memory limit, TCE window, crashkernel region, initrd
 * location and the boot command line.  Returns 1 to stop the scan
 * once /chosen has been handled. */
static int __init early_init_dt_scan_chosen(unsigned long node,
					    const char *uname, int depth, void *data)
{
	unsigned long *lprop;
	unsigned long l;
	char *p;

	DBG("search \"chosen\", depth: %d, uname: %s\n", depth, uname);

	if (depth != 1 ||
	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
		return 0;

#ifdef CONFIG_PPC64
	/* check if iommu is forced on or off */
	if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
		iommu_is_off = 1;
	if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
		iommu_force_on = 1;
#endif

	/* mem=x on the command line is the preferred mechanism */
	lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
	if (lprop)
		memory_limit = *lprop;

#ifdef CONFIG_PPC64
	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
	if (lprop)
		tce_alloc_start = *lprop;
	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
	if (lprop)
		tce_alloc_end = *lprop;
#endif

#ifdef CONFIG_KEXEC
	/* NOTE(review): the property values are u64 cells but are read
	 * through an unsigned long * here; on 32-bit this reads only the
	 * high word — presumably only configured on ppc64, verify. */
	lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
	if (lprop)
		crashk_res.start = *lprop;

	lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
	if (lprop)
		crashk_res.end = crashk_res.start + *lprop - 1;
#endif

	early_init_dt_check_for_initrd(node);

	/* Retreive command line */
	p = of_get_flat_dt_prop(node, "bootargs", &l);
	if (p != NULL && l > 0)
		strlcpy(cmd_line, p, min((int)l, COMMAND_LINE_SIZE));

#ifdef CONFIG_CMDLINE
	/* fall back to the compiled-in command line if the firmware
	 * supplied none (or an empty string) */
	if (p == NULL || l == 0 || (l == 1 && (*p) == 0))
		strlcpy(cmd_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif /* CONFIG_CMDLINE */

	DBG("Command line is: %s\n", cmd_line);

	/* break now */
	return 1;
}

/* of_scan_flat_dt() callback for the root node: cache its
 * #address-cells / #size-cells for later "reg" parsing.  Returns 1 to
 * terminate the scan. */
static int __init early_init_dt_scan_root(unsigned long node,
					  const char *uname, int depth, void *data)
{
	u32 *prop;

	if (depth != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
	dt_root_size_cells = (prop == NULL) ? 1 : *prop;
	DBG("dt_root_size_cells = %x\n", dt_root_size_cells);

	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
	dt_root_addr_cells = (prop == NULL) ? 2 : *prop;
	DBG("dt_root_addr_cells = %x\n", dt_root_addr_cells);

	/* break now */
	return 1;
}

/* Read an @s-cell big-endian number from *@cellp and advance the
 * cursor past it. */
static unsigned long __init dt_mem_next_cell(int s, cell_t **cellp)
{
	cell_t *p = *cellp;

	*cellp = p + s;
	return of_read_ulong(p, s);
}

#ifdef CONFIG_PPC_PSERIES
/*
 * Interpret the ibm,dynamic-memory property in the
 * /ibm,dynamic-reconfiguration-memory node.
 * This contains a list of memory blocks along with NUMA affinity
 * information.
 */
static int __init early_init_dt_scan_drconf_memory(unsigned long node)
{
	cell_t *dm, *ls;
	unsigned long l, n;
	unsigned long base, size, lmb_size, flags;

	ls = (cell_t *)of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
	if (ls == NULL || l < dt_root_size_cells * sizeof(cell_t))
		return 0;
	lmb_size = dt_mem_next_cell(dt_root_size_cells, &ls);

	dm = (cell_t *)of_get_flat_dt_prop(node, "ibm,dynamic-memory", &l);
	if (dm == NULL || l < sizeof(cell_t))
		return 0;

	n = *dm++;	/* number of entries */
	if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(cell_t))
		return 0;

	for (; n != 0; --n) {
		base = dt_mem_next_cell(dt_root_addr_cells, &dm);
		flags = dm[3];
		/* skip DRC index, pad, assoc. list index, flags */
		dm += 4;
		/* skip this block if the reserved bit is set in flags (0x80)
		   or if the block is not assigned to this partition (0x8) */
		if ((flags & 0x80) || !(flags & 0x8))
			continue;
		size = lmb_size;
		/* with the IOMMU off, only memory below 2GB is usable */
		if (iommu_is_off) {
			if (base >= 0x80000000ul)
				continue;
			if ((base + size) > 0x80000000ul)
				size = 0x80000000ul - base;
		}
		lmb_add(base, size);
	}
	lmb_dump_all();
	return 0;
}
#else
#define early_init_dt_scan_drconf_memory(node)	0
#endif /* CONFIG_PPC_PSERIES */

/* of_scan_flat_dt() callback: register every memory node's usable
 * ranges with the LMB allocator. */
static int __init early_init_dt_scan_memory(unsigned long node,
					    const char *uname, int depth, void *data)
{
	char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	cell_t *reg, *endp;
	unsigned long l;

	/* Look for the ibm,dynamic-reconfiguration-memory node */
	if (depth == 1 &&
	    strcmp(uname, "ibm,dynamic-reconfiguration-memory") == 0)
		return early_init_dt_scan_drconf_memory(node);

	/* We are scanning "memory" nodes only */
	if (type == NULL) {
		/*
		 * The longtrail doesn't have a device_type on the
		 * /memory node, so look for the node called /memory@0.
		 */
		if (depth != 1 || strcmp(uname, "memory@0") != 0)
			return 0;
	} else if (strcmp(type, "memory") != 0)
		return 0;

	/* linux,usable-memory (e.g. from kdump) overrides "reg" */
	reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l);
	if (reg == NULL)
		reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
	if (reg == NULL)
		return 0;

	endp = reg + (l / sizeof(cell_t));

	DBG("memory scan node %s, reg size %ld, data: %x %x %x %x,\n",
	    uname, l, reg[0], reg[1], reg[2], reg[3]);

	while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
		unsigned long base, size;

		base = dt_mem_next_cell(dt_root_addr_cells, &reg);
		size = dt_mem_next_cell(dt_root_size_cells, &reg);

		if (size == 0)
			continue;
		DBG(" - %lx ,  %lx\n", base, size);
#ifdef CONFIG_PPC64
		if (iommu_is_off) {
			if (base >= 0x80000000ul)
				continue;
			if ((base + size) > 0x80000000ul)
				size = 0x80000000ul - base;
		}
#endif
		lmb_add(base, size);
	}
	return 0;
}

/* Reserve the device-tree blob, the initrd and every range listed in
 * the flat tree's memory reserve map so the LMB allocator never hands
 * them out. */
static void __init early_reserve_mem(void)
{
	u64 base, size;
	u64 *reserve_map;
	unsigned long self_base;
	unsigned long self_size;

	reserve_map = (u64 *)(((unsigned long)initial_boot_params) +
					initial_boot_params->off_mem_rsvmap);

	/* before we do anything, lets reserve the dt blob */
	self_base = __pa((unsigned long)initial_boot_params);
	self_size = initial_boot_params->totalsize;
	lmb_reserve(self_base, self_size);

#ifdef CONFIG_BLK_DEV_INITRD
	/* then reserve the initrd, if any */
	if (initrd_start && (initrd_end > initrd_start))
		lmb_reserve(__pa(initrd_start), initrd_end - initrd_start);
#endif /* CONFIG_BLK_DEV_INITRD */

#ifdef CONFIG_PPC32
	/*
	 * Handle the case where we might be booting from an old kexec
	 * image that setup the mem_rsvmap as pairs of 32-bit values
	 */
	if (*reserve_map > 0xffffffffull) {
		u32 base_32, size_32;
		u32 *reserve_map_32 = (u32 *)reserve_map;

		while (1) {
			base_32 = *(reserve_map_32++);
			size_32 = *(reserve_map_32++);
			if (size_32 == 0)
				break;
			/* skip if the reservation is for the blob */
			if (base_32 == self_base && size_32 == self_size)
				continue;
			DBG("reserving: %x -> %x\n", base_32, size_32);
			lmb_reserve(base_32, size_32);
		}
		return;
	}
#endif
	while (1) {
		base = *(reserve_map++);
		size = *(reserve_map++);
		if (size == 0)
			break;
		DBG("reserving: %llx -> %llx\n", base, size);
		lmb_reserve(base, size);
	}

}

/* Main early entry point for flat device-tree processing: parses
 * /chosen, the root cell sizes and the memory nodes, reserves boot
 * memory, relocates the blob if needed and scans the CPU nodes.
 * @params is the physical pointer to the blob handed over by the
 * boot loader / firmware wrapper. */
void __init early_init_devtree(void *params)
{
	DBG(" -> early_init_devtree()\n");

	/* Setup flat device-tree pointer */
	initial_boot_params = params;

#ifdef CONFIG_PPC_RTAS
	/* Some machines might need RTAS info for debugging, grab it now. */
	of_scan_flat_dt(early_init_dt_scan_rtas, NULL);
#endif

	/* Retrieve various informations from the /chosen node of the
	 * device-tree, including the platform type, initrd location and
	 * size, TCE reserve, and more ...
	 */
	of_scan_flat_dt(early_init_dt_scan_chosen, NULL);

	/* Scan memory nodes and rebuild LMBs */
	lmb_init();
	of_scan_flat_dt(early_init_dt_scan_root, NULL);
	of_scan_flat_dt(early_init_dt_scan_memory, NULL);

	/* Save command line for /proc/cmdline and then parse parameters */
	strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
	parse_early_param();

	/* Reserve LMB regions used by kernel, initrd, dt, etc... */
	lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START);
	reserve_kdump_trampoline();
	reserve_crashkernel();
	early_reserve_mem();

	lmb_enforce_memory_limit(memory_limit);
	lmb_analyze();

	DBG("Phys. mem: %lx\n", lmb_phys_mem_size());

	/* done with LMB, safe to relocate the blob out of reserved areas */
	move_device_tree();

	DBG("Scanning CPUs ...\n");

	/* Retreive CPU related informations from the flat tree
	 * (altivec support, boot CPU ID, ...)
	 */
	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);

	DBG(" <- early_init_devtree()\n");
}

#undef printk

/* Walk up from @np looking for a #address-cells property; per the OF
 * binding it lives on the PARENT of the addressed node.  Defaults to 1
 * when the root has none. */
int of_n_addr_cells(struct device_node* np)
{
	const int *ip;
	do {
		if (np->parent)
			np = np->parent;
		ip = of_get_property(np, "#address-cells", NULL);
		if (ip != NULL)
			return *ip;
	} while (np->parent);
	/* No #address-cells property for the root node, default to 1 */
	return 1;
}
EXPORT_SYMBOL(of_n_addr_cells);

/* Same as of_n_addr_cells() but for #size-cells. */
int of_n_size_cells(struct device_node* np)
{
	const int* ip;
	do {
		if (np->parent)
			np = np->parent;
		ip = of_get_property(np, "#size-cells", NULL);
		if (ip != NULL)
			return *ip;
	} while (np->parent);
	/* No #size-cells property for the root node, default to 1 */
	return 1;
}
EXPORT_SYMBOL(of_n_size_cells);

/** Checks if the given "compat" string matches one of the strings in
 * the device's "compatible" property
 */
int of_device_is_compatible(const struct device_node *device,
		const char *compat)
{
	const char* cp;
	int cplen, l;

	cp = of_get_property(device, "compatible", &cplen);
	if (cp == NULL)
		return 0;
	while (cplen > 0) {
		if (strncasecmp(cp, compat, strlen(compat)) == 0)
			return 1;
		l = strlen(cp) + 1;
		cp += l;
		cplen -= l;
	}

	return 0;
}
EXPORT_SYMBOL(of_device_is_compatible);


/**
 * Indicates whether the root node has a given value in its
 * compatible property.
 */
int machine_is_compatible(const char *compat)
{
	struct device_node *root;
	int rc = 0;

	root = of_find_node_by_path("/");
	if (root) {
		rc = of_device_is_compatible(root, compat);
		of_node_put(root);
	}
	return rc;
}
EXPORT_SYMBOL(machine_is_compatible);

/*******
 *
 * New implementation of the OF "find" APIs, return a refcounted
 * object, call of_node_put() when done.  The device tree and list
 * are protected by a rw_lock.
 *
 * Note that property management will need some locking as well,
 * this isn't dealt with yet.
 *
 *******/

/**
 *	of_find_node_by_name - Find a node by its "name" property
 *	@from:	The node to start searching from or NULL, the node
 *		you pass will not be searched, only the next one
 *		will; typically, you pass what the previous call
 *		returned. of_node_put() will be called on it
 *	@name:	The name string to match against
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */
struct device_node *of_find_node_by_name(struct device_node *from,
	const char *name)
{
	struct device_node *np;

	read_lock(&devtree_lock);
	np = from ? from->allnext : allnodes;
	for (; np != NULL; np = np->allnext)
		if (np->name != NULL && strcasecmp(np->name, name) == 0
		    && of_node_get(np))
			break;
	of_node_put(from);
	read_unlock(&devtree_lock);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_name);

/**
 *	of_find_node_by_type - Find a node by its "device_type" property
 *	@from:	The node to start searching from, or NULL to start searching
 *		the entire device tree. The node you pass will not be
 *		searched, only the next one will; typically, you pass
 *		what the previous call returned. of_node_put() will be
 *		called on from for you.
 *	@type:	The type string to match against
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */
struct device_node *of_find_node_by_type(struct device_node *from,
	const char *type)
{
	struct device_node *np;

	read_lock(&devtree_lock);
	np = from ? from->allnext : allnodes;
	for (; np != 0; np = np->allnext)
		if (np->type != 0 && strcasecmp(np->type, type) == 0
		    && of_node_get(np))
			break;
	of_node_put(from);
	read_unlock(&devtree_lock);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_type);

/**
 *	of_find_compatible_node - Find a node based on type and one of the
 *                                tokens in its "compatible" property
 *	@from:		The node to start searching from or NULL, the node
 *			you pass will not be searched, only the next one
 *			will; typically, you pass what the previous call
 *			returned. of_node_put() will be called on it
 *	@type:		The type string to match "device_type" or NULL to ignore
 *	@compatible:	The string to match to one of the tokens in the device
 *			"compatible" list.
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */
struct device_node *of_find_compatible_node(struct device_node *from,
	const char *type, const char *compatible)
{
	struct device_node *np;

	read_lock(&devtree_lock);
	np = from ? from->allnext : allnodes;
	for (; np != 0; np = np->allnext) {
		if (type != NULL
		    && !(np->type != 0 && strcasecmp(np->type, type) == 0))
			continue;
		if (of_device_is_compatible(np, compatible) && of_node_get(np))
			break;
	}
	of_node_put(from);
	read_unlock(&devtree_lock);
	return np;
}
EXPORT_SYMBOL(of_find_compatible_node);

/**
 *	of_find_node_by_path - Find a node matching a full OF path
 *	@path:	The full path to match
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */
struct device_node *of_find_node_by_path(const char *path)
{
	struct device_node *np = allnodes;

	read_lock(&devtree_lock);
	for (; np != 0; np = np->allnext) {
		if (np->full_name != 0 && strcasecmp(np->full_name, path) == 0
		    && of_node_get(np))
			break;
	}
	read_unlock(&devtree_lock);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_path);

/**
 *	of_find_node_by_phandle - Find a node given a phandle
 *	@handle:	phandle of the node to find
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */
struct device_node *of_find_node_by_phandle(phandle handle)
{
	struct device_node *np;

	read_lock(&devtree_lock);
	for (np = allnodes; np != 0; np = np->allnext)
		if (np->linux_phandle == handle)
			break;
	of_node_get(np);
	read_unlock(&devtree_lock);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);

/**
 *	of_find_all_nodes - Get next node in global list
 *	@prev:	Previous node or NULL to start iteration
 *		of_node_put() will be called on it
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */
1271 */ 1272struct device_node *of_find_all_nodes(struct device_node *prev) 1273{ 1274 struct device_node *np; 1275 1276 read_lock(&devtree_lock); 1277 np = prev ? prev->allnext : allnodes; 1278 for (; np != 0; np = np->allnext) 1279 if (of_node_get(np)) 1280 break; 1281 of_node_put(prev); 1282 read_unlock(&devtree_lock); 1283 return np; 1284} 1285EXPORT_SYMBOL(of_find_all_nodes); 1286 1287/** 1288 * of_get_parent - Get a node's parent if any 1289 * @node: Node to get parent 1290 * 1291 * Returns a node pointer with refcount incremented, use 1292 * of_node_put() on it when done. 1293 */ 1294struct device_node *of_get_parent(const struct device_node *node) 1295{ 1296 struct device_node *np; 1297 1298 if (!node) 1299 return NULL; 1300 1301 read_lock(&devtree_lock); 1302 np = of_node_get(node->parent); 1303 read_unlock(&devtree_lock); 1304 return np; 1305} 1306EXPORT_SYMBOL(of_get_parent); 1307 1308/** 1309 * of_get_next_child - Iterate a node childs 1310 * @node: parent node 1311 * @prev: previous child of the parent node, or NULL to get first 1312 * 1313 * Returns a node pointer with refcount incremented, use 1314 * of_node_put() on it when done. 1315 */ 1316struct device_node *of_get_next_child(const struct device_node *node, 1317 struct device_node *prev) 1318{ 1319 struct device_node *next; 1320 1321 read_lock(&devtree_lock); 1322 next = prev ? prev->sibling : node->child; 1323 for (; next != 0; next = next->sibling) 1324 if (of_node_get(next)) 1325 break; 1326 of_node_put(prev); 1327 read_unlock(&devtree_lock); 1328 return next; 1329} 1330EXPORT_SYMBOL(of_get_next_child); 1331 1332/** 1333 * of_node_get - Increment refcount of a node 1334 * @node: Node to inc refcount, NULL is supported to 1335 * simplify writing of callers 1336 * 1337 * Returns node. 
 */
struct device_node *of_node_get(struct device_node *node)
{
	if (node)
		kref_get(&node->kref);
	return node;
}
EXPORT_SYMBOL(of_node_get);

/* Map the embedded kref back to its containing device_node. */
static inline struct device_node * kref_to_device_node(struct kref *kref)
{
	return container_of(kref, struct device_node, kref);
}

/**
 *	of_node_release - release a dynamically allocated node
 *	@kref:  kref element of the node to be released
 *
 *	In of_node_put() this function is passed to kref_put()
 *	as the destructor.
 */
static void of_node_release(struct kref *kref)
{
	struct device_node *node = kref_to_device_node(kref);
	struct property *prop = node->properties;

	/* Nodes unflattened from the boot-time tree are never freed;
	 * only nodes added at runtime (OF_IS_DYNAMIC) own their memory. */
	if (!OF_IS_DYNAMIC(node))
		return;
	while (prop) {
		struct property *next = prop->next;
		kfree(prop->name);
		kfree(prop->value);
		kfree(prop);
		prop = next;

		/* once the live list is drained, free the properties
		 * parked on the "dead properties" list as well */
		if (!prop) {
			prop = node->deadprops;
			node->deadprops = NULL;
		}
	}
	kfree(node->full_name);
	kfree(node->data);
	kfree(node);
}

/**
 *	of_node_put - Decrement refcount of a node
 *	@node:	Node to dec refcount, NULL is supported to
 *		simplify writing of callers
 *
 */
void of_node_put(struct device_node *node)
{
	if (node)
		kref_put(&node->kref, of_node_release);
}
EXPORT_SYMBOL(of_node_put);

/*
 * Plug a device node into the tree and global list.
 * The caller must have set np->parent beforehand; both the parent's
 * child list and the global allnodes list are updated under the
 * devtree write lock.
 */
void of_attach_node(struct device_node *np)
{
	write_lock(&devtree_lock);
	np->sibling = np->parent->child;
	np->allnext = allnodes;
	np->parent->child = np;
	allnodes = np;
	write_unlock(&devtree_lock);
}

/*
 * "Unplug" a node from the device tree.  The caller must hold
 * a reference to the node.  The memory associated with the node
 * is not freed until its refcount goes to zero.
1413 */ 1414void of_detach_node(const struct device_node *np) 1415{ 1416 struct device_node *parent; 1417 1418 write_lock(&devtree_lock); 1419 1420 parent = np->parent; 1421 1422 if (allnodes == np) 1423 allnodes = np->allnext; 1424 else { 1425 struct device_node *prev; 1426 for (prev = allnodes; 1427 prev->allnext != np; 1428 prev = prev->allnext) 1429 ; 1430 prev->allnext = np->allnext; 1431 } 1432 1433 if (parent->child == np) 1434 parent->child = np->sibling; 1435 else { 1436 struct device_node *prevsib; 1437 for (prevsib = np->parent->child; 1438 prevsib->sibling != np; 1439 prevsib = prevsib->sibling) 1440 ; 1441 prevsib->sibling = np->sibling; 1442 } 1443 1444 write_unlock(&devtree_lock); 1445} 1446 1447#ifdef CONFIG_PPC_PSERIES 1448/* 1449 * Fix up the uninitialized fields in a new device node: 1450 * name, type and pci-specific fields 1451 */ 1452 1453static int of_finish_dynamic_node(struct device_node *node) 1454{ 1455 struct device_node *parent = of_get_parent(node); 1456 int err = 0; 1457 const phandle *ibm_phandle; 1458 1459 node->name = of_get_property(node, "name", NULL); 1460 node->type = of_get_property(node, "device_type", NULL); 1461 1462 if (!node->name) 1463 node->name = "<NULL>"; 1464 if (!node->type) 1465 node->type = "<NULL>"; 1466 1467 if (!parent) { 1468 err = -ENODEV; 1469 goto out; 1470 } 1471 1472 /* We don't support that function on PowerMac, at least 1473 * not yet 1474 */ 1475 if (machine_is(powermac)) 1476 return -ENODEV; 1477 1478 /* fix up new node's linux_phandle field */ 1479 if ((ibm_phandle = of_get_property(node, "ibm,phandle", NULL))) 1480 node->linux_phandle = *ibm_phandle; 1481 1482out: 1483 of_node_put(parent); 1484 return err; 1485} 1486 1487static int prom_reconfig_notifier(struct notifier_block *nb, 1488 unsigned long action, void *node) 1489{ 1490 int err; 1491 1492 switch (action) { 1493 case PSERIES_RECONFIG_ADD: 1494 err = of_finish_dynamic_node(node); 1495 if (err < 0) { 1496 printk(KERN_ERR "finish_node returned 
%d\n", err); 1497 err = NOTIFY_BAD; 1498 } 1499 break; 1500 default: 1501 err = NOTIFY_DONE; 1502 break; 1503 } 1504 return err; 1505} 1506 1507static struct notifier_block prom_reconfig_nb = { 1508 .notifier_call = prom_reconfig_notifier, 1509 .priority = 10, /* This one needs to run first */ 1510}; 1511 1512static int __init prom_reconfig_setup(void) 1513{ 1514 return pSeries_reconfig_notifier_register(&prom_reconfig_nb); 1515} 1516__initcall(prom_reconfig_setup); 1517#endif 1518 1519struct property *of_find_property(const struct device_node *np, 1520 const char *name, 1521 int *lenp) 1522{ 1523 struct property *pp; 1524 1525 read_lock(&devtree_lock); 1526 for (pp = np->properties; pp != 0; pp = pp->next) 1527 if (strcmp(pp->name, name) == 0) { 1528 if (lenp != 0) 1529 *lenp = pp->length; 1530 break; 1531 } 1532 read_unlock(&devtree_lock); 1533 1534 return pp; 1535} 1536EXPORT_SYMBOL(of_find_property); 1537 1538/* 1539 * Find a property with a given name for a given node 1540 * and return the value. 1541 */ 1542const void *of_get_property(const struct device_node *np, const char *name, 1543 int *lenp) 1544{ 1545 struct property *pp = of_find_property(np,name,lenp); 1546 return pp ? pp->value : NULL; 1547} 1548EXPORT_SYMBOL(of_get_property); 1549 1550/* 1551 * Add a property to a node 1552 */ 1553int prom_add_property(struct device_node* np, struct property* prop) 1554{ 1555 struct property **next; 1556 1557 prop->next = NULL; 1558 write_lock(&devtree_lock); 1559 next = &np->properties; 1560 while (*next) { 1561 if (strcmp(prop->name, (*next)->name) == 0) { 1562 /* duplicate ! 
don't insert it */ 1563 write_unlock(&devtree_lock); 1564 return -1; 1565 } 1566 next = &(*next)->next; 1567 } 1568 *next = prop; 1569 write_unlock(&devtree_lock); 1570 1571#ifdef CONFIG_PROC_DEVICETREE 1572 /* try to add to proc as well if it was initialized */ 1573 if (np->pde) 1574 proc_device_tree_add_prop(np->pde, prop); 1575#endif /* CONFIG_PROC_DEVICETREE */ 1576 1577 return 0; 1578} 1579 1580/* 1581 * Remove a property from a node. Note that we don't actually 1582 * remove it, since we have given out who-knows-how-many pointers 1583 * to the data using get-property. Instead we just move the property 1584 * to the "dead properties" list, so it won't be found any more. 1585 */ 1586int prom_remove_property(struct device_node *np, struct property *prop) 1587{ 1588 struct property **next; 1589 int found = 0; 1590 1591 write_lock(&devtree_lock); 1592 next = &np->properties; 1593 while (*next) { 1594 if (*next == prop) { 1595 /* found the node */ 1596 *next = prop->next; 1597 prop->next = np->deadprops; 1598 np->deadprops = prop; 1599 found = 1; 1600 break; 1601 } 1602 next = &(*next)->next; 1603 } 1604 write_unlock(&devtree_lock); 1605 1606 if (!found) 1607 return -ENODEV; 1608 1609#ifdef CONFIG_PROC_DEVICETREE 1610 /* try to remove the proc node as well */ 1611 if (np->pde) 1612 proc_device_tree_remove_prop(np->pde, prop); 1613#endif /* CONFIG_PROC_DEVICETREE */ 1614 1615 return 0; 1616} 1617 1618/* 1619 * Update a property in a node. Note that we don't actually 1620 * remove it, since we have given out who-knows-how-many pointers 1621 * to the data using get-property. 
Instead we just move the property 1622 * to the "dead properties" list, and add the new property to the 1623 * property list 1624 */ 1625int prom_update_property(struct device_node *np, 1626 struct property *newprop, 1627 struct property *oldprop) 1628{ 1629 struct property **next; 1630 int found = 0; 1631 1632 write_lock(&devtree_lock); 1633 next = &np->properties; 1634 while (*next) { 1635 if (*next == oldprop) { 1636 /* found the node */ 1637 newprop->next = oldprop->next; 1638 *next = newprop; 1639 oldprop->next = np->deadprops; 1640 np->deadprops = oldprop; 1641 found = 1; 1642 break; 1643 } 1644 next = &(*next)->next; 1645 } 1646 write_unlock(&devtree_lock); 1647 1648 if (!found) 1649 return -ENODEV; 1650 1651#ifdef CONFIG_PROC_DEVICETREE 1652 /* try to add to proc as well if it was initialized */ 1653 if (np->pde) 1654 proc_device_tree_update_prop(np->pde, newprop, oldprop); 1655#endif /* CONFIG_PROC_DEVICETREE */ 1656 1657 return 0; 1658} 1659 1660 1661/* Find the device node for a given logical cpu number, also returns the cpu 1662 * local thread number (index in ibm,interrupt-server#s) if relevant and 1663 * asked for (non NULL) 1664 */ 1665struct device_node *of_get_cpu_node(int cpu, unsigned int *thread) 1666{ 1667 int hardid; 1668 struct device_node *np; 1669 1670 hardid = get_hard_smp_processor_id(cpu); 1671 1672 for_each_node_by_type(np, "cpu") { 1673 const u32 *intserv; 1674 unsigned int plen, t; 1675 1676 /* Check for ibm,ppc-interrupt-server#s. 
If it doesn't exist 1677 * fallback to "reg" property and assume no threads 1678 */ 1679 intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", 1680 &plen); 1681 if (intserv == NULL) { 1682 const u32 *reg = of_get_property(np, "reg", NULL); 1683 if (reg == NULL) 1684 continue; 1685 if (*reg == hardid) { 1686 if (thread) 1687 *thread = 0; 1688 return np; 1689 } 1690 } else { 1691 plen /= sizeof(u32); 1692 for (t = 0; t < plen; t++) { 1693 if (hardid == intserv[t]) { 1694 if (thread) 1695 *thread = t; 1696 return np; 1697 } 1698 } 1699 } 1700 } 1701 return NULL; 1702} 1703EXPORT_SYMBOL(of_get_cpu_node); 1704 1705#ifdef DEBUG 1706static struct debugfs_blob_wrapper flat_dt_blob; 1707 1708static int __init export_flat_device_tree(void) 1709{ 1710 struct dentry *d; 1711 1712 d = debugfs_create_dir("powerpc", NULL); 1713 if (!d) 1714 return 1; 1715 1716 flat_dt_blob.data = initial_boot_params; 1717 flat_dt_blob.size = initial_boot_params->totalsize; 1718 1719 d = debugfs_create_blob("flat-device-tree", S_IFREG | S_IRUSR, 1720 d, &flat_dt_blob); 1721 if (!d) 1722 return 1; 1723 1724 return 0; 1725} 1726__initcall(export_flat_device_tree); 1727#endif 1728