/*
 * Port for PPC64 David Engebretsen, IBM Corp.
 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
 *
 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
 *   Rework, based on alpha PCI code.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/irq.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>

#ifdef DEBUG
#include <asm/udbg.h>
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif

/* When non-zero, keep the firmware's resource assignments instead of
 * letting the generic PCI code reassign them (see pcibios_init()). */
unsigned long pci_probe_only = 1;
int pci_assign_all_buses = 0;

/* Set once pcibios_init() has finished the first full scan; PHBs added
 * after that point must map their own I/O space immediately
 * (see pci_setup_phb_io()). */
static int pci_initial_scan_done;

static void fixup_resource(struct resource *res, struct pci_dev *dev);
static void do_bus_setup(struct pci_bus *bus);
static void phbs_remap_io(void);

/* pci_io_base -- the base address from which io bars are offsets.
 * This is the lowest I/O base address (so bar values are always positive),
 * and it *must* be the start of ISA space if an ISA bus exists because
 * ISA drivers use hard coded offsets.  If no ISA bus exists a dummy
 * page is mapped and isa_io_limit prevents access to it.
 */
unsigned long isa_io_base;	/* NULL if no ISA bus */
EXPORT_SYMBOL(isa_io_base);
unsigned long pci_io_base;
EXPORT_SYMBOL(pci_io_base);

void iSeries_pcibios_init(void);

/* All registered PHBs, protected by hose_spinlock below. */
LIST_HEAD(hose_list);

/* DMA mapping operations handed to every new PCI device
 * (see pcibios_setup_new_device()). */
static struct dma_mapping_ops *pci_dma_ops;

int global_phb_number;		/* Global phb counter */

/* Cached ISA bridge dev. */
struct pci_dev *ppc64_isabridge_dev = NULL;
EXPORT_SYMBOL_GPL(ppc64_isabridge_dev);

void set_pci_dma_ops(struct dma_mapping_ops *dma_ops)
{
	pci_dma_ops = dma_ops;
}

struct dma_mapping_ops *get_pci_dma_ops(void)
{
	return pci_dma_ops;
}
EXPORT_SYMBOL(get_pci_dma_ops);

/* Quirk: rewrite the vendor ID of Trident-reporting ethernet parts to
 * AMD, both in the cached pci_dev and in config space — presumably so
 * the pcnet32 driver will bind (TODO confirm against quirk history).
 * Hooked up via the DECLARE_PCI_FIXUP_HEADER entry below. */
static void fixup_broken_pcnet32(struct pci_dev* dev)
{
	if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
		dev->vendor = PCI_VENDOR_ID_AMD;
		pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);

/* Convert a resource from CPU coordinates (virtual I/O address or
 * physical memory address) to PCI-bus coordinates by subtracting the
 * host bridge's offset for the resource's space. */
void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
			      struct resource *res)
{
	unsigned long offset = 0;
	struct pci_controller *hose = pci_bus_to_host(dev->bus);

	if (!hose)
		return;

	if (res->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - pci_io_base;

	if (res->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;

	region->start = res->start - offset;
	region->end = res->end - offset;
}

/* Inverse of pcibios_resource_to_bus(): translate a PCI-bus region back
 * into CPU resource coordinates by adding the same per-hose offset. */
void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
			      struct pci_bus_region *region)
{
	unsigned long offset = 0;
	struct pci_controller *hose = pci_bus_to_host(dev->bus);

	if (!hose)
		return;

	if (res->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - pci_io_base;

	if (res->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;

	res->start = region->start + offset;
	res->end = region->end + offset;
}

#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pcibios_resource_to_bus);
EXPORT_SYMBOL(pcibios_bus_to_resource);
#endif

/*
 * We need to avoid collisions with `mirrored' VGA ports
 * and other strange ISA hardware, so we always want the
 * addresses to be allocated in the 0x000-0x0ff region
 * modulo 0x400.
 *
 * Why? Because some silly external IO cards only decode
 * the low 10 bits of the IO address. The 0x00-0xff region
 * is reserved for motherboard devices that decode all 16
 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
 * but we want to try to avoid allocating at 0x2900-0x2bff
 * which might have be mirrored at 0x0100-0x03ff..
 */
void pcibios_align_resource(void *data, struct resource *res,
			    resource_size_t size, resource_size_t align)
{
	struct pci_dev *dev = data;
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	resource_size_t start = res->start;
	unsigned long alignto;

	if (res->flags & IORESOURCE_IO) {
		unsigned long offset = (unsigned long)hose->io_base_virt -
					pci_io_base;
		/* Make sure we start at our min on all hoses */
		if (start - offset < PCIBIOS_MIN_IO)
			start = PCIBIOS_MIN_IO + offset;

		/*
		 * Put everything into 0x00-0xff region modulo 0x400
		 */
		if (start & 0x300)
			start = (start + 0x3ff) & ~0x3ff;

	} else if (res->flags & IORESOURCE_MEM) {
		/* Make sure we start at our min on all hoses */
		if (start - hose->pci_mem_offset < PCIBIOS_MIN_MEM)
			start = PCIBIOS_MIN_MEM + hose->pci_mem_offset;

		/* Align to multiple of size of minimum base.  */
		alignto = max(0x1000UL, align);
		start = ALIGN(start, alignto);
	}

	res->start = start;
}

/* Protects global_phb_number and hose_list. */
static DEFINE_SPINLOCK(hose_spinlock);

/*
 * pci_controller(phb) initialized common variables.
 */
static void __devinit pci_setup_pci_controller(struct pci_controller *hose)
{
	memset(hose, 0, sizeof(struct pci_controller));

	spin_lock(&hose_spinlock);
	hose->global_number = global_phb_number++;
	list_add_tail(&hose->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
}

/* Allocate and register a PHB for device node @dev.  Before the page
 * allocator is up we fall back to bootmem; such early PHBs are marked
 * !is_dynamic and can never be freed.  Records the NUMA node of @dev
 * when it is known and online.  Returns NULL on allocation failure. */
struct pci_controller * pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	if (mem_init_done)
		phb = kmalloc(sizeof(struct pci_controller), GFP_KERNEL);
	else
		phb = alloc_bootmem(sizeof (struct pci_controller));
	if (phb == NULL)
		return NULL;
	pci_setup_pci_controller(phb);
	phb->arch_data = dev;
	phb->is_dynamic = mem_init_done;
	if (dev) {
		int nid = of_node_to_nid(dev);

		/* -1 == no NUMA affinity */
		if (nid < 0 || !node_online(nid))
			nid = -1;

		PHB_SET_NODE(phb, nid);
	}
	return phb;
}

/* Unregister @phb; memory is only reclaimed for dynamically (kmalloc)
 * allocated controllers — bootmem-allocated ones cannot be kfree'd. */
void pcibios_free_controller(struct pci_controller *phb)
{
	spin_lock(&hose_spinlock);
	list_del(&phb->list_node);
	spin_unlock(&hose_spinlock);

	if (phb->is_dynamic)
		kfree(phb);
}

/* Claim the firmware-assigned resources of every device on bus @b and,
 * recursively, on all of its child buses.  Resources already claimed
 * (r->parent set) or never assigned (zero start/flags) are skipped. */
void __devinit pcibios_claim_one_bus(struct pci_bus *b)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;

	list_for_each_entry(dev, &b->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];

			if (r->parent || !r->start || !r->flags)
				continue;
			pci_claim_resource(dev, i);
		}
	}

	list_for_each_entry(child_bus, &b->children, node)
		pcibios_claim_one_bus(child_bus);
}
#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
#endif

/* Claim firmware-assigned resources on every root bus.  Skipped on
 * iSeries, which is set up via iSeries_pcibios_init() instead. */
static void __init pcibios_claim_of_setup(void)
{
	struct pci_bus *b;

	if (firmware_has_feature(FW_FEATURE_ISERIES))
		return;

	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_claim_one_bus(b);
}

/* Read a 32-bit device-tree property @name from @np, returning @def
 * when the property is absent or shorter than one cell. */
static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
268{ 269 const u32 *prop; 270 int len; 271 272 prop = of_get_property(np, name, &len); 273 if (prop && len >= 4) 274 return *prop; 275 return def; 276} 277 278static unsigned int pci_parse_of_flags(u32 addr0) 279{ 280 unsigned int flags = 0; 281 282 if (addr0 & 0x02000000) { 283 flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY; 284 flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64; 285 flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M; 286 if (addr0 & 0x40000000) 287 flags |= IORESOURCE_PREFETCH 288 | PCI_BASE_ADDRESS_MEM_PREFETCH; 289 } else if (addr0 & 0x01000000) 290 flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO; 291 return flags; 292} 293 294#define GET_64BIT(prop, i) ((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1]) 295 296static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev) 297{ 298 u64 base, size; 299 unsigned int flags; 300 struct resource *res; 301 const u32 *addrs; 302 u32 i; 303 int proplen; 304 305 addrs = of_get_property(node, "assigned-addresses", &proplen); 306 if (!addrs) 307 return; 308 DBG(" parse addresses (%d bytes) @ %p\n", proplen, addrs); 309 for (; proplen >= 20; proplen -= 20, addrs += 5) { 310 flags = pci_parse_of_flags(addrs[0]); 311 if (!flags) 312 continue; 313 base = GET_64BIT(addrs, 1); 314 size = GET_64BIT(addrs, 3); 315 if (!size) 316 continue; 317 i = addrs[0] & 0xff; 318 DBG(" base: %llx, size: %llx, i: %x\n", 319 (unsigned long long)base, (unsigned long long)size, i); 320 321 if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) { 322 res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2]; 323 } else if (i == dev->rom_base_reg) { 324 res = &dev->resource[PCI_ROM_RESOURCE]; 325 flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE; 326 } else { 327 printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i); 328 continue; 329 } 330 res->start = base; 331 res->end = base + size - 1; 332 res->flags = flags; 333 res->name = pci_name(dev); 334 fixup_resource(res, dev); 335 } 336} 337 338struct pci_dev 
*of_create_pci_dev(struct device_node *node, 339 struct pci_bus *bus, int devfn) 340{ 341 struct pci_dev *dev; 342 const char *type; 343 344 dev = alloc_pci_dev(); 345 if (!dev) 346 return NULL; 347 type = of_get_property(node, "device_type", NULL); 348 if (type == NULL) 349 type = ""; 350 351 DBG(" create device, devfn: %x, type: %s\n", devfn, type); 352 353 dev->bus = bus; 354 dev->sysdata = node; 355 dev->dev.parent = bus->bridge; 356 dev->dev.bus = &pci_bus_type; 357 dev->devfn = devfn; 358 dev->multifunction = 0; /* maybe a lie? */ 359 360 dev->vendor = get_int_prop(node, "vendor-id", 0xffff); 361 dev->device = get_int_prop(node, "device-id", 0xffff); 362 dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0); 363 dev->subsystem_device = get_int_prop(node, "subsystem-id", 0); 364 365 dev->cfg_size = pci_cfg_space_size(dev); 366 367 sprintf(pci_name(dev), "%04x:%02x:%02x.%d", pci_domain_nr(bus), 368 dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); 369 dev->class = get_int_prop(node, "class-code", 0); 370 371 DBG(" class: 0x%x\n", dev->class); 372 373 dev->current_state = 4; /* unknown power state */ 374 dev->error_state = pci_channel_io_normal; 375 376 if (!strcmp(type, "pci") || !strcmp(type, "pciex")) { 377 /* a PCI-PCI bridge */ 378 dev->hdr_type = PCI_HEADER_TYPE_BRIDGE; 379 dev->rom_base_reg = PCI_ROM_ADDRESS1; 380 } else if (!strcmp(type, "cardbus")) { 381 dev->hdr_type = PCI_HEADER_TYPE_CARDBUS; 382 } else { 383 dev->hdr_type = PCI_HEADER_TYPE_NORMAL; 384 dev->rom_base_reg = PCI_ROM_ADDRESS; 385 /* Maybe do a default OF mapping here */ 386 dev->irq = NO_IRQ; 387 } 388 389 pci_parse_of_addrs(node, dev); 390 391 DBG(" adding to system ...\n"); 392 393 pci_device_add(dev, bus); 394 395 return dev; 396} 397EXPORT_SYMBOL(of_create_pci_dev); 398 399void __devinit of_scan_bus(struct device_node *node, 400 struct pci_bus *bus) 401{ 402 struct device_node *child = NULL; 403 const u32 *reg; 404 int reglen, devfn; 405 struct pci_dev *dev; 406 407 
DBG("of_scan_bus(%s) bus no %d... \n", node->full_name, bus->number); 408 409 while ((child = of_get_next_child(node, child)) != NULL) { 410 DBG(" * %s\n", child->full_name); 411 reg = of_get_property(child, "reg", ®len); 412 if (reg == NULL || reglen < 20) 413 continue; 414 devfn = (reg[0] >> 8) & 0xff; 415 416 /* create a new pci_dev for this device */ 417 dev = of_create_pci_dev(child, bus, devfn); 418 if (!dev) 419 continue; 420 DBG("dev header type: %x\n", dev->hdr_type); 421 422 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || 423 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) 424 of_scan_pci_bridge(child, dev); 425 } 426 427 do_bus_setup(bus); 428} 429EXPORT_SYMBOL(of_scan_bus); 430 431void __devinit of_scan_pci_bridge(struct device_node *node, 432 struct pci_dev *dev) 433{ 434 struct pci_bus *bus; 435 const u32 *busrange, *ranges; 436 int len, i, mode; 437 struct resource *res; 438 unsigned int flags; 439 u64 size; 440 441 DBG("of_scan_pci_bridge(%s)\n", node->full_name); 442 443 /* parse bus-range property */ 444 busrange = of_get_property(node, "bus-range", &len); 445 if (busrange == NULL || len != 8) { 446 printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n", 447 node->full_name); 448 return; 449 } 450 ranges = of_get_property(node, "ranges", &len); 451 if (ranges == NULL) { 452 printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %s\n", 453 node->full_name); 454 return; 455 } 456 457 bus = pci_add_new_bus(dev->bus, dev, busrange[0]); 458 if (!bus) { 459 printk(KERN_ERR "Failed to create pci bus for %s\n", 460 node->full_name); 461 return; 462 } 463 464 bus->primary = dev->bus->number; 465 bus->subordinate = busrange[1]; 466 bus->bridge_ctl = 0; 467 bus->sysdata = node; 468 469 /* parse ranges property */ 470 /* PCI #address-cells == 3 and #size-cells == 2 always */ 471 res = &dev->resource[PCI_BRIDGE_RESOURCES]; 472 for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) { 473 res->flags = 0; 474 bus->resource[i] = res; 475 ++res; 476 } 477 
	/* Slot 0 is reserved for the I/O window; memory windows start at 1 */
	i = 1;
	for (; len >= 32; len -= 32, ranges += 8) {
		flags = pci_parse_of_flags(ranges[0]);
		size = GET_64BIT(ranges, 6);
		if (flags == 0 || size == 0)
			continue;
		if (flags & IORESOURCE_IO) {
			res = bus->resource[0];
			if (res->flags) {
				printk(KERN_ERR "PCI: ignoring extra I/O range"
				       " for bridge %s\n", node->full_name);
				continue;
			}
		} else {
			if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
				printk(KERN_ERR "PCI: too many memory ranges"
				       " for bridge %s\n", node->full_name);
				continue;
			}
			res = bus->resource[i];
			++i;
		}
		res->start = GET_64BIT(ranges, 1);
		res->end = res->start + size - 1;
		res->flags = flags;
		fixup_resource(res, dev);
	}
	sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
		bus->number);
	DBG(" bus name: %s\n", bus->name);

	mode = PCI_PROBE_NORMAL;
	if (ppc_md.pci_probe_mode)
		mode = ppc_md.pci_probe_mode(bus);
	DBG(" probe mode: %d\n", mode);

	if (mode == PCI_PROBE_DEVTREE)
		of_scan_bus(node, bus);
	else if (mode == PCI_PROBE_NORMAL)
		pci_scan_child_bus(bus);
}
EXPORT_SYMBOL(of_scan_pci_bridge);

/* Create and scan the root bus of one PHB, requesting its I/O and
 * memory windows from the generic ioport/iomem resource trees. */
void __devinit scan_phb(struct pci_controller *hose)
{
	struct pci_bus *bus;
	struct device_node *node = hose->arch_data;
	int i, mode;
	struct resource *res;

	DBG("Scanning PHB %s\n", node ? node->full_name : "<NO NAME>");

	bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, node);
	if (bus == NULL) {
		printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
		       hose->global_number);
		return;
	}
	bus->secondary = hose->first_busno;
	hose->bus = bus;

	bus->resource[0] = res = &hose->io_resource;
	if (res->flags && request_resource(&ioport_resource, res))
		printk(KERN_ERR "Failed to request PCI IO region "
		       "on PCI domain %04x\n", hose->global_number);

	for (i = 0; i < 3; ++i) {
		res = &hose->mem_resources[i];
		bus->resource[i+1] = res;
		if (res->flags && request_resource(&iomem_resource, res))
			printk(KERN_ERR "Failed to request PCI memory region "
			       "on PCI domain %04x\n", hose->global_number);
	}

	mode = PCI_PROBE_NORMAL;

	if (node && ppc_md.pci_probe_mode)
		mode = ppc_md.pci_probe_mode(bus);
	DBG(" probe mode: %d\n", mode);
	if (mode == PCI_PROBE_DEVTREE) {
		bus->subordinate = hose->last_busno;
		of_scan_bus(node, bus);
	}

	if (mode == PCI_PROBE_NORMAL)
		hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
}

/* One-time PCI bring-up: scan every recorded PHB, claim or assign
 * resources, run platform fixups, cache the ISA bridge and map in
 * PCI I/O space. */
static int __init pcibios_init(void)
{
	struct pci_controller *hose, *tmp;

	/* For now, override phys_mem_access_prot. If we need it,
	 * later, we may move that initialization to each ppc_md
	 */
	ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;

	if (firmware_has_feature(FW_FEATURE_ISERIES))
		iSeries_pcibios_init();

	printk(KERN_DEBUG "PCI: Probing PCI hardware\n");

	/* Scan all of the recorded PCI controllers.  */
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		scan_phb(hose);
		pci_bus_add_devices(hose->bus);
	}

	if (!firmware_has_feature(FW_FEATURE_ISERIES)) {
		if (pci_probe_only)
			pcibios_claim_of_setup();
		else
			pci_assign_unassigned_resources();
	}

	/* Call machine dependent final fixup */
	if (ppc_md.pcibios_fixup)
		ppc_md.pcibios_fixup();

	/* Cache the location of the ISA bridge (if we have one) */
	ppc64_isabridge_dev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (ppc64_isabridge_dev != NULL)
		printk(KERN_DEBUG "ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));

	if (!firmware_has_feature(FW_FEATURE_ISERIES))
		/* map in PCI I/O space */
		phbs_remap_io();

	pci_initial_scan_done = 1;

	printk(KERN_DEBUG "PCI: Probing PCI hardware done\n");

	return 0;
}

subsys_initcall(pcibios_init);

/* No arch-specific "pci=" options are recognised; hand the string back
 * to the generic code unmodified. */
char __init *pcibios_setup(char *str)
{
	return str;
}

/* Enable I/O and/or memory decode in the device's command register for
 * the resources selected by @mask (one bit per resource index). */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	u16 cmd, oldcmd;
	int i;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	oldcmd = cmd;

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		struct resource *res = &dev->resource[i];

		/* Only set up the requested stuff */
		if (!(mask & (1<<i)))
			continue;

		if (res->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (res->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}

	if (cmd != oldcmd) {
		printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
		       pci_name(dev), cmd);
		/* Enable the appropriate bits in the PCI command register.  */
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}

/*
 * Return the domain number for this bus.
 */
int pci_domain_nr(struct pci_bus *bus)
{
	/* iSeries presents a single PCI domain */
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		return 0;
	else {
		struct pci_controller *hose = pci_bus_to_host(bus);

		return hose->global_number;
	}
}

EXPORT_SYMBOL(pci_domain_nr);

/* Decide whether to display the domain number in /proc */
int pci_proc_domain(struct pci_bus *bus)
{
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		return 0;
	else {
		struct pci_controller *hose = pci_bus_to_host(bus);
		return hose->buid;
	}
}

/*
 * Platform support for /proc/bus/pci/X/Y mmap()s,
 * modelled on the sparc64 implementation by Dave Miller.
 *  -- paulus.
 */

/* Validate the user-requested mmap offset against the device's
 * resources and rewrite it into a physical address.  Returns the
 * matching resource, or NULL if the offset lies in none of them. */
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
					       resource_size_t *offset,
					       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	unsigned long io_offset = 0;
	int i, res_bit;

	if (hose == 0)
		return NULL;		/* should never happen */

	/* If memory, add on the PCI bridge address offset */
	if (mmap_state == pci_mmap_mem) {
		res_bit = IORESOURCE_MEM;
	} else {
		io_offset = (unsigned long)hose->io_base_virt - pci_io_base;
		*offset += io_offset;
		res_bit = IORESOURCE_IO;
	}

	/*
	 * Check that the offset requested corresponds to one of the
	 * resources of the device.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		int flags = rp->flags;

		/* treat ROM as memory (should be already) */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;

		/* Active and same type? */
		if ((flags & res_bit) == 0)
			continue;

		/* In the range of this resource? */
		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
			continue;

		/* found it! construct the final physical address */
		if (mmap_state == pci_mmap_io)
			*offset += hose->io_base_phys - io_offset;
		return rp;
	}

	return NULL;
}

/*
 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping.
 */
static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
				      pgprot_t protection,
				      enum pci_mmap_state mmap_state,
				      int write_combine)
{
	unsigned long prot = pgprot_val(protection);

	/* Write-combining only applies to memory mappings; it is implied
	 * for prefetchable memory resources. */
	if (mmap_state != pci_mmap_mem)
		write_combine = 0;
	else if (write_combine == 0) {
		if (rp->flags & IORESOURCE_PREFETCH)
			write_combine = 1;
	}

	prot |= _PAGE_NO_CACHE;
	if (write_combine)
		prot &= ~_PAGE_GUARDED;
	else
		prot |= _PAGE_GUARDED;

	return __pgprot(prot);
}

/*
 * This one is used by /dev/mem and fbdev who have no clue about the
 * PCI device, it tries to find the PCI device first and calls the
 * above routine
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t protection)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	unsigned long prot = pgprot_val(protection);
	unsigned long offset = pfn << PAGE_SHIFT;
	int i;

	if (page_is_ram(pfn))
		return __pgprot(prot);

	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;

	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* Active and same type? */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;
			/* In the range of this resource?  */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		/* Drop the guard bit for prefetchable BARs; release the
		 * reference for_each_pci_dev still holds on pdev. */
		if (found->flags & IORESOURCE_PREFETCH)
			prot &= ~_PAGE_GUARDED;
		pci_dev_put(pdev);
	}

	DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);

	return __pgprot(prot);
}


/*
 * Perform the actual remap of the pages for a PCI device mapping, as
 * appropriate for this architecture.  The region in the process to map
 * is described by vm_start and vm_end members of VMA, the base physical
 * address is found in vm_pgoff.
 * The pci device structure is provided so that architectures may make mapping
 * decisions on a per-device or per-bus basis.
 *
 * Returns a negative error code on failure, zero on success.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	resource_size_t offset = vma->vm_pgoff << PAGE_SHIFT;
	struct resource *rp;
	int ret;

	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
	if (rp == NULL)
		return -EINVAL;

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
						  vma->vm_page_prot,
						  mmap_state, write_combine);

	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);

	return ret;
}

/* sysfs "devspec" attribute: the device's full device-tree path. */
static ssize_t pci_show_devspec(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev;
	struct device_node *np;

	pdev = to_pci_dev (dev);
	np = pci_device_to_OF_node(pdev);
	if (np == NULL || np->full_name == NULL)
		return 0;
	return sprintf(buf, "%s", np->full_name);
}
static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);

void pcibios_add_platform_entries(struct pci_dev *pdev)
{
	device_create_file(&pdev->dev,
&dev_attr_devspec); 858} 859 860#define ISA_SPACE_MASK 0x1 861#define ISA_SPACE_IO 0x1 862 863static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node, 864 unsigned long phb_io_base_phys, 865 void __iomem * phb_io_base_virt) 866{ 867 /* Remove these asap */ 868 869 struct pci_address { 870 u32 a_hi; 871 u32 a_mid; 872 u32 a_lo; 873 }; 874 875 struct isa_address { 876 u32 a_hi; 877 u32 a_lo; 878 }; 879 880 struct isa_range { 881 struct isa_address isa_addr; 882 struct pci_address pci_addr; 883 unsigned int size; 884 }; 885 886 const struct isa_range *range; 887 unsigned long pci_addr; 888 unsigned int isa_addr; 889 unsigned int size; 890 int rlen = 0; 891 892 range = of_get_property(isa_node, "ranges", &rlen); 893 if (range == NULL || (rlen < sizeof(struct isa_range))) { 894 printk(KERN_ERR "no ISA ranges or unexpected isa range size," 895 "mapping 64k\n"); 896 __ioremap_explicit(phb_io_base_phys, 897 (unsigned long)phb_io_base_virt, 898 0x10000, _PAGE_NO_CACHE | _PAGE_GUARDED); 899 return; 900 } 901 902 /* From "ISA Binding to 1275" 903 * The ranges property is laid out as an array of elements, 904 * each of which comprises: 905 * cells 0 - 1: an ISA address 906 * cells 2 - 4: a PCI address 907 * (size depending on dev->n_addr_cells) 908 * cell 5: the size of the range 909 */ 910 if ((range->isa_addr.a_hi && ISA_SPACE_MASK) == ISA_SPACE_IO) { 911 isa_addr = range->isa_addr.a_lo; 912 pci_addr = (unsigned long) range->pci_addr.a_mid << 32 | 913 range->pci_addr.a_lo; 914 915 /* Assume these are both zero */ 916 if ((pci_addr != 0) || (isa_addr != 0)) { 917 printk(KERN_ERR "unexpected isa to pci mapping: %s\n", 918 __FUNCTION__); 919 return; 920 } 921 922 size = PAGE_ALIGN(range->size); 923 924 __ioremap_explicit(phb_io_base_phys, 925 (unsigned long) phb_io_base_virt, 926 size, _PAGE_NO_CACHE | _PAGE_GUARDED); 927 } 928} 929 930void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose, 931 struct device_node *dev, int prim) 932{ 933 
	const unsigned int *ranges;
	unsigned int pci_space;
	unsigned long size;
	int rlen = 0;
	int memno = 0;
	struct resource *res;
	int np, na = of_n_addr_cells(dev);
	unsigned long pci_addr, cpu_phys_addr;

	/* Cells per ranges element: 3 PCI address + na CPU address + 2 size */
	np = na + 5;

	/* From "PCI Binding to 1275"
	 * The ranges property is laid out as an array of elements,
	 * each of which comprises:
	 *   cells 0 - 2:	a PCI address
	 *   cells 3 or 3+4:	a CPU physical address
	 *			(size depending on dev->n_addr_cells)
	 *   cells 4+5 or 5+6:	the size of the range
	 */
	ranges = of_get_property(dev, "ranges", &rlen);
	if (ranges == NULL)
		return;
	hose->io_base_phys = 0;
	while ((rlen -= np * sizeof(unsigned int)) >= 0) {
		res = NULL;
		pci_space = ranges[0];
		pci_addr = ((unsigned long)ranges[1] << 32) | ranges[2];
		cpu_phys_addr = of_translate_address(dev, &ranges[3]);
		size = ((unsigned long)ranges[na+3] << 32) | ranges[na+4];
		ranges += np;
		if (size == 0)
			continue;

		/* Now consume following elements while they are contiguous */
		while (rlen >= np * sizeof(unsigned int)) {
			unsigned long addr, phys;

			if (ranges[0] != pci_space)
				break;
			addr = ((unsigned long)ranges[1] << 32) | ranges[2];
			phys = ranges[3];
			if (na >= 2)
				phys = (phys << 32) | ranges[4];
			if (addr != pci_addr + size ||
			    phys != cpu_phys_addr + size)
				break;

			size += ((unsigned long)ranges[na+3] << 32)
				| ranges[na+4];
			ranges += np;
			rlen -= np * sizeof(unsigned int);
		}

		/* Bits 24-25 of phys.hi select the address space */
		switch ((pci_space >> 24) & 0x3) {
		case 1:		/* I/O space */
			hose->io_base_phys = cpu_phys_addr - pci_addr;
			/* handle from 0 to top of I/O window */
			hose->pci_io_size = pci_addr + size;

			res = &hose->io_resource;
			res->flags = IORESOURCE_IO;
			res->start = pci_addr;
			DBG("phb%d: IO 0x%lx -> 0x%lx\n", hose->global_number,
				    res->start, res->start + size - 1);
			break;
		case 2:		/* memory space */
			memno = 0;
			while (memno < 3 && hose->mem_resources[memno].flags)
				++memno;

			/* First memory range defines the mem offset */
			if (memno == 0)
				hose->pci_mem_offset = cpu_phys_addr - pci_addr;
			if (memno < 3) {
				res = &hose->mem_resources[memno];
				res->flags = IORESOURCE_MEM;
				res->start = cpu_phys_addr;
				DBG("phb%d: MEM 0x%lx -> 0x%lx\n", hose->global_number,
					    res->start, res->start + size - 1);
			}
			break;
		}
		if (res != NULL) {
			res->name = dev->full_name;
			res->end = res->start + size - 1;
			res->parent = NULL;
			res->sibling = NULL;
			res->child = NULL;
		}
	}
}

/* Reserve (and, after the initial scan, map) the I/O space of @hose.
 * For the primary PHB this also establishes pci_io_base and, when an
 * ISA node exists, isa_io_base and the ISA window mapping. */
void __devinit pci_setup_phb_io(struct pci_controller *hose, int primary)
{
	unsigned long size = hose->pci_io_size;
	unsigned long io_virt_offset;
	struct resource *res;
	struct device_node *isa_dn;

	if (size == 0)
		return;

	hose->io_base_virt = reserve_phb_iospace(size);
	DBG("phb%d io_base_phys 0x%lx io_base_virt 0x%lx\n",
		hose->global_number, hose->io_base_phys,
		(unsigned long) hose->io_base_virt);

	if (primary) {
		pci_io_base = (unsigned long)hose->io_base_virt;
		isa_dn = of_find_node_by_type(NULL, "isa");
		if (isa_dn) {
			isa_io_base = pci_io_base;
			pci_process_ISA_OF_ranges(isa_dn, hose->io_base_phys,
						hose->io_base_virt);
			of_node_put(isa_dn);
		}
	}

	io_virt_offset = (unsigned long)hose->io_base_virt - pci_io_base;
	res = &hose->io_resource;
	res->start += io_virt_offset;
	res->end += io_virt_offset;

	/* If this is called after the initial PCI scan, then we need to
	 * proceed to IO mappings now
	 */
	if (pci_initial_scan_done)
		__ioremap_explicit(hose->io_base_phys,
				   (unsigned long)hose->io_base_virt,
				   hose->pci_io_size,
				   _PAGE_NO_CACHE | _PAGE_GUARDED);
}

/* Hotplug variant of pci_setup_phb_io(): the I/O space is ioremapped
 * immediately instead of being reserved for a later phbs_remap_io(). */
void __devinit pci_setup_phb_io_dynamic(struct pci_controller *hose,
					int primary)
{
	unsigned long size =
hose->pci_io_size; 1069 unsigned long io_virt_offset; 1070 struct resource *res; 1071 1072 if (size == 0) 1073 return; 1074 1075 hose->io_base_virt = __ioremap(hose->io_base_phys, size, 1076 _PAGE_NO_CACHE | _PAGE_GUARDED); 1077 DBG("phb%d io_base_phys 0x%lx io_base_virt 0x%lx\n", 1078 hose->global_number, hose->io_base_phys, 1079 (unsigned long) hose->io_base_virt); 1080 1081 if (primary) 1082 pci_io_base = (unsigned long)hose->io_base_virt; 1083 1084 io_virt_offset = (unsigned long)hose->io_base_virt - pci_io_base; 1085 res = &hose->io_resource; 1086 res->start += io_virt_offset; 1087 res->end += io_virt_offset; 1088} 1089 1090 1091static int get_bus_io_range(struct pci_bus *bus, unsigned long *start_phys, 1092 unsigned long *start_virt, unsigned long *size) 1093{ 1094 struct pci_controller *hose = pci_bus_to_host(bus); 1095 struct resource *res; 1096 1097 if (bus->self) 1098 res = bus->resource[0]; 1099 else 1100 /* Root Bus */ 1101 res = &hose->io_resource; 1102 1103 if (res->end == 0 && res->start == 0) 1104 return 1; 1105 1106 *start_virt = pci_io_base + res->start; 1107 *start_phys = *start_virt + hose->io_base_phys 1108 - (unsigned long) hose->io_base_virt; 1109 1110 if (res->end > res->start) 1111 *size = res->end - res->start + 1; 1112 else { 1113 printk("%s(): unexpected region 0x%lx->0x%lx\n", 1114 __FUNCTION__, res->start, res->end); 1115 return 1; 1116 } 1117 1118 return 0; 1119} 1120 1121int unmap_bus_range(struct pci_bus *bus) 1122{ 1123 unsigned long start_phys; 1124 unsigned long start_virt; 1125 unsigned long size; 1126 1127 if (!bus) { 1128 printk(KERN_ERR "%s() expected bus\n", __FUNCTION__); 1129 return 1; 1130 } 1131 1132 if (get_bus_io_range(bus, &start_phys, &start_virt, &size)) 1133 return 1; 1134 if (__iounmap_explicit((void __iomem *) start_virt, size)) 1135 return 1; 1136 1137 return 0; 1138} 1139EXPORT_SYMBOL(unmap_bus_range); 1140 1141int remap_bus_range(struct pci_bus *bus) 1142{ 1143 unsigned long start_phys; 1144 unsigned long 
start_virt; 1145 unsigned long size; 1146 1147 if (!bus) { 1148 printk(KERN_ERR "%s() expected bus\n", __FUNCTION__); 1149 return 1; 1150 } 1151 1152 1153 if (get_bus_io_range(bus, &start_phys, &start_virt, &size)) 1154 return 1; 1155 if (start_phys == 0) 1156 return 1; 1157 printk(KERN_DEBUG "mapping IO %lx -> %lx, size: %lx\n", start_phys, start_virt, size); 1158 if (__ioremap_explicit(start_phys, start_virt, size, 1159 _PAGE_NO_CACHE | _PAGE_GUARDED)) 1160 return 1; 1161 1162 return 0; 1163} 1164EXPORT_SYMBOL(remap_bus_range); 1165 1166static void phbs_remap_io(void) 1167{ 1168 struct pci_controller *hose, *tmp; 1169 1170 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) 1171 remap_bus_range(hose->bus); 1172} 1173 1174static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev) 1175{ 1176 struct pci_controller *hose = pci_bus_to_host(dev->bus); 1177 unsigned long offset; 1178 1179 if (res->flags & IORESOURCE_IO) { 1180 offset = (unsigned long)hose->io_base_virt - pci_io_base; 1181 1182 res->start += offset; 1183 res->end += offset; 1184 } else if (res->flags & IORESOURCE_MEM) { 1185 res->start += hose->pci_mem_offset; 1186 res->end += hose->pci_mem_offset; 1187 } 1188} 1189 1190void __devinit pcibios_fixup_device_resources(struct pci_dev *dev, 1191 struct pci_bus *bus) 1192{ 1193 /* Update device resources. */ 1194 int i; 1195 1196 for (i = 0; i < PCI_NUM_RESOURCES; i++) 1197 if (dev->resource[i].flags) 1198 fixup_resource(&dev->resource[i], dev); 1199} 1200EXPORT_SYMBOL(pcibios_fixup_device_resources); 1201 1202void __devinit pcibios_setup_new_device(struct pci_dev *dev) 1203{ 1204 struct dev_archdata *sd = &dev->dev.archdata; 1205 1206 sd->of_node = pci_device_to_OF_node(dev); 1207 1208 DBG("PCI device %s OF node: %s\n", pci_name(dev), 1209 sd->of_node ? 
sd->of_node->full_name : "<none>");

	sd->dma_ops = pci_dma_ops;
#ifdef CONFIG_NUMA
	sd->numa_node = pcibus_to_node(dev->bus);
#else
	/* No NUMA support compiled in: mark as "no node". */
	sd->numa_node = -1;
#endif
	if (ppc_md.pci_dma_dev_setup)
		ppc_md.pci_dma_dev_setup(dev);
}
EXPORT_SYMBOL(pcibios_setup_new_device);

/*
 * do_bus_setup - per-bus setup: platform DMA bus hook, then per-device
 * setup for every device on @bus, then IRQ line reads plus the optional
 * platform IRQ fixup hook.  Forward-declared near the top of this file.
 */
static void __devinit do_bus_setup(struct pci_bus *bus)
{
	struct pci_dev *dev;

	if (ppc_md.pci_dma_bus_setup)
		ppc_md.pci_dma_bus_setup(bus);

	list_for_each_entry(dev, &bus->devices, bus_list)
		pcibios_setup_new_device(dev);

	/* Read default IRQs and fixup if necessary */
	list_for_each_entry(dev, &bus->devices, bus_list) {
		pci_read_irq_line(dev);
		if (ppc_md.pci_irq_fixup)
			ppc_md.pci_irq_fixup(dev);
	}
}

/*
 * pcibios_fixup_bus - generic-PCI callback run after a bus is scanned.
 * In pci_probe_only mode (firmware-assigned resources) it reads the
 * bridge bases for subordinate bridges and rebases the resources of
 * the bridge and of every non-bridge device on the bus; do_bus_setup()
 * runs unconditionally.
 */
void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
	struct pci_dev *dev = bus->self;
	struct device_node *np;

	np = pci_bus_to_OF_node(bus);

	DBG("pcibios_fixup_bus(%s)\n", np ? np->full_name : "<???>");

	if (dev && pci_probe_only &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		/* This is a subordinate bridge */

		pci_read_bridge_bases(bus);
		pcibios_fixup_device_resources(dev, bus);
	}

	do_bus_setup(bus);

	/* When the kernel assigns resources itself there is nothing
	 * more to fix up here.
	 */
	if (!pci_probe_only)
		return;

	list_for_each_entry(dev, &bus->devices, bus_list)
		if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
			pcibios_fixup_device_resources(dev, bus);
}
EXPORT_SYMBOL(pcibios_fixup_bus);

/*
 * Reads the interrupt pin to determine whether an interrupt is used by
 * the card.  If it is, gets the interrupt line from Open Firmware and
 * sets it in the pci_dev and in the PCI config space line register.
 */
int pci_read_irq_line(struct pci_dev *pci_dev)
{
	struct of_irq oirq;
	unsigned int virq;

	DBG("Try to map irq for %s...\n", pci_name(pci_dev));

#ifdef DEBUG
	/* Poison the spec so stale fields are obvious in debug output. */
	memset(&oirq, 0xff, sizeof(oirq));
#endif
	/* Try to get a mapping from the device-tree */
	if (of_irq_map_pci(pci_dev, &oirq)) {
		u8 line, pin;

		/* If that fails, let's fall back to what is in the config
		 * space and map that through the default controller. We
		 * also set the type to level low since that's what PCI
		 * interrupts are. If your platform does differently, then
		 * either provide a proper interrupt tree or don't use this
		 * function.
		 */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
			return -1;
		/* Pin 0 means the function uses no interrupt at all. */
		if (pin == 0)
			return -1;
		/* 0xff is the "unknown/unrouted" line value. */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
		    line == 0xff) {
			return -1;
		}
		DBG(" -> no map ! Using irq line %d from PCI config\n", line);

		/* NULL controller: map through the default irq host. */
		virq = irq_create_mapping(NULL, line);
		if (virq != NO_IRQ)
			set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
	} else {
		DBG(" -> got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
		    oirq.size, oirq.specifier[0], oirq.specifier[1],
		    oirq.controller->full_name);

		virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
					     oirq.size);
	}
	if(virq == NO_IRQ) {
		DBG(" -> failed to map !\n");
		return -1;
	}

	DBG(" -> mapped to linux irq %d\n", virq);

	/* Record the Linux virtual irq on the device. */
	pci_dev->irq = virq;

	return 0;
}
EXPORT_SYMBOL(pci_read_irq_line);

/*
 * pci_resource_to_user - translate a device resource into the values
 * exposed to userland (sysfs/proc).  I/O resources are rebased to the
 * hose's physical I/O offset; MMIO is passed through fully fixed up
 * (see the rationale below).  Returns with *start/*end untouched if
 * the device has no host bridge.
 */
void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	resource_size_t offset = 0;

	if (hose == NULL)
		return;

	if (rsrc->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - pci_io_base;

	/* We pass a fully fixed up address to userland for MMIO instead of
	 * a BAR value because X is lame and expects to be able to use that
	 * to pass to /dev/mem !
	 *
	 * That means that we'll have potentially 64 bits values where some
	 * userland apps only expect 32 (like X itself since it thinks only
	 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
	 * 32 bits CHRPs :-(
	 *
	 * Hopefully, the sysfs interface is immune to that gunk. Once X
	 * has been fixed (and the fix spread enough), we can re-enable the
	 * 2 lines below and pass down a BAR value to userland. In that case
	 * we'll also have to re-enable the matching code in
	 * __pci_mmap_make_offset().
	 *
	 * BenH.
 */

	*start = rsrc->start - offset;
	*end = rsrc->end - offset;
}

/*
 * pci_find_hose_for_OF_device - find the PCI controller owning a
 * device-tree node by walking from @node up through its parents and
 * matching against each hose's arch_data.  Returns NULL when Open
 * Firmware is absent or no hose matches.
 */
struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
{
	if (!have_of)
		return NULL;
	while(node) {
		struct pci_controller *hose, *tmp;
		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
			if (hose->arch_data == node)
				return hose;
		node = node->parent;
	}
	return NULL;
}

/*
 * pci_address_to_pio - convert a physical address inside some hose's
 * I/O window into a port number relative to pci_io_base.
 *
 * NOTE(review): the failure sentinel is (unsigned int)-1 (0xffffffff)
 * while the return type is unsigned long; callers comparing against
 * (unsigned long)-1 on 64-bit would never match — confirm what the
 * callers actually test for.
 */
unsigned long pci_address_to_pio(phys_addr_t address)
{
	struct pci_controller *hose, *tmp;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		if (address >= hose->io_base_phys &&
		    address < (hose->io_base_phys + hose->pci_io_size)) {
			unsigned long base =
				(unsigned long)hose->io_base_virt - pci_io_base;
			return base + (address - hose->io_base_phys);
		}
	}
	return (unsigned int)-1;
}
EXPORT_SYMBOL_GPL(pci_address_to_pio);


/* Selector values for the sys_pciconfig_iobase() "which" argument. */
#define IOBASE_BRIDGE_NUMBER	0
#define IOBASE_MEMORY		1
#define IOBASE_IO		2
#define IOBASE_ISA_IO		3
#define IOBASE_ISA_MEM		4

/*
 * sys_pciconfig_iobase - legacy syscall returning per-bus base values
 * (bridge number, memory offset, I/O base, ISA I/O base) for userland
 * such as X servers.  Returns the requested base, -ENODEV if no root
 * bus covers @in_bus, -EINVAL for ISA memory, -EOPNOTSUPP otherwise.
 */
long sys_pciconfig_iobase(long which, unsigned long in_bus,
			  unsigned long in_devfn)
{
	struct pci_controller* hose;
	struct list_head *ln;
	struct pci_bus *bus = NULL;
	struct device_node *hose_node;

	/* Argh ! Please forgive me for that hack, but that's the
	 * simplest way to get existing XFree to not lockup on some
	 * G5 machines... So when something asks for bus 0 io base
	 * (bus 0 is HT root), we return the AGP one instead.
	 */
	if (machine_is_compatible("MacRISC4"))
		if (in_bus == 0)
			in_bus = 0xf0;

	/* That syscall isn't quite compatible with PCI domains, but it's
	 * used on pre-domains setup.
We return the first match
	 */

	/* Scan the root buses for the first whose number range covers
	 * in_bus.
	 */
	for (ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) {
		bus = pci_bus_b(ln);
		if (in_bus >= bus->number && in_bus <= bus->subordinate)
			break;
		bus = NULL;
	}
	if (bus == NULL || bus->sysdata == NULL)
		return -ENODEV;

	/* sysdata of a root bus is its device-tree node; go from there
	 * to the owning host bridge.
	 */
	hose_node = (struct device_node *)bus->sysdata;
	hose = PCI_DN(hose_node)->phb;

	switch (which) {
	case IOBASE_BRIDGE_NUMBER:
		return (long)hose->first_busno;
	case IOBASE_MEMORY:
		return (long)hose->pci_mem_offset;
	case IOBASE_IO:
		return (long)hose->io_base_phys;
	case IOBASE_ISA_IO:
		return (long)isa_io_base;
	case IOBASE_ISA_MEM:
		/* ISA memory base is not provided on this platform. */
		return -EINVAL;
	}

	return -EOPNOTSUPP;
}

#ifdef CONFIG_NUMA
/*
 * pcibus_to_node - return the NUMA node of the host bridge owning
 * @bus (stored in the controller at probe time).
 */
int pcibus_to_node(struct pci_bus *bus)
{
	struct pci_controller *phb = pci_bus_to_host(bus);
	return phb->node;
}
EXPORT_SYMBOL(pcibus_to_node);
#endif