/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <asm/setup.h>
#include "pci.h"

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

/* Global lower bound (ms) applied to every device's D3hot settle delay */
unsigned int pci_pm_d3_delay;

/*
 * Sleep long enough for a D3 transition to settle: the larger of the
 * device's own d3_delay and the global pci_pm_d3_delay minimum.
 */
static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.
 * Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus* bus)
{
	struct list_head *tmp;
	unsigned char max, n;

	max = bus->subordinate;
	/* Recurse into every child bus and keep the largest bus number seen */
	list_for_each(tmp, &bus->children) {
		n = pci_bus_max_busnr(pci_bus_b(tmp));
		if(n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
/*
 * pci_ioremap_bar - map a memory BAR into kernel virtual address space.
 * Returns NULL (with a WARN) if @bar is not a memory resource.
 */
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_nocache(pci_resource_start(pdev, bar),
			       pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif


/* Cap list walk budget: guards against looping on a malformed list */
#define PCI_FIND_CAP_TTL	48

/*
 * Walk the conventional capability list starting at @pos looking for @cap.
 * @ttl is decremented per hop so a corrupted (cyclic) list terminates.
 * Returns the config-space offset of the capability, or 0 if not found.
 */
static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)
			break;
		/* capability pointers are dword aligned; low bits reserved */
		pos &= ~3;
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}

/* Same as above with a fresh TTL budget */
static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

/*
 * pci_find_next_capability - continue a capability search after @pos.
 * Returns the offset of the next matching capability, or 0.
 */
int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

/*
 * Return the offset of the capability-list head pointer for the given
 * header type, or 0 if the device has no capability list (per PCI_STATUS).
 */
static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	default:
		return 0;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
213 * 214 * Returns the address of the requested capability structure within the 215 * device's PCI configuration space or 0 in case the device does not 216 * support it. 217 */ 218int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap) 219{ 220 int pos; 221 u8 hdr_type; 222 223 pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type); 224 225 pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f); 226 if (pos) 227 pos = __pci_find_next_cap(bus, devfn, pos, cap); 228 229 return pos; 230} 231 232/** 233 * pci_find_ext_capability - Find an extended capability 234 * @dev: PCI device to query 235 * @cap: capability code 236 * 237 * Returns the address of the requested extended capability structure 238 * within the device's PCI configuration space or 0 if the device does 239 * not support it. Possible values for @cap: 240 * 241 * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting 242 * %PCI_EXT_CAP_ID_VC Virtual Channel 243 * %PCI_EXT_CAP_ID_DSN Device Serial Number 244 * %PCI_EXT_CAP_ID_PWR Power Budgeting 245 */ 246int pci_find_ext_capability(struct pci_dev *dev, int cap) 247{ 248 u32 header; 249 int ttl; 250 int pos = PCI_CFG_SPACE_SIZE; 251 252 /* minimum 8 bytes per capability */ 253 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; 254 255 if (dev->cfg_size <= PCI_CFG_SPACE_SIZE) 256 return 0; 257 258 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) 259 return 0; 260 261 /* 262 * If we have no capabilities, this is indicated by cap ID, 263 * cap version and next pointer all being 0. 
264 */ 265 if (header == 0) 266 return 0; 267 268 while (ttl-- > 0) { 269 if (PCI_EXT_CAP_ID(header) == cap) 270 return pos; 271 272 pos = PCI_EXT_CAP_NEXT(header); 273 if (pos < PCI_CFG_SPACE_SIZE) 274 break; 275 276 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL) 277 break; 278 } 279 280 return 0; 281} 282EXPORT_SYMBOL_GPL(pci_find_ext_capability); 283 284/** 285 * pci_bus_find_ext_capability - find an extended capability 286 * @bus: the PCI bus to query 287 * @devfn: PCI device to query 288 * @cap: capability code 289 * 290 * Like pci_find_ext_capability() but works for pci devices that do not have a 291 * pci_dev structure set up yet. 292 * 293 * Returns the address of the requested capability structure within the 294 * device's PCI configuration space or 0 in case the device does not 295 * support it. 296 */ 297int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn, 298 int cap) 299{ 300 u32 header; 301 int ttl; 302 int pos = PCI_CFG_SPACE_SIZE; 303 304 /* minimum 8 bytes per capability */ 305 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8; 306 307 if (!pci_bus_read_config_dword(bus, devfn, pos, &header)) 308 return 0; 309 if (header == 0xffffffff || header == 0) 310 return 0; 311 312 while (ttl-- > 0) { 313 if (PCI_EXT_CAP_ID(header) == cap) 314 return pos; 315 316 pos = PCI_EXT_CAP_NEXT(header); 317 if (pos < PCI_CFG_SPACE_SIZE) 318 break; 319 320 if (!pci_bus_read_config_dword(bus, devfn, pos, &header)) 321 break; 322 } 323 324 return 0; 325} 326 327static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap) 328{ 329 int rc, ttl = PCI_FIND_CAP_TTL; 330 u8 cap, mask; 331 332 if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST) 333 mask = HT_3BIT_CAP_MASK; 334 else 335 mask = HT_5BIT_CAP_MASK; 336 337 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos, 338 PCI_CAP_ID_HT, &ttl); 339 while (pos) { 340 rc = pci_read_config_byte(dev, pos + 3, &cap); 341 if (rc != PCIBIOS_SUCCESSFUL) 342 
return 0; 343 344 if ((cap & mask) == ht_cap) 345 return pos; 346 347 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, 348 pos + PCI_CAP_LIST_NEXT, 349 PCI_CAP_ID_HT, &ttl); 350 } 351 352 return 0; 353} 354/** 355 * pci_find_next_ht_capability - query a device's Hypertransport capabilities 356 * @dev: PCI device to query 357 * @pos: Position from which to continue searching 358 * @ht_cap: Hypertransport capability code 359 * 360 * To be used in conjunction with pci_find_ht_capability() to search for 361 * all capabilities matching @ht_cap. @pos should always be a value returned 362 * from pci_find_ht_capability(). 363 * 364 * NB. To be 100% safe against broken PCI devices, the caller should take 365 * steps to avoid an infinite loop. 366 */ 367int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap) 368{ 369 return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap); 370} 371EXPORT_SYMBOL_GPL(pci_find_next_ht_capability); 372 373/** 374 * pci_find_ht_capability - query a device's Hypertransport capabilities 375 * @dev: PCI device to query 376 * @ht_cap: Hypertransport capability code 377 * 378 * Tell if a device supports a given Hypertransport capability. 379 * Returns an address within the device's PCI configuration space 380 * or 0 in case the device does not support the request capability. 381 * The address points to the PCI capability, of type PCI_CAP_ID_HT, 382 * which has a Hypertransport capability matching @ht_cap. 
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 *  For given resource region of given device, return the resource
 *  region of parent bus the given region is contained in or where
 *  it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL, *r;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
		if (r->flags & IORESOURCE_PREFETCH)
			continue;
		/* .. but we can put a prefetchable resource inside a non-prefetchable one */
		if (!best)
			best = r;
	}
	return best;
}

/**
 * pci_restore_bars - restore a devices BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void
pci_restore_bars(struct pci_dev *dev)
{
	int i;

	/* only the standard BARs; bridge windows are not touched here */
	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

/* Platform (e.g. ACPI) power-management backend, registered once at boot */
static struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
	/*
	 * NOTE(review): ->run_wake is not validated here, yet
	 * platform_pci_run_wake() calls it unconditionally once
	 * pci_platform_pm is set -- confirm every registered ops
	 * structure provides it.
	 */
	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
	    || !ops->sleep_wake || !ops->can_wakeup)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

/* The helpers below degrade gracefully when no platform backend is set */

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
						pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}

static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/* Validate current state:
	 * Can enter D0 from any state, but if we can only go deeper
	 * to sleep if we're already in a low power state
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition "
			"(from state %d to %d)\n", dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	    || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		/* without NO_SOFT_RESET the device may lose its BARs here */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		    && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	/* read back: the device may have refused the transition */
	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, "
			"currently in D%d\n", dev->current_state);

	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read PCI power state of given device from its
 *                            PCI PM registers and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		/* no PM capability: trust the caller-supplied state */
		dev->current_state = state;
	}
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else {
		error = -ENODEV;
		/* Fall back to PCI_D0 if native PM is not supported */
		if (!dev->pm_cap)
			dev->current_state = PCI_D0;
	}

	return error;
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	/* only D0 needs the platform involved *before* the native change */
	if (state == PCI_D0)
		pci_platform_power_transition(dev, PCI_D0);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	return state >= PCI_D0 ?
			pci_platform_power_transition(dev, state) : -EINVAL;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3hot)
		state = PCI_D3hot;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	__pci_start_power_transition(dev, state);

	/* This device is quirked not to be put into D3, so
	   don't put it in D3 */
	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	error = pci_raw_set_power_state(dev, state);

	/* the platform side may succeed even if the native side failed */
	if (!__pci_complete_power_transition(dev, state))
		error = 0;

	return error;
}

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system.
 * This is the value
 * that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */

pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	/* devices without a PM capability can only be left in D0 */
	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	/* let the platform firmware decide first, if it can */
	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}

EXPORT_SYMBOL(pci_choose_state);

/* Number of 16-bit PCIe control registers saved below (worst case) */
#define PCI_EXP_SAVE_REGS	7

/*
 * Which PCIe control registers exist depends on the capability version
 * and the port type; these predicates mirror the PCIe spec rules.
 */
#define pcie_cap_has_devctl(type, flags)	1
#define pcie_cap_has_lnkctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_ENDPOINT ||	\
		  type == PCI_EXP_TYPE_LEG_END))
#define pcie_cap_has_sltctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 ((type == PCI_EXP_TYPE_ROOT_PORT) ||	\
		  (type == PCI_EXP_TYPE_DOWNSTREAM &&	\
		   (flags & PCI_EXP_FLAGS_SLOT))))
#define pcie_cap_has_rtctl(type, flags)			\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_RC_EC))
#define pcie_cap_has_devctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_lnkctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_sltctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)

/*
 * Save the PCIe control registers into the pre-allocated save buffer.
 * The save order must match the restore order in pci_restore_pcie_state().
 */
static int pci_save_pcie_state(struct pci_dev *dev)
{
	int pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

/*
 * Write back the PCIe control registers saved by pci_save_pcie_state();
 * the pcie_cap_has_* predicates must select the same register sequence.
 */
static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
}


/* Save the single PCI-X command register, if the capability is present */
static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (pos <= 0)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD, (u16 *)save_state->data);

	return 0;
}

/* Restore the PCI-X command register saved by pci_save_pcix_state() */
static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}


/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: - PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;
	if ((i = pci_save_pcie_state(dev)) != 0)
		return i;
	if ((i = pci_save_pcix_state(dev)) != 0)
		return i;
	return 0;
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: - PCI device that we're dealing with
 */
int
pci_restore_state(struct pci_dev *dev)
{
	int i;
	u32 val;

	if (!dev->state_saved)
		return 0;

	/* PCI Express register must be restored first */
	pci_restore_pcie_state(dev);

	/*
	 * The Base Address register should be programmed before the command
	 * register(s), hence the downward loop (offset 0x3c .. 0x00).
	 */
	for (i = 15; i >= 0; i--) {
		pci_read_config_dword(dev, i * 4, &val);
		if (val != dev->saved_config_space[i]) {
			dev_printk(KERN_DEBUG, &dev->dev, "restoring config "
				"space at offset %#x (was %#x, writing %#x)\n",
				i, val, (int)dev->saved_config_space[i]);
			pci_write_config_dword(dev, i * 4,
				dev->saved_config_space[i]);
		}
	}
	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;

	return 0;
}

/*
 * Low-level enable: power up to D0, let the arch enable resources,
 * then run the enable-time quirks.
 */
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	/* -EIO means "no PM capability", which is fine for enabling */
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Note this function is a backend of pci_default_resume and is not supposed
 * to be called by normal code, write proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}

/*
 * Reference-counted enable: only the first caller actually enables the
 * device; @flags selects which resource types (IO/MEM) to enable.
 */
static int __pci_enable_device_flags(struct pci_dev *dev,
				     resource_size_t flags)
{
	int err;
	int i, bars = 0;

	if (atomic_add_return(1, &dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	/* build a bitmask of the BARs matching the requested flags */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable I/O resources. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_IO);
}

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable Memory resources. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM);
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable I/O and memory. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 *
 *  Note we don't actually enable the device many times if we call
 *  this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}

/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;		/* pci_enable_device() succeeded */
	unsigned int pinned:1;		/* don't disable on driver detach */
	unsigned int orig_intx:1;	/* INTx state to restore */
	unsigned int restore_intx:1;	/* restore INTx on release? */
	u32 region_mask;		/* BARs claimed via pci_request_region */
};

/* devres release callback: undo everything the managed API acquired */
static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

/* Find the device's pci_devres, allocating it on first use */
static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

/* Lookup-only variant: NULL when the device isn't managed */
static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
1090 */ 1091int pcim_enable_device(struct pci_dev *pdev) 1092{ 1093 struct pci_devres *dr; 1094 int rc; 1095 1096 dr = get_pci_dr(pdev); 1097 if (unlikely(!dr)) 1098 return -ENOMEM; 1099 if (dr->enabled) 1100 return 0; 1101 1102 rc = pci_enable_device(pdev); 1103 if (!rc) { 1104 pdev->is_managed = 1; 1105 dr->enabled = 1; 1106 } 1107 return rc; 1108} 1109 1110/** 1111 * pcim_pin_device - Pin managed PCI device 1112 * @pdev: PCI device to pin 1113 * 1114 * Pin managed PCI device @pdev. Pinned device won't be disabled on 1115 * driver detach. @pdev must have been enabled with 1116 * pcim_enable_device(). 1117 */ 1118void pcim_pin_device(struct pci_dev *pdev) 1119{ 1120 struct pci_devres *dr; 1121 1122 dr = find_pci_dr(pdev); 1123 WARN_ON(!dr || !dr->enabled); 1124 if (dr) 1125 dr->pinned = 1; 1126} 1127 1128/** 1129 * pcibios_disable_device - disable arch specific PCI resources for device dev 1130 * @dev: the PCI device to disable 1131 * 1132 * Disables architecture specific PCI resources for the device. This 1133 * is the default implementation. Architecture implementations can 1134 * override this. 1135 */ 1136void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {} 1137 1138static void do_pci_disable_device(struct pci_dev *dev) 1139{ 1140 u16 pci_command; 1141 1142 pci_read_config_word(dev, PCI_COMMAND, &pci_command); 1143 if (pci_command & PCI_COMMAND_MASTER) { 1144 pci_command &= ~PCI_COMMAND_MASTER; 1145 pci_write_config_word(dev, PCI_COMMAND, pci_command); 1146 } 1147 1148 pcibios_disable_device(dev); 1149} 1150 1151/** 1152 * pci_disable_enabled_device - Disable device without updating enable_cnt 1153 * @dev: PCI device to disable 1154 * 1155 * NOTE: This function is a backend of PCI power management routines and is 1156 * not supposed to be called drivers. 
1157 */ 1158void pci_disable_enabled_device(struct pci_dev *dev) 1159{ 1160 if (pci_is_enabled(dev)) 1161 do_pci_disable_device(dev); 1162} 1163 1164/** 1165 * pci_disable_device - Disable PCI device after use 1166 * @dev: PCI device to be disabled 1167 * 1168 * Signal to the system that the PCI device is not in use by the system 1169 * anymore. This only involves disabling PCI bus-mastering, if active. 1170 * 1171 * Note we don't actually disable the device until all callers of 1172 * pci_enable_device() have called pci_disable_device(). 1173 */ 1174void 1175pci_disable_device(struct pci_dev *dev) 1176{ 1177 struct pci_devres *dr; 1178 1179 dr = find_pci_dr(dev); 1180 if (dr) 1181 dr->enabled = 0; 1182 1183 if (atomic_sub_return(1, &dev->enable_cnt) != 0) 1184 return; 1185 1186 do_pci_disable_device(dev); 1187 1188 dev->is_busmaster = 0; 1189} 1190 1191/** 1192 * pcibios_set_pcie_reset_state - set reset state for device dev 1193 * @dev: the PCIe device reset 1194 * @state: Reset state to enter into 1195 * 1196 * 1197 * Sets the PCIe reset state for the device. This is the default 1198 * implementation. Architecture implementations can override this. 1199 */ 1200int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev, 1201 enum pcie_reset_state state) 1202{ 1203 return -EINVAL; 1204} 1205 1206/** 1207 * pci_set_pcie_reset_state - set reset state for device dev 1208 * @dev: the PCIe device reset 1209 * @state: Reset state to enter into 1210 * 1211 * 1212 * Sets the PCI reset state for the device. 1213 */ 1214int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) 1215{ 1216 return pcibios_set_pcie_reset_state(dev, state); 1217} 1218 1219/** 1220 * pci_check_pme_status - Check if given device has generated PME. 1221 * @dev: Device to check. 1222 * 1223 * Check the PME status of the device and if set, clear it and clear PME enable 1224 * (if set). 
Return 'true' if PME status and PME enable were both set or 1225 * 'false' otherwise. 1226 */ 1227bool pci_check_pme_status(struct pci_dev *dev) 1228{ 1229 int pmcsr_pos; 1230 u16 pmcsr; 1231 bool ret = false; 1232 1233 if (!dev->pm_cap) 1234 return false; 1235 1236 pmcsr_pos = dev->pm_cap + PCI_PM_CTRL; 1237 pci_read_config_word(dev, pmcsr_pos, &pmcsr); 1238 if (!(pmcsr & PCI_PM_CTRL_PME_STATUS)) 1239 return false; 1240 1241 /* Clear PME status. */ 1242 pmcsr |= PCI_PM_CTRL_PME_STATUS; 1243 if (pmcsr & PCI_PM_CTRL_PME_ENABLE) { 1244 /* Disable PME to avoid interrupt flood. */ 1245 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE; 1246 ret = true; 1247 } 1248 1249 pci_write_config_word(dev, pmcsr_pos, pmcsr); 1250 1251 return ret; 1252} 1253 1254/* 1255 * Time to wait before the system can be put into a sleep state after reporting 1256 * a wakeup event signaled by a PCI device. 1257 */ 1258#define PCI_WAKEUP_COOLDOWN 100 1259 1260/** 1261 * pci_wakeup_event - Report a wakeup event related to a given PCI device. 1262 * @dev: Device to report the wakeup event for. 1263 */ 1264void pci_wakeup_event(struct pci_dev *dev) 1265{ 1266 if (device_may_wakeup(&dev->dev)) 1267 pm_wakeup_event(&dev->dev, PCI_WAKEUP_COOLDOWN); 1268} 1269 1270/** 1271 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set. 1272 * @dev: Device to handle. 1273 * @ign: Ignored. 1274 * 1275 * Check if @dev has generated PME and queue a resume request for it in that 1276 * case. 1277 */ 1278static int pci_pme_wakeup(struct pci_dev *dev, void *ign) 1279{ 1280 if (pci_check_pme_status(dev)) { 1281 pm_request_resume(&dev->dev); 1282 pci_wakeup_event(dev); 1283 } 1284 return 0; 1285} 1286 1287/** 1288 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary. 1289 * @bus: Top bus of the subtree to walk. 
1290 */ 1291void pci_pme_wakeup_bus(struct pci_bus *bus) 1292{ 1293 if (bus) 1294 pci_walk_bus(bus, pci_pme_wakeup, NULL); 1295} 1296 1297/** 1298 * pci_pme_capable - check the capability of PCI device to generate PME# 1299 * @dev: PCI device to handle. 1300 * @state: PCI state from which device will issue PME#. 1301 */ 1302bool pci_pme_capable(struct pci_dev *dev, pci_power_t state) 1303{ 1304 if (!dev->pm_cap) 1305 return false; 1306 1307 return !!(dev->pme_support & (1 << state)); 1308} 1309 1310/** 1311 * pci_pme_active - enable or disable PCI device's PME# function 1312 * @dev: PCI device to handle. 1313 * @enable: 'true' to enable PME# generation; 'false' to disable it. 1314 * 1315 * The caller must verify that the device is capable of generating PME# before 1316 * calling this function with @enable equal to 'true'. 1317 */ 1318void pci_pme_active(struct pci_dev *dev, bool enable) 1319{ 1320 u16 pmcsr; 1321 1322 if (!dev->pm_cap) 1323 return; 1324 1325 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); 1326 /* Clear PME_Status by writing 1 to it and enable PME# */ 1327 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE; 1328 if (!enable) 1329 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE; 1330 1331 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); 1332 1333 dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n", 1334 enable ? "enabled" : "disabled"); 1335} 1336 1337/** 1338 * __pci_enable_wake - enable PCI device as wakeup event source 1339 * @dev: PCI device affected 1340 * @state: PCI state from which device will issue wakeup events 1341 * @runtime: True if the events are to be generated at run time 1342 * @enable: True to enable event generation; false to disable 1343 * 1344 * This enables the device as a wakeup event source, or disables it. 1345 * When such events involves platform-specific hooks, those hooks are 1346 * called automatically by this routine. 
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
		      bool runtime, bool enable)
{
	int ret = 0;

	/* System (non-runtime) wakeup needs userspace/driver opt-in. */
	if (enable && !runtime && !device_may_wakeup(&dev->dev))
		return -EINVAL;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable.  To disable wake-up we call the platform first, for symmetry.
	 * NOTE(review): this ordering is contractual — do not reorder the
	 * pci_pme_active() and platform_pci_*_wake() calls below.
	 */

	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;	/* no native PME from @state; depend on platform */
		error = runtime ? platform_pci_run_wake(dev, true) :
					platform_pci_sleep_wake(dev, true);
		if (ret)
			ret = error;	/* native path unusable: report platform result */
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		/* Disable: platform hook first, then native PME (see above). */
		if (runtime)
			platform_pci_run_wake(dev, false);
		else
			platform_pci_sleep_wake(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}
EXPORT_SYMBOL(__pci_enable_wake);

/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns error code if the device is not capable of
 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
 * enable wake-up power for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	/* Prefer D3cold when the device can signal PME# from it. */
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}

/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
pci_power_t pci_target_state(struct pci_dev *dev)
{
	pci_power_t target_state = PCI_D3hot;	/* default when nothing better is known */

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to choose the target state of the device
		 * and enable wake-up from this state if supported.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;	/* keep the D3hot default */
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
			/* fall through: D1/D2 allowed, accept platform choice */
		default:
			target_state = state;
		}
	} else if (!dev->pm_cap) {
		/* No PM capability at all: the only safe state is D0. */
		target_state = PCI_D0;
	} else if (device_may_wakeup(&dev->dev)) {
		/*
		 * Find the deepest state from which the device can generate
		 * wake-up events, make it the target state and enable device
		 * to generate PME#.
		 */
		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}

/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	/* Arm wakeup (if allowed) before entering the low-power state. */
	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);	/* roll back wake setup */

	return error;
}

/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
1501 */ 1502int pci_back_from_sleep(struct pci_dev *dev) 1503{ 1504 pci_enable_wake(dev, PCI_D0, false); 1505 return pci_set_power_state(dev, PCI_D0); 1506} 1507 1508/** 1509 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend. 1510 * @dev: PCI device being suspended. 1511 * 1512 * Prepare @dev to generate wake-up events at run time and put it into a low 1513 * power state. 1514 */ 1515int pci_finish_runtime_suspend(struct pci_dev *dev) 1516{ 1517 pci_power_t target_state = pci_target_state(dev); 1518 int error; 1519 1520 if (target_state == PCI_POWER_ERROR) 1521 return -EIO; 1522 1523 __pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev)); 1524 1525 error = pci_set_power_state(dev, target_state); 1526 1527 if (error) 1528 __pci_enable_wake(dev, target_state, true, false); 1529 1530 return error; 1531} 1532 1533/** 1534 * pci_dev_run_wake - Check if device can generate run-time wake-up events. 1535 * @dev: Device to check. 1536 * 1537 * Return true if the device itself is cabable of generating wake-up events 1538 * (through the platform or using the native PCIe PME) or if the device supports 1539 * PME and one of its upstream bridges can generate wake-up events. 1540 */ 1541bool pci_dev_run_wake(struct pci_dev *dev) 1542{ 1543 struct pci_bus *bus = dev->bus; 1544 1545 if (device_run_wake(&dev->dev)) 1546 return true; 1547 1548 if (!dev->pme_support) 1549 return false; 1550 1551 while (bus->parent) { 1552 struct pci_dev *bridge = bus->self; 1553 1554 if (device_run_wake(&bridge->dev)) 1555 return true; 1556 1557 bus = bus->parent; 1558 } 1559 1560 /* We have reached the root bus. */ 1561 if (bus->bridge) 1562 return device_run_wake(bus->bridge); 1563 1564 return false; 1565} 1566EXPORT_SYMBOL_GPL(pci_dev_run_wake); 1567 1568/** 1569 * pci_pm_init - Initialize PM functions of given PCI device 1570 * @dev: PCI device to handle. 
 */
void pci_pm_init(struct pci_dev *dev)
{
	int pm;
	u16 pmc;

	/* Block runtime PM until a driver opts in; allow async suspend. */
	pm_runtime_forbid(&dev->dev);
	device_enable_async_suspend(&dev->dev);
	dev->wakeup_prepared = false;

	dev->pm_cap = 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pm)
		return;
	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

	/* Only PM capability register versions up to 3 are understood here. */
	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
			pmc & PCI_PM_CAP_VER_MASK);
		return;
	}

	dev->pm_cap = pm;
	dev->d3_delay = PCI_PM_D3_WAIT;

	dev->d1_support = false;
	dev->d2_support = false;
	if (!pci_no_d1d2(dev)) {
		if (pmc & PCI_PM_CAP_D1)
			dev->d1_support = true;
		if (pmc & PCI_PM_CAP_D2)
			dev->d2_support = true;

		if (dev->d1_support || dev->d2_support)
			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
				   dev->d1_support ? " D1" : "",
				   dev->d2_support ? " D2" : "");
	}

	/* Record the states PME# can be signaled from, if any. */
	pmc &= PCI_PM_CAP_PME_MASK;
	if (pmc) {
		dev_printk(KERN_DEBUG, &dev->dev,
			 "PME# supported from%s%s%s%s%s\n",
			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
		/*
		 * Make device's PM flags reflect the wake-up capability, but
		 * let the user space enable it to wake up the system as needed.
		 */
		device_set_wakeup_capable(&dev->dev, true);
		/* Disable the PME# generation functionality */
		pci_pme_active(dev, false);
	} else {
		dev->pme_support = 0;
	}
}

/**
 * platform_pci_wakeup_init - init platform wakeup if present
 * @dev: PCI device
 *
 * Some devices don't have PCI PM caps but can still generate wakeup
 * events through platform methods (like ACPI events).  If @dev supports
 * platform wakeup events, set the device flag to indicate as much.  This
 * may be redundant if the device also supports PCI PM caps, but double
 * initialization should be safe in that case.
 */
void platform_pci_wakeup_init(struct pci_dev *dev)
{
	if (!platform_pci_can_wakeup(dev))
		return;

	device_set_wakeup_capable(&dev->dev, true);
	/* Start with platform wakeup disabled; drivers enable it on demand. */
	platform_pci_sleep_wake(dev, false);
}

/**
 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
 * @dev: the PCI device
 * @cap: the capability to allocate the buffer for
 * @size: requested size of the buffer
 *
 * Returns 0 if the device doesn't have @cap or the buffer was allocated,
 * -ENOMEM on allocation failure.
 */
static int pci_add_cap_save_buffer(
	struct pci_dev *dev, char cap, unsigned int size)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, cap);
	if (pos <= 0)
		return 0;	/* capability absent: nothing to save */

	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
	if (!save_state)
		return -ENOMEM;

	save_state->cap_nr = cap;
	pci_add_saved_cap(dev, save_state);

	return 0;
}

/**
 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
 * @dev: the PCI device
 */
void pci_allocate_cap_save_buffers(struct pci_dev *dev)
{
	int error;

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
					PCI_EXP_SAVE_REGS * sizeof(u16));
	if (error)
		dev_err(&dev->dev,
			"unable to preallocate PCI Express save buffer\n");

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
	if (error)
		dev_err(&dev->dev,
			"unable to preallocate PCI-X save buffer\n");
}

/**
 * pci_enable_ari - enable ARI forwarding if hardware support it
 * @dev: the PCI device
 *
 * ARI forwarding is enabled in the upstream bridge, not in @dev itself;
 * @dev must be function 0 of a PCIe device advertising the ARI extended
 * capability.
 */
void pci_enable_ari(struct pci_dev *dev)
{
	int pos;
	u32 cap;
	u16 ctrl;
	struct pci_dev *bridge;

	/* Only PCIe function 0 is considered. */
	if (!pci_is_pcie(dev) || dev->devfn)
		return;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
	if (!pos)
		return;

	bridge = dev->bus->self;
	if (!bridge || !pci_is_pcie(bridge))
		return;

	pos = pci_pcie_cap(bridge);
	if (!pos)
		return;

	/* The bridge must advertise ARI forwarding in Device Capabilities 2. */
	pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_DEVCAP2_ARI))
		return;

	pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
	ctrl |= PCI_EXP_DEVCTL2_ARI;
	pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);

	bridge->ari_enabled = 1;
}

/* Set when some subsystem (e.g. an IOMMU driver) asked for ACS. */
static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}

/**
 * pci_enable_acs - enable ACS if hardware support it
 * @dev: the PCI device
 *
 * Enables the Source Validation, Request Redirect, Completion Redirect
 * and Upstream Forwarding features that @dev's ACS capability advertises.
 */
void pci_enable_acs(struct pci_dev *dev)
{
	int pos;
	u16 cap;
	u16 ctrl;

	if (!pci_acs_enable)
		return;	/* nobody requested ACS */

	if (!pci_is_pcie(dev))
		return;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* Source Validation */
	ctrl |= (cap & PCI_ACS_SV);

	/* P2P Request Redirect */
	ctrl |= (cap & PCI_ACS_RR);

	/* P2P Completion Redirect */
	ctrl |= (cap & PCI_ACS_CR);

	/* Upstream Forwarding */
	ctrl |= (cap & PCI_ACS_UF);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}

/**
 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
 * @dev: the PCI device
 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device behind one level of bridge.  This is
 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
 * the PCI Express Base Specification, Revision 2.1)
 */
u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
{
	int slot;

	if (pci_ari_enabled(dev->bus))
		slot = 0;
	else
		slot = PCI_SLOT(dev->devfn);

	return (((pin - 1) + slot) % 4) + 1;
}

/*
 * Swizzle @dev's INTx pin up to the root bus; on return, *bridge is the
 * root-bus device and the swizzled pin (or -1 if @dev has no pin) is
 * returned.
 */
int
pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
	u8 pin;

	pin = dev->pin;
	if (!pin)
		return -1;

	while (!pci_is_root_bus(dev->bus)) {
		pin = pci_swizzle_interrupt_pin(dev, pin);
		dev = dev->bus->self;
	}
	*bridge = dev;
	return pin;
}

/**
 * pci_common_swizzle - swizzle INTx all the way to root bridge
 * @dev: the PCI device
 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
 * bridges all the way up to a PCI root bus.
1832 */ 1833u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp) 1834{ 1835 u8 pin = *pinp; 1836 1837 while (!pci_is_root_bus(dev->bus)) { 1838 pin = pci_swizzle_interrupt_pin(dev, pin); 1839 dev = dev->bus->self; 1840 } 1841 *pinp = pin; 1842 return PCI_SLOT(dev->devfn); 1843} 1844 1845/** 1846 * pci_release_region - Release a PCI bar 1847 * @pdev: PCI device whose resources were previously reserved by pci_request_region 1848 * @bar: BAR to release 1849 * 1850 * Releases the PCI I/O and memory resources previously reserved by a 1851 * successful call to pci_request_region. Call this function only 1852 * after all use of the PCI regions has ceased. 1853 */ 1854void pci_release_region(struct pci_dev *pdev, int bar) 1855{ 1856 struct pci_devres *dr; 1857 1858 if (pci_resource_len(pdev, bar) == 0) 1859 return; 1860 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) 1861 release_region(pci_resource_start(pdev, bar), 1862 pci_resource_len(pdev, bar)); 1863 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) 1864 release_mem_region(pci_resource_start(pdev, bar), 1865 pci_resource_len(pdev, bar)); 1866 1867 dr = find_pci_dr(pdev); 1868 if (dr) 1869 dr->region_mask &= ~(1 << bar); 1870} 1871 1872/** 1873 * __pci_request_region - Reserved PCI I/O and memory resource 1874 * @pdev: PCI device whose resources are to be reserved 1875 * @bar: BAR to be reserved 1876 * @res_name: Name to be associated with resource. 1877 * @exclusive: whether the region access is exclusive or not 1878 * 1879 * Mark the PCI region associated with PCI device @pdev BR @bar as 1880 * being reserved by owner @res_name. Do not access any 1881 * address inside the PCI regions unless this call returns 1882 * successfully. 1883 * 1884 * If @exclusive is set, then the region is marked so that userspace 1885 * is explicitly not allowed to map the resource via /dev/mem or 1886 * sysfs MMIO access. 1887 * 1888 * Returns 0 on success, or %EBUSY on error. 
A warning 1889 * message is also printed on failure. 1890 */ 1891static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name, 1892 int exclusive) 1893{ 1894 struct pci_devres *dr; 1895 1896 if (pci_resource_len(pdev, bar) == 0) 1897 return 0; 1898 1899 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) { 1900 if (!request_region(pci_resource_start(pdev, bar), 1901 pci_resource_len(pdev, bar), res_name)) 1902 goto err_out; 1903 } 1904 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) { 1905 if (!__request_mem_region(pci_resource_start(pdev, bar), 1906 pci_resource_len(pdev, bar), res_name, 1907 exclusive)) 1908 goto err_out; 1909 } 1910 1911 dr = find_pci_dr(pdev); 1912 if (dr) 1913 dr->region_mask |= 1 << bar; 1914 1915 return 0; 1916 1917err_out: 1918 dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar, 1919 &pdev->resource[bar]); 1920 return -EBUSY; 1921} 1922 1923/** 1924 * pci_request_region - Reserve PCI I/O and memory resource 1925 * @pdev: PCI device whose resources are to be reserved 1926 * @bar: BAR to be reserved 1927 * @res_name: Name to be associated with resource 1928 * 1929 * Mark the PCI region associated with PCI device @pdev BAR @bar as 1930 * being reserved by owner @res_name. Do not access any 1931 * address inside the PCI regions unless this call returns 1932 * successfully. 1933 * 1934 * Returns 0 on success, or %EBUSY on error. A warning 1935 * message is also printed on failure. 1936 */ 1937int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) 1938{ 1939 return __pci_request_region(pdev, bar, res_name, 0); 1940} 1941 1942/** 1943 * pci_request_region_exclusive - Reserved PCI I/O and memory resource 1944 * @pdev: PCI device whose resources are to be reserved 1945 * @bar: BAR to be reserved 1946 * @res_name: Name to be associated with resource. 1947 * 1948 * Mark the PCI region associated with PCI device @pdev BR @bar as 1949 * being reserved by owner @res_name. 
Do not access any 1950 * address inside the PCI regions unless this call returns 1951 * successfully. 1952 * 1953 * Returns 0 on success, or %EBUSY on error. A warning 1954 * message is also printed on failure. 1955 * 1956 * The key difference that _exclusive makes it that userspace is 1957 * explicitly not allowed to map the resource via /dev/mem or 1958 * sysfs. 1959 */ 1960int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name) 1961{ 1962 return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE); 1963} 1964/** 1965 * pci_release_selected_regions - Release selected PCI I/O and memory resources 1966 * @pdev: PCI device whose resources were previously reserved 1967 * @bars: Bitmask of BARs to be released 1968 * 1969 * Release selected PCI I/O and memory resources previously reserved. 1970 * Call this function only after all use of the PCI regions has ceased. 1971 */ 1972void pci_release_selected_regions(struct pci_dev *pdev, int bars) 1973{ 1974 int i; 1975 1976 for (i = 0; i < 6; i++) 1977 if (bars & (1 << i)) 1978 pci_release_region(pdev, i); 1979} 1980 1981int __pci_request_selected_regions(struct pci_dev *pdev, int bars, 1982 const char *res_name, int excl) 1983{ 1984 int i; 1985 1986 for (i = 0; i < 6; i++) 1987 if (bars & (1 << i)) 1988 if (__pci_request_region(pdev, i, res_name, excl)) 1989 goto err_out; 1990 return 0; 1991 1992err_out: 1993 while(--i >= 0) 1994 if (bars & (1 << i)) 1995 pci_release_region(pdev, i); 1996 1997 return -EBUSY; 1998} 1999 2000 2001/** 2002 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources 2003 * @pdev: PCI device whose resources are to be reserved 2004 * @bars: Bitmask of BARs to be requested 2005 * @res_name: Name to be associated with resource 2006 */ 2007int pci_request_selected_regions(struct pci_dev *pdev, int bars, 2008 const char *res_name) 2009{ 2010 return __pci_request_selected_regions(pdev, bars, res_name, 0); 2011} 2012 2013int 
pci_request_selected_regions_exclusive(struct pci_dev *pdev, 2014 int bars, const char *res_name) 2015{ 2016 return __pci_request_selected_regions(pdev, bars, res_name, 2017 IORESOURCE_EXCLUSIVE); 2018} 2019 2020/** 2021 * pci_release_regions - Release reserved PCI I/O and memory resources 2022 * @pdev: PCI device whose resources were previously reserved by pci_request_regions 2023 * 2024 * Releases all PCI I/O and memory resources previously reserved by a 2025 * successful call to pci_request_regions. Call this function only 2026 * after all use of the PCI regions has ceased. 2027 */ 2028 2029void pci_release_regions(struct pci_dev *pdev) 2030{ 2031 pci_release_selected_regions(pdev, (1 << 6) - 1); 2032} 2033 2034/** 2035 * pci_request_regions - Reserved PCI I/O and memory resources 2036 * @pdev: PCI device whose resources are to be reserved 2037 * @res_name: Name to be associated with resource. 2038 * 2039 * Mark all PCI regions associated with PCI device @pdev as 2040 * being reserved by owner @res_name. Do not access any 2041 * address inside the PCI regions unless this call returns 2042 * successfully. 2043 * 2044 * Returns 0 on success, or %EBUSY on error. A warning 2045 * message is also printed on failure. 2046 */ 2047int pci_request_regions(struct pci_dev *pdev, const char *res_name) 2048{ 2049 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name); 2050} 2051 2052/** 2053 * pci_request_regions_exclusive - Reserved PCI I/O and memory resources 2054 * @pdev: PCI device whose resources are to be reserved 2055 * @res_name: Name to be associated with resource. 2056 * 2057 * Mark all PCI regions associated with PCI device @pdev as 2058 * being reserved by owner @res_name. Do not access any 2059 * address inside the PCI regions unless this call returns 2060 * successfully. 2061 * 2062 * pci_request_regions_exclusive() will mark the region so that 2063 * /dev/mem and the sysfs MMIO access will not be allowed. 
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
	/* (1 << 6) - 1 selects all six standard BARs */
	return pci_request_selected_regions_exclusive(pdev,
					((1 << 6) - 1), res_name);
}

/*
 * Set or clear the Bus Master bit in PCI_COMMAND and cache the new state
 * in dev->is_busmaster.  The register is only written when the bit
 * actually changes.
 */
static void __pci_set_master(struct pci_dev *dev, bool enable)
{
	u16 old_cmd, cmd;

	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
	if (enable)
		cmd = old_cmd | PCI_COMMAND_MASTER;
	else
		cmd = old_cmd & ~PCI_COMMAND_MASTER;
	if (cmd != old_cmd) {
		dev_dbg(&dev->dev, "%s bus mastering\n",
			enable ? "enabling" : "disabling");
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	dev->is_busmaster = enable;
}

/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void pci_set_master(struct pci_dev *dev)
{
	__pci_set_master(dev, true);
	pcibios_set_master(dev);
}

/**
 * pci_clear_master - disables bus-mastering for device dev
 * @dev: the PCI device to disable
 */
void pci_clear_master(struct pci_dev *dev)
{
	__pci_set_master(dev, false);
}

/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function for pci_set_mwi.
 * Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_set_cacheline_size(struct pci_dev *dev)
{
	u8 cacheline_size;

	/* Nothing to program if the arch never established a line size */
	if (!pci_cache_line_size)
		return -EINVAL;

	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
	   equal to or multiple of the right value. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value. */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	/* Read it back. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	/* Register did not take the value (read-only or unsupported);
	 * note pci_cache_line_size is in 32-bit words, hence << 2. */
	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
		   "supported\n", pci_cache_line_size << 2);

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(pci_set_cacheline_size);

#ifdef PCI_DISABLE_MWI
/* Platform has MWI disabled: the helpers become successful no-ops. */
int pci_set_mwi(struct pci_dev *dev)
{
	return 0;
}

int pci_try_set_mwi(struct pci_dev *dev)
{
	return 0;
}

void pci_clear_mwi(struct pci_dev *dev)
{
}

#else

/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int
pci_set_mwi(struct pci_dev *dev)
{
	int rc;
	u16 cmd;

	/* MWI requires a valid cache line size to be programmed first */
	rc = pci_set_cacheline_size(dev);
	if (rc)
		return rc;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
		dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
		cmd |= PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}

	return 0;
}

/**
 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 * Callers are not required to check the return value.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_try_set_mwi(struct pci_dev *dev)
{
	int rc = pci_set_mwi(dev);
	return rc;
}

/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device
 */
void
pci_clear_mwi(struct pci_dev *dev)
{
	u16 cmd;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (cmd & PCI_COMMAND_INVALIDATE) {
		cmd &= ~PCI_COMMAND_INVALIDATE;
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
}
#endif /* ! PCI_DISABLE_MWI */

/**
 * pci_intx - enables/disables PCI INTx for device dev
 * @pdev: the PCI device to operate on
 * @enable: boolean: whether to enable or disable PCI INTx
 *
 * Enables/disables PCI INTx for device dev
 */
void
pci_intx(struct pci_dev *pdev, int enable)
{
	u16 pci_command, new;

	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);

	if (enable) {
		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
	} else {
		new = pci_command | PCI_COMMAND_INTX_DISABLE;
	}

	if (new != pci_command) {
		struct pci_devres *dr;

		pci_write_config_word(pdev, PCI_COMMAND, new);

		/* Remember the original INTx state so managed (pcim_*)
		 * devices can restore it on release. */
		dr = find_pci_dr(pdev);
		if (dr && !dr->restore_intx) {
			dr->restore_intx = 1;
			dr->orig_intx = !enable;
		}
	}
}

/**
 * pci_msi_off - disables any msi or msix capabilities
 * @dev: the PCI device to operate on
 *
 * If you want to use msi see pci_enable_msi and friends.
 * This is a lower level primitive that allows us to disable
 * msi operation at the device level.
 */
void pci_msi_off(struct pci_dev *dev)
{
	int pos;
	u16 control;

	/* Clear the enable bit in the MSI capability, if present */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
		control &= ~PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	}
	/* ...and likewise for MSI-X */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
		control &= ~PCI_MSIX_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
	}
}
EXPORT_SYMBOL_GPL(pci_msi_off);

/* Set the maximum DMA segment size; thin wrapper over the DMA layer. */
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
{
	return dma_set_max_seg_size(&dev->dev, size);
}
EXPORT_SYMBOL(pci_set_dma_max_seg_size);

/* Set the DMA segment boundary mask; thin wrapper over the DMA layer. */
int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
{
	return dma_set_seg_boundary(&dev->dev, mask);
}
EXPORT_SYMBOL(pci_set_dma_seg_boundary);

/*
 * PCIe Function Level Reset.  With @probe set, only reports whether the
 * device supports FLR (-ENOTTY if not); otherwise performs the reset.
 */
static int pcie_flr(struct pci_dev *dev, int probe)
{
	int i;
	int pos;
	u32 cap;
	u16 status, control;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return -ENOTTY;

	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
	if (!(cap & PCI_EXP_DEVCAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Wait for Transaction Pending bit clean */
	/* exponential backoff: 0, 100, 200, 400 ms between polls */
	for (i = 0; i < 4; i++) {
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
		if (!(status & PCI_EXP_DEVSTA_TRPND))
			goto clear;
	}

	dev_err(&dev->dev, "transaction is not cleared; "
			"proceeding with reset anyway\n");

clear:
	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
	control |= PCI_EXP_DEVCTL_BCR_FLR;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);

	/* give the device time to complete the reset */
	msleep(100);

	return 0;
}

/*
 * Advanced Features (AF) Function Level Reset, the conventional-PCI
 * counterpart of pcie_flr().  Same @probe semantics.
 */
static int pci_af_flr(struct pci_dev *dev, int probe)
{
	int i;
	int pos;
	u8 cap;
	u8 status;

	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
	if (!pos)
		return -ENOTTY;

	/* need both Transaction Pending reporting and FLR support */
	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Wait for Transaction Pending bit clean */
	for (i = 0; i < 4; i++) {
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
		if (!(status & PCI_AF_STATUS_TP))
			goto clear;
	}

	dev_err(&dev->dev, "transaction is not cleared; "
			"proceeding with reset anyway\n");

clear:
	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
	msleep(100);

	return 0;
}

/*
 * Reset the device by cycling it through D3hot and back to D0, if the
 * PM capability does not advertise No_Soft_Reset.
 */
static int pci_pm_reset(struct pci_dev *dev, int probe)
{
	u16 csr;

	if (!dev->pm_cap)
		return -ENOTTY;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
		return -ENOTTY;

	if (probe)
		return 0;

	/* only valid starting from D0 */
	if (dev->current_state != PCI_D0)
		return -EINVAL;

	csr &= ~PCI_PM_CTRL_STATE_MASK;
	csr |= PCI_D3hot;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
	pci_dev_d3_sleep(dev);

	csr &= ~PCI_PM_CTRL_STATE_MASK;
	csr |= PCI_D0;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
	pci_dev_d3_sleep(dev);

	return 0;
}

/*
 * Reset the device with a Secondary Bus Reset on its upstream bridge.
 * Only allowed when the device is alone on its bus (and is not itself
 * a bridge), because the reset hits everything behind the bridge.
 */
static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
{
	u16 ctrl;
	struct pci_dev *pdev;

	if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
		return -ENOTTY;

	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev != dev)
			return -ENOTTY;

	if (probe)
		return 0;

	pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
	msleep(100);

	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
	msleep(100);

	return 0;
}

/*
 * Try each reset method in order of preference until one claims the
 * device (i.e. returns something other than -ENOTTY).  When actually
 * resetting (!probe), user config access and the device lock are held
 * around the whole attempt.
 */
static int pci_dev_reset(struct pci_dev *dev, int probe)
{
	int rc;

	might_sleep();

	if (!probe) {
		pci_block_user_cfg_access(dev);
		/* block PM suspend, driver probe, etc. */
		device_lock(&dev->dev);
	}

	rc = pci_dev_specific_reset(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pcie_flr(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_af_flr(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_pm_reset(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_parent_bus_reset(dev, probe);
done:
	if (!probe) {
		device_unlock(&dev->dev);
		pci_unblock_user_cfg_access(dev);
	}

	return rc;
}

/**
 * __pci_reset_function - reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused when this function is called.
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
2493 */ 2494int __pci_reset_function(struct pci_dev *dev) 2495{ 2496 return pci_dev_reset(dev, 0); 2497} 2498EXPORT_SYMBOL_GPL(__pci_reset_function); 2499 2500/** 2501 * pci_probe_reset_function - check whether the device can be safely reset 2502 * @dev: PCI device to reset 2503 * 2504 * Some devices allow an individual function to be reset without affecting 2505 * other functions in the same device. The PCI device must be responsive 2506 * to PCI config space in order to use this function. 2507 * 2508 * Returns 0 if the device function can be reset or negative if the 2509 * device doesn't support resetting a single function. 2510 */ 2511int pci_probe_reset_function(struct pci_dev *dev) 2512{ 2513 return pci_dev_reset(dev, 1); 2514} 2515 2516/** 2517 * pci_reset_function - quiesce and reset a PCI device function 2518 * @dev: PCI device to reset 2519 * 2520 * Some devices allow an individual function to be reset without affecting 2521 * other functions in the same device. The PCI device must be responsive 2522 * to PCI config space in order to use this function. 2523 * 2524 * This function does not just reset the PCI portion of a device, but 2525 * clears all the state associated with the device. This function differs 2526 * from __pci_reset_function in that it saves and restores device state 2527 * over the reset. 2528 * 2529 * Returns 0 if the device function was successfully reset or negative if the 2530 * device doesn't support resetting a single function. 2531 */ 2532int pci_reset_function(struct pci_dev *dev) 2533{ 2534 int rc; 2535 2536 rc = pci_dev_reset(dev, 1); 2537 if (rc) 2538 return rc; 2539 2540 pci_save_state(dev); 2541 2542 /* 2543 * both INTx and MSI are disabled after the Interrupt Disable bit 2544 * is set and the Bus Master bit is cleared. 
2545 */ 2546 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE); 2547 2548 rc = pci_dev_reset(dev, 0); 2549 2550 pci_restore_state(dev); 2551 2552 return rc; 2553} 2554EXPORT_SYMBOL_GPL(pci_reset_function); 2555 2556/** 2557 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count 2558 * @dev: PCI device to query 2559 * 2560 * Returns mmrbc: maximum designed memory read count in bytes 2561 * or appropriate error value. 2562 */ 2563int pcix_get_max_mmrbc(struct pci_dev *dev) 2564{ 2565 int cap; 2566 u32 stat; 2567 2568 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); 2569 if (!cap) 2570 return -EINVAL; 2571 2572 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat)) 2573 return -EINVAL; 2574 2575 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21); 2576} 2577EXPORT_SYMBOL(pcix_get_max_mmrbc); 2578 2579/** 2580 * pcix_get_mmrbc - get PCI-X maximum memory read byte count 2581 * @dev: PCI device to query 2582 * 2583 * Returns mmrbc: maximum memory read count in bytes 2584 * or appropriate error value. 2585 */ 2586int pcix_get_mmrbc(struct pci_dev *dev) 2587{ 2588 int cap; 2589 u16 cmd; 2590 2591 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); 2592 if (!cap) 2593 return -EINVAL; 2594 2595 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd)) 2596 return -EINVAL; 2597 2598 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2); 2599} 2600EXPORT_SYMBOL(pcix_get_mmrbc); 2601 2602/** 2603 * pcix_set_mmrbc - set PCI-X maximum memory read byte count 2604 * @dev: PCI device to query 2605 * @mmrbc: maximum memory read count in bytes 2606 * valid values are 512, 1024, 2048, 4096 2607 * 2608 * If possible sets maximum memory read byte count, some bridges have erratas 2609 * that prevent this. 
 */
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
{
	int cap;
	u32 stat, v, o;
	u16 cmd;

	/* only 512, 1024, 2048 or 4096 bytes are representable */
	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
		return -EINVAL;

	/* convert byte count to the register's log2 encoding (512 -> 0) */
	v = ffs(mmrbc) - 10;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	/* must not exceed the device's designed maximum */
	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
		return -E2BIG;

	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
	if (o != v) {
		/* some buses carry an erratum flag forbidding increases */
		if (v > o && dev->bus &&
		   (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
			return -EIO;

		cmd &= ~PCI_X_CMD_MAX_READ;
		cmd |= v << 2;
		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL(pcix_set_mmrbc);

/**
 * pcie_get_readrq - get PCI Express read request size
 * @dev: PCI device to query
 *
 * Returns maximum memory read request in bytes
 * or appropriate error value.
2656 */ 2657int pcie_get_readrq(struct pci_dev *dev) 2658{ 2659 int ret, cap; 2660 u16 ctl; 2661 2662 cap = pci_pcie_cap(dev); 2663 if (!cap) 2664 return -EINVAL; 2665 2666 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); 2667 if (!ret) 2668 ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12); 2669 2670 return ret; 2671} 2672EXPORT_SYMBOL(pcie_get_readrq); 2673 2674/** 2675 * pcie_set_readrq - set PCI Express maximum memory read request 2676 * @dev: PCI device to query 2677 * @rq: maximum memory read count in bytes 2678 * valid values are 128, 256, 512, 1024, 2048, 4096 2679 * 2680 * If possible sets maximum read byte count 2681 */ 2682int pcie_set_readrq(struct pci_dev *dev, int rq) 2683{ 2684 int cap, err = -EINVAL; 2685 u16 ctl, v; 2686 2687 if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) 2688 goto out; 2689 2690 v = (ffs(rq) - 8) << 12; 2691 2692 cap = pci_pcie_cap(dev); 2693 if (!cap) 2694 goto out; 2695 2696 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl); 2697 if (err) 2698 goto out; 2699 2700 if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) { 2701 ctl &= ~PCI_EXP_DEVCTL_READRQ; 2702 ctl |= v; 2703 err = pci_write_config_dword(dev, cap + PCI_EXP_DEVCTL, ctl); 2704 } 2705 2706out: 2707 return err; 2708} 2709EXPORT_SYMBOL(pcie_set_readrq); 2710 2711/** 2712 * pci_select_bars - Make BAR mask from the type of resource 2713 * @dev: the PCI device for which BAR mask is made 2714 * @flags: resource type mask to be selected 2715 * 2716 * This helper routine makes bar mask from the type of resource. 
 */
int pci_select_bars(struct pci_dev *dev, unsigned long flags)
{
	int i, bars = 0;
	/* one mask bit per resource whose flags intersect @flags */
	for (i = 0; i < PCI_NUM_RESOURCES; i++)
		if (pci_resource_flags(dev, i) & flags)
			bars |= (1 << i);
	return bars;
}

/**
 * pci_resource_bar - get position of the BAR associated with a resource
 * @dev: the PCI device
 * @resno: the resource number
 * @type: the BAR type to be filled in
 *
 * Returns BAR position in config space, or 0 if the BAR is invalid.
 */
int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
{
	int reg;

	if (resno < PCI_ROM_RESOURCE) {
		/* standard BAR: fixed 4-byte stride from BAR 0 */
		*type = pci_bar_unknown;
		return PCI_BASE_ADDRESS_0 + 4 * resno;
	} else if (resno == PCI_ROM_RESOURCE) {
		*type = pci_bar_mem32;
		return dev->rom_base_reg;
	} else if (resno < PCI_BRIDGE_RESOURCES) {
		/* device specific resource */
		reg = pci_iov_resource_bar(dev, resno, type);
		if (reg)
			return reg;
	}

	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
	return 0;
}

/* Some architectures require additional programming to enable VGA */
static arch_set_vga_state_t arch_set_vga_state;

void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
	arch_set_vga_state = func;	/* NULL disables */
}

/* Invoke the arch hook, if one was registered; 0 means "no objection". */
static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, bool change_bridge)
{
	if (arch_set_vga_state)
		return arch_set_vga_state(dev, decode, command_bits,
						change_bridge);
	return 0;
}

/**
 * pci_set_vga_state - set VGA decode state on device and parents if requested
 * @dev: the PCI device
 * @decode: true = enable decoding, false = disable decoding
 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 * @change_bridge: traverse ancestors and change bridges
 */
int pci_set_vga_state(struct pci_dev *dev, bool decode,
		      unsigned int command_bits, bool change_bridge)
{
	struct pci_bus *bus;
	struct pci_dev *bridge;
	u16 cmd;
	int rc;

	WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));

	/* ARCH specific VGA enables */
	rc = pci_set_vga_state_arch(dev, decode, command_bits, change_bridge);
	if (rc)
		return rc;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (decode == true)
		cmd |= command_bits;
	else
		cmd &= ~command_bits;
	pci_write_config_word(dev, PCI_COMMAND, cmd);

	if (change_bridge == false)
		return 0;

	/* walk up to the root, flipping VGA forwarding on each bridge */
	bus = dev->bus;
	while (bus) {
		bridge = bus->self;
		if (bridge) {
			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
					     &cmd);
			if (decode == true)
				cmd |= PCI_BRIDGE_CTL_VGA;
			else
				cmd &= ~PCI_BRIDGE_CTL_VGA;
			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
					      cmd);
		}
		bus = bus->parent;
	}
	return 0;
}

#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
static DEFINE_SPINLOCK(resource_alignment_lock);

/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get
 *
 * RETURNS: Resource alignment if it is specified.
 *          Zero if it is not specified.
 */
resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
{
	int seg, bus, slot, func, align_order, count;
	resource_size_t align = 0;
	char *p;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	/* parse "[<order>@][<seg>:]<bus>:<slot>.<func>" entries separated
	 * by ';' or ',' until @dev matches or the string is exhausted */
	while (*p) {
		count = 0;
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
							p[count] == '@') {
			p += count + 1;
		} else {
			align_order = -1;
		}
		if (sscanf(p, "%x:%x:%x.%x%n",
			&seg, &bus, &slot, &func, &count) != 4) {
			/* no domain given: try bus:slot.func with seg 0 */
			seg = 0;
			if (sscanf(p, "%x:%x.%x%n",
					&bus, &slot, &func, &count) != 3) {
				/* Invalid format */
				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
					p);
				break;
			}
		}
		p += count;
		if (seg == pci_domain_nr(dev->bus) &&
			bus == dev->bus->number &&
			slot == PCI_SLOT(dev->devfn) &&
			func == PCI_FUNC(dev->devfn)) {
			if (align_order == -1) {
				align = PAGE_SIZE;
			} else {
				align = 1 << align_order;
			}
			/* Found */
			break;
		}
		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
	spin_unlock(&resource_alignment_lock);
	return align;
}

/**
 * pci_is_reassigndev - check if specified PCI is target device to reassign
 * @dev: the PCI device to check
 *
 * RETURNS: non-zero for PCI device is a target device to reassign,
 *          or zero is not.
 */
int pci_is_reassigndev(struct pci_dev *dev)
{
	return (pci_specified_resource_alignment(dev) != 0);
}

/* Replace the resource_alignment parameter string, truncating to fit. */
ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
{
	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
	spin_lock(&resource_alignment_lock);
	strncpy(resource_alignment_param, buf, count);
	resource_alignment_param[count] = '\0';	/* strncpy may not terminate */
	spin_unlock(&resource_alignment_lock);
	return count;
}

/* Copy the current resource_alignment parameter string into @buf. */
ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
{
	size_t count;
	spin_lock(&resource_alignment_lock);
	count = snprintf(buf, size, "%s", resource_alignment_param);
	spin_unlock(&resource_alignment_lock);
	return count;
}

/* sysfs read handler for /sys/bus/pci/resource_alignment */
static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
{
	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
}

/* sysfs write handler for /sys/bus/pci/resource_alignment */
static ssize_t pci_resource_alignment_store(struct bus_type *bus,
					const char *buf, size_t count)
{
	return pci_set_resource_alignment_param(buf, count);
}

BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
					pci_resource_alignment_store);

static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
					&bus_attr_resource_alignment);
}

late_initcall(pci_resource_alignment_sysfs_init);

static void __devinit pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}

/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 * @dev: The PCI device of the root bridge.
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff).  This is the default implementation.  Architecture
 * implementations can override this.
 */
int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
{
	return 1;
}

/* Arch-overridable cardbus fixup hook; the default does nothing. */
void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);

/* Parse the "pci=" kernel boot parameter (comma-separated options). */
static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		/* give the arch first refusal via pcibios_setup() */
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				pci_set_resource_alignment_param(str + 19,
					strlen(str + 19));
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mem_size = memparse(str + 10, &str);
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);

EXPORT_SYMBOL(pci_reenable_device);
EXPORT_SYMBOL(pci_enable_device_io);
EXPORT_SYMBOL(pci_enable_device_mem);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pcim_enable_device);
EXPORT_SYMBOL(pcim_pin_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_request_regions_exclusive);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_request_region_exclusive);
EXPORT_SYMBOL(pci_release_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions);
EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_clear_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_try_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL_GPL(pci_intx);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);
EXPORT_SYMBOL(pci_select_bars);

EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_pme_capable);
EXPORT_SYMBOL(pci_pme_active);
EXPORT_SYMBOL(pci_wake_from_d3);
EXPORT_SYMBOL(pci_target_state);
EXPORT_SYMBOL(pci_prepare_to_sleep);
EXPORT_SYMBOL(pci_back_from_sleep);
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);