1// Copyright 2016 The Fuchsia Authors
2// Copyright (c) 2016, Google, Inc. All rights reserved
3//
4// Use of this source code is governed by a MIT-style
5// license that can be found in the LICENSE file or at
6// https://opensource.org/licenses/MIT
7
8#include <arch/mmu.h>
9#include <assert.h>
10#include <debug.h>
11#include <dev/interrupt.h>
12#include <dev/pcie_bridge.h>
13#include <dev/pcie_bus_driver.h>
14#include <dev/pcie_device.h>
15#include <err.h>
16#include <fbl/algorithm.h>
17#include <fbl/auto_lock.h>
18#include <fbl/limits.h>
19#include <inttypes.h>
20#include <kernel/spinlock.h>
21#include <lk/init.h>
22#include <platform.h>
23#include <string.h>
24#include <trace.h>
25#include <vm/arch_vm_aspace.h>
26#include <vm/vm.h>
27#include <zircon/compiler.h>
28#include <zircon/time.h>
29#include <zircon/types.h>
30
31#include <fbl/alloc_checker.h>
32
33using fbl::AutoLock;
34
35#define LOCAL_TRACE 0
36
37namespace {  // anon namespace.  Externals do not need to know about PcieDeviceImpl
// Concrete, instantiable implementation of PcieDevice.  Lives in the anonymous
// namespace because external code only ever interacts with the PcieDevice
// interface, never with this class directly.
class PcieDeviceImpl : public PcieDevice {
public:
    // Allocates and initializes a device at (dev_id, func_id) on the bus
    // managed by |upstream|.  Returns nullptr on allocation or init failure.
    static fbl::RefPtr<PcieDevice> Create(PcieUpstreamNode& upstream, uint dev_id, uint func_id);

    // Disallow copying, assigning and moving.
    DISALLOW_COPY_ASSIGN_AND_MOVE(PcieDeviceImpl);

    // Implement ref counting, do not let derived classes override.
    PCIE_IMPLEMENT_REFCOUNTED;

protected:
    // Plain (non-bridge) devices always pass is_bridge = false to the base.
    PcieDeviceImpl(PcieBusDriver& bus_drv, uint bus_id, uint dev_id, uint func_id)
        : PcieDevice(bus_drv, bus_id, dev_id, func_id, false) { }
};
52
53fbl::RefPtr<PcieDevice> PcieDeviceImpl::Create(PcieUpstreamNode& upstream,
54                                                uint dev_id, uint func_id) {
55    fbl::AllocChecker ac;
56    auto raw_dev = new (&ac) PcieDeviceImpl(upstream.driver(),
57                                            upstream.managed_bus_id(),
58                                            dev_id,
59                                            func_id);
60    if (!ac.check()) {
61        TRACEF("Out of memory attemping to create PCIe device %02x:%02x.%01x.\n",
62                upstream.managed_bus_id(), dev_id, func_id);
63        return nullptr;
64    }
65
66    auto dev = fbl::AdoptRef(static_cast<PcieDevice*>(raw_dev));
67    zx_status_t res = raw_dev->Init(upstream);
68    if (res != ZX_OK) {
69        TRACEF("Failed to initialize PCIe device %02x:%02x.%01x. (res %d)\n",
70                upstream.managed_bus_id(), dev_id, func_id, res);
71        return nullptr;
72    }
73
74    return dev;
75}
76}  // namespace
77
// Record the device's bus/device/function coordinates and size its BAR table.
// Bridges implement fewer BAR registers than ordinary devices, so the count
// depends on |is_bridge|.
PcieDevice::PcieDevice(PcieBusDriver& bus_drv,
                       uint bus_id, uint dev_id, uint func_id, bool is_bridge)
    : bus_drv_(bus_drv),
      is_bridge_(is_bridge),
      bus_id_(bus_id),
      dev_id_(dev_id),
      func_id_(func_id),
      bar_count_(is_bridge ? PCIE_BAR_REGS_PER_BRIDGE : PCIE_BAR_REGS_PER_DEVICE) {
}
87
PcieDevice::~PcieDevice() {
    /* We should already be unlinked from the bus's device tree. */
    DEBUG_ASSERT(!upstream_);
    DEBUG_ASSERT(!plugged_in_);

    /* TODO(johngro) : ASSERT that this device no longer participating in any of
     * the bus driver's shared IRQ dispatching. */

    /* Make certain that all bus access (MMIO, PIO, Bus mastering) has been
     * disabled.  Also, explicitly disable legacy IRQs */
    /* Writing only the INT_DISABLE bit clears the MEM/IO/bus-master enables as
     * a side effect, since all other command bits are written as zero. */
    if (cfg_)
        cfg_->Write(PciConfig::kCommand, PCIE_CFG_COMMAND_INT_DISABLE);
}
101
// Public factory entry point; delegates to the file-local concrete type.
fbl::RefPtr<PcieDevice> PcieDevice::Create(PcieUpstreamNode& upstream, uint dev_id, uint func_id) {
    return PcieDeviceImpl::Create(upstream, dev_id, func_id);
}
105
106zx_status_t PcieDevice::Init(PcieUpstreamNode& upstream) {
107    AutoLock dev_lock(&dev_lock_);
108
109    zx_status_t res = InitLocked(upstream);
110    if (res == ZX_OK) {
111        // Things went well, flag the device as plugged in and link ourselves up to
112        // the graph.
113        plugged_in_ = true;
114        bus_drv_.LinkDeviceToUpstream(*this, upstream);
115    }
116
117    return res;
118}
119
// First-stage device bring-up, performed with dev_lock_ held: fetch config
// space, cache the identity registers, probe BARs, walk the capability lists
// and initialize legacy-IRQ bookkeeping.  No bus address space is allocated
// here; that happens later in AllocateBars().
zx_status_t PcieDevice::InitLocked(PcieUpstreamNode& upstream) {
    zx_status_t res;
    DEBUG_ASSERT(dev_lock_.IsHeld());
    DEBUG_ASSERT(cfg_ == nullptr);  // Init must only ever run once per device.

    cfg_ = bus_drv_.GetConfig(bus_id_, dev_id_, func_id_, &cfg_phys_);
    if (cfg_ == nullptr) {
        TRACEF("Failed to fetch config for device %02x:%02x.%01x.\n", bus_id_, dev_id_, func_id_);
        return ZX_ERR_BAD_STATE;
    }

    // Cache basic device info
    vendor_id_ = cfg_->Read(PciConfig::kVendorId);
    device_id_ = cfg_->Read(PciConfig::kDeviceId);
    class_id_  = cfg_->Read(PciConfig::kBaseClass);
    subclass_  = cfg_->Read(PciConfig::kSubClass);
    prog_if_   = cfg_->Read(PciConfig::kProgramInterface);
    rev_id_    = cfg_->Read(PciConfig::kRevisionId);

    // Determine the details of each of the BARs, but do not actually allocate
    // space on the bus for them yet.
    res = ProbeBarsLocked();
    if (res != ZX_OK)
        return res;

    // Parse and sanity check the capabilities and extended capabilities lists
    // if they exist
    res = ProbeCapabilitiesLocked();
    if (res != ZX_OK)
        return res;

    // Now that we know what our capabilities are, initialize our internal IRQ
    // bookkeeping
    res = InitLegacyIrqStateLocked(upstream);
    if (res != ZX_OK)
        return res;

    return ZX_OK;
}
159
// Obtain a reference to this device's upstream node (bridge or root complex).
// May return nullptr if the device has been unplugged from the graph.
fbl::RefPtr<PcieUpstreamNode> PcieDevice::GetUpstream() {
    return bus_drv_.GetUpstream(*this);
}
163
// Quiesce a device which has been removed from the bus: cut off its bus
// access, mark it unplugged, and detach it from its upstream node.
void PcieDevice::Unplug() {
    /* Begin by completely nerfing this device, and preventing an new API
     * operations on it.  We need to be inside the dev lock to do this.  Note:
     * it is assumed that we will not disappear during any of this function,
     * because our caller is holding a reference to us. */
    AutoLock dev_lock(&dev_lock_);

    if (plugged_in_) {
        /* Remove all access this device has to the PCI bus */
        cfg_->Write(PciConfig::kCommand, PCIE_CFG_COMMAND_INT_DISABLE);

        /* TODO(johngro) : Make sure that our interrupt mode has been set to
         * completely disabled.  Do not return allocated BARs to the central
         * pool yet.  These regions of the physical bus need to remain
         * "allocated" until all drivers/users in the system release their last
         * reference to the device.  This way, if the device gets plugged in
         * again immediately, the new version of the device will not end up
         * getting mapped underneath any stale driver instances. */

        plugged_in_ = false;
    } else {
        /* TODO(johngro) : Assert that the device has been completely disabled. */
    }

    /* Unlink ourselves from our upstream parent (if we still have one). */
    bus_drv_.UnlinkDeviceFromUpstream(*this);
}
191
// Perform a Function Level Reset (FLR) of this device, using either the PCIe
// capability or the PCI Advanced Features capability — whichever the device
// implements.  Follows the multi-step quiesce/reset/restore procedure from the
// spec's implementation notes.  Returns ZX_ERR_NOT_SUPPORTED if the device
// advertises neither FLR mechanism, ZX_ERR_BAD_STATE if IRQs are still active,
// and ZX_ERR_TIMED_OUT if the device fails to quiesce or to come back.
zx_status_t PcieDevice::DoFunctionLevelReset() {
    zx_status_t ret;

    // TODO(johngro) : Function level reset is an operation which can take quite
    // a long time (more than a second).  We should not hold the device lock for
    // the entire duration of the operation.  This should be re-done so that the
    // device can be placed into a "resetting" state (and other API calls can
    // fail with ZX_ERR_BAD_STATE, or some-such) and the lock can be released while the
    // reset timeouts run.  This way, a spontaneous unplug event can occur and
    // not block the whole world because the device unplugged was in the process
    // of a FLR.
    AutoLock dev_lock(&dev_lock_);

    // Make certain to check to see if the device is still plugged in.
    if (!plugged_in_)
        return ZX_ERR_UNAVAILABLE;

    // Disallow reset if we currently have an active IRQ mode.
    //
    // Note: the only possible reason for get_irq_mode to fail would be for the
    // device to be unplugged.  Since we have already checked for that, we
    // assert that the call should succeed.
    pcie_irq_mode_info_t irq_mode_info;
    ret = GetIrqModeLocked(&irq_mode_info);
    DEBUG_ASSERT(ZX_OK == ret);

    if (irq_mode_info.mode != PCIE_IRQ_MODE_DISABLED)
        return ZX_ERR_BAD_STATE;

    DEBUG_ASSERT(!irq_mode_info.registered_handlers);
    DEBUG_ASSERT(!irq_mode_info.max_handlers);

    // If cannot reset via the PCIe capability, or the PCI advanced capability,
    // then this device simply does not support function level reset.
    if (!(pcie_ && pcie_->has_flr()) && !(pci_af_ && pci_af_->has_flr()))
        return ZX_ERR_NOT_SUPPORTED;

    // Pick the functions we need for testing whether or not transactions are
    // pending for this device, and for initiating the FLR
    bool (*check_trans_pending)(void* ctx);
    void (*initiate_flr)(void* ctx);

    if (pcie_ && pcie_->has_flr()) {
        // PCIe capability path: the transactions-pending status and the FLR
        // trigger live in the PCIe capability's device status/control regs.
        check_trans_pending = [](void* ctx) -> bool {
            auto thiz = reinterpret_cast<PcieDevice*>(ctx);
            return thiz->cfg_->Read(thiz->pcie_->device.status()) &
                                    PCS_DEV_STS_TRANSACTIONS_PENDING;
        };
        initiate_flr = [](void* ctx) {
            auto thiz = reinterpret_cast<PcieDevice*>(ctx);
            auto val = static_cast<uint16_t>(thiz->cfg_->Read(thiz->pcie_->device.ctrl()) |
                                                              PCS_DEV_CTRL_INITIATE_FLR);
            thiz->cfg_->Write(thiz->pcie_->device.ctrl(), val);
        };
    } else {
        // PCI Advanced Features capability path.
        check_trans_pending = [](void* ctx) -> bool {
            auto thiz = reinterpret_cast<PcieDevice*>(ctx);
            return thiz->cfg_->Read(thiz->pci_af_->af_status()) & PCS_ADVCAPS_STATUS_TRANS_PENDING;
        };
        initiate_flr = [](void* ctx) {
            auto thiz = reinterpret_cast<PcieDevice*>(ctx);
            thiz->cfg_->Write(thiz->pci_af_->af_ctrl(), PCS_ADVCAPS_CTRL_INITIATE_FLR);
        };
    }

    // Following the procedure outlined in the Implementation notes
    uint32_t bar_backup[PCIE_MAX_BAR_REGS];
    uint16_t cmd_backup;

    // 1) Make sure driver code is not creating new transactions (not much I
    //    can do about this, just have to hope).
    // 2) Clear out the command register so that no new transactions may be
    //    initiated.  Also back up the BARs in the process.
    {
        DEBUG_ASSERT(irq_.legacy.shared_handler != nullptr);
        AutoSpinLock cmd_reg_lock(&cmd_reg_lock_);

        cmd_backup = cfg_->Read(PciConfig::kCommand);
        cfg_->Write(PciConfig::kCommand, PCIE_CFG_COMMAND_INT_DISABLE);
        for (uint i = 0; i < bar_count_; ++i)
            bar_backup[i] = cfg_->Read(PciConfig::kBAR(i));
    }

    // 3) Poll the transaction pending bit until it clears.  This may take
    //    "several seconds".  Poll once per millisecond, giving up after 5 sec.
    zx_time_t start = current_time();
    ret = ZX_ERR_TIMED_OUT;
    do {
        if (!check_trans_pending(this)) {
            ret = ZX_OK;
            break;
        }
        thread_sleep_relative(ZX_MSEC(1));
    } while (zx_time_sub_time(current_time(), start) < ZX_SEC(5));

    if (ret != ZX_OK) {
        TRACEF("Timeout waiting for pending transactions to clear the bus "
               "for %02x:%02x.%01x\n",
               bus_id_, dev_id_, func_id_);

        // Restore the command register
        AutoSpinLock cmd_reg_lock(&cmd_reg_lock_);
        cfg_->Write(PciConfig::kCommand, cmd_backup);

        return ret;
    } else {
        // 4) Software initiates the FLR
        initiate_flr(this);

        // 5) Software waits 100mSec
        thread_sleep_relative(ZX_MSEC(100));
    }

    // NOTE: Even though the spec says that the reset operation is supposed
    // to always take less than 100mSec, no one really follows this rule.
    // Generally speaking, when a device resets, config read cycles will
    // return all 0xFFs until the device finally resets and comes back.
    // Poll the Vendor ID field until the device finally completes it's
    // reset.
    start = current_time();
    ret   = ZX_ERR_TIMED_OUT;
    do {
        if (cfg_->Read(PciConfig::kVendorId) != PCIE_INVALID_VENDOR_ID) {
            ret = ZX_OK;
            break;
        }
        thread_sleep_relative(ZX_MSEC(1));
    } while (zx_time_sub_time(current_time(), start) < ZX_SEC(5));

    if (ret == ZX_OK) {
        // 6) Software reconfigures the function and enables it for normal operation
        AutoSpinLock cmd_reg_lock(&cmd_reg_lock_);

        for (uint i = 0; i < bar_count_; ++i)
            cfg_->Write(PciConfig::kBAR(i), bar_backup[i]);
        cfg_->Write(PciConfig::kCommand, cmd_backup);
    } else {
        // TODO(johngro) : What do we do if this fails?  If we trigger a
        // device reset, and the device fails to re-appear after 5 seconds,
        // it is probably gone for good.  We probably need to force unload
        // any device drivers which had previously owned the device.
        TRACEF("Timeout waiting for %02x:%02x.%01x to complete function "
               "level reset.  This is Very Bad.\n",
               bus_id_, dev_id_, func_id_);
    }

    return ret;
}
340
341zx_status_t PcieDevice::ModifyCmd(uint16_t clr_bits, uint16_t set_bits) {
342    AutoLock dev_lock(&dev_lock_);
343
344    /* In order to keep internal bookkeeping coherent, and interactions between
345     * MSI/MSI-X and Legacy IRQ mode safe, API users may not directly manipulate
346     * the legacy IRQ enable/disable bit.  Just ignore them if they try to
347     * manipulate the bit via the modify cmd API. */
348    clr_bits = static_cast<uint16_t>(clr_bits & ~PCIE_CFG_COMMAND_INT_DISABLE);
349    set_bits = static_cast<uint16_t>(set_bits & ~PCIE_CFG_COMMAND_INT_DISABLE);
350
351    if (plugged_in_) {
352        ModifyCmdLocked(clr_bits, set_bits);
353        return ZX_OK;
354    }
355
356    return ZX_ERR_UNAVAILABLE;
357}
358
359void PcieDevice::ModifyCmdLocked(uint16_t clr_bits, uint16_t set_bits) {
360    DEBUG_ASSERT(dev_lock_.IsHeld());
361
362    {
363        AutoSpinLock cmd_reg_lock(&cmd_reg_lock_);
364        cfg_->Write(PciConfig::kCommand,
365                     static_cast<uint16_t>((cfg_->Read(PciConfig::kCommand) & ~clr_bits)
366                                                                             |  set_bits));
367    }
368}
369
// Probe (but do not allocate) every BAR register of this device, filling in
// the bars_ table with size/type information.  A 64-bit BAR occupies two
// consecutive registers, so the loop skips the register holding its upper
// half after probing it.
zx_status_t PcieDevice::ProbeBarsLocked() {
    DEBUG_ASSERT(cfg_);
    DEBUG_ASSERT(dev_lock_.IsHeld());

    static_assert(PCIE_MAX_BAR_REGS >= PCIE_BAR_REGS_PER_DEVICE, "");
    static_assert(PCIE_MAX_BAR_REGS >= PCIE_BAR_REGS_PER_BRIDGE, "");

    // Only standard and PCI-to-PCI bridge header layouts have BARs we know
    // how to probe.
    __UNUSED uint8_t header_type = cfg_->Read(PciConfig::kHeaderType) & PCI_HEADER_TYPE_MASK;

    DEBUG_ASSERT((header_type == PCI_HEADER_TYPE_STANDARD) ||
                 (header_type == PCI_HEADER_TYPE_PCI_BRIDGE));
    DEBUG_ASSERT(bar_count_ <= fbl::count_of(bars_));

    for (uint i = 0; i < bar_count_; ++i) {
        /* If this is a re-scan of the bus, We should not be re-enumerating BARs. */
        DEBUG_ASSERT(bars_[i].size == 0);
        DEBUG_ASSERT(bars_[i].allocation == nullptr);

        zx_status_t probe_res = ProbeBarLocked(i);
        if (probe_res != ZX_OK)
            return probe_res;

        if (bars_[i].size > 0) {
            /* If this was a 64 bit bar, it took two registers to store.  Make
             * sure to skip the next register */
            if (bars_[i].is_64bit) {
                i++;

                // A 64-bit BAR in the last register slot has no room for its
                // upper half; the device's configuration is malformed.
                if (i >= bar_count_) {
                    TRACEF("Device %02x:%02x:%01x claims to have 64-bit BAR in position %u/%u!\n",
                           bus_id_, dev_id_, func_id_, i, bar_count_);
                    return ZX_ERR_BAD_STATE;
                }
            }
        }
    }

    return ZX_OK;
}
409
// Probe a single BAR register: classify it (MMIO vs PIO, 32 vs 64 bit,
// prefetchable), then determine its size using the standard write-all-ones /
// read-back technique, restoring the original register contents afterwards.
// Fills in bars_[bar_id]; no bus address space is allocated here.
zx_status_t PcieDevice::ProbeBarLocked(uint bar_id) {
    DEBUG_ASSERT(cfg_);
    DEBUG_ASSERT(bar_id < bar_count_);
    DEBUG_ASSERT(bar_id < fbl::count_of(bars_));

    /* Determine the type of BAR this is.  Make sure that it is one of the types we understand */
    pcie_bar_info_t& bar_info  = bars_[bar_id];
    uint32_t bar_val           = cfg_->Read(PciConfig::kBAR(bar_id));
    bar_info.is_mmio           = (bar_val & PCI_BAR_IO_TYPE_MASK) == PCI_BAR_IO_TYPE_MMIO;
    bar_info.is_64bit          = bar_info.is_mmio &&
                                 ((bar_val & PCI_BAR_MMIO_TYPE_MASK) == PCI_BAR_MMIO_TYPE_64BIT);
    bar_info.is_prefetchable   = bar_info.is_mmio && (bar_val & PCI_BAR_MMIO_PREFETCH_MASK);
    bar_info.first_bar_reg     = bar_id;

    if (bar_info.is_64bit) {
        // A 64-bit BAR needs the next register for its upper half; reject a
        // 64-bit BAR in the final slot.
        if ((bar_id + 1) >= bar_count_) {
            TRACEF("Illegal 64-bit MMIO BAR position (%u/%u) while fetching BAR info "
                   "for device config @%p\n",
                   bar_id, bar_count_, cfg_);
            return ZX_ERR_BAD_STATE;
        }
    } else {
        if (bar_info.is_mmio && ((bar_val & PCI_BAR_MMIO_TYPE_MASK) != PCI_BAR_MMIO_TYPE_32BIT)) {
            TRACEF("Unrecognized MMIO BAR type (BAR[%u] == 0x%08x) while fetching BAR info "
                   "for device config @%p\n",
                   bar_id, bar_val, cfg_);
            return ZX_ERR_BAD_STATE;
        }
    }

    /* Disable either MMIO or PIO (depending on the BAR type) access while we
     * perform the probe.  We don't want the addresses written during probing to
     * conflict with anything else on the bus.  Note:  No drivers should have
     * acccess to this device's registers during the probe process as the device
     * should not have been published yet.  That said, there could be other
     * (special case) parts of the system accessing a devices registers at this
     * point in time, like an early init debug console or serial port.  Don't
     * make any attempt to print or log until the probe operation has been
     * completed.  Hopefully these special systems are quiescent at this point
     * in time, otherwise they might see some minor glitching while access is
     * disabled.
     */
    uint16_t backup = cfg_->Read(PciConfig::kCommand);
    if (bar_info.is_mmio)
        cfg_->Write(PciConfig::kCommand, static_cast<uint16_t>(backup & ~PCI_COMMAND_MEM_EN));
    else
        cfg_->Write(PciConfig::kCommand, static_cast<uint16_t>(backup & ~PCI_COMMAND_IO_EN));

    /* Figure out the size of this BAR region by writing 1's to the
     * address bits, then reading back to see which bits the device
     * considers un-configurable. */
    uint32_t addr_mask = bar_info.is_mmio ? PCI_BAR_MMIO_ADDR_MASK : PCI_BAR_PIO_ADDR_MASK;
    uint32_t addr_lo   = bar_val & addr_mask;
    uint64_t size_mask;

    cfg_->Write(PciConfig::kBAR(bar_id), bar_val | addr_mask);
    size_mask = ~(cfg_->Read(PciConfig::kBAR(bar_id)) & addr_mask);
    cfg_->Write(PciConfig::kBAR(bar_id), bar_val);

    if (bar_info.is_mmio) {
        if (bar_info.is_64bit) {

            /* 64bit MMIO? Probe the upper bits as well */
            bar_id++;
            bar_val = cfg_->Read(PciConfig::kBAR(bar_id));
            cfg_->Write(PciConfig::kBAR(bar_id), 0xFFFFFFFF);
            size_mask |= ((uint64_t)~cfg_->Read(PciConfig::kBAR(bar_id))) << 32;
            cfg_->Write(PciConfig::kBAR(bar_id), bar_val);
            bar_info.size = size_mask + 1;
            bar_info.bus_addr = (static_cast<uint64_t>(bar_val) << 32) | addr_lo;
        } else {
            bar_info.size = (uint32_t)(size_mask + 1);
            bar_info.bus_addr = addr_lo;
        }
    } else {
        /* PIO BAR */
        bar_info.size = ((uint32_t)(size_mask + 1)) & PCIE_PIO_ADDR_SPACE_MASK;
        bar_info.bus_addr = addr_lo;
    }

    /* Restore the command register to its previous value */
    cfg_->Write(PciConfig::kCommand, backup);

    /* Success */
    return ZX_OK;
}
496
497
// Public entry point: take the device lock, then allocate bus address space
// for every BAR this device implements.
zx_status_t PcieDevice::AllocateBars() {
    AutoLock dev_lock(&dev_lock_);
    return AllocateBarsLocked();
}
502
503zx_status_t PcieDevice::AllocateBarsLocked() {
504    DEBUG_ASSERT(dev_lock_.IsHeld());
505    DEBUG_ASSERT(plugged_in_);
506
507    // Have we become unplugged?
508    if (!plugged_in_)
509        return ZX_ERR_UNAVAILABLE;
510
511    /* Allocate BARs for the device */
512    DEBUG_ASSERT(bar_count_ <= fbl::count_of(bars_));
513    for (size_t i = 0; i < bar_count_; ++i) {
514        if (bars_[i].size) {
515            zx_status_t ret = AllocateBarLocked(bars_[i]);
516            if (ret != ZX_OK)
517                return ret;
518        }
519    }
520
521    return ZX_OK;
522}
523
// Allocate bus address space for a single BAR.  First attempts to preserve any
// address already programmed into the BAR; failing that, disables device bus
// access and dynamically allocates a fresh region from the upstream node's
// region pools, then programs the BAR register(s) with the result.
zx_status_t PcieDevice::AllocateBarLocked(pcie_bar_info_t& info) {
    DEBUG_ASSERT(dev_lock_.IsHeld());
    DEBUG_ASSERT(plugged_in_);

    // Do not attempt to remap if we are rescanning the bus and this BAR is
    // already allocated, or if it does not exist (size is zero)
    if ((info.size == 0) || (info.allocation != nullptr))
        return ZX_OK;

    // Hold a reference to our upstream node while we do this.  If we cannot
    // obtain a reference, then our upstream node has become unplugged and we
    // should just fail out now.
    auto upstream = GetUpstream();
    if (upstream == nullptr)
        return ZX_ERR_UNAVAILABLE;

    /* Does this BAR already have an assigned address?  If so, try to preserve
     * it, if possible. */
    if (info.bus_addr != 0) {
        RegionAllocator* alloc = nullptr;
        if (upstream->type() == PcieUpstreamNode::Type::BRIDGE && info.is_prefetchable) {
            alloc = &upstream->pf_mmio_regions();
        } else if (info.is_mmio) {
            /* We currently do not support preserving an MMIO region which spans
             * the 4GB mark.  If we encounter such a thing, clear out the
             * allocation and attempt to re-allocate. */
            uint64_t inclusive_end = info.bus_addr + info.size - 1;
            if (inclusive_end <= fbl::numeric_limits<uint32_t>::max()) {
                alloc = &upstream->mmio_lo_regions();
            } else
            if (info.bus_addr > fbl::numeric_limits<uint32_t>::max()) {
                alloc = &upstream->mmio_hi_regions();
            }
        } else {
            alloc = &upstream->pio_regions();
        }

        // alloc stays null only for a 4GB-spanning MMIO region; in that case
        // fall through to dynamic re-allocation below.
        zx_status_t res = ZX_ERR_NOT_FOUND;
        if (alloc != nullptr) {
            res = alloc->GetRegion({ .base = info.bus_addr, .size = info.size }, info.allocation);
        }

        if (res == ZX_OK)
            return ZX_OK;

        TRACEF("Failed to preserve device %02x:%02x.%01x's %s window "
               "[%#" PRIx64 ", %#" PRIx64 "] Attempting to re-allocate.\n",
               bus_id_, dev_id_, func_id_,
               info.is_mmio ? (info.is_prefetchable ? "PFMMIO" : "MMIO") : "PIO",
               info.bus_addr, info.bus_addr + info.size - 1);
        info.bus_addr = 0;
    }

    /* We failed to preserve the allocation and need to attempt to
     * dynamically allocate a new region.  Close the device MMIO/PIO
     * windows, disable interrupts and shut of bus mastering (which will
     * also disable MSI interrupts) before we attempt dynamic allocation.
     */
    AssignCmdLocked(PCIE_CFG_COMMAND_INT_DISABLE);

    /* Choose which region allocator we will attempt to allocate from, then
     * check to see if we have the space. */
    RegionAllocator* alloc = !info.is_mmio
                             ? &upstream->pio_regions()
                             : (info.is_64bit ? &upstream->mmio_hi_regions()
                                              : &upstream->mmio_lo_regions());
    uint32_t addr_mask = info.is_mmio
                       ? PCI_BAR_MMIO_ADDR_MASK
                       : PCI_BAR_PIO_ADDR_MASK;

    /* If check to see if we have the space to allocate within the chosen
     * range.  In the case of a 64 bit MMIO BAR, if we run out of space in
     * the high-memory MMIO range, try the low memory range as well.
     */
    while (true) {
        /* MMIO windows and I/O windows on systems where I/O space is actually
         * memory mapped must be aligned to a page boundary, at least. */
        bool     is_io_space = PCIE_HAS_IO_ADDR_SPACE && !info.is_mmio;
        uint64_t align_size  = ((info.size >= PAGE_SIZE) || is_io_space)
                             ? info.size
                             : PAGE_SIZE;
        zx_status_t res = alloc->GetRegion(align_size, align_size, info.allocation);

        if (res != ZX_OK) {
            // 64-bit BARs may fall back from the high MMIO pool to the low
            // pool once; any other failure is fatal for this BAR.
            if ((res == ZX_ERR_NOT_FOUND) && (alloc == &upstream->mmio_hi_regions())) {
                LTRACEF("Insufficient space to map 64-bit MMIO BAR in high region while "
                        "configuring BARs for device at %02x:%02x.%01x (cfg vaddr = %p).  "
                        "Falling back on low memory region.\n",
                        bus_id_, dev_id_, func_id_, cfg_);
                alloc = &upstream->mmio_lo_regions();
                continue;
            }

            TRACEF("Failed to dynamically allocate %s BAR region (size %#" PRIx64 ") "
                   "while configuring BARs for device at %02x:%02x.%01x (res = %d)\n",
                   info.is_mmio ? (info.is_prefetchable ? "PFMMIO" : "MMIO") : "PIO", info.size,
                   bus_id_, dev_id_, func_id_, res);

            // Looks like we are out of luck.  Propagate the error up the stack
            // so that our upstream node knows to disable us.
            return res;
        }

        break;
    }

    /* Allocation succeeded.  Record our allocated and aligned physical address
     * in our BAR(s) */
    DEBUG_ASSERT(info.allocation != nullptr);
    uint bar_reg = info.first_bar_reg;
    info.bus_addr = info.allocation->base;

    // Preserve the BAR's low-order type/flag bits; only the address bits get
    // replaced.  The upper register of a 64-bit BAR holds pure address bits.
    cfg_->Write(PciConfig::kBAR(bar_reg), static_cast<uint32_t>((info.bus_addr & 0xFFFFFFFF) |
                                                (cfg_->Read(PciConfig::kBAR(bar_reg)) & ~addr_mask)));
    if (info.is_64bit)
        cfg_->Write(PciConfig::kBAR(bar_reg + 1), static_cast<uint32_t>(info.bus_addr >> 32));

    return ZX_OK;
}
643
// Public entry point: take the device lock (asserting we don't already hold
// it) and permanently disable the device.
void PcieDevice::Disable() {
    DEBUG_ASSERT(!dev_lock_.IsHeld());
    AutoLock dev_lock(&dev_lock_);
    DisableLocked();
}
649
650void PcieDevice::DisableLocked() {
651    // Disable a device because we cannot allocate space for all of its BARs (or
652    // forwarding windows, in the case of a bridge).  Flag the device as
653    // disabled from here on out.
654    DEBUG_ASSERT(dev_lock_.IsHeld());
655    TRACEF("WARNING - Disabling device %02x:%02x.%01x due to unsatisfiable configuration\n",
656            bus_id_, dev_id_, func_id_);
657
658    // Flag the device as disabled.  Close the device's MMIO/PIO windows, shut
659    // off device initiated accesses to the bus, disable legacy interrupts.
660    // Basically, prevent the device from doing anything from here on out.
661    disabled_ = true;
662    AssignCmdLocked(PCIE_CFG_COMMAND_INT_DISABLE);
663
664    // Release all BAR allocations back into the pool they came from.
665    for (auto& bar : bars_)
666        bar.allocation = nullptr;
667}
668
669void PcieDevice::Dump() const {
670    printf("PCI: device at %02x:%02x:%02x vid:did %04x:%04x\n",
671            bus_id(), dev_id(), func_id(),
672            vendor_id(), device_id());
673}
674