1// Copyright 2016 The Fuchsia Authors
2// Copyright (c) 2016, Google, Inc. All rights reserved
3//
4// Use of this source code is governed by a MIT-style
5// license that can be found in the LICENSE file or at
6// https://opensource.org/licenses/MIT
7
8#include <dev/pcie_bridge.h>
9#include <dev/pcie_bus_driver.h>
10#include <dev/pcie_device.h>
11#include <dev/pcie_root.h>
12#include <inttypes.h>
13#include <vm/vm_aspace.h>
14#include <lib/pci/pio.h>
15#include <lk/init.h>
16#include <fbl/algorithm.h>
17#include <fbl/alloc_checker.h>
18#include <fbl/auto_lock.h>
19#include <fbl/limits.h>
20#include <fbl/mutex.h>
21#include <trace.h>
22
23using fbl::AutoLock;
24
/* TODO(johngro) : figure this out someday.
 *
 * In theory, BARs which map PIO regions for devices are supposed to be able to
 * use bits [2, 31] to describe the programmable section of the PIO window.  On
 * real x86/64 systems, however, using the write-1s-readback technique to
 * determine the programmable bits of the BAR's address (and therefore the size
 * of the I/O window) shows that the upper 16 bits are not programmable.  This
 * makes sense for x86 (where I/O space is only 16-bits), but fools the system
 * into thinking that the I/O window is enormous.
 *
 * For now, just define a mask which can be used during PIO window space
 * calculations which limits the size to 16 bits for x86/64 systems.  Non-x86
 * systems are still free to use all of the bits for their PIO addresses
 * (although, it is still a bit unclear what it would mean to generate an IO
 * space cycle on an architecture which has no such thing as IO space).
 */
// Out-of-line definitions for the static constexpr members declared in the
// class; prior to C++17, odr-used static constexpr data members require one.
constexpr size_t PcieBusDriver::REGION_BOOKKEEPING_SLAB_SIZE;
constexpr size_t PcieBusDriver::REGION_BOOKKEEPING_MAX_MEM;

// The singleton bus driver instance, and the lock which serializes its
// creation (InitializeDriver) and teardown (ShutdownDriver).
fbl::RefPtr<PcieBusDriver> PcieBusDriver::driver_;
fbl::Mutex PcieBusDriver::driver_lock_;
46
47PcieBusDriver::PcieBusDriver(PciePlatformInterface& platform) : platform_(platform) { }
// Tear down the bus driver: sanity check for outstanding devices, quiesce
// IRQs, unplug the device tree below each root, then release region
// bookkeeping and mapped ECAM regions.  Only runs once ShutdownDriver() has
// dropped the last singleton reference.
PcieBusDriver::~PcieBusDriver() {
    // TODO(johngro): For now, if the bus driver is shutting down and unloading,
    // ASSERT that there are no currently claimed devices out there.  In the
    // long run, we need to gracefully handle disconnecting from all user mode
    // drivers (probably using a simulated hot-unplug) if we unload the bus
    // driver.
    ForeachDevice([](const fbl::RefPtr<PcieDevice>& dev, void* ctx, uint level) -> bool {
                      DEBUG_ASSERT(dev);
                      return true;
                  }, nullptr);

    /* Shut off all of our IRQs and free all of our bookkeeping */
    ShutdownIrqs();

    // Free the device tree: recursively unplug everything below each root,
    // then drop our references to the roots themselves.
    ForeachRoot([](const fbl::RefPtr<PcieRoot>& root, void* ctx) -> bool {
                     root->UnplugDownstream();
                     return true;
                   }, nullptr);
    roots_.clear();

    // Release the region bookkeeping memory.
    region_bookkeeping_.reset();

    // Unmap and free all of our mapped ECAM regions.
    ecam_regions_.clear();
}
75
76zx_status_t PcieBusDriver::AddRoot(fbl::RefPtr<PcieRoot>&& root) {
77    if (root == nullptr)
78        return ZX_ERR_INVALID_ARGS;
79
80    // Make sure that we are not already started.
81    if (!IsNotStarted()) {
82        TRACEF("Cannot add more PCIe roots once the bus driver has been started!\n");
83        return ZX_ERR_BAD_STATE;
84    }
85
86    // Attempt to add it to the collection of roots.
87    {
88        AutoLock bus_topology_lock(&bus_topology_lock_);
89        if (!roots_.insert_or_find(fbl::move(root))) {
90            TRACEF("Failed to add PCIe root for bus %u, root already exists!\n",
91                    root->managed_bus_id());
92            return ZX_ERR_ALREADY_EXISTS;
93        }
94    }
95
96    return ZX_OK;
97}
98
99zx_status_t PcieBusDriver::RescanDevices() {
100    if (!IsOperational()) {
101        TRACEF("Cannot rescan devices until the bus driver is operational!\n");
102        return ZX_ERR_BAD_STATE;
103    }
104
105    AutoLock lock(&bus_rescan_lock_);
106
107    // Scan each root looking for for devices and other bridges.
108    ForeachRoot([](const fbl::RefPtr<PcieRoot>& root, void* ctx) -> bool {
109                     root->ScanDownstream();
110                     return true;
111                   }, nullptr);
112
113    // Attempt to allocate any unallocated BARs
114    ForeachRoot([](const fbl::RefPtr<PcieRoot>& root, void* ctx) -> bool {
115                     root->AllocateDownstreamBars();
116                     return true;
117                   }, nullptr);
118
119    return ZX_OK;
120}
121
122bool PcieBusDriver::IsNotStarted(bool allow_quirks_phase) const {
123    AutoLock start_lock(&start_lock_);
124
125    if ((state_ != State::NOT_STARTED) &&
126        (!allow_quirks_phase || (state_ != State::STARTING_RUNNING_QUIRKS)))
127        return false;
128
129    return true;
130}
131
132bool PcieBusDriver::AdvanceState(State expected, State next) {
133    AutoLock start_lock(&start_lock_);
134
135    if (state_ != expected) {
136        TRACEF("Failed to advance PCIe bus driver state to %u.  "
137               "Expected state (%u) does not match current state (%u)\n",
138               static_cast<uint>(next),
139               static_cast<uint>(expected),
140               static_cast<uint>(state_));
141        return false;
142    }
143
144    state_ = next;
145    return true;
146}
147
148zx_status_t PcieBusDriver::StartBusDriver() {
149    if (!AdvanceState(State::NOT_STARTED, State::STARTING_SCANNING))
150        return ZX_ERR_BAD_STATE;
151
152    {
153        AutoLock lock(&bus_rescan_lock_);
154
155        // Scan each root looking for for devices and other bridges.
156        ForeachRoot([](const fbl::RefPtr<PcieRoot>& root, void* ctx) -> bool {
157                         root->ScanDownstream();
158                         return true;
159                       }, nullptr);
160
161        if (!AdvanceState(State::STARTING_SCANNING, State::STARTING_RUNNING_QUIRKS))
162            return ZX_ERR_BAD_STATE;
163
164        // Run registered quirk handlers for any newly discovered devices.
165        ForeachDevice([](const fbl::RefPtr<PcieDevice>& dev, void* ctx, uint level) -> bool {
166            PcieBusDriver::RunQuirks(dev);
167            return true;
168        }, nullptr);
169
170        // Indicate to the registered quirks handlers that we are finished with the
171        // quirks phase.
172        PcieBusDriver::RunQuirks(nullptr);
173
174        if (!AdvanceState(State::STARTING_RUNNING_QUIRKS, State::STARTING_RESOURCE_ALLOCATION))
175            return ZX_ERR_BAD_STATE;
176
177        // Attempt to allocate any unallocated BARs
178        ForeachRoot([](const fbl::RefPtr<PcieRoot>& root, void* ctx) -> bool {
179                         root->AllocateDownstreamBars();
180                         return true;
181                       }, nullptr);
182    }
183
184    if (!AdvanceState(State::STARTING_RESOURCE_ALLOCATION, State::OPERATIONAL))
185        return ZX_ERR_BAD_STATE;
186
187    return ZX_OK;
188}
189
190fbl::RefPtr<PcieDevice> PcieBusDriver::GetNthDevice(uint32_t index) {
191    struct GetNthDeviceState {
192        uint32_t index;
193        fbl::RefPtr<PcieDevice> ret;
194    } state;
195
196    state.index = index;
197
198    ForeachDevice(
199        [](const fbl::RefPtr<PcieDevice>& dev, void* ctx, uint level) -> bool {
200            DEBUG_ASSERT(dev && ctx);
201
202            auto state = reinterpret_cast<GetNthDeviceState*>(ctx);
203            if (!state->index) {
204                state->ret = dev;
205                return false;
206            }
207
208            state->index--;
209            return true;
210        }, &state);
211
212    return fbl::move(state.ret);
213}
214
215void PcieBusDriver::LinkDeviceToUpstream(PcieDevice& dev, PcieUpstreamNode& upstream) {
216    AutoLock lock(&bus_topology_lock_);
217
218    // Have the device hold a reference to its upstream bridge.
219    DEBUG_ASSERT(dev.upstream_ == nullptr);
220    dev.upstream_ = fbl::WrapRefPtr(&upstream);
221
222    // Have the bridge hold a reference to the device
223    uint ndx = (dev.dev_id() * PCIE_MAX_FUNCTIONS_PER_DEVICE) + dev.func_id();
224    DEBUG_ASSERT(ndx < fbl::count_of(upstream.downstream_));
225    DEBUG_ASSERT(upstream.downstream_[ndx] == nullptr);
226    upstream.downstream_[ndx] = fbl::WrapRefPtr(&dev);
227}
228
229void PcieBusDriver::UnlinkDeviceFromUpstream(PcieDevice& dev) {
230    AutoLock lock(&bus_topology_lock_);
231
232    if (dev.upstream_ != nullptr) {
233        uint ndx = (dev.dev_id() * PCIE_MAX_FUNCTIONS_PER_DEVICE) + dev.func_id();
234        DEBUG_ASSERT(ndx < fbl::count_of(dev.upstream_->downstream_));
235        DEBUG_ASSERT(&dev == dev.upstream_->downstream_[ndx].get());
236
237        // Let go of the upstream's reference to the device
238        dev.upstream_->downstream_[ndx] = nullptr;
239
240        // Let go of the device's reference to its upstream
241        dev.upstream_ = nullptr;
242    }
243}
244
245fbl::RefPtr<PcieUpstreamNode> PcieBusDriver::GetUpstream(PcieDevice& dev) {
246    AutoLock lock(&bus_topology_lock_);
247    auto ret = dev.upstream_;
248    return ret;
249}
250
251fbl::RefPtr<PcieDevice> PcieBusDriver::GetDownstream(PcieUpstreamNode& upstream, uint ndx) {
252    DEBUG_ASSERT(ndx <= fbl::count_of(upstream.downstream_));
253    AutoLock lock(&bus_topology_lock_);
254    auto ret = upstream.downstream_[ndx];
255    return ret;
256}
257
258fbl::RefPtr<PcieDevice> PcieBusDriver::GetRefedDevice(uint bus_id,
259                                                       uint dev_id,
260                                                       uint func_id) {
261    struct GetRefedDeviceState {
262        uint bus_id;
263        uint dev_id;
264        uint func_id;
265        fbl::RefPtr<PcieDevice> ret;
266    } state;
267
268    state.bus_id  = bus_id,
269    state.dev_id  = dev_id,
270    state.func_id = func_id,
271
272    ForeachDevice(
273            [](const fbl::RefPtr<PcieDevice>& dev, void* ctx, uint level) -> bool {
274                DEBUG_ASSERT(dev && ctx);
275                auto state = reinterpret_cast<GetRefedDeviceState*>(ctx);
276
277                if ((state->bus_id  == dev->bus_id()) &&
278                    (state->dev_id  == dev->dev_id()) &&
279                    (state->func_id == dev->func_id())) {
280                    state->ret = dev;
281                    return false;
282                }
283
284                return true;
285            }, &state);
286
287    return fbl::move(state.ret);
288}
289
// Invoke |cbk| once for each root, stopping early if the callback returns
// false.
//
// Iterate over the roots, calling the registered callback for each one.
// Hold a reference to each root while we do this, but do not hold the
// topology lock.  Note that this requires some slightly special handling
// when it comes to advancing the iterator as the root we are holding the
// reference to could (in theory) be removed from the collection during the
// callback..
void PcieBusDriver::ForeachRoot(ForeachRootCallback cbk, void* ctx) {
    DEBUG_ASSERT(cbk);

    bus_topology_lock_.Acquire();

    auto iter = roots_.begin();
    while (iter.IsValid()) {
        // Grab our ref.
        auto root_ref = iter.CopyPointer();

        // Perform our callback.  The topology lock is dropped across the
        // call so the callback is free to manipulate the topology itself.
        bus_topology_lock_.Release();
        bool keep_going = cbk(root_ref, ctx);
        bus_topology_lock_.Acquire();
        if (!keep_going)
            break;

        // If the root is still in the collection, simply advance the iterator.
        // Otherwise, find the root (if any) with the next higher managed bus
        // id.
        if (root_ref->InContainer()) {
            ++iter;
        } else {
            iter = roots_.upper_bound(root_ref->GetKey());
        }
    }

    bus_topology_lock_.Release();
}
325
326void PcieBusDriver::ForeachDevice(ForeachDeviceCallback cbk, void* ctx) {
327    DEBUG_ASSERT(cbk);
328
329    struct ForeachDeviceCtx {
330        PcieBusDriver* driver;
331        ForeachDeviceCallback dev_cbk;
332        void* dev_ctx;
333    };
334
335    ForeachDeviceCtx foreach_device_ctx = {
336        .driver = this,
337        .dev_cbk = cbk,
338        .dev_ctx = ctx,
339    };
340
341    ForeachRoot([](const fbl::RefPtr<PcieRoot>& root, void* ctx_) -> bool {
342                     auto ctx = static_cast<ForeachDeviceCtx*>(ctx_);
343                     return ctx->driver->ForeachDownstreamDevice(
344                             root, 0, ctx->dev_cbk, ctx->dev_ctx);
345                   }, &foreach_device_ctx);
346}
347
348zx_status_t PcieBusDriver::AllocBookkeeping() {
349    // Create the RegionPool we will use to supply the memory for the
350    // bookkeeping for all of our region tracking and allocation needs.  Then
351    // assign it to each of our allocators.
352    region_bookkeeping_ = RegionAllocator::RegionPool::Create(REGION_BOOKKEEPING_MAX_MEM);
353    if (region_bookkeeping_ == nullptr) {
354        TRACEF("Failed to create pool allocator for Region bookkeeping!\n");
355        return ZX_ERR_NO_MEMORY;
356    }
357
358    mmio_lo_regions_.SetRegionPool(region_bookkeeping_);
359    mmio_hi_regions_.SetRegionPool(region_bookkeeping_);
360    pio_regions_.SetRegionPool(region_bookkeeping_);
361
362    return ZX_OK;
363}
364
// Recursively invoke |cbk| for each of |upstream|'s direct children,
// descending into children which are themselves bridges.  |level| is the
// recursion depth (0 for devices directly below a root).  Returns false if
// the callback asked for the walk to stop, true otherwise.
bool PcieBusDriver::ForeachDownstreamDevice(const fbl::RefPtr<PcieUpstreamNode>& upstream,
                                            uint                                  level,
                                            ForeachDeviceCallback                 cbk,
                                            void*                                 ctx) {
    DEBUG_ASSERT(upstream && cbk);
    bool keep_going = true;

    for (uint i = 0; keep_going && (i < fbl::count_of(upstream->downstream_)); ++i) {
        auto dev = upstream->GetDownstream(i);

        // Empty slot; nothing plugged in at this dev/func position.
        if (!dev)
            continue;

        keep_going = cbk(dev, ctx, level);

        // It should be impossible to have a bridge topology such that we could
        // recurse more than 256 times.
        if (keep_going && (level < 256)) {
            if (dev->is_bridge()) {
                // TODO(johngro): eliminate the need to hold this extra ref.  If
                // we had the ability to up and downcast when moving RefPtrs, we
                // could just fbl::move dev into a PcieBridge pointer and then
                // down into a PcieUpstreamNode pointer.
                fbl::RefPtr<PcieUpstreamNode> downstream_bridge(
                        static_cast<PcieUpstreamNode*>(
                        static_cast<PcieBridge*>(dev.get())));
                keep_going = ForeachDownstreamDevice(downstream_bridge, level + 1, cbk, ctx);
            }
        }
    }

    return keep_going;
}
398
399zx_status_t PcieBusDriver::AddSubtractBusRegion(uint64_t base,
400                                                uint64_t size,
401                                                PciAddrSpace aspace,
402                                                bool add_op) {
403    if (!IsNotStarted(true)) {
404        TRACEF("Cannot add/subtract bus regions once the bus driver has been started!\n");
405        return ZX_ERR_BAD_STATE;
406    }
407
408    if (!size)
409        return ZX_ERR_INVALID_ARGS;
410
411    uint64_t end = base + size - 1;
412    auto OpPtr = add_op ? &RegionAllocator::AddRegion : &RegionAllocator::SubtractRegion;
413
414    if (aspace == PciAddrSpace::MMIO) {
415        // Figure out if this goes in the low region, the high region, or needs
416        // to be split into two regions.
417        constexpr uint64_t U32_MAX = fbl::numeric_limits<uint32_t>::max();
418        auto& mmio_lo = mmio_lo_regions_;
419        auto& mmio_hi = mmio_hi_regions_;
420
421        if (end <= U32_MAX) {
422            return (mmio_lo.*OpPtr)({ .base = base, .size = size }, true);
423        } else
424        if (base > U32_MAX) {
425            return (mmio_hi.*OpPtr)({ .base = base, .size = size }, true);
426        } else {
427            uint64_t lo_base = base;
428            uint64_t hi_base = U32_MAX + 1;
429            uint64_t lo_size = hi_base - lo_base;
430            uint64_t hi_size = size - lo_size;
431            zx_status_t res;
432
433            res = (mmio_lo.*OpPtr)({ .base = lo_base, .size = lo_size }, true);
434            if (res != ZX_OK)
435                return res;
436
437            return (mmio_hi.*OpPtr)({ .base = hi_base, .size = hi_size }, true);
438        }
439    } else {
440        DEBUG_ASSERT(aspace == PciAddrSpace::PIO);
441
442        if ((base | end) & ~PCIE_PIO_ADDR_SPACE_MASK)
443            return ZX_ERR_INVALID_ARGS;
444
445        return (pio_regions_.*OpPtr)({ .base = base, .size = size }, true);
446    }
447}
448
449zx_status_t PcieBusDriver::InitializeDriver(PciePlatformInterface& platform) {
450    AutoLock lock(&driver_lock_);
451
452    if (driver_ != nullptr) {
453        TRACEF("Failed to initialize PCIe bus driver; driver already initialized\n");
454        return ZX_ERR_BAD_STATE;
455    }
456
457    fbl::AllocChecker ac;
458    driver_ = fbl::AdoptRef(new (&ac) PcieBusDriver(platform));
459    if (!ac.check()) {
460        TRACEF("Failed to allocate PCIe bus driver\n");
461        return ZX_ERR_NO_MEMORY;
462    }
463
464    zx_status_t ret = driver_->AllocBookkeeping();
465    if (ret != ZX_OK)
466        driver_.reset();
467
468    return ret;
469}
470
471void PcieBusDriver::ShutdownDriver() {
472    fbl::RefPtr<PcieBusDriver> driver;
473
474    {
475        AutoLock lock(&driver_lock_);
476        driver = fbl::move(driver_);
477    }
478
479    driver.reset();
480}
481
482/*******************************************************************************
483 *
484 *  ECAM support
485 *
486 ******************************************************************************/
487/* TODO(cja): The bus driver owns all configs as well as devices so the
488 * lifecycle of both are already dependent. Should this still return a refptr?
489 */
// Look up (or lazily create) the PciConfig object used to access config
// space for the given BDF.  For MMIO (ECAM) systems the config address is
// computed from the mapped ECAM region which covers |bus_id|; for PIO
// systems it is the legacy port-I/O BDF address.  If |out_cfg_phys| is
// provided it receives the physical address of the config window (0 for PIO
// or on failure).  Returns nullptr when no ECAM region covers |bus_id|.
const PciConfig* PcieBusDriver::GetConfig(uint bus_id,
                                        uint dev_id,
                                        uint func_id,
                                        paddr_t* out_cfg_phys) {
    DEBUG_ASSERT(bus_id  < PCIE_MAX_BUSSES);
    DEBUG_ASSERT(dev_id  < PCIE_MAX_DEVICES_PER_BUS);
    DEBUG_ASSERT(func_id < PCIE_MAX_FUNCTIONS_PER_DEVICE);

    uintptr_t addr;
    if (is_mmio_) {
        // Find the region which would contain this bus_id, if any.
        // upper_bound followed by a decrement leaves |iter| on the region
        // with the largest bus_start which is <= bus_id (if any).
        AutoLock ecam_region_lock(&ecam_region_lock_);
        auto iter = ecam_regions_.upper_bound(static_cast<uint8_t>(bus_id));
        --iter;

        if (out_cfg_phys) {
            *out_cfg_phys = 0;
        }

        if (!iter.IsValid()) {
            return nullptr;
        }

        // The candidate region exists, but may still not span |bus_id|.
        if ((bus_id < iter->ecam().bus_start) ||
                (bus_id > iter->ecam().bus_end)) {
            return nullptr;
        }

        // Compute the offset of this BDF's config window within the region:
        // bus << 20 | dev << 15 | func << 12, relative to the region's first bus.
        bus_id -= iter->ecam().bus_start;
        size_t offset = (static_cast<size_t>(bus_id)  << 20) |
            (static_cast<size_t>(dev_id)  << 15) |
            (static_cast<size_t>(func_id) << 12);

        if (out_cfg_phys) {
            *out_cfg_phys = iter->ecam().phys_base + offset;
        }

        // TODO(cja): Move to a BDF based associative container for better lookup time
        // and insert or find behavior.
        addr = reinterpret_cast<uintptr_t>(static_cast<uint8_t*>(iter->vaddr()) + offset);
    } else {
        addr = Pci::PciBdfAddr(static_cast<uint8_t>(bus_id), static_cast<uint8_t>(dev_id),
                               static_cast<uint8_t>(func_id), 0);
    }

    // NOTE(review): |configs_| is searched and mutated below without holding
    // any lock; presumably all callers are already serialized — confirm.
    auto cfg_iter = configs_.find_if([addr](const PciConfig& cfg) {
                                        return (cfg.base() == addr);
                                        });
    /* An entry for this bdf config has been found in cache, return it */
    if (cfg_iter.IsValid()) {
        return &(*cfg_iter);
    }

    // Nothing found, create a new PciConfig for this address
    auto cfg = PciConfig::Create(addr, (is_mmio_) ? PciAddrSpace::MMIO : PciAddrSpace::PIO);
    configs_.push_front(cfg);
    return cfg.get();
}
549
550zx_status_t PcieBusDriver::AddEcamRegion(const EcamRegion& ecam) {
551    if (!IsNotStarted()) {
552        TRACEF("Cannot add/subtract ECAM regions once the bus driver has been started!\n");
553        return ZX_ERR_BAD_STATE;
554    }
555
556    // Sanity check the region first.
557    if (ecam.bus_start > ecam.bus_end)
558        return ZX_ERR_INVALID_ARGS;
559
560    size_t bus_count = static_cast<size_t>(ecam.bus_end) - ecam.bus_start + 1u;
561    if (ecam.size != (PCIE_ECAM_BYTE_PER_BUS * bus_count))
562        return ZX_ERR_INVALID_ARGS;
563
564    // Grab the ECAM lock and make certain that the region we have been asked to
565    // add does not overlap with any already defined regions.
566    AutoLock ecam_region_lock(&ecam_region_lock_);
567    auto iter = ecam_regions_.upper_bound(ecam.bus_start);
568    --iter;
569
570    // If iter is valid, it now points to the region with the largest bus_start
571    // which is <= ecam.bus_start.  If any region overlaps with the region we
572    // are attempting to add, it will be this one.
573    if (iter.IsValid()) {
574        uint8_t iter_start = iter->ecam().bus_start;
575        uint8_t iter_end   = iter->ecam().bus_end;
576        if (((iter_start >= ecam.bus_start) && (iter_start <= ecam.bus_end)) ||
577            ((ecam.bus_start >= iter_start) && (ecam.bus_start <= iter_end)))
578            return ZX_ERR_BAD_STATE;
579    }
580
581    // Looks good.  Attempt to allocate and map this ECAM region.
582    fbl::AllocChecker ac;
583    fbl::unique_ptr<MappedEcamRegion> region(new (&ac) MappedEcamRegion(ecam));
584    if (!ac.check()) {
585        TRACEF("Failed to allocate ECAM region for bus range [0x%02x, 0x%02x]\n",
586               ecam.bus_start, ecam.bus_end);
587        return ZX_ERR_NO_MEMORY;
588    }
589
590    zx_status_t res = region->MapEcam();
591    if (res != ZX_OK) {
592        TRACEF("Failed to map ECAM region for bus range [0x%02x, 0x%02x]\n",
593               ecam.bus_start, ecam.bus_end);
594        return res;
595    }
596
597    // Everything checks out.  Add the new region to our set of regions and we are done.
598    ecam_regions_.insert(fbl::move(region));
599    return ZX_OK;
600}
601
602PcieBusDriver::MappedEcamRegion::~MappedEcamRegion() {
603    if (vaddr_ != nullptr) {
604        VmAspace::kernel_aspace()->FreeRegion(reinterpret_cast<vaddr_t>(vaddr_));
605    }
606}
607
608zx_status_t PcieBusDriver::MappedEcamRegion::MapEcam() {
609    DEBUG_ASSERT(ecam_.bus_start <= ecam_.bus_end);
610    DEBUG_ASSERT((ecam_.size % PCIE_ECAM_BYTE_PER_BUS) == 0);
611    DEBUG_ASSERT((ecam_.size / PCIE_ECAM_BYTE_PER_BUS) ==
612                 (static_cast<size_t>(ecam_.bus_end) - ecam_.bus_start + 1u));
613
614    if (vaddr_ != nullptr)
615        return ZX_ERR_BAD_STATE;
616
617    char name_buf[32];
618    snprintf(name_buf, sizeof(name_buf), "pcie_cfg_%02x_%02x", ecam_.bus_start, ecam_.bus_end);
619
620    return VmAspace::kernel_aspace()->AllocPhysical(
621            name_buf,
622            ecam_.size,
623            &vaddr_,
624            PAGE_SIZE_SHIFT,
625            ecam_.phys_base,
626            0 /* vmm flags */,
627            ARCH_MMU_FLAG_UNCACHED_DEVICE |
628            ARCH_MMU_FLAG_PERM_READ |
629            ARCH_MMU_FLAG_PERM_WRITE);
630}
631
632// External references to the quirks handler table.
633extern const PcieBusDriver::QuirkHandler pcie_quirk_handlers[];
634void PcieBusDriver::RunQuirks(const fbl::RefPtr<PcieDevice>& dev) {
635    if (dev && dev->quirks_done())
636        return;
637
638    for (size_t i = 0; pcie_quirk_handlers[i] != nullptr; i++) {
639        pcie_quirk_handlers[i](dev);
640    }
641
642    if (dev != nullptr)
643        dev->SetQuirksDone();
644}
645
646// Workaround to disable all devices on the bus for mexec. This should not be
647// used for any other reason due to it intentionally leaving drivers in a bad
648// state (some may crash).
649// TODO(cja): The paradise serial workaround in particular may need a smarter
650// way of being handled in the future because it is not uncommon to have serial
651// bus devices initialized by the bios that we need to retain in zedboot/crash
652// situations.
653void PcieBusDriver::DisableBus() {
654    fbl::AutoLock lock(&driver_lock_);
655    ForeachDevice(
656        [](const fbl::RefPtr<PcieDevice>& dev, void* ctx, uint level) -> bool {
657            if (!dev->is_bridge() && !(dev->vendor_id() == 0x8086 && dev->device_id() == 0x9d66)) {
658                TRACEF("Disabling device %#02x:%#02x.%01x - VID %#04x DID %#04x\n",
659                    dev->dev_id(), dev->bus_id(), dev->func_id(), dev->vendor_id(),
660                    dev->device_id());
661                dev->EnableBusMaster(false);
662                dev->Disable();
663            } else {
664                TRACEF("Skipping LP Serial disable!");
665            }
666            return true;
667        }, nullptr);
668}
669