// Copyright 2016 The Fuchsia Authors
// Copyright (c) 2016, Google, Inc. All rights reserved
//
// Use of this source code is governed by a MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT

#include <dev/pcie_bridge.h>
#include <dev/pcie_bus_driver.h>
#include <dev/pcie_device.h>
#include <dev/pcie_root.h>
#include <fbl/algorithm.h>
#include <fbl/alloc_checker.h>
#include <fbl/auto_lock.h>
#include <fbl/limits.h>
#include <fbl/mutex.h>
#include <inttypes.h>
#include <ktl/move.h>
#include <lib/pci/pio.h>
#include <lk/init.h>
#include <trace.h>
#include <vm/vm_aspace.h>

using fbl::AutoLock;

/* TODO(johngro): figure this out someday.
 *
 * In theory, BARs which map PIO regions for devices are supposed to be able to
 * use bits [2, 31] to describe the programmable section of the PIO window.  On
 * real x86/64 systems, however, using the write-1s-readback technique to
 * determine the programmable bits of the BAR's address (and therefore the size
 * of the I/O window) shows that the upper 16 bits are not programmable.  This
 * makes sense for x86 (where I/O space is only 16 bits wide), but fools the
 * system into thinking that the I/O window is enormous.
 *
 * For now, just define a mask which can be used during PIO window space
 * calculations which limits the size to 16 bits for x86/64 systems.  Non-x86
 * systems are still free to use all of the bits for their PIO addresses
 * (although it is still a bit unclear what it would mean to generate an IO
 * space cycle on an architecture which has no such thing as IO space).
 */
constexpr size_t PcieBusDriver::REGION_BOOKKEEPING_SLAB_SIZE;
constexpr size_t PcieBusDriver::REGION_BOOKKEEPING_MAX_MEM;

fbl::RefPtr<PcieBusDriver> PcieBusDriver::driver_;
fbl::Mutex PcieBusDriver::driver_lock_;

PcieBusDriver::PcieBusDriver(PciePlatformInterface& platform) : platform_(platform) { }

PcieBusDriver::~PcieBusDriver() {
    // TODO(johngro): For now, if the bus driver is shutting down and unloading,
    // ASSERT that there are no currently claimed devices out there.  In the
    // long run, we need to gracefully handle disconnecting from all user mode
    // drivers (probably using a simulated hot-unplug) if we unload the bus
    // driver.
    ForeachDevice([](const fbl::RefPtr<PcieDevice>& dev, void* ctx, uint level) -> bool {
                      DEBUG_ASSERT(dev);
                      return true;
                  }, nullptr);

    /* Shut off all of our IRQs and free all of our bookkeeping */
    ShutdownIrqs();

    // Free the device tree
    ForeachRoot([](const fbl::RefPtr<PcieRoot>& root, void* ctx) -> bool {
                     root->UnplugDownstream();
                     return true;
                   }, nullptr);
    roots_.clear();

    // Release the region bookkeeping memory.
    region_bookkeeping_.reset();
}

zx_status_t PcieBusDriver::AddRoot(fbl::RefPtr<PcieRoot>&& root) {
    if (root == nullptr)
        return ZX_ERR_INVALID_ARGS;

    // Make sure that we are not already started.
    if (!IsNotStarted()) {
        TRACEF("Cannot add more PCIe roots once the bus driver has been started!\n");
        return ZX_ERR_BAD_STATE;
    }

    // Attempt to add it to the collection of roots.
    {
        AutoLock bus_topology_lock(&bus_topology_lock_);
        if (!roots_.insert_or_find(ktl::move(root))) {
            TRACEF("Failed to add PCIe root for bus %u, root already exists!\n",
                    root->managed_bus_id());
            return ZX_ERR_ALREADY_EXISTS;
        }
    }

    return ZX_OK;
}

zx_status_t PcieBusDriver::SetAddressTranslationProvider(
        ktl::unique_ptr<PcieAddressProvider> provider) {
    if (!IsNotStarted()) {
        TRACEF("Cannot set an address provider if the driver is already running\n");
        return ZX_ERR_BAD_STATE;
    }

    if (provider == nullptr) {
        return ZX_ERR_INVALID_ARGS;
    }

    addr_provider_ = ktl::move(provider);

    return ZX_OK;
}

zx_status_t PcieBusDriver::RescanDevices() {
    if (!IsOperational()) {
        TRACEF("Cannot rescan devices until the bus driver is operational!\n");
        return ZX_ERR_BAD_STATE;
    }

    AutoLock lock(&bus_rescan_lock_);

    // Scan each root looking for devices and other bridges.
    ForeachRoot([](const fbl::RefPtr<PcieRoot>& root, void* ctx) -> bool {
                     root->ScanDownstream();
                     return true;
                   }, nullptr);

    // Attempt to allocate any unallocated BARs
    ForeachRoot([](const fbl::RefPtr<PcieRoot>& root, void* ctx) -> bool {
                     root->AllocateDownstreamBars();
                     return true;
                   }, nullptr);

    return ZX_OK;
}

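// Returns true if the bus driver has not yet been started.  When
// allow_quirks_phase is true, the quirks-running phase of startup is also
// treated as "not started"; AddSubtractBusRegion passes true so that bus
// regions can still be adjusted while quirks are running.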
bool PcieBusDriver::IsNotStarted(bool allow_quirks_phase) const {
    AutoLock start_lock(&start_lock_);

    if ((state_ != State::NOT_STARTED) &&
        (!allow_quirks_phase || (state_ != State::STARTING_RUNNING_QUIRKS)))
        return false;

    return true;
}

bool PcieBusDriver::AdvanceState(State expected, State next) {
    AutoLock start_lock(&start_lock_);

    if (state_ != expected) {
        TRACEF("Failed to advance PCIe bus driver state to %u.  "
               "Expected state (%u) does not match current state (%u)\n",
               static_cast<uint>(next),
               static_cast<uint>(expected),
               static_cast<uint>(state_));
        return false;
    }

    state_ = next;
    return true;
}

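// Bring the bus driver from NOT_STARTED to OPERATIONAL, advancing through the
// intermediate states in order: scan downstream of each root, run the
// registered quirk handlers, then allocate BARs.  Any unexpected state
// transition aborts the startup sequence with ZX_ERR_BAD_STATE.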
zx_status_t PcieBusDriver::StartBusDriver() {
    if (!AdvanceState(State::NOT_STARTED, State::STARTING_SCANNING))
        return ZX_ERR_BAD_STATE;

    {
        AutoLock lock(&bus_rescan_lock_);

        // Scan each root looking for devices and other bridges.
        ForeachRoot([](const fbl::RefPtr<PcieRoot>& root, void* ctx) -> bool {
                         root->ScanDownstream();
                         return true;
                       }, nullptr);

        if (!AdvanceState(State::STARTING_SCANNING, State::STARTING_RUNNING_QUIRKS))
            return ZX_ERR_BAD_STATE;

        // Run registered quirk handlers for any newly discovered devices.
        ForeachDevice([](const fbl::RefPtr<PcieDevice>& dev, void* ctx, uint level) -> bool {
            PcieBusDriver::RunQuirks(dev);
            return true;
        }, nullptr);

        // Indicate to the registered quirks handlers that we are finished with the
        // quirks phase.
        PcieBusDriver::RunQuirks(nullptr);

        if (!AdvanceState(State::STARTING_RUNNING_QUIRKS, State::STARTING_RESOURCE_ALLOCATION))
            return ZX_ERR_BAD_STATE;

        // Attempt to allocate any unallocated BARs
        ForeachRoot([](const fbl::RefPtr<PcieRoot>& root, void* ctx) -> bool {
                         root->AllocateDownstreamBars();
                         return true;
                       }, nullptr);
    }

    if (!AdvanceState(State::STARTING_RESOURCE_ALLOCATION, State::OPERATIONAL))
        return ZX_ERR_BAD_STATE;

    return ZX_OK;
}

fbl::RefPtr<PcieDevice> PcieBusDriver::GetNthDevice(uint32_t index) {
    struct GetNthDeviceState {
        uint32_t index;
        fbl::RefPtr<PcieDevice> ret;
    } state;

    state.index = index;

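    // Walk the device tree, counting down from the requested index; the
    // callback returns false to stop iterating once the Nth device is found.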
    ForeachDevice(
        [](const fbl::RefPtr<PcieDevice>& dev, void* ctx, uint level) -> bool {
            DEBUG_ASSERT(dev && ctx);

            auto state = reinterpret_cast<GetNthDeviceState*>(ctx);
            if (!state->index) {
                state->ret = dev;
                return false;
            }

            state->index--;
            return true;
        }, &state);

    return ktl::move(state.ret);
}

void PcieBusDriver::LinkDeviceToUpstream(PcieDevice& dev, PcieUpstreamNode& upstream) {
    AutoLock lock(&bus_topology_lock_);

    // Have the device hold a reference to its upstream bridge.
    DEBUG_ASSERT(dev.upstream_ == nullptr);
    dev.upstream_ = fbl::WrapRefPtr(&upstream);

    // Have the bridge hold a reference to the device
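    // The downstream_ array is indexed by a flattened (device, function) pair:
    // dev_id * PCIE_MAX_FUNCTIONS_PER_DEVICE + func_id.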
    uint ndx = (dev.dev_id() * PCIE_MAX_FUNCTIONS_PER_DEVICE) + dev.func_id();
    DEBUG_ASSERT(ndx < fbl::count_of(upstream.downstream_));
    DEBUG_ASSERT(upstream.downstream_[ndx] == nullptr);
    upstream.downstream_[ndx] = fbl::WrapRefPtr(&dev);
}

void PcieBusDriver::UnlinkDeviceFromUpstream(PcieDevice& dev) {
    AutoLock lock(&bus_topology_lock_);

    if (dev.upstream_ != nullptr) {
        uint ndx = (dev.dev_id() * PCIE_MAX_FUNCTIONS_PER_DEVICE) + dev.func_id();
        DEBUG_ASSERT(ndx < fbl::count_of(dev.upstream_->downstream_));
        DEBUG_ASSERT(&dev == dev.upstream_->downstream_[ndx].get());

        // Let go of the upstream's reference to the device
        dev.upstream_->downstream_[ndx] = nullptr;

        // Let go of the device's reference to its upstream
        dev.upstream_ = nullptr;
    }
}

fbl::RefPtr<PcieUpstreamNode> PcieBusDriver::GetUpstream(PcieDevice& dev) {
    AutoLock lock(&bus_topology_lock_);
    auto ret = dev.upstream_;
    return ret;
}

fbl::RefPtr<PcieDevice> PcieBusDriver::GetDownstream(PcieUpstreamNode& upstream, uint ndx) {
    DEBUG_ASSERT(ndx < fbl::count_of(upstream.downstream_));
    AutoLock lock(&bus_topology_lock_);
    auto ret = upstream.downstream_[ndx];
    return ret;
}

fbl::RefPtr<PcieDevice> PcieBusDriver::GetRefedDevice(uint bus_id,
                                                      uint dev_id,
                                                      uint func_id) {
    struct GetRefedDeviceState {
        uint bus_id;
        uint dev_id;
        uint func_id;
        fbl::RefPtr<PcieDevice> ret;
    } state;

    state.bus_id  = bus_id;
    state.dev_id  = dev_id;
    state.func_id = func_id;

    ForeachDevice(
            [](const fbl::RefPtr<PcieDevice>& dev, void* ctx, uint level) -> bool {
                DEBUG_ASSERT(dev && ctx);
                auto state = reinterpret_cast<GetRefedDeviceState*>(ctx);

                if ((state->bus_id  == dev->bus_id()) &&
                    (state->dev_id  == dev->dev_id()) &&
                    (state->func_id == dev->func_id())) {
                    state->ret = dev;
                    return false;
                }

                return true;
            }, &state);

    return ktl::move(state.ret);
}

void PcieBusDriver::ForeachRoot(ForeachRootCallback cbk, void* ctx) {
    DEBUG_ASSERT(cbk);

    // Iterate over the roots, calling the registered callback for each one.
    // Hold a reference to each root while we do this, but do not hold the
    // topology lock.  Note that this requires some slightly special handling
    // when it comes to advancing the iterator, as the root we are holding the
    // reference to could (in theory) be removed from the collection during the
    // callback.
    bus_topology_lock_.Acquire();

    auto iter = roots_.begin();
    while (iter.IsValid()) {
        // Grab our ref.
        auto root_ref = iter.CopyPointer();

        // Perform our callback.
        bus_topology_lock_.Release();
        bool keep_going = cbk(root_ref, ctx);
        bus_topology_lock_.Acquire();
        if (!keep_going)
            break;

        // If the root is still in the collection, simply advance the iterator.
        // Otherwise, find the root (if any) with the next higher managed bus
        // id.
        if (root_ref->InContainer()) {
            ++iter;
        } else {
            iter = roots_.upper_bound(root_ref->GetKey());
        }
    }

    bus_topology_lock_.Release();
}

void PcieBusDriver::ForeachDevice(ForeachDeviceCallback cbk, void* ctx) {
    DEBUG_ASSERT(cbk);

    struct ForeachDeviceCtx {
        PcieBusDriver* driver;
        ForeachDeviceCallback dev_cbk;
        void* dev_ctx;
    };

    ForeachDeviceCtx foreach_device_ctx = {
        .driver = this,
        .dev_cbk = cbk,
        .dev_ctx = ctx,
    };

    ForeachRoot([](const fbl::RefPtr<PcieRoot>& root, void* ctx_) -> bool {
                     auto ctx = static_cast<ForeachDeviceCtx*>(ctx_);
                     return ctx->driver->ForeachDownstreamDevice(
                             root, 0, ctx->dev_cbk, ctx->dev_ctx);
                   }, &foreach_device_ctx);
}

zx_status_t PcieBusDriver::AllocBookkeeping() {
    // Create the RegionPool we will use to supply the memory for the
    // bookkeeping for all of our region tracking and allocation needs.  Then
    // assign it to each of our allocators.
    region_bookkeeping_ = RegionAllocator::RegionPool::Create(REGION_BOOKKEEPING_MAX_MEM);
    if (region_bookkeeping_ == nullptr) {
        TRACEF("Failed to create pool allocator for Region bookkeeping!\n");
        return ZX_ERR_NO_MEMORY;
    }

    mmio_lo_regions_.SetRegionPool(region_bookkeeping_);
    mmio_hi_regions_.SetRegionPool(region_bookkeeping_);
    pio_regions_.SetRegionPool(region_bookkeeping_);

    return ZX_OK;
}

bool PcieBusDriver::ForeachDownstreamDevice(const fbl::RefPtr<PcieUpstreamNode>& upstream,
                                            uint                                 level,
                                            ForeachDeviceCallback                cbk,
                                            void*                                ctx) {
    DEBUG_ASSERT(upstream && cbk);
    bool keep_going = true;

    for (uint i = 0; keep_going && (i < fbl::count_of(upstream->downstream_)); ++i) {
        auto dev = upstream->GetDownstream(i);

        if (!dev)
            continue;

        keep_going = cbk(dev, ctx, level);

        // It should be impossible to have a bridge topology such that we could
        // recurse more than 256 times.
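        // (Bus numbers are only 8 bits wide, so a chain of bridges cannot be
        // more than 256 levels deep.)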
        if (keep_going && (level < 256)) {
            if (dev->is_bridge()) {
                // TODO(johngro): eliminate the need to hold this extra ref.  If
                // we had the ability to up and downcast when moving RefPtrs, we
                // could just ktl::move dev into a PcieBridge pointer and then
                // down into a PcieUpstreamNode pointer.
                fbl::RefPtr<PcieUpstreamNode> downstream_bridge(
                        static_cast<PcieUpstreamNode*>(
                        static_cast<PcieBridge*>(dev.get())));
                keep_going = ForeachDownstreamDevice(downstream_bridge, level + 1, cbk, ctx);
            }
        }
    }

    return keep_going;
}

zx_status_t PcieBusDriver::AddSubtractBusRegion(uint64_t base,
                                                uint64_t size,
                                                PciAddrSpace aspace,
                                                bool add_op) {
    if (!IsNotStarted(true)) {
        TRACEF("Cannot add/subtract bus regions once the bus driver has been started!\n");
        return ZX_ERR_BAD_STATE;
    }

    if (!size)
        return ZX_ERR_INVALID_ARGS;

    uint64_t end = base + size - 1;
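    // OpPtr is a pointer to the RegionAllocator member function to apply
    // (AddRegion when adding, SubtractRegion when subtracting).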
    auto OpPtr = add_op ? &RegionAllocator::AddRegion : &RegionAllocator::SubtractRegion;

    if (aspace == PciAddrSpace::MMIO) {
        // Figure out if this goes in the low region, the high region, or needs
        // to be split into two regions.
        constexpr uint64_t U32_MAX = fbl::numeric_limits<uint32_t>::max();
        auto& mmio_lo = mmio_lo_regions_;
        auto& mmio_hi = mmio_hi_regions_;

        if (end <= U32_MAX) {
            return (mmio_lo.*OpPtr)({ .base = base, .size = size }, true);
        } else
        if (base > U32_MAX) {
            return (mmio_hi.*OpPtr)({ .base = base, .size = size }, true);
        } else {
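            // The region straddles the 4 GiB boundary; split it so the portion
            // below 4 GiB goes to the low allocator and the remainder goes to
            // the high allocator.  For example, a region covering
            // [0xF000'0000, 0x1'1000'0000) is split into [0xF000'0000, 4 GiB)
            // for mmio_lo and [4 GiB, 0x1'1000'0000) for mmio_hi.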
            uint64_t lo_base = base;
            uint64_t hi_base = U32_MAX + 1;
            uint64_t lo_size = hi_base - lo_base;
            uint64_t hi_size = size - lo_size;
            zx_status_t res;

            res = (mmio_lo.*OpPtr)({ .base = lo_base, .size = lo_size }, true);
            if (res != ZX_OK)
                return res;

            return (mmio_hi.*OpPtr)({ .base = hi_base, .size = hi_size }, true);
        }
    } else {
        DEBUG_ASSERT(aspace == PciAddrSpace::PIO);

        if ((base | end) & ~PCIE_PIO_ADDR_SPACE_MASK)
            return ZX_ERR_INVALID_ARGS;

        return (pio_regions_.*OpPtr)({ .base = base, .size = size }, true);
    }
}

zx_status_t PcieBusDriver::InitializeDriver(PciePlatformInterface& platform) {
    AutoLock lock(&driver_lock_);

    if (driver_ != nullptr) {
        TRACEF("Failed to initialize PCIe bus driver; driver already initialized\n");
        return ZX_ERR_BAD_STATE;
    }

    fbl::AllocChecker ac;
    driver_ = fbl::AdoptRef(new (&ac) PcieBusDriver(platform));
    if (!ac.check()) {
        TRACEF("Failed to allocate PCIe bus driver\n");
        return ZX_ERR_NO_MEMORY;
    }

    zx_status_t ret = driver_->AllocBookkeeping();
    if (ret != ZX_OK)
        driver_.reset();

    return ret;
}

void PcieBusDriver::ShutdownDriver() {
    fbl::RefPtr<PcieBusDriver> driver;

    {
        AutoLock lock(&driver_lock_);
        driver = ktl::move(driver_);
    }

    driver.reset();
}

/*******************************************************************************
 *
 *  ECAM support
 *
 ******************************************************************************/
/* TODO(cja): The bus driver owns all configs as well as devices, so the
 * lifecycles of both are already dependent. Should this still return a refptr?
 */
const PciConfig* PcieBusDriver::GetConfig(uint bus_id,
                                          uint dev_id,
                                          uint func_id,
                                          paddr_t* out_cfg_phys) {
    DEBUG_ASSERT(bus_id  < PCIE_MAX_BUSSES);
    DEBUG_ASSERT(dev_id  < PCIE_MAX_DEVICES_PER_BUS);
    DEBUG_ASSERT(func_id < PCIE_MAX_FUNCTIONS_PER_DEVICE);

    if (!addr_provider_) {
        TRACEF("Cannot get config space if no address translation provider is set\n");
        return nullptr;
    }

    if (out_cfg_phys) {
        *out_cfg_phys = 0;
    }

    uintptr_t addr;
    zx_status_t result = addr_provider_->Translate(static_cast<uint8_t>(bus_id),
                                                   static_cast<uint8_t>(dev_id),
                                                   static_cast<uint8_t>(func_id),
                                                   &addr, out_cfg_phys);
    if (result != ZX_OK) {
        return nullptr;
    }

    // Check if we already have this config space cached somewhere.
    auto cfg_iter = configs_.find_if([addr](const PciConfig& cfg) {
                                        return (cfg.base() == addr);
                                        });

    if (cfg_iter.IsValid()) {
        return &(*cfg_iter);
    }

    // Nothing found, create a new PciConfig for this address
    auto cfg = addr_provider_->CreateConfig(addr);
    configs_.push_front(cfg);

    return cfg.get();
}

// External reference to the quirks handler table.
extern const PcieBusDriver::QuirkHandler pcie_quirk_handlers[];
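// The handler table is terminated by a nullptr entry.  Passing a null device
// to RunQuirks (as StartBusDriver does at the end of the quirks phase) tells
// each handler that the quirks phase is complete.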
void PcieBusDriver::RunQuirks(const fbl::RefPtr<PcieDevice>& dev) {
    if (dev && dev->quirks_done())
        return;

    for (size_t i = 0; pcie_quirk_handlers[i] != nullptr; i++) {
        pcie_quirk_handlers[i](dev);
    }

    if (dev != nullptr)
        dev->SetQuirksDone();
}

// Workaround to disable all devices on the bus for mexec. This should not be
// used for any other reason because it intentionally leaves drivers in a bad
// state (some may crash).
// TODO(cja): The paradise serial workaround in particular may need a smarter
// way of being handled in the future because it is not uncommon to have serial
// bus devices initialized by the BIOS that we need to retain in zedboot/crash
// situations.
void PcieBusDriver::DisableBus() {
    fbl::AutoLock lock(&driver_lock_);
    ForeachDevice(
        [](const fbl::RefPtr<PcieDevice>& dev, void* ctx, uint level) -> bool {
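            // Skip the LP serial device (VID 0x8086, DID 0x9d66) covered by
            // the workaround described above; every other non-bridge device
            // has bus mastering disabled and is then shut down.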
            if (!dev->is_bridge() && !(dev->vendor_id() == 0x8086 && dev->device_id() == 0x9d66)) {
                TRACEF("Disabling device %#02x:%#02x.%01x - VID %#04x DID %#04x\n",
                    dev->bus_id(), dev->dev_id(), dev->func_id(), dev->vendor_id(),
                    dev->device_id());
                dev->EnableBusMaster(false);
                dev->Disable();
            } else {
                TRACEF("Skipping LP Serial disable!\n");
            }
            return true;
        }, nullptr);
}