// Copyright 2016 The Fuchsia Authors
// Copyright (c) 2016, Google, Inc. All rights reserved
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file or at
// https://opensource.org/licenses/MIT
//
#include <assert.h>
#include <debug.h>
#include <dev/interrupt.h>
#include <dev/pcie_bus_driver.h>
#include <dev/pcie_bridge.h>
#include <dev/pcie_root.h>
#include <err.h>
#include <kernel/spinlock.h>
#include <list.h>
#include <pow2.h>
#include <string.h>
#include <trace.h>
#include <vm/vm.h>

#include <dev/pci_config.h>
#include <dev/pcie_device.h>

#include <fbl/alloc_checker.h>
#include <fbl/auto_lock.h>
#include <ktl/move.h>

using fbl::AutoLock;

#define LOCAL_TRACE 0

/******************************************************************************
 *
 * Helper routines common to all IRQ modes.
 *
 ******************************************************************************/
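// Tear down any dynamically allocated handler table and return the common IRQ
// bookkeeping to its initial (disabled) state.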
void PcieDevice::ResetCommonIrqBookkeeping() {
    if (irq_.handler_count > 1) {
        DEBUG_ASSERT(irq_.handlers != &irq_.singleton_handler);
        delete[] irq_.handlers;
    }

    irq_.singleton_handler.handler = nullptr;
    irq_.singleton_handler.ctx = nullptr;
    irq_.singleton_handler.dev = nullptr;
    irq_.mode = PCIE_IRQ_MODE_DISABLED;
    irq_.handlers = nullptr;
    irq_.handler_count = 0;
}

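// Allocate and initialize the per-vector handler table. A single-vector
// request uses the pre-allocated singleton handler (so it can never fail);
// multi-vector requests heap-allocate the table.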
zx_status_t PcieDevice::AllocIrqHandlers(uint requested_irqs, bool is_masked) {
    DEBUG_ASSERT(requested_irqs);
    DEBUG_ASSERT(!irq_.handlers);
    DEBUG_ASSERT(!irq_.handler_count);

    if (requested_irqs == 1) {
        irq_.handlers = &irq_.singleton_handler;
        irq_.handler_count = 1;
    } else {
        fbl::AllocChecker ac;
        irq_.handlers = new (&ac) pcie_irq_handler_state_t[requested_irqs];

        if (!ac.check())
            return ZX_ERR_NO_MEMORY;

        irq_.handler_count = requested_irqs;
    }

    for (uint i = 0; i < irq_.handler_count; ++i) {
        DEBUG_ASSERT(irq_.handlers[i].handler == nullptr);
        DEBUG_ASSERT(irq_.handlers[i].dev == nullptr);
        DEBUG_ASSERT(irq_.handlers[i].ctx == nullptr);
        irq_.handlers[i].dev = this;
        irq_.handlers[i].pci_irq_id = i;
        irq_.handlers[i].masked = is_masked;
    }

    return ZX_OK;
}

/******************************************************************************
 *
 * Legacy IRQ mode routines.
 *
 ******************************************************************************/
fbl::RefPtr<SharedLegacyIrqHandler> SharedLegacyIrqHandler::Create(uint irq_id) {
    fbl::AllocChecker ac;

    SharedLegacyIrqHandler* handler = new (&ac) SharedLegacyIrqHandler(irq_id);
    if (!ac.check()) {
        TRACEF("Failed to create shared legacy IRQ handler for system IRQ ID %u\n", irq_id);
        return nullptr;
    }

    return fbl::AdoptRef(handler);
}

SharedLegacyIrqHandler::SharedLegacyIrqHandler(uint irq_id)
    : irq_id_(irq_id) {
    list_initialize(&device_handler_list_);
    mask_interrupt(irq_id_);  // This should not be needed, but just in case.
    zx_status_t status = register_int_handler(irq_id_, HandlerThunk, this);
    DEBUG_ASSERT(status == ZX_OK);
}

SharedLegacyIrqHandler::~SharedLegacyIrqHandler() {
    DEBUG_ASSERT(list_is_empty(&device_handler_list_));
    mask_interrupt(irq_id_);
    zx_status_t status = register_int_handler(irq_id_, nullptr, nullptr);
    DEBUG_ASSERT(status == ZX_OK);
}

void SharedLegacyIrqHandler::Handler() {
    /* Go over the list of devices which share this legacy IRQ and give them a
     * chance to handle any interrupts which may be pending in their device.
     * Keep track of whether or not any device has requested a re-schedule event
     * at the end of this IRQ. */
    AutoSpinLockNoIrqSave list_lock(&device_handler_list_lock_);

    if (list_is_empty(&device_handler_list_)) {
        TRACEF("Received legacy PCI INT (system IRQ %u), but there are no devices registered to "
               "handle this interrupt. This is Very Bad. Disabling the interrupt at the system "
               "IRQ level to prevent meltdown.\n",
               irq_id_);
        mask_interrupt(irq_id_);
        return;
    }

    PcieDevice* dev;
    list_for_every_entry(&device_handler_list_,
                         dev,
                         PcieDevice,
                         irq_.legacy.shared_handler_node) {
        uint16_t command, status;
        auto cfg = dev->config();

        {
            AutoSpinLockNoIrqSave cmd_reg_lock(&dev->cmd_reg_lock_);
            command = cfg->Read(PciConfig::kCommand);
            status = cfg->Read(PciConfig::kStatus);
        }

        if ((status & PCIE_CFG_STATUS_INT_STS) && !(command & PCIE_CFG_COMMAND_INT_DISABLE)) {
            DEBUG_ASSERT(dev);
            pcie_irq_handler_state_t* hstate = &dev->irq_.handlers[0];

            if (hstate) {
                pcie_irq_handler_retval_t irq_ret = PCIE_IRQRET_MASK;
                AutoSpinLockNoIrqSave device_handler_lock(&hstate->lock);

                if (hstate->handler) {
                    if (!hstate->masked)
                        irq_ret = hstate->handler(*dev, 0, hstate->ctx);
                } else {
156 TRACEF("Received legacy PCI INT (system IRQ %u) for %02x:%02x.%02x, but no irq_ "
157 "handler has been registered by the driver. Force disabling the "
158 "interrupt.\n",
159 irq_id_, dev->bus_id_, dev->dev_id_, dev->func_id_);
                }

                if (irq_ret & PCIE_IRQRET_MASK) {
                    hstate->masked = true;
                    {
                        AutoSpinLockNoIrqSave cmd_reg_lock(&dev->cmd_reg_lock_);
                        command = cfg->Read(PciConfig::kCommand);
                        cfg->Write(PciConfig::kCommand, command | PCIE_CFG_COMMAND_INT_DISABLE);
                    }
                }
            } else {
171 TRACEF("Received legacy PCI INT (system IRQ %u) for %02x:%02x.%02x , but no irq_ "
172 "handlers have been allocated! Force disabling the interrupt.\n",
173 irq_id_, dev->bus_id_, dev->dev_id_, dev->func_id_);

                {
                    AutoSpinLockNoIrqSave cmd_reg_lock(&dev->cmd_reg_lock_);
                    command = cfg->Read(PciConfig::kCommand);
                    cfg->Write(PciConfig::kCommand, command | PCIE_CFG_COMMAND_INT_DISABLE);
                }
            }
        }
    }
}

void SharedLegacyIrqHandler::AddDevice(PcieDevice& dev) {
    DEBUG_ASSERT(dev.irq_.legacy.shared_handler.get() == this);
    DEBUG_ASSERT(!list_in_list(&dev.irq_.legacy.shared_handler_node));

    /* Make certain that the device's legacy IRQ has been masked at the PCI
     * device level. Then add this dev to the handler's list. If this was the
     * first device added to the handler list, unmask the handler IRQ at the top
     * level. */
    AutoSpinLock lock(&device_handler_list_lock_);

    dev.cfg_->Write(PciConfig::kCommand, dev.cfg_->Read(PciConfig::kCommand) |
                                         PCIE_CFG_COMMAND_INT_DISABLE);

    bool first_device = list_is_empty(&device_handler_list_);
    list_add_tail(&device_handler_list_, &dev.irq_.legacy.shared_handler_node);

    if (first_device)
        unmask_interrupt(irq_id_);
}

void SharedLegacyIrqHandler::RemoveDevice(PcieDevice& dev) {
    DEBUG_ASSERT(dev.irq_.legacy.shared_handler.get() == this);
    DEBUG_ASSERT(list_in_list(&dev.irq_.legacy.shared_handler_node));

    /* Make absolutely sure we have been masked at the PCIe config level, then
     * remove the device from the shared handler list. If this was the last
     * device on the list, mask the top level IRQ */
    AutoSpinLock lock(&device_handler_list_lock_);

    dev.cfg_->Write(PciConfig::kCommand, dev.cfg_->Read(PciConfig::kCommand) |
                                         PCIE_CFG_COMMAND_INT_DISABLE);
    list_delete(&dev.irq_.legacy.shared_handler_node);

    if (list_is_empty(&device_handler_list_))
        mask_interrupt(irq_id_);
}

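// Mask or unmask this device's legacy INTx at the PCI config level by
// toggling the Interrupt Disable bit in the command register.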
zx_status_t PcieDevice::MaskUnmaskLegacyIrq(bool mask) {
    if (!irq_.handlers || !irq_.handler_count)
        return ZX_ERR_INVALID_ARGS;

    pcie_irq_handler_state_t& hstate = irq_.handlers[0];

    {
        AutoSpinLock lock(&hstate.lock);

        if (mask) ModifyCmdLocked(0, PCIE_CFG_COMMAND_INT_DISABLE);
        else      ModifyCmdLocked(PCIE_CFG_COMMAND_INT_DISABLE, 0);
        hstate.masked = mask;
    }

    return ZX_OK;
}

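// Switch the device into legacy INTx mode. Legacy mode supports exactly one
// vector; the device starts out masked and is registered with the shared
// handler for its mapped system IRQ.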
zx_status_t PcieDevice::EnterLegacyIrqMode(uint requested_irqs) {
    DEBUG_ASSERT(requested_irqs);

    if (!irq_.legacy.pin || (requested_irqs > 1))
        return ZX_ERR_NOT_SUPPORTED;

    // Make absolutely certain we are masked.
    ModifyCmdLocked(0, PCIE_CFG_COMMAND_INT_DISABLE);

    // We can never fail to allocate a single handler (since we are going to
    // use the pre-allocated singleton).
    __UNUSED zx_status_t res = AllocIrqHandlers(requested_irqs, true);
    DEBUG_ASSERT(res == ZX_OK);
    DEBUG_ASSERT(irq_.handlers == &irq_.singleton_handler);

    irq_.mode = PCIE_IRQ_MODE_LEGACY;
    irq_.legacy.shared_handler->AddDevice(*this);

    return ZX_OK;
}

void PcieDevice::LeaveLegacyIrqMode() {
    /* Disable legacy IRQs and unregister from the shared legacy handler */
    MaskUnmaskLegacyIrq(true);
    irq_.legacy.shared_handler->RemoveDevice(*this);

    /* Release any handler storage and reset all of our bookkeeping */
    ResetCommonIrqBookkeeping();
}

/******************************************************************************
 *
 * MSI IRQ mode routines.
 *
 ******************************************************************************/
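// Mask or unmask a single MSI vector while holding its handler lock. Masking
// may happen at two levels: the PCI device level (if the device supports
// per-vector masking, PVM) and the platform interrupt controller level (if
// the platform supports it). Returns the previous masked state.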
bool PcieDevice::MaskUnmaskMsiIrqLocked(uint irq_id, bool mask) {
    DEBUG_ASSERT(irq_.mode == PCIE_IRQ_MODE_MSI);
    DEBUG_ASSERT(irq_id < irq_.handler_count);
    DEBUG_ASSERT(irq_.handlers);

    pcie_irq_handler_state_t& hstate = irq_.handlers[irq_id];
    DEBUG_ASSERT(hstate.lock.IsHeld());

    /* Internal code should not be calling this function asking to mask the
     * interrupt when masking is not possible. */
    DEBUG_ASSERT(!mask ||
                 bus_drv_.platform().supports_msi_masking() ||
                 irq_.msi->has_pvm());

    /* If we can mask at the PCI device level, do so. */
    if (irq_.msi->has_pvm()) {
        DEBUG_ASSERT(irq_id < PCIE_MAX_MSI_IRQS);
        uint32_t val = cfg_->Read(irq_.msi->mask_bits_reg());
        if (mask) val |=  (static_cast<uint32_t>(1u) << irq_id);
        else      val &= ~(static_cast<uint32_t>(1u) << irq_id);
        cfg_->Write(irq_.msi->mask_bits_reg(), val);
    }

    /* If we can mask at the platform interrupt controller level, do so. */
    DEBUG_ASSERT(irq_.msi->irq_block_.allocated);
    DEBUG_ASSERT(irq_id < irq_.msi->irq_block_.num_irq);
    if (bus_drv_.platform().supports_msi_masking())
        bus_drv_.platform().MaskUnmaskMsi(&irq_.msi->irq_block_, irq_id, mask);

    bool ret = hstate.masked;
    hstate.masked = mask;
    return ret;
}

zx_status_t PcieDevice::MaskUnmaskMsiIrq(uint irq_id, bool mask) {
    if (irq_id >= irq_.handler_count)
        return ZX_ERR_INVALID_ARGS;

    /* If a mask is being requested, and we cannot mask at either the platform
     * interrupt controller or the PCI device level, tell the caller that the
     * operation is unsupported. */
    if (mask && !bus_drv_.platform().supports_msi_masking() && !irq_.msi->has_pvm())
        return ZX_ERR_NOT_SUPPORTED;

    DEBUG_ASSERT(irq_.handlers);

    {
        AutoSpinLock handler_lock(&irq_.handlers[irq_id].lock);
        MaskUnmaskMsiIrqLocked(irq_id, mask);
    }

    return ZX_OK;
}

void PcieDevice::MaskAllMsiVectors() {
    DEBUG_ASSERT(irq_.msi);
    DEBUG_ASSERT(irq_.msi->is_valid());

    for (uint i = 0; i < irq_.handler_count; i++)
        MaskUnmaskMsiIrq(i, true);

    /* In theory, this should not be needed as all of the relevant bits should
     * have already been masked during the calls to MaskUnmaskMsiIrq. Just to
     * be careful, however, we explicitly mask all of the upper bits as well. */
    if (irq_.msi->has_pvm())
        cfg_->Write(irq_.msi->mask_bits_reg(), 0xFFFFFFFF);
}

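// Program the MSI target address/data pair into the device's MSI capability
// registers. MSI is disabled and all vectors are masked (when possible)
// before the registers are touched, so the device does not issue an MSI
// write against a half-programmed target.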
void PcieDevice::SetMsiTarget(uint64_t tgt_addr, uint32_t tgt_data) {
    DEBUG_ASSERT(irq_.msi);
    DEBUG_ASSERT(irq_.msi->is_valid());
    DEBUG_ASSERT(irq_.msi->is64Bit() || !(tgt_addr >> 32));
    DEBUG_ASSERT(!(tgt_data >> 16));

    /* Make sure MSI is disabled and all vectors masked (if possible) before
     * changing the target address and data. */
    SetMsiEnb(false);
    MaskAllMsiVectors();

    /* Lower bits of the address register are common to all forms of the MSI
     * capability structure. Upper address bits and data position depend on
     * whether this is a 64-bit or a 32-bit version. */
    cfg_->Write(irq_.msi->addr_reg(), static_cast<uint32_t>(tgt_addr & 0xFFFFFFFF));
    if (irq_.msi->is64Bit()) {
        cfg_->Write(irq_.msi->addr_upper_reg(), static_cast<uint32_t>(tgt_addr >> 32));
    }
    cfg_->Write(irq_.msi->data_reg(), static_cast<uint16_t>(tgt_data & 0xFFFF));
}

void PcieDevice::FreeMsiBlock() {
    /* If no block has been allocated, there is nothing to do */
    if (!irq_.msi->irq_block_.allocated)
        return;

    DEBUG_ASSERT(bus_drv_.platform().supports_msi());

    /* Mask the IRQ at the platform interrupt controller level if we can, and
     * unregister any registered handler. */
    const msi_block_t* b = &irq_.msi->irq_block_;
    for (uint i = 0; i < b->num_irq; i++) {
        if (bus_drv_.platform().supports_msi_masking()) {
            bus_drv_.platform().MaskUnmaskMsi(b, i, true);
        }
        bus_drv_.platform().RegisterMsiHandler(b, i, nullptr, nullptr);
    }

    /* Give the block of IRQs back to the platform */
    bus_drv_.platform().FreeMsiBlock(&irq_.msi->irq_block_);
    DEBUG_ASSERT(!irq_.msi->irq_block_.allocated);
}

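// Program the Multiple Message Enable (MME) field of the MSI control
// register. Per the MSI spec, MME is encoded as log2 of the vector count, so
// requesting (for example) 5 IRQs rounds up to log2 == 3, enabling 8 vectors.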
void PcieDevice::SetMsiMultiMessageEnb(uint requested_irqs) {
    DEBUG_ASSERT(irq_.msi);
    DEBUG_ASSERT(irq_.msi->is_valid());
    DEBUG_ASSERT((requested_irqs >= 1) && (requested_irqs <= PCIE_MAX_MSI_IRQS));

    uint log2 = log2_uint_ceil(requested_irqs);

    DEBUG_ASSERT(log2 <= 5);
    DEBUG_ASSERT(!log2 || ((0x1u << (log2 - 1)) < requested_irqs));
    DEBUG_ASSERT((0x1u << log2) >= requested_irqs);

    cfg_->Write(irq_.msi->ctrl_reg(),
                PCIE_CAP_MSI_CTRL_SET_MME(log2, cfg_->Read(irq_.msi->ctrl_reg())));
}

void PcieDevice::LeaveMsiIrqMode() {
    /* Disable MSI, mask all vectors and zero out the target */
    SetMsiTarget(0x0, 0x0);

    /* Return any allocated IRQ block to the platform, unregistering with
     * the interrupt controller and synchronizing with the dispatchers in
     * the process. */
    FreeMsiBlock();

    /* Reset our common state, free any allocated handlers */
    ResetCommonIrqBookkeeping();
}

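// Switch the device into MSI mode: allocate a block of MSI-capable IRQs from
// the platform, allocate and register per-vector handlers, program the
// target address/data and MME field, and finally enable MSI at the top
// level. Any failure along the way unwinds through LeaveMsiIrqMode().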
zx_status_t PcieDevice::EnterMsiIrqMode(uint requested_irqs) {
    DEBUG_ASSERT(requested_irqs);

    zx_status_t res = ZX_OK;

    // We cannot go into MSI mode if we don't support MSI at all, or we don't
    // support the number of IRQs requested
    if (!irq_.msi ||
        !irq_.msi->is_valid() ||
        !bus_drv_.platform().supports_msi() ||
        (requested_irqs > irq_.msi->max_irqs()))
        return ZX_ERR_NOT_SUPPORTED;

    // If we support PVM, make sure that we are completely masked before
    // attempting to allocate the block of IRQs.
    bool initially_masked;
    if (irq_.msi->has_pvm()) {
        cfg_->Write(irq_.msi->mask_bits_reg(), 0xFFFFFFFF);
        initially_masked = true;
    } else {
        // If we cannot mask at the PCI level, then our IRQs will be initially
        // masked only if the platform supports masking at the interrupt
        // controller level.
        initially_masked = bus_drv_.platform().supports_msi_masking();
    }

    /* Ask the platform for a chunk of MSI compatible IRQs */
    DEBUG_ASSERT(!irq_.msi->irq_block_.allocated);
    res = bus_drv_.platform().AllocMsiBlock(requested_irqs,
                                            irq_.msi->is64Bit(),
                                            false,  /* is_msix == false */
                                            &irq_.msi->irq_block_);
    if (res != ZX_OK) {
        LTRACEF("Failed to allocate a block of %u MSI IRQs for device "
                "%02x:%02x.%01x (res %d)\n",
                requested_irqs, bus_id_, dev_id_, func_id_, res);
        goto bailout;
    }

    /* Allocate our handler table */
    res = AllocIrqHandlers(requested_irqs, initially_masked);
    if (res != ZX_OK)
        goto bailout;

    /* Record our new IRQ mode */
    irq_.mode = PCIE_IRQ_MODE_MSI;

    /* Program the target write transaction into the MSI registers. As a side
     * effect, this will ensure that...
     *
     * 1) MSI mode has been disabled at the top level
     * 2) Each IRQ has been masked at system level (if supported)
     * 3) Each IRQ has been masked at the PCI PVM level (if supported)
     */
    DEBUG_ASSERT(irq_.msi->irq_block_.allocated);
    SetMsiTarget(irq_.msi->irq_block_.tgt_addr, irq_.msi->irq_block_.tgt_data);

    /* Properly program the multi-message enable field in the control register */
    SetMsiMultiMessageEnb(requested_irqs);

    /* Register each IRQ with the dispatcher */
    DEBUG_ASSERT(irq_.handler_count <= irq_.msi->irq_block_.num_irq);
    for (uint i = 0; i < irq_.handler_count; ++i) {
        bus_drv_.platform().RegisterMsiHandler(&irq_.msi->irq_block_,
                                               i,
                                               PcieDevice::MsiIrqHandlerThunk,
                                               irq_.handlers + i);
    }

    /* Enable MSI at the top level */
    SetMsiEnb(true);

bailout:
    if (res != ZX_OK)
        LeaveMsiIrqMode();

    return res;
}

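// Per-vector MSI dispatch. The vector is masked (when masking is supported)
// for the duration of the registered handler, then unmasked again unless the
// handler asks for it to stay masked via PCIE_IRQRET_MASK.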
void PcieDevice::MsiIrqHandler(pcie_irq_handler_state_t& hstate) {
    DEBUG_ASSERT(irq_.msi);
    /* No need to save IRQ state; we are in an IRQ handler at the moment. */
    AutoSpinLockNoIrqSave handler_lock(&hstate.lock);

    /* Mask our IRQ if we can. */
    bool was_masked;
    if (bus_drv_.platform().supports_msi_masking() || irq_.msi->has_pvm()) {
        was_masked = MaskUnmaskMsiIrqLocked(hstate.pci_irq_id, true);
    } else {
        DEBUG_ASSERT(!hstate.masked);
        was_masked = false;
    }

    /* If the IRQ was masked or the handler removed by the time we got here,
     * leave the IRQ masked, unlock and get out. */
    if (was_masked || !hstate.handler)
        return;

    /* Dispatch */
    pcie_irq_handler_retval_t irq_ret = hstate.handler(*this, hstate.pci_irq_id, hstate.ctx);

    /* Re-enable the IRQ if asked to do so */
    if (!(irq_ret & PCIE_IRQRET_MASK))
        MaskUnmaskMsiIrqLocked(hstate.pci_irq_id, false);
}

interrupt_eoi PcieDevice::MsiIrqHandlerThunk(void* arg) {
    DEBUG_ASSERT(arg);
    auto& hstate = *(reinterpret_cast<pcie_irq_handler_state_t*>(arg));
    DEBUG_ASSERT(hstate.dev);
    hstate.dev->MsiIrqHandler(hstate);
    return IRQ_EOI_DEACTIVATE;
}

/******************************************************************************
 *
 * Internal implementation of the Kernel facing API.
 *
 ******************************************************************************/
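// Report the capabilities (max vector count and per-vector masking support)
// of the requested IRQ mode for this device, without changing any state.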
zx_status_t PcieDevice::QueryIrqModeCapabilitiesLocked(pcie_irq_mode_t mode,
                                                       pcie_irq_mode_caps_t* out_caps) const {
    DEBUG_ASSERT(plugged_in_);
    DEBUG_ASSERT(dev_lock_.IsHeld());
    DEBUG_ASSERT(out_caps);

    memset(out_caps, 0, sizeof(*out_caps));

    switch (mode) {
    // All devices always support "DISABLED". No need to set the max_irqs to
    // zero or the PVM supported flag to false; the memset has taken care of
    // this for us already.
    case PCIE_IRQ_MODE_DISABLED:
        return ZX_OK;

    case PCIE_IRQ_MODE_LEGACY:
        if (!irq_.legacy.pin)
            return ZX_ERR_NOT_SUPPORTED;

        out_caps->max_irqs = 1;
        out_caps->per_vector_masking_supported = true;
        break;

    case PCIE_IRQ_MODE_MSI:
        /* If the platform does not support MSI, then we don't support MSI,
         * even if the device does. */
        if (!bus_drv_.platform().supports_msi())
            return ZX_ERR_NOT_SUPPORTED;

        /* If the device supports MSI, it will have a pointer to the control
         * structure in config. */
        if (!irq_.msi || !irq_.msi->is_valid())
            return ZX_ERR_NOT_SUPPORTED;

        /* We support PVM if either the device does, or if the platform is
         * capable of masking and unmasking individual IRQs from an MSI block
         * allocation. */
        out_caps->max_irqs = irq_.msi->max_irqs();
        out_caps->per_vector_masking_supported = irq_.msi->has_pvm()
                                              || (bus_drv_.platform().supports_msi_masking());
        break;

    case PCIE_IRQ_MODE_MSI_X:
        /* If the platform does not support MSI, then we don't support MSI-X,
         * even if the device does. */
        if (!bus_drv_.platform().supports_msi())
            return ZX_ERR_NOT_SUPPORTED;

        /* TODO(johngro) : finish MSI-X implementation. */
        return ZX_ERR_NOT_SUPPORTED;

    default:
        return ZX_ERR_INVALID_ARGS;
    }

    return ZX_OK;
}

zx_status_t PcieDevice::GetIrqModeLocked(pcie_irq_mode_info_t* out_info) const {
    DEBUG_ASSERT(plugged_in_);
    DEBUG_ASSERT(dev_lock_.IsHeld());
    DEBUG_ASSERT(out_info);

    out_info->mode = irq_.mode;
    out_info->max_handlers = irq_.handler_count;
    out_info->registered_handlers = irq_.registered_handler_count;

    return ZX_OK;
}

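// Change the device's IRQ mode. The device is first returned to the DISABLED
// state (tearing down whatever mode it was in), then the requested mode is
// entered. On failure, the device is left with its IRQs disabled.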
zx_status_t PcieDevice::SetIrqModeLocked(pcie_irq_mode_t mode, uint requested_irqs) {
    DEBUG_ASSERT(plugged_in_);
    DEBUG_ASSERT(dev_lock_.IsHeld());

    // Ensure the mode selection is valid
    if (mode >= PCIE_IRQ_MODE_COUNT) {
        return ZX_ERR_INVALID_ARGS;
    }

    // We need a valid number of IRQs for any mode that isn't strictly
    // disabling the device's IRQs.
    if (mode != PCIE_IRQ_MODE_DISABLED && requested_irqs < 1) {
        return ZX_ERR_INVALID_ARGS;
    }

    // Leave our present IRQ mode
    switch (irq_.mode) {
    case PCIE_IRQ_MODE_LEGACY:
        DEBUG_ASSERT(list_in_list(&irq_.legacy.shared_handler_node));
        LeaveLegacyIrqMode();
        break;

    case PCIE_IRQ_MODE_MSI:
        DEBUG_ASSERT(irq_.msi);
        DEBUG_ASSERT(irq_.msi->is_valid());
        DEBUG_ASSERT(irq_.msi->irq_block_.allocated);
        LeaveMsiIrqMode();
        break;

    // Right now, there should be no way to get into MSI-X mode
    case PCIE_IRQ_MODE_MSI_X:
        return ZX_ERR_NOT_SUPPORTED;

    // If we're disabled we have no work to do besides some sanity checks
    case PCIE_IRQ_MODE_DISABLED:
    default:
        DEBUG_ASSERT(!irq_.handlers);
        DEBUG_ASSERT(!irq_.handler_count);
        break;
    }

    // At this point we should be in the disabled state and can enable
    // the requested IRQ mode.
    switch (mode) {
    case PCIE_IRQ_MODE_DISABLED: return ZX_OK;
    case PCIE_IRQ_MODE_LEGACY:   return EnterLegacyIrqMode(requested_irqs);
    case PCIE_IRQ_MODE_MSI:      return EnterMsiIrqMode(requested_irqs);
    default:                     return ZX_ERR_NOT_SUPPORTED;
    }
}

zx_status_t PcieDevice::RegisterIrqHandlerLocked(uint irq_id,
                                                 pcie_irq_handler_fn_t handler,
                                                 void* ctx) {
    DEBUG_ASSERT(plugged_in_);
    DEBUG_ASSERT(dev_lock_.IsHeld());

    /* Cannot register a handler if we are currently disabled */
    if (irq_.mode == PCIE_IRQ_MODE_DISABLED)
        return ZX_ERR_BAD_STATE;

    DEBUG_ASSERT(irq_.handlers);
    DEBUG_ASSERT(irq_.handler_count);

    /* Make sure that the IRQ ID is within range */
    if (irq_id >= irq_.handler_count)
        return ZX_ERR_INVALID_ARGS;

    /* Looks good; register (or unregister) the handler and we are done. */
    pcie_irq_handler_state_t& hstate = irq_.handlers[irq_id];

    /* Update our registered handler bookkeeping. Perform some sanity checks as we do so */
    if (hstate.handler) {
        DEBUG_ASSERT(irq_.registered_handler_count);
        if (!handler)
            irq_.registered_handler_count--;
    } else {
        if (handler)
            irq_.registered_handler_count++;
    }
    DEBUG_ASSERT(irq_.registered_handler_count <= irq_.handler_count);

    {
        AutoSpinLock handler_lock(&hstate.lock);
        hstate.handler = handler;
        hstate.ctx = handler ? ctx : nullptr;
    }

    return ZX_OK;
}

zx_status_t PcieDevice::MaskUnmaskIrqLocked(uint irq_id, bool mask) {
    DEBUG_ASSERT(plugged_in_);
    DEBUG_ASSERT(dev_lock_.IsHeld());

    /* Cannot manipulate mask status while in the DISABLED state */
    if (irq_.mode == PCIE_IRQ_MODE_DISABLED)
        return ZX_ERR_BAD_STATE;

    DEBUG_ASSERT(irq_.handlers);
    DEBUG_ASSERT(irq_.handler_count);

    /* Make sure that the IRQ ID is within range */
    if (irq_id >= irq_.handler_count)
        return ZX_ERR_INVALID_ARGS;

    /* If we are unmasking (enabling), then we need to make sure that there is a
     * handler in place for the IRQ we are enabling. */
    pcie_irq_handler_state_t& hstate = irq_.handlers[irq_id];
    if (!mask && !hstate.handler)
        return ZX_ERR_BAD_STATE;

    /* OK, everything looks good. Go ahead and make the change based on the
     * mode we are currently in. */
    switch (irq_.mode) {
    case PCIE_IRQ_MODE_LEGACY: return MaskUnmaskLegacyIrq(mask);
    case PCIE_IRQ_MODE_MSI:    return MaskUnmaskMsiIrq(irq_id, mask);
    case PCIE_IRQ_MODE_MSI_X:  return ZX_ERR_NOT_SUPPORTED;
    default:
        DEBUG_ASSERT(false); /* This should be un-possible! */
        return ZX_ERR_INTERNAL;
    }

    return ZX_OK;
}

/******************************************************************************
 *
 * Kernel API; prototypes in dev/pcie_irqs.h
 *
 ******************************************************************************/
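// A typical caller drives these entry points in sequence: query a mode's
// capabilities, select the mode, register handlers, then unmask. An
// illustrative sketch (hypothetical driver code, not from this file):
//
//   pcie_irq_mode_caps_t caps;
//   if ((dev->QueryIrqModeCapabilities(PCIE_IRQ_MODE_MSI, &caps) == ZX_OK) &&
//       (caps.max_irqs >= 1) &&
//       (dev->SetIrqMode(PCIE_IRQ_MODE_MSI, 1) == ZX_OK)) {
//       dev->RegisterIrqHandler(0, MyDriverIrqHandler, my_ctx);  // hypothetical handler/ctx
//       dev->MaskUnmaskIrq(0, false);  // unmask vector 0
//   }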
zx_status_t PcieDevice::QueryIrqModeCapabilities(pcie_irq_mode_t mode,
                                                 pcie_irq_mode_caps_t* out_caps) const {
    if (!out_caps)
        return ZX_ERR_INVALID_ARGS;

    AutoLock dev_lock(&dev_lock_);

    return (plugged_in_ && !disabled_)
        ? QueryIrqModeCapabilitiesLocked(mode, out_caps)
        : ZX_ERR_BAD_STATE;
}

zx_status_t PcieDevice::GetIrqMode(pcie_irq_mode_info_t* out_info) const {
    if (!out_info)
        return ZX_ERR_INVALID_ARGS;

    AutoLock dev_lock(&dev_lock_);

    return (plugged_in_ && !disabled_)
        ? GetIrqModeLocked(out_info)
        : ZX_ERR_BAD_STATE;
}

zx_status_t PcieDevice::SetIrqMode(pcie_irq_mode_t mode, uint requested_irqs) {
    AutoLock dev_lock(&dev_lock_);

    return ((mode == PCIE_IRQ_MODE_DISABLED) || (plugged_in_ && !disabled_))
        ? SetIrqModeLocked(mode, requested_irqs)
        : ZX_ERR_BAD_STATE;
}

zx_status_t PcieDevice::RegisterIrqHandler(uint irq_id, pcie_irq_handler_fn_t handler, void* ctx) {
    AutoLock dev_lock(&dev_lock_);

    return (plugged_in_ && !disabled_)
        ? RegisterIrqHandlerLocked(irq_id, handler, ctx)
        : ZX_ERR_BAD_STATE;
}

zx_status_t PcieDevice::MaskUnmaskIrq(uint irq_id, bool mask) {
    AutoLock dev_lock(&dev_lock_);

    return (mask || (plugged_in_ && !disabled_))
        ? MaskUnmaskIrqLocked(irq_id, mask)
        : ZX_ERR_BAD_STATE;
}


// Map from a device's interrupt pin ID to the proper system IRQ ID. Follow the
// PCIe graph up to the root, swizzling as we traverse PCIe switches,
// PCIe-to-PCI bridges, and native PCI-to-PCI bridges. Once we hit the root,
// perform the final remapping using the platform supplied remapping routine.
//
// Platform independent swizzling behavior is documented in the PCIe base
// specification in section 2.2.8.1 and Table 2-20.
//
// Platform dependent remapping is an exercise for the reader. FWIW: PC
// architectures use the _PRT tables in ACPI to perform the remapping.
//
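// For example: a device at device ID 2 asserting INTB (0-based pin index 1)
// behind a single PCI-to-PCI bridge presents as pin index (1 + 2) % 4 == 3
// (INTD) on the bridge's upstream bus.
//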
zx_status_t PcieDevice::MapPinToIrqLocked(fbl::RefPtr<PcieUpstreamNode>&& upstream) {
    DEBUG_ASSERT(dev_lock_.IsHeld());

    if (!legacy_irq_pin() || (legacy_irq_pin() > PCIE_MAX_LEGACY_IRQ_PINS))
        return ZX_ERR_BAD_STATE;

    auto dev = fbl::WrapRefPtr(this);
    uint pin = legacy_irq_pin() - 1;  // Change to 0-based indexing

    // Walk up the PCI/PCIe tree, applying the swizzling rules as we go. Stop
    // when we reach the device which is hanging off of the root bus/root
    // complex. At this point, platform specific swizzling takes over.
    while ((upstream != nullptr) &&
           (upstream->type() == PcieUpstreamNode::Type::BRIDGE)) {
        // TODO(johngro) : Eliminate the null-check of bridge below. Currently,
        // it is needed because we have gcc/g++'s "null-dereference" warning
        // turned on, and because of the potentially offsetting nature of static
        // casting, the compiler cannot be sure that bridge is non-null, just
        // because upstream was non-null (check in the while predicate, above).
        // Even adding explicit checks to the Downcast method in RefPtr<> does
        // not seem to satisfy it.
        //
        // Some potential future options include...
        // 1) Change this to DEBUG_ASSERT and turn off the null-dereference
        //    warning in release builds.
        // 2) Wait until GCC becomes smart enough to figure this out.
        // 3) Switch completely to clang (assuming that clang does not have
        //    similar problems).
        auto bridge = fbl::RefPtr<PcieBridge>::Downcast(ktl::move(upstream));
        if (bridge == nullptr)
            return ZX_ERR_INTERNAL;

        // We need to swizzle every time we pass through...
        // 1) A PCI-to-PCI bridge (real or virtual)
        // 2) A PCIe-to-PCI bridge
        // 3) The Upstream port of a switch.
        //
        // We do NOT swizzle when we pass through...
        // 1) A root port hanging off the root complex. (any swizzling here is up
        //    to the platform implementation)
        // 2) A Downstream switch port. Since downstream PCIe switch ports are
        //    only permitted to have a single device located at position 0 on
        //    their "bus", it does not really matter if we do the swizzle or
        //    not, since it would turn out to be an identity transformation
        //    anyway.
        switch (bridge->pcie_device_type()) {
        // UNKNOWN devices are devices which did not have a PCI Express
        // Capabilities structure in their capabilities list. Since every
        // device we pass through on the way up the tree should be a device
        // with a Type 1 header, these should be PCI-to-PCI bridges (real or
        // virtual)
        case PCIE_DEVTYPE_UNKNOWN:
        case PCIE_DEVTYPE_SWITCH_UPSTREAM_PORT:
        case PCIE_DEVTYPE_PCIE_TO_PCI_BRIDGE:
        case PCIE_DEVTYPE_PCI_TO_PCIE_BRIDGE:
            pin = (pin + dev->dev_id()) % PCIE_MAX_LEGACY_IRQ_PINS;
            break;

        default:
            break;
        }

        // Climb one branch higher up the tree
        dev = ktl::move(bridge);
        upstream = dev->GetUpstream();
    }

    // If our upstream is ever null as we climb the tree, then something must
    // have been unplugged as we were climbing.
    if (upstream == nullptr)
        return ZX_ERR_BAD_STATE;

    // We have hit the root of the tree. Something is very wrong if our
    // UpstreamNode is not, in fact, a root.
    if (upstream->type() != PcieUpstreamNode::Type::ROOT) {
        TRACEF("Failed to map legacy pin to platform IRQ ID for dev "
               "%02x:%02x.%01x (pin %u). Top of the device tree "
               "(managed bus ID 0x%02x) does not appear to be either a root or a "
               "bridge! (type %u)\n",
               bus_id_, dev_id_, func_id_, irq_.legacy.pin,
               upstream->managed_bus_id(), static_cast<uint>(upstream->type()));
        return ZX_ERR_BAD_STATE;
    }

    // TODO(johngro) : Eliminate the null-check of root below. See the TODO for
    // the downcast of upstream -> bridge above for details.
    auto root = fbl::RefPtr<PcieRoot>::Downcast(ktl::move(upstream));
    if (root == nullptr)
        return ZX_ERR_INTERNAL;
    return root->Swizzle(dev->dev_id(), dev->func_id(), pin, &irq_.legacy.irq_id);
}

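// One-time legacy IRQ setup for a newly discovered device: read the interrupt
// pin from config, map it to a system IRQ ID, and attach to (or create) the
// shared handler for that IRQ.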
zx_status_t PcieDevice::InitLegacyIrqStateLocked(PcieUpstreamNode& upstream) {
    DEBUG_ASSERT(dev_lock_.IsHeld());
    DEBUG_ASSERT(cfg_);
    DEBUG_ASSERT(irq_.legacy.shared_handler == nullptr);

    // Make certain that the device's legacy IRQ (if any) has been disabled.
    ModifyCmdLocked(0u, PCIE_CFG_COMMAND_INT_DISABLE);

    // Does config say that we have a legacy IRQ pin? If so use the bus driver
    // to map it to the system IRQ ID, then grab a hold of the shared legacy IRQ
    // handler.
    irq_.legacy.pin = cfg_->Read(PciConfig::kInterruptPin);
    if (irq_.legacy.pin) {
        zx_status_t res = MapPinToIrqLocked(fbl::RefPtr<PcieUpstreamNode>(&upstream));
        if (res != ZX_OK) {
            TRACEF("Failed to map legacy pin to platform IRQ ID for "
                   "dev %02x:%02x.%01x (pin %u)\n",
                   bus_id_, dev_id_, func_id_,
                   irq_.legacy.pin);
            return res;
        }

        irq_.legacy.shared_handler = bus_drv_.FindLegacyIrqHandler(irq_.legacy.irq_id);
        if (irq_.legacy.shared_handler == nullptr) {
            TRACEF("Failed to find or create shared legacy IRQ handler for "
                   "dev %02x:%02x.%01x (pin %u, irq_id %u)\n",
                   bus_id_, dev_id_, func_id_,
                   irq_.legacy.pin, irq_.legacy.irq_id);
            return ZX_ERR_NO_RESOURCES;
        }
    }

    return ZX_OK;
}

void PcieBusDriver::ShutdownIrqs() {
    /* Shut off all of our legacy IRQs and free all of our bookkeeping */
    AutoLock lock(&legacy_irq_list_lock_);
    legacy_irq_list_.clear();
}

fbl::RefPtr<SharedLegacyIrqHandler> PcieBusDriver::FindLegacyIrqHandler(uint irq_id) {
    /* Search to see if we have already created a shared handler for this
     * system-level IRQ id */
    AutoLock lock(&legacy_irq_list_lock_);

    auto iter = legacy_irq_list_.begin();
    while (iter != legacy_irq_list_.end()) {
        if (irq_id == iter->irq_id())
            return iter.CopyPointer();
        ++iter;
    }

    auto handler = SharedLegacyIrqHandler::Create(irq_id);
    if (handler != nullptr)
        legacy_irq_list_.push_front(handler);

    return handler;
}