/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-10-24     GuEe-GUI     first version
 */

#include <rtthread.h>
#include <rtservice.h>

#define DBG_TAG "rtdm.pci"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <drivers/pci.h>
#include <drivers/misc.h>
#include <drivers/core/bus.h>

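/*
 * Raw spinlock helpers: they operate directly on the architecture-level
 * lock embedded in struct rt_spinlock, and are used below when walking
 * the bus and device lists.
 */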
rt_inline void spin_lock(struct rt_spinlock *spinlock)
{
    rt_hw_spin_lock(&spinlock->lock);
}

rt_inline void spin_unlock(struct rt_spinlock *spinlock)
{
    rt_hw_spin_unlock(&spinlock->lock);
}

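/*
 * Return the PCI domain (segment) number of the host bridge that owns the
 * device's bus, or RT_UINT32_MAX if the device or its bridge is unknown.
 */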
rt_uint32_t rt_pci_domain(struct rt_pci_device *pdev)
{
    struct rt_pci_host_bridge *host_bridge;

    if (!pdev)
    {
        return RT_UINT32_MAX;
    }

    if ((host_bridge = rt_pci_find_host_bridge(pdev->bus)))
    {
        return host_bridge->domain;
    }

    return RT_UINT32_MAX;
}

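/*
 * Walk the standard capability list: each entry holds an 8-bit capability
 * ID and an 8-bit pointer to the next entry. The ttl counter bounds the
 * walk so a malformed (looping) list cannot hang the caller; an offset
 * below 0x40 or an ID of 0xff terminates the search.
 */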
static rt_uint8_t pci_find_next_cap_ttl(struct rt_pci_bus *bus,
        rt_uint32_t devfn, rt_uint8_t pos, int cap, int *ttl)
{
    rt_uint8_t ret = 0, id;
    rt_uint16_t ent;

    rt_pci_bus_read_config_u8(bus, devfn, pos, &pos);

    while ((*ttl)--)
    {
        if (pos < 0x40)
        {
            break;
        }

        pos &= ~3;
        rt_pci_bus_read_config_u16(bus, devfn, pos, &ent);

        id = ent & 0xff;
        if (id == 0xff)
        {
            break;
        }
        if (id == cap)
        {
            ret = pos;
            break;
        }
        pos = (ent >> 8);
    }

    return ret;
}

static rt_uint8_t pci_find_next_cap(struct rt_pci_bus *bus,
        rt_uint32_t devfn, rt_uint8_t pos, int cap)
{
    int ttl = RT_PCI_FIND_CAP_TTL;

    return pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

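/*
 * Return the config-space offset of the first capability pointer for the
 * given header type, or 0 if the status register reports no capability
 * list. CardBus bridges keep the pointer at a different offset than
 * normal devices and PCI-PCI bridges.
 */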
static rt_uint8_t pci_bus_find_cap_start(struct rt_pci_bus *bus,
        rt_uint32_t devfn, rt_uint8_t hdr_type)
{
    rt_uint8_t res = 0;
    rt_uint16_t status;

    rt_pci_bus_read_config_u16(bus, devfn, PCIR_STATUS, &status);

    if (status & PCIM_STATUS_CAPPRESENT)
    {
        switch (hdr_type)
        {
        case PCIM_HDRTYPE_NORMAL:
        case PCIM_HDRTYPE_BRIDGE:
            res = PCIR_CAP_PTR;
            break;

        case PCIM_HDRTYPE_CARDBUS:
            res = PCIR_CAP_PTR_2;
            break;
        }
    }

    return res;
}

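/*
 * Find a capability by ID for a (bus, devfn) pair that may not have a
 * struct rt_pci_device yet. Returns the capability offset, 0 if the
 * capability is absent, or RT_UINT8_MAX if the bus pointer is invalid.
 * The two variants below do the same lookup through an existing device.
 */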
rt_uint8_t rt_pci_bus_find_capability(struct rt_pci_bus *bus, rt_uint32_t devfn, int cap)
{
    rt_uint8_t hdr_type, ret = RT_UINT8_MAX;

    if (bus)
    {
        rt_pci_bus_read_config_u8(bus, devfn, PCIR_HDRTYPE, &hdr_type);

        ret = pci_bus_find_cap_start(bus, devfn, hdr_type & PCIM_HDRTYPE);

        if (ret)
        {
            ret = pci_find_next_cap(bus, devfn, ret, cap);
        }
    }

    return ret;
}

rt_uint8_t rt_pci_find_capability(struct rt_pci_device *pdev, int cap)
{
    rt_uint8_t res = RT_UINT8_MAX;

    if (pdev)
    {
        res = pci_bus_find_cap_start(pdev->bus, pdev->devfn, pdev->hdr_type);

        if (res)
        {
            res = pci_find_next_cap(pdev->bus, pdev->devfn, res, cap);
        }
    }

    return res;
}

rt_uint8_t rt_pci_find_next_capability(struct rt_pci_device *pdev, rt_uint8_t pos, int cap)
{
    rt_uint8_t res = RT_UINT8_MAX;

    if (pdev)
    {
        res = pci_find_next_cap(pdev->bus, pdev->devfn, pos + PCICAP_NEXTPTR, cap);
    }

    return res;
}

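/*
 * Extended (PCIe) capabilities live above the first 256 bytes of config
 * space. Each header packs a 16-bit ID, a 4-bit version and a 12-bit
 * pointer to the next entry; a bounded walk over those headers implements
 * both lookups below. Returns the offset on success or 0 on failure.
 */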
rt_uint16_t rt_pci_find_ext_capability(struct rt_pci_device *pdev, int cap)
{
    return rt_pci_find_ext_next_capability(pdev, 0, cap);
}

rt_uint16_t rt_pci_find_ext_next_capability(struct rt_pci_device *pdev, rt_uint16_t pos, int cap)
{
    int ttl;
    rt_uint32_t header;
    rt_uint16_t start = pos;

    /* minimum 8 bytes per capability */
    ttl = ((PCIE_REGMAX + 1) - (PCI_REGMAX + 1)) / 8;

    if (pdev->cfg_size <= PCI_REGMAX + 1)
    {
        return 0;
    }

    if (!pos)
    {
        pos = PCI_REGMAX + 1;
    }

    if (rt_pci_read_config_u32(pdev, pos, &header))
    {
        return 0;
    }

    /*
     * If we have no capabilities, this is indicated by cap ID,
     * cap version and next pointer all being 0.
     */
    if (header == 0)
    {
        return 0;
    }

    while (ttl-- > 0)
    {
        if (PCI_EXTCAP_ID(header) == cap && pos != start)
        {
            return pos;
        }

        pos = PCI_EXTCAP_NEXTPTR(header);

        if (pos < PCI_REGMAX + 1)
        {
            break;
        }

        if (rt_pci_read_config_u32(pdev, pos, &header))
        {
            break;
        }
    }

    return 0;
}

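/*
 * Set or clear the Bus Master Enable bit in the command register so the
 * device can (or can no longer) issue DMA, and cache the state in
 * pdev->busmaster. The register is only written when the bit changes.
 */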
static void pci_set_master(struct rt_pci_device *pdev, rt_bool_t enable)
{
    rt_uint16_t old_cmd, cmd;

    rt_pci_read_config_u16(pdev, PCIR_COMMAND, &old_cmd);

    if (enable)
    {
        cmd = old_cmd | PCIM_CMD_BUSMASTEREN;
    }
    else
    {
        cmd = old_cmd & ~PCIM_CMD_BUSMASTEREN;
    }

    if (cmd != old_cmd)
    {
        rt_pci_write_config_u16(pdev, PCIR_COMMAND, cmd);
    }

    pdev->busmaster = !!enable;
}

void rt_pci_set_master(struct rt_pci_device *pdev)
{
    if (pdev)
    {
        pci_set_master(pdev, RT_TRUE);
    }
}

void rt_pci_clear_master(struct rt_pci_device *pdev)
{
    if (pdev)
    {
        pci_set_master(pdev, RT_FALSE);
    }
}

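/*
 * Enable or disable the device's legacy INTx line by toggling the
 * Interrupt Disable bit in the command register.
 */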
void rt_pci_intx(struct rt_pci_device *pdev, rt_bool_t enable)
{
    rt_uint16_t pci_command, new;

    if (!pdev)
    {
        return;
    }

    rt_pci_read_config_u16(pdev, PCIR_COMMAND, &pci_command);

    if (enable)
    {
        new = pci_command & ~PCIM_CMD_INTxDIS;
    }
    else
    {
        new = pci_command | PCIM_CMD_INTxDIS;
    }

    if (new != pci_command)
    {
        rt_pci_write_config_u16(pdev, PCIR_COMMAND, new);
    }
}

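/*
 * Read the command/status pair as one 32-bit access under the global PCI
 * lock, then mask or unmask INTx only when the INTx status bit matches the
 * requested operation. Returns RT_TRUE when the device triggered the
 * interrupt (masking) or when no interrupt is pending (unmasking).
 */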
static rt_bool_t pci_check_and_set_intx_mask(struct rt_pci_device *pdev, rt_bool_t mask)
{
    rt_ubase_t level;
    rt_bool_t irq_pending;
    rt_bool_t res = RT_TRUE;
    rt_uint16_t origcmd, newcmd;
    rt_uint32_t cmd_status_dword;
    struct rt_pci_bus *bus = pdev->bus;

    level = rt_spin_lock_irqsave(&rt_pci_lock);

    bus->ops->read(bus, pdev->devfn, PCIR_COMMAND, 4, &cmd_status_dword);

    irq_pending = (cmd_status_dword >> 16) & PCIM_STATUS_INTxSTATE;

    /*
     * Check interrupt status register to see whether our device
     * triggered the interrupt (when masking) or the next IRQ is
     * already pending (when unmasking).
     */
    if (mask != irq_pending)
    {
        res = RT_FALSE;
    }
    else
    {
        origcmd = cmd_status_dword;
        newcmd = origcmd & ~PCIM_CMD_INTxDIS;

        if (mask)
        {
            newcmd |= PCIM_CMD_INTxDIS;
        }
        if (newcmd != origcmd)
        {
            bus->ops->write(bus, pdev->devfn, PCIR_COMMAND, 2, newcmd);
        }
    }

    rt_spin_unlock_irqrestore(&rt_pci_lock, level);

    return res;
}

rt_bool_t rt_pci_check_and_mask_intx(struct rt_pci_device *pdev)
{
    rt_bool_t res = RT_FALSE;

    if (pdev)
    {
        res = pci_check_and_set_intx_mask(pdev, RT_TRUE);
    }

    return res;
}

rt_bool_t rt_pci_check_and_unmask_intx(struct rt_pci_device *pdev)
{
    rt_bool_t res = RT_FALSE;

    if (pdev)
    {
        res = pci_check_and_set_intx_mask(pdev, RT_FALSE);
    }

    return res;
}

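/*
 * Mask the device interrupt: disable INTx at the device first, then mask
 * the line at the interrupt controller, but only if no other handler is
 * still attached to the (possibly shared) PIC IRQ.
 */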
void rt_pci_irq_mask(struct rt_pci_device *pdev)
{
    if (pdev)
    {
        rt_bool_t unused;
        struct rt_pic_irq *pirq;

        rt_pci_intx(pdev, RT_FALSE);

        pirq = rt_pic_find_pirq(pdev->intx_pic, pdev->irq);
        RT_ASSERT(pirq != RT_NULL);

        rt_hw_spin_lock(&pirq->rw_lock.lock);
        unused = rt_list_isempty(&pirq->isr.list);
        rt_hw_spin_unlock(&pirq->rw_lock.lock);

        if (unused)
        {
            rt_hw_interrupt_mask(pdev->irq);
        }
    }
}

void rt_pci_irq_unmask(struct rt_pci_device *pdev)
{
    if (pdev)
    {
        rt_hw_interrupt_umask(pdev->irq);
        rt_pci_intx(pdev, RT_TRUE);
    }
}

struct rt_pci_bus *rt_pci_find_root_bus(struct rt_pci_bus *bus)
{
    if (!bus)
    {
        return RT_NULL;
    }

    while (bus->parent)
    {
        bus = bus->parent;
    }

    return bus;
}

struct rt_pci_host_bridge *rt_pci_find_host_bridge(struct rt_pci_bus *bus)
{
    if (!bus)
    {
        return RT_NULL;
    }

    if ((bus = rt_pci_find_root_bus(bus)))
    {
        return rt_container_of(bus->host_bridge, struct rt_pci_host_bridge, parent);
    }

    return RT_NULL;
}

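/*
 * Standard PCI-PCI bridge INTx swizzle: a device's INTx pin (1-4, i.e.
 * INTA-INTD) is rotated by its slot number at each bridge it sits behind.
 * ARI devices always use slot 0.
 */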
rt_uint8_t rt_pci_irq_intx(struct rt_pci_device *pdev, rt_uint8_t pin)
{
    int slot = 0;

    if (!pdev->ari_enabled)
    {
        slot = RT_PCI_SLOT(pdev->devfn);
    }

    return (((pin - 1) + slot) % 4) + 1;
}

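/*
 * Walk up from the device to the root bus, applying the INTx swizzle at
 * every bridge crossed. On return, *pinp holds the pin as seen at the
 * root and the return value is the root-level slot number.
 */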
rt_uint8_t rt_pci_irq_slot(struct rt_pci_device *pdev, rt_uint8_t *pinp)
{
    rt_uint8_t pin = *pinp;

    while (!rt_pci_is_root_bus(pdev->bus))
    {
        pin = rt_pci_irq_intx(pdev, pin);
        pdev = pdev->bus->self;
    }

    *pinp = pin;

    return RT_PCI_SLOT(pdev->devfn);
}

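/*
 * Initialize the allocation cursor (bus_start) of every bridge window and
 * log the window layout. Returns -RT_EEMPTY when the host bridge declares
 * no regions at all.
 */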
rt_err_t rt_pci_region_setup(struct rt_pci_host_bridge *host_bridge)
{
    rt_err_t err = host_bridge->bus_regions_nr == 0 ? -RT_EEMPTY : RT_EOK;

    for (int i = 0; i < host_bridge->bus_regions_nr; ++i)
    {
        struct rt_pci_bus_region *region = &host_bridge->bus_regions[i];
        /*
         * Avoid allocating PCI resources from address 0 -- this is illegal
         * according to PCI 2.1. Use a reasonable starting value of 0x1000
         * instead if the bus start address is below 0x1000.
         */
        region->bus_start = rt_max_t(rt_size_t, 0x1000, region->phy_addr);

        LOG_I("Bus %s region(%d):",
            region->flags == PCI_BUS_REGION_F_MEM ? "Memory" :
            (region->flags == PCI_BUS_REGION_F_PREFETCH ? "Prefetchable Mem" :
            (region->flags == PCI_BUS_REGION_F_IO ? "I/O" : "Unknown")), i);
        LOG_I("  cpu:      [%p, %p]", region->cpu_addr, (region->cpu_addr + region->size - 1));
        LOG_I("  physical: [%p, %p]", region->phy_addr, (region->phy_addr + region->size - 1));
    }

    return err;
}

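/*
 * Bump allocator over the bridge windows: pick the first region whose
 * flags match, round the cursor up to the natural power-of-two alignment
 * of `size` (the expression ((start - 1) | (size - 1)) + 1 does this,
 * since BAR sizes are powers of two), then advance the cursor past the
 * allocation. A 64-bit request that cannot be placed above 4GB is retried
 * as a 32-bit one.
 */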
struct rt_pci_bus_region *rt_pci_region_alloc(struct rt_pci_host_bridge *host_bridge,
        void **out_addr, rt_size_t size, rt_ubase_t flags, rt_bool_t mem64)
{
    struct rt_pci_bus_region *bus_region, *region = RT_NULL;

    bus_region = &host_bridge->bus_regions[0];

    for (int i = 0; i < host_bridge->bus_regions_nr; ++i, ++bus_region)
    {
        if (bus_region->flags == flags && bus_region->size > 0)
        {
            void *addr;

            region = bus_region;
            /* Round the cursor up to the next `size`-aligned boundary */
            addr = (void *)(((region->bus_start - 1) | (size - 1)) + 1);

            if ((rt_uint64_t)addr - region->phy_addr + size <= region->size)
            {
                rt_bool_t addr64 = !!rt_upper_32_bits((rt_ubase_t)addr);

                if (mem64)
                {
                    if (!addr64)
                    {
                        region = RT_NULL;

                        /* Try again */
                        continue;
                    }
                }
                else if (addr64)
                {
                    region = RT_NULL;

                    /* Try again */
                    continue;
                }

                region->bus_start = ((rt_uint64_t)addr + size);
                *out_addr = addr;
            }

            break;
        }
    }

    if (!region && mem64)
    {
        /* Retry as a 32-bit allocation */
        region = rt_pci_region_alloc(host_bridge, out_addr, size, flags, RT_FALSE);
    }

    return region;
}

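/*
 * Size and assign every BAR of one device: writing all-ones to a BAR makes
 * the device report its size mask on read-back; the decoded size is then
 * backed by an allocation from the matching bridge window and the BAR is
 * programmed with the assigned bus address. The expansion ROM is handled
 * the same way. The bus-master bit is set up front, the I/O and memory
 * enable bits are accumulated per BAR, and the command register is written
 * once at the end.
 */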
rt_err_t rt_pci_device_alloc_resource(struct rt_pci_host_bridge *host_bridge,
        struct rt_pci_device *pdev)
{
    rt_err_t err = RT_EOK;
    rt_size_t size;
    rt_ubase_t addr = 0;
    rt_uint32_t cfg;
    rt_size_t bars_nr;
    rt_uint8_t hdr_type;
    rt_bool_t prefetch = RT_FALSE;
    rt_uint16_t class, command = 0;

    for (int i = 0; i < host_bridge->bus_regions_nr; ++i)
    {
        if (host_bridge->bus_regions[i].flags == PCI_BUS_REGION_F_PREFETCH)
        {
            prefetch = RT_TRUE;
            break;
        }
    }

    rt_pci_read_config_u16(pdev, PCIR_COMMAND, &command);
    command = (command & ~(PCIM_CMD_PORTEN | PCIM_CMD_MEMEN)) | PCIM_CMD_BUSMASTEREN;
    rt_pci_read_config_u8(pdev, PCIR_HDRTYPE, &hdr_type);

    if (pdev->hdr_type != hdr_type)
    {
        LOG_W("%s may not be initialized", rt_dm_dev_get_name(&pdev->parent));
    }

    switch (hdr_type)
    {
    case PCIM_HDRTYPE_NORMAL:
        bars_nr = PCI_STD_NUM_BARS;
        break;

    case PCIM_HDRTYPE_BRIDGE:
        bars_nr = 2;
        break;

    case PCIM_HDRTYPE_CARDBUS:
        bars_nr = 0;
        break;

    default:
        bars_nr = 0;
        break;
    }

    for (int i = 0; i < bars_nr; ++i)
    {
        rt_ubase_t flags;
        rt_ubase_t bar_base;
        rt_bool_t mem64 = RT_FALSE;
        struct rt_pci_bus_region *region;

        cfg = 0;
        bar_base = PCIR_BAR(i);

        /* Write all-ones, then read back the size mask */
        rt_pci_write_config_u32(pdev, bar_base, RT_UINT32_MAX);
        rt_pci_read_config_u32(pdev, bar_base, &cfg);

        if (!cfg)
        {
            continue;
        }
        else if (cfg == RT_UINT32_MAX)
        {
            rt_pci_write_config_u32(pdev, bar_base, 0UL);
            continue;
        }

        if (cfg & PCIM_BAR_SPACE)
        {
            mem64 = RT_FALSE;
            flags = PCI_BUS_REGION_F_IO;

            size = cfg & PCIM_BAR_IO_MASK;
            size &= ~(size - 1);
        }
        else
        {
            /* memory */
            if ((cfg & PCIM_BAR_MEM_TYPE_MASK) == PCIM_BAR_MEM_TYPE_64)
            {
                /* 64bits */
                rt_uint32_t cfg64;
                rt_uint64_t bar64;

                mem64 = RT_TRUE;

                rt_pci_write_config_u32(pdev, bar_base + sizeof(rt_uint32_t), RT_UINT32_MAX);
                rt_pci_read_config_u32(pdev, bar_base + sizeof(rt_uint32_t), &cfg64);

                bar64 = ((rt_uint64_t)cfg64 << 32) | cfg;

                size = ~(bar64 & PCIM_BAR_MEM_MASK) + 1;
            }
            else
            {
                /* 32bits */
                mem64 = RT_FALSE;
                size = (rt_uint32_t)(~(cfg & PCIM_BAR_MEM_MASK) + 1);
            }

            if (prefetch && (cfg & PCIM_BAR_MEM_PREFETCH))
            {
                flags = PCI_BUS_REGION_F_PREFETCH;
            }
            else
            {
                flags = PCI_BUS_REGION_F_MEM;
            }
        }

        region = rt_pci_region_alloc(host_bridge, (void **)&addr, size, flags, mem64);

        if (region)
        {
            rt_pci_write_config_u32(pdev, bar_base, addr);

            if (mem64)
            {
                bar_base += sizeof(rt_uint32_t);
#ifdef RT_PCI_SYS_64BIT
                rt_pci_write_config_u32(pdev, bar_base, (rt_uint32_t)(addr >> 32));
#else
                /*
                 * If we are a 64-bit decoder then increment to the upper 32 bits
                 * of the bar and force it to locate in the lower 4GB of memory.
                 */
                rt_pci_write_config_u32(pdev, bar_base, 0UL);
#endif
            }

            pdev->resource[i].size = size;
            pdev->resource[i].base = region->cpu_addr + (addr - region->phy_addr);
            pdev->resource[i].flags = flags;

            if (mem64)
            {
                /* The upper half of a 64-bit BAR consumes the next slot */
                ++i;
                pdev->resource[i].flags = PCI_BUS_REGION_F_NONE;
            }
        }
        else
        {
            err = -RT_ERROR;
            LOG_W("%s alloc bar(%d) address fail", rt_dm_dev_get_name(&pdev->parent), i);
        }

        command |= (cfg & PCIM_BAR_SPACE) ? PCIM_CMD_PORTEN : PCIM_CMD_MEMEN;
    }

    if (hdr_type == PCIM_HDRTYPE_NORMAL || hdr_type == PCIM_HDRTYPE_BRIDGE)
    {
        int rom_addr = (hdr_type == PCIM_HDRTYPE_NORMAL) ? PCIR_BIOS : PCIR_BIOS_1;

        rt_pci_write_config_u32(pdev, rom_addr, 0xfffffffe);
        rt_pci_read_config_u32(pdev, rom_addr, &cfg);

        if (cfg)
        {
            size = -(cfg & ~1);

            if (rt_pci_region_alloc(host_bridge, (void **)&addr, size, PCI_BUS_REGION_F_MEM, RT_FALSE))
            {
                rt_pci_write_config_u32(pdev, rom_addr, addr);
            }
            command |= PCIM_CMD_MEMEN;

            pdev->rom.base = addr;
            pdev->rom.size = size;
            pdev->rom.flags = PCI_BUS_REGION_F_MEM;
        }
    }

    rt_pci_read_config_u16(pdev, PCIR_SUBCLASS, &class);

    if (class == PCIS_DISPLAY_VGA)
    {
        command |= PCIM_CMD_PORTEN;
    }

    rt_pci_write_config_u16(pdev, PCIR_COMMAND, command);
    rt_pci_write_config_u8(pdev, PCIR_CACHELNSZ, RT_PCI_CACHE_LINE_SIZE);
    rt_pci_write_config_u8(pdev, PCIR_LATTIMER, 0x80);

    return err;
}

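/*
 * Return the index-th BAR (counting from 1) whose flags match exactly, or
 * RT_NULL if there are fewer than `index` matching BARs.
 */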
struct rt_pci_bus_resource *rt_pci_find_bar(struct rt_pci_device *pdev, rt_ubase_t flags, int index)
{
    for (int i = 0; i < RT_PCI_BAR_NR_MAX; i++)
    {
        if (pdev->resource[i].flags == flags)
        {
            index--;
            if (index == 0)
            {
                return &pdev->resource[i];
            }
        }
    }
    return RT_NULL;
}

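/*
 * Depth-first walk over the whole bus tree, invoking `callback` on every
 * device until it returns RT_TRUE. The bus lock is dropped around each
 * callback; a reference count on the current device keeps it alive across
 * that window, which is why the loops always trail one device behind.
 */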
void rt_pci_enum_device(struct rt_pci_bus *bus,
        rt_bool_t (callback(struct rt_pci_device *, void *)), void *data)
{
    rt_bool_t is_end = RT_FALSE;
    struct rt_spinlock *lock;
    struct rt_pci_bus *parent;
    struct rt_pci_device *pdev, *last_pdev = RT_NULL;

    /* Walk the tree */
    while (bus && !is_end)
    {
        /* Go down to the bottom of the tree first */
        for (;;)
        {
            lock = &bus->lock;

            spin_lock(lock);
            if (rt_list_isempty(&bus->children_nodes))
            {
                parent = bus->parent;
                break;
            }
            /* Descend into the first child bus */
            bus = rt_list_entry(bus->children_nodes.next, struct rt_pci_bus, list);
            spin_unlock(lock);
        }

        rt_list_for_each_entry(pdev, &bus->devices_nodes, list)
        {
            if (last_pdev)
            {
                spin_unlock(lock);

                if (callback(last_pdev, data))
                {
                    spin_lock(lock);
                    --last_pdev->parent.ref_count;

                    is_end = RT_TRUE;
                    break;
                }

                spin_lock(lock);
                --last_pdev->parent.ref_count;
            }
            ++pdev->parent.ref_count;
            last_pdev = pdev;
        }

        if (!is_end && last_pdev)
        {
            spin_unlock(lock);

            if (callback(last_pdev, data))
            {
                is_end = RT_TRUE;
            }

            spin_lock(lock);
            --last_pdev->parent.ref_count;
        }
        last_pdev = RT_NULL;
        spin_unlock(lock);

        /* Go up a level, or move to the next sibling */
        while (!is_end)
        {
            lock = &bus->lock;

            if (!parent)
            {
                /* Root bus reached, the walk is done */
                bus = RT_NULL;
                break;
            }

            spin_lock(lock);
            if (bus->list.next != &parent->children_nodes)
            {
                /* There is a next sibling */
                bus = rt_list_entry(bus->list.next, struct rt_pci_bus, list);
                spin_unlock(lock);
                break;
            }

            /* Visit all devices on this bus's parent */
            rt_list_for_each_entry(pdev, &parent->devices_nodes, list)
            {
                if (last_pdev)
                {
                    spin_unlock(lock);

                    if (callback(last_pdev, data))
                    {
                        spin_lock(lock);
                        --last_pdev->parent.ref_count;

                        is_end = RT_TRUE;
                        break;
                    }

                    spin_lock(lock);
                    --last_pdev->parent.ref_count;
                }
                ++pdev->parent.ref_count;
                last_pdev = pdev;
            }

            if (!is_end && last_pdev)
            {
                spin_unlock(lock);

                if (callback(last_pdev, data))
                {
                    is_end = RT_TRUE;
                }

                spin_lock(lock);
                --last_pdev->parent.ref_count;
            }
            last_pdev = RT_NULL;

            bus = parent;
            parent = parent->parent;
            spin_unlock(lock);
        }
    }
}

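/*
 * Match one id entry against a device: PCI_ANY_ID acts as a wildcard for
 * the vendor/device/subsystem fields, and the class is compared only under
 * class_mask. rt_pci_match_ids() scans a table terminated by an entry
 * whose vendor, subsystem_vendor and class_mask are all zero.
 */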
const struct rt_pci_device_id *rt_pci_match_id(struct rt_pci_device *pdev,
        const struct rt_pci_device_id *id)
{
    if ((id->vendor == PCI_ANY_ID || id->vendor == pdev->vendor) &&
        (id->device == PCI_ANY_ID || id->device == pdev->device) &&
        (id->subsystem_vendor == PCI_ANY_ID || id->subsystem_vendor == pdev->subsystem_vendor) &&
        (id->subsystem_device == PCI_ANY_ID || id->subsystem_device == pdev->subsystem_device) &&
        !((id->class ^ pdev->class) & id->class_mask))
    {
        return id;
    }

    return RT_NULL;
}

const struct rt_pci_device_id *rt_pci_match_ids(struct rt_pci_device *pdev,
        const struct rt_pci_device_id *ids)
{
    while (ids->vendor || ids->subsystem_vendor || ids->class_mask)
    {
        if (rt_pci_match_id(pdev, ids))
        {
            return ids;
        }

        ++ids;
    }

    return RT_NULL;
}

static struct rt_bus pci_bus;

rt_err_t rt_pci_driver_register(struct rt_pci_driver *pdrv)
{
    RT_ASSERT(pdrv != RT_NULL);

    pdrv->parent.bus = &pci_bus;
#if RT_NAME_MAX > 0
    rt_strcpy(pdrv->parent.parent.name, pdrv->name);
#else
    pdrv->parent.parent.name = pdrv->name;
#endif

    return rt_driver_register(&pdrv->parent);
}

rt_err_t rt_pci_device_register(struct rt_pci_device *pdev)
{
    rt_err_t err;
    RT_ASSERT(pdev != RT_NULL);

    if ((err = rt_bus_add_device(&pci_bus, &pdev->parent)))
    {
        return err;
    }

    return RT_EOK;
}

static rt_bool_t pci_match(rt_driver_t drv, rt_device_t dev)
{
    rt_bool_t match = RT_FALSE;
    struct rt_pci_driver *pdrv = rt_container_of(drv, struct rt_pci_driver, parent);
    struct rt_pci_device *pdev = rt_container_of(dev, struct rt_pci_device, parent);

    if (pdrv->name && pdev->name)
    {
        match = rt_strcmp(pdrv->name, pdev->name) ? RT_FALSE : RT_TRUE;
    }

    if (!match)
    {
        pdev->id = rt_pci_match_ids(pdev, pdrv->ids);

        match = pdev->id ? RT_TRUE : RT_FALSE;
    }

    return match;
}

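/*
 * Bus probe hook: assign the legacy IRQ and enable D0 wake before handing
 * the device to the driver's probe; wake is disabled again on failure.
 */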
static rt_err_t pci_probe(rt_device_t dev)
{
    rt_err_t err = RT_EOK;
    struct rt_pci_driver *pdrv = rt_container_of(dev->drv, struct rt_pci_driver, parent);
    struct rt_pci_device *pdev = rt_container_of(dev, struct rt_pci_device, parent);

    rt_pci_assign_irq(pdev);
    rt_pci_enable_wake(pdev, RT_PCI_D0, RT_TRUE);

    err = pdrv->probe(pdev);

    if (err)
    {
        rt_pci_enable_wake(pdev, RT_PCI_D0, RT_FALSE);
    }

    return err;
}

static rt_err_t pci_remove(rt_device_t dev)
{
    rt_err_t err = RT_EOK;
    struct rt_pci_bus *bus;
    struct rt_pci_driver *pdrv = rt_container_of(dev->drv, struct rt_pci_driver, parent);
    struct rt_pci_device *pdev = rt_container_of(dev, struct rt_pci_device, parent);

    if (pdrv && pdrv->remove)
    {
        if ((err = pdrv->remove(pdev)))
        {
            return err;
        }
    }

    rt_pci_enable_wake(pdev, RT_PCI_D0, RT_FALSE);

    bus = pdev->bus;
    rt_pci_device_remove(pdev);
    /* Just try to remove */
    rt_pci_bus_remove(bus);

    return err;
}

static rt_err_t pci_shutdown(rt_device_t dev)
{
    struct rt_pci_bus *bus;
    struct rt_pci_driver *pdrv = rt_container_of(dev->drv, struct rt_pci_driver, parent);
    struct rt_pci_device *pdev = rt_container_of(dev, struct rt_pci_device, parent);

    if (pdrv && pdrv->shutdown)
    {
        pdrv->shutdown(pdev);
    }

    rt_pci_enable_wake(pdev, RT_PCI_D0, RT_FALSE);

    bus = pdev->bus;
    rt_pci_device_remove(pdev);
    /* Just try to remove */
    rt_pci_bus_remove(bus);

    return RT_EOK;
}

static struct rt_bus pci_bus =
{
    .name = "pci",
    .match = pci_match,
    .probe = pci_probe,
    .remove = pci_remove,
    .shutdown = pci_shutdown,
};

static int pci_bus_init(void)
{
    rt_bus_register(&pci_bus);

    return 0;
}
INIT_CORE_EXPORT(pci_bus_init);