/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2023-09-23     GuEe-GUI     first version
 */

#define DBG_TAG "pcie.dw-host"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include "pcie-dw.h"

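/*
 * The DWC MSI block groups vectors into banks of MAX_MSI_IRQS_PER_CTRL:
 * the STATUS/MASK/ENABLE registers of bank "ctrl" live at an offset of
 * ctrl * MSI_REG_CTRL_BLOCK_SIZE. Acking a vector writes its bit to the
 * (write-one-to-clear) STATUS register of its bank.
 */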
static void dw_pcie_irq_ack(struct rt_pic_irq *pirq)
{
    int hwirq = pirq->hwirq;
    rt_uint32_t res, bit, ctrl;
    struct dw_pcie_port *port = pirq->pic->priv_data;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);

    ctrl = hwirq / MAX_MSI_IRQS_PER_CTRL;
    res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
    bit = hwirq % MAX_MSI_IRQS_PER_CTRL;

    dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_STATUS + res, RT_BIT(bit));
}

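/*
 * The hardware MASK register covers a whole bank of vectors, so a shadow
 * copy (port->irq_mask[]) is kept and updated under port->lock to avoid
 * losing bits when vectors of the same bank are (un)masked concurrently.
 */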
static void dw_pcie_irq_mask(struct rt_pic_irq *pirq)
{
    rt_ubase_t level;
    int hwirq = pirq->hwirq;
    rt_uint32_t res, bit, ctrl;
    struct dw_pcie_port *port = pirq->pic->priv_data;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);

    rt_pci_msi_mask_irq(pirq);

    level = rt_spin_lock_irqsave(&port->lock);

    ctrl = hwirq / MAX_MSI_IRQS_PER_CTRL;
    res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
    bit = hwirq % MAX_MSI_IRQS_PER_CTRL;

    port->irq_mask[ctrl] |= RT_BIT(bit);
    dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, port->irq_mask[ctrl]);

    rt_spin_unlock_irqrestore(&port->lock, level);
}

static void dw_pcie_irq_unmask(struct rt_pic_irq *pirq)
{
    rt_ubase_t level;
    int hwirq = pirq->hwirq;
    rt_uint32_t res, bit, ctrl;
    struct dw_pcie_port *port = pirq->pic->priv_data;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);

    rt_pci_msi_unmask_irq(pirq);

    level = rt_spin_lock_irqsave(&port->lock);

    ctrl = hwirq / MAX_MSI_IRQS_PER_CTRL;
    res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
    bit = hwirq % MAX_MSI_IRQS_PER_CTRL;

    port->irq_mask[ctrl] &= ~RT_BIT(bit);
    dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, port->irq_mask[ctrl]);

    rt_spin_unlock_irqrestore(&port->lock, level);
}

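/*
 * An MSI is a posted memory write: the composed message tells the endpoint
 * to write the vector number (hwirq) to the doorbell address
 * (port->msi_data_phy) that dw_pcie_msi_init() programs into the RC.
 */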
static void dw_pcie_compose_msi_msg(struct rt_pic_irq *pirq, struct rt_pci_msi_msg *msg)
{
    rt_uint64_t msi_target;
    struct dw_pcie_port *port = pirq->pic->priv_data;

    msi_target = (rt_uint64_t)port->msi_data_phy;

    msg->address_lo = rt_lower_32_bits(msi_target);
    msg->address_hi = rt_upper_32_bits(msi_target);

    msg->data = pirq->hwirq;
}

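/*
 * Allocate the lowest free vector from the MSI bitmap; fails with
 * -RT_EEMPTY once all port->irq_count vectors are in use.
 */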
static int dw_pcie_irq_alloc_msi(struct rt_pic *pic, struct rt_pci_msi_desc *msi_desc)
{
    rt_ubase_t level;
    int irq, hwirq;
    struct rt_pic_irq *pirq;
    struct dw_pcie_port *port = pic->priv_data;

    level = rt_spin_lock_irqsave(&port->lock);
    hwirq = rt_bitmap_next_clear_bit(port->msi_map, 0, port->irq_count);

    if (hwirq >= port->irq_count)
    {
        irq = -RT_EEMPTY;
        goto _out_lock;
    }

    pirq = rt_pic_find_irq(pic, hwirq);

    irq = rt_pic_config_irq(pic, hwirq, hwirq);
    pirq->mode = RT_IRQ_MODE_EDGE_RISING;

    rt_bitmap_set_bit(port->msi_map, hwirq);

_out_lock:
    rt_spin_unlock_irqrestore(&port->lock, level);

    return irq;
}

static void dw_pcie_irq_free_msi(struct rt_pic *pic, int irq)
{
    rt_ubase_t level;
    struct rt_pic_irq *pirq;
    struct dw_pcie_port *port = pic->priv_data;

    pirq = rt_pic_find_pirq(pic, irq);

    if (!pirq)
    {
        return;
    }

    level = rt_spin_lock_irqsave(&port->lock);
    rt_bitmap_clear_bit(port->msi_map, pirq->hwirq);
    rt_spin_unlock_irqrestore(&port->lock, level);
}

static const struct rt_pic_ops dw_pci_msi_ops =
{
    .name = "DWPCI-MSI",
    .irq_ack = dw_pcie_irq_ack,
    .irq_mask = dw_pcie_irq_mask,
    .irq_unmask = dw_pcie_irq_unmask,
    .irq_compose_msi_msg = dw_pcie_compose_msi_msg,
    .irq_alloc_msi = dw_pcie_irq_alloc_msi,
    .irq_free_msi = dw_pcie_irq_free_msi,
    .flags = RT_PIC_F_IRQ_ROUTING,
};

/*
 * MSI interrupt handler: scan every bank's STATUS register and dispatch
 * each pending vector. Returns -RT_EEMPTY if no vector was pending.
 */
rt_err_t dw_handle_msi_irq(struct dw_pcie_port *port)
{
    rt_err_t err;
    int i, pos;
    rt_bitmap_t status;
    rt_uint32_t num_ctrls;
    struct rt_pic_irq *pirq;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);
    struct rt_pic *msi_pic = port->msi_pic;

    err = -RT_EEMPTY;
    num_ctrls = RT_DIV_ROUND_UP(port->irq_count, MAX_MSI_IRQS_PER_CTRL);

    for (i = 0; i < num_ctrls; ++i)
    {
        status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
                (i * MSI_REG_CTRL_BLOCK_SIZE));

        if (!status)
        {
            continue;
        }

        err = RT_EOK;

        rt_bitmap_for_each_set_bit(&status, pos, MAX_MSI_IRQS_PER_CTRL)
        {
            pirq = rt_pic_find_irq(msi_pic, pos + i * MAX_MSI_IRQS_PER_CTRL);

            dw_pcie_irq_ack(pirq);

            rt_pic_handle_isr(pirq);
        }
    }

    return err;
}

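/* Chained handler installed on the host bridge's MSI interrupt line */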
static void dw_pcie_msi_isr(int irqno, void *param)
{
    struct dw_pcie_port *port = param;

    dw_handle_msi_irq(port);
}

void dw_pcie_free_msi(struct dw_pcie_port *port)
{
    if (port->msi_irq >= 0)
    {
        rt_hw_interrupt_mask(port->msi_irq);
        rt_pic_detach_irq(port->msi_irq, port);
    }

    if (port->msi_data)
    {
        struct dw_pcie *pci = to_dw_pcie_from_port(port);

        rt_dma_free_coherent(pci->dev, sizeof(rt_uint64_t), port->msi_data,
                port->msi_data_phy);
    }
}

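/*
 * Program the MSI doorbell address into the RC. Only the bus address of
 * the coherent buffer allocated in dw_pcie_host_init() matters: it is the
 * target that endpoints write their MSI messages to, never read by the CPU.
 */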
void dw_pcie_msi_init(struct dw_pcie_port *port)
{
#ifdef RT_PCI_MSI
    struct dw_pcie *pci = to_dw_pcie_from_port(port);
    rt_uint64_t msi_target = (rt_uint64_t)port->msi_data_phy;

    /* Program the msi_data_phy */
    dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_LO, rt_lower_32_bits(msi_target));
    dw_pcie_writel_dbi(pci, PCIE_MSI_ADDR_HI, rt_upper_32_bits(msi_target));
#endif
}

static const struct rt_pci_ops dw_child_pcie_ops;
static const struct rt_pci_ops dw_pcie_ops;

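/*
 * Initialize the host bridge of a DWC-based root complex: map the DBI and
 * "config" spaces, set up the MSI controller (unless the glue driver
 * provides msi_host_init) and probe the bridge.
 *
 * A minimal sketch of a SoC glue driver calling this, assuming hypothetical
 * glue names (soc_pcie, soc_dw_pcie_ops):
 *
 *     struct dw_pcie *pci = &soc_pcie->pci;
 *
 *     pci->dev = dev;
 *     pci->ops = &soc_dw_pcie_ops;
 *
 *     if ((err = dw_pcie_host_init(&pci->port)))
 *     {
 *         return err;
 *     }
 */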
rt_err_t dw_pcie_host_init(struct dw_pcie_port *port)
{
    rt_err_t err;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);
    struct rt_device *dev = pci->dev;
    struct rt_pci_host_bridge *bridge;

    rt_spin_lock_init(&port->lock);

    rt_dm_dev_get_address_by_name(dev, "config", &port->cfg0_addr, &port->cfg0_size);

    if (port->cfg0_addr)
    {
        port->cfg0_base = rt_ioremap((void *)port->cfg0_addr, port->cfg0_size);

        if (!port->cfg0_base)
        {
            return -RT_EIO;
        }
    }
    else if (!port->cfg0_base)
    {
        LOG_E("Missing 'config' reg space");
    }

    if (!(bridge = rt_pci_host_bridge_alloc(0)))
    {
        return -RT_ENOMEM;
    }

    bridge->parent.ofw_node = dev->ofw_node;

    if ((err = rt_pci_host_bridge_init(bridge)))
    {
        goto _err_free_bridge;
    }

    port->bridge = bridge;

    for (int i = 0; i < bridge->bus_regions_nr; ++i)
    {
        struct rt_pci_bus_region *region = &bridge->bus_regions[i];

        switch (region->flags)
        {
        case PCI_BUS_REGION_F_IO:
            port->io_addr = region->cpu_addr;
            port->io_bus_addr = region->phy_addr;
            port->io_size = region->size;
            break;

        case PCI_BUS_REGION_F_NONE:
            port->cfg0_size = region->size;
            port->cfg0_addr = region->cpu_addr;

            if (!pci->dbi_base)
            {
                pci->dbi_base = rt_ioremap((void *)port->cfg0_addr, port->cfg0_size);

                if (!pci->dbi_base)
                {
                    LOG_E("Error with ioremap");
                    err = -RT_ENOMEM;
                    goto _err_free_bridge;
                }
            }
            break;

        default:
            break;
        }
    }

    if (!port->cfg0_base && port->cfg0_addr)
    {
        port->cfg0_base = rt_ioremap((void *)port->cfg0_addr, port->cfg0_size);

        if (!port->cfg0_base)
        {
            err = -RT_ENOMEM;
            goto _err_free_bridge;
        }
    }

    if (rt_dm_dev_prop_read_u32(dev, "num-viewport", &pci->num_viewport))
    {
        pci->num_viewport = 2;
    }

    if (pci->link_gen < 1)
    {
        pci->link_gen = -1;
        rt_dm_dev_prop_read_u32(dev, "max-link-speed", &pci->link_gen);
    }

    /*
     * If a specific SoC driver needs to change the default number of vectors,
     * it needs to implement the set_irq_count callback.
     */
    if (!port->ops->set_irq_count)
    {
        port->irq_count = MSI_DEF_NUM_VECTORS;
    }
    else
    {
        port->ops->set_irq_count(port);

        if (port->irq_count > MAX_MSI_IRQS || port->irq_count == 0)
        {
            LOG_E("Invalid count of irq = %d", port->irq_count);

            err = -RT_EINVAL;
            goto _err_free_bridge;
        }
    }

    if (!port->ops->msi_host_init)
    {
        port->msi_pic = rt_calloc(1, sizeof(*port->msi_pic));

        if (!port->msi_pic)
        {
            err = -RT_ENOMEM;
            goto _err_free_bridge;
        }

        port->msi_pic->priv_data = port;
        port->msi_pic->ops = &dw_pci_msi_ops;
        rt_pic_linear_irq(port->msi_pic, port->irq_count);
        rt_pic_user_extends(port->msi_pic);

        if (port->msi_irq)
        {
            rt_hw_interrupt_install(port->msi_irq, dw_pcie_msi_isr, port, "dwc-pci-msi");
            rt_hw_interrupt_umask(port->msi_irq);
        }

        port->msi_data = rt_dma_alloc_coherent(pci->dev, sizeof(rt_uint64_t),
                &port->msi_data_phy);

        if (!port->msi_data)
        {
            err = -RT_ENOMEM;
            goto _err_free_msi;
        }
    }
    else
    {
        if ((err = port->ops->msi_host_init(port)))
        {
            goto _err_free_bridge;
        }
    }

    /* Set default bus ops */
    bridge->ops = &dw_pcie_ops;
    bridge->child_ops = &dw_child_pcie_ops;

    if (port->ops->host_init && (err = port->ops->host_init(port)))
    {
        goto _err_free_msi;
    }

    bridge->sysdata = port;

    if ((err = rt_pci_host_bridge_probe(bridge)))
    {
        goto _err_free_msi;
    }

    return RT_EOK;

_err_free_msi:
    if (!port->ops->msi_host_init)
    {
        dw_pcie_free_msi(port);

        rt_pic_cancel_irq(port->msi_pic);
        rt_free(port->msi_pic);
        port->msi_pic = RT_NULL;
    }

_err_free_bridge:
    rt_pci_host_bridge_free(bridge);
    port->bridge = RT_NULL;

    return err;
}

void dw_pcie_host_deinit(struct dw_pcie_port *port)
{
    if (!port->ops->msi_host_init)
    {
        dw_pcie_free_msi(port);
    }
}

void dw_pcie_host_free(struct dw_pcie_port *port)
{
    if (!port->ops->msi_host_init)
    {
        dw_pcie_free_msi(port);

        rt_pic_cancel_irq(port->msi_pic);
        rt_free(port->msi_pic);
    }

    if (port->bridge)
    {
        rt_pci_host_bridge_free(port->bridge);
    }
}

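/*
 * Config accesses to devices below the root bus share one "config" window:
 * each access reprograms outbound ATU entry 0 with a CFG0 (bus directly
 * behind the root port) or CFG1 (busses further down) translation for the
 * target BDF, then goes through the remapped window.
 */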
static void *dw_pcie_other_conf_map(struct rt_pci_bus *bus, rt_uint32_t devfn, int reg)
{
    int type;
    rt_uint32_t busdev;
    struct dw_pcie_port *port = bus->sysdata;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);

    /*
     * Checking whether the link is up here is a last line of defense
     * against platforms that forward errors on the system bus as
     * SError upon PCI configuration transactions issued when the link is down.
     * This check is racy by definition and does not stop the system from
     * triggering an SError if the link goes down after this check is performed.
     */
    if (!dw_pcie_link_up(pci))
    {
        return RT_NULL;
    }

    busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(RT_PCI_SLOT(devfn)) |
            PCIE_ATU_FUNC(RT_PCI_FUNC(devfn));

    if (rt_pci_is_root_bus(bus->parent))
    {
        type = PCIE_ATU_TYPE_CFG0;
    }
    else
    {
        type = PCIE_ATU_TYPE_CFG1;
    }

    dw_pcie_prog_outbound_atu(pci, 0, type, port->cfg0_addr, busdev, port->cfg0_size);

    return port->cfg0_base + reg;
}

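/*
 * When ATU entries run out in dw_pcie_setup_rc(), entry 0 is shared between
 * config and IO translations (DWC_IATU_IOCFG_SHARED), so the IO mapping has
 * to be restored after every config access.
 */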
static rt_err_t dw_pcie_other_read_conf(struct rt_pci_bus *bus,
        rt_uint32_t devfn, int reg, int width, rt_uint32_t *value)
{
    rt_err_t err;
    struct dw_pcie_port *port = bus->sysdata;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);

    err = rt_pci_bus_read_config_uxx(bus, devfn, reg, width, value);

    if (!err && (pci->iatu_unroll_enabled & DWC_IATU_IOCFG_SHARED))
    {
        dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
                port->io_addr, port->io_bus_addr, port->io_size);
    }

    return err;
}

static rt_err_t dw_pcie_other_write_conf(struct rt_pci_bus *bus,
        rt_uint32_t devfn, int reg, int width, rt_uint32_t value)
{
    rt_err_t err;
    struct dw_pcie_port *port = bus->sysdata;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);

    err = rt_pci_bus_write_config_uxx(bus, devfn, reg, width, value);

    if (!err && (pci->iatu_unroll_enabled & DWC_IATU_IOCFG_SHARED))
    {
        dw_pcie_prog_outbound_atu(pci, 0, PCIE_ATU_TYPE_IO,
                port->io_addr, port->io_bus_addr, port->io_size);
    }

    return err;
}

static const struct rt_pci_ops dw_child_pcie_ops =
{
    .map = dw_pcie_other_conf_map,
    .read = dw_pcie_other_read_conf,
    .write = dw_pcie_other_write_conf,
};

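/*
 * The root bus maps straight onto the DBI (own configuration) space, where
 * only device 0 exists; accesses to other slots must return no mapping.
 */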
void *dw_pcie_own_conf_map(struct rt_pci_bus *bus, rt_uint32_t devfn, int reg)
{
    struct dw_pcie_port *port = bus->sysdata;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);

    if (RT_PCI_SLOT(devfn) > 0)
    {
        return RT_NULL;
    }

    return pci->dbi_base + reg;
}

static const struct rt_pci_ops dw_pcie_ops =
{
    .map = dw_pcie_own_conf_map,
    .read = rt_pci_bus_read_config_uxx,
    .write = rt_pci_bus_write_config_uxx,
};

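/*
 * Program the RC: BARs, bus numbers, command register, the per-bank MSI
 * mask/enable state, and one outbound ATU entry per usable viewport
 * (entry 0 stays reserved for config accesses).
 */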
void dw_pcie_setup_rc(struct dw_pcie_port *port)
{
    rt_uint32_t val, num_ctrls;
    struct dw_pcie *pci = to_dw_pcie_from_port(port);

    /*
     * Enable DBI read-only registers for writing/updating configuration.
     * Write permission gets disabled towards the end of this function.
     */
    dw_pcie_dbi_ro_writable_enable(pci, RT_TRUE);

    dw_pcie_setup(pci);

    if (!port->ops->msi_host_init)
    {
        num_ctrls = RT_DIV_ROUND_UP(port->irq_count, MAX_MSI_IRQS_PER_CTRL);

        /* Initialize IRQ Status array */
        for (int ctrl = 0; ctrl < num_ctrls; ++ctrl)
        {
            port->irq_mask[ctrl] = ~0;

            dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK +
                    (ctrl * MSI_REG_CTRL_BLOCK_SIZE), port->irq_mask[ctrl]);
            dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_ENABLE +
                    (ctrl * MSI_REG_CTRL_BLOCK_SIZE), ~0);
        }
    }

    /* Setup RC BARs */
    dw_pcie_writel_dbi(pci, PCIR_BAR(0), PCIM_BAR_MEM_TYPE_64);
    dw_pcie_writel_dbi(pci, PCIR_BAR(1), PCIM_BAR_MEM_TYPE_32);

    /* Setup interrupt pins: advertise legacy INTA in the interrupt pin register */
    val = dw_pcie_readl_dbi(pci, PCIR_INTLINE);
    val &= 0xffff00ff;
    val |= 0x00000100;
    dw_pcie_writel_dbi(pci, PCIR_INTLINE, val);

    /* Setup bus numbers: primary 0, secondary 1, subordinate 0xff */
    val = dw_pcie_readl_dbi(pci, PCIR_PRIBUS_1);
    val &= 0xff000000;
    val |= 0x00ff0100;
    dw_pcie_writel_dbi(pci, PCIR_PRIBUS_1, val);

    /* Setup command register */
    val = dw_pcie_readl_dbi(pci, PCIR_COMMAND);
    val &= 0xffff0000;
    val |= PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_SERRESPEN;
    dw_pcie_writel_dbi(pci, PCIR_COMMAND, val);

    /*
     * If the platform provides its own child bus config accesses, it means
     * the platform uses its own address translation component rather than
     * ATU, so we should not program the ATU here.
     */
    if (pci->port.bridge->child_ops == &dw_child_pcie_ops)
    {
        int atu_idx = 0;
        struct rt_pci_host_bridge *bridge = port->bridge;

        /* Program an outbound ATU entry for each memory resource entry */
        for (int i = 0; i < bridge->bus_regions_nr; ++i)
        {
            struct rt_pci_bus_region *region = &bridge->bus_regions[i];

            if (region->flags != PCI_BUS_REGION_F_MEM)
            {
                continue;
            }

            if (pci->num_viewport <= ++atu_idx)
            {
                break;
            }

            dw_pcie_prog_outbound_atu(pci, atu_idx,
                    PCIE_ATU_TYPE_MEM, region->cpu_addr,
                    region->phy_addr, region->size);
        }

        if (port->io_size)
        {
            if (pci->num_viewport > ++atu_idx)
            {
                dw_pcie_prog_outbound_atu(pci, atu_idx,
                        PCIE_ATU_TYPE_IO, port->io_addr,
                        port->io_bus_addr, port->io_size);
            }
            else
            {
                pci->iatu_unroll_enabled |= DWC_IATU_IOCFG_SHARED;
            }
        }

        if (pci->num_viewport <= atu_idx)
        {
            LOG_W("Resources exceed number of ATU entries (%d)", pci->num_viewport);
        }
    }

    dw_pcie_writel_dbi(pci, PCIR_BAR(0), 0);

    /* Program correct class for RC */
    dw_pcie_writew_dbi(pci, PCIR_SUBCLASS, PCIS_BRIDGE_PCI);

    val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
    val |= PORT_LOGIC_SPEED_CHANGE;
    dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);

    dw_pcie_dbi_ro_writable_enable(pci, RT_FALSE);
}