// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2010
 * Vipin Kumar, STMicroelectronics, vipin.kumar@st.com.
 */

/*
 * Designware ethernet IP driver for U-Boot
 */

#include <common.h>
#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <errno.h>
#include <log.h>
#include <miiphy.h>
#include <malloc.h>
#include <net.h>
#include <pci.h>
#include <reset.h>
#include <asm/cache.h>
#include <dm/device_compat.h>
#include <dm/device-internal.h>
#include <dm/devres.h>
#include <dm/lists.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <power/regulator.h>
#include "designware.h"

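/*
 * MDIO bus accessors: each access programs the GMAC MII address register
 * with the PHY address, the register number and the BUSY bit, then polls
 * until the hardware clears BUSY. A read then returns the MII data
 * register; a write preloads it before starting the cycle.
 */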
static int dw_mdio_read(struct mii_dev *bus, int addr, int devad, int reg)
{
	struct dw_eth_dev *priv = dev_get_priv((struct udevice *)bus->priv);
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	ulong start;
	u16 miiaddr;
	int timeout = CFG_MDIO_TIMEOUT;

	miiaddr = ((addr << MIIADDRSHIFT) & MII_ADDRMSK) |
		  ((reg << MIIREGSHIFT) & MII_REGMSK);

	writel(miiaddr | MII_CLKRANGE_150_250M | MII_BUSY, &mac_p->miiaddr);

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if (!(readl(&mac_p->miiaddr) & MII_BUSY))
			return readl(&mac_p->miidata);
		udelay(10);
	}

	return -ETIMEDOUT;
}

static int dw_mdio_write(struct mii_dev *bus, int addr, int devad, int reg,
			 u16 val)
{
	struct dw_eth_dev *priv = dev_get_priv((struct udevice *)bus->priv);
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	ulong start;
	u16 miiaddr;
	int ret = -ETIMEDOUT, timeout = CFG_MDIO_TIMEOUT;

	writel(val, &mac_p->miidata);
	miiaddr = ((addr << MIIADDRSHIFT) & MII_ADDRMSK) |
		  ((reg << MIIREGSHIFT) & MII_REGMSK) | MII_WRITE;

	writel(miiaddr | MII_CLKRANGE_150_250M | MII_BUSY, &mac_p->miiaddr);

	start = get_timer(0);
	while (get_timer(start) < timeout) {
		if (!(readl(&mac_p->miiaddr) & MII_BUSY)) {
			ret = 0;
			break;
		}
		udelay(10);
	}

	return ret;
}

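/*
 * Optional GPIO-driven PHY reset: the three "snps,reset-delays-us" values
 * are used as the pre-reset, in-reset and post-reset delays while the
 * reset line is pulsed (deassert, assert, deassert).
 */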
#if CONFIG_IS_ENABLED(DM_GPIO)
static int __dw_mdio_reset(struct udevice *dev)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);
	struct dw_eth_pdata *pdata = dev_get_plat(dev);
	int ret;

	if (!dm_gpio_is_valid(&priv->reset_gpio))
		return 0;

	/* reset the phy */
	ret = dm_gpio_set_value(&priv->reset_gpio, 0);
	if (ret)
		return ret;

	udelay(pdata->reset_delays[0]);

	ret = dm_gpio_set_value(&priv->reset_gpio, 1);
	if (ret)
		return ret;

	udelay(pdata->reset_delays[1]);

	ret = dm_gpio_set_value(&priv->reset_gpio, 0);
	if (ret)
		return ret;

	udelay(pdata->reset_delays[2]);

	return 0;
}

static int dw_mdio_reset(struct mii_dev *bus)
{
	struct udevice *dev = bus->priv;

	return __dw_mdio_reset(dev);
}
#endif

#if IS_ENABLED(CONFIG_DM_MDIO)
int designware_eth_mdio_read(struct udevice *mdio_dev, int addr, int devad, int reg)
{
	struct mdio_perdev_priv *pdata = dev_get_uclass_priv(mdio_dev);

	return dw_mdio_read(pdata->mii_bus, addr, devad, reg);
}

int designware_eth_mdio_write(struct udevice *mdio_dev, int addr, int devad, int reg, u16 val)
{
	struct mdio_perdev_priv *pdata = dev_get_uclass_priv(mdio_dev);

	return dw_mdio_write(pdata->mii_bus, addr, devad, reg, val);
}

#if CONFIG_IS_ENABLED(DM_GPIO)
int designware_eth_mdio_reset(struct udevice *mdio_dev)
{
	struct mdio_perdev_priv *mdio_pdata = dev_get_uclass_priv(mdio_dev);
	struct udevice *dev = mdio_pdata->mii_bus->priv;

	return __dw_mdio_reset(dev->parent);
}
#endif

static const struct mdio_ops designware_eth_mdio_ops = {
	.read = designware_eth_mdio_read,
	.write = designware_eth_mdio_write,
#if CONFIG_IS_ENABLED(DM_GPIO)
	.reset = designware_eth_mdio_reset,
#endif
};

static int designware_eth_mdio_probe(struct udevice *dev)
{
	/* Use the priv data of the parent */
	dev_set_priv(dev, dev_get_priv(dev->parent));

	return 0;
}

U_BOOT_DRIVER(designware_eth_mdio) = {
	.name = "eth_designware_mdio",
	.id = UCLASS_MDIO,
	.probe = designware_eth_mdio_probe,
	.ops = &designware_eth_mdio_ops,
	.plat_auto = sizeof(struct mdio_perdev_priv),
};
#endif

static int dw_mdio_init(const char *name, void *priv)
{
	struct mii_dev *bus = mdio_alloc();

	if (!bus) {
		printf("Failed to allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->read = dw_mdio_read;
	bus->write = dw_mdio_write;
	snprintf(bus->name, sizeof(bus->name), "%s", name);
#if CONFIG_IS_ENABLED(DM_GPIO)
	bus->reset = dw_mdio_reset;
#endif

	bus->priv = priv;

	return mdio_register(bus);
}

#if IS_ENABLED(CONFIG_DM_MDIO)
static int dw_dm_mdio_init(const char *name, void *priv)
{
	struct udevice *dev = priv;
	ofnode node;
	int ret;

	ofnode_for_each_subnode(node, dev_ofnode(dev)) {
		const char *subnode_name = ofnode_get_name(node);
		struct udevice *mdiodev;

		if (strcmp(subnode_name, "mdio"))
			continue;

		ret = device_bind_driver_to_node(dev, "eth_designware_mdio",
						 subnode_name, node, &mdiodev);
		if (ret)
			debug("%s: not able to bind mdio device node\n", __func__);

		return 0;
	}

	printf("%s: mdio node is missing, registering legacy mdio bus\n", __func__);

	return dw_mdio_init(name, priv);
}
#endif

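/*
 * The TX and RX descriptor rings are set up in "chained" mode: every
 * descriptor carries the address of its packet buffer and of the next
 * descriptor, and the last descriptor points back to the first. The rings
 * and buffers live in cached memory, so every hand-over to or from the
 * DMA engine is paired with an explicit cache flush/invalidate.
 */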
static void tx_descs_init(struct dw_eth_dev *priv)
{
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	struct dmamacdescr *desc_table_p = &priv->tx_mac_descrtable[0];
	char *txbuffs = &priv->txbuffs[0];
	struct dmamacdescr *desc_p;
	u32 idx;

	for (idx = 0; idx < CFG_TX_DESCR_NUM; idx++) {
		desc_p = &desc_table_p[idx];
		desc_p->dmamac_addr = (ulong)&txbuffs[idx * CFG_ETH_BUFSIZE];
		desc_p->dmamac_next = (ulong)&desc_table_p[idx + 1];

#if defined(CONFIG_DW_ALTDESCRIPTOR)
		desc_p->txrx_status &= ~(DESC_TXSTS_TXINT | DESC_TXSTS_TXLAST |
				DESC_TXSTS_TXFIRST | DESC_TXSTS_TXCRCDIS |
				DESC_TXSTS_TXCHECKINSCTRL |
				DESC_TXSTS_TXRINGEND | DESC_TXSTS_TXPADDIS);

		desc_p->txrx_status |= DESC_TXSTS_TXCHAIN;
		desc_p->dmamac_cntl = 0;
		desc_p->txrx_status &= ~(DESC_TXSTS_MSK | DESC_TXSTS_OWNBYDMA);
#else
		desc_p->dmamac_cntl = DESC_TXCTRL_TXCHAIN;
		desc_p->txrx_status = 0;
#endif
	}

	/* Correct the last pointer of the chain */
	desc_p->dmamac_next = (ulong)&desc_table_p[0];

	/* Flush all Tx buffer descriptors at once */
	flush_dcache_range((ulong)priv->tx_mac_descrtable,
			   (ulong)priv->tx_mac_descrtable +
			   sizeof(priv->tx_mac_descrtable));

	writel((ulong)&desc_table_p[0], &dma_p->txdesclistaddr);
	priv->tx_currdescnum = 0;
}

static void rx_descs_init(struct dw_eth_dev *priv)
{
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	struct dmamacdescr *desc_table_p = &priv->rx_mac_descrtable[0];
	char *rxbuffs = &priv->rxbuffs[0];
	struct dmamacdescr *desc_p;
	u32 idx;

	/*
	 * Before handing the buffers to the GMAC, make sure the zeros
	 * written right after the "priv" structure was allocated have been
	 * flushed to RAM. Otherwise some of them may be written back later,
	 * while the GMAC is already pushing received data to RAM via DMA,
	 * corrupting the incoming frames.
	 */
	flush_dcache_range((ulong)rxbuffs, (ulong)rxbuffs + RX_TOTAL_BUFSIZE);

	for (idx = 0; idx < CFG_RX_DESCR_NUM; idx++) {
		desc_p = &desc_table_p[idx];
		desc_p->dmamac_addr = (ulong)&rxbuffs[idx * CFG_ETH_BUFSIZE];
		desc_p->dmamac_next = (ulong)&desc_table_p[idx + 1];

		desc_p->dmamac_cntl =
			(MAC_MAX_FRAME_SZ & DESC_RXCTRL_SIZE1MASK) |
			DESC_RXCTRL_RXCHAIN;

		desc_p->txrx_status = DESC_RXSTS_OWNBYDMA;
	}

	/* Correct the last pointer of the chain */
	desc_p->dmamac_next = (ulong)&desc_table_p[0];

	/* Flush all Rx buffer descriptors at once */
	flush_dcache_range((ulong)priv->rx_mac_descrtable,
			   (ulong)priv->rx_mac_descrtable +
			   sizeof(priv->rx_mac_descrtable));

	writel((ulong)&desc_table_p[0], &dma_p->rxdesclistaddr);
	priv->rx_currdescnum = 0;
}

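/*
 * Program the primary MAC address: macaddr0lo holds bytes 0-3 of the
 * address and macaddr0hi holds bytes 4-5, assembled in little-endian
 * order.
 */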
static int _dw_write_hwaddr(struct dw_eth_dev *priv, u8 *mac_id)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	u32 macid_lo, macid_hi;

	macid_lo = mac_id[0] + (mac_id[1] << 8) + (mac_id[2] << 16) +
		   (mac_id[3] << 24);
	macid_hi = mac_id[4] + (mac_id[5] << 8);

	writel(macid_hi, &mac_p->macaddr0hi);
	writel(macid_lo, &mac_p->macaddr0lo);

	return 0;
}

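/*
 * Mirror the PHY's negotiated link parameters into the MAC configuration
 * register: port (MII vs GMII) selection based on speed, the 100 Mbit/s
 * FES bit, and full/half duplex.
 */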
static int dw_adjust_link(struct dw_eth_dev *priv, struct eth_mac_regs *mac_p,
			  struct phy_device *phydev)
{
	u32 conf = readl(&mac_p->conf) | FRAMEBURSTENABLE | DISABLERXOWN;

	if (!phydev->link) {
		printf("%s: No link.\n", phydev->dev->name);
		return 0;
	}

	if (phydev->speed != 1000)
		conf |= MII_PORTSELECT;
	else
		conf &= ~MII_PORTSELECT;

	if (phydev->speed == 100)
		conf |= FES_100;

	if (phydev->duplex)
		conf |= FULLDPLXMODE;

	writel(conf, &mac_p->conf);

	printf("Speed: %d, %s duplex%s\n", phydev->speed,
	       (phydev->duplex) ? "full" : "half",
	       (phydev->port == PORT_FIBRE) ? ", fiber mode" : "");

	return 0;
}

static void _dw_eth_halt(struct dw_eth_dev *priv)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;

	writel(readl(&mac_p->conf) & ~(RXENABLE | TXENABLE), &mac_p->conf);
	writel(readl(&dma_p->opmode) & ~(RXSTART | TXSTART), &dma_p->opmode);

	phy_shutdown(priv->phydev);
}

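/*
 * Bring the controller up: soft-reset the DMA engine, re-program the MAC
 * address (the reset clears it), rebuild both descriptor rings, configure
 * the bus and operation modes, and finally start the PHY and propagate the
 * negotiated link settings into the MAC.
 */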
int designware_eth_init(struct dw_eth_dev *priv, u8 *enetaddr)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	unsigned int start;
	int ret;

	writel(readl(&dma_p->busmode) | DMAMAC_SRST, &dma_p->busmode);

	/*
	 * When a MII PHY is used, we must set the PS bit for the DMA
	 * reset to succeed.
	 */
	if (priv->phydev->interface == PHY_INTERFACE_MODE_MII)
		writel(readl(&mac_p->conf) | MII_PORTSELECT, &mac_p->conf);
	else
		writel(readl(&mac_p->conf) & ~MII_PORTSELECT, &mac_p->conf);

	start = get_timer(0);
	while (readl(&dma_p->busmode) & DMAMAC_SRST) {
		if (get_timer(start) >= CFG_MACRESET_TIMEOUT) {
			printf("DMA reset timeout\n");
			return -ETIMEDOUT;
		}

		mdelay(100);
	}

	/*
	 * The soft reset above clears the HW address registers,
	 * so we have to set them here once again.
	 */
	_dw_write_hwaddr(priv, enetaddr);

	rx_descs_init(priv);
	tx_descs_init(priv);

	writel(FIXEDBURST | PRIORXTX_41 | DMA_PBL, &dma_p->busmode);

#ifndef CONFIG_DW_MAC_FORCE_THRESHOLD_MODE
	writel(readl(&dma_p->opmode) | FLUSHTXFIFO | STOREFORWARD,
	       &dma_p->opmode);
#else
	writel(readl(&dma_p->opmode) | FLUSHTXFIFO,
	       &dma_p->opmode);
#endif

	writel(readl(&dma_p->opmode) | RXSTART | TXSTART, &dma_p->opmode);

#ifdef CONFIG_DW_AXI_BURST_LEN
	writel((CONFIG_DW_AXI_BURST_LEN & 0x1FF >> 1), &dma_p->axibus);
#endif

	/* Start up the PHY */
	ret = phy_startup(priv->phydev);
	if (ret) {
		printf("Could not initialize PHY %s\n",
		       priv->phydev->dev->name);
		return ret;
	}

	ret = dw_adjust_link(priv, mac_p, priv->phydev);
	if (ret)
		return ret;

	return 0;
}

int designware_eth_enable(struct dw_eth_dev *priv)
{
	struct eth_mac_regs *mac_p = priv->mac_regs_p;

	if (!priv->phydev->link)
		return -EIO;

	writel(readl(&mac_p->conf) | RXENABLE | TXENABLE, &mac_p->conf);

	return 0;
}

#define ETH_ZLEN	60

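/*
 * Transmit path: the CPU may only use a descriptor whose OWN bit is clear.
 * The frame is copied into the descriptor's buffer (padded to the minimum
 * Ethernet length), the buffer and descriptor are flushed from the data
 * cache, ownership is handed back to the DMA engine, and a write to the
 * TX poll demand register kicks off transmission.
 */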
static int _dw_eth_send(struct dw_eth_dev *priv, void *packet, int length)
{
	struct eth_dma_regs *dma_p = priv->dma_regs_p;
	u32 desc_num = priv->tx_currdescnum;
	struct dmamacdescr *desc_p = &priv->tx_mac_descrtable[desc_num];
	ulong desc_start = (ulong)desc_p;
	ulong desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);
	ulong data_start = desc_p->dmamac_addr;
	ulong data_end = data_start + roundup(length, ARCH_DMA_MINALIGN);
	/*
	 * Strictly speaking we only need to invalidate the "txrx_status"
	 * field for the following check, but on some platforms we cannot
	 * invalidate only 4 bytes, so we invalidate the entire descriptor,
	 * which is 16 bytes in total. This is safe because the individual
	 * descriptors in the array are each aligned to ARCH_DMA_MINALIGN
	 * and padded appropriately.
	 */
	invalidate_dcache_range(desc_start, desc_end);

	/* Check if the descriptor is owned by the CPU */
	if (desc_p->txrx_status & DESC_TXSTS_OWNBYDMA) {
		printf("CPU not owner of tx frame\n");
		return -EPERM;
	}

	memcpy((void *)data_start, packet, length);
	if (length < ETH_ZLEN) {
		memset(&((char *)data_start)[length], 0, ETH_ZLEN - length);
		length = ETH_ZLEN;
	}

	/* Flush data to be sent */
	flush_dcache_range(data_start, data_end);

#if defined(CONFIG_DW_ALTDESCRIPTOR)
	desc_p->txrx_status |= DESC_TXSTS_TXFIRST | DESC_TXSTS_TXLAST;
	desc_p->dmamac_cntl = (desc_p->dmamac_cntl & ~DESC_TXCTRL_SIZE1MASK) |
			      ((length << DESC_TXCTRL_SIZE1SHFT) &
			      DESC_TXCTRL_SIZE1MASK);

	desc_p->txrx_status &= ~(DESC_TXSTS_MSK);
	desc_p->txrx_status |= DESC_TXSTS_OWNBYDMA;
#else
	desc_p->dmamac_cntl = (desc_p->dmamac_cntl & ~DESC_TXCTRL_SIZE1MASK) |
			      ((length << DESC_TXCTRL_SIZE1SHFT) &
			      DESC_TXCTRL_SIZE1MASK) | DESC_TXCTRL_TXLAST |
			      DESC_TXCTRL_TXFIRST;

	desc_p->txrx_status = DESC_TXSTS_OWNBYDMA;
#endif

	/* Flush the modified buffer descriptor */
	flush_dcache_range(desc_start, desc_end);

	/* Test the wrap-around condition. */
	if (++desc_num >= CFG_TX_DESCR_NUM)
		desc_num = 0;

	priv->tx_currdescnum = desc_num;

	/* Start the transmission */
	writel(POLL_DATA, &dma_p->txpolldemand);

	return 0;
}

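/*
 * Receive path: once the DMA engine has cleared the OWN bit on the current
 * descriptor, the frame length is taken from the status word and the
 * packet buffer is invalidated before being handed to the network stack.
 * _dw_free_pkt() later returns the descriptor to the DMA engine.
 */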
static int _dw_eth_recv(struct dw_eth_dev *priv, uchar **packetp)
{
	u32 status, desc_num = priv->rx_currdescnum;
	struct dmamacdescr *desc_p = &priv->rx_mac_descrtable[desc_num];
	int length = -EAGAIN;
	ulong desc_start = (ulong)desc_p;
	ulong desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);
	ulong data_start = desc_p->dmamac_addr;
	ulong data_end;

	/* Invalidate the entire buffer descriptor */
	invalidate_dcache_range(desc_start, desc_end);

	status = desc_p->txrx_status;

	/* Check if the owner is the CPU */
	if (!(status & DESC_RXSTS_OWNBYDMA)) {
		length = (status & DESC_RXSTS_FRMLENMSK) >>
			 DESC_RXSTS_FRMLENSHFT;

		/* Invalidate received data */
		data_end = data_start + roundup(length, ARCH_DMA_MINALIGN);
		invalidate_dcache_range(data_start, data_end);
		*packetp = (uchar *)(ulong)desc_p->dmamac_addr;
	}

	return length;
}

static int _dw_free_pkt(struct dw_eth_dev *priv)
{
	u32 desc_num = priv->rx_currdescnum;
	struct dmamacdescr *desc_p = &priv->rx_mac_descrtable[desc_num];
	ulong desc_start = (ulong)desc_p;
	ulong desc_end = desc_start +
		roundup(sizeof(*desc_p), ARCH_DMA_MINALIGN);

	/*
	 * Make the current descriptor valid again and go to
	 * the next one
	 */
	desc_p->txrx_status |= DESC_RXSTS_OWNBYDMA;

	/* Flush the modified descriptor; only the status field was changed */
	flush_dcache_range(desc_start, desc_end);

	/* Test the wrap-around condition. */
	if (++desc_num >= CFG_RX_DESCR_NUM)
		desc_num = 0;
	priv->rx_currdescnum = desc_num;

	return 0;
}

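/*
 * Connect to the PHY either through the DM_MDIO framework or through the
 * legacy mii bus, limit the supported/advertised modes to gigabit features
 * (further capped by an optional "max-speed" property) and configure it.
 */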
static int dw_phy_init(struct dw_eth_dev *priv, void *dev)
{
	struct phy_device *phydev;
	int ret;

#if IS_ENABLED(CONFIG_DM_MDIO)
	phydev = dm_eth_phy_connect(dev);
	if (!phydev)
		return -ENODEV;
#else
	int phy_addr = -1;

#ifdef CONFIG_PHY_ADDR
	phy_addr = CONFIG_PHY_ADDR;
#endif

	phydev = phy_connect(priv->bus, phy_addr, dev, priv->interface);
	if (!phydev)
		return -ENODEV;
#endif

	phydev->supported &= PHY_GBIT_FEATURES;
	if (priv->max_speed) {
		ret = phy_set_supported(phydev, priv->max_speed);
		if (ret)
			return ret;
	}
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 0;
}

static int designware_eth_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct dw_eth_dev *priv = dev_get_priv(dev);
	int ret;

	ret = designware_eth_init(priv, pdata->enetaddr);
	if (ret)
		return ret;
	ret = designware_eth_enable(priv);
	if (ret)
		return ret;

	return 0;
}

int designware_eth_send(struct udevice *dev, void *packet, int length)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_eth_send(priv, packet, length);
}

int designware_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_eth_recv(priv, packetp);
}

int designware_eth_free_pkt(struct udevice *dev, uchar *packet, int length)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_free_pkt(priv);
}

void designware_eth_stop(struct udevice *dev)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_eth_halt(priv);
}

int designware_eth_write_hwaddr(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct dw_eth_dev *priv = dev_get_priv(dev);

	return _dw_write_hwaddr(priv, pdata->enetaddr);
}

static int designware_eth_bind(struct udevice *dev)
{
	if (IS_ENABLED(CONFIG_PCI)) {
		static int num_cards;
		char name[20];

		/* Create a unique device name for PCI type devices */
		if (device_is_on_pci_bus(dev)) {
			sprintf(name, "eth_designware#%u", num_cards++);
			device_set_name(dev, name);
		}
	}

	return 0;
}

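/*
 * Probe sequence: enable any clocks referenced by the device tree, turn on
 * an optional "phy-supply" regulator, deassert resets, map the register
 * base (from the PCI BAR when the controller sits on a PCI bus, otherwise
 * from the platform data), register the MDIO bus and connect the PHY.
 */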
int designware_eth_probe(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_plat(dev);
	struct dw_eth_dev *priv = dev_get_priv(dev);
	u32 iobase = pdata->iobase;
	ulong ioaddr;
	int ret, err;
	struct reset_ctl_bulk reset_bulk;
#ifdef CONFIG_CLK
	int i, clock_nb;

	priv->clock_count = 0;
	clock_nb = dev_count_phandle_with_args(dev, "clocks", "#clock-cells",
					       0);
	if (clock_nb > 0) {
		priv->clocks = devm_kcalloc(dev, clock_nb, sizeof(struct clk),
					    GFP_KERNEL);
		if (!priv->clocks)
			return -ENOMEM;

		for (i = 0; i < clock_nb; i++) {
			err = clk_get_by_index(dev, i, &priv->clocks[i]);
			if (err < 0)
				break;

			err = clk_enable(&priv->clocks[i]);
			if (err && err != -ENOSYS && err != -ENOTSUPP) {
				pr_err("failed to enable clock %d\n", i);
				clk_free(&priv->clocks[i]);
				goto clk_err;
			}
			priv->clock_count++;
		}
	} else if (clock_nb != -ENOENT) {
		pr_err("failed to get clock phandle(%d)\n", clock_nb);
		return clock_nb;
	}
#endif

#if defined(CONFIG_DM_REGULATOR)
	struct udevice *phy_supply;

	ret = device_get_supply_regulator(dev, "phy-supply",
					  &phy_supply);
	if (ret) {
		debug("%s: No phy supply\n", dev->name);
	} else {
		ret = regulator_set_enable(phy_supply, true);
		if (ret) {
			puts("Error enabling phy supply\n");
			return ret;
		}
	}
#endif

	ret = reset_get_bulk(dev, &reset_bulk);
	if (ret)
		dev_warn(dev, "Can't get reset: %d\n", ret);
	else
		reset_deassert_bulk(&reset_bulk);

	/*
	 * If we are on a PCI bus, either directly attached to a PCI root port
	 * or via a PCI bridge, fill in plat before we probe the hardware.
	 */
	if (IS_ENABLED(CONFIG_PCI) && device_is_on_pci_bus(dev)) {
		dm_pci_read_config32(dev, PCI_BASE_ADDRESS_0, &iobase);
		iobase &= PCI_BASE_ADDRESS_MEM_MASK;
		iobase = dm_pci_mem_to_phys(dev, iobase);

		pdata->iobase = iobase;
		pdata->phy_interface = PHY_INTERFACE_MODE_RMII;
	}

	debug("%s, iobase=%x, priv=%p\n", __func__, iobase, priv);
	ioaddr = iobase;
	priv->mac_regs_p = (struct eth_mac_regs *)ioaddr;
	priv->dma_regs_p = (struct eth_dma_regs *)(ioaddr + DW_DMA_BASE_OFFSET);
	priv->interface = pdata->phy_interface;
	priv->max_speed = pdata->max_speed;

#if IS_ENABLED(CONFIG_DM_MDIO)
	ret = dw_dm_mdio_init(dev->name, dev);
#else
	ret = dw_mdio_init(dev->name, dev);
#endif
	if (ret) {
		err = ret;
		goto mdio_err;
	}
	priv->bus = miiphy_get_dev_by_name(dev->name);

	ret = dw_phy_init(priv, dev);
	debug("%s, ret=%d\n", __func__, ret);
	if (!ret)
		return 0;

	/* continue here for cleanup if no PHY found */
	err = ret;
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);
mdio_err:

#ifdef CONFIG_CLK
clk_err:
	ret = clk_release_all(priv->clocks, priv->clock_count);
	if (ret)
		pr_err("failed to disable all clocks\n");

#endif
	return err;
}

static int designware_eth_remove(struct udevice *dev)
{
	struct dw_eth_dev *priv = dev_get_priv(dev);

	free(priv->phydev);
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);

#ifdef CONFIG_CLK
	return clk_release_all(priv->clocks, priv->clock_count);
#else
	return 0;
#endif
}

const struct eth_ops designware_eth_ops = {
	.start = designware_eth_start,
	.send = designware_eth_send,
	.recv = designware_eth_recv,
	.free_pkt = designware_eth_free_pkt,
	.stop = designware_eth_stop,
	.write_hwaddr = designware_eth_write_hwaddr,
};

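/*
 * Read the platform data from the device tree: register base, "phy-mode",
 * optional "max-speed", and the optional PHY reset GPIO with its timing.
 * A node using these bindings might look roughly like this (illustrative
 * values only):
 *
 *	ethernet@ff290000 {
 *		compatible = "st,stm32-dwmac";
 *		reg = <0xff290000 0x10000>;
 *		phy-mode = "rmii";
 *		max-speed = <100>;
 *		snps,reset-gpio = <&gpio3 12 GPIO_ACTIVE_LOW>;
 *		snps,reset-active-low;
 *		snps,reset-delays-us = <0 10000 1000000>;
 *	};
 */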
int designware_eth_of_to_plat(struct udevice *dev)
{
	struct dw_eth_pdata *dw_pdata = dev_get_plat(dev);
#if CONFIG_IS_ENABLED(DM_GPIO)
	struct dw_eth_dev *priv = dev_get_priv(dev);
#endif
	struct eth_pdata *pdata = &dw_pdata->eth_pdata;
#if CONFIG_IS_ENABLED(DM_GPIO)
	int reset_flags = GPIOD_IS_OUT;
#endif
	int ret = 0;

	pdata->iobase = dev_read_addr(dev);
	pdata->phy_interface = dev_read_phy_mode(dev);
	if (pdata->phy_interface == PHY_INTERFACE_MODE_NA)
		return -EINVAL;

	pdata->max_speed = dev_read_u32_default(dev, "max-speed", 0);

#if CONFIG_IS_ENABLED(DM_GPIO)
	if (dev_read_bool(dev, "snps,reset-active-low"))
		reset_flags |= GPIOD_ACTIVE_LOW;

	ret = gpio_request_by_name(dev, "snps,reset-gpio", 0,
				   &priv->reset_gpio, reset_flags);
	if (ret == 0) {
		ret = dev_read_u32_array(dev, "snps,reset-delays-us",
					 dw_pdata->reset_delays, 3);
	} else if (ret == -ENOENT) {
		ret = 0;
	}
#endif

	return ret;
}

static const struct udevice_id designware_eth_ids[] = {
	{ .compatible = "allwinner,sun7i-a20-gmac" },
	{ .compatible = "amlogic,meson6-dwmac" },
	{ .compatible = "st,stm32-dwmac" },
	{ .compatible = "snps,arc-dwmac-3.70a" },
	{ }
};

U_BOOT_DRIVER(eth_designware) = {
	.name = "eth_designware",
	.id = UCLASS_ETH,
	.of_match = designware_eth_ids,
	.of_to_plat = designware_eth_of_to_plat,
	.bind = designware_eth_bind,
	.probe = designware_eth_probe,
	.remove = designware_eth_remove,
	.ops = &designware_eth_ops,
	.priv_auto = sizeof(struct dw_eth_dev),
	.plat_auto = sizeof(struct dw_eth_pdata),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};

static struct pci_device_id supported[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_QRK_EMAC) },
	{ }
};

U_BOOT_PCI_DEVICE(eth_designware, supported);