Lines Matching refs:eth
60 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg) in mtk_w32() argument
62 __raw_writel(val, eth->base + reg); in mtk_w32()
65 u32 mtk_r32(struct mtk_eth *eth, unsigned reg) in mtk_r32() argument
67 return __raw_readl(eth->base + reg); in mtk_r32()
70 static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg) in mtk_m32() argument
74 val = mtk_r32(eth, reg); in mtk_m32()
77 mtk_w32(eth, val, reg); in mtk_m32()
81 static int mtk_mdio_busy_wait(struct mtk_eth *eth) in mtk_mdio_busy_wait() argument
86 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS)) in mtk_mdio_busy_wait()
93 dev_err(eth->dev, "mdio: MDIO timeout\n"); in mtk_mdio_busy_wait()
97 static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, in _mtk_mdio_write() argument
100 if (mtk_mdio_busy_wait(eth)) in _mtk_mdio_write()
105 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_WRITE | in _mtk_mdio_write()
110 if (mtk_mdio_busy_wait(eth)) in _mtk_mdio_write()
116 static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg) in _mtk_mdio_read() argument
120 if (mtk_mdio_busy_wait(eth)) in _mtk_mdio_read()
123 mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START | PHY_IAC_READ | in _mtk_mdio_read()
128 if (mtk_mdio_busy_wait(eth)) in _mtk_mdio_read()
131 d = mtk_r32(eth, MTK_PHY_IAC) & 0xffff; in _mtk_mdio_read()
139 struct mtk_eth *eth = bus->priv; in mtk_mdio_write() local
141 return _mtk_mdio_write(eth, phy_addr, phy_reg, val); in mtk_mdio_write()
146 struct mtk_eth *eth = bus->priv; in mtk_mdio_read() local
148 return _mtk_mdio_read(eth, phy_addr, phy_reg); in mtk_mdio_read()
151 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth, in mt7621_gmac0_rgmii_adjust() argument
159 regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val); in mt7621_gmac0_rgmii_adjust()
162 dev_err(eth->dev, in mt7621_gmac0_rgmii_adjust()
170 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0, in mt7621_gmac0_rgmii_adjust()
176 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, in mtk_gmac0_rgmii_adjust() argument
183 mtk_w32(eth, TRGMII_MODE, INTF_MODE); in mtk_gmac0_rgmii_adjust()
185 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val); in mtk_gmac0_rgmii_adjust()
187 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret); in mtk_gmac0_rgmii_adjust()
193 mtk_w32(eth, val, INTF_MODE); in mtk_gmac0_rgmii_adjust()
195 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0, in mtk_gmac0_rgmii_adjust()
200 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val); in mtk_gmac0_rgmii_adjust()
202 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret); in mtk_gmac0_rgmii_adjust()
206 mtk_w32(eth, val, TRGMII_RCK_CTRL); in mtk_gmac0_rgmii_adjust()
210 mtk_w32(eth, val, TRGMII_TCK_CTRL); in mtk_gmac0_rgmii_adjust()
218 struct mtk_eth *eth = mac->hw; in mtk_mac_config() local
223 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && in mtk_mac_config()
241 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) { in mtk_mac_config()
242 err = mtk_gmac_rgmii_path_setup(eth, mac->id); in mtk_mac_config()
250 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { in mtk_mac_config()
251 err = mtk_gmac_sgmii_path_setup(eth, mac->id); in mtk_mac_config()
257 if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) { in mtk_mac_config()
258 err = mtk_gmac_gephy_path_setup(eth, mac->id); in mtk_mac_config()
313 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); in mtk_mac_config()
316 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); in mtk_mac_config()
327 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); in mtk_mac_config()
329 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, in mtk_mac_config()
334 sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? in mtk_mac_config()
339 err = mtk_sgmii_setup_mode_force(eth->sgmii, sid, in mtk_mac_config()
342 err = mtk_sgmii_setup_mode_an(eth->sgmii, sid); in mtk_mac_config()
347 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, in mtk_mac_config()
350 dev_err(eth->dev, in mtk_mac_config()
368 dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__, in mtk_mac_config()
373 dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__, in mtk_mac_config()
561 static int mtk_mdio_init(struct mtk_eth *eth) in mtk_mdio_init() argument
566 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus"); in mtk_mdio_init()
568 dev_err(eth->dev, "no %s child node found", "mdio-bus"); in mtk_mdio_init()
577 eth->mii_bus = devm_mdiobus_alloc(eth->dev); in mtk_mdio_init()
578 if (!eth->mii_bus) { in mtk_mdio_init()
583 eth->mii_bus->name = "mdio"; in mtk_mdio_init()
584 eth->mii_bus->read = mtk_mdio_read; in mtk_mdio_init()
585 eth->mii_bus->write = mtk_mdio_write; in mtk_mdio_init()
586 eth->mii_bus->priv = eth; in mtk_mdio_init()
587 eth->mii_bus->parent = eth->dev; in mtk_mdio_init()
589 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np); in mtk_mdio_init()
590 ret = of_mdiobus_register(eth->mii_bus, mii_np); in mtk_mdio_init()
597 static void mtk_mdio_cleanup(struct mtk_eth *eth) in mtk_mdio_cleanup() argument
599 if (!eth->mii_bus) in mtk_mdio_cleanup()
602 mdiobus_unregister(eth->mii_bus); in mtk_mdio_cleanup()
605 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask) in mtk_tx_irq_disable() argument
610 spin_lock_irqsave(ð->tx_irq_lock, flags); in mtk_tx_irq_disable()
611 val = mtk_r32(eth, eth->tx_int_mask_reg); in mtk_tx_irq_disable()
612 mtk_w32(eth, val & ~mask, eth->tx_int_mask_reg); in mtk_tx_irq_disable()
613 spin_unlock_irqrestore(ð->tx_irq_lock, flags); in mtk_tx_irq_disable()
616 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask) in mtk_tx_irq_enable() argument
621 spin_lock_irqsave(ð->tx_irq_lock, flags); in mtk_tx_irq_enable()
622 val = mtk_r32(eth, eth->tx_int_mask_reg); in mtk_tx_irq_enable()
623 mtk_w32(eth, val | mask, eth->tx_int_mask_reg); in mtk_tx_irq_enable()
624 spin_unlock_irqrestore(ð->tx_irq_lock, flags); in mtk_tx_irq_enable()
627 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask) in mtk_rx_irq_disable() argument
632 spin_lock_irqsave(ð->rx_irq_lock, flags); in mtk_rx_irq_disable()
633 val = mtk_r32(eth, MTK_PDMA_INT_MASK); in mtk_rx_irq_disable()
634 mtk_w32(eth, val & ~mask, MTK_PDMA_INT_MASK); in mtk_rx_irq_disable()
635 spin_unlock_irqrestore(ð->rx_irq_lock, flags); in mtk_rx_irq_disable()
638 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask) in mtk_rx_irq_enable() argument
643 spin_lock_irqsave(ð->rx_irq_lock, flags); in mtk_rx_irq_enable()
644 val = mtk_r32(eth, MTK_PDMA_INT_MASK); in mtk_rx_irq_enable()
645 mtk_w32(eth, val | mask, MTK_PDMA_INT_MASK); in mtk_rx_irq_enable()
646 spin_unlock_irqrestore(ð->rx_irq_lock, flags); in mtk_rx_irq_enable()
653 struct mtk_eth *eth = mac->hw; in mtk_set_mac_address() local
663 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_set_mac_address()
684 struct mtk_eth *eth = mac->hw; in mtk_stats_update_mac() local
688 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_stats_update_mac()
734 static void mtk_stats_update(struct mtk_eth *eth) in mtk_stats_update() argument
739 if (!eth->mac[i] || !eth->mac[i]->hw_stats) in mtk_stats_update()
741 if (spin_trylock(ð->mac[i]->hw_stats->stats_lock)) { in mtk_stats_update()
742 mtk_stats_update_mac(eth->mac[i]); in mtk_stats_update()
743 spin_unlock(ð->mac[i]->hw_stats->stats_lock); in mtk_stats_update()
817 static int mtk_init_fq_dma(struct mtk_eth *eth) in mtk_init_fq_dma() argument
824 eth->scratch_ring = dma_alloc_coherent(eth->dev, in mtk_init_fq_dma()
826 ð->phy_scratch_ring, in mtk_init_fq_dma()
828 if (unlikely(!eth->scratch_ring)) in mtk_init_fq_dma()
831 eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, in mtk_init_fq_dma()
833 if (unlikely(!eth->scratch_head)) in mtk_init_fq_dma()
836 dma_addr = dma_map_single(eth->dev, in mtk_init_fq_dma()
837 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE, in mtk_init_fq_dma()
839 if (unlikely(dma_mapping_error(eth->dev, dma_addr))) in mtk_init_fq_dma()
842 phy_ring_tail = eth->phy_scratch_ring + in mtk_init_fq_dma()
846 eth->scratch_ring[i].txd1 = in mtk_init_fq_dma()
849 eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring + in mtk_init_fq_dma()
851 eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE); in mtk_init_fq_dma()
854 mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD); in mtk_init_fq_dma()
855 mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL); in mtk_init_fq_dma()
856 mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT); in mtk_init_fq_dma()
857 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN); in mtk_init_fq_dma()
888 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, in mtk_tx_unmap() argument
891 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_tx_unmap()
893 dma_unmap_single(eth->dev, in mtk_tx_unmap()
898 dma_unmap_page(eth->dev, in mtk_tx_unmap()
905 dma_unmap_page(eth->dev, in mtk_tx_unmap()
912 dma_unmap_page(eth->dev, in mtk_tx_unmap()
930 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, in setup_tx_buf() argument
934 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in setup_tx_buf()
957 struct mtk_eth *eth = mac->hw; in mtk_tx_map() local
990 mapped_addr = dma_map_single(eth->dev, skb->data, in mtk_tx_map()
992 if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) in mtk_tx_map()
999 setup_tx_buf(eth, itx_buf, itxd_pdma, mapped_addr, skb_headlen(skb), in mtk_tx_map()
1017 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA) || in mtk_tx_map()
1031 mapped_addr = skb_frag_dma_map(eth->dev, frag, offset, in mtk_tx_map()
1034 if (unlikely(dma_mapping_error(eth->dev, mapped_addr))) in mtk_tx_map()
1055 setup_tx_buf(eth, tx_buf, txd_pdma, mapped_addr, in mtk_tx_map()
1069 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_tx_map()
1087 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_tx_map()
1090 mtk_w32(eth, txd->txd2, MTK_QTX_CTX_PTR); in mtk_tx_map()
1094 mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0); in mtk_tx_map()
1104 mtk_tx_unmap(eth, tx_buf, false); in mtk_tx_map()
1107 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_tx_map()
1136 static int mtk_queue_stopped(struct mtk_eth *eth) in mtk_queue_stopped() argument
1141 if (!eth->netdev[i]) in mtk_queue_stopped()
1143 if (netif_queue_stopped(eth->netdev[i])) in mtk_queue_stopped()
1150 static void mtk_wake_queue(struct mtk_eth *eth) in mtk_wake_queue() argument
1155 if (!eth->netdev[i]) in mtk_wake_queue()
1157 netif_wake_queue(eth->netdev[i]); in mtk_wake_queue()
1164 struct mtk_eth *eth = mac->hw; in mtk_start_xmit() local
1165 struct mtk_tx_ring *ring = ð->tx_ring; in mtk_start_xmit()
1174 spin_lock(ð->page_lock); in mtk_start_xmit()
1176 if (unlikely(test_bit(MTK_RESETTING, ð->state))) in mtk_start_xmit()
1182 netif_err(eth, tx_queued, dev, in mtk_start_xmit()
1184 spin_unlock(ð->page_lock); in mtk_start_xmit()
1191 netif_warn(eth, tx_err, dev, in mtk_start_xmit()
1209 spin_unlock(ð->page_lock); in mtk_start_xmit()
1214 spin_unlock(ð->page_lock); in mtk_start_xmit()
1220 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth) in mtk_get_rx_ring() argument
1226 if (!eth->hwlro) in mtk_get_rx_ring()
1227 return ð->rx_ring[0]; in mtk_get_rx_ring()
1230 ring = ð->rx_ring[i]; in mtk_get_rx_ring()
1241 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth) in mtk_update_rx_cpu_idx() argument
1246 if (!eth->hwlro) { in mtk_update_rx_cpu_idx()
1247 ring = ð->rx_ring[0]; in mtk_update_rx_cpu_idx()
1248 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_update_rx_cpu_idx()
1251 ring = ð->rx_ring[i]; in mtk_update_rx_cpu_idx()
1254 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_update_rx_cpu_idx()
1261 struct mtk_eth *eth) in mtk_poll_rx() argument
1278 ring = mtk_get_rx_ring(eth); in mtk_poll_rx()
1290 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) || in mtk_poll_rx()
1298 !eth->netdev[mac])) in mtk_poll_rx()
1301 netdev = eth->netdev[mac]; in mtk_poll_rx()
1303 if (unlikely(test_bit(MTK_RESETTING, ð->state))) in mtk_poll_rx()
1312 dma_addr = dma_map_single(eth->dev, in mtk_poll_rx()
1314 eth->ip_align, in mtk_poll_rx()
1317 if (unlikely(dma_mapping_error(eth->dev, dma_addr))) { in mtk_poll_rx()
1323 dma_unmap_single(eth->dev, trxd.rxd1, in mtk_poll_rx()
1338 if (trxd.rxd4 & eth->rx_dma_l4_valid) in mtk_poll_rx()
1363 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_poll_rx()
1379 mtk_update_rx_cpu_idx(eth); in mtk_poll_rx()
1382 eth->rx_packets += done; in mtk_poll_rx()
1383 eth->rx_bytes += bytes; in mtk_poll_rx()
1384 dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes, in mtk_poll_rx()
1386 net_dim(ð->rx_dim, dim_sample); in mtk_poll_rx()
1391 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget, in mtk_poll_tx_qdma() argument
1394 struct mtk_tx_ring *ring = ð->tx_ring; in mtk_poll_tx_qdma()
1401 dma = mtk_r32(eth, MTK_QTX_DRX_PTR); in mtk_poll_tx_qdma()
1426 mtk_tx_unmap(eth, tx_buf, true); in mtk_poll_tx_qdma()
1435 mtk_w32(eth, cpu, MTK_QTX_CRX_PTR); in mtk_poll_tx_qdma()
1440 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget, in mtk_poll_tx_pdma() argument
1443 struct mtk_tx_ring *ring = ð->tx_ring; in mtk_poll_tx_pdma()
1450 dma = mtk_r32(eth, MT7628_TX_DTX_IDX0); in mtk_poll_tx_pdma()
1464 mtk_tx_unmap(eth, tx_buf, true); in mtk_poll_tx_pdma()
1478 static int mtk_poll_tx(struct mtk_eth *eth, int budget) in mtk_poll_tx() argument
1480 struct mtk_tx_ring *ring = ð->tx_ring; in mtk_poll_tx()
1489 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_poll_tx()
1490 budget = mtk_poll_tx_qdma(eth, budget, done, bytes); in mtk_poll_tx()
1492 budget = mtk_poll_tx_pdma(eth, budget, done, bytes); in mtk_poll_tx()
1495 if (!eth->netdev[i] || !done[i]) in mtk_poll_tx()
1497 netdev_completed_queue(eth->netdev[i], done[i], bytes[i]); in mtk_poll_tx()
1499 eth->tx_packets += done[i]; in mtk_poll_tx()
1500 eth->tx_bytes += bytes[i]; in mtk_poll_tx()
1503 dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes, in mtk_poll_tx()
1505 net_dim(ð->tx_dim, dim_sample); in mtk_poll_tx()
1507 if (mtk_queue_stopped(eth) && in mtk_poll_tx()
1509 mtk_wake_queue(eth); in mtk_poll_tx()
1514 static void mtk_handle_status_irq(struct mtk_eth *eth) in mtk_handle_status_irq() argument
1516 u32 status2 = mtk_r32(eth, MTK_INT_STATUS2); in mtk_handle_status_irq()
1519 mtk_stats_update(eth); in mtk_handle_status_irq()
1520 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF), in mtk_handle_status_irq()
1527 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi); in mtk_napi_tx() local
1530 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_napi_tx()
1531 mtk_handle_status_irq(eth); in mtk_napi_tx()
1532 mtk_w32(eth, MTK_TX_DONE_INT, eth->tx_int_status_reg); in mtk_napi_tx()
1533 tx_done = mtk_poll_tx(eth, budget); in mtk_napi_tx()
1535 if (unlikely(netif_msg_intr(eth))) { in mtk_napi_tx()
1536 dev_info(eth->dev, in mtk_napi_tx()
1538 mtk_r32(eth, eth->tx_int_status_reg), in mtk_napi_tx()
1539 mtk_r32(eth, eth->tx_int_mask_reg)); in mtk_napi_tx()
1545 if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT) in mtk_napi_tx()
1549 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); in mtk_napi_tx()
1556 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi); in mtk_napi_rx() local
1559 mtk_handle_status_irq(eth); in mtk_napi_rx()
1564 mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_STATUS); in mtk_napi_rx()
1565 rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth); in mtk_napi_rx()
1568 if (unlikely(netif_msg_intr(eth))) { in mtk_napi_rx()
1569 dev_info(eth->dev, in mtk_napi_rx()
1571 mtk_r32(eth, MTK_PDMA_INT_STATUS), in mtk_napi_rx()
1572 mtk_r32(eth, MTK_PDMA_INT_MASK)); in mtk_napi_rx()
1578 } while (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT); in mtk_napi_rx()
1581 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT); in mtk_napi_rx()
1586 static int mtk_tx_alloc(struct mtk_eth *eth) in mtk_tx_alloc() argument
1588 struct mtk_tx_ring *ring = ð->tx_ring; in mtk_tx_alloc()
1596 ring->dma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz, in mtk_tx_alloc()
1613 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_tx_alloc()
1614 ring->dma_pdma = dma_alloc_coherent(eth->dev, MTK_DMA_SIZE * sz, in mtk_tx_alloc()
1638 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_tx_alloc()
1639 mtk_w32(eth, ring->phys, MTK_QTX_CTX_PTR); in mtk_tx_alloc()
1640 mtk_w32(eth, ring->phys, MTK_QTX_DTX_PTR); in mtk_tx_alloc()
1641 mtk_w32(eth, in mtk_tx_alloc()
1644 mtk_w32(eth, ring->last_free_ptr, MTK_QTX_DRX_PTR); in mtk_tx_alloc()
1645 mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, in mtk_tx_alloc()
1648 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0); in mtk_tx_alloc()
1649 mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0); in mtk_tx_alloc()
1650 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0); in mtk_tx_alloc()
1651 mtk_w32(eth, MT7628_PST_DTX_IDX0, MTK_PDMA_RST_IDX); in mtk_tx_alloc()
1660 static void mtk_tx_clean(struct mtk_eth *eth) in mtk_tx_clean() argument
1662 struct mtk_tx_ring *ring = ð->tx_ring; in mtk_tx_clean()
1667 mtk_tx_unmap(eth, &ring->buf[i], false); in mtk_tx_clean()
1673 dma_free_coherent(eth->dev, in mtk_tx_clean()
1681 dma_free_coherent(eth->dev, in mtk_tx_clean()
1689 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) in mtk_rx_alloc() argument
1699 ring = ð->rx_ring_qdma; in mtk_rx_alloc()
1702 ring = ð->rx_ring[ring_no]; in mtk_rx_alloc()
1726 ring->dma = dma_alloc_coherent(eth->dev, in mtk_rx_alloc()
1733 dma_addr_t dma_addr = dma_map_single(eth->dev, in mtk_rx_alloc()
1734 ring->data[i] + NET_SKB_PAD + eth->ip_align, in mtk_rx_alloc()
1737 if (unlikely(dma_mapping_error(eth->dev, dma_addr))) in mtk_rx_alloc()
1741 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_rx_alloc()
1755 mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset); in mtk_rx_alloc()
1756 mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset); in mtk_rx_alloc()
1757 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset); in mtk_rx_alloc()
1758 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset); in mtk_rx_alloc()
1763 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring) in mtk_rx_clean() argument
1773 dma_unmap_single(eth->dev, in mtk_rx_clean()
1784 dma_free_coherent(eth->dev, in mtk_rx_clean()
1792 static int mtk_hwlro_rx_init(struct mtk_eth *eth) in mtk_hwlro_rx_init() argument
1816 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i)); in mtk_hwlro_rx_init()
1817 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i)); in mtk_hwlro_rx_init()
1818 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i)); in mtk_hwlro_rx_init()
1828 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2); in mtk_hwlro_rx_init()
1831 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA); in mtk_hwlro_rx_init()
1834 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME, in mtk_hwlro_rx_init()
1846 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3); in mtk_hwlro_rx_init()
1847 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_init()
1852 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth) in mtk_hwlro_rx_uninit() argument
1858 mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_uninit()
1862 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_uninit()
1872 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i)); in mtk_hwlro_rx_uninit()
1875 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_uninit()
1878 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip) in mtk_hwlro_val_ipaddr() argument
1882 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_val_ipaddr()
1885 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_val_ipaddr()
1887 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx)); in mtk_hwlro_val_ipaddr()
1890 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_val_ipaddr()
1893 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx) in mtk_hwlro_inval_ipaddr() argument
1897 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_inval_ipaddr()
1900 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_inval_ipaddr()
1902 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx)); in mtk_hwlro_inval_ipaddr()
1924 struct mtk_eth *eth = mac->hw; in mtk_hwlro_add_ipaddr() local
1937 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]); in mtk_hwlro_add_ipaddr()
1948 struct mtk_eth *eth = mac->hw; in mtk_hwlro_del_ipaddr() local
1959 mtk_hwlro_inval_ipaddr(eth, hwlro_idx); in mtk_hwlro_del_ipaddr()
1967 struct mtk_eth *eth = mac->hw; in mtk_hwlro_netdev_disable() local
1974 mtk_hwlro_inval_ipaddr(eth, hwlro_idx); in mtk_hwlro_netdev_disable()
2055 static int mtk_dma_busy_wait(struct mtk_eth *eth) in mtk_dma_busy_wait() argument
2061 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dma_busy_wait()
2066 ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val, in mtk_dma_busy_wait()
2070 dev_err(eth->dev, "DMA init timeout\n"); in mtk_dma_busy_wait()
2075 static int mtk_dma_init(struct mtk_eth *eth) in mtk_dma_init() argument
2080 if (mtk_dma_busy_wait(eth)) in mtk_dma_init()
2083 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_dma_init()
2087 err = mtk_init_fq_dma(eth); in mtk_dma_init()
2092 err = mtk_tx_alloc(eth); in mtk_dma_init()
2096 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_dma_init()
2097 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA); in mtk_dma_init()
2102 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL); in mtk_dma_init()
2106 if (eth->hwlro) { in mtk_dma_init()
2108 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO); in mtk_dma_init()
2112 err = mtk_hwlro_rx_init(eth); in mtk_dma_init()
2117 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_dma_init()
2121 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | in mtk_dma_init()
2123 mtk_w32(eth, 0x0, MTK_QDMA_HRED2); in mtk_dma_init()
2129 static void mtk_dma_free(struct mtk_eth *eth) in mtk_dma_free() argument
2134 if (eth->netdev[i]) in mtk_dma_free()
2135 netdev_reset_queue(eth->netdev[i]); in mtk_dma_free()
2136 if (eth->scratch_ring) { in mtk_dma_free()
2137 dma_free_coherent(eth->dev, in mtk_dma_free()
2139 eth->scratch_ring, in mtk_dma_free()
2140 eth->phy_scratch_ring); in mtk_dma_free()
2141 eth->scratch_ring = NULL; in mtk_dma_free()
2142 eth->phy_scratch_ring = 0; in mtk_dma_free()
2144 mtk_tx_clean(eth); in mtk_dma_free()
2145 mtk_rx_clean(eth, ð->rx_ring[0]); in mtk_dma_free()
2146 mtk_rx_clean(eth, ð->rx_ring_qdma); in mtk_dma_free()
2148 if (eth->hwlro) { in mtk_dma_free()
2149 mtk_hwlro_rx_uninit(eth); in mtk_dma_free()
2151 mtk_rx_clean(eth, ð->rx_ring[i]); in mtk_dma_free()
2154 kfree(eth->scratch_head); in mtk_dma_free()
2160 struct mtk_eth *eth = mac->hw; in mtk_tx_timeout() local
2162 eth->netdev[mac->id]->stats.tx_errors++; in mtk_tx_timeout()
2163 netif_err(eth, tx_err, dev, in mtk_tx_timeout()
2165 schedule_work(ð->pending_work); in mtk_tx_timeout()
2170 struct mtk_eth *eth = _eth; in mtk_handle_irq_rx() local
2172 eth->rx_events++; in mtk_handle_irq_rx()
2173 if (likely(napi_schedule_prep(ð->rx_napi))) { in mtk_handle_irq_rx()
2174 __napi_schedule(ð->rx_napi); in mtk_handle_irq_rx()
2175 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT); in mtk_handle_irq_rx()
2183 struct mtk_eth *eth = _eth; in mtk_handle_irq_tx() local
2185 eth->tx_events++; in mtk_handle_irq_tx()
2186 if (likely(napi_schedule_prep(ð->tx_napi))) { in mtk_handle_irq_tx()
2187 __napi_schedule(ð->tx_napi); in mtk_handle_irq_tx()
2188 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); in mtk_handle_irq_tx()
2196 struct mtk_eth *eth = _eth; in mtk_handle_irq() local
2198 if (mtk_r32(eth, MTK_PDMA_INT_MASK) & MTK_RX_DONE_INT) { in mtk_handle_irq()
2199 if (mtk_r32(eth, MTK_PDMA_INT_STATUS) & MTK_RX_DONE_INT) in mtk_handle_irq()
2202 if (mtk_r32(eth, eth->tx_int_mask_reg) & MTK_TX_DONE_INT) { in mtk_handle_irq()
2203 if (mtk_r32(eth, eth->tx_int_status_reg) & MTK_TX_DONE_INT) in mtk_handle_irq()
2214 struct mtk_eth *eth = mac->hw; in mtk_poll_controller() local
2216 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); in mtk_poll_controller()
2217 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT); in mtk_poll_controller()
2218 mtk_handle_irq_rx(eth->irq[2], dev); in mtk_poll_controller()
2219 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); in mtk_poll_controller()
2220 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT); in mtk_poll_controller()
2224 static int mtk_start_dma(struct mtk_eth *eth) in mtk_start_dma() argument
2229 err = mtk_dma_init(eth); in mtk_start_dma()
2231 mtk_dma_free(eth); in mtk_start_dma()
2235 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_start_dma()
2236 mtk_w32(eth, in mtk_start_dma()
2243 mtk_w32(eth, in mtk_start_dma()
2248 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN | in mtk_start_dma()
2256 static void mtk_gdm_config(struct mtk_eth *eth, u32 config) in mtk_gdm_config() argument
2260 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_gdm_config()
2264 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); in mtk_gdm_config()
2274 if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0])) in mtk_gdm_config()
2277 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i)); in mtk_gdm_config()
2280 mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); in mtk_gdm_config()
2281 mtk_w32(eth, 0, MTK_RST_GL); in mtk_gdm_config()
2287 struct mtk_eth *eth = mac->hw; in mtk_open() local
2298 if (!refcount_read(ð->dma_refcnt)) { in mtk_open()
2302 err = mtk_start_dma(eth); in mtk_open()
2306 if (eth->soc->offload_version && mtk_ppe_start(ð->ppe) == 0) in mtk_open()
2309 mtk_gdm_config(eth, gdm_config); in mtk_open()
2311 napi_enable(ð->tx_napi); in mtk_open()
2312 napi_enable(ð->rx_napi); in mtk_open()
2313 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); in mtk_open()
2314 mtk_rx_irq_enable(eth, MTK_RX_DONE_INT); in mtk_open()
2315 refcount_set(ð->dma_refcnt, 1); in mtk_open()
2318 refcount_inc(ð->dma_refcnt); in mtk_open()
2325 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg) in mtk_stop_dma() argument
2331 spin_lock_bh(ð->page_lock); in mtk_stop_dma()
2332 val = mtk_r32(eth, glo_cfg); in mtk_stop_dma()
2333 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN), in mtk_stop_dma()
2335 spin_unlock_bh(ð->page_lock); in mtk_stop_dma()
2339 val = mtk_r32(eth, glo_cfg); in mtk_stop_dma()
2351 struct mtk_eth *eth = mac->hw; in mtk_stop() local
2360 if (!refcount_dec_and_test(ð->dma_refcnt)) in mtk_stop()
2363 mtk_gdm_config(eth, MTK_GDMA_DROP_ALL); in mtk_stop()
2365 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); in mtk_stop()
2366 mtk_rx_irq_disable(eth, MTK_RX_DONE_INT); in mtk_stop()
2367 napi_disable(ð->tx_napi); in mtk_stop()
2368 napi_disable(ð->rx_napi); in mtk_stop()
2370 cancel_work_sync(ð->rx_dim.work); in mtk_stop()
2371 cancel_work_sync(ð->tx_dim.work); in mtk_stop()
2373 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_stop()
2374 mtk_stop_dma(eth, MTK_QDMA_GLO_CFG); in mtk_stop()
2375 mtk_stop_dma(eth, MTK_PDMA_GLO_CFG); in mtk_stop()
2377 mtk_dma_free(eth); in mtk_stop()
2379 if (eth->soc->offload_version) in mtk_stop()
2380 mtk_ppe_stop(ð->ppe); in mtk_stop()
2385 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits) in ethsys_reset() argument
2387 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, in ethsys_reset()
2392 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, in ethsys_reset()
2398 static void mtk_clk_disable(struct mtk_eth *eth) in mtk_clk_disable() argument
2403 clk_disable_unprepare(eth->clks[clk]); in mtk_clk_disable()
2406 static int mtk_clk_enable(struct mtk_eth *eth) in mtk_clk_enable() argument
2411 ret = clk_prepare_enable(eth->clks[clk]); in mtk_clk_enable()
2420 clk_disable_unprepare(eth->clks[clk]); in mtk_clk_enable()
2428 struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim); in mtk_dim_rx() local
2432 cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode, in mtk_dim_rx()
2434 spin_lock_bh(ð->dim_lock); in mtk_dim_rx()
2436 val = mtk_r32(eth, MTK_PDMA_DELAY_INT); in mtk_dim_rx()
2446 mtk_w32(eth, val, MTK_PDMA_DELAY_INT); in mtk_dim_rx()
2447 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dim_rx()
2448 mtk_w32(eth, val, MTK_QDMA_DELAY_INT); in mtk_dim_rx()
2450 spin_unlock_bh(ð->dim_lock); in mtk_dim_rx()
2458 struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim); in mtk_dim_tx() local
2462 cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode, in mtk_dim_tx()
2464 spin_lock_bh(ð->dim_lock); in mtk_dim_tx()
2466 val = mtk_r32(eth, MTK_PDMA_DELAY_INT); in mtk_dim_tx()
2476 mtk_w32(eth, val, MTK_PDMA_DELAY_INT); in mtk_dim_tx()
2477 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dim_tx()
2478 mtk_w32(eth, val, MTK_QDMA_DELAY_INT); in mtk_dim_tx()
2480 spin_unlock_bh(ð->dim_lock); in mtk_dim_tx()
2485 static int mtk_hw_init(struct mtk_eth *eth) in mtk_hw_init() argument
2489 if (test_and_set_bit(MTK_HW_INIT, ð->state)) in mtk_hw_init()
2492 pm_runtime_enable(eth->dev); in mtk_hw_init()
2493 pm_runtime_get_sync(eth->dev); in mtk_hw_init()
2495 ret = mtk_clk_enable(eth); in mtk_hw_init()
2499 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_hw_init()
2500 ret = device_reset(eth->dev); in mtk_hw_init()
2502 dev_err(eth->dev, "MAC reset failed!\n"); in mtk_hw_init()
2507 mtk_dim_rx(ð->rx_dim.work); in mtk_hw_init()
2508 mtk_dim_tx(ð->tx_dim.work); in mtk_hw_init()
2511 mtk_tx_irq_disable(eth, ~0); in mtk_hw_init()
2512 mtk_rx_irq_disable(eth, ~0); in mtk_hw_init()
2518 ethsys_reset(eth, RSTCTRL_FE); in mtk_hw_init()
2519 ethsys_reset(eth, RSTCTRL_PPE); in mtk_hw_init()
2521 if (eth->pctl) { in mtk_hw_init()
2523 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00); in mtk_hw_init()
2526 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5); in mtk_hw_init()
2529 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0); in mtk_hw_init()
2537 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i)); in mtk_hw_init()
2542 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL); in mtk_hw_init()
2543 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL); in mtk_hw_init()
2546 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); in mtk_hw_init()
2549 mtk_dim_rx(ð->rx_dim.work); in mtk_hw_init()
2550 mtk_dim_tx(ð->tx_dim.work); in mtk_hw_init()
2553 mtk_tx_irq_disable(eth, ~0); in mtk_hw_init()
2554 mtk_rx_irq_disable(eth, ~0); in mtk_hw_init()
2557 mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1); in mtk_hw_init()
2558 mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2); in mtk_hw_init()
2559 mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1); in mtk_hw_init()
2560 mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2); in mtk_hw_init()
2561 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); in mtk_hw_init()
2566 pm_runtime_put_sync(eth->dev); in mtk_hw_init()
2567 pm_runtime_disable(eth->dev); in mtk_hw_init()
2572 static int mtk_hw_deinit(struct mtk_eth *eth) in mtk_hw_deinit() argument
2574 if (!test_and_clear_bit(MTK_HW_INIT, ð->state)) in mtk_hw_deinit()
2577 mtk_clk_disable(eth); in mtk_hw_deinit()
2579 pm_runtime_put_sync(eth->dev); in mtk_hw_deinit()
2580 pm_runtime_disable(eth->dev); in mtk_hw_deinit()
2588 struct mtk_eth *eth = mac->hw; in mtk_init() local
2595 dev_err(eth->dev, "generated random MAC address %pM\n", in mtk_init()
2605 struct mtk_eth *eth = mac->hw; in mtk_uninit() local
2608 mtk_tx_irq_disable(eth, ~0); in mtk_uninit()
2609 mtk_rx_irq_disable(eth, ~0); in mtk_uninit()
2616 struct mtk_eth *eth = mac->hw; in mtk_change_mtu() local
2619 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_change_mtu()
2659 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work); in mtk_pending_work() local
2665 dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__); in mtk_pending_work()
2667 while (test_and_set_bit_lock(MTK_RESETTING, ð->state)) in mtk_pending_work()
2670 dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__); in mtk_pending_work()
2673 if (!eth->netdev[i]) in mtk_pending_work()
2675 mtk_stop(eth->netdev[i]); in mtk_pending_work()
2678 dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__); in mtk_pending_work()
2683 mtk_hw_deinit(eth); in mtk_pending_work()
2685 if (eth->dev->pins) in mtk_pending_work()
2686 pinctrl_select_state(eth->dev->pins->p, in mtk_pending_work()
2687 eth->dev->pins->default_state); in mtk_pending_work()
2688 mtk_hw_init(eth); in mtk_pending_work()
2694 err = mtk_open(eth->netdev[i]); in mtk_pending_work()
2696 netif_alert(eth, ifup, eth->netdev[i], in mtk_pending_work()
2698 dev_close(eth->netdev[i]); in mtk_pending_work()
2702 dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__); in mtk_pending_work()
2704 clear_bit_unlock(MTK_RESETTING, &eth->state); in mtk_pending_work()
2709 static int mtk_free_dev(struct mtk_eth *eth) in mtk_free_dev() argument
2714 if (!eth->netdev[i]) in mtk_free_dev()
2716 free_netdev(eth->netdev[i]); in mtk_free_dev()
2722 static int mtk_unreg_dev(struct mtk_eth *eth) in mtk_unreg_dev() argument
2727 if (!eth->netdev[i]) in mtk_unreg_dev()
2729 unregister_netdev(eth->netdev[i]); in mtk_unreg_dev()
2735 static int mtk_cleanup(struct mtk_eth *eth) in mtk_cleanup() argument
2737 mtk_unreg_dev(eth); in mtk_cleanup()
2738 mtk_free_dev(eth); in mtk_cleanup()
2739 cancel_work_sync(&eth->pending_work); in mtk_cleanup()
2948 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) in mtk_add_mac() argument
2957 dev_err(eth->dev, "missing mac id\n"); in mtk_add_mac()
2963 dev_err(eth->dev, "%d is not a valid mac id\n", id); in mtk_add_mac()
2967 if (eth->netdev[id]) { in mtk_add_mac()
2968 dev_err(eth->dev, "duplicate mac id found: %d\n", id); in mtk_add_mac()
2972 eth->netdev[id] = alloc_etherdev(sizeof(*mac)); in mtk_add_mac()
2973 if (!eth->netdev[id]) { in mtk_add_mac()
2974 dev_err(eth->dev, "alloc_etherdev failed\n"); in mtk_add_mac()
2977 mac = netdev_priv(eth->netdev[id]); in mtk_add_mac()
2978 eth->mac[id] = mac; in mtk_add_mac()
2980 mac->hw = eth; in mtk_add_mac()
2986 mac->hw_stats = devm_kzalloc(eth->dev, in mtk_add_mac()
2990 dev_err(eth->dev, "failed to allocate counter memory\n"); in mtk_add_mac()
3001 dev_err(eth->dev, "incorrect phy-mode\n"); in mtk_add_mac()
3010 mac->phylink_config.dev = &eth->netdev[id]->dev; in mtk_add_mac()
3023 SET_NETDEV_DEV(eth->netdev[id], eth->dev); in mtk_add_mac()
3024 eth->netdev[id]->watchdog_timeo = 5 * HZ; in mtk_add_mac()
3025 eth->netdev[id]->netdev_ops = &mtk_netdev_ops; in mtk_add_mac()
3026 eth->netdev[id]->base_addr = (unsigned long)eth->base; in mtk_add_mac()
3028 eth->netdev[id]->hw_features = eth->soc->hw_features; in mtk_add_mac()
3029 if (eth->hwlro) in mtk_add_mac()
3030 eth->netdev[id]->hw_features |= NETIF_F_LRO; in mtk_add_mac()
3032 eth->netdev[id]->vlan_features = eth->soc->hw_features & in mtk_add_mac()
3034 eth->netdev[id]->features |= eth->soc->hw_features; in mtk_add_mac()
3035 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops; in mtk_add_mac()
3037 eth->netdev[id]->irq = eth->irq[0]; in mtk_add_mac()
3038 eth->netdev[id]->dev.of_node = np; in mtk_add_mac()
3040 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_add_mac()
3041 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN; in mtk_add_mac()
3043 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN; in mtk_add_mac()
3048 free_netdev(eth->netdev[id]); in mtk_add_mac()
3055 struct mtk_eth *eth; in mtk_probe() local
3058 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); in mtk_probe()
3059 if (!eth) in mtk_probe()
3062 eth->soc = of_device_get_match_data(&pdev->dev); in mtk_probe()
3064 eth->dev = &pdev->dev; in mtk_probe()
3065 eth->base = devm_platform_ioremap_resource(pdev, 0); in mtk_probe()
3066 if (IS_ERR(eth->base)) in mtk_probe()
3067 return PTR_ERR(eth->base); in mtk_probe()
3069 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_probe()
3070 eth->tx_int_mask_reg = MTK_QDMA_INT_MASK; in mtk_probe()
3071 eth->tx_int_status_reg = MTK_QDMA_INT_STATUS; in mtk_probe()
3073 eth->tx_int_mask_reg = MTK_PDMA_INT_MASK; in mtk_probe()
3074 eth->tx_int_status_reg = MTK_PDMA_INT_STATUS; in mtk_probe()
3077 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_probe()
3078 eth->rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA; in mtk_probe()
3079 eth->ip_align = NET_IP_ALIGN; in mtk_probe()
3081 eth->rx_dma_l4_valid = RX_DMA_L4_VALID; in mtk_probe()
3084 spin_lock_init(&eth->page_lock); in mtk_probe()
3085 spin_lock_init(&eth->tx_irq_lock); in mtk_probe()
3086 spin_lock_init(&eth->rx_irq_lock); in mtk_probe()
3087 spin_lock_init(&eth->dim_lock); in mtk_probe()
3089 eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in mtk_probe()
3090 INIT_WORK(&eth->rx_dim.work, mtk_dim_rx); in mtk_probe()
3092 eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in mtk_probe()
3093 INIT_WORK(&eth->tx_dim.work, mtk_dim_tx); in mtk_probe()
3095 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_probe()
3096 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
3098 if (IS_ERR(eth->ethsys)) { in mtk_probe()
3100 return PTR_ERR(eth->ethsys); in mtk_probe()
3104 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) { in mtk_probe()
3105 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
3107 if (IS_ERR(eth->infra)) { in mtk_probe()
3109 return PTR_ERR(eth->infra); in mtk_probe()
3113 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { in mtk_probe()
3114 eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii), in mtk_probe()
3116 if (!eth->sgmii) in mtk_probe()
3119 err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node, in mtk_probe()
3120 eth->soc->ana_rgc3); in mtk_probe()
3126 if (eth->soc->required_pctl) { in mtk_probe()
3127 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
3129 if (IS_ERR(eth->pctl)) { in mtk_probe()
3131 return PTR_ERR(eth->pctl); in mtk_probe()
3136 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0) in mtk_probe()
3137 eth->irq[i] = eth->irq[0]; in mtk_probe()
3139 eth->irq[i] = platform_get_irq(pdev, i); in mtk_probe()
3140 if (eth->irq[i] < 0) { in mtk_probe()
3145 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) { in mtk_probe()
3146 eth->clks[i] = devm_clk_get(eth->dev, in mtk_probe()
3148 if (IS_ERR(eth->clks[i])) { in mtk_probe()
3149 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) in mtk_probe()
3151 if (eth->soc->required_clks & BIT(i)) { in mtk_probe()
3156 eth->clks[i] = NULL; in mtk_probe()
3160 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); in mtk_probe()
3161 INIT_WORK(&eth->pending_work, mtk_pending_work); in mtk_probe()
3163 err = mtk_hw_init(eth); in mtk_probe()
3167 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO); in mtk_probe()
3177 err = mtk_add_mac(eth, mac_np); in mtk_probe()
3184 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) { in mtk_probe()
3185 err = devm_request_irq(eth->dev, eth->irq[0], in mtk_probe()
3187 dev_name(eth->dev), eth); in mtk_probe()
3189 err = devm_request_irq(eth->dev, eth->irq[1], in mtk_probe()
3191 dev_name(eth->dev), eth); in mtk_probe()
3195 err = devm_request_irq(eth->dev, eth->irq[2], in mtk_probe()
3197 dev_name(eth->dev), eth); in mtk_probe()
3203 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_probe()
3204 err = mtk_mdio_init(eth); in mtk_probe()
3209 if (eth->soc->offload_version) { in mtk_probe()
3210 err = mtk_ppe_init(&eth->ppe, eth->dev, in mtk_probe()
3211 eth->base + MTK_ETH_PPE_BASE, 2); in mtk_probe()
3215 err = mtk_eth_offload_init(eth); in mtk_probe()
3221 if (!eth->netdev[i]) in mtk_probe()
3224 err = register_netdev(eth->netdev[i]); in mtk_probe()
3226 dev_err(eth->dev, "error bringing up device\n"); in mtk_probe()
3229 netif_info(eth, probe, eth->netdev[i], in mtk_probe()
3231 eth->netdev[i]->base_addr, eth->irq[0]); in mtk_probe()
3237 init_dummy_netdev(&eth->dummy_dev); in mtk_probe()
3238 netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx, in mtk_probe()
3240 netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx, in mtk_probe()
3243 platform_set_drvdata(pdev, eth); in mtk_probe()
3248 mtk_mdio_cleanup(eth); in mtk_probe()
3250 mtk_free_dev(eth); in mtk_probe()
3252 mtk_hw_deinit(eth); in mtk_probe()
3259 struct mtk_eth *eth = platform_get_drvdata(pdev); in mtk_remove() local
3265 if (!eth->netdev[i]) in mtk_remove()
3267 mtk_stop(eth->netdev[i]); in mtk_remove()
3268 mac = netdev_priv(eth->netdev[i]); in mtk_remove()
3272 mtk_hw_deinit(eth); in mtk_remove()
3274 netif_napi_del(&eth->tx_napi); in mtk_remove()
3275 netif_napi_del(&eth->rx_napi); in mtk_remove()
3276 mtk_cleanup(eth); in mtk_remove()
3277 mtk_mdio_cleanup(eth); in mtk_remove()