Lines matching refs: eth — identifier cross-reference hits, apparently from the MediaTek mtk_eth_soc Ethernet driver (Linux kernel). Each entry gives the source line number, the matched code, and the enclosing function; the trailing "argument"/"local" marks whether eth enters that function as a parameter or a local variable. Only lines that mention eth appear, so most function bodies below are partial.
187 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg) in mtk_w32() argument
189 __raw_writel(val, eth->base + reg); in mtk_w32()
192 u32 mtk_r32(struct mtk_eth *eth, unsigned reg) in mtk_r32() argument
194 return __raw_readl(eth->base + reg); in mtk_r32()
197 static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg) in mtk_m32() argument
201 val = mtk_r32(eth, reg); in mtk_m32()
204 mtk_w32(eth, val, reg); in mtk_m32()
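
The three helpers above are the driver's basic MMIO accessors: mtk_w32()/mtk_r32() wrap __raw_writel()/__raw_readl() on the register window at eth->base, and mtk_m32() layers a read-modify-write on top of them. The mask/set arithmetic falls between the matched lines, so here is a minimal sketch of the likely body (reconstructed, not verbatim; the return value in particular is not shown in the hits):

	static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg)
	{
		u32 val;

		/* read-modify-write: clear the bits in @mask, then OR in @set */
		val = mtk_r32(eth, reg);
		val &= ~mask;
		val |= set;
		mtk_w32(eth, val, reg);

		return val;
	}
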
208 static int mtk_mdio_busy_wait(struct mtk_eth *eth) in mtk_mdio_busy_wait() argument
213 if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS)) in mtk_mdio_busy_wait()
220 dev_err(eth->dev, "mdio: MDIO timeout\n"); in mtk_mdio_busy_wait()
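
Only the idle test and the timeout message of mtk_mdio_busy_wait() match, since the loop control itself never mentions eth. The pattern is a jiffies-bounded poll on the PHY_IAC_ACCESS busy bit; a plausible reconstruction (the PHY_IAC_TIMEOUT constant is an assumption):

	static int mtk_mdio_busy_wait(struct mtk_eth *eth)
	{
		unsigned long t_start = jiffies;

		while (1) {
			/* PHY_IAC_ACCESS clears when the MDIO controller goes idle */
			if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
				return 0;
			if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
				break;
			cond_resched();
		}

		dev_err(eth->dev, "mdio: MDIO timeout\n");
		return -ETIMEDOUT;
	}
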
224 static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg, in _mtk_mdio_write_c22() argument
229 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c22()
233 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_write_c22()
241 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c22()
248 static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr, in _mtk_mdio_write_c45() argument
253 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c45()
257 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_write_c45()
265 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c45()
269 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_write_c45()
277 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_write_c45()
284 static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg) in _mtk_mdio_read_c22() argument
288 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c22()
292 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_read_c22()
299 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c22()
303 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK; in _mtk_mdio_read_c22()
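
The mtk_w32() hit inside _mtk_mdio_read_c22() is truncated at the first OR term: the full argument composes a one-shot command word for the MDIO controller, after which the function polls for completion and pulls the data back out of the same register. A hedged sketch of the whole clause-22 read, with the PHY_IAC_* field macros assumed to follow this driver's naming:

	static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
	{
		int ret;

		ret = mtk_mdio_busy_wait(eth);
		if (ret < 0)
			return ret;

		/* kick off the access: start pattern + read opcode + phy/reg fields */
		mtk_w32(eth, PHY_IAC_ACCESS | PHY_IAC_START_C22 | PHY_IAC_CMD_C22_READ |
			PHY_IAC_ADDR(phy_addr) | PHY_IAC_REG(phy_reg),
			MTK_PHY_IAC);

		ret = mtk_mdio_busy_wait(eth);
		if (ret < 0)
			return ret;

		/* the low bits of the same register hold the read-back data */
		return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
	}

The clause-45 variants follow the same busy-wait/command/busy-wait rhythm, just with an extra register-address cycle — hence the two mtk_w32() hits apiece in their listings.
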
306 static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr, in _mtk_mdio_read_c45() argument
311 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c45()
315 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_read_c45()
323 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c45()
327 mtk_w32(eth, PHY_IAC_ACCESS | in _mtk_mdio_read_c45()
334 ret = mtk_mdio_busy_wait(eth); in _mtk_mdio_read_c45()
338 return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK; in _mtk_mdio_read_c45()
344 struct mtk_eth *eth = bus->priv; in mtk_mdio_write_c22() local
346 return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val); in mtk_mdio_write_c22()
352 struct mtk_eth *eth = bus->priv; in mtk_mdio_write_c45() local
354 return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val); in mtk_mdio_write_c45()
359 struct mtk_eth *eth = bus->priv; in mtk_mdio_read_c22() local
361 return _mtk_mdio_read_c22(eth, phy_addr, phy_reg); in mtk_mdio_read_c22()
367 struct mtk_eth *eth = bus->priv; in mtk_mdio_read_c45() local
369 return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg); in mtk_mdio_read_c45()
372 static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth, in mt7621_gmac0_rgmii_adjust() argument
380 regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val); in mt7621_gmac0_rgmii_adjust()
383 dev_err(eth->dev, in mt7621_gmac0_rgmii_adjust()
391 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0, in mt7621_gmac0_rgmii_adjust()
397 static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, in mtk_gmac0_rgmii_adjust() argument
404 mtk_w32(eth, TRGMII_MODE, INTF_MODE); in mtk_gmac0_rgmii_adjust()
406 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val); in mtk_gmac0_rgmii_adjust()
408 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret); in mtk_gmac0_rgmii_adjust()
414 mtk_w32(eth, val, INTF_MODE); in mtk_gmac0_rgmii_adjust()
416 regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0, in mtk_gmac0_rgmii_adjust()
421 ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val); in mtk_gmac0_rgmii_adjust()
423 dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret); in mtk_gmac0_rgmii_adjust()
427 mtk_w32(eth, val, TRGMII_RCK_CTRL); in mtk_gmac0_rgmii_adjust()
431 mtk_w32(eth, val, TRGMII_TCK_CTRL); in mtk_gmac0_rgmii_adjust()
439 struct mtk_eth *eth = mac->hw; in mtk_mac_select_pcs() local
444 sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? in mtk_mac_select_pcs()
447 return mtk_sgmii_select_pcs(eth->sgmii, sid); in mtk_mac_select_pcs()
458 struct mtk_eth *eth = mac->hw; in mtk_mac_config() local
463 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && in mtk_mac_config()
481 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) { in mtk_mac_config()
482 err = mtk_gmac_rgmii_path_setup(eth, mac->id); in mtk_mac_config()
490 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { in mtk_mac_config()
491 err = mtk_gmac_sgmii_path_setup(eth, mac->id); in mtk_mac_config()
497 if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) { in mtk_mac_config()
498 err = mtk_gmac_gephy_path_setup(eth, mac->id); in mtk_mac_config()
561 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); in mtk_mac_config()
564 regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); in mtk_mac_config()
575 regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); in mtk_mac_config()
577 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, in mtk_mac_config()
584 dev_err(eth->dev, in mtk_mac_config()
592 dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__, in mtk_mac_config()
597 dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__, in mtk_mac_config()
606 struct mtk_eth *eth = mac->hw; in mtk_mac_finish() local
612 regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, in mtk_mac_finish()
672 static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx, in mtk_set_queue_speed() argument
675 const struct mtk_soc_data *soc = eth->soc; in mtk_set_queue_speed()
686 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) in mtk_set_queue_speed()
738 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs); in mtk_set_queue_speed()
791 static int mtk_mdio_init(struct mtk_eth *eth) in mtk_mdio_init() argument
796 mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus"); in mtk_mdio_init()
798 dev_err(eth->dev, "no %s child node found", "mdio-bus"); in mtk_mdio_init()
807 eth->mii_bus = devm_mdiobus_alloc(eth->dev); in mtk_mdio_init()
808 if (!eth->mii_bus) { in mtk_mdio_init()
813 eth->mii_bus->name = "mdio"; in mtk_mdio_init()
814 eth->mii_bus->read = mtk_mdio_read_c22; in mtk_mdio_init()
815 eth->mii_bus->write = mtk_mdio_write_c22; in mtk_mdio_init()
816 eth->mii_bus->read_c45 = mtk_mdio_read_c45; in mtk_mdio_init()
817 eth->mii_bus->write_c45 = mtk_mdio_write_c45; in mtk_mdio_init()
818 eth->mii_bus->priv = eth; in mtk_mdio_init()
819 eth->mii_bus->parent = eth->dev; in mtk_mdio_init()
821 snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np); in mtk_mdio_init()
822 ret = of_mdiobus_register(eth->mii_bus, mii_np); in mtk_mdio_init()
829 static void mtk_mdio_cleanup(struct mtk_eth *eth) in mtk_mdio_cleanup() argument
831 if (!eth->mii_bus) in mtk_mdio_cleanup()
834 mdiobus_unregister(eth->mii_bus); in mtk_mdio_cleanup()
837 static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask) in mtk_tx_irq_disable() argument
842 spin_lock_irqsave(ð->tx_irq_lock, flags); in mtk_tx_irq_disable()
843 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_disable()
844 mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_disable()
845 spin_unlock_irqrestore(ð->tx_irq_lock, flags); in mtk_tx_irq_disable()
848 static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask) in mtk_tx_irq_enable() argument
853 spin_lock_irqsave(ð->tx_irq_lock, flags); in mtk_tx_irq_enable()
854 val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_enable()
855 mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask); in mtk_tx_irq_enable()
856 spin_unlock_irqrestore(ð->tx_irq_lock, flags); in mtk_tx_irq_enable()
859 static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask) in mtk_rx_irq_disable() argument
864 spin_lock_irqsave(ð->rx_irq_lock, flags); in mtk_rx_irq_disable()
865 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_disable()
866 mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_disable()
867 spin_unlock_irqrestore(ð->rx_irq_lock, flags); in mtk_rx_irq_disable()
870 static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask) in mtk_rx_irq_enable() argument
875 spin_lock_irqsave(ð->rx_irq_lock, flags); in mtk_rx_irq_enable()
876 val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_enable()
877 mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask); in mtk_rx_irq_enable()
878 spin_unlock_irqrestore(ð->rx_irq_lock, flags); in mtk_rx_irq_enable()
885 struct mtk_eth *eth = mac->hw; in mtk_set_mac_address() local
895 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_set_mac_address()
916 struct mtk_eth *eth = mac->hw; in mtk_stats_update_mac() local
920 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_stats_update_mac()
928 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_stats_update_mac()
966 static void mtk_stats_update(struct mtk_eth *eth) in mtk_stats_update() argument
971 if (!eth->mac[i] || !eth->mac[i]->hw_stats) in mtk_stats_update()
973 if (spin_trylock(ð->mac[i]->hw_stats->stats_lock)) { in mtk_stats_update()
974 mtk_stats_update_mac(eth->mac[i]); in mtk_stats_update()
975 spin_unlock(ð->mac[i]->hw_stats->stats_lock); in mtk_stats_update()
1034 static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd, in mtk_rx_get_desc() argument
1044 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { in mtk_rx_get_desc()
1064 static int mtk_init_fq_dma(struct mtk_eth *eth) in mtk_init_fq_dma() argument
1066 const struct mtk_soc_data *soc = eth->soc; in mtk_init_fq_dma()
1072 eth->scratch_ring = dma_alloc_coherent(eth->dma_dev, in mtk_init_fq_dma()
1074 ð->phy_scratch_ring, in mtk_init_fq_dma()
1076 if (unlikely(!eth->scratch_ring)) in mtk_init_fq_dma()
1079 eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL); in mtk_init_fq_dma()
1080 if (unlikely(!eth->scratch_head)) in mtk_init_fq_dma()
1083 dma_addr = dma_map_single(eth->dma_dev, in mtk_init_fq_dma()
1084 eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE, in mtk_init_fq_dma()
1086 if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) in mtk_init_fq_dma()
1089 phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1); in mtk_init_fq_dma()
1094 txd = eth->scratch_ring + i * soc->txrx.txd_size; in mtk_init_fq_dma()
1097 txd->txd2 = eth->phy_scratch_ring + in mtk_init_fq_dma()
1110 mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head); in mtk_init_fq_dma()
1111 mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail); in mtk_init_fq_dma()
1112 mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count); in mtk_init_fq_dma()
1113 mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen); in mtk_init_fq_dma()
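
mtk_init_fq_dma() builds the QDMA "free queue": a coherent array of cnt descriptors whose buffers come from one large scratch allocation, each descriptor chained to the next by physical address, with head/tail/count/buffer-length then programmed into the fq_* registers (the last four hits above). The per-descriptor loop is mostly elided; a hedged sketch of its likely shape, with the txd1..txd3 field roles inferred from this driver's conventions:

	for (i = 0; i < cnt; i++) {
		struct mtk_tx_dma_v2 *txd;

		txd = eth->scratch_ring + i * soc->txrx.txd_size;
		/* txd1: DMA address of this slot's scratch buffer */
		txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
		/* txd2: physical address of the next descriptor, forming the
		 * linked free list that the hardware consumes
		 */
		if (i < cnt - 1)
			txd->txd2 = eth->phy_scratch_ring +
				    (i + 1) * soc->txrx.txd_size;
		txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
	}
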
1142 static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, in mtk_tx_unmap() argument
1145 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_tx_unmap()
1147 dma_unmap_single(eth->dma_dev, in mtk_tx_unmap()
1152 dma_unmap_page(eth->dma_dev, in mtk_tx_unmap()
1159 dma_unmap_page(eth->dma_dev, in mtk_tx_unmap()
1166 dma_unmap_page(eth->dma_dev, in mtk_tx_unmap()
1196 static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, in setup_tx_buf() argument
1200 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in setup_tx_buf()
1223 struct mtk_eth *eth = mac->hw; in mtk_tx_set_dma_desc_v1() local
1254 struct mtk_eth *eth = mac->hw; in mtk_tx_set_dma_desc_v2() local
1291 struct mtk_eth *eth = mac->hw; in mtk_tx_set_dma_desc() local
1293 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) in mtk_tx_set_dma_desc()
1314 struct mtk_eth *eth = mac->hw; in mtk_tx_map() local
1315 const struct mtk_soc_data *soc = eth->soc; in mtk_tx_map()
1332 txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size, in mtk_tx_map()
1334 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) in mtk_tx_map()
1342 setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size, in mtk_tx_map()
1375 txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag, in mtk_tx_map()
1378 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) in mtk_tx_map()
1392 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr, in mtk_tx_map()
1424 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr); in mtk_tx_map()
1430 mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0); in mtk_tx_map()
1440 mtk_tx_unmap(eth, tx_buf, NULL, false); in mtk_tx_map()
1453 static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb) in mtk_cal_txd_req() argument
1462 eth->soc->txrx.dma_max_len); in mtk_cal_txd_req()
1471 static int mtk_queue_stopped(struct mtk_eth *eth) in mtk_queue_stopped() argument
1476 if (!eth->netdev[i]) in mtk_queue_stopped()
1478 if (netif_queue_stopped(eth->netdev[i])) in mtk_queue_stopped()
1485 static void mtk_wake_queue(struct mtk_eth *eth) in mtk_wake_queue() argument
1490 if (!eth->netdev[i]) in mtk_wake_queue()
1492 netif_tx_wake_all_queues(eth->netdev[i]); in mtk_wake_queue()
1499 struct mtk_eth *eth = mac->hw; in mtk_start_xmit() local
1500 struct mtk_tx_ring *ring = ð->tx_ring; in mtk_start_xmit()
1509 spin_lock(ð->page_lock); in mtk_start_xmit()
1511 if (unlikely(test_bit(MTK_RESETTING, ð->state))) in mtk_start_xmit()
1514 tx_num = mtk_cal_txd_req(eth, skb); in mtk_start_xmit()
1517 netif_err(eth, tx_queued, dev, in mtk_start_xmit()
1519 spin_unlock(ð->page_lock); in mtk_start_xmit()
1526 netif_warn(eth, tx_err, dev, in mtk_start_xmit()
1544 spin_unlock(ð->page_lock); in mtk_start_xmit()
1549 spin_unlock(ð->page_lock); in mtk_start_xmit()
1555 static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth) in mtk_get_rx_ring() argument
1561 if (!eth->hwlro) in mtk_get_rx_ring()
1562 return ð->rx_ring[0]; in mtk_get_rx_ring()
1567 ring = ð->rx_ring[i]; in mtk_get_rx_ring()
1569 rxd = ring->dma + idx * eth->soc->txrx.rxd_size; in mtk_get_rx_ring()
1579 static void mtk_update_rx_cpu_idx(struct mtk_eth *eth) in mtk_update_rx_cpu_idx() argument
1584 if (!eth->hwlro) { in mtk_update_rx_cpu_idx()
1585 ring = ð->rx_ring[0]; in mtk_update_rx_cpu_idx()
1586 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_update_rx_cpu_idx()
1589 ring = ð->rx_ring[i]; in mtk_update_rx_cpu_idx()
1592 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_update_rx_cpu_idx()
1598 static bool mtk_page_pool_enabled(struct mtk_eth *eth) in mtk_page_pool_enabled() argument
1600 return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2); in mtk_page_pool_enabled()
1603 static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth, in mtk_create_page_pool() argument
1612 .dev = eth->dma_dev, in mtk_create_page_pool()
1619 pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL in mtk_create_page_pool()
1625 err = __xdp_rxq_info_reg(xdp_q, ð->dummy_dev, id, in mtk_create_page_pool()
1626 eth->rx_napi.napi_id, PAGE_SIZE); in mtk_create_page_pool()
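
mtk_create_page_pool() follows the standard page_pool recipe: fill a struct page_pool_params tied to the DMA device, choose the DMA direction from whether an XDP program is attached (the rcu_access_pointer() hit above), create the pool, then register an xdp_rxq_info against the driver's dummy netdev so the RX NAPI can build xdp_buffs from pool pages. A sketch of the parameter block; the headroom/length macros are assumptions:

	struct page_pool_params pp_params = {
		.order		= 0,
		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
		.pool_size	= size,
		.nid		= NUMA_NO_NODE,
		.dev		= eth->dma_dev,
		.offset		= MTK_PP_HEADROOM,
		.max_len	= MTK_PP_MAX_BUF_SIZE,
	};

	/* bidirectional mapping only if XDP may rewrite and retransmit frames */
	pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
							  : DMA_FROM_DEVICE;
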
1666 static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev, in mtk_xdp_frame_map() argument
1671 struct mtk_tx_ring *ring = ð->tx_ring; in mtk_xdp_frame_map()
1676 txd_info->addr = dma_map_single(eth->dma_dev, data, in mtk_xdp_frame_map()
1678 if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr))) in mtk_xdp_frame_map()
1687 dma_sync_single_for_device(eth->dma_dev, txd_info->addr, in mtk_xdp_frame_map()
1697 setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size, in mtk_xdp_frame_map()
1703 static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf, in mtk_xdp_submit_frame() argument
1707 const struct mtk_soc_data *soc = eth->soc; in mtk_xdp_submit_frame()
1708 struct mtk_tx_ring *ring = ð->tx_ring; in mtk_xdp_submit_frame()
1721 if (unlikely(test_bit(MTK_RESETTING, ð->state))) in mtk_xdp_submit_frame()
1728 spin_lock(ð->page_lock); in mtk_xdp_submit_frame()
1732 spin_unlock(ð->page_lock); in mtk_xdp_submit_frame()
1742 err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf, in mtk_xdp_submit_frame()
1790 mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr); in mtk_xdp_submit_frame()
1795 mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size), in mtk_xdp_submit_frame()
1799 spin_unlock(ð->page_lock); in mtk_xdp_submit_frame()
1806 mtk_tx_unmap(eth, tx_buf, NULL, false); in mtk_xdp_submit_frame()
1818 spin_unlock(ð->page_lock); in mtk_xdp_submit_frame()
1828 struct mtk_eth *eth = mac->hw; in mtk_xdp_xmit() local
1835 if (mtk_xdp_submit_frame(eth, frames[i], dev, true)) in mtk_xdp_xmit()
1848 static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring, in mtk_xdp_run() argument
1859 prog = rcu_dereference(eth->prog); in mtk_xdp_run()
1879 if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) { in mtk_xdp_run()
1912 struct mtk_eth *eth) in mtk_poll_rx() argument
1932 ring = mtk_get_rx_ring(eth); in mtk_poll_rx()
1937 rxd = ring->dma + idx * eth->soc->txrx.rxd_size; in mtk_poll_rx()
1940 if (!mtk_rx_get_desc(eth, &trxd, rxd)) in mtk_poll_rx()
1944 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) in mtk_poll_rx()
1946 else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && in mtk_poll_rx()
1951 !eth->netdev[mac])) in mtk_poll_rx()
1954 netdev = eth->netdev[mac]; in mtk_poll_rx()
1956 if (unlikely(test_bit(MTK_RESETTING, ð->state))) in mtk_poll_rx()
1975 dma_sync_single_for_cpu(eth->dma_dev, in mtk_poll_rx()
1984 ret = mtk_xdp_run(eth, ring, &xdp, netdev); in mtk_poll_rx()
2013 dma_addr = dma_map_single(eth->dma_dev, in mtk_poll_rx()
2014 new_data + NET_SKB_PAD + eth->ip_align, in mtk_poll_rx()
2016 if (unlikely(dma_mapping_error(eth->dma_dev, in mtk_poll_rx()
2023 dma_unmap_single(eth->dma_dev, trxd.rxd1, in mtk_poll_rx()
2040 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { in mtk_poll_rx()
2056 if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid) in mtk_poll_rx()
2063 mtk_ppe_check_skb(eth->ppe[0], skb, hash); in mtk_poll_rx()
2066 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { in mtk_poll_rx()
2085 if (port < ARRAY_SIZE(eth->dsa_meta) && in mtk_poll_rx()
2086 eth->dsa_meta[port]) in mtk_poll_rx()
2087 skb_dst_set_noref(skb, ð->dsa_meta[port]->dst); in mtk_poll_rx()
2099 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_poll_rx()
2114 mtk_update_rx_cpu_idx(eth); in mtk_poll_rx()
2117 eth->rx_packets += done; in mtk_poll_rx()
2118 eth->rx_bytes += bytes; in mtk_poll_rx()
2119 dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes, in mtk_poll_rx()
2121 net_dim(ð->rx_dim, dim_sample); in mtk_poll_rx()
2137 mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac, in mtk_poll_tx_done() argument
2145 eth->tx_packets++; in mtk_poll_tx_done()
2146 eth->tx_bytes += bytes; in mtk_poll_tx_done()
2148 dev = eth->netdev[mac]; in mtk_poll_tx_done()
2167 static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget, in mtk_poll_tx_qdma() argument
2170 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_poll_tx_qdma()
2171 struct mtk_tx_ring *ring = ð->tx_ring; in mtk_poll_tx_qdma()
2178 dma = mtk_r32(eth, reg_map->qdma.drx_ptr); in mtk_poll_tx_qdma()
2192 eth->soc->txrx.txd_size); in mtk_poll_tx_qdma()
2201 mtk_poll_tx_done(eth, state, mac, tx_buf->data); in mtk_poll_tx_qdma()
2205 mtk_tx_unmap(eth, tx_buf, &bq, true); in mtk_poll_tx_qdma()
2215 mtk_w32(eth, cpu, reg_map->qdma.crx_ptr); in mtk_poll_tx_qdma()
2220 static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget, in mtk_poll_tx_pdma() argument
2223 struct mtk_tx_ring *ring = ð->tx_ring; in mtk_poll_tx_pdma()
2230 dma = mtk_r32(eth, MT7628_TX_DTX_IDX0); in mtk_poll_tx_pdma()
2240 mtk_poll_tx_done(eth, state, 0, tx_buf->data); in mtk_poll_tx_pdma()
2243 mtk_tx_unmap(eth, tx_buf, &bq, true); in mtk_poll_tx_pdma()
2245 desc = ring->dma + cpu * eth->soc->txrx.txd_size; in mtk_poll_tx_pdma()
2258 static int mtk_poll_tx(struct mtk_eth *eth, int budget) in mtk_poll_tx() argument
2260 struct mtk_tx_ring *ring = ð->tx_ring; in mtk_poll_tx()
2264 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_poll_tx()
2265 budget = mtk_poll_tx_qdma(eth, budget, &state); in mtk_poll_tx()
2267 budget = mtk_poll_tx_pdma(eth, budget, &state); in mtk_poll_tx()
2272 dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes, in mtk_poll_tx()
2274 net_dim(ð->tx_dim, dim_sample); in mtk_poll_tx()
2276 if (mtk_queue_stopped(eth) && in mtk_poll_tx()
2278 mtk_wake_queue(eth); in mtk_poll_tx()
2283 static void mtk_handle_status_irq(struct mtk_eth *eth) in mtk_handle_status_irq() argument
2285 u32 status2 = mtk_r32(eth, MTK_INT_STATUS2); in mtk_handle_status_irq()
2288 mtk_stats_update(eth); in mtk_handle_status_irq()
2289 mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF), in mtk_handle_status_irq()
2296 struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi); in mtk_napi_tx() local
2297 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_napi_tx()
2300 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_napi_tx()
2301 mtk_handle_status_irq(eth); in mtk_napi_tx()
2302 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status); in mtk_napi_tx()
2303 tx_done = mtk_poll_tx(eth, budget); in mtk_napi_tx()
2305 if (unlikely(netif_msg_intr(eth))) { in mtk_napi_tx()
2306 dev_info(eth->dev, in mtk_napi_tx()
2308 mtk_r32(eth, reg_map->tx_irq_status), in mtk_napi_tx()
2309 mtk_r32(eth, reg_map->tx_irq_mask)); in mtk_napi_tx()
2315 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT) in mtk_napi_tx()
2319 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); in mtk_napi_tx()
2326 struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi); in mtk_napi_rx() local
2327 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_napi_rx()
2330 mtk_handle_status_irq(eth); in mtk_napi_rx()
2335 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, in mtk_napi_rx()
2337 rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth); in mtk_napi_rx()
2340 if (unlikely(netif_msg_intr(eth))) { in mtk_napi_rx()
2341 dev_info(eth->dev, in mtk_napi_rx()
2343 mtk_r32(eth, reg_map->pdma.irq_status), in mtk_napi_rx()
2344 mtk_r32(eth, reg_map->pdma.irq_mask)); in mtk_napi_rx()
2350 } while (mtk_r32(eth, reg_map->pdma.irq_status) & in mtk_napi_rx()
2351 eth->soc->txrx.rx_irq_done_mask); in mtk_napi_rx()
2354 mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask); in mtk_napi_rx()
2359 static int mtk_tx_alloc(struct mtk_eth *eth) in mtk_tx_alloc() argument
2361 const struct mtk_soc_data *soc = eth->soc; in mtk_tx_alloc()
2362 struct mtk_tx_ring *ring = ð->tx_ring; in mtk_tx_alloc()
2378 ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz, in mtk_tx_alloc()
2404 ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz, in mtk_tx_alloc()
2428 mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr); in mtk_tx_alloc()
2429 mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr); in mtk_tx_alloc()
2430 mtk_w32(eth, in mtk_tx_alloc()
2433 mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr); in mtk_tx_alloc()
2437 mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs); in mtk_tx_alloc()
2444 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) in mtk_tx_alloc()
2446 mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs); in mtk_tx_alloc()
2450 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate); in mtk_tx_alloc()
2451 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) in mtk_tx_alloc()
2452 mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4); in mtk_tx_alloc()
2454 mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0); in mtk_tx_alloc()
2455 mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0); in mtk_tx_alloc()
2456 mtk_w32(eth, 0, MT7628_TX_CTX_IDX0); in mtk_tx_alloc()
2457 mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx); in mtk_tx_alloc()
2466 static void mtk_tx_clean(struct mtk_eth *eth) in mtk_tx_clean() argument
2468 const struct mtk_soc_data *soc = eth->soc; in mtk_tx_clean()
2469 struct mtk_tx_ring *ring = ð->tx_ring; in mtk_tx_clean()
2474 mtk_tx_unmap(eth, &ring->buf[i], NULL, false); in mtk_tx_clean()
2480 dma_free_coherent(eth->dma_dev, in mtk_tx_clean()
2487 dma_free_coherent(eth->dma_dev, in mtk_tx_clean()
2494 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) in mtk_rx_alloc() argument
2496 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_rx_alloc()
2504 ring = ð->rx_ring_qdma; in mtk_rx_alloc()
2506 ring = ð->rx_ring[ring_no]; in mtk_rx_alloc()
2524 if (mtk_page_pool_enabled(eth)) { in mtk_rx_alloc()
2527 pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no, in mtk_rx_alloc()
2535 ring->dma = dma_alloc_coherent(eth->dma_dev, in mtk_rx_alloc()
2536 rx_dma_size * eth->soc->txrx.rxd_size, in mtk_rx_alloc()
2546 rxd = ring->dma + i * eth->soc->txrx.rxd_size; in mtk_rx_alloc()
2561 dma_addr = dma_map_single(eth->dma_dev, in mtk_rx_alloc()
2562 data + NET_SKB_PAD + eth->ip_align, in mtk_rx_alloc()
2564 if (unlikely(dma_mapping_error(eth->dma_dev, in mtk_rx_alloc()
2573 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_rx_alloc()
2580 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { in mtk_rx_alloc()
2603 mtk_w32(eth, ring->phys, in mtk_rx_alloc()
2605 mtk_w32(eth, rx_dma_size, in mtk_rx_alloc()
2607 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), in mtk_rx_alloc()
2610 mtk_w32(eth, ring->phys, in mtk_rx_alloc()
2612 mtk_w32(eth, rx_dma_size, in mtk_rx_alloc()
2614 mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), in mtk_rx_alloc()
2617 mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); in mtk_rx_alloc()
2622 static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring) in mtk_rx_clean() argument
2633 rxd = ring->dma + i * eth->soc->txrx.rxd_size; in mtk_rx_clean()
2637 dma_unmap_single(eth->dma_dev, rxd->rxd1, in mtk_rx_clean()
2646 dma_free_coherent(eth->dma_dev, in mtk_rx_clean()
2647 ring->dma_size * eth->soc->txrx.rxd_size, in mtk_rx_clean()
2660 static int mtk_hwlro_rx_init(struct mtk_eth *eth) in mtk_hwlro_rx_init() argument
2684 mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i)); in mtk_hwlro_rx_init()
2685 mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i)); in mtk_hwlro_rx_init()
2686 mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i)); in mtk_hwlro_rx_init()
2696 mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2); in mtk_hwlro_rx_init()
2699 mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA); in mtk_hwlro_rx_init()
2702 mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME, in mtk_hwlro_rx_init()
2714 mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3); in mtk_hwlro_rx_init()
2715 mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_init()
2720 static void mtk_hwlro_rx_uninit(struct mtk_eth *eth) in mtk_hwlro_rx_uninit() argument
2726 mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_uninit()
2730 val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_uninit()
2740 mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i)); in mtk_hwlro_rx_uninit()
2743 mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0); in mtk_hwlro_rx_uninit()
2746 static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip) in mtk_hwlro_val_ipaddr() argument
2750 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_val_ipaddr()
2753 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_val_ipaddr()
2755 mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx)); in mtk_hwlro_val_ipaddr()
2758 mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_val_ipaddr()
2761 static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx) in mtk_hwlro_inval_ipaddr() argument
2765 reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_inval_ipaddr()
2768 mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); in mtk_hwlro_inval_ipaddr()
2770 mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx)); in mtk_hwlro_inval_ipaddr()
2792 struct mtk_eth *eth = mac->hw; in mtk_hwlro_add_ipaddr() local
2805 mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]); in mtk_hwlro_add_ipaddr()
2816 struct mtk_eth *eth = mac->hw; in mtk_hwlro_del_ipaddr() local
2827 mtk_hwlro_inval_ipaddr(eth, hwlro_idx); in mtk_hwlro_del_ipaddr()
2835 struct mtk_eth *eth = mac->hw; in mtk_hwlro_netdev_disable() local
2842 mtk_hwlro_inval_ipaddr(eth, hwlro_idx); in mtk_hwlro_netdev_disable()
2915 struct mtk_eth *eth = mac->hw; in mtk_set_features() local
2926 mtk_w32(eth, !!(features & NETIF_F_HW_VLAN_CTAG_RX), in mtk_set_features()
2931 if (!eth->netdev[i] || eth->netdev[i] == dev) in mtk_set_features()
2933 eth->netdev[i]->features &= ~NETIF_F_HW_VLAN_CTAG_RX; in mtk_set_features()
2934 eth->netdev[i]->features |= features & NETIF_F_HW_VLAN_CTAG_RX; in mtk_set_features()
2941 static int mtk_dma_busy_wait(struct mtk_eth *eth) in mtk_dma_busy_wait() argument
2947 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dma_busy_wait()
2948 reg = eth->soc->reg_map->qdma.glo_cfg; in mtk_dma_busy_wait()
2950 reg = eth->soc->reg_map->pdma.glo_cfg; in mtk_dma_busy_wait()
2952 ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val, in mtk_dma_busy_wait()
2956 dev_err(eth->dev, "DMA init timeout\n"); in mtk_dma_busy_wait()
2961 static int mtk_dma_init(struct mtk_eth *eth) in mtk_dma_init() argument
2966 if (mtk_dma_busy_wait(eth)) in mtk_dma_init()
2969 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_dma_init()
2973 err = mtk_init_fq_dma(eth); in mtk_dma_init()
2978 err = mtk_tx_alloc(eth); in mtk_dma_init()
2982 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_dma_init()
2983 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA); in mtk_dma_init()
2988 err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL); in mtk_dma_init()
2992 if (eth->hwlro) { in mtk_dma_init()
2994 err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO); in mtk_dma_init()
2998 err = mtk_hwlro_rx_init(eth); in mtk_dma_init()
3003 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_dma_init()
3007 mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | in mtk_dma_init()
3008 FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th); in mtk_dma_init()
3009 mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred); in mtk_dma_init()
3015 static void mtk_dma_free(struct mtk_eth *eth) in mtk_dma_free() argument
3017 const struct mtk_soc_data *soc = eth->soc; in mtk_dma_free()
3021 if (eth->netdev[i]) in mtk_dma_free()
3022 netdev_reset_queue(eth->netdev[i]); in mtk_dma_free()
3023 if (eth->scratch_ring) { in mtk_dma_free()
3024 dma_free_coherent(eth->dma_dev, in mtk_dma_free()
3026 eth->scratch_ring, eth->phy_scratch_ring); in mtk_dma_free()
3027 eth->scratch_ring = NULL; in mtk_dma_free()
3028 eth->phy_scratch_ring = 0; in mtk_dma_free()
3030 mtk_tx_clean(eth); in mtk_dma_free()
3031 mtk_rx_clean(eth, ð->rx_ring[0]); in mtk_dma_free()
3032 mtk_rx_clean(eth, ð->rx_ring_qdma); in mtk_dma_free()
3034 if (eth->hwlro) { in mtk_dma_free()
3035 mtk_hwlro_rx_uninit(eth); in mtk_dma_free()
3037 mtk_rx_clean(eth, ð->rx_ring[i]); in mtk_dma_free()
3040 kfree(eth->scratch_head); in mtk_dma_free()
3043 static bool mtk_hw_reset_check(struct mtk_eth *eth) in mtk_hw_reset_check() argument
3045 u32 val = mtk_r32(eth, MTK_INT_STATUS2); in mtk_hw_reset_check()
3055 struct mtk_eth *eth = mac->hw; in mtk_tx_timeout() local
3057 if (test_bit(MTK_RESETTING, ð->state)) in mtk_tx_timeout()
3060 if (!mtk_hw_reset_check(eth)) in mtk_tx_timeout()
3063 eth->netdev[mac->id]->stats.tx_errors++; in mtk_tx_timeout()
3064 netif_err(eth, tx_err, dev, "transmit timed out\n"); in mtk_tx_timeout()
3066 schedule_work(ð->pending_work); in mtk_tx_timeout()
3071 struct mtk_eth *eth = _eth; in mtk_handle_irq_rx() local
3073 eth->rx_events++; in mtk_handle_irq_rx()
3074 if (likely(napi_schedule_prep(ð->rx_napi))) { in mtk_handle_irq_rx()
3075 __napi_schedule(ð->rx_napi); in mtk_handle_irq_rx()
3076 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); in mtk_handle_irq_rx()
3084 struct mtk_eth *eth = _eth; in mtk_handle_irq_tx() local
3086 eth->tx_events++; in mtk_handle_irq_tx()
3087 if (likely(napi_schedule_prep(ð->tx_napi))) { in mtk_handle_irq_tx()
3088 __napi_schedule(ð->tx_napi); in mtk_handle_irq_tx()
3089 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); in mtk_handle_irq_tx()
3097 struct mtk_eth *eth = _eth; in mtk_handle_irq() local
3098 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_handle_irq()
3100 if (mtk_r32(eth, reg_map->pdma.irq_mask) & in mtk_handle_irq()
3101 eth->soc->txrx.rx_irq_done_mask) { in mtk_handle_irq()
3102 if (mtk_r32(eth, reg_map->pdma.irq_status) & in mtk_handle_irq()
3103 eth->soc->txrx.rx_irq_done_mask) in mtk_handle_irq()
3106 if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) { in mtk_handle_irq()
3107 if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT) in mtk_handle_irq()
3118 struct mtk_eth *eth = mac->hw; in mtk_poll_controller() local
3120 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); in mtk_poll_controller()
3121 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); in mtk_poll_controller()
3122 mtk_handle_irq_rx(eth->irq[2], dev); in mtk_poll_controller()
3123 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); in mtk_poll_controller()
3124 mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask); in mtk_poll_controller()
3128 static int mtk_start_dma(struct mtk_eth *eth) in mtk_start_dma() argument
3131 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_start_dma()
3134 err = mtk_dma_init(eth); in mtk_start_dma()
3136 mtk_dma_free(eth); in mtk_start_dma()
3140 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_start_dma()
3141 val = mtk_r32(eth, reg_map->qdma.glo_cfg); in mtk_start_dma()
3146 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) in mtk_start_dma()
3152 mtk_w32(eth, val, reg_map->qdma.glo_cfg); in mtk_start_dma()
3154 mtk_w32(eth, in mtk_start_dma()
3159 mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN | in mtk_start_dma()
3167 static void mtk_gdm_config(struct mtk_eth *eth, u32 config) in mtk_gdm_config() argument
3171 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_gdm_config()
3175 u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); in mtk_gdm_config()
3185 if (eth->netdev[i] && netdev_uses_dsa(eth->netdev[i])) in mtk_gdm_config()
3188 mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i)); in mtk_gdm_config()
3191 mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); in mtk_gdm_config()
3192 mtk_w32(eth, 0, MTK_RST_GL); in mtk_gdm_config()
3209 struct mtk_eth *eth = mac->hw; in mtk_device_event() local
3240 mtk_set_queue_speed(eth, dp->index + 3, s.base.speed); in mtk_device_event()
3248 struct mtk_eth *eth = mac->hw; in mtk_open() local
3251 if (mtk_uses_dsa(dev) && !eth->prog) { in mtk_open()
3252 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) { in mtk_open()
3253 struct metadata_dst *md_dst = eth->dsa_meta[i]; in mtk_open()
3264 eth->dsa_meta[i] = md_dst; in mtk_open()
3270 u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL); in mtk_open()
3272 mtk_w32(eth, val, MTK_CDMP_IG_CTRL); in mtk_open()
3283 if (!refcount_read(ð->dma_refcnt)) { in mtk_open()
3284 const struct mtk_soc_data *soc = eth->soc; in mtk_open()
3288 err = mtk_start_dma(eth); in mtk_open()
3294 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) in mtk_open()
3295 mtk_ppe_start(eth->ppe[i]); in mtk_open()
3299 mtk_gdm_config(eth, gdm_config); in mtk_open()
3301 napi_enable(ð->tx_napi); in mtk_open()
3302 napi_enable(ð->rx_napi); in mtk_open()
3303 mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); in mtk_open()
3304 mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask); in mtk_open()
3305 refcount_set(ð->dma_refcnt, 1); in mtk_open()
3308 refcount_inc(ð->dma_refcnt); in mtk_open()
3316 static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg) in mtk_stop_dma() argument
3322 spin_lock_bh(ð->page_lock); in mtk_stop_dma()
3323 val = mtk_r32(eth, glo_cfg); in mtk_stop_dma()
3324 mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN), in mtk_stop_dma()
3326 spin_unlock_bh(ð->page_lock); in mtk_stop_dma()
3330 val = mtk_r32(eth, glo_cfg); in mtk_stop_dma()
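
mtk_stop_dma() clears the TX/RX enable bits under page_lock, then re-reads glo_cfg (the final hit above) inside a wait loop whose control flow is elided because it never mentions eth. The usual shape, with the retry count and busy-flag names as assumptions:

	/* wait for the engine to report idle before the rings are torn down */
	for (i = 0; i < 10; i++) {
		val = mtk_r32(eth, glo_cfg);
		if (!(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)))
			break;
		msleep(20);
	}
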
3342 struct mtk_eth *eth = mac->hw; in mtk_stop() local
3352 if (!refcount_dec_and_test(ð->dma_refcnt)) in mtk_stop()
3355 mtk_gdm_config(eth, MTK_GDMA_DROP_ALL); in mtk_stop()
3357 mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); in mtk_stop()
3358 mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); in mtk_stop()
3359 napi_disable(ð->tx_napi); in mtk_stop()
3360 napi_disable(ð->rx_napi); in mtk_stop()
3362 cancel_work_sync(ð->rx_dim.work); in mtk_stop()
3363 cancel_work_sync(ð->tx_dim.work); in mtk_stop()
3365 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_stop()
3366 mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg); in mtk_stop()
3367 mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg); in mtk_stop()
3369 mtk_dma_free(eth); in mtk_stop()
3371 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) in mtk_stop()
3372 mtk_ppe_stop(eth->ppe[i]); in mtk_stop()
3381 struct mtk_eth *eth = mac->hw; in mtk_xdp_setup() local
3385 if (eth->hwlro) { in mtk_xdp_setup()
3395 need_update = !!eth->prog != !!prog; in mtk_xdp_setup()
3399 old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held()); in mtk_xdp_setup()
3419 static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits) in ethsys_reset() argument
3421 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, in ethsys_reset()
3426 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, in ethsys_reset()
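
ethsys_reset() pulses the given bits in ETHSYS_RSTCTRL through the syscon regmap: assert, hold, deassert, settle. The two regmap_update_bits() hits above are the assert and deassert edges; a sketch of the full pulse, with the delay values as assumptions:

	static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
	{
		regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
				   reset_bits, reset_bits);	/* assert */
		usleep_range(1000, 1100);

		regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
				   reset_bits, ~reset_bits);	/* deassert */
		mdelay(10);	/* give the reset blocks time to settle */
	}
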
3432 static void mtk_clk_disable(struct mtk_eth *eth) in mtk_clk_disable() argument
3437 clk_disable_unprepare(eth->clks[clk]); in mtk_clk_disable()
3440 static int mtk_clk_enable(struct mtk_eth *eth) in mtk_clk_enable() argument
3445 ret = clk_prepare_enable(eth->clks[clk]); in mtk_clk_enable()
3454 clk_disable_unprepare(eth->clks[clk]); in mtk_clk_enable()
3462 struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim); in mtk_dim_rx() local
3463 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_dim_rx()
3467 cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode, in mtk_dim_rx()
3469 spin_lock_bh(ð->dim_lock); in mtk_dim_rx()
3471 val = mtk_r32(eth, reg_map->pdma.delay_irq); in mtk_dim_rx()
3481 mtk_w32(eth, val, reg_map->pdma.delay_irq); in mtk_dim_rx()
3482 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dim_rx()
3483 mtk_w32(eth, val, reg_map->qdma.delay_irq); in mtk_dim_rx()
3485 spin_unlock_bh(ð->dim_lock); in mtk_dim_rx()
3493 struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim); in mtk_dim_tx() local
3494 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_dim_tx()
3498 cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode, in mtk_dim_tx()
3500 spin_lock_bh(ð->dim_lock); in mtk_dim_tx()
3502 val = mtk_r32(eth, reg_map->pdma.delay_irq); in mtk_dim_tx()
3512 mtk_w32(eth, val, reg_map->pdma.delay_irq); in mtk_dim_tx()
3513 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_dim_tx()
3514 mtk_w32(eth, val, reg_map->qdma.delay_irq); in mtk_dim_tx()
3516 spin_unlock_bh(ð->dim_lock); in mtk_dim_tx()
3523 struct mtk_eth *eth = mac->hw; in mtk_set_mcr_max_rx() local
3526 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_set_mcr_max_rx()
3545 static void mtk_hw_reset(struct mtk_eth *eth) in mtk_hw_reset() argument
3549 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { in mtk_hw_reset()
3550 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0); in mtk_hw_reset()
3556 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_hw_reset()
3559 ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val); in mtk_hw_reset()
3561 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) in mtk_hw_reset()
3562 regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, in mtk_hw_reset()
3566 static u32 mtk_hw_reset_read(struct mtk_eth *eth) in mtk_hw_reset_read() argument
3570 regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val); in mtk_hw_reset_read()
3574 static void mtk_hw_warm_reset(struct mtk_eth *eth) in mtk_hw_warm_reset() argument
3578 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE, in mtk_hw_warm_reset()
3580 if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val, in mtk_hw_warm_reset()
3582 dev_err(eth->dev, "warm reset failed\n"); in mtk_hw_warm_reset()
3583 mtk_hw_reset(eth); in mtk_hw_warm_reset()
3587 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) in mtk_hw_warm_reset()
3592 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_hw_warm_reset()
3595 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask); in mtk_hw_warm_reset()
3598 val = mtk_hw_reset_read(eth); in mtk_hw_warm_reset()
3600 dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n", in mtk_hw_warm_reset()
3604 regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask); in mtk_hw_warm_reset()
3607 val = mtk_hw_reset_read(eth); in mtk_hw_warm_reset()
3609 dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n", in mtk_hw_warm_reset()
3613 static bool mtk_hw_check_dma_hang(struct mtk_eth *eth) in mtk_hw_check_dma_hang() argument
3615 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_hw_check_dma_hang()
3623 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_hw_check_dma_hang()
3627 wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc); in mtk_hw_check_dma_hang()
3629 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204); in mtk_hw_check_dma_hang()
3632 val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230); in mtk_hw_check_dma_hang()
3635 oq_free = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) && in mtk_hw_check_dma_hang()
3636 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) && in mtk_hw_check_dma_hang()
3637 !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16))); in mtk_hw_check_dma_hang()
3639 if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) { in mtk_hw_check_dma_hang()
3640 if (++eth->reset.wdma_hang_count > 2) { in mtk_hw_check_dma_hang()
3641 eth->reset.wdma_hang_count = 0; in mtk_hw_check_dma_hang()
3648 qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234); in mtk_hw_check_dma_hang()
3649 qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308); in mtk_hw_check_dma_hang()
3651 gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0; in mtk_hw_check_dma_hang()
3652 gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0; in mtk_hw_check_dma_hang()
3653 gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1; in mtk_hw_check_dma_hang()
3654 gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1; in mtk_hw_check_dma_hang()
3655 gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24); in mtk_hw_check_dma_hang()
3656 gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64); in mtk_hw_check_dma_hang()
3661 if (++eth->reset.qdma_hang_count > 2) { in mtk_hw_check_dma_hang()
3662 eth->reset.qdma_hang_count = 0; in mtk_hw_check_dma_hang()
3669 oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0)); in mtk_hw_check_dma_hang()
3670 cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16)); in mtk_hw_check_dma_hang()
3671 adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) && in mtk_hw_check_dma_hang()
3672 !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6)); in mtk_hw_check_dma_hang()
3675 if (++eth->reset.adma_hang_count > 2) { in mtk_hw_check_dma_hang()
3676 eth->reset.adma_hang_count = 0; in mtk_hw_check_dma_hang()
3682 eth->reset.wdma_hang_count = 0; in mtk_hw_check_dma_hang()
3683 eth->reset.qdma_hang_count = 0; in mtk_hw_check_dma_hang()
3684 eth->reset.adma_hang_count = 0; in mtk_hw_check_dma_hang()
3686 eth->reset.wdidx = wdidx; in mtk_hw_check_dma_hang()
3694 struct mtk_eth *eth = container_of(del_work, struct mtk_eth, in mtk_hw_reset_monitor_work() local
3697 if (test_bit(MTK_RESETTING, ð->state)) in mtk_hw_reset_monitor_work()
3701 if (mtk_hw_check_dma_hang(eth)) in mtk_hw_reset_monitor_work()
3702 schedule_work(ð->pending_work); in mtk_hw_reset_monitor_work()
3705 schedule_delayed_work(ð->reset.monitor_work, in mtk_hw_reset_monitor_work()
3709 static int mtk_hw_init(struct mtk_eth *eth, bool reset) in mtk_hw_init() argument
3713 const struct mtk_reg_map *reg_map = eth->soc->reg_map; in mtk_hw_init()
3716 if (!reset && test_and_set_bit(MTK_HW_INIT, ð->state)) in mtk_hw_init()
3720 pm_runtime_enable(eth->dev); in mtk_hw_init()
3721 pm_runtime_get_sync(eth->dev); in mtk_hw_init()
3723 ret = mtk_clk_enable(eth); in mtk_hw_init()
3728 if (eth->ethsys) in mtk_hw_init()
3729 regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask, in mtk_hw_init()
3730 of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask); in mtk_hw_init()
3732 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_hw_init()
3733 ret = device_reset(eth->dev); in mtk_hw_init()
3735 dev_err(eth->dev, "MAC reset failed!\n"); in mtk_hw_init()
3740 mtk_dim_rx(ð->rx_dim.work); in mtk_hw_init()
3741 mtk_dim_tx(ð->tx_dim.work); in mtk_hw_init()
3744 mtk_tx_irq_disable(eth, ~0); in mtk_hw_init()
3745 mtk_rx_irq_disable(eth, ~0); in mtk_hw_init()
3753 mtk_hw_warm_reset(eth); in mtk_hw_init()
3755 mtk_hw_reset(eth); in mtk_hw_init()
3757 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { in mtk_hw_init()
3759 val = mtk_r32(eth, MTK_FE_GLO_MISC); in mtk_hw_init()
3760 mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC); in mtk_hw_init()
3763 if (eth->pctl) { in mtk_hw_init()
3765 regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00); in mtk_hw_init()
3768 regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5); in mtk_hw_init()
3771 regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0); in mtk_hw_init()
3779 struct net_device *dev = eth->netdev[i]; in mtk_hw_init()
3781 mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i)); in mtk_hw_init()
3792 val = mtk_r32(eth, MTK_CDMQ_IG_CTRL); in mtk_hw_init()
3793 mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL); in mtk_hw_init()
3794 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { in mtk_hw_init()
3795 val = mtk_r32(eth, MTK_CDMP_IG_CTRL); in mtk_hw_init()
3796 mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL); in mtk_hw_init()
3800 mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); in mtk_hw_init()
3803 mtk_dim_rx(ð->rx_dim.work); in mtk_hw_init()
3804 mtk_dim_tx(ð->tx_dim.work); in mtk_hw_init()
3807 mtk_tx_irq_disable(eth, ~0); in mtk_hw_init()
3808 mtk_rx_irq_disable(eth, ~0); in mtk_hw_init()
3811 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp); in mtk_hw_init()
3812 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4); in mtk_hw_init()
3813 mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp); in mtk_hw_init()
3814 mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4); in mtk_hw_init()
3815 mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); in mtk_hw_init()
3817 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { in mtk_hw_init()
3819 mtk_w32(eth, 0x00000300, PSE_DROP_CFG); in mtk_hw_init()
3822 mtk_w32(eth, 0x00000300, PSE_PPE0_DROP); in mtk_hw_init()
3825 mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2); in mtk_hw_init()
3828 mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1)); in mtk_hw_init()
3829 mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2)); in mtk_hw_init()
3830 mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3)); in mtk_hw_init()
3831 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4)); in mtk_hw_init()
3832 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5)); in mtk_hw_init()
3833 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6)); in mtk_hw_init()
3834 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7)); in mtk_hw_init()
3835 mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8)); in mtk_hw_init()
3838 mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1)); in mtk_hw_init()
3839 mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2)); in mtk_hw_init()
3840 mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3)); in mtk_hw_init()
3841 mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4)); in mtk_hw_init()
3842 mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5)); in mtk_hw_init()
3843 mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6)); in mtk_hw_init()
3844 mtk_w32(eth, 0x00060006, PSE_OQ_TH(7)); in mtk_hw_init()
3845 mtk_w32(eth, 0x00060006, PSE_OQ_TH(8)); in mtk_hw_init()
3848 mtk_w32(eth, 0x00000004, MTK_GDM2_THRES); in mtk_hw_init()
3849 mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES); in mtk_hw_init()
3850 mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES); in mtk_hw_init()
3851 mtk_w32(eth, 0x00000004, MTK_CDME0_THRES); in mtk_hw_init()
3852 mtk_w32(eth, 0x00000004, MTK_CDME1_THRES); in mtk_hw_init()
3853 mtk_w32(eth, 0x00000004, MTK_CDMM_THRES); in mtk_hw_init()
3860 pm_runtime_put_sync(eth->dev); in mtk_hw_init()
3861 pm_runtime_disable(eth->dev); in mtk_hw_init()
3867 static int mtk_hw_deinit(struct mtk_eth *eth) in mtk_hw_deinit() argument
3869 if (!test_and_clear_bit(MTK_HW_INIT, ð->state)) in mtk_hw_deinit()
3872 mtk_clk_disable(eth); in mtk_hw_deinit()
3874 pm_runtime_put_sync(eth->dev); in mtk_hw_deinit()
3875 pm_runtime_disable(eth->dev); in mtk_hw_deinit()
3883 struct mtk_eth *eth = mac->hw; in mtk_init() local
3890 dev_err(eth->dev, "generated random MAC address %pM\n", in mtk_init()
3900 struct mtk_eth *eth = mac->hw; in mtk_uninit() local
3903 mtk_tx_irq_disable(eth, ~0); in mtk_uninit()
3904 mtk_rx_irq_disable(eth, ~0); in mtk_uninit()
3911 struct mtk_eth *eth = mac->hw; in mtk_change_mtu() local
3913 if (rcu_access_pointer(eth->prog) && in mtk_change_mtu()
3941 static void mtk_prepare_for_reset(struct mtk_eth *eth) in mtk_prepare_for_reset() argument
3947 val = mtk_r32(eth, MTK_FE_GLO_CFG) | MTK_FE_LINK_DOWN_P3; in mtk_prepare_for_reset()
3948 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_prepare_for_reset()
3950 mtk_w32(eth, val, MTK_FE_GLO_CFG); in mtk_prepare_for_reset()
3953 for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) in mtk_prepare_for_reset()
3954 mtk_ppe_prepare_reset(eth->ppe[i]); in mtk_prepare_for_reset()
3957 mtk_w32(eth, 0, MTK_FE_INT_ENABLE); in mtk_prepare_for_reset()
3961 val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK; in mtk_prepare_for_reset()
3962 mtk_w32(eth, val, MTK_MAC_MCR(i)); in mtk_prepare_for_reset()
3968 struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work); in mtk_pending_work() local
3974 set_bit(MTK_RESETTING, ð->state); in mtk_pending_work()
3976 mtk_prepare_for_reset(eth); in mtk_pending_work()
3981 mtk_prepare_for_reset(eth); in mtk_pending_work()
3985 if (!eth->netdev[i] || !netif_running(eth->netdev[i])) in mtk_pending_work()
3988 mtk_stop(eth->netdev[i]); in mtk_pending_work()
3994 if (eth->dev->pins) in mtk_pending_work()
3995 pinctrl_select_state(eth->dev->pins->p, in mtk_pending_work()
3996 eth->dev->pins->default_state); in mtk_pending_work()
3997 mtk_hw_init(eth, true); in mtk_pending_work()
4004 if (mtk_open(eth->netdev[i])) { in mtk_pending_work()
4005 netif_alert(eth, ifup, eth->netdev[i], in mtk_pending_work()
4007 dev_close(eth->netdev[i]); in mtk_pending_work()
4012 val = mtk_r32(eth, MTK_FE_GLO_CFG) & ~MTK_FE_LINK_DOWN_P3; in mtk_pending_work()
4013 if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) in mtk_pending_work()
4015 mtk_w32(eth, val, MTK_FE_GLO_CFG); in mtk_pending_work()
4017 clear_bit(MTK_RESETTING, ð->state); in mtk_pending_work()
4024 static int mtk_free_dev(struct mtk_eth *eth) in mtk_free_dev() argument
4029 if (!eth->netdev[i]) in mtk_free_dev()
4031 free_netdev(eth->netdev[i]); in mtk_free_dev()
4034 for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) { in mtk_free_dev()
4035 if (!eth->dsa_meta[i]) in mtk_free_dev()
4037 metadata_dst_free(eth->dsa_meta[i]); in mtk_free_dev()
4043 static int mtk_unreg_dev(struct mtk_eth *eth) in mtk_unreg_dev() argument
4049 if (!eth->netdev[i]) in mtk_unreg_dev()
4051 mac = netdev_priv(eth->netdev[i]); in mtk_unreg_dev()
4052 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_unreg_dev()
4054 unregister_netdev(eth->netdev[i]); in mtk_unreg_dev()
4060 static int mtk_cleanup(struct mtk_eth *eth) in mtk_cleanup() argument
4062 mtk_unreg_dev(eth); in mtk_cleanup()
4063 mtk_free_dev(eth); in mtk_cleanup()
4064 cancel_work_sync(ð->pending_work); in mtk_cleanup()
4065 cancel_delayed_work_sync(ð->reset.monitor_work); in mtk_cleanup()
4166 static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data) in mtk_ethtool_pp_stats() argument
4171 for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) { in mtk_ethtool_pp_stats()
4172 struct mtk_rx_ring *ring = ð->rx_ring[i]; in mtk_ethtool_pp_stats()
4325 static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) in mtk_add_mac() argument
4335 dev_err(eth->dev, "missing mac id\n"); in mtk_add_mac()
4341 dev_err(eth->dev, "%d is not a valid mac id\n", id); in mtk_add_mac()
4345 if (eth->netdev[id]) { in mtk_add_mac()
4346 dev_err(eth->dev, "duplicate mac id found: %d\n", id); in mtk_add_mac()
4350 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) in mtk_add_mac()
4353 eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1); in mtk_add_mac()
4354 if (!eth->netdev[id]) { in mtk_add_mac()
4355 dev_err(eth->dev, "alloc_etherdev failed\n"); in mtk_add_mac()
4358 mac = netdev_priv(eth->netdev[id]); in mtk_add_mac()
4359 eth->mac[id] = mac; in mtk_add_mac()
4361 mac->hw = eth; in mtk_add_mac()
4367 mac->hw_stats = devm_kzalloc(eth->dev, in mtk_add_mac()
4371 dev_err(eth->dev, "failed to allocate counter memory\n"); in mtk_add_mac()
4382 dev_err(eth->dev, "incorrect phy-mode\n"); in mtk_add_mac()
4390 mac->phylink_config.dev = ð->netdev[id]->dev; in mtk_add_mac()
4428 SET_NETDEV_DEV(eth->netdev[id], eth->dev); in mtk_add_mac()
4429 eth->netdev[id]->watchdog_timeo = 5 * HZ; in mtk_add_mac()
4430 eth->netdev[id]->netdev_ops = &mtk_netdev_ops; in mtk_add_mac()
4431 eth->netdev[id]->base_addr = (unsigned long)eth->base; in mtk_add_mac()
4433 eth->netdev[id]->hw_features = eth->soc->hw_features; in mtk_add_mac()
4434 if (eth->hwlro) in mtk_add_mac()
4435 eth->netdev[id]->hw_features |= NETIF_F_LRO; in mtk_add_mac()
4437 eth->netdev[id]->vlan_features = eth->soc->hw_features & in mtk_add_mac()
4439 eth->netdev[id]->features |= eth->soc->hw_features; in mtk_add_mac()
4440 eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops; in mtk_add_mac()
4442 eth->netdev[id]->irq = eth->irq[0]; in mtk_add_mac()
4443 eth->netdev[id]->dev.of_node = np; in mtk_add_mac()
4445 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_add_mac()
4446 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN; in mtk_add_mac()
4448 eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN; in mtk_add_mac()
4450 if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { in mtk_add_mac()
4455 if (mtk_page_pool_enabled(eth)) in mtk_add_mac()
4456 eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC | in mtk_add_mac()
4464 free_netdev(eth->netdev[id]); in mtk_add_mac()
4468 void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev) in mtk_eth_set_dma_device() argument
4477 dev = eth->netdev[i]; in mtk_eth_set_dma_device()
4487 eth->dma_dev = dma_dev; in mtk_eth_set_dma_device()
4501 struct mtk_eth *eth; in mtk_probe() local
4504 eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); in mtk_probe()
4505 if (!eth) in mtk_probe()
4508 eth->soc = of_device_get_match_data(&pdev->dev); in mtk_probe()
4510 eth->dev = &pdev->dev; in mtk_probe()
4511 eth->dma_dev = &pdev->dev; in mtk_probe()
4512 eth->base = devm_platform_ioremap_resource(pdev, 0); in mtk_probe()
4513 if (IS_ERR(eth->base)) in mtk_probe()
4514 return PTR_ERR(eth->base); in mtk_probe()
4516 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) in mtk_probe()
4517 eth->ip_align = NET_IP_ALIGN; in mtk_probe()
4519 spin_lock_init(ð->page_lock); in mtk_probe()
4520 spin_lock_init(ð->tx_irq_lock); in mtk_probe()
4521 spin_lock_init(ð->rx_irq_lock); in mtk_probe()
4522 spin_lock_init(ð->dim_lock); in mtk_probe()
4524 eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in mtk_probe()
4525 INIT_WORK(ð->rx_dim.work, mtk_dim_rx); in mtk_probe()
4526 INIT_DELAYED_WORK(ð->reset.monitor_work, mtk_hw_reset_monitor_work); in mtk_probe()
4528 eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in mtk_probe()
4529 INIT_WORK(ð->tx_dim.work, mtk_dim_tx); in mtk_probe()
4531 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_probe()
4532 eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
4534 if (IS_ERR(eth->ethsys)) { in mtk_probe()
4536 return PTR_ERR(eth->ethsys); in mtk_probe()
4540 if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) { in mtk_probe()
4541 eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
4543 if (IS_ERR(eth->infra)) { in mtk_probe()
4545 return PTR_ERR(eth->infra); in mtk_probe()
4559 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { in mtk_probe()
4560 eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii), in mtk_probe()
4562 if (!eth->sgmii) in mtk_probe()
4565 err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node, in mtk_probe()
4566 eth->soc->ana_rgc3); in mtk_probe()
4572 if (eth->soc->required_pctl) { in mtk_probe()
4573 eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, in mtk_probe()
4575 if (IS_ERR(eth->pctl)) { in mtk_probe()
4577 return PTR_ERR(eth->pctl); in mtk_probe()
4581 if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { in mtk_probe()
4587 if (eth->soc->offload_version) { in mtk_probe()
4593 if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base)) in mtk_probe()
4601 wdma_base = eth->soc->reg_map->wdma_base[i]; in mtk_probe()
4603 mtk_wed_add_hw(np, eth, eth->base + wdma_base, in mtk_probe()
4609 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0) in mtk_probe()
4610 eth->irq[i] = eth->irq[0]; in mtk_probe()
4612 eth->irq[i] = platform_get_irq(pdev, i); in mtk_probe()
4613 if (eth->irq[i] < 0) { in mtk_probe()
4619 for (i = 0; i < ARRAY_SIZE(eth->clks); i++) { in mtk_probe()
4620 eth->clks[i] = devm_clk_get(eth->dev, in mtk_probe()
4622 if (IS_ERR(eth->clks[i])) { in mtk_probe()
4623 if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) { in mtk_probe()
4627 if (eth->soc->required_clks & BIT(i)) { in mtk_probe()
4633 eth->clks[i] = NULL; in mtk_probe()
4637 eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); in mtk_probe()
4638 INIT_WORK(ð->pending_work, mtk_pending_work); in mtk_probe()
4640 err = mtk_hw_init(eth, false); in mtk_probe()
4644 eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO); in mtk_probe()
4654 err = mtk_add_mac(eth, mac_np); in mtk_probe()
4661 if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) { in mtk_probe()
4662 err = devm_request_irq(eth->dev, eth->irq[0], in mtk_probe()
4664 dev_name(eth->dev), eth); in mtk_probe()
4666 err = devm_request_irq(eth->dev, eth->irq[1], in mtk_probe()
4668 dev_name(eth->dev), eth); in mtk_probe()
4672 err = devm_request_irq(eth->dev, eth->irq[2], in mtk_probe()
4674 dev_name(eth->dev), eth); in mtk_probe()
4680 if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { in mtk_probe()
4681 err = mtk_mdio_init(eth); in mtk_probe()
4686 if (eth->soc->offload_version) { in mtk_probe()
4689 num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1; in mtk_probe()
4690 num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe); in mtk_probe()
4692 u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400; in mtk_probe()
4694 eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, in mtk_probe()
4695 eth->soc->offload_version, i); in mtk_probe()
4696 if (!eth->ppe[i]) { in mtk_probe()
4702 err = mtk_eth_offload_init(eth); in mtk_probe()
4708 if (!eth->netdev[i]) in mtk_probe()
4711 err = register_netdev(eth->netdev[i]); in mtk_probe()
4713 dev_err(eth->dev, "error bringing up device\n"); in mtk_probe()
4716 netif_info(eth, probe, eth->netdev[i], in mtk_probe()
4718 eth->netdev[i]->base_addr, eth->irq[0]); in mtk_probe()
4724 init_dummy_netdev(ð->dummy_dev); in mtk_probe()
4725 netif_napi_add(ð->dummy_dev, ð->tx_napi, mtk_napi_tx); in mtk_probe()
4726 netif_napi_add(ð->dummy_dev, ð->rx_napi, mtk_napi_rx); in mtk_probe()
4728 platform_set_drvdata(pdev, eth); in mtk_probe()
4729 schedule_delayed_work(ð->reset.monitor_work, in mtk_probe()
4735 mtk_ppe_deinit(eth); in mtk_probe()
4736 mtk_mdio_cleanup(eth); in mtk_probe()
4738 mtk_free_dev(eth); in mtk_probe()
4740 mtk_hw_deinit(eth); in mtk_probe()
4749 struct mtk_eth *eth = platform_get_drvdata(pdev); in mtk_remove() local
4755 if (!eth->netdev[i]) in mtk_remove()
4757 mtk_stop(eth->netdev[i]); in mtk_remove()
4758 mac = netdev_priv(eth->netdev[i]); in mtk_remove()
4763 mtk_hw_deinit(eth); in mtk_remove()
4765 netif_napi_del(ð->tx_napi); in mtk_remove()
4766 netif_napi_del(ð->rx_napi); in mtk_remove()
4767 mtk_cleanup(eth); in mtk_remove()
4768 mtk_mdio_cleanup(eth); in mtk_remove()