Lines Matching refs:mdp

351 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_write() local
352 u16 offset = mdp->reg_offset[enum_index]; in sh_eth_write()
357 iowrite32(data, mdp->addr + offset); in sh_eth_write()
362 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_read() local
363 u16 offset = mdp->reg_offset[enum_index]; in sh_eth_read()
368 return ioread32(mdp->addr + offset); in sh_eth_read()
378 static u16 sh_eth_tsu_get_offset(struct sh_eth_private *mdp, int enum_index) in sh_eth_tsu_get_offset() argument
380 return mdp->reg_offset[enum_index]; in sh_eth_tsu_get_offset()
383 static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data, in sh_eth_tsu_write() argument
386 u16 offset = sh_eth_tsu_get_offset(mdp, enum_index); in sh_eth_tsu_write()
391 iowrite32(data, mdp->tsu_addr + offset); in sh_eth_tsu_write()
394 static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index) in sh_eth_tsu_read() argument
396 u16 offset = sh_eth_tsu_get_offset(mdp, enum_index); in sh_eth_tsu_read()
401 return ioread32(mdp->tsu_addr + offset); in sh_eth_tsu_read()
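
Note on the accessors above: neither the MAC helpers (sh_eth_write()/sh_eth_read()) nor the TSU helpers hard-code register addresses. Each takes a register enum, looks the per-SoC byte offset up in mdp->reg_offset[], and applies it to the matching ioremapped bank (mdp->addr for the MAC block, mdp->tsu_addr for the TSU block). A minimal sketch of that indirection, using hypothetical demo_* names rather than the driver's own types:

	/* sketch; assumes <linux/io.h> for __iomem/iowrite32() */
	struct demo_priv {
		void __iomem *addr;	/* ioremapped register bank */
		const u16 *reg_offset;	/* per-SoC table: enum index -> byte offset */
	};

	static void demo_write(struct demo_priv *p, u32 data, int enum_index)
	{
		u16 offset = p->reg_offset[enum_index];

		/* the real helpers first reject an invalid-offset sentinel */
		iowrite32(data, p->addr + offset);
	}
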
417 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_select_mii() local
420 switch (mdp->phy_interface) { in sh_eth_select_mii()
445 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_duplex() local
447 sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? ECMR_DM : 0); in sh_eth_set_duplex()
452 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_chip_reset() local
455 sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR); in sh_eth_chip_reset()
484 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_soft_reset_gether() local
505 if (mdp->cd->csmr) in sh_eth_soft_reset_gether()
509 if (mdp->cd->select_mii) in sh_eth_soft_reset_gether()
517 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rate_gether() local
519 if (WARN_ON(!mdp->cd->gecmr)) in sh_eth_set_rate_gether()
522 switch (mdp->speed) { in sh_eth_set_rate_gether()
636 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rate_rcar() local
638 switch (mdp->speed) { in sh_eth_set_rate_rcar()
799 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rate_sh7724() local
801 switch (mdp->speed) { in sh_eth_set_rate_sh7724()
843 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rate_sh7757() local
845 switch (mdp->speed) { in sh_eth_set_rate_sh7757()
914 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rate_giga() local
916 if (WARN_ON(!mdp->cd->gecmr)) in sh_eth_set_rate_giga()
919 switch (mdp->speed) { in sh_eth_set_rate_giga()
1235 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tx_free() local
1241 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) { in sh_eth_tx_free()
1242 entry = mdp->dirty_tx % mdp->num_tx_ring; in sh_eth_tx_free()
1243 txdesc = &mdp->tx_ring[entry]; in sh_eth_tx_free()
1249 netif_info(mdp, tx_done, ndev, in sh_eth_tx_free()
1253 if (mdp->tx_skbuff[entry]) { in sh_eth_tx_free()
1254 dma_unmap_single(&mdp->pdev->dev, in sh_eth_tx_free()
1258 dev_kfree_skb_irq(mdp->tx_skbuff[entry]); in sh_eth_tx_free()
1259 mdp->tx_skbuff[entry] = NULL; in sh_eth_tx_free()
1263 if (entry >= mdp->num_tx_ring - 1) in sh_eth_tx_free()
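
sh_eth_tx_free() (and the producer side in sh_eth_start_xmit() further down) relies on free-running u32 ring counters: cur_tx counts descriptors ever queued, dirty_tx counts descriptors ever reclaimed, and "% num_tx_ring" maps either to a slot. Unsigned subtraction keeps cur_tx - dirty_tx equal to the in-flight count even across counter wraparound. A hedged sketch of just that indexing (the real loop also stops at the first descriptor still owned by hardware, i.e. with TD_TACT set):

	/* sketch; demo_desc_done() stands in for the TD_TACT ownership test */
	static bool demo_desc_done(unsigned int entry);

	static unsigned int demo_tx_reclaim(u32 *cur_tx, u32 *dirty_tx,
					    unsigned int num_tx_ring)
	{
		unsigned int freed = 0;

		for (; *cur_tx - *dirty_tx > 0; (*dirty_tx)++) {
			unsigned int entry = *dirty_tx % num_tx_ring;

			if (!demo_desc_done(entry))	/* hardware still owns it */
				break;
			/* ...dma_unmap_single() + dev_kfree_skb_irq() go here... */
			freed++;
		}
		return freed;
	}
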
1277 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_ring_free() local
1280 if (mdp->rx_ring) { in sh_eth_ring_free()
1281 for (i = 0; i < mdp->num_rx_ring; i++) { in sh_eth_ring_free()
1282 if (mdp->rx_skbuff[i]) { in sh_eth_ring_free()
1283 struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i]; in sh_eth_ring_free()
1285 dma_unmap_single(&mdp->pdev->dev, in sh_eth_ring_free()
1287 ALIGN(mdp->rx_buf_sz, 32), in sh_eth_ring_free()
1291 ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; in sh_eth_ring_free()
1292 dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring, in sh_eth_ring_free()
1293 mdp->rx_desc_dma); in sh_eth_ring_free()
1294 mdp->rx_ring = NULL; in sh_eth_ring_free()
1298 if (mdp->rx_skbuff) { in sh_eth_ring_free()
1299 for (i = 0; i < mdp->num_rx_ring; i++) in sh_eth_ring_free()
1300 dev_kfree_skb(mdp->rx_skbuff[i]); in sh_eth_ring_free()
1302 kfree(mdp->rx_skbuff); in sh_eth_ring_free()
1303 mdp->rx_skbuff = NULL; in sh_eth_ring_free()
1305 if (mdp->tx_ring) { in sh_eth_ring_free()
1308 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; in sh_eth_ring_free()
1309 dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring, in sh_eth_ring_free()
1310 mdp->tx_desc_dma); in sh_eth_ring_free()
1311 mdp->tx_ring = NULL; in sh_eth_ring_free()
1315 kfree(mdp->tx_skbuff); in sh_eth_ring_free()
1316 mdp->tx_skbuff = NULL; in sh_eth_ring_free()
1322 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_ring_format() local
1327 int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; in sh_eth_ring_format()
1328 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; in sh_eth_ring_format()
1329 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; in sh_eth_ring_format()
1333 mdp->cur_rx = 0; in sh_eth_ring_format()
1334 mdp->cur_tx = 0; in sh_eth_ring_format()
1335 mdp->dirty_rx = 0; in sh_eth_ring_format()
1336 mdp->dirty_tx = 0; in sh_eth_ring_format()
1338 memset(mdp->rx_ring, 0, rx_ringsize); in sh_eth_ring_format()
1341 for (i = 0; i < mdp->num_rx_ring; i++) { in sh_eth_ring_format()
1343 mdp->rx_skbuff[i] = NULL; in sh_eth_ring_format()
1350 buf_len = ALIGN(mdp->rx_buf_sz, 32); in sh_eth_ring_format()
1351 dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len, in sh_eth_ring_format()
1353 if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) { in sh_eth_ring_format()
1357 mdp->rx_skbuff[i] = skb; in sh_eth_ring_format()
1360 rxdesc = &mdp->rx_ring[i]; in sh_eth_ring_format()
1367 sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR); in sh_eth_ring_format()
1368 if (mdp->cd->xdfar_rw) in sh_eth_ring_format()
1369 sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR); in sh_eth_ring_format()
1373 mdp->dirty_rx = (u32) (i - mdp->num_rx_ring); in sh_eth_ring_format()
1379 memset(mdp->tx_ring, 0, tx_ringsize); in sh_eth_ring_format()
1382 for (i = 0; i < mdp->num_tx_ring; i++) { in sh_eth_ring_format()
1383 mdp->tx_skbuff[i] = NULL; in sh_eth_ring_format()
1384 txdesc = &mdp->tx_ring[i]; in sh_eth_ring_format()
1389 sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR); in sh_eth_ring_format()
1390 if (mdp->cd->xdfar_rw) in sh_eth_ring_format()
1391 sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR); in sh_eth_ring_format()
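
One subtlety at the end of the rx half of sh_eth_ring_format(): dirty_rx is set to (u32)(i - num_rx_ring), where i is how many buffers were actually allocated and DMA-mapped (the fill loop stops early on a mapping failure). The quota later computed in sh_eth_rx(), (dirty_rx + num_rx_ring) - cur_rx, then comes out to exactly i. Worked example, assuming num_rx_ring = 64 and a failure at i = 60: dirty_rx = (u32)(60 - 64) = 0xfffffffc, and with cur_rx = 0 the u32 quota is (0xfffffffc + 64) - 0 = 60, i.e. only descriptors the hardware really owns.
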
1401 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_ring_init() local
1409 mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : in sh_eth_ring_init()
1411 if (mdp->cd->rpadir) in sh_eth_ring_init()
1412 mdp->rx_buf_sz += NET_IP_ALIGN; in sh_eth_ring_init()
1415 mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff), in sh_eth_ring_init()
1417 if (!mdp->rx_skbuff) in sh_eth_ring_init()
1420 mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff), in sh_eth_ring_init()
1422 if (!mdp->tx_skbuff) in sh_eth_ring_init()
1426 rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; in sh_eth_ring_init()
1427 mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize, in sh_eth_ring_init()
1428 &mdp->rx_desc_dma, GFP_KERNEL); in sh_eth_ring_init()
1429 if (!mdp->rx_ring) in sh_eth_ring_init()
1432 mdp->dirty_rx = 0; in sh_eth_ring_init()
1435 tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring; in sh_eth_ring_init()
1436 mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize, in sh_eth_ring_init()
1437 &mdp->tx_desc_dma, GFP_KERNEL); in sh_eth_ring_init()
1438 if (!mdp->tx_ring) in sh_eth_ring_init()
1451 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_dev_init() local
1455 ret = mdp->cd->soft_reset(ndev); in sh_eth_dev_init()
1459 if (mdp->cd->rmiimode) in sh_eth_dev_init()
1464 if (mdp->cd->rpadir) in sh_eth_dev_init()
1471 if (mdp->cd->hw_swap) in sh_eth_dev_init()
1478 sh_eth_write(ndev, mdp->cd->fdr_value, FDR); in sh_eth_dev_init()
1484 sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER); in sh_eth_dev_init()
1487 if (mdp->cd->nbst) in sh_eth_dev_init()
1491 if (mdp->cd->bculr) in sh_eth_dev_init()
1494 sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR); in sh_eth_dev_init()
1496 if (!mdp->cd->no_trimd) in sh_eth_dev_init()
1504 mdp->irq_enabled = true; in sh_eth_dev_init()
1505 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); in sh_eth_dev_init()
1508 sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | in sh_eth_dev_init()
1512 if (mdp->cd->set_rate) in sh_eth_dev_init()
1513 mdp->cd->set_rate(ndev); in sh_eth_dev_init()
1516 sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR); in sh_eth_dev_init()
1519 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR); in sh_eth_dev_init()
1525 if (mdp->cd->apr) in sh_eth_dev_init()
1527 if (mdp->cd->mpr) in sh_eth_dev_init()
1529 if (mdp->cd->tpauser) in sh_eth_dev_init()
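
sh_eth_dev_init() is parameterized almost entirely by the per-SoC struct sh_eth_cpu_data behind mdp->cd: boolean capability flags (rmiimode, rpadir, hw_swap, nbst, bculr, apr, mpr, tpauser, no_trimd, ...) gate the optional register writes, while per-chip values (fdr_value, trscer_err_mask, fcftr_value, eesipr_value, ecsr_value, ecsipr_value) and the soft_reset()/set_rate() hooks fill in the common ones, so a single init path serves every supported controller.
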
1540 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_dev_exit() local
1546 for (i = 0; i < mdp->num_tx_ring; i++) in sh_eth_dev_exit()
1547 mdp->tx_ring[i].status &= ~cpu_to_le32(TD_TACT); in sh_eth_dev_exit()
1562 mdp->cd->soft_reset(ndev); in sh_eth_dev_exit()
1565 if (mdp->cd->rmiimode) in sh_eth_dev_exit()
1588 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_rx() local
1591 int entry = mdp->cur_rx % mdp->num_rx_ring; in sh_eth_rx()
1592 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx; in sh_eth_rx()
1596 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1; in sh_eth_rx()
1603 rxdesc = &mdp->rx_ring[entry]; in sh_eth_rx()
1613 netif_info(mdp, rx_status, ndev, in sh_eth_rx()
1626 if (mdp->cd->csmr) in sh_eth_rx()
1629 skb = mdp->rx_skbuff[entry]; in sh_eth_rx()
1647 if (!mdp->cd->hw_swap) in sh_eth_rx()
1651 mdp->rx_skbuff[entry] = NULL; in sh_eth_rx()
1652 if (mdp->cd->rpadir) in sh_eth_rx()
1654 dma_unmap_single(&mdp->pdev->dev, dma_addr, in sh_eth_rx()
1655 ALIGN(mdp->rx_buf_sz, 32), in sh_eth_rx()
1667 entry = (++mdp->cur_rx) % mdp->num_rx_ring; in sh_eth_rx()
1668 rxdesc = &mdp->rx_ring[entry]; in sh_eth_rx()
1672 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) { in sh_eth_rx()
1673 entry = mdp->dirty_rx % mdp->num_rx_ring; in sh_eth_rx()
1674 rxdesc = &mdp->rx_ring[entry]; in sh_eth_rx()
1676 buf_len = ALIGN(mdp->rx_buf_sz, 32); in sh_eth_rx()
1679 if (mdp->rx_skbuff[entry] == NULL) { in sh_eth_rx()
1684 dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, in sh_eth_rx()
1686 if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) { in sh_eth_rx()
1690 mdp->rx_skbuff[entry] = skb; in sh_eth_rx()
1696 if (entry >= mdp->num_rx_ring - 1) in sh_eth_rx()
1707 if (intr_status & EESR_RDE && !mdp->cd->no_xdfar) { in sh_eth_rx()
1711 mdp->cur_rx = count; in sh_eth_rx()
1712 mdp->dirty_rx = count; in sh_eth_rx()
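
Two notes on sh_eth_rx(): the refill walk ("for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++)") is the rx-side twin of the tx reclaim idiom sketched earlier, re-allocating and re-mapping a buffer for slot dirty_rx % num_rx_ring each lap; and on a receive-descriptor-empty event (EESR_RDE), parts with a readable RDFAR resynchronize cur_rx and dirty_rx to the slot the hardware will fetch next (the "count" assigned to both appears to be derived from RDFAR on lines that do not match "mdp" and so are not listed here).
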
1737 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_emac_interrupt() local
1746 pm_wakeup_event(&mdp->pdev->dev, 0); in sh_eth_emac_interrupt()
1749 if (mdp->cd->no_psr || mdp->no_ether_link) in sh_eth_emac_interrupt()
1752 if (mdp->ether_link_active_low) in sh_eth_emac_interrupt()
1771 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_error() local
1778 netif_err(mdp, tx_err, ndev, "Transmit Abort\n"); in sh_eth_error()
1793 netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n"); in sh_eth_error()
1799 netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n"); in sh_eth_error()
1812 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { in sh_eth_error()
1815 netif_err(mdp, tx_err, ndev, "Address Error\n"); in sh_eth_error()
1819 if (mdp->cd->no_ade) in sh_eth_error()
1827 intr_status, mdp->cur_tx, mdp->dirty_tx, in sh_eth_error()
1833 if (edtrr ^ mdp->cd->edtrr_trns) { in sh_eth_error()
1835 sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR); in sh_eth_error()
1845 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_interrupt() local
1846 struct sh_eth_cpu_data *cd = mdp->cd; in sh_eth_interrupt()
1850 spin_lock(&mdp->lock); in sh_eth_interrupt()
1868 if (unlikely(!mdp->irq_enabled)) { in sh_eth_interrupt()
1874 if (napi_schedule_prep(&mdp->napi)) { in sh_eth_interrupt()
1878 __napi_schedule(&mdp->napi); in sh_eth_interrupt()
1907 spin_unlock(&mdp->lock); in sh_eth_interrupt()
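
sh_eth_interrupt() is the standard IRQ-to-NAPI hand-off: take the plain spinlock (no _irqsave needed in hardirq context), bail out if mdp->irq_enabled was cleared by a racing teardown (sh_eth_set_ringparam() clears it before napi_synchronize()), then mask the rx/tx sources and __napi_schedule(); sh_eth_poll() re-enables them by rewriting eesipr_value once its budget is not exhausted. A compact sketch under those assumptions, with hypothetical demo_* helpers:

	struct demo_ctx {
		spinlock_t lock;
		struct napi_struct napi;
	};
	static void demo_mask_irqs(struct demo_ctx *ctx);	/* EESIPR-style write */

	static irqreturn_t demo_interrupt(int irq, void *dev_id)
	{
		struct demo_ctx *ctx = dev_id;
		irqreturn_t ret = IRQ_NONE;

		spin_lock(&ctx->lock);		/* hardirq context: plain lock */
		if (napi_schedule_prep(&ctx->napi)) {
			demo_mask_irqs(ctx);	/* silence rx/tx until poll ends */
			__napi_schedule(&ctx->napi);
			ret = IRQ_HANDLED;
		}
		spin_unlock(&ctx->lock);
		return ret;
	}
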
1914 struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private, in sh_eth_poll() local
1934 if (mdp->irq_enabled) in sh_eth_poll()
1935 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR); in sh_eth_poll()
1943 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_adjust_link() local
1948 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_adjust_link()
1951 if (mdp->cd->no_psr || mdp->no_ether_link) in sh_eth_adjust_link()
1955 if (phydev->duplex != mdp->duplex) { in sh_eth_adjust_link()
1957 mdp->duplex = phydev->duplex; in sh_eth_adjust_link()
1958 if (mdp->cd->set_duplex) in sh_eth_adjust_link()
1959 mdp->cd->set_duplex(ndev); in sh_eth_adjust_link()
1962 if (phydev->speed != mdp->speed) { in sh_eth_adjust_link()
1964 mdp->speed = phydev->speed; in sh_eth_adjust_link()
1965 if (mdp->cd->set_rate) in sh_eth_adjust_link()
1966 mdp->cd->set_rate(ndev); in sh_eth_adjust_link()
1968 if (!mdp->link) { in sh_eth_adjust_link()
1971 mdp->link = phydev->link; in sh_eth_adjust_link()
1973 } else if (mdp->link) { in sh_eth_adjust_link()
1975 mdp->link = 0; in sh_eth_adjust_link()
1976 mdp->speed = 0; in sh_eth_adjust_link()
1977 mdp->duplex = -1; in sh_eth_adjust_link()
1981 if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link) in sh_eth_adjust_link()
1984 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_adjust_link()
1986 if (new_state && netif_msg_link(mdp)) in sh_eth_adjust_link()
1994 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_phy_init() local
1997 mdp->link = 0; in sh_eth_phy_init()
1998 mdp->speed = 0; in sh_eth_phy_init()
1999 mdp->duplex = -1; in sh_eth_phy_init()
2008 mdp->phy_interface); in sh_eth_phy_init()
2017 mdp->mii_bus->id, mdp->phy_id); in sh_eth_phy_init()
2020 mdp->phy_interface); in sh_eth_phy_init()
2029 if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) in sh_eth_phy_init()
2063 struct sh_eth_private *mdp = netdev_priv(ndev); in __sh_eth_get_regs() local
2064 struct sh_eth_cpu_data *cd = mdp->cd; in __sh_eth_get_regs()
2088 if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) { \ in __sh_eth_get_regs()
2097 #define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg)) in __sh_eth_get_regs()
2211 *buf++ = ioread32(mdp->tsu_addr + in __sh_eth_get_regs()
2212 mdp->reg_offset[TSU_ADRH0] + in __sh_eth_get_regs()
2234 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_regs() local
2238 pm_runtime_get_sync(&mdp->pdev->dev); in sh_eth_get_regs()
2240 pm_runtime_put_sync(&mdp->pdev->dev); in sh_eth_get_regs()
2245 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_msglevel() local
2246 return mdp->msg_enable; in sh_eth_get_msglevel()
2251 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_msglevel() local
2252 mdp->msg_enable = value; in sh_eth_set_msglevel()
2274 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_ethtool_stats() local
2278 data[i++] = mdp->cur_rx; in sh_eth_get_ethtool_stats()
2279 data[i++] = mdp->cur_tx; in sh_eth_get_ethtool_stats()
2280 data[i++] = mdp->dirty_rx; in sh_eth_get_ethtool_stats()
2281 data[i++] = mdp->dirty_tx; in sh_eth_get_ethtool_stats()
2299 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_ringparam() local
2303 ring->rx_pending = mdp->num_rx_ring; in sh_eth_get_ringparam()
2304 ring->tx_pending = mdp->num_tx_ring; in sh_eth_get_ringparam()
2312 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_ringparam() local
2332 mdp->irq_enabled = false; in sh_eth_set_ringparam()
2334 napi_synchronize(&mdp->napi); in sh_eth_set_ringparam()
2344 mdp->num_rx_ring = ring->rx_pending; in sh_eth_set_ringparam()
2345 mdp->num_tx_ring = ring->tx_pending; in sh_eth_set_ringparam()
2369 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_wol() local
2374 if (mdp->cd->magic) { in sh_eth_get_wol()
2376 wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0; in sh_eth_get_wol()
2382 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_wol() local
2384 if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC) in sh_eth_set_wol()
2387 mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); in sh_eth_set_wol()
2389 device_set_wakeup_enable(&mdp->pdev->dev, mdp->wol_enabled); in sh_eth_set_wol()
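
The WoL pair only advertises WAKE_MAGIC on parts whose cpu_data sets .magic, and sh_eth_set_wol() mirrors the user's choice into the device wakeup state via device_set_wakeup_enable(); sh_eth_suspend()/sh_eth_resume() at the end of this listing branch on the same mdp->wol_enabled flag to pick between the WoL path and a full close/reinit.
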
2415 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_open() local
2418 pm_runtime_get_sync(&mdp->pdev->dev); in sh_eth_open()
2420 napi_enable(&mdp->napi); in sh_eth_open()
2423 mdp->cd->irq_flags, ndev->name, ndev); in sh_eth_open()
2446 mdp->is_opened = 1; in sh_eth_open()
2453 napi_disable(&mdp->napi); in sh_eth_open()
2454 pm_runtime_put_sync(&mdp->pdev->dev); in sh_eth_open()
2461 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tx_timeout() local
2467 netif_err(mdp, timer, ndev, in sh_eth_tx_timeout()
2475 for (i = 0; i < mdp->num_rx_ring; i++) { in sh_eth_tx_timeout()
2476 rxdesc = &mdp->rx_ring[i]; in sh_eth_tx_timeout()
2479 dev_kfree_skb(mdp->rx_skbuff[i]); in sh_eth_tx_timeout()
2480 mdp->rx_skbuff[i] = NULL; in sh_eth_tx_timeout()
2482 for (i = 0; i < mdp->num_tx_ring; i++) { in sh_eth_tx_timeout()
2483 dev_kfree_skb(mdp->tx_skbuff[i]); in sh_eth_tx_timeout()
2484 mdp->tx_skbuff[i] = NULL; in sh_eth_tx_timeout()
2497 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_start_xmit() local
2503 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_start_xmit()
2504 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) { in sh_eth_start_xmit()
2506 netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n"); in sh_eth_start_xmit()
2508 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_start_xmit()
2512 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_start_xmit()
2517 entry = mdp->cur_tx % mdp->num_tx_ring; in sh_eth_start_xmit()
2518 mdp->tx_skbuff[entry] = skb; in sh_eth_start_xmit()
2519 txdesc = &mdp->tx_ring[entry]; in sh_eth_start_xmit()
2521 if (!mdp->cd->hw_swap) in sh_eth_start_xmit()
2523 dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len, in sh_eth_start_xmit()
2525 if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) { in sh_eth_start_xmit()
2533 if (entry >= mdp->num_tx_ring - 1) in sh_eth_start_xmit()
2539 mdp->cur_tx++; in sh_eth_start_xmit()
2541 if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns)) in sh_eth_start_xmit()
2542 sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR); in sh_eth_start_xmit()
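
The tail of sh_eth_start_xmit() kicks the transmit DMA only when it is idle: EDTRR is read back and the per-chip transmit-request value mdp->cd->edtrr_trns is written only if those bits are clear, avoiding a redundant restart while DMA is already running. sh_eth_error() uses the same value both to detect a stopped transmitter (the "edtrr ^ mdp->cd->edtrr_trns" test above) and to restart it after recovery.
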
2565 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_get_stats() local
2567 if (mdp->cd->no_tx_cntrs) in sh_eth_get_stats()
2570 if (!mdp->is_opened) in sh_eth_get_stats()
2577 if (mdp->cd->cexcr) { in sh_eth_get_stats()
2593 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_close() local
2601 mdp->irq_enabled = false; in sh_eth_close()
2603 napi_disable(&mdp->napi); in sh_eth_close()
2619 mdp->is_opened = 0; in sh_eth_close()
2621 pm_runtime_put(&mdp->pdev->dev); in sh_eth_close()
2643 static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry) in sh_eth_tsu_get_post_bit() argument
2645 return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4)); in sh_eth_tsu_get_post_bit()
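
The bit math in sh_eth_tsu_get_post_bit() packs eight CAM entries into each 32-bit TSU_POSTn register, four bits per entry, with the position inside the nibble selected by mdp->port (consistent with the "Disable CAM entry [ 0- 7]" through "[24-31]" comments in sh_eth_tsu_init() below). Worked examples: port 0, entry 0 gives (0x08 >> 0) << 28 = 0x80000000; port 1, entry 5 gives (0x08 >> 2) << (28 - 5*4) = 0x2 << 8 = 0x200.
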
2651 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_enable_cam_entry_post() local
2655 tmp = sh_eth_tsu_read(mdp, reg); in sh_eth_tsu_enable_cam_entry_post()
2656 sh_eth_tsu_write(mdp, tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg); in sh_eth_tsu_enable_cam_entry_post()
2662 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_disable_cam_entry_post() local
2667 ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask; in sh_eth_tsu_disable_cam_entry_post()
2669 tmp = sh_eth_tsu_read(mdp, reg); in sh_eth_tsu_disable_cam_entry_post()
2670 sh_eth_tsu_write(mdp, tmp & ~post_mask, reg); in sh_eth_tsu_disable_cam_entry_post()
2679 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_busy() local
2681 while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) { in sh_eth_tsu_busy()
2696 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_write_entry() local
2700 iowrite32(val, mdp->tsu_addr + offset); in sh_eth_tsu_write_entry()
2705 iowrite32(val, mdp->tsu_addr + offset + 4); in sh_eth_tsu_write_entry()
2714 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_read_entry() local
2717 val = ioread32(mdp->tsu_addr + offset); in sh_eth_tsu_read_entry()
2722 val = ioread32(mdp->tsu_addr + offset + 4); in sh_eth_tsu_read_entry()
2730 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_find_entry() local
2731 u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); in sh_eth_tsu_find_entry()
2757 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_disable_cam_entry_table() local
2758 u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); in sh_eth_tsu_disable_cam_entry_table()
2762 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) & in sh_eth_tsu_disable_cam_entry_table()
2774 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_add_entry() local
2775 u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); in sh_eth_tsu_add_entry()
2778 if (!mdp->cd->tsu) in sh_eth_tsu_add_entry()
2792 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) | in sh_eth_tsu_add_entry()
2804 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_del_entry() local
2807 if (!mdp->cd->tsu) in sh_eth_tsu_del_entry()
2827 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_purge_all() local
2830 if (!mdp->cd->tsu) in sh_eth_tsu_purge_all()
2848 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_tsu_purge_mcast() local
2849 u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); in sh_eth_tsu_purge_mcast()
2853 if (!mdp->cd->tsu) in sh_eth_tsu_purge_mcast()
2866 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rx_mode() local
2871 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_set_rx_mode()
2876 if (mdp->cd->tsu) in sh_eth_set_rx_mode()
2892 } else if (mdp->cd->tsu) { in sh_eth_set_rx_mode()
2911 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_set_rx_mode()
2916 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_rx_csum() local
2919 spin_lock_irqsave(&mdp->lock, flags); in sh_eth_set_rx_csum()
2930 spin_unlock_irqrestore(&mdp->lock, flags); in sh_eth_set_rx_csum()
2937 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_set_features() local
2939 if (changed & NETIF_F_RXCSUM && mdp->cd->rx_csum) in sh_eth_set_features()
2947 static int sh_eth_get_vtag_index(struct sh_eth_private *mdp) in sh_eth_get_vtag_index() argument
2949 if (!mdp->port) in sh_eth_get_vtag_index()
2958 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_vlan_rx_add_vid() local
2959 int vtag_reg_index = sh_eth_get_vtag_index(mdp); in sh_eth_vlan_rx_add_vid()
2961 if (unlikely(!mdp->cd->tsu)) in sh_eth_vlan_rx_add_vid()
2968 mdp->vlan_num_ids++; in sh_eth_vlan_rx_add_vid()
2973 if (mdp->vlan_num_ids > 1) { in sh_eth_vlan_rx_add_vid()
2975 sh_eth_tsu_write(mdp, 0, vtag_reg_index); in sh_eth_vlan_rx_add_vid()
2979 sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK), in sh_eth_vlan_rx_add_vid()
2988 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_vlan_rx_kill_vid() local
2989 int vtag_reg_index = sh_eth_get_vtag_index(mdp); in sh_eth_vlan_rx_kill_vid()
2991 if (unlikely(!mdp->cd->tsu)) in sh_eth_vlan_rx_kill_vid()
2998 mdp->vlan_num_ids--; in sh_eth_vlan_rx_kill_vid()
2999 sh_eth_tsu_write(mdp, 0, vtag_reg_index); in sh_eth_vlan_rx_kill_vid()
3005 static void sh_eth_tsu_init(struct sh_eth_private *mdp) in sh_eth_tsu_init() argument
3007 if (!mdp->cd->dual_port) { in sh_eth_tsu_init()
3008 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ in sh_eth_tsu_init()
3009 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, in sh_eth_tsu_init()
3014 sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */ in sh_eth_tsu_init()
3015 sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */ in sh_eth_tsu_init()
3016 sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */ in sh_eth_tsu_init()
3017 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0); in sh_eth_tsu_init()
3018 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1); in sh_eth_tsu_init()
3019 sh_eth_tsu_write(mdp, 0, TSU_PRISL0); in sh_eth_tsu_init()
3020 sh_eth_tsu_write(mdp, 0, TSU_PRISL1); in sh_eth_tsu_init()
3021 sh_eth_tsu_write(mdp, 0, TSU_FWSL0); in sh_eth_tsu_init()
3022 sh_eth_tsu_write(mdp, 0, TSU_FWSL1); in sh_eth_tsu_init()
3023 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC); in sh_eth_tsu_init()
3024 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */ in sh_eth_tsu_init()
3025 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */ in sh_eth_tsu_init()
3026 sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all interrupt status clear */ in sh_eth_tsu_init()
3027 sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */ in sh_eth_tsu_init()
3028 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */ in sh_eth_tsu_init()
3029 sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */ in sh_eth_tsu_init()
3030 sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */ in sh_eth_tsu_init()
3031 sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */ in sh_eth_tsu_init()
3032 sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */ in sh_eth_tsu_init()
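
sh_eth_tsu_init() splits on mdp->cd->dual_port: single-port parts only clear the CAM enable (TSU_TEN) and set the POSTEN bits in TSU_FWSLC before returning early, while dual-port parts additionally zero the port-to-port forwarding, priority, QTAG and interrupt-mask registers for both directions (the 0->1 and 1->0 comments).
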
3036 static int sh_mdio_release(struct sh_eth_private *mdp) in sh_mdio_release() argument
3039 mdiobus_unregister(mdp->mii_bus); in sh_mdio_release()
3042 free_mdio_bitbang(mdp->mii_bus); in sh_mdio_release()
3093 static int sh_mdio_init(struct sh_eth_private *mdp, in sh_mdio_init() argument
3098 struct platform_device *pdev = mdp->pdev; in sh_mdio_init()
3099 struct device *dev = &mdp->pdev->dev; in sh_mdio_init()
3107 bitbang->addr = mdp->addr + mdp->reg_offset[PIR]; in sh_mdio_init()
3112 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl); in sh_mdio_init()
3113 if (!mdp->mii_bus) in sh_mdio_init()
3117 mdp->mii_bus->read = sh_mdiobb_read_c22; in sh_mdio_init()
3118 mdp->mii_bus->write = sh_mdiobb_write_c22; in sh_mdio_init()
3119 mdp->mii_bus->read_c45 = sh_mdiobb_read_c45; in sh_mdio_init()
3120 mdp->mii_bus->write_c45 = sh_mdiobb_write_c45; in sh_mdio_init()
3123 mdp->mii_bus->name = "sh_mii"; in sh_mdio_init()
3124 mdp->mii_bus->parent = dev; in sh_mdio_init()
3125 snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", in sh_mdio_init()
3130 mdp->mii_bus->irq[pd->phy] = pd->phy_irq; in sh_mdio_init()
3132 ret = of_mdiobus_register(mdp->mii_bus, dev->of_node); in sh_mdio_init()
3139 free_mdio_bitbang(mdp->mii_bus); in sh_mdio_init()
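
sh_mdio_init() builds the MDIO bus by bit-banging the PIR register: a struct mdiobb_ctrl is pointed at mdp->addr + reg_offset[PIR], wrapped into an mii_bus via alloc_mdio_bitbang(), given the C22/C45 accessors, and registered with of_mdiobus_register(). A stripped-down sketch of that flow (demo_* names hypothetical, error paths abbreviated):

	static int demo_mdio_init(struct mdiobb_ctrl *ctrl, struct device *dev)
	{
		struct mii_bus *bus;
		int ret;

		bus = alloc_mdio_bitbang(ctrl);	/* wires bitbang ops into an mii_bus */
		if (!bus)
			return -ENOMEM;

		bus->name = "demo_mii";
		bus->parent = dev;
		snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%x", bus->name, 0);

		ret = of_mdiobus_register(bus, dev->of_node);
		if (ret)
			free_mdio_bitbang(bus);	/* unwind alloc_mdio_bitbang() */
		return ret;
	}
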
3252 struct sh_eth_private *mdp; in sh_eth_drv_probe() local
3270 mdp = netdev_priv(ndev); in sh_eth_drv_probe()
3271 mdp->num_tx_ring = TX_RING_SIZE; in sh_eth_drv_probe()
3272 mdp->num_rx_ring = RX_RING_SIZE; in sh_eth_drv_probe()
3273 mdp->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res); in sh_eth_drv_probe()
3274 if (IS_ERR(mdp->addr)) { in sh_eth_drv_probe()
3275 ret = PTR_ERR(mdp->addr); in sh_eth_drv_probe()
3281 spin_lock_init(&mdp->lock); in sh_eth_drv_probe()
3282 mdp->pdev = pdev; in sh_eth_drv_probe()
3293 mdp->phy_id = pd->phy; in sh_eth_drv_probe()
3294 mdp->phy_interface = pd->phy_interface; in sh_eth_drv_probe()
3295 mdp->no_ether_link = pd->no_ether_link; in sh_eth_drv_probe()
3296 mdp->ether_link_active_low = pd->ether_link_active_low; in sh_eth_drv_probe()
3300 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data; in sh_eth_drv_probe()
3302 mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev); in sh_eth_drv_probe()
3304 mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type); in sh_eth_drv_probe()
3305 if (!mdp->reg_offset) { in sh_eth_drv_probe()
3307 mdp->cd->register_type); in sh_eth_drv_probe()
3311 sh_eth_set_default_cpu_data(mdp->cd); in sh_eth_drv_probe()
3320 if (mdp->cd->rx_csum) { in sh_eth_drv_probe()
3326 if (mdp->cd->tsu) in sh_eth_drv_probe()
3334 mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE; in sh_eth_drv_probe()
3344 if (mdp->cd->tsu) { in sh_eth_drv_probe()
3366 mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start, in sh_eth_drv_probe()
3368 if (!mdp->tsu_addr) { in sh_eth_drv_probe()
3373 mdp->port = port; in sh_eth_drv_probe()
3378 if (mdp->cd->chip_reset) in sh_eth_drv_probe()
3379 mdp->cd->chip_reset(ndev); in sh_eth_drv_probe()
3382 sh_eth_tsu_init(mdp); in sh_eth_drv_probe()
3386 if (mdp->cd->rmiimode) in sh_eth_drv_probe()
3390 ret = sh_mdio_init(mdp, pd); in sh_eth_drv_probe()
3396 netif_napi_add(ndev, &mdp->napi, sh_eth_poll); in sh_eth_drv_probe()
3403 if (mdp->cd->magic) in sh_eth_drv_probe()
3416 netif_napi_del(&mdp->napi); in sh_eth_drv_probe()
3417 sh_mdio_release(mdp); in sh_eth_drv_probe()
3431 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_drv_remove() local
3434 netif_napi_del(&mdp->napi); in sh_eth_drv_remove()
3435 sh_mdio_release(mdp); in sh_eth_drv_remove()
3446 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_wol_setup() local
3450 napi_disable(&mdp->napi); in sh_eth_wol_setup()
3461 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_wol_restore() local
3464 napi_enable(&mdp->napi); in sh_eth_wol_restore()
3485 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_suspend() local
3493 if (mdp->wol_enabled) in sh_eth_suspend()
3504 struct sh_eth_private *mdp = netdev_priv(ndev); in sh_eth_resume() local
3510 if (mdp->wol_enabled) in sh_eth_resume()