Lines matching refs:np
(cross-reference hits for the driver-private pointer np, apparently from the winbond-840 (Winbond W89c840) Ethernet driver; each entry gives the source line number, the matching line, and its enclosing function)

322 static void free_rxtx_rings(struct netdev_private *np);
326 static void free_ringdesc(struct netdev_private *np);
353 struct netdev_private *np; in w840_probe1() local
373 dev = alloc_etherdev(sizeof(*np)); in w840_probe1()
393 np = netdev_priv(dev); in w840_probe1()
394 np->pci_dev = pdev; in w840_probe1()
395 np->chip_id = chip_idx; in w840_probe1()
396 np->drv_flags = pci_id_tbl[chip_idx].drv_flags; in w840_probe1()
397 spin_lock_init(&np->lock); in w840_probe1()
398 np->mii_if.dev = dev; in w840_probe1()
399 np->mii_if.mdio_read = mdio_read; in w840_probe1()
400 np->mii_if.mdio_write = mdio_write; in w840_probe1()
401 np->base_addr = ioaddr; in w840_probe1()
411 np->mii_if.full_duplex = 1; in w840_probe1()
418 np->mii_if.full_duplex = 1; in w840_probe1()
420 if (np->mii_if.full_duplex) in w840_probe1()
421 np->mii_if.force_media = 1; in w840_probe1()
435 if (np->drv_flags & CanHaveMII) { in w840_probe1()
440 np->phys[phy_idx++] = phy; in w840_probe1()
441 np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE); in w840_probe1()
442 np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+ in w840_probe1()
446 np->mii, phy, mii_status, in w840_probe1()
447 np->mii_if.advertising); in w840_probe1()
450 np->mii_cnt = phy_idx; in w840_probe1()
451 np->mii_if.phy_id = np->phys[0]; in w840_probe1()
558 struct netdev_private *np = netdev_priv(dev); in mdio_read() local
559 void __iomem *mdio_addr = np->base_addr + MIICtrl; in mdio_read()
588 struct netdev_private *np = netdev_priv(dev); in mdio_write() local
589 void __iomem *mdio_addr = np->base_addr + MIICtrl; in mdio_write()
593 if (location == 4 && phy_id == np->phys[0]) in mdio_write()
594 np->mii_if.advertising = value; in mdio_write()
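
mdio_read() and mdio_write() double as the accessors handed to the generic MII library at probe time (lines 399-400), so their shapes are dictated by struct mii_if_info. The write side also mirrors writes to the advertising register of the primary PHY into np->mii_if.advertising, keeping the library's cached view coherent. A sketch with the MDIO bus cycle itself elided:

        /* Callback shapes required by struct mii_if_info (linux/mii.h). */
        static int mdio_read(struct net_device *dev, int phy_id, int location);

        static void mdio_write(struct net_device *dev, int phy_id, int location,
                               int value)
        {
                struct netdev_private *np = netdev_priv(dev);

                /* Location 4 is MII_ADVERTISE: cache it for the mii lib. */
                if (location == 4 && phy_id == np->phys[0])
                        np->mii_if.advertising = value;

                /* ... MDIO write cycle via np->base_addr + MIICtrl elided ... */
        }
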
620 struct netdev_private *np = netdev_priv(dev); in netdev_open() local
621 void __iomem *ioaddr = np->base_addr; in netdev_open()
622 const int irq = np->pci_dev->irq; in netdev_open()
639 spin_lock_irq(&np->lock); in netdev_open()
642 spin_unlock_irq(&np->lock); in netdev_open()
649 timer_setup(&np->timer, netdev_timer, 0); in netdev_open()
650 np->timer.expires = jiffies + 1*HZ; in netdev_open()
651 add_timer(&np->timer); in netdev_open()
662 struct netdev_private *np = netdev_priv(dev); in update_link() local
666 mii_reg = mdio_read(dev, np->phys[0], MII_BMSR); in update_link()
669 return np->csr6; in update_link()
671 mii_reg = mdio_read(dev, np->phys[0], MII_BMSR); in update_link()
677 np->phys[0]); in update_link()
680 return np->csr6; in update_link()
686 np->phys[0]); in update_link()
690 if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) { in update_link()
699 mii_reg = mdio_read(dev, np->phys[0], MII_BMCR); in update_link()
704 mii_reg = mdio_read(dev, np->phys[0], MII_LPA); in update_link()
705 negotiated = mii_reg & np->mii_if.advertising; in update_link()
710 duplex |= np->mii_if.force_media; in update_link()
712 result = np->csr6 & ~0x20000200; in update_link()
717 if (result != np->csr6 && debug) in update_link()
721 np->phys[0]); in update_link()
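
update_link() distills the MII state into a candidate csr6 value. The duplex decision around lines 704-712 is the standard autonegotiation resolution: intersect the partner's LPA with our own advertisement, prefer 100BASE-TX full duplex, fall back to 10BASE-T full duplex, and let force_media override. A sketch; treating 0x200 as the csr6 full-duplex bit is an inference from the mask on line 712:

        mii_reg = mdio_read(dev, np->phys[0], MII_LPA);
        negotiated = mii_reg & np->mii_if.advertising;

        /* Full duplex if 100TX-FD matched (0x0100), or if 10T-FD (0x0040)
         * is the only thing both ends agreed on within 0x01C0. */
        duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
        duplex |= np->mii_if.force_media;

        result = np->csr6 & ~0x20000200;        /* clear speed/duplex bits */
        if (duplex)
                result |= 0x200;                /* inferred full-duplex bit */
        return result;
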
728 struct netdev_private *np = netdev_priv(dev); in update_csr6() local
729 void __iomem *ioaddr = np->base_addr; in update_csr6()
734 if (new==np->csr6) in update_csr6()
737 iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig); in update_csr6()
759 np->csr6 = new; in update_csr6()
761 iowrite32(np->csr6, ioaddr + NetworkConfig); in update_csr6()
763 np->mii_if.full_duplex = 1; in update_csr6()
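
update_csr6() shows why the stop write on line 737 masks out 0x2002: those are the receiver and transmitter start bits, and csr6 may only be rewritten while both DMA engines are idle. The shape of the function, with the idle poll elided:

        if (new == np->csr6)
                return;                         /* nothing to change */
        /* Stop both engines (0x2002 = tx-start | rx-start) ... */
        iowrite32(np->csr6 & ~0x2002, ioaddr + NetworkConfig);
        /* ... poll until the chip reports both engines idle (elided),
         * then install and write the new configuration. */
        np->csr6 = new;
        iowrite32(np->csr6, ioaddr + NetworkConfig);
        if (new & 0x200)                        /* inferred duplex mirror */
                np->mii_if.full_duplex = 1;
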
768 struct netdev_private *np = from_timer(np, t, timer); in netdev_timer() local
769 struct net_device *dev = pci_get_drvdata(np->pci_dev); in netdev_timer()
770 void __iomem *ioaddr = np->base_addr; in netdev_timer()
776 spin_lock_irq(&np->lock); in netdev_timer()
778 spin_unlock_irq(&np->lock); in netdev_timer()
779 np->timer.expires = jiffies + 10*HZ; in netdev_timer()
780 add_timer(&np->timer); in netdev_timer()
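
The timer lines in netdev_open() and netdev_timer() form one idiom: timer_setup() binds the timer embedded in netdev_private at open time, and the handler recovers its container with from_timer() and re-arms itself. The maintenance work between lock and unlock is elided:

        /* Arming (netdev_open, lines 649-651):
         *      timer_setup(&np->timer, netdev_timer, 0);
         *      np->timer.expires = jiffies + 1*HZ;
         *      add_timer(&np->timer);
         */
        static void netdev_timer(struct timer_list *t)
        {
                struct netdev_private *np = from_timer(np, t, timer);
                struct net_device *dev = pci_get_drvdata(np->pci_dev);

                spin_lock_irq(&np->lock);
                /* ... link check / csr6 maintenance elided ... */
                spin_unlock_irq(&np->lock);

                np->timer.expires = jiffies + 10*HZ;    /* next tick in 10s */
                add_timer(&np->timer);
        }
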
785 struct netdev_private *np = netdev_priv(dev); in init_rxtx_rings() local
788 np->rx_head_desc = &np->rx_ring[0]; in init_rxtx_rings()
789 np->tx_ring = (struct w840_tx_desc*)&np->rx_ring[RX_RING_SIZE]; in init_rxtx_rings()
793 np->rx_ring[i].length = np->rx_buf_sz; in init_rxtx_rings()
794 np->rx_ring[i].status = 0; in init_rxtx_rings()
795 np->rx_skbuff[i] = NULL; in init_rxtx_rings()
798 np->rx_ring[i-1].length |= DescEndRing; in init_rxtx_rings()
802 struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz); in init_rxtx_rings()
803 np->rx_skbuff[i] = skb; in init_rxtx_rings()
806 np->rx_addr[i] = dma_map_single(&np->pci_dev->dev, skb->data, in init_rxtx_rings()
807 np->rx_buf_sz, in init_rxtx_rings()
810 np->rx_ring[i].buffer1 = np->rx_addr[i]; in init_rxtx_rings()
811 np->rx_ring[i].status = DescOwned; in init_rxtx_rings()
814 np->cur_rx = 0; in init_rxtx_rings()
815 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE); in init_rxtx_rings()
819 np->tx_skbuff[i] = NULL; in init_rxtx_rings()
820 np->tx_ring[i].status = 0; in init_rxtx_rings()
822 np->tx_full = 0; in init_rxtx_rings()
823 np->tx_q_bytes = np->dirty_tx = np->cur_tx = 0; in init_rxtx_rings()
825 iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr); in init_rxtx_rings()
826 iowrite32(np->ring_dma_addr+sizeof(struct w840_rx_desc)*RX_RING_SIZE, in init_rxtx_rings()
827 np->base_addr + TxRingPtr); in init_rxtx_rings()
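
init_rxtx_rings() fixes the ring layout: one coherent block holds RX_RING_SIZE receive descriptors immediately followed by the transmit descriptors (lines 789 and 825-827), and every receive slot gets an skb mapped for device writes before ownership is handed to the NIC. The fill sequence, condensed from the quoted lines:

        np->rx_head_desc = &np->rx_ring[0];
        np->tx_ring = (struct w840_tx_desc *)&np->rx_ring[RX_RING_SIZE];

        for (i = 0; i < RX_RING_SIZE; i++) {
                np->rx_ring[i].length = np->rx_buf_sz;
                np->rx_ring[i].status = 0;
                np->rx_skbuff[i] = NULL;
        }
        np->rx_ring[i - 1].length |= DescEndRing;       /* wrap marker */

        for (i = 0; i < RX_RING_SIZE; i++) {
                struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz);

                np->rx_skbuff[i] = skb;
                if (!skb)
                        break;                  /* partial ring; refill later */
                np->rx_addr[i] = dma_map_single(&np->pci_dev->dev, skb->data,
                                                np->rx_buf_sz, DMA_FROM_DEVICE);
                np->rx_ring[i].buffer1 = np->rx_addr[i];
                np->rx_ring[i].status = DescOwned;      /* NIC may DMA now */
        }
        np->cur_rx = 0;
        np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

        /* Both ring base registers derive from the one DMA handle. */
        iowrite32(np->ring_dma_addr, np->base_addr + RxRingPtr);
        iowrite32(np->ring_dma_addr + sizeof(struct w840_rx_desc) * RX_RING_SIZE,
                  np->base_addr + TxRingPtr);
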
831 static void free_rxtx_rings(struct netdev_private* np) in free_rxtx_rings() argument
836 np->rx_ring[i].status = 0; in free_rxtx_rings()
837 if (np->rx_skbuff[i]) { in free_rxtx_rings()
838 dma_unmap_single(&np->pci_dev->dev, np->rx_addr[i], in free_rxtx_rings()
839 np->rx_skbuff[i]->len, in free_rxtx_rings()
841 dev_kfree_skb(np->rx_skbuff[i]); in free_rxtx_rings()
843 np->rx_skbuff[i] = NULL; in free_rxtx_rings()
846 if (np->tx_skbuff[i]) { in free_rxtx_rings()
847 dma_unmap_single(&np->pci_dev->dev, np->tx_addr[i], in free_rxtx_rings()
848 np->tx_skbuff[i]->len, DMA_TO_DEVICE); in free_rxtx_rings()
849 dev_kfree_skb(np->tx_skbuff[i]); in free_rxtx_rings()
851 np->tx_skbuff[i] = NULL; in free_rxtx_rings()
857 struct netdev_private *np = netdev_priv(dev); in init_registers() local
858 void __iomem *ioaddr = np->base_addr; in init_registers()
901 np->csr6 = 0; in init_registers()
915 struct netdev_private *np = netdev_priv(dev); in tx_timeout() local
916 void __iomem *ioaddr = np->base_addr; in tx_timeout()
917 const int irq = np->pci_dev->irq; in tx_timeout()
924 printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring); in tx_timeout()
926 printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status); in tx_timeout()
928 printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring); in tx_timeout()
930 printk(KERN_CONT " %08x", np->tx_ring[i].status); in tx_timeout()
934 np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes); in tx_timeout()
938 spin_lock_irq(&np->lock); in tx_timeout()
945 iowrite32(1, np->base_addr+PCIBusCfg); in tx_timeout()
948 free_rxtx_rings(np); in tx_timeout()
951 spin_unlock_irq(&np->lock); in tx_timeout()
956 np->stats.tx_errors++; in tx_timeout()
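
tx_timeout() first dumps both rings for post-mortem (the printk lines above), then recovers with a full software reset. The steps between the reset strobe and the unlock are reconstructed from the helpers the listing names and are assumptions in that exact order:

        spin_lock_irq(&np->lock);
        /* Whatever state the tx engine wedged in, start over: writing 1
         * to PCIBusCfg is the reset strobe (also used in w840_resume,
         * line 1602). */
        iowrite32(1, np->base_addr + PCIBusCfg);
        udelay(1);                              /* assumed settle time */

        free_rxtx_rings(np);                    /* drop all queued frames */
        init_rxtx_rings(dev);                   /* rebuild clean rings */
        init_registers(dev);                    /* reprogram the chip */
        spin_unlock_irq(&np->lock);

        netif_wake_queue(dev);                  /* assumed queue restart */
        np->stats.tx_errors++;
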
962 struct netdev_private *np = netdev_priv(dev); in alloc_ringdesc() local
964 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32); in alloc_ringdesc()
966 np->rx_ring = dma_alloc_coherent(&np->pci_dev->dev, in alloc_ringdesc()
969 &np->ring_dma_addr, GFP_KERNEL); in alloc_ringdesc()
970 if(!np->rx_ring) in alloc_ringdesc()
976 static void free_ringdesc(struct netdev_private *np) in free_ringdesc() argument
978 dma_free_coherent(&np->pci_dev->dev, in free_ringdesc()
981 np->rx_ring, np->ring_dma_addr); in free_ringdesc()
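
alloc_ringdesc() and free_ringdesc() bracket the rings' single coherent allocation. The reference listing elides the size expression, but given the layout above it must span both descriptor arrays; a sketch with that size written out (an assumption consistent with lines 789 and 826):

        static int alloc_ringdesc(struct net_device *dev)
        {
                struct netdev_private *np = netdev_priv(dev);

                np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

                np->rx_ring = dma_alloc_coherent(&np->pci_dev->dev,
                                sizeof(struct w840_rx_desc) * RX_RING_SIZE +
                                sizeof(struct w840_tx_desc) * TX_RING_SIZE,
                                &np->ring_dma_addr, GFP_KERNEL);
                if (!np->rx_ring)
                        return -ENOMEM;
                return 0;
        }

        static void free_ringdesc(struct netdev_private *np)
        {
                dma_free_coherent(&np->pci_dev->dev,
                                sizeof(struct w840_rx_desc) * RX_RING_SIZE +
                                sizeof(struct w840_tx_desc) * TX_RING_SIZE,
                                np->rx_ring, np->ring_dma_addr);
        }
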
987 struct netdev_private *np = netdev_priv(dev); in start_tx() local
994 entry = np->cur_tx % TX_RING_SIZE; in start_tx()
996 np->tx_addr[entry] = dma_map_single(&np->pci_dev->dev, skb->data, in start_tx()
998 np->tx_skbuff[entry] = skb; in start_tx()
1000 np->tx_ring[entry].buffer1 = np->tx_addr[entry]; in start_tx()
1002 np->tx_ring[entry].length = DescWholePkt | skb->len; in start_tx()
1006 np->tx_ring[entry].buffer2 = np->tx_addr[entry]+TX_BUFLIMIT; in start_tx()
1007 np->tx_ring[entry].length = DescWholePkt | (len << 11) | TX_BUFLIMIT; in start_tx()
1010 np->tx_ring[entry].length |= DescEndRing; in start_tx()
1022 spin_lock_irq(&np->lock); in start_tx()
1023 np->cur_tx++; in start_tx()
1026 np->tx_ring[entry].status = DescOwned; in start_tx()
1028 iowrite32(0, np->base_addr + TxStartDemand); in start_tx()
1029 np->tx_q_bytes += skb->len; in start_tx()
1032 if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN || in start_tx()
1033 ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) { in start_tx()
1036 np->tx_full = 1; in start_tx()
1038 spin_unlock_irq(&np->lock); in start_tx()
1042 np->cur_tx, entry); in start_tx()
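
start_tx() is the classic owned-descriptor transmit path: build the descriptor completely, only then flip status to DescOwned so the chip can never see a half-written entry, and poke TxStartDemand (any value; the write itself triggers a ring rescan). Queueing is throttled two ways: a descriptor-count cap, plus a byte cap for chips flagged HasBrokenTx. Condensed, with the oversized-frame split across buffer1/buffer2 (lines 1006-1007) elided; the netif_stop_queue() call and the barrier are assumptions:

        entry = np->cur_tx % TX_RING_SIZE;
        np->tx_addr[entry] = dma_map_single(&np->pci_dev->dev, skb->data,
                                            skb->len, DMA_TO_DEVICE);
        np->tx_skbuff[entry] = skb;

        np->tx_ring[entry].buffer1 = np->tx_addr[entry];
        np->tx_ring[entry].length = DescWholePkt | skb->len;
        if (entry == TX_RING_SIZE - 1)
                np->tx_ring[entry].length |= DescEndRing;

        spin_lock_irq(&np->lock);
        np->cur_tx++;
        wmb();                          /* descriptor body before ownership */
        np->tx_ring[entry].status = DescOwned;
        iowrite32(0, np->base_addr + TxStartDemand);    /* rescan the ring */
        np->tx_q_bytes += skb->len;

        if (np->cur_tx - np->dirty_tx > TX_QUEUE_LEN ||
            ((np->drv_flags & HasBrokenTx) && np->tx_q_bytes > TX_BUG_FIFO_LIMIT)) {
                netif_stop_queue(dev);  /* assumed stop call */
                np->tx_full = 1;
        }
        spin_unlock_irq(&np->lock);
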
1049 struct netdev_private *np = netdev_priv(dev); in netdev_tx_done() local
1050 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { in netdev_tx_done()
1051 int entry = np->dirty_tx % TX_RING_SIZE; in netdev_tx_done()
1052 int tx_status = np->tx_ring[entry].status; in netdev_tx_done()
1062 np->stats.tx_errors++; in netdev_tx_done()
1063 if (tx_status & 0x0104) np->stats.tx_aborted_errors++; in netdev_tx_done()
1064 if (tx_status & 0x0C80) np->stats.tx_carrier_errors++; in netdev_tx_done()
1065 if (tx_status & 0x0200) np->stats.tx_window_errors++; in netdev_tx_done()
1066 if (tx_status & 0x0002) np->stats.tx_fifo_errors++; in netdev_tx_done()
1067 if ((tx_status & 0x0080) && np->mii_if.full_duplex == 0) in netdev_tx_done()
1068 np->stats.tx_heartbeat_errors++; in netdev_tx_done()
1075 np->stats.tx_bytes += np->tx_skbuff[entry]->len; in netdev_tx_done()
1076 np->stats.collisions += (tx_status >> 3) & 15; in netdev_tx_done()
1077 np->stats.tx_packets++; in netdev_tx_done()
1080 dma_unmap_single(&np->pci_dev->dev, np->tx_addr[entry], in netdev_tx_done()
1081 np->tx_skbuff[entry]->len, DMA_TO_DEVICE); in netdev_tx_done()
1082 np->tx_q_bytes -= np->tx_skbuff[entry]->len; in netdev_tx_done()
1083 dev_kfree_skb_irq(np->tx_skbuff[entry]); in netdev_tx_done()
1084 np->tx_skbuff[entry] = NULL; in netdev_tx_done()
1086 if (np->tx_full && in netdev_tx_done()
1087 np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART && in netdev_tx_done()
1088 np->tx_q_bytes < TX_BUG_FIFO_LIMIT) { in netdev_tx_done()
1090 np->tx_full = 0; in netdev_tx_done()
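
netdev_tx_done() reclaims in submission order from dirty_tx toward cur_tx, stopping at the first descriptor the chip still owns, and fans the hardware status word out into the standard counters (lines 1063-1068). Skeleton; testing the signed status for < 0 as the DescOwned check, the 0x8000 error-summary bit, and the netif_wake_queue() call are assumptions:

        for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
                int entry = np->dirty_tx % TX_RING_SIZE;
                int tx_status = np->tx_ring[entry].status;

                if (tx_status < 0)
                        break;          /* DescOwned still set: NIC busy */

                if (tx_status & 0x8000) {       /* assumed error summary */
                        np->stats.tx_errors++;  /* per-bit counters above */
                } else {
                        np->stats.tx_bytes += np->tx_skbuff[entry]->len;
                        np->stats.collisions += (tx_status >> 3) & 15;
                        np->stats.tx_packets++;
                }

                dma_unmap_single(&np->pci_dev->dev, np->tx_addr[entry],
                                 np->tx_skbuff[entry]->len, DMA_TO_DEVICE);
                np->tx_q_bytes -= np->tx_skbuff[entry]->len;
                dev_kfree_skb_irq(np->tx_skbuff[entry]);  /* irq context */
                np->tx_skbuff[entry] = NULL;
        }
        if (np->tx_full &&
            np->cur_tx - np->dirty_tx < TX_QUEUE_LEN_RESTART &&
            np->tx_q_bytes < TX_BUG_FIFO_LIMIT) {
                np->tx_full = 0;
                netif_wake_queue(dev);  /* assumed wake call */
        }
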
1101 struct netdev_private *np = netdev_priv(dev); in intr_handler() local
1102 void __iomem *ioaddr = np->base_addr; in intr_handler()
1128 np->cur_tx != np->dirty_tx) { in intr_handler()
1129 spin_lock(&np->lock); in intr_handler()
1131 spin_unlock(&np->lock); in intr_handler()
1145 spin_lock(&np->lock); in intr_handler()
1150 spin_unlock(&np->lock); in intr_handler()
1165 struct netdev_private *np = netdev_priv(dev); in netdev_rx() local
1166 int entry = np->cur_rx % RX_RING_SIZE; in netdev_rx()
1167 int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx; in netdev_rx()
1171 entry, np->rx_ring[entry].status); in netdev_rx()
1176 struct w840_rx_desc *desc = np->rx_head_desc; in netdev_rx()
1190 np->cur_rx, status); in netdev_rx()
1191 np->stats.rx_length_errors++; in netdev_rx()
1198 np->stats.rx_errors++; /* end of a packet.*/ in netdev_rx()
1199 if (status & 0x0890) np->stats.rx_length_errors++; in netdev_rx()
1200 if (status & 0x004C) np->stats.rx_frame_errors++; in netdev_rx()
1201 if (status & 0x0002) np->stats.rx_crc_errors++; in netdev_rx()
1218 dma_sync_single_for_cpu(&np->pci_dev->dev, in netdev_rx()
1219 np->rx_addr[entry], in netdev_rx()
1220 np->rx_skbuff[entry]->len, in netdev_rx()
1222 skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len); in netdev_rx()
1224 dma_sync_single_for_device(&np->pci_dev->dev, in netdev_rx()
1225 np->rx_addr[entry], in netdev_rx()
1226 np->rx_skbuff[entry]->len, in netdev_rx()
1229 dma_unmap_single(&np->pci_dev->dev, in netdev_rx()
1230 np->rx_addr[entry], in netdev_rx()
1231 np->rx_skbuff[entry]->len, in netdev_rx()
1233 skb_put(skb = np->rx_skbuff[entry], pkt_len); in netdev_rx()
1234 np->rx_skbuff[entry] = NULL; in netdev_rx()
1246 np->stats.rx_packets++; in netdev_rx()
1247 np->stats.rx_bytes += pkt_len; in netdev_rx()
1249 entry = (++np->cur_rx) % RX_RING_SIZE; in netdev_rx()
1250 np->rx_head_desc = &np->rx_ring[entry]; in netdev_rx()
1254 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) { in netdev_rx()
1256 entry = np->dirty_rx % RX_RING_SIZE; in netdev_rx()
1257 if (np->rx_skbuff[entry] == NULL) { in netdev_rx()
1258 skb = netdev_alloc_skb(dev, np->rx_buf_sz); in netdev_rx()
1259 np->rx_skbuff[entry] = skb; in netdev_rx()
1262 np->rx_addr[entry] = dma_map_single(&np->pci_dev->dev, in netdev_rx()
1264 np->rx_buf_sz, in netdev_rx()
1266 np->rx_ring[entry].buffer1 = np->rx_addr[entry]; in netdev_rx()
1269 np->rx_ring[entry].status = DescOwned; in netdev_rx()
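
The receive path above is a copybreak scheme: short frames are copied out of the still-mapped buffer (sync-for-cpu, copy, sync-for-device) so the descriptor keeps its skb, while long frames are unmapped and handed up the stack, leaving a NULL slot. The refill loop then plugs the holes and re-arms descriptors; condensed from lines 1254-1269:

        for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
                struct sk_buff *skb;

                entry = np->dirty_rx % RX_RING_SIZE;
                if (np->rx_skbuff[entry] == NULL) {
                        skb = netdev_alloc_skb(dev, np->rx_buf_sz);
                        np->rx_skbuff[entry] = skb;
                        if (!skb)
                                break;  /* alloc failed; retry next pass */
                        np->rx_addr[entry] = dma_map_single(&np->pci_dev->dev,
                                                        skb->data, np->rx_buf_sz,
                                                        DMA_FROM_DEVICE);
                        np->rx_ring[entry].buffer1 = np->rx_addr[entry];
                }
                wmb();                  /* buffer pointer before ownership */
                np->rx_ring[entry].status = DescOwned;
        }
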
1277 struct netdev_private *np = netdev_priv(dev); in netdev_error() local
1278 void __iomem *ioaddr = np->base_addr; in netdev_error()
1284 spin_lock(&np->lock); in netdev_error()
1292 new = np->csr6 + 0x4000; in netdev_error()
1294 new = (np->csr6 >> 14)&0x7f; in netdev_error()
1299 new = (np->csr6 & ~(0x7F << 14)) | (new<<14); in netdev_error()
1305 np->stats.rx_errors++; in netdev_error()
1312 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff; in netdev_error()
1314 spin_unlock(&np->lock); in netdev_error()
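
The arithmetic on lines 1292-1299 is transmit-underrun recovery: csr6 carries the TX FIFO threshold in a 7-bit field at bit 14, so adding 0x4000 bumps it one step, and the shift/mask pair extracts and reinserts the field. The quoted lines come from different branches of that logic; a small self-contained helper showing just the field math (the name and the overflow guard are hypothetical):

        /* Worked example of the csr6 threshold math used above. */
        static u32 csr6_bump_tx_threshold(u32 csr6)
        {
                u32 thresh = (csr6 >> 14) & 0x7f;       /* read the field */

                if (thresh < 0x7f)
                        thresh++;       /* same effect as csr6 + 0x4000 */
                return (csr6 & ~(0x7F << 14)) | (thresh << 14);
        }
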
1319 struct netdev_private *np = netdev_priv(dev); in get_stats() local
1320 void __iomem *ioaddr = np->base_addr; in get_stats()
1323 spin_lock_irq(&np->lock); in get_stats()
1325 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff; in get_stats()
1326 spin_unlock_irq(&np->lock); in get_stats()
1328 return &np->stats; in get_stats()
1334 struct netdev_private *np = netdev_priv(dev); in __set_rx_mode() local
1335 void __iomem *ioaddr = np->base_addr; in __set_rx_mode()
1368 struct netdev_private *np = netdev_priv(dev); in set_rx_mode() local
1370 spin_lock_irq(&np->lock); in set_rx_mode()
1371 update_csr6(dev, (np->csr6 & ~0x00F8) | rx_mode); in set_rx_mode()
1372 spin_unlock_irq(&np->lock); in set_rx_mode()
1377 struct netdev_private *np = netdev_priv(dev); in netdev_get_drvinfo() local
1380 strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); in netdev_get_drvinfo()
1386 struct netdev_private *np = netdev_priv(dev); in netdev_get_link_ksettings() local
1388 spin_lock_irq(&np->lock); in netdev_get_link_ksettings()
1389 mii_ethtool_get_link_ksettings(&np->mii_if, cmd); in netdev_get_link_ksettings()
1390 spin_unlock_irq(&np->lock); in netdev_get_link_ksettings()
1398 struct netdev_private *np = netdev_priv(dev); in netdev_set_link_ksettings() local
1401 spin_lock_irq(&np->lock); in netdev_set_link_ksettings()
1402 rc = mii_ethtool_set_link_ksettings(&np->mii_if, cmd); in netdev_set_link_ksettings()
1403 spin_unlock_irq(&np->lock); in netdev_set_link_ksettings()
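
From get_stats() down, nearly everything is thin glue: take np->lock, delegate to a generic helper, release. The ethtool link-settings pair is almost exactly what the quoted lines show once the mii library signatures are filled in; a sketch:

        static int netdev_get_link_ksettings(struct net_device *dev,
                                             struct ethtool_link_ksettings *cmd)
        {
                struct netdev_private *np = netdev_priv(dev);

                spin_lock_irq(&np->lock);
                mii_ethtool_get_link_ksettings(&np->mii_if, cmd);
                spin_unlock_irq(&np->lock);
                return 0;
        }

        static int netdev_set_link_ksettings(struct net_device *dev,
                                             const struct ethtool_link_ksettings *cmd)
        {
                struct netdev_private *np = netdev_priv(dev);
                int rc;

                spin_lock_irq(&np->lock);
                rc = mii_ethtool_set_link_ksettings(&np->mii_if, cmd);
                spin_unlock_irq(&np->lock);
                return rc;
        }
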
1410 struct netdev_private *np = netdev_priv(dev); in netdev_nway_reset() local
1411 return mii_nway_restart(&np->mii_if); in netdev_nway_reset()
1416 struct netdev_private *np = netdev_priv(dev); in netdev_get_link() local
1417 return mii_link_ok(&np->mii_if); in netdev_get_link()
1443 struct netdev_private *np = netdev_priv(dev); in netdev_ioctl() local
1451 spin_lock_irq(&np->lock); in netdev_ioctl()
1453 spin_unlock_irq(&np->lock); in netdev_ioctl()
1457 spin_lock_irq(&np->lock); in netdev_ioctl()
1459 spin_unlock_irq(&np->lock); in netdev_ioctl()
1468 struct netdev_private *np = netdev_priv(dev); in netdev_close() local
1469 void __iomem *ioaddr = np->base_addr; in netdev_close()
1478 np->cur_tx, np->dirty_tx, in netdev_close()
1479 np->cur_rx, np->dirty_rx); in netdev_close()
1483 spin_lock_irq(&np->lock); in netdev_close()
1487 spin_unlock_irq(&np->lock); in netdev_close()
1489 free_irq(np->pci_dev->irq, dev); in netdev_close()
1494 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff; in netdev_close()
1500 printk(KERN_DEBUG" Tx ring at %p:\n", np->tx_ring); in netdev_close()
1503 i, np->tx_ring[i].length, in netdev_close()
1504 np->tx_ring[i].status, np->tx_ring[i].buffer1); in netdev_close()
1505 printk(KERN_DEBUG " Rx ring %p:\n", np->rx_ring); in netdev_close()
1508 i, np->rx_ring[i].length, in netdev_close()
1509 np->rx_ring[i].status, np->rx_ring[i].buffer1); in netdev_close()
1514 del_timer_sync(&np->timer); in netdev_close()
1516 free_rxtx_rings(np); in netdev_close()
1517 free_ringdesc(np); in netdev_close()
1527 struct netdev_private *np = netdev_priv(dev); in w840_remove1() local
1529 pci_iounmap(pdev, np->base_addr); in w840_remove1()
1560 struct netdev_private *np = netdev_priv(dev); in w840_suspend() local
1561 void __iomem *ioaddr = np->base_addr; in w840_suspend()
1565 del_timer_sync(&np->timer); in w840_suspend()
1567 spin_lock_irq(&np->lock); in w840_suspend()
1571 spin_unlock_irq(&np->lock); in w840_suspend()
1573 synchronize_irq(np->pci_dev->irq); in w840_suspend()
1576 np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff; in w840_suspend()
1580 BUG_ON(np->csr6 || ioread32(ioaddr + IntrEnable)); in w840_suspend()
1584 free_rxtx_rings(np); in w840_suspend()
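
w840_suspend() is a strict quiesce ordering: each step closes one asynchronous path before the next teardown step becomes safe, and the BUG_ON on line 1580 asserts the end state before the rings are torn down. Annotated excerpt; the update_csr6(dev, 0) stop call is an assumption consistent with the csr6 half of that assertion:

        del_timer_sync(&np->timer);          /* 1: timer can't run or re-arm */

        spin_lock_irq(&np->lock);
        update_csr6(dev, 0);                 /* 2: stop rx/tx engines ...    */
        iowrite32(0, ioaddr + IntrEnable);   /*    ... and mask chip irqs    */
        spin_unlock_irq(&np->lock);

        synchronize_irq(np->pci_dev->irq);   /* 3: in-flight handler drained */

        np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;

        /* 4: nothing can touch the rings anymore -- assert it, then it is
         * safe to unmap and free every buffer. */
        BUG_ON(np->csr6 || ioread32(ioaddr + IntrEnable));
        free_rxtx_rings(np);
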
1595 struct netdev_private *np = netdev_priv(dev); in w840_resume() local
1601 spin_lock_irq(&np->lock); in w840_resume()
1602 iowrite32(1, np->base_addr+PCIBusCfg); in w840_resume()
1603 ioread32(np->base_addr+PCIBusCfg); in w840_resume()
1608 spin_unlock_irq(&np->lock); in w840_resume()
1612 mod_timer(&np->timer, jiffies + 1*HZ); in w840_resume()