Lines Matching refs: tx

252 static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx)  in tsnep_tx_ring_cleanup()  argument
254 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_ring_cleanup()
257 memset(tx->entry, 0, sizeof(tx->entry)); in tsnep_tx_ring_cleanup()
260 if (tx->page[i]) { in tsnep_tx_ring_cleanup()
261 dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i], in tsnep_tx_ring_cleanup()
262 tx->page_dma[i]); in tsnep_tx_ring_cleanup()
263 tx->page[i] = NULL; in tsnep_tx_ring_cleanup()
264 tx->page_dma[i] = 0; in tsnep_tx_ring_cleanup()
269 static int tsnep_tx_ring_init(struct tsnep_tx *tx) in tsnep_tx_ring_init() argument
271 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_ring_init()
278 tx->page[i] = in tsnep_tx_ring_init()
279 dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i], in tsnep_tx_ring_init()
281 if (!tx->page[i]) { in tsnep_tx_ring_init()
286 entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j]; in tsnep_tx_ring_init()
288 (((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j); in tsnep_tx_ring_init()
291 entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j; in tsnep_tx_ring_init()
295 entry = &tx->entry[i]; in tsnep_tx_ring_init()
296 next_entry = &tx->entry[(i + 1) % TSNEP_RING_SIZE]; in tsnep_tx_ring_init()
303 tsnep_tx_ring_cleanup(tx); in tsnep_tx_ring_init()
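
The ring-init matches above show the descriptor ring packed TSNEP_RING_ENTRIES_PER_PAGE entries per DMA-coherent page, with each entry's CPU pointer and DMA address derived from the same per-page offset, and each entry linked to the next at (i + 1) % TSNEP_RING_SIZE. A minimal userspace sketch of that layout arithmetic follows; the constant values below are illustrative assumptions, not taken from the driver headers.

/* Userspace sketch of the descriptor layout implied by the ring-init
 * matches above; the constants are assumed values for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE                    4096
#define TSNEP_DESC_SIZE              128
#define TSNEP_RING_SIZE              256
#define TSNEP_RING_ENTRIES_PER_PAGE  (PAGE_SIZE / TSNEP_DESC_SIZE)
#define TSNEP_RING_PAGE_COUNT        (TSNEP_RING_SIZE / TSNEP_RING_ENTRIES_PER_PAGE)

int main(void)
{
	/* Pretend DMA addresses of the coherent pages, one per ring page. */
	uint64_t page_dma[TSNEP_RING_PAGE_COUNT];
	int i, j;

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++)
		page_dma[i] = 0x10000000ULL + (uint64_t)i * PAGE_SIZE;

	/* Entry k lives at offset TSNEP_DESC_SIZE * j inside page i,
	 * exactly as the init loop computes desc and desc_dma.
	 */
	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
			int k = TSNEP_RING_ENTRIES_PER_PAGE * i + j;
			uint64_t desc_dma = page_dma[i] + TSNEP_DESC_SIZE * j;

			if (k < 3 || k == TSNEP_RING_SIZE - 1)
				printf("entry %3d: desc_dma=0x%llx\n",
				       k, (unsigned long long)desc_dma);
		}
	}
	return 0;
}
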
307 static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length, in tsnep_tx_activate() argument
310 struct tsnep_tx_entry *entry = &tx->entry[index]; in tsnep_tx_activate()
343 if (index == tx->increment_owner_counter) { in tsnep_tx_activate()
344 tx->owner_counter++; in tsnep_tx_activate()
345 if (tx->owner_counter == 4) in tsnep_tx_activate()
346 tx->owner_counter = 1; in tsnep_tx_activate()
347 tx->increment_owner_counter--; in tsnep_tx_activate()
348 if (tx->increment_owner_counter < 0) in tsnep_tx_activate()
349 tx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_tx_activate()
352 (tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) & in tsnep_tx_activate()
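
The tsnep_tx_activate matches show the descriptor owner counter cycling through the values 1..3 and advancing once per lap of the ring: increment_owner_counter walks backwards, so the bump fires exactly once per wrap-around. A small standalone sketch of just that counter logic, with an assumed TSNEP_RING_SIZE:

/* Sketch of the owner-counter rotation seen in tsnep_tx_activate();
 * TSNEP_RING_SIZE is an assumed value for illustration.
 */
#include <stdio.h>

#define TSNEP_RING_SIZE 256

static int owner_counter = 1;
static int increment_owner_counter = TSNEP_RING_SIZE - 1;

static void activate(int index)
{
	if (index == increment_owner_counter) {
		owner_counter++;
		if (owner_counter == 4)
			owner_counter = 1;	/* counter uses values 1..3 */
		increment_owner_counter--;
		if (increment_owner_counter < 0)
			increment_owner_counter = TSNEP_RING_SIZE - 1;
	}
}

int main(void)
{
	int i;

	/* Walk three full laps around the ring: the counter advances
	 * 1 -> 2 -> 3 and then wraps back to 1.
	 */
	for (i = 0; i < 3 * TSNEP_RING_SIZE; i++) {
		activate(i % TSNEP_RING_SIZE);
		if ((i + 1) % TSNEP_RING_SIZE == 0)
			printf("after lap %d: owner_counter=%d\n",
			       (i + 1) / TSNEP_RING_SIZE, owner_counter);
	}
	return 0;
}
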
367 static int tsnep_tx_desc_available(struct tsnep_tx *tx) in tsnep_tx_desc_available() argument
369 if (tx->read <= tx->write) in tsnep_tx_desc_available()
370 return TSNEP_RING_SIZE - tx->write + tx->read - 1; in tsnep_tx_desc_available()
372 return tx->read - tx->write - 1; in tsnep_tx_desc_available()
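
tsnep_tx_desc_available() is the usual ring-buffer free count: one slot is always kept unused so that read == write unambiguously means "empty". A standalone sketch of the arithmetic, with an assumed ring size:

/* Free-descriptor arithmetic from tsnep_tx_desc_available(); the ring
 * size is an assumed illustrative value.
 */
#include <stdio.h>

#define TSNEP_RING_SIZE 256

static int desc_available(int read, int write)
{
	if (read <= write)
		return TSNEP_RING_SIZE - write + read - 1;
	else
		return read - write - 1;
}

int main(void)
{
	/* Empty ring: everything but the reserved slot is available. */
	printf("%d\n", desc_available(0, 0));      /* 255 */
	/* Write pointer ahead of the read pointer. */
	printf("%d\n", desc_available(10, 200));   /* 65 */
	/* Write pointer wrapped around behind the read pointer. */
	printf("%d\n", desc_available(200, 10));   /* 189 */
	return 0;
}
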
375 static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count) in tsnep_tx_map() argument
377 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_map()
385 entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE]; in tsnep_tx_map()
407 entry->desc->tx = __cpu_to_le64(dma); in tsnep_tx_map()
415 static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count) in tsnep_tx_unmap() argument
417 struct device *dmadev = tx->adapter->dmadev; in tsnep_tx_unmap()
423 entry = &tx->entry[(index + i) % TSNEP_RING_SIZE]; in tsnep_tx_unmap()
446 struct tsnep_tx *tx) in tsnep_xmit_frame_ring() argument
457 if (tsnep_tx_desc_available(tx) < count) { in tsnep_xmit_frame_ring()
461 netif_stop_subqueue(tx->adapter->netdev, tx->queue_index); in tsnep_xmit_frame_ring()
466 entry = &tx->entry[tx->write]; in tsnep_xmit_frame_ring()
469 retval = tsnep_tx_map(skb, tx, count); in tsnep_xmit_frame_ring()
471 tsnep_tx_unmap(tx, tx->write, count); in tsnep_xmit_frame_ring()
475 tx->dropped++; in tsnep_xmit_frame_ring()
485 tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length, in tsnep_xmit_frame_ring()
487 tx->write = (tx->write + count) % TSNEP_RING_SIZE; in tsnep_xmit_frame_ring()
494 iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); in tsnep_xmit_frame_ring()
496 if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) { in tsnep_xmit_frame_ring()
498 netif_stop_subqueue(tx->adapter->netdev, tx->queue_index); in tsnep_xmit_frame_ring()
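
Taken together, the tsnep_xmit_frame_ring matches outline the producer side: check for count free descriptors (stopping the subqueue if short), map the skb, activate each descriptor at (write + i) % TSNEP_RING_SIZE, advance write, kick the hardware via the TSNEP_CONTROL register, and stop the subqueue again if fewer than MAX_SKB_FRAGS + 1 slots remain. A heavily stubbed userspace sketch of that ordering; every helper below is a placeholder, not driver API, and MAX_SKB_FRAGS is taken as 17 only for illustration.

/* Stubbed sketch of the transmit ordering seen in tsnep_xmit_frame_ring();
 * all helpers are stand-ins.  DMA mapping of the skb (tsnep_tx_map())
 * is elided here.
 */
#include <stdbool.h>
#include <stdio.h>

#define TSNEP_RING_SIZE 256
#define MAX_SKB_FRAGS   17	/* assumed value */

static int ring_read, ring_write;

static int desc_available(void)
{
	if (ring_read <= ring_write)
		return TSNEP_RING_SIZE - ring_write + ring_read - 1;
	return ring_read - ring_write - 1;
}

static void stop_subqueue(void)  { puts("stop subqueue"); }
static void activate(int index)  { printf("activate descriptor %d\n", index); }
static void kick_hardware(void)  { puts("write TSNEP_CONTROL_TX_ENABLE"); }

static bool xmit(int count)
{
	int i;

	if (desc_available() < count) {
		stop_subqueue();		/* back-pressure the stack */
		return false;
	}

	for (i = 0; i < count; i++)
		activate((ring_write + i) % TSNEP_RING_SIZE);
	ring_write = (ring_write + count) % TSNEP_RING_SIZE;

	kick_hardware();

	if (desc_available() < MAX_SKB_FRAGS + 1)
		stop_subqueue();		/* no room for a worst-case skb */
	return true;
}

int main(void)
{
	xmit(3);
	return 0;
}
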
504 static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx, in tsnep_xdp_tx_map() argument
507 struct device *dmadev = tx->adapter->dmadev; in tsnep_xdp_tx_map()
520 entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE]; in tsnep_xdp_tx_map()
546 entry->desc->tx = __cpu_to_le64(dma); in tsnep_xdp_tx_map()
561 struct tsnep_tx *tx, u32 type) in tsnep_xdp_xmit_frame_ring() argument
575 if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1 + count)) in tsnep_xdp_xmit_frame_ring()
578 entry = &tx->entry[tx->write]; in tsnep_xdp_xmit_frame_ring()
581 retval = tsnep_xdp_tx_map(xdpf, tx, shinfo, count, type); in tsnep_xdp_xmit_frame_ring()
583 tsnep_tx_unmap(tx, tx->write, count); in tsnep_xdp_xmit_frame_ring()
586 tx->dropped++; in tsnep_xdp_xmit_frame_ring()
593 tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length, in tsnep_xdp_xmit_frame_ring()
595 tx->write = (tx->write + count) % TSNEP_RING_SIZE; in tsnep_xdp_xmit_frame_ring()
603 static void tsnep_xdp_xmit_flush(struct tsnep_tx *tx) in tsnep_xdp_xmit_flush() argument
605 iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); in tsnep_xdp_xmit_flush()
610 struct netdev_queue *tx_nq, struct tsnep_tx *tx) in tsnep_xdp_xmit_back() argument
620 xmit = tsnep_xdp_xmit_frame_ring(xdpf, tx, TSNEP_TX_TYPE_XDP_TX); in tsnep_xdp_xmit_back()
631 static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) in tsnep_tx_poll() argument
639 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_poll()
643 if (tx->read == tx->write) in tsnep_tx_poll()
646 entry = &tx->entry[tx->read]; in tsnep_tx_poll()
665 length = tsnep_tx_unmap(tx, tx->read, count); in tsnep_tx_poll()
695 tx->read = (tx->read + count) % TSNEP_RING_SIZE; in tsnep_tx_poll()
697 tx->packets++; in tsnep_tx_poll()
698 tx->bytes += length + ETH_FCS_LEN; in tsnep_tx_poll()
703 if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) && in tsnep_tx_poll()
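
On the completion side, tsnep_tx_poll() walks from read toward write, unmaps each finished buffer, and accounts length + ETH_FCS_LEN per packet, presumably because the on-wire frame includes the FCS that the skb length does not. The queue is only woken once at least (MAX_SKB_FRAGS + 1) * 2 descriptors are free, which gives hysteresis against stop/wake ping-pong. A small sketch of just that accounting and wake-threshold check, with assumed constants:

/* Sketch of the wake-up hysteresis and byte accounting used after TX
 * completion; the constants are assumed values for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_SKB_FRAGS 17
#define ETH_FCS_LEN   4

static bool should_wake(int available)
{
	/* Require room for two worst-case skbs before restarting the queue. */
	return available >= (MAX_SKB_FRAGS + 1) * 2;
}

int main(void)
{
	unsigned long long bytes = 0;
	int frame_len = 1514;

	/* Each completed frame is accounted with the FCS the MAC appends. */
	bytes += frame_len + ETH_FCS_LEN;

	printf("bytes=%llu wake@18=%d wake@36=%d\n",
	       bytes, should_wake(18), should_wake(36));
	return 0;
}
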
713 static bool tsnep_tx_pending(struct tsnep_tx *tx) in tsnep_tx_pending() argument
719 nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); in tsnep_tx_pending()
722 if (tx->read != tx->write) { in tsnep_tx_pending()
723 entry = &tx->entry[tx->read]; in tsnep_tx_pending()
736 int queue_index, struct tsnep_tx *tx) in tsnep_tx_open() argument
741 memset(tx, 0, sizeof(*tx)); in tsnep_tx_open()
742 tx->adapter = adapter; in tsnep_tx_open()
743 tx->addr = addr; in tsnep_tx_open()
744 tx->queue_index = queue_index; in tsnep_tx_open()
746 retval = tsnep_tx_ring_init(tx); in tsnep_tx_open()
750 dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER; in tsnep_tx_open()
751 iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW); in tsnep_tx_open()
752 iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH); in tsnep_tx_open()
753 tx->owner_counter = 1; in tsnep_tx_open()
754 tx->increment_owner_counter = TSNEP_RING_SIZE - 1; in tsnep_tx_open()
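
tsnep_tx_open() hands the DMA address of the first descriptor to the hardware as two 32-bit register writes, with a reset flag ORed into the address, and seeds the software owner-counter state to match. A sketch of the address split; the DMA_ADDR_LOW/HIGH macros and the flag bit value below are assumptions for illustration, not the driver's definitions.

/* Sketch of splitting a 64-bit descriptor base address into the two
 * 32-bit register writes done in tsnep_tx_open(); the flag bit value
 * is an assumption.
 */
#include <stdint.h>
#include <stdio.h>

#define TSNEP_RESET_OWNER_COUNTER 0x1ULL	/* assumed low-bit flag */

#define DMA_ADDR_LOW(addr)  ((uint32_t)((addr) & 0xFFFFFFFFULL))
#define DMA_ADDR_HIGH(addr) ((uint32_t)((addr) >> 32))

int main(void)
{
	uint64_t desc_dma = 0x0000000123456000ULL;	/* first descriptor */
	uint64_t dma = desc_dma | TSNEP_RESET_OWNER_COUNTER;

	printf("LOW=0x%08x HIGH=0x%08x\n",
	       (unsigned int)DMA_ADDR_LOW(dma),
	       (unsigned int)DMA_ADDR_HIGH(dma));
	return 0;
}
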
759 static void tsnep_tx_close(struct tsnep_tx *tx) in tsnep_tx_close() argument
763 readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val, in tsnep_tx_close()
767 tsnep_tx_ring_cleanup(tx); in tsnep_tx_close()
964 struct netdev_queue *tx_nq, struct tsnep_tx *tx) in tsnep_xdp_run_prog() argument
982 if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx)) in tsnep_xdp_run_prog()
1006 struct netdev_queue *tx_nq, struct tsnep_tx *tx) in tsnep_finalize_xdp() argument
1010 tsnep_xdp_xmit_flush(tx); in tsnep_finalize_xdp()
1059 struct tsnep_tx *tx; in tsnep_rx_poll() local
1071 tx = &rx->adapter->tx[rx->tx_queue_index]; in tsnep_rx_poll()
1132 &xdp_status, tx_nq, tx); in tsnep_rx_poll()
1162 tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); in tsnep_rx_poll()
1225 if (queue->tx && tsnep_tx_pending(queue->tx)) in tsnep_pending()
1241 if (queue->tx) in tsnep_poll()
1242 complete = tsnep_tx_poll(queue->tx, budget); in tsnep_poll()
1282 if (queue->tx && queue->rx) in tsnep_request_irq()
1285 else if (queue->tx) in tsnep_request_irq()
1287 queue->tx->queue_index); in tsnep_request_irq()
1336 struct tsnep_tx *tx = queue->tx; in tsnep_queue_open() local
1345 if (tx) in tsnep_queue_open()
1346 rx->tx_queue_index = tx->queue_index; in tsnep_queue_open()
1387 if (adapter->queue[i].tx) { in tsnep_netdev_open()
1390 adapter->queue[i].tx); in tsnep_netdev_open()
1439 if (adapter->queue[i].tx) in tsnep_netdev_open()
1440 tsnep_tx_close(adapter->queue[i].tx); in tsnep_netdev_open()
1462 if (adapter->queue[i].tx) in tsnep_netdev_close()
1463 tsnep_tx_close(adapter->queue[i].tx); in tsnep_netdev_close()
1478 return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]); in tsnep_netdev_xmit_frame()
1516 stats->tx_packets += adapter->tx[i].packets; in tsnep_netdev_get_stats64()
1517 stats->tx_bytes += adapter->tx[i].bytes; in tsnep_netdev_get_stats64()
1518 stats->tx_dropped += adapter->tx[i].dropped; in tsnep_netdev_get_stats64()
1631 return &adapter->tx[cpu]; in tsnep_xdp_get_tx()
1640 struct tsnep_tx *tx; in tsnep_netdev_xdp_xmit() local
1647 tx = tsnep_xdp_get_tx(adapter, cpu); in tsnep_netdev_xdp_xmit()
1648 nq = netdev_get_tx_queue(adapter->netdev, tx->queue_index); in tsnep_netdev_xdp_xmit()
1653 xmit = tsnep_xdp_xmit_frame_ring(xdp[nxmit], tx, in tsnep_netdev_xdp_xmit()
1665 tsnep_xdp_xmit_flush(tx); in tsnep_netdev_xdp_xmit()
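
The XDP xmit path picks a TX ring by CPU, looks up the corresponding netdev queue, queues as many frames as fit, and only then writes the doorbell once via tsnep_xdp_xmit_flush(). A stubbed sketch of that batching pattern; the queue selection by cpu % queue-count is an assumption (the matches only show adapter->tx[cpu]), and all helpers are placeholders.

/* Stubbed sketch of the batched XDP transmit pattern: queue several
 * frames, then flush (ring the doorbell) once.  Helpers are placeholders.
 */
#include <stdbool.h>
#include <stdio.h>

#define NUM_TX_QUEUES 2		/* assumed queue count */

static int pick_tx_queue(int cpu)
{
	return cpu % NUM_TX_QUEUES;	/* assumed mapping, see lead-in */
}

static bool queue_frame(int queue, int frame)
{
	printf("queue %d: frame %d queued\n", queue, frame);
	return true;
}

static void flush(int queue)
{
	printf("queue %d: doorbell written once for the whole batch\n", queue);
}

int main(void)
{
	int queue = pick_tx_queue(5);
	int nxmit;

	for (nxmit = 0; nxmit < 4; nxmit++)
		if (!queue_frame(queue, nxmit))
			break;

	if (nxmit)
		flush(queue);
	return 0;
}
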
1801 adapter->queue[0].tx = &adapter->tx[0]; in tsnep_queue_init()
1825 adapter->queue[i].tx = &adapter->tx[i]; in tsnep_queue_init()