Cross-references for txring_txq(), grouped by directory and file (kernel source line, matched code, enclosing function):

/drivers/net/ethernet/intel/ice/
ice_txrx.c
   141  static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)  (definition of txring_txq())
   183  netdev_tx_reset_queue(txring_txq(tx_ring));  in ice_clean_tx_ring()
   226  netdev_txq_bql_complete_prefetchw(txring_txq(tx_ring));  in ice_clean_tx_irq()
   315  netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts, total_bytes);  in ice_clean_tx_irq()
   324  if (netif_tx_queue_stopped(txring_txq(tx_ring)) &&  in ice_clean_tx_irq()
   326  netif_tx_wake_queue(txring_txq(tx_ring));  in ice_clean_tx_irq()
  1675  netif_tx_stop_queue(txring_txq(tx_ring));  in __ice_maybe_stop_tx()
  1684  netif_tx_start_queue(txring_txq(tx_ring));  in __ice_maybe_stop_tx()
  1836  kick = __netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount,  in ice_tx_map()
  2467  netdev_txq_bql_enqueue_prefetchw(txring_txq(tx_ring));  in ice_xmit_frame_ring()
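The __ice_maybe_stop_tx() hits at 1675 and 1684 are the two halves of the usual stop/recheck/wake handshake against the completion path; the wake side at 324/326 is its mirror image in ice_clean_tx_irq(). A minimal sketch of the stop side, assuming ICE_DESC_UNUSED() is the driver's free-descriptor macro (treat the exact name as an assumption):

    #include <linux/netdevice.h>

    static int sketch_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
    {
            netif_tx_stop_queue(txring_txq(tx_ring));

            /* Order the stop against the clean path's counter updates. */
            smp_mb();

            /* Recheck: the IRQ clean path may have freed room meanwhile. */
            if (likely(ICE_DESC_UNUSED(tx_ring) < size))
                    return -EBUSY;

            /* Lost the race: space appeared between the check and the stop. */
            netif_tx_start_queue(txring_txq(tx_ring));
            return 0;
    }

The netdev_txq_bql_*_prefetchw() calls at 226 and 2467 are an ice-specific touch: they prefetch the BQL cacheline for write before the hot accounting paths run.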
|
/drivers/net/ethernet/intel/igc/
igc_tsn.c
    83  netdev_tx_sent_queue(txring_txq(ring), skb->len);  in igc_fpe_init_tx_descriptor()
   101  nq = txring_txq(ring);  in igc_fpe_xmit_smd_frame()
|
igc_main.c
   255  netdev_tx_reset_queue(txring_txq(tx_ring));  in igc_clean_tx_ring()
  1127  netdev_tx_sent_queue(txring_txq(ring), skb->len);  in igc_init_tx_empty_descriptor()
  1386  netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);  in igc_tx_map()
  1414  if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {  in igc_tx_map()
  2445  netdev_tx_sent_queue(txring_txq(ring), head->bytecount);  in igc_xdp_init_tx_descriptor()
  2499  nq = txring_txq(ring);  in igc_xdp_xmit_back()
  2576  nq = txring_txq(ring);  in igc_finalize_xdp()
  3036  struct netdev_queue *nq = txring_txq(ring);  in igc_xdp_xmit_zc()
  3105  netdev_tx_sent_queue(txring_txq(ring), xdp_desc.len);  in igc_xdp_xmit_zc()
  3231  netdev_tx_completed_queue(txring_txq(tx_ring),  in igc_clean_tx_irq()
  [all …]
|
igc.h
   707  static inline struct netdev_queue *txring_txq(const struct igc_ring *tx_ring)  (definition of txring_txq())
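Every driver in this listing carries its own copy of the same one-line helper; the igc version declared above resolves a driver ring to the stack's struct netdev_queue. A sketch of the body, inferred from the shared pattern rather than quoted verbatim:

    #include <linux/netdevice.h>

    static inline struct netdev_queue *txring_txq(const struct igc_ring *tx_ring)
    {
            /* Map the ring to its paired stack-level TX queue. */
            return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
    }

Everything else in these listings hangs off this lookup: BQL accounting, stop/wake flow control, and queue locking all operate on the returned netdev_queue.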
|
/drivers/net/ethernet/intel/igb/
igb.h
   817  static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)  (definition of txring_txq())
   825  lockdep_assert_held(&txring_txq(ring)->_xmit_lock);  in igb_xdp_ring_update_tail()
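The lockdep_assert_held() at 825 documents that igb_xdp_ring_update_tail() must run with the queue's xmit lock held; XDP paths such as igb_xdp_xmit_back() take it explicitly around the descriptor post and tail bump. A sketch of that caller shape (igb_post_xdp_frame() is a hypothetical stand-in for the real descriptor-posting step):

    #include <linux/netdevice.h>
    #include <net/xdp.h>

    static void sketch_xdp_xmit_back(struct igb_ring *tx_ring, struct xdp_frame *xdpf)
    {
            struct netdev_queue *nq = txring_txq(tx_ring);

            /* NAPI context: preemption is off, so smp_processor_id() is safe. */
            __netif_tx_lock(nq, smp_processor_id());
            igb_post_xdp_frame(tx_ring, xdpf);  /* hypothetical helper */
            igb_xdp_ring_update_tail(tx_ring);  /* asserts _xmit_lock is held */
            __netif_tx_unlock(nq);
    }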
|
igb_xsk.c
   521  netdev_tx_sent_queue(txring_txq(tx_ring), total_bytes);  in igb_xmit_zc()
|
igb_main.c
  2977  nq = txring_txq(tx_ring);  in igb_xdp_xmit_back()
  3014  nq = txring_txq(tx_ring);  in igb_xdp_xmit()
  4999  netdev_tx_reset_queue(txring_txq(tx_ring));  in igb_clean_tx_ring()
  6357  netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);  in igb_tx_map()
  6385  if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {  in igb_tx_map()
  6492  netdev_tx_sent_queue(txring_txq(tx_ring), tx_head->bytecount);  in igb_xmit_xdp_ring()
  6506  if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())  in igb_xmit_xdp_ring()
  8433  netdev_tx_completed_queue(txring_txq(tx_ring),  in igb_clean_tx_irq()
  8451  nq = txring_txq(tx_ring);  in igb_clean_tx_irq()
  9000  nq = txring_txq(tx_ring);  in igb_finalize_xdp()
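The netif_xmit_stopped(...) || !netdev_xmit_more() test at 6385 and 6506 is the doorbell-deferral idiom: while the stack signals that more frames are queued behind this one, the MMIO tail write is skipped and batched. A sketch of the kick, assuming tx_ring->tail is the ring's doorbell mapping as in the Intel drivers:

    #include <linux/io.h>
    #include <linux/netdevice.h>

    static void sketch_kick_doorbell(struct igb_ring *tx_ring, u16 next_to_use)
    {
            /* Ring the doorbell only when batching ends or the queue stopped. */
            if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
                    /* writel() orders prior descriptor writes before the MMIO store. */
                    writel(next_to_use, tx_ring->tail);
            }
    }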
|
igb_ethtool.c
  1877  netdev_tx_reset_queue(txring_txq(tx_ring));  in igb_clean_test_rings()
|
/drivers/net/ethernet/intel/iavf/
iavf_txrx.h
   388  static inline struct netdev_queue *txring_txq(const struct iavf_ring *ring)  (definition of txring_txq())
|
iavf_txrx.c
   105  netdev_tx_reset_queue(txring_txq(tx_ring));  in iavf_clean_tx_ring()
   347  netdev_tx_completed_queue(txring_txq(tx_ring),  in iavf_clean_tx_irq()
  2232  netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);  in iavf_tx_map()
  2261  if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {  in iavf_tx_map()
|
/drivers/net/ethernet/intel/i40e/
i40e_txrx.h
   562  static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)  (definition of txring_txq())
|
i40e_txrx.c
   814  netdev_tx_reset_queue(txring_txq(tx_ring));  in i40e_clean_tx_ring()
  1031  netdev_tx_completed_queue(txring_txq(tx_ring),  in i40e_clean_tx_irq()
  3658  netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);  in i40e_tx_map()
  3699  if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {  in i40e_tx_map()
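Taken together, the i40e_txrx.c call sites show the full BQL lifecycle on one queue: bytes are charged at map time, credited back from the IRQ clean loop, and dropped when the ring is flushed. A condensed sketch (the byte counts and helper names are illustrative; i40e actually charges first->bytecount):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static void sketch_on_xmit(struct i40e_ring *tx_ring, struct sk_buff *skb)
    {
            /* i40e_tx_map(): charge queued bytes to BQL. */
            netdev_tx_sent_queue(txring_txq(tx_ring), skb->len);
    }

    static void sketch_on_clean(struct i40e_ring *tx_ring,
                                unsigned int pkts, unsigned int bytes)
    {
            /* i40e_clean_tx_irq(): report completions so BQL can resize its limit. */
            netdev_tx_completed_queue(txring_txq(tx_ring), pkts, bytes);
    }

    static void sketch_on_ring_flush(struct i40e_ring *tx_ring)
    {
            /* i40e_clean_tx_ring(): reset BQL state along with the descriptors. */
            netdev_tx_reset_queue(txring_txq(tx_ring));
    }

The same three-call pattern repeats in iavf above and in fm10k and ixgbe below; only the ring type differs.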
|
/drivers/net/ethernet/intel/fm10k/
fm10k_main.c
  1006  netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);  in fm10k_tx_map()
  1029  if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {  in fm10k_tx_map()
  1306  netdev_tx_completed_queue(txring_txq(tx_ring),  in fm10k_clean_tx_irq()
|
fm10k.h
   167  static inline struct netdev_queue *txring_txq(const struct fm10k_ring *ring)  (definition of txring_txq())
|
fm10k_netdev.c
   187  netdev_tx_reset_queue(txring_txq(tx_ring));  in fm10k_clean_tx_ring()
|
/drivers/net/ethernet/meta/fbnic/
fbnic_txrx.c
   103  static struct netdev_queue *txring_txq(const struct net_device *dev,  (definition of txring_txq())
   113  struct netdev_queue *txq = txring_txq(dev, ring);  in fbnic_maybe_stop_tx()
   129  struct netdev_queue *dev_queue = txring_txq(skb->dev, ring);  in fbnic_tx_sent_queue()
   583  txq = txring_txq(nv->napi.dev, ring);  in fbnic_clean_twq0()
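fbnic is the odd one out: its txring_txq() takes the net_device explicitly instead of reading it off the ring, which is why the call sites above pass two arguments (skb->dev, nv->napi.dev). A sketch consistent with those call sites; the q_idx field name is an assumption:

    #include <linux/netdevice.h>

    static struct netdev_queue *txring_txq(const struct net_device *dev,
                                           const struct fbnic_ring *ring)
    {
            /* Same lookup as the Intel helpers, with dev supplied by the caller. */
            return netdev_get_tx_queue(dev, ring->q_idx);
    }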
|
/drivers/net/ethernet/intel/ixgbe/
ixgbe.h
  1056  static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)  (definition of txring_txq())
|
ixgbe_main.c
  6637  netdev_tx_reset_queue(txring_txq(tx_ring));  in ixgbe_clean_tx_ring()
  9034  netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);  in ixgbe_tx_map()
  9062  if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {  in ixgbe_tx_map()
|
ixgbe_ethtool.c
  2148  netdev_tx_reset_queue(txring_txq(tx_ring));  in ixgbe_clean_test_rings()
|