Lines matching refs: tx_ring — each entry gives the source line number, the matching line, the enclosing function, and whether tx_ring is an argument or a local variable there.
23 static void i40e_fdir(struct i40e_ring *tx_ring, in i40e_fdir() argument
27 struct i40e_pf *pf = tx_ring->vsi->back; in i40e_fdir()
32 i = tx_ring->next_to_use; in i40e_fdir()
33 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); in i40e_fdir()
36 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_fdir()
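The i40e_fdir() lines above show the usual descriptor-claim pattern: read next_to_use, take the descriptor at that index (I40E_TX_FDIRDESC), then advance the index and wrap it to 0 at count. The ternary on line 36 only makes sense if i was incremented on one of the unlisted lines in between; that line is simply omitted because it does not mention tx_ring. Below is a minimal standalone sketch of the advance-and-wrap step, with a hypothetical struct ring standing in for struct i40e_ring (names are illustrative, not the driver's):

    #include <stdio.h>

    struct ring {
        unsigned int count;         /* number of descriptor slots   */
        unsigned int next_to_use;   /* next slot software will fill */
    };

    static unsigned int ring_claim_slot(struct ring *r)
    {
        unsigned int slot = r->next_to_use;
        unsigned int next = slot + 1;

        r->next_to_use = (next < r->count) ? next : 0;   /* wrap at the end */
        return slot;
    }

    int main(void)
    {
        struct ring r = { .count = 4, .next_to_use = 2 };

        printf("%u\n", ring_claim_slot(&r));   /* 2 */
        printf("%u\n", ring_claim_slot(&r));   /* 3 */
        printf("%u\n", ring_claim_slot(&r));   /* 0: wrapped around */
        return 0;
    }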
88 struct i40e_ring *tx_ring; in i40e_program_fdir_filter() local
100 tx_ring = vsi->tx_rings[0]; in i40e_program_fdir_filter()
101 dev = tx_ring->dev; in i40e_program_fdir_filter()
104 for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) { in i40e_program_fdir_filter()
116 i = tx_ring->next_to_use; in i40e_program_fdir_filter()
117 first = &tx_ring->tx_bi[i]; in i40e_program_fdir_filter()
118 i40e_fdir(tx_ring, fdir_data, add); in i40e_program_fdir_filter()
121 i = tx_ring->next_to_use; in i40e_program_fdir_filter()
122 tx_desc = I40E_TX_DESC(tx_ring, i); in i40e_program_fdir_filter()
123 tx_buf = &tx_ring->tx_bi[i]; in i40e_program_fdir_filter()
125 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0; in i40e_program_fdir_filter()
150 writel(tx_ring->next_to_use, tx_ring->tail); in i40e_program_fdir_filter()
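In i40e_program_fdir_filter() the listing shows a short wait at line 104 until at least two descriptors are unused (one filter-programming descriptor plus one data descriptor), the two descriptors being filled, and finally the new next_to_use written to the ring's tail register at line 150 so the hardware sees the work. The sketch below is a hypothetical model of the unused-slot computation behind a macro such as I40E_DESC_UNUSED; one slot is always kept empty so a full ring and an empty ring remain distinguishable:

    #include <assert.h>

    struct ring {
        unsigned int count;
        unsigned int next_to_use;    /* written by software          */
        unsigned int next_to_clean;  /* advanced as the HW completes */
    };

    static unsigned int ring_unused(const struct ring *r)
    {
        /* slots between next_to_use and next_to_clean, minus one reserved */
        return (r->next_to_clean > r->next_to_use ? 0 : r->count)
               + r->next_to_clean - r->next_to_use - 1;
    }

    int main(void)
    {
        struct ring r = { .count = 8, .next_to_use = 5, .next_to_clean = 2 };

        assert(ring_unused(&r) == 4);   /* slots 6,7,0,1 are free */
        r.next_to_use = 1;
        r.next_to_clean = 2;
        assert(ring_unused(&r) == 0);   /* ring is effectively full */
        return 0;
    }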
783 void i40e_clean_tx_ring(struct i40e_ring *tx_ring) in i40e_clean_tx_ring() argument
788 if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) { in i40e_clean_tx_ring()
789 i40e_xsk_clean_tx_ring(tx_ring); in i40e_clean_tx_ring()
792 if (!tx_ring->tx_bi) in i40e_clean_tx_ring()
796 for (i = 0; i < tx_ring->count; i++) in i40e_clean_tx_ring()
797 i40e_unmap_and_free_tx_resource(tx_ring, in i40e_clean_tx_ring()
798 &tx_ring->tx_bi[i]); in i40e_clean_tx_ring()
801 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; in i40e_clean_tx_ring()
802 memset(tx_ring->tx_bi, 0, bi_size); in i40e_clean_tx_ring()
805 memset(tx_ring->desc, 0, tx_ring->size); in i40e_clean_tx_ring()
807 tx_ring->next_to_use = 0; in i40e_clean_tx_ring()
808 tx_ring->next_to_clean = 0; in i40e_clean_tx_ring()
810 if (!tx_ring->netdev) in i40e_clean_tx_ring()
814 netdev_tx_reset_queue(txring_txq(tx_ring)); in i40e_clean_tx_ring()
823 void i40e_free_tx_resources(struct i40e_ring *tx_ring) in i40e_free_tx_resources() argument
825 i40e_clean_tx_ring(tx_ring); in i40e_free_tx_resources()
826 kfree(tx_ring->tx_bi); in i40e_free_tx_resources()
827 tx_ring->tx_bi = NULL; in i40e_free_tx_resources()
829 if (tx_ring->desc) { in i40e_free_tx_resources()
830 dma_free_coherent(tx_ring->dev, tx_ring->size, in i40e_free_tx_resources()
831 tx_ring->desc, tx_ring->dma); in i40e_free_tx_resources()
832 tx_ring->desc = NULL; in i40e_free_tx_resources()
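Taken together, i40e_clean_tx_ring() and i40e_free_tx_resources() show the teardown order: first drop any buffer still attached to a slot and zero the bookkeeping array, descriptor memory and indices, then free the tx_bi array and the coherently mapped descriptor area. A userspace model of that ordering, with malloc/free standing in for the kernel and DMA allocators and all names illustrative:

    #include <stdlib.h>
    #include <string.h>

    struct tx_buffer { void *skb; };      /* stand-in for struct i40e_tx_buffer */

    struct ring {
        struct tx_buffer *tx_bi;          /* one entry per descriptor slot      */
        void *desc;                       /* descriptor memory (DMA in driver)  */
        size_t size;                      /* bytes of descriptor memory         */
        unsigned int count;
        unsigned int next_to_use, next_to_clean;
    };

    static void ring_clean(struct ring *r)
    {
        unsigned int i;

        if (!r->tx_bi)
            return;
        for (i = 0; i < r->count; i++) {  /* per-slot: release pending buffers */
            free(r->tx_bi[i].skb);
            r->tx_bi[i].skb = NULL;
        }
        memset(r->tx_bi, 0, sizeof(*r->tx_bi) * r->count);
        if (r->desc)
            memset(r->desc, 0, r->size);
        r->next_to_use = 0;
        r->next_to_clean = 0;
    }

    static void ring_free(struct ring *r)
    {
        ring_clean(r);                    /* scrub before releasing memory */
        free(r->tx_bi);
        r->tx_bi = NULL;
        free(r->desc);                    /* dma_free_coherent() in the driver */
        r->desc = NULL;
    }

    int main(void)
    {
        struct ring r = { .count = 4 };

        r.tx_bi = calloc(r.count, sizeof(*r.tx_bi));
        r.size  = r.count * 16;           /* pretend 16-byte descriptors */
        r.desc  = calloc(1, r.size);
        ring_free(&r);
        return 0;
    }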
874 struct i40e_ring *tx_ring = NULL; in i40e_detect_recover_hung() local
893 tx_ring = vsi->tx_rings[i]; in i40e_detect_recover_hung()
894 if (tx_ring && tx_ring->desc) { in i40e_detect_recover_hung()
902 packets = tx_ring->stats.packets & INT_MAX; in i40e_detect_recover_hung()
903 if (tx_ring->tx_stats.prev_pkt_ctr == packets) { in i40e_detect_recover_hung()
904 i40e_force_wb(vsi, tx_ring->q_vector); in i40e_detect_recover_hung()
912 tx_ring->tx_stats.prev_pkt_ctr = in i40e_detect_recover_hung()
913 i40e_get_tx_pending(tx_ring, true) ? packets : -1; in i40e_detect_recover_hung()
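i40e_detect_recover_hung() walks the VSI's TX rings from the service task and keeps a one-sample history, prev_pkt_ctr, per ring: if the completed-packet counter has not moved since the previous pass while descriptors are still pending, the queue is assumed stuck and a descriptor write-back is forced (i40e_force_wb()). A hypothetical single-ring model of that heuristic, with illustrative names:

    #include <stdbool.h>
    #include <stdio.h>
    #include <limits.h>

    struct ring_stats {
        unsigned long packets;   /* completed packets, monotonically increasing */
        int prev_pkt_ctr;        /* snapshot from the previous pass, -1 = idle  */
    };

    static bool check_hang(struct ring_stats *s, bool tx_pending)
    {
        int packets = (int)(s->packets & INT_MAX);

        if (s->prev_pkt_ctr == packets)
            return true;                        /* no progress: force write-back */

        /* progress was made: only track a counter while work is pending */
        s->prev_pkt_ctr = tx_pending ? packets : -1;
        return false;
    }

    int main(void)
    {
        struct ring_stats s = { .packets = 100, .prev_pkt_ctr = -1 };

        printf("%d\n", check_hang(&s, true));   /* 0: first pass only arms the check */
        printf("%d\n", check_hang(&s, true));   /* 1: counter unchanged -> looks hung */
        return 0;
    }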
928 struct i40e_ring *tx_ring, int napi_budget, in i40e_clean_tx_irq() argument
931 int i = tx_ring->next_to_clean; in i40e_clean_tx_irq()
938 tx_buf = &tx_ring->tx_bi[i]; in i40e_clean_tx_irq()
939 tx_desc = I40E_TX_DESC(tx_ring, i); in i40e_clean_tx_irq()
940 i -= tx_ring->count; in i40e_clean_tx_irq()
942 tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring)); in i40e_clean_tx_irq()
954 i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf); in i40e_clean_tx_irq()
967 if (ring_is_xdp(tx_ring)) in i40e_clean_tx_irq()
973 dma_unmap_single(tx_ring->dev, in i40e_clean_tx_irq()
985 tx_ring, tx_desc, tx_buf); in i40e_clean_tx_irq()
991 i -= tx_ring->count; in i40e_clean_tx_irq()
992 tx_buf = tx_ring->tx_bi; in i40e_clean_tx_irq()
993 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_clean_tx_irq()
998 dma_unmap_page(tx_ring->dev, in i40e_clean_tx_irq()
1011 i -= tx_ring->count; in i40e_clean_tx_irq()
1012 tx_buf = tx_ring->tx_bi; in i40e_clean_tx_irq()
1013 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_clean_tx_irq()
1022 i += tx_ring->count; in i40e_clean_tx_irq()
1023 tx_ring->next_to_clean = i; in i40e_clean_tx_irq()
1024 i40e_update_tx_stats(tx_ring, total_packets, total_bytes); in i40e_clean_tx_irq()
1025 i40e_arm_wb(tx_ring, vsi, budget); in i40e_clean_tx_irq()
1027 if (ring_is_xdp(tx_ring)) in i40e_clean_tx_irq()
1031 netdev_tx_completed_queue(txring_txq(tx_ring), in i40e_clean_tx_irq()
1035 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in i40e_clean_tx_irq()
1036 (I40E_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { in i40e_clean_tx_irq()
1041 if (__netif_subqueue_stopped(tx_ring->netdev, in i40e_clean_tx_irq()
1042 tx_ring->queue_index) && in i40e_clean_tx_irq()
1044 netif_wake_subqueue(tx_ring->netdev, in i40e_clean_tx_irq()
1045 tx_ring->queue_index); in i40e_clean_tx_irq()
1046 ++tx_ring->tx_stats.restart_queue; in i40e_clean_tx_irq()
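The i40e_clean_tx_irq() lines are the completion loop. Two details are visible: the running index is biased by -count at line 940 so the wrap test inside the hot loop is a cheap "did i hit zero?" check (lines 991-993 and 1011-1013), with the bias removed again at line 1022; and once enough descriptors are free (TX_WAKE_THRESHOLD, lines 1035-1046) a previously stopped subqueue is woken. A small standalone model of the biased-index walk follows; it is a sketch of the indexing trick only, not of the unmapping or statistics work:

    #include <assert.h>

    #define COUNT 8

    static unsigned int clean_n_slots(unsigned int next_to_clean, unsigned int n)
    {
        int i = (int)next_to_clean - COUNT;     /* biased: negative while walking */

        while (n--) {
            i++;
            if (!i)                             /* passed the last slot: wrap */
                i -= COUNT;
        }
        return (unsigned int)(i + COUNT);       /* un-bias back to a real index */
    }

    int main(void)
    {
        assert(clean_n_slots(6, 3) == 1);       /* cleans 6, 7, 0; next is 1   */
        assert(clean_n_slots(0, 8) == 0);       /* a full lap lands back at 0  */
        return 0;
    }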
1416 int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring) in i40e_setup_tx_descriptors() argument
1418 struct device *dev = tx_ring->dev; in i40e_setup_tx_descriptors()
1425 WARN_ON(tx_ring->tx_bi); in i40e_setup_tx_descriptors()
1426 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; in i40e_setup_tx_descriptors()
1427 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); in i40e_setup_tx_descriptors()
1428 if (!tx_ring->tx_bi) in i40e_setup_tx_descriptors()
1431 u64_stats_init(&tx_ring->syncp); in i40e_setup_tx_descriptors()
1434 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); in i40e_setup_tx_descriptors()
1438 tx_ring->size += sizeof(u32); in i40e_setup_tx_descriptors()
1439 tx_ring->size = ALIGN(tx_ring->size, 4096); in i40e_setup_tx_descriptors()
1440 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in i40e_setup_tx_descriptors()
1441 &tx_ring->dma, GFP_KERNEL); in i40e_setup_tx_descriptors()
1442 if (!tx_ring->desc) { in i40e_setup_tx_descriptors()
1444 tx_ring->size); in i40e_setup_tx_descriptors()
1448 tx_ring->next_to_use = 0; in i40e_setup_tx_descriptors()
1449 tx_ring->next_to_clean = 0; in i40e_setup_tx_descriptors()
1450 tx_ring->tx_stats.prev_pkt_ctr = -1; in i40e_setup_tx_descriptors()
1454 kfree(tx_ring->tx_bi); in i40e_setup_tx_descriptors()
1455 tx_ring->tx_bi = NULL; in i40e_setup_tx_descriptors()
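i40e_setup_tx_descriptors() sizes the descriptor area as count descriptors plus a u32 head write-back word (line 1438), rounds the total up to 4 KiB (line 1439) and allocates it with dma_alloc_coherent(); the software indices and the hang-detection counter are then reset. A sketch of just the sizing arithmetic; the 16-byte descriptor matches sizeof(struct i40e_tx_desc), everything else here is illustrative:

    #include <stdio.h>
    #include <stdint.h>

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((size_t)(a) - 1))

    struct tx_desc { uint64_t qw0, qw1; };      /* 16-byte TX descriptor */

    int main(void)
    {
        unsigned int count = 512;
        size_t size = count * sizeof(struct tx_desc);  /* 8192 bytes of descriptors */

        size += sizeof(uint32_t);                      /* head write-back word      */
        size = ALIGN_UP(size, 4096);                   /* round up to a 4 KiB granule */
        printf("%zu\n", size);                         /* prints 12288 */
        return 0;
    }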
2861 static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_atr() argument
2865 struct i40e_pf *pf = tx_ring->vsi->back; in i40e_atr()
2885 if (!tx_ring->atr_sample_rate) in i40e_atr()
2931 tx_ring->atr_count++; in i40e_atr()
2937 (tx_ring->atr_count < tx_ring->atr_sample_rate)) in i40e_atr()
2940 tx_ring->atr_count = 0; in i40e_atr()
2943 i = tx_ring->next_to_use; in i40e_atr()
2944 fdir_desc = I40E_TX_FDIRDESC(tx_ring, i); in i40e_atr()
2947 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_atr()
2950 tx_ring->queue_index); in i40e_atr()
2957 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT; in i40e_atr()
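The i40e_atr() fragment shows two gates before an ATR filter descriptor is built: sampling must be enabled on the ring (atr_sample_rate, line 2885), and atr_count must have reached the sample rate (lines 2931-2940); in the driver, TCP FIN/RST segments bypass the rate check so terminating flows are always reported. Once a sample is taken, the same claim-and-wrap step as in i40e_fdir() reserves a slot (lines 2943-2947). A hypothetical model of the sampling gate, names illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    struct atr_state {
        unsigned int atr_count;
        unsigned int atr_sample_rate;
    };

    static bool atr_should_sample(struct atr_state *s, bool fin_or_rst)
    {
        if (!s->atr_sample_rate)
            return false;                 /* sampling disabled on this ring */

        s->atr_count++;
        /* assumption: flow-ending segments skip the rate limit */
        if (!fin_or_rst && s->atr_count < s->atr_sample_rate)
            return false;                 /* not this packet's turn */

        s->atr_count = 0;                 /* sample taken: restart the window */
        return true;
    }

    int main(void)
    {
        struct atr_state s = { .atr_count = 0, .atr_sample_rate = 3 };

        for (int i = 0; i < 6; i++)
            printf("%d", atr_should_sample(&s, false));   /* prints 001001 */
        printf("\n");
        return 0;
    }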
3005 struct i40e_ring *tx_ring, in i40e_tx_prepare_vlan_flags() argument
3012 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { in i40e_tx_prepare_vlan_flags()
3041 if (!test_bit(I40E_FLAG_DCB_ENA, tx_ring->vsi->back->flags)) in i40e_tx_prepare_vlan_flags()
3201 static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_tsyn() argument
3216 pf = i40e_netdev_to_pf(tx_ring->netdev); in i40e_tsyn()
3247 struct i40e_ring *tx_ring, in i40e_tx_enable_csum() argument
3428 static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, in i40e_create_tx_ctx() argument
3433 int i = tx_ring->next_to_use; in i40e_create_tx_ctx()
3440 context_desc = I40E_TX_CTXTDESC(tx_ring, i); in i40e_create_tx_ctx()
3443 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_create_tx_ctx()
3459 int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) in __i40e_maybe_stop_tx() argument
3461 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __i40e_maybe_stop_tx()
3465 ++tx_ring->tx_stats.tx_stopped; in __i40e_maybe_stop_tx()
3468 if (likely(I40E_DESC_UNUSED(tx_ring) < size)) in __i40e_maybe_stop_tx()
3472 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __i40e_maybe_stop_tx()
3473 ++tx_ring->tx_stats.restart_queue; in __i40e_maybe_stop_tx()
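__i40e_maybe_stop_tx() is the slow path taken when a frame might not fit: it stops the subqueue first, then (after a memory barrier in the driver) re-checks the free-descriptor count; if the clean path freed space in the meantime, the queue is immediately restarted and restart_queue is bumped. A single-threaded model of that stop-then-recheck shape, with hypothetical names:

    #include <stdbool.h>
    #include <stdio.h>

    struct queue {
        bool stopped;
        unsigned int unused;      /* free descriptor slots             */
        unsigned int restarts;    /* mirrors tx_stats.restart_queue    */
    };

    static int maybe_stop(struct queue *q, unsigned int size)
    {
        q->stopped = true;                 /* netif_stop_subqueue() in the driver */
        /* smp_mb() here in the driver, paired with the clean path */
        if (q->unused < size)
            return -1;                     /* genuinely out of room: stay stopped */

        q->stopped = false;                /* raced with cleanup: wake back up */
        q->restarts++;
        return 0;
    }

    int main(void)
    {
        struct queue q = { .stopped = false, .unused = 3, .restarts = 0 };

        printf("%d\n", maybe_stop(&q, 8));  /* -1: queue stays stopped        */
        q.unused = 16;                      /* cleanup freed descriptors      */
        printf("%d\n", maybe_stop(&q, 8));  /*  0: the stop is undone at once */
        return 0;
    }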
3573 static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, in i40e_tx_map() argument
3582 u16 i = tx_ring->next_to_use; in i40e_tx_map()
3594 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in i40e_tx_map()
3596 tx_desc = I40E_TX_DESC(tx_ring, i); in i40e_tx_map()
3602 if (dma_mapping_error(tx_ring->dev, dma)) in i40e_tx_map()
3622 if (i == tx_ring->count) { in i40e_tx_map()
3623 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_tx_map()
3644 if (i == tx_ring->count) { in i40e_tx_map()
3645 tx_desc = I40E_TX_DESC(tx_ring, 0); in i40e_tx_map()
3652 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in i40e_tx_map()
3655 tx_bi = &tx_ring->tx_bi[i]; in i40e_tx_map()
3658 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in i40e_tx_map()
3661 if (i == tx_ring->count) in i40e_tx_map()
3664 tx_ring->next_to_use = i; in i40e_tx_map()
3666 i40e_maybe_stop_tx(tx_ring, DESC_NEEDED); in i40e_tx_map()
3674 desc_count |= ++tx_ring->packet_stride; in i40e_tx_map()
3679 tx_ring->packet_stride = 0; in i40e_tx_map()
3699 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { in i40e_tx_map()
3700 writel(i, tx_ring->tail); in i40e_tx_map()
3706 dev_info(tx_ring->dev, "TX DMA map failed\n"); in i40e_tx_map()
3710 tx_bi = &tx_ring->tx_bi[i]; in i40e_tx_map()
3711 i40e_unmap_and_free_tx_resource(tx_ring, tx_bi); in i40e_tx_map()
3715 i = tx_ring->count; in i40e_tx_map()
3719 tx_ring->next_to_use = i; in i40e_tx_map()
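In i40e_tx_map() the head and each fragment are DMA-mapped and spread across as many descriptors as needed, with the same index wrap as elsewhere (lines 3622-3623, 3644-3645, 3661); next_to_use is then published, and the tail register is written only when the queue is stopped or the stack reports no more frames queued behind this one (netdev_xmit_more(), lines 3699-3700), so a burst of packets costs a single MMIO doorbell. The error path at lines 3706-3719 unwinds the mappings and rolls next_to_use back. A sketch of the doorbell-batching decision, names illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    struct ring_model {
        unsigned int next_to_use;
        unsigned int tail;          /* last value "written" to the tail register */
        unsigned int mmio_writes;
    };

    static void finish_frame(struct ring_model *r, unsigned int new_ntu,
                             bool queue_stopped, bool xmit_more)
    {
        r->next_to_use = new_ntu;
        if (queue_stopped || !xmit_more) {      /* flush point for the burst */
            r->tail = new_ntu;                  /* writel(i, tx_ring->tail)  */
            r->mmio_writes++;
        }
    }

    int main(void)
    {
        struct ring_model r = { 0, 0, 0 };

        finish_frame(&r, 3, false, true);   /* more frames coming: no doorbell */
        finish_frame(&r, 6, false, true);
        finish_frame(&r, 9, false, false);  /* end of burst: one doorbell      */
        printf("tail=%u writes=%u\n", r.tail, r.mmio_writes);  /* tail=9 writes=1 */
        return 0;
    }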
3872 struct i40e_ring *tx_ring) in i40e_xmit_frame_ring() argument
3887 i40e_trace(xmit_frame_ring, skb, tx_ring); in i40e_xmit_frame_ring()
3896 tx_ring->tx_stats.tx_linearize++; in i40e_xmit_frame_ring()
3905 if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) { in i40e_xmit_frame_ring()
3906 tx_ring->tx_stats.tx_busy++; in i40e_xmit_frame_ring()
3911 first = &tx_ring->tx_bi[tx_ring->next_to_use]; in i40e_xmit_frame_ring()
3917 if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) in i40e_xmit_frame_ring()
3929 tx_ring, &cd_tunneling); in i40e_xmit_frame_ring()
3933 tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss); in i40e_xmit_frame_ring()
3941 i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, in i40e_xmit_frame_ring()
3948 i40e_atr(tx_ring, skb, tx_flags); in i40e_xmit_frame_ring()
3950 if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len, in i40e_xmit_frame_ring()
3957 i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring); in i40e_xmit_frame_ring()
3962 struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev); in i40e_xmit_frame_ring()
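i40e_xmit_frame_ring() front-loads the descriptor check: before touching the ring it estimates how many descriptors the frame needs and reserves extra headroom on top (count + 4 + 1 at line 3905), which the driver sets aside for the context descriptor and a gap around the head write-back cache line; if the reservation fails the attempt is counted as tx_busy. A rough model of that budgeting; the per-descriptor data limit below is an assumed value, not the driver's constant, and descs_for() is a hypothetical helper:

    #include <stdio.h>

    #define MAX_DATA_PER_DESC 16384u    /* assumed per-descriptor byte limit */

    /* descriptors needed for one contiguous chunk of payload */
    static unsigned int descs_for(unsigned int len)
    {
        return (len + MAX_DATA_PER_DESC - 1) / MAX_DATA_PER_DESC;
    }

    int main(void)
    {
        /* e.g. a 1500-byte linear area plus two 32 KiB page fragments */
        unsigned int count = descs_for(1500) + descs_for(32768) + descs_for(32768);

        /* data descriptors + 4 (head write-back gap) + 1 (context descriptor) */
        printf("reserve %u descriptors\n", count + 4 + 1);   /* prints 10 */
        return 0;
    }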
3983 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping]; in i40e_lan_xmit_frame() local
3991 return i40e_xmit_frame_ring(skb, tx_ring); in i40e_lan_xmit_frame()