Lines Matching refs:rx_ring

690 void iavf_clean_rx_ring(struct iavf_ring *rx_ring) in iavf_clean_rx_ring() argument
696 if (!rx_ring->rx_bi) in iavf_clean_rx_ring()
699 if (rx_ring->skb) { in iavf_clean_rx_ring()
700 dev_kfree_skb(rx_ring->skb); in iavf_clean_rx_ring()
701 rx_ring->skb = NULL; in iavf_clean_rx_ring()
705 for (i = 0; i < rx_ring->count; i++) { in iavf_clean_rx_ring()
706 struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i]; in iavf_clean_rx_ring()
714 dma_sync_single_range_for_cpu(rx_ring->dev, in iavf_clean_rx_ring()
717 rx_ring->rx_buf_len, in iavf_clean_rx_ring()
721 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, in iavf_clean_rx_ring()
722 iavf_rx_pg_size(rx_ring), in iavf_clean_rx_ring()
732 bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count; in iavf_clean_rx_ring()
733 memset(rx_ring->rx_bi, 0, bi_size); in iavf_clean_rx_ring()
736 memset(rx_ring->desc, 0, rx_ring->size); in iavf_clean_rx_ring()
738 rx_ring->next_to_alloc = 0; in iavf_clean_rx_ring()
739 rx_ring->next_to_clean = 0; in iavf_clean_rx_ring()
740 rx_ring->next_to_use = 0; in iavf_clean_rx_ring()
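
The fragments above cover most of iavf_clean_rx_ring(). Assembled into a minimal
sketch of the teardown path (the parts not listed above, namely the rx_bi->page
check, the DMA_FROM_DEVICE/IAVF_RX_DMA_ATTR arguments and the
__page_frag_cache_drain() call, are assumptions based on the upstream iavf
driver rather than verbatim source):

    void iavf_clean_rx_ring(struct iavf_ring *rx_ring)
    {
            unsigned long bi_size;
            u16 i;

            /* ring already cleared, nothing to do */
            if (!rx_ring->rx_bi)
                    return;

            /* drop any partially assembled packet held across polls */
            if (rx_ring->skb) {
                    dev_kfree_skb(rx_ring->skb);
                    rx_ring->skb = NULL;
            }

            /* release every posted Rx page */
            for (i = 0; i < rx_ring->count; i++) {
                    struct iavf_rx_buffer *rx_bi = &rx_ring->rx_bi[i];

                    if (!rx_bi->page)
                            continue;

                    /* invalidate cache lines the device may have written */
                    dma_sync_single_range_for_cpu(rx_ring->dev, rx_bi->dma,
                                                  rx_bi->page_offset,
                                                  rx_ring->rx_buf_len,
                                                  DMA_FROM_DEVICE);

                    /* release the DMA mapping and the page itself */
                    dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma,
                                         iavf_rx_pg_size(rx_ring),
                                         DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR);
                    __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias);

                    rx_bi->page = NULL;
                    rx_bi->page_offset = 0;
            }

            /* zero the buffer_info array and the descriptor ring itself */
            bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
            memset(rx_ring->rx_bi, 0, bi_size);
            memset(rx_ring->desc, 0, rx_ring->size);

            rx_ring->next_to_alloc = 0;
            rx_ring->next_to_clean = 0;
            rx_ring->next_to_use = 0;
    }
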
749 void iavf_free_rx_resources(struct iavf_ring *rx_ring) in iavf_free_rx_resources() argument
751 iavf_clean_rx_ring(rx_ring); in iavf_free_rx_resources()
752 kfree(rx_ring->rx_bi); in iavf_free_rx_resources()
753 rx_ring->rx_bi = NULL; in iavf_free_rx_resources()
755 if (rx_ring->desc) { in iavf_free_rx_resources()
756 dma_free_coherent(rx_ring->dev, rx_ring->size, in iavf_free_rx_resources()
757 rx_ring->desc, rx_ring->dma); in iavf_free_rx_resources()
758 rx_ring->desc = NULL; in iavf_free_rx_resources()
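
Those fragments are nearly the whole of iavf_free_rx_resources(): the function
tears down the posted buffers via iavf_clean_rx_ring() and then releases the
software array and the coherent descriptor memory. Assembled as a sketch:

    void iavf_free_rx_resources(struct iavf_ring *rx_ring)
    {
            iavf_clean_rx_ring(rx_ring);
            kfree(rx_ring->rx_bi);
            rx_ring->rx_bi = NULL;

            if (rx_ring->desc) {
                    dma_free_coherent(rx_ring->dev, rx_ring->size,
                                      rx_ring->desc, rx_ring->dma);
                    rx_ring->desc = NULL;
            }
    }
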
768 int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring) in iavf_setup_rx_descriptors() argument
770 struct device *dev = rx_ring->dev; in iavf_setup_rx_descriptors()
774 WARN_ON(rx_ring->rx_bi); in iavf_setup_rx_descriptors()
775 bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count; in iavf_setup_rx_descriptors()
776 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); in iavf_setup_rx_descriptors()
777 if (!rx_ring->rx_bi) in iavf_setup_rx_descriptors()
780 u64_stats_init(&rx_ring->syncp); in iavf_setup_rx_descriptors()
783 rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc); in iavf_setup_rx_descriptors()
784 rx_ring->size = ALIGN(rx_ring->size, 4096); in iavf_setup_rx_descriptors()
785 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in iavf_setup_rx_descriptors()
786 &rx_ring->dma, GFP_KERNEL); in iavf_setup_rx_descriptors()
788 if (!rx_ring->desc) { in iavf_setup_rx_descriptors()
790 rx_ring->size); in iavf_setup_rx_descriptors()
794 rx_ring->next_to_alloc = 0; in iavf_setup_rx_descriptors()
795 rx_ring->next_to_clean = 0; in iavf_setup_rx_descriptors()
796 rx_ring->next_to_use = 0; in iavf_setup_rx_descriptors()
800 kfree(rx_ring->rx_bi); in iavf_setup_rx_descriptors()
801 rx_ring->rx_bi = NULL; in iavf_setup_rx_descriptors()
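
Read together, the iavf_setup_rx_descriptors() fragments describe the
allocation path: a kzalloc'd buffer_info array, a 4 KiB-aligned coherent DMA
block sized for 32-byte descriptors, and a shared error label that frees the
array again. A sketch of that flow (the error message, the err label and the
-ENOMEM return value are assumptions taken from the upstream driver):

    int iavf_setup_rx_descriptors(struct iavf_ring *rx_ring)
    {
            struct device *dev = rx_ring->dev;
            int bi_size;

            /* warn if we are about to overwrite the pointer */
            WARN_ON(rx_ring->rx_bi);
            bi_size = sizeof(struct iavf_rx_buffer) * rx_ring->count;
            rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL);
            if (!rx_ring->rx_bi)
                    goto err;

            u64_stats_init(&rx_ring->syncp);

            /* round the descriptor area up to the nearest 4K */
            rx_ring->size = rx_ring->count * sizeof(union iavf_32byte_rx_desc);
            rx_ring->size = ALIGN(rx_ring->size, 4096);
            rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
                                               &rx_ring->dma, GFP_KERNEL);
            if (!rx_ring->desc) {
                    dev_info(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
                             rx_ring->size);
                    goto err;
            }

            rx_ring->next_to_alloc = 0;
            rx_ring->next_to_clean = 0;
            rx_ring->next_to_use = 0;

            return 0;
    err:
            kfree(rx_ring->rx_bi);
            rx_ring->rx_bi = NULL;
            return -ENOMEM;
    }
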
810 static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val) in iavf_release_rx_desc() argument
812 rx_ring->next_to_use = val; in iavf_release_rx_desc()
815 rx_ring->next_to_alloc = val; in iavf_release_rx_desc()
823 writel(val, rx_ring->tail); in iavf_release_rx_desc()
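
iavf_release_rx_desc() is the tail-bump helper. The lines omitted between the
next_to_alloc update and the writel() contain the write barrier, which is the
important detail: descriptor writes must be globally visible before hardware is
told there are new descriptors to fetch. A sketch (the wmb() is an assumption
based on the upstream driver):

    static inline void iavf_release_rx_desc(struct iavf_ring *rx_ring, u32 val)
    {
            rx_ring->next_to_use = val;

            /* update next to alloc since we have filled the ring */
            rx_ring->next_to_alloc = val;

            /* force memory writes to complete before letting h/w know
             * there are new descriptors to fetch; only matters on
             * weakly ordered memory-model architectures
             */
            wmb();
            writel(val, rx_ring->tail);
    }
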
832 static inline unsigned int iavf_rx_offset(struct iavf_ring *rx_ring) in iavf_rx_offset() argument
834 return ring_uses_build_skb(rx_ring) ? IAVF_SKB_PAD : 0; in iavf_rx_offset()
845 static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring, in iavf_alloc_mapped_page() argument
853 rx_ring->rx_stats.page_reuse_count++; in iavf_alloc_mapped_page()
858 page = dev_alloc_pages(iavf_rx_pg_order(rx_ring)); in iavf_alloc_mapped_page()
860 rx_ring->rx_stats.alloc_page_failed++; in iavf_alloc_mapped_page()
865 dma = dma_map_page_attrs(rx_ring->dev, page, 0, in iavf_alloc_mapped_page()
866 iavf_rx_pg_size(rx_ring), in iavf_alloc_mapped_page()
873 if (dma_mapping_error(rx_ring->dev, dma)) { in iavf_alloc_mapped_page()
874 __free_pages(page, iavf_rx_pg_order(rx_ring)); in iavf_alloc_mapped_page()
875 rx_ring->rx_stats.alloc_page_failed++; in iavf_alloc_mapped_page()
881 bi->page_offset = iavf_rx_offset(rx_ring); in iavf_alloc_mapped_page()
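
iavf_alloc_mapped_page() prefers to reuse a page that is already attached to
the buffer_info and only allocates and DMA-maps a fresh one when none is
present. A sketch assembled from the fragments (the DMA direction/attrs, the
bi->dma/bi->page assignments and the pagecnt_bias initialisation are
assumptions based on the upstream driver):

    static bool iavf_alloc_mapped_page(struct iavf_ring *rx_ring,
                                       struct iavf_rx_buffer *bi)
    {
            struct page *page = bi->page;
            dma_addr_t dma;

            /* since buffers are recycled, allocation should be rare */
            if (likely(page)) {
                    rx_ring->rx_stats.page_reuse_count++;
                    return true;
            }

            page = dev_alloc_pages(iavf_rx_pg_order(rx_ring));
            if (unlikely(!page)) {
                    rx_ring->rx_stats.alloc_page_failed++;
                    return false;
            }

            /* map the whole page for device writes */
            dma = dma_map_page_attrs(rx_ring->dev, page, 0,
                                     iavf_rx_pg_size(rx_ring),
                                     DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR);
            if (dma_mapping_error(rx_ring->dev, dma)) {
                    __free_pages(page, iavf_rx_pg_order(rx_ring));
                    rx_ring->rx_stats.alloc_page_failed++;
                    return false;
            }

            bi->dma = dma;
            bi->page = page;
            bi->page_offset = iavf_rx_offset(rx_ring);
            bi->pagecnt_bias = 1;   /* we fully own the page at this point */

            return true;
    }
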
895 static void iavf_receive_skb(struct iavf_ring *rx_ring, in iavf_receive_skb() argument
898 struct iavf_q_vector *q_vector = rx_ring->q_vector; in iavf_receive_skb()
900 if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) && in iavf_receive_skb()
903 else if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_STAG_RX) && in iavf_receive_skb()
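
iavf_receive_skb() only has to attach the hardware-stripped VLAN tag (as CTAG
or STAG, depending on which offload is enabled on the netdev) and hand the skb
to GRO. A minimal sketch, assuming the usual __vlan_hwaccel_put_tag() and
napi_gro_receive() pairing used by Intel Ethernet drivers:

    static void iavf_receive_skb(struct iavf_ring *rx_ring,
                                 struct sk_buff *skb, u16 vlan_tag)
    {
            struct iavf_q_vector *q_vector = rx_ring->q_vector;

            if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
                (vlan_tag & VLAN_VID_MASK))
                    __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
            else if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_STAG_RX) &&
                     (vlan_tag & VLAN_VID_MASK))
                    __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan_tag);

            napi_gro_receive(&q_vector->napi, skb);
    }
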
917 bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count) in iavf_alloc_rx_buffers() argument
919 u16 ntu = rx_ring->next_to_use; in iavf_alloc_rx_buffers()
924 if (!rx_ring->netdev || !cleaned_count) in iavf_alloc_rx_buffers()
927 rx_desc = IAVF_RX_DESC(rx_ring, ntu); in iavf_alloc_rx_buffers()
928 bi = &rx_ring->rx_bi[ntu]; in iavf_alloc_rx_buffers()
931 if (!iavf_alloc_mapped_page(rx_ring, bi)) in iavf_alloc_rx_buffers()
935 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in iavf_alloc_rx_buffers()
937 rx_ring->rx_buf_len, in iavf_alloc_rx_buffers()
948 if (unlikely(ntu == rx_ring->count)) { in iavf_alloc_rx_buffers()
949 rx_desc = IAVF_RX_DESC(rx_ring, 0); in iavf_alloc_rx_buffers()
950 bi = rx_ring->rx_bi; in iavf_alloc_rx_buffers()
960 if (rx_ring->next_to_use != ntu) in iavf_alloc_rx_buffers()
961 iavf_release_rx_desc(rx_ring, ntu); in iavf_alloc_rx_buffers()
966 if (rx_ring->next_to_use != ntu) in iavf_alloc_rx_buffers()
967 iavf_release_rx_desc(rx_ring, ntu); in iavf_alloc_rx_buffers()
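
iavf_alloc_rx_buffers() is the refill loop: for each descriptor to replenish it
maps (or reuses) a page, syncs the buffer for the device, writes the packet
address into the descriptor, and wraps ntu at rx_ring->count. The two
iavf_release_rx_desc() calls above are the normal exit and the
allocation-failure exit, which is why the function returns true only when it
could not refill everything. A condensed sketch (the descriptor field names,
read.pkt_addr and wb.qword1.status_error_len, are assumptions from the upstream
iavf descriptor layout):

    bool iavf_alloc_rx_buffers(struct iavf_ring *rx_ring, u16 cleaned_count)
    {
            u16 ntu = rx_ring->next_to_use;
            union iavf_rx_desc *rx_desc;
            struct iavf_rx_buffer *bi;

            /* do nothing if no valid netdev defined */
            if (!rx_ring->netdev || !cleaned_count)
                    return false;

            rx_desc = IAVF_RX_DESC(rx_ring, ntu);
            bi = &rx_ring->rx_bi[ntu];

            do {
                    if (!iavf_alloc_mapped_page(rx_ring, bi))
                            goto no_buffers;

                    /* sync the region the device will write into */
                    dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
                                                     bi->page_offset,
                                                     rx_ring->rx_buf_len,
                                                     DMA_FROM_DEVICE);

                    /* refresh the address; write-back erases this info */
                    rx_desc->read.pkt_addr =
                            cpu_to_le64(bi->dma + bi->page_offset);

                    rx_desc++;
                    bi++;
                    ntu++;
                    if (unlikely(ntu == rx_ring->count)) {
                            rx_desc = IAVF_RX_DESC(rx_ring, 0);
                            bi = rx_ring->rx_bi;
                            ntu = 0;
                    }

                    /* clear status bits so stale DD flags are not read */
                    rx_desc->wb.qword1.status_error_len = 0;

                    cleaned_count--;
            } while (cleaned_count);

            if (rx_ring->next_to_use != ntu)
                    iavf_release_rx_desc(rx_ring, ntu);
            return false;

    no_buffers:
            if (rx_ring->next_to_use != ntu)
                    iavf_release_rx_desc(rx_ring, ntu);

            /* signal the caller to retry from the next poll */
            return true;
    }
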
1120 void iavf_process_skb_fields(struct iavf_ring *rx_ring, in iavf_process_skb_fields() argument
1124 iavf_rx_hash(rx_ring, rx_desc, skb, rx_ptype); in iavf_process_skb_fields()
1126 iavf_rx_checksum(rx_ring->vsi, skb, rx_desc); in iavf_process_skb_fields()
1128 skb_record_rx_queue(skb, rx_ring->queue_index); in iavf_process_skb_fields()
1131 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in iavf_process_skb_fields()
1147 static bool iavf_cleanup_headers(struct iavf_ring *rx_ring, struct sk_buff *skb) in iavf_cleanup_headers() argument
1163 static void iavf_reuse_rx_page(struct iavf_ring *rx_ring, in iavf_reuse_rx_page() argument
1167 u16 nta = rx_ring->next_to_alloc; in iavf_reuse_rx_page()
1169 new_buff = &rx_ring->rx_bi[nta]; in iavf_reuse_rx_page()
1173 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in iavf_reuse_rx_page()
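
The single assignment captured from iavf_reuse_rx_page() looks like an
off-by-one until you know that nta is incremented on the line just above it:
the helper copies the recycled page into the slot at next_to_alloc and then
advances that index with wrap-around. Sketch (the copied buffer fields are an
assumption based on the upstream driver):

    static void iavf_reuse_rx_page(struct iavf_ring *rx_ring,
                                   struct iavf_rx_buffer *old_buff)
    {
            struct iavf_rx_buffer *new_buff;
            u16 nta = rx_ring->next_to_alloc;

            new_buff = &rx_ring->rx_bi[nta];

            /* update, and store next to alloc (wrapping at ring size) */
            nta++;
            rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

            /* transfer the page from the old buffer to the new one */
            new_buff->dma          = old_buff->dma;
            new_buff->page         = old_buff->page;
            new_buff->page_offset  = old_buff->page_offset;
            new_buff->pagecnt_bias = old_buff->pagecnt_bias;
    }
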
1253 static void iavf_add_rx_frag(struct iavf_ring *rx_ring, in iavf_add_rx_frag() argument
1259 unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; in iavf_add_rx_frag()
1261 unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring)); in iavf_add_rx_frag()
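
The two truesize lines from iavf_add_rx_frag() are not contradictory; they are
the two arms of a PAGE_SIZE conditional. On 4 KiB-page systems the ring splits
each page in half and flips page_offset between the halves, while on larger
pages it advances the offset by the aligned fragment size. A sketch of that
scheme (the skb_add_rx_frag() call and the XOR flip are assumptions based on
the upstream driver):

    static void iavf_add_rx_frag(struct iavf_ring *rx_ring,
                                 struct iavf_rx_buffer *rx_buffer,
                                 struct sk_buff *skb,
                                 unsigned int size)
    {
    #if (PAGE_SIZE < 8192)
            unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2;
    #else
            unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));
    #endif

            skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
                            rx_buffer->page_offset, size, truesize);

            /* the buffer is now referenced by the skb, move the offset on */
    #if (PAGE_SIZE < 8192)
            rx_buffer->page_offset ^= truesize;     /* flip between halves */
    #else
            rx_buffer->page_offset += truesize;
    #endif
    }
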
1286 static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring, in iavf_get_rx_buffer() argument
1291 rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; in iavf_get_rx_buffer()
1297 dma_sync_single_range_for_cpu(rx_ring->dev, in iavf_get_rx_buffer()
1319 static struct sk_buff *iavf_construct_skb(struct iavf_ring *rx_ring, in iavf_construct_skb() argument
1325 unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; in iavf_construct_skb()
1339 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, in iavf_construct_skb()
1383 static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring, in iavf_build_skb() argument
1389 unsigned int truesize = iavf_rx_pg_size(rx_ring) / 2; in iavf_build_skb()
1429 static void iavf_put_rx_buffer(struct iavf_ring *rx_ring, in iavf_put_rx_buffer() argument
1437 iavf_reuse_rx_page(rx_ring, rx_buffer); in iavf_put_rx_buffer()
1438 rx_ring->rx_stats.page_reuse_count++; in iavf_put_rx_buffer()
1441 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in iavf_put_rx_buffer()
1442 iavf_rx_pg_size(rx_ring), in iavf_put_rx_buffer()
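
iavf_put_rx_buffer() decides the fate of a buffer once its data has been
consumed: recycle the half-page back onto the ring when the page can still be
reused, otherwise unmap it and drop the remaining references. Sketch; the reuse
test (iavf_can_reuse_rx_page()) and the __page_frag_cache_drain() call are
assumptions taken from the upstream driver:

    static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,
                                   struct iavf_rx_buffer *rx_buffer)
    {
            if (iavf_can_reuse_rx_page(rx_buffer)) {
                    /* hand the other half of the page back to the ring */
                    iavf_reuse_rx_page(rx_ring, rx_buffer);
                    rx_ring->rx_stats.page_reuse_count++;
            } else {
                    /* not reusable: unmap and release our page references */
                    dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
                                         iavf_rx_pg_size(rx_ring),
                                         DMA_FROM_DEVICE, IAVF_RX_DMA_ATTR);
                    __page_frag_cache_drain(rx_buffer->page,
                                            rx_buffer->pagecnt_bias);
            }

            /* clear contents of buffer_info */
            rx_buffer->page = NULL;
    }
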
1463 static bool iavf_is_non_eop(struct iavf_ring *rx_ring, in iavf_is_non_eop() argument
1467 u32 ntc = rx_ring->next_to_clean + 1; in iavf_is_non_eop()
1470 ntc = (ntc < rx_ring->count) ? ntc : 0; in iavf_is_non_eop()
1471 rx_ring->next_to_clean = ntc; in iavf_is_non_eop()
1473 prefetch(IAVF_RX_DESC(rx_ring, ntc)); in iavf_is_non_eop()
1480 rx_ring->rx_stats.non_eop_descs++; in iavf_is_non_eop()
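
iavf_is_non_eop() advances next_to_clean (with wrap), prefetches the following
descriptor, and reports whether the current descriptor lacks the end-of-packet
bit, i.e. whether the frame continues into another buffer. Sketch, with the EOF
test written via iavf_test_staterr() as in the upstream driver (an assumption
here, since that line is not listed above):

    static bool iavf_is_non_eop(struct iavf_ring *rx_ring,
                                union iavf_rx_desc *rx_desc,
                                struct sk_buff *skb)
    {
            u32 ntc = rx_ring->next_to_clean + 1;

            /* fetch, update, and store next to clean */
            ntc = (ntc < rx_ring->count) ? ntc : 0;
            rx_ring->next_to_clean = ntc;

            prefetch(IAVF_RX_DESC(rx_ring, ntc));

            /* if this was the last buffer of the frame we are done */
            if (likely(iavf_test_staterr(rx_desc,
                                         BIT(IAVF_RX_DESC_STATUS_EOF_SHIFT))))
                    return false;

            rx_ring->rx_stats.non_eop_descs++;
            return true;
    }
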
1497 static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget) in iavf_clean_rx_irq() argument
1500 struct sk_buff *skb = rx_ring->skb; in iavf_clean_rx_irq()
1501 u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring); in iavf_clean_rx_irq()
1515 iavf_alloc_rx_buffers(rx_ring, cleaned_count); in iavf_clean_rx_irq()
1519 rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean); in iavf_clean_rx_irq()
1540 iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb); in iavf_clean_rx_irq()
1541 rx_buffer = iavf_get_rx_buffer(rx_ring, size); in iavf_clean_rx_irq()
1545 iavf_add_rx_frag(rx_ring, rx_buffer, skb, size); in iavf_clean_rx_irq()
1546 else if (ring_uses_build_skb(rx_ring)) in iavf_clean_rx_irq()
1547 skb = iavf_build_skb(rx_ring, rx_buffer, size); in iavf_clean_rx_irq()
1549 skb = iavf_construct_skb(rx_ring, rx_buffer, size); in iavf_clean_rx_irq()
1553 rx_ring->rx_stats.alloc_buff_failed++; in iavf_clean_rx_irq()
1559 iavf_put_rx_buffer(rx_ring, rx_buffer); in iavf_clean_rx_irq()
1562 if (iavf_is_non_eop(rx_ring, rx_desc, skb)) in iavf_clean_rx_irq()
1576 if (iavf_cleanup_headers(rx_ring, skb)) { in iavf_clean_rx_irq()
1589 iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); in iavf_clean_rx_irq()
1592 rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1) in iavf_clean_rx_irq()
1596 rx_ring->flags & IAVF_RXR_FLAGS_VLAN_TAG_LOC_L2TAG2_2) in iavf_clean_rx_irq()
1599 iavf_trace(clean_rx_irq_rx, rx_ring, rx_desc, skb); in iavf_clean_rx_irq()
1600 iavf_receive_skb(rx_ring, skb, vlan_tag); in iavf_clean_rx_irq()
1607 rx_ring->skb = skb; in iavf_clean_rx_irq()
1609 u64_stats_update_begin(&rx_ring->syncp); in iavf_clean_rx_irq()
1610 rx_ring->stats.packets += total_rx_packets; in iavf_clean_rx_irq()
1611 rx_ring->stats.bytes += total_rx_bytes; in iavf_clean_rx_irq()
1612 u64_stats_update_end(&rx_ring->syncp); in iavf_clean_rx_irq()
1613 rx_ring->q_vector->rx.total_packets += total_rx_packets; in iavf_clean_rx_irq()
1614 rx_ring->q_vector->rx.total_bytes += total_rx_bytes; in iavf_clean_rx_irq()
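
Finally, iavf_clean_rx_irq() is the NAPI poll body that ties the helpers above
together: refill buffers in batches, fetch the next written-back descriptor,
attach the buffer to an skb (as a page frag, via build_skb, or by constructing
a fresh skb), recycle the buffer, and once a complete frame is assembled,
populate its fields, pick up the VLAN tag and pass it to iavf_receive_skb(). A
heavily condensed sketch of that loop; trace hooks, the L2TAG2 VLAN path and
several error checks are elided, and the qword masks and descriptor field names
are assumptions from the upstream iavf descriptor layout:

    static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)
    {
            unsigned int total_rx_bytes = 0, total_rx_packets = 0;
            struct sk_buff *skb = rx_ring->skb;
            u16 cleaned_count = IAVF_DESC_UNUSED(rx_ring);
            bool failure = false;

            while (likely(total_rx_packets < (unsigned int)budget)) {
                    struct iavf_rx_buffer *rx_buffer;
                    union iavf_rx_desc *rx_desc;
                    unsigned int size;
                    u16 vlan_tag = 0;
                    u8 rx_ptype;
                    u64 qword;

                    /* return buffers to hardware in batches */
                    if (cleaned_count >= IAVF_RX_BUFFER_WRITE) {
                            failure = failure ||
                                      iavf_alloc_rx_buffers(rx_ring, cleaned_count);
                            cleaned_count = 0;
                    }

                    rx_desc = IAVF_RX_DESC(rx_ring, rx_ring->next_to_clean);

                    qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);

                    /* order the status read before the rest of the descriptor */
                    dma_rmb();

                    /* stop once we hit a descriptor hardware has not written */
                    if (!iavf_test_staterr(rx_desc,
                                           BIT(IAVF_RX_DESC_STATUS_DD_SHIFT)))
                            break;

                    size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>
                           IAVF_RXD_QW1_LENGTH_PBUF_SHIFT;

                    rx_buffer = iavf_get_rx_buffer(rx_ring, size);

                    if (skb)
                            iavf_add_rx_frag(rx_ring, rx_buffer, skb, size);
                    else if (ring_uses_build_skb(rx_ring))
                            skb = iavf_build_skb(rx_ring, rx_buffer, size);
                    else
                            skb = iavf_construct_skb(rx_ring, rx_buffer, size);

                    if (!skb) {
                            rx_ring->rx_stats.alloc_buff_failed++;
                            break;
                    }

                    iavf_put_rx_buffer(rx_ring, rx_buffer);
                    cleaned_count++;

                    /* frame spans multiple descriptors? keep accumulating */
                    if (iavf_is_non_eop(rx_ring, rx_desc, skb))
                            continue;

                    if (iavf_cleanup_headers(rx_ring, skb)) {
                            skb = NULL;
                            continue;
                    }

                    total_rx_bytes += skb->len;

                    rx_ptype = (qword & IAVF_RXD_QW1_PTYPE_MASK) >>
                               IAVF_RXD_QW1_PTYPE_SHIFT;
                    iavf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);

                    /* tag location depends on the ring flags (L2TAG2 elided) */
                    if (rx_ring->flags & IAVF_TXRX_FLAGS_VLAN_TAG_LOC_L2TAG1)
                            vlan_tag = le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1);

                    iavf_receive_skb(rx_ring, skb, vlan_tag);
                    skb = NULL;

                    total_rx_packets++;
            }

            rx_ring->skb = skb;

            u64_stats_update_begin(&rx_ring->syncp);
            rx_ring->stats.packets += total_rx_packets;
            rx_ring->stats.bytes += total_rx_bytes;
            u64_stats_update_end(&rx_ring->syncp);
            rx_ring->q_vector->rx.total_packets += total_rx_packets;
            rx_ring->q_vector->rx.total_bytes += total_rx_bytes;

            /* guarantee another pass if a buffer allocation failed */
            return failure ? budget : (int)total_rx_packets;
    }
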