Lines matching refs: rx_ring (ice driver Rx path)

383 void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)  in ice_clean_rx_ring()  argument
385 struct xdp_buff *xdp = &rx_ring->xdp; in ice_clean_rx_ring()
386 struct device *dev = rx_ring->dev; in ice_clean_rx_ring()
391 if (!rx_ring->rx_buf) in ice_clean_rx_ring()
394 if (rx_ring->xsk_pool) { in ice_clean_rx_ring()
395 ice_xsk_clean_rx_ring(rx_ring); in ice_clean_rx_ring()
405 for (i = 0; i < rx_ring->count; i++) { in ice_clean_rx_ring()
406 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; in ice_clean_rx_ring()
416 rx_ring->rx_buf_len, in ice_clean_rx_ring()
420 dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring), in ice_clean_rx_ring()
429 if (rx_ring->xsk_pool) in ice_clean_rx_ring()
430 memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf))); in ice_clean_rx_ring()
432 memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf))); in ice_clean_rx_ring()
435 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), in ice_clean_rx_ring()
437 memset(rx_ring->desc, 0, size); in ice_clean_rx_ring()
439 rx_ring->next_to_alloc = 0; in ice_clean_rx_ring()
440 rx_ring->next_to_clean = 0; in ice_clean_rx_ring()
441 rx_ring->first_desc = 0; in ice_clean_rx_ring()
442 rx_ring->next_to_use = 0; in ice_clean_rx_ring()
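
The tail of ice_clean_rx_ring() above zeroes the descriptor memory and rewinds all four ring cursors. A minimal user-space model of that reset, with a simplified struct standing in for struct ice_rx_ring (all names below are illustrative, not the driver's):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <stdint.h>

    #define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

    struct rx_desc { uint8_t raw[32]; };  /* stand-in for union ice_32byte_rx_desc */

    struct ring {
        struct rx_desc *desc;
        uint16_t count;
        uint16_t next_to_alloc, next_to_clean, first_desc, next_to_use;
    };

    static void clean_rx_ring(struct ring *r)
    {
        /* zero the descriptor area, with the same 4 KiB round-up the
         * driver applies via ALIGN(count * desc_size, PAGE_SIZE) */
        memset(r->desc, 0, ALIGN_UP((size_t)r->count * sizeof(struct rx_desc), 4096));

        /* rewind every cursor so the ring restarts from slot 0 */
        r->next_to_alloc = 0;
        r->next_to_clean = 0;
        r->first_desc = 0;
        r->next_to_use = 0;
    }

    int main(void)
    {
        struct ring r = { .count = 512, .next_to_use = 77 };

        r.desc = calloc(1, ALIGN_UP((size_t)r.count * sizeof(struct rx_desc), 4096));
        if (!r.desc)
            return 1;
        clean_rx_ring(&r);
        printf("next_to_use=%u\n", r.next_to_use);  /* prints 0 */
        free(r.desc);
        return 0;
    }
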
451 void ice_free_rx_ring(struct ice_rx_ring *rx_ring) in ice_free_rx_ring() argument
455 ice_clean_rx_ring(rx_ring); in ice_free_rx_ring()
456 if (rx_ring->vsi->type == ICE_VSI_PF) in ice_free_rx_ring()
457 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) in ice_free_rx_ring()
458 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in ice_free_rx_ring()
459 rx_ring->xdp_prog = NULL; in ice_free_rx_ring()
460 if (rx_ring->xsk_pool) { in ice_free_rx_ring()
461 kfree(rx_ring->xdp_buf); in ice_free_rx_ring()
462 rx_ring->xdp_buf = NULL; in ice_free_rx_ring()
464 kfree(rx_ring->rx_buf); in ice_free_rx_ring()
465 rx_ring->rx_buf = NULL; in ice_free_rx_ring()
468 if (rx_ring->desc) { in ice_free_rx_ring()
469 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), in ice_free_rx_ring()
471 dmam_free_coherent(rx_ring->dev, size, in ice_free_rx_ring()
472 rx_ring->desc, rx_ring->dma); in ice_free_rx_ring()
473 rx_ring->desc = NULL; in ice_free_rx_ring()
483 int ice_setup_rx_ring(struct ice_rx_ring *rx_ring) in ice_setup_rx_ring() argument
485 struct device *dev = rx_ring->dev; in ice_setup_rx_ring()
492 WARN_ON(rx_ring->rx_buf); in ice_setup_rx_ring()
493 rx_ring->rx_buf = in ice_setup_rx_ring()
494 kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL); in ice_setup_rx_ring()
495 if (!rx_ring->rx_buf) in ice_setup_rx_ring()
499 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), in ice_setup_rx_ring()
501 rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma, in ice_setup_rx_ring()
503 if (!rx_ring->desc) { in ice_setup_rx_ring()
509 rx_ring->next_to_use = 0; in ice_setup_rx_ring()
510 rx_ring->next_to_clean = 0; in ice_setup_rx_ring()
511 rx_ring->first_desc = 0; in ice_setup_rx_ring()
513 if (ice_is_xdp_ena_vsi(rx_ring->vsi)) in ice_setup_rx_ring()
514 WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog); in ice_setup_rx_ring()
516 if (rx_ring->vsi->type == ICE_VSI_PF && in ice_setup_rx_ring()
517 !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) in ice_setup_rx_ring()
518 if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, in ice_setup_rx_ring()
519 rx_ring->q_index, rx_ring->q_vector->napi.napi_id)) in ice_setup_rx_ring()
524 kfree(rx_ring->rx_buf); in ice_setup_rx_ring()
525 rx_ring->rx_buf = NULL; in ice_setup_rx_ring()
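
ice_setup_rx_ring() allocates the software buffer array before the descriptor memory and, if the second allocation fails, frees the first (lines 524-525). A sketch of that unwind pattern, with plain calloc() standing in for both kcalloc() and dmam_alloc_coherent():

    #include <stdio.h>
    #include <stdlib.h>

    struct ring {
        void *rx_buf;        /* per-slot software state (kcalloc in the driver) */
        void *desc;          /* descriptor area (dmam_alloc_coherent in the driver) */
        unsigned int count;
    };

    static int setup_rx_ring(struct ring *r, size_t buf_sz, size_t desc_sz)
    {
        r->rx_buf = calloc(r->count, buf_sz);
        if (!r->rx_buf)
            return -1;

        r->desc = calloc(1, desc_sz);   /* plain calloc stands in for the DMA alloc */
        if (!r->desc)
            goto err;

        return 0;

    err:
        /* unwind in reverse order of allocation, as lines 524-525 do */
        free(r->rx_buf);
        r->rx_buf = NULL;
        return -1;
    }

    int main(void)
    {
        struct ring r = { .count = 512 };

        if (setup_rx_ring(&r, 64, 512 * 32))
            return 1;
        puts("ring ready");
        free(r.desc);
        free(r.rx_buf);
        return 0;
    }
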
538 ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, const unsigned int size) in ice_rx_frame_truesize() argument
543 truesize = ice_rx_pg_size(rx_ring) / 2; /* Must be power-of-2 */ in ice_rx_frame_truesize()
545 truesize = rx_ring->rx_offset ? in ice_rx_frame_truesize()
546 SKB_DATA_ALIGN(rx_ring->rx_offset + size) + in ice_rx_frame_truesize()
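
ice_rx_frame_truesize() accounts a frame's true memory cost two ways: in page-split mode the cost is simply half a power-of-two page; otherwise it is the cache-aligned payload plus shared-info overhead. A self-contained model of both branches (the kernel picks the branch at compile time via PAGE_SIZE; here it is a runtime parameter, and CACHE_BYTES/SHINFO_SIZE are assumed constants, not the kernel's):

    #include <stdio.h>

    #define CACHE_BYTES   64u    /* assumed SMP_CACHE_BYTES */
    #define DATA_ALIGN(x) (((x) + CACHE_BYTES - 1) & ~(CACHE_BYTES - 1))
    #define SHINFO_SIZE   320u   /* rough sizeof(struct skb_shared_info) */

    static unsigned int frame_truesize(unsigned int page_size,
                                       unsigned int rx_offset, unsigned int size)
    {
        if (page_size < 8192)
            return page_size / 2;   /* page-split mode: must stay a power of 2 */

        /* large pages: charge the aligned payload plus shared-info overhead */
        return rx_offset ? DATA_ALIGN(rx_offset + size) + DATA_ALIGN(SHINFO_SIZE)
                         : DATA_ALIGN(size);
    }

    int main(void)
    {
        printf("4K page:          %u\n", frame_truesize(4096, 256, 1500));
        printf("64K page, offset: %u\n", frame_truesize(65536, 256, 1500));
        return 0;
    }
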
564 ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, in ice_run_xdp() argument
588 if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog)) in ice_run_xdp()
593 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); in ice_run_xdp()
597 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in ice_run_xdp()
605 ice_set_rx_bufs_act(xdp, rx_ring, ret); in ice_run_xdp()
700 ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi) in ice_alloc_mapped_page() argument
710 page = dev_alloc_pages(ice_rx_pg_order(rx_ring)); in ice_alloc_mapped_page()
712 rx_ring->ring_stats->rx_stats.alloc_page_failed++; in ice_alloc_mapped_page()
717 dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring), in ice_alloc_mapped_page()
723 if (dma_mapping_error(rx_ring->dev, dma)) { in ice_alloc_mapped_page()
724 __free_pages(page, ice_rx_pg_order(rx_ring)); in ice_alloc_mapped_page()
725 rx_ring->ring_stats->rx_stats.alloc_page_failed++; in ice_alloc_mapped_page()
731 bi->page_offset = rx_ring->rx_offset; in ice_alloc_mapped_page()
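
ice_alloc_mapped_page() charges allocation failures and DMA-mapping failures to the same alloc_page_failed counter, and frees the page again when only the mapping fails. A sketch of that pattern with a stubbed-out mapping call (dma_map() below is a hypothetical stand-in, not a kernel API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    static unsigned long alloc_page_failed;  /* models rx_stats.alloc_page_failed */

    /* stub for dma_map_page_attrs(); force_fail simulates a mapping error */
    static uintptr_t dma_map(void *page, int force_fail)
    {
        return force_fail ? 0 : (uintptr_t)page;
    }

    static int alloc_mapped_page(void **pagep, uintptr_t *dmap, int force_fail)
    {
        void *page = malloc(4096);   /* dev_alloc_pages() stand-in */

        if (!page) {
            alloc_page_failed++;     /* allocation and mapping share one stat */
            return 0;
        }

        uintptr_t dma = dma_map(page, force_fail);
        if (!dma) {
            free(page);              /* undo the allocation on a mapping error */
            alloc_page_failed++;
            return 0;
        }

        *pagep = page;
        *dmap = dma;
        return 1;
    }

    int main(void)
    {
        void *page = NULL;
        uintptr_t dma = 0;

        alloc_mapped_page(&page, &dma, 1);            /* forced map failure */
        printf("failures: %lu\n", alloc_page_failed); /* 1 */
        return 0;
    }
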
751 bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count) in ice_alloc_rx_bufs() argument
754 u16 ntu = rx_ring->next_to_use; in ice_alloc_rx_bufs()
758 if ((!rx_ring->netdev && rx_ring->vsi->type != ICE_VSI_CTRL) || in ice_alloc_rx_bufs()
763 rx_desc = ICE_RX_DESC(rx_ring, ntu); in ice_alloc_rx_bufs()
764 bi = &rx_ring->rx_buf[ntu]; in ice_alloc_rx_bufs()
768 if (!ice_alloc_mapped_page(rx_ring, bi)) in ice_alloc_rx_bufs()
772 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in ice_alloc_rx_bufs()
774 rx_ring->rx_buf_len, in ice_alloc_rx_bufs()
785 if (unlikely(ntu == rx_ring->count)) { in ice_alloc_rx_bufs()
786 rx_desc = ICE_RX_DESC(rx_ring, 0); in ice_alloc_rx_bufs()
787 bi = rx_ring->rx_buf; in ice_alloc_rx_bufs()
797 if (rx_ring->next_to_use != ntu) in ice_alloc_rx_bufs()
798 ice_release_rx_desc(rx_ring, ntu); in ice_alloc_rx_bufs()
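
The refill loop in ice_alloc_rx_bufs() advances next_to_use one slot at a time, wraps both cursors back to slot 0 when ntu hits count (line 785), and writes the hardware tail only if something was actually posted (lines 797-798). A runnable model of that index discipline (simplified names, allocation always succeeds here):

    #include <stdio.h>
    #include <stdbool.h>

    struct ring { unsigned int count, next_to_use, tail; };

    /* stand-in for ice_alloc_mapped_page(); always succeeds in this model */
    static bool alloc_buf(unsigned int slot) { (void)slot; return true; }

    /* returns true if the refill fell short, like ice_alloc_rx_bufs() */
    static bool alloc_rx_bufs(struct ring *r, unsigned int cleaned_count)
    {
        unsigned int ntu = r->next_to_use;

        if (!cleaned_count)
            return false;

        do {
            if (!alloc_buf(ntu))
                break;                  /* stop posting on allocation failure */

            ntu++;
            if (ntu == r->count)        /* wrap descriptor/buffer cursors */
                ntu = 0;

            cleaned_count--;
        } while (cleaned_count);

        if (r->next_to_use != ntu) {
            r->next_to_use = ntu;
            r->tail = ntu;              /* models ice_release_rx_desc(): bump HW tail */
        }

        return !!cleaned_count;
    }

    int main(void)
    {
        struct ring r = { .count = 8, .next_to_use = 6 };

        alloc_rx_bufs(&r, 4);
        printf("next_to_use=%u tail=%u\n", r.next_to_use, r.tail);  /* 2 2 */
        return 0;
    }
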
878 ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, in ice_add_xdp_frag() argument
894 ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED); in ice_add_xdp_frag()
916 ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf) in ice_reuse_rx_page() argument
918 u16 nta = rx_ring->next_to_alloc; in ice_reuse_rx_page()
921 new_buf = &rx_ring->rx_buf[nta]; in ice_reuse_rx_page()
925 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in ice_reuse_rx_page()
946 ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size, in ice_get_rx_buf() argument
951 rx_buf = &rx_ring->rx_buf[ntc]; in ice_get_rx_buf()
963 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, in ice_get_rx_buf()
983 ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) in ice_build_skb() argument
1009 skb_record_rx_queue(skb, rx_ring->q_index); in ice_build_skb()
1037 ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) in ice_construct_skb() argument
1055 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE, in ice_construct_skb()
1060 rx_buf = &rx_ring->rx_buf[rx_ring->first_desc]; in ice_construct_skb()
1061 skb_record_rx_queue(skb, rx_ring->q_index); in ice_construct_skb()
1118 ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf) in ice_put_rx_buf() argument
1125 ice_reuse_rx_page(rx_ring, rx_buf); in ice_put_rx_buf()
1128 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, in ice_put_rx_buf()
1129 ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE, in ice_put_rx_buf()
1150 int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) in ice_clean_rx_irq() argument
1153 unsigned int offset = rx_ring->rx_offset; in ice_clean_rx_irq()
1154 struct xdp_buff *xdp = &rx_ring->xdp; in ice_clean_rx_irq()
1157 u32 ntc = rx_ring->next_to_clean; in ice_clean_rx_irq()
1158 u32 cnt = rx_ring->count; in ice_clean_rx_irq()
1167 xdp->frame_sz = ice_rx_frame_truesize(rx_ring, 0); in ice_clean_rx_irq()
1170 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in ice_clean_rx_irq()
1172 xdp_ring = rx_ring->xdp_ring; in ice_clean_rx_irq()
1187 rx_desc = ICE_RX_DESC(rx_ring, ntc); in ice_clean_rx_irq()
1204 ice_trace(clean_rx_irq, rx_ring, rx_desc); in ice_clean_rx_irq()
1205 if (rx_desc->wb.rxdid == FDIR_DESC_RXDID || !rx_ring->netdev) { in ice_clean_rx_irq()
1206 struct ice_vsi *ctrl_vsi = rx_ring->vsi; in ice_clean_rx_irq()
1220 rx_buf = ice_get_rx_buf(rx_ring, size, ntc); in ice_clean_rx_irq()
1230 xdp->frame_sz = ice_rx_frame_truesize(rx_ring, size); in ice_clean_rx_irq()
1233 } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) { in ice_clean_rx_irq()
1240 if (ice_is_non_eop(rx_ring, rx_desc)) in ice_clean_rx_irq()
1243 ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_buf); in ice_clean_rx_irq()
1250 rx_ring->first_desc = ntc; in ice_clean_rx_irq()
1253 if (likely(ice_ring_uses_build_skb(rx_ring))) in ice_clean_rx_irq()
1254 skb = ice_build_skb(rx_ring, xdp); in ice_clean_rx_irq()
1256 skb = ice_construct_skb(rx_ring, xdp); in ice_clean_rx_irq()
1259 rx_ring->ring_stats->rx_stats.alloc_page_failed++; in ice_clean_rx_irq()
1262 ice_set_rx_bufs_act(xdp, rx_ring, in ice_clean_rx_irq()
1265 rx_ring->first_desc = ntc; in ice_clean_rx_irq()
1269 rx_ring->first_desc = ntc; in ice_clean_rx_irq()
1291 ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype); in ice_clean_rx_irq()
1293 ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb); in ice_clean_rx_irq()
1295 ice_receive_skb(rx_ring, skb, vlan_tag); in ice_clean_rx_irq()
1301 first = rx_ring->first_desc; in ice_clean_rx_irq()
1303 struct ice_rx_buf *buf = &rx_ring->rx_buf[cached_ntc]; in ice_clean_rx_irq()
1314 ice_put_rx_buf(rx_ring, buf); in ice_clean_rx_irq()
1318 rx_ring->next_to_clean = ntc; in ice_clean_rx_irq()
1320 failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring)); in ice_clean_rx_irq()
1325 if (rx_ring->ring_stats) in ice_clean_rx_irq()
1326 ice_update_rx_ring_stats(rx_ring, total_rx_pkts, in ice_clean_rx_irq()
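
ice_clean_rx_irq() is a classic NAPI budget loop: consume completed descriptors until the budget is spent or the ring is drained, wrapping next_to_clean at count, then refill what was consumed. A minimal model of just that control flow (descriptor completion is faked with a counter; the XDP and skb handling shown above is elided):

    #include <stdio.h>

    struct ring { unsigned int count, next_to_clean; };

    static unsigned int ready = 5;   /* pretend 5 descriptors have their DD bit set */

    static int desc_done(void)
    {
        if (!ready)
            return 0;
        ready--;
        return 1;
    }

    static int clean_rx_irq(struct ring *r, int budget)
    {
        unsigned int ntc = r->next_to_clean;
        int total_rx_pkts = 0;

        while (total_rx_pkts < budget) {
            if (!desc_done())
                break;               /* nothing more completed: ring is drained */

            /* a dma_rmb() sits here in the driver: read the descriptor body
             * only after its done bit has been observed */

            if (++ntc == r->count)   /* wrap next_to_clean */
                ntc = 0;

            total_rx_pkts++;
        }

        r->next_to_clean = ntc;
        /* the driver then refills the slots it consumed:
         * ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring)) */
        return total_rx_pkts;
    }

    int main(void)
    {
        struct ring r = { .count = 4, .next_to_clean = 2 };

        printf("cleaned %d, ntc=%u\n", clean_rx_irq(&r, 64), r.next_to_clean);
        return 0;
    }
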
1353 struct ice_rx_ring *rx_ring; in __ice_update_sample() local
1355 ice_for_each_rx_ring(rx_ring, *rc) { in __ice_update_sample()
1358 ring_stats = rx_ring->ring_stats; in __ice_update_sample()
1517 struct ice_rx_ring *rx_ring; in ice_napi_poll() local
1554 ice_for_each_rx_ring(rx_ring, q_vector->rx) { in ice_napi_poll()
1561 cleaned = rx_ring->xsk_pool ? in ice_napi_poll()
1562 ice_clean_rx_irq_zc(rx_ring, budget_per_ring) : in ice_napi_poll()
1563 ice_clean_rx_irq(rx_ring, budget_per_ring); in ice_napi_poll()