Lines Matching refs:rx_ring

382 void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)  in ice_clean_rx_ring()  argument
384 struct xdp_buff *xdp = &rx_ring->xdp; in ice_clean_rx_ring()
385 struct device *dev = rx_ring->dev; in ice_clean_rx_ring()
390 if (!rx_ring->rx_buf) in ice_clean_rx_ring()
393 if (rx_ring->xsk_pool) { in ice_clean_rx_ring()
394 ice_xsk_clean_rx_ring(rx_ring); in ice_clean_rx_ring()
404 for (i = 0; i < rx_ring->count; i++) { in ice_clean_rx_ring()
405 struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i]; in ice_clean_rx_ring()
415 rx_ring->rx_buf_len, in ice_clean_rx_ring()
419 dma_unmap_page_attrs(dev, rx_buf->dma, ice_rx_pg_size(rx_ring), in ice_clean_rx_ring()
428 if (rx_ring->xsk_pool) in ice_clean_rx_ring()
429 memset(rx_ring->xdp_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->xdp_buf))); in ice_clean_rx_ring()
431 memset(rx_ring->rx_buf, 0, array_size(rx_ring->count, sizeof(*rx_ring->rx_buf))); in ice_clean_rx_ring()
434 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), in ice_clean_rx_ring()
436 memset(rx_ring->desc, 0, size); in ice_clean_rx_ring()
438 rx_ring->next_to_alloc = 0; in ice_clean_rx_ring()
439 rx_ring->next_to_clean = 0; in ice_clean_rx_ring()
440 rx_ring->first_desc = 0; in ice_clean_rx_ring()
441 rx_ring->next_to_use = 0; in ice_clean_rx_ring()
450 void ice_free_rx_ring(struct ice_rx_ring *rx_ring) in ice_free_rx_ring() argument
454 ice_clean_rx_ring(rx_ring); in ice_free_rx_ring()
455 if (rx_ring->vsi->type == ICE_VSI_PF) in ice_free_rx_ring()
456 if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) in ice_free_rx_ring()
457 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in ice_free_rx_ring()
458 WRITE_ONCE(rx_ring->xdp_prog, NULL); in ice_free_rx_ring()
459 if (rx_ring->xsk_pool) { in ice_free_rx_ring()
460 kfree(rx_ring->xdp_buf); in ice_free_rx_ring()
461 rx_ring->xdp_buf = NULL; in ice_free_rx_ring()
463 kfree(rx_ring->rx_buf); in ice_free_rx_ring()
464 rx_ring->rx_buf = NULL; in ice_free_rx_ring()
467 if (rx_ring->desc) { in ice_free_rx_ring()
468 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), in ice_free_rx_ring()
470 dmam_free_coherent(rx_ring->dev, size, in ice_free_rx_ring()
471 rx_ring->desc, rx_ring->dma); in ice_free_rx_ring()
472 rx_ring->desc = NULL; in ice_free_rx_ring()
482 int ice_setup_rx_ring(struct ice_rx_ring *rx_ring) in ice_setup_rx_ring() argument
484 struct device *dev = rx_ring->dev; in ice_setup_rx_ring()
491 WARN_ON(rx_ring->rx_buf); in ice_setup_rx_ring()
492 rx_ring->rx_buf = in ice_setup_rx_ring()
493 kcalloc(rx_ring->count, sizeof(*rx_ring->rx_buf), GFP_KERNEL); in ice_setup_rx_ring()
494 if (!rx_ring->rx_buf) in ice_setup_rx_ring()
498 size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), in ice_setup_rx_ring()
500 rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma, in ice_setup_rx_ring()
502 if (!rx_ring->desc) { in ice_setup_rx_ring()
508 rx_ring->next_to_use = 0; in ice_setup_rx_ring()
509 rx_ring->next_to_clean = 0; in ice_setup_rx_ring()
510 rx_ring->first_desc = 0; in ice_setup_rx_ring()
512 if (ice_is_xdp_ena_vsi(rx_ring->vsi)) in ice_setup_rx_ring()
513 WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog); in ice_setup_rx_ring()
518 kfree(rx_ring->rx_buf); in ice_setup_rx_ring()
519 rx_ring->rx_buf = NULL; in ice_setup_rx_ring()
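
The ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc), ...) matches in ice_clean_rx_ring(), ice_free_rx_ring() and ice_setup_rx_ring() all compute the size of the same descriptor area: the value that ice_clean_rx_ring() memsets and that ice_free_rx_ring() hands back to dmam_free_coherent(). The alignment argument itself is truncated out of the matches; a minimal standalone C sketch of the sizing, assuming a 4096-byte boundary, is:

/* Standalone sketch only: DEMO_ALIGN mirrors the kernel's ALIGN() macro and
 * the 4096-byte boundary is an assumption, not taken from the matches above.
 */
#include <stdio.h>

#define DEMO_ALIGN(x, a)	(((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long count = 512;		/* descriptors in the ring */
	unsigned long desc_size = 32;		/* union ice_32byte_rx_desc */
	unsigned long size = DEMO_ALIGN(count * desc_size, 4096);

	printf("descriptor area: %lu bytes\n", size);	/* prints 16384 */
	return 0;
}
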
534 ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, in ice_run_xdp() argument
560 if (xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog)) in ice_run_xdp()
565 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); in ice_run_xdp()
569 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in ice_run_xdp()
670 ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi) in ice_alloc_mapped_page() argument
680 page = dev_alloc_pages(ice_rx_pg_order(rx_ring)); in ice_alloc_mapped_page()
682 rx_ring->ring_stats->rx_stats.alloc_page_failed++; in ice_alloc_mapped_page()
687 dma = dma_map_page_attrs(rx_ring->dev, page, 0, ice_rx_pg_size(rx_ring), in ice_alloc_mapped_page()
693 if (dma_mapping_error(rx_ring->dev, dma)) { in ice_alloc_mapped_page()
694 __free_pages(page, ice_rx_pg_order(rx_ring)); in ice_alloc_mapped_page()
695 rx_ring->ring_stats->rx_stats.alloc_page_failed++; in ice_alloc_mapped_page()
701 bi->page_offset = rx_ring->rx_offset; in ice_alloc_mapped_page()
713 void ice_init_ctrl_rx_descs(struct ice_rx_ring *rx_ring, u32 count) in ice_init_ctrl_rx_descs() argument
716 u32 ntu = rx_ring->next_to_use; in ice_init_ctrl_rx_descs()
721 rx_desc = ICE_RX_DESC(rx_ring, ntu); in ice_init_ctrl_rx_descs()
726 if (unlikely(ntu == rx_ring->count)) { in ice_init_ctrl_rx_descs()
727 rx_desc = ICE_RX_DESC(rx_ring, 0); in ice_init_ctrl_rx_descs()
735 if (rx_ring->next_to_use != ntu) in ice_init_ctrl_rx_descs()
736 ice_release_rx_desc(rx_ring, ntu); in ice_init_ctrl_rx_descs()
752 bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, unsigned int cleaned_count) in ice_alloc_rx_bufs() argument
755 u16 ntu = rx_ring->next_to_use; in ice_alloc_rx_bufs()
759 if (!rx_ring->netdev || !cleaned_count) in ice_alloc_rx_bufs()
763 rx_desc = ICE_RX_DESC(rx_ring, ntu); in ice_alloc_rx_bufs()
764 bi = &rx_ring->rx_buf[ntu]; in ice_alloc_rx_bufs()
768 if (!ice_alloc_mapped_page(rx_ring, bi)) in ice_alloc_rx_bufs()
772 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in ice_alloc_rx_bufs()
774 rx_ring->rx_buf_len, in ice_alloc_rx_bufs()
785 if (unlikely(ntu == rx_ring->count)) { in ice_alloc_rx_bufs()
786 rx_desc = ICE_RX_DESC(rx_ring, 0); in ice_alloc_rx_bufs()
787 bi = rx_ring->rx_buf; in ice_alloc_rx_bufs()
797 if (rx_ring->next_to_use != ntu) in ice_alloc_rx_bufs()
798 ice_release_rx_desc(rx_ring, ntu); in ice_alloc_rx_bufs()
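
ice_init_ctrl_rx_descs() and ice_alloc_rx_bufs() share one producer-cursor pattern: advance next_to_use (ntu), wrap back to descriptor 0 once the cursor hits rx_ring->count, and publish the new tail via ice_release_rx_desc() only if the cursor actually moved. A simplified, userspace-only sketch of that pattern (all demo_* names are hypothetical stand-ins, not driver symbols):

/* demo_alloc_buf() stands in for ice_alloc_mapped_page() and
 * demo_release_desc() for ice_release_rx_desc(); everything else is the
 * cursor logic visible in the matches above. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_ring {
	uint16_t count;		/* descriptors in the ring */
	uint16_t next_to_use;	/* producer cursor (ntu) */
};

static bool demo_alloc_buf(struct demo_ring *r, uint16_t idx)
{
	(void)r; (void)idx;
	return true;			/* pretend allocation always succeeds */
}

static void demo_release_desc(struct demo_ring *r, uint16_t ntu)
{
	r->next_to_use = ntu;		/* the driver also bumps the HW tail here */
	printf("tail published at %u\n", ntu);
}

static bool demo_alloc_rx_bufs(struct demo_ring *r, unsigned int cleaned_count)
{
	uint16_t ntu = r->next_to_use;

	if (!cleaned_count)
		return false;

	do {
		if (!demo_alloc_buf(r, ntu))
			break;			/* leave the rest for the next poll */

		ntu++;
		if (ntu == r->count)		/* wrap around the ring */
			ntu = 0;

		cleaned_count--;
	} while (cleaned_count);

	if (r->next_to_use != ntu)		/* publish only if we advanced */
		demo_release_desc(r, ntu);

	return !!cleaned_count;			/* true means the refill fell short */
}

int main(void)
{
	struct demo_ring ring = { .count = 8, .next_to_use = 6 };

	demo_alloc_rx_bufs(&ring, 4);		/* cursor wraps 6 -> 7 -> 0 -> 1 -> 2 */
	return 0;
}
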
877 ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, in ice_add_xdp_frag() argument
900 rx_ring->nr_frags = sinfo->nr_frags; in ice_add_xdp_frag()
916 ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf) in ice_reuse_rx_page() argument
918 u16 nta = rx_ring->next_to_alloc; in ice_reuse_rx_page()
921 new_buf = &rx_ring->rx_buf[nta]; in ice_reuse_rx_page()
925 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in ice_reuse_rx_page()
947 ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size, in ice_get_rx_buf() argument
952 rx_buf = &rx_ring->rx_buf[ntc]; in ice_get_rx_buf()
958 dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, in ice_get_rx_buf()
977 static void ice_get_pgcnts(struct ice_rx_ring *rx_ring) in ice_get_pgcnts() argument
979 u32 nr_frags = rx_ring->nr_frags + 1; in ice_get_pgcnts()
980 u32 idx = rx_ring->first_desc; in ice_get_pgcnts()
982 u32 cnt = rx_ring->count; in ice_get_pgcnts()
985 rx_buf = &rx_ring->rx_buf[idx]; in ice_get_pgcnts()
1003 ice_build_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) in ice_build_skb() argument
1029 skb_record_rx_queue(skb, rx_ring->q_index); in ice_build_skb()
1056 ice_construct_skb(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp) in ice_construct_skb() argument
1074 skb = napi_alloc_skb(&rx_ring->q_vector->napi, ICE_RX_HDR_SIZE); in ice_construct_skb()
1078 rx_buf = &rx_ring->rx_buf[rx_ring->first_desc]; in ice_construct_skb()
1079 skb_record_rx_queue(skb, rx_ring->q_index); in ice_construct_skb()
1136 ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf) in ice_put_rx_buf() argument
1143 ice_reuse_rx_page(rx_ring, rx_buf); in ice_put_rx_buf()
1146 dma_unmap_page_attrs(rx_ring->dev, rx_buf->dma, in ice_put_rx_buf()
1147 ice_rx_pg_size(rx_ring), DMA_FROM_DEVICE, in ice_put_rx_buf()
1168 static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, in ice_put_rx_mbuf() argument
1171 u32 nr_frags = rx_ring->nr_frags + 1; in ice_put_rx_mbuf()
1172 u32 idx = rx_ring->first_desc; in ice_put_rx_mbuf()
1173 u32 cnt = rx_ring->count; in ice_put_rx_mbuf()
1182 buf = &rx_ring->rx_buf[idx]; in ice_put_rx_mbuf()
1193 ice_put_rx_buf(rx_ring, buf); in ice_put_rx_mbuf()
1204 buf = &rx_ring->rx_buf[idx]; in ice_put_rx_mbuf()
1205 ice_put_rx_buf(rx_ring, buf); in ice_put_rx_mbuf()
1211 rx_ring->first_desc = ntc; in ice_put_rx_mbuf()
1212 rx_ring->nr_frags = 0; in ice_put_rx_mbuf()
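
ice_put_rx_mbuf() walks every buffer that backed the frame just processed: nr_frags + 1 buffers starting at first_desc, wrapping at rx_ring->count, each handed to ice_put_rx_buf(), after which first_desc is moved up to ntc and nr_frags is reset. The real function also looks at the xdp_buff and the XDP verdict (buffers consumed by XDP are treated differently), which the following simplified userspace sketch ignores; demo_put_buf() is a hypothetical stand-in for ice_put_rx_buf():

#include <stdint.h>
#include <stdio.h>

struct demo_rx_ring {
	uint32_t count;		/* ring size */
	uint32_t first_desc;	/* first descriptor of the current frame */
	uint32_t nr_frags;	/* fragments beyond the head buffer */
};

static void demo_put_buf(uint32_t idx)
{
	printf("releasing buffer %u\n", idx);	/* recycle-or-unmap happens here */
}

static void demo_put_rx_mbuf(struct demo_rx_ring *r, uint32_t ntc)
{
	uint32_t nr_bufs = r->nr_frags + 1;	/* head buffer plus fragments */
	uint32_t idx = r->first_desc;

	while (nr_bufs--) {
		demo_put_buf(idx);
		if (++idx == r->count)		/* wrap at the end of the ring */
			idx = 0;
	}

	r->first_desc = ntc;	/* next frame starts where cleaning stopped */
	r->nr_frags = 0;
}

int main(void)
{
	struct demo_rx_ring ring = { .count = 8, .first_desc = 6, .nr_frags = 2 };

	demo_put_rx_mbuf(&ring, 1);		/* releases buffers 6, 7 and 0 */
	return 0;
}
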
1222 void ice_clean_ctrl_rx_irq(struct ice_rx_ring *rx_ring) in ice_clean_ctrl_rx_irq() argument
1224 u32 ntc = rx_ring->next_to_clean; in ice_clean_ctrl_rx_irq()
1226 u32 cnt = rx_ring->count; in ice_clean_ctrl_rx_irq()
1229 struct ice_vsi *ctrl_vsi = rx_ring->vsi; in ice_clean_ctrl_rx_irq()
1233 rx_desc = ICE_RX_DESC(rx_ring, ntc); in ice_clean_ctrl_rx_irq()
1249 rx_ring->first_desc = ntc; in ice_clean_ctrl_rx_irq()
1250 rx_ring->next_to_clean = ntc; in ice_clean_ctrl_rx_irq()
1251 ice_init_ctrl_rx_descs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring)); in ice_clean_ctrl_rx_irq()
1266 static int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) in ice_clean_rx_irq() argument
1269 unsigned int offset = rx_ring->rx_offset; in ice_clean_rx_irq()
1270 struct xdp_buff *xdp = &rx_ring->xdp; in ice_clean_rx_irq()
1273 u32 ntc = rx_ring->next_to_clean; in ice_clean_rx_irq()
1275 u32 cnt = rx_ring->count; in ice_clean_rx_irq()
1279 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in ice_clean_rx_irq()
1281 xdp_ring = rx_ring->xdp_ring; in ice_clean_rx_irq()
1295 rx_desc = ICE_RX_DESC(rx_ring, ntc); in ice_clean_rx_irq()
1312 ice_trace(clean_rx_irq, rx_ring, rx_desc); in ice_clean_rx_irq()
1318 rx_buf = ice_get_rx_buf(rx_ring, size, ntc); in ice_clean_rx_irq()
1327 } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) { in ice_clean_rx_irq()
1328 ice_put_rx_mbuf(rx_ring, xdp, NULL, ntc, ICE_XDP_CONSUMED); in ice_clean_rx_irq()
1335 if (ice_is_non_eop(rx_ring, rx_desc)) in ice_clean_rx_irq()
1338 ice_get_pgcnts(rx_ring); in ice_clean_rx_irq()
1339 xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc); in ice_clean_rx_irq()
1345 ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict); in ice_clean_rx_irq()
1349 if (likely(ice_ring_uses_build_skb(rx_ring))) in ice_clean_rx_irq()
1350 skb = ice_build_skb(rx_ring, xdp); in ice_clean_rx_irq()
1352 skb = ice_construct_skb(rx_ring, xdp); in ice_clean_rx_irq()
1355 rx_ring->ring_stats->rx_stats.alloc_page_failed++; in ice_clean_rx_irq()
1358 ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict); in ice_clean_rx_irq()
1380 ice_process_skb_fields(rx_ring, rx_desc, skb); in ice_clean_rx_irq()
1382 ice_trace(clean_rx_irq_indicate, rx_ring, rx_desc, skb); in ice_clean_rx_irq()
1384 ice_receive_skb(rx_ring, skb, vlan_tci); in ice_clean_rx_irq()
1390 rx_ring->next_to_clean = ntc; in ice_clean_rx_irq()
1392 failure = ice_alloc_rx_bufs(rx_ring, ICE_RX_DESC_UNUSED(rx_ring)); in ice_clean_rx_irq()
1397 if (rx_ring->ring_stats) in ice_clean_rx_irq()
1398 ice_update_rx_ring_stats(rx_ring, total_rx_pkts, in ice_clean_rx_irq()
1425 struct ice_rx_ring *rx_ring; in __ice_update_sample() local
1427 ice_for_each_rx_ring(rx_ring, *rc) { in __ice_update_sample()
1430 ring_stats = rx_ring->ring_stats; in __ice_update_sample()
1589 struct ice_rx_ring *rx_ring; in ice_napi_poll() local
1627 ice_for_each_rx_ring(rx_ring, q_vector->rx) { in ice_napi_poll()
1628 struct xsk_buff_pool *xsk_pool = READ_ONCE(rx_ring->xsk_pool); in ice_napi_poll()
1635 cleaned = rx_ring->xsk_pool ? in ice_napi_poll()
1636 ice_clean_rx_irq_zc(rx_ring, xsk_pool, budget_per_ring) : in ice_napi_poll()
1637 ice_clean_rx_irq(rx_ring, budget_per_ring); in ice_napi_poll()
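
The ice_napi_poll() matches show the per-ring dispatch: each Rx ring on the vector gets a slice of the NAPI budget (budget_per_ring) and is cleaned either by the zero-copy routine, when an xsk_pool is attached, or by the regular ice_clean_rx_irq(). A simplified userspace sketch of that dispatch, with all demo_* names as hypothetical stand-ins:

#include <stdbool.h>
#include <stdio.h>

struct demo_rx_ring {
	bool has_xsk_pool;	/* stands in for rx_ring->xsk_pool != NULL */
	int id;
};

static int demo_clean_rx_irq_zc(struct demo_rx_ring *r, int budget)
{
	printf("ring %d: zero-copy clean, budget %d\n", r->id, budget);
	return 0;		/* packets cleaned */
}

static int demo_clean_rx_irq(struct demo_rx_ring *r, int budget)
{
	printf("ring %d: regular clean, budget %d\n", r->id, budget);
	return 0;
}

int main(void)
{
	struct demo_rx_ring rings[] = {
		{ .has_xsk_pool = false, .id = 0 },
		{ .has_xsk_pool = true,  .id = 1 },
	};
	int nrings = 2, budget = 64;
	int budget_per_ring = budget / nrings;
	int work_done = 0;

	for (int i = 0; i < nrings; i++) {
		struct demo_rx_ring *r = &rings[i];

		/* the driver snapshots the pool pointer with READ_ONCE() before
		 * branching; a plain read is enough for this sketch */
		work_done += r->has_xsk_pool ?
			     demo_clean_rx_irq_zc(r, budget_per_ring) :
			     demo_clean_rx_irq(r, budget_per_ring);
	}

	printf("total cleaned: %d\n", work_done);
	return 0;
}
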