Searched refs:nb_pkts (Results 1 – 5 of 5) sorted by relevance
/linux-6.3-rc2/tools/testing/selftests/bpf/
xskxceiver.c
    558  if (pkt_nb >= pkt_stream->nb_pkts)                              in pkt_stream_get_pkt()
    611  pkt_stream->nb_pkts = nb_pkts;                                  in __pkt_stream_alloc()
    630  pkt_stream = __pkt_stream_alloc(nb_pkts);                       in pkt_stream_generate()
    634  pkt_stream->nb_pkts = nb_pkts;                                  in pkt_stream_generate()
    635  for (i = 0; i < nb_pkts; i++) {                                 in pkt_stream_generate()
    668  for (i = 1; i < ifobj->pkt_stream->nb_pkts; i += 2)             in __pkt_stream_replace_half()
    690  for (i = 1; i < pkt_stream->nb_pkts; i += 2)                    in pkt_stream_receive_half()
    721  struct pkt *pkts, u32 nb_pkts)                                  in __pkt_stream_generate_custom() argument
    726  pkt_stream = __pkt_stream_alloc(nb_pkts);                       in __pkt_stream_generate_custom()
    730  for (i = 0; i < nb_pkts; i++) {                                 in __pkt_stream_generate_custom()
    [all …]
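The selftest hits above revolve around a packet-stream object whose length is tracked in nb_pkts (the xskxceiver.h hit below shows it as a struct member). The following is a rough, self-contained sketch of the alloc-then-fill shape visible at xskxceiver.c:611-635; the struct layouts and the pkt_len parameter are simplified assumptions here, not the selftest's real definitions.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    /* Simplified stand-ins for the selftest's types; the real layouts in
     * xskxceiver.h carry more fields than shown here (assumption). */
    struct pkt {
            uint32_t len;
            bool valid;
    };

    struct pkt_stream {
            uint32_t nb_pkts;
            struct pkt *pkts;
    };

    /* Allocate a stream, record its length in nb_pkts, then fill every
     * entry: the same alloc-then-loop shape as pkt_stream_generate() above. */
    static struct pkt_stream *pkt_stream_generate_sketch(uint32_t nb_pkts,
                                                         uint32_t pkt_len)
    {
            struct pkt_stream *ps = calloc(1, sizeof(*ps));
            uint32_t i;

            if (!ps)
                    return NULL;

            ps->pkts = calloc(nb_pkts, sizeof(*ps->pkts));
            if (!ps->pkts) {
                    free(ps);
                    return NULL;
            }

            ps->nb_pkts = nb_pkts;
            for (i = 0; i < nb_pkts; i++) {
                    ps->pkts[i].len = pkt_len;
                    ps->pkts[i].valid = true;
            }
            return ps;
    }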
xskxceiver.h
    126  u32 nb_pkts;                                                    member
/linux-6.3-rc2/drivers/net/ethernet/intel/i40e/
i40e_xsk.c
    507  static void i40e_fill_tx_hw_ring(struct i40e_ring *xdp_ring, struct xdp_desc *descs, u32 nb_pkts,    in i40e_fill_tx_hw_ring() argument
    512  batched = nb_pkts & ~(PKTS_PER_BATCH - 1);                      in i40e_fill_tx_hw_ring()
    513  leftover = nb_pkts & (PKTS_PER_BATCH - 1);                      in i40e_fill_tx_hw_ring()
    539  u32 nb_pkts, nb_processed = 0;                                  in i40e_xmit_zc() local
    542  nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);    in i40e_xmit_zc()
    543  if (!nb_pkts)                                                   in i40e_xmit_zc()
    546  if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {       in i40e_xmit_zc()
    552  i40e_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,    in i40e_xmit_zc()
    559  i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes);           in i40e_xmit_zc()
    561  return nb_pkts < budget;                                        in i40e_xmit_zc()
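The i40e_xmit_zc() hits show the usual zero-copy TX shape: peek a batch of up to budget descriptors from the XSK pool, and if the batch would run past the end of the hardware ring, split it at the wrap point; the nb_pkts < budget result at line 561 then tells the caller whether the whole budget was consumed. Below is a minimal sketch of that split under simplified, assumed types; fill_tx_sketch() only stands in for what i40e_fill_tx_hw_ring() does, it is not the driver's real helper.

    #include <stdint.h>

    struct desc_sketch {
            uint64_t addr;
            uint32_t len;
    };

    struct tx_ring_sketch {
            uint32_t next_to_use;   /* producer index into the HW ring */
            uint32_t count;         /* number of ring entries */
    };

    /* Placeholder for i40e_fill_tx_hw_ring(): post nb_pkts descriptors
     * starting at next_to_use and account their bytes. */
    static void fill_tx_sketch(struct tx_ring_sketch *ring, struct desc_sketch *descs,
                               uint32_t nb_pkts, unsigned int *total_bytes)
    {
            uint32_t i;

            for (i = 0; i < nb_pkts; i++)
                    *total_bytes += descs[i].len;
            ring->next_to_use += nb_pkts;
    }

    /* Split-at-wrap shape from the i40e_xmit_zc() hits at lines 546-552:
     * fill up to the end of the ring, reset next_to_use to 0, then fill
     * the remainder of the batch. */
    static void xmit_batch_sketch(struct tx_ring_sketch *ring, struct desc_sketch *descs,
                                  uint32_t nb_pkts, unsigned int *total_bytes)
    {
            uint32_t nb_processed = 0;

            if (ring->next_to_use + nb_pkts >= ring->count) {
                    nb_processed = ring->count - ring->next_to_use;
                    fill_tx_sketch(ring, descs, nb_processed, total_bytes);
                    ring->next_to_use = 0;
            }
            fill_tx_sketch(ring, &descs[nb_processed], nb_pkts - nb_processed,
                           total_bytes);
    }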
/linux-6.3-rc2/net/xdp/
xsk.c
    350  u32 nb_pkts = 0;                                                in xsk_tx_peek_release_fallback() local
    352  while (nb_pkts < max_entries && xsk_tx_peek_desc(pool, &descs[nb_pkts]))    in xsk_tx_peek_release_fallback()
    353  nb_pkts++;                                                      in xsk_tx_peek_release_fallback()
    356  return nb_pkts;                                                 in xsk_tx_peek_release_fallback()
    372  nb_pkts = 0;                                                    in xsk_tx_peek_release_desc_batch()
    376  nb_pkts = xskq_cons_nb_entries(xs->tx, nb_pkts);                in xsk_tx_peek_release_desc_batch()
    384  nb_pkts = xskq_prod_nb_free(pool->cq, nb_pkts);                 in xsk_tx_peek_release_desc_batch()
    385  if (!nb_pkts)                                                   in xsk_tx_peek_release_desc_batch()
    388  nb_pkts = xskq_cons_read_desc_batch(xs->tx, pool, nb_pkts);     in xsk_tx_peek_release_desc_batch()
    389  if (!nb_pkts) {                                                 in xsk_tx_peek_release_desc_batch()
    [all …]
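The net/xdp/xsk.c hits cover both halves of batched TX peeking: a per-descriptor fallback loop (lines 350-356) and the batched path, where nb_pkts is clamped successively by the fill level of the socket's TX ring and the free space in the completion ring (lines 376-388). A small model of both shapes follows; the callback and the min-based clamping are stand-ins for the real xskq_* queue helpers, not their actual signatures.

    #include <stdbool.h>
    #include <stdint.h>

    struct desc_sketch {
            uint64_t addr;
            uint32_t len;
    };

    /* Fallback shape: peek one descriptor at a time until max_entries is
     * reached or the TX ring runs dry. peek_one() stands in for
     * xsk_tx_peek_desc(). */
    static uint32_t peek_release_fallback_sketch(struct desc_sketch *descs,
                                                 uint32_t max_entries,
                                                 bool (*peek_one)(struct desc_sketch *))
    {
            uint32_t nb_pkts = 0;

            while (nb_pkts < max_entries && peek_one(&descs[nb_pkts]))
                    nb_pkts++;

            return nb_pkts;
    }

    /* Batched shape: the effective batch is min(budget, TX descriptors
     * queued, completion-ring slots free), mirroring the successive
     * clamping in the hits above. */
    static uint32_t batch_size_sketch(uint32_t budget, uint32_t tx_queued,
                                      uint32_t cq_free)
    {
            uint32_t nb_pkts = budget < tx_queued ? budget : tx_queued;

            if (!nb_pkts)
                    return 0;

            return nb_pkts < cq_free ? nb_pkts : cq_free;
    }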
/linux-6.3-rc2/drivers/net/ethernet/intel/ice/
ice_xsk.c
    945  u32 nb_pkts, unsigned int *total_bytes)                         in ice_fill_tx_hw_ring() argument
    949  batched = ALIGN_DOWN(nb_pkts, PKTS_PER_BATCH);                  in ice_fill_tx_hw_ring()
    950  leftover = nb_pkts & (PKTS_PER_BATCH - 1);                      in ice_fill_tx_hw_ring()
    966  u32 nb_pkts, nb_processed = 0;                                  in ice_xmit_zc() local
    975  nb_pkts = xsk_tx_peek_release_desc_batch(xdp_ring->xsk_pool, budget);    in ice_xmit_zc()
    976  if (!nb_pkts)                                                   in ice_xmit_zc()
    979  if (xdp_ring->next_to_use + nb_pkts >= xdp_ring->count) {       in ice_xmit_zc()
    985  ice_fill_tx_hw_ring(xdp_ring, &descs[nb_processed], nb_pkts - nb_processed,    in ice_xmit_zc()
    990  ice_update_tx_ring_stats(xdp_ring, nb_pkts, total_bytes);       in ice_xmit_zc()
    995  return nb_pkts < budget;                                        in ice_xmit_zc()
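ice_fill_tx_hw_ring() and ice_xmit_zc() follow the same batching and wrap-handling structure as the i40e hits above; the only cosmetic difference visible here is that the rounded-down portion is computed with ALIGN_DOWN() rather than an explicit mask. The mask arithmetic at lines 512-513 and 950 only makes sense for a power-of-two PKTS_PER_BATCH, and for such values the two spellings are equivalent, as the toy check below illustrates; the value 8 is illustrative, not necessarily the driver's actual constant.

    #include <assert.h>
    #include <stdint.h>

    /* Kernel-style ALIGN_DOWN for a power-of-two alignment; for such values
     * it is the same operation as the explicit mask used in
     * i40e_fill_tx_hw_ring(). */
    #define ALIGN_DOWN_POW2(x, a)   ((x) & ~((uint32_t)(a) - 1))

    int main(void)
    {
            uint32_t batch = 8;     /* illustrative stand-in for PKTS_PER_BATCH */
            uint32_t nb_pkts = 29;

            uint32_t batched = ALIGN_DOWN_POW2(nb_pkts, batch);  /* 24: sent in full batches */
            uint32_t leftover = nb_pkts & (batch - 1);           /*  5: sent one at a time   */

            assert(batched == 24 && leftover == 5);
            assert(batched + leftover == nb_pkts);
            return 0;
    }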
Completed in 20 milliseconds