/linux/drivers/net/ethernet/intel/ice/
ice_xsk.c
     355  if (!rx_ring->xsk_pool)  in ice_realloc_zc_buf()
     608  struct xsk_buff_pool *xsk_pool)  in ice_clean_xdp_irq_zc() argument
     842  struct xsk_buff_pool *xsk_pool,  in ice_clean_rx_irq_zc() argument
     909  xsk_pool);  in ice_clean_rx_irq_zc()
     964  if (xsk_uses_need_wakeup(xsk_pool)) {  in ice_clean_rx_irq_zc()
     969  xsk_set_rx_need_wakeup(xsk_pool);  in ice_clean_rx_irq_zc()
     971  xsk_clear_rx_need_wakeup(xsk_pool);  in ice_clean_rx_irq_zc()
    1046  struct xsk_buff_pool *xsk_pool,  in ice_fill_tx_hw_ring() argument
    1101  if (xsk_uses_need_wakeup(xsk_pool))  in ice_xmit_zc()
    1102  xsk_set_tx_need_wakeup(xsk_pool);  in ice_xmit_zc()
    [all …]
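The ice_clean_rx_irq_zc() hits at lines 964-971 trace the RX side of the AF_XDP need_wakeup handshake. Below is a minimal sketch of that pattern, not ice's code itself: the helper name and the `cleaned`/`budget`/`failure` bookkeeping are hypothetical stand-ins for the driver's own state.

```c
#include <net/xdp_sock_drv.h>

/* RX-side need_wakeup sketch, modeled on the ice_clean_rx_irq_zc()
 * call sites listed above. All parameters are hypothetical.
 */
static int rx_irq_zc_sketch(struct xsk_buff_pool *xsk_pool,
			    int cleaned, int budget, bool failure)
{
	if (xsk_uses_need_wakeup(xsk_pool)) {
		/* Ring exhausted or refill failed: tell user space it must
		 * kick us via poll()/sendto(); otherwise clear the flag so
		 * it can keep producing without a syscall per batch.
		 */
		if (failure || cleaned == budget)
			xsk_set_rx_need_wakeup(xsk_pool);
		else
			xsk_clear_rx_need_wakeup(xsk_pool);

		return cleaned;
	}

	return failure ? budget : cleaned;
}
```

Clearing the flag on the fast path is what lets a busy-polling application keep the fill ring topped up without ever entering the kernel.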
ice_xsk.h
      24  struct xsk_buff_pool *xsk_pool,
      28  struct xsk_buff_pool *xsk_pool, u16 count);
      32  bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, struct xsk_buff_pool *xsk_pool);
      36  struct xsk_buff_pool __always_unused *xsk_pool)  in ice_xmit_zc() argument
      51  struct xsk_buff_pool __always_unused *xsk_pool,  in ice_clean_rx_irq_zc() argument
      59  struct xsk_buff_pool __always_unused *xsk_pool,  in ice_alloc_rx_bufs_zc() argument
ice_base.c
     510  xsk_pool_fill_cb(ring->xsk_pool, &desc);  in ice_xsk_pool_fill_cb()
     557  if (ring->xsk_pool) {  in ice_vsi_cfg_rxq()
     561  xsk_pool_get_rx_frame_size(ring->xsk_pool);  in ice_vsi_cfg_rxq()
     573  xsk_pool_set_rxq_info(ring->xsk_pool, &ring->xdp_rxq);  in ice_vsi_cfg_rxq()
     606  if (ring->xsk_pool) {  in ice_vsi_cfg_rxq()
     609  if (!xsk_buff_can_alloc(ring->xsk_pool, num_bufs)) {  in ice_vsi_cfg_rxq()
     617  ok = ice_alloc_rx_bufs_zc(ring, ring->xsk_pool, num_bufs);  in ice_vsi_cfg_rxq()
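The ice_vsi_cfg_rxq() hits (lines 557-617) outline zero-copy RX bring-up: the buffer length comes from the pool, the pool is bound to the queue's rxq_info, and the fill ring is sanity-checked before the ring is populated. A sketch under those assumptions; the mem-model registration and the error returns are plausible surrounding steps, not lines shown above.

```c
#include <net/xdp.h>
#include <net/xdp_sock_drv.h>

/* Zero-copy RX queue bring-up sketch, modeled on the ice_vsi_cfg_rxq()
 * call sites above. "xdp_rxq" and "num_bufs" stand in for the driver's
 * rxq-info struct and ring fill target.
 */
static int cfg_rxq_zc_sketch(struct xsk_buff_pool *pool,
			     struct xdp_rxq_info *xdp_rxq, u32 num_bufs)
{
	/* HW buffer length must match the frames the pool hands out. */
	u32 rx_buf_len = xsk_pool_get_rx_frame_size(pool);
	int err;

	/* Back this rxq with the XSK pool allocator ... */
	err = xdp_rxq_info_reg_mem_model(xdp_rxq, MEM_TYPE_XSK_BUFF_POOL, NULL);
	if (err)
		return err;

	/* ... and let the pool find the rxq when buffers are allocated. */
	xsk_pool_set_rxq_info(pool, xdp_rxq);

	/* Bail out early if the fill ring cannot populate the whole ring
	 * (the real driver may just warn and continue).
	 */
	if (!xsk_buff_can_alloc(pool, num_bufs))
		return -ENOMEM;

	(void)rx_buf_len;	/* would be programmed into the RX context */
	return 0;
}
```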
ice_txrx.c
     156  if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {  in ice_clean_tx_ring()
     394  if (rx_ring->xsk_pool) {  in ice_clean_rx_ring()
     429  if (rx_ring->xsk_pool)  in ice_clean_rx_ring()
     460  if (rx_ring->xsk_pool) {  in ice_free_rx_ring()
    1485  struct xsk_buff_pool *xsk_pool = READ_ONCE(tx_ring->xsk_pool);  in ice_napi_poll() local
    1488  if (xsk_pool)  in ice_napi_poll()
    1489  wd = ice_xmit_zc(tx_ring, xsk_pool);  in ice_napi_poll()
    1515  struct xsk_buff_pool *xsk_pool = READ_ONCE(rx_ring->xsk_pool);  in ice_napi_poll() local
    1522  cleaned = rx_ring->xsk_pool ?  in ice_napi_poll()
    1523  ice_clean_rx_irq_zc(rx_ring, xsk_pool, budget_per_ring) :  in ice_napi_poll()
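Note the READ_ONCE() at lines 1485 and 1515: ice_napi_poll() snapshots the pool pointer once per poll cycle because pool attach/detach can rewrite ring->xsk_pool concurrently with NAPI. A sketch of the idiom with a hypothetical ring type and clean helpers:

```c
#include <linux/compiler.h>
#include <net/xdp_sock_drv.h>

struct ring_sketch {			/* hypothetical ring */
	struct xsk_buff_pool *xsk_pool;	/* written by pool enable/disable */
};

/* Hypothetical cleaning paths, declared only. */
bool clean_tx_zc_sketch(struct ring_sketch *r, struct xsk_buff_pool *p);
bool clean_tx_copy_sketch(struct ring_sketch *r);

static bool napi_poll_sketch(struct ring_sketch *tx_ring)
{
	/* One snapshot per poll: every test and use below must agree on
	 * the same pointer, even if the pool is detached meanwhile.
	 */
	struct xsk_buff_pool *xsk_pool = READ_ONCE(tx_ring->xsk_pool);

	if (xsk_pool)
		return clean_tx_zc_sketch(tx_ring, xsk_pool);

	return clean_tx_copy_sketch(tx_ring);
}
```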
ice_txrx.h
     360  struct xsk_buff_pool *xsk_pool;  member
     386  struct xsk_buff_pool *xsk_pool;  member
/linux/drivers/net/ethernet/intel/i40e/
i40e_xsk.c
     210  if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)  in i40e_run_xdp_zc()
     520  if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {  in i40e_clean_rx_irq_zc()
     522  xsk_set_rx_need_wakeup(rx_ring->xsk_pool);  in i40e_clean_rx_irq_zc()
     524  xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);  in i40e_clean_rx_irq_zc()
     538  dma = xsk_buff_raw_get_dma(xdp_ring->xsk_pool, desc->addr);  in i40e_xmit_pkt()
     603  struct xdp_desc *descs = xdp_ring->xsk_pool->tx_descs;  in i40e_xmit_zc()
     654  struct xsk_buff_pool *bp = tx_ring->xsk_pool;  in i40e_clean_xdp_tx_irq()
     699  if (xsk_uses_need_wakeup(tx_ring->xsk_pool))  in i40e_clean_xdp_tx_irq()
     700  xsk_set_tx_need_wakeup(tx_ring->xsk_pool);  in i40e_clean_xdp_tx_irq()
     732  if (!vsi->xdp_rings[queue_id]->xsk_pool)  in i40e_xsk_wakeup()
    [all …]
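Line 538 (i40e_xmit_pkt()) is the core of zero-copy TX: the descriptor's UMEM offset is translated to a pre-mapped DMA address, synced, and handed to hardware. A sketch; write_tx_desc_sketch() is a hypothetical stand-in for the driver's descriptor write.

```c
#include <net/xdp_sock_drv.h>

void write_tx_desc_sketch(dma_addr_t dma, u32 len);	/* hypothetical */

/* Descriptor-to-DMA sketch, modeled on i40e_xmit_pkt(). desc->addr is
 * an offset into the already-DMA-mapped UMEM, so the translation is a
 * lookup in the pool rather than a dma_map_*() call.
 */
static void xmit_pkt_sketch(struct xsk_buff_pool *pool,
			    const struct xdp_desc *desc)
{
	dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc->addr);

	/* The CPU wrote the frame; make it visible to the device. */
	xsk_buff_raw_dma_sync_for_device(pool, dma, desc->len);
	write_tx_desc_sketch(dma, desc->len);
}
```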
/linux/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/
rx.c
      30  if (unlikely(!xsk_buff_can_alloc(rq->xsk_pool, rq->mpwqe.pages_per_wqe)))  in mlx5e_xsk_alloc_rx_mpwqe()
      35  batch = xsk_buff_alloc_batch(rq->xsk_pool, xsk_buffs,  in mlx5e_xsk_alloc_rx_mpwqe()
      45  xsk_buffs[batch] = xsk_buff_alloc(rq->xsk_pool);  in mlx5e_xsk_alloc_rx_mpwqe()
     102  rq->xsk_pool->chunk_size);  in mlx5e_xsk_alloc_rx_mpwqe()
     103  __be32 frame_size = cpu_to_be32(rq->xsk_pool->chunk_size);  in mlx5e_xsk_alloc_rx_mpwqe()
     173  alloc = xsk_buff_alloc_batch(rq->xsk_pool, buffs + ix, wqe_bulk);  in mlx5e_xsk_alloc_rx_wqes_batched()
     175  alloc = xsk_buff_alloc_batch(rq->xsk_pool, buffs + ix, contig);  in mlx5e_xsk_alloc_rx_wqes_batched()
     177  alloc += xsk_buff_alloc_batch(rq->xsk_pool, buffs, wqe_bulk - contig);  in mlx5e_xsk_alloc_rx_wqes_batched()
     213  *frag->xskp = xsk_buff_alloc(rq->xsk_pool);  in mlx5e_xsk_alloc_rx_wqes()
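Lines 30-45 (mlx5e_xsk_alloc_rx_mpwqe()) show the batched-allocation idiom: xsk_buff_alloc_batch() may return fewer buffers than requested, for example when the fill ring wraps, so the remainder is topped up one xsk_buff_alloc() at a time. A sketch with simplified unwinding:

```c
#include <linux/compiler.h>
#include <net/xdp_sock_drv.h>

/* Batched RX allocation sketch, modeled on mlx5e_xsk_alloc_rx_mpwqe().
 * "bufs" and "want" are hypothetical caller-owned state.
 */
static int alloc_rx_batch_sketch(struct xsk_buff_pool *pool,
				 struct xdp_buff **bufs, u32 want)
{
	u32 done, i;

	/* Cheap pre-check: don't start if the fill ring can't cover us. */
	if (unlikely(!xsk_buff_can_alloc(pool, want)))
		return -ENOMEM;

	done = xsk_buff_alloc_batch(pool, bufs, want);
	for (i = done; i < want; i++) {
		/* Batch came up short; top up buffer by buffer. */
		bufs[i] = xsk_buff_alloc(pool);
		if (unlikely(!bufs[i]))
			goto err_free;
	}
	return 0;

err_free:
	while (i--)
		xsk_buff_free(bufs[i]);
	return -ENOMEM;
}
```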
tx.c
      58  if (xp_tx_metadata_enabled(sq->xsk_pool))  in mlx5e_xsk_tx_post_err()
      66  struct xsk_buff_pool *pool = sq->xsk_pool;  in mlx5e_xsk_tx()
     114  if (xp_tx_metadata_enabled(sq->xsk_pool)) {  in mlx5e_xsk_tx()
setup.c
      82  rq->xsk_pool = pool;  in mlx5e_init_xsk_rq()
/linux/drivers/net/ethernet/mellanox/mlx5/core/
en_txrx.c
      90  bool need_wakeup = xsk_uses_need_wakeup(xskrq->xsk_pool);  in mlx5e_napi_xsk_post()
      98  xsk_set_tx_need_wakeup(xsksq->xsk_pool);  in mlx5e_napi_xsk_post()
     102  xsk_clear_tx_need_wakeup(xsksq->xsk_pool);  in mlx5e_napi_xsk_post()
     108  xsk_set_rx_need_wakeup(xskrq->xsk_pool);  in mlx5e_napi_xsk_post()
     117  xsk_set_rx_need_wakeup(xskrq->xsk_pool);  in mlx5e_napi_xsk_post()
     119  xsk_clear_rx_need_wakeup(xskrq->xsk_pool);  in mlx5e_napi_xsk_post()
en_main.c
     565  u32 xsk_chunk_size = rq->xsk_pool ? rq->xsk_pool->chunk_size : 0;  in mlx5e_create_rq_umr_mkey()
     610  WARN_ON(rq->xsk_pool);  in mlx5e_init_frags_partition()
     673  if (rq->xsk_pool)  in mlx5e_init_wqe_alloc_info()
     691  if (rq->xsk_pool)  in mlx5e_init_wqe_alloc_info()
    1449  struct xsk_buff_pool *xsk_pool,  in mlx5e_alloc_xdpsq() argument
    1465  sq->xsk_pool = xsk_pool;  in mlx5e_alloc_xdpsq()
    1467  sq->stats = sq->xsk_pool ?  in mlx5e_alloc_xdpsq()
    2656  struct xsk_buff_pool *xsk_pool,  in mlx5e_open_channel() argument
    2707  if (xsk_pool) {  in mlx5e_open_channel()
    4437  struct xsk_buff_pool *xsk_pool =  in mlx5e_xsk_validate_mtu() local
    [all …]
/linux/drivers/net/ethernet/netronome/nfp/nfd3/
xsk.c
      21  struct xsk_buff_pool *pool = r_vec->xsk_pool;  in nfp_nfd3_xsk_tx_xdp()
     324  xsk_tx_completed(r_vec->xsk_pool, done_pkts - reused);  in nfp_nfd3_xsk_complete()
     337  struct xsk_buff_pool *xsk_pool;  in nfp_nfd3_xsk_tx() local
     342  xsk_pool = r_vec->xsk_pool;  in nfp_nfd3_xsk_tx()
     346  if (!xsk_tx_peek_desc(xsk_pool, &desc[i]))  in nfp_nfd3_xsk_tx()
     356  xsk_buff_raw_dma_sync_for_device(xsk_pool, desc[i].addr,  in nfp_nfd3_xsk_tx()
     368  xsk_buff_raw_get_dma(xsk_pool, desc[i].addr));  in nfp_nfd3_xsk_tx()
     381  xsk_tx_release(xsk_pool);  in nfp_nfd3_xsk_tx()
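nfp_nfd3_xsk_tx() (lines 337-381) is the classic peek/release TX loop: consume descriptors up to a budget, then publish the new consumer position to user space once. A sketch; post_to_hw_sketch() stands in for the driver's descriptor write.

```c
#include <net/xdp_sock_drv.h>

void post_to_hw_sketch(dma_addr_t dma, u32 len);	/* hypothetical */

/* Peek/release TX loop sketch, modeled on nfp_nfd3_xsk_tx(). */
static unsigned int xsk_tx_loop_sketch(struct xsk_buff_pool *pool,
				       unsigned int budget)
{
	struct xdp_desc desc;
	dma_addr_t dma;
	unsigned int sent;

	for (sent = 0; sent < budget; sent++) {
		if (!xsk_tx_peek_desc(pool, &desc))
			break;	/* user space has nothing queued */

		dma = xsk_buff_raw_get_dma(pool, desc.addr);
		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
		post_to_hw_sketch(dma, desc.len);
	}

	if (sent)
		/* One release covers all descriptors consumed above. */
		xsk_tx_release(pool);

	return sent;
}
```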
rings.c
      25  if (tx_ring->r_vec->xsk_pool) {  in nfp_nfd3_xsk_tx_bufs_free()
      29  xsk_tx_completed(tx_ring->r_vec->xsk_pool, 1);  in nfp_nfd3_xsk_tx_bufs_free()
/linux/drivers/net/ethernet/freescale/dpaa2/
dpaa2-xsk.c
      49  ch->xsk_pool->umem->headroom);  in dpaa2_xsk_run_xdp()
     183  if (!ch->xsk_pool)  in dpaa2_xsk_disable_pool()
     200  ch->xsk_pool = NULL;  in dpaa2_xsk_disable_pool()
     265  ch->xsk_pool = pool;  in dpaa2_xsk_enable_pool()
     354  addr = xsk_buff_raw_get_dma(ch->xsk_pool, xdp_desc->addr);  in dpaa2_xsk_tx_build_fd()
     355  xsk_buff_raw_dma_sync_for_device(ch->xsk_pool, addr, xdp_desc->len);  in dpaa2_xsk_tx_build_fd()
     393  struct xdp_desc *xdp_descs = ch->xsk_pool->tx_descs;  in dpaa2_xsk_tx()
     410  batch = xsk_tx_peek_release_desc_batch(ch->xsk_pool, budget);  in dpaa2_xsk_tx()
     451  xsk_tx_release(ch->xsk_pool);  in dpaa2_xsk_tx()
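dpaa2_xsk_tx() (lines 393-410) uses the batched variant instead: xsk_tx_peek_release_desc_batch() consumes up to `budget` TX ring entries in one call and leaves the descriptors in pool->tx_descs for the driver to walk. A sketch; enqueue_fd_sketch() is hypothetical.

```c
#include <net/xdp_sock_drv.h>

void enqueue_fd_sketch(struct xsk_buff_pool *pool,
		       const struct xdp_desc *desc);	/* hypothetical */

/* Batched TX sketch, modeled on dpaa2_xsk_tx(). */
static unsigned int xsk_tx_batch_sketch(struct xsk_buff_pool *pool,
					unsigned int budget)
{
	/* The batch helper copies the consumed descriptors here. */
	struct xdp_desc *descs = pool->tx_descs;
	unsigned int batch, i;

	batch = xsk_tx_peek_release_desc_batch(pool, budget);
	for (i = 0; i < batch; i++)
		enqueue_fd_sketch(pool, &descs[i]);

	return batch;
}
```

The batch helper trades one ring-pointer update for per-descriptor peek/release calls, which is why the drivers that can build frames independently (i40e line 603 above, dpaa2 here) prefer it.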
/linux/drivers/net/ethernet/intel/ixgbe/
ixgbe_xsk.c
     114  if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS)  in ixgbe_run_xdp_zc()
     168  bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool);  in ixgbe_alloc_rx_buffers_zc()
     364  if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {  in ixgbe_clean_rx_irq_zc()
     366  xsk_set_rx_need_wakeup(rx_ring->xsk_pool);  in ixgbe_clean_rx_irq_zc()
     368  xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);  in ixgbe_clean_rx_irq_zc()
     393  struct xsk_buff_pool *pool = xdp_ring->xsk_pool;  in ixgbe_xmit_zc()
     461  struct xsk_buff_pool *pool = tx_ring->xsk_pool;  in ixgbe_clean_xdp_tx_irq()
     528  if (!ring->xsk_pool)  in ixgbe_xsk_wakeup()
     543  struct xsk_buff_pool *pool = tx_ring->xsk_pool;  in ixgbe_xsk_clean_tx_ring()
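Line 168 (ixgbe_alloc_rx_buffers_zc()) shows the zero-copy RX refill loop. A sketch of that shape; the slot array, the descriptor write, and the xsk_buff_xdp_get_dma() step are assumptions about the surrounding code rather than lines listed above.

```c
#include <net/xdp_sock_drv.h>

void post_rx_desc_sketch(dma_addr_t dma);	/* hypothetical */

/* Zero-copy RX refill sketch, modeled on ixgbe_alloc_rx_buffers_zc():
 * buffers come straight from the pool's fill ring and their pre-mapped
 * DMA addresses go into the RX descriptors.
 */
static u32 refill_rx_zc_sketch(struct xsk_buff_pool *pool,
			       struct xdp_buff **slots, u32 count)
{
	u32 filled;

	for (filled = 0; filled < count; filled++) {
		struct xdp_buff *xdp = xsk_buff_alloc(pool);

		if (!xdp)
			break;	/* fill ring empty; retry next interrupt */

		slots[filled] = xdp;
		post_rx_desc_sketch(xsk_buff_xdp_get_dma(xdp));
	}

	return filled;
}
```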
/linux/drivers/net/ethernet/engleder/
tsnep_main.c
     886  if (tx->xsk_pool) {  in tsnep_tx_poll()
     955  if (rx->xsk_pool && entry->xdp)  in tsnep_rx_ring_cleanup()
    1675  if (rx->xsk_pool)  in tsnep_rx_open()
    1688  if (rx->xsk_pool) {  in tsnep_rx_open()
    1703  if (rx->xsk_pool)  in tsnep_rx_close()
    1815  done = queue->rx->xsk_pool ?  in tsnep_poll()
    1948  if (rx->xsk_pool)  in tsnep_queue_open()
    2100  queue->tx->xsk_pool = pool;  in tsnep_enable_xsk()
    2101  queue->rx->xsk_pool = pool;  in tsnep_enable_xsk()
    2120  queue->rx->xsk_pool = NULL;  in tsnep_disable_xsk()
    [all …]
tsnep.h
      93  struct xsk_buff_pool *xsk_pool;  member
     131  struct xsk_buff_pool *xsk_pool;  member
/linux/drivers/net/ethernet/netronome/nfp/
nfp_net_xsk.c
      22  headroom = xsk_pool_get_headroom(rx_ring->r_vec->xsk_pool);  in nfp_net_xsk_rx_bufs_stash()
      60  struct xsk_buff_pool *pool = r_vec->xsk_pool;  in nfp_net_xsk_rx_ring_fill_freelist()
nfp_net_debugfs.c
      46  if (!r_vec->xsk_pool) {  in nfp_rx_q_show()
/linux/drivers/net/ethernet/google/gve/
gve_tx.c
     188  if (xsk_complete > 0 && tx->xsk_pool)  in gve_clean_xdp_done()
     189  xsk_tx_completed(tx->xsk_pool, xsk_complete);  in gve_clean_xdp_done()
     959  if (!xsk_tx_peek_desc(tx->xsk_pool, &desc)) {  in gve_xsk_tx()
     964  data = xsk_buff_raw_get_data(tx->xsk_pool, desc.addr);  in gve_xsk_tx()
     972  xsk_tx_release(tx->xsk_pool);  in gve_xsk_tx()
     992  if (tx->xsk_pool) {  in gve_xdp_poll()
     999  if (xsk_uses_need_wakeup(tx->xsk_pool))  in gve_xdp_poll()
    1000  xsk_set_tx_need_wakeup(tx->xsk_pool);  in gve_xdp_poll()
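gve_clean_xdp_done() (lines 188-189) and gve_xdp_poll() (lines 999-1000) show the TX completion side: finished frames are reported via xsk_tx_completed(), then the TX need_wakeup flag is raised. A combined sketch:

```c
#include <net/xdp_sock_drv.h>

/* TX completion sketch, modeled on gve_clean_xdp_done()/gve_xdp_poll().
 * "xsk_complete" stands in for the driver's count of finished AF_XDP
 * frames this cleaning pass.
 */
static void xsk_tx_done_sketch(struct xsk_buff_pool *pool, u32 xsk_complete)
{
	if (xsk_complete > 0)
		/* Advances the completion ring producer; user space may
		 * now reuse these UMEM frames.
		 */
		xsk_tx_completed(pool, xsk_complete);

	/* Freed TX slots may unblock a stalled producer, so tell user
	 * space a kick could be needed.
	 */
	if (xsk_uses_need_wakeup(pool))
		xsk_set_tx_need_wakeup(pool);
}
```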
gve_main.c
    1213  if (rx->xsk_pool) {  in gve_reg_xdp_info()
    1222  xsk_pool_set_rxq_info(rx->xsk_pool,  in gve_reg_xdp_info()
    1255  if (rx->xsk_pool) {  in gve_unreg_xdp_info()
    1257  rx->xsk_pool = NULL;  in gve_unreg_xdp_info()
    1263  priv->tx[tx_qid].xsk_pool = NULL;  in gve_unreg_xdp_info()
    1642  rx->xsk_pool = pool;  in gve_xsk_pool_enable()
    1645  priv->tx[tx_qid].xsk_pool = pool;  in gve_xsk_pool_enable()
    1678  priv->rx[qid].xsk_pool = NULL;  in gve_xsk_pool_disable()
    1680  priv->tx[tx_qid].xsk_pool = NULL;  in gve_xsk_pool_disable()
    1690  priv->rx[qid].xsk_pool = NULL;  in gve_xsk_pool_disable()
    [all …]
/linux/drivers/net/ethernet/stmicro/stmmac/
stmmac_main.c
     239  if (rx_q->xsk_pool) {  in stmmac_disable_all_queues()
    1695  if (rx_q->xsk_pool) {  in __init_dma_rx_desc_rings()
    1712  if (rx_q->xsk_pool) {  in __init_dma_rx_desc_rings()
    1763  if (rx_q->xsk_pool)  in init_dma_rx_desc_rings()
    1769  rx_q->xsk_pool = NULL;  in init_dma_rx_desc_rings()
    1899  tx_q->xsk_pool = NULL;  in dma_free_tx_skbufs()
    1929  if (rx_q->xsk_pool)  in __free_dma_rx_desc_resources()
    1935  rx_q->xsk_pool = NULL;  in __free_dma_rx_desc_resources()
    2418  if (rx_q->xsk_pool) {  in stmmac_dma_operation_mode()
    2763  if (tx_q->xsk_pool) {  in stmmac_tx_clean()
    [all …]
stmmac.h
      76  struct xsk_buff_pool *xsk_pool;  member
     120  struct xsk_buff_pool *xsk_pool;  member
/linux/drivers/net/ethernet/intel/igc/
igc_ptp.c
     760  struct xsk_buff_pool *xsk_pool;  in igc_ptp_tx_reg_to_stamp() local
     762  xsk_pool = adapter->tx_ring[tstamp->xsk_queue_index]->xsk_pool;  in igc_ptp_tx_reg_to_stamp()
     763  if (xsk_pool && xp_tx_metadata_enabled(xsk_pool)) {  in igc_ptp_tx_reg_to_stamp()
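igc_ptp_tx_reg_to_stamp() (lines 760-763) gates hardware TX timestamps on xp_tx_metadata_enabled(): the timestamp is routed into AF_XDP completion metadata only when the queue has a bound pool whose socket opted into TX metadata. A sketch with hypothetical delivery helpers:

```c
#include <linux/types.h>
#include <net/xsk_buff_pool.h>

void complete_ts_via_xsk_sketch(u64 ts);	/* hypothetical */
void complete_ts_via_skb_sketch(u64 ts);	/* hypothetical */

/* Gating sketch, modeled on igc_ptp_tx_reg_to_stamp(). */
static void tx_tstamp_sketch(struct xsk_buff_pool *xsk_pool, u64 ts)
{
	/* Both conditions matter: a pool may be bound while the socket
	 * never requested XDP TX metadata.
	 */
	if (xsk_pool && xp_tx_metadata_enabled(xsk_pool))
		complete_ts_via_xsk_sketch(ts);
	else
		complete_ts_via_skb_sketch(ts);
}
```

The same xp_tx_metadata_enabled() gate appears in the mlx5 tx.c hits above (lines 58 and 114).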
igc_main.c
     251  if (tx_ring->xsk_pool && xsk_frames)  in igc_clean_tx_ring()
     464  if (ring->xsk_pool)  in igc_clean_rx_ring()
     637  if (ring->xsk_pool) {  in igc_configure_rx_ring()
     670  if (ring->xsk_pool)  in igc_configure_rx_ring()
    2312  bi->xdp = xsk_buff_alloc(ring->xsk_pool);  in igc_alloc_rx_buffers_zc()
    2857  xsk_set_rx_need_wakeup(ring->xsk_pool);  in igc_clean_rx_irq_zc()
    3146  if (tx_ring->xsk_pool) {  in igc_clean_tx_irq()
    4049  if (ring->xsk_pool)  in igc_configure()
    4675  int cleaned = rx_ring->xsk_pool ?  in igc_poll()
    6734  if (!ring->xsk_pool)  in igc_xsk_wakeup()
    [all …]