Searched refs:skbs (Results 1 – 25 of 60) sorted by relevance

/linux-6.3-rc2/Documentation/networking/
skbuff.rst
15 Shared skbs and skb clones
19 to keep a struct sk_buff alive. skbs with a ``sk_buff.users != 1`` are referred
20 to as shared skbs (see skb_shared()).
22 skb_clone() allows for fast duplication of skbs. None of the data buffers
24 &skb_shared_info.refcount indicates the number of skbs pointing at the same
27 dataref and headerless skbs
31 :doc: dataref and headerless skbs
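
The skbuff.rst hits above spell out the shared-skb rule: an skb with sk_buff.users != 1 must not be modified, and skb_clone() duplicates only the header while the data buffers stay shared. A minimal sketch of how a handler applies this, using a hypothetical example_own_header() helper (not taken from the kernel sources):

    #include <linux/skbuff.h>

    /* Take a private sk_buff header before touching it.  When the skb
     * is shared (skb_shared() is true), skb_share_check() returns a
     * clone and drops the caller's reference on the shared original;
     * the data buffers remain shared via skb_shared_info.
     */
    static struct sk_buff *example_own_header(struct sk_buff *skb)
    {
            skb = skb_share_check(skb, GFP_ATOMIC);
            if (!skb)
                    return NULL;    /* clone allocation failed */

            /* The header is now private.  Writing to the payload would
             * still require skb_copy()/pskb_copy(), not skb_clone().
             */
            return skb;
    }
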
netdev-features.rst
129 ndo_start_xmit can handle skbs with frags in high memory.
133 Those features say that ndo_start_xmit can handle fragmented skbs:
134 NETIF_F_SG --- paged skbs (skb_shinfo()->frags), NETIF_F_FRAGLIST ---
135 chained skbs (skb->next/prev list).
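
The netdev-features.rst hits describe NETIF_F_SG (paged skbs) and NETIF_F_FRAGLIST (chained skbs). A sketch of what an SG-capable ndo_start_xmit has to walk; example_start_xmit() and map_one_buffer() are hypothetical stand-ins for a driver's descriptor code, and the sketch ignores the high-memory (NETIF_F_HIGHDMA) case mentioned in the first hit:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Stand-in for the driver's real DMA-mapping/descriptor setup. */
    static void map_one_buffer(struct net_device *dev, void *addr,
                               unsigned int len)
    {
    }

    static netdev_tx_t example_start_xmit(struct sk_buff *skb,
                                          struct net_device *dev)
    {
            unsigned int i;

            /* Linear part of a paged (NETIF_F_SG) skb. */
            map_one_buffer(dev, skb->data, skb_headlen(skb));

            /* One buffer per page fragment. */
            for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                    const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                    map_one_buffer(dev, skb_frag_address(frag),
                                   skb_frag_size(frag));
            }

            return NETDEV_TX_OK;
    }
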
xdp-rx-metadata.rst
76 ``skbs``. However, TC-BPF programs can access the XDP metadata area using
80 can override some of the metadata used for building ``skbs``.
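
The xdp-rx-metadata.rst hits note that metadata an XDP program places in front of the packet is still visible to TC-BPF through data_meta. A hedged BPF-C sketch; struct example_meta is a made-up layout, the real one is whatever the XDP side stored with bpf_xdp_adjust_meta():

    #include <linux/bpf.h>
    #include <linux/pkt_cls.h>
    #include <bpf/bpf_helpers.h>

    struct example_meta {
            __u32 rx_hash;          /* hypothetical field */
    };

    SEC("tc")
    int read_xdp_meta(struct __sk_buff *skb)
    {
            void *data_meta = (void *)(long)skb->data_meta;
            void *data = (void *)(long)skb->data;
            struct example_meta *meta = data_meta;

            /* No (or too little) metadata ahead of the packet. */
            if ((void *)(meta + 1) > data)
                    return TC_ACT_OK;

            bpf_printk("rx_hash from XDP: %u", meta->rx_hash);
            return TC_ACT_OK;
    }

    char _license[] SEC("license") = "GPL";
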
segmentation-offloads.rst
60 UFO is deprecated: modern kernels will no longer generate UFO skbs, but can
165 padded and stored as chained skbs, and skb_segment() splits based on those.
181 will check for GSO_BY_FRAGS and WARN if asked to manipulate these skbs.
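
The segmentation-offloads.rst hits cover the SCTP case, where segment boundaries come from the frag_list rather than a fixed gso_size. A small, hypothetical sketch of the "check for GSO_BY_FRAGS and WARN" behaviour quoted above; example_get_gso_size() is not a kernel function:

    #include <linux/errno.h>
    #include <linux/skbuff.h>

    static int example_get_gso_size(const struct sk_buff *skb,
                                    unsigned int *seg_size)
    {
            if (!skb_is_gso(skb))
                    return -EINVAL;

            /* SCTP-style GSO: no uniform segment size to report. */
            if (WARN_ON_ONCE(skb_shinfo(skb)->gso_size == GSO_BY_FRAGS))
                    return -EINVAL;

            *seg_size = skb_shinfo(skb)->gso_size;
            return 0;
    }
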
/linux-6.3-rc2/drivers/net/ethernet/actions/
owl-emac.c
206 ring->skbs[i] = skb; in owl_emac_ring_prepare_rx()
255 dev_kfree_skb(ring->skbs[i]); in owl_emac_ring_unprepare_rx()
256 ring->skbs[i] = NULL; in owl_emac_ring_unprepare_rx()
274 dev_kfree_skb(ring->skbs[i]); in owl_emac_ring_unprepare_tx()
275 ring->skbs[i] = NULL; in owl_emac_ring_unprepare_tx()
290 if (!ring->skbs) in owl_emac_ring_alloc()
530 ring->skbs[tx_head] = skb; in owl_emac_setup_frame_xmit()
605 ring->skbs[tx_head] = skb; in owl_emac_ndo_start_xmit()
680 skb = ring->skbs[tx_tail]; in owl_emac_tx_complete_tail()
684 ring->skbs[tx_tail] = NULL; in owl_emac_tx_complete_tail()
[all …]
owl-emac.h
247 struct sk_buff **skbs; member
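
The owl-emac hits show a pattern that recurs in most of the drivers below (sfc, rswitch, qede, mtk_star_emac, ns83820, iwlegacy): a per-ring array of sk_buff pointers, one slot per hardware descriptor, so the completion path can find and free the right packet. A stripped-down, hypothetical version of that bookkeeping:

    #include <linux/errno.h>
    #include <linux/skbuff.h>
    #include <linux/slab.h>

    struct example_ring {
            struct sk_buff **skbs;  /* one entry per descriptor */
            unsigned int size;
    };

    static int example_ring_alloc(struct example_ring *ring, unsigned int size)
    {
            ring->skbs = kcalloc(size, sizeof(*ring->skbs), GFP_KERNEL);
            if (!ring->skbs)
                    return -ENOMEM;
            ring->size = size;
            return 0;
    }

    static void example_ring_free(struct example_ring *ring)
    {
            unsigned int i;

            for (i = 0; i < ring->size; i++) {
                    dev_kfree_skb(ring->skbs[i]);   /* NULL-safe */
                    ring->skbs[i] = NULL;
            }
            kfree(ring->skbs);
            ring->skbs = NULL;
    }
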
/linux-6.3-rc2/drivers/net/ethernet/sfc/
selftest.c
83 struct sk_buff **skbs; member
424 state->skbs[i] = skb; in efx_begin_loopback()
478 skb = state->skbs[i]; in efx_end_loopback()
534 state->skbs = kcalloc(state->packet_count, in efx_test_loopback()
535 sizeof(state->skbs[0]), GFP_KERNEL); in efx_test_loopback()
536 if (!state->skbs) in efx_test_loopback()
557 kfree(state->skbs); in efx_test_loopback()
/linux-6.3-rc2/drivers/net/ethernet/sfc/siena/
selftest.c
83 struct sk_buff **skbs; member
424 state->skbs[i] = skb; in efx_begin_loopback()
478 skb = state->skbs[i]; in efx_end_loopback()
534 state->skbs = kcalloc(state->packet_count, in efx_test_loopback()
535 sizeof(state->skbs[0]), GFP_KERNEL); in efx_test_loopback()
536 if (!state->skbs) in efx_test_loopback()
557 kfree(state->skbs); in efx_test_loopback()
/linux-6.3-rc2/drivers/net/ethernet/sfc/falcon/
selftest.c
80 struct sk_buff **skbs; member
426 state->skbs[i] = skb; in ef4_begin_loopback()
480 skb = state->skbs[i]; in ef4_end_loopback()
536 state->skbs = kcalloc(state->packet_count, in ef4_test_loopback()
537 sizeof(state->skbs[0]), GFP_KERNEL); in ef4_test_loopback()
538 if (!state->skbs) in ef4_test_loopback()
559 kfree(state->skbs); in ef4_test_loopback()
/linux-6.3-rc2/drivers/net/ethernet/renesas/
rswitch.c
251 if (gq->skbs[index]) in rswitch_gwca_queue_alloc_skb()
255 if (!gq->skbs[index]) in rswitch_gwca_queue_alloc_skb()
265 gq->skbs[index] = NULL; in rswitch_gwca_queue_alloc_skb()
291 kfree(gq->skbs); in rswitch_gwca_queue_free()
292 gq->skbs = NULL; in rswitch_gwca_queue_free()
316 gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL); in rswitch_gwca_queue_alloc()
317 if (!gq->skbs) in rswitch_gwca_queue_alloc()
714 skb = gq->skbs[gq->cur]; in rswitch_rx()
715 gq->skbs[gq->cur] = NULL; in rswitch_rx()
776 skb = gq->skbs[gq->dirty]; in rswitch_tx_free()
[all …]
/linux-6.3-rc2/drivers/net/ethernet/socionext/
sni_ave.c
226 struct sk_buff *skbs; member
585 skb = priv->rx.desc[entry].skbs; in ave_rxdesc_prepare()
618 priv->rx.desc[entry].skbs = skb; in ave_rxdesc_prepare()
715 if (priv->tx.desc[done_idx].skbs) { in ave_tx_complete()
776 skb = priv->rx.desc[proc_idx].skbs; in ave_rx_receive()
777 priv->rx.desc[proc_idx].skbs = NULL; in ave_rx_receive()
1371 if (!priv->tx.desc[entry].skbs) in ave_stop()
1376 priv->tx.desc[entry].skbs = NULL; in ave_stop()
1383 if (!priv->rx.desc[entry].skbs) in ave_stop()
1388 priv->rx.desc[entry].skbs = NULL; in ave_stop()
[all …]
/linux-6.3-rc2/net/mac80211/
tx.c
1014 skb_queue_walk(&tx->skbs, skb) { in ieee80211_tx_h_fragment()
1703 __skb_unlink(skb, skbs); in ieee80211_tx_frags()
1726 skbs); in ieee80211_tx_frags()
1737 skb_queue_splice_init(skbs, in ieee80211_tx_frags()
1753 __skb_unlink(skb, skbs); in ieee80211_tx_frags()
1776 skb = skb_peek(skbs); in __ieee80211_tx()
1936 skb2 = __skb_dequeue(&tx.skbs); in ieee80211_tx_prepare_skb()
4615 skbs++; in ieee80211_8023_xmit()
4779 struct sk_buff_head skbs; in ieee80211_tx_pending_skb() local
4781 __skb_queue_head_init(&skbs); in ieee80211_tx_pending_skb()
[all …]
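
The mac80211 hits use a struct sk_buff_head to carry a burst of frames between functions. A hypothetical sketch of that queue handling; any lock protecting 'pending' is assumed to be held by the caller, and the lock-free __-variants are safe on the on-stack queue:

    #include <linux/skbuff.h>

    static void example_flush_pending(struct sk_buff_head *pending)
    {
            struct sk_buff_head local;
            struct sk_buff *skb;

            __skb_queue_head_init(&local);

            /* Move everything queued so far onto the local list. */
            skb_queue_splice_init(pending, &local);

            while ((skb = __skb_dequeue(&local)))
                    consume_skb(skb);       /* a real driver would transmit here */
    }
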
wpa.c
255 skb_queue_walk(&tx->skbs, skb) { in ieee80211_crypto_tkip_encrypt()
496 skb_queue_walk(&tx->skbs, skb) { in ieee80211_crypto_ccmp_encrypt()
695 skb_queue_walk(&tx->skbs, skb) { in ieee80211_crypto_gcmp_encrypt()
830 if (WARN_ON(skb_queue_len(&tx->skbs) != 1)) in ieee80211_crypto_aes_cmac_encrypt()
833 skb = skb_peek(&tx->skbs); in ieee80211_crypto_aes_cmac_encrypt()
878 if (WARN_ON(skb_queue_len(&tx->skbs) != 1)) in ieee80211_crypto_aes_cmac_256_encrypt()
881 skb = skb_peek(&tx->skbs); in ieee80211_crypto_aes_cmac_256_encrypt()
1023 if (WARN_ON(skb_queue_len(&tx->skbs) != 1)) in ieee80211_crypto_aes_gmac_encrypt()
1026 skb = skb_peek(&tx->skbs); in ieee80211_crypto_aes_gmac_encrypt()
/linux-6.3-rc2/drivers/net/wireless/mediatek/mt7601u/
dma.c
283 struct sk_buff_head skbs; in mt7601u_tx_tasklet() local
286 __skb_queue_head_init(&skbs); in mt7601u_tx_tasklet()
295 skb_queue_splice_init(&dev->tx_skb_done, &skbs); in mt7601u_tx_tasklet()
299 while (!skb_queue_empty(&skbs)) { in mt7601u_tx_tasklet()
300 struct sk_buff *skb = __skb_dequeue(&skbs); in mt7601u_tx_tasklet()
/linux-6.3-rc2/include/linux/
skb_array.h
191 struct sk_buff **skbs, int n) in skb_array_unconsume() argument
193 ptr_ring_unconsume(&a->ring, (void **)skbs, n, __skb_array_destroy_skb); in skb_array_unconsume()
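
skb_array_unconsume() above is part of the skb_array API, a fixed-size FIFO of sk_buff pointers built on ptr_ring. A minimal, hypothetical usage sketch of the core calls:

    #include <linux/skb_array.h>
    #include <linux/skbuff.h>

    static int example_skb_array(struct sk_buff *skb)
    {
            struct skb_array a;
            int err;

            err = skb_array_init(&a, 128, GFP_KERNEL);
            if (err)
                    return err;

            if (skb_array_produce(&a, skb))         /* -ENOSPC when full */
                    kfree_skb(skb);

            skb = skb_array_consume(&a);            /* NULL when empty */
            kfree_skb(skb);                         /* NULL-safe */

            /* Any skbs still queued are freed by the destroy callback. */
            skb_array_cleanup(&a);
            return 0;
    }
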
/linux-6.3-rc2/drivers/net/ethernet/netronome/nfp/nfd3/
xsk.c
391 unsigned int pkts_polled, skbs = 0; in nfp_nfd3_xsk_poll() local
393 pkts_polled = nfp_nfd3_xsk_rx(r_vec->rx_ring, budget, &skbs); in nfp_nfd3_xsk_poll()
404 if (pkts_polled < budget && napi_complete_done(napi, skbs)) in nfp_nfd3_xsk_poll()
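
The nfp hit passes the number of skbs handed to the stack into napi_complete_done(). A generic sketch of that NAPI poll contract; example_napi_poll() is hypothetical and elides the actual RX processing:

    #include <linux/netdevice.h>

    static int example_napi_poll(struct napi_struct *napi, int budget)
    {
            int work_done = 0;

            /* A real driver processes up to 'budget' packets here,
             * counting each skb pushed to the stack in work_done.
             */

            if (work_done < budget &&
                napi_complete_done(napi, work_done)) {
                    /* Polling finished: re-enable the RX interrupt. */
            }

            return work_done;
    }
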
/linux-6.3-rc2/kernel/bpf/
cpumap.c
315 void *skbs[CPUMAP_BATCH]; in cpu_map_kthread_run() local
364 m = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, skbs); in cpu_map_kthread_run()
367 skbs[i] = NULL; /* effect: xdp_return_frame */ in cpu_map_kthread_run()
375 struct sk_buff *skb = skbs[i]; in cpu_map_kthread_run()
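
cpumap.c bulk-allocates the skbs for a batch of XDP frames with kmem_cache_alloc_bulk(). The skbuff_cache it uses is private to net/core, so the sketch below only illustrates the bulk slab API on a cache of its own (example_obj and example_bulk_alloc() are made up):

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/slab.h>

    struct example_obj {
            int value;
    };

    static int example_bulk_alloc(void)
    {
            struct kmem_cache *cache;
            void *objs[8];
            int n;

            cache = kmem_cache_create("example_obj",
                                      sizeof(struct example_obj),
                                      0, 0, NULL);
            if (!cache)
                    return -ENOMEM;

            /* May return fewer than requested; only 'n' entries are valid. */
            n = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(objs), objs);
            if (n)
                    kmem_cache_free_bulk(cache, n, objs);

            kmem_cache_destroy(cache);
            return 0;
    }
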
/linux-6.3-rc2/net/bpf/
test_run.c
113 struct sk_buff **skbs; member
173 xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL); in xdp_test_run_setup()
174 if (!xdp->skbs) in xdp_test_run_setup()
204 kvfree(xdp->skbs); in xdp_test_run_setup()
215 kfree(xdp->skbs); in xdp_test_run_teardown()
237 struct sk_buff **skbs, in xdp_recv_frames() argument
244 n = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, (void **)skbs); in xdp_recv_frames()
253 struct sk_buff *skb = skbs[i]; in xdp_recv_frames()
341 ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev); in xdp_test_run_batch()
/linux-6.3-rc2/drivers/net/ethernet/qlogic/qede/
qede_fp.c
79 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; in qede_free_tx_pkt()
84 bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD; in qede_free_tx_pkt()
124 txq->sw_tx_ring.skbs[idx].skb = NULL; in qede_free_tx_pkt()
125 txq->sw_tx_ring.skbs[idx].flags = 0; in qede_free_tx_pkt()
136 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; in qede_free_failed_tx_pkt()
172 txq->sw_tx_ring.skbs[idx].skb = NULL; in qede_free_failed_tx_pkt()
173 txq->sw_tx_ring.skbs[idx].flags = 0; in qede_free_failed_tx_pkt()
1519 txq->sw_tx_ring.skbs[idx].skb = skb; in qede_start_xmit()
1641 txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD; in qede_start_xmit()
/linux-6.3-rc2/drivers/net/wireless/intel/iwlwifi/dvm/
tx.c
1122 struct sk_buff_head skbs; in iwlagn_rx_reply_tx() local
1143 __skb_queue_head_init(&skbs); in iwlagn_rx_reply_tx()
1172 iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs); in iwlagn_rx_reply_tx()
1177 skb_queue_walk(&skbs, skb) { in iwlagn_rx_reply_tx()
1248 while (!skb_queue_empty(&skbs)) { in iwlagn_rx_reply_tx()
1249 skb = __skb_dequeue(&skbs); in iwlagn_rx_reply_tx()
/linux-6.3-rc2/drivers/net/ethernet/mediatek/
mtk_star_emac.c
242 struct sk_buff *skbs[MTK_STAR_RING_NUM_DESCS]; member
326 desc_data->skb = ring->skbs[ring->tail]; in mtk_star_ring_pop_tail()
329 ring->skbs[ring->tail] = NULL; in mtk_star_ring_pop_tail()
350 ring->skbs[ring->head] = desc_data->skb; in mtk_star_ring_push_head()
719 ring->skbs[i] = skb; in mtk_star_prepare_rx_skbs()
739 desc_data.skb = ring->skbs[i]; in mtk_star_ring_free_skbs()
/linux-6.3-rc2/drivers/net/ethernet/natsemi/
ns83820.c
401 struct sk_buff *skbs[NR_RX_DESC]; member
524 BUG_ON(NULL != dev->rx_info.skbs[next_empty]); in ns83820_add_rx_skb()
525 dev->rx_info.skbs[next_empty] = skb; in ns83820_add_rx_skb()
790 struct sk_buff *skb = dev->rx_info.skbs[i]; in ns83820_cleanup_rx()
791 dev->rx_info.skbs[i] = NULL; in ns83820_cleanup_rx()
856 skb = info->skbs[next_rx];
857 info->skbs[next_rx] = NULL;
/linux-6.3-rc2/drivers/net/wireless/intel/iwlwifi/queue/
tx.h
176 struct sk_buff_head *skbs);
/linux-6.3-rc2/drivers/net/wireless/intel/iwlwifi/
iwl-trans.h
566 struct sk_buff_head *skbs);
1198 int ssn, struct sk_buff_head *skbs) in iwl_trans_reclaim() argument
1205 trans->ops->reclaim(trans, queue, ssn, skbs); in iwl_trans_reclaim()
/linux-6.3-rc2/drivers/net/wireless/intel/iwlegacy/
3945.c
283 skb = txq->skbs[txq->q.read_ptr]; in il3945_tx_queue_reclaim()
285 txq->skbs[txq->q.read_ptr] = NULL; in il3945_tx_queue_reclaim()
332 info = IEEE80211_SKB_CB(txq->skbs[txq->q.read_ptr]); in il3945_hdl_tx()
667 if (txq->skbs) { in il3945_hw_txq_free_tfd()
668 struct sk_buff *skb = txq->skbs[txq->q.read_ptr]; in il3945_hw_txq_free_tfd()
673 txq->skbs[txq->q.read_ptr] = NULL; in il3945_hw_txq_free_tfd()

Completed in 112 milliseconds
