/net/core/
gro.c
    140  frag = pinfo->frags + nr_frags;  in skb_gro_receive()
    141  frag2 = skbinfo->frags + i;  in skb_gro_receive()
    161  skb_frag_t *frag = pinfo->frags + nr_frags;  in skb_gro_receive()
    195  skb_frag_off_add(&skbinfo->frags[0], eat);  in skb_gro_receive()
    196  skb_frag_size_sub(&skbinfo->frags[0], eat);  in skb_gro_receive()
    400  frag0 = &pinfo->frags[0];  in skb_gro_reset_offset()
    423  skb_frag_off_add(&pinfo->frags[0], grow);  in gro_pull_from_frag0()
    424  skb_frag_size_sub(&pinfo->frags[0], grow);  in gro_pull_from_frag0()
    426  if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {  in gro_pull_from_frag0()
    428  memmove(pinfo->frags, pinfo->frags + 1,  in gro_pull_from_frag0()
    [all …]
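
A minimal sketch of the frag0-trimming pattern visible in the gro_pull_from_frag0() hits above: advance the first fragment's offset, shrink its size, and collapse the array once the fragment is fully consumed. The function name and the `grow` parameter are illustrative, and the linear-area bookkeeping the real code does around this is omitted.

```c
#include <linux/skbuff.h>
#include <linux/string.h>

/* Illustrative helper, not the kernel's exact code. */
static void pull_from_frag0_sketch(struct sk_buff *skb, unsigned int grow)
{
        struct skb_shared_info *pinfo = skb_shinfo(skb);

        /* eat "grow" bytes from the front of frag 0 */
        skb_frag_off_add(&pinfo->frags[0], grow);
        skb_frag_size_sub(&pinfo->frags[0], grow);

        /* frag 0 fully consumed: drop it and close the gap */
        if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
                --pinfo->nr_frags;
                memmove(pinfo->frags, pinfo->frags + 1,
                        pinfo->nr_frags * sizeof(pinfo->frags[0]));
        }
}
```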
|
net_test.c
     46  const unsigned int *frags;  member
     75  .frags = (const unsigned int[]) { GSO_TEST_SIZE, 1 },
     83  .frags = (const unsigned int[]) { GSO_TEST_SIZE, GSO_TEST_SIZE, 2 },
     92  .frags = (const unsigned int[]) { GSO_TEST_SIZE, 3 },
    174  skb_fill_page_desc(skb, i, page, pg_off, tcase->frags[i]);  in gso_test_func()
    175  pg_off += tcase->frags[i];  in gso_test_func()
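
The gso_test_func() hits show a test case's frags[] size list being turned into real page fragments with skb_fill_page_desc(). A rough sketch of that fill loop, assuming a single pre-allocated page; the helper name and frag_sizes parameter are made up, and page refcounting is left out.

```c
#include <linux/skbuff.h>

/* Hypothetical helper mirroring the fill loop above. */
static void fill_frags_sketch(struct sk_buff *skb, struct page *page,
                              const unsigned int *frag_sizes, int nr_frags)
{
        unsigned int pg_off = 0;
        int i;

        for (i = 0; i < nr_frags; i++) {
                /* describe frag i: page, offset within it, length */
                skb_fill_page_desc(skb, i, page, pg_off, frag_sizes[i]);
                pg_off += frag_sizes[i];
                /* keep skb length accounting in sync with the new payload */
                skb->len += frag_sizes[i];
                skb->data_len += frag_sizes[i];
        }
}
```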
|
skbuff.c
    2189  skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];  in __pskb_copy_fclone()
    2901  *frag = skb_shinfo(skb)->frags[i];  in __pskb_pull_tail()
    3840  skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];  in skb_zerocopy()
    4089  skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];  in skb_split_inside_header()
    4117  skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];  in skb_split_no_header()
    4292  skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];  in skb_shift()
    4793  frag = skb_shinfo(head_skb)->frags;  in skb_segment()
    4822  frag = skb_shinfo(list_skb)->frags;  in skb_segment()
    4907  nskb_frag = skb_shinfo(nskb)->frags;  in skb_segment()
    6131  from_shinfo->frags,  in skb_try_coalesce()
    [all …]
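
Several of the skbuff.c hits copy fragment descriptors by value from one skb's shared info to another. A sketch of that idiom, assuming each copied descriptor also needs an extra page reference via skb_frag_ref(); the function name is illustrative.

```c
#include <linux/skbuff.h>

/* Illustrative sketch of the descriptor-copy pattern above. */
static void copy_frags_sketch(struct sk_buff *to, struct sk_buff *from)
{
        int i;

        for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
                /* copy the descriptor by value ... */
                skb_shinfo(to)->frags[i] = skb_shinfo(from)->frags[i];
                /* ... and take a page reference for the new owner */
                skb_frag_ref(to, i);
        }
        skb_shinfo(to)->nr_frags = skb_shinfo(from)->nr_frags;
}
```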
|
tso.c
    54  skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];  in tso_build_data()
    80  skb_frag_t *frag = &skb_shinfo(skb)->frags[tso->next_frag_idx];  in tso_start()
|
xdp.c
    473  __xdp_return(skb_frag_netmem(&sinfo->frags[i]), xdpf->mem_type,  in xdp_return_frame()
    490  __xdp_return(skb_frag_netmem(&sinfo->frags[i]), xdpf->mem_type,  in xdp_return_frame_rx_napi()
    527  skb_frag_t *frag = &sinfo->frags[i];  in xdp_return_frame_bulk()
    558  __xdp_return(skb_frag_netmem(&sinfo->frags[i]),  in xdp_return_buff()
    701  const skb_frag_t *frag = &xinfo->frags[i];  in xdp_copy_frags_from_zc()
|
pktgen.c
    2830  int frags = pkt_dev->nfrags;  in pktgen_finalize_skb() local
    2835  if (frags > MAX_SKB_FRAGS)  in pktgen_finalize_skb()
    2836  frags = MAX_SKB_FRAGS;  in pktgen_finalize_skb()
    2837  len = datalen - frags * PAGE_SIZE;  in pktgen_finalize_skb()
    2840  datalen = frags * PAGE_SIZE;  in pktgen_finalize_skb()
    2844  frag_len = (datalen/frags) < PAGE_SIZE ?  in pktgen_finalize_skb()
    2845  (datalen/frags) : PAGE_SIZE;  in pktgen_finalize_skb()
    2859  if (i == (frags - 1))  in pktgen_finalize_skb()
    2860  skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[i],  in pktgen_finalize_skb()
    2865  skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[i],  in pktgen_finalize_skb()
    [all …]
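
pktgen_finalize_skb() spreads the requested payload over up to MAX_SKB_FRAGS page fragments. A simplified sketch of that loop using skb_frag_fill_page_desc(), assuming one shared page and ignoring refcounting; the helper name is hypothetical, and payload beyond nfrags pages is assumed to stay in the linear area.

```c
#include <linux/skbuff.h>

/* Hypothetical helper modelled on the loop above. */
static void fill_even_frags_sketch(struct sk_buff *skb, struct page *page,
                                   int nfrags, int datalen)
{
        int frag_len, i;

        if (nfrags <= 0 || datalen <= 0)
                return;
        if (nfrags > MAX_SKB_FRAGS)
                nfrags = MAX_SKB_FRAGS;

        /* even share per fragment, capped at one page */
        frag_len = (datalen / nfrags) < PAGE_SIZE ?
                   (datalen / nfrags) : PAGE_SIZE;

        for (i = 0; i < nfrags && datalen > 0; i++) {
                /* the last fragment takes what is left, up to a full page */
                int len = min_t(int, datalen,
                                i == nfrags - 1 ? PAGE_SIZE : frag_len);

                skb_frag_fill_page_desc(&skb_shinfo(skb)->frags[i],
                                        page, 0, len);
                skb_shinfo(skb)->nr_frags = i + 1;
                datalen -= len;
                skb->len += len;
                skb->data_len += len;
        }
}
```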
|
datagram.c
    417  const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];  in __skb_datagram_iter()
    571  const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];  in skb_copy_datagram_from_iter()
    665  skb_frag_t *last = &skb_shinfo(skb)->frags[frag - 1];  in zerocopy_fill_skb_from_iter()
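
The datagram.c hits all walk the fragment array while copying to or from an iov_iter. A hedged sketch of the read direction only, assuming copy_page_to_iter() and skipping the frag_list recursion and checksum variants the real code has; the helper name is invented.

```c
#include <linux/skbuff.h>
#include <linux/uio.h>

/* Illustrative sketch: copy "len" bytes starting at "offset" to an iterator. */
static int copy_frags_to_iter_sketch(const struct sk_buff *skb, int offset,
                                     struct iov_iter *to, int len)
{
        int start = skb_headlen(skb);
        int i;

        for (i = 0; i < skb_shinfo(skb)->nr_frags && len; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
                int end = start + skb_frag_size(frag);

                if (offset < end) {
                        int copy = min(len, end - offset);

                        if (copy_page_to_iter(skb_frag_page(frag),
                                              skb_frag_off(frag) + offset - start,
                                              copy, to) != copy)
                                return -EFAULT;
                        offset += copy;
                        len -= copy;
                }
                start = end;
        }
        /* anything still left would live in skb->frag_list (not handled here) */
        return len ? -EFAULT : 0;
}
```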
|
/net/sunrpc/auth_gss/
gss_krb5_crypto.c
    304  struct scatterlist frags[4];  member
    321  sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length,  in decryptor()
    332  sg_mark_end(&desc->frags[desc->fragno - 1]);  in decryptor()
    334  skcipher_request_set_crypt(desc->req, desc->frags, desc->frags,  in decryptor()
    341  sg_init_table(desc->frags, 4);  in decryptor()
    344  sg_set_page(&desc->frags[0], sg_page(sg), fraglen,  in decryptor()
    568  sg_init_table(desc.frags, 4);  in krb5_cbc_cts_decrypt()
|
/net/xfrm/
xfrm_iptfs.c
     321  skb_frag_t frags[MAX_SKB_FRAGS + 1];  member
     352  frag = &walk->frags[walk->nr_frags++];  in iptfs_skb_prepare_frag_walk()
     364  from = &shinfo->frags[i];  in iptfs_skb_prepare_frag_walk()
     369  frag = &walk->frags[walk->nr_frags++];  in iptfs_skb_prepare_frag_walk()
     388  walk->past -= walk->frags[--walk->fragi].len;  in iptfs_skb_reset_frag_walk()
     393  walk->past += walk->frags[walk->fragi++].len;  in iptfs_skb_reset_frag_walk()
     426  skb_frag_t *frag = &walk->frags[fragi];  in iptfs_skb_can_add_frags()
     472  skb_frag_t *frag = &walk->frags[walk->fragi];  in iptfs_skb_add_frags()
     473  skb_frag_t *tofrag = &shinfo->frags[shinfo->nr_frags];  in iptfs_skb_add_frags()
    2157  memcpy(&toi->frags[toi->nr_frags], fromi->frags,  in iptfs_consume_frags()
    [all …]
|
trace_iptfs.h
    54  ? page_address(netmem_to_page(skb_shinfo(skb)->frags[0].netmem))
    57  ? skb_shinfo(skb)->frags[0].offset
|
xfrm_ipcomp.c
    74  frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;  in ipcomp_post_acomp()
|
/net/sctp/
outqueue.c
    1228  union sctp_sack_variable *frags;  in sctp_sack_update_unack_data() local
    1234  frags = (union sctp_sack_variable *)(sack + 1);  in sctp_sack_update_unack_data()
    1236  unack_data -= ((ntohs(frags[i].gab.end) -  in sctp_sack_update_unack_data()
    1237  ntohs(frags[i].gab.start) + 1));  in sctp_sack_update_unack_data()
    1316  union sctp_sack_variable *frags =  in sctp_outq_sack() local
    1319  highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);  in sctp_outq_sack()
    1775  union sctp_sack_variable *frags;  in sctp_acked() local
    1794  frags = (union sctp_sack_variable *)(sack + 1);  in sctp_acked()
    1798  if (tsn_offset >= ntohs(frags[i].gab.start) &&  in sctp_acked()
    1799  tsn_offset <= ntohs(frags[i].gab.end))  in sctp_acked()
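
The SCTP hits all cast the bytes immediately after the fixed SACK header to an array of union sctp_sack_variable and read the gap-ack block bounds with ntohs(). A small sketch of that walk, counting how many TSNs the blocks cover; the function name is hypothetical.

```c
#include <linux/sctp.h>
#include <asm/byteorder.h>

/* Illustrative walk over the gap-ack blocks that follow the SACK header. */
static unsigned int count_gap_acked_sketch(const struct sctp_sackhdr *sack)
{
        const union sctp_sack_variable *frags =
                (const union sctp_sack_variable *)(sack + 1);
        unsigned int acked = 0;
        int i;

        for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++)
                /* each block spans [start, end] relative to the cumulative ack */
                acked += ntohs(frags[i].gab.end) -
                         ntohs(frags[i].gab.start) + 1;

        return acked;
}
```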
|
/net/tipc/
msg.c
    311  int pktmax, struct sk_buff_head *frags)  in tipc_msg_fragment() argument
    339  __skb_queue_tail(frags, _skb);  in tipc_msg_fragment()
    353  __skb_queue_purge(frags);  in tipc_msg_fragment()
    354  __skb_queue_head_init(frags);  in tipc_msg_fragment()
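
tipc_msg_fragment() appends each fragment to a caller-supplied sk_buff_head and, on allocation failure, purges and re-initialises the whole queue so no partial message leaks out. A sketch of just that queue discipline; the function name, fragment size, and GFP flags are assumptions, and the actual payload construction is omitted.

```c
#include <linux/skbuff.h>

/* Illustrative sketch of the build-or-purge queue discipline above. */
static int fragment_into_queue_sketch(int nr_pkts, struct sk_buff_head *frags)
{
        int i;

        for (i = 0; i < nr_pkts; i++) {
                struct sk_buff *_skb = alloc_skb(1500, GFP_ATOMIC);

                if (!_skb) {
                        /* drop everything built so far, leave a clean queue */
                        __skb_queue_purge(frags);
                        __skb_queue_head_init(frags);
                        return -ENOMEM;
                }
                __skb_queue_tail(frags, _skb);
        }
        return 0;
}
```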
|
link.c
    1958  struct sk_buff_head tmpxq, tnlq, frags;  in tipc_link_tnl_prepare() local
    1994  __skb_queue_head_init(&frags);  in tipc_link_tnl_prepare()
    2037  &frags);  in tipc_link_tnl_prepare()
    2043  pktcnt += skb_queue_len(&frags) - 1;  in tipc_link_tnl_prepare()
    2045  skb_queue_splice_tail_init(&frags, &tnlq);  in tipc_link_tnl_prepare()
|
/net/tls/
tls_strp.c
     54  skb_frag_t *frag = &skb_shinfo(skb)->frags[i];  in tls_strp_skb_copy()
    200  __skb_frag_unref(&shinfo->frags[i], false);  in tls_strp_flush_anchor_copy()
    218  frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE];  in tls_strp_copyin_frag()
|
tls_device.c
    144  __skb_frag_unref(&record->frags[i], false);  in destroy_record()
    261  frag = &record->frags[record->num_frags - 1];  in tls_append_frag()
    298  frag = &record->frags[i];  in tls_push_record()
    335  tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]),  in tls_device_record_close()
    351  frag = &record->frags[0];  in tls_create_new_record()
|
tls_device_fallback.c
    260  skb_frag_t *frag = &record->frags[i];  in fill_sg_in()
|
/net/ipv6/
udp.c
    1332  struct sk_buff *frags = skb_shinfo(skb)->frag_list;  in udp6_hwcsum_outgoing() local
    1335  if (!frags) {  in udp6_hwcsum_outgoing()
    1353  csum = csum_add(csum, frags->csum);  in udp6_hwcsum_outgoing()
    1354  } while ((frags = frags->next));  in udp6_hwcsum_outgoing()
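
udp6_hwcsum_outgoing() folds the software checksums of the segments hanging off skb_shinfo(skb)->frag_list into the parent skb's running checksum. A sketch of just that fold; the function name is illustrative and the surrounding pseudo-header handling is omitted.

```c
#include <linux/skbuff.h>
#include <net/checksum.h>

/* Illustrative helper: add each frag_list segment's ->csum to "csum". */
static __wsum add_frag_list_csums_sketch(struct sk_buff *skb, __wsum csum)
{
        struct sk_buff *frags = skb_shinfo(skb)->frag_list;

        if (!frags)
                return csum;    /* no trailing GSO segments */

        do {
                csum = csum_add(csum, frags->csum);
        } while ((frags = frags->next));

        return csum;
}
```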
|
/net/kcm/
kcmsock.c
    632  WARN_ON_ONCE(!skb_frag_page(&skb_shinfo(skb)->frags[0]))) {  in kcm_write_msgs()
    639  msize += skb_frag_size(&skb_shinfo(skb)->frags[i]);  in kcm_write_msgs()
    642  (const struct bio_vec *)skb_shinfo(skb)->frags,  in kcm_write_msgs()
    869  &skb_shinfo(skb)->frags[i - 1], copy);  in kcm_sendmsg()
|
/net/xdp/
xsk.c
    159  u32 frags = xdp_buff_has_frags(xdp);  in xsk_rcv_zc() local
    165  if (frags)  in xsk_rcv_zc()
    171  if (likely(!frags))  in xsk_rcv_zc()
    274  frag = &sinfo->frags[0];  in __xsk_rcv()
|
/net/ipv4/
tcp.c
    1261  skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);  in tcp_sendmsg_locked()
    1843  frag = skb_shinfo(skb)->frags;  in skb_advance_to_frag()
    1906  if (frag == &info->frags[info->nr_frags - 1])  in tcp_zerocopy_set_hint_for_skb()
    2155  const skb_frag_t *frags = NULL;  in tcp_zerocopy_receive() local
    2229  frags = skb_advance_to_frag(skb, offset, &offset_frag);  in tcp_zerocopy_receive()
    2230  if (!frags || offset_frag)  in tcp_zerocopy_receive()
    2234  mappable_offset = find_next_mappable_frag(frags,  in tcp_zerocopy_receive()
    2240  page = skb_frag_page(frags);  in tcp_zerocopy_receive()
    2248  frags++;  in tcp_zerocopy_receive()
    2512  skb_frag_t *frag = &skb_shinfo(skb)->frags[i];  in tcp_recvmsg_dmabuf()
|
tcp_sigpool.c
    346  const skb_frag_t *f = &shi->frags[i];  in tcp_sigpool_hash_skb_data()
|
tcp_output.c
    1711  int size = skb_frag_size(&shinfo->frags[i]);  in __pskb_trim_head()
    1717  shinfo->frags[k] = shinfo->frags[i];  in __pskb_trim_head()
    1719  skb_frag_off_add(&shinfo->frags[k], eat);  in __pskb_trim_head()
    1720  skb_frag_size_sub(&shinfo->frags[k], eat);  in __pskb_trim_head()
    2364  skb_frag_t *lastfrag = NULL, *fragto = skb_shinfo(to)->frags;  in tcp_clone_payload()
    2372  const skb_frag_t *fragfrom = skb_shinfo(skb)->frags;  in tcp_clone_payload()
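
__pskb_trim_head() eats bytes from the front of the fragment array: fully consumed fragments are dropped and the array is compacted, while the first surviving fragment gets its offset advanced and size shrunk. A sketch of that compaction, assuming skb_frag_unref() releases the dropped pages; the wrapper name is made up and the skb length accounting is omitted.

```c
#include <linux/skbuff.h>

/* Illustrative compaction of the frag array after eating "eat" bytes. */
static void trim_frags_front_sketch(struct sk_buff *skb, unsigned int eat)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);
        int i, k = 0;

        for (i = 0; i < shinfo->nr_frags; i++) {
                unsigned int size = skb_frag_size(&shinfo->frags[i]);

                if (size <= eat) {
                        skb_frag_unref(skb, i); /* fragment fully eaten */
                        eat -= size;
                } else {
                        shinfo->frags[k] = shinfo->frags[i];
                        if (eat) {
                                /* partially eaten: shift offset, shrink size */
                                skb_frag_off_add(&shinfo->frags[k], eat);
                                skb_frag_size_sub(&shinfo->frags[k], eat);
                                eat = 0;
                        }
                        k++;
                }
        }
        shinfo->nr_frags = k;
}
```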
|
/net/bpf/
test_run.c
     482  skb_frag_t *frag = &sinfo->frags[i];  in bpf_test_finish()
    1301  frag = &sinfo->frags[sinfo->nr_frags++];  in bpf_prog_test_run_xdp()
    1345  __free_page(skb_frag_page(&sinfo->frags[i]));  in bpf_prog_test_run_xdp()
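
bpf_prog_test_run_xdp() releases every page referenced from the shared info's frag array when the test frame is torn down. A sketch of that cleanup loop; the helper name is illustrative.

```c
#include <linux/skbuff.h>
#include <linux/gfp.h>

/* Illustrative cleanup: free each page referenced by the frag array. */
static void free_frag_pages_sketch(struct skb_shared_info *sinfo)
{
        int i;

        for (i = 0; i < sinfo->nr_frags; i++)
                __free_page(skb_frag_page(&sinfo->frags[i]));
        sinfo->nr_frags = 0;
}
```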
|
/net/mac80211/
sta_info.h
    769  struct ieee80211_fragment_cache frags;  member
|