Lines matching references to "from" in net/core/skbuff.c (format: source line number, matching code, enclosing function):
3367 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) in skb_store_bits() argument
3379 skb_copy_to_linear_data_offset(skb, offset, from, copy); in skb_store_bits()
3383 from += copy; in skb_store_bits()
3408 memcpy(vaddr + p_off, from + copied, p_len); in skb_store_bits()
3415 from += copy; in skb_store_bits()
3430 from, copy)) in skb_store_bits()
3435 from += copy; in skb_store_bits()
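
The skb_store_bits() hits above scatter a flat buffer into an skb across its linear area, page frags and frag list. Below is a minimal usage sketch; example_patch_payload() is a hypothetical helper, and pairing it with skb_ensure_writable() mirrors how callers such as the BPF and netfilter paths make the range writable before storing.

#include <linux/skbuff.h>
#include <linux/errno.h>

/* Hypothetical helper: overwrite len bytes at offset with data from buf.
 * skb_store_bits() neither unshares nor grows the skb, so the range must
 * already exist and be writable.
 */
static int example_patch_payload(struct sk_buff *skb, int offset,
                                 const void *buf, int len)
{
        int err;

        /* Pull the range into the head and make it private if the skb is
         * cloned; this also fails when the skb is shorter than offset + len.
         */
        err = skb_ensure_writable(skb, offset + len);
        if (err)
                return err;

        /* Scatter buf into the skb starting at offset. */
        return skb_store_bits(skb, offset, buf, len);
}
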
3757 skb_zerocopy_headlen(const struct sk_buff *from) in skb_zerocopy_headlen() argument
3761 if (!from->head_frag || in skb_zerocopy_headlen()
3762 skb_headlen(from) < L1_CACHE_BYTES || in skb_zerocopy_headlen()
3763 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) { in skb_zerocopy_headlen()
3764 hlen = skb_headlen(from); in skb_zerocopy_headlen()
3766 hlen = from->len; in skb_zerocopy_headlen()
3769 if (skb_has_frag_list(from)) in skb_zerocopy_headlen()
3770 hlen = from->len; in skb_zerocopy_headlen()
3795 skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen) in skb_zerocopy() argument
3803 BUG_ON(!from->head_frag && !hlen); in skb_zerocopy()
3807 return skb_copy_bits(from, 0, skb_put(to, len), len); in skb_zerocopy()
3810 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen); in skb_zerocopy()
3815 plen = min_t(int, skb_headlen(from), len); in skb_zerocopy()
3817 page = virt_to_head_page(from->head); in skb_zerocopy()
3818 offset = from->data - (unsigned char *)page_address(page); in skb_zerocopy()
3829 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) { in skb_zerocopy()
3830 skb_tx_error(from); in skb_zerocopy()
3833 skb_zerocopy_clone(to, from, GFP_ATOMIC); in skb_zerocopy()
3835 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) { in skb_zerocopy()
3840 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i]; in skb_zerocopy()
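
skb_zerocopy_headlen() and skb_zerocopy() work as a pair: the former decides how many leading bytes must be copied into the destination's linear area, the latter copies that head and grafts the rest of the source payload as page fragments. A sketch follows, assuming the whole payload is wanted; example_zerocopy_clone() and the GFP flag are illustrative, and the sizing loosely follows how nfnetlink_queue and Open vSwitch use these helpers.

#include <linux/skbuff.h>

/* Hypothetical helper: build a new skb whose payload references the pages
 * of `from` instead of copying them.
 */
static struct sk_buff *example_zerocopy_clone(struct sk_buff *from, gfp_t gfp)
{
        unsigned int hlen = skb_zerocopy_headlen(from);
        struct sk_buff *to;
        int err;

        /* hlen bytes will land in the new linear area, so allocate at
         * least that much data space.
         */
        to = alloc_skb(hlen, gfp);
        if (!to)
                return NULL;

        /* Copies hlen bytes into to's head, then attaches the remaining
         * payload of `from` as frags (taking page references); returns a
         * negative errno on failure.
         */
        err = skb_zerocopy(to, from, from->len, hlen);
        if (err) {
                kfree_skb(to);
                return NULL;
        }
        return to;
}
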
4195 int from, to, merge, todo; in skb_shift() local
4209 from = 0; in skb_shift()
4211 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
4229 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
4239 from++; in skb_shift()
4244 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) in skb_shift()
4250 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { in skb_shift()
4254 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
4260 from++; in skb_shift()
4291 while (from < skb_shinfo(skb)->nr_frags) in skb_shift()
4292 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; in skb_shift()
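
skb_shift() moves paged data from one skb into the preceding one, which is how TCP collapses adjacent SACKed segments. The sketch below is a hypothetical wrapper (example_shift_into_prev()) showing the contract: it only works when the source payload is entirely in page frags, it returns the number of bytes actually shifted, and it adjusts len, data_len and truesize on both skbs itself.

#include <linux/skbuff.h>

/* Hypothetical helper: try to shift up to `len` bytes of paged data from
 * `skb` into `prev`. Returns true when `skb` was fully drained and its
 * owner may unlink and free it.
 */
static bool example_shift_into_prev(struct sk_buff *prev, struct sk_buff *skb,
                                    int len)
{
        /* skb_shift() returns 0 without doing anything if skb still has
         * linear data (skb_headlen() != 0) or zerocopy pages are pending.
         */
        int shifted = skb_shift(prev, skb, len);

        return shifted && skb->len == 0;
}
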
4320 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, in skb_prepare_seq_read() argument
4323 st->lower_offset = from; in skb_prepare_seq_read()
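
skb_prepare_seq_read() sets up the sequential-read state that lets a caller walk an skb's payload chunk by chunk (linear data, mapped frags, frag-list skbs) without linearizing it; skb_seq_read() then hands back one contiguous block per call. A minimal consumer sketch; example_sum_bytes() and its byte-summing body are only illustrative.

#include <linux/skbuff.h>
#include <linux/types.h>

/* Hypothetical consumer: fold every byte in [from, to) of an skb without
 * copying or linearizing the payload.
 */
static u32 example_sum_bytes(struct sk_buff *skb, unsigned int from,
                             unsigned int to)
{
        struct skb_seq_state st;
        unsigned int consumed = 0, len, i;
        const u8 *data;
        u32 sum = 0;

        skb_prepare_seq_read(skb, from, to, &st);

        /* Each call returns the next contiguous chunk and its length;
         * 0 means the range is exhausted and the state was cleaned up.
         * A caller that stops early must call skb_abort_seq_read(&st).
         */
        while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
                for (i = 0; i < len; i++)
                        sum += data[i];
                consumed += len;
        }

        return sum;
}
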
4519 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, in skb_find_text() argument
4531 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); in skb_find_text()
4534 return (ret + patlen <= to - from ? ret : UINT_MAX); in skb_find_text()
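
skb_find_text() builds on the sequential reader above to run a textsearch algorithm over a payload range, returning the match offset relative to `from`, or UINT_MAX when there is no match. A hedged sketch; the helper name, the "kmp" algorithm choice and the pattern are illustrative.

#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/textsearch.h>
#include <linux/err.h>

/* Hypothetical helper: look for a fixed byte pattern in [from, to) of an
 * skb without linearizing it.
 */
static unsigned int example_find_pattern(struct sk_buff *skb,
                                         unsigned int from, unsigned int to)
{
        static const char pattern[] = "HTTP/1.1";       /* illustrative */
        struct ts_config *conf;
        unsigned int off;

        conf = textsearch_prepare("kmp", pattern, sizeof(pattern) - 1,
                                  GFP_KERNEL, TS_AUTOLOAD);
        if (IS_ERR(conf))
                return UINT_MAX;

        off = skb_find_text(skb, from, to, conf);
        textsearch_destroy(conf);

        return off;     /* offset relative to `from`, or UINT_MAX */
}
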
6064 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, in skb_try_coalesce() argument
6068 int i, delta, len = from->len; in skb_try_coalesce()
6081 if (to->pp_recycle != from->pp_recycle) in skb_try_coalesce()
6084 if (skb_frags_readable(from) != skb_frags_readable(to)) in skb_try_coalesce()
6087 if (len <= skb_tailroom(to) && skb_frags_readable(from)) { in skb_try_coalesce()
6089 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len)); in skb_try_coalesce()
6095 from_shinfo = skb_shinfo(from); in skb_try_coalesce()
6098 if (skb_zcopy(to) || skb_zcopy(from)) in skb_try_coalesce()
6101 if (skb_headlen(from) != 0) { in skb_try_coalesce()
6109 if (skb_head_is_locked(from)) in skb_try_coalesce()
6112 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); in skb_try_coalesce()
6114 page = virt_to_head_page(from->head); in skb_try_coalesce()
6115 offset = from->data - (unsigned char *)page_address(page); in skb_try_coalesce()
6118 page, offset, skb_headlen(from)); in skb_try_coalesce()
6125 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from)); in skb_try_coalesce()
6135 if (!skb_cloned(from)) in skb_try_coalesce()
6141 if (skb_pp_frag_ref(from)) { in skb_try_coalesce()
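
skb_try_coalesce() merges `from`'s payload into `to` when possible, either by copying into tailroom or by stealing pages, reporting through *fragstolen and *delta_truesize what happened so the caller can adjust memory accounting and free the leftover metadata. The usual follow-up, sketched below with a hypothetical wrapper, is kfree_skb_partial().

#include <linux/skbuff.h>

/* Hypothetical helper: try to merge `from` into `to`; on success the
 * caller only has to charge `*delta_truesize` to its memory accounting.
 */
static bool example_coalesce(struct sk_buff *to, struct sk_buff *from,
                             int *delta_truesize)
{
        bool fragstolen;

        if (!skb_try_coalesce(to, from, &fragstolen, delta_truesize))
                return false;

        /* `from`'s data now belongs to `to` (or was copied); free the
         * leftover sk_buff, keeping its head if it was grafted into `to`.
         */
        kfree_skb_partial(from, fragstolen);
        return true;
}
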