Lines matching refs:offset in net/core/skbuff.c
(each entry: file line number, matching source line, enclosing function; "local" marks a local variable, "argument" a function parameter)

132 	      offsetof(skb_frag_t, offset));
134 	      sizeof_field(skb_frag_t, offset));
2669 int offset = skb_headlen(skb); in ___pskb_trim() local
2679 if (offset >= len) in ___pskb_trim()
2683 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); in ___pskb_trim()
2686 offset = end; in ___pskb_trim()
2690 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); in ___pskb_trim()
2705 int end = offset + frag->len; in ___pskb_trim()
2721 offset = end; in ___pskb_trim()
2726 unlikely((err = pskb_trim(frag, len - offset)))) in ___pskb_trim()
2762 int offset = skb_checksum_start_offset(skb) + skb->csum_offset; in pskb_trim_rcsum_slow() local
2764 if (offset + sizeof(__sum16) > hdlen) in pskb_trim_rcsum_slow()
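
___pskb_trim() above walks the linear area, the page frags and the frag
list to shrink an skb; callers normally reach it through pskb_trim() or,
when skb->csum must stay valid, pskb_trim_rcsum(). A minimal sketch of
the usual caller pattern, trimming link-layer padding off an IPv4
packet (trim_to_ip_len is a hypothetical name; header validation is
abbreviated):

    #include <linux/skbuff.h>
    #include <net/ip.h>

    static int trim_to_ip_len(struct sk_buff *skb)
    {
            unsigned int ip_len = ntohs(ip_hdr(skb)->tot_len);

            /* drop trailing pad bytes, keeping skb->csum consistent */
            if (skb->len > ip_len && pskb_trim_rcsum(skb, ip_len))
                    return -ENOMEM;
            return 0;
    }
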
2940 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) in skb_copy_bits() argument
2946 if (offset > (int)skb->len - len) in skb_copy_bits()
2950 if ((copy = start - offset) > 0) { in skb_copy_bits()
2953 skb_copy_from_linear_data_offset(skb, offset, to, copy); in skb_copy_bits()
2956 offset += copy; in skb_copy_bits()
2967 WARN_ON(start > offset + len); in skb_copy_bits()
2970 if ((copy = end - offset) > 0) { in skb_copy_bits()
2979 skb_frag_off(f) + offset - start, in skb_copy_bits()
2988 offset += copy; in skb_copy_bits()
2997 WARN_ON(start > offset + len); in skb_copy_bits()
3000 if ((copy = end - offset) > 0) { in skb_copy_bits()
3003 if (skb_copy_bits(frag_iter, offset - start, to, copy)) in skb_copy_bits()
3007 offset += copy; in skb_copy_bits()
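
skb_copy_bits() is the general "read bytes out of an skb" primitive: it
gathers from the linear area, the page frags and the frag list, and
returns -EFAULT if fewer than len bytes exist past offset. A minimal
sketch (read_foo_header and struct foo_hdr are hypothetical):

    static int read_foo_header(const struct sk_buff *skb, int offset,
                               struct foo_hdr *out)
    {
            /* copies across linear data, frags and frag list alike */
            return skb_copy_bits(skb, offset, out, sizeof(*out));
    }
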
3031 unsigned int *offset, in linear_to_page() argument
3039 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset); in linear_to_page()
3041 memcpy(page_address(pfrag->page) + pfrag->offset, in linear_to_page()
3042 page_address(page) + *offset, *len); in linear_to_page()
3043 *offset = pfrag->offset; in linear_to_page()
3044 pfrag->offset += *len; in linear_to_page()
3051 unsigned int offset) in spd_can_coalesce() argument
3055 (spd->partial[spd->nr_pages - 1].offset + in spd_can_coalesce()
3056 spd->partial[spd->nr_pages - 1].len == offset); in spd_can_coalesce()
3063 unsigned int *len, unsigned int offset, bool linear, in spd_fill_page() argument
3070 page = linear_to_page(page, len, &offset, sk); in spd_fill_page()
3074 if (spd_can_coalesce(spd, page, offset)) { in spd_fill_page()
3081 spd->partial[spd->nr_pages].offset = offset; in spd_fill_page()
3125 unsigned int *offset, unsigned int *len, in __skb_splice_bits() argument
3139 offset, len, spd, in __skb_splice_bits()
3158 offset, len, spd, false, sk)) in __skb_splice_bits()
3163 if (*offset >= iter->len) { in __skb_splice_bits()
3164 *offset -= iter->len; in __skb_splice_bits()
3171 if (__skb_splice_bits(iter, pipe, offset, len, spd, sk)) in __skb_splice_bits()
3182 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, in skb_splice_bits() argument
3197 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); in skb_splice_bits()
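
skb_splice_bits() feeds skb data into a pipe without copying the page
frags. A sketch of the caller shape, loosely modeled on the
tcp_splice_read() path (splice_one_skb is a hypothetical name; the
caller guarantees offset < skb->len):

    static int splice_one_skb(struct sk_buff *skb, struct sock *sk,
                              unsigned int offset,
                              struct pipe_inode_info *pipe,
                              unsigned int desired)
    {
            /* splice at most 'desired' bytes of this skb, no copy */
            return skb_splice_bits(skb, sk, offset, pipe,
                                   min(desired, skb->len - offset), 0);
    }
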
3230 static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, in __skb_send_sock() argument
3242 while (offset < skb_headlen(skb) && len) { in __skb_send_sock()
3246 slen = min_t(int, len, skb_headlen(skb) - offset); in __skb_send_sock()
3247 kv.iov_base = skb->data + offset; in __skb_send_sock()
3260 offset += ret; in __skb_send_sock()
3269 offset -= skb_headlen(skb); in __skb_send_sock()
3275 if (offset < skb_frag_size(frag)) in __skb_send_sock()
3278 offset -= skb_frag_size(frag); in __skb_send_sock()
3284 slen = min_t(size_t, len, skb_frag_size(frag) - offset); in __skb_send_sock()
3296 skb_frag_off(frag) + offset); in __skb_send_sock()
3306 offset += ret; in __skb_send_sock()
3310 offset = 0; in __skb_send_sock()
3335 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, in skb_send_sock_locked() argument
3338 return __skb_send_sock(sk, skb, offset, len, sendmsg_locked, 0); in skb_send_sock_locked()
3343 int offset, int len, int flags) in skb_send_sock_locked_with_flags() argument
3345 return __skb_send_sock(sk, skb, offset, len, sendmsg_locked, flags); in skb_send_sock_locked_with_flags()
3350 int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len) in skb_send_sock() argument
3352 return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked, 0); in skb_send_sock()
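
The __skb_send_sock() loop above first sends the linear head as a
kvec, then the page frags by sendmsg, then recurses into the frag
list. A minimal caller sketch (forward_skb is a hypothetical name);
skb_send_sock_locked() is the variant for callers already holding
lock_sock(sk):

    static int forward_skb(struct sock *sk, struct sk_buff *skb)
    {
            /* returns bytes sent or a negative errno */
            return skb_send_sock(sk, skb, 0, skb->len);
    }
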
3367 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) in skb_store_bits() argument
3373 if (offset > (int)skb->len - len) in skb_store_bits()
3376 if ((copy = start - offset) > 0) { in skb_store_bits()
3379 skb_copy_to_linear_data_offset(skb, offset, from, copy); in skb_store_bits()
3382 offset += copy; in skb_store_bits()
3393 WARN_ON(start > offset + len); in skb_store_bits()
3396 if ((copy = end - offset) > 0) { in skb_store_bits()
3405 skb_frag_off(frag) + offset - start, in skb_store_bits()
3414 offset += copy; in skb_store_bits()
3423 WARN_ON(start > offset + len); in skb_store_bits()
3426 if ((copy = end - offset) > 0) { in skb_store_bits()
3429 if (skb_store_bits(frag_iter, offset - start, in skb_store_bits()
3434 offset += copy; in skb_store_bits()
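
skb_store_bits() is the write-side twin of skb_copy_bits(). Because it
writes into frag pages directly, the skb must be private to the caller
first; this sketch roughly mirrors what bpf_skb_store_bytes() does
(rewrite_bytes is a hypothetical name, checksum fixup omitted):

    static int rewrite_bytes(struct sk_buff *skb, int offset,
                             const void *val, int len)
    {
            /* unclone/pull so bytes up to offset+len are writable */
            int err = skb_ensure_writable(skb, offset + len);

            return err ? err : skb_store_bits(skb, offset, val, len);
    }
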
3448 __wsum skb_checksum(const struct sk_buff *skb, int offset, int len, __wsum csum) in skb_checksum() argument
3451 int i, copy = start - offset; in skb_checksum()
3459 csum = csum_partial(skb->data + offset, copy, csum); in skb_checksum()
3462 offset += copy; in skb_checksum()
3473 WARN_ON(start > offset + len); in skb_checksum()
3476 if ((copy = end - offset) > 0) { in skb_checksum()
3486 skb_frag_off(frag) + offset - start, in skb_checksum()
3497 offset += copy; in skb_checksum()
3505 WARN_ON(start > offset + len); in skb_checksum()
3508 if ((copy = end - offset) > 0) { in skb_checksum()
3512 csum2 = skb_checksum(frag_iter, offset - start, copy, in skb_checksum()
3517 offset += copy; in skb_checksum()
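
skb_checksum() folds an skb range into the 32-bit one's-complement sum
used by IP-family checksums. A sketch of full-packet verification in
the style of the UDP receive path (csum_ok is a hypothetical name;
'pseudo' is the precomputed pseudo-header sum):

    #include <net/checksum.h>

    static bool csum_ok(const struct sk_buff *skb, __wsum pseudo)
    {
            /* a valid packet sums (with pseudo-header) to 0xffff */
            return !csum_fold(skb_checksum(skb, 0, skb->len, pseudo));
    }
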
3530 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, in skb_copy_and_csum_bits() argument
3534 int i, copy = start - offset; in skb_copy_and_csum_bits()
3543 csum = csum_partial_copy_nocheck(skb->data + offset, to, in skb_copy_and_csum_bits()
3547 offset += copy; in skb_copy_and_csum_bits()
3558 WARN_ON(start > offset + len); in skb_copy_and_csum_bits()
3561 if ((copy = end - offset) > 0) { in skb_copy_and_csum_bits()
3572 skb_frag_off(frag) + offset - start, in skb_copy_and_csum_bits()
3585 offset += copy; in skb_copy_and_csum_bits()
3595 WARN_ON(start > offset + len); in skb_copy_and_csum_bits()
3598 if ((copy = end - offset) > 0) { in skb_copy_and_csum_bits()
3602 offset - start, in skb_copy_and_csum_bits()
3607 offset += copy; in skb_copy_and_csum_bits()
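
skb_copy_and_csum_bits() combines the copy and the checksum in one
pass over the same walk. A sketch in the style of icmp_glue_bits(),
which uses it to build ICMP error replies (glue_chunk is a
hypothetical name; 'odd' is the byte parity of the destination offset,
as csum_block_add() requires):

    static int glue_chunk(const struct sk_buff *from, int from_off,
                          struct sk_buff *reply, u8 *to, int len, int odd)
    {
            __wsum csum = skb_copy_and_csum_bits(from, from_off, to, len);

            /* fold this chunk's sum into the reply's running csum */
            reply->csum = csum_block_add(reply->csum, csum, odd);
            return 0;
    }
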
3619 u32 skb_crc32c(const struct sk_buff *skb, int offset, int len, u32 crc) in skb_crc32c() argument
3622 int i, copy = start - offset; in skb_crc32c()
3627 crc = crc32c(crc, skb->data + offset, copy); in skb_crc32c()
3631 offset += copy; in skb_crc32c()
3641 WARN_ON(start > offset + len); in skb_crc32c()
3644 copy = end - offset; in skb_crc32c()
3652 skb_frag_off(frag) + offset - start, in skb_crc32c()
3661 offset += copy; in skb_crc32c()
3669 WARN_ON(start > offset + len); in skb_crc32c()
3672 copy = end - offset; in skb_crc32c()
3675 crc = skb_crc32c(frag_iter, offset - start, copy, crc); in skb_crc32c()
3679 offset += copy; in skb_crc32c()
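
skb_crc32c() walks the same three areas but accumulates a CRC32c, the
checksum SCTP uses. A sketch of the SCTP-style computation (seed with
~0, invert, store little-endian), roughly what sctp_compute_cksum()
boils down to:

    static __le32 sctp_style_cksum(const struct sk_buff *skb,
                                   unsigned int offset)
    {
            return cpu_to_le32(~skb_crc32c(skb, offset,
                                           skb->len - offset, ~0));
    }
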
3801 unsigned int offset; in skb_zerocopy() local
3818 offset = from->data - (unsigned char *)page_address(page); in skb_zerocopy()
3820 offset, plen); in skb_zerocopy()
4472 int skb_copy_seq_read(struct skb_seq_state *st, int offset, void *to, int len) in skb_copy_seq_read() argument
4478 sqlen = skb_seq_read(offset, &data, st); in skb_copy_seq_read()
4487 offset += sqlen; in skb_copy_seq_read()
4495 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text, in skb_ts_get_next_block() argument
4499 return skb_seq_read(offset, text, TS_SKB_CB(state)); in skb_ts_get_next_block()
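
skb_seq_read() underlies both skb_copy_seq_read() and the textsearch
hook above: it hands back the skb payload as a series of mapped
chunks. A sketch of the iteration pattern (walk_payload and the 0x00
scan are illustrative only):

    static void walk_payload(struct sk_buff *skb)
    {
            struct skb_seq_state st;
            unsigned int consumed = 0, chunk;
            const u8 *data;

            skb_prepare_seq_read(skb, 0, skb->len, &st);
            while ((chunk = skb_seq_read(consumed, &data, &st)) != 0) {
                    if (memchr(data, 0x00, chunk)) {
                            /* stopping early: unmap via abort */
                            skb_abort_seq_read(&st);
                            return;
                    }
                    consumed += chunk;
            }
    }
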
4539 int offset, size_t size, size_t max_frags) in skb_append_pagefrags() argument
4543 if (skb_can_coalesce(skb, i, page, offset)) { in skb_append_pagefrags()
4548 skb_fill_page_desc_noacc(skb, i, page, offset, size); in skb_append_pagefrags()
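
skb_append_pagefrags() attaches caller-provided page data to an skb as
a fragment, coalescing with the last frag when the ranges touch. A
sketch (attach_page is a hypothetical name; the length/truesize
accounting after a successful append is the caller's job, as in
skb_splice_from_iter()):

    static int attach_page(struct sk_buff *skb, struct page *page,
                           int offset, size_t size)
    {
            int err = skb_append_pagefrags(skb, page, offset, size,
                                           MAX_SKB_FRAGS);
            if (err)
                    return err;     /* -EMSGSIZE: frag table full */

            skb->len += size;
            skb->data_len += size;
            skb->truesize += size;
            return 0;
    }
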
4593 unsigned int offset) in skb_segment_list() argument
4603 skb_push(skb, -skb_network_offset(skb) + offset); in skb_segment_list()
4643 skb_push(nskb, -skb_network_offset(nskb) + offset); in skb_segment_list()
4653 offset + tnl_hlen); in skb_segment_list()
4700 unsigned int offset = doffset; in skb_segment() local
4805 len = head_skb->len - offset; in skb_segment()
4810 hsize = skb_headlen(head_skb) - offset; in skb_segment()
4826 while (pos < offset + len) { in skb_segment()
4830 if (pos + size > offset + len) in skb_segment()
4894 skb_copy_and_csum_bits(head_skb, offset, in skb_segment()
4901 if (skb_copy_bits(head_skb, offset, skb_put(nskb, len), len)) in skb_segment()
4909 skb_copy_from_linear_data_offset(head_skb, offset, in skb_segment()
4918 while (pos < offset + len) { in skb_segment()
4955 if (pos < offset) { in skb_segment()
4956 skb_frag_off_add(nskb_frag, offset - pos); in skb_segment()
4957 skb_frag_size_sub(nskb_frag, offset - pos); in skb_segment()
4962 if (pos + size <= offset + len) { in skb_segment()
4967 skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); in skb_segment()
4993 } while ((offset += len) < head_skb->len); in skb_segment()
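
skb_segment() is the software-GSO workhorse: it slices one oversized
skb into MSS-sized segments, sharing frag pages where it can. A sketch
of the surrounding .gso_segment callback shape (foo/foohdr are
hypothetical; real callbacks also fix up each segment's headers
afterwards):

    static struct sk_buff *foo_gso_segment(struct sk_buff *skb,
                                           netdev_features_t features)
    {
            if (!pskb_may_pull(skb, sizeof(struct foohdr)))
                    return ERR_PTR(-EINVAL);

            /* doffset in skb_segment() covers everything pulled here */
            __skb_pull(skb, sizeof(struct foohdr));
            return skb_segment(skb, features);
    }
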
5133 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, in __skb_to_sgvec() argument
5137 int i, copy = start - offset; in __skb_to_sgvec()
5147 sg_set_buf(sg, skb->data + offset, copy); in __skb_to_sgvec()
5151 offset += copy; in __skb_to_sgvec()
5157 WARN_ON(start > offset + len); in __skb_to_sgvec()
5160 if ((copy = end - offset) > 0) { in __skb_to_sgvec()
5168 skb_frag_off(frag) + offset - start); in __skb_to_sgvec()
5172 offset += copy; in __skb_to_sgvec()
5180 WARN_ON(start > offset + len); in __skb_to_sgvec()
5183 if ((copy = end - offset) > 0) { in __skb_to_sgvec()
5189 ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start, in __skb_to_sgvec()
5196 offset += copy; in __skb_to_sgvec()
5216 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) in skb_to_sgvec() argument
5218 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); in skb_to_sgvec()
5249 int offset, int len) in skb_to_sgvec_nomark() argument
5251 return __skb_to_sgvec(skb, sg, offset, len, 0); in skb_to_sgvec_nomark()
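
skb_to_sgvec() maps an skb range into a scatterlist, typically so the
crypto layer can operate on packet data in place; IPsec ESP is the
classic user. A sketch (map_for_crypto is a hypothetical name; nents
must cover the head plus all frags, which ESP sizes via
skb_cow_data()):

    #include <linux/scatterlist.h>

    static int map_for_crypto(struct sk_buff *skb, struct scatterlist *sg,
                              int nents, int offset, int len)
    {
            sg_init_table(sg, nents);
            /* returns entries used, or a negative errno on overflow */
            return skb_to_sgvec(skb, sg, offset, len);
    }
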
6011 unsigned int offset = skb_transport_offset(skb); in skb_checksum_trimmed() local
6018 if (!pskb_may_pull(skb_chk, offset)) in skb_checksum_trimmed()
6021 skb_pull_rcsum(skb_chk, offset); in skb_checksum_trimmed()
6023 skb_push_rcsum(skb_chk, offset); in skb_checksum_trimmed()
6103 unsigned int offset; in skb_try_coalesce() local
6115 offset = from->data - (unsigned char *)page_address(page); in skb_try_coalesce()
6118 page, offset, skb_headlen(from)); in skb_try_coalesce()
6301 int offset = skb->data - skb_mac_header(skb); in __skb_vlan_pop() local
6304 if (WARN_ONCE(offset, in __skb_vlan_pop()
6306 offset)) { in __skb_vlan_pop()
6368 int offset = skb->data - skb_mac_header(skb); in skb_vlan_push() local
6371 if (WARN_ONCE(offset, in skb_vlan_push()
6373 offset)) { in skb_vlan_push()
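
Both VLAN helpers insist that skb->data sits at the mac header, which
is what the WARN_ONCE(offset, ...) lines above enforce. A sketch of
retagging in the style of the TC/OVS vlan actions (retag is a
hypothetical name):

    static int retag(struct sk_buff *skb, u16 new_vid)
    {
            int err = skb_vlan_pop(skb);

            if (err)
                    return err;
            return skb_vlan_push(skb, htons(ETH_P_8021Q), new_vid);
    }
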
6962 return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE); in skb_ext_get_ptr()
6979 memset(new->offset, 0, sizeof(new->offset)); in __skb_ext_alloc()
7040 ext->offset[id] = newoff; in __skb_ext_set()
7086 new->offset[id] = newoff; in skb_ext_add()
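
ext->offset[id] stores each extension's location in units of
SKB_EXT_ALIGN_VALUE, which is what skb_ext_get_ptr() above decodes. A
sketch of typical use via skb_ext_add() (mark_chain is a hypothetical
name; TC_SKB_EXT requires CONFIG_NET_TC_SKB_EXT):

    static void mark_chain(struct sk_buff *skb, u32 chain)
    {
            struct tc_skb_ext *ext = skb_ext_add(skb, TC_SKB_EXT);

            if (ext)        /* NULL on allocation failure */
                    ext->chain = chain;
    }
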
7220 size_t offset, size_t len) in skb_splice_csum_page() argument
7226 csum = csum_partial(kaddr + offset, len, 0); in skb_splice_csum_page()