Lines matching refs:skb, grouped by containing function. Each entry keeps its source line number; [argument] / [local] mark the line where skb is declared in that function.

In skb_gro_receive():
   94  int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)   [argument]
   96  struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
   97  unsigned int offset = skb_gro_offset(skb);
   98  unsigned int headlen = skb_headlen(skb);
   99  unsigned int len = skb_gro_len(skb);
  111  if (p->pp_recycle != skb->pp_recycle)
  115  NAPI_GRO_CB(skb)->flush))
  119  if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP ||
  126  segs = NAPI_GRO_CB(skb)->count;
  153  new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
  154  delta_truesize = skb->truesize - new_truesize;
  156  skb->truesize = new_truesize;
  157  skb->len -= skb->data_len;
  158  skb->data_len = 0;
  160  NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
  162  } else if (skb->head_frag) {
  165  struct page *page = virt_to_head_page(skb->head);
  172  first_offset = skb->data -
  184  delta_truesize = skb->truesize - new_truesize;
  185  skb->truesize = new_truesize;
  186  NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
  192  skb->destructor = NULL;
  193  skb->sk = NULL;
  194  delta_truesize = skb->truesize;
  200  skb->data_len -= eat;
  201  skb->len -= eat;
  205  __skb_pull(skb, offset);
  208  skb_shinfo(p)->frag_list = skb;
  210  NAPI_GRO_CB(p)->last->next = skb;
  211  NAPI_GRO_CB(p)->last = skb;
  212  __skb_header_release(skb);
  225  NAPI_GRO_CB(skb)->same_flow = 1;
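
Lines 153-160 are the frag-donation path: skb's page fragments are grafted onto p, skb is shrunk to just its linear buffer, and the memory it no longer owns is reported back as delta_truesize. A minimal userspace sketch of that re-accounting follows; the struct fields and the TOY_TRUESIZE() macro are stand-ins (the real SKB_TRUESIZE() adds the sk_buff and shared-info overhead), not kernel API:

    #include <stdio.h>

    /* Stand-in for SKB_TRUESIZE(): buffer size plus a fixed metadata
     * overhead (toy value; the real macro adds struct sk_buff and the
     * shared info). */
    #define TOY_TRUESIZE(size) ((size) + 256u)

    int main(void)
    {
        unsigned int end_offset = 192;    /* linear buffer skb keeps    */
        unsigned int truesize   = 4096;   /* what skb was charged       */
        unsigned int len = 1648, data_len = 1456; /* 192 linear + frags */

        /* Lines 153-158: the frags were donated to p, so skb now only
         * owns its linear area; recompute what it should be charged. */
        unsigned int new_truesize   = TOY_TRUESIZE(end_offset);
        unsigned int delta_truesize = truesize - new_truesize;

        truesize = new_truesize;
        len     -= data_len;              /* skb->len -= skb->data_len */
        data_len = 0;

        printf("skb: len=%u truesize=%u, credited back %u bytes\n",
               len, truesize, delta_truesize);
        return 0;
    }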

In skb_gro_receive_list():
  229  int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)   [argument]
  231  if (unlikely(p->len + skb->len >= 65536))
  235  skb_shinfo(p)->frag_list = skb;
  237  NAPI_GRO_CB(p)->last->next = skb;
  239  skb_pull(skb, skb_gro_offset(skb));
  241  NAPI_GRO_CB(p)->last = skb;
  243  p->data_len += skb->len;
  246  skb->destructor = NULL;
  247  skb->sk = NULL;
  248  p->truesize += skb->truesize;
  249  p->len += skb->len;
  251  NAPI_GRO_CB(skb)->same_flow = 1;
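
This variant chains whole packets onto p's frag_list: the check at line 231 keeps the merged packet under the 64 KiB limit of the 16-bit IP total-length field, and lines 243-249 charge p for skb's bytes and memory. A toy model of that bookkeeping (hypothetical struct, not kernel API):

    #include <stdio.h>

    struct toy_skb {
        unsigned int len;        /* total bytes in the packet      */
        unsigned int data_len;   /* bytes outside the linear area  */
        unsigned int truesize;   /* memory charged to the packet   */
        struct toy_skb *next;
    };

    static int toy_receive_list(struct toy_skb *p, struct toy_skb *skb,
                                struct toy_skb **last)
    {
        if (p->len + skb->len >= 65536)
            return -1;           /* -E2BIG: IP total length is 16 bits */

        (*last)->next = skb;     /* NAPI_GRO_CB(p)->last->next = skb   */
        *last = skb;             /* NAPI_GRO_CB(p)->last = skb         */

        p->data_len += skb->len; /* chained bytes are non-linear for p */
        p->truesize += skb->truesize;
        p->len      += skb->len;
        return 0;
    }

    int main(void)
    {
        struct toy_skb p = { .len = 1452, .truesize = 2304 };
        struct toy_skb s = { .len = 1452, .truesize = 2304 };
        struct toy_skb *last = &p;

        if (!toy_receive_list(&p, &s, &last))
            printf("p: len=%u data_len=%u truesize=%u\n",
                   p.len, p.data_len, p.truesize); /* 2904 1452 4608 */
        return 0;
    }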

In napi_gro_complete():
  257  static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)   [argument]
  261  __be16 type = skb->protocol;
  264  BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
  266  if (NAPI_GRO_CB(skb)->count == 1) {
  267  skb_shinfo(skb)->gso_size = 0;
  278  skb, 0);
  285  kfree_skb(skb);
  290  gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);

In __napi_gro_flush_chain():
  297  struct sk_buff *skb, *p;   [local]
  299  list_for_each_entry_safe_reverse(skb, p, head, list) {
  300  if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
  302  skb_list_del_init(skb);
  303  napi_gro_complete(napi, skb);
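
The age test at line 300 is the "flush only old packets" policy. The match list hides the branch body, so the sketch below assumes the conventional reading: the chain is walked oldest packet first (which the _reverse iteration achieves, since new packets are added at the list head), and flushing stops at the first packet whose age equals the current jiffies, i.e. one batched during the tick still in progress:

    #include <stdio.h>

    /* Toy model (hypothetical struct) of the flush_old cutoff. */
    struct toy_pkt { int id; unsigned long age; };

    int main(void)
    {
        unsigned long jiffies = 1005;          /* pretend current tick */
        struct toy_pkt chain[] = { {1, 1003}, {2, 1004}, {3, 1005} };
        int flush_old = 1;

        for (int i = 0; i < 3; i++) {
            if (flush_old && chain[i].age == jiffies)
                break;            /* everything after this is fresh too */
            printf("pkt %d: flushed up the stack\n", chain[i].id);
        }
        return 0;
    }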

In gro_list_prepare_tc_ext():
  328  static unsigned long gro_list_prepare_tc_ext(const struct sk_buff *skb,   [argument]
  336  skb_ext = skb_ext_find(skb, TC_SKB_EXT);

In gro_list_prepare():
  347  const struct sk_buff *skb)   [argument]
  349  unsigned int maclen = skb->dev->hard_header_len;
  350  u32 hash = skb_get_hash_raw(skb);
  361  diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
  362  diffs |= p->vlan_all ^ skb->vlan_all;
  363  diffs |= skb_metadata_differs(p, skb);
  366  skb_mac_header(skb));
  369  skb_mac_header(skb),
  377  if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
  378  diffs |= p->sk != skb->sk;
  379  diffs |= skb_metadata_dst_cmp(p, skb);
  380  diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);
  382  diffs |= gro_list_prepare_tc_ext(skb, p, diffs);
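
Lines 361-363 show the branch-free comparison idiom: every field difference is XORed or OR-ed into a single diffs word, so one test at the end decides whether a held packet can belong to the same flow as skb. A standalone sketch of the idiom, using a hypothetical toy flow key rather than the real sk_buff fields:

    #include <stdio.h>
    #include <string.h>

    struct toy_flow {
        const void *dev;      /* device identity compared by pointer */
        unsigned short vlan;  /* stand-in for vlan_all               */
        unsigned char mac[6];
    };

    static unsigned long toy_flow_diffs(const struct toy_flow *p,
                                        const struct toy_flow *q)
    {
        unsigned long diffs;

        diffs  = (unsigned long)p->dev ^ (unsigned long)q->dev;
        diffs |= p->vlan ^ q->vlan;
        diffs |= !!memcmp(p->mac, q->mac, sizeof(p->mac));
        return diffs;         /* 0 means "possibly the same flow" */
    }

    int main(void)
    {
        int dev_a, dev_b;
        struct toy_flow p = { &dev_a, 100, {1,2,3,4,5,6} };
        struct toy_flow q = { &dev_a, 100, {1,2,3,4,5,6} };
        struct toy_flow r = { &dev_b, 100, {1,2,3,4,5,6} };

        printf("p vs q: %s\n", toy_flow_diffs(&p, &q) ? "differ" : "same");
        printf("p vs r: %s\n", toy_flow_diffs(&p, &r) ? "differ" : "same");
        return 0;
    }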

In skb_gro_reset_offset():
  389  static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)   [argument]
  395  NAPI_GRO_CB(skb)->network_offset = 0;
  396  NAPI_GRO_CB(skb)->data_offset = 0;
  397  headlen = skb_headlen(skb);
  398  NAPI_GRO_CB(skb)->frag0 = skb->data;
  399  NAPI_GRO_CB(skb)->frag0_len = headlen;
  403  pinfo = skb_shinfo(skb);
  409  NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
  410  NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
  412  skb->end - skb->tail);

In gro_pull_from_frag0():
  416  static void gro_pull_from_frag0(struct sk_buff *skb, int grow)   [argument]
  418  struct skb_shared_info *pinfo = skb_shinfo(skb);
  420  BUG_ON(skb->end - skb->tail < grow);
  422  memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
  424  skb->data_len -= grow;
  425  skb->tail += grow;
  431  skb_frag_unref(skb, 0);

In gro_try_pull_from_frag0():
  437  static void gro_try_pull_from_frag0(struct sk_buff *skb)   [argument]
  439  int grow = skb_gro_offset(skb) - skb_headlen(skb);
  442  gro_pull_from_frag0(skb, grow);
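
gro_try_pull_from_frag0() computes how far the protocol handlers advanced past the linear area, and gro_pull_from_frag0() memcpy's that many header bytes from the first page fragment into the linear buffer (line 422), fixing up tail and data_len. A toy of the copy and bookkeeping; the struct is hypothetical, and the real code additionally shifts the fragment array and may drop the emptied fragment (line 431):

    #include <stdio.h>
    #include <string.h>

    struct toy_skb {
        unsigned char linear[64];
        unsigned int tail;        /* end of valid bytes in linear[]  */
        unsigned int data_len;    /* bytes still sitting in the frag */
        const unsigned char *frag0;
        unsigned int frag0_len;
    };

    static void toy_pull_from_frag0(struct toy_skb *skb, unsigned int grow)
    {
        /* the kernel has BUG_ON(skb->end - skb->tail < grow) here */
        memcpy(skb->linear + skb->tail, skb->frag0, grow);
        skb->tail      += grow;
        skb->data_len  -= grow;
        skb->frag0     += grow;   /* simplified fragment fixup */
        skb->frag0_len -= grow;
    }

    int main(void)
    {
        unsigned char frag[] = "ETH+IP+TCP headers.payload";
        struct toy_skb skb = { .frag0 = frag,
                               .frag0_len = sizeof(frag) - 1,
                               .data_len  = sizeof(frag) - 1 };

        toy_pull_from_frag0(&skb, 18);   /* pull the parsed headers */
        printf("linear holds \"%.18s\" (tail=%u, data_len=%u)\n",
               skb.linear, skb.tail, skb.data_len);
        return 0;
    }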

In dev_gro_receive():
  464  static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)   [argument]
  466  u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
  470  __be16 type = skb->protocol;
  475  if (netif_elide_gro(skb->dev))
  478  gro_list_prepare(&gro_list->list, skb);
  489  skb_set_network_header(skb, skb_gro_offset(skb));
  490  skb_reset_mac_len(skb);
  494  *(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
  495  NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
  496  NAPI_GRO_CB(skb)->count = 1;
  497  if (unlikely(skb_is_gso(skb))) {
  498  NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
  500  if (!skb_is_gso_tcp(skb) ||
  501  (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
  502  NAPI_GRO_CB(skb)->flush = 1;
  506  switch (skb->ip_summed) {
  508  NAPI_GRO_CB(skb)->csum = skb->csum;
  509  NAPI_GRO_CB(skb)->csum_valid = 1;
  512  NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
  518  &gro_list->list, skb);
  527  same_flow = NAPI_GRO_CB(skb)->same_flow;
  528  ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
  539  if (NAPI_GRO_CB(skb)->flush)
  548  gro_try_pull_from_frag0(skb);
  549  NAPI_GRO_CB(skb)->age = jiffies;
  550  NAPI_GRO_CB(skb)->last = skb;
  551  if (!skb_is_gso(skb))
  552  skb_shinfo(skb)->gso_size = skb_gro_len(skb);
  553  list_add(&skb->list, &gro_list->list);
  567  gro_try_pull_from_frag0(skb);
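
Line 466 picks the per-NAPI hash bucket by masking the raw flow hash with GRO_HASH_BUCKETS - 1, which is equivalent to a modulo only because the bucket count is a power of two. A small demonstration of the mask, using a toy constant in place of GRO_HASH_BUCKETS:

    #include <stdio.h>

    #define TOY_GRO_HASH_BUCKETS 8u   /* must stay a power of two */

    int main(void)
    {
        unsigned int hashes[] = { 0x12345678u, 0xdeadbeefu, 0x00000007u };

        for (int i = 0; i < 3; i++) {
            unsigned int bucket = hashes[i] & (TOY_GRO_HASH_BUCKETS - 1);
            printf("hash 0x%08x -> bucket %u (same as %% %u: %u)\n",
                   hashes[i], bucket, TOY_GRO_HASH_BUCKETS,
                   hashes[i] % TOY_GRO_HASH_BUCKETS);
        }
        return 0;
    }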

In napi_skb_finish():
  600  struct sk_buff *skb,   [argument]
  605  gro_normal_one(napi, skb, 1);
  609  if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
  610  napi_skb_free_stolen_head(skb);
  611  else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
  612  __kfree_skb(skb);
  614  __napi_kfree_skb(skb, SKB_CONSUMED);

In napi_gro_receive():
  626  gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)   [argument]
  630  skb_mark_napi_id(skb, napi);
  631  trace_napi_gro_receive_entry(skb);
  633  skb_gro_reset_offset(skb, 0);
  635  ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));

In napi_reuse_skb():
  642  static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)   [argument]
  644  if (unlikely(skb->pfmemalloc)) {
  645  consume_skb(skb);
  648  __skb_pull(skb, skb_headlen(skb));
  650  skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
  651  __vlan_hwaccel_clear_tag(skb);
  652  skb->dev = napi->dev;
  653  skb->skb_iif = 0;
  656  skb->pkt_type = PACKET_HOST;
  658  skb->encapsulation = 0;
  659  skb_shinfo(skb)->gso_type = 0;
  660  skb_shinfo(skb)->gso_size = 0;
  661  if (unlikely(skb->slow_gro)) {
  662  skb_orphan(skb);
  663  skb_ext_reset(skb);
  664  nf_reset_ct(skb);
  665  skb->slow_gro = 0;
  668  napi->skb = skb;

In napi_get_frags():
  673  struct sk_buff *skb = napi->skb;   [local]
  675  if (!skb) {
  676  skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
  677  if (skb) {
  678  napi->skb = skb;
  679  skb_mark_napi_id(skb, napi);
  682  return skb;

In napi_frags_finish():
  687  struct sk_buff *skb,   [argument]
  693  __skb_push(skb, ETH_HLEN);
  694  skb->protocol = eth_type_trans(skb, skb->dev);
  696  gro_normal_one(napi, skb, 1);
  700  if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
  701  napi_skb_free_stolen_head(skb);
  703  napi_reuse_skb(napi, skb);

In napi_frags_skb():
  720  struct sk_buff *skb = napi->skb;   [local]
  724  napi->skb = NULL;
  726  skb_reset_mac_header(skb);
  727  skb_gro_reset_offset(skb, hlen);
  729  if (unlikely(!skb_gro_may_pull(skb, hlen))) {
  730  eth = skb_gro_header_slow(skb, hlen, 0);
  734  napi_reuse_skb(napi, skb);
  738  eth = (const struct ethhdr *)skb->data;
  740  if (NAPI_GRO_CB(skb)->frag0 != skb->data)
  741  gro_pull_from_frag0(skb, hlen);
  743  NAPI_GRO_CB(skb)->frag0 += hlen;
  744  NAPI_GRO_CB(skb)->frag0_len -= hlen;
  746  __skb_pull(skb, hlen);
  753  skb->protocol = eth->h_proto;
  755  return skb;
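
napi_frags_skb() makes sure the Ethernet header is readable (pulling it out of frag0 or taking the slow header path), strips it from the GRO view, and records the EtherType in skb->protocol (line 753). A standalone sketch of that extraction, over a hypothetical frame buffer; the EtherType sits in the last two bytes of the 14-byte header, in network byte order:

    #include <stdio.h>
    #include <stdint.h>

    #define TOY_ETH_HLEN 14

    int main(void)
    {
        uint8_t frame[TOY_ETH_HLEN] = {
            /* dst MAC */ 0, 1, 2, 3, 4, 5,
            /* src MAC */ 6, 7, 8, 9, 10, 11,
            /* h_proto */ 0x08, 0x00,   /* ETH_P_IP on the wire */
        };
        uint16_t h_proto = (uint16_t)(frame[12] << 8 | frame[13]);

        printf("protocol = 0x%04x%s\n", h_proto,
               h_proto == 0x0800 ? " (IPv4)" : "");
        return 0;
    }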

In napi_gro_frags():
  761  struct sk_buff *skb = napi_frags_skb(napi);   [local]
  763  trace_napi_gro_frags_entry(skb);
  765  ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));

In __skb_gro_checksum_complete():
  775  __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)   [argument]
  780  wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
  783  sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
  786  if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
  787  !skb->csum_complete_sw)
  788  netdev_rx_csum_fault(skb->dev, skb);
  791  NAPI_GRO_CB(skb)->csum = wsum;
  792  NAPI_GRO_CB(skb)->csum_valid = 1;
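
Line 783 validates the packet by adding the freshly computed wsum to the checksum GRO has been tracking and folding the 32-bit accumulator down to 16 bits; a zero result means the packet checksums correctly. A userspace sketch of the same ones-complement arithmetic (RFC 1071 style) with toy helpers; the kernel's csum types and byte handling are arch- and endian-specific:

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    static uint32_t toy_csum_add(uint32_t csum, uint32_t addend)
    {
        uint32_t res = csum + addend;
        return res + (res < addend);       /* end-around carry */
    }

    static uint16_t toy_csum_fold(uint32_t csum)
    {
        csum = (csum & 0xffff) + (csum >> 16);
        csum = (csum & 0xffff) + (csum >> 16);
        return (uint16_t)~csum;
    }

    /* Sum a buffer as 16-bit words, the job skb_checksum() does over
     * the GRO region at line 780 (big-endian word assembly here). */
    static uint32_t toy_checksum(const uint8_t *data, size_t len)
    {
        uint32_t sum = 0;
        while (len > 1) {
            sum = toy_csum_add(sum, (uint32_t)(data[0] << 8 | data[1]));
            data += 2;
            len -= 2;
        }
        if (len)
            sum = toy_csum_add(sum, (uint32_t)data[0] << 8);
        return sum;
    }

    int main(void)
    {
        /* A checksum-correct packet sums to 0xffff, so the fold is 0. */
        uint8_t pkt[] = { 0x45, 0x00, 0xba, 0xff };
        printf("fold = 0x%04x\n",
               toy_csum_fold(toy_checksum(pkt, sizeof(pkt))));
        return 0;
    }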