Lines matching references to skb in net/ipv4/ip_input.c (the Linux IPv4 receive path). The number at the start of each line is its line number in that file; each match is annotated with its enclosing function, and "argument"/"local" marks where skb is a parameter or local variable.
148 bool ip_call_ra_chain(struct sk_buff *skb) in ip_call_ra_chain() argument
151 u8 protocol = ip_hdr(skb)->protocol; in ip_call_ra_chain()
153 struct net_device *dev = skb->dev; in ip_call_ra_chain()
165 if (ip_is_fragment(ip_hdr(skb))) { in ip_call_ra_chain()
166 if (ip_defrag(net, skb, IP_DEFRAG_CALL_RA_CHAIN)) in ip_call_ra_chain()
170 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); in ip_call_ra_chain()
179 raw_rcv(last, skb); in ip_call_ra_chain()
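
The matched lines above come from ip_call_ra_chain(), which hands packets carrying the IP Router Alert option to every matching raw socket on the namespace's ra_chain. A condensed reconstruction of its flow, based on the matched lines and the upstream ip_input.c; elided details may differ across kernel versions:

bool ip_call_ra_chain(struct sk_buff *skb)
{
        struct ip_ra_chain *ra;
        u8 protocol = ip_hdr(skb)->protocol;
        struct sock *last = NULL;
        struct net_device *dev = skb->dev;
        struct net *net = dev_net(dev);

        for (ra = rcu_dereference(net->ipv4.ra_chain); ra;
             ra = rcu_dereference(ra->next)) {
                struct sock *sk = ra->sk;

                /* Only sockets bound to this protocol (and, if set,
                 * to this ingress interface) get a copy.
                 */
                if (sk && inet_sk(sk)->inet_num == protocol &&
                    (!sk->sk_bound_dev_if ||
                     sk->sk_bound_dev_if == dev->ifindex)) {
                        if (ip_is_fragment(ip_hdr(skb))) {
                                if (ip_defrag(net, skb, IP_DEFRAG_CALL_RA_CHAIN))
                                        return true;
                        }
                        if (last) {
                                /* Clone for every listener but the last one,
                                 * which consumes the original skb below.
                                 */
                                struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

                                if (skb2)
                                        raw_rcv(last, skb2);
                        }
                        last = sk;
                }
        }

        if (last) {
                raw_rcv(last, skb);
                return true;
        }
        return false;
}
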
187 void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int protocol) in ip_protocol_deliver_rcu() argument
193 raw = raw_local_deliver(skb, protocol); in ip_protocol_deliver_rcu()
198 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { in ip_protocol_deliver_rcu()
199 kfree_skb(skb); in ip_protocol_deliver_rcu()
202 nf_reset_ct(skb); in ip_protocol_deliver_rcu()
205 skb); in ip_protocol_deliver_rcu()
213 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { in ip_protocol_deliver_rcu()
215 icmp_send(skb, ICMP_DEST_UNREACH, in ip_protocol_deliver_rcu()
218 kfree_skb(skb); in ip_protocol_deliver_rcu()
221 consume_skb(skb); in ip_protocol_deliver_rcu()
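
ip_protocol_deliver_rcu() is the L4 demultiplexer: it first offers the packet to raw sockets, then dispatches to the handler registered in inet_protos[] for the protocol number, enforcing the IPsec (xfrm) input policy on the way. If nobody claims the packet, it answers with an ICMP "protocol unreachable". A condensed reconstruction:

void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int protocol)
{
        const struct net_protocol *ipprot;
        int raw, ret;

resubmit:
        raw = raw_local_deliver(skb, protocol);  /* copy to raw sockets */

        ipprot = rcu_dereference(inet_protos[protocol]);
        if (ipprot) {
                if (!ipprot->no_policy) {
                        if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
                                kfree_skb(skb);  /* IPsec policy rejected it */
                                return;
                        }
                        nf_reset_ct(skb);
                }
                ret = INDIRECT_CALL_2(ipprot->handler, tcp_v4_rcv, udp_rcv,
                                      skb);
                if (ret < 0) {
                        /* handler asked to resubmit as another protocol */
                        protocol = -ret;
                        goto resubmit;
                }
                __IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
        } else {
                if (!raw) {
                        /* nobody wanted it: report protocol unreachable */
                        if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
                                __IP_INC_STATS(net, IPSTATS_MIB_INUNKNOWNPROTOS);
                                icmp_send(skb, ICMP_DEST_UNREACH,
                                          ICMP_PROT_UNREACH, 0);
                        }
                        kfree_skb(skb);
                } else {
                        /* a raw socket took it; not an error */
                        __IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
                        consume_skb(skb);
                }
        }
}
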
226 static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb) in ip_local_deliver_finish() argument
228 __skb_pull(skb, skb_network_header_len(skb)); in ip_local_deliver_finish()
231 ip_protocol_deliver_rcu(net, skb, ip_hdr(skb)->protocol); in ip_local_deliver_finish()
240 int ip_local_deliver(struct sk_buff *skb) in ip_local_deliver() argument
245 struct net *net = dev_net(skb->dev); in ip_local_deliver()
247 if (ip_is_fragment(ip_hdr(skb))) { in ip_local_deliver()
248 if (ip_defrag(net, skb, IP_DEFRAG_LOCAL_DELIVER)) in ip_local_deliver()
253 net, NULL, skb, skb->dev, NULL, in ip_local_deliver()
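
ip_local_deliver() reassembles fragments and then runs the packet through the NF_INET_LOCAL_IN netfilter hook; its continuation, ip_local_deliver_finish(), strips the IP header and hands off to the protocol demultiplexer above. Reconstructed from the matched lines:

static int ip_local_deliver_finish(struct net *net, struct sock *sk,
                                   struct sk_buff *skb)
{
        /* point skb->data at the transport header */
        __skb_pull(skb, skb_network_header_len(skb));

        rcu_read_lock();
        ip_protocol_deliver_rcu(net, skb, ip_hdr(skb)->protocol);
        rcu_read_unlock();

        return 0;
}

int ip_local_deliver(struct sk_buff *skb)
{
        struct net *net = dev_net(skb->dev);

        if (ip_is_fragment(ip_hdr(skb))) {
                /* ip_defrag() returns nonzero while still collecting
                 * fragments; it has taken ownership of the skb then.
                 */
                if (ip_defrag(net, skb, IP_DEFRAG_LOCAL_DELIVER))
                        return 0;
        }

        return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN,
                       net, NULL, skb, skb->dev, NULL,
                       ip_local_deliver_finish);
}
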
258 static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev) in ip_rcv_options() argument
270 if (skb_cow(skb, skb_headroom(skb))) { in ip_rcv_options()
275 iph = ip_hdr(skb); in ip_rcv_options()
276 opt = &(IPCB(skb)->opt); in ip_rcv_options()
279 if (ip_options_compile(dev_net(dev), opt, skb)) { in ip_rcv_options()
297 if (ip_options_rcv_srr(skb, dev)) in ip_rcv_options()
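(continued)

ip_rcv_options() handles packets whose header is longer than the bare 20 bytes: it makes the header writable with skb_cow(), parses the options with ip_options_compile(), and applies source-route processing. It returns true on any failure so the caller can drop the packet. A condensed sketch, with the source-routing policy check summarized in a comment:

static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
{
        struct ip_options *opt;
        const struct iphdr *iph;

        /* Option processing may rewrite the header, so take a private,
         * writable copy of it first.
         */
        if (skb_cow(skb, skb_headroom(skb))) {
                __IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INDISCARDS);
                goto drop;
        }

        iph = ip_hdr(skb);
        opt = &(IPCB(skb)->opt);
        opt->optlen = iph->ihl * 4 - sizeof(struct iphdr);

        if (ip_options_compile(dev_net(dev), opt, skb)) {
                __IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
                goto drop;
        }

        if (unlikely(opt->srr)) {
                /* elided: reject source routing unless the ingress
                 * device has accept_source_route enabled
                 */
                if (ip_options_rcv_srr(skb, dev))
                        goto drop;
        }

        return false;
drop:
        return true;
}
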
306 static bool ip_can_use_hint(const struct sk_buff *skb, const struct iphdr *iph, in ip_can_use_hint() argument
309 return hint && !skb_dst(skb) && ip_hdr(hint)->daddr == iph->daddr && in ip_can_use_hint()
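
The return expression of ip_can_use_hint() is truncated by the matcher; in the upstream source it continues with a TOS comparison, so a batch route hint is reusable only for a packet that has no dst attached yet and matches the hinted skb's daddr and tos. Reconstructed in full:

static bool ip_can_use_hint(const struct sk_buff *skb, const struct iphdr *iph,
                            const struct sk_buff *hint)
{
        return hint && !skb_dst(skb) && ip_hdr(hint)->daddr == iph->daddr &&
               ip_hdr(hint)->tos == iph->tos;
}
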
316 struct sk_buff *skb, struct net_device *dev, in ip_rcv_finish_core() argument
319 const struct iphdr *iph = ip_hdr(skb); in ip_rcv_finish_core()
320 int (*edemux)(struct sk_buff *skb); in ip_rcv_finish_core()
324 if (ip_can_use_hint(skb, iph, hint)) { in ip_rcv_finish_core()
325 err = ip_route_use_hint(skb, iph->daddr, iph->saddr, iph->tos, in ip_rcv_finish_core()
332 !skb_dst(skb) && in ip_rcv_finish_core()
333 !skb->sk && in ip_rcv_finish_core()
341 udp_v4_early_demux, skb); in ip_rcv_finish_core()
345 iph = ip_hdr(skb); in ip_rcv_finish_core()
353 if (!skb_valid_dst(skb)) { in ip_rcv_finish_core()
354 err = ip_route_input_noref(skb, iph->daddr, iph->saddr, in ip_rcv_finish_core()
361 if (unlikely(skb_dst(skb)->tclassid)) { in ip_rcv_finish_core()
363 u32 idx = skb_dst(skb)->tclassid; in ip_rcv_finish_core()
365 st[idx&0xFF].o_bytes += skb->len; in ip_rcv_finish_core()
367 st[(idx>>16)&0xFF].i_bytes += skb->len; in ip_rcv_finish_core()
371 if (iph->ihl > 5 && ip_rcv_options(skb, dev)) in ip_rcv_finish_core()
374 rt = skb_rtable(skb); in ip_rcv_finish_core()
376 __IP_UPD_PO_STATS(net, IPSTATS_MIB_INMCAST, skb->len); in ip_rcv_finish_core()
378 __IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len); in ip_rcv_finish_core()
379 } else if (skb->pkt_type == PACKET_BROADCAST || in ip_rcv_finish_core()
380 skb->pkt_type == PACKET_MULTICAST) { in ip_rcv_finish_core()
406 kfree_skb(skb); in ip_rcv_finish_core()
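
ip_rcv_finish_core() makes the routing decision: try the batch hint, then the protocol's early-demux hook, then a full ip_route_input_noref() lookup, followed by route-classid accounting, option processing, and multicast/broadcast counters. A condensed reconstruction with error paths and some conditionals summarized:

static int ip_rcv_finish_core(struct net *net, struct sock *sk,
                              struct sk_buff *skb, struct net_device *dev,
                              const struct sk_buff *hint)
{
        const struct iphdr *iph = ip_hdr(skb);
        int (*edemux)(struct sk_buff *skb);
        struct rtable *rt;
        int err;

        /* 1. Reuse the route computed for the previous packet in the
         *    batch when daddr/tos match.
         */
        if (ip_can_use_hint(skb, iph, hint)) {
                err = ip_route_use_hint(skb, iph->daddr, iph->saddr, iph->tos,
                                        dev, hint);
                if (unlikely(err))
                        goto drop_error;
        }

        /* 2. Early demux: let TCP/UDP look up the owning socket (and its
         *    cached dst) before routing.  Only for non-fragments that
         *    have neither a dst nor a socket attached yet.
         */
        if (net->ipv4.sysctl_ip_early_demux &&
            !skb_dst(skb) &&
            !skb->sk &&
            !ip_is_fragment(iph)) {
                const struct net_protocol *ipprot =
                        rcu_dereference(inet_protos[iph->protocol]);

                if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) {
                        err = INDIRECT_CALL_2(edemux, tcp_v4_early_demux,
                                              udp_v4_early_demux, skb);
                        if (unlikely(err))
                                goto drop_error;
                        iph = ip_hdr(skb); /* skb->head may have changed */
                }
        }

        /* 3. Full FIB lookup if nothing above attached a dst. */
        if (!skb_valid_dst(skb)) {
                err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
                                           iph->tos, dev);
                if (unlikely(err))
                        goto drop_error;
        }

#ifdef CONFIG_IP_ROUTE_CLASSID
        if (unlikely(skb_dst(skb)->tclassid)) {
                struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct);
                u32 idx = skb_dst(skb)->tclassid;

                st[idx & 0xFF].o_packets++;
                st[idx & 0xFF].o_bytes += skb->len;
                st[(idx >> 16) & 0xFF].i_packets++;
                st[(idx >> 16) & 0xFF].i_bytes += skb->len;
        }
#endif

        if (iph->ihl > 5 && ip_rcv_options(skb, dev))
                goto drop;

        rt = skb_rtable(skb);
        if (rt->rt_type == RTN_MULTICAST)
                __IP_UPD_PO_STATS(net, IPSTATS_MIB_INMCAST, skb->len);
        else if (rt->rt_type == RTN_BROADCAST)
                __IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len);
        else if (skb->pkt_type == PACKET_BROADCAST ||
                 skb->pkt_type == PACKET_MULTICAST) {
                /* elided: optionally drop unicast dst in an L2
                 * multicast/broadcast frame, per sysctl
                 */
        }

        return NET_RX_SUCCESS;

drop:
        kfree_skb(skb);
        return NET_RX_DROP;

drop_error:
        /* elided: count reverse-path-filter failures (-EXDEV) */
        goto drop;
}
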
415 static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) in ip_rcv_finish() argument
417 struct net_device *dev = skb->dev; in ip_rcv_finish()
423 skb = l3mdev_ip_rcv(skb); in ip_rcv_finish()
424 if (!skb) in ip_rcv_finish()
427 ret = ip_rcv_finish_core(net, sk, skb, dev, NULL); in ip_rcv_finish()
429 ret = dst_input(skb); in ip_rcv_finish()
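
ip_rcv_finish() is the PRE_ROUTING continuation for the single-skb path: it gives an L3 master device (VRF) a chance to take the skb, runs the core above, and on success invokes the route's input handler via dst_input(). Reconstructed:

static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        struct net_device *dev = skb->dev;
        int ret;

        /* If the ingress device is enslaved to an L3 master device,
         * the master's handler may consume or replace the skb.
         */
        skb = l3mdev_ip_rcv(skb);
        if (!skb)
                return NET_RX_SUCCESS;

        ret = ip_rcv_finish_core(net, sk, skb, dev, NULL);
        if (ret != NET_RX_DROP)
                ret = dst_input(skb); /* ip_local_deliver or ip_forward */
        return ret;
}
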
436 static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net) in ip_rcv_core() argument
444 if (skb->pkt_type == PACKET_OTHERHOST) in ip_rcv_core()
447 __IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len); in ip_rcv_core()
449 skb = skb_share_check(skb, GFP_ATOMIC); in ip_rcv_core()
450 if (!skb) { in ip_rcv_core()
455 if (!pskb_may_pull(skb, sizeof(struct iphdr))) in ip_rcv_core()
458 iph = ip_hdr(skb); in ip_rcv_core()
479 max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs)); in ip_rcv_core()
481 if (!pskb_may_pull(skb, iph->ihl*4)) in ip_rcv_core()
484 iph = ip_hdr(skb); in ip_rcv_core()
490 if (skb->len < len) { in ip_rcv_core()
500 if (pskb_trim_rcsum(skb, len)) { in ip_rcv_core()
505 iph = ip_hdr(skb); in ip_rcv_core()
506 skb->transport_header = skb->network_header + iph->ihl*4; in ip_rcv_core()
509 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); in ip_rcv_core()
510 IPCB(skb)->iif = skb->skb_iif; in ip_rcv_core()
513 if (!skb_sk_is_prefetched(skb)) in ip_rcv_core()
514 skb_orphan(skb); in ip_rcv_core()
516 return skb; in ip_rcv_core()
523 kfree_skb(skb); in ip_rcv_core()
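
ip_rcv_core() performs the RFC 1122 sanity checks shared by the single-skb and list paths: discard frames not addressed to us, unshare the buffer, validate version, IHL, checksum, and length, trim link-layer padding, set the transport header, and clear the control block. Condensed, with the ECN and statistics bookkeeping kept where it matches the lines above:

static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
{
        const struct iphdr *iph;
        u32 len;

        /* Frames for another host seen in promiscuous mode: drop. */
        if (skb->pkt_type == PACKET_OTHERHOST)
                goto drop;

        __IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len);

        /* Get a private copy if someone else holds a reference. */
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb) {
                __IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
                goto out;
        }

        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                goto inhdr_error;

        iph = ip_hdr(skb);
        if (iph->ihl < 5 || iph->version != 4)
                goto inhdr_error;

        /* per-ECN-codepoint counters; GSO packets count each segment */
        __IP_ADD_STATS(net,
                       IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
                       max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));

        if (!pskb_may_pull(skb, iph->ihl * 4))
                goto inhdr_error;
        iph = ip_hdr(skb); /* reload after pull */

        if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
                goto csum_error;

        len = ntohs(iph->tot_len);
        if (skb->len < len) {
                __IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
                goto drop;
        } else if (len < (iph->ihl * 4))
                goto inhdr_error;

        /* The link layer may have padded the frame; trim to tot_len. */
        if (pskb_trim_rcsum(skb, len)) {
                __IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
                goto drop;
        }

        iph = ip_hdr(skb);
        skb->transport_header = skb->network_header + iph->ihl * 4;

        /* Remove any debris in the socket control block. */
        memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
        IPCB(skb)->iif = skb->skb_iif;

        /* Must drop the socket now because of tproxy. */
        if (!skb_sk_is_prefetched(skb))
                skb_orphan(skb);

        return skb;

csum_error:
        __IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
inhdr_error:
        __IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
drop:
        kfree_skb(skb);
out:
        return NULL;
}
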
531 int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, in ip_rcv() argument
536 skb = ip_rcv_core(skb, net); in ip_rcv()
537 if (skb == NULL) in ip_rcv()
541 net, NULL, skb, dev, NULL, in ip_rcv()
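
ip_rcv() itself is thin: it is the packet_type handler for IPv4, delegating validation to ip_rcv_core() and then entering netfilter's NF_INET_PRE_ROUTING chain with ip_rcv_finish as the continuation:

int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
           struct net_device *orig_dev)
{
        struct net *net = dev_net(dev);

        skb = ip_rcv_core(skb, net);
        if (skb == NULL)
                return NET_RX_DROP;

        return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
                       net, NULL, skb, dev, NULL,
                       ip_rcv_finish);
}
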
547 struct sk_buff *skb, *next; in ip_sublist_rcv_finish() local
549 list_for_each_entry_safe(skb, next, head, list) { in ip_sublist_rcv_finish()
550 skb_list_del_init(skb); in ip_sublist_rcv_finish()
551 dst_input(skb); in ip_sublist_rcv_finish()
556 struct sk_buff *skb, int rt_type) in ip_extract_route_hint() argument
561 return skb; in ip_extract_route_hint()
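
These two small helpers support the list-receive path: ip_sublist_rcv_finish() flushes a sublist of packets that share a dst into dst_input(), and ip_extract_route_hint() decides whether a packet's route may be reused as a hint for the next packet (suppressed when custom FIB rules exist or the route is a broadcast). Reconstructed:

static void ip_sublist_rcv_finish(struct list_head *head)
{
        struct sk_buff *skb, *next;

        list_for_each_entry_safe(skb, next, head, list) {
                skb_list_del_init(skb);
                /* dst_input() may steal the skb; do not touch it after. */
                dst_input(skb);
        }
}

static struct sk_buff *ip_extract_route_hint(const struct net *net,
                                             struct sk_buff *skb, int rt_type)
{
        if (fib4_has_custom_rules(net) || rt_type == RTN_BROADCAST)
                return NULL;

        return skb;
}
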
567 struct sk_buff *skb, *next, *hint = NULL; in ip_list_rcv_finish() local
572 list_for_each_entry_safe(skb, next, head, list) { in ip_list_rcv_finish()
573 struct net_device *dev = skb->dev; in ip_list_rcv_finish()
576 skb_list_del_init(skb); in ip_list_rcv_finish()
580 skb = l3mdev_ip_rcv(skb); in ip_list_rcv_finish()
581 if (!skb) in ip_list_rcv_finish()
583 if (ip_rcv_finish_core(net, sk, skb, dev, hint) == NET_RX_DROP) in ip_list_rcv_finish()
586 dst = skb_dst(skb); in ip_list_rcv_finish()
588 hint = ip_extract_route_hint(net, skb, in ip_list_rcv_finish()
598 list_add_tail(&skb->list, &sublist); in ip_list_rcv_finish()
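
ip_list_rcv_finish() routes each packet in the batch, carrying the previous packet forward as a route hint, and groups consecutive packets that resolved to the same dst into sublists so the dst_input() dispatch is amortized. Condensed:

static void ip_list_rcv_finish(struct net *net, struct sock *sk,
                               struct list_head *head)
{
        struct dst_entry *curr_dst = NULL;
        struct sk_buff *skb, *next, *hint = NULL;
        struct list_head sublist;

        INIT_LIST_HEAD(&sublist);
        list_for_each_entry_safe(skb, next, head, list) {
                struct net_device *dev = skb->dev;
                struct dst_entry *dst;

                skb_list_del_init(skb);
                /* an L3 master device (VRF) may consume the skb */
                skb = l3mdev_ip_rcv(skb);
                if (!skb)
                        continue;
                if (ip_rcv_finish_core(net, sk, skb, dev, hint) == NET_RX_DROP)
                        continue;

                dst = skb_dst(skb);
                if (curr_dst != dst) {
                        hint = ip_extract_route_hint(net, skb,
                                        ((struct rtable *)dst)->rt_type);

                        /* dispatch old sublist, then start a new one */
                        if (!list_empty(&sublist))
                                ip_sublist_rcv_finish(&sublist);
                        INIT_LIST_HEAD(&sublist);
                        curr_dst = dst;
                }
                list_add_tail(&skb->list, &sublist);
        }
        /* dispatch the final sublist */
        ip_sublist_rcv_finish(&sublist);
}
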
618 struct sk_buff *skb, *next; in ip_list_rcv() local
622 list_for_each_entry_safe(skb, next, head, list) { in ip_list_rcv()
623 struct net_device *dev = skb->dev; in ip_list_rcv()
626 skb_list_del_init(skb); in ip_list_rcv()
627 skb = ip_rcv_core(skb, net); in ip_list_rcv()
628 if (skb == NULL) in ip_list_rcv()
640 list_add_tail(&skb->list, &sublist); in ip_list_rcv()
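
Finally, ip_list_rcv() is the batched counterpart of ip_rcv(): it validates each skb with ip_rcv_core() and splits the batch into sublists sharing the same (net, device) pair before handing each sublist to the PRE_ROUTING hook via ip_sublist_rcv(). Condensed:

void ip_list_rcv(struct list_head *head, struct packet_type *pt,
                 struct net_device *orig_dev)
{
        struct net_device *curr_dev = NULL;
        struct net *curr_net = NULL;
        struct sk_buff *skb, *next;
        struct list_head sublist;

        INIT_LIST_HEAD(&sublist);
        list_for_each_entry_safe(skb, next, head, list) {
                struct net_device *dev = skb->dev;
                struct net *net = dev_net(dev);

                skb_list_del_init(skb);
                skb = ip_rcv_core(skb, net);
                if (skb == NULL)
                        continue;

                if (curr_dev != dev || curr_net != net) {
                        /* flush packets for the previous (net, dev) pair */
                        if (!list_empty(&sublist))
                                ip_sublist_rcv(&sublist, curr_dev, curr_net);
                        INIT_LIST_HEAD(&sublist);
                        curr_dev = dev;
                        curr_net = net;
                }
                list_add_tail(&skb->list, &sublist);
        }
        if (!list_empty(&sublist))
                ip_sublist_rcv(&sublist, curr_dev, curr_net);
}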