Lines matching refs:skb (net/ipv4/ip_input.c, the IPv4 local receive path)
148 bool ip_call_ra_chain(struct sk_buff *skb) in ip_call_ra_chain() argument
151 u8 protocol = ip_hdr(skb)->protocol; in ip_call_ra_chain()
153 struct net_device *dev = skb->dev; in ip_call_ra_chain()
165 if (ip_is_fragment(ip_hdr(skb))) { in ip_call_ra_chain()
166 if (ip_defrag(net, skb, IP_DEFRAG_CALL_RA_CHAIN)) in ip_call_ra_chain()
170 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC); in ip_call_ra_chain()
179 raw_rcv(last, skb); in ip_call_ra_chain()
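
ip_call_ra_chain() (lines 148-179) reassembles fragments first (ip_defrag() with IP_DEFRAG_CALL_RA_CHAIN), then walks the Router Alert socket chain, handing a clone (skb_clone()) to every matching raw socket except the last, which receives the original skb via raw_rcv() and saves one copy. A minimal userspace sketch of that clone-for-all-but-the-last ownership pattern, with plain heap buffers standing in for skbs (not kernel code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { unsigned char *data; size_t len; };

static struct buf *buf_clone(const struct buf *b)   /* stands in for skb_clone() */
{
        struct buf *c = malloc(sizeof(*c));

        if (!c || !(c->data = malloc(b->len))) {
                free(c);
                return NULL;
        }
        memcpy(c->data, b->data, b->len);
        c->len = b->len;
        return c;
}

static void consume(struct buf *b, int id)          /* stands in for raw_rcv() */
{
        printf("listener %d got %zu bytes\n", id, b->len);
        free(b->data);
        free(b);
}

int main(void)
{
        unsigned char payload[] = "router-alert";
        struct buf orig = { payload, sizeof(payload) };
        struct buf *heap = buf_clone(&orig);
        int last = -1;

        if (!heap)
                return 1;
        for (int sk = 0; sk < 3; sk++) {            /* walk the "ra_chain" */
                if (last >= 0) {
                        struct buf *copy = buf_clone(heap);

                        if (copy)
                                consume(copy, last); /* clone for earlier hits */
                }
                last = sk;
        }
        if (last >= 0)
                consume(heap, last);                 /* original to the last */
        return 0;
}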
187 void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int protocol) in ip_protocol_deliver_rcu() argument
193 raw = raw_local_deliver(skb, protocol); in ip_protocol_deliver_rcu()
198 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { in ip_protocol_deliver_rcu()
199 kfree_skb_reason(skb, in ip_protocol_deliver_rcu()
203 nf_reset_ct(skb); in ip_protocol_deliver_rcu()
206 skb); in ip_protocol_deliver_rcu()
214 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { in ip_protocol_deliver_rcu()
216 icmp_send(skb, ICMP_DEST_UNREACH, in ip_protocol_deliver_rcu()
219 kfree_skb_reason(skb, SKB_DROP_REASON_IP_NOPROTO); in ip_protocol_deliver_rcu()
222 consume_skb(skb); in ip_protocol_deliver_rcu()
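
ip_protocol_deliver_rcu() (lines 187-222) lets raw sockets see the packet first (raw_local_deliver()), then dispatches on the 8-bit protocol number; with no registered handler it answers ICMP_DEST_UNREACH/ICMP_PROT_UNREACH and drops via kfree_skb_reason(SKB_DROP_REASON_IP_NOPROTO), while a packet wanted only by raw sockets ends in consume_skb(). A sketch of the 256-slot dispatch-table shape (handler names here are illustrative, not the kernel's):

#include <stdio.h>
#include <stddef.h>

typedef int (*proto_handler)(const unsigned char *pkt, size_t len);

static proto_handler handlers[256];     /* shape of inet_protos[] */

static int handle_tcp(const unsigned char *pkt, size_t len)
{
        printf("TCP: %zu bytes\n", len);
        return 0;
}

static void deliver(unsigned char protocol,
                    const unsigned char *pkt, size_t len)
{
        proto_handler h = handlers[protocol];

        if (h) {
                h(pkt, len);            /* ipprot->handler(skb) */
        } else {
                /* kernel: icmp_send(ICMP_DEST_UNREACH, ICMP_PROT_UNREACH),
                 * then kfree_skb_reason(SKB_DROP_REASON_IP_NOPROTO) */
                fprintf(stderr, "protocol %u unreachable\n", protocol);
        }
}

int main(void)
{
        unsigned char seg[20] = { 0 };

        handlers[6] = handle_tcp;       /* IPPROTO_TCP */
        deliver(6, seg, sizeof(seg));
        deliver(99, seg, sizeof(seg));  /* no handler -> "unreachable" path */
        return 0;
}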
227 static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb) in ip_local_deliver_finish() argument
229 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) { in ip_local_deliver_finish()
231 kfree_skb_reason(skb, SKB_DROP_REASON_NOMEM); in ip_local_deliver_finish()
235 skb_clear_delivery_time(skb); in ip_local_deliver_finish()
236 __skb_pull(skb, skb_network_header_len(skb)); in ip_local_deliver_finish()
239 ip_protocol_deliver_rcu(net, skb, ip_hdr(skb)->protocol); in ip_local_deliver_finish()
248 int ip_local_deliver(struct sk_buff *skb) in ip_local_deliver() argument
253 struct net *net = dev_net(skb->dev); in ip_local_deliver()
255 if (ip_is_fragment(ip_hdr(skb))) { in ip_local_deliver()
256 if (ip_defrag(net, skb, IP_DEFRAG_LOCAL_DELIVER)) in ip_local_deliver()
261 net, NULL, skb, skb->dev, NULL, in ip_local_deliver()
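
ip_local_deliver() (lines 248-261) reassembles fragments (IP_DEFRAG_LOCAL_DELIVER) before running the NF_INET_LOCAL_IN hook; ip_local_deliver_finish() (lines 227-239) then clears the delivery time and pulls the IP header (__skb_pull(skb, skb_network_header_len(skb))) so the transport header sits at the data pointer before protocol dispatch. A flat-buffer sketch of that pull step:

#include <stdio.h>

int main(void)
{
        unsigned char pkt[28] = { 0x45 };        /* version 4, IHL 5 -> 20-byte header */
        unsigned char *data = pkt;
        unsigned int ihl = (data[0] & 0x0f) * 4; /* header length in bytes */

        data += ihl;                             /* the "pull" */
        printf("transport header now at offset %td\n", data - pkt);
        return 0;
}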
266 static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev) in ip_rcv_options() argument
278 if (skb_cow(skb, skb_headroom(skb))) { in ip_rcv_options()
283 iph = ip_hdr(skb); in ip_rcv_options()
284 opt = &(IPCB(skb)->opt); in ip_rcv_options()
287 if (ip_options_compile(dev_net(dev), opt, skb)) { in ip_rcv_options()
305 if (ip_options_rcv_srr(skb, dev)) in ip_rcv_options()
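
ip_rcv_options() (lines 266-305) makes the header private and writable first (skb_cow(); option processing can rewrite it in place), parses the option block with ip_options_compile(), and handles source routing via ip_options_rcv_srr(). IPv4 options are a type/length byte stream; below is a standalone walk over that encoding, validation only (a hypothetical helper, no option semantics):

#include <stdio.h>

static int walk_ip_options(const unsigned char *opt, unsigned int len)
{
        unsigned int off = 0;

        while (off < len) {
                unsigned char type = opt[off];

                if (type == 0)                  /* IPOPT_END */
                        break;
                if (type == 1) {                /* IPOPT_NOOP: single byte */
                        off++;
                        continue;
                }
                if (off + 1 >= len || opt[off + 1] < 2 ||
                    off + opt[off + 1] > len)
                        return -1;              /* malformed; compile rejects */
                printf("option type %u, len %u\n", type, opt[off + 1]);
                off += opt[off + 1];
        }
        return 0;
}

int main(void)
{
        /* NOP, NOP, Router Alert (type 148, len 4, value 0), EOL */
        unsigned char opts[] = { 1, 1, 148, 4, 0, 0, 0 };

        return walk_ip_options(opts, sizeof(opts)) ? 1 : 0;
}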
314 static bool ip_can_use_hint(const struct sk_buff *skb, const struct iphdr *iph, in ip_can_use_hint() argument
317 return hint && !skb_dst(skb) && ip_hdr(hint)->daddr == iph->daddr && in ip_can_use_hint()
321 int tcp_v4_early_demux(struct sk_buff *skb);
322 int udp_v4_early_demux(struct sk_buff *skb);
324 struct sk_buff *skb, struct net_device *dev, in ip_rcv_finish_core() argument
327 const struct iphdr *iph = ip_hdr(skb); in ip_rcv_finish_core()
331 if (ip_can_use_hint(skb, iph, hint)) { in ip_rcv_finish_core()
332 drop_reason = ip_route_use_hint(skb, iph->daddr, iph->saddr, in ip_rcv_finish_core()
340 !skb_dst(skb) && in ip_rcv_finish_core()
341 !skb->sk && in ip_rcv_finish_core()
346 tcp_v4_early_demux(skb); in ip_rcv_finish_core()
349 iph = ip_hdr(skb); in ip_rcv_finish_core()
354 drop_reason = udp_v4_early_demux(skb); in ip_rcv_finish_core()
360 iph = ip_hdr(skb); in ip_rcv_finish_core()
370 if (!skb_valid_dst(skb)) { in ip_rcv_finish_core()
371 drop_reason = ip_route_input_noref(skb, iph->daddr, iph->saddr, in ip_rcv_finish_core()
380 IPCB(skb)->flags |= IPSKB_NOPOLICY; in ip_rcv_finish_core()
384 if (unlikely(skb_dst(skb)->tclassid)) { in ip_rcv_finish_core()
386 u32 idx = skb_dst(skb)->tclassid; in ip_rcv_finish_core()
388 st[idx&0xFF].o_bytes += skb->len; in ip_rcv_finish_core()
390 st[(idx>>16)&0xFF].i_bytes += skb->len; in ip_rcv_finish_core()
394 if (iph->ihl > 5 && ip_rcv_options(skb, dev)) in ip_rcv_finish_core()
397 rt = skb_rtable(skb); in ip_rcv_finish_core()
399 __IP_UPD_PO_STATS(net, IPSTATS_MIB_INMCAST, skb->len); in ip_rcv_finish_core()
401 __IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len); in ip_rcv_finish_core()
402 } else if (skb->pkt_type == PACKET_BROADCAST || in ip_rcv_finish_core()
403 skb->pkt_type == PACKET_MULTICAST) { in ip_rcv_finish_core()
431 kfree_skb_reason(skb, drop_reason); in ip_rcv_finish_core()
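
ip_rcv_finish_core() (lines 324-431) tries the route-hint fast path, then TCP/UDP early demux when the skb has no dst or socket yet, re-reading iph = ip_hdr(skb) after each demux call (lines 349 and 360) because those helpers may change the buffer head; packets still without a valid dst go through ip_route_input_noref(), followed by tclassid accounting, option parsing when ihl > 5, and the multicast/broadcast counters. The reload-after-possible-reallocation rule, shown with realloc() in plain C:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        char *buf = malloc(16);
        char *hdr;

        if (!buf)
                return 1;
        strcpy(buf, "iphdr");
        hdr = buf;                      /* cached pointer into the buffer */

        buf = realloc(buf, 1 << 16);    /* may move the whole block */
        if (!buf)
                return 1;
        hdr = buf;                      /* re-derive it, as the kernel
                                         * re-reads ip_hdr(skb) after demux */
        printf("%s\n", hdr);
        free(buf);
        return 0;
}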
440 static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb) in ip_rcv_finish() argument
442 struct net_device *dev = skb->dev; in ip_rcv_finish()
448 skb = l3mdev_ip_rcv(skb); in ip_rcv_finish()
449 if (!skb) in ip_rcv_finish()
452 ret = ip_rcv_finish_core(net, skb, dev, NULL); in ip_rcv_finish()
454 ret = dst_input(skb); in ip_rcv_finish()
461 static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net) in ip_rcv_core() argument
470 if (skb->pkt_type == PACKET_OTHERHOST) { in ip_rcv_core()
471 dev_core_stats_rx_otherhost_dropped_inc(skb->dev); in ip_rcv_core()
476 __IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len); in ip_rcv_core()
478 skb = skb_share_check(skb, GFP_ATOMIC); in ip_rcv_core()
479 if (!skb) { in ip_rcv_core()
485 if (!pskb_may_pull(skb, sizeof(struct iphdr))) in ip_rcv_core()
488 iph = ip_hdr(skb); in ip_rcv_core()
509 max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs)); in ip_rcv_core()
511 if (!pskb_may_pull(skb, iph->ihl*4)) in ip_rcv_core()
514 iph = ip_hdr(skb); in ip_rcv_core()
519 len = iph_totlen(skb, iph); in ip_rcv_core()
520 if (skb->len < len) { in ip_rcv_core()
531 if (pskb_trim_rcsum(skb, len)) { in ip_rcv_core()
536 iph = ip_hdr(skb); in ip_rcv_core()
537 skb->transport_header = skb->network_header + iph->ihl*4; in ip_rcv_core()
540 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); in ip_rcv_core()
541 IPCB(skb)->iif = skb->skb_iif; in ip_rcv_core()
544 if (!skb_sk_is_prefetched(skb)) in ip_rcv_core()
545 skb_orphan(skb); in ip_rcv_core()
547 return skb; in ip_rcv_core()
557 kfree_skb_reason(skb, drop_reason); in ip_rcv_core()
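
ip_rcv_core() (lines 461-557) is the wire-format gate: it drops PACKET_OTHERHOST, unshares the skb, then checks in order that a minimal header is present (pskb_may_pull(sizeof(struct iphdr))), that version is 4 and IHL is at least 5, that the full header is present (pskb_may_pull(iph->ihl*4)), that the header checksum folds to zero, and that skb->len covers iph_totlen() before trimming padding with pskb_trim_rcsum() and setting the transport header offset. The same checks on a flat buffer, as standalone userspace C (struct iphdr from glibc's <netinet/ip.h>):

#include <arpa/inet.h>
#include <netinet/ip.h>
#include <stddef.h>
#include <stdio.h>

static unsigned short csum16(const void *data, size_t len)
{
        const unsigned short *p = data;
        unsigned long sum = 0;

        while (len > 1) {
                sum += *p++;
                len -= 2;
        }
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (unsigned short)~sum;
}

static int ip_hdr_ok(const struct iphdr *iph, size_t len)
{
        if (len < sizeof(*iph))                   /* pskb_may_pull(sizeof) */
                return 0;
        if (iph->version != 4 || iph->ihl < 5)    /* version / IHL check */
                return 0;
        if (len < (size_t)iph->ihl * 4)           /* pskb_may_pull(ihl*4) */
                return 0;
        if (csum16(iph, iph->ihl * 4) != 0)       /* ip_fast_csum() == 0 */
                return 0;
        if (ntohs(iph->tot_len) < iph->ihl * 4 || /* tot_len sanity, and  */
            len < ntohs(iph->tot_len))            /* the skb->len < len drop */
                return 0;
        return 1;
}

int main(void)
{
        struct iphdr hdr = { .version = 4, .ihl = 5,
                             .tot_len = htons(sizeof(hdr)) };

        hdr.check = csum16(&hdr, sizeof(hdr));    /* checksum over check=0 */
        printf("header %s\n", ip_hdr_ok(&hdr, sizeof(hdr)) ? "ok" : "bad");
        return 0;
}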
565 int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, in ip_rcv() argument
570 skb = ip_rcv_core(skb, net); in ip_rcv()
571 if (skb == NULL) in ip_rcv()
575 net, NULL, skb, dev, NULL, in ip_rcv()
581 struct sk_buff *skb, *next; in ip_sublist_rcv_finish() local
583 list_for_each_entry_safe(skb, next, head, list) { in ip_sublist_rcv_finish()
584 skb_list_del_init(skb); in ip_sublist_rcv_finish()
585 dst_input(skb); in ip_sublist_rcv_finish()
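
ip_sublist_rcv_finish() (lines 581-585) drains a sublist with list_for_each_entry_safe(), unlinking each skb (skb_list_del_init()) before dst_input() can consume or free it. The reason for the _safe variant, on a plain singly linked list:

#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

int main(void)
{
        struct node *head = NULL, *n, *next;

        for (int i = 3; i > 0; i--) {           /* build 1 -> 2 -> 3 */
                n = malloc(sizeof(*n));
                if (!n)
                        return 1;
                n->id = i;
                n->next = head;
                head = n;
        }
        for (n = head; n; n = next) {
                next = n->next;                 /* grab next before unlink */
                n->next = NULL;                 /* skb_list_del_init() */
                printf("deliver %d\n", n->id);  /* dst_input() */
                free(n);                        /* entry may vanish here */
        }
        return 0;
}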
590 struct sk_buff *skb, int rt_type) in ip_extract_route_hint() argument
593 IPCB(skb)->flags & IPSKB_MULTIPATH) in ip_extract_route_hint()
596 return skb; in ip_extract_route_hint()
601 struct sk_buff *skb, *next, *hint = NULL; in ip_list_rcv_finish() local
605 list_for_each_entry_safe(skb, next, head, list) { in ip_list_rcv_finish()
606 struct net_device *dev = skb->dev; in ip_list_rcv_finish()
609 skb_list_del_init(skb); in ip_list_rcv_finish()
613 skb = l3mdev_ip_rcv(skb); in ip_list_rcv_finish()
614 if (!skb) in ip_list_rcv_finish()
616 if (ip_rcv_finish_core(net, skb, dev, hint) == NET_RX_DROP) in ip_list_rcv_finish()
619 dst = skb_dst(skb); in ip_list_rcv_finish()
621 hint = ip_extract_route_hint(net, skb, in ip_list_rcv_finish()
631 list_add_tail(&skb->list, &sublist); in ip_list_rcv_finish()
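
ip_list_rcv_finish() (lines 601-631) runs the core receive work per packet but threads a hint between iterations: ip_extract_route_hint() (lines 590-596) keeps a just-routed skb unless it is multicast or multipath, and ip_can_use_hint() (lines 314-317) lets the next packet with the same daddr reuse that dst via ip_route_use_hint() instead of a full lookup. The memoization shape in standalone C (lookup() here is a hypothetical stand-in for the FIB):

#include <stdio.h>
#include <stddef.h>

struct route { unsigned int daddr; int ifindex; };

static struct route lookup(unsigned int daddr)  /* stand-in for a FIB lookup */
{
        printf("full lookup for %u\n", daddr);
        return (struct route){ daddr, (int)(daddr & 3) };
}

int main(void)
{
        unsigned int pkts[] = { 10, 10, 10, 20, 20, 10 };
        struct route hint = { 0, -1 };          /* ifindex -1: no hint yet */

        for (size_t i = 0; i < sizeof(pkts) / sizeof(pkts[0]); i++) {
                struct route rt;

                if (hint.ifindex >= 0 && hint.daddr == pkts[i])
                        rt = hint;              /* ip_can_use_hint() fast path */
                else
                        rt = lookup(pkts[i]);   /* ip_route_input_noref() path */
                hint = rt;                      /* ip_extract_route_hint() */
        }
        return 0;
}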
651 struct sk_buff *skb, *next; in ip_list_rcv() local
654 list_for_each_entry_safe(skb, next, head, list) { in ip_list_rcv()
655 struct net_device *dev = skb->dev; in ip_list_rcv()
658 skb_list_del_init(skb); in ip_list_rcv()
659 skb = ip_rcv_core(skb, net); in ip_list_rcv()
660 if (skb == NULL) in ip_list_rcv()
672 list_add_tail(&skb->list, &sublist); in ip_list_rcv()
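
ip_list_rcv() (lines 651-672) validates each packet with ip_rcv_core() and splits the incoming list into runs of consecutive packets sharing an ingress device (and netns), so each run can be pushed through NF_INET_PRE_ROUTING and ip_list_rcv_finish() as a batch. The run-splitting logic, reduced to an array of device ids:

#include <stdio.h>

int main(void)
{
        int devs[] = { 1, 1, 2, 2, 2, 1 };      /* per-packet ingress device */
        int n = sizeof(devs) / sizeof(devs[0]);
        int curr = -1, start = 0;

        for (int i = 0; i <= n; i++) {
                if (i == n || devs[i] != curr) {
                        if (curr != -1)         /* flush the finished run */
                                printf("sublist dev %d: pkts %d..%d\n",
                                       curr, start, i - 1);
                        if (i < n) {            /* start the next run */
                                curr = devs[i];
                                start = i;
                        }
                }
        }
        return 0;
}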