
Searched refs:skb (Results 1 – 25 of 1126) sorted by relevance


/net/xfrm/
xfrm_output.c
73 skb->mac_header = skb->network_header + in xfrm4_transport_output()
75 skb->transport_header = skb->network_header + ihl; in xfrm4_transport_output()
239 skb->mac_header = skb->network_header + in xfrm4_beet_encap_add()
284 skb->mac_header = skb->network_header + in xfrm4_tunnel_encap_add()
329 skb->mac_header = skb->network_header + in xfrm6_tunnel_encap_add()
368 skb->mac_header = skb->network_header + in xfrm6_beet_encap_add()
508 skb->mark = xfrm_smark_get(skb->mark, x); in xfrm_output_one()
645 __skb_push(skb, skb->dev->hard_header_len); in xfrm_dev_direct_output()
755 memset(IPCB(skb), 0, sizeof(*IPCB(skb))); in xfrm_output()
843 !skb_gso_validate_network_len(skb, ip_skb_dst_mtu(skb->sk, skb)))) { in xfrm4_tunnel_check_size()
[all …]
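
The xfrm_output.c hits above all adjust skb header offsets so that, after encapsulation, the transport header sits ihl bytes past the network header. A minimal sketch of that bookkeeping (illustrative only, not the xfrm code; example_fix_offsets() is a made-up name), assuming skb->data points at a freshly built IPv4 header:

#include <linux/ip.h>
#include <linux/skbuff.h>

static void example_fix_offsets(struct sk_buff *skb)
{
	int ihl;

	skb_reset_network_header(skb);		/* network header = current skb->data */
	ihl = ip_hdr(skb)->ihl * 4;		/* IPv4 header length in bytes */
	skb_set_transport_header(skb, ihl);	/* transport header follows the IP header */
}
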
xfrm_input.c
255 eth_hdr(skb)->h_proto = skb->protocol; in xfrm4_remove_tunnel_encap()
292 eth_hdr(skb)->h_proto = skb->protocol; in xfrm6_remove_tunnel_encap()
308 err = skb_cow_head(skb, size + skb->mac_len); in xfrm6_remove_beet_encap()
393 int ihl = skb->data - skb_transport_header(skb); in xfrm4_transport_input()
395 if (skb->transport_header != skb->network_header) { in xfrm4_transport_input()
401 skb->network_header = skb->transport_header; in xfrm4_transport_input()
403 ip_hdr(skb)->tot_len = htons(skb->len + ihl); in xfrm4_transport_input()
412 int ihl = skb->data - skb_transport_header(skb); in xfrm6_transport_input()
414 if (skb->transport_header != skb->network_header) { in xfrm6_transport_input()
420 skb->network_header = skb->transport_header; in xfrm6_transport_input()
[all …]
/net/core/
gro.c
154 skb->len -= skb->data_len; in skb_gro_receive()
236 skb_pull(skb, skb_gro_offset(skb)); in skb_gro_receive_list()
408 skb->end - skb->tail); in skb_gro_reset_offset()
547 NAPI_GRO_CB(skb)->last = skb; in dev_gro_receive()
644 __skb_pull(skb, skb_headlen(skb)); in napi_reuse_skb()
665 napi->skb = skb; in napi_reuse_skb()
670 struct sk_buff *skb = napi->skb; in napi_get_frags() local
675 napi->skb = skb; in napi_get_frags()
691 skb->protocol = eth_type_trans(skb, skb->dev); in napi_frags_finish()
717 struct sk_buff *skb = napi->skb; in napi_frags_skb() local
[all …]
skbuff.c
208 msg, addr, skb->len, sz, skb->head, skb->data, in skb_panic()
1138 skb->destructor(skb); in skb_release_head_state()
1300 skb->csum, skb->csum_start, skb->csum_offset, skb->ip_summed, in skb_dump()
1302 skb->hash, skb->sw_hash, skb->l4_hash, in skb_dump()
1303 ntohs(skb->protocol), skb->pkt_type, skb->skb_iif, in skb_dump()
1304 skb->priority, skb->mark, skb->alloc_cpu, skb->vlan_all, in skb_dump()
2519 ntail = skb->data_len + pad - (skb->end - skb->tail); in __skb_pad()
3322 skb = skb->next; in __skb_send_sock()
6890 skb->data_len = skb->len; in pskb_carve_inside_nonlinear()
6941 if (skb->data_len > skb->end - skb->tail || in skb_condense()
[all …]
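
Several of the skbuff.c hits (e.g. __skb_pad and skb_condense) lean on the sk_buff layout invariants: skb->len covers all payload, skb->data_len only the paged fragments, and skb->end - skb->tail is the remaining linear tailroom. A hedged sketch of those relations (example_layout() is a stand-in name):

#include <linux/printk.h>
#include <linux/skbuff.h>

static void example_layout(const struct sk_buff *skb)
{
	unsigned int linear   = skb_headlen(skb);	/* skb->len - skb->data_len */
	unsigned int tailroom = skb_tailroom(skb);	/* skb->end - skb->tail for linear skbs */

	pr_debug("len=%u linear=%u paged=%u tailroom=%u\n",
		 skb->len, linear, skb->data_len, tailroom);
}
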
gso.c
48 __skb_pull(skb, vlan_depth); in skb_mac_gso_segment()
59 __skb_push(skb, skb->data - skb_mac_header(skb)); in skb_mac_gso_segment()
116 sizeof(*SKB_GSO_CB(skb)) > sizeof(skb->cb)); in __skb_gso_segment()
118 SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb); in __skb_gso_segment()
122 skb_reset_mac_len(skb); in __skb_gso_segment()
126 if (segs != skb && unlikely(skb_needs_check(skb, tx_path) && !IS_ERR(segs))) in __skb_gso_segment()
148 if (skb->encapsulation) { in skb_gso_transport_seglen()
155 thlen = tcp_hdrlen(skb); in skb_gso_transport_seglen()
197 unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); in skb_gso_mac_seglen()
255 return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu); in skb_gso_validate_network_len()
[all …]
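
skb_gso_transport_seglen() and skb_gso_mac_seglen() in the gso.c hits compute how long each resulting segment will be on the wire: the headers up to the transport layer, plus the transport header, plus one gso_size of payload. A simplified sketch for a plain (non-encapsulated) TCP skb; example_mac_seglen() is an illustrative name, not the kernel helper:

#include <linux/skbuff.h>
#include <linux/tcp.h>

static unsigned int example_mac_seglen(const struct sk_buff *skb)
{
	/* bytes from the MAC header up to the transport header */
	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);

	/* per-segment length = L2/L3 headers + TCP header + one payload chunk */
	return hdr_len + tcp_hdrlen(skb) + skb_shinfo(skb)->gso_size;
}
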
/net/bridge/
br_netfilter_hooks.c
280 skb->dev = bridge_parent(skb->dev); in br_nf_pre_routing_finish_bridge()
404 net, sk, skb, skb->dev, in br_nf_pre_routing_finish()
460 skb->dev = brnf_get_logical_dev(skb, skb->dev, net); in setup_pre_routing()
497 if (IS_IPV6(skb) || is_vlan_ipv6(skb, state->net) || in br_nf_pre_routing()
514 if (!IS_IP(skb) && !is_vlan_ip(skb, state->net) && in br_nf_pre_routing()
532 skb->transport_header = skb->network_header + ip_hdr(skb)->ihl * 4; in br_nf_pre_routing()
642 if (!IS_ARP(skb) && !is_vlan_arp(skb, net)) { in br_nf_forward_finish()
771 if (IS_IP(skb) || is_vlan_ip(skb, state->net) || in br_nf_forward()
774 if (IS_IPV6(skb) || is_vlan_ipv6(skb, state->net) || in br_nf_forward()
777 if (IS_ARP(skb) || is_vlan_arp(skb, state->net)) in br_nf_forward()
[all …]
br_forward.c
36 if (!is_skb_forwardable(skb->dev, skb)) in br_dev_queue_push_xmit()
45 if (!vlan_get_protocol_and_depth(skb, skb->protocol, &depth)) in br_dev_queue_push_xmit()
58 kfree_skb(skb); in br_dev_queue_push_xmit()
67 net, sk, skb, NULL, skb->dev, in br_forward_finish()
87 skb = br_handle_vlan(to->br, to, vg, skb); in __br_forward()
88 if (!skb) in __br_forward()
104 if (!is_skb_forwardable(skb->dev, skb)) in __br_forward()
116 net, NULL, skb, indev, skb->dev, in __br_forward()
125 skb = skb_clone(skb, GFP_ATOMIC); in deliver_clone()
126 if (!skb) { in deliver_clone()
[all …]
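
deliver_clone() in the hits above clones the skb before handing it to one port, so the original can still be forwarded elsewhere, and treats a failed clone as a drop. A minimal illustration of that pattern (the deliver callback and the function name are made up):

#include <linux/skbuff.h>

static void example_clone_and_deliver(struct sk_buff *skb,
				      void (*deliver)(struct sk_buff *skb))
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (!clone)
		return;		/* clone failed: nothing delivered, original untouched */

	deliver(clone);		/* the caller keeps ownership of the original skb */
}
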
br_netfilter_ipv6.c
53 if (skb->len < ip6h_len) in br_validate_ipv6()
56 hdr = ipv6_hdr(skb); in br_validate_ipv6()
110 kfree_skb(skb); in br_nf_pre_routing_finish_ipv6()
122 skb_dst_drop(skb); in br_nf_pre_routing_finish_ipv6()
126 kfree_skb(skb); in br_nf_pre_routing_finish_ipv6()
135 net, sk, skb, skb->dev, NULL, in br_nf_pre_routing_finish_ipv6()
144 kfree_skb(skb); in br_nf_pre_routing_finish_ipv6()
147 skb_dst_drop(skb); in br_nf_pre_routing_finish_ipv6()
151 skb->dev = br_indev; in br_nf_pre_routing_finish_ipv6()
182 skb->transport_header = skb->network_header + sizeof(struct ipv6hdr); in br_nf_pre_routing_ipv6()
[all …]
br_input.c
55 kfree_skb(skb); in br_pass_frame_up()
59 indev = skb->dev; in br_pass_frame_up()
60 skb->dev = brdev; in br_pass_frame_up()
61 skb = br_handle_vlan(br, NULL, vg, skb); in br_pass_frame_up()
62 if (!skb) in br_pass_frame_up()
65 br_multicast_count(br, NULL, skb, br_multicast_igmp_type(skb), in br_pass_frame_up()
307 br_handle_frame_finish(dev_net(skb->dev), NULL, skb); in nf_hook_bridge_pre()
346 skb = skb_share_check(skb, GFP_ATOMIC); in br_handle_frame()
347 if (!skb) in br_handle_frame()
381 *pskb = skb; in br_handle_frame()
[all …]
/net/ipv6/
exthdrs.c
307 !pskb_may_pull(skb, (skb_transport_offset(skb) + in ipv6_destopt_rcv()
402 skb_postpull_rcsum(skb, skb_network_header(skb), in ipv6_srh_rcv()
405 skb_postpull_rcsum(skb, skb_transport_header(skb), in ipv6_srh_rcv()
413 __skb_tunnel_rx(skb, skb->dev, net); in ipv6_srh_rcv()
513 skb_postpull_rcsum(skb, skb_network_header(skb), in ipv6_rpl_srh_rcv()
516 skb_postpull_rcsum(skb, skb_transport_header(skb), in ipv6_rpl_srh_rcv()
523 __skb_tunnel_rx(skb, skb->dev, net); in ipv6_rpl_srh_rcv()
612 skb_postpush_rcsum(skb, ipv6_hdr(skb), in ipv6_rpl_srh_rcv()
666 !pskb_may_pull(skb, (skb_transport_offset(skb) + in ipv6_rthdr_rcv()
807 skb_push(skb, -skb_network_offset(skb)); in ipv6_rthdr_rcv()
[all …]
ip6_input.c
52 !skb_dst(skb) && !skb->sk) { in ip6_rcv_finish_core()
74 skb = l3mdev_ip6_rcv(skb); in ip6_rcv_finish()
75 if (!skb) in ip6_rcv_finish()
106 return skb; in ip6_extract_route_hint()
123 skb = l3mdev_ip6_rcv(skb); in ip6_list_rcv_finish()
124 if (!skb) in ip6_list_rcv_finish()
190 IP6CB(skb)->iif = skb_valid_dst(skb) ? in ip6_rcv_core()
308 skb = ip6_rcv_core(skb, dev, net); in ipv6_rcv()
338 skb = ip6_rcv_core(skb, dev, net); in ipv6_list_rcv()
380 if (!pskb_pull(skb, skb_transport_offset(skb))) in ip6_protocol_deliver_rcu()
[all …]
ip6_offload.c
70 skb_gro_pull(skb, off - skb_gro_receive_network_offset(skb)); in ipv6_gro_pull_exthdrs()
101 __skb_pull(skb, len); in ipv6_gso_pull_exthdrs()
126 nhoff = skb_network_header(skb) - skb_mac_header(skb); in ipv6_gso_segment()
156 skb->network_header = skb_mac_header(skb) + nhoff - skb->head; in ipv6_gso_segment()
164 for (skb = segs; skb; skb = skb->next) { in ipv6_gso_segment()
173 skb->network_header = (u8 *)ipv6h - skb->head; in ipv6_gso_segment()
184 if (skb->next) in ipv6_gso_segment()
241 NAPI_GRO_CB(skb)->network_offsets[NAPI_GRO_CB(skb)->encap_mark] = off; in ipv6_gro_receive()
259 skb_set_transport_header(skb, skb_gro_offset(skb)); in ipv6_gro_receive()
358 memmove(skb_mac_header(skb) - hoplen, skb_mac_header(skb), in ipv6_gro_complete()
[all …]
/net/ipv4/
ip_input.c
236 __skb_pull(skb, skb_network_header_len(skb)); in ip_local_deliver_finish()
261 net, NULL, skb, skb->dev, NULL, in ip_local_deliver()
278 if (skb_cow(skb, skb_headroom(skb))) { in ip_rcv_options()
448 skb = l3mdev_ip_rcv(skb); in ip_rcv_finish()
449 if (!skb) in ip_rcv_finish()
478 skb = skb_share_check(skb, GFP_ATOMIC); in ip_rcv_core()
541 IPCB(skb)->iif = skb->skb_iif; in ip_rcv_core()
570 skb = ip_rcv_core(skb, net); in ip_rcv()
613 skb = l3mdev_ip_rcv(skb); in ip_list_rcv_finish()
614 if (!skb) in ip_list_rcv_finish()
[all …]
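
ip_rcv_core() above (like br_handle_frame() and lowpan_rcv() elsewhere in this listing) opens with skb_share_check(): a shared skb must be cloned before the receive path writes to its headers. A sketch of the idiom, with a made-up function name:

#include <linux/skbuff.h>

static struct sk_buff *example_rcv_entry(struct sk_buff *skb)
{
	/* if other users hold a reference, work on a private clone instead */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return NULL;	/* clone failed; skb_share_check already freed the original */

	/* header fields may be modified safely from here on */
	return skb;
}
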
tcp_offload.c
22 while (skb) { in tcp_gso_tstamp()
29 skb = skb->next; in tcp_gso_tstamp()
91 skb = skb_segment_list(skb, features, skb_mac_header_len(skb)); in __tcp4_gso_segment_list()
92 if (IS_ERR(skb)) in __tcp4_gso_segment_list()
93 return skb; in __tcp4_gso_segment_list()
169 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); in tcp_gso_segment()
196 skb = segs; in tcp_gso_segment()
222 skb = skb->next; in tcp_gso_segment()
402 skb->inner_transport_header = skb->transport_header; in tcp_gro_complete()
404 skb->csum_start = (unsigned char *)th - skb->head; in tcp_gro_complete()
[all …]
udp_offload.c
211 skb->mac_len = skb_inner_network_offset(skb); in __skb_udp_tunnel_segment()
220 need_ipsec = (skb_dst(skb) && dst_xfrm(skb_dst(skb))) || skb_sec_path(skb); in __skb_udp_tunnel_segment()
254 skb = segs; in __skb_udp_tunnel_segment()
301 skb->csum_start = skb_transport_header(skb) - skb->head; in __skb_udp_tunnel_segment()
304 } while ((skb = skb->next)); in __skb_udp_tunnel_segment()
468 skb = skb_segment_list(skb, features, skb_mac_header_len(skb)); in __udp_gso_segment_list()
675 csum = skb_checksum(skb, 0, skb->len, 0); in udp4_ufo_fragment()
754 if (!pskb_may_pull(skb, skb_gro_offset(skb))) { in udp_gro_receive_segment()
911 skb->csum_start = (unsigned char *)uh - skb->head; in udp_gro_complete_segment()
915 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; in udp_gro_complete_segment()
[all …]
gre_offload.c
19 int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb); in gre_gso_segment()
27 if (!skb->encapsulation) in gre_gso_segment()
37 skb->encapsulation = 0; in gre_gso_segment()
41 skb_set_network_header(skb, skb_inner_network_offset(skb)); in gre_gso_segment()
42 skb->mac_len = skb_inner_network_offset(skb); in gre_gso_segment()
43 skb->protocol = skb->inner_protocol; in gre_gso_segment()
52 need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb)); in gre_gso_segment()
69 skb = segs; in gre_gso_segment()
101 partial_adj = skb->len + skb_headroom(skb) - in gre_gso_segment()
114 skb->csum_start = skb_transport_header(skb) - skb->head; in gre_gso_segment()
[all …]
esp4_offload.c
31 struct sk_buff *skb) in esp4_gro_receive() argument
46 xo = xfrm_offload(skb); in esp4_gro_receive()
56 x = xfrm_input_state_lookup(dev_net(skb->dev), skb->mark, in esp4_gro_receive()
69 skb->mark = xfrm_smark_get(skb->mark, x); in esp4_gro_receive()
95 secpath_reset(skb); in esp4_gro_receive()
111 skb_push(skb, -skb_network_offset(skb)); in esp4_gso_encap()
180 __skb_pull(skb, skb_transport_offset(skb)); in xfrm4_beet_gso_segment()
321 skb_push(skb, -skb_network_offset(skb)); in esp_xmit()
349 ip_hdr(skb)->tot_len = htons(skb->len); in esp_xmit()
368 secpath_reset(skb); in esp_xmit()
[all …]
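
esp_xmit() and esp4_gso_encap() above (like ipv6_rthdr_rcv() earlier) use skb_push(skb, -skb_network_offset(skb)). skb_network_offset() is zero or negative once skb->data has been pulled past the network header, so pushing its negation rewinds skb->data back to the network header. An illustrative equivalent:

#include <linux/skbuff.h>

static void example_rewind_to_network_header(struct sk_buff *skb)
{
	/* skb_network_offset() = skb_network_header(skb) - skb->data, so it is
	 * <= 0 here; pushing its negation points skb->data at the IP header again */
	__skb_push(skb, -skb_network_offset(skb));
}
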
/net/devlink/
netlink_gen.h
25 struct sk_buff *skb, struct genl_info *info);
27 struct sk_buff *skb, struct genl_info *info);
29 struct sk_buff *skb,
36 struct sk_buff *skb, struct genl_info *info);
41 int devlink_nl_port_get_dumpit(struct sk_buff *skb,
51 int devlink_nl_sb_pool_get_dumpit(struct sk_buff *skb,
84 int devlink_nl_param_get_dumpit(struct sk_buff *skb,
88 int devlink_nl_region_get_dumpit(struct sk_buff *skb,
99 int devlink_nl_info_get_dumpit(struct sk_buff *skb,
117 int devlink_nl_trap_get_dumpit(struct sk_buff *skb,
[all …]
/net/netfilter/ipvs/
ip_vs_xmit.c
116 else if (skb->len > mtu && !skb_is_gso(skb)) { in __mtu_check_toobig_v6()
173 (!skb->dev || skb->dev->flags & IFF_LOOPBACK) && in crosses_local_route_boundary()
236 skb->len > mtu && !skb_is_gso(skb) && in ensure_mtu_is_adequate()
581 skb->dev = skb_dst(skb)->dev; in __ip_vs_get_out_rt_v6()
976 return skb; in ip_vs_prepare_tunneled_skb()
1196 skb->transport_header = skb->network_header; in ip_vs_tunnel_xmit()
1217 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); in ip_vs_tunnel_xmit()
1238 ip_local_out(net, skb->sk, skb); in ip_vs_tunnel_xmit()
1339 skb->transport_header = skb->network_header; in ip_vs_tunnel_xmit_v6()
1360 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); in ip_vs_tunnel_xmit_v6()
[all …]
/net/bridge/netfilter/
nf_conntrack_bridge.c
47 iph = ip_hdr(skb); in nf_br_ip_fragment()
56 mtu = skb->dev->mtu; in nf_br_ip_fragment()
121 consume_skb(skb); in nf_br_ip_fragment()
125 kfree_skb(skb); in nf_br_ip_fragment()
167 br_skb_cb_restore(skb, &cb, IPCB(skb)->frag_max_size); in nf_ct_br_defrag4()
197 br_skb_cb_restore(skb, &cb, IP6CB(skb)->frag_max_size); in nf_ct_br_defrag6()
210 iph = ip_hdr(skb); in nf_ct_br_ip_check()
234 if (skb->len < len) in nf_ct_br_ipv6_check()
302 nf_reset_ct(skb); in nf_ct_bridge_in()
318 skb->_nfct = 0; in nf_ct_bridge_in()
[all …]
/net/openvswitch/
actions.c
100 da->skb = skb; in add_deferred_actions()
150 err = skb_mpls_pop(skb, ethertype, skb->mac_len, in pop_mpls()
235 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2); in set_eth_addr()
242 skb_postpush_rcsum(skb, eth_hdr(skb), ETH_ALEN * 2); in set_eth_addr()
775 skb_postpush_rcsum(skb, skb->data, data->l2_len); in ovs_vport_output()
779 skb->inner_network_header = skb->network_header; in ovs_vport_output()
836 skb->network_header = skb->inner_network_header; in ovs_fragment()
904 pskb_trim(skb, skb->len - cutlen); in do_output()
1302 md.trunc_size = skb->len - OVS_CB(skb)->cutlen; in execute_psample()
1553 skb = last ? skb : skb_clone(skb, GFP_ATOMIC); in clone_execute()
[all …]
/net/ieee802154/6lowpan/
rx.c
25 skb->dev->stats.rx_bytes += skb->len; in lowpan_give_skb_to_device()
27 return netif_rx(skb); in lowpan_give_skb_to_device()
40 kfree_skb(skb); in lowpan_rx_handlers_result()
72 ret = lowpan_frag_rcv(skb, *skb_network_header(skb) & in lowpan_rx_h_frag()
118 skb_pull(skb, 1); in lowpan_rx_h_ipv6()
286 skb = skb_share_check(skb, GFP_ATOMIC); in lowpan_rcv()
287 if (!skb) in lowpan_rcv()
289 skb->dev = ldev; in lowpan_rcv()
297 skb = skb_unshare(skb, GFP_ATOMIC); in lowpan_rcv()
298 if (!skb) in lowpan_rcv()
[all …]
/net/sched/
sch_frag.c
32 kfree_skb(skb); in sch_frag_xmit()
48 skb_postpush_rcsum(skb, skb->data, data->l2_len); in sch_frag_xmit()
51 return data->xmit(skb); in sch_frag_xmit()
74 skb_pull(skb, hlen); in sch_frag_prepare_frag()
112 ret = ip_do_fragment(net, skb->sk, skb, sch_frag_xmit); in sch_fragment()
130 ret = ipv6_stub->ipv6_fragment(net, skb->sk, skb, in sch_fragment()
144 kfree_skb(skb); in sch_fragment()
148 int sch_frag_xmit_hook(struct sk_buff *skb, int (*xmit)(struct sk_buff *skb)) in sch_frag_xmit_hook() argument
153 if (mru && skb->len > mru + skb->dev->hard_header_len) in sch_frag_xmit_hook()
154 err = sch_fragment(dev_net(skb->dev), skb, mru, xmit); in sch_frag_xmit_hook()
[all …]
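
sch_frag_xmit_hook() above only fragments when the packet exceeds the MRU plus the link-layer header. A hedged sketch of that size test (mru is passed in here rather than read from the real qdisc state, and the function name is invented):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static bool example_needs_frag(const struct sk_buff *skb, unsigned int mru)
{
	return mru && skb->len > mru + skb->dev->hard_header_len;
}
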
/net/ax25/
ax25_in.c
109 pid = *skb->data; in ax25_rx_iframe()
118 kfree_skb(skb); in ax25_rx_iframe()
119 skb = skbn; in ax25_rx_iframe()
123 skb->mac_header = skb->network_header; in ax25_rx_iframe()
128 netif_rx(skb); in ax25_rx_iframe()
232 ax25_send_to_raw(&dest, skb, skb->data[1]); in ax25_rcv()
246 netif_rx(skb); in ax25_rcv()
256 netif_rx(skb); in ax25_rcv()
381 kfree_skb(skb); in ax25_rcv()
425 kfree_skb(skb); in ax25_rcv()
[all …]
/net/lapb/
lapb_subr.c
48 struct sk_buff *skb; in lapb_frames_acked() local
59 kfree_skb(skb); in lapb_frames_acked()
78 skb_prev = skb; in lapb_requeue_frames()
145 skb_pull(skb, 1); in lapb_decode()
160 skb_pull(skb, 2); in lapb_decode()
172 skb_pull(skb, 2); in lapb_decode()
181 skb_pull(skb, 1); in lapb_decode()
209 skb_pull(skb, 1); in lapb_decode()
224 struct sk_buff *skb; in lapb_send_control() local
260 struct sk_buff *skb; in lapb_transmit_frmr() local
[all …]

