Lines Matching refs: skb (rmnet MAP data path; the function names place these hits in the Linux kernel's drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c)
33 rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb, in rmnet_map_ipv4_dl_csum_trailer() argument
37 struct iphdr *ip4h = (struct iphdr *)skb->data; in rmnet_map_ipv4_dl_csum_trailer()
38 void *txporthdr = skb->data + ip4h->ihl * 4; in rmnet_map_ipv4_dl_csum_trailer()
109 rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb, in rmnet_map_ipv6_dl_csum_trailer() argument
113 struct ipv6hdr *ip6h = (struct ipv6hdr *)skb->data; in rmnet_map_ipv6_dl_csum_trailer()
114 void *txporthdr = skb->data + sizeof(*ip6h); in rmnet_map_ipv6_dl_csum_trailer()
159 rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb, in rmnet_map_ipv6_dl_csum_trailer() argument
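
These hits are the downlink checksum-trailer validators. Both variants overlay the IP header directly on skb->data and find the transport header behind it: line 38 computes ihl * 4 because the IPv4 header length is variable, while line 114 can use sizeof(*ip6h) since the base IPv6 header is a fixed 40 bytes. The second rmnet_map_ipv6_dl_csum_trailer() hit at line 159 is the stub built when CONFIG_IPV6 is off. A condensed sketch of the IPv4 variant with the stats counters dropped; the helper names (rmnet_map_get_csum_field, csum16_sub) follow the mainline rmnet driver and are quoted from memory:

    static int
    rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
                                   struct rmnet_map_dl_csum_trailer *csum_trailer,
                                   struct rmnet_priv *priv)
    {
        struct iphdr *ip4h = (struct iphdr *)skb->data;     /* line 37 */
        void *txporthdr = skb->data + ip4h->ihl * 4;        /* line 38 */
        __sum16 *csum_field, pseudo_csum, ip_payload_csum;

        if (ip_is_fragment(ip4h))       /* trailer covers whole datagrams only */
            return -EOPNOTSUPP;

        csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
        if (!csum_field)                /* neither TCP nor UDP */
            return -EPROTONOSUPPORT;

        if (ip4h->protocol == IPPROTO_UDP && !*csum_field)
            return 0;                   /* RFC 768: zero means "no checksum" */

        /* The trailer checksum covers the whole IP packet, so peel off the
         * IP header's contribution, then compare the transport checksum
         * field against the payload sum combined with the pseudo-header.
         */
        ip_payload_csum = csum16_sub((__force __sum16)csum_trailer->csum_value,
                                     ip_fast_csum(ip4h, ip4h->ihl));
        pseudo_csum = csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
                                        ntohs(ip4h->tot_len) - ip4h->ihl * 4,
                                        ip4h->protocol, 0);
        if (*csum_field != csum16_sub(ip_payload_csum,
                                      (__force __be16)pseudo_csum))
            return -EINVAL;

        return 0;
    }
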
183 struct sk_buff *skb) in rmnet_map_ipv4_ul_csum_header() argument
190 val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK; in rmnet_map_ipv4_ul_csum_header()
192 ul_header->csum_start_offset = htons(skb_network_header_len(skb)); in rmnet_map_ipv4_ul_csum_header()
195 skb->ip_summed = CHECKSUM_NONE; in rmnet_map_ipv4_ul_csum_header()
218 struct sk_buff *skb) in rmnet_map_ipv6_ul_csum_header() argument
225 val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK; in rmnet_map_ipv6_ul_csum_header()
227 ul_header->csum_start_offset = htons(skb_network_header_len(skb)); in rmnet_map_ipv6_ul_csum_header()
230 skb->ip_summed = CHECKSUM_NONE; in rmnet_map_ipv6_ul_csum_header()
238 struct sk_buff *skb) in rmnet_map_ipv6_ul_csum_header() argument
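
Lines 183 through 238 fill the 4-byte MAPv4 uplink checksum header. Both address families do the same three things: encode skb->csum_offset (where the transport checksum field sits within the transport header) into csum_info at lines 190/225, record the network-header length as csum_start_offset at lines 192/227, and downgrade ip_summed to CHECKSUM_NONE at lines 195/230 so the stack does not also checksum the packet in software. Line 238 is again the !CONFIG_IPV6 stub. A sketch of the IPv4 variant; the flag and mask names are the mainline driver's:

    static void
    rmnet_map_ipv4_ul_csum_header(struct iphdr *ip4h,
                                  struct rmnet_map_ul_csum_header *ul_header,
                                  struct sk_buff *skb)
    {
        u16 val = MAP_CSUM_UL_ENABLED_FLAG;

        if (ip4h->protocol == IPPROTO_UDP)
            val |= MAP_CSUM_UL_UDP_FLAG;
        val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;      /* line 190 */

        /* start of the transport header == start of the csum range */
        ul_header->csum_start_offset = htons(skb_network_header_len(skb));
        ul_header->csum_info = htons(val);

        skb->ip_summed = CHECKSUM_NONE;   /* hardware owns the checksum now */
    }
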
243 static void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb, in rmnet_map_v5_checksum_uplink_packet() argument
250 ul_header = skb_push(skb, sizeof(*ul_header)); in rmnet_map_v5_checksum_uplink_packet()
255 if (skb->ip_summed == CHECKSUM_PARTIAL) { in rmnet_map_v5_checksum_uplink_packet()
256 void *iph = ip_hdr(skb); in rmnet_map_v5_checksum_uplink_packet()
261 if (skb->protocol == htons(ETH_P_IP)) { in rmnet_map_v5_checksum_uplink_packet()
267 skb->protocol == htons(ETH_P_IPV6)) { in rmnet_map_v5_checksum_uplink_packet()
279 skb->ip_summed = CHECKSUM_NONE; in rmnet_map_v5_checksum_uplink_packet()
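
rmnet_map_v5_checksum_uplink_packet() (lines 243 to 279) prepends a MAPv5 checksum header instead. If the stack asked for offload (CHECKSUM_PARTIAL, line 255) and the packet is TCP or UDP over IPv4 or IPv6, the header's valid bit tells the modem to compute the checksum; everything else falls through to the software path. A condensed sketch with stats counters dropped; the exact placement of the line 279 ip_summed reset differs between driver versions, so treat the fallback below as approximate:

    static void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
                                                    struct rmnet_port *port,
                                                    struct net_device *orig_dev)
    {
        struct rmnet_map_v5_csum_header *ul_header;
        u8 proto;

        ul_header = skb_push(skb, sizeof(*ul_header));          /* line 250 */
        memset(ul_header, 0, sizeof(*ul_header));
        ul_header->header_info =
            u8_encode_bits(RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD,
                           MAPV5_HDRINFO_HDR_TYPE_FMASK);

        if (skb->ip_summed == CHECKSUM_PARTIAL) {               /* line 255 */
            void *iph = ip_hdr(skb);                            /* line 256 */

            if (skb->protocol == htons(ETH_P_IP))               /* line 261 */
                proto = ((struct iphdr *)iph)->protocol;
            else if (IS_ENABLED(CONFIG_IPV6) &&
                     skb->protocol == htons(ETH_P_IPV6))        /* line 267 */
                proto = ((struct ipv6hdr *)iph)->nexthdr;
            else
                goto sw_csum;

            if (proto == IPPROTO_TCP || proto == IPPROTO_UDP) {
                ul_header->csum_info |= MAPV5_CSUMINFO_VALID_FLAG;
                return;                 /* modem fills in the checksum */
            }
        }

    sw_csum:
        skb->ip_summed = CHECKSUM_NONE;                         /* line 279 */
    }
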
295 struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb, in rmnet_map_add_map_header() argument
303 map_datalen = skb->len - hdrlen; in rmnet_map_add_map_header()
305 skb_push(skb, sizeof(struct rmnet_map_header)); in rmnet_map_add_map_header()
323 if (skb_tailroom(skb) < padding) in rmnet_map_add_map_header()
326 skb_put_zero(skb, padding); in rmnet_map_add_map_header()
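
rmnet_map_add_map_header() (lines 295 to 326) prepends the basic MAP header and, unless padding is disabled, pads the payload to a 4-byte boundary: the pad length (0 to 3 bytes) is stored in the low bits of the header flags and counted in pkt_len, and line 323 gives up when the tailroom cannot hold the pad bytes. The core of it, condensed from the driver:

        map_datalen = skb->len - hdrlen;                        /* line 303 */
        map_header = skb_push(skb, sizeof(*map_header));        /* line 305 */
        memset(map_header, 0, sizeof(*map_header));

        padding = ALIGN(map_datalen, 4) - map_datalen;          /* 0..3 */
        if (padding) {
            if (skb_tailroom(skb) < padding)                    /* line 323 */
                return NULL;
            skb_put_zero(skb, padding);                         /* line 326 */
        }

        map_header->pkt_len = htons(map_datalen + padding);
        map_header->flags = padding & MAP_PAD_LEN_MASK;  /* CMD bit stays 0 */
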
342 struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb, in rmnet_map_deaggregate() argument
347 void *data = skb->data; in rmnet_map_deaggregate()
352 if (skb->len == 0) in rmnet_map_deaggregate()
355 maph = (struct rmnet_map_header *)skb->data; in rmnet_map_deaggregate()
371 if (((int)skb->len - (int)packet_len) < 0) in rmnet_map_deaggregate()
391 memcpy(skbn->data, skb->data, packet_len); in rmnet_map_deaggregate()
392 skb_pull(skb, packet_len); in rmnet_map_deaggregate()
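
rmnet_map_deaggregate() (lines 342 to 392) peels one MAP packet off the front of an aggregated downlink frame per call: read pkt_len from the MAP header at skb->data (line 355), add the per-format overhead (MAPv4 checksum trailer or MAPv5 next header), refuse frames shorter than the claimed length (line 371; the (int) casts keep the comparison from wrapping as unsigned), then copy the packet into a fresh skb (line 391) and advance the parent with skb_pull (line 392) so the caller can simply loop until NULL. Condensed, with the MAPv5 next-header validation omitted; the DEAGGR headroom macros are the driver's own:

        struct rmnet_map_header *maph;
        struct sk_buff *skbn;
        u32 packet_len;

        if (skb->len == 0)                  /* line 352: aggregate drained */
            return NULL;

        maph = (struct rmnet_map_header *)skb->data;            /* line 355 */
        packet_len = ntohs(maph->pkt_len) + sizeof(*maph);
        /* plus csum trailer / next header, depending on port->data_format */

        if ((int)skb->len - (int)packet_len < 0)                /* line 371 */
            return NULL;
        if (!maph->pkt_len)             /* hardware can emit empty frames */
            return NULL;

        skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
        if (!skbn)
            return NULL;

        skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
        skb_put(skbn, packet_len);
        memcpy(skbn->data, skb->data, packet_len);              /* line 391 */
        skb_pull(skb, packet_len);                              /* line 392 */
        return skbn;
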
403 int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len) in rmnet_map_checksum_downlink_packet() argument
405 struct rmnet_priv *priv = netdev_priv(skb->dev); in rmnet_map_checksum_downlink_packet()
408 if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) { in rmnet_map_checksum_downlink_packet()
413 csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len); in rmnet_map_checksum_downlink_packet()
420 if (skb->protocol == htons(ETH_P_IP)) in rmnet_map_checksum_downlink_packet()
421 return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv); in rmnet_map_checksum_downlink_packet()
423 if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) in rmnet_map_checksum_downlink_packet()
424 return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv); in rmnet_map_checksum_downlink_packet()
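
rmnet_map_checksum_downlink_packet() (lines 403 to 424) is the MAPv4 downlink dispatcher: bail out when the device has RX checksumming disabled (line 408), point at the trailer sitting right behind the IP packet (line 413), then branch on skb->protocol, with IPv6 compiled in only under CONFIG_IPV6 (line 423). Stitched together it reads roughly as below; mainline also rejects trailers whose valid bit is unset, elided here:

    int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
    {
        struct rmnet_priv *priv = netdev_priv(skb->dev);        /* line 405 */
        struct rmnet_map_dl_csum_trailer *csum_trailer;

        if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))   /* line 408 */
            return -EOPNOTSUPP;

        csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);

        if (skb->protocol == htons(ETH_P_IP))                   /* line 420 */
            return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv);
        if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6))
            return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv);

        return -EPROTONOSUPPORT;
    }
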
431 static void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb, in rmnet_map_v4_checksum_uplink_packet() argument
439 skb_push(skb, sizeof(struct rmnet_map_ul_csum_header)); in rmnet_map_v4_checksum_uplink_packet()
445 if (skb->ip_summed != CHECKSUM_PARTIAL) in rmnet_map_v4_checksum_uplink_packet()
451 if (skb->protocol == htons(ETH_P_IP)) { in rmnet_map_v4_checksum_uplink_packet()
452 rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb); in rmnet_map_v4_checksum_uplink_packet()
457 if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) { in rmnet_map_v4_checksum_uplink_packet()
458 rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb); in rmnet_map_v4_checksum_uplink_packet()
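
The MAPv4 uplink counterpart reserves its header before inspecting the packet (line 439), so every exit path leaves a header in place; offload is attempted only for CHECKSUM_PARTIAL skbs (line 445), delegating to the per-family fillers from lines 183/218. Condensed; mainline also checks orig_dev->features for NETIF_F_IP_CSUM/NETIF_F_IPV6_CSUM before offloading:

    static void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
                                                    struct net_device *orig_dev)
    {
        struct rmnet_map_ul_csum_header *ul_header;
        void *iphdr;

        ul_header = skb_push(skb, sizeof(*ul_header));          /* line 439 */

        if (skb->ip_summed != CHECKSUM_PARTIAL)                 /* line 445 */
            goto sw_csum;

        /* the IP header now sits immediately behind the UL header */
        iphdr = (char *)ul_header + sizeof(*ul_header);

        if (skb->protocol == htons(ETH_P_IP)) {                 /* line 451 */
            rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
            return;
        }
        if (IS_ENABLED(CONFIG_IPV6) &&
            skb->protocol == htons(ETH_P_IPV6)) {               /* line 457 */
            rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
            return;
        }

    sw_csum:
        memset(ul_header, 0, sizeof(*ul_header));  /* zeroed header = no offload */
    }
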
474 void rmnet_map_checksum_uplink_packet(struct sk_buff *skb, in rmnet_map_checksum_uplink_packet() argument
481 rmnet_map_v4_checksum_uplink_packet(skb, orig_dev); in rmnet_map_checksum_uplink_packet()
484 rmnet_map_v5_checksum_uplink_packet(skb, port, orig_dev); in rmnet_map_checksum_uplink_packet()
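
rmnet_map_checksum_uplink_packet() (lines 474 to 484) only selects between the two formats; the csum_type parameter name follows the mainline signature, quoted from memory:

    void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
                                          struct rmnet_port *port,
                                          struct net_device *orig_dev,
                                          int csum_type)
    {
        switch (csum_type) {
        case RMNET_FLAGS_EGRESS_MAP_CKSUMV4:
            rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);       /* line 481 */
            break;
        case RMNET_FLAGS_EGRESS_MAP_CKSUMV5:
            rmnet_map_v5_checksum_uplink_packet(skb, port, orig_dev); /* line 484 */
            break;
        default:
            break;
        }
    }
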
492 int rmnet_map_process_next_hdr_packet(struct sk_buff *skb, in rmnet_map_process_next_hdr_packet() argument
495 struct rmnet_priv *priv = netdev_priv(skb->dev); in rmnet_map_process_next_hdr_packet()
499 next_hdr = (struct rmnet_map_v5_csum_header *)(skb->data + in rmnet_map_process_next_hdr_packet()
508 if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) { in rmnet_map_process_next_hdr_packet()
512 skb->ip_summed = CHECKSUM_UNNECESSARY; in rmnet_map_process_next_hdr_packet()
518 skb_pull(skb, sizeof(*next_hdr)); in rmnet_map_process_next_hdr_packet()
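
rmnet_map_process_next_hdr_packet() (lines 492 to 518) is the MAPv5 downlink path: rather than re-verifying a checksum value, it only inspects the valid bit the modem set in the next header. With RX checksumming enabled (line 508 tests the disabled case) and the bit set, the skb is marked CHECKSUM_UNNECESSARY (line 512); either way the 4-byte header is pulled before the packet goes up the stack (line 518). A condensed sketch with stats dropped:

    int rmnet_map_process_next_hdr_packet(struct sk_buff *skb, u16 len)
    {
        struct rmnet_map_v5_csum_header *next_hdr;

        next_hdr = (struct rmnet_map_v5_csum_header *)(skb->data +
                        sizeof(struct rmnet_map_header));       /* line 499 */

        if (u8_get_bits(next_hdr->header_info,
                        MAPV5_HDRINFO_HDR_TYPE_FMASK) !=
            RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
            return -EINVAL;

        if (likely(skb->dev->features & NETIF_F_RXCSUM) &&      /* cf. line 508 */
            (next_hdr->csum_info & MAPV5_CSUMINFO_VALID_FLAG))
            skb->ip_summed = CHECKSUM_UNNECESSARY;              /* line 512 */

        skb_pull(skb, sizeof(*next_hdr));                       /* line 518 */
        return 0;
    }
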
533 static void rmnet_send_skb(struct rmnet_port *port, struct sk_buff *skb) in rmnet_send_skb() argument
535 if (skb_needs_linearize(skb, port->dev->features)) { in rmnet_send_skb()
536 if (unlikely(__skb_linearize(skb))) { in rmnet_send_skb()
541 dev_kfree_skb_any(skb); in rmnet_send_skb()
546 dev_queue_xmit(skb); in rmnet_send_skb()
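
rmnet_send_skb() is the aggregation path's transmit helper. Aggregates built on a frag_list are non-linear, so if the underlying device cannot handle that the skb is linearized first, and it is dropped (line 541) only when linearization itself fails to allocate. Approximately:

    static void rmnet_send_skb(struct rmnet_port *port, struct sk_buff *skb)
    {
        if (skb_needs_linearize(skb, port->dev->features)) {    /* line 535 */
            if (unlikely(__skb_linearize(skb))) {               /* line 536 */
                /* mainline bumps the rmnet device's tx_drops counter here */
                dev_kfree_skb_any(skb);                         /* line 541 */
                return;
            }
        }

        dev_queue_xmit(skb);                                    /* line 546 */
    }
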
551 struct sk_buff *skb = NULL; in rmnet_map_flush_tx_packet_work() local
560 skb = port->skbagg_head; in rmnet_map_flush_tx_packet_work()
567 if (skb) in rmnet_map_flush_tx_packet_work()
568 rmnet_send_skb(port, skb); in rmnet_map_flush_tx_packet_work()
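
rmnet_map_flush_tx_packet_work() runs when the aggregation deadline passes: under agg_lock it detaches skbagg_head (line 560) unless rmnet_map_tx_aggregate() already shipped it, then transmits outside the lock (line 568). Roughly, with reset_aggr_params() being the driver helper that NULLs skbagg_head and zeroes the aggregation state:

    static void rmnet_map_flush_tx_packet_work(struct work_struct *work)
    {
        struct sk_buff *skb = NULL;                             /* line 551 */
        struct rmnet_port *port;

        port = container_of(work, struct rmnet_port, agg_wq);

        spin_lock_bh(&port->agg_lock);
        if (likely(port->agg_state == -EINPROGRESS)) {
            /* the buffer may already have been shipped out */
            if (likely(port->skbagg_head)) {
                skb = port->skbagg_head;                        /* line 560 */
                reset_aggr_params(port);
            }
            port->agg_state = 0;
        }
        spin_unlock_bh(&port->agg_lock);

        if (skb)
            rmnet_send_skb(port, skb);                          /* line 568 */
    }
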
582 unsigned int rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port, in rmnet_map_tx_aggregate() argument
586 unsigned int len = skb->len; in rmnet_map_tx_aggregate()
600 size = port->egress_agg_params.bytes - skb->len; in rmnet_map_tx_aggregate()
612 port->skbagg_head = skb_copy_expand(skb, 0, size, GFP_ATOMIC); in rmnet_map_tx_aggregate()
616 dev_kfree_skb_any(skb); in rmnet_map_tx_aggregate()
626 if (skb->len > size) { in rmnet_map_tx_aggregate()
637 port->skbagg_tail->next = skb; in rmnet_map_tx_aggregate()
639 skb_shinfo(port->skbagg_head)->frag_list = skb; in rmnet_map_tx_aggregate()
641 port->skbagg_head->len += skb->len; in rmnet_map_tx_aggregate()
642 port->skbagg_head->data_len += skb->len; in rmnet_map_tx_aggregate()
643 port->skbagg_head->truesize += skb->truesize; in rmnet_map_tx_aggregate()
644 port->skbagg_tail = skb; in rmnet_map_tx_aggregate()
671 skb->protocol = htons(ETH_P_MAP); in rmnet_map_tx_aggregate()
672 dev_queue_xmit(skb); in rmnet_map_tx_aggregate()
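
rmnet_map_tx_aggregate() (lines 582 to 672) batches uplink MAP packets. The first packet of a batch is copied into an oversized head skb via skb_copy_expand (line 612), sized from the byte budget computed at line 600; later packets are chained zero-copy onto the head's frag_list (lines 637 to 644) with len, data_len and truesize adjusted by hand, and a packet that would overflow the budget (line 626) first forces the current aggregate out through rmnet_send_skb(). Lines 671 and 672 are the no-aggregation bypass: stamp ETH_P_MAP and transmit the packet on its own. The chaining step, condensed:

        /* appending one packet to an existing aggregate, under port->agg_lock */
        if (skb_has_frag_list(port->skbagg_head))
            port->skbagg_tail->next = skb;                      /* line 637 */
        else
            skb_shinfo(port->skbagg_head)->frag_list = skb;     /* line 639 */

        port->skbagg_head->len += skb->len;            /* total length       */
        port->skbagg_head->data_len += skb->len;       /* non-linear portion */
        port->skbagg_head->truesize += skb->truesize;  /* memory accounting  */
        port->skbagg_tail = skb;                                /* line 644 */

Because only the head skb is a real copy, the manual length and truesize updates keep its bookkeeping consistent with what skb_put() would have maintained, while the appended skbs are never copied at all.
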