/net/ipv4/
tcp_offload.c
      23  if (before(ts_seq, seq + mss)) {  [in tcp_gso_tstamp()]
      30  seq += mss;  [in tcp_gso_tstamp()]
     141  unsigned int mss;  [in tcp_gso_segment(), local]
     163  if (unlikely(skb->len <= mss))  [in tcp_gso_segment()]
     216  seq += mss;  [in tcp_gso_segment()]
     321  unsigned int mss = 1;  [in tcp_gro_receive(), local]
     343  mss = skb_shinfo(p)->gso_size;  [in tcp_gro_receive()]
     352  flush |= (len - 1) >= mss;  [in tcp_gro_receive()]
     365  mss = 1;  [in tcp_gro_receive()]
     371  mss = 1;  [in tcp_gro_receive()]
     [all …]
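The tcp_offload.c hits above show both sides of TCP segmentation offload: tcp_gso_segment() walks a super-sized skb in gso_size (MSS) steps, advancing the sequence number once per produced segment, while tcp_gro_receive() refuses to merge a packet whose payload exceeds the MSS of the packets already aggregated (flush |= (len - 1) >= mss). Below is a minimal user-space sketch of the sender-side walk, assuming a plain byte count instead of an skb; the names are illustrative, not kernel API.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: print the (seq, len) pairs a GSO engine would emit
 * when slicing one large TCP payload into MSS-sized segments. */
static void gso_walk(uint32_t start_seq, uint32_t payload_len, uint32_t mss)
{
	uint32_t seq = start_seq;
	uint32_t left = payload_len;

	while (left > 0) {
		uint32_t len = left < mss ? left : mss;

		printf("segment seq=%u len=%u\n", seq, len);
		seq += len;	/* full segments advance by exactly one MSS */
		left -= len;
	}
}

int main(void)
{
	gso_walk(1000, 4000, 1448);	/* 2 full segments + 1 short tail */
	return 0;
}
```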
|
syncookies.c
     158  const __u16 mss = *mssp;  [in __cookie_v4_init_sequence(), local]
     161  if (mss >= msstab[mssind])  [in __cookie_v4_init_sequence()]
     320  int mss, u32 tsoff)  [in cookie_tcp_reqsk_alloc(), argument]
     342  req->mss = mss;  [in cookie_tcp_reqsk_alloc()]
     362  int mss;  [in cookie_tcp_check(), local]
     367  mss = __cookie_v4_check(ip_hdr(skb), tcp_hdr(skb));  [in cookie_tcp_check()]
     368  if (!mss) {  [in cookie_tcp_check()]
     390  &tcp_opt, mss, tsoff);  [in cookie_tcp_check()]
     475  tcp_select_initial_window(sk, full_space, req->mss,  [in cookie_v4_check()]
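In __cookie_v4_init_sequence() the peer's advertised MSS is not stored verbatim: the cookie only has room for a small index, so the code picks the largest entry of a fixed msstab[] that does not exceed the advertised value (the `if (mss >= msstab[mssind])` test). Here is a standalone sketch of that lookup with a made-up table for illustration; the real msstab[] lives in net/ipv4/syncookies.c and its values may differ.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical table; the kernel's msstab[] is similar in spirit
 * (ascending common MSS values) but not necessarily identical. */
static const uint16_t msstab[] = { 536, 1300, 1440, 1460 };

/* Return the index of the largest table entry <= the advertised MSS,
 * falling back to index 0, mirroring the syncookie encoding idea. */
static unsigned int mss_to_index(uint16_t mss)
{
	unsigned int i;

	for (i = sizeof(msstab) / sizeof(msstab[0]) - 1; ; i--) {
		if (mss >= msstab[i] || i == 0)
			return i;
	}
}

int main(void)
{
	unsigned int idx = mss_to_index(1400);

	printf("advertised 1400 -> index %u (mss %u)\n", idx, msstab[idx]);
	return 0;
}
```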
|
tcp_recovery.c
     225  u32 mss;  [in tcp_newreno_mark_lost(), local]
     230  mss = tcp_skb_mss(skb);  [in tcp_newreno_mark_lost()]
     231  if (tcp_skb_pcount(skb) > 1 && skb->len > mss)  [in tcp_newreno_mark_lost()]
     233  mss, mss, GFP_ATOMIC);  [in tcp_newreno_mark_lost()]
|
tcp_timer.c
     167  int mss;  [in tcp_mtu_probing(), local]
     177  mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;  [in tcp_mtu_probing()]
     178  mss = min(READ_ONCE(net->ipv4.sysctl_tcp_base_mss), mss);  [in tcp_mtu_probing()]
     179  mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_mtu_probe_floor));  [in tcp_mtu_probing()]
     180  mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_min_snd_mss));  [in tcp_mtu_probing()]
     181  icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);  [in tcp_mtu_probing()]
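The tcp_timer.c hits are the blackhole-detection fallback in tcp_mtu_probing(): after repeated timeouts the MSS derived from the current lower MTU search bound is halved and then clamped by three sysctls before being converted back to an MTU. A self-contained sketch of just that clamping arithmetic follows, with the sysctl values passed in as plain parameters (the function name and values are illustrative).

```c
#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }
static int max_int(int a, int b) { return a > b ? a : b; }

/* Mirror of the clamping sequence in tcp_mtu_probing(): halve the MSS
 * derived from search_low, cap it at tcp_base_mss, then floor it at
 * tcp_mtu_probe_floor and tcp_min_snd_mss. */
static int shrink_probe_mss(int search_low_mss, int base_mss,
			    int probe_floor, int min_snd_mss)
{
	int mss = search_low_mss >> 1;

	mss = min_int(base_mss, mss);
	mss = max_int(mss, probe_floor);
	mss = max_int(mss, min_snd_mss);
	return mss;
}

int main(void)
{
	/* 1460 is halved to 730, then clamped by example sysctl values. */
	printf("new probe mss = %d\n", shrink_probe_mss(1460, 1024, 48, 48));
	return 0;
}
```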
|
tcp_metrics.c
      29  u16 mss;  [struct member]
     134  tm->tcpm_fastopen.mss = 0;  [in tcpm_suck_dst()]
     561  void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,  [in tcp_fastopen_cache_get(), argument]
     574  if (tfom->mss)  [in tcp_fastopen_cache_get()]
     575  *mss = tfom->mss;  [in tcp_fastopen_cache_get()]
     584  void tcp_fastopen_cache_set(struct sock *sk, u16 mss,  [in tcp_fastopen_cache_set(), argument]
     599  if (mss)  [in tcp_fastopen_cache_set()]
     600  tfom->mss = mss;  [in tcp_fastopen_cache_set()]
     721  if (tfom->mss &&  [in tcp_metrics_fill_info()]
     723  tfom->mss) < 0)  [in tcp_metrics_fill_info()]
|
tcp_output.c
     127  int mss = tp->advmss;  [in tcp_advertise_mss(), local]
     132  if (metric < mss) {  [in tcp_advertise_mss()]
     133  mss = metric;  [in tcp_advertise_mss()]
     134  tp->advmss = mss;  [in tcp_advertise_mss()]
     138  return (__u16)mss;  [in tcp_advertise_mss()]
     221  if (space > mss)  [in tcp_select_initial_window()]
     934  opts->mss = mss;  [in tcp_synack_options()]
    1797  return mss +  [in tcp_mss_to_mtu()]
    2977  (pcount - 1) * mss, mss,  [in tcp_send_loss_probe()]
    3108  if (mss <= 0)  [in __tcp_select_window()]
     [all …]
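tcp_output.c covers both directions of the MSS/MTU relationship: tcp_advertise_mss() lowers the advertised MSS to the cached path metric, and tcp_mss_to_mtu() (the truncated `return mss +` hit) adds the network and transport header overhead back on. Assuming plain IPv4 with no IP or TCP options, the arithmetic reduces to the sketch below; the real kernel helpers also account for extension header and TCP option space.

```c
#include <stdio.h>

#define IPV4_HDR_LEN 20   /* assumption: no IP options */
#define TCP_HDR_LEN  20   /* assumption: no TCP options */

/* MSS -> MTU: payload plus the headers carried by every segment. */
static int mss_to_mtu(int mss)
{
	return mss + IPV4_HDR_LEN + TCP_HDR_LEN;
}

/* MTU -> MSS: the inverse, what tcp_mtu_to_mss() boils down to here. */
static int mtu_to_mss(int mtu)
{
	return mtu - IPV4_HDR_LEN - TCP_HDR_LEN;
}

int main(void)
{
	printf("MTU 1500 -> MSS %d\n", mtu_to_mss(1500));   /* 1460 */
	printf("MSS 1460 -> MTU %d\n", mss_to_mtu(1460));   /* 1500 */
	return 0;
}
```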
|
udp_offload.c
     466  unsigned int mss = skb_shinfo(skb)->gso_size;  [in __udp_gso_segment_list(), local]
     472  udp_hdr(skb)->len = htons(sizeof(struct udphdr) + mss);  [in __udp_gso_segment_list()]
     487  unsigned int mss;  [in __udp_gso_segment(), local]
     493  mss = skb_shinfo(gso_skb)->gso_size;  [in __udp_gso_segment()]
     494  if (gso_skb->len <= sizeof(*uh) + mss)  [in __udp_gso_segment()]
     511  mss);  [in __udp_gso_segment()]
     563  mss *= skb_shinfo(segs)->gso_segs;  [in __udp_gso_segment()]
     574  newlen = htons(sizeof(*uh) + mss);  [in __udp_gso_segment()]
     641  unsigned int mss;  [in udp4_ufo_fragment(), local]
     662  mss = skb_shinfo(skb)->gso_size;  [in udp4_ufo_fragment()]
     [all …]
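For UDP GSO the per-segment UDP length is simply sizeof(struct udphdr) plus the gso_size, which is why __udp_gso_segment() bails out when the whole packet already fits in one MSS and later writes `newlen = htons(sizeof(*uh) + mss)` into every full segment. A sketch of the segment-count and length computation, assuming a raw payload length rather than an skb (the segment count via DIV_ROUND_UP is my addition, not a quote of the kernel code):

```c
#include <stdio.h>

#define UDP_HDR_LEN 8	/* sizeof(struct udphdr) */

/* How many UDP segments a GSO payload of payload_len bytes becomes when
 * cut into mss-sized chunks, and the UDP length field of a full segment. */
static void udp_gso_plan(unsigned int payload_len, unsigned int mss)
{
	unsigned int segs = (payload_len + mss - 1) / mss;	/* DIV_ROUND_UP */

	if (payload_len <= mss) {
		printf("fits in one datagram, no GSO needed\n");
		return;
	}
	printf("%u segments, full-segment UDP length = %u\n",
	       segs, UDP_HDR_LEN + mss);
}

int main(void)
{
	udp_gso_plan(4500, 1472);	/* 4 segments, last one short */
	return 0;
}
```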
|
tcp_input.c
    1099  if ((metric > tp->reordering * mss) && mss) {  [in tcp_check_sack_reordering()]
    1108  tp->reordering = min_t(u32, (metric + mss - 1) / mss,  [in tcp_check_sack_reordering()]
    1372  unsigned int new_len = (pkt_len / mss) * mss;  [in tcp_match_skb_to_sack()]
    1585  int mss;  [in tcp_shift_skb_data(), local]
    2761  int mss;  [in tcp_simple_retransmit(), local]
    2774  mss = -1;  [in tcp_simple_retransmit()]
    4035  u16 mss = 0;  [in tcp_parse_mss_option(), local]
    4068  return mss;  [in tcp_parse_mss_option()]
    7106  u16 mss;  [in tcp_get_syncookie_mss(), local]
    7121  if (!mss)  [in tcp_get_syncookie_mss()]
     [all …]
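In tcp_check_sack_reordering() the reordering distance is measured in bytes but stored in packets, so the update divides by the MSS and rounds up (`(metric + mss - 1) / mss`); tcp_match_skb_to_sack() uses the complementary rounding down (`(pkt_len / mss) * mss`) to split an skb on an MSS boundary. Both are ordinary integer tricks, sketched here on plain integers:

```c
#include <stdio.h>

/* Round a byte distance up to whole packets, as the reordering update does. */
static unsigned int bytes_to_packets(unsigned int metric, unsigned int mss)
{
	return (metric + mss - 1) / mss;	/* DIV_ROUND_UP(metric, mss) */
}

/* Trim a split point down to an MSS multiple, as tcp_match_skb_to_sack() does. */
static unsigned int align_to_mss(unsigned int pkt_len, unsigned int mss)
{
	return (pkt_len / mss) * mss;
}

int main(void)
{
	printf("%u packets\n", bytes_to_packets(3000, 1460));	/* 3 */
	printf("%u bytes\n", align_to_mss(3000, 1460));		/* 2920 */
	return 0;
}
```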
|
tcp_minisocks.c
     435  int mss;  [in tcp_openreq_init_rwin(), local]
     437  mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));  [in tcp_openreq_init_rwin()]
     450  else if (full_space < rcv_wnd * mss)  [in tcp_openreq_init_rwin()]
     451  full_space = rcv_wnd * mss;  [in tcp_openreq_init_rwin()]
     455  mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),  [in tcp_openreq_init_rwin()]
     633  newtp->rx_opt.mss_clamp = req->mss;  [in tcp_create_openreq_child()]
|
tcp_fastopen.c
     419  bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,  [in tcp_fastopen_cookie_check(), argument]
     424  tcp_fastopen_cache_get(sk, mss, cookie);  [in tcp_fastopen_cookie_check()]
     455  u16 mss;  [in tcp_fastopen_defer_connect(), local]
     458  if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {  [in tcp_fastopen_defer_connect()]
|
tcp_bbr.c
     246  unsigned int mss = tcp_sk(sk)->mss_cache;  [in bbr_rate_bytes_per_sec(), local]
     248  rate *= mss;  [in bbr_rate_bytes_per_sec()]
|
tcp_ipv4.c
    1883  u16 mss = 0;  [local]
    1885  mss = tcp_get_syncookie_mss(&tcp_request_sock_ops,
    1887  if (mss) {
    1888  *cookie = __cookie_v4_init_sequence(iph, th, &mss);
    1892  return mss;
|
/net/ipv6/
syncookies.c
      96  const __u16 mss = *mssp;  [in __cookie_v6_init_sequence(), local]
      99  if (mss >= msstab[mssind])  [in __cookie_v6_init_sequence()]
     135  int mss;  [in cookie_tcp_check(), local]
     140  mss = __cookie_v6_check(ipv6_hdr(skb), tcp_hdr(skb));  [in cookie_tcp_check()]
     141  if (!mss) {  [in cookie_tcp_check()]
     163  &tcp_opt, mss, tsoff);  [in cookie_tcp_check()]
     256  tcp_select_initial_window(sk, full_space, req->mss,  [in cookie_v6_check()]
|
udp_offload.c
      23  unsigned int mss;  [in udp6_ufo_fragment(), local]
      49  mss = skb_shinfo(skb)->gso_size;  [in udp6_ufo_fragment()]
      50  if (unlikely(skb->len <= mss))  [in udp6_ufo_fragment()]
|
tcp_ipv6.c
    1314  u16 mss = 0;  [local]
    1316  mss = tcp_get_syncookie_mss(&tcp6_request_sock_ops,
    1318  if (mss) {
    1319  *cookie = __cookie_v6_init_sequence(iph, th, &mss);
    1323  return mss;
|
/net/netfilter/
nfnetlink_osf.c
      73  u16 mss = 0;  [in nf_osf_match_one(), local]
     106  mss = ctx->optp[3];  [in nf_osf_match_one()]
     107  mss <<= 8;  [in nf_osf_match_one()]
     108  mss |= ctx->optp[2];  [in nf_osf_match_one()]
     110  mss = ntohs((__force __be16)mss);  [in nf_osf_match_one()]
     141  if (ctx->window == f->wss.val * mss ||  [in nf_osf_match_one()]
     147  if (ctx->window == f->wss.val * (mss + 40) ||  [in nf_osf_match_one()]
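The OSF matcher pulls the 16-bit MSS out of the raw TCP option bytes one byte at a time (optp[3], then optp[2]) and runs the result through ntohs(), because the stored fingerprints express the expected window as a multiple of the peer's MSS (wss.val * mss, or wss.val * (mss + 40) to include 40 header bytes). The sketch below does the equivalent extraction directly as a big-endian read of the option's value bytes, rather than copying the kernel's two-step load; the option layout (kind 2, length 4, value in bytes 2..3) is standard TCP, the wss.val of 44 is only an example.

```c
#include <stdint.h>
#include <stdio.h>

/* Extract the MSS from a TCP MSS option: opt[0] = kind (2), opt[1] = len (4),
 * opt[2..3] = MSS in network byte order. Reading the two bytes explicitly
 * sidesteps host endianness, which is what nf_osf_match_one() achieves. */
static uint16_t tcp_opt_mss(const uint8_t *opt)
{
	return (uint16_t)((opt[2] << 8) | opt[3]);
}

int main(void)
{
	const uint8_t mss_opt[4] = { 2, 4, 0x05, 0xb4 };	/* MSS = 1460 */
	uint16_t mss = tcp_opt_mss(mss_opt);

	printf("mss = %u, window guesses: %u or %u\n",
	       mss, 44 * mss, 44 * (mss + 40));	/* wss.val = 44 as an example */
	return 0;
}
```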
|
nf_synproxy_core.c
     455  u16 mss = opts->mss_encode;  [in synproxy_send_client_synack(), local]
     472  nth->seq = htonl(__cookie_v4_init_sequence(iph, th, &mss));  [in synproxy_send_client_synack()]
     618  int mss;  [in synproxy_recv_client_ack(), local]
     620  mss = __cookie_v4_check(ip_hdr(skb), th);  [in synproxy_recv_client_ack()]
     621  if (mss == 0) {  [in synproxy_recv_client_ack()]
     627  opts->mss_option = mss;  [in synproxy_recv_client_ack()]
     868  u16 mss = opts->mss_encode;  [in synproxy_send_client_synack_ipv6(), local]
    1035  int mss;  [in synproxy_recv_client_ack_ipv6(), local]
    1037  mss = nf_cookie_v6_check(ipv6_hdr(skb), th);  [in synproxy_recv_client_ack_ipv6()]
    1038  if (mss == 0) {  [in synproxy_recv_client_ack_ipv6()]
     [all …]
|
nft_synproxy.c
      35  opts->mss_option = info->mss;  [in nft_synproxy_tcp_options()]
     161  priv->info.mss = ntohs(nla_get_be16(tb[NFTA_SYNPROXY_MSS]));  [in nft_synproxy_do_init()]
     230  if (nla_put_be16(skb, NFTA_SYNPROXY_MSS, htons(priv->info.mss)) ||  [in nft_synproxy_do_dump()]
|
/net/batman-adv/
tp_meter.c
     150  tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd, mss, mss);  [in batadv_tp_update_cwnd()]
     157  ((mss * mss) << 6) / (tp_vars->cwnd << 3));  [in batadv_tp_update_cwnd()]
     158  if (tp_vars->dec_cwnd < (mss << 3)) {  [in batadv_tp_update_cwnd()]
     163  tp_vars->cwnd = batadv_tp_cwnd(tp_vars->cwnd, mss, mss);  [in batadv_tp_update_cwnd()]
     635  size_t packet_len, mss;  [in batadv_tp_recv_ack(), local]
     639  mss = BATADV_TP_PLEN;  [in batadv_tp_recv_ack()]
     705  mss);  [in batadv_tp_recv_ack()]
     731  mss, mss);  [in batadv_tp_recv_ack()]
     739  mss);  [in batadv_tp_recv_ack()]
     745  if (recv_ack - atomic_read(&tp_vars->last_acked) >= mss)  [in batadv_tp_recv_ack()]
     [all …]
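batman-adv's throughput meter reuses TCP-style congestion control with BATADV_TP_PLEN as its MSS: in slow start the window grows by one MSS per acked MSS (batadv_tp_cwnd(cwnd, mss, mss)), and in congestion avoidance the `((mss * mss) << 6) / (cwnd << 3)` term is a fixed-point form of the classic mss*mss/cwnd additive increase, accumulated in dec_cwnd until it is worth applying. A sketch of both updates on plain integers; the helper below is a simplified stand-in for batadv_tp_cwnd, which also guards against overflow and caps the window.

```c
#include <stdio.h>

/* Simplified stand-in for batadv_tp_cwnd(): grow the window by inc bytes.
 * The real helper also handles wrap-around and a maximum window cap. */
static unsigned int cwnd_grow(unsigned int cwnd, unsigned int inc)
{
	return cwnd + inc;
}

static unsigned int slow_start(unsigned int cwnd, unsigned int mss)
{
	return cwnd_grow(cwnd, mss);		/* +1 MSS per acked MSS */
}

static unsigned int cong_avoid(unsigned int cwnd, unsigned int mss)
{
	/* Classic additive increase: roughly one MSS per window of data acked. */
	return cwnd_grow(cwnd, (mss * mss) / cwnd);
}

int main(void)
{
	unsigned int mss = 1400, cwnd = 5 * mss;

	printf("slow start: %u -> %u\n", cwnd, slow_start(cwnd, mss));
	printf("cong avoid: %u -> %u\n", cwnd, cong_avoid(cwnd, mss));
	return 0;
}
```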
|
/net/tipc/
msg.c
     206  int mss, struct sk_buff_head *txq)  [in tipc_msg_append(), argument]
     218  if (!skb || skb->len >= mss) {  [in tipc_msg_append()]
     219  skb = tipc_buf_acquire(mss, GFP_KERNEL);  [in tipc_msg_append()]
     234  cpy = min_t(size_t, rem, mss - mlen);  [in tipc_msg_append()]
     520  bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,  [in tipc_msg_try_bundle(), argument]
     534  if (mss <= INT_H_SIZE + msg_size(msg))  [in tipc_msg_try_bundle()]
     549  if (unlikely(mss < BUF_ALIGN(INT_H_SIZE + tsz) + msg_size(msg)))  [in tipc_msg_try_bundle()]
     551  if (unlikely(pskb_expand_head(tskb, INT_H_SIZE, mss - tsz - INT_H_SIZE,  [in tipc_msg_try_bundle()]
     565  if (likely(tipc_msg_bundle(tskb, msg, mss))) {  [in tipc_msg_try_bundle()]
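TIPC uses the link MSS as its bundling budget: tipc_msg_append() opens a new buffer of mss bytes once the current one is full and copies at most mss - mlen more bytes into it, and tipc_msg_try_bundle() only merges a message behind an existing one if the aligned header plus both payloads still fit under the MSS. A sketch of that fit test, with BUF_ALIGN approximated as 4-byte alignment and INT_H_SIZE treated as an illustrative constant rather than the exact TIPC value:

```c
#include <stdbool.h>
#include <stdio.h>

#define INT_H_SIZE 40				/* illustrative bundle header size */
#define BUF_ALIGN(x) (((x) + 3u) & ~3u)		/* assumed 4-byte alignment */

/* Can a message of msg_size bytes be bundled behind an existing message of
 * cur_size bytes without the result exceeding the link MSS? */
static bool can_bundle(unsigned int mss, unsigned int cur_size,
		       unsigned int msg_size)
{
	if (mss <= INT_H_SIZE + msg_size)		/* message alone is too big */
		return false;
	return mss >= BUF_ALIGN(INT_H_SIZE + cur_size) + msg_size;
}

int main(void)
{
	printf("%d\n", can_bundle(1400, 600, 500));	/* 1: fits */
	printf("%d\n", can_bundle(1400, 900, 600));	/* 0: would overflow */
	return 0;
}
```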
|
msg.h
    1166  bool tipc_msg_try_bundle(struct sk_buff *tskb, struct sk_buff **skb, u32 mss,
    1174  int mss, struct sk_buff_head *txq);
|
/net/ipv4/netfilter/
ipt_SYNPROXY.c
      40  opts.mss_option = info->mss;  [in synproxy_tg4()]
|
/net/ipv6/netfilter/
ip6t_SYNPROXY.c
      40  opts.mss_option = info->mss;  [in synproxy_tg6()]
|
/net/core/
skbuff.c
    4698  unsigned int mss = skb_shinfo(head_skb)->gso_size;  [in skb_segment(), local]
    4714  mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb)) {  [in skb_segment()]
    4742  if (sg && csum && (mss != GSO_BY_FRAGS)) {  [in skb_segment()]
    4778  partial_segs = min(len, GSO_BY_FRAGS - 1) / mss;  [in skb_segment()]
    4780  mss *= partial_segs;  [in skb_segment()]
    4802  if (unlikely(mss == GSO_BY_FRAGS)) {  [in skb_segment()]
    4806  if (len > mss)  [in skb_segment()]
    4807  len = mss;  [in skb_segment()]
    4946  pos, mss);  [in skb_segment()]
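skb_segment() treats GSO_BY_FRAGS as a sentinel gso_size meaning "split at existing fragment boundaries", and when coalescing for GSO_PARTIAL it groups as many MSS-sized pieces as fit in the available length: `partial_segs = min(len, GSO_BY_FRAGS - 1) / mss` followed by `mss *= partial_segs`. A sketch of that grouping arithmetic, with GSO_BY_FRAGS taken as the 0xffff sentinel used by current kernels:

```c
#include <stdio.h>

#define GSO_BY_FRAGS 0xffff	/* sentinel gso_size, per include/linux/skbuff.h */

/* For GSO_PARTIAL, figure out how many MSS-sized pieces can be emitted as
 * one "super segment" and what its effective segment size becomes. */
static unsigned int partial_super_mss(unsigned int len, unsigned int mss)
{
	unsigned int cap = len < GSO_BY_FRAGS - 1 ? len : GSO_BY_FRAGS - 1;
	unsigned int partial_segs = cap / mss;

	if (partial_segs <= 1)
		return mss;		/* nothing to coalesce */
	return mss * partial_segs;	/* the "mss *= partial_segs" step */
}

int main(void)
{
	printf("%u\n", partial_super_mss(64000, 1448));	/* 44 * 1448 = 63712 */
	return 0;
}
```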
|
filter.c
    7528  u16 mss;  [in BPF_CALL_5(), local]
    7553  mss = tcp_v4_get_syncookie(sk, iph, th, &cookie);  [in BPF_CALL_5()]
    7571  if (mss == 0)  [in BPF_CALL_5()]
    7574  return cookie | ((u64)mss << 32);  [in BPF_CALL_5()]
    7897  u16 mss;  [in BPF_CALL_3(), local]
    7905  return cookie | ((u64)mss << 32);  [in BPF_CALL_3()]
    7926  u16 mss;  [in BPF_CALL_3(), local]
    7931  mss = tcp_parse_mss_option(th, 0) ?: mss_clamp;  [in BPF_CALL_3()]
    7934  return cookie | ((u64)mss << 32);  [in BPF_CALL_3()]
   12090  if (attrs->mss < min_mss)  [in bpf_sk_assign_tcp_reqsk()]
     [all …]
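The BPF syncookie helpers return two values in one u64: the 32-bit cookie in the low half and the negotiated MSS in the high half (`cookie | ((u64)mss << 32)`), with an MSS of 0 signalling failure. A sketch of packing and unpacking that return value; the helper names here are illustrative, only the bit layout matches the kernel snippets above.

```c
#include <stdint.h>
#include <stdio.h>

/* Pack a 32-bit syncookie and a 16-bit MSS into one 64-bit return value,
 * matching the "cookie | ((u64)mss << 32)" layout in the BPF helpers. */
static uint64_t pack_cookie_mss(uint32_t cookie, uint16_t mss)
{
	return (uint64_t)cookie | ((uint64_t)mss << 32);
}

static uint32_t unpack_cookie(uint64_t v) { return (uint32_t)v; }
static uint16_t unpack_mss(uint64_t v)    { return (uint16_t)(v >> 32); }

int main(void)
{
	uint64_t v = pack_cookie_mss(0xdeadbeefu, 1460);

	printf("cookie=0x%x mss=%u\n", unpack_cookie(v), unpack_mss(v));
	return 0;
}
```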
|