/net/ipv4/
tcp_input.c
    606   tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp);  in tcp_init_buffer_space()
    608   tp->rcvq_space.space = min3(tp->rcv_ssthresh, tp->rcv_wnd,  in tcp_init_buffer_space()
    631   tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss);  in tcp_clamp_window()
    708   tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd;  in tcp_rcv_rtt_measure()
    932   tp->rttvar_us -= (tp->rttvar_us - tp->mdev_max_us) >> 2;  in tcp_rtt_estimator()
    933   tp->rtt_seq = tp->snd_nxt;  in tcp_rtt_estimator()
    944   tp->rtt_seq = tp->snd_nxt;  in tcp_rtt_estimator()
    2210  tp->high_seq = tp->snd_nxt;  in tcp_enter_loss()
    2503  if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {  in tcp_is_non_sack_preventing_reopen()
    3326  tp->snd_up = tp->snd_una;  in tcp_clean_rtx_queue()
    [all …]
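The tcp_rtt_estimator() hits above (lines 932-944) belong to the kernel's scaled Jacobson/Karels RTT smoothing. Below is a minimal, unscaled sketch of the same idea using the RFC 6298 gains (1/8 for SRTT, 1/4 for RTTVAR); `struct rtt_est` and `rtt_sample()` are made-up names, and the kernel's actual fixed-point bookkeeping (srtt_us stored as 8*SRTT, mdev_max tracking) is not reproduced here.

```c
#include <stdint.h>
#include <stdio.h>

struct rtt_est {
	uint32_t srtt_us;   /* smoothed round-trip time */
	uint32_t rttvar_us; /* round-trip time variance estimate */
	int      seen;      /* has a first sample been taken? */
};

static void rtt_sample(struct rtt_est *e, uint32_t rtt_us)
{
	if (!e->seen) {
		e->srtt_us = rtt_us;
		e->rttvar_us = rtt_us / 2;
		e->seen = 1;
		return;
	}
	uint32_t diff = rtt_us > e->srtt_us ? rtt_us - e->srtt_us
					    : e->srtt_us - rtt_us;
	/* rttvar <- 3/4 * rttvar + 1/4 * |srtt - rtt| */
	e->rttvar_us -= e->rttvar_us >> 2;
	e->rttvar_us += diff >> 2;
	/* srtt <- 7/8 * srtt + 1/8 * rtt */
	e->srtt_us -= e->srtt_us >> 3;
	e->srtt_us += rtt_us >> 3;
}

int main(void)
{
	struct rtt_est e = { 0 };
	uint32_t samples[] = { 100000, 120000, 90000, 110000 };
	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		rtt_sample(&e, samples[i]);
		printf("srtt=%u us rttvar=%u us\n", e.srtt_us, e.rttvar_us);
	}
	return 0;
}
```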
|
tcp_recovery.c
    9     if (!tp->reord_seen) {  in tcp_rack_reo_wnd()
    16    if (tp->sacked_out >= tp->reordering &&  in tcp_rack_reo_wnd()
    28    return min((tcp_min_rtt(tp) >> 2) * tp->rack.reo_wnd_steps,  in tcp_rack_reo_wnd()
    29    tp->srtt_us >> 3);  in tcp_rack_reo_wnd()
    100   if (!tp->rack.advanced)  in tcp_rack_mark_lost()
    104   tp->rack.advanced = 0;  in tcp_rack_mark_lost()
    137   tp->rack.advanced = 1;  in tcp_rack_advance()
    138   tp->rack.rtt_us = rtt_us;  in tcp_rack_advance()
    153   u32 lost = tp->lost;  in tcp_rack_reo_timeout()
    205   tp->rack.last_delivered = tp->delivered;  in tcp_rack_update_reo_wnd()
    [all …]
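Lines 28-29 show the RACK reordering-window formula: the window grows in steps of a quarter of the minimum RTT and is capped at the smoothed RTT. A small sketch of that computation, assuming the kernel's convention that srtt_us stores 8*SRTT (the function name here is hypothetical):

```c
#include <stdint.h>

/* Reordering window in microseconds: steps of min_rtt/4, capped at SRTT. */
static uint32_t rack_reo_wnd_us(uint32_t min_rtt_us, uint32_t srtt_scaled_us,
				uint32_t reo_wnd_steps)
{
	uint32_t wnd = (min_rtt_us >> 2) * reo_wnd_steps; /* min_rtt/4 per step */
	uint32_t srtt_us = srtt_scaled_us >> 3;           /* unscale 8 * SRTT   */
	return wnd < srtt_us ? wnd : srtt_us;
}
```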
|
tcp_rate.c
    42    struct tcp_sock *tp = tcp_sk(sk);  in tcp_rate_skb_sent() local
    58    if (!tp->packets_out) {  in tcp_rate_skb_sent()
    124   if (tp->app_limited && after(tp->delivered, tp->app_limited))  in tcp_rate_gen()
    125   tp->app_limited = 0;  in tcp_rate_gen()
    132   tp->delivered_mstamp = tp->tcp_mstamp;  in tcp_rate_gen()
    178   tp->rx_opt.sack_ok, tcp_min_rtt(tp));  in tcp_rate_gen()
    199   tp->write_seq - tp->snd_nxt < tp->mss_cache &&  in tcp_rate_check_app_limited()
    203   tcp_packets_in_flight(tp) < tcp_snd_cwnd(tp) &&  in tcp_rate_check_app_limited()
    205   tp->lost_out <= tp->retrans_out)  in tcp_rate_check_app_limited()
    206   tp->app_limited =  in tcp_rate_check_app_limited()
    [all …]
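Lines 199-206 are the conditions under which tcp_rate_check_app_limited() marks the flow as application-limited: a delivery-rate sample is only trusted as a bandwidth estimate when the sender was limited by the network, not by the application. A hedged sketch of that test; the struct and helper names model tcp_sock fields but are invented here:

```c
#include <stdbool.h>
#include <stdint.h>

struct flow_state {
	uint32_t write_seq;    /* last byte queued by the application  */
	uint32_t snd_nxt;      /* next byte to be sent                 */
	uint32_t mss;
	uint32_t inflight;     /* packets currently in the network     */
	uint32_t cwnd;
	uint32_t lost_out;
	uint32_t retrans_out;
};

static bool flow_is_app_limited(const struct flow_state *f)
{
	return f->write_seq - f->snd_nxt < f->mss && /* no full segment left to send */
	       f->inflight < f->cwnd &&              /* not blocked by cwnd          */
	       f->lost_out <= f->retrans_out;        /* no un-retransmitted losses   */
}
```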
|
tcp_output.c
    103   ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale))))  in tcp_acceptable_seq()
    273   tp->rcv_wup = tp->rcv_nxt;  in tcp_select_window()
    296   tp->rcv_wup = tp->rcv_nxt;  in tcp_select_window()
    422   return tp->snd_una != tp->snd_up;  in tcp_urg_mode()
    1316  tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache);  in __tcp_transmit_skb()
    2993  tp->tlp_high_seq = tp->snd_nxt;  in tcp_send_loss_probe()
    3938  tp->rcv_ssthresh = tp->rcv_wnd;  in tcp_connect_init()
    3945  tp->snd_una = tp->write_seq;  in tcp_connect_init()
    3946  tp->snd_sml = tp->write_seq;  in tcp_connect_init()
    3947  tp->snd_up = tp->write_seq;  in tcp_connect_init()
    [all …]
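Line 103 is part of tcp_acceptable_seq(): because the advertised window is only exchanged at 2^rcv_wscale granularity, a sequence number may legitimately sit less than one window-scale unit past the nominal window end and still be usable. A hedged boolean sketch of that tolerance check (the helper name and its standalone form are assumptions; the kernel returns a sequence number rather than a bool):

```c
#include <stdbool.h>
#include <stdint.h>

/* Is `seq` acceptable given window end `wnd_end` and the peer's window scale?
 * Sequence arithmetic is modulo 2^32, so unsigned subtraction is intentional.
 */
static bool seq_acceptable(uint32_t seq, uint32_t wnd_end, uint8_t rcv_wscale)
{
	return (int32_t)(wnd_end - seq) >= 0 ||          /* within the window */
	       (seq - wnd_end) < (1u << rcv_wscale);     /* < one scale unit past it */
}
```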
|
tcp.c
    650   answ = READ_ONCE(tp->write_seq) - tp->snd_una;  in tcp_ioctl()
    674   tp->pushed_seq = tp->write_seq;  in tcp_mark_push()
    679   return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));  in forced_push()
    702   tp->snd_up = tp->write_seq;  in tcp_mark_urg()
    2378  inq = tp->rcv_nxt - tp->copied_seq;  in tcp_inq_hint()
    2843  if (unlikely(tp->urg_data) && after(tp->copied_seq, tp->urg_seq)) {  in tcp_recvmsg_locked()
    3343  } else if (tp->snd_nxt != tp->write_seq &&  in tcp_disconnect()
    3356  WRITE_ONCE(tp->copied_seq, tp->rcv_nxt);  in tcp_disconnect()
    3373  seq = tp->write_seq + tp->max_window + 2;  in tcp_disconnect()
    3405  memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));  in tcp_disconnect()
    [all …]
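Two of the hits above are simple queue-depth computations: line 2378 (tcp_inq_hint()) counts bytes received but not yet read by the application, and line 650 (the SIOCOUTQ branch of tcp_ioctl()) counts bytes written by the application but not yet acknowledged. A minimal sketch; the function names below are illustrative only, and u32 subtraction handles sequence-number wraparound:

```c
#include <stdint.h>

static uint32_t tcp_unread_bytes(uint32_t rcv_nxt, uint32_t copied_seq)
{
	return rcv_nxt - copied_seq;   /* received, not yet copied to userspace */
}

static uint32_t tcp_unacked_bytes(uint32_t write_seq, uint32_t snd_una)
{
	return write_seq - snd_una;    /* queued by the app, not yet ACKed */
}
```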
|
tcp_vegas.c
    162   return min(tp->snd_ssthresh, tcp_snd_cwnd(tp));  in tcp_vegas_ssthresh()
    241   tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp),  in tcp_vegas_cong_avoid()
    243   tp->snd_ssthresh = tcp_vegas_ssthresh(tp);  in tcp_vegas_cong_avoid()
    258   tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - 1);  in tcp_vegas_cong_avoid()
    259   tp->snd_ssthresh  in tcp_vegas_cong_avoid()
    265   tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);  in tcp_vegas_cong_avoid()
    273   if (tcp_snd_cwnd(tp) < 2)  in tcp_vegas_cong_avoid()
    274   tcp_snd_cwnd_set(tp, 2);  in tcp_vegas_cong_avoid()
    275   else if (tcp_snd_cwnd(tp) > tp->snd_cwnd_clamp)  in tcp_vegas_cong_avoid()
    276   tcp_snd_cwnd_set(tp, tp->snd_cwnd_clamp);  in tcp_vegas_cong_avoid()
    [all …]
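The +1/-1 adjustments and the final 2..snd_cwnd_clamp clamp above are the tail end of the Vegas decision: estimate how many segments are queued in the network from the gap between the current RTT and the base RTT, then nudge cwnd by one segment per RTT. A standalone sketch of that decision, assuming the common formulation diff = cwnd * (rtt - baseRTT) / baseRTT and treating alpha/beta as plain parameters (the kernel exposes them as module parameters):

```c
#include <stdint.h>

static uint32_t vegas_next_cwnd(uint32_t cwnd, uint32_t base_rtt_us,
				uint32_t rtt_us, uint32_t alpha, uint32_t beta)
{
	/* Estimated segments sitting in queues along the path. */
	uint64_t diff = (uint64_t)cwnd * (rtt_us - base_rtt_us) / base_rtt_us;

	if (diff < alpha)
		cwnd += 1;              /* too little queued: speed up   */
	else if (diff > beta)
		cwnd -= 1;              /* too much queued: slow down    */
	/* else: sending just about as fast as we should be             */

	return cwnd < 2 ? 2 : cwnd;     /* never below two segments      */
}
```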
|
tcp_timer.c
    38    elapsed = tcp_time_stamp_ts(tp) - tp->retrans_stamp;  in tcp_clamp_rto_to_user_timeout()
    39    if (tp->tcp_usec_ts)  in tcp_clamp_rto_to_user_timeout()
    124   (!tp->snd_wnd && !tp->packets_out))  in tcp_out_of_resources()
    232   if (tp->tcp_usec_ts) {  in retransmits_timed_out()
    445   tp->rto_stamp = tcp_time_stamp_ms(tp);  in tcp_update_rto_stats()
    448   tp->total_rto++;  in tcp_update_rto_stats()
    485   tp->retrans_stamp = tcp_time_stamp_ts(tp);  in tcp_fastopen_synack_timer()
    568   rtx_delta = tcp_time_stamp_ts(tp) - (tp->retrans_stamp ?:  in tcp_retransmit_timer()
    576   inet->inet_num, tp->snd_una, tp->snd_nxt,  in tcp_retransmit_timer()
    584   inet->inet_num, tp->snd_una, tp->snd_nxt,  in tcp_retransmit_timer()
    [all …]
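Line 38 is the elapsed-time computation inside tcp_clamp_rto_to_user_timeout(): when TCP_USER_TIMEOUT is set, the next retransmission timer must not fire later than the point at which the connection would be aborted anyway. A simplified sketch of that clamping, with names and millisecond units chosen here for illustration:

```c
#include <stdint.h>

static uint32_t clamp_rto_ms(uint32_t rto_ms, uint32_t user_timeout_ms,
			     uint32_t now_ms, uint32_t retrans_stamp_ms)
{
	if (!user_timeout_ms)
		return rto_ms;                         /* no user timeout configured */

	uint32_t elapsed = now_ms - retrans_stamp_ms;  /* time already spent retrying */
	if (elapsed >= user_timeout_ms)
		return 1;                              /* deadline passed: fire at once */

	uint32_t remaining = user_timeout_ms - elapsed;
	return rto_ms < remaining ? rto_ms : remaining;
}
```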
|
tcp_hybla.c
    57    tcp_snd_cwnd_set(tp, 2);  in hybla_init()
    58    tp->snd_cwnd_clamp = 65535;  in hybla_init()
    142   tp->snd_cwnd_cnt++;  in hybla_cong_avoid()
    146   tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + (increment >> 7));  in hybla_cong_avoid()
    151   tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);  in hybla_cong_avoid()
    153   tp->snd_cwnd_cnt = 0;  in hybla_cong_avoid()
    156   if (increment == 0 && odd == 0 && tp->snd_cwnd_cnt >= tcp_snd_cwnd(tp)) {  in hybla_cong_avoid()
    157   tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);  in hybla_cong_avoid()
    158   tp->snd_cwnd_cnt = 0;  in hybla_cong_avoid()
    162   tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_ssthresh));  in hybla_cong_avoid()
    [all …]
|
tcp_highspeed.c
    102   struct tcp_sock *tp = tcp_sk(sk);  in hstcp_init() local
    109   tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);  in hstcp_init()
    114   struct tcp_sock *tp = tcp_sk(sk);  in hstcp_cong_avoid() local
    120   if (tcp_in_slow_start(tp))  in hstcp_cong_avoid()
    121   tcp_slow_start(tp, acked);  in hstcp_cong_avoid()
    140   if (tcp_snd_cwnd(tp) < tp->snd_cwnd_clamp) {  in hstcp_cong_avoid()
    142   tp->snd_cwnd_cnt += ca->ai + 1;  in hstcp_cong_avoid()
    143   if (tp->snd_cwnd_cnt >= tcp_snd_cwnd(tp)) {  in hstcp_cong_avoid()
    144   tp->snd_cwnd_cnt -= tcp_snd_cwnd(tp);  in hstcp_cong_avoid()
    145   tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);  in hstcp_cong_avoid()
    [all …]
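Lines 142-145 show the credit-counter pattern HighSpeed TCP uses for its larger additive-increase step: every ACK adds ai+1 credits, and once the credits cover one full window, cwnd grows by a segment, so cwnd rises by roughly ai+1 segments per RTT. A sketch of just that accumulator (the struct and function names are invented, and HSTCP's ai lookup table is omitted):

```c
#include <stdint.h>

struct hstcp_like {
	uint32_t cwnd;      /* congestion window, in segments        */
	uint32_t cwnd_cnt;  /* ACK credits accumulated toward +1     */
	uint32_t ai;        /* table-driven additive-increase step   */
};

static void hstcp_like_on_ack(struct hstcp_like *c, uint32_t cwnd_clamp)
{
	if (c->cwnd >= cwnd_clamp)
		return;                     /* respect the global cap */
	c->cwnd_cnt += c->ai + 1;
	if (c->cwnd_cnt >= c->cwnd) {
		c->cwnd_cnt -= c->cwnd;
		c->cwnd += 1;
	}
}
```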
|
tcp_yeah.c
    43    struct tcp_sock *tp = tcp_sk(sk);  in tcp_yeah_init() local
    55    tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);  in tcp_yeah_init()
    60    struct tcp_sock *tp = tcp_sk(sk);  in tcp_yeah_cong_avoid() local
    66    if (tcp_in_slow_start(tp)) {  in tcp_yeah_cong_avoid()
    67    acked = tcp_slow_start(tp, acked);  in tcp_yeah_cong_avoid()
    74    tcp_cong_avoid_ai(tp, min(tcp_snd_cwnd(tp), TCP_SCALABLE_AI_CNT),  in tcp_yeah_cong_avoid()
    78    tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked);  in tcp_yeah_cong_avoid()
    133   bw = tcp_snd_cwnd(tp);  in tcp_yeah_cong_avoid()
    145   tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - reduction);  in tcp_yeah_cong_avoid()
    147   tcp_snd_cwnd_set(tp, max(tcp_snd_cwnd(tp),  in tcp_yeah_cong_avoid()
    [all …]
|
tcp_veno.c
    155   if (tcp_in_slow_start(tp)) {  in tcp_veno_cong_avoid()
    167   tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked);  in tcp_veno_cong_avoid()
    172   if (tp->snd_cwnd_cnt >= tcp_snd_cwnd(tp)) {  in tcp_veno_cong_avoid()
    174   tcp_snd_cwnd(tp) < tp->snd_cwnd_clamp) {  in tcp_veno_cong_avoid()
    175   tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);  in tcp_veno_cong_avoid()
    179   tp->snd_cwnd_cnt = 0;  in tcp_veno_cong_avoid()
    181   tp->snd_cwnd_cnt += acked;  in tcp_veno_cong_avoid()
    184   if (tcp_snd_cwnd(tp) < 2)  in tcp_veno_cong_avoid()
    185   tcp_snd_cwnd_set(tp, 2);  in tcp_veno_cong_avoid()
    186   else if (tcp_snd_cwnd(tp) > tp->snd_cwnd_clamp)  in tcp_veno_cong_avoid()
    [all …]
|
tcp_dctcp.c
    83    ca->next_seq = tp->snd_nxt;  in dctcp_reset()
    85    ca->old_delivered = tp->delivered;  in dctcp_reset()
    93    if (tcp_ecn_mode_any(tp) ||  in dctcp_init()
    98    ca->prior_rcv_nxt = tp->rcv_nxt;  in dctcp_init()
    105   dctcp_reset(tp, ca);  in dctcp_init()
    121   struct tcp_sock *tp = tcp_sk(sk);  in dctcp_ssthresh() local
    123   ca->loss_cwnd = tcp_snd_cwnd(tp);  in dctcp_ssthresh()
    124   return max(tcp_snd_cwnd(tp) - ((tcp_snd_cwnd(tp) * ca->dctcp_alpha) >> 11U), 2U);  in dctcp_ssthresh()
    170   dctcp_reset(tp, ca);  in dctcp_update_alpha()
    177   struct tcp_sock *tp = tcp_sk(sk);  in dctcp_react_to_loss() local
    [all …]
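Line 124 is DCTCP's window reduction: dctcp_alpha is a fixed-point fraction scaled to 1024 (1024 means every packet in the last window was ECN-marked), and the extra bit in the ">> 11" halves it, so the window shrinks by alpha/2 instead of Reno's fixed one-half. A minimal sketch of that arithmetic (the helper name is illustrative):

```c
#include <stdint.h>

#define DCTCP_ALPHA_SCALE 1024u  /* alpha == 1024 <=> all packets were marked */

static uint32_t dctcp_ssthresh_like(uint32_t cwnd, uint32_t alpha)
{
	/* cwnd * alpha / (2 * 1024)  ==  cwnd * (alpha / 1024) / 2 */
	uint32_t reduction = (cwnd * alpha) >> 11;
	uint32_t next = cwnd - reduction;
	return next > 2 ? next : 2;      /* never drop below two segments */
}
```

With alpha = 1024 this halves the window like Reno; with alpha near zero (few marks) the window barely shrinks, which is the point of DCTCP.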
|
tcp_nv.c
    125   struct tcp_sock *tp = tcp_sk(sk);  in tcpnv_reset() local
    132   ca->nv_rtt_start_seq = tp->snd_una;  in tcpnv_reset()
    134   ca->nv_last_snd_una = tp->snd_una;  in tcpnv_reset()
    182   struct tcp_sock *tp = tcp_sk(sk);  in tcpnv_cong_avoid() local
    193   if (tcp_in_slow_start(tp)) {  in tcpnv_cong_avoid()
    242   struct tcp_sock *tp = tcp_sk(sk);  in tcpnv_acked() local
    266   ca->nv_last_snd_una = tp->snd_una;  in tcpnv_acked()
    367   80000ULL * tp->mss_cache);  in tcpnv_acked()
    399   tp->snd_ssthresh =  in tcpnv_acked()
    407   tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - dec);  in tcpnv_acked()
    [all …]
|
tcp_bbr.c
    268   struct tcp_sock *tp = tcp_sk(sk);  in bbr_init_pacing_rate_from_rtt() local
    288   struct tcp_sock *tp = tcp_sk(sk);  in bbr_set_pacing_rate() local
    306   struct tcp_sock *tp = tcp_sk(sk);  in bbr_tso_segs_goal() local
    444   now_ns = tp->tcp_clock_cache;  in bbr_packets_in_net_at_edt()
    486   u32 cwnd = tcp_snd_cwnd(tp);  in bbr_set_cwnd_to_recover_or_restore()
    548   tcp_snd_cwnd_set(tp, min(cwnd, tp->snd_cwnd_clamp)); /* apply global cap */  in bbr_set_cwnd()
    550   tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), bbr_cwnd_min_target));  in bbr_set_cwnd()
    642   bbr->lt_last_lost = tp->lost;  in bbr_reset_lt_bw_sampling_interval()
    918   tcp_snd_cwnd_set(tp, max(tcp_snd_cwnd(tp), bbr->prior_cwnd));  in bbr_check_probe_rtt_done()
    966   tp->app_limited =  in bbr_update_min_rtt()
    [all …]
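The bbr_set_pacing_rate()/bbr_set_cwnd() hits above revolve around BBR's core arithmetic: the cwnd target is derived from the estimated bottleneck bandwidth times the minimum RTT (the bandwidth-delay product), scaled by a gain, then floored at a minimum target and capped by snd_cwnd_clamp (as at lines 548-550). A simplified sketch of that BDP math; the names, percent-based gain, and units here are assumptions, not the kernel's fixed-point representation:

```c
#include <stdint.h>

/* Bandwidth-delay product in segments, from bytes/sec, microseconds and MSS. */
static uint64_t bbr_like_bdp_segs(uint64_t bw_bytes_per_sec, uint32_t min_rtt_us,
				  uint32_t mss)
{
	return bw_bytes_per_sec * min_rtt_us / 1000000ull / mss;
}

/* cwnd target = gain * BDP, bounded below by a minimum and above by the clamp. */
static uint64_t bbr_like_cwnd(uint64_t bdp_segs, uint32_t gain_pct,
			      uint32_t cwnd_min, uint32_t cwnd_clamp)
{
	uint64_t cwnd = bdp_segs * gain_pct / 100;
	if (cwnd < cwnd_min)
		cwnd = cwnd_min;     /* e.g. a floor like bbr_cwnd_min_target */
	if (cwnd > cwnd_clamp)
		cwnd = cwnd_clamp;   /* apply the global cap */
	return cwnd;
}
```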
|
tcp_cdg.c
    150   u32 now_us = tp->tcp_mstamp;  in tcp_cdg_hystart_update()
    164   tcp_snd_cwnd(tp));  in tcp_cdg_hystart_update()
    165   tp->snd_ssthresh = tcp_snd_cwnd(tp);  in tcp_cdg_hystart_update()
    183   tcp_snd_cwnd(tp));  in tcp_cdg_hystart_update()
    184   tp->snd_ssthresh = tcp_snd_cwnd(tp);  in tcp_cdg_hystart_update()
    277   ca->rtt_seq = tp->snd_nxt;  in tcp_cdg_cong_avoid()
    311   if (tp->sacked_out == 0) {  in tcp_cdg_acked()
    337   return tcp_snd_cwnd(tp);  in tcp_cdg_ssthresh()
    359   ca->rtt_seq = tp->snd_nxt;  in tcp_cdg_cwnd_event()
    364   ca->rtt_seq = tp->snd_nxt;  in tcp_cdg_cwnd_event()
    [all …]
|
tcp_cong.c
    457   u32 cwnd = min(tcp_snd_cwnd(tp) + acked, tp->snd_ssthresh);  in tcp_slow_start()
    460   tcp_snd_cwnd_set(tp, min(cwnd, tp->snd_cwnd_clamp));  in tcp_slow_start()
    472   if (tp->snd_cwnd_cnt >= w) {  in tcp_cong_avoid_ai()
    473   tp->snd_cwnd_cnt = 0;  in tcp_cong_avoid_ai()
    474   tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);  in tcp_cong_avoid_ai()
    477   tp->snd_cwnd_cnt += acked;  in tcp_cong_avoid_ai()
    478   if (tp->snd_cwnd_cnt >= w) {  in tcp_cong_avoid_ai()
    482   tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + delta);  in tcp_cong_avoid_ai()
    484   tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_cwnd_clamp));  in tcp_cong_avoid_ai()
    509   tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked);  in tcp_reno_cong_avoid()
    [all …]
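These are the two Reno building blocks shared by most of the congestion modules in this directory: tcp_slow_start() grows cwnd by one segment per ACKed segment (capped at ssthresh and the clamp) and returns the leftover ACK count, while tcp_cong_avoid_ai() grows cwnd by roughly one segment per w acknowledged segments using a credit counter. A hedged userspace sketch of both, with an invented `struct cc` standing in for the relevant tcp_sock fields:

```c
#include <stdint.h>

struct cc {
	uint32_t cwnd;        /* congestion window, segments      */
	uint32_t cwnd_cnt;    /* ACK credits toward the next +1   */
	uint32_t ssthresh;    /* slow-start threshold             */
	uint32_t cwnd_clamp;  /* hard upper bound on cwnd         */
};

/* Slow start: assumes cwnd < ssthresh on entry; returns leftover ACKs
 * once ssthresh is reached, mirroring tcp_slow_start()'s return value. */
static uint32_t slow_start(struct cc *c, uint32_t acked)
{
	uint32_t cwnd = c->cwnd + acked;
	if (cwnd > c->ssthresh)
		cwnd = c->ssthresh;
	acked -= cwnd - c->cwnd;
	c->cwnd = cwnd < c->cwnd_clamp ? cwnd : c->cwnd_clamp;
	return acked;
}

/* Additive increase: w ACK credits buy one extra segment of cwnd. */
static void cong_avoid_ai(struct cc *c, uint32_t w, uint32_t acked)
{
	/* Credits left over from an earlier, larger w buy one segment first. */
	if (c->cwnd_cnt >= w) {
		c->cwnd_cnt = 0;
		c->cwnd += 1;
	}
	c->cwnd_cnt += acked;
	if (c->cwnd_cnt >= w) {
		uint32_t delta = c->cwnd_cnt / w;
		c->cwnd_cnt -= delta * w;
		c->cwnd += delta;
	}
	if (c->cwnd > c->cwnd_clamp)
		c->cwnd = c->cwnd_clamp;
}
```

Calling cong_avoid_ai() with w equal to the current cwnd, as tcp_reno_cong_avoid() does at line 509, yields the classic one-segment-per-RTT increase.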
|
tcp_fastopen.c
    189   tp->segs_in = 0;  in tcp_fastopen_add_skb()
    190   tcp_segs_in(tp, skb);  in tcp_fastopen_add_skb()
    200   tp->syn_data_acked = 1;  in tcp_fastopen_add_skb()
    244   struct tcp_sock *tp;  in tcp_fastopen_create_child() local
    262   tp = tcp_sk(child);  in tcp_fastopen_create_child()
    271   tp->max_window = tp->snd_wnd;  in tcp_fastopen_create_child()
    293   tp->rcv_wup = tp->rcv_nxt;  in tcp_fastopen_create_child()
    457   if (tp->fastopen_connect && !tp->fastopen_req) {  in tcp_fastopen_defer_connect()
    466   tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),  in tcp_fastopen_defer_connect()
    468   if (tp->fastopen_req)  in tcp_fastopen_defer_connect()
    [all …]
|
tcp_lp.c
    139   struct tcp_sock *tp = tcp_sk(sk);  in tcp_lp_remote_hz_estimator() local
    151   tp->rx_opt.rcv_tsecr == lp->local_ref_time)  in tcp_lp_remote_hz_estimator()
    156   (tp->rx_opt.rcv_tsecr - lp->local_ref_time);  in tcp_lp_remote_hz_estimator()
    174   lp->remote_ref_time = tp->rx_opt.rcv_tsval;  in tcp_lp_remote_hz_estimator()
    175   lp->local_ref_time = tp->rx_opt.rcv_tsecr;  in tcp_lp_remote_hz_estimator()
    193   struct tcp_sock *tp = tcp_sk(sk);  in tcp_lp_owd_calculator() local
    273   struct tcp_sock *tp = tcp_sk(sk);  in tcp_lp_pkts_acked() local
    275   u32 now = tcp_time_stamp_ts(tp);  in tcp_lp_pkts_acked()
    282   delta = now - tp->rx_opt.rcv_tsecr;  in tcp_lp_pkts_acked()
    316   tcp_snd_cwnd_set(tp, 1U);  in tcp_lp_pkts_acked()
    [all …]
|
/net/sched/
cls_api.c
    94    n->tp = tp;  in tcf_exts_miss_cookie_base_alloc()
    393   tp->usesw = !tp->ops->reoffload;  in tcf_proto_create()
    397   err = tp->ops->init(tp);  in tcf_proto_create()
    1126  for (; tp; tp = tcf_chain_dereference(tp->next, chain))  in __tcf_get_next_proto()
    1728  for (; tp; tp = rcu_dereference_bh(tp->next)) {  in __tcf_classify()
    1743  if (unlikely(n->tp != tp || n->tp->chain != n->chain ||  in __tcf_classify()
    2215  tp; tp = tcf_get_next_proto(chain, tp))  in tfilter_notify_chain()
    2223  tp->ops->put(tp, fh);  in tfilter_put()
    2440  if (tp && !IS_ERR(tp))  in tc_new_tfilter()
    2608  if (tp && !IS_ERR(tp))  in tc_del_tfilter()
    [all …]
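The loop header at line 1728 is the heart of classification: __tcf_classify() walks the chain's singly linked list of tcf_proto instances (under RCU in the kernel) and stops at the first classifier that returns a verdict other than "no match". A simplified userspace model of that walk; the structures and the TC_ACT_UNSPEC convention below are stand-ins for the kernel types:

```c
#include <stddef.h>

#define TC_ACT_UNSPEC (-1)   /* "keep looking" */

struct pkt;                  /* opaque packet, stands in for struct sk_buff */

struct classifier {
	struct classifier *next;
	int (*classify)(const struct classifier *cl, struct pkt *p);
};

static int classify_chain(struct classifier *cl, struct pkt *p)
{
	for (; cl; cl = cl->next) {          /* rcu_dereference_bh() in the kernel */
		int verdict = cl->classify(cl, p);
		if (verdict != TC_ACT_UNSPEC)
			return verdict;      /* first real verdict wins */
	}
	return TC_ACT_UNSPEC;                /* fell off the end: nothing matched */
}
```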
|
cls_matchall.c
    29    const struct tcf_proto *tp,  in mall_classify() argument
    45    static int mall_init(struct tcf_proto *tp)  in mall_init() argument
    74    struct tcf_block *block = tp->chain->block;  in mall_destroy_hw_filter()
    90    struct tcf_block *block = tp->chain->block;  in mall_replace_hw_filter()
    135   tcf_unbind_filter(tp, &head->res);  in mall_destroy()
    217   tcf_bind_filter(tp, &new->res, base);  in mall_change()
    231   tcf_proto_update_usesw(tp, new->flags);  in mall_change()
    234   rcu_assign_pointer(tp->root, new);  in mall_change()
    239   tcf_unbind_filter(tp, &new->res);  in mall_change()
    269   if (arg->fn(tp, head, arg) < 0)  in mall_walk()
    [all …]
|
cls_bpf.c
    50    struct tcf_proto *tp;  member
    82    const struct tcf_proto *tp,  in cls_bpf_classify() argument
    253   rcu_assign_pointer(tp->root, head);  in cls_bpf_init()
    294   cls_bpf_stop_offload(tp, prog, extack);  in __cls_bpf_delete()
    296   tcf_unbind_filter(tp, &prog->res);  in __cls_bpf_delete()
    308   __cls_bpf_delete(tp, arg, extack);  in cls_bpf_delete()
    320   __cls_bpf_delete(tp, prog, extack);  in cls_bpf_destroy()
    501   tcf_bind_filter(tp, &prog->res, base);  in cls_bpf_change()
    517   tcf_unbind_filter(tp, &oldprog->res);  in cls_bpf_change()
    529   tcf_unbind_filter(tp, &prog->res);  in cls_bpf_change()
    [all …]
|
cls_u32.c
    115   const struct tcf_proto *tp,  in u32_classify() argument
    304   ht = rtnl_dereference(tp->root);  in u32_get()
    365   void *key = tc_u_common_ptr(tp);  in u32_init()
    374   root_ht->prio = tp->prio;  in u32_init()
    399   tp->data = tp_c;  in u32_init()
    616   u32_clear_hnode(tp, ht, extack);  in u32_destroy_hnode()
    666   tp->data = NULL;  in u32_destroy()
    998   ht->prio = tp->prio;  in u32_change()
    1185  u32_unbind_filter(tp, n, tb);  in u32_change()
    1217  if (ht->prio != tp->prio)  in u32_walk()
    [all …]
|
cls_cgroup.c
    22    struct tcf_proto *tp;  member
    27    const struct tcf_proto *tp,  in cls_cgroup_classify() argument
    30    struct cls_cgroup_head *head = rcu_dereference_bh(tp->root);  in cls_cgroup_classify()
    46    static void *cls_cgroup_get(struct tcf_proto *tp, u32 handle)  in cls_cgroup_get() argument
    51    static int cls_cgroup_init(struct tcf_proto *tp)  in cls_cgroup_init() argument
    79    struct tcf_proto *tp, unsigned long base,  in cls_cgroup_change() argument
    85    struct cls_cgroup_head *head = rtnl_dereference(tp->root);  in cls_cgroup_change()
    106   new->tp = tp;  in cls_cgroup_change()
    122   rcu_assign_pointer(tp->root, new);  in cls_cgroup_change()
    137   struct cls_cgroup_head *head = rtnl_dereference(tp->root);  in cls_cgroup_destroy()
    [all …]
|
cls_basic.c
    34    struct tcf_proto *tp;  member
    41    const struct tcf_proto *tp,  in basic_classify() argument
    64    struct basic_head *head = rtnl_dereference(tp->root);  in basic_get()
    76    static int basic_init(struct tcf_proto *tp)  in basic_init() argument
    85    rcu_assign_pointer(tp->root, head);  in basic_init()
    116   tcf_unbind_filter(tp, &f->res);  in basic_destroy()
    134   tcf_unbind_filter(tp, &f->res);  in basic_delete()
    165   tcf_bind_filter(tp, &f->res, base);  in basic_set_parms()
    168   f->tp = tp;  in basic_set_parms()
    234   tcf_unbind_filter(tp, &fold->res);  in basic_change()
    [all …]
|
cls_fw.c
    40    struct tcf_proto *tp;  member
    52    const struct tcf_proto *tp,  in fw_classify() argument
    107   static int fw_init(struct tcf_proto *tp)  in fw_init() argument
    146   tcf_unbind_filter(tp, &f->res);  in fw_destroy()
    175   tcf_unbind_filter(tp, &f->res);  in fw_delete()
    233   tcf_bind_filter(tp, &f->res, base);  in fw_set_parms()
    271   fnew->tp = f->tp;  in fw_change()
    295   tcf_unbind_filter(tp, &f->res);  in fw_change()
    316   rcu_assign_pointer(tp->root, head);  in fw_change()
    327   f->tp = tp;  in fw_change()
    [all …]
|