Lines Matching refs: icsk (net/ipv4/tcp_timer.c)
29 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clamp_rto_to_user_timeout() local
34 user_timeout = READ_ONCE(icsk->icsk_user_timeout); in tcp_clamp_rto_to_user_timeout()
36 return icsk->icsk_rto; in tcp_clamp_rto_to_user_timeout()
46 return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining)); in tcp_clamp_rto_to_user_timeout()
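
Read as a group, the tcp_clamp_rto_to_user_timeout() hits show the helper's shape: fall back to the plain icsk_rto when TCP_USER_TIMEOUT is unset, otherwise cap the next timeout by whatever budget remains. A minimal sketch of that shape, assuming the retrans_stamp / tcp_time_stamp_ms() bookkeeping of current kernels for the elapsed time (only the icsk lines above are quoted; the in-between lines are reconstruction):

```c
static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 elapsed, user_timeout;
	s32 remaining;

	user_timeout = READ_ONCE(icsk->icsk_user_timeout);
	if (!user_timeout)		/* TCP_USER_TIMEOUT unset: plain RTO */
		return icsk->icsk_rto;

	/* Milliseconds already burned since the first retransmission. */
	elapsed = tcp_time_stamp_ms(tp) - tp->retrans_stamp;
	remaining = user_timeout - elapsed;
	if (remaining <= 0)
		return 1;		/* deadline passed: fire ASAP */

	/* Never schedule the next retransmit past the user's deadline. */
	return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(remaining));
}
```
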
51 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clamp_probe0_to_user_timeout() local
55 user_timeout = READ_ONCE(icsk->icsk_user_timeout); in tcp_clamp_probe0_to_user_timeout()
56 if (!user_timeout || !icsk->icsk_probes_tstamp) in tcp_clamp_probe0_to_user_timeout()
59 elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp; in tcp_clamp_probe0_to_user_timeout()
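
The probe0 variant is keyed off icsk_probes_tstamp instead: if the user timeout is unset or no zero-window probe has been stamped yet, the caller's deadline is returned untouched; otherwise the elapsed probe time is subtracted from the budget. A sketch under those assumptions (the TCP_TIMEOUT_MIN floor and the `when` parameter name follow current sources):

```c
u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 remaining, user_timeout;
	s32 elapsed;

	user_timeout = READ_ONCE(icsk->icsk_user_timeout);
	if (!user_timeout || !icsk->icsk_probes_tstamp)
		return when;		/* nothing to clamp against */

	elapsed = tcp_jiffies32 - icsk->icsk_probes_tstamp;
	if (unlikely(elapsed < 0))
		elapsed = 0;
	remaining = msecs_to_jiffies(user_timeout) - elapsed;
	remaining = max_t(u32, remaining, TCP_TIMEOUT_MIN);

	return min_t(u32, remaining, when);
}
```
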
164 static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk) in tcp_mtu_probing() argument
173 if (!icsk->icsk_mtup.enabled) { in tcp_mtu_probing()
174 icsk->icsk_mtup.enabled = 1; in tcp_mtu_probing()
175 icsk->icsk_mtup.probe_timestamp = tcp_jiffies32; in tcp_mtu_probing()
177 mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1; in tcp_mtu_probing()
181 icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss); in tcp_mtu_probing()
183 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_mtu_probing()
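
These hits trace the MTU-probing blackhole fallback: on a first write timeout with probing disabled, it is switched on and timestamped; on later timeouts the search floor is halved in MSS terms before being converted back to an MTU, and the socket's MSS is re-synced against the cached PMTU. A sketch, with the sysctl clamps between the two quoted assignments abbreviated to a comment:

```c
static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
	struct net *net = sock_net(sk);
	int mss;

	if (!READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing))
		return;

	if (!icsk->icsk_mtup.enabled) {
		/* First timeout: arm blackhole probing, remember when. */
		icsk->icsk_mtup.enabled = 1;
		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
	} else {
		/* Halve the current search floor, expressed as an MSS,
		 * clamp it by the tcp_base_mss / mtu_probe_floor sysctls
		 * (elided here), then convert back to an MTU.
		 */
		mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
	}
	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}
```
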
243 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_write_timeout() local
250 if (icsk->icsk_retransmits) in tcp_write_timeout()
253 retry_until = READ_ONCE(icsk->icsk_syn_retries) ? : in tcp_write_timeout()
260 expired = icsk->icsk_retransmits >= max_retransmits; in tcp_write_timeout()
264 tcp_mtu_probing(icsk, sk); in tcp_write_timeout()
271 const bool alive = icsk->icsk_rto < TCP_RTO_MAX; in tcp_write_timeout()
283 READ_ONCE(icsk->icsk_user_timeout)); in tcp_write_timeout()
289 icsk->icsk_retransmits, in tcp_write_timeout()
290 icsk->icsk_rto, (int)expired); in tcp_write_timeout()
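
The tcp_write_timeout() hits split into two phases: during the SYN handshake the retry budget comes from a per-socket TCP_SYNCNT (icsk_syn_retries) or the sysctl, while on established sockets a first threshold kicks MTU probing and the final verdict comes from retransmits_timed_out() against icsk_user_timeout, with a BPF RTO callback receiving icsk_retransmits/icsk_rto/expired. A heavily condensed reconstruction; orphan handling (where `alive` asks whether icsk_rto is still under TCP_RTO_MAX) and Fast Open details are reduced to comments, and helper names track the kernel version only loosely:

```c
static int tcp_write_timeout(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct net *net = sock_net(sk);
	bool expired = false;
	int retry_until;

	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		if (icsk->icsk_retransmits)
			dst_negative_advice(sk);	/* route may be bad */
		retry_until = READ_ONCE(icsk->icsk_syn_retries) ? :
			READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);
		expired = icsk->icsk_retransmits >= retry_until;
	} else {
		if (retransmits_timed_out(sk,
					  READ_ONCE(net->ipv4.sysctl_tcp_retries1), 0))
			tcp_mtu_probing(icsk, sk);	/* blackhole detection */
		retry_until = READ_ONCE(net->ipv4.sysctl_tcp_retries2);
		/* Orphans shrink retry_until via tcp_orphan_retries(),
		 * keyed on: const bool alive = icsk->icsk_rto < TCP_RTO_MAX;
		 */
	}
	if (!expired)
		expired = retransmits_timed_out(sk, retry_until,
						READ_ONCE(icsk->icsk_user_timeout));
	/* BPF_SOCK_OPS_RTO_CB is invoked here with icsk_retransmits,
	 * icsk_rto and (int)expired, matching the hits above.
	 */
	if (expired) {
		tcp_write_err(sk);	/* give up on the connection */
		return 1;
	}
	return 0;
}
```
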
309 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_delack_timer_handler() local
322 if (!(icsk->icsk_ack.pending & ICSK_ACK_TIMER)) in tcp_delack_timer_handler()
325 if (time_after(icsk->icsk_ack.timeout, jiffies)) { in tcp_delack_timer_handler()
326 sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout); in tcp_delack_timer_handler()
329 icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER; in tcp_delack_timer_handler()
334 icsk->icsk_ack.ato = min_t(u32, icsk->icsk_ack.ato << 1, icsk->icsk_rto); in tcp_delack_timer_handler()
340 icsk->icsk_ack.ato = TCP_ATO_MIN; in tcp_delack_timer_handler()
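
For delayed ACKs, the handler bails unless ICSK_ACK_TIMER is pending; if it fired early it simply re-arms icsk_delack_timer for the recorded timeout. Once it genuinely expires, the pending bit is cleared and the ACK timeout `ato` is adjusted: doubled (capped at the RTO) when a delayed ACK was missed outside pingpong mode, or reset to TCP_ATO_MIN when leaving pingpong mode. A sketch omitting the SACK-compression and statistics paths:

```c
void tcp_delack_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (!(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
		return;

	if (time_after(icsk->icsk_ack.timeout, jiffies)) {
		/* Fired early: push the timer back to its deadline. */
		sk_reset_timer(sk, &icsk->icsk_delack_timer,
			       icsk->icsk_ack.timeout);
		return;
	}
	icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

	if (inet_csk_ack_scheduled(sk)) {
		if (!inet_csk_in_pingpong_mode(sk)) {
			/* Delayed ACK missed: back off the ATO, up to RTO. */
			icsk->icsk_ack.ato = min_t(u32, icsk->icsk_ack.ato << 1,
						   icsk->icsk_rto);
		} else {
			/* Interactive session went quiet: leave pingpong
			 * mode and deflate the ATO.
			 */
			inet_csk_exit_pingpong_mode(sk);
			icsk->icsk_ack.ato = TCP_ATO_MIN;
		}
		tcp_send_ack(sk);
	}
}
```
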
360 struct inet_connection_sock *icsk = in tcp_delack_timer() local
361 from_timer(icsk, t, icsk_delack_timer); in tcp_delack_timer()
362 struct sock *sk = &icsk->icsk_inet.sk; in tcp_delack_timer()
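
The timer entry point recovers the icsk, and through it the socket, from the timer_list pointer via from_timer(), then runs the handler only if the socket lock is free. A sketch of that standard deferral pattern:

```c
static void tcp_delack_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_delack_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_delack_timer_handler(sk);
	} else {
		/* Owner is busy: flag deferred work for tcp_release_cb(). */
		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
				      &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}
```
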
379 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_probe_timer() local
385 icsk->icsk_probes_out = 0; in tcp_probe_timer()
386 icsk->icsk_probes_tstamp = 0; in tcp_probe_timer()
398 if (!icsk->icsk_probes_tstamp) { in tcp_probe_timer()
399 icsk->icsk_probes_tstamp = tcp_jiffies32; in tcp_probe_timer()
401 u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout); in tcp_probe_timer()
404 (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >= in tcp_probe_timer()
410 const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX; in tcp_probe_timer()
413 if (!alive && icsk->icsk_backoff >= max_probes) in tcp_probe_timer()
419 if (icsk->icsk_probes_out >= max_probes) { in tcp_probe_timer()
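
The zero-window probe timer resets its counters whenever there is in-flight data or nothing queued to send, stamps the first probe with tcp_jiffies32, enforces TCP_USER_TIMEOUT against that stamp, and for orphaned sockets asks whether the backed-off RTO still fits under TCP_RTO_MAX before comparing icsk_probes_out with the probe budget. A sketch (the out-of-resources check is dropped; the in-block `abort:` label mirrors the real code's layout):

```c
static void tcp_probe_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int max_probes;

	if (tp->packets_out || !tcp_send_head(sk)) {
		icsk->icsk_probes_out = 0;	/* not actually probing */
		icsk->icsk_probes_tstamp = 0;
		return;
	}

	if (!icsk->icsk_probes_tstamp) {
		icsk->icsk_probes_tstamp = tcp_jiffies32; /* first probe */
	} else {
		u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);

		if (user_timeout &&
		    (s32)(tcp_jiffies32 - icsk->icsk_probes_tstamp) >=
		    msecs_to_jiffies(user_timeout))
			goto abort;
	}

	max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2);
	if (sock_flag(sk, SOCK_DEAD)) {
		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) <
				   TCP_RTO_MAX;

		max_probes = tcp_orphan_retries(sk, alive);
		if (!alive && icsk->icsk_backoff >= max_probes)
			goto abort;
	}

	if (icsk->icsk_probes_out >= max_probes) {
abort:		tcp_write_err(sk);
	} else {
		tcp_send_probe0(sk);	/* keep poking the zero window */
	}
}
```
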
429 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_update_rto_stats() local
432 if (!icsk->icsk_retransmits) { in tcp_update_rto_stats()
436 icsk->icsk_retransmits++; in tcp_update_rto_stats()
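
This small helper bumps icsk_retransmits and, on the transition from zero, also opens a new RTO episode. A sketch, assuming the tcp_sock counters (total_rto, total_rto_recoveries, rto_stamp) of recent kernels:

```c
static void tcp_update_rto_stats(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	if (!icsk->icsk_retransmits) {
		/* First timeout of this episode: record a new recovery. */
		tp->total_rto_recoveries++;
		tp->rto_stamp = tcp_time_stamp_ms(tp);
	}
	icsk->icsk_retransmits++;
	tp->total_rto++;
}
```
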
446 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_fastopen_synack_timer() local
455 max_retries = READ_ONCE(icsk->icsk_syn_retries) ? : in tcp_fastopen_synack_timer()
463 if (icsk->icsk_retransmits == 1) in tcp_fastopen_synack_timer()
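
For a Fast Open passive connection, the SYN-ACK retry budget again honors a per-socket TCP_SYNCNT first, falling back to sysctl_tcp_synack_retries plus one extra try, and the first retransmit also drops into loss state so cwnd is lowered. A condensed sketch; the syn_ack_timeout callback and timer re-arming are reduced to comments:

```c
static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int max_retries;

	/* Per-socket TCP_SYNCNT wins; otherwise sysctl + 1 extra for TFO. */
	max_retries = READ_ONCE(icsk->icsk_syn_retries) ? :
		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_synack_retries) + 1;

	if (req->num_timeout >= max_retries) {
		tcp_write_err(sk);	/* budget exhausted */
		return;
	}
	/* Lower cwnd after the first SYN-ACK timeout. */
	if (icsk->icsk_retransmits == 1)
		tcp_enter_loss(sk);

	inet_rtx_syn_ack(sk, req);
	req->num_timeout++;
	tcp_update_rto_stats(sk);
	/* Re-arm the timer with a doubled timeout (elided). */
}
```
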
483 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_rtx_probe0_timed_out() local
484 u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout); in tcp_rtx_probe0_timed_out()
502 rcv_delta = icsk->icsk_timeout - tp->rcv_tstamp; in tcp_rtx_probe0_timed_out()
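
Here a user timeout, when set, prevents zero-window ACKs from refreshing liveness forever: the retransmit delta is checked against it directly, and it also tightens the default 2 * TCP_RTO_MAX ceiling that the receive-side delta (icsk_timeout minus tp->rcv_tstamp) is measured against. A sketch close to current sources:

```c
static bool tcp_rtx_probe0_timed_out(const struct sock *sk,
				     const struct sk_buff *skb,
				     u32 rtx_delta)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);
	const struct tcp_sock *tp = tcp_sk(sk);
	int timeout = TCP_RTO_MAX * 2;
	s32 rcv_delta;

	if (user_timeout) {
		/* Win-0 ACKs must not "reset the timer" while the
		 * retransmits themselves make no progress.
		 */
		if (rtx_delta > user_timeout)
			return true;
		timeout = min_t(u32, timeout, msecs_to_jiffies(user_timeout));
	}
	/* The timer may fire a jiffy late and rcv_tstamp may be fresh,
	 * so this delta can legitimately be negative.
	 */
	rcv_delta = icsk->icsk_timeout - tp->rcv_tstamp;
	if (rcv_delta <= timeout)
		return false;

	return rtx_delta > timeout;
}
```
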
524 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_retransmit_timer() local
592 if (icsk->icsk_retransmits == 0) { in tcp_retransmit_timer()
595 if (icsk->icsk_ca_state == TCP_CA_Recovery) { in tcp_retransmit_timer()
600 } else if (icsk->icsk_ca_state == TCP_CA_Loss) { in tcp_retransmit_timer()
602 } else if ((icsk->icsk_ca_state == TCP_CA_Disorder) || in tcp_retransmit_timer()
655 icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) { in tcp_retransmit_timer()
656 icsk->icsk_backoff = 0; in tcp_retransmit_timer()
657 icsk->icsk_rto = clamp(__tcp_set_rto(tp), in tcp_retransmit_timer()
666 icsk->icsk_backoff++; in tcp_retransmit_timer()
667 icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); in tcp_retransmit_timer()
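
The tcp_retransmit_timer() hits fall into two groups. On the first timeout (icsk_retransmits == 0), the congestion state picks which failure MIB to bump: Recovery selects the SACK/Reno recovery-fail counters, Loss selects LINUX_MIB_TCPLOSSFAILURES, and Disorder (or outstanding SACKs) the plain failure counters; that bookkeeping is left in prose here. After the retransmission the backoff policy is chosen: thin streams within TCP_THIN_LINEAR_RETRIES stay linear and recompute the RTO from srtt, everyone else doubles icsk_rto up to TCP_RTO_MAX. A sketch of just that tail, with the tcp_thin_linear_timeouts and syn-linear sysctl gates elided:

```c
	/* Tail of tcp_retransmit_timer(), after the retransmit attempt. */
	if (sk->sk_state == TCP_ESTABLISHED &&
	    tcp_stream_is_thin(tp) &&
	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
		/* Thin stream: no exponential backoff, just refresh RTO. */
		icsk->icsk_backoff = 0;
		icsk->icsk_rto = clamp(__tcp_set_rto(tp),
				       tcp_rto_min(sk),
				       TCP_RTO_MAX);
	} else {
		/* Classic exponential backoff, capped at TCP_RTO_MAX. */
		icsk->icsk_backoff++;
		icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
	}
```
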
681 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_write_timer_handler() local
685 !icsk->icsk_pending) in tcp_write_timer_handler()
688 if (time_after(icsk->icsk_timeout, jiffies)) { in tcp_write_timer_handler()
689 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout); in tcp_write_timer_handler()
694 event = icsk->icsk_pending; in tcp_write_timer_handler()
704 icsk->icsk_pending = 0; in tcp_write_timer_handler()
708 icsk->icsk_pending = 0; in tcp_write_timer_handler()
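
A single timer multiplexes several write-side events: the handler returns early for closed or listening sockets or when nothing is pending, re-arms icsk_retransmit_timer if it fired before icsk_timeout, and otherwise dispatches on icsk_pending, clearing the flag before the retransmit and probe0 paths. Sketch:

```c
void tcp_write_timer_handler(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	int event;

	if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
	    !icsk->icsk_pending)
		return;

	if (time_after(icsk->icsk_timeout, jiffies)) {
		/* Fired early: re-arm for the recorded deadline. */
		sk_reset_timer(sk, &icsk->icsk_retransmit_timer,
			       icsk->icsk_timeout);
		return;
	}

	event = icsk->icsk_pending;
	switch (event) {
	case ICSK_TIME_REO_TIMEOUT:
		tcp_rack_reo_timeout(sk);
		break;
	case ICSK_TIME_LOSS_PROBE:
		tcp_send_loss_probe(sk);
		break;
	case ICSK_TIME_RETRANS:
		icsk->icsk_pending = 0;
		tcp_retransmit_timer(sk);
		break;
	case ICSK_TIME_PROBE0:
		icsk->icsk_pending = 0;
		tcp_probe_timer(sk);
		break;
	}
}
```
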
716 struct inet_connection_sock *icsk = in tcp_write_timer() local
717 from_timer(icsk, t, icsk_retransmit_timer); in tcp_write_timer()
718 struct sock *sk = &icsk->icsk_inet.sk; in tcp_write_timer()
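
The write-timer entry point mirrors tcp_delack_timer() above, only keyed on icsk_retransmit_timer and the TCP_WRITE_TIMER_DEFERRED flag:

```c
static void tcp_write_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk =
			from_timer(icsk, t, icsk_retransmit_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		tcp_write_timer_handler(sk);
	} else {
		/* Owner busy: let tcp_release_cb() run the handler later. */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED,
				      &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}
```
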
756 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_keepalive_timer() local
800 u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout); in tcp_keepalive_timer()
807 icsk->icsk_probes_out > 0) || in tcp_keepalive_timer()
809 icsk->icsk_probes_out >= keepalive_probes(tp))) { in tcp_keepalive_timer()
816 icsk->icsk_probes_out++; in tcp_keepalive_timer()
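
Finally, the keepalive hits show TCP_USER_TIMEOUT overriding the probe-count limit: with a user timeout set, the connection is reset once idle time exceeds it and at least one probe (icsk_probes_out > 0) has gone unanswered; without one, it dies after keepalive_probes(tp) probes. Each successful wakeup write increments icsk_probes_out. A sketch of that decision, assuming `elapsed` holds jiffies since the last receive and using the older two-argument tcp_send_active_reset() (newer kernels add a reason argument):

```c
	/* Inside tcp_keepalive_timer(), once the idle threshold is hit. */
	u32 user_timeout = READ_ONCE(icsk->icsk_user_timeout);

	if ((user_timeout != 0 &&
	     elapsed >= msecs_to_jiffies(user_timeout) &&
	     icsk->icsk_probes_out > 0) ||
	    (user_timeout == 0 &&
	     icsk->icsk_probes_out >= keepalive_probes(tp))) {
		tcp_send_active_reset(sk, GFP_ATOMIC);	/* RST the peer */
		tcp_write_err(sk);
		goto out;
	}
	if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
		icsk->icsk_probes_out++;	/* probe sent, await reply */
		elapsed = keepalive_intvl_when(tp);
	}
```
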