Lines matching refs: icsk (net/ipv4/tcp_input.c)
227 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_measure_rcv_mss() local
228 const unsigned int lss = icsk->icsk_ack.last_seg_size; in tcp_measure_rcv_mss()
231 icsk->icsk_ack.last_seg_size = 0; in tcp_measure_rcv_mss()
237 if (len >= icsk->icsk_ack.rcv_mss) { in tcp_measure_rcv_mss()
242 if (unlikely(len != icsk->icsk_ack.rcv_mss)) { in tcp_measure_rcv_mss()
259 icsk->icsk_ack.rcv_mss = min_t(unsigned int, len, in tcp_measure_rcv_mss()
262 DO_ONCE_LITE_IF(len > icsk->icsk_ack.rcv_mss + MAX_TCP_OPTION_SPACE, in tcp_measure_rcv_mss()
276 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; in tcp_measure_rcv_mss()
297 icsk->icsk_ack.last_seg_size = len; in tcp_measure_rcv_mss()
299 icsk->icsk_ack.rcv_mss = len; in tcp_measure_rcv_mss()
303 if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) in tcp_measure_rcv_mss()
304 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2; in tcp_measure_rcv_mss()
305 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; in tcp_measure_rcv_mss()
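
The tcp_measure_rcv_mss() hits above implement the receiver-side MSS estimate: a full-sized segment raises icsk_ack.rcv_mss (capped by the advertised MSS), while a smaller value is only trusted once two consecutive segments arrive with the same size, and small pushes schedule a prompt ACK via the ICSK_ACK_PUSHED flags. Below is a minimal userspace sketch of that core rule; the struct, function name, and initial values are illustrative stand-ins, and the kernel's header/option length adjustments and scaling-ratio update are omitted.

/* Userspace model of the rcv_mss update in tcp_measure_rcv_mss().
 * Field names mirror icsk->icsk_ack; the surrounding types are invented. */
#include <stdio.h>

#define ICSK_ACK_PUSHED  (1 << 0)
#define ICSK_ACK_PUSHED2 (1 << 1)

struct ack_state {
	unsigned int rcv_mss;       /* current MSS estimate */
	unsigned int last_seg_size; /* size of the last "small" segment */
	unsigned int pending;       /* pending-ACK flags */
};

static void measure_rcv_mss(struct ack_state *st, unsigned int len,
			    unsigned int advmss)
{
	unsigned int lss = st->last_seg_size;

	st->last_seg_size = 0;
	if (len >= st->rcv_mss) {
		/* Full-sized segment: raise the estimate, capped by advmss. */
		st->rcv_mss = len < advmss ? len : advmss;
		return;
	}
	/* Small segment: only adopt it as the new MSS after two
	 * consecutive segments of the same size. */
	st->last_seg_size = len;
	if (len == lss) {
		st->rcv_mss = len;
		return;
	}
	/* Otherwise request a prompt ACK; the sender may be pushing. */
	if (st->pending & ICSK_ACK_PUSHED)
		st->pending |= ICSK_ACK_PUSHED2;
	st->pending |= ICSK_ACK_PUSHED;
}

int main(void)
{
	struct ack_state st = { .rcv_mss = 536 };

	measure_rcv_mss(&st, 1448, 1460); /* full-sized: raises estimate */
	measure_rcv_mss(&st, 100, 1460);  /* small: remembered, prompt ACK */
	measure_rcv_mss(&st, 100, 1460);  /* same size again: becomes MSS */
	printf("rcv_mss=%u pending=%#x\n", st.rcv_mss, st.pending);
	return 0;
}
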
311 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_incr_quickack() local
312 unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); in tcp_incr_quickack()
317 if (quickacks > icsk->icsk_ack.quick) in tcp_incr_quickack()
318 icsk->icsk_ack.quick = quickacks; in tcp_incr_quickack()
323 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_enter_quickack_mode() local
327 icsk->icsk_ack.ato = TCP_ATO_MIN; in tcp_enter_quickack_mode()
336 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_in_quickack_mode() local
338 return icsk->icsk_ack.dst_quick_ack || in tcp_in_quickack_mode()
339 (icsk->icsk_ack.quick && !inet_csk_in_pingpong_mode(sk)); in tcp_in_quickack_mode()
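
The three quickack functions above maintain a budget of ACKs to send without delay: tcp_incr_quickack() sizes it to roughly half the receive window in MSS units, and tcp_in_quickack_mode() honors it unless the connection is in interactive (pingpong) mode. A self-contained sketch, with the max_quickacks cap and pingpong flag as simplified stand-ins for the kernel's versions:

/* Model of tcp_incr_quickack()/tcp_in_quickack_mode(). */
#include <stdbool.h>
#include <stdio.h>

struct quickack {
	unsigned int quick; /* ACKs left to send without delay */
	bool dst_quick_ack; /* route asked for quick ACKs */
	bool pingpong;      /* interactive session: prefer delayed ACKs */
};

static void incr_quickack(struct quickack *qa, unsigned int rcv_wnd,
			  unsigned int rcv_mss, unsigned int max_quickacks)
{
	/* Enough immediate ACKs to open about half the receive window. */
	unsigned int quickacks = rcv_wnd / (2 * rcv_mss);

	if (quickacks == 0)
		quickacks = 2;
	if (quickacks > max_quickacks)
		quickacks = max_quickacks;
	if (quickacks > qa->quick)
		qa->quick = quickacks;
}

static bool in_quickack_mode(const struct quickack *qa)
{
	return qa->dst_quick_ack || (qa->quick && !qa->pingpong);
}

int main(void)
{
	struct quickack qa = { 0, false, false };

	incr_quickack(&qa, 65535, 1460, 16);
	printf("quick=%u in_quickack=%d\n", qa.quick, in_quickack_mode(&qa));
	return 0;
}
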
616 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clamp_window() local
620 icsk->icsk_ack.quick = 0; in tcp_clamp_window()
811 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_save_lrcv_flowlabel() local
814 icsk->icsk_ack.lrcv_flowlabel = ntohl(ip6_flowlabel(ipv6_hdr(skb))); in tcp_save_lrcv_flowlabel()
831 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_event_data_recv() local
842 if (!icsk->icsk_ack.ato) { in tcp_event_data_recv()
847 icsk->icsk_ack.ato = TCP_ATO_MIN; in tcp_event_data_recv()
849 int m = now - icsk->icsk_ack.lrcvtime; in tcp_event_data_recv()
853 icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2; in tcp_event_data_recv()
854 } else if (m < icsk->icsk_ack.ato) { in tcp_event_data_recv()
855 icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m; in tcp_event_data_recv()
856 if (icsk->icsk_ack.ato > icsk->icsk_rto) in tcp_event_data_recv()
857 icsk->icsk_ack.ato = icsk->icsk_rto; in tcp_event_data_recv()
858 } else if (m > icsk->icsk_rto) { in tcp_event_data_recv()
865 icsk->icsk_ack.lrcvtime = now; in tcp_event_data_recv()
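
The tcp_event_data_recv() hits show the delayed-ACK timeout (icsk_ack.ato) being maintained as a moving average of inter-arrival gaps: back-to-back data halves it toward TCP_ATO_MIN, moderate gaps blend in, and it never exceeds the RTO. A sketch of that update with abstract time units; the TCP_ATO_MIN value here is a placeholder (the kernel derives it from HZ), and the re-entry into quickack mode on very long gaps is omitted:

/* Model of the ato estimator in tcp_event_data_recv(). */
#include <stdio.h>

#define TCP_ATO_MIN 40 /* illustrative */

struct ack_timing {
	unsigned int ato;      /* delayed-ACK timeout estimate */
	unsigned int lrcvtime; /* timestamp of last data receipt */
};

static void on_data_recv(struct ack_timing *t, unsigned int now,
			 unsigned int rto)
{
	if (!t->ato) {
		/* First data segment: start from the minimum ATO. */
		t->ato = TCP_ATO_MIN;
	} else {
		unsigned int m = now - t->lrcvtime;

		if (m <= TCP_ATO_MIN / 2) {
			/* Back-to-back packets: decay toward the minimum. */
			t->ato = (t->ato >> 1) + TCP_ATO_MIN / 2;
		} else if (m < t->ato) {
			/* Blend in the new gap, never exceeding the RTO. */
			t->ato = (t->ato >> 1) + m;
			if (t->ato > rto)
				t->ato = rto;
		}
		/* m >= ato: the kernel may re-enter quickack mode when the
		 * gap exceeds the RTO; omitted here. */
	}
	t->lrcvtime = now;
}

int main(void)
{
	struct ack_timing t = { 0, 0 };
	unsigned int arrivals[] = { 0, 10, 20, 55, 90 }; /* gaps: -,10,10,35,35 */

	for (unsigned int i = 0; i < 5; i++) {
		on_data_recv(&t, arrivals[i], 200);
		printf("t=%u ato=%u\n", arrivals[i], t.ato);
	}
	return 0;
}
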
2178 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_enter_loss() local
2181 bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery; in tcp_enter_loss()
2187 if (icsk->icsk_ca_state <= TCP_CA_Disorder || in tcp_enter_loss()
2189 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { in tcp_enter_loss()
2192 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); in tcp_enter_loss()
2204 if (icsk->icsk_ca_state <= TCP_CA_Disorder && in tcp_enter_loss()
2219 (new_recovery || icsk->icsk_retransmits) && in tcp_enter_loss()
2480 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_undo_cwnd_reduction() local
2482 tcp_snd_cwnd_set(tp, icsk->icsk_ca_ops->undo_cwnd(sk)); in tcp_undo_cwnd_reduction()
2698 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtup_probe_failed() local
2700 icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1; in tcp_mtup_probe_failed()
2701 icsk->icsk_mtup.probe_size = 0; in tcp_mtup_probe_failed()
2708 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_mtup_probe_success() local
2714 do_div(val, icsk->icsk_mtup.probe_size); in tcp_mtup_probe_success()
2722 icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size; in tcp_mtup_probe_success()
2723 icsk->icsk_mtup.probe_size = 0; in tcp_mtup_probe_success()
2724 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_mtup_probe_success()
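
tcp_mtup_probe_failed() and tcp_mtup_probe_success() maintain the path-MTU probing window in icsk->icsk_mtup: a failed probe lowers search_high below the probe size, a successful one raises search_low to it. The sketch below pairs those updates with a simple midpoint probe chooser; the actual probe-size selection in tcp_output.c is more elaborate, so next_probe_size() here is a hypothetical simplification.

/* Model of the MTU-probing search window in icsk->icsk_mtup. */
#include <stdio.h>

struct mtu_probe {
	int search_low;  /* largest size known to work */
	int search_high; /* upper bound of the search interval */
	int probe_size;  /* probe in flight, 0 if none */
};

static int next_probe_size(const struct mtu_probe *p)
{
	/* Binary-search the interval (simplified). */
	return (p->search_low + p->search_high) / 2;
}

static void probe_failed(struct mtu_probe *p)
{
	p->search_high = p->probe_size - 1; /* that size did not fit */
	p->probe_size = 0;
}

static void probe_success(struct mtu_probe *p)
{
	p->search_low = p->probe_size;      /* that size fits; raise the floor */
	p->probe_size = 0;
	/* the kernel also calls tcp_sync_mss(sk, icsk->icsk_pmtu_cookie) here */
}

int main(void)
{
	struct mtu_probe p = { 1024, 1500, 0 };

	while (p.search_high - p.search_low > 1) {
		p.probe_size = next_probe_size(&p);
		/* pretend anything above 1400 bytes is dropped on the path */
		if (p.probe_size > 1400)
			probe_failed(&p);
		else
			probe_success(&p);
	}
	printf("discovered MTU floor: %d\n", p.search_low);
	return 0;
}
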
2740 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_non_congestion_loss_retransmit() local
2743 if (icsk->icsk_ca_state != TCP_CA_Loss) { in tcp_non_congestion_loss_retransmit()
2954 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_fastretrans_alert() local
2976 if (icsk->icsk_ca_state == TCP_CA_Open) { in tcp_fastretrans_alert()
2980 switch (icsk->icsk_ca_state) { in tcp_fastretrans_alert()
3001 switch (icsk->icsk_ca_state) { in tcp_fastretrans_alert()
3013 if (icsk->icsk_ca_state != TCP_CA_Recovery) { in tcp_fastretrans_alert()
3024 if (icsk->icsk_ca_state != TCP_CA_Loss) in tcp_fastretrans_alert()
3027 if (!(icsk->icsk_ca_state == TCP_CA_Open || in tcp_fastretrans_alert()
3039 if (icsk->icsk_ca_state <= TCP_CA_Disorder) in tcp_fastretrans_alert()
3049 if (icsk->icsk_ca_state < TCP_CA_CWR && in tcp_fastretrans_alert()
3050 icsk->icsk_mtup.probe_size && in tcp_fastretrans_alert()
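
The ordered comparisons on icsk_ca_state in tcp_enter_loss() and tcp_fastretrans_alert() (`<= TCP_CA_Disorder`, `< TCP_CA_Recovery`, `< TCP_CA_CWR`) rely on the enum layout from include/uapi/linux/tcp.h, reproduced below. The predicate shown is an illustrative reading of why `state <= TCP_CA_Disorder` marks states where a fresh ssthresh may still be taken.

#include <stdbool.h>
#include <stdio.h>

enum tcp_ca_state {
	TCP_CA_Open = 0,     /* normal, no outstanding loss event */
	TCP_CA_Disorder = 1, /* reordering suspected (dupacks/SACK) */
	TCP_CA_CWR = 2,      /* cwnd reduction after ECN or local event */
	TCP_CA_Recovery = 3, /* fast retransmit in progress */
	TCP_CA_Loss = 4,     /* RTO fired, retransmitting from scratch */
};

/* In states up to Disorder no reduction is in progress, so entering
 * loss recovery may record a new ssthresh (hypothetical helper name). */
static bool may_take_new_ssthresh(enum tcp_ca_state state)
{
	return state <= TCP_CA_Disorder;
}

int main(void)
{
	printf("%d %d\n", may_take_new_ssthresh(TCP_CA_Open),
	       may_take_new_ssthresh(TCP_CA_Recovery));
	return 0;
}
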
3138 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_cong_avoid() local
3140 icsk->icsk_ca_ops->cong_avoid(sk, ack, acked); in tcp_cong_avoid()
3149 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_rearm_rto() local
3163 if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT || in tcp_rearm_rto()
3164 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { in tcp_rearm_rto()
3229 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_clean_rtx_queue() local
3358 if (unlikely(icsk->icsk_mtup.probe_size && in tcp_clean_rtx_queue()
3389 if (icsk->icsk_ca_ops->pkts_acked) { in tcp_clean_rtx_queue()
3395 icsk->icsk_ca_ops->pkts_acked(sk, &sample); in tcp_clean_rtx_queue()
3403 icsk = inet_csk(sk); in tcp_clean_rtx_queue()
3406 tp->lost_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3411 tp->sacked_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3416 tp->retrans_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3426 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ack_probe() local
3434 icsk->icsk_backoff = 0; in tcp_ack_probe()
3435 icsk->icsk_probes_tstamp = 0; in tcp_ack_probe()
3478 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_cong_control() local
3480 if (icsk->icsk_ca_ops->cong_control) { in tcp_cong_control()
3481 icsk->icsk_ca_ops->cong_control(sk, ack, flag, rs); in tcp_cong_control()
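
The tcp_cong_control() and tcp_cong_avoid() hits show the pluggable congestion-control dispatch: a module that installs cong_control (as BBR does) takes over the whole cwnd decision, otherwise the stack falls back to the classic cong_avoid hook. A trimmed sketch of that pattern; struct cc_ops and the reno_* names are stand-ins for struct tcp_congestion_ops and a real module, not the kernel's definitions.

#include <stdio.h>

struct conn; /* opaque connection, stands in for struct sock */

struct cc_ops {
	/* full control of cwnd/pacing; may be NULL */
	void (*cong_control)(struct conn *c, unsigned int acked);
	/* classic window growth, used when cong_control is NULL */
	void (*cong_avoid)(struct conn *c, unsigned int acked);
};

static void reno_cong_avoid(struct conn *c, unsigned int acked)
{
	(void)c;
	printf("cong_avoid: grow cwnd for %u acked packets\n", acked);
}

static const struct cc_ops reno_ops = {
	.cong_control = NULL, /* Reno-style modules keep the core logic */
	.cong_avoid   = reno_cong_avoid,
};

static void cong_control(struct conn *c, const struct cc_ops *ops,
			 unsigned int acked)
{
	if (ops->cong_control) { /* module owns the whole decision */
		ops->cong_control(c, acked);
		return;
	}
	ops->cong_avoid(c, acked); /* default path, as in tcp_cong_avoid() */
}

int main(void)
{
	cong_control(NULL, &reno_ops, 3);
	return 0;
}
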
3751 const struct inet_connection_sock *icsk = inet_csk(sk); in tcp_in_ack_event() local
3753 if (icsk->icsk_ca_ops->in_ack_event) { in tcp_in_ack_event()
3764 icsk->icsk_ca_ops->in_ack_event(sk, ack_ev_flags); in tcp_in_ack_event()
3807 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_ack() local
3854 icsk->icsk_retransmits = 0; in tcp_ack()
3916 icsk->icsk_probes_out = 0; in tcp_ack()
6235 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_init_transfer() local
6239 icsk->icsk_af_ops->rebuild_header(sk); in tcp_init_transfer()
6256 if (!icsk->icsk_ca_initialized) in tcp_init_transfer()
6264 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_finish_connect() local
6268 icsk->icsk_ack.lrcvtime = tcp_jiffies32; in tcp_finish_connect()
6271 icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); in tcp_finish_connect()
6383 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_rcv_synsent_state_process() local
6406 if (icsk->icsk_retransmits == 0) in tcp_rcv_synsent_state_process()
6488 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_rcv_synsent_state_process()
6512 READ_ONCE(icsk->icsk_accept_queue.rskq_defer_accept) || in tcp_rcv_synsent_state_process()
6589 tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); in tcp_rcv_synsent_state_process()
6679 struct inet_connection_sock *icsk = inet_csk(sk); in tcp_rcv_state_process() local
6708 icsk->icsk_af_ops->conn_request(sk, skb); in tcp_rcv_state_process()
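
The connection-setup hits (rebuild_header, sk_rx_dst_set, conn_request) go through icsk->icsk_af_ops, the per-address-family ops table that lets one connection-level code path serve both IPv4 and IPv6. A minimal sketch of that indirection; struct af_ops and the v4_* handlers below are hypothetical stand-ins for struct inet_connection_sock_af_ops and its IPv4 instance.

#include <stdio.h>

struct conn; /* opaque, stands in for struct sock */

struct af_ops {
	int (*rebuild_header)(struct conn *c);
	int (*conn_request)(struct conn *c);
};

static int v4_rebuild_header(struct conn *c) { (void)c; puts("v4 rebuild"); return 0; }
static int v4_conn_request(struct conn *c)   { (void)c; puts("v4 SYN");     return 0; }

static const struct af_ops ipv4_ops = {
	.rebuild_header = v4_rebuild_header,
	.conn_request   = v4_conn_request,
};

/* Family-agnostic caller, in the spirit of tcp_rcv_state_process(). */
static int handle_listen_syn(struct conn *c, const struct af_ops *ops)
{
	return ops->conn_request(c);
}

int main(void)
{
	return handle_listen_syn(NULL, &ipv4_ops);
}
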