/linux/net/dccp/
timer.c
      37  if (icsk->icsk_retransmits != 0)  in dccp_write_timeout()
      98  if (icsk->icsk_retransmits == 0)  in dccp_retransmit_timer()
     107  icsk->icsk_retransmits = 1;  in dccp_retransmit_timer()
     109  min(icsk->icsk_rto,  in dccp_retransmit_timer()
     115  icsk->icsk_backoff++;  in dccp_retransmit_timer()
     117  icsk->icsk_rto = min(icsk->icsk_rto << 1, DCCP_RTO_MAX);  in dccp_retransmit_timer()
     144  icsk->icsk_timeout);  in dccp_write_timer()
     148  event = icsk->icsk_pending;  in dccp_write_timer()
     149  icsk->icsk_pending = 0;  in dccp_write_timer()
     199  icsk->icsk_ack.ato = min_t(u32, icsk->icsk_ack.ato << 1,  in dccp_delack_timer()
    [all …]

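The dccp_retransmit_timer() hits above (backoff increment at line 115, RTO doubling at line 117) show the standard exponential-backoff pattern: on each timeout the RTO is doubled and clamped to DCCP_RTO_MAX. A minimal userspace sketch of that arithmetic follows; the constants and the next_rto() helper are illustrative stand-ins, not kernel values.

    #include <stdio.h>

    /* Illustrative values only; the kernel derives its RTO bounds from HZ. */
    #define RTO_INITIAL   200u    /* hypothetical initial RTO, in ms */
    #define RTO_MAX     64000u    /* hypothetical cap, in ms */

    /* Mirror of the "rto = min(rto << 1, RTO_MAX)" pattern quoted above. */
    static unsigned int next_rto(unsigned int rto)
    {
        unsigned int doubled = rto << 1;

        return doubled < RTO_MAX ? doubled : RTO_MAX;
    }

    int main(void)
    {
        unsigned int rto = RTO_INITIAL;
        unsigned int backoff;

        for (backoff = 0; backoff < 12; backoff++) {
            printf("backoff=%2u rto=%ums\n", backoff, rto);
            rto = next_rto(rto);
        }
        return 0;
    }
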
output.c
      73  if (icsk->icsk_retransmits == 0)  in dccp_transmit_skb()
     131  icsk->icsk_af_ops->send_check(sk, skb);  in dccp_transmit_skb()
     163  struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_sync_mss() local
     169  cur_mps -= (icsk->icsk_af_ops->net_header_len + icsk->icsk_ext_hdr_len +  in dccp_sync_mss()
     189  icsk->icsk_pmtu_cookie = pmtu;  in dccp_sync_mss()
     573  icsk->icsk_retransmits = 0;  in dccp_connect()
     575  icsk->icsk_rto, DCCP_RTO_MAX);  in dccp_connect()
     619  if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
     625  if (icsk->icsk_ack.blocked) {
     631  timeout = icsk->icsk_ack.timeout;
    [all …]

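Lines 163–189 belong to dccp_sync_mss(), which derives the maximum packet size by subtracting network and extension header overhead from the cached path MTU, then records the PMTU in icsk_pmtu_cookie. Below is a rough, self-contained sketch of that subtraction; every header size here is an illustrative placeholder, not the value the kernel actually uses.

    #include <stdio.h>

    int main(void)
    {
        const unsigned int pmtu = 1500;            /* hypothetical path MTU */
        const unsigned int net_header_len = 20;    /* e.g. an IPv4 header */
        const unsigned int ext_hdr_len = 0;        /* IP options / extension headers */
        const unsigned int dccp_overhead = 16;     /* placeholder transport overhead */
        unsigned int cur_mps = pmtu;

        /* Same shape as the "cur_mps -= (net_header_len + ext_hdr_len + ..."
         * line quoted above. */
        cur_mps -= net_header_len + ext_hdr_len + dccp_overhead;

        printf("pmtu=%u -> cur_mps=%u\n", pmtu, cur_mps);
        return 0;
    }
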
diag.c
      19  const struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_get_info() local
      24  info->tcpi_retransmits = icsk->icsk_retransmits;  in dccp_get_info()
      25  info->tcpi_probes = icsk->icsk_probes_out;  in dccp_get_info()
      26  info->tcpi_backoff = icsk->icsk_backoff;  in dccp_get_info()
      27  info->tcpi_pmtu = icsk->icsk_pmtu_cookie;  in dccp_get_info()

minisocks.c
      40  const struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_time_wait() local
      41  const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);  in dccp_time_wait()

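The dccp_time_wait() hit at line 41 builds a timeout from the current RTO using shifts: (rto << 2) - (rto >> 1) is 4·RTO minus RTO/2, i.e. 3.5·RTO (the same expression appears in tcp_time_wait() further down). A tiny sketch of the identity with a hypothetical, even RTO value:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        const int rto = 200;                          /* hypothetical RTO, ms */
        const int timeout = (rto << 2) - (rto >> 1);  /* 4*RTO - RTO/2 */

        assert(timeout == rto * 7 / 2);               /* 3.5 * RTO (exact for even rto) */
        printf("rto=%d -> timeout=%d\n", rto, timeout);
        return 0;
    }
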
input.c
     401  const struct inet_connection_sock *icsk = inet_csk(sk);  in dccp_rcv_request_sent_state_process() local
     443  dccp_sync_mss(sk, icsk->icsk_pmtu_cookie);  in dccp_rcv_request_sent_state_process()
     472  icsk->icsk_af_ops->rebuild_header(sk);  in dccp_rcv_request_sent_state_process()
     480  icsk->icsk_accept_queue.rskq_defer_accept) {  in dccp_rcv_request_sent_state_process()

/linux/net/ipv4/
tcp_timer.c
      36  return icsk->icsk_rto;  in tcp_clamp_rto_to_user_timeout()
     326  sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);  in tcp_delack_timer_handler()
     334  icsk->icsk_ack.ato = min_t(u32, icsk->icsk_ack.ato << 1, icsk->icsk_rto);  in tcp_delack_timer_handler()
     436  icsk->icsk_retransmits++;  in tcp_update_rto_stats()
     656  icsk->icsk_backoff = 0;  in tcp_retransmit_timer()
     666  icsk->icsk_backoff++;  in tcp_retransmit_timer()
     667  icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);  in tcp_retransmit_timer()
     685  !icsk->icsk_pending)  in tcp_write_timer_handler()
     689  sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);  in tcp_write_timer_handler()
     704  icsk->icsk_pending = 0;  in tcp_write_timer_handler()
    [all …]

tcp_ulp.c
     106  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_update_ulp() local
     108  if (icsk->icsk_ulp_ops->update)  in tcp_update_ulp()
     114  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_cleanup_ulp() local
     120  if (!icsk->icsk_ulp_ops)  in tcp_cleanup_ulp()
     123  if (icsk->icsk_ulp_ops->release)  in tcp_cleanup_ulp()
     124  icsk->icsk_ulp_ops->release(sk);  in tcp_cleanup_ulp()
     125  module_put(icsk->icsk_ulp_ops->owner);  in tcp_cleanup_ulp()
     127  icsk->icsk_ulp_ops = NULL;  in tcp_cleanup_ulp()
     132  struct inet_connection_sock *icsk = inet_csk(sk);  in __tcp_set_ulp() local
     136  if (icsk->icsk_ulp_ops)  in __tcp_set_ulp()
    [all …]

tcp_cong.c
      43  if (icsk->icsk_ca_ops->set_state)  in tcp_set_ca_state()
      45  icsk->icsk_ca_state = ca_state;  in tcp_set_ca_state()
     225  icsk->icsk_ca_ops = ca;  in tcp_assign_congestion_control()
     228  memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));  in tcp_assign_congestion_control()
     240  if (icsk->icsk_ca_ops->init)  in tcp_init_congestion_control()
     241  icsk->icsk_ca_ops->init(sk);  in tcp_init_congestion_control()
     255  icsk->icsk_ca_ops = ca;  in tcp_reinit_congestion_control()
     256  icsk->icsk_ca_setsockopt = 1;  in tcp_reinit_congestion_control()
     257  memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));  in tcp_reinit_congestion_control()
     275  bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner);  in tcp_cleanup_congestion_control()
    [all …]

tcp_output.c
     177  if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)  in tcp_event_data_sent()
    2461  interval = icsk->icsk_mtup.search_high - icsk->icsk_mtup.search_low;  in tcp_mtu_probe()
    3117  icsk->icsk_ack.quick = 0;  in __tcp_select_window()
    3176  icsk->icsk_ack.quick = 0;  in __tcp_select_window()
    3867  bpf_module_put(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner);  in tcp_ca_dst_init()
    3869  icsk->icsk_ca_ops = ca;  in tcp_ca_dst_init()
    4250  icsk->icsk_ack.retry++;  in __tcp_send_ack()
    4379  icsk->icsk_probes_out = 0;  in tcp_send_probe0()
    4380  icsk->icsk_backoff = 0;  in tcp_send_probe0()
    4385  icsk->icsk_probes_out++;  in tcp_send_probe0()
    [all …]

inet_connection_sock.c
     770  icsk->icsk_pending = icsk->icsk_ack.pending = 0;  in inet_csk_init_xmit_timers()
     778  icsk->icsk_pending = icsk->icsk_ack.pending = 0;  in inet_csk_clear_xmit_timers()
     780  sk_stop_timer(sk, &icsk->icsk_retransmit_timer);  in inet_csk_clear_xmit_timers()
     781  sk_stop_timer(sk, &icsk->icsk_delack_timer);  in inet_csk_clear_xmit_timers()
     793  icsk->icsk_pending = icsk->icsk_ack.pending = 0;  in inet_csk_clear_xmit_timers_sync()
    1087  struct inet_connection_sock *icsk;  in reqsk_timer_handler() local
    1116  icsk = inet_csk(sk_listener);  in reqsk_timer_handler()
    1137  queue = &icsk->icsk_accept_queue;  in reqsk_timer_handler()
    1231  if (!icsk->icsk_ulp_ops)  in inet_clone_ulp()
    1334  if (icsk->icsk_ulp_ops && !icsk->icsk_ulp_ops->clone)  in inet_ulp_can_listen()
    [all …]

tcp_input.c
     601  icsk->icsk_ack.quick = 0;  in tcp_clamp_window()
     828  if (!icsk->icsk_ack.ato) {  in tcp_event_data_recv()
     839  icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2;  in tcp_event_data_recv()
     841  icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m;  in tcp_event_data_recv()
     842  if (icsk->icsk_ack.ato > icsk->icsk_rto)  in tcp_event_data_recv()
     843  icsk->icsk_ack.ato = icsk->icsk_rto;  in tcp_event_data_recv()
    2203  (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {  in tcp_enter_loss()
    2795  icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;  in tcp_mtup_probe_failed()
    2817  icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;  in tcp_mtup_probe_success()
    3525  icsk = inet_csk(sk);  in tcp_clean_rtx_queue()
    [all …]

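The tcp_event_data_recv() hits (lines 839–843) show how the delayed-ACK timeout (ato) is adapted: it is halved, a new inter-arrival sample is folded in, and the result is clamped so it never exceeds the RTO. A simplified userspace sketch of that smoothing follows; the constants, the sample values, and the update_ato() helper are made up for illustration and omit the kernel's extra branches.

    #include <stdio.h>

    #define ATO_MIN  40u    /* hypothetical minimum delayed-ACK timeout, ms */
    #define RTO     200u    /* hypothetical current RTO, ms */

    /* Roughly mirrors "ato = (ato >> 1) + m", clamped to the RTO. */
    static unsigned int update_ato(unsigned int ato, unsigned int m)
    {
        ato = (ato >> 1) + m;
        if (ato > RTO)
            ato = RTO;
        return ato;
    }

    int main(void)
    {
        unsigned int ato = ATO_MIN;
        unsigned int samples[] = { 10, 10, 300, 15, 15 };  /* inter-arrival gaps, ms */
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            ato = update_ato(ato, samples[i]);
            printf("sample=%3ums -> ato=%ums\n", samples[i], ato);
        }
        return 0;
    }
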
inet_timewait_sock.c
     113  const struct inet_connection_sock *icsk = inet_csk(sk);  in inet_twsk_hashdance_schedule() local
     130  tw->tw_tb = icsk->icsk_bind_hash;  in inet_twsk_hashdance_schedule()
     131  WARN_ON(!icsk->icsk_bind_hash);  in inet_twsk_hashdance_schedule()
     133  tw->tw_tb2 = icsk->icsk_bind2_hash;  in inet_twsk_hashdance_schedule()
     134  WARN_ON(!icsk->icsk_bind2_hash);  in inet_twsk_hashdance_schedule()

tcp_htcp.c
      84  const struct inet_connection_sock *icsk = inet_csk(sk);  in measure_rtt() local
      92  if (icsk->icsk_ca_state == TCP_CA_Open) {  in measure_rtt()
     104  const struct inet_connection_sock *icsk = inet_csk(sk);  in measure_achieved_throughput() local
     109  if (icsk->icsk_ca_state == TCP_CA_Open)  in measure_achieved_throughput()
     119  if (!((1 << icsk->icsk_ca_state) & (TCPF_CA_Open | TCPF_CA_Disorder))) {  in measure_achieved_throughput()

tcp_diag.c
     115  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_diag_get_aux() local
     135  ulp_ops = icsk->icsk_ulp_ops;  in tcp_diag_get_aux()
     146  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_diag_get_aux_size() local
     170  ulp_ops = icsk->icsk_ulp_ops;  in tcp_diag_get_aux_size()

tcp.c
     434  icsk->icsk_rto = TCP_TIMEOUT_INIT;  in tcp_init_sock()
     468  icsk->icsk_sync_mss = tcp_sync_mss;  in tcp_init_sock()
    3324  icsk->icsk_backoff = 0;  in tcp_disconnect()
    3325  icsk->icsk_probes_out = 0;  in tcp_disconnect()
    3326  icsk->icsk_probes_tstamp = 0;  in tcp_disconnect()
    3338  if (icsk->icsk_ca_ops->release)  in tcp_disconnect()
    3339  icsk->icsk_ca_ops->release(sk);  in tcp_disconnect()
    3340  memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));  in tcp_disconnect()
    3341  icsk->icsk_ca_initialized = 0;  in tcp_disconnect()
    4378  ca_ops = icsk->icsk_ca_ops;  in do_tcp_getsockopt()
    [all …]

tcp_minisocks.c
     304  const struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_time_wait() local
     313  const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);  in tcp_time_wait()
     461  struct inet_connection_sock *icsk = inet_csk(sk);  in tcp_ca_openreq_child() local
     471  icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);  in tcp_ca_openreq_child()
     472  icsk->icsk_ca_ops = ca;  in tcp_ca_openreq_child()
     480  (!icsk->icsk_ca_setsockopt ||  in tcp_ca_openreq_child()
     481  !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))  in tcp_ca_openreq_child()

inet_diag.c
     305  if (!icsk) {  in inet_sk_diag_fill()
     310  if (icsk->icsk_pending == ICSK_TIME_RETRANS ||  in inet_sk_diag_fill()
     311  icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||  in inet_sk_diag_fill()
     312  icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {  in inet_sk_diag_fill()
     314  r->idiag_retrans = icsk->icsk_retransmits;  in inet_sk_diag_fill()
     316  jiffies_delta_to_msecs(icsk->icsk_timeout - jiffies);  in inet_sk_diag_fill()
     317  } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {  in inet_sk_diag_fill()
     319  r->idiag_retrans = icsk->icsk_probes_out;  in inet_sk_diag_fill()
     324  r->idiag_retrans = icsk->icsk_probes_out;  in inet_sk_diag_fill()
     343  ca_ops = READ_ONCE(icsk->icsk_ca_ops);  in inet_sk_diag_fill()
    [all …]

/linux/include/net/
inet_connection_sock.h
     200  icsk->icsk_pending = 0;  in inet_csk_clear_xmit_timer()
     205  icsk->icsk_ack.pending = 0;  in inet_csk_clear_xmit_timer()
     206  icsk->icsk_ack.retry = 0;  in inet_csk_clear_xmit_timer()
     232  icsk->icsk_pending = what;  in inet_csk_reset_xmit_timer()
     233  icsk->icsk_timeout = jiffies + when;  in inet_csk_reset_xmit_timer()
     234  sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);  in inet_csk_reset_xmit_timer()
     237  icsk->icsk_ack.timeout = jiffies + when;  in inet_csk_reset_xmit_timer()
     238  sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);  in inet_csk_reset_xmit_timer()
     248  u64 when = (u64)icsk->icsk_rto << icsk->icsk_backoff;  in inet_csk_rto_backoff()
     349  if (icsk->icsk_ack.pingpong < U8_MAX)  in inet_csk_inc_pingpong_cnt()
    [all …]

espintcp.h
      35  const struct inet_connection_sock *icsk = inet_csk(sk);  in espintcp_getctx() local
      38  return (__force void *)icsk->icsk_ulp_data;  in espintcp_getctx()

/linux/tools/testing/selftests/bpf/progs/
bpf_iter_tcp4.c
      77  const struct inet_connection_sock *icsk;  in dump_tcp_sock() local
      88  icsk = &tp->inet_conn;  in dump_tcp_sock()
      89  inet = &icsk->icsk_inet;  in dump_tcp_sock()
      91  fastopenq = &icsk->icsk_accept_queue.fastopenq;  in dump_tcp_sock()
      98  if (icsk->icsk_pending == ICSK_TIME_RETRANS ||  in dump_tcp_sock()
     102  timer_expires = icsk->icsk_timeout;  in dump_tcp_sock()
     105  timer_expires = icsk->icsk_timeout;  in dump_tcp_sock()
     130  icsk->icsk_retransmits, uid,  in dump_tcp_sock()
     131  icsk->icsk_probes_out,  in dump_tcp_sock()
     136  jiffies_to_clock_t(icsk->icsk_rto),  in dump_tcp_sock()
    [all …]

bpf_iter_tcp6.c
      77  const struct inet_connection_sock *icsk;  in dump_tcp6_sock() local
      88  icsk = &tp->tcp.inet_conn;  in dump_tcp6_sock()
      89  inet = &icsk->icsk_inet;  in dump_tcp6_sock()
      91  fastopenq = &icsk->icsk_accept_queue.fastopenq;  in dump_tcp6_sock()
      98  if (icsk->icsk_pending == ICSK_TIME_RETRANS ||  in dump_tcp6_sock()
     102  timer_expires = icsk->icsk_timeout;  in dump_tcp6_sock()
     105  timer_expires = icsk->icsk_timeout;  in dump_tcp6_sock()
     134  icsk->icsk_retransmits, uid,  in dump_tcp6_sock()
     135  icsk->icsk_probes_out,  in dump_tcp6_sock()
     140  jiffies_to_clock_t(icsk->icsk_rto),  in dump_tcp6_sock()
    [all …]

sock_destroy_prog.c
      77  const struct inet_connection_sock *icsk;  in iter_tcp6_server() local
      92  icsk = &tcp_sk->tcp.inet_conn;  in iter_tcp6_server()
      93  inet = &icsk->icsk_inet;  in iter_tcp6_server()

/linux/net/mptcp/
token_test.c
      34  struct inet_connection_sock *icsk;  in build_icsk() local
      36  icsk = kunit_kzalloc(test, sizeof(struct inet_connection_sock),  in build_icsk()
      38  KUNIT_EXPECT_NOT_ERR_OR_NULL(test, icsk);  in build_icsk()
      39  return icsk;  in build_icsk()
      73  struct inet_connection_sock *icsk = build_icsk(test);  in mptcp_token_test_msk_basic() local
      79  rcu_assign_pointer(icsk->icsk_ulp_data, ctx);  in mptcp_token_test_msk_basic()
      84  mptcp_token_new_connect((struct sock *)icsk));  in mptcp_token_test_msk_basic()

/linux/net/tls/
tls_toe.c
      48  struct inet_connection_sock *icsk = inet_csk(sk);  in tls_toe_sk_destruct() local
      53  rcu_assign_pointer(icsk->icsk_ulp_data, NULL);  in tls_toe_sk_destruct()

/linux/net/ipv6/
tcp_ipv6.c
     221  u32 exthdrlen = icsk->icsk_ext_hdr_len;  in tcp_v6_connect()
     232  WRITE_ONCE(icsk->icsk_af_ops, &ipv6_mapped);  in tcp_v6_connect()
     243  icsk->icsk_ext_hdr_len = exthdrlen;  in tcp_v6_connect()
     301  icsk->icsk_ext_hdr_len = 0;  in tcp_v6_connect()
     303  icsk->icsk_ext_hdr_len = opt->opt_flen +  in tcp_v6_connect()
    2125  icsk->icsk_af_ops = &ipv6_specific;
    2190  timer_expires = icsk->icsk_timeout;
    2193  timer_expires = icsk->icsk_timeout;
    2225  icsk->icsk_retransmits,
    2227  icsk->icsk_probes_out,
    [all …]