/linux/include/net/
inet_connection_sock.h
    154   return (void *)inet_csk(sk)->icsk_ca_priv;   in inet_csk_ca()
    179   inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_SCHED;   in inet_csk_schedule_ack()
    189   memset(&inet_csk(sk)->icsk_ack, 0, sizeof(inet_csk(sk)->icsk_ack));   in inet_csk_delack_init()
    197   struct inet_connection_sock *icsk = inet_csk(sk);   in inet_csk_clear_xmit_timer()
    222   struct inet_connection_sock *icsk = inet_csk(sk);   in inet_csk_reset_xmit_timer()
    274   reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue);   in inet_csk_reqsk_queue_added()
    330   inet_csk(sk)->icsk_ack.pingpong =   in inet_csk_enter_pingpong_mode()
    336   inet_csk(sk)->icsk_ack.pingpong = 0;   in inet_csk_exit_pingpong_mode()
    341   return inet_csk(sk)->icsk_ack.pingpong >=   in inet_csk_in_pingpong_mode()
    347   struct inet_connection_sock *icsk = inet_csk(sk);   in inet_csk_inc_pingpong_cnt()
    [all …]
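
Most of the helpers listed above reduce to a single idiom: `inet_csk()` downcasts a `struct sock *` to the connection-oriented socket that embeds it as its first member, and the rest is field access through that pointer (`inet_csk_ca()`, for instance, just exposes the `icsk_ca_priv` scratch area used by congestion-control modules). Below is a minimal, compilable userspace sketch of that idiom; it assumes only the first-member embedding, and the struct layouts and array size are illustrative, not the kernel's.

```c
/* Sketch of the inet_csk() downcast idiom, assuming only that the
 * connection-socket struct embeds its parent as the FIRST member (as
 * struct inet_connection_sock does with icsk_inet). Layouts here are
 * illustrative stand-ins, not the kernel's. */
#include <stdio.h>

struct sock {                 /* stand-in for struct sock */
    int sk_state;
};

struct inet_connection_sock { /* parent must come first for the cast */
    struct sock icsk_inet;
    unsigned long icsk_ca_priv[13];  /* CA scratch area; size illustrative */
};

/* Because the parent is the first member, the downcast is a plain cast. */
static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
{
    return (struct inet_connection_sock *)sk;
}

static inline void *inet_csk_ca(const struct sock *sk)
{
    return (void *)inet_csk(sk)->icsk_ca_priv;
}

int main(void)
{
    struct inet_connection_sock icsk = { .icsk_inet = { .sk_state = 1 } };
    struct sock *sk = &icsk.icsk_inet;   /* callers pass the base pointer */

    printf("state=%d ca_priv=%p\n",
           inet_csk(sk)->icsk_inet.sk_state, inet_csk_ca(sk));
    return 0;
}
```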
|
tcp.h
    359    struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_dec_quickack_mode()
    744    if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)   in tcp_bound_rto()
    745    inet_csk(sk)->icsk_rto = TCP_RTO_MAX;   in tcp_bound_rto()
    786    u32 rto_min = inet_csk(sk)->icsk_rto_min;   in tcp_rto_min()
    1336   (1 << inet_csk(sk)->icsk_ca_state);   in tcp_in_cwnd_reduction()
    1448   inet_csk(sk)->icsk_backoff);   in tcp_probe0_when()
    1513   if (delta > inet_csk(sk)->icsk_rto)   in tcp_slow_start_after_idle_check()
    1678   const int rto = inet_csk(sk)->icsk_rto;   in tcp_fin_time()
    2437   u32 rto = inet_csk(sk)->icsk_rto;   in tcp_rto_delta_us()
    2452   inet_csk(sk)->icsk_ca_state,   in tcp_rto_delta_us()
    [all …]
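
The `tcp_bound_rto()` and `tcp_rto_min()` hits show the two bounds kept on `icsk_rto`. A small sketch of that clamping follows; note that the kernel's `tcp_bound_rto()` enforces only the `TCP_RTO_MAX` cap, with the floor applied separately via `tcp_rto_min()`, and the millisecond constants here stand in for the kernel's jiffies-based values.

```c
/* Sketch of the RTO bounds seen above: icsk_rto is kept within
 * [rto_min, TCP_RTO_MAX]. Milliseconds; constants are illustrative
 * equivalents of the kernel's TCP_RTO_MAX (120*HZ) and TCP_RTO_MIN (HZ/5). */
#include <stdint.h>
#include <stdio.h>

#define TCP_RTO_MAX_MS 120000u
#define TCP_RTO_MIN_MS 200u

static uint32_t tcp_bound_rto_ms(uint32_t rto_ms)
{
    if (rto_ms > TCP_RTO_MAX_MS)   /* the cap tcp_bound_rto() applies */
        rto_ms = TCP_RTO_MAX_MS;
    if (rto_ms < TCP_RTO_MIN_MS)   /* floor; kernel applies it via tcp_rto_min() */
        rto_ms = TCP_RTO_MIN_MS;
    return rto_ms;
}

int main(void)
{
    printf("%u\n", tcp_bound_rto_ms(500));      /* 500: already in range  */
    printf("%u\n", tcp_bound_rto_ms(300000));   /* clamped to 120000      */
    return 0;
}
```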
|
espintcp.h
    35   const struct inet_connection_sock *icsk = inet_csk(sk);   in espintcp_getctx()
|
/linux/net/ipv4/
tcp_recovery.c
    13    if (inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery)   in tcp_rack_reo_wnd()
    109   timeout, inet_csk(sk)->icsk_rto);   in tcp_rack_mark_lost()
    158   if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {   in tcp_rack_reo_timeout()
    160   if (!inet_csk(sk)->icsk_ca_ops->cong_control)   in tcp_rack_reo_timeout()
    165   if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)   in tcp_rack_reo_timeout()
    219   const u8 state = inet_csk(sk)->icsk_ca_state;   in tcp_newreno_mark_lost()
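
These RACK hits gate loss marking on `icsk_ca_state` and rearm the reordering-window timer against `icsk_rto`. As a hedged sketch of the underlying rule (RFC 8985), a packet is declared lost once it trails the most recent delivery by more than that delivery's RTT plus a reordering window; the structure and field names below are illustrative, not the kernel's.

```c
/* Hedged sketch of the RACK loss decision behind tcp_rack_mark_lost()
 * above: a sent packet is lost if it has been outstanding longer than
 * the RTT of the most recently delivered packet plus a reordering
 * window. All names and fields are illustrative. */
#include <stdbool.h>
#include <stdint.h>

struct rack_state {
    uint64_t rtt_us;      /* RTT of the most recently delivered packet */
    uint64_t reo_wnd_us;  /* reordering window */
};

/* True if a packet transmitted at xmit_us should be marked lost at now_us. */
static bool rack_packet_lost(const struct rack_state *r,
                             uint64_t xmit_us, uint64_t now_us)
{
    return now_us - xmit_us > r->rtt_us + r->reo_wnd_us;
}
```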
|
tcp_timer.c
    29    const struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_clamp_rto_to_user_timeout()
    51    const struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_clamp_probe0_to_user_timeout()
    220   if (!inet_csk(sk)->icsk_retransmits)   in retransmits_timed_out()
    243   struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_write_timeout()
    309   struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_delack_timer_handler()
    379   struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_probe_timer()
    429   struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_update_rto_stats()
    446   struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_fastopen_synack_timer()
    524   struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_retransmit_timer()
    681   struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_write_timer_handler()
    [all …]
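
`tcp_retransmit_timer()` above is where `icsk_rto` and `icsk_retransmits`/`icsk_backoff` interact: each expiry doubles the RTO up to `TCP_RTO_MAX`, and `tcp_clamp_rto_to_user_timeout()` further bounds it by whatever remains of a `TCP_USER_TIMEOUT` setting. A sketch of that arithmetic, in milliseconds with illustrative constants:

```c
/* Sketch of retransmit-timer backoff: the RTO doubles per timeout,
 * capped at TCP_RTO_MAX, then is clamped to the time left before a
 * user-set timeout would expire. Milliseconds; constants illustrative. */
#include <stdint.h>

#define TCP_RTO_MAX_MS 120000u

static uint32_t min_u32(uint32_t a, uint32_t b) { return a < b ? a : b; }

/* Next RTO after a timeout, before user-timeout clamping. */
static uint32_t next_rto(uint32_t rto_ms)
{
    return min_u32(rto_ms << 1, TCP_RTO_MAX_MS);
}

/* Bound the RTO by what remains of the user timeout, if one is set. */
static uint32_t clamp_rto_to_user_timeout(uint32_t rto_ms,
                                          uint32_t elapsed_ms,
                                          uint32_t user_timeout_ms)
{
    if (!user_timeout_ms || elapsed_ms >= user_timeout_ms)
        return rto_ms;   /* no timeout set, or expiry handled elsewhere */
    return min_u32(rto_ms, user_timeout_ms - elapsed_ms);
}
```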
|
inet_connection_sock.c
    592    if (!inet_csk(sk)->icsk_bind_hash)   in inet_csk_get_port()
    594    WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);   in inet_csk_get_port()
    595    WARN_ON(inet_csk(sk)->icsk_bind2_hash != tb2);   in inet_csk_get_port()
    618    struct inet_connection_sock *icsk = inet_csk(sk);   in inet_csk_wait_for_connect()
    666    struct inet_connection_sock *icsk = inet_csk(sk);   in inet_csk_accept()
    765    struct inet_connection_sock *icsk = inet_csk(sk);   in inet_csk_init_xmit_timers()
    776    struct inet_connection_sock *icsk = inet_csk(sk);   in inet_csk_clear_xmit_timers()
    788    struct inet_connection_sock *icsk = inet_csk(sk);   in inet_csk_clear_xmit_timers_sync()
    1116   icsk = inet_csk(sk_listener);   in reqsk_timer_handler()
    1342   struct inet_connection_sock *icsk = inet_csk(sk);   in inet_csk_listen_start()
    [all …]
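
`inet_csk_accept()` and `inet_csk_wait_for_connect()` above drain `icsk_accept_queue`, the listener's FIFO of completed handshakes. A single-threaded sketch of that queue follows; the kernel's version adds locking, `reqsk_queue_added()`-style counters, and fastopen handling, all omitted here.

```c
/* Sketch of a listener accept queue: completed connections are appended
 * at the tail; accept() pops the head or would block when it is empty.
 * Structures are illustrative stand-ins for struct request_sock(_queue). */
#include <stdlib.h>

struct request {
    struct request *next;
    int newfd;                  /* illustrative payload */
};

struct accept_queue {
    struct request *head, *tail;
};

static void reqsk_queue_add(struct accept_queue *q, struct request *req)
{
    req->next = NULL;
    if (q->tail)
        q->tail->next = req;
    else
        q->head = req;
    q->tail = req;
}

static struct request *reqsk_queue_remove(struct accept_queue *q)
{
    struct request *req = q->head;

    if (req) {
        q->head = req->next;
        if (!q->head)
            q->tail = NULL;
    }
    return req;                 /* NULL means accept() would block */
}
```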
|
tcp_dctcp.h
    29   if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) {   in dctcp_ece_ack_update()
    33   inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;   in dctcp_ece_ack_update()
|
tcp_ulp.c
    106   struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_update_ulp()
    114   struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_cleanup_ulp()
    132   struct inet_connection_sock *icsk = inet_csk(sk);   in __tcp_set_ulp()
|
inet_hashtables.c
    169   inet_csk(sk)->icsk_bind_hash = tb;   in inet_bind_hash()
    170   inet_csk(sk)->icsk_bind2_hash = tb2;   in inet_bind_hash()
    190   tb = inet_csk(sk)->icsk_bind_hash;   in __inet_put_port()
    191   inet_csk(sk)->icsk_bind_hash = NULL;   in __inet_put_port()
    195   if (inet_csk(sk)->icsk_bind2_hash) {   in __inet_put_port()
    199   inet_csk(sk)->icsk_bind2_hash = NULL;   in __inet_put_port()
    234   tb = inet_csk(sk)->icsk_bind_hash;   in __inet_inherit_port()
    235   tb2 = inet_csk(sk)->icsk_bind2_hash;   in __inet_inherit_port()
    718   inet_csk(sk2)->icsk_bind_hash == tb &&   in inet_reuseport_add_sock()
    900   if (!inet_csk(sk)->icsk_bind2_hash) {   in __inet_bhash2_update_saddr()
    [all …]
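
The `inet_bind_hash()`/`__inet_put_port()` hits show why each socket carries `icsk_bind_hash` (and `icsk_bind2_hash`): a back-pointer to its port's bind bucket makes releasing the port O(1). A refcounting sketch under that assumption, with illustrative structures:

```c
/* Sketch of bind-bucket bookkeeping: every socket bound to a port keeps
 * a back-pointer to that port's bucket so unbinding needs no lookup.
 * Ownership counting and structures are illustrative, not the kernel's. */
#include <stdlib.h>

struct bind_bucket {
    unsigned short port;
    int owners;                 /* sockets currently bound to this port */
};

struct csk {                    /* stand-in for inet_connection_sock */
    struct bind_bucket *icsk_bind_hash;
};

static void inet_bind_hash(struct csk *sk, struct bind_bucket *tb)
{
    tb->owners++;
    sk->icsk_bind_hash = tb;    /* back-pointer used at unbind time */
}

static void inet_put_port(struct csk *sk)
{
    struct bind_bucket *tb = sk->icsk_bind_hash;

    sk->icsk_bind_hash = NULL;
    if (--tb->owners == 0)
        free(tb);               /* last owner releases the bucket */
}
```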
|
tcp_cong.c
    39    struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_set_ca_state()
    218   struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_assign_congestion_control()
    237   struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_init_congestion_control()
    252   struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_reinit_congestion_control()
    271   struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_cleanup_congestion_control()
    413   struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_set_congestion_control()
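
All six hits fetch `icsk` to manipulate `icsk_ca_ops`, the per-socket congestion-control vtable: `tcp_assign_congestion_control()` picks an ops table and `tcp_init_congestion_control()` invokes its `init` hook. A stand-alone sketch of that ops-table pattern (the struct is an illustrative stand-in for `struct tcp_congestion_ops`):

```c
/* Sketch of the ops-table pattern behind icsk_ca_ops: assign a vtable,
 * then call through it. Structures and hooks are illustrative. */
#include <stdio.h>

struct sock;                    /* opaque in this sketch */

struct ca_ops {
    const char *name;
    void (*init)(struct sock *sk);
    unsigned int (*ssthresh)(struct sock *sk);
};

static void reno_init(struct sock *sk) { (void)sk; }
static unsigned int reno_ssthresh(struct sock *sk) { (void)sk; return 10; }

static const struct ca_ops reno = {
    .name     = "reno",
    .init     = reno_init,
    .ssthresh = reno_ssthresh,
};

/* Assign, then initialize, as tcp_assign/init_congestion_control() do. */
int main(void)
{
    const struct ca_ops *icsk_ca_ops = &reno;

    if (icsk_ca_ops->init)
        icsk_ca_ops->init(NULL);
    printf("%s ssthresh=%u\n", icsk_ca_ops->name, icsk_ca_ops->ssthresh(NULL));
    return 0;
}
```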
|
tcp_input.c
    489    return 2 * inet_csk(sk)->icsk_ack.rcv_mss;   in __tcp_grow_window()
    543    inet_csk(sk)->icsk_ack.quick |= 1;   in tcp_grow_window()
    631    inet_csk(sk)->icsk_ack.rcv_mss = hint;   in tcp_initialize_rcv_mss()
    987    inet_csk(sk)->icsk_rto = __tcp_set_rto(tp);   in tcp_set_rto()
    2234   !inet_csk(sk)->icsk_mtup.probe_size;   in tcp_enter_loss()
    2669   inet_csk(sk)->icsk_retransmits = 0;   in tcp_try_undo_loss()
    3233   inet_csk(sk)->icsk_backoff = 0;   in tcp_ack_update_rtt()
    3275   u32 rto = inet_csk(sk)->icsk_rto;   in tcp_rearm_rto()
    3525   icsk = inet_csk(sk);   in tcp_clean_rtx_queue()
    5273   inet_csk(sk)->icsk_ack.pending |=   in tcp_data_queue()
    [all …]
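
The `tcp_set_rto()` hit at line 987 derives `icsk_rto` from the smoothed RTT state. A sketch of the RFC 6298 estimator behind it: srtt <- 7/8·srtt + 1/8·sample, rttvar <- 3/4·rttvar + 1/4·|srtt − sample|, rto = srtt + 4·rttvar. The kernel keeps `srtt_us` left-shifted by 3 for fixed-point precision and folds the 4× factor into its variance term; this sketch uses the plain RFC form.

```c
/* RFC 6298 SRTT/RTTVAR smoothing, as a plain-arithmetic sketch of what
 * feeds tcp_set_rto() above. No fixed-point shifts, unlike the kernel. */
#include <stdint.h>

struct rtt_est {
    uint32_t srtt_us;    /* smoothed RTT */
    uint32_t rttvar_us;  /* RTT variance */
};

static void rtt_update(struct rtt_est *e, uint32_t sample_us)
{
    if (!e->srtt_us) {                       /* first measurement */
        e->srtt_us = sample_us;
        e->rttvar_us = sample_us / 2;
        return;
    }
    uint32_t err = e->srtt_us > sample_us ? e->srtt_us - sample_us
                                          : sample_us - e->srtt_us;
    e->rttvar_us = (3 * e->rttvar_us + err) / 4;
    e->srtt_us   = (7 * e->srtt_us + sample_us) / 8;
}

static uint32_t rtt_rto_us(const struct rtt_est *e)
{
    return e->srtt_us + 4 * e->rttvar_us;    /* then clamped to the RTO bounds */
}
```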
|
tcp_fastopen.c
    43    inet_csk(sk)->icsk_accept_queue.fastopenq.ctx, 1);   in tcp_fastopen_destroy_cipher()
    82    q = &inet_csk(sk)->icsk_accept_queue.fastopenq;   in tcp_fastopen_reset_cipher()
    244   struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;   in tcp_fastopen_create_child()
    248   child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,   in tcp_fastopen_create_child()
    312   fastopenq = &inet_csk(sk)->icsk_accept_queue.fastopenq;   in tcp_fastopen_queue_check()
    583   u32 timeouts = inet_csk(sk)->icsk_retransmits;   in tcp_fastopen_active_detect_blackhole()
|
tcp_output.c
    69     struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_event_new_data_sent()
    166    struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_event_data_sent()
    1163   inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);   in tcp_release_cb()
    1873   if (mtu != inet_csk(sk)->icsk_pmtu_cookie)   in tcp_current_mss()
    2904   jiffies_to_usecs(inet_csk(sk)->icsk_rto) :   in tcp_schedule_loss_probe()
    2960   inet_csk(sk)->icsk_pending = 0;   in tcp_send_loss_probe()
    2993   inet_csk(sk)->icsk_pending = 0;   in tcp_send_loss_probe()
    3545   inet_csk(sk)->icsk_rto,   in tcp_xmit_retransmit_queue()
    3943   inet_csk(sk)->icsk_rto = tcp_timeout_init(sk);   in tcp_connect_init()
    3944   inet_csk(sk)->icsk_retransmits = 0;   in tcp_connect_init()
    [all …]
|
tcp_dctcp.c
    114   inet_csk(sk)->icsk_ca_ops = &dctcp_reno;   in dctcp_init()
    186   new_state != inet_csk(sk)->icsk_ca_state)   in dctcp_state()
    227   if (inet_csk(sk)->icsk_ca_ops != &dctcp_reno) {   in dctcp_get_info()
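
`dctcp_init()` and `dctcp_state()` above maintain DCTCP's congestion estimate alongside `icsk_ca_state`. A hedged sketch of the core update (RFC 8257): alpha <- (1−g)·alpha + g·F, where F is the fraction of ECN-marked bytes in the last window, and on congestion the cwnd is cut by alpha/2. The kernel computes this in fixed point with g = 1/16 by default; floats here are for clarity only.

```c
/* Sketch of DCTCP's alpha EWMA (RFC 8257) and the resulting cwnd cut.
 * Floating point for readability; the kernel uses fixed-point shifts. */
static double dctcp_update_alpha(double alpha, double g,
                                 double marked_bytes, double acked_bytes)
{
    /* F: fraction of bytes that carried an ECN mark in the last window. */
    double f = acked_bytes > 0 ? marked_bytes / acked_bytes : 0.0;
    return (1.0 - g) * alpha + g * f;
}

static double dctcp_cwnd_after_congestion(double cwnd, double alpha)
{
    return cwnd * (1.0 - alpha / 2.0);   /* proportional, not a halving */
}
```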
|
tcp_minisocks.c
    304   const struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_time_wait()
    461   struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_ca_openreq_child()
    524   newicsk = inet_csk(newsk);   in tcp_create_openreq_child()
    845   if (req->num_timeout < READ_ONCE(inet_csk(sk)->icsk_accept_queue.rskq_defer_accept) &&   in tcp_check_req()
    858   child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,   in tcp_check_req()
    864   reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);   in tcp_check_req()
|
tcp_diag.c
    115   struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_diag_get_aux()
    146   struct inet_connection_sock *icsk = inet_csk(sk);   in tcp_diag_get_aux_size()
|
/linux/net/dccp/
output.c
    46    const struct inet_connection_sock *icsk = inet_csk(sk);   in dccp_transmit_skb()
    163   struct inet_connection_sock *icsk = inet_csk(sk);   in dccp_sync_mss()
    272   inet_csk(sk)->icsk_rto,   in dccp_xmit_packet()
    390   if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk) != 0)   in dccp_retransmit_skb()
    394   inet_csk(sk)->icsk_retransmits++;   in dccp_retransmit_skb()
    521   int err = inet_csk(sk)->icsk_af_ops->rebuild_header(sk);   in dccp_send_reset()
    546   struct inet_connection_sock *icsk = inet_csk(sk);   in dccp_connect()
    590   inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;   in dccp_send_ack()
    610   struct inet_connection_sock *icsk = inet_csk(sk);
|
minisocks.c
    40    const struct inet_connection_sock *icsk = inet_csk(sk);   in dccp_time_wait()
    86    struct inet_connection_sock *newicsk = inet_csk(newsk);   in dccp_create_openreq_child()
    189   child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,   in dccp_check_req()
|
timer.c
    33   const struct inet_connection_sock *icsk = inet_csk(sk);   in dccp_write_timeout()
    85   struct inet_connection_sock *icsk = inet_csk(sk);   in dccp_retransmit_timer()
|
ipv6.c
    157    if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst))   in dccp_v6_err()
    450    inet_csk(newsk)->icsk_af_ops = &dccp_ipv6_mapped;   in dccp_v6_request_recv_sock()
    470    dccp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);   in dccp_v6_request_recv_sock()
    542    inet_csk(newsk)->icsk_ext_hdr_len = 0;   in dccp_v6_request_recv_sock()
    544    inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +   in dccp_v6_request_recv_sock()
    826    struct inet_connection_sock *icsk = inet_csk(sk);   in dccp_v6_connect()
    1031   inet_csk(sk)->icsk_af_ops = &dccp_ipv6_af_ops;   in dccp_v6_init_sock()
|
/linux/tools/testing/selftests/bpf/progs/
bpf_dctcp.c
    167   new_state != BPF_CORE_READ_BITFIELD(inet_csk(sk), icsk_ca_state))   in BPF_PROG()
    199   if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) {   in dctcp_ece_ack_update()
    203   inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;   in dctcp_ece_ack_update()
|
bpf_tracing_net.h
    143   static inline struct inet_connection_sock *inet_csk(const struct sock *sk)   in inet_csk() function
    150   return (void *)inet_csk(sk)->icsk_ca_priv;   in inet_csk_ca()
|
bpf_cc_cubic.c
    132   (1 << inet_csk(sk)->icsk_ca_state)) {   in BPF_PROG()
    139   inet_csk(sk)->icsk_ca_state == TCP_CA_CWR) {   in BPF_PROG()
|
/linux/net/mptcp/
mptcp_diag.c
    23    return inet_sk_diag_fill(sk, inet_csk(sk), skb, cb, req, NLM_F_MULTI,   in sk_diag_dump()
    52    err = inet_sk_diag_fill(sk, inet_csk(sk), rep, cb, req, 0,   in mptcp_diag_dump_one()
    106   if (!ctx || strcmp(inet_csk(sk)->icsk_ulp_ops->name, "mptcp"))   in mptcp_diag_dump_listeners()
|
/linux/net/core/
request_sock.c
    94   fastopenq = &inet_csk(lsk)->icsk_accept_queue.fastopenq;   in reqsk_fastopen_remove()
|