Lines matching refs:ssk
296 static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk, in __mptcp_move_skb() argument
300 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in __mptcp_move_skb()
305 __skb_unlink(skb, &ssk->sk_receive_queue); in __mptcp_move_skb()
445 const struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_timeout_from_subflow() local
447 return inet_csk(ssk)->icsk_pending && !subflow->stale_count ? in mptcp_timeout_from_subflow()
448 icsk_timeout(inet_csk(ssk)) - jiffies : 0; in mptcp_timeout_from_subflow()
461 static inline bool tcp_can_send_ack(const struct sock *ssk) in tcp_can_send_ack() argument
463 return !((1 << inet_sk_state_load(ssk)) & in tcp_can_send_ack()
467 void __mptcp_subflow_send_ack(struct sock *ssk) in __mptcp_subflow_send_ack() argument
469 if (tcp_can_send_ack(ssk)) in __mptcp_subflow_send_ack()
470 tcp_send_ack(ssk); in __mptcp_subflow_send_ack()
473 static void mptcp_subflow_send_ack(struct sock *ssk) in mptcp_subflow_send_ack() argument
477 slow = lock_sock_fast(ssk); in mptcp_subflow_send_ack()
478 __mptcp_subflow_send_ack(ssk); in mptcp_subflow_send_ack()
479 unlock_sock_fast(ssk, slow); in mptcp_subflow_send_ack()
490 static void mptcp_subflow_cleanup_rbuf(struct sock *ssk, int copied) in mptcp_subflow_cleanup_rbuf() argument
494 slow = lock_sock_fast(ssk); in mptcp_subflow_cleanup_rbuf()
495 if (tcp_can_send_ack(ssk)) in mptcp_subflow_cleanup_rbuf()
496 tcp_cleanup_rbuf(ssk, copied); in mptcp_subflow_cleanup_rbuf()
497 unlock_sock_fast(ssk, slow); in mptcp_subflow_cleanup_rbuf()
500 static bool mptcp_subflow_could_cleanup(const struct sock *ssk, bool rx_empty) in mptcp_subflow_could_cleanup() argument
502 const struct inet_connection_sock *icsk = inet_csk(ssk); in mptcp_subflow_could_cleanup()
504 const struct tcp_sock *tp = tcp_sk(ssk); in mptcp_subflow_could_cleanup()
525 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_cleanup_rbuf() local
527 if (cleanup || mptcp_subflow_could_cleanup(ssk, rx_empty)) in mptcp_cleanup_rbuf()
528 mptcp_subflow_cleanup_rbuf(ssk, copied); in mptcp_cleanup_rbuf()
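
The hits above (461-528) repeat one shape: an msk-level helper walks the subflow list and pokes each subflow's TCP socket under its fast lock, acking or cleaning the receive buffer only in states where TCP may send. A minimal sketch of that pattern, assuming the MPTCP-internal mptcp_for_each_subflow() iterator and mptcp_subflow_tcp_sock() accessor from net/mptcp/protocol.h; sketch_ack_all_subflows() itself is hypothetical, not an in-tree helper:

        #include <net/sock.h>
        #include <net/tcp.h>
        #include "protocol.h"   /* MPTCP-internal header, in-tree build only */

        /* Sketch: ack every subflow of an msk, mirroring the
         * lock_sock_fast()/tcp_can_send_ack() pattern in the hits above.
         * Caller must hold the msk socket lock so the conn_list walk is stable.
         */
        static void sketch_ack_all_subflows(struct mptcp_sock *msk)
        {
                struct mptcp_subflow_context *subflow;

                mptcp_for_each_subflow(msk, subflow) {
                        struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
                        bool slow = lock_sock_fast(ssk);

                        /* same state filter as tcp_can_send_ack() at 461 */
                        if (!((1 << inet_sk_state_load(ssk)) &
                              (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_TIME_WAIT |
                               TCPF_CLOSE | TCPF_LISTEN)))
                                tcp_send_ack(ssk);
                        unlock_sock_fast(ssk, slow);
                }
        }
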
582 static void mptcp_dss_corruption(struct mptcp_sock *msk, struct sock *ssk) in mptcp_dss_corruption() argument
584 if (!mptcp_try_fallback(ssk, MPTCP_MIB_DSSCORRUPTIONFALLBACK)) { in mptcp_dss_corruption()
585 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_DSSCORRUPTIONRESET); in mptcp_dss_corruption()
586 mptcp_subflow_reset(ssk); in mptcp_dss_corruption()
591 struct sock *ssk) in __mptcp_move_skbs_from_subflow() argument
593 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in __mptcp_move_skbs_from_subflow()
599 pr_debug("msk=%p ssk=%p\n", msk, ssk); in __mptcp_move_skbs_from_subflow()
600 tp = tcp_sk(ssk); in __mptcp_move_skbs_from_subflow()
614 skb = skb_peek(&ssk->sk_receive_queue); in __mptcp_move_skbs_from_subflow()
635 ret = __mptcp_move_skb(msk, ssk, skb, offset, len) || ret; in __mptcp_move_skbs_from_subflow()
640 mptcp_dss_corruption(msk, ssk); in __mptcp_move_skbs_from_subflow()
645 mptcp_dss_corruption(msk, ssk); in __mptcp_move_skbs_from_subflow()
648 sk_eat_skb(ssk, skb); in __mptcp_move_skbs_from_subflow()
652 more_data_avail = mptcp_subflow_data_available(ssk); in __mptcp_move_skbs_from_subflow()
706 static bool __mptcp_subflow_error_report(struct sock *sk, struct sock *ssk) in __mptcp_subflow_error_report() argument
708 int err = sock_error(ssk); in __mptcp_subflow_error_report()
725 ssk_state = inet_sk_state_load(ssk); in __mptcp_subflow_error_report()
749 static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk) in move_skbs_to_msk() argument
754 moved = __mptcp_move_skbs_from_subflow(msk, ssk); in move_skbs_to_msk()
756 if (unlikely(ssk->sk_err)) { in move_skbs_to_msk()
773 static void __mptcp_rcvbuf_update(struct sock *sk, struct sock *ssk) in __mptcp_rcvbuf_update() argument
775 if (unlikely(ssk->sk_rcvbuf > sk->sk_rcvbuf)) in __mptcp_rcvbuf_update()
776 WRITE_ONCE(sk->sk_rcvbuf, ssk->sk_rcvbuf); in __mptcp_rcvbuf_update()
779 static void __mptcp_data_ready(struct sock *sk, struct sock *ssk) in __mptcp_data_ready() argument
783 __mptcp_rcvbuf_update(sk, ssk); in __mptcp_data_ready()
786 if (move_skbs_to_msk(msk, ssk) && mptcp_epollin_ready(sk)) in __mptcp_data_ready()
790 void mptcp_data_ready(struct sock *sk, struct sock *ssk) in mptcp_data_ready() argument
792 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_data_ready()
803 __mptcp_data_ready(sk, ssk); in mptcp_data_ready()
809 static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk) in mptcp_subflow_joined() argument
811 mptcp_subflow_ctx(ssk)->map_seq = READ_ONCE(msk->ack_seq); in mptcp_subflow_joined()
813 mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC); in mptcp_subflow_joined()
816 static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk) in __mptcp_finish_join() argument
828 mptcp_subflow_joined(msk, ssk); in __mptcp_finish_join()
834 if (sk->sk_socket && !ssk->sk_socket) in __mptcp_finish_join()
835 mptcp_sock_graft(ssk, sk->sk_socket); in __mptcp_finish_join()
837 mptcp_subflow_ctx(ssk)->subflow_id = msk->subflow_id++; in __mptcp_finish_join()
838 mptcp_sockopt_sync_locked(msk, ssk); in __mptcp_finish_join()
840 __mptcp_propagate_sndbuf(sk, ssk); in __mptcp_finish_join()
850 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_flush_join_list() local
851 bool slow = lock_sock_fast(ssk); in __mptcp_flush_join_list()
854 if (!__mptcp_finish_join(msk, ssk)) in __mptcp_flush_join_list()
855 mptcp_subflow_reset(ssk); in __mptcp_flush_join_list()
856 unlock_sock_fast(ssk, slow); in __mptcp_flush_join_list()
1017 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_enter_memory_pressure() local
1020 tcp_enter_memory_pressure(ssk); in mptcp_enter_memory_pressure()
1021 sk_stream_moderate_sndbuf(ssk); in mptcp_enter_memory_pressure()
1068 static int mptcp_check_allowed_size(const struct mptcp_sock *msk, struct sock *ssk, in mptcp_check_allowed_size() argument
1080 if (unlikely(tcp_sk(ssk)->snd_wnd < mptcp_snd_wnd)) { in mptcp_check_allowed_size()
1081 tcp_sk(ssk)->snd_wnd = min_t(u64, U32_MAX, mptcp_snd_wnd); in mptcp_check_allowed_size()
1082 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_SNDWNDSHARED); in mptcp_check_allowed_size()
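
mptcp_check_allowed_size() (hits 1068-1082) treats the send window as an msk-level resource: when a subflow's own TCP snd_wnd is smaller than the MPTCP-level window still available, the subflow's view is lifted so the shared window can be used, and the SNDWNDSHARED MIB counts the event. A hedged sketch of just the clamp, with mptcp_snd_wnd standing in for the remaining msk-level window (computed from context not shown in the hits):

        #include <net/tcp.h>

        /* Sketch: let the subflow use the shared MPTCP-level send window
         * even if its own TCP-level advertisement is currently smaller.
         */
        static void sketch_share_snd_wnd(struct sock *ssk, u64 mptcp_snd_wnd)
        {
                if (unlikely(tcp_sk(ssk)->snd_wnd < mptcp_snd_wnd))
                        tcp_sk(ssk)->snd_wnd = min_t(u64, U32_MAX, mptcp_snd_wnd);
        }
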
1117 static struct sk_buff *__mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp) in __mptcp_alloc_tx_skb() argument
1125 if (likely(sk_wmem_schedule(ssk, skb->truesize))) { in __mptcp_alloc_tx_skb()
1126 tcp_skb_entail(ssk, skb); in __mptcp_alloc_tx_skb()
1134 static struct sk_buff *mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held) in mptcp_alloc_tx_skb() argument
1138 return __mptcp_alloc_tx_skb(sk, ssk, gfp); in mptcp_alloc_tx_skb()
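
The __mptcp_alloc_tx_skb()/mptcp_alloc_tx_skb() hits (1117-1138) show the tx path charging a fresh skb to the subflow socket before entailing it on that socket's TCP write queue, with allocation strength depending on whether the caller already holds the msk data lock. A simplified sketch of that shape (the in-tree version also attaches the MPTCP skb extension, omitted here; the wrapper name is hypothetical):

        #include <net/tcp.h>

        /* Sketch: allocate a tx skb for a subflow. If TCP's memory
         * accounting cannot absorb it, free the skb instead of queueing.
         */
        static struct sk_buff *sketch_alloc_tx_skb(struct sock *ssk,
                                                   bool data_lock_held)
        {
                gfp_t gfp = data_lock_held ? GFP_ATOMIC : ssk->sk_allocation;
                struct sk_buff *skb;

                skb = alloc_skb_fclone(MAX_TCP_HEADER, gfp);
                if (!skb)
                        return NULL;

                skb_reserve(skb, MAX_TCP_HEADER);       /* room for headers */
                if (likely(sk_wmem_schedule(ssk, skb->truesize))) {
                        tcp_skb_entail(ssk, skb);       /* tail of ssk write queue */
                        return skb;
                }
                kfree_skb(skb);
                return NULL;
        }
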
1154 struct sock *ssk, in mptcp_update_infinite_map() argument
1163 if (!mptcp_try_fallback(ssk, MPTCP_MIB_INFINITEMAPTX)) { in mptcp_update_infinite_map()
1164 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_FALLBACKFAILED); in mptcp_update_infinite_map()
1165 mptcp_subflow_reset(ssk); in mptcp_update_infinite_map()
1169 mptcp_subflow_ctx(ssk)->send_infinite_map = 0; in mptcp_update_infinite_map()
1174 static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk, in mptcp_sendmsg_frag() argument
1190 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent); in mptcp_sendmsg_frag()
1196 if (unlikely(!__tcp_can_send(ssk))) in mptcp_sendmsg_frag()
1200 if (unlikely(ssk->sk_gso_max_size > MPTCP_MAX_GSO_SIZE)) in mptcp_sendmsg_frag()
1201 ssk->sk_gso_max_size = MPTCP_MAX_GSO_SIZE; in mptcp_sendmsg_frag()
1202 info->mss_now = tcp_send_mss(ssk, &info->size_goal, info->flags); in mptcp_sendmsg_frag()
1205 skb = tcp_write_queue_tail(ssk); in mptcp_sendmsg_frag()
1216 tcp_mark_push(tcp_sk(ssk), skb); in mptcp_sendmsg_frag()
1223 tcp_mark_push(tcp_sk(ssk), skb); in mptcp_sendmsg_frag()
1230 skb = mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held); in mptcp_sendmsg_frag()
1240 copy = mptcp_check_allowed_size(msk, ssk, data_seq, copy); in mptcp_sendmsg_frag()
1244 if (snd_una != msk->snd_nxt || tcp_write_queue_tail(ssk)) { in mptcp_sendmsg_frag()
1245 tcp_remove_empty_skb(ssk); in mptcp_sendmsg_frag()
1255 if (!sk_wmem_schedule(ssk, copy)) { in mptcp_sendmsg_frag()
1256 tcp_remove_empty_skb(ssk); in mptcp_sendmsg_frag()
1270 sk_wmem_queued_add(ssk, copy); in mptcp_sendmsg_frag()
1271 sk_mem_charge(ssk, copy); in mptcp_sendmsg_frag()
1272 WRITE_ONCE(tcp_sk(ssk)->write_seq, tcp_sk(ssk)->write_seq + copy); in mptcp_sendmsg_frag()
1285 mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq; in mptcp_sendmsg_frag()
1295 mptcp_subflow_ctx(ssk)->rel_write_seq += copy; in mptcp_sendmsg_frag()
1299 tcp_push_pending_frames(ssk); in mptcp_sendmsg_frag()
1305 if (mptcp_subflow_ctx(ssk)->send_infinite_map) in mptcp_sendmsg_frag()
1306 mptcp_update_infinite_map(msk, ssk, mpext); in mptcp_sendmsg_frag()
1308 mptcp_subflow_ctx(ssk)->rel_write_seq += copy; in mptcp_sendmsg_frag()
1319 struct sock *ssk; member
1360 struct sock *ssk; in mptcp_subflow_get_send() local
1366 send_info[i].ssk = NULL; in mptcp_subflow_get_send()
1374 ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_subflow_get_send()
1383 subflow->avg_pacing_rate = READ_ONCE(ssk->sk_pacing_rate); in mptcp_subflow_get_send()
1389 linger_time = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace); in mptcp_subflow_get_send()
1391 send_info[backup].ssk = ssk; in mptcp_subflow_get_send()
1399 send_info[SSK_MODE_ACTIVE].ssk = send_info[SSK_MODE_BACKUP].ssk; in mptcp_subflow_get_send()
1412 ssk = send_info[SSK_MODE_ACTIVE].ssk; in mptcp_subflow_get_send()
1413 if (!ssk || !sk_stream_memory_free(ssk)) in mptcp_subflow_get_send()
1417 wmem = READ_ONCE(ssk->sk_wmem_queued); in mptcp_subflow_get_send()
1419 return ssk; in mptcp_subflow_get_send()
1421 subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_get_send()
1423 READ_ONCE(ssk->sk_pacing_rate) * burst, in mptcp_subflow_get_send()
1426 return ssk; in mptcp_subflow_get_send()
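
The mptcp_subflow_get_send() hits (1360-1426) are the default packet scheduler: each sendable subflow is scored by how long its already-queued bytes would linger at the current pacing rate, in 32.32 fixed point, and the lowest-lingering socket in the best class (active before backup) wins. The metric alone, as a sketch; note the in-tree code scores against the smoothed subflow->avg_pacing_rate rather than the raw sk_pacing_rate used here:

        #include <linux/math64.h>
        #include <linux/limits.h>
        #include <net/sock.h>

        /* Sketch: estimated drain time of a subflow's write queue, in
         * 32.32 fixed point (seconds << 32). Lower is better.
         */
        static u64 sketch_linger_time(const struct sock *ssk)
        {
                u64 pace = READ_ONCE(ssk->sk_pacing_rate);      /* bytes/sec */

                if (!pace)
                        return U64_MAX; /* no estimate: sort last */

                return div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, pace);
        }
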
1429 static void mptcp_push_release(struct sock *ssk, struct mptcp_sendmsg_info *info) in mptcp_push_release() argument
1431 tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle, info->size_goal); in mptcp_push_release()
1432 release_sock(ssk); in mptcp_push_release()
1471 static int __subflow_push_pending(struct sock *sk, struct sock *ssk, in __subflow_push_pending() argument
1485 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, info); in __subflow_push_pending()
1500 !sk_stream_memory_free(ssk) || in __subflow_push_pending()
1501 !mptcp_subflow_active(mptcp_subflow_ctx(ssk))) { in __subflow_push_pending()
1517 struct sock *prev_ssk = NULL, *ssk = NULL; in __mptcp_push_pending() local
1538 prev_ssk = ssk; in __mptcp_push_pending()
1539 ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_push_pending()
1540 if (ssk != prev_ssk) { in __mptcp_push_pending()
1551 lock_sock(ssk); in __mptcp_push_pending()
1556 ret = __subflow_push_pending(sk, ssk, &info); in __mptcp_push_pending()
1559 (1 << ssk->sk_state) & in __mptcp_push_pending()
1570 if (ssk) in __mptcp_push_pending()
1571 mptcp_push_release(ssk, &info); in __mptcp_push_pending()
1580 static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk, bool first) in __mptcp_subflow_push_pending() argument
1592 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in __mptcp_subflow_push_pending()
1600 ret = __subflow_push_pending(sk, ssk, &info); in __mptcp_subflow_push_pending()
1613 ret = __subflow_push_pending(sk, ssk, &info); in __mptcp_subflow_push_pending()
1622 if (xmit_ssk != ssk) { in __mptcp_subflow_push_pending()
1636 tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle, in __mptcp_subflow_push_pending()
1654 struct sock *ssk; in mptcp_sendmsg_fastopen() local
1664 ssk = __mptcp_nmpc_sk(msk); in mptcp_sendmsg_fastopen()
1665 if (IS_ERR(ssk)) in mptcp_sendmsg_fastopen()
1666 return PTR_ERR(ssk); in mptcp_sendmsg_fastopen()
1671 ssk = msk->first; in mptcp_sendmsg_fastopen()
1673 lock_sock(ssk); in mptcp_sendmsg_fastopen()
1676 ret = tcp_sendmsg_fastopen(ssk, msg, copied_syn, len, NULL); in mptcp_sendmsg_fastopen()
1679 release_sock(ssk); in mptcp_sendmsg_fastopen()
2013 struct sock *ssk; in mptcp_rcv_space_adjust() local
2016 ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_rcv_space_adjust()
2017 slow = lock_sock_fast(ssk); in mptcp_rcv_space_adjust()
2018 WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf); in mptcp_rcv_space_adjust()
2019 WRITE_ONCE(tcp_sk(ssk)->window_clamp, window_clamp); in mptcp_rcv_space_adjust()
2020 if (tcp_can_send_ack(ssk)) in mptcp_rcv_space_adjust()
2021 tcp_cleanup_rbuf(ssk, 1); in mptcp_rcv_space_adjust()
2022 unlock_sock_fast(ssk, slow); in mptcp_rcv_space_adjust()
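
mptcp_rcv_space_adjust() (hits 2013-2022) pushes the freshly autotuned receive buffer and window clamp down to every subflow under the fast lock, then lets TCP emit a window update via tcp_cleanup_rbuf(). In sketch form, with the same protocol.h assumptions as above and a deliberately simplified state gate:

        #include <net/tcp.h>

        /* Sketch: propagate msk-level receive-buffer autotuning to one
         * subflow. WRITE_ONCE() pairs with lockless readers elsewhere.
         */
        static void sketch_propagate_rcvbuf(struct sock *ssk, int rcvbuf,
                                            u32 window_clamp)
        {
                bool slow = lock_sock_fast(ssk);

                WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);
                WRITE_ONCE(tcp_sk(ssk)->window_clamp, window_clamp);
                /* simplified vs the tcp_can_send_ack() mask at 461 */
                if (inet_sk_state_load(ssk) == TCP_ESTABLISHED)
                        tcp_cleanup_rbuf(ssk, 1);       /* may send a window update */
                unlock_sock_fast(ssk, slow);
        }
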
2064 struct sock *ssk; in __mptcp_move_skbs() local
2078 ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_move_skbs()
2079 slowpath = lock_sock_fast(ssk); in __mptcp_move_skbs()
2080 ret = __mptcp_move_skbs_from_subflow(msk, ssk) || ret; in __mptcp_move_skbs()
2081 if (unlikely(ssk->sk_err)) in __mptcp_move_skbs()
2083 unlock_sock_fast(ssk, slowpath); in __mptcp_move_skbs()
2271 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_subflow_get_retrans() local
2277 if (!tcp_rtx_and_write_queues_empty(ssk)) { in mptcp_subflow_get_retrans()
2278 mptcp_pm_subflow_chk_stale(msk, ssk); in mptcp_subflow_get_retrans()
2285 backup = ssk; in mptcp_subflow_get_retrans()
2290 pick = ssk; in mptcp_subflow_get_retrans()
2345 static void __mptcp_subflow_disconnect(struct sock *ssk, in __mptcp_subflow_disconnect() argument
2349 if (((1 << ssk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) || in __mptcp_subflow_disconnect()
2354 WARN_ON_ONCE(tcp_disconnect(ssk, 0)); in __mptcp_subflow_disconnect()
2357 tcp_shutdown(ssk, SEND_SHUTDOWN); in __mptcp_subflow_disconnect()
2369 static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, in __mptcp_close_ssk() argument
2381 if (msk->in_accept_queue && msk->first == ssk && in __mptcp_close_ssk()
2382 (sock_flag(sk, SOCK_DEAD) || sock_flag(ssk, SOCK_DEAD))) { in __mptcp_close_ssk()
2386 lock_sock_nested(ssk, SINGLE_DEPTH_NESTING); in __mptcp_close_ssk()
2387 mptcp_subflow_drop_ctx(ssk); in __mptcp_close_ssk()
2391 dispose_it = msk->free_first || ssk != msk->first; in __mptcp_close_ssk()
2395 lock_sock_nested(ssk, SINGLE_DEPTH_NESTING); in __mptcp_close_ssk()
2401 ssk->sk_lingertime = 0; in __mptcp_close_ssk()
2402 sock_set_flag(ssk, SOCK_LINGER); in __mptcp_close_ssk()
2408 __mptcp_subflow_disconnect(ssk, subflow, flags); in __mptcp_close_ssk()
2409 release_sock(ssk); in __mptcp_close_ssk()
2420 if (!inet_csk(ssk)->icsk_ulp_ops) { in __mptcp_close_ssk()
2421 WARN_ON_ONCE(!sock_flag(ssk, SOCK_DEAD)); in __mptcp_close_ssk()
2425 __tcp_close(ssk, 0); in __mptcp_close_ssk()
2428 __sock_put(ssk); in __mptcp_close_ssk()
2432 __mptcp_subflow_error_report(sk, ssk); in __mptcp_close_ssk()
2433 release_sock(ssk); in __mptcp_close_ssk()
2435 sock_put(ssk); in __mptcp_close_ssk()
2437 if (ssk == msk->first) in __mptcp_close_ssk()
2462 void mptcp_close_ssk(struct sock *sk, struct sock *ssk, in mptcp_close_ssk() argument
2472 mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL); in mptcp_close_ssk()
2479 __mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_PUSH); in mptcp_close_ssk()
2495 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_close_subflow() local
2496 int ssk_state = inet_sk_state_load(ssk); in __mptcp_close_subflow()
2504 if (!skb_queue_empty_lockless(&ssk->sk_receive_queue)) in __mptcp_close_subflow()
2507 mptcp_close_ssk(sk, ssk, subflow); in __mptcp_close_subflow()
2577 struct sock *ssk; in __mptcp_retrans() local
2612 ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_retrans()
2614 lock_sock(ssk); in __mptcp_retrans()
2628 release_sock(ssk); in __mptcp_retrans()
2633 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info); in __mptcp_retrans()
2643 tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle, in __mptcp_retrans()
2649 release_sock(ssk); in __mptcp_retrans()
2687 struct sock *ssk = msk->first; in mptcp_mp_fail_no_response() local
2690 if (!ssk) in mptcp_mp_fail_no_response()
2695 slow = lock_sock_fast(ssk); in mptcp_mp_fail_no_response()
2696 mptcp_subflow_reset(ssk); in mptcp_mp_fail_no_response()
2697 WRITE_ONCE(mptcp_subflow_ctx(ssk)->fail_tout, 0); in mptcp_mp_fail_no_response()
2698 unlock_sock_fast(ssk, slow); in mptcp_mp_fail_no_response()
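
mptcp_mp_fail_no_response() (hits 2687-2698) is the give-up path: if the peer never answers an MP_FAIL, the first subflow is reset outright and its pending fail timeout cleared so the timer logic disarms. The shape, hedged as a sketch built from the internals named in the hits (mptcp_subflow_reset(), the fail_tout field):

        #include <net/sock.h>
        #include "protocol.h"

        /* Sketch: MP_FAIL response never arrived; reset the subflow and
         * clear the fail timeout.
         */
        static void sketch_mp_fail_expired(struct sock *ssk)
        {
                bool slow = lock_sock_fast(ssk);

                mptcp_subflow_reset(ssk);       /* sends a RST to the peer */
                WRITE_ONCE(mptcp_subflow_ctx(ssk)->fail_tout, 0);
                unlock_sock_fast(ssk, slow);
        }
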
2854 void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how) in mptcp_subflow_shutdown() argument
2856 lock_sock(ssk); in mptcp_subflow_shutdown()
2858 switch (ssk->sk_state) { in mptcp_subflow_shutdown()
2864 WARN_ON_ONCE(tcp_disconnect(ssk, O_NONBLOCK)); in mptcp_subflow_shutdown()
2869 ssk->sk_shutdown |= how; in mptcp_subflow_shutdown()
2870 tcp_shutdown(ssk, how); in mptcp_subflow_shutdown()
2878 pr_debug("Sending DATA_FIN on subflow %p\n", ssk); in mptcp_subflow_shutdown()
2879 tcp_send_ack(ssk); in mptcp_subflow_shutdown()
2886 release_sock(ssk); in mptcp_subflow_shutdown()
3014 struct sock *ssk; in mptcp_check_listen_stop() local
3020 ssk = mptcp_sk(sk)->first; in mptcp_check_listen_stop()
3021 if (WARN_ON_ONCE(!ssk || inet_sk_state_load(ssk) != TCP_LISTEN)) in mptcp_check_listen_stop()
3024 lock_sock_nested(ssk, SINGLE_DEPTH_NESTING); in mptcp_check_listen_stop()
3025 tcp_set_state(ssk, TCP_CLOSE); in mptcp_check_listen_stop()
3026 mptcp_subflow_queue_clean(sk, ssk); in mptcp_check_listen_stop()
3027 inet_csk_listen_stop(ssk); in mptcp_check_listen_stop()
3028 mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED); in mptcp_check_listen_stop()
3029 release_sock(ssk); in mptcp_check_listen_stop()
3062 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_close() local
3063 bool slow = lock_sock_fast_nested(ssk); in __mptcp_close()
3065 subflows_alive += ssk->sk_state != TCP_CLOSE; in __mptcp_close()
3070 if (ssk == msk->first) in __mptcp_close()
3076 ssk->sk_socket = NULL; in __mptcp_close()
3077 ssk->sk_wq = NULL; in __mptcp_close()
3078 unlock_sock_fast(ssk, slow); in __mptcp_close()
3116 static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk) in mptcp_copy_inaddrs() argument
3119 const struct ipv6_pinfo *ssk6 = inet6_sk(ssk); in mptcp_copy_inaddrs()
3122 msk->sk_v6_daddr = ssk->sk_v6_daddr; in mptcp_copy_inaddrs()
3123 msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr; in mptcp_copy_inaddrs()
3131 inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num; in mptcp_copy_inaddrs()
3132 inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport; in mptcp_copy_inaddrs()
3133 inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport; in mptcp_copy_inaddrs()
3134 inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr; in mptcp_copy_inaddrs()
3135 inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr; in mptcp_copy_inaddrs()
3136 inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr; in mptcp_copy_inaddrs()
3245 struct sock *ssk, in mptcp_sk_clone_init() argument
3281 WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd); in mptcp_sk_clone_init()
3297 WRITE_ONCE(msk->first, ssk); in mptcp_sk_clone_init()
3298 subflow = mptcp_subflow_ctx(ssk); in mptcp_sk_clone_init()
3300 sock_hold(ssk); in mptcp_sk_clone_init()
3310 mptcp_copy_inaddrs(nsk, ssk); in mptcp_sk_clone_init()
3311 __mptcp_propagate_sndbuf(nsk, ssk); in mptcp_sk_clone_init()
3313 mptcp_rcv_space_init(msk, ssk); in mptcp_sk_clone_init()
3323 void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk) in mptcp_rcv_space_init() argument
3325 const struct tcp_sock *tp = tcp_sk(ssk); in mptcp_rcv_space_init()
3379 void __mptcp_check_push(struct sock *sk, struct sock *ssk) in __mptcp_check_push() argument
3385 __mptcp_subflow_push_pending(sk, ssk, false); in __mptcp_check_push()
3457 static void schedule_3rdack_retransmission(struct sock *ssk) in schedule_3rdack_retransmission() argument
3459 struct inet_connection_sock *icsk = inet_csk(ssk); in schedule_3rdack_retransmission()
3460 struct tcp_sock *tp = tcp_sk(ssk); in schedule_3rdack_retransmission()
3463 if (READ_ONCE(mptcp_subflow_ctx(ssk)->fully_established)) in schedule_3rdack_retransmission()
3476 sk_reset_timer(ssk, &icsk->icsk_delack_timer, timeout); in schedule_3rdack_retransmission()
3479 void mptcp_subflow_process_delegated(struct sock *ssk, long status) in mptcp_subflow_process_delegated() argument
3481 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_process_delegated()
3487 __mptcp_subflow_push_pending(sk, ssk, true); in mptcp_subflow_process_delegated()
3501 schedule_3rdack_retransmission(ssk); in mptcp_subflow_process_delegated()
3529 void mptcp_finish_connect(struct sock *ssk) in mptcp_finish_connect() argument
3535 subflow = mptcp_subflow_ctx(ssk); in mptcp_finish_connect()
3549 mptcp_pm_new_connection(msk, ssk, 0); in mptcp_finish_connect()
3561 bool mptcp_finish_join(struct sock *ssk) in mptcp_finish_join() argument
3563 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_finish_join()
3583 mptcp_subflow_joined(msk, ssk); in mptcp_finish_join()
3585 mptcp_propagate_sndbuf(parent, ssk); in mptcp_finish_join()
3590 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_JOINREJECTED); in mptcp_finish_join()
3599 ret = __mptcp_finish_join(msk, ssk); in mptcp_finish_join()
3601 sock_hold(ssk); in mptcp_finish_join()
3605 sock_hold(ssk); in mptcp_finish_join()
3695 struct sock *ssk; in mptcp_connect() local
3697 ssk = __mptcp_nmpc_sk(msk); in mptcp_connect()
3698 if (IS_ERR(ssk)) in mptcp_connect()
3699 return PTR_ERR(ssk); in mptcp_connect()
3702 subflow = mptcp_subflow_ctx(ssk); in mptcp_connect()
3707 if (rcu_access_pointer(tcp_sk(ssk)->md5sig_info)) in mptcp_connect()
3714 else if (mptcp_token_new_connect(ssk) < 0) in mptcp_connect()
3729 lock_sock(ssk); in mptcp_connect()
3734 if (ssk->sk_state != TCP_CLOSE) in mptcp_connect()
3737 if (BPF_CGROUP_PRE_CONNECT_ENABLED(ssk)) { in mptcp_connect()
3738 err = ssk->sk_prot->pre_connect(ssk, uaddr, addr_len); in mptcp_connect()
3743 err = ssk->sk_prot->connect(ssk, uaddr, addr_len); in mptcp_connect()
3747 inet_assign_bit(DEFER_CONNECT, sk, inet_test_bit(DEFER_CONNECT, ssk)); in mptcp_connect()
3751 release_sock(ssk); in mptcp_connect()
3763 mptcp_copy_inaddrs(sk, ssk); in mptcp_connect()
3803 struct sock *ssk, *sk = sock->sk; in mptcp_bind() local
3807 ssk = __mptcp_nmpc_sk(msk); in mptcp_bind()
3808 if (IS_ERR(ssk)) { in mptcp_bind()
3809 err = PTR_ERR(ssk); in mptcp_bind()
3814 err = inet_bind_sk(ssk, uaddr, addr_len); in mptcp_bind()
3817 err = inet6_bind_sk(ssk, uaddr, addr_len); in mptcp_bind()
3820 mptcp_copy_inaddrs(sk, ssk); in mptcp_bind()
3831 struct sock *ssk; in mptcp_listen() local
3842 ssk = __mptcp_nmpc_sk(msk); in mptcp_listen()
3843 if (IS_ERR(ssk)) { in mptcp_listen()
3844 err = PTR_ERR(ssk); in mptcp_listen()
3851 lock_sock(ssk); in mptcp_listen()
3852 err = __inet_listen_sk(ssk, backlog); in mptcp_listen()
3853 release_sock(ssk); in mptcp_listen()
3854 mptcp_set_state(sk, inet_sk_state_load(ssk)); in mptcp_listen()
3858 mptcp_copy_inaddrs(sk, ssk); in mptcp_listen()
3859 mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CREATED); in mptcp_listen()
3871 struct sock *ssk, *newsk; in mptcp_stream_accept() local
3878 ssk = READ_ONCE(msk->first); in mptcp_stream_accept()
3879 if (!ssk) in mptcp_stream_accept()
3882 pr_debug("ssk=%p, listener=%p\n", ssk, mptcp_subflow_ctx(ssk)); in mptcp_stream_accept()
3883 newsk = inet_csk_accept(ssk, arg); in mptcp_stream_accept()
3904 MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_MPCAPABLEPASSIVEACK); in mptcp_stream_accept()
3918 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_stream_accept() local
3920 if (!ssk->sk_socket) in mptcp_stream_accept()
3921 mptcp_sock_graft(ssk, newsock); in mptcp_stream_accept()
3982 struct sock *ssk = READ_ONCE(msk->first); in mptcp_poll() local
3984 if (WARN_ON_ONCE(!ssk)) in mptcp_poll()
3987 return inet_csk_listen_poll(ssk); in mptcp_poll()
4054 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_napi_poll() local
4056 bh_lock_sock_nested(ssk); in mptcp_napi_poll()
4057 if (!sock_owned_by_user(ssk)) { in mptcp_napi_poll()
4058 mptcp_subflow_process_delegated(ssk, xchg(&subflow->delegated_status, 0)); in mptcp_napi_poll()
4068 bh_unlock_sock(ssk); in mptcp_napi_poll()
4069 sock_put(ssk); in mptcp_napi_poll()
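
The mptcp_napi_poll() hits (4054-4069) close the listing with the delegated-action pattern: work queued toward a subflow from contexts that cannot take its lock is drained in BH/NAPI context, but only if no process currently owns the socket; otherwise the pending status is handled from the socket-release path instead. A sketch, assuming the delegated_status field and mptcp_subflow_process_delegated() from the MPTCP internals (the in-tree code additionally clears the scheduled bit in the owned case, omitted here):

        #include <net/sock.h>
        #include "protocol.h"

        /* Sketch: run one subflow's delegated actions from BH context.
         * If a process owns the lock, leave the status for release_sock()
         * on that process's side to pick up.
         */
        static void sketch_napi_one(struct mptcp_subflow_context *subflow)
        {
                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

                bh_lock_sock_nested(ssk);
                if (!sock_owned_by_user(ssk))
                        mptcp_subflow_process_delegated(ssk,
                                        xchg(&subflow->delegated_status, 0));
                bh_unlock_sock(ssk);
                sock_put(ssk);  /* ref taken when the action was scheduled */
        }
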