Lines matching refs: sk
53 static void __mptcp_destroy_sock(struct sock *sk);
54 static void __mptcp_check_send_data_fin(struct sock *sk);
77 static bool mptcp_is_tcpsk(struct sock *sk) in mptcp_is_tcpsk() argument
79 struct socket *sock = sk->sk_socket; in mptcp_is_tcpsk()
81 if (unlikely(sk->sk_prot == &tcp_prot)) { in mptcp_is_tcpsk()
92 } else if (unlikely(sk->sk_prot == &tcpv6_prot)) { in mptcp_is_tcpsk()
104 struct sock *sk = (struct sock *)msk; in __mptcp_socket_create() local
108 err = mptcp_subflow_create_socket(sk, &ssock); in __mptcp_socket_create()
112 msk->first = ssock->sk; in __mptcp_socket_create()
114 subflow = mptcp_subflow_ctx(ssock->sk); in __mptcp_socket_create()
116 sock_hold(ssock->sk); in __mptcp_socket_create()
118 mptcp_sock_graft(msk->first, sk->sk_socket); in __mptcp_socket_create()
123 static void mptcp_drop(struct sock *sk, struct sk_buff *skb) in mptcp_drop() argument
125 sk_drops_add(sk, skb); in mptcp_drop()
129 static void mptcp_rmem_charge(struct sock *sk, int size) in mptcp_rmem_charge() argument
131 mptcp_sk(sk)->rmem_fwd_alloc -= size; in mptcp_rmem_charge()
134 static bool mptcp_try_coalesce(struct sock *sk, struct sk_buff *to, in mptcp_try_coalesce() argument
149 atomic_add(delta, &sk->sk_rmem_alloc); in mptcp_try_coalesce()
150 mptcp_rmem_charge(sk, delta); in mptcp_try_coalesce()
163 static void __mptcp_rmem_reclaim(struct sock *sk, int amount) in __mptcp_rmem_reclaim() argument
166 mptcp_sk(sk)->rmem_fwd_alloc -= amount << SK_MEM_QUANTUM_SHIFT; in __mptcp_rmem_reclaim()
167 __sk_mem_reduce_allocated(sk, amount); in __mptcp_rmem_reclaim()
170 static void mptcp_rmem_uncharge(struct sock *sk, int size) in mptcp_rmem_uncharge() argument
172 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_rmem_uncharge()
176 reclaimable = msk->rmem_fwd_alloc - sk_unused_reserved_mem(sk); in mptcp_rmem_uncharge()
180 __mptcp_rmem_reclaim(sk, SK_RECLAIM_CHUNK); in mptcp_rmem_uncharge()
186 struct sock *sk = skb->sk; in mptcp_rfree() local
188 atomic_sub(len, &sk->sk_rmem_alloc); in mptcp_rfree()
189 mptcp_rmem_uncharge(sk, len); in mptcp_rfree()
192 static void mptcp_set_owner_r(struct sk_buff *skb, struct sock *sk) in mptcp_set_owner_r() argument
195 skb->sk = sk; in mptcp_set_owner_r()
197 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in mptcp_set_owner_r()
198 mptcp_rmem_charge(sk, skb->truesize); in mptcp_set_owner_r()
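The charge/uncharge pairs above implement msk-level receive-memory accounting: queuing an skb moves bytes from the socket's forward allowance (msk->rmem_fwd_alloc) into sk_rmem_alloc, consuming it moves them back, and once the allowance grows past a threshold whole SK_MEM_QUANTUM pages are returned to the global pool (the kernel does this in SK_RECLAIM_CHUNK batches). A minimal userspace model of that flow, folding the chunked reclaim into a single step; the threshold, pool, and starting values are assumptions for the sketch, not kernel values:

        #include <stdio.h>

        #define SK_MEM_QUANTUM          4096
        #define RECLAIM_THRESHOLD       (1 << 21)       /* assumed, for the model */

        static int rmem_fwd_alloc;      /* models msk->rmem_fwd_alloc */
        static int sk_rmem_alloc;       /* atomic_t in the kernel */
        static long global_pool;        /* stands in for sk_memory_allocated() */

        static void rmem_charge(int size)               /* mptcp_rmem_charge() */
        {
                rmem_fwd_alloc -= size;
                sk_rmem_alloc += size;
        }

        static void rmem_uncharge(int size)             /* mptcp_rmem_uncharge() */
        {
                sk_rmem_alloc -= size;
                rmem_fwd_alloc += size;
                if (rmem_fwd_alloc > RECLAIM_THRESHOLD) {
                        int pages = rmem_fwd_alloc / SK_MEM_QUANTUM;

                        /* __mptcp_rmem_reclaim(): give whole pages back */
                        rmem_fwd_alloc -= pages * SK_MEM_QUANTUM;
                        global_pool -= pages;
                }
        }

        int main(void)
        {
                rmem_fwd_alloc = 4 * SK_MEM_QUANTUM;    /* pretend pre-allocated */
                global_pool = 4;
                rmem_charge(3000);                      /* skb queued to the msk */
                rmem_uncharge(3000);                    /* skb eaten by recvmsg */
                printf("fwd=%d rmem=%d pool=%ld\n",
                       rmem_fwd_alloc, sk_rmem_alloc, global_pool);
                return 0;
        }
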
207 struct sock *sk = (struct sock *)msk; in mptcp_data_queue_ofo() local
220 mptcp_drop(sk, skb); in mptcp_data_queue_ofo()
224 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_NODSSWINDOW); in mptcp_data_queue_ofo()
229 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUE); in mptcp_data_queue_ofo()
241 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE); in mptcp_data_queue_ofo()
242 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL); in mptcp_data_queue_ofo()
248 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOQUEUETAIL); in mptcp_data_queue_ofo()
266 mptcp_drop(sk, skb); in mptcp_data_queue_ofo()
267 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); in mptcp_data_queue_ofo()
282 mptcp_drop(sk, skb1); in mptcp_data_queue_ofo()
283 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); in mptcp_data_queue_ofo()
287 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_OFOMERGE); in mptcp_data_queue_ofo()
304 mptcp_drop(sk, skb1); in mptcp_data_queue_ofo()
305 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); in mptcp_data_queue_ofo()
313 mptcp_set_owner_r(skb, sk); in mptcp_data_queue_ofo()
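The MIB counters above track how mptcp_data_queue_ofo() files an out-of-order mapping: data fully covered by what is already queued is dropped as DUPDATA, data starting at or past the current queue tail is appended (OFOQUEUETAIL), and everything else is inserted into the rbtree and possibly merged (OFOMERGE). A toy classifier over plain 64-bit sequence numbers; the real code uses wraparound-safe after64()/before64() compares and walks an rbtree of skbs:

        #include <stdio.h>
        #include <stdint.h>

        enum ofo_action { OFO_DUP, OFO_TAIL, OFO_INSERT };

        static enum ofo_action classify(uint64_t seq, uint64_t end,
                                        uint64_t q_seq, uint64_t q_end)
        {
                if (seq >= q_seq && end <= q_end)
                        return OFO_DUP;         /* counted as MPTCP_MIB_DUPDATA */
                if (seq >= q_end)
                        return OFO_TAIL;        /* MPTCP_MIB_OFOQUEUETAIL */
                return OFO_INSERT;              /* may end in MPTCP_MIB_OFOMERGE */
        }

        int main(void)
        {
                printf("%d %d %d\n",
                       classify(100, 200, 50, 300),     /* dup */
                       classify(300, 400, 50, 300),     /* tail */
                       classify(250, 400, 50, 300));    /* insert/merge */
                return 0;
        }
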
316 static bool mptcp_rmem_schedule(struct sock *sk, struct sock *ssk, int size) in mptcp_rmem_schedule() argument
318 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_rmem_schedule()
327 if (!__sk_mem_raise_allocated(sk, size, amt, SK_MEM_RECV)) { in mptcp_rmem_schedule()
343 struct sock *sk = (struct sock *)msk; in __mptcp_move_skb() local
353 if (!mptcp_rmem_schedule(sk, ssk, skb->truesize)) in __mptcp_move_skb()
370 tail = skb_peek_tail(&sk->sk_receive_queue); in __mptcp_move_skb()
371 if (tail && mptcp_try_coalesce(sk, tail, skb)) in __mptcp_move_skb()
374 mptcp_set_owner_r(skb, sk); in __mptcp_move_skb()
375 __skb_queue_tail(&sk->sk_receive_queue, skb); in __mptcp_move_skb()
385 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); in __mptcp_move_skb()
387 mptcp_drop(sk, skb); in __mptcp_move_skb()
391 static void mptcp_stop_timer(struct sock *sk) in mptcp_stop_timer() argument
393 struct inet_connection_sock *icsk = inet_csk(sk); in mptcp_stop_timer()
395 sk_stop_timer(sk, &icsk->icsk_retransmit_timer); in mptcp_stop_timer()
396 mptcp_sk(sk)->timer_ival = 0; in mptcp_stop_timer()
399 static void mptcp_close_wake_up(struct sock *sk) in mptcp_close_wake_up() argument
401 if (sock_flag(sk, SOCK_DEAD)) in mptcp_close_wake_up()
404 sk->sk_state_change(sk); in mptcp_close_wake_up()
405 if (sk->sk_shutdown == SHUTDOWN_MASK || in mptcp_close_wake_up()
406 sk->sk_state == TCP_CLOSE) in mptcp_close_wake_up()
407 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP); in mptcp_close_wake_up()
409 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); in mptcp_close_wake_up()
412 static bool mptcp_pending_data_fin_ack(struct sock *sk) in mptcp_pending_data_fin_ack() argument
414 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_pending_data_fin_ack()
417 ((1 << sk->sk_state) & in mptcp_pending_data_fin_ack()
422 static void mptcp_check_data_fin_ack(struct sock *sk) in mptcp_check_data_fin_ack() argument
424 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_check_data_fin_ack()
427 if (mptcp_pending_data_fin_ack(sk)) { in mptcp_check_data_fin_ack()
430 switch (sk->sk_state) { in mptcp_check_data_fin_ack()
432 inet_sk_state_store(sk, TCP_FIN_WAIT2); in mptcp_check_data_fin_ack()
436 inet_sk_state_store(sk, TCP_CLOSE); in mptcp_check_data_fin_ack()
440 mptcp_close_wake_up(sk); in mptcp_check_data_fin_ack()
444 static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq) in mptcp_pending_data_fin() argument
446 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_pending_data_fin()
449 ((1 << sk->sk_state) & in mptcp_pending_data_fin()
464 static void mptcp_set_datafin_timeout(const struct sock *sk) in mptcp_set_datafin_timeout() argument
466 struct inet_connection_sock *icsk = inet_csk(sk); in mptcp_set_datafin_timeout()
468 mptcp_sk(sk)->timer_ival = min(TCP_RTO_MAX, in mptcp_set_datafin_timeout()
472 static void __mptcp_set_timeout(struct sock *sk, long tout) in __mptcp_set_timeout() argument
474 mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN; in __mptcp_set_timeout()
485 static void mptcp_set_timeout(struct sock *sk) in mptcp_set_timeout() argument
490 mptcp_for_each_subflow(mptcp_sk(sk), subflow) in mptcp_set_timeout()
492 __mptcp_set_timeout(sk, tout); in mptcp_set_timeout()
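mptcp_set_datafin_timeout() above is plain exponential backoff: TCP_RTO_MIN doubled once per retransmission and clamped at TCP_RTO_MAX. A standalone sketch, assuming HZ=1000 so the kernel constants come out to 200 ms and 120 s worth of jiffies:

        #include <stdio.h>

        #define HZ              1000
        #define TCP_RTO_MIN     (HZ / 5)        /* 200 ms in jiffies */
        #define TCP_RTO_MAX     (120 * HZ)      /* 120 s in jiffies */

        static unsigned int datafin_timeout(int icsk_retransmits)
        {
                unsigned long tout = (unsigned long)TCP_RTO_MIN << icsk_retransmits;

                return tout < TCP_RTO_MAX ? tout : TCP_RTO_MAX;
        }

        int main(void)
        {
                for (int r = 0; r < 12; r++)
                        printf("retransmits=%2d timer_ival=%u jiffies\n",
                               r, datafin_timeout(r));
                return 0;
        }
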
546 struct sock *sk = (struct sock *)msk; in mptcp_cleanup_rbuf() local
547 int space = __mptcp_space(sk); in mptcp_cleanup_rbuf()
551 rx_empty = !__mptcp_rmem(sk); in mptcp_cleanup_rbuf()
561 static bool mptcp_check_data_fin(struct sock *sk) in mptcp_check_data_fin() argument
563 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_check_data_fin()
583 if (mptcp_pending_data_fin(sk, &rcv_data_fin_seq)) { in mptcp_check_data_fin()
587 sk->sk_shutdown |= RCV_SHUTDOWN; in mptcp_check_data_fin()
590 switch (sk->sk_state) { in mptcp_check_data_fin()
592 inet_sk_state_store(sk, TCP_CLOSE_WAIT); in mptcp_check_data_fin()
595 inet_sk_state_store(sk, TCP_CLOSING); in mptcp_check_data_fin()
598 inet_sk_state_store(sk, TCP_CLOSE); in mptcp_check_data_fin()
608 mptcp_close_wake_up(sk); in mptcp_check_data_fin()
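Between them, mptcp_check_data_fin_ack() and mptcp_check_data_fin() above walk the same close handshake TCP uses, one transition per event: the peer's DATA_FIN arriving, or our own DATA_FIN being acked. A compact model of just those two switches, with state names local to the sketch:

        #include <stdio.h>

        enum st { ESTABLISHED, FIN_WAIT1, FIN_WAIT2, CLOSING, CLOSE_WAIT,
                  LAST_ACK, CLOSED };

        static enum st on_rcv_data_fin(enum st s)       /* mptcp_check_data_fin() */
        {
                switch (s) {
                case ESTABLISHED:       return CLOSE_WAIT;
                case FIN_WAIT1:         return CLOSING;
                case FIN_WAIT2:         return CLOSED;
                default:                return s;
                }
        }

        static enum st on_data_fin_acked(enum st s)     /* mptcp_check_data_fin_ack() */
        {
                switch (s) {
                case FIN_WAIT1:                 return FIN_WAIT2;
                case CLOSING: case LAST_ACK:    return CLOSED;
                default:                        return s;
                }
        }

        int main(void)
        {
                enum st s = FIN_WAIT1;

                s = on_data_fin_acked(s);       /* our DATA_FIN acked first */
                s = on_rcv_data_fin(s);         /* then the peer's arrives */
                printf("final state=%d (CLOSED=%d)\n", s, CLOSED);
                return 0;
        }
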
618 struct sock *sk = (struct sock *)msk; in __mptcp_move_skbs_from_subflow() local
625 sk_rbuf = READ_ONCE(sk->sk_rcvbuf); in __mptcp_move_skbs_from_subflow()
627 if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { in __mptcp_move_skbs_from_subflow()
631 WRITE_ONCE(sk->sk_rcvbuf, ssk_rbuf); in __mptcp_move_skbs_from_subflow()
696 if (atomic_read(&sk->sk_rmem_alloc) > sk_rbuf) { in __mptcp_move_skbs_from_subflow()
708 struct sock *sk = (struct sock *)msk; in __mptcp_ofo_queue() local
726 mptcp_drop(sk, skb); in __mptcp_ofo_queue()
727 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_DUPDATA); in __mptcp_ofo_queue()
732 tail = skb_peek_tail(&sk->sk_receive_queue); in __mptcp_ofo_queue()
741 __skb_queue_tail(&sk->sk_receive_queue, skb); in __mptcp_ofo_queue()
754 struct sock *sk = (struct sock *)msk; in move_skbs_to_msk() local
760 if (!sock_owned_by_user(sk)) in move_skbs_to_msk()
761 __mptcp_error_report(sk); in move_skbs_to_msk()
771 if (mptcp_pending_data_fin(sk, NULL)) in move_skbs_to_msk()
772 mptcp_schedule_work(sk); in move_skbs_to_msk()
776 void mptcp_data_ready(struct sock *sk, struct sock *ssk) in mptcp_data_ready() argument
779 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_data_ready()
790 sk_rbuf = READ_ONCE(sk->sk_rcvbuf); in mptcp_data_ready()
795 if (__mptcp_rmem(sk) > sk_rbuf) { in mptcp_data_ready()
796 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RCVPRUNED); in mptcp_data_ready()
801 mptcp_data_lock(sk); in mptcp_data_ready()
803 sk->sk_data_ready(sk); in mptcp_data_ready()
805 mptcp_data_unlock(sk); in mptcp_data_ready()
851 static bool mptcp_timer_pending(struct sock *sk) in mptcp_timer_pending() argument
853 return timer_pending(&inet_csk(sk)->icsk_retransmit_timer); in mptcp_timer_pending()
856 static void mptcp_reset_timer(struct sock *sk) in mptcp_reset_timer() argument
858 struct inet_connection_sock *icsk = inet_csk(sk); in mptcp_reset_timer()
862 if (unlikely(inet_sk_state_load(sk) == TCP_CLOSE)) in mptcp_reset_timer()
865 tout = mptcp_sk(sk)->timer_ival; in mptcp_reset_timer()
866 sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + tout); in mptcp_reset_timer()
869 bool mptcp_schedule_work(struct sock *sk) in mptcp_schedule_work() argument
871 if (inet_sk_state_load(sk) != TCP_CLOSE && in mptcp_schedule_work()
872 schedule_work(&mptcp_sk(sk)->work)) { in mptcp_schedule_work()
876 sock_hold(sk); in mptcp_schedule_work()
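mptcp_schedule_work() above pairs sock_hold() with a successful schedule_work(): the work queues at most once, so exactly one extra reference is taken per pending cycle and dropped by the single sock_put() at the end of mptcp_worker(). A tiny single-threaded model of that refcount invariant, where the queued flag stands in for the workqueue's own pending bit:

        #include <stdio.h>
        #include <stdbool.h>

        static int refcnt = 1;
        static bool queued;

        static bool schedule_work_model(void)   /* mimics schedule_work() */
        {
                if (queued)
                        return false;
                queued = true;
                return true;
        }

        static void mptcp_schedule_work_model(void)
        {
                if (schedule_work_model())
                        refcnt++;               /* sock_hold() */
        }

        static void worker_model(void)
        {
                queued = false;
                refcnt--;                       /* sock_put() ending mptcp_worker() */
        }

        int main(void)
        {
                mptcp_schedule_work_model();
                mptcp_schedule_work_model();    /* second call: already queued */
                worker_model();
                printf("refcnt=%d (expect 1)\n", refcnt);
                return 0;
        }
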
882 void mptcp_subflow_eof(struct sock *sk) in mptcp_subflow_eof() argument
884 if (!test_and_set_bit(MPTCP_WORK_EOF, &mptcp_sk(sk)->flags)) in mptcp_subflow_eof()
885 mptcp_schedule_work(sk); in mptcp_subflow_eof()
891 struct sock *sk = (struct sock *)msk; in mptcp_check_for_eof() local
899 if (!(sk->sk_shutdown & RCV_SHUTDOWN)) { in mptcp_check_for_eof()
903 sk->sk_shutdown |= RCV_SHUTDOWN; in mptcp_check_for_eof()
906 sk->sk_data_ready(sk); in mptcp_check_for_eof()
909 switch (sk->sk_state) { in mptcp_check_for_eof()
911 inet_sk_state_store(sk, TCP_CLOSE_WAIT); in mptcp_check_for_eof()
914 inet_sk_state_store(sk, TCP_CLOSING); in mptcp_check_for_eof()
917 inet_sk_state_store(sk, TCP_CLOSE); in mptcp_check_for_eof()
922 mptcp_close_wake_up(sk); in mptcp_check_for_eof()
928 struct sock *sk = (struct sock *)msk; in mptcp_subflow_recv_lookup() local
930 sock_owned_by_me(sk); in mptcp_subflow_recv_lookup()
969 static void __mptcp_mem_reclaim_partial(struct sock *sk) in __mptcp_mem_reclaim_partial() argument
971 int reclaimable = mptcp_sk(sk)->rmem_fwd_alloc - sk_unused_reserved_mem(sk); in __mptcp_mem_reclaim_partial()
973 lockdep_assert_held_once(&sk->sk_lock.slock); in __mptcp_mem_reclaim_partial()
975 __mptcp_rmem_reclaim(sk, reclaimable - 1); in __mptcp_mem_reclaim_partial()
976 sk_mem_reclaim_partial(sk); in __mptcp_mem_reclaim_partial()
979 static void mptcp_mem_reclaim_partial(struct sock *sk) in mptcp_mem_reclaim_partial() argument
981 mptcp_data_lock(sk); in mptcp_mem_reclaim_partial()
982 __mptcp_mem_reclaim_partial(sk); in mptcp_mem_reclaim_partial()
983 mptcp_data_unlock(sk); in mptcp_mem_reclaim_partial()
986 static void dfrag_uncharge(struct sock *sk, int len) in dfrag_uncharge() argument
988 sk_mem_uncharge(sk, len); in dfrag_uncharge()
989 sk_wmem_queued_add(sk, -len); in dfrag_uncharge()
992 static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag) in dfrag_clear() argument
997 dfrag_uncharge(sk, len); in dfrag_clear()
1001 static void __mptcp_clean_una(struct sock *sk) in __mptcp_clean_una() argument
1003 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_clean_una()
1024 WRITE_ONCE(msk->first_pending, mptcp_send_next(sk)); in __mptcp_clean_una()
1027 dfrag_clear(sk, dfrag); in __mptcp_clean_una()
1031 dfrag = mptcp_rtx_head(sk); in __mptcp_clean_una()
1049 dfrag_uncharge(sk, delta); in __mptcp_clean_una()
1058 if (cleaned && tcp_under_memory_pressure(sk)) in __mptcp_clean_una()
1059 __mptcp_mem_reclaim_partial(sk); in __mptcp_clean_una()
1063 if (mptcp_timer_pending(sk) && !mptcp_data_fin_enabled(msk)) in __mptcp_clean_una()
1064 mptcp_stop_timer(sk); in __mptcp_clean_una()
1066 mptcp_reset_timer(sk); in __mptcp_clean_una()
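__mptcp_clean_una() above releases transmit fragments as msk->snd_una advances: fully acked dfrags are freed via dfrag_clear(), while a partially acked head is trimmed in place and only the delta is uncharged. The trimming arithmetic, modeled on a single fragment; field names follow struct mptcp_data_frag, and the rtx list itself is omitted:

        #include <stdio.h>
        #include <stdint.h>

        struct dfrag {
                uint64_t data_seq;
                int data_len;
                int offset;             /* bytes already consumed from the page */
        };

        /* returns the number of bytes uncharged */
        static int clean_una(struct dfrag *d, uint64_t snd_una)
        {
                if (snd_una <= d->data_seq)
                        return 0;
                if (snd_una >= d->data_seq + d->data_len) {
                        int len = d->data_len;

                        d->data_len = 0;        /* whole dfrag freed */
                        return len;
                }
                int delta = snd_una - d->data_seq;      /* partial ack: trim */

                d->data_seq += delta;
                d->data_len -= delta;
                d->offset += delta;
                return delta;
        }

        int main(void)
        {
                struct dfrag d = { .data_seq = 1000, .data_len = 500, .offset = 0 };

                printf("uncharged=%d left=%d\n", clean_una(&d, 1200), d.data_len);
                return 0;
        }
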
1070 static void __mptcp_clean_una_wakeup(struct sock *sk) in __mptcp_clean_una_wakeup() argument
1072 lockdep_assert_held_once(&sk->sk_lock.slock); in __mptcp_clean_una_wakeup()
1074 __mptcp_clean_una(sk); in __mptcp_clean_una_wakeup()
1075 mptcp_write_space(sk); in __mptcp_clean_una_wakeup()
1078 static void mptcp_clean_una_wakeup(struct sock *sk) in mptcp_clean_una_wakeup() argument
1080 mptcp_data_lock(sk); in mptcp_clean_una_wakeup()
1081 __mptcp_clean_una_wakeup(sk); in mptcp_clean_una_wakeup()
1082 mptcp_data_unlock(sk); in mptcp_clean_una_wakeup()
1085 static void mptcp_enter_memory_pressure(struct sock *sk) in mptcp_enter_memory_pressure() argument
1088 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_enter_memory_pressure()
1091 sk_stream_moderate_sndbuf(sk); in mptcp_enter_memory_pressure()
1105 static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag) in mptcp_page_frag_refill() argument
1108 pfrag, sk->sk_allocation))) in mptcp_page_frag_refill()
1111 mptcp_enter_memory_pressure(sk); in mptcp_page_frag_refill()
1169 static struct sk_buff *__mptcp_do_alloc_tx_skb(struct sock *sk, gfp_t gfp) in __mptcp_do_alloc_tx_skb() argument
1183 mptcp_enter_memory_pressure(sk); in __mptcp_do_alloc_tx_skb()
1188 static struct sk_buff *__mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, gfp_t gfp) in __mptcp_alloc_tx_skb() argument
1192 skb = __mptcp_do_alloc_tx_skb(sk, gfp); in __mptcp_alloc_tx_skb()
1204 static struct sk_buff *mptcp_alloc_tx_skb(struct sock *sk, struct sock *ssk, bool data_lock_held) in mptcp_alloc_tx_skb() argument
1206 gfp_t gfp = data_lock_held ? GFP_ATOMIC : sk->sk_allocation; in mptcp_alloc_tx_skb()
1208 if (unlikely(tcp_under_memory_pressure(sk))) { in mptcp_alloc_tx_skb()
1210 __mptcp_mem_reclaim_partial(sk); in mptcp_alloc_tx_skb()
1212 mptcp_mem_reclaim_partial(sk); in mptcp_alloc_tx_skb()
1214 return __mptcp_alloc_tx_skb(sk, ssk, gfp); in mptcp_alloc_tx_skb()
1229 static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk, in mptcp_sendmsg_frag() argument
1235 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_sendmsg_frag()
1279 skb = mptcp_alloc_tx_skb(sk, ssk, info->data_lock_held); in mptcp_sendmsg_frag()
1405 struct sock *sk = (struct sock *)msk; in mptcp_subflow_get_send() local
1412 sock_owned_by_me(sk); in mptcp_subflow_get_send()
1424 mptcp_set_timeout(sk); in mptcp_subflow_get_send()
1455 __mptcp_set_timeout(sk, tout); in mptcp_subflow_get_send()
1502 static void mptcp_check_and_set_pending(struct sock *sk) in mptcp_check_and_set_pending() argument
1504 if (mptcp_send_head(sk) && in mptcp_check_and_set_pending()
1505 !test_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags)) in mptcp_check_and_set_pending()
1506 set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags); in mptcp_check_and_set_pending()
1509 void __mptcp_push_pending(struct sock *sk, unsigned int flags) in __mptcp_push_pending() argument
1512 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_push_pending()
1519 while ((dfrag = mptcp_send_head(sk))) { in __mptcp_push_pending()
1545 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info); in __mptcp_push_pending()
1557 WRITE_ONCE(msk->first_pending, mptcp_send_next(sk)); in __mptcp_push_pending()
1566 if (!mptcp_timer_pending(sk)) in __mptcp_push_pending()
1567 mptcp_reset_timer(sk); in __mptcp_push_pending()
1569 __mptcp_check_send_data_fin(sk); in __mptcp_push_pending()
1572 static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk) in __mptcp_subflow_push_pending() argument
1574 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_subflow_push_pending()
1584 while ((dfrag = mptcp_send_head(sk))) { in __mptcp_subflow_push_pending()
1595 xmit_ssk = first ? ssk : mptcp_subflow_get_send(mptcp_sk(sk)); in __mptcp_subflow_push_pending()
1604 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info); in __mptcp_subflow_push_pending()
1615 WRITE_ONCE(msk->first_pending, mptcp_send_next(sk)); in __mptcp_subflow_push_pending()
1625 if (!mptcp_timer_pending(sk)) in __mptcp_subflow_push_pending()
1626 mptcp_reset_timer(sk); in __mptcp_subflow_push_pending()
1630 mptcp_schedule_work(sk); in __mptcp_subflow_push_pending()
1634 static void mptcp_set_nospace(struct sock *sk) in mptcp_set_nospace() argument
1637 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); in mptcp_set_nospace()
1640 set_bit(MPTCP_NOSPACE, &mptcp_sk(sk)->flags); in mptcp_set_nospace()
1643 static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) in mptcp_sendmsg() argument
1645 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_sendmsg()
1658 lock_sock(sk); in mptcp_sendmsg()
1660 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); in mptcp_sendmsg()
1662 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) { in mptcp_sendmsg()
1663 ret = sk_stream_wait_connect(sk, &timeo); in mptcp_sendmsg()
1668 pfrag = sk_page_frag(sk); in mptcp_sendmsg()
1676 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) { in mptcp_sendmsg()
1684 dfrag = mptcp_pending_tail(sk); in mptcp_sendmsg()
1687 if (!sk_stream_memory_free(sk)) in mptcp_sendmsg()
1690 if (!mptcp_page_frag_refill(sk, pfrag)) in mptcp_sendmsg()
1706 if (!sk_wmem_schedule(sk, total_ts)) in mptcp_sendmsg()
1716 sk->sk_forward_alloc -= total_ts; in mptcp_sendmsg()
1726 sk_wmem_queued_add(sk, frag_truesize); in mptcp_sendmsg()
1740 mptcp_set_nospace(sk); in mptcp_sendmsg()
1741 __mptcp_push_pending(sk, msg->msg_flags); in mptcp_sendmsg()
1742 ret = sk_stream_wait_memory(sk, &timeo); in mptcp_sendmsg()
1748 __mptcp_push_pending(sk, msg->msg_flags); in mptcp_sendmsg()
1751 release_sock(sk); in mptcp_sendmsg()
1814 struct sock *sk = (struct sock *)msk; in mptcp_rcv_space_adjust() local
1818 sock_owned_by_me(sk); in mptcp_rcv_space_adjust()
1854 if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf && in mptcp_rcv_space_adjust()
1855 !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { in mptcp_rcv_space_adjust()
1867 while (tcp_win_from_space(sk, rcvmem) < advmss) in mptcp_rcv_space_adjust()
1872 sock_net(sk)->ipv4.sysctl_tcp_rmem[2]); in mptcp_rcv_space_adjust()
1874 if (rcvbuf > sk->sk_rcvbuf) { in mptcp_rcv_space_adjust()
1877 window_clamp = tcp_win_from_space(sk, rcvbuf); in mptcp_rcv_space_adjust()
1878 WRITE_ONCE(sk->sk_rcvbuf, rcvbuf); in mptcp_rcv_space_adjust()
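mptcp_rcv_space_adjust() above autotunes sk_rcvbuf the way TCP's receiver-side DRS does: grow an assumed per-segment truesize until one MSS of advertised window fits into it, scale the wanted window into a byte budget, and clamp at tcp_rmem[2]. A rough standalone sketch, assuming the default tcp_adv_win_scale of 1 (half of each buffer byte becomes window) and made-up starting values:

        #include <stdio.h>

        static int win_from_space(int space)    /* tcp_win_from_space(), scale=1 */
        {
                return space - (space >> 1);
        }

        int main(void)
        {
                const int advmss = 1460;
                const int tcp_rmem_max = 6291456;       /* typical tcp_rmem[2] */
                long long rcvwin = 10 * advmss;         /* wanted window, bytes */
                int rcvmem = 2048;                      /* assumed starting truesize */

                while (win_from_space(rcvmem) < advmss)
                        rcvmem += 128;

                long long rcvbuf = rcvwin * rcvmem / advmss;
                if (rcvbuf > tcp_rmem_max)
                        rcvbuf = tcp_rmem_max;
                printf("rcvmem=%d rcvbuf=%lld\n", rcvmem, rcvbuf);
                return 0;
        }
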
1905 static void __mptcp_update_rmem(struct sock *sk) in __mptcp_update_rmem() argument
1907 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_update_rmem()
1912 atomic_sub(msk->rmem_released, &sk->sk_rmem_alloc); in __mptcp_update_rmem()
1913 mptcp_rmem_uncharge(sk, msk->rmem_released); in __mptcp_update_rmem()
1917 static void __mptcp_splice_receive_queue(struct sock *sk) in __mptcp_splice_receive_queue() argument
1919 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_splice_receive_queue()
1921 skb_queue_splice_tail_init(&sk->sk_receive_queue, &msk->receive_queue); in __mptcp_splice_receive_queue()
1926 struct sock *sk = (struct sock *)msk; in __mptcp_move_skbs() local
1943 mptcp_data_lock(sk); in __mptcp_move_skbs()
1944 __mptcp_update_rmem(sk); in __mptcp_move_skbs()
1946 mptcp_data_unlock(sk); in __mptcp_move_skbs()
1949 __mptcp_error_report(sk); in __mptcp_move_skbs()
1956 !skb_queue_empty_lockless(&sk->sk_receive_queue)) { in __mptcp_move_skbs()
1957 mptcp_data_lock(sk); in __mptcp_move_skbs()
1958 __mptcp_update_rmem(sk); in __mptcp_move_skbs()
1960 __mptcp_splice_receive_queue(sk); in __mptcp_move_skbs()
1961 mptcp_data_unlock(sk); in __mptcp_move_skbs()
1968 static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, in mptcp_recvmsg() argument
1971 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_recvmsg()
1979 return inet_recv_error(sk, msg, len, addr_len); in mptcp_recvmsg()
1981 lock_sock(sk); in mptcp_recvmsg()
1982 if (unlikely(sk->sk_state == TCP_LISTEN)) { in mptcp_recvmsg()
1987 timeo = sock_rcvtimeo(sk, nonblock); in mptcp_recvmsg()
1990 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); in mptcp_recvmsg()
2017 if (sk->sk_err || in mptcp_recvmsg()
2018 sk->sk_state == TCP_CLOSE || in mptcp_recvmsg()
2019 (sk->sk_shutdown & RCV_SHUTDOWN) || in mptcp_recvmsg()
2024 if (sk->sk_err) { in mptcp_recvmsg()
2025 copied = sock_error(sk); in mptcp_recvmsg()
2032 if (sk->sk_shutdown & RCV_SHUTDOWN) { in mptcp_recvmsg()
2041 if (sk->sk_state == TCP_CLOSE) { in mptcp_recvmsg()
2058 sk_wait_data(sk, &timeo, NULL); in mptcp_recvmsg()
2064 tcp_recv_timestamp(msg, sk, &tss); in mptcp_recvmsg()
2068 msk, skb_queue_empty_lockless(&sk->sk_receive_queue), in mptcp_recvmsg()
2073 release_sock(sk); in mptcp_recvmsg()
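The conditions quoted above are what lets a blocking mptcp_recvmsg() stop waiting with fewer than the target bytes: a pending socket error, TCP_CLOSE, a receive-side shutdown, or an expired timeout. The same decision as a standalone predicate; the struct and names are local to this sketch:

        #include <stdio.h>
        #include <stdbool.h>

        struct rcv_state {
                int sk_err;
                bool closed;            /* sk->sk_state == TCP_CLOSE */
                bool rcv_shutdown;      /* sk->sk_shutdown & RCV_SHUTDOWN */
                long timeo;             /* remaining wait, 0 = don't block */
        };

        static bool must_stop_waiting(const struct rcv_state *s)
        {
                return s->sk_err || s->closed || s->rcv_shutdown || !s->timeo;
        }

        int main(void)
        {
                struct rcv_state s = { .timeo = 100 };

                printf("stop=%d\n", must_stop_waiting(&s));     /* 0: keep waiting */
                s.rcv_shutdown = true;
                printf("stop=%d\n", must_stop_waiting(&s));     /* 1: EOF path */
                return 0;
        }
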
2081 struct sock *sk = &icsk->icsk_inet.sk; in mptcp_retransmit_timer() local
2082 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_retransmit_timer()
2084 bh_lock_sock(sk); in mptcp_retransmit_timer()
2085 if (!sock_owned_by_user(sk)) { in mptcp_retransmit_timer()
2088 mptcp_schedule_work(sk); in mptcp_retransmit_timer()
2093 bh_unlock_sock(sk); in mptcp_retransmit_timer()
2094 sock_put(sk); in mptcp_retransmit_timer()
2099 struct sock *sk = from_timer(sk, t, sk_timer); in mptcp_timeout_timer() local
2101 mptcp_schedule_work(sk); in mptcp_timeout_timer()
2102 sock_put(sk); in mptcp_timeout_timer()
2159 bool __mptcp_retransmit_pending_data(struct sock *sk) in __mptcp_retransmit_pending_data() argument
2162 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_retransmit_pending_data()
2164 if (__mptcp_check_fallback(mptcp_sk(sk))) in __mptcp_retransmit_pending_data()
2167 if (tcp_rtx_and_write_queues_empty(sk)) in __mptcp_retransmit_pending_data()
2174 mptcp_data_lock(sk); in __mptcp_retransmit_pending_data()
2175 __mptcp_clean_una_wakeup(sk); in __mptcp_retransmit_pending_data()
2176 rtx_head = mptcp_rtx_head(sk); in __mptcp_retransmit_pending_data()
2178 mptcp_data_unlock(sk); in __mptcp_retransmit_pending_data()
2184 mptcp_data_unlock(sk); in __mptcp_retransmit_pending_data()
2207 static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, in __mptcp_close_ssk() argument
2210 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_close_ssk()
2223 need_push = __mptcp_retransmit_pending_data(sk); in __mptcp_close_ssk()
2249 if (msk->subflow && ssk == msk->subflow->sk) in __mptcp_close_ssk()
2253 __mptcp_push_pending(sk, 0); in __mptcp_close_ssk()
2256 void mptcp_close_ssk(struct sock *sk, struct sock *ssk, in mptcp_close_ssk() argument
2259 if (sk->sk_state == TCP_ESTABLISHED) in mptcp_close_ssk()
2260 mptcp_event(MPTCP_EVENT_SUB_CLOSED, mptcp_sk(sk), ssk, GFP_KERNEL); in mptcp_close_ssk()
2261 __mptcp_close_ssk(sk, ssk, subflow); in mptcp_close_ssk()
2264 static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu) in mptcp_sync_mss() argument
2289 static bool mptcp_check_close_timeout(const struct sock *sk) in mptcp_check_close_timeout() argument
2291 s32 delta = tcp_jiffies32 - inet_csk(sk)->icsk_mtup.probe_timestamp; in mptcp_check_close_timeout()
2300 mptcp_for_each_subflow(mptcp_sk(sk), subflow) { in mptcp_check_close_timeout()
2311 struct sock *sk = &msk->sk.icsk_inet.sk; in mptcp_check_fastclose() local
2330 inet_sk_state_store(sk, TCP_CLOSE); in mptcp_check_fastclose()
2331 sk->sk_shutdown = SHUTDOWN_MASK; in mptcp_check_fastclose()
2335 mptcp_close_wake_up(sk); in mptcp_check_fastclose()
2338 static void __mptcp_retrans(struct sock *sk) in __mptcp_retrans() argument
2340 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_retrans()
2347 mptcp_clean_una_wakeup(sk); in __mptcp_retrans()
2351 dfrag = mptcp_rtx_head(sk); in __mptcp_retrans()
2354 struct inet_connection_sock *icsk = inet_csk(sk); in __mptcp_retrans()
2357 mptcp_set_datafin_timeout(sk); in __mptcp_retrans()
2363 if (!mptcp_send_head(sk)) in __mptcp_retrans()
2378 ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info); in __mptcp_retrans()
2382 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS); in __mptcp_retrans()
2395 mptcp_check_and_set_pending(sk); in __mptcp_retrans()
2397 if (!mptcp_timer_pending(sk)) in __mptcp_retrans()
2398 mptcp_reset_timer(sk); in __mptcp_retrans()
2404 struct sock *sk = &msk->sk.icsk_inet.sk; in mptcp_worker() local
2407 lock_sock(sk); in mptcp_worker()
2408 state = sk->sk_state; in mptcp_worker()
2412 mptcp_check_data_fin_ack(sk); in mptcp_worker()
2423 __mptcp_check_send_data_fin(sk); in mptcp_worker()
2424 mptcp_check_data_fin(sk); in mptcp_worker()
2430 if (sock_flag(sk, SOCK_DEAD) && in mptcp_worker()
2431 (mptcp_check_close_timeout(sk) || sk->sk_state == TCP_CLOSE)) { in mptcp_worker()
2432 inet_sk_state_store(sk, TCP_CLOSE); in mptcp_worker()
2433 __mptcp_destroy_sock(sk); in mptcp_worker()
2441 __mptcp_retrans(sk); in mptcp_worker()
2444 release_sock(sk); in mptcp_worker()
2445 sock_put(sk); in mptcp_worker()
2448 static int __mptcp_init_sock(struct sock *sk) in __mptcp_init_sock() argument
2450 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_init_sock()
2466 inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss; in __mptcp_init_sock()
2467 WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk))); in __mptcp_init_sock()
2473 timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0); in __mptcp_init_sock()
2474 timer_setup(&sk->sk_timer, mptcp_timeout_timer, 0); in __mptcp_init_sock()
2479 static int mptcp_init_sock(struct sock *sk) in mptcp_init_sock() argument
2481 struct inet_connection_sock *icsk = inet_csk(sk); in mptcp_init_sock()
2482 struct net *net = sock_net(sk); in mptcp_init_sock()
2485 ret = __mptcp_init_sock(sk); in mptcp_init_sock()
2495 ret = __mptcp_socket_create(mptcp_sk(sk)); in mptcp_init_sock()
2502 tcp_assign_congestion_control(sk); in mptcp_init_sock()
2503 strcpy(mptcp_sk(sk)->ca_name, icsk->icsk_ca_ops->name); in mptcp_init_sock()
2506 tcp_cleanup_congestion_control(sk); in mptcp_init_sock()
2509 sk_sockets_allocated_inc(sk); in mptcp_init_sock()
2510 sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1]; in mptcp_init_sock()
2511 sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1]; in mptcp_init_sock()
2516 static void __mptcp_clear_xmit(struct sock *sk) in __mptcp_clear_xmit() argument
2518 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_clear_xmit()
2523 dfrag_clear(sk, dfrag); in __mptcp_clear_xmit()
2526 static void mptcp_cancel_work(struct sock *sk) in mptcp_cancel_work() argument
2528 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_cancel_work()
2531 __sock_put(sk); in mptcp_cancel_work()
2534 void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how) in mptcp_subflow_shutdown() argument
2547 if (__mptcp_check_fallback(mptcp_sk(sk))) { in mptcp_subflow_shutdown()
2554 if (!mptcp_timer_pending(sk)) in mptcp_subflow_shutdown()
2555 mptcp_reset_timer(sk); in mptcp_subflow_shutdown()
2580 static int mptcp_close_state(struct sock *sk) in mptcp_close_state() argument
2582 int next = (int)new_state[sk->sk_state]; in mptcp_close_state()
2585 inet_sk_state_store(sk, ns); in mptcp_close_state()
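mptcp_close_state() above indexes a new_state[] table in the same style as TCP's tcp_close_state(): each entry holds the next state plus an action bit saying whether a (DATA_)FIN must actually be sent. A cut-down table with only the common transitions; the real one covers every TCP state:

        #include <stdio.h>

        #define S_CLOSED        0
        #define S_ESTABLISHED   1
        #define S_FIN_WAIT1     2
        #define S_CLOSE_WAIT    3
        #define S_LAST_ACK      4
        #define ACTION_FIN      0x100   /* analogue of TCP_ACTION_FIN */

        static const int new_state[] = {
                [S_CLOSED]      = S_CLOSED,
                [S_ESTABLISHED] = S_FIN_WAIT1 | ACTION_FIN,
                [S_FIN_WAIT1]   = S_FIN_WAIT1,
                [S_CLOSE_WAIT]  = S_LAST_ACK | ACTION_FIN,
                [S_LAST_ACK]    = S_LAST_ACK,
        };

        static int close_state(int *state)
        {
                int next = new_state[*state];

                *state = next & ~ACTION_FIN;
                return next & ACTION_FIN;       /* non-zero: send (DATA_)FIN now */
        }

        int main(void)
        {
                int st = S_ESTABLISHED;
                int fin = close_state(&st);

                printf("send_fin=%d new_state=%d\n", !!fin, st);
                return 0;
        }
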
2590 static void __mptcp_check_send_data_fin(struct sock *sk) in __mptcp_check_send_data_fin() argument
2593 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_check_send_data_fin()
2596 msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk), in __mptcp_check_send_data_fin()
2603 mptcp_send_head(sk)) in __mptcp_check_send_data_fin()
2612 if ((1 << sk->sk_state) & (TCPF_CLOSING | TCPF_LAST_ACK)) { in __mptcp_check_send_data_fin()
2613 inet_sk_state_store(sk, TCP_CLOSE); in __mptcp_check_send_data_fin()
2614 mptcp_close_wake_up(sk); in __mptcp_check_send_data_fin()
2615 } else if (sk->sk_state == TCP_FIN_WAIT1) { in __mptcp_check_send_data_fin()
2616 inet_sk_state_store(sk, TCP_FIN_WAIT2); in __mptcp_check_send_data_fin()
2624 mptcp_subflow_shutdown(sk, tcp_sk, SEND_SHUTDOWN); in __mptcp_check_send_data_fin()
2628 static void __mptcp_wr_shutdown(struct sock *sk) in __mptcp_wr_shutdown() argument
2630 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_wr_shutdown()
2633 msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state, in __mptcp_wr_shutdown()
2634 !!mptcp_send_head(sk)); in __mptcp_wr_shutdown()
2640 __mptcp_check_send_data_fin(sk); in __mptcp_wr_shutdown()
2643 static void __mptcp_destroy_sock(struct sock *sk) in __mptcp_destroy_sock() argument
2646 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_destroy_sock()
2661 sk_stop_timer(sk, &msk->sk.icsk_retransmit_timer); in __mptcp_destroy_sock()
2662 sk_stop_timer(sk, &sk->sk_timer); in __mptcp_destroy_sock()
2667 __mptcp_close_ssk(sk, ssk, subflow); in __mptcp_destroy_sock()
2670 sk->sk_prot->destroy(sk); in __mptcp_destroy_sock()
2674 sk_stream_kill_queues(sk); in __mptcp_destroy_sock()
2675 xfrm_sk_free_policy(sk); in __mptcp_destroy_sock()
2677 sk_refcnt_debug_release(sk); in __mptcp_destroy_sock()
2679 sock_put(sk); in __mptcp_destroy_sock()
2682 static void mptcp_close(struct sock *sk, long timeout) in mptcp_close() argument
2687 lock_sock(sk); in mptcp_close()
2688 sk->sk_shutdown = SHUTDOWN_MASK; in mptcp_close()
2690 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) { in mptcp_close()
2691 inet_sk_state_store(sk, TCP_CLOSE); in mptcp_close()
2695 if (mptcp_close_state(sk)) in mptcp_close()
2696 __mptcp_wr_shutdown(sk); in mptcp_close()
2698 sk_stream_wait_close(sk, timeout); in mptcp_close()
2702 inet_csk(sk)->icsk_mtup.probe_timestamp = tcp_jiffies32; in mptcp_close()
2703 mptcp_for_each_subflow(mptcp_sk(sk), subflow) { in mptcp_close()
2710 sock_orphan(sk); in mptcp_close()
2712 sock_hold(sk); in mptcp_close()
2713 pr_debug("msk=%p state=%d", sk, sk->sk_state); in mptcp_close()
2714 if (sk->sk_state == TCP_CLOSE) { in mptcp_close()
2715 __mptcp_destroy_sock(sk); in mptcp_close()
2718 sk_reset_timer(sk, &sk->sk_timer, jiffies + TCP_TIMEWAIT_LEN); in mptcp_close()
2720 release_sock(sk); in mptcp_close()
2722 mptcp_cancel_work(sk); in mptcp_close()
2724 if (mptcp_sk(sk)->token) in mptcp_close()
2725 mptcp_event(MPTCP_EVENT_CLOSED, mptcp_sk(sk), NULL, GFP_KERNEL); in mptcp_close()
2727 sock_put(sk); in mptcp_close()
2753 static int mptcp_disconnect(struct sock *sk, int flags) in mptcp_disconnect() argument
2756 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_disconnect()
2771 static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk) in mptcp_inet6_sk() argument
2775 return (struct ipv6_pinfo *)(((u8 *)sk) + offset); in mptcp_inet6_sk()
2779 struct sock *mptcp_sk_clone(const struct sock *sk, in mptcp_sk_clone() argument
2784 struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC); in mptcp_sk_clone()
2810 msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq; in mptcp_sk_clone()
2851 static struct sock *mptcp_accept(struct sock *sk, int flags, int *err, in mptcp_accept() argument
2854 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_accept()
2864 pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk)); in mptcp_accept()
2865 newsk = inet_csk_accept(listener->sk, flags, err, kern); in mptcp_accept()
2888 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK); in mptcp_accept()
2890 MPTCP_INC_STATS(sock_net(sk), in mptcp_accept()
2901 struct sock *sk = (struct sock *)msk; in mptcp_destroy_common() local
2903 __mptcp_clear_xmit(sk); in mptcp_destroy_common()
2906 skb_queue_splice_tail_init(&msk->receive_queue, &sk->sk_receive_queue); in mptcp_destroy_common()
2907 __skb_queue_purge(&sk->sk_receive_queue); in mptcp_destroy_common()
2913 sk->sk_forward_alloc += msk->rmem_fwd_alloc; in mptcp_destroy_common()
2919 static void mptcp_destroy(struct sock *sk) in mptcp_destroy() argument
2921 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_destroy()
2924 sk_sockets_allocated_dec(sk); in mptcp_destroy()
2927 void __mptcp_data_acked(struct sock *sk) in __mptcp_data_acked() argument
2929 if (!sock_owned_by_user(sk)) in __mptcp_data_acked()
2930 __mptcp_clean_una(sk); in __mptcp_data_acked()
2932 set_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags); in __mptcp_data_acked()
2934 if (mptcp_pending_data_fin_ack(sk)) in __mptcp_data_acked()
2935 mptcp_schedule_work(sk); in __mptcp_data_acked()
2938 void __mptcp_check_push(struct sock *sk, struct sock *ssk) in __mptcp_check_push() argument
2940 if (!mptcp_send_head(sk)) in __mptcp_check_push()
2943 if (!sock_owned_by_user(sk)) { in __mptcp_check_push()
2944 struct sock *xmit_ssk = mptcp_subflow_get_send(mptcp_sk(sk)); in __mptcp_check_push()
2947 __mptcp_subflow_push_pending(sk, ssk); in __mptcp_check_push()
2951 set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags); in __mptcp_check_push()
2956 static void mptcp_release_cb(struct sock *sk) in mptcp_release_cb() argument
2961 if (test_and_clear_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags)) in mptcp_release_cb()
2963 if (test_and_clear_bit(MPTCP_RETRANSMIT, &mptcp_sk(sk)->flags)) in mptcp_release_cb()
2976 spin_unlock_bh(&sk->sk_lock.slock); in mptcp_release_cb()
2978 __mptcp_push_pending(sk, 0); in mptcp_release_cb()
2980 __mptcp_retrans(sk); in mptcp_release_cb()
2983 spin_lock_bh(&sk->sk_lock.slock); in mptcp_release_cb()
2989 if (test_and_clear_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->flags)) in mptcp_release_cb()
2990 __mptcp_set_connected(sk); in mptcp_release_cb()
2991 if (test_and_clear_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags)) in mptcp_release_cb()
2992 __mptcp_clean_una_wakeup(sk); in mptcp_release_cb()
2993 if (test_and_clear_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags)) in mptcp_release_cb()
2994 __mptcp_error_report(sk); in mptcp_release_cb()
2996 __mptcp_update_rmem(sk); in mptcp_release_cb()
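mptcp_release_cb() above drains deferred work in a loop: each flag is consumed with test_and_clear_bit(), the socket spinlock is dropped while the expensive action runs, then retaken before re-checking for newly set flags. A single-threaded model of that drain loop with two of the flags:

        #include <stdio.h>

        #define F_PUSH_PENDING  (1u << 0)
        #define F_RETRANSMIT    (1u << 1)

        static unsigned int flags;

        static int test_and_clear(unsigned int bit)
        {
                int was_set = !!(flags & bit);

                flags &= ~bit;
                return was_set;
        }

        static void release_cb_model(void)
        {
                for (;;) {
                        unsigned int todo = 0;

                        if (test_and_clear(F_PUSH_PENDING))
                                todo |= F_PUSH_PENDING;
                        if (test_and_clear(F_RETRANSMIT))
                                todo |= F_RETRANSMIT;
                        if (!todo)
                                break;
                        /* spin_unlock_bh(&sk->sk_lock.slock) would go here ... */
                        if (todo & F_PUSH_PENDING)
                                printf("__mptcp_push_pending()\n");
                        if (todo & F_RETRANSMIT)
                                printf("__mptcp_retrans()\n");
                        /* ... and spin_lock_bh(&sk->sk_lock.slock) here */
                }
        }

        int main(void)
        {
                flags = F_PUSH_PENDING | F_RETRANSMIT;
                release_cb_model();
                return 0;
        }
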
3028 struct sock *sk = subflow->conn; in mptcp_subflow_process_delegated() local
3031 mptcp_data_lock(sk); in mptcp_subflow_process_delegated()
3032 if (!sock_owned_by_user(sk)) in mptcp_subflow_process_delegated()
3033 __mptcp_subflow_push_pending(sk, ssk); in mptcp_subflow_process_delegated()
3035 set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->flags); in mptcp_subflow_process_delegated()
3036 mptcp_data_unlock(sk); in mptcp_subflow_process_delegated()
3045 static int mptcp_hash(struct sock *sk) in mptcp_hash() argument
3054 static void mptcp_unhash(struct sock *sk) in mptcp_unhash() argument
3059 static int mptcp_get_port(struct sock *sk, unsigned short snum) in mptcp_get_port() argument
3061 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_get_port()
3069 return inet_csk_get_port(ssock->sk, snum); in mptcp_get_port()
3076 struct sock *sk; in mptcp_finish_connect() local
3080 sk = subflow->conn; in mptcp_finish_connect()
3081 msk = mptcp_sk(sk); in mptcp_finish_connect()
3083 pr_debug("msk=%p, token=%u", sk, subflow->token); in mptcp_finish_connect()
3107 void mptcp_sock_graft(struct sock *sk, struct socket *parent) in mptcp_sock_graft() argument
3109 write_lock_bh(&sk->sk_callback_lock); in mptcp_sock_graft()
3110 rcu_assign_pointer(sk->sk_wq, &parent->wq); in mptcp_sock_graft()
3111 sk_set_socket(sk, parent); in mptcp_sock_graft()
3112 sk->sk_uid = SOCK_INODE(parent)->i_uid; in mptcp_sock_graft()
3113 write_unlock_bh(&sk->sk_callback_lock); in mptcp_sock_graft()
3169 static void mptcp_shutdown(struct sock *sk, int how) in mptcp_shutdown() argument
3171 pr_debug("sk=%p, how=%d", sk, how); in mptcp_shutdown()
3173 if ((how & SEND_SHUTDOWN) && mptcp_close_state(sk)) in mptcp_shutdown()
3174 __mptcp_wr_shutdown(sk); in mptcp_shutdown()
3177 static int mptcp_forward_alloc_get(const struct sock *sk) in mptcp_forward_alloc_get() argument
3179 return sk->sk_forward_alloc + mptcp_sk(sk)->rmem_fwd_alloc; in mptcp_forward_alloc_get()
3213 struct mptcp_sock *msk = mptcp_sk(sock->sk); in mptcp_bind()
3217 lock_sock(sock->sk); in mptcp_bind()
3226 mptcp_copy_inaddrs(sock->sk, ssock->sk); in mptcp_bind()
3229 release_sock(sock->sk); in mptcp_bind()
3243 struct mptcp_sock *msk = mptcp_sk(sock->sk); in mptcp_stream_connect()
3248 lock_sock(sock->sk); in mptcp_stream_connect()
3264 inet_sk_state_store(sock->sk, TCP_SYN_SENT); in mptcp_stream_connect()
3265 subflow = mptcp_subflow_ctx(ssock->sk); in mptcp_stream_connect()
3270 if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info)) in mptcp_stream_connect()
3273 if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk)) { in mptcp_stream_connect()
3274 MPTCP_INC_STATS(sock_net(ssock->sk), MPTCP_MIB_TOKENFALLBACKINIT); in mptcp_stream_connect()
3278 MPTCP_INC_STATS(sock_net(sock->sk), MPTCP_MIB_MPCAPABLEACTIVE); in mptcp_stream_connect()
3288 mptcp_copy_inaddrs(sock->sk, ssock->sk); in mptcp_stream_connect()
3290 inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk)); in mptcp_stream_connect()
3293 release_sock(sock->sk); in mptcp_stream_connect()
3299 struct mptcp_sock *msk = mptcp_sk(sock->sk); in mptcp_listen()
3305 lock_sock(sock->sk); in mptcp_listen()
3313 inet_sk_state_store(sock->sk, TCP_LISTEN); in mptcp_listen()
3314 sock_set_flag(sock->sk, SOCK_RCU_FREE); in mptcp_listen()
3317 inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk)); in mptcp_listen()
3319 mptcp_copy_inaddrs(sock->sk, ssock->sk); in mptcp_listen()
3322 release_sock(sock->sk); in mptcp_listen()
3329 struct mptcp_sock *msk = mptcp_sk(sock->sk); in mptcp_stream_accept()
3335 lock_sock(sock->sk); in mptcp_stream_accept()
3336 if (sock->sk->sk_state != TCP_LISTEN) in mptcp_stream_accept()
3344 sock_hold(ssock->sk); in mptcp_stream_accept()
3345 release_sock(sock->sk); in mptcp_stream_accept()
3348 if (err == 0 && !mptcp_is_tcpsk(newsock->sk)) { in mptcp_stream_accept()
3349 struct mptcp_sock *msk = mptcp_sk(newsock->sk); in mptcp_stream_accept()
3351 struct sock *newsk = newsock->sk; in mptcp_stream_accept()
3385 if (inet_csk_listen_poll(ssock->sk)) in mptcp_stream_accept()
3387 sock_put(ssock->sk); in mptcp_stream_accept()
3391 release_sock(sock->sk); in mptcp_stream_accept()
3409 struct sock *sk = (struct sock *)msk; in mptcp_check_writeable() local
3411 if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN)) in mptcp_check_writeable()
3414 if (sk_stream_is_writeable(sk)) in mptcp_check_writeable()
3417 mptcp_set_nospace(sk); in mptcp_check_writeable()
3419 if (sk_stream_is_writeable(sk)) in mptcp_check_writeable()
3428 struct sock *sk = sock->sk; in mptcp_poll() local
3433 msk = mptcp_sk(sk); in mptcp_poll()
3436 state = inet_sk_state_load(sk); in mptcp_poll()
3445 if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE) in mptcp_poll()
3447 if (sk->sk_shutdown & RCV_SHUTDOWN) in mptcp_poll()
3452 if (sk->sk_err) in mptcp_poll()
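The shutdown and error tests above build the poll mask the usual TCP way: SHUTDOWN_MASK or TCP_CLOSE raises EPOLLHUP, a receive-side shutdown raises EPOLLIN | EPOLLRDNORM | EPOLLRDHUP, and a pending sk_err adds EPOLLERR. A userspace sketch with the standard epoll constants; the shutdown flag values are local assumptions:

        #include <stdio.h>
        #include <sys/epoll.h>

        #define RCV_SHUT        1
        #define SEND_SHUT       2
        #define SHUT_MASK       (RCV_SHUT | SEND_SHUT)

        static unsigned int poll_mask(int shutdown, int closed, int sk_err)
        {
                unsigned int mask = 0;

                if (shutdown == SHUT_MASK || closed)
                        mask |= EPOLLHUP;
                if (shutdown & RCV_SHUT)
                        mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
                if (sk_err)
                        mask |= EPOLLERR;
                return mask;
        }

        int main(void)
        {
                printf("mask=%#x\n", poll_mask(RCV_SHUT, 0, 0));
                return 0;
        }
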
3580 static void mptcp_v6_destroy(struct sock *sk) in mptcp_v6_destroy() argument
3582 mptcp_destroy(sk); in mptcp_v6_destroy()
3583 inet6_destroy_sock(sk); in mptcp_v6_destroy()