Lines matching refs: msk (net/mptcp/protocol.c)
35 struct mptcp_sock msk; member
56 static u64 mptcp_wnd_end(const struct mptcp_sock *msk) in mptcp_wnd_end() argument
58 return READ_ONCE(msk->wnd_end); in mptcp_wnd_end()
71 bool __mptcp_try_fallback(struct mptcp_sock *msk, int fb_mib) in __mptcp_try_fallback() argument
73 struct net *net = sock_net((struct sock *)msk); in __mptcp_try_fallback()
75 if (__mptcp_check_fallback(msk)) in __mptcp_try_fallback()
78 spin_lock_bh(&msk->fallback_lock); in __mptcp_try_fallback()
79 if (!msk->allow_infinite_fallback) { in __mptcp_try_fallback()
80 spin_unlock_bh(&msk->fallback_lock); in __mptcp_try_fallback()
84 msk->allow_subflows = false; in __mptcp_try_fallback()
85 set_bit(MPTCP_FALLBACK_DONE, &msk->flags); in __mptcp_try_fallback()
87 spin_unlock_bh(&msk->fallback_lock); in __mptcp_try_fallback()
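
The matches above cover most of __mptcp_try_fallback(); a hedged reconstruction of the whole helper, with the unmatched return paths and the MIB bump filled in from context (assumed, not shown by the match output):

        bool __mptcp_try_fallback(struct mptcp_sock *msk, int fb_mib)
        {
                struct net *net = sock_net((struct sock *)msk);

                /* Nothing to do if this connection already fell back. */
                if (__mptcp_check_fallback(msk))
                        return true;            /* assumed return path */

                /* fallback_lock serializes fallback vs. subflow creation. */
                spin_lock_bh(&msk->fallback_lock);
                if (!msk->allow_infinite_fallback) {
                        spin_unlock_bh(&msk->fallback_lock);
                        return false;           /* assumed return path */
                }

                msk->allow_subflows = false;
                set_bit(MPTCP_FALLBACK_DONE, &msk->flags);
                __MPTCP_INC_STATS(net, fb_mib); /* assumed use of fb_mib */
                spin_unlock_bh(&msk->fallback_lock);
                return true;
        }
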
91 static int __mptcp_socket_create(struct mptcp_sock *msk) in __mptcp_socket_create() argument
94 struct sock *sk = (struct sock *)msk; in __mptcp_socket_create()
102 msk->scaling_ratio = tcp_sk(ssock->sk)->scaling_ratio; in __mptcp_socket_create()
103 WRITE_ONCE(msk->first, ssock->sk); in __mptcp_socket_create()
105 list_add(&subflow->node, &msk->conn_list); in __mptcp_socket_create()
108 subflow->subflow_id = msk->subflow_id++; in __mptcp_socket_create()
112 mptcp_sock_graft(msk->first, sk->sk_socket); in __mptcp_socket_create()
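
A sketch of __mptcp_socket_create() built around the matched lines; the error handling and the subflow context lookup are assumptions from context, and reference handling plus first-subflow id setup are elided:

        static int __mptcp_socket_create(struct mptcp_sock *msk)
        {
                struct mptcp_subflow_context *subflow;
                struct sock *sk = (struct sock *)msk;
                struct socket *ssock;
                int err;

                /* Assumed: create the in-kernel TCP socket backing the
                 * first subflow.
                 */
                err = mptcp_subflow_create_socket(sk, sk->sk_family, &ssock);
                if (err)
                        return err;

                msk->scaling_ratio = tcp_sk(ssock->sk)->scaling_ratio;
                WRITE_ONCE(msk->first, ssock->sk);

                subflow = mptcp_subflow_ctx(ssock->sk);
                list_add(&subflow->node, &msk->conn_list);
                subflow->request_mptcp = 1;
                subflow->subflow_id = msk->subflow_id++;

                mptcp_sock_graft(msk->first, sk->sk_socket);
                return 0;
        }
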
121 struct sock *__mptcp_nmpc_sk(struct mptcp_sock *msk) in __mptcp_nmpc_sk() argument
123 struct sock *sk = (struct sock *)msk; in __mptcp_nmpc_sk()
129 if (!msk->first) { in __mptcp_nmpc_sk()
130 ret = __mptcp_socket_create(msk); in __mptcp_nmpc_sk()
135 return msk->first; in __mptcp_nmpc_sk()
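
__mptcp_nmpc_sk() reconstructs almost entirely from its matches; only the initial state check is an assumption:

        struct sock *__mptcp_nmpc_sk(struct mptcp_sock *msk)
        {
                struct sock *sk = (struct sock *)msk;
                int ret;

                /* Assumed: only valid before the connection is set up. */
                if (!((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
                        return ERR_PTR(-EINVAL);

                /* Lazily create the first subflow on demand. */
                if (!msk->first) {
                        ret = __mptcp_socket_create(msk);
                        if (ret)
                                return ERR_PTR(ret);
                }

                return msk->first;
        }
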
172 static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to, in mptcp_ooo_try_coalesce() argument
178 return mptcp_try_coalesce((struct sock *)msk, to, from); in mptcp_ooo_try_coalesce()
185 static void mptcp_data_queue_ofo(struct mptcp_sock *msk, struct sk_buff *skb) in mptcp_data_queue_ofo() argument
187 struct sock *sk = (struct sock *)msk; in mptcp_data_queue_ofo()
194 max_seq = atomic64_read(&msk->rcv_wnd_sent); in mptcp_data_queue_ofo()
196 pr_debug("msk=%p seq=%llx limit=%llx empty=%d\n", msk, seq, max_seq, in mptcp_data_queue_ofo()
197 RB_EMPTY_ROOT(&msk->out_of_order_queue)); in mptcp_data_queue_ofo()
203 (unsigned long long)atomic64_read(&msk->rcv_wnd_sent)); in mptcp_data_queue_ofo()
208 p = &msk->out_of_order_queue.rb_node; in mptcp_data_queue_ofo()
210 if (RB_EMPTY_ROOT(&msk->out_of_order_queue)) { in mptcp_data_queue_ofo()
212 rb_insert_color(&skb->rbnode, &msk->out_of_order_queue); in mptcp_data_queue_ofo()
213 msk->ooo_last_skb = skb; in mptcp_data_queue_ofo()
220 if (mptcp_ooo_try_coalesce(msk, msk->ooo_last_skb, skb)) { in mptcp_data_queue_ofo()
227 if (!before64(seq, MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq)) { in mptcp_data_queue_ofo()
229 parent = &msk->ooo_last_skb->rbnode; in mptcp_data_queue_ofo()
261 &msk->out_of_order_queue); in mptcp_data_queue_ofo()
266 } else if (mptcp_ooo_try_coalesce(msk, skb1, skb)) { in mptcp_data_queue_ofo()
276 rb_insert_color(&skb->rbnode, &msk->out_of_order_queue); in mptcp_data_queue_ofo()
283 rb_erase(&skb1->rbnode, &msk->out_of_order_queue); in mptcp_data_queue_ofo()
289 msk->ooo_last_skb = skb; in mptcp_data_queue_ofo()
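
mptcp_data_queue_ofo() keeps out-of-order skbs in an rbtree ordered by 64-bit MPTCP sequence numbers, with ooo_last_skb caching the rightmost node for the common append-at-tail case. Stripped of the coalescing and overlap handling visible above, the underlying pattern is the classic ordered rbtree insert (illustrative helper only, not the kernel code):

        /* Illustrative: ordered insert of an skb keyed by its DSS map_seq.
         * The real function also coalesces with neighbours, drops
         * fully-overlapped skbs and updates msk->ooo_last_skb.
         */
        static void ofo_insert(struct rb_root *root, struct sk_buff *skb, u64 seq)
        {
                struct rb_node **p = &root->rb_node, *parent = NULL;

                while (*p) {
                        struct sk_buff *skb1 = rb_entry(*p, struct sk_buff,
                                                        rbnode);

                        parent = *p;
                        if (before64(seq, MPTCP_SKB_CB(skb1)->map_seq))
                                p = &parent->rb_left;
                        else
                                p = &parent->rb_right;
                }
                rb_link_node(&skb->rbnode, parent, p);
                rb_insert_color(&skb->rbnode, root);
        }
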
296 static bool __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk, in __mptcp_move_skb() argument
301 struct sock *sk = (struct sock *)msk; in __mptcp_move_skb()
328 if (MPTCP_SKB_CB(skb)->map_seq == msk->ack_seq) { in __mptcp_move_skb()
330 msk->bytes_received += copy_len; in __mptcp_move_skb()
331 WRITE_ONCE(msk->ack_seq, msk->ack_seq + copy_len); in __mptcp_move_skb()
339 } else if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) { in __mptcp_move_skb()
340 mptcp_data_queue_ofo(msk, skb); in __mptcp_move_skb()
377 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_pending_data_fin_ack() local
381 msk->write_seq == READ_ONCE(msk->snd_una); in mptcp_pending_data_fin_ack()
386 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_check_data_fin_ack() local
390 WRITE_ONCE(msk->snd_data_fin_enable, 0); in mptcp_check_data_fin_ack()
409 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_pending_data_fin() local
411 if (READ_ONCE(msk->rcv_data_fin) && in mptcp_pending_data_fin()
414 u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq); in mptcp_pending_data_fin()
416 if (READ_ONCE(msk->ack_seq) == rcv_data_fin_seq) { in mptcp_pending_data_fin()
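
A hedged reconstruction of mptcp_pending_data_fin(); the state mask and the *seq out-parameter handling are assumed from context:

        static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
        {
                struct mptcp_sock *msk = mptcp_sk(sk);

                if (READ_ONCE(msk->rcv_data_fin) &&
                    ((1 << inet_sk_state_load(sk)) &
                     (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
                        u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq);

                        /* The DATA_FIN is in sequence only once all the
                         * data before it has been consumed.
                         */
                        if (READ_ONCE(msk->ack_seq) == rcv_data_fin_seq) {
                                if (seq)
                                        *seq = rcv_data_fin_seq;
                                return true;
                        }
                }

                return false;
        }
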
482 static void mptcp_send_ack(struct mptcp_sock *msk) in mptcp_send_ack() argument
486 mptcp_for_each_subflow(msk, subflow) in mptcp_send_ack()
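
mptcp_send_ack() just fans the ack out to every subflow; a likely-complete reconstruction (the per-subflow call body is assumed):

        static void mptcp_send_ack(struct mptcp_sock *msk)
        {
                struct mptcp_subflow_context *subflow;

                mptcp_for_each_subflow(msk, subflow)
                        mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow));
        }
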
513 static void mptcp_cleanup_rbuf(struct mptcp_sock *msk, int copied) in mptcp_cleanup_rbuf() argument
515 int old_space = READ_ONCE(msk->old_wspace); in mptcp_cleanup_rbuf()
517 struct sock *sk = (struct sock *)msk; in mptcp_cleanup_rbuf()
524 mptcp_for_each_subflow(msk, subflow) { in mptcp_cleanup_rbuf()
534 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_check_data_fin() local
552 WRITE_ONCE(msk->ack_seq, msk->ack_seq + 1); in mptcp_check_data_fin()
553 WRITE_ONCE(msk->rcv_data_fin, 0); in mptcp_check_data_fin()
575 if (!__mptcp_check_fallback(msk)) in mptcp_check_data_fin()
576 mptcp_send_ack(msk); in mptcp_check_data_fin()
582 static void mptcp_dss_corruption(struct mptcp_sock *msk, struct sock *ssk) in mptcp_dss_corruption() argument
590 static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk, in __mptcp_move_skbs_from_subflow() argument
594 struct sock *sk = (struct sock *)msk; in __mptcp_move_skbs_from_subflow()
599 pr_debug("msk=%p ssk=%p\n", msk, ssk); in __mptcp_move_skbs_from_subflow()
618 if (__mptcp_check_fallback(msk)) { in __mptcp_move_skbs_from_subflow()
635 ret = __mptcp_move_skb(msk, ssk, skb, offset, len) || ret; in __mptcp_move_skbs_from_subflow()
640 mptcp_dss_corruption(msk, ssk); in __mptcp_move_skbs_from_subflow()
645 mptcp_dss_corruption(msk, ssk); in __mptcp_move_skbs_from_subflow()
657 msk->last_data_recv = tcp_jiffies32; in __mptcp_move_skbs_from_subflow()
661 static bool __mptcp_ofo_queue(struct mptcp_sock *msk) in __mptcp_ofo_queue() argument
663 struct sock *sk = (struct sock *)msk; in __mptcp_ofo_queue()
669 p = rb_first(&msk->out_of_order_queue); in __mptcp_ofo_queue()
670 pr_debug("msk=%p empty=%d\n", msk, RB_EMPTY_ROOT(&msk->out_of_order_queue)); in __mptcp_ofo_queue()
673 if (after64(MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq)) in __mptcp_ofo_queue()
677 rb_erase(&skb->rbnode, &msk->out_of_order_queue); in __mptcp_ofo_queue()
680 msk->ack_seq))) { in __mptcp_ofo_queue()
688 if (!tail || !mptcp_ooo_try_coalesce(msk, tail, skb)) { in __mptcp_ofo_queue()
689 int delta = msk->ack_seq - MPTCP_SKB_CB(skb)->map_seq; in __mptcp_ofo_queue()
693 MPTCP_SKB_CB(skb)->map_seq, msk->ack_seq, in __mptcp_ofo_queue()
699 msk->bytes_received += end_seq - msk->ack_seq; in __mptcp_ofo_queue()
700 WRITE_ONCE(msk->ack_seq, end_seq); in __mptcp_ofo_queue()
739 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_error_report() local
741 mptcp_for_each_subflow(msk, subflow) in __mptcp_error_report()
749 static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk) in move_skbs_to_msk() argument
751 struct sock *sk = (struct sock *)msk; in move_skbs_to_msk()
754 moved = __mptcp_move_skbs_from_subflow(msk, ssk); in move_skbs_to_msk()
755 __mptcp_ofo_queue(msk); in move_skbs_to_msk()
760 __set_bit(MPTCP_ERROR_REPORT, &msk->cb_flags); in move_skbs_to_msk()
781 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_data_ready() local
786 if (move_skbs_to_msk(msk, ssk) && mptcp_epollin_ready(sk)) in __mptcp_data_ready()
809 static void mptcp_subflow_joined(struct mptcp_sock *msk, struct sock *ssk) in mptcp_subflow_joined() argument
811 mptcp_subflow_ctx(ssk)->map_seq = READ_ONCE(msk->ack_seq); in mptcp_subflow_joined()
812 msk->allow_infinite_fallback = false; in mptcp_subflow_joined()
813 mptcp_event(MPTCP_EVENT_SUB_ESTABLISHED, msk, ssk, GFP_ATOMIC); in mptcp_subflow_joined()
816 static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk) in __mptcp_finish_join() argument
818 struct sock *sk = (struct sock *)msk; in __mptcp_finish_join()
823 spin_lock_bh(&msk->fallback_lock); in __mptcp_finish_join()
824 if (!msk->allow_subflows) { in __mptcp_finish_join()
825 spin_unlock_bh(&msk->fallback_lock); in __mptcp_finish_join()
828 mptcp_subflow_joined(msk, ssk); in __mptcp_finish_join()
829 spin_unlock_bh(&msk->fallback_lock); in __mptcp_finish_join()
837 mptcp_subflow_ctx(ssk)->subflow_id = msk->subflow_id++; in __mptcp_finish_join()
838 mptcp_sockopt_sync_locked(msk, ssk); in __mptcp_finish_join()
847 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_flush_join_list() local
853 list_move_tail(&subflow->node, &msk->conn_list); in __mptcp_flush_join_list()
854 if (!__mptcp_finish_join(msk, ssk)) in __mptcp_flush_join_list()
910 static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk, in mptcp_frag_can_collapse_to() argument
917 df->data_seq + df->data_len == msk->write_seq; in mptcp_frag_can_collapse_to()
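
This predicate decides whether new data can be appended to the tail dfrag instead of carving a new one; a reconstruction with the page/offset conditions assumed from context:

        static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
                                               const struct page_frag *pfrag,
                                               const struct mptcp_data_frag *df)
        {
                /* Collapse only into the tail dfrag, on the same page,
                 * contiguous in both page offset and data sequence space.
                 */
                return df && pfrag->page == df->page &&
                        pfrag->size - pfrag->offset > 0 &&
                        pfrag->offset == df->offset + df->data_len &&
                        df->data_seq + df->data_len == msk->write_seq;
        }
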
938 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_clean_una() local
942 snd_una = msk->snd_una; in __mptcp_clean_una()
943 list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) { in __mptcp_clean_una()
947 if (unlikely(dfrag == msk->first_pending)) { in __mptcp_clean_una()
949 if (WARN_ON_ONCE(!msk->recovery)) in __mptcp_clean_una()
952 WRITE_ONCE(msk->first_pending, mptcp_send_next(sk)); in __mptcp_clean_una()
964 if (WARN_ON_ONCE(!msk->recovery)) in __mptcp_clean_una()
980 if (unlikely(msk->recovery) && after64(msk->snd_una, msk->recovery_snd_nxt)) in __mptcp_clean_una()
981 msk->recovery = false; in __mptcp_clean_una()
984 if (snd_una == msk->snd_nxt && snd_una == msk->write_seq) { in __mptcp_clean_una()
985 if (mptcp_rtx_timer_pending(sk) && !mptcp_data_fin_enabled(msk)) in __mptcp_clean_una()
1013 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_enter_memory_pressure() local
1016 mptcp_for_each_subflow(msk, subflow) { in mptcp_enter_memory_pressure()
1042 mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag, in mptcp_carve_data_frag() argument
1050 dfrag->data_seq = msk->write_seq; in mptcp_carve_data_frag()
1068 static int mptcp_check_allowed_size(const struct mptcp_sock *msk, struct sock *ssk, in mptcp_check_allowed_size() argument
1071 u64 window_end = mptcp_wnd_end(msk); in mptcp_check_allowed_size()
1074 if (__mptcp_check_fallback(msk)) in mptcp_check_allowed_size()
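
mptcp_check_allowed_size() clamps a subflow transmit against the MPTCP-level send window; a hedged sketch, with the clamping step assumed from context and the subflow snd_wnd sharing elided:

        static int mptcp_check_allowed_size(const struct mptcp_sock *msk,
                                            struct sock *ssk,
                                            u64 data_seq, int avail_size)
        {
                u64 window_end = mptcp_wnd_end(msk);
                u64 mptcp_snd_wnd;

                /* After fallback, plain TCP flow control rules apply. */
                if (__mptcp_check_fallback(msk))
                        return avail_size;

                /* Assumed: never send past the MPTCP-level right edge. */
                mptcp_snd_wnd = window_end - data_seq;
                return min_t(unsigned int, mptcp_snd_wnd, avail_size);
        }
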
1153 static void mptcp_update_infinite_map(struct mptcp_sock *msk, in mptcp_update_infinite_map() argument
1180 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_sendmsg_frag() local
1190 msk, ssk, dfrag->data_seq, dfrag->data_len, info->sent); in mptcp_sendmsg_frag()
1240 copy = mptcp_check_allowed_size(msk, ssk, data_seq, copy); in mptcp_sendmsg_frag()
1242 u64 snd_una = READ_ONCE(msk->snd_una); in mptcp_sendmsg_frag()
1244 if (snd_una != msk->snd_nxt || tcp_write_queue_tail(ssk)) { in mptcp_sendmsg_frag()
1297 if (READ_ONCE(msk->csum_enabled)) in mptcp_sendmsg_frag()
1303 if (READ_ONCE(msk->csum_enabled)) in mptcp_sendmsg_frag()
1306 mptcp_update_infinite_map(msk, ssk, mpext); in mptcp_sendmsg_frag()
1353 struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk) in mptcp_subflow_get_send() argument
1357 struct sock *sk = (struct sock *)msk; in mptcp_subflow_get_send()
1370 mptcp_for_each_subflow(msk, subflow) { in mptcp_subflow_get_send()
1416 burst = min_t(int, MPTCP_SEND_BURST_SIZE, mptcp_wnd_end(msk) - msk->snd_nxt); in mptcp_subflow_get_send()
1425 msk->snd_burst = burst; in mptcp_subflow_get_send()
1435 static void mptcp_update_post_push(struct mptcp_sock *msk, in mptcp_update_post_push() argument
1443 msk->snd_burst -= sent; in mptcp_update_post_push()
1456 if (likely(after64(snd_nxt_new, msk->snd_nxt))) { in mptcp_update_post_push()
1457 msk->bytes_sent += snd_nxt_new - msk->snd_nxt; in mptcp_update_post_push()
1458 WRITE_ONCE(msk->snd_nxt, snd_nxt_new); in mptcp_update_post_push()
1474 struct mptcp_sock *msk = mptcp_sk(sk); in __subflow_push_pending() local
1495 mptcp_update_post_push(msk, dfrag, ret); in __subflow_push_pending()
1497 WRITE_ONCE(msk->first_pending, mptcp_send_next(sk)); in __subflow_push_pending()
1499 if (msk->snd_burst <= 0 || in __subflow_push_pending()
1511 msk->last_data_sent = tcp_jiffies32; in __subflow_push_pending()
1518 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_push_pending() local
1529 if (mptcp_sched_get_send(msk)) in __mptcp_push_pending()
1534 mptcp_for_each_subflow(msk, subflow) { in __mptcp_push_pending()
1582 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_subflow_push_pending() local
1608 if (mptcp_sched_get_send(msk)) in __mptcp_subflow_push_pending()
1619 mptcp_for_each_subflow(msk, subflow) { in __mptcp_subflow_push_pending()
1641 if (msk->snd_data_fin_enable && in __mptcp_subflow_push_pending()
1642 msk->snd_nxt + 1 == msk->write_seq) in __mptcp_subflow_push_pending()
1653 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_sendmsg_fastopen() local
1664 ssk = __mptcp_nmpc_sk(msk); in mptcp_sendmsg_fastopen()
1668 if (!msk->first) in mptcp_sendmsg_fastopen()
1671 ssk = msk->first; in mptcp_sendmsg_fastopen()
1675 msk->fastopening = 1; in mptcp_sendmsg_fastopen()
1677 msk->fastopening = 0; in mptcp_sendmsg_fastopen()
1726 const struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_send_limit() local
1736 not_sent = msk->write_seq - msk->snd_nxt; in mptcp_send_limit()
1745 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_sendmsg() local
1798 dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag); in mptcp_sendmsg()
1803 dfrag = mptcp_carve_data_frag(msk, pfrag, pfrag->offset); in mptcp_sendmsg()
1831 WRITE_ONCE(msk->write_seq, msk->write_seq + psize); in mptcp_sendmsg()
1839 list_add_tail(&dfrag->list, &msk->rtx_queue); in mptcp_sendmsg()
1840 if (!msk->first_pending) in mptcp_sendmsg()
1841 WRITE_ONCE(msk->first_pending, dfrag); in mptcp_sendmsg()
1843 pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk, in mptcp_sendmsg()
1872 static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied);
1880 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_recvmsg_mskq() local
1910 msk->bytes_consumed += count; in __mptcp_recvmsg_mskq()
1922 msk->bytes_consumed += count; in __mptcp_recvmsg_mskq()
1929 mptcp_rcv_space_adjust(msk, copied); in __mptcp_recvmsg_mskq()
1937 static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied) in mptcp_rcv_space_adjust() argument
1940 struct sock *sk = (struct sock *)msk; in mptcp_rcv_space_adjust()
1945 msk_owned_by_me(msk); in mptcp_rcv_space_adjust()
1950 if (!msk->rcvspace_init) in mptcp_rcv_space_adjust()
1951 mptcp_rcv_space_init(msk, msk->first); in mptcp_rcv_space_adjust()
1953 msk->rcvq_space.copied += copied; in mptcp_rcv_space_adjust()
1956 time = tcp_stamp_us_delta(mstamp, msk->rcvq_space.time); in mptcp_rcv_space_adjust()
1958 rtt_us = msk->rcvq_space.rtt_us; in mptcp_rcv_space_adjust()
1963 mptcp_for_each_subflow(msk, subflow) { in mptcp_rcv_space_adjust()
1978 msk->rcvq_space.rtt_us = rtt_us; in mptcp_rcv_space_adjust()
1979 msk->scaling_ratio = scaling_ratio; in mptcp_rcv_space_adjust()
1983 if (msk->rcvq_space.copied <= msk->rcvq_space.space) in mptcp_rcv_space_adjust()
1991 rcvwin = ((u64)msk->rcvq_space.copied << 1) + 16 * advmss; in mptcp_rcv_space_adjust()
1993 grow = rcvwin * (msk->rcvq_space.copied - msk->rcvq_space.space); in mptcp_rcv_space_adjust()
1995 do_div(grow, msk->rcvq_space.space); in mptcp_rcv_space_adjust()
2012 mptcp_for_each_subflow(msk, subflow) { in mptcp_rcv_space_adjust()
2027 msk->rcvq_space.space = msk->rcvq_space.copied; in mptcp_rcv_space_adjust()
2029 msk->rcvq_space.copied = 0; in mptcp_rcv_space_adjust()
2030 msk->rcvq_space.time = mstamp; in mptcp_rcv_space_adjust()
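
Lines 1991-1995 are the MPTCP copy of TCP's dynamic right-sizing growth step. Written out as a standalone helper (hypothetical, for illustration; variable names taken from the matches):

        static u64 drs_grow_rcvwin(u64 copied, u64 space, u32 advmss)
        {
                u64 rcvwin, grow;

                /* Base: twice the data received in the last RTT plus slack. */
                rcvwin = (copied << 1) + 16 * advmss;
                /* Fractional growth relative to the previous estimate... */
                grow = rcvwin * (copied - space);
                do_div(grow, space);
                /* ...applied twice over, so the window keeps opening. */
                rcvwin += (grow << 1);
                return rcvwin;
        }
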
2034 __mptcp_first_ready_from(struct mptcp_sock *msk, in __mptcp_first_ready_from() argument
2040 subflow = mptcp_next_subflow(msk, subflow); in __mptcp_first_ready_from()
2050 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_move_skbs() local
2053 if (list_empty(&msk->conn_list)) in __mptcp_move_skbs()
2058 mptcp_for_each_subflow(msk, subflow) in __mptcp_move_skbs()
2061 subflow = list_first_entry(&msk->conn_list, in __mptcp_move_skbs()
2074 subflow = __mptcp_first_ready_from(msk, subflow); in __mptcp_move_skbs()
2080 ret = __mptcp_move_skbs_from_subflow(msk, ssk) || ret; in __mptcp_move_skbs()
2085 subflow = mptcp_next_subflow(msk, subflow); in __mptcp_move_skbs()
2088 __mptcp_ofo_queue(msk); in __mptcp_move_skbs()
2090 mptcp_check_data_fin((struct sock *)msk); in __mptcp_move_skbs()
2096 const struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_inq_hint() local
2101 u64 hint_val = READ_ONCE(msk->ack_seq) - MPTCP_SKB_CB(skb)->map_seq; in mptcp_inq_hint()
2118 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_recvmsg() local
2139 if (unlikely(msk->recvmsg_inq)) in mptcp_recvmsg()
2202 mptcp_cleanup_rbuf(msk, copied); in mptcp_recvmsg()
2210 mptcp_cleanup_rbuf(msk, copied); in mptcp_recvmsg()
2225 msk, skb_queue_empty(&sk->sk_receive_queue), copied); in mptcp_recvmsg()
2236 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_retransmit_timer() local
2241 if (!test_and_set_bit(MPTCP_WORK_RTX, &msk->flags)) in mptcp_retransmit_timer()
2245 __set_bit(MPTCP_RETRANSMIT, &msk->cb_flags); in mptcp_retransmit_timer()
2264 struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk) in mptcp_subflow_get_retrans() argument
2270 mptcp_for_each_subflow(msk, subflow) { in mptcp_subflow_get_retrans()
2278 mptcp_pm_subflow_chk_stale(msk, ssk); in mptcp_subflow_get_retrans()
2303 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_retransmit_pending_data() local
2305 if (__mptcp_check_fallback(msk)) in __mptcp_retransmit_pending_data()
2320 msk->recovery_snd_nxt = msk->snd_nxt; in __mptcp_retransmit_pending_data()
2321 msk->recovery = true; in __mptcp_retransmit_pending_data()
2324 msk->first_pending = rtx_head; in __mptcp_retransmit_pending_data()
2325 msk->snd_burst = 0; in __mptcp_retransmit_pending_data()
2328 list_for_each_entry(cur, &msk->rtx_queue, list) { in __mptcp_retransmit_pending_data()
2373 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_close_ssk() local
2381 if (msk->in_accept_queue && msk->first == ssk && in __mptcp_close_ssk()
2391 dispose_it = msk->free_first || ssk != msk->first; in __mptcp_close_ssk()
2397 if ((flags & MPTCP_CF_FASTCLOSE) && !__mptcp_check_fallback(msk)) { in __mptcp_close_ssk()
2437 if (ssk == msk->first) in __mptcp_close_ssk()
2438 WRITE_ONCE(msk->first, NULL); in __mptcp_close_ssk()
2450 if (list_is_singular(&msk->conn_list) && msk->first && in __mptcp_close_ssk()
2451 inet_sk_state_load(msk->first) == TCP_CLOSE) { in __mptcp_close_ssk()
2453 msk->in_accept_queue || sock_flag(sk, SOCK_DEAD)) { in __mptcp_close_ssk()
2490 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_close_subflow() local
2494 mptcp_for_each_subflow_safe(msk, subflow, tmp) { in __mptcp_close_subflow()
2522 static void mptcp_check_fastclose(struct mptcp_sock *msk) in mptcp_check_fastclose() argument
2525 struct sock *sk = (struct sock *)msk; in mptcp_check_fastclose()
2527 if (likely(!READ_ONCE(msk->rcv_fastclose))) in mptcp_check_fastclose()
2530 mptcp_token_destroy(msk); in mptcp_check_fastclose()
2532 mptcp_for_each_subflow_safe(msk, subflow, tmp) { in mptcp_check_fastclose()
2561 set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags); in mptcp_check_fastclose()
2574 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_retrans() local
2584 err = mptcp_sched_get_retrans(msk); in __mptcp_retrans()
2587 if (mptcp_data_fin_enabled(msk)) { in __mptcp_retrans()
2592 mptcp_send_ack(msk); in __mptcp_retrans()
2606 mptcp_for_each_subflow(msk, subflow) { in __mptcp_retrans()
2618 info.limit = READ_ONCE(msk->csum_enabled) ? dfrag->data_len : in __mptcp_retrans()
2625 spin_lock_bh(&msk->fallback_lock); in __mptcp_retrans()
2626 if (__mptcp_check_fallback(msk)) { in __mptcp_retrans()
2627 spin_unlock_bh(&msk->fallback_lock); in __mptcp_retrans()
2645 msk->allow_infinite_fallback = false; in __mptcp_retrans()
2647 spin_unlock_bh(&msk->fallback_lock); in __mptcp_retrans()
2653 msk->bytes_retrans += len; in __mptcp_retrans()
2666 void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout) in mptcp_reset_tout_timer() argument
2668 struct sock *sk = (struct sock *)msk; in mptcp_reset_tout_timer()
2685 static void mptcp_mp_fail_no_response(struct mptcp_sock *msk) in mptcp_mp_fail_no_response() argument
2687 struct sock *ssk = msk->first; in mptcp_mp_fail_no_response()
2704 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_do_fastclose() local
2707 mptcp_for_each_subflow_safe(msk, subflow, tmp) in mptcp_do_fastclose()
2714 struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work); in mptcp_worker() local
2715 struct sock *sk = (struct sock *)msk; in mptcp_worker()
2724 mptcp_check_fastclose(msk); in mptcp_worker()
2726 mptcp_pm_worker(msk); in mptcp_worker()
2732 if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) in mptcp_worker()
2745 if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags)) in mptcp_worker()
2748 fail_tout = msk->first ? READ_ONCE(mptcp_subflow_ctx(msk->first)->fail_tout) : 0; in mptcp_worker()
2750 mptcp_mp_fail_no_response(msk); in mptcp_worker()
2759 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_init_sock() local
2761 INIT_LIST_HEAD(&msk->conn_list); in __mptcp_init_sock()
2762 INIT_LIST_HEAD(&msk->join_list); in __mptcp_init_sock()
2763 INIT_LIST_HEAD(&msk->rtx_queue); in __mptcp_init_sock()
2764 INIT_WORK(&msk->work, mptcp_worker); in __mptcp_init_sock()
2765 msk->out_of_order_queue = RB_ROOT; in __mptcp_init_sock()
2766 msk->first_pending = NULL; in __mptcp_init_sock()
2767 msk->timer_ival = TCP_RTO_MIN; in __mptcp_init_sock()
2768 msk->scaling_ratio = TCP_DEFAULT_SCALING_RATIO; in __mptcp_init_sock()
2770 WRITE_ONCE(msk->first, NULL); in __mptcp_init_sock()
2772 WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk))); in __mptcp_init_sock()
2773 msk->allow_infinite_fallback = true; in __mptcp_init_sock()
2774 msk->allow_subflows = true; in __mptcp_init_sock()
2775 msk->recovery = false; in __mptcp_init_sock()
2776 msk->subflow_id = 1; in __mptcp_init_sock()
2777 msk->last_data_sent = tcp_jiffies32; in __mptcp_init_sock()
2778 msk->last_data_recv = tcp_jiffies32; in __mptcp_init_sock()
2779 msk->last_ack_recv = tcp_jiffies32; in __mptcp_init_sock()
2781 mptcp_pm_data_init(msk); in __mptcp_init_sock()
2782 spin_lock_init(&msk->fallback_lock); in __mptcp_init_sock()
2785 timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0); in __mptcp_init_sock()
2838 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_clear_xmit() local
2841 WRITE_ONCE(msk->first_pending, NULL); in __mptcp_clear_xmit()
2842 list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) in __mptcp_clear_xmit()
2848 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_cancel_work() local
2850 if (cancel_work_sync(&msk->work)) in mptcp_cancel_work()
2942 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_check_send_data_fin() local
2945 msk, msk->snd_data_fin_enable, !!mptcp_send_head(sk), in mptcp_check_send_data_fin()
2946 msk->snd_nxt, msk->write_seq); in mptcp_check_send_data_fin()
2951 if (!msk->snd_data_fin_enable || msk->snd_nxt + 1 != msk->write_seq || in mptcp_check_send_data_fin()
2955 WRITE_ONCE(msk->snd_nxt, msk->write_seq); in mptcp_check_send_data_fin()
2957 mptcp_for_each_subflow(msk, subflow) { in mptcp_check_send_data_fin()
2966 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_wr_shutdown() local
2969 msk, msk->snd_data_fin_enable, sk->sk_shutdown, sk->sk_state, in __mptcp_wr_shutdown()
2973 WRITE_ONCE(msk->write_seq, msk->write_seq + 1); in __mptcp_wr_shutdown()
2974 WRITE_ONCE(msk->snd_data_fin_enable, 1); in __mptcp_wr_shutdown()
2981 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_destroy_sock() local
2983 pr_debug("msk=%p\n", msk); in __mptcp_destroy_sock()
2989 msk->pm.status = 0; in __mptcp_destroy_sock()
2990 mptcp_release_sched(msk); in __mptcp_destroy_sock()
3035 struct mptcp_sock *msk = mptcp_sk(sk); in __mptcp_close() local
3047 if (mptcp_data_avail(msk) || timeout < 0) { in __mptcp_close()
3061 mptcp_for_each_subflow(msk, subflow) { in __mptcp_close()
3070 if (ssk == msk->first) in __mptcp_close()
3090 mptcp_pm_connection_closed(msk); in __mptcp_close()
3116 static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk) in mptcp_copy_inaddrs() argument
3120 struct ipv6_pinfo *msk6 = inet6_sk(msk); in mptcp_copy_inaddrs()
3122 msk->sk_v6_daddr = ssk->sk_v6_daddr; in mptcp_copy_inaddrs()
3123 msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr; in mptcp_copy_inaddrs()
3131 inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num; in mptcp_copy_inaddrs()
3132 inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport; in mptcp_copy_inaddrs()
3133 inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport; in mptcp_copy_inaddrs()
3134 inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr; in mptcp_copy_inaddrs()
3135 inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr; in mptcp_copy_inaddrs()
3136 inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr; in mptcp_copy_inaddrs()
3141 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_disconnect() local
3147 if (msk->fastopening) in mptcp_disconnect()
3156 mptcp_pm_connection_closed(msk); in mptcp_disconnect()
3161 mptcp_destroy_common(msk, MPTCP_CF_FASTCLOSE); in mptcp_disconnect()
3166 spin_lock_bh(&msk->fallback_lock); in mptcp_disconnect()
3167 msk->allow_subflows = true; in mptcp_disconnect()
3168 msk->allow_infinite_fallback = true; in mptcp_disconnect()
3169 WRITE_ONCE(msk->flags, 0); in mptcp_disconnect()
3170 spin_unlock_bh(&msk->fallback_lock); in mptcp_disconnect()
3172 msk->cb_flags = 0; in mptcp_disconnect()
3173 msk->recovery = false; in mptcp_disconnect()
3174 WRITE_ONCE(msk->can_ack, false); in mptcp_disconnect()
3175 WRITE_ONCE(msk->fully_established, false); in mptcp_disconnect()
3176 WRITE_ONCE(msk->rcv_data_fin, false); in mptcp_disconnect()
3177 WRITE_ONCE(msk->snd_data_fin_enable, false); in mptcp_disconnect()
3178 WRITE_ONCE(msk->rcv_fastclose, false); in mptcp_disconnect()
3179 WRITE_ONCE(msk->use_64bit_ack, false); in mptcp_disconnect()
3180 WRITE_ONCE(msk->csum_enabled, mptcp_is_checksum_enabled(sock_net(sk))); in mptcp_disconnect()
3181 mptcp_pm_data_reset(msk); in mptcp_disconnect()
3183 msk->bytes_consumed = 0; in mptcp_disconnect()
3184 msk->bytes_acked = 0; in mptcp_disconnect()
3185 msk->bytes_received = 0; in mptcp_disconnect()
3186 msk->bytes_sent = 0; in mptcp_disconnect()
3187 msk->bytes_retrans = 0; in mptcp_disconnect()
3188 msk->rcvspace_init = 0; in mptcp_disconnect()
3198 struct mptcp6_sock *msk6 = container_of(mptcp_sk(sk), struct mptcp6_sock, msk); in mptcp_inet6_sk()
3251 struct mptcp_sock *msk; in mptcp_sk_clone_init() local
3270 msk = mptcp_sk(nsk); in mptcp_sk_clone_init()
3271 WRITE_ONCE(msk->local_key, subflow_req->local_key); in mptcp_sk_clone_init()
3272 WRITE_ONCE(msk->token, subflow_req->token); in mptcp_sk_clone_init()
3273 msk->in_accept_queue = 1; in mptcp_sk_clone_init()
3274 WRITE_ONCE(msk->fully_established, false); in mptcp_sk_clone_init()
3276 WRITE_ONCE(msk->csum_enabled, true); in mptcp_sk_clone_init()
3278 WRITE_ONCE(msk->write_seq, subflow_req->idsn + 1); in mptcp_sk_clone_init()
3279 WRITE_ONCE(msk->snd_nxt, msk->write_seq); in mptcp_sk_clone_init()
3280 WRITE_ONCE(msk->snd_una, msk->write_seq); in mptcp_sk_clone_init()
3281 WRITE_ONCE(msk->wnd_end, msk->snd_nxt + tcp_sk(ssk)->snd_wnd); in mptcp_sk_clone_init()
3282 msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq; in mptcp_sk_clone_init()
3283 mptcp_init_sched(msk, mptcp_sk(sk)->sched); in mptcp_sk_clone_init()
3286 msk->subflow_id = 2; in mptcp_sk_clone_init()
3297 WRITE_ONCE(msk->first, ssk); in mptcp_sk_clone_init()
3299 list_add(&subflow->node, &msk->conn_list); in mptcp_sk_clone_init()
3305 mptcp_token_accept(subflow_req, msk); in mptcp_sk_clone_init()
3313 mptcp_rcv_space_init(msk, ssk); in mptcp_sk_clone_init()
3316 __mptcp_subflow_fully_established(msk, subflow, mp_opt); in mptcp_sk_clone_init()
3323 void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk) in mptcp_rcv_space_init() argument
3327 msk->rcvspace_init = 1; in mptcp_rcv_space_init()
3328 msk->rcvq_space.copied = 0; in mptcp_rcv_space_init()
3329 msk->rcvq_space.rtt_us = 0; in mptcp_rcv_space_init()
3331 msk->rcvq_space.time = tp->tcp_mstamp; in mptcp_rcv_space_init()
3334 msk->rcvq_space.space = min_t(u32, tp->rcv_wnd, in mptcp_rcv_space_init()
3336 if (msk->rcvq_space.space == 0) in mptcp_rcv_space_init()
3337 msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT; in mptcp_rcv_space_init()
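
mptcp_rcv_space_init() reconstructs almost fully from its matches; only the tcp_sock local and the advmss term in the min_t() are assumptions:

        void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
        {
                const struct tcp_sock *tp = tcp_sk(ssk);

                msk->rcvspace_init = 1;
                msk->rcvq_space.copied = 0;
                msk->rcvq_space.rtt_us = 0;

                msk->rcvq_space.time = tp->tcp_mstamp;

                /* Initial receive-space offered to the peer. */
                msk->rcvq_space.space = min_t(u32, tp->rcv_wnd,
                                              TCP_INIT_CWND * tp->advmss);
                if (msk->rcvq_space.space == 0)
                        msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
        }
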
3340 void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags) in mptcp_destroy_common() argument
3343 struct sock *sk = (struct sock *)msk; in mptcp_destroy_common()
3348 mptcp_for_each_subflow_safe(msk, subflow, tmp) in mptcp_destroy_common()
3352 skb_rbtree_purge(&msk->out_of_order_queue); in mptcp_destroy_common()
3357 mptcp_token_destroy(msk); in mptcp_destroy_common()
3358 mptcp_pm_destroy(msk); in mptcp_destroy_common()
3363 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_destroy() local
3366 msk->free_first = 1; in mptcp_destroy()
3367 mptcp_destroy_common(msk, 0); in mptcp_destroy()
3399 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_release_cb() local
3402 unsigned long flags = (msk->cb_flags & MPTCP_FLAGS_PROCESS_CTX_NEED); in mptcp_release_cb()
3409 list_splice_init(&msk->join_list, &join_list); in mptcp_release_cb()
3418 msk->cb_flags &= ~flags; in mptcp_release_cb()
3429 mptcp_cleanup_rbuf(msk, 0); in mptcp_release_cb()
3437 if (__test_and_clear_bit(MPTCP_CLEAN_UNA, &msk->cb_flags)) in mptcp_release_cb()
3439 if (unlikely(msk->cb_flags)) { in mptcp_release_cb()
3444 if (__test_and_clear_bit(MPTCP_SYNC_STATE, &msk->cb_flags) && msk->first) in mptcp_release_cb()
3445 __mptcp_sync_state(sk, msk->pending_state); in mptcp_release_cb()
3446 if (__test_and_clear_bit(MPTCP_ERROR_REPORT, &msk->cb_flags)) in mptcp_release_cb()
3448 if (__test_and_clear_bit(MPTCP_SYNC_SNDBUF, &msk->cb_flags)) in mptcp_release_cb()
3520 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_get_port() local
3522 pr_debug("msk=%p, ssk=%p\n", msk, msk->first); in mptcp_get_port()
3523 if (WARN_ON_ONCE(!msk->first)) in mptcp_get_port()
3526 return inet_csk_get_port(msk->first, snum); in mptcp_get_port()
3532 struct mptcp_sock *msk; in mptcp_finish_connect() local
3537 msk = mptcp_sk(sk); in mptcp_finish_connect()
3547 WRITE_ONCE(msk->local_key, subflow->local_key); in mptcp_finish_connect()
3549 mptcp_pm_new_connection(msk, ssk, 0); in mptcp_finish_connect()
3564 struct mptcp_sock *msk = mptcp_sk(subflow->conn); in mptcp_finish_join() local
3565 struct sock *parent = (void *)msk; in mptcp_finish_join()
3568 pr_debug("msk=%p, subflow=%p\n", msk, subflow); in mptcp_finish_join()
3578 spin_lock_bh(&msk->fallback_lock); in mptcp_finish_join()
3579 if (!msk->allow_subflows) { in mptcp_finish_join()
3580 spin_unlock_bh(&msk->fallback_lock); in mptcp_finish_join()
3583 mptcp_subflow_joined(msk, ssk); in mptcp_finish_join()
3584 spin_unlock_bh(&msk->fallback_lock); in mptcp_finish_join()
3589 if (!mptcp_pm_allow_new_subflow(msk)) { in mptcp_finish_join()
3599 ret = __mptcp_finish_join(msk, ssk); in mptcp_finish_join()
3602 list_add_tail(&subflow->node, &msk->conn_list); in mptcp_finish_join()
3606 list_add_tail(&subflow->node, &msk->join_list); in mptcp_finish_join()
3607 __set_bit(MPTCP_FLUSH_JOIN_LIST, &msk->cb_flags); in mptcp_finish_join()
3628 static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v) in mptcp_ioctl_outq() argument
3630 const struct sock *sk = (void *)msk; in mptcp_ioctl_outq()
3639 delta = msk->write_seq - v; in mptcp_ioctl_outq()
3640 if (__mptcp_check_fallback(msk) && msk->first) { in mptcp_ioctl_outq()
3641 struct tcp_sock *tp = tcp_sk(msk->first); in mptcp_ioctl_outq()
3647 if (!((1 << msk->first->sk_state) & in mptcp_ioctl_outq()
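
mptcp_ioctl_outq() computes the SIOCOUTQ-style count of not-yet-acked bytes; a hedged reconstruction, with the listen/handshake guards, the subflow delta and the INT_MAX clamp assumed from context:

        static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v)
        {
                const struct sock *sk = (void *)msk;
                u64 delta;

                if (sk->sk_state == TCP_LISTEN)
                        return -EINVAL;

                if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
                        return 0;

                delta = msk->write_seq - v;
                if (__mptcp_check_fallback(msk) && msk->first) {
                        struct tcp_sock *tp = tcp_sk(msk->first);

                        /* After fallback the msk counters stop moving: add
                         * the first subflow's own backlog, unless it was
                         * disconnected on close.
                         */
                        if (!((1 << msk->first->sk_state) &
                              (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE)))
                                delta += READ_ONCE(tp->write_seq) - tp->snd_una;
                }
                if (delta > INT_MAX)
                        delta = INT_MAX;

                return (int)delta;
        }
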
3659 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_ioctl() local
3669 mptcp_cleanup_rbuf(msk, 0); in mptcp_ioctl()
3675 *karg = mptcp_ioctl_outq(msk, READ_ONCE(msk->snd_una)); in mptcp_ioctl()
3680 *karg = mptcp_ioctl_outq(msk, msk->snd_nxt); in mptcp_ioctl()
3693 struct mptcp_sock *msk = mptcp_sk(sk); in mptcp_connect() local
3697 ssk = __mptcp_nmpc_sk(msk); in mptcp_connect()
3708 mptcp_early_fallback(msk, subflow, MPTCP_MIB_MD5SIGFALLBACK); in mptcp_connect()
3712 mptcp_early_fallback(msk, subflow, in mptcp_connect()
3715 mptcp_early_fallback(msk, subflow, in mptcp_connect()
3719 WRITE_ONCE(msk->write_seq, subflow->idsn); in mptcp_connect()
3720 WRITE_ONCE(msk->snd_nxt, subflow->idsn); in mptcp_connect()
3721 WRITE_ONCE(msk->snd_una, subflow->idsn); in mptcp_connect()
3722 if (likely(!__mptcp_check_fallback(msk))) in mptcp_connect()
3728 if (!msk->fastopening) in mptcp_connect()
3750 if (!msk->fastopening) in mptcp_connect()
3758 mptcp_token_destroy(msk); in mptcp_connect()
3802 struct mptcp_sock *msk = mptcp_sk(sock->sk); in mptcp_bind() local
3807 ssk = __mptcp_nmpc_sk(msk); in mptcp_bind()
3829 struct mptcp_sock *msk = mptcp_sk(sock->sk); in mptcp_listen() local
3834 pr_debug("msk=%p\n", msk); in mptcp_listen()
3842 ssk = __mptcp_nmpc_sk(msk); in mptcp_listen()
3870 struct mptcp_sock *msk = mptcp_sk(sock->sk); in mptcp_stream_accept() local
3873 pr_debug("msk=%p\n", msk); in mptcp_stream_accept()
3878 ssk = READ_ONCE(msk->first); in mptcp_stream_accept()
3911 msk = mptcp_sk(newsk); in mptcp_stream_accept()
3912 msk->in_accept_queue = 0; in mptcp_stream_accept()
3917 mptcp_for_each_subflow(msk, subflow) { in mptcp_stream_accept()
3927 if (unlikely(inet_sk_state_load(msk->first) == TCP_CLOSE)) { in mptcp_stream_accept()
3928 __mptcp_close_ssk(newsk, msk->first, in mptcp_stream_accept()
3929 mptcp_subflow_ctx(msk->first), 0); in mptcp_stream_accept()
3930 if (unlikely(list_is_singular(&msk->conn_list))) in mptcp_stream_accept()
3952 static __poll_t mptcp_check_writeable(struct mptcp_sock *msk) in mptcp_check_writeable() argument
3954 struct sock *sk = (struct sock *)msk; in mptcp_check_writeable()
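
mptcp_check_writeable() follows the usual poll/NOSPACE double-check idiom; everything past the two matched lines is an assumption:

        static __poll_t mptcp_check_writeable(struct mptcp_sock *msk)
        {
                struct sock *sk = (struct sock *)msk;

                if (sk_stream_is_writeable(sk))
                        return EPOLLOUT | EPOLLWRNORM;

                /* Re-check after setting NOSPACE, so a concurrent
                 * write-space wakeup cannot be lost.
                 */
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                smp_mb__after_atomic();
                if (sk_stream_is_writeable(sk))
                        return EPOLLOUT | EPOLLWRNORM;

                return 0;
        }
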
3971 struct mptcp_sock *msk; in mptcp_poll() local
3976 msk = mptcp_sk(sk); in mptcp_poll()
3980 pr_debug("msk=%p state=%d flags=%lx\n", msk, state, msk->flags); in mptcp_poll()
3982 struct sock *ssk = READ_ONCE(msk->first); in mptcp_poll()
4001 mask |= mptcp_check_writeable(msk); in mptcp_poll()