Lines matching refs:subflow (uses of the MPTCP subflow context in the Linux kernel's net/mptcp/protocol.c; "local" tags a line that declares subflow as a local variable, "argument" one where it is a function parameter)
93 struct mptcp_subflow_context *subflow; in __mptcp_socket_create() local
104 subflow = mptcp_subflow_ctx(ssock->sk); in __mptcp_socket_create()
105 list_add(&subflow->node, &msk->conn_list); in __mptcp_socket_create()
107 subflow->request_mptcp = 1; in __mptcp_socket_create()
108 subflow->subflow_id = msk->subflow_id++; in __mptcp_socket_create()
111 WRITE_ONCE(subflow->local_id, 0); in __mptcp_socket_create()
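The __mptcp_socket_create() cluster above shows the msk creating its first subflow: the context is looked up from the freshly created kernel TCP socket via mptcp_subflow_ctx(), linked into msk->conn_list, and flagged with request_mptcp so the SYN carries MP_CAPABLE. In the kernel, mptcp_subflow_ctx() is essentially a typed view of the ULP data pointer hung off the TCP socket. A minimal user-space sketch of that accessor pattern (struct mock_sock, ulp_data and subflow_ctx() are hypothetical stand-ins, not the kernel definitions):

    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical stand-ins: in the kernel the context hangs off
     * icsk->icsk_ulp_data and mptcp_subflow_ctx() is just a typed view. */
    struct mock_subflow_ctx {
            int request_mptcp;   /* ask for MP_CAPABLE on the SYN */
            int subflow_id;      /* per-msk counter, 0 for the first */
    };

    struct mock_sock {
            void *ulp_data;      /* kernel: icsk_ulp_data */
    };

    static struct mock_subflow_ctx *subflow_ctx(struct mock_sock *sk)
    {
            return sk->ulp_data; /* no lookup, only a pointer cast */
    }

    int main(void)
    {
            struct mock_sock ssk = {
                    .ulp_data = calloc(1, sizeof(struct mock_subflow_ctx)),
            };
            struct mock_subflow_ctx *subflow = subflow_ctx(&ssk);

            subflow->request_mptcp = 1;
            subflow->subflow_id = 0;
            printf("id=%d mptcp=%d\n", subflow->subflow_id, subflow->request_mptcp);
            free(ssk.ulp_data);
            return 0;
    }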
300 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in __mptcp_move_skb() local
322 MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow); in __mptcp_move_skb()
443 static long mptcp_timeout_from_subflow(const struct mptcp_subflow_context *subflow) in mptcp_timeout_from_subflow() argument
445 const struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_timeout_from_subflow()
447 return inet_csk(ssk)->icsk_pending && !subflow->stale_count ? in mptcp_timeout_from_subflow()
453 struct mptcp_subflow_context *subflow; in mptcp_set_timeout() local
456 mptcp_for_each_subflow(mptcp_sk(sk), subflow) in mptcp_set_timeout()
457 tout = max(tout, mptcp_timeout_from_subflow(subflow)); in mptcp_set_timeout()
484 struct mptcp_subflow_context *subflow; in mptcp_send_ack() local
486 mptcp_for_each_subflow(msk, subflow) in mptcp_send_ack()
487 mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow)); in mptcp_send_ack()
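mptcp_set_timeout() and mptcp_send_ack() both use mptcp_for_each_subflow(), which in recent kernels is a thin wrapper around list_for_each_entry() walking msk->conn_list via the context's node member, so every per-subflow pass in this file is an intrusive-list traversal. A self-contained sketch of the same pattern, using GNU C typeof as the kernel's list.h does (mini_list_head, ctx_mock and msk_mock are illustrative names):

    #include <stddef.h>
    #include <stdio.h>

    /* Minimal intrusive list, the same shape as the kernel's list_head. */
    struct mini_list_head { struct mini_list_head *next, *prev; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Mirrors list_for_each_entry(); needs GNU C typeof, like the kernel. */
    #define mini_for_each_entry(pos, head, member) \
            for (pos = container_of((head)->next, typeof(*pos), member); \
                 &pos->member != (head); \
                 pos = container_of(pos->member.next, typeof(*pos), member))

    struct ctx_mock { int id; struct mini_list_head node; };
    struct msk_mock { struct mini_list_head conn_list; };

    static void mini_list_add_tail(struct mini_list_head *n, struct mini_list_head *h)
    {
            n->prev = h->prev;
            n->next = h;
            h->prev->next = n;
            h->prev = n;
    }

    int main(void)
    {
            struct msk_mock msk = { .conn_list = { &msk.conn_list, &msk.conn_list } };
            struct ctx_mock a = { .id = 0 }, b = { .id = 1 };
            struct ctx_mock *subflow;

            mini_list_add_tail(&a.node, &msk.conn_list);
            mini_list_add_tail(&b.node, &msk.conn_list);

            /* Equivalent in spirit to mptcp_for_each_subflow(msk, subflow). */
            mini_for_each_entry(subflow, &msk.conn_list, node)
                    printf("subflow %d\n", subflow->id);
            return 0;
    }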
516 struct mptcp_subflow_context *subflow; in mptcp_cleanup_rbuf() local
524 mptcp_for_each_subflow(msk, subflow) { in mptcp_cleanup_rbuf()
525 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_cleanup_rbuf()
593 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in __mptcp_move_skbs_from_subflow() local
611 map_remaining = subflow->map_data_len - in __mptcp_move_skbs_from_subflow()
612 mptcp_subflow_get_map_offset(subflow); in __mptcp_move_skbs_from_subflow()
624 subflow->map_data_len = skb->len; in __mptcp_move_skbs_from_subflow()
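Lines 611-624 work through a DSS mapping: each mapping declares map_data_len bytes of subflow payload, mptcp_subflow_get_map_offset() says how much of it was already consumed, and the mover may take at most the difference. A toy computation of that bound (struct map_mock and map_remaining_mock() are illustrative):

    #include <stdio.h>

    struct map_mock {
            unsigned int map_data_len;   /* bytes covered by the DSS mapping */
            unsigned int map_offset;     /* bytes of it already consumed */
    };

    /* How much of the current mapping is still available to move. */
    static unsigned int map_remaining_mock(const struct map_mock *m)
    {
            return m->map_data_len - m->map_offset;
    }

    int main(void)
    {
            struct map_mock m = { .map_data_len = 1460, .map_offset = 512 };

            printf("remaining=%u\n", map_remaining_mock(&m)); /* 948 */
            return 0;
    }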
738 struct mptcp_subflow_context *subflow; in __mptcp_error_report() local
741 mptcp_for_each_subflow(msk, subflow) in __mptcp_error_report()
742 if (__mptcp_subflow_error_report(sk, mptcp_subflow_tcp_sock(subflow))) in __mptcp_error_report()
792 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_data_ready() local
798 if (unlikely(subflow->disposable)) in mptcp_data_ready()
846 struct mptcp_subflow_context *tmp, *subflow; in __mptcp_flush_join_list() local
849 list_for_each_entry_safe(subflow, tmp, join_list, node) { in __mptcp_flush_join_list()
850 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_flush_join_list()
853 list_move_tail(&subflow->node, &msk->conn_list); in __mptcp_flush_join_list()
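__mptcp_flush_join_list() splices subflows parked on the join_list (put there when, roughly, the msk socket lock could not be taken at join time) into conn_list; the _safe iterator is required because list_move_tail() unlinks each node mid-walk. A toy singly linked version of that move (flush_join_list() and struct node are mocks):

    #include <stdio.h>

    struct node { int id; struct node *next; };

    /* Move everything from join_list onto the tail of conn_list; grab
     * the next pointer before relinking, as list_for_each_entry_safe()
     * does in the kernel. */
    static void flush_join_list(struct node **join_list, struct node **conn_tail)
    {
            struct node *cur = *join_list, *tmp;

            while (cur) {
                    tmp = cur->next;        /* "safe": save before moving */
                    cur->next = NULL;
                    (*conn_tail)->next = cur;
                    *conn_tail = cur;
                    cur = tmp;
            }
            *join_list = NULL;
    }

    int main(void)
    {
            struct node head = { .id = -1 }, j1 = { .id = 1 }, j2 = { .id = 2 };
            struct node *join = &j1, *tail = &head;

            j1.next = &j2;
            flush_join_list(&join, &tail);
            for (struct node *n = head.next; n; n = n->next)
                    printf("moved subflow %d\n", n->id);
            return 0;
    }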
1012 struct mptcp_subflow_context *subflow; in mptcp_enter_memory_pressure() local
1016 mptcp_for_each_subflow(msk, subflow) { in mptcp_enter_memory_pressure()
1017 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_enter_memory_pressure()
1323 void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow) in mptcp_subflow_set_active() argument
1325 if (!subflow->stale) in mptcp_subflow_set_active()
1328 subflow->stale = 0; in mptcp_subflow_set_active()
1329 MPTCP_INC_STATS(sock_net(mptcp_subflow_tcp_sock(subflow)), MPTCP_MIB_SUBFLOWRECOVER); in mptcp_subflow_set_active()
1332 bool mptcp_subflow_active(struct mptcp_subflow_context *subflow) in mptcp_subflow_active() argument
1334 if (unlikely(subflow->stale)) { in mptcp_subflow_active()
1335 u32 rcv_tstamp = READ_ONCE(tcp_sk(mptcp_subflow_tcp_sock(subflow))->rcv_tstamp); in mptcp_subflow_active()
1337 if (subflow->stale_rcv_tstamp == rcv_tstamp) in mptcp_subflow_active()
1340 mptcp_subflow_set_active(subflow); in mptcp_subflow_active()
1342 return __mptcp_subflow_active(subflow); in mptcp_subflow_active()
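mptcp_subflow_active() un-stales a subflow only when the TCP receive timestamp has advanced since the subflow was marked stale; an unchanged rcv_tstamp means the peer has shown no sign of life, so the subflow stays out of scheduling. The real helper then defers to __mptcp_subflow_active() for further TCP-state checks; the sketch below restates just the timestamp test (struct subflow_mock is illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    struct subflow_mock {
            bool stale;
            unsigned int stale_rcv_tstamp; /* rcv_tstamp when marked stale */
    };

    /* True when the subflow may be used again: either it never went
     * stale, or the peer sent something since we marked it. */
    static bool subflow_active_mock(struct subflow_mock *sf, unsigned int rcv_tstamp)
    {
            if (sf->stale) {
                    if (sf->stale_rcv_tstamp == rcv_tstamp)
                            return false;  /* still silent: keep it stale */
                    sf->stale = false;     /* progress seen: recover */
            }
            return true;
    }

    int main(void)
    {
            struct subflow_mock sf = { .stale = true, .stale_rcv_tstamp = 100 };

            printf("%d\n", subflow_active_mock(&sf, 100)); /* 0: unchanged */
            printf("%d\n", subflow_active_mock(&sf, 180)); /* 1: recovered */
            return 0;
    }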
1356 struct mptcp_subflow_context *subflow; in mptcp_subflow_get_send() local
1370 mptcp_for_each_subflow(msk, subflow) { in mptcp_subflow_get_send()
1371 bool backup = subflow->backup || subflow->request_bkup; in mptcp_subflow_get_send()
1373 trace_mptcp_subflow_get_send(subflow); in mptcp_subflow_get_send()
1374 ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_subflow_get_send()
1375 if (!mptcp_subflow_active(subflow)) in mptcp_subflow_get_send()
1378 tout = max(tout, mptcp_timeout_from_subflow(subflow)); in mptcp_subflow_get_send()
1380 pace = subflow->avg_pacing_rate; in mptcp_subflow_get_send()
1383 subflow->avg_pacing_rate = READ_ONCE(ssk->sk_pacing_rate); in mptcp_subflow_get_send()
1384 pace = subflow->avg_pacing_rate; in mptcp_subflow_get_send()
1421 subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_get_send()
1422 subflow->avg_pacing_rate = div_u64((u64)subflow->avg_pacing_rate * wmem + in mptcp_subflow_get_send()
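The truncated line 1422 above is the scheduler's pacing estimate: after sending a burst on the chosen subflow, the cached avg_pacing_rate is blended with the current sk_pacing_rate, weighting the old value by the bytes still queued (wmem) and the new one by the burst just sent. A plain restatement of that weighted average (names and the exact weighting are paraphrased from the listing, not copied from the kernel):

    #include <stdint.h>
    #include <stdio.h>

    /* Weighted blend of the cached pacing rate with the current one: the
     * old estimate is weighted by wmem, the fresh sk_pacing_rate by the
     * burst. Assumes wmem + burst > 0. */
    static uint64_t blend_pacing_rate(uint64_t avg_rate, uint64_t cur_rate,
                                      uint64_t wmem, uint64_t burst)
    {
            return (avg_rate * wmem + cur_rate * burst) / (burst + wmem);
    }

    int main(void)
    {
            /* 40 KiB queued at ~10 MB/s, 8 KiB burst at ~20 MB/s */
            uint64_t r = blend_pacing_rate(10000000, 20000000, 40960, 8192);

            printf("%llu\n", (unsigned long long)r);
            return 0;
    }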
1526 struct mptcp_subflow_context *subflow; in __mptcp_push_pending() local
1534 mptcp_for_each_subflow(msk, subflow) { in __mptcp_push_pending()
1535 if (READ_ONCE(subflow->scheduled)) { in __mptcp_push_pending()
1536 mptcp_subflow_set_scheduled(subflow, false); in __mptcp_push_pending()
1539 ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_push_pending()
1592 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in __mptcp_subflow_push_pending() local
1599 mptcp_subflow_set_scheduled(subflow, false); in __mptcp_subflow_push_pending()
1611 if (READ_ONCE(subflow->scheduled)) { in __mptcp_subflow_push_pending()
1612 mptcp_subflow_set_scheduled(subflow, false); in __mptcp_subflow_push_pending()
1619 mptcp_for_each_subflow(msk, subflow) { in __mptcp_subflow_push_pending()
1620 if (READ_ONCE(subflow->scheduled)) { in __mptcp_subflow_push_pending()
1621 xmit_ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_subflow_push_pending()
1623 mptcp_subflow_delegate(subflow, in __mptcp_subflow_push_pending()
1939 struct mptcp_subflow_context *subflow; in mptcp_rcv_space_adjust() local
1963 mptcp_for_each_subflow(msk, subflow) { in mptcp_rcv_space_adjust()
1968 tp = tcp_sk(mptcp_subflow_tcp_sock(subflow)); in mptcp_rcv_space_adjust()
2012 mptcp_for_each_subflow(msk, subflow) { in mptcp_rcv_space_adjust()
2016 ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_rcv_space_adjust()
2035 struct mptcp_subflow_context *subflow) in __mptcp_first_ready_from() argument
2037 struct mptcp_subflow_context *start_subflow = subflow; in __mptcp_first_ready_from()
2039 while (!READ_ONCE(subflow->data_avail)) { in __mptcp_first_ready_from()
2040 subflow = mptcp_next_subflow(msk, subflow); in __mptcp_first_ready_from()
2041 if (subflow == start_subflow) in __mptcp_first_ready_from()
2044 return subflow; in __mptcp_first_ready_from()
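__mptcp_first_ready_from() walks the subflow list circularly from an arbitrary starting point until it finds one with data_avail set, and gives up once it wraps back to where it started, which keeps receive-side draining from always favoring the first subflow. The same wraparound scan over a toy array (first_ready_mock() is a hypothetical name):

    #include <stdbool.h>
    #include <stdio.h>

    /* Circular scan: begin at "start", stop on the first ready slot,
     * return -1 after a full lap with nothing available. */
    static int first_ready_mock(const bool *data_avail, int n, int start)
    {
            int i = start;

            while (!data_avail[i]) {
                    i = (i + 1) % n;        /* kernel: mptcp_next_subflow() */
                    if (i == start)
                            return -1;      /* wrapped: nothing to read */
            }
            return i;
    }

    int main(void)
    {
            bool avail[4] = { false, false, true, false };

            printf("%d\n", first_ready_mock(avail, 4, 3)); /* prints 2 */
            return 0;
    }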
2049 struct mptcp_subflow_context *subflow; in __mptcp_move_skbs() local
2058 mptcp_for_each_subflow(msk, subflow) in __mptcp_move_skbs()
2059 __mptcp_rcvbuf_update(sk, subflow->tcp_sock); in __mptcp_move_skbs()
2061 subflow = list_first_entry(&msk->conn_list, in __mptcp_move_skbs()
2074 subflow = __mptcp_first_ready_from(msk, subflow); in __mptcp_move_skbs()
2075 if (!subflow) in __mptcp_move_skbs()
2078 ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_move_skbs()
2085 subflow = mptcp_next_subflow(msk, subflow); in __mptcp_move_skbs()
2267 struct mptcp_subflow_context *subflow; in mptcp_subflow_get_retrans() local
2270 mptcp_for_each_subflow(msk, subflow) { in mptcp_subflow_get_retrans()
2271 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_subflow_get_retrans()
2273 if (!__mptcp_subflow_active(subflow)) in mptcp_subflow_get_retrans()
2279 min_stale_count = min_t(int, min_stale_count, subflow->stale_count); in mptcp_subflow_get_retrans()
2283 if (subflow->backup || subflow->request_bkup) { in mptcp_subflow_get_retrans()
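mptcp_subflow_get_retrans() scans for a subflow to retransmit on: inactive subflows are skipped, the minimum stale_count seen is tracked (a high minimum suggests the whole connection rather than one path is stuck), and backup subflows are used only when nothing better is active. A compact restatement of that selection shape (pick_retrans_mock() and struct sf_mock are illustrative; the real function carries more bookkeeping):

    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct sf_mock { bool active, backup; int stale_count; };

    /* Prefer the first active non-backup subflow, remember a backup one
     * as fallback, and track the minimum stale_count across the walk. */
    static int pick_retrans_mock(const struct sf_mock *sf, int n, int *min_stale)
    {
            int pick = -1, backup_pick = -1;

            *min_stale = INT_MAX;
            for (int i = 0; i < n; i++) {
                    if (!sf[i].active)
                            continue;
                    if (sf[i].stale_count < *min_stale)
                            *min_stale = sf[i].stale_count;
                    if (sf[i].backup) {
                            if (backup_pick < 0)
                                    backup_pick = i;
                            continue;
                    }
                    if (pick < 0)
                            pick = i;
            }
            return pick >= 0 ? pick : backup_pick;
    }

    int main(void)
    {
            struct sf_mock sfs[] = {
                    { .active = true, .backup = true,  .stale_count = 2 },
                    { .active = true, .backup = false, .stale_count = 0 },
            };
            int min_stale;

            printf("pick=%d min_stale=%d\n",
                   pick_retrans_mock(sfs, 2, &min_stale), min_stale);
            return 0;
    }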
2346 struct mptcp_subflow_context *subflow, in __mptcp_subflow_disconnect() argument
2355 mptcp_subflow_ctx_reset(subflow); in __mptcp_subflow_disconnect()
2370 struct mptcp_subflow_context *subflow, in __mptcp_close_ssk() argument
2393 list_del(&subflow->node); in __mptcp_close_ssk()
2403 subflow->send_fastclose = 1; in __mptcp_close_ssk()
2408 __mptcp_subflow_disconnect(ssk, subflow, flags); in __mptcp_close_ssk()
2414 subflow->disposable = 1; in __mptcp_close_ssk()
2422 kfree_rcu(subflow, rcu); in __mptcp_close_ssk()
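__mptcp_close_ssk() removes the context from conn_list, sets disposable so a late mptcp_data_ready() (line 798 above) bails out instead of touching a dying subflow, and frees the context with kfree_rcu() so concurrent lock-free readers never see freed memory. User space has no kfree_rcu(), so the sketch below only mirrors the ordering with an explicit deferred-free list (all names are mocks):

    #include <stdio.h>
    #include <stdlib.h>

    struct ctx_mock {
            int disposable;          /* late events must be ignored once set */
            struct ctx_mock *defer;  /* toy stand-in for the RCU free queue */
    };

    /* Mirrors the ordering in __mptcp_close_ssk(): mark first, actually
     * free only after readers are known to be done (kernel: kfree_rcu();
     * here: an explicit deferred-free list). */
    static void teardown_mock(struct ctx_mock *sf, struct ctx_mock **defer_head)
    {
            sf->disposable = 1;      /* mptcp_data_ready() now bails out */
            sf->defer = *defer_head; /* queue instead of free(sf) right away */
            *defer_head = sf;
    }

    int main(void)
    {
            struct ctx_mock *sf = calloc(1, sizeof(*sf)), *defer = NULL;

            teardown_mock(sf, &defer);
            /* ... grace period: all concurrent readers finish ... */
            while (defer) {
                    struct ctx_mock *next = defer->defer;

                    free(defer);
                    defer = next;
            }
            return 0;
    }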
2463 struct mptcp_subflow_context *subflow) in mptcp_close_ssk() argument
2466 if (subflow->close_event_done) in mptcp_close_ssk()
2469 subflow->close_event_done = true; in mptcp_close_ssk()
2477 mptcp_pm_subflow_check_next(mptcp_sk(sk), subflow); in mptcp_close_ssk()
2479 __mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_PUSH); in mptcp_close_ssk()
2489 struct mptcp_subflow_context *subflow, *tmp; in __mptcp_close_subflow() local
2494 mptcp_for_each_subflow_safe(msk, subflow, tmp) { in __mptcp_close_subflow()
2495 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_close_subflow()
2507 mptcp_close_ssk(sk, ssk, subflow); in __mptcp_close_subflow()
2524 struct mptcp_subflow_context *subflow, *tmp; in mptcp_check_fastclose() local
2532 mptcp_for_each_subflow_safe(msk, subflow, tmp) { in mptcp_check_fastclose()
2533 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); in mptcp_check_fastclose()
2575 struct mptcp_subflow_context *subflow; in __mptcp_retrans() local
2606 mptcp_for_each_subflow(msk, subflow) { in __mptcp_retrans()
2607 if (READ_ONCE(subflow->scheduled)) { in __mptcp_retrans()
2610 mptcp_subflow_set_scheduled(subflow, false); in __mptcp_retrans()
2612 ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_retrans()
2703 struct mptcp_subflow_context *subflow, *tmp; in mptcp_do_fastclose() local
2707 mptcp_for_each_subflow_safe(msk, subflow, tmp) in mptcp_do_fastclose()
2708 __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), in mptcp_do_fastclose()
2709 subflow, MPTCP_CF_FASTCLOSE); in mptcp_do_fastclose()
2941 struct mptcp_subflow_context *subflow; in mptcp_check_send_data_fin() local
2957 mptcp_for_each_subflow(msk, subflow) { in mptcp_check_send_data_fin()
2958 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); in mptcp_check_send_data_fin()
3034 struct mptcp_subflow_context *subflow; in __mptcp_close() local
3061 mptcp_for_each_subflow(msk, subflow) { in __mptcp_close()
3062 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_close()
3071 subflow->fail_tout = 0; in __mptcp_close()
3250 struct mptcp_subflow_context *subflow; in mptcp_sk_clone_init() local
3298 subflow = mptcp_subflow_ctx(ssk); in mptcp_sk_clone_init()
3299 list_add(&subflow->node, &msk->conn_list); in mptcp_sk_clone_init()
3316 __mptcp_subflow_fully_established(msk, subflow, mp_opt); in mptcp_sk_clone_init()
3342 struct mptcp_subflow_context *subflow, *tmp; in mptcp_destroy_common() local
3348 mptcp_for_each_subflow_safe(msk, subflow, tmp) in mptcp_destroy_common()
3349 __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, flags); in mptcp_destroy_common()
3481 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_process_delegated() local
3482 struct sock *sk = subflow->conn; in mptcp_subflow_process_delegated()
3531 struct mptcp_subflow_context *subflow; in mptcp_finish_connect() local
3535 subflow = mptcp_subflow_ctx(ssk); in mptcp_finish_connect()
3536 sk = subflow->conn; in mptcp_finish_connect()
3539 pr_debug("msk=%p, token=%u\n", sk, subflow->token); in mptcp_finish_connect()
3541 subflow->map_seq = subflow->iasn; in mptcp_finish_connect()
3542 subflow->map_subflow_seq = 1; in mptcp_finish_connect()
3547 WRITE_ONCE(msk->local_key, subflow->local_key); in mptcp_finish_connect()
3563 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_finish_join() local
3564 struct mptcp_sock *msk = mptcp_sk(subflow->conn); in mptcp_finish_join()
3568 pr_debug("msk=%p, subflow=%p\n", msk, subflow); in mptcp_finish_join()
3572 subflow->reset_reason = MPTCP_RST_EMPTCP; in mptcp_finish_join()
3577 if (!list_empty(&subflow->node)) { in mptcp_finish_join()
3602 list_add_tail(&subflow->node, &msk->conn_list); in mptcp_finish_join()
3606 list_add_tail(&subflow->node, &msk->join_list); in mptcp_finish_join()
3613 subflow->reset_reason = MPTCP_RST_EPROHIBIT; in mptcp_finish_join()
3692 struct mptcp_subflow_context *subflow; in mptcp_connect() local
3702 subflow = mptcp_subflow_ctx(ssk); in mptcp_connect()
3708 mptcp_early_fallback(msk, subflow, MPTCP_MIB_MD5SIGFALLBACK); in mptcp_connect()
3710 if (subflow->request_mptcp) { in mptcp_connect()
3712 mptcp_early_fallback(msk, subflow, in mptcp_connect()
3715 mptcp_early_fallback(msk, subflow, in mptcp_connect()
3719 WRITE_ONCE(msk->write_seq, subflow->idsn); in mptcp_connect()
3720 WRITE_ONCE(msk->snd_nxt, subflow->idsn); in mptcp_connect()
3721 WRITE_ONCE(msk->snd_una, subflow->idsn); in mptcp_connect()
3889 struct mptcp_subflow_context *subflow; in mptcp_stream_accept() local
3892 subflow = mptcp_subflow_ctx(newsk); in mptcp_stream_accept()
3893 new_mptcp_sock = subflow->conn; in mptcp_stream_accept()
3917 mptcp_for_each_subflow(msk, subflow) { in mptcp_stream_accept()
3918 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_stream_accept()
4049 struct mptcp_subflow_context *subflow; in mptcp_napi_poll() local
4053 while ((subflow = mptcp_subflow_delegated_next(delegated)) != NULL) { in mptcp_napi_poll()
4054 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_napi_poll()
4058 mptcp_subflow_process_delegated(ssk, xchg(&subflow->delegated_status, 0)); in mptcp_napi_poll()
4066 clear_bit(MPTCP_DELEGATE_SCHEDULED, &subflow->delegated_status); in mptcp_napi_poll()
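mptcp_napi_poll() pops delegated subflows and, when it can lock the subflow socket, consumes the pending work by atomically exchanging delegated_status with 0; the returned bits name the actions this pass now exclusively owns. When the socket is busy it instead just clears MPTCP_DELEGATE_SCHEDULED and lets the socket owner run the actions. A C11-atomics sketch of the exchange half (the DELEGATE_* values here are illustrative, not the kernel's):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Illustrative action bits; the kernel's MPTCP_DELEGATE_* differ. */
    #define DELEGATE_SCHEDULED  0x1UL
    #define DELEGATE_SEND       0x2UL

    static atomic_ulong delegated_status;

    int main(void)
    {
            unsigned long status;

            /* A poster sets an action bit plus SCHEDULED, then queues the
             * subflow (kernel: mptcp_subflow_delegate()). */
            atomic_fetch_or(&delegated_status, DELEGATE_SEND | DELEGATE_SCHEDULED);

            /* The poll side swaps the whole word to 0: every action bit
             * it sees is now exclusively owned by this pass. */
            status = atomic_exchange(&delegated_status, 0);
            if (status & DELEGATE_SEND)
                    printf("process delegated send\n");
            return 0;
    }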