Lines Matching refs:conn

63 #define TCP_RTO_MS (conn->rto)
82 static enum net_verdict tcp_in(struct tcp *conn, struct net_pkt *pkt);
84 static void tcp_out(struct tcp *conn, uint8_t flags);
88 size_t (*tcp_recv_cb)(struct tcp *conn, struct net_pkt *pkt) = NULL;
248 const struct tcp *conn = ctx->tcp; in net_tcp_endpoint_copy() local
258 if (conn->state < TCP_ESTABLISHED) { in net_tcp_endpoint_copy()
277 memcpy(local, &conn->src.sa, newlen); in net_tcp_endpoint_copy()
282 memcpy(peer, &conn->dst.sa, newlen); in net_tcp_endpoint_copy()
422 static void tcp_derive_rto(struct tcp *conn) in tcp_derive_rto() argument
438 conn->rto = (uint16_t)rto; in tcp_derive_rto()
440 ARG_UNUSED(conn); in tcp_derive_rto()
448 static void tcp_new_reno_log(struct tcp *conn, char *step) in tcp_new_reno_log() argument
451 conn, step, conn->ca.cwnd, conn->ca.ssthresh, in tcp_new_reno_log()
452 conn->ca.pending_fast_retransmit_bytes); in tcp_new_reno_log()
455 static void tcp_new_reno_init(struct tcp *conn) in tcp_new_reno_init() argument
457 conn->ca.cwnd = conn_mss(conn) * TCP_CONGESTION_INITIAL_WIN; in tcp_new_reno_init()
458 conn->ca.ssthresh = conn_mss(conn) * TCP_CONGESTION_INITIAL_SSTHRESH; in tcp_new_reno_init()
459 conn->ca.pending_fast_retransmit_bytes = 0; in tcp_new_reno_init()
460 tcp_new_reno_log(conn, "init"); in tcp_new_reno_init()
463 static void tcp_new_reno_fast_retransmit(struct tcp *conn) in tcp_new_reno_fast_retransmit() argument
465 if (conn->ca.pending_fast_retransmit_bytes == 0) { in tcp_new_reno_fast_retransmit()
466 conn->ca.ssthresh = MAX(conn_mss(conn) * 2, conn->unacked_len / 2); in tcp_new_reno_fast_retransmit()
468 conn->ca.cwnd = conn_mss(conn) * 3 + conn->ca.ssthresh; in tcp_new_reno_fast_retransmit()
469 conn->ca.pending_fast_retransmit_bytes = conn->unacked_len; in tcp_new_reno_fast_retransmit()
470 tcp_new_reno_log(conn, "fast_retransmit"); in tcp_new_reno_fast_retransmit()
474 static void tcp_new_reno_timeout(struct tcp *conn) in tcp_new_reno_timeout() argument
476 conn->ca.ssthresh = MAX(conn_mss(conn) * 2, conn->unacked_len / 2); in tcp_new_reno_timeout()
477 conn->ca.cwnd = conn_mss(conn); in tcp_new_reno_timeout()
478 tcp_new_reno_log(conn, "timeout"); in tcp_new_reno_timeout()
482 static void tcp_new_reno_dup_ack(struct tcp *conn) in tcp_new_reno_dup_ack() argument
484 int32_t new_win = conn->ca.cwnd; in tcp_new_reno_dup_ack()
486 new_win += conn_mss(conn); in tcp_new_reno_dup_ack()
487 conn->ca.cwnd = MIN(new_win, UINT16_MAX); in tcp_new_reno_dup_ack()
488 tcp_new_reno_log(conn, "dup_ack"); in tcp_new_reno_dup_ack()
491 static void tcp_new_reno_pkts_acked(struct tcp *conn, uint32_t acked_len) in tcp_new_reno_pkts_acked() argument
493 int32_t new_win = conn->ca.cwnd; in tcp_new_reno_pkts_acked()
494 int32_t win_inc = MIN(acked_len, conn_mss(conn)); in tcp_new_reno_pkts_acked()
496 if (conn->ca.pending_fast_retransmit_bytes == 0) { in tcp_new_reno_pkts_acked()
497 if (conn->ca.cwnd < conn->ca.ssthresh) { in tcp_new_reno_pkts_acked()
501 new_win += ((win_inc * win_inc) + conn->ca.cwnd - 1) / conn->ca.cwnd; in tcp_new_reno_pkts_acked()
503 conn->ca.cwnd = MIN(new_win, UINT16_MAX); in tcp_new_reno_pkts_acked()
506 if (conn->ca.pending_fast_retransmit_bytes <= acked_len) { in tcp_new_reno_pkts_acked()
507 conn->ca.pending_fast_retransmit_bytes = 0; in tcp_new_reno_pkts_acked()
508 conn->ca.cwnd = conn->ca.ssthresh; in tcp_new_reno_pkts_acked()
510 conn->ca.pending_fast_retransmit_bytes -= acked_len; in tcp_new_reno_pkts_acked()
511 conn->ca.cwnd -= acked_len; in tcp_new_reno_pkts_acked()
514 tcp_new_reno_log(conn, "pkts_acked"); in tcp_new_reno_pkts_acked()
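The tcp_new_reno_* references above are the sender-side congestion control: fast retransmit halves ssthresh and inflates cwnd by three segments, a retransmission timeout collapses cwnd to one MSS, each duplicate ACK inflates cwnd by one MSS (capped at UINT16_MAX), and pkts_acked() grows the window again. A minimal standalone sketch of the pkts_acked() growth rule follows; the fixed MSS, the plain ca_state struct and the slow-start branch body are assumptions, everything else mirrors the lines listed above.

#include <stdint.h>
#include <stdio.h>

#define MSS 1460                       /* stand-in for conn_mss(conn) */

struct ca_state {
        uint16_t cwnd;
        uint16_t ssthresh;
        uint32_t pending_fast_retransmit_bytes;
};

static void pkts_acked(struct ca_state *ca, uint32_t acked_len)
{
        int32_t new_win = ca->cwnd;
        int32_t win_inc = acked_len < MSS ? (int32_t)acked_len : MSS;

        if (ca->pending_fast_retransmit_bytes == 0) {
                if (ca->cwnd < ca->ssthresh) {
                        /* slow start: grow by the acked amount, at most one MSS
                         * per call (assumed branch body, not visible above)
                         */
                        new_win += win_inc;
                } else {
                        /* congestion avoidance: about one MSS per window of ACKs */
                        new_win += ((win_inc * win_inc) + ca->cwnd - 1) / ca->cwnd;
                }
                ca->cwnd = new_win > UINT16_MAX ? UINT16_MAX : (uint16_t)new_win;
        } else if (ca->pending_fast_retransmit_bytes <= acked_len) {
                /* fast recovery finished: collapse the window back to ssthresh */
                ca->pending_fast_retransmit_bytes = 0;
                ca->cwnd = ca->ssthresh;
        } else {
                /* still recovering: deflate by the amount just acknowledged */
                ca->pending_fast_retransmit_bytes -= acked_len;
                ca->cwnd -= acked_len;
        }
}

int main(void)
{
        struct ca_state ca = { .cwnd = MSS, .ssthresh = 3 * MSS };

        for (int i = 1; i <= 6; i++) {
                pkts_acked(&ca, MSS);
                printf("ack %d: cwnd=%u\n", i, (unsigned int)ca.cwnd);
        }
        return 0;
}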
517 static void tcp_ca_init(struct tcp *conn) in tcp_ca_init() argument
519 tcp_new_reno_init(conn); in tcp_ca_init()
522 static void tcp_ca_fast_retransmit(struct tcp *conn) in tcp_ca_fast_retransmit() argument
524 tcp_new_reno_fast_retransmit(conn); in tcp_ca_fast_retransmit()
527 static void tcp_ca_timeout(struct tcp *conn) in tcp_ca_timeout() argument
529 tcp_new_reno_timeout(conn); in tcp_ca_timeout()
532 static void tcp_ca_dup_ack(struct tcp *conn) in tcp_ca_dup_ack() argument
534 tcp_new_reno_dup_ack(conn); in tcp_ca_dup_ack()
537 static void tcp_ca_pkts_acked(struct tcp *conn, uint32_t acked_len) in tcp_ca_pkts_acked() argument
539 tcp_new_reno_pkts_acked(conn, acked_len); in tcp_ca_pkts_acked()
543 static void tcp_ca_init(struct tcp *conn) { } in tcp_ca_init() argument
545 static void tcp_ca_fast_retransmit(struct tcp *conn) { } in tcp_ca_fast_retransmit() argument
547 static void tcp_ca_timeout(struct tcp *conn) { } in tcp_ca_timeout() argument
549 static void tcp_ca_dup_ack(struct tcp *conn) { } in tcp_ca_dup_ack() argument
551 static void tcp_ca_pkts_acked(struct tcp *conn, uint32_t acked_len) { } in tcp_ca_pkts_acked() argument
559 static void keep_alive_timer_init(struct tcp *conn) in keep_alive_timer_init() argument
561 conn->keep_alive = false; in keep_alive_timer_init()
562 conn->keep_idle = CONFIG_NET_TCP_KEEPIDLE_DEFAULT; in keep_alive_timer_init()
563 conn->keep_intvl = CONFIG_NET_TCP_KEEPINTVL_DEFAULT; in keep_alive_timer_init()
564 conn->keep_cnt = CONFIG_NET_TCP_KEEPCNT_DEFAULT; in keep_alive_timer_init()
566 conn->keep_idle, conn->keep_intvl, conn->keep_cnt); in keep_alive_timer_init()
567 k_work_init_delayable(&conn->keepalive_timer, tcp_send_keepalive_probe); in keep_alive_timer_init()
578 static void keep_alive_timer_restart(struct tcp *conn) in keep_alive_timer_restart() argument
580 if (!conn->keep_alive || conn->state != TCP_ESTABLISHED) { in keep_alive_timer_restart()
584 conn->keep_cur = 0; in keep_alive_timer_restart()
585 k_work_reschedule_for_queue(&tcp_work_q, &conn->keepalive_timer, in keep_alive_timer_restart()
586 K_SECONDS(conn->keep_idle)); in keep_alive_timer_restart()
589 static void keep_alive_timer_stop(struct tcp *conn) in keep_alive_timer_stop() argument
591 k_work_cancel_delayable(&conn->keepalive_timer); in keep_alive_timer_stop()
594 static int set_tcp_keep_alive(struct tcp *conn, const void *value, size_t len) in set_tcp_keep_alive() argument
598 if (conn == NULL || value == NULL || len != sizeof(int)) { in set_tcp_keep_alive()
607 conn->keep_alive = (bool)keep_alive; in set_tcp_keep_alive()
610 keep_alive_timer_restart(conn); in set_tcp_keep_alive()
612 keep_alive_timer_stop(conn); in set_tcp_keep_alive()
618 static int set_tcp_keep_idle(struct tcp *conn, const void *value, size_t len) in set_tcp_keep_idle() argument
622 if (conn == NULL || value == NULL || len != sizeof(int)) { in set_tcp_keep_idle()
631 conn->keep_idle = keep_idle; in set_tcp_keep_idle()
633 keep_alive_timer_restart(conn); in set_tcp_keep_idle()
638 static int set_tcp_keep_intvl(struct tcp *conn, const void *value, size_t len) in set_tcp_keep_intvl() argument
642 if (conn == NULL || value == NULL || len != sizeof(int)) { in set_tcp_keep_intvl()
651 conn->keep_intvl = keep_intvl; in set_tcp_keep_intvl()
653 keep_alive_timer_restart(conn); in set_tcp_keep_intvl()
658 static int set_tcp_keep_cnt(struct tcp *conn, const void *value, size_t len) in set_tcp_keep_cnt() argument
662 if (conn == NULL || value == NULL || len != sizeof(int)) { in set_tcp_keep_cnt()
671 conn->keep_cnt = keep_cnt; in set_tcp_keep_cnt()
673 keep_alive_timer_restart(conn); in set_tcp_keep_cnt()
678 static int get_tcp_keep_alive(struct tcp *conn, void *value, size_t *len) in get_tcp_keep_alive() argument
680 if (conn == NULL || value == NULL || len == NULL || in get_tcp_keep_alive()
685 *((int *)value) = (int)conn->keep_alive; in get_tcp_keep_alive()
690 static int get_tcp_keep_idle(struct tcp *conn, void *value, size_t *len) in get_tcp_keep_idle() argument
692 if (conn == NULL || value == NULL || len == NULL || in get_tcp_keep_idle()
697 *((int *)value) = (int)conn->keep_idle; in get_tcp_keep_idle()
702 static int get_tcp_keep_intvl(struct tcp *conn, void *value, size_t *len) in get_tcp_keep_intvl() argument
704 if (conn == NULL || value == NULL || len == NULL || in get_tcp_keep_intvl()
709 *((int *)value) = (int)conn->keep_intvl; in get_tcp_keep_intvl()
714 static int get_tcp_keep_cnt(struct tcp *conn, void *value, size_t *len) in get_tcp_keep_cnt() argument
716 if (conn == NULL || value == NULL || len == NULL || in get_tcp_keep_cnt()
721 *((int *)value) = (int)conn->keep_cnt; in get_tcp_keep_cnt()
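The set_/get_tcp_keep_* references above validate an int-sized option value and then restart or stop the keep-alive timer; they are the backend for the usual socket-level keep-alive knobs. A hedged POSIX-style usage sketch follows, assuming a stream socket and that the SO_KEEPALIVE and TCP_KEEPIDLE/TCP_KEEPINTVL/TCP_KEEPCNT option names are available on the target platform.

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>

/* Enable keep-alive with a 60 s idle time, 10 s probe interval, 5 probes. */
static int enable_keepalive(int fd)
{
        int on = 1, idle = 60, intvl = 10, cnt = 5;

        if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) < 0 ||
            setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle)) < 0 ||
            setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl)) < 0 ||
            setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt)) < 0) {
                return -1;
        }
        return 0;
}

int main(void)
{
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        if (fd >= 0 && enable_keepalive(fd) == 0) {
                printf("keep-alive configured on fd %d\n", fd);
        }
        return 0;
}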
743 static void tcp_send_queue_flush(struct tcp *conn) in tcp_send_queue_flush() argument
747 k_work_cancel_delayable(&conn->send_timer); in tcp_send_queue_flush()
749 while ((pkt = tcp_slist(conn, &conn->send_queue, get, in tcp_send_queue_flush()
757 struct tcp *conn = CONTAINER_OF(work, struct tcp, conn_release); in tcp_conn_release() local
761 if (conn->test_closed_cb != NULL) { in tcp_conn_release()
762 conn->test_closed_cb(conn, conn->test_user_data); in tcp_conn_release()
769 while ((pkt = k_fifo_get(&conn->recv_data, K_NO_WAIT)) != NULL) { in tcp_conn_release()
773 k_mutex_lock(&conn->lock, K_FOREVER); in tcp_conn_release()
775 if (conn->context->conn_handler) { in tcp_conn_release()
776 net_conn_unregister(conn->context->conn_handler); in tcp_conn_release()
777 conn->context->conn_handler = NULL; in tcp_conn_release()
783 if (conn->iface != NULL && conn->addr_ref_done) { in tcp_conn_release()
784 net_if_addr_unref(conn->iface, conn->src.sa.sa_family, in tcp_conn_release()
785 conn->src.sa.sa_family == AF_INET ? in tcp_conn_release()
786 (const void *)&conn->src.sin.sin_addr : in tcp_conn_release()
787 (const void *)&conn->src.sin6.sin6_addr, in tcp_conn_release()
791 conn->context->tcp = NULL; in tcp_conn_release()
792 conn->state = TCP_UNUSED; in tcp_conn_release()
794 tcp_send_queue_flush(conn); in tcp_conn_release()
796 (void)k_work_cancel_delayable(&conn->send_data_timer); in tcp_conn_release()
797 tcp_pkt_unref(conn->send_data); in tcp_conn_release()
800 tcp_pkt_unref(conn->queue_recv_data); in tcp_conn_release()
803 (void)k_work_cancel_delayable(&conn->timewait_timer); in tcp_conn_release()
804 (void)k_work_cancel_delayable(&conn->fin_timer); in tcp_conn_release()
805 (void)k_work_cancel_delayable(&conn->persist_timer); in tcp_conn_release()
806 (void)k_work_cancel_delayable(&conn->ack_timer); in tcp_conn_release()
807 (void)k_work_cancel_delayable(&conn->send_timer); in tcp_conn_release()
808 (void)k_work_cancel_delayable(&conn->recv_queue_timer); in tcp_conn_release()
809 keep_alive_timer_stop(conn); in tcp_conn_release()
811 k_mutex_unlock(&conn->lock); in tcp_conn_release()
813 net_context_unref(conn->context); in tcp_conn_release()
814 conn->context = NULL; in tcp_conn_release()
817 sys_slist_find_and_remove(&tcp_conns, &conn->next); in tcp_conn_release()
820 k_mem_slab_free(&tcp_conns_slab, (void *)conn); in tcp_conn_release()
835 static int tcp_conn_unref(struct tcp *conn) in tcp_conn_unref() argument
837 int ref_count = atomic_get(&conn->ref_count); in tcp_conn_unref()
839 NET_DBG("conn: %p, ref_count=%d", conn, ref_count); in tcp_conn_unref()
841 ref_count = atomic_dec(&conn->ref_count) - 1; in tcp_conn_unref()
843 tp_out(net_context_get_family(conn->context), conn->iface, in tcp_conn_unref()
852 k_work_submit_to_queue(&tcp_work_q, &conn->conn_release); in tcp_conn_unref()
858 #define tcp_conn_close(conn, status) \ argument
859 tcp_conn_close_debug(conn, status, __func__, __LINE__)
861 static int tcp_conn_close_debug(struct tcp *conn, int status, in tcp_conn_close_debug() argument
864 static int tcp_conn_close(struct tcp *conn, int status) in tcp_conn_close_debug()
868 NET_DBG("conn: %p closed by TCP stack (%s():%d)", conn, caller, line); in tcp_conn_close_debug()
870 k_mutex_lock(&conn->lock, K_FOREVER); in tcp_conn_close_debug()
871 conn_state(conn, TCP_CLOSED); in tcp_conn_close_debug()
872 keep_alive_timer_stop(conn); in tcp_conn_close_debug()
873 k_mutex_unlock(&conn->lock); in tcp_conn_close_debug()
875 if (conn->in_connect) { in tcp_conn_close_debug()
876 if (conn->connect_cb) { in tcp_conn_close_debug()
877 conn->connect_cb(conn->context, status, conn->context->user_data); in tcp_conn_close_debug()
880 conn->connect_cb = NULL; in tcp_conn_close_debug()
883 conn->in_connect = false; in tcp_conn_close_debug()
884 k_sem_reset(&conn->connect_sem); in tcp_conn_close_debug()
885 } else if (conn->context->recv_cb) { in tcp_conn_close_debug()
886 conn->context->recv_cb(conn->context, NULL, NULL, NULL, in tcp_conn_close_debug()
887 status, conn->recv_user_data); in tcp_conn_close_debug()
890 k_sem_give(&conn->tx_sem); in tcp_conn_close_debug()
892 return tcp_conn_unref(conn); in tcp_conn_close_debug()
895 static void tcp_send_process_no_lock(struct tcp *conn) in tcp_send_process_no_lock() argument
899 while ((pkt = tcp_slist(conn, &conn->send_queue, get, struct net_pkt, next))) { in tcp_send_process_no_lock()
907 struct tcp *conn = CONTAINER_OF(dwork, struct tcp, send_timer); in tcp_send_process() local
909 k_mutex_lock(&conn->lock, K_FOREVER); in tcp_send_process()
911 tcp_send_process_no_lock(conn); in tcp_send_process()
913 k_mutex_unlock(&conn->lock); in tcp_send_process()
918 static void tcp_nbr_reachability_hint(struct tcp *conn) in tcp_nbr_reachability_hint() argument
923 if (net_context_get_family(conn->context) != AF_INET6) { in tcp_nbr_reachability_hint()
928 iface = net_context_get_iface(conn->context); in tcp_nbr_reachability_hint()
933 if ((now - conn->last_nd_hint_time) > (net_if_ipv6_get_reachable_time(iface) / 2)) { in tcp_nbr_reachability_hint()
934 net_ipv6_nbr_reachability_hint(iface, &conn->dst.sin6.sin6_addr); in tcp_nbr_reachability_hint()
935 conn->last_nd_hint_time = now; in tcp_nbr_reachability_hint()
969 static const char *tcp_conn_state(struct tcp *conn, struct net_pkt *pkt) in tcp_conn_state() argument
975 tcp_state_to_str(conn->state, false), in tcp_conn_state()
976 conn->seq, conn->ack); in tcp_conn_state()
1074 static bool tcp_short_window(struct tcp *conn) in tcp_short_window() argument
1076 int32_t threshold = MIN(conn_mss(conn), conn->recv_win_max / 2); in tcp_short_window()
1078 if (conn->recv_win > threshold) { in tcp_short_window()
1085 static bool tcp_need_window_update(struct tcp *conn) in tcp_need_window_update() argument
1087 int32_t threshold = MAX(conn_mss(conn), conn->recv_win_max / 2); in tcp_need_window_update()
1092 return (conn->recv_win == conn->recv_win_max && in tcp_need_window_update()
1093 conn->recv_win_sent <= threshold); in tcp_need_window_update()
1105 static int tcp_update_recv_wnd(struct tcp *conn, int32_t delta) in tcp_update_recv_wnd() argument
1111 new_win = conn->recv_win + delta; in tcp_update_recv_wnd()
1114 } else if (new_win > conn->recv_win_max) { in tcp_update_recv_wnd()
1115 new_win = conn->recv_win_max; in tcp_update_recv_wnd()
1118 short_win_before = tcp_short_window(conn); in tcp_update_recv_wnd()
1120 conn->recv_win = new_win; in tcp_update_recv_wnd()
1122 short_win_after = tcp_short_window(conn); in tcp_update_recv_wnd()
1125 tcp_need_window_update(conn)) && in tcp_update_recv_wnd()
1126 conn->state == TCP_ESTABLISHED) { in tcp_update_recv_wnd()
1127 k_work_cancel_delayable(&conn->ack_timer); in tcp_update_recv_wnd()
1128 tcp_out(conn, ACK); in tcp_update_recv_wnd()
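tcp_update_recv_wnd() above adjusts the advertised receive window by a delta, clamps it to recv_win_max, and sends an immediate window-update ACK when an established connection climbs out of the "short window" region (at or below MIN(MSS, recv_win_max / 2)) or when tcp_need_window_update() asks for one. The sketch below covers only the short-window transition; the clamp of a negative window to zero is an assumption, since that branch is not visible in the listing.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSS 1460

struct rwnd {
        int32_t recv_win;
        int32_t recv_win_max;
};

static bool short_window(const struct rwnd *w)
{
        int32_t threshold = MSS < w->recv_win_max / 2 ? MSS : w->recv_win_max / 2;

        return w->recv_win <= threshold;
}

/* Returns true when the caller should send an immediate window-update ACK. */
static bool update_recv_wnd(struct rwnd *w, int32_t delta)
{
        bool short_before = short_window(w);
        int32_t new_win = w->recv_win + delta;

        if (new_win < 0) {                      /* assumed clamp, not shown above */
                new_win = 0;
        } else if (new_win > w->recv_win_max) {
                new_win = w->recv_win_max;
        }
        w->recv_win = new_win;

        return short_before && !short_window(w);
}

int main(void)
{
        struct rwnd w = { .recv_win = MSS / 2, .recv_win_max = 8 * MSS };

        printf("app reads 4*MSS: update ACK? %d (win=%d)\n",
               update_recv_wnd(&w, 4 * MSS), (int)w.recv_win);
        printf("stack consumes 2*MSS: update ACK? %d (win=%d)\n",
               update_recv_wnd(&w, -2 * MSS), (int)w.recv_win);
        return 0;
}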
1134 static size_t tcp_check_pending_data(struct tcp *conn, struct net_pkt *pkt, in tcp_check_pending_data() argument
1140 !net_pkt_is_empty(conn->queue_recv_data)) { in tcp_check_pending_data()
1162 pending_seq = tcp_get_seq(conn->queue_recv_data->buffer); in tcp_check_pending_data()
1165 pending_len = net_pkt_get_len(conn->queue_recv_data); in tcp_check_pending_data()
1176 conn->queue_recv_data->buffer); in tcp_check_pending_data()
1177 conn->queue_recv_data->buffer = NULL; in tcp_check_pending_data()
1179 k_work_cancel_delayable(&conn->recv_queue_timer); in tcp_check_pending_data()
1183 net_buf_unref(conn->queue_recv_data->buffer); in tcp_check_pending_data()
1184 conn->queue_recv_data->buffer = NULL; in tcp_check_pending_data()
1186 k_work_cancel_delayable(&conn->recv_queue_timer); in tcp_check_pending_data()
1196 static enum net_verdict tcp_data_get(struct tcp *conn, struct net_pkt *pkt, size_t *len) in tcp_data_get() argument
1201 tcp_recv_cb(conn, pkt); in tcp_data_get()
1205 if (conn->context->recv_cb) { in tcp_data_get()
1209 *len += tcp_check_pending_data(conn, pkt, *len); in tcp_data_get()
1216 tcp_update_recv_wnd(conn, -*len); in tcp_data_get()
1217 if (*len > conn->recv_win_sent) { in tcp_data_get()
1218 conn->recv_win_sent = 0; in tcp_data_get()
1220 conn->recv_win_sent -= *len; in tcp_data_get()
1229 k_fifo_put(&conn->recv_data, pkt); in tcp_data_get()
1252 static int tcp_header_add(struct tcp *conn, struct net_pkt *pkt, uint8_t flags, in tcp_header_add() argument
1265 UNALIGNED_PUT(conn->src.sin.sin_port, UNALIGNED_MEMBER_ADDR(th, th_sport)); in tcp_header_add()
1266 UNALIGNED_PUT(conn->dst.sin.sin_port, UNALIGNED_MEMBER_ADDR(th, th_dport)); in tcp_header_add()
1269 if (conn->send_options.mss_found) { in tcp_header_add()
1274 UNALIGNED_PUT(htons(conn->recv_win), UNALIGNED_MEMBER_ADDR(th, th_win)); in tcp_header_add()
1278 UNALIGNED_PUT(htonl(conn->ack), UNALIGNED_MEMBER_ADDR(th, th_ack)); in tcp_header_add()
1284 static int ip_header_add(struct tcp *conn, struct net_pkt *pkt) in ip_header_add() argument
1287 return net_context_create_ipv4_new(conn->context, pkt, in ip_header_add()
1288 &conn->src.sin.sin_addr, in ip_header_add()
1289 &conn->dst.sin.sin_addr); in ip_header_add()
1293 return net_context_create_ipv6_new(conn->context, pkt, in ip_header_add()
1294 &conn->src.sin6.sin6_addr, in ip_header_add()
1295 &conn->dst.sin6.sin6_addr); in ip_header_add()
1301 static int set_tcp_nodelay(struct tcp *conn, const void *value, size_t len) in set_tcp_nodelay() argument
1315 conn->tcp_nodelay = (bool)no_delay_int; in set_tcp_nodelay()
1320 static int get_tcp_nodelay(struct tcp *conn, void *value, size_t *len) in get_tcp_nodelay() argument
1322 int no_delay_int = (int)conn->tcp_nodelay; in get_tcp_nodelay()
1332 static int net_tcp_set_mss_opt(struct tcp *conn, struct net_pkt *pkt) in net_tcp_set_mss_opt() argument
1343 recv_mss = net_tcp_get_supported_mss(conn); in net_tcp_set_mss_opt()
1460 static int tcp_out_ext(struct tcp *conn, uint8_t flags, struct net_pkt *data, in tcp_out_ext() argument
1467 if (conn->send_options.mss_found) { in tcp_out_ext()
1471 pkt = tcp_pkt_alloc(conn, alloc_len); in tcp_out_ext()
1483 ret = ip_header_add(conn, pkt); in tcp_out_ext()
1489 ret = tcp_header_add(conn, pkt, flags, seq); in tcp_out_ext()
1495 if (conn->send_options.mss_found) { in tcp_out_ext()
1496 ret = net_tcp_set_mss_opt(conn, pkt); in tcp_out_ext()
1515 conn->recv_win_sent = conn->recv_win; in tcp_out_ext()
1523 sys_slist_append(&conn->send_queue, &pkt->next); in tcp_out_ext()
1525 &conn->send_timer, K_NO_WAIT); in tcp_out_ext()
1533 static void tcp_out(struct tcp *conn, uint8_t flags) in tcp_out() argument
1535 (void)tcp_out_ext(conn, flags, NULL /* no data */, conn->seq + conn->unacked_len); in tcp_out()
1556 static int tcp_pkt_trim_data(struct tcp *conn, struct net_pkt *pkt, size_t data_len, in tcp_pkt_trim_data() argument
1573 new_pkt = tcp_pkt_alloc(conn, 4); in tcp_pkt_trim_data()
1667 static bool tcp_window_full(struct tcp *conn) in tcp_window_full() argument
1669 bool window_full = (conn->send_data_total >= conn->send_win); in tcp_window_full()
1672 window_full = window_full || (conn->send_data_total >= conn->ca.cwnd); in tcp_window_full()
1676 NET_DBG("conn: %p TX window_full", conn); in tcp_window_full()
1682 static int tcp_unsent_len(struct tcp *conn) in tcp_unsent_len() argument
1686 if (conn->unacked_len > conn->send_data_total) { in tcp_unsent_len()
1688 conn->send_data_total, conn->unacked_len); in tcp_unsent_len()
1693 unsent_len = conn->send_data_total - conn->unacked_len; in tcp_unsent_len()
1694 if (conn->unacked_len >= conn->send_win) { in tcp_unsent_len()
1697 unsent_len = MIN(unsent_len, conn->send_win - conn->unacked_len); in tcp_unsent_len()
1700 if (conn->unacked_len >= conn->ca.cwnd) { in tcp_unsent_len()
1703 unsent_len = MIN(unsent_len, conn->ca.cwnd - conn->unacked_len); in tcp_unsent_len()
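tcp_window_full() and tcp_unsent_len() above gate transmission: queued data beyond the peer's advertised window, or beyond cwnd when congestion avoidance is compiled in, is held back. Below is a small sketch of the effective-unsent computation; the early returns for an already-consumed window are assumptions, since only the surrounding conditions appear in the listing.

#include <stdint.h>
#include <stdio.h>

static int32_t min32(int32_t a, int32_t b) { return a < b ? a : b; }

/* How much of the queued data may be transmitted right now. */
static int32_t unsent_len(int32_t send_data_total, int32_t unacked_len,
                          int32_t send_win, int32_t cwnd)
{
        int32_t unsent;

        if (unacked_len > send_data_total) {
                return 0;               /* accounting error in the real code */
        }

        unsent = send_data_total - unacked_len;

        if (unacked_len >= send_win) {
                return 0;               /* peer's window already full (assumed) */
        }
        unsent = min32(unsent, send_win - unacked_len);

        if (unacked_len >= cwnd) {
                return 0;               /* congestion window already full (assumed) */
        }
        return min32(unsent, cwnd - unacked_len);
}

int main(void)
{
        /* 10 kB queued, 4 kB in flight, peer window 8 kB, cwnd 6 kB -> 2048 sendable */
        printf("%d\n", (int)unsent_len(10240, 4096, 8192, 6144));
        return 0;
}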
1714 static void tcp_setup_retransmission(struct tcp *conn) in tcp_setup_retransmission() argument
1716 conn->send_data_retries = 0; in tcp_setup_retransmission()
1717 k_work_reschedule_for_queue(&tcp_work_q, &conn->send_data_timer, K_MSEC(TCP_RTO_MS)); in tcp_setup_retransmission()
1720 static int tcp_send_data(struct tcp *conn) in tcp_send_data() argument
1726 len = MIN(tcp_unsent_len(conn), conn_mss(conn)); in tcp_send_data()
1732 NET_DBG("conn: %p no data to send", conn); in tcp_send_data()
1737 pkt = tcp_pkt_alloc(conn, len); in tcp_send_data()
1739 NET_ERR("conn: %p packet allocation failed, len=%d", conn, len); in tcp_send_data()
1744 ret = tcp_pkt_peek(pkt, conn->send_data, conn->unacked_len, len); in tcp_send_data()
1751 ret = tcp_out_ext(conn, PSH | ACK, pkt, conn->seq + conn->unacked_len); in tcp_send_data()
1753 conn->unacked_len += len; in tcp_send_data()
1755 if (conn->data_mode == TCP_DATA_MODE_RESEND) { in tcp_send_data()
1756 net_stats_update_tcp_resent(conn->iface, len); in tcp_send_data()
1757 net_stats_update_tcp_seg_rexmit(conn->iface); in tcp_send_data()
1759 net_stats_update_tcp_sent(conn->iface, len); in tcp_send_data()
1760 net_stats_update_tcp_seg_sent(conn->iface); in tcp_send_data()
1770 conn_send_data_dump(conn); in tcp_send_data()
1778 static int tcp_send_queued_data(struct tcp *conn) in tcp_send_queued_data() argument
1783 if (conn->data_mode == TCP_DATA_MODE_RESEND) { in tcp_send_queued_data()
1787 while (tcp_unsent_len(conn) > 0) { in tcp_send_queued_data()
1789 if ((conn->tcp_nodelay == false) && (conn->unacked_len > 0)) { in tcp_send_queued_data()
1791 if (tcp_unsent_len(conn) < conn_mss(conn)) { in tcp_send_queued_data()
1801 ret = tcp_send_data(conn); in tcp_send_queued_data()
1807 if (conn->send_data_total) { in tcp_send_queued_data()
1811 if (k_work_delayable_remaining_get(&conn->send_data_timer)) { in tcp_send_queued_data()
1816 tcp_setup_retransmission(conn); in tcp_send_queued_data()
1825 struct tcp *conn = CONTAINER_OF(dwork, struct tcp, recv_queue_timer); in tcp_cleanup_recv_queue() local
1827 k_mutex_lock(&conn->lock, K_FOREVER); in tcp_cleanup_recv_queue()
1829 NET_DBG("Cleanup recv queue conn %p len %zd seq %u", conn, in tcp_cleanup_recv_queue()
1830 net_pkt_get_len(conn->queue_recv_data), in tcp_cleanup_recv_queue()
1831 tcp_get_seq(conn->queue_recv_data->buffer)); in tcp_cleanup_recv_queue()
1833 net_buf_unref(conn->queue_recv_data->buffer); in tcp_cleanup_recv_queue()
1834 conn->queue_recv_data->buffer = NULL; in tcp_cleanup_recv_queue()
1836 k_mutex_unlock(&conn->lock); in tcp_cleanup_recv_queue()
1842 struct tcp *conn = CONTAINER_OF(dwork, struct tcp, send_data_timer); in tcp_resend_data() local
1847 k_mutex_lock(&conn->lock, K_FOREVER); in tcp_resend_data()
1849 NET_DBG("send_data_retries=%hu", conn->send_data_retries); in tcp_resend_data()
1851 if (conn->send_data_retries >= tcp_retries) { in tcp_resend_data()
1852 NET_DBG("conn: %p close, data retransmissions exceeded", conn); in tcp_resend_data()
1857 switch (conn->state) { in tcp_resend_data()
1859 (void)tcp_out_ext(conn, SYN, NULL, conn->seq - 1); in tcp_resend_data()
1862 (void)tcp_out_ext(conn, SYN | ACK, NULL, conn->seq - 1); in tcp_resend_data()
1867 (conn->send_data_retries == 0)) { in tcp_resend_data()
1868 tcp_ca_timeout(conn); in tcp_resend_data()
1869 if (tcp_window_full(conn)) { in tcp_resend_data()
1870 (void)k_sem_take(&conn->tx_sem, K_NO_WAIT); in tcp_resend_data()
1874 conn->data_mode = TCP_DATA_MODE_RESEND; in tcp_resend_data()
1875 conn->unacked_len = 0; in tcp_resend_data()
1877 ret = tcp_send_data(conn); in tcp_resend_data()
1880 conn->data_mode = TCP_DATA_MODE_SEND; in tcp_resend_data()
1889 (void)tcp_out_ext(conn, FIN | ACK, NULL, conn->seq - 1); in tcp_resend_data()
1895 conn->send_data_retries++; in tcp_resend_data()
1899 if (conn->send_data_retries < tcp_retries) { in tcp_resend_data()
1901 for (int i = 0; i < conn->send_data_retries; i++) { in tcp_resend_data()
1906 k_work_reschedule_for_queue(&tcp_work_q, &conn->send_data_timer, in tcp_resend_data()
1910 k_mutex_unlock(&conn->lock); in tcp_resend_data()
1913 tcp_conn_close(conn, -ETIMEDOUT); in tcp_resend_data()
1920 struct tcp *conn = CONTAINER_OF(dwork, struct tcp, timewait_timer); in tcp_timewait_timeout() local
1923 NET_DBG("conn: %p %s", conn, tcp_conn_state(conn, NULL)); in tcp_timewait_timeout()
1925 (void)tcp_conn_close(conn, -ETIMEDOUT); in tcp_timewait_timeout()
1928 static void tcp_establish_timeout(struct tcp *conn) in tcp_establish_timeout() argument
1931 NET_DBG("conn: %p %s", conn, tcp_conn_state(conn, NULL)); in tcp_establish_timeout()
1933 (void)tcp_conn_close(conn, -ETIMEDOUT); in tcp_establish_timeout()
1939 struct tcp *conn = CONTAINER_OF(dwork, struct tcp, fin_timer); in tcp_fin_timeout() local
1942 if (conn->state == TCP_SYN_RECEIVED) { in tcp_fin_timeout()
1943 tcp_establish_timeout(conn); in tcp_fin_timeout()
1948 NET_DBG("conn: %p %s", conn, tcp_conn_state(conn, NULL)); in tcp_fin_timeout()
1950 (void)tcp_conn_close(conn, -ETIMEDOUT); in tcp_fin_timeout()
1956 struct tcp *conn = CONTAINER_OF(dwork, struct tcp, fin_timer); in tcp_last_ack_timeout() local
1959 NET_DBG("conn: %p %s", conn, tcp_conn_state(conn, NULL)); in tcp_last_ack_timeout()
1961 (void)tcp_conn_close(conn, -ETIMEDOUT); in tcp_last_ack_timeout()
1964 static void tcp_setup_last_ack_timer(struct tcp *conn) in tcp_setup_last_ack_timer() argument
1972 k_work_init_delayable(&conn->fin_timer, tcp_last_ack_timeout); in tcp_setup_last_ack_timer()
1978 &conn->fin_timer, in tcp_setup_last_ack_timer()
1982 static void tcp_cancel_last_ack_timer(struct tcp *conn) in tcp_cancel_last_ack_timer() argument
1984 k_work_cancel_delayable(&conn->fin_timer); in tcp_cancel_last_ack_timer()
1991 struct tcp *conn = CONTAINER_OF(dwork, struct tcp, keepalive_timer); in tcp_send_keepalive_probe() local
1993 if (conn->state != TCP_ESTABLISHED) { in tcp_send_keepalive_probe()
1994 NET_DBG("conn: %p TCP connection not established", conn); in tcp_send_keepalive_probe()
1998 if (!conn->keep_alive) { in tcp_send_keepalive_probe()
1999 NET_DBG("conn: %p keepalive is not enabled", conn); in tcp_send_keepalive_probe()
2003 conn->keep_cur++; in tcp_send_keepalive_probe()
2004 if (conn->keep_cur > conn->keep_cnt) { in tcp_send_keepalive_probe()
2006 conn); in tcp_send_keepalive_probe()
2007 tcp_conn_close(conn, -ETIMEDOUT); in tcp_send_keepalive_probe()
2011 NET_DBG("conn: %p keepalive probe", conn); in tcp_send_keepalive_probe()
2012 k_work_reschedule_for_queue(&tcp_work_q, &conn->keepalive_timer, in tcp_send_keepalive_probe()
2013 K_SECONDS(conn->keep_intvl)); in tcp_send_keepalive_probe()
2015 (void)tcp_out_ext(conn, ACK, NULL, conn->seq + conn->unacked_len - 1); in tcp_send_keepalive_probe()
2022 struct tcp *conn = CONTAINER_OF(dwork, struct tcp, persist_timer); in tcp_send_zwp() local
2024 k_mutex_lock(&conn->lock, K_FOREVER); in tcp_send_zwp()
2026 (void)tcp_out_ext(conn, ACK, NULL, conn->seq + conn->unacked_len - 1); in tcp_send_zwp()
2028 tcp_derive_rto(conn); in tcp_send_zwp()
2030 if (conn->send_win == 0) { in tcp_send_zwp()
2034 if (conn->zwp_retries < 63) { in tcp_send_zwp()
2035 conn->zwp_retries++; in tcp_send_zwp()
2038 timeout <<= conn->zwp_retries; in tcp_send_zwp()
2044 &tcp_work_q, &conn->persist_timer, K_MSEC(timeout)); in tcp_send_zwp()
2047 k_mutex_unlock(&conn->lock); in tcp_send_zwp()
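tcp_send_zwp() above is the zero-window probe: while the peer keeps advertising a zero send window it resends an ACK one byte behind the next sequence number and reschedules itself with an exponentially growing delay (timeout <<= zwp_retries, with the retry counter capped at 63 so a 64-bit value is never shifted out of range). A sketch of that backoff schedule follows; the 300 ms base RTO is chosen only for the demo.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Next zero-window-probe delay: exponential backoff, shift capped at 63. */
static uint64_t next_zwp_timeout(uint64_t rto_ms, uint8_t *zwp_retries)
{
        if (*zwp_retries < 63) {
                (*zwp_retries)++;
        }
        return rto_ms << *zwp_retries;
}

int main(void)
{
        uint8_t retries = 0;

        for (int i = 0; i < 5; i++) {
                printf("probe %d after %" PRIu64 " ms\n",
                       i + 1, next_zwp_timeout(300, &retries));
        }
        return 0;
}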
2053 struct tcp *conn = CONTAINER_OF(dwork, struct tcp, ack_timer); in tcp_send_ack() local
2055 k_mutex_lock(&conn->lock, K_FOREVER); in tcp_send_ack()
2057 tcp_out(conn, ACK); in tcp_send_ack()
2059 k_mutex_unlock(&conn->lock); in tcp_send_ack()
2062 static void tcp_conn_ref(struct tcp *conn) in tcp_conn_ref() argument
2064 int ref_count = atomic_inc(&conn->ref_count) + 1; in tcp_conn_ref()
2066 NET_DBG("conn: %p, ref_count: %d", conn, ref_count); in tcp_conn_ref()
2071 struct tcp *conn = NULL; in tcp_conn_alloc() local
2074 ret = k_mem_slab_alloc(&tcp_conns_slab, (void **)&conn, K_NO_WAIT); in tcp_conn_alloc()
2080 memset(conn, 0, sizeof(*conn)); in tcp_conn_alloc()
2083 conn->queue_recv_data = tcp_rx_pkt_alloc(conn, 0); in tcp_conn_alloc()
2084 if (conn->queue_recv_data == NULL) { in tcp_conn_alloc()
2086 conn); in tcp_conn_alloc()
2091 conn->send_data = tcp_pkt_alloc(conn, 0); in tcp_conn_alloc()
2092 if (conn->send_data == NULL) { in tcp_conn_alloc()
2093 NET_ERR("Cannot allocate %s queue for conn %p", "send", conn); in tcp_conn_alloc()
2097 k_mutex_init(&conn->lock); in tcp_conn_alloc()
2098 k_fifo_init(&conn->recv_data); in tcp_conn_alloc()
2099 k_sem_init(&conn->connect_sem, 0, K_SEM_MAX_LIMIT); in tcp_conn_alloc()
2100 k_sem_init(&conn->tx_sem, 1, 1); in tcp_conn_alloc()
2102 conn->in_connect = false; in tcp_conn_alloc()
2103 conn->state = TCP_LISTEN; in tcp_conn_alloc()
2104 conn->recv_win_max = tcp_rx_window; in tcp_conn_alloc()
2105 conn->recv_win = conn->recv_win_max; in tcp_conn_alloc()
2106 conn->recv_win_sent = conn->recv_win_max; in tcp_conn_alloc()
2107 conn->send_win_max = MAX(tcp_tx_window, NET_IPV6_MTU); in tcp_conn_alloc()
2108 conn->send_win = conn->send_win_max; in tcp_conn_alloc()
2109 conn->tcp_nodelay = false; in tcp_conn_alloc()
2110 conn->addr_ref_done = false; in tcp_conn_alloc()
2112 conn->dup_ack_cnt = 0; in tcp_conn_alloc()
2118 conn->ca.cwnd = UINT16_MAX; in tcp_conn_alloc()
2124 conn->seq = 0U; in tcp_conn_alloc()
2126 sys_slist_init(&conn->send_queue); in tcp_conn_alloc()
2128 k_work_init_delayable(&conn->send_timer, tcp_send_process); in tcp_conn_alloc()
2129 k_work_init_delayable(&conn->timewait_timer, tcp_timewait_timeout); in tcp_conn_alloc()
2130 k_work_init_delayable(&conn->fin_timer, tcp_fin_timeout); in tcp_conn_alloc()
2131 k_work_init_delayable(&conn->send_data_timer, tcp_resend_data); in tcp_conn_alloc()
2132 k_work_init_delayable(&conn->recv_queue_timer, tcp_cleanup_recv_queue); in tcp_conn_alloc()
2133 k_work_init_delayable(&conn->persist_timer, tcp_send_zwp); in tcp_conn_alloc()
2134 k_work_init_delayable(&conn->ack_timer, tcp_send_ack); in tcp_conn_alloc()
2135 k_work_init(&conn->conn_release, tcp_conn_release); in tcp_conn_alloc()
2136 keep_alive_timer_init(conn); in tcp_conn_alloc()
2138 tcp_conn_ref(conn); in tcp_conn_alloc()
2141 sys_slist_append(&tcp_conns, &conn->next); in tcp_conn_alloc()
2144 NET_DBG("conn: %p", conn); in tcp_conn_alloc()
2146 return conn; in tcp_conn_alloc()
2149 if (CONFIG_NET_TCP_RECV_QUEUE_TIMEOUT && conn->queue_recv_data) { in tcp_conn_alloc()
2150 tcp_pkt_unref(conn->queue_recv_data); in tcp_conn_alloc()
2151 conn->queue_recv_data = NULL; in tcp_conn_alloc()
2154 k_mem_slab_free(&tcp_conns_slab, (void *)conn); in tcp_conn_alloc()
2161 struct tcp *conn; in net_tcp_get() local
2163 conn = tcp_conn_alloc(); in net_tcp_get()
2164 if (conn == NULL) { in net_tcp_get()
2170 conn->context = context; in net_tcp_get()
2171 context->tcp = conn; in net_tcp_get()
2188 static bool tcp_conn_cmp(struct tcp *conn, struct net_pkt *pkt) in tcp_conn_cmp() argument
2190 return tcp_endpoint_cmp(&conn->src, pkt, TCP_EP_DST) && in tcp_conn_cmp()
2191 tcp_endpoint_cmp(&conn->dst, pkt, TCP_EP_SRC); in tcp_conn_cmp()
2197 struct tcp *conn; in tcp_conn_search() local
2202 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&tcp_conns, conn, tmp, next) { in tcp_conn_search()
2203 found = tcp_conn_cmp(conn, pkt); in tcp_conn_search()
2211 return found ? conn : NULL; in tcp_conn_search()
2222 struct tcp *conn; in tcp_recv() local
2229 conn = tcp_conn_search(pkt); in tcp_recv()
2230 if (conn) { in tcp_recv()
2239 conn = tcp_conn_new(pkt); in tcp_recv()
2240 if (!conn) { in tcp_recv()
2245 conn->accepted_conn = conn_old; in tcp_recv()
2248 if (conn) { in tcp_recv()
2249 verdict = tcp_in(conn, pkt); in tcp_recv()
2370 struct tcp *conn = NULL; in tcp_conn_new() local
2382 conn = context->tcp; in tcp_conn_new()
2383 conn->iface = pkt->iface; in tcp_conn_new()
2384 tcp_derive_rto(conn); in tcp_conn_new()
2386 net_context_set_family(conn->context, net_pkt_family(pkt)); in tcp_conn_new()
2388 if (tcp_endpoint_set(&conn->dst, pkt, TCP_EP_SRC) < 0) { in tcp_conn_new()
2390 conn = NULL; in tcp_conn_new()
2394 if (tcp_endpoint_set(&conn->src, pkt, TCP_EP_DST) < 0) { in tcp_conn_new()
2396 conn = NULL; in tcp_conn_new()
2401 net_sprint_addr(conn->src.sa.sa_family, in tcp_conn_new()
2402 (const void *)&conn->src.sin.sin_addr), in tcp_conn_new()
2403 net_sprint_addr(conn->dst.sa.sa_family, in tcp_conn_new()
2404 (const void *)&conn->dst.sin.sin_addr)); in tcp_conn_new()
2406 memcpy(&context->remote, &conn->dst, sizeof(context->remote)); in tcp_conn_new()
2416 &conn->src.sin6.sin6_addr); in tcp_conn_new()
2420 &conn->src.sin.sin_addr); in tcp_conn_new()
2427 conn = NULL; in tcp_conn_new()
2445 net_sin6_ptr(&context->local)->sin6_port = conn->src.sin6.sin6_port; in tcp_conn_new()
2448 net_sin_ptr(&context->local)->sin_port = conn->src.sin.sin_port; in tcp_conn_new()
2453 conn->seq = tcp_init_isn(&local_addr, &context->remote); in tcp_conn_new()
2464 ntohs(conn->dst.sin.sin_port),/* local port */ in tcp_conn_new()
2465 ntohs(conn->src.sin.sin_port),/* remote port */ in tcp_conn_new()
2471 conn = NULL; in tcp_conn_new()
2475 net_if_addr_ref(conn->iface, conn->dst.sa.sa_family, in tcp_conn_new()
2476 conn->src.sa.sa_family == AF_INET ? in tcp_conn_new()
2477 (const void *)&conn->src.sin.sin_addr : in tcp_conn_new()
2478 (const void *)&conn->src.sin6.sin6_addr); in tcp_conn_new()
2479 conn->addr_ref_done = true; in tcp_conn_new()
2482 if (!conn) { in tcp_conn_new()
2486 return conn; in tcp_conn_new()
2499 static bool tcp_validate_seq(struct tcp *conn, struct tcphdr *hdr, size_t len) in tcp_validate_seq() argument
2501 if ((conn->state == TCP_LISTEN) || (conn->state == TCP_SYN_SENT)) { in tcp_validate_seq()
2505 if (conn->recv_win > 0) { in tcp_validate_seq()
2507 return ((net_tcp_seq_cmp(th_seq(hdr), conn->ack) >= 0) && in tcp_validate_seq()
2508 (net_tcp_seq_cmp(th_seq(hdr), conn->ack + conn->recv_win) < 0)); in tcp_validate_seq()
2510 return (((net_tcp_seq_cmp(th_seq(hdr), conn->ack) >= 0) && in tcp_validate_seq()
2511 (net_tcp_seq_cmp(th_seq(hdr), conn->ack + conn->recv_win) < 0)) || in tcp_validate_seq()
2512 ((net_tcp_seq_cmp(th_seq(hdr) + len - 1, conn->ack) >= 0) && in tcp_validate_seq()
2513 (net_tcp_seq_cmp(th_seq(hdr) + len - 1, conn->ack + conn->recv_win) < 0))); in tcp_validate_seq()
2517 return (net_tcp_seq_cmp(th_seq(hdr), conn->ack) == 0); in tcp_validate_seq()
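tcp_validate_seq() above implements the standard acceptability test: with a non-zero receive window a segment is in-window when its sequence number (or, for data-carrying segments, its last byte) falls in [ack, ack + recv_win); with a zero window only the exact next sequence number is accepted. Here is a standalone sketch of the wrap-safe comparison and the zero-length case; seq_cmp() is assumed to behave like net_tcp_seq_cmp().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static int32_t seq_cmp(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b);        /* wrap-safe: negative if a is "before" b */
}

static bool seq_in_recv_window(uint32_t seq, uint32_t ack, uint32_t recv_win)
{
        if (recv_win > 0) {
                return seq_cmp(seq, ack) >= 0 &&
                       seq_cmp(seq, ack + recv_win) < 0;
        }
        /* zero window: only the exact next in-order sequence number is acceptable */
        return seq_cmp(seq, ack) == 0;
}

int main(void)
{
        uint32_t ack = 0xfffffff0u;     /* near the 32-bit wrap point */

        printf("%d %d %d\n",
               seq_in_recv_window(ack, ack, 4096),        /* 1: in window */
               seq_in_recv_window(ack + 100, ack, 4096),  /* 1: in window across wrap */
               seq_in_recv_window(ack - 1, ack, 4096));   /* 0: old data */
        return 0;
}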
2523 static int32_t tcp_compute_new_length(struct tcp *conn, struct tcphdr *hdr, size_t len, in tcp_compute_new_length() argument
2535 new_len = (int32_t)(len) - net_tcp_seq_cmp(conn->ack, th_seq(hdr)); in tcp_compute_new_length()
2544 static enum tcp_state tcp_enter_time_wait(struct tcp *conn) in tcp_enter_time_wait() argument
2547 k_work_cancel_delayable(&conn->fin_timer); in tcp_enter_time_wait()
2549 &tcp_work_q, &conn->timewait_timer, in tcp_enter_time_wait()
2580 static void tcp_queue_recv_data(struct tcp *conn, struct net_pkt *pkt, in tcp_queue_recv_data() argument
2587 NET_DBG("conn: %p len %zd seq %u ack %u", conn, len, seq, conn->ack); in tcp_queue_recv_data()
2602 NET_DBG("Queuing data: conn %p", conn); in tcp_queue_recv_data()
2605 if (!net_pkt_is_empty(conn->queue_recv_data)) { in tcp_queue_recv_data()
2635 pending_seq = tcp_get_seq(conn->queue_recv_data->buffer); in tcp_queue_recv_data()
2637 pending_len = net_pkt_get_len(conn->queue_recv_data); in tcp_queue_recv_data()
2646 conn->queue_recv_data->buffer); in tcp_queue_recv_data()
2649 conn->queue_recv_data->buffer = pkt->buffer; in tcp_queue_recv_data()
2655 last = net_buf_frag_last(conn->queue_recv_data->buffer); in tcp_queue_recv_data()
2667 net_buf_unref(conn->queue_recv_data->buffer); in tcp_queue_recv_data()
2668 conn->queue_recv_data->buffer = pkt->buffer; in tcp_queue_recv_data()
2673 net_pkt_remove_tail(conn->queue_recv_data, in tcp_queue_recv_data()
2680 net_buf_frag_add(conn->queue_recv_data->buffer, in tcp_queue_recv_data()
2688 NET_DBG("All pending data: conn %p", conn); in tcp_queue_recv_data()
2689 if (check_seq_list(conn->queue_recv_data->buffer) == false) { in tcp_queue_recv_data()
2691 conn); in tcp_queue_recv_data()
2693 net_buf_unref(conn->queue_recv_data->buffer); in tcp_queue_recv_data()
2694 conn->queue_recv_data->buffer = NULL; in tcp_queue_recv_data()
2700 net_pkt_append_buffer(conn->queue_recv_data, pkt->buffer); in tcp_queue_recv_data()
2708 if (!k_work_delayable_is_pending(&conn->recv_queue_timer)) { in tcp_queue_recv_data()
2710 &tcp_work_q, &conn->recv_queue_timer, in tcp_queue_recv_data()
2716 static enum net_verdict tcp_data_received(struct tcp *conn, struct net_pkt *pkt, in tcp_data_received() argument
2725 ret = tcp_data_get(conn, pkt, len); in tcp_data_received()
2727 net_stats_update_tcp_seg_recv(conn->iface); in tcp_data_received()
2728 conn_ack(conn, *len); in tcp_data_received()
2733 if (tcp_short_window(conn) || !psh) { in tcp_data_received()
2734 k_work_schedule_for_queue(&tcp_work_q, &conn->ack_timer, in tcp_data_received()
2737 k_work_cancel_delayable(&conn->ack_timer); in tcp_data_received()
2738 tcp_out(conn, ACK); in tcp_data_received()
2744 static void tcp_out_of_order_data(struct tcp *conn, struct net_pkt *pkt, in tcp_out_of_order_data() argument
2762 tcp_queue_recv_data(conn, pkt, data_len, seq); in tcp_out_of_order_data()
2765 static void tcp_check_sock_options(struct tcp *conn) in tcp_check_sock_options() argument
2771 (void)net_context_get_option(conn->context, NET_OPT_SNDBUF, in tcp_check_sock_options()
2776 (void)net_context_get_option(conn->context, NET_OPT_RCVBUF, in tcp_check_sock_options()
2780 if (sndbuf_opt > 0 && sndbuf_opt != conn->send_win_max) { in tcp_check_sock_options()
2781 k_mutex_lock(&conn->lock, K_FOREVER); in tcp_check_sock_options()
2783 conn->send_win_max = sndbuf_opt; in tcp_check_sock_options()
2784 if (conn->send_win > conn->send_win_max) { in tcp_check_sock_options()
2785 conn->send_win = conn->send_win_max; in tcp_check_sock_options()
2788 k_mutex_unlock(&conn->lock); in tcp_check_sock_options()
2791 if (rcvbuf_opt > 0 && rcvbuf_opt != conn->recv_win_max) { in tcp_check_sock_options()
2794 k_mutex_lock(&conn->lock, K_FOREVER); in tcp_check_sock_options()
2796 diff = rcvbuf_opt - conn->recv_win_max; in tcp_check_sock_options()
2797 conn->recv_win_max = rcvbuf_opt; in tcp_check_sock_options()
2798 tcp_update_recv_wnd(conn, diff); in tcp_check_sock_options()
2800 k_mutex_unlock(&conn->lock); in tcp_check_sock_options()
2805 static enum net_verdict tcp_in(struct tcp *conn, struct net_pkt *pkt) in tcp_in() argument
2821 if (conn == NULL || pkt == NULL) { in tcp_in()
2837 if (conn->state != TCP_SYN_SENT) { in tcp_in()
2838 tcp_check_sock_options(conn); in tcp_in()
2841 k_mutex_lock(&conn->lock, K_FOREVER); in tcp_in()
2844 if (conn->state == TCP_UNUSED) { in tcp_in()
2845 k_mutex_unlock(&conn->lock); in tcp_in()
2849 NET_DBG("%s", tcp_conn_state(conn, pkt)); in tcp_in()
2861 if (!tcp_validate_seq(conn, th, len)) { in tcp_in()
2868 tcp_out(conn, ACK); in tcp_in()
2870 k_mutex_unlock(&conn->lock); in tcp_in()
2880 conn->rst_received = true; in tcp_in()
2886 if (conn->in_connect) { in tcp_in()
2889 if (th_ack(th) != conn->seq) { in tcp_in()
2892 k_mutex_unlock(&conn->lock); in tcp_in()
2903 if (tcp_options_len && !tcp_options_check(&conn->recv_options, pkt, in tcp_in()
2912 if ((conn->state != TCP_LISTEN) && (conn->state != TCP_SYN_SENT) && FL(&fl, &, SYN)) { in tcp_in()
2918 conn, tcp_state_to_str(conn->state, false)); in tcp_in()
2919 net_stats_update_tcp_seg_drop(conn->iface); in tcp_in()
2927 if ((conn->state != TCP_LISTEN) && (conn->state != TCP_SYN_SENT)) { in tcp_in()
2928 uint32_t snduna = conn->seq; in tcp_in()
2936 k_mutex_unlock(&conn->lock); in tcp_in()
2945 if ((conn->state == TCP_SYN_RECEIVED) || (conn->state == TCP_FIN_WAIT_1) || in tcp_in()
2946 (conn->state == TCP_CLOSING) || (conn->state == TCP_LAST_ACK)) { in tcp_in()
2952 net_tcp_seq_cmp(th_ack(th), conn->seq + conn->send_data_total) > 0) { in tcp_in()
2955 tcp_out(conn, ACK); in tcp_in()
2956 k_mutex_unlock(&conn->lock); in tcp_in()
2962 conn->send_win = ntohs(th_win(th)); in tcp_in()
2963 if (conn->send_win > conn->send_win_max) { in tcp_in()
2964 NET_DBG("Lowering send window from %u to %u", conn->send_win, conn->send_win_max); in tcp_in()
2965 conn->send_win = conn->send_win_max; in tcp_in()
2968 if (conn->send_win == 0) { in tcp_in()
2969 if (!k_work_delayable_is_pending(&conn->persist_timer)) { in tcp_in()
2970 conn->zwp_retries = 0; in tcp_in()
2971 (void)k_work_reschedule_for_queue(&tcp_work_q, &conn->persist_timer, in tcp_in()
2975 (void)k_work_cancel_delayable(&conn->persist_timer); in tcp_in()
2978 if (tcp_window_full(conn)) { in tcp_in()
2979 (void)k_sem_take(&conn->tx_sem, K_NO_WAIT); in tcp_in()
2981 k_sem_give(&conn->tx_sem); in tcp_in()
2984 switch (conn->state) { in tcp_in()
2988 conn->send_options.mss_found = true; in tcp_in()
2989 conn_ack(conn, th_seq(th) + 1); /* capture peer's isn */ in tcp_in()
2990 tcp_out(conn, SYN | ACK); in tcp_in()
2991 conn->send_options.mss_found = false; in tcp_in()
2992 conn_seq(conn, + 1); in tcp_in()
2995 tcp_setup_retransmission(conn); in tcp_in()
3000 &conn->establish_timer, in tcp_in()
3004 k_mutex_unlock(&conn->lock); in tcp_in()
3009 if (th_ack(th) == conn->seq && th_seq(th) == conn->ack) { in tcp_in()
3013 if (conn->accepted_conn != NULL) { in tcp_in()
3014 accept_cb = conn->accepted_conn->accept_cb; in tcp_in()
3015 context = conn->accepted_conn->context; in tcp_in()
3016 keep_alive_param_copy(conn, conn->accepted_conn); in tcp_in()
3019 k_work_cancel_delayable(&conn->establish_timer); in tcp_in()
3020 k_work_cancel_delayable(&conn->send_data_timer); in tcp_in()
3021 tcp_conn_ref(conn); in tcp_in()
3022 net_context_set_state(conn->context, in tcp_in()
3026 conn->accepted_conn = NULL; in tcp_in()
3035 net_tcp_put(conn->context); in tcp_in()
3039 memcpy(&conn->context->remote, &conn->dst.sa, sizeof(conn->dst.sa)); in tcp_in()
3045 net_context_get_family(conn->context) == AF_INET && in tcp_in()
3051 &net_sin(&conn->context->remote)->sin_addr, in tcp_in()
3053 net_ipaddr_copy(&net_sin6(&conn->context->remote)->sin6_addr, in tcp_in()
3056 net_sin6(&conn->context->remote)->sin6_family = AF_INET6; in tcp_in()
3069 accept_cb(conn->context, &conn->context->remote, in tcp_in()
3076 tcp_ca_init(conn); in tcp_in()
3079 verdict = tcp_data_get(conn, pkt, &len); in tcp_in()
3085 conn_ack(conn, + len); in tcp_in()
3086 tcp_out(conn, ACK); in tcp_in()
3094 tcp_nbr_reachability_hint(conn); in tcp_in()
3102 if (FL(&fl, &, SYN | ACK, th && th_ack(th) == conn->seq)) { in tcp_in()
3103 k_work_cancel_delayable(&conn->send_data_timer); in tcp_in()
3104 conn_ack(conn, th_seq(th) + 1); in tcp_in()
3106 verdict = tcp_data_get(conn, pkt, &len); in tcp_in()
3112 conn_ack(conn, + len); in tcp_in()
3118 net_context_set_state(conn->context, in tcp_in()
3120 tcp_ca_init(conn); in tcp_in()
3121 tcp_out(conn, ACK); in tcp_in()
3134 tcp_nbr_reachability_hint(conn); in tcp_in()
3142 if (FL(&fl, &, FIN, th_seq(th) == conn->ack)) { in tcp_in()
3144 verdict = tcp_data_get(conn, pkt, &len); in tcp_in()
3153 conn_ack(conn, + len + 1); in tcp_in()
3154 keep_alive_timer_stop(conn); in tcp_in()
3156 if (net_tcp_seq_cmp(th_ack(th), conn->seq) > 0) { in tcp_in()
3157 uint32_t len_acked = th_ack(th) - conn->seq; in tcp_in()
3159 conn_seq(conn, + len_acked); in tcp_in()
3162 tcp_out(conn, FIN | ACK); in tcp_in()
3163 conn_seq(conn, + 1); in tcp_in()
3164 tcp_setup_retransmission(conn); in tcp_in()
3166 tcp_setup_last_ack_timer(conn); in tcp_in()
3175 keep_alive_timer_restart(conn); in tcp_in()
3178 if (net_tcp_seq_cmp(th_ack(th), conn->seq) == 0) { in tcp_in()
3180 if (conn->send_data_total > 0) { in tcp_in()
3186 conn->dup_ack_cnt = MIN(conn->dup_ack_cnt + 1, in tcp_in()
3188 tcp_ca_dup_ack(conn); in tcp_in()
3191 conn->dup_ack_cnt = 0; in tcp_in()
3195 if ((conn->data_mode == TCP_DATA_MODE_SEND) && in tcp_in()
3196 (conn->dup_ack_cnt == DUPLICATE_ACK_RETRANSMIT_TRHESHOLD)) { in tcp_in()
3198 int temp_unacked_len = conn->unacked_len; in tcp_in()
3200 conn->unacked_len = 0; in tcp_in()
3202 (void)tcp_send_data(conn); in tcp_in()
3205 conn->unacked_len = temp_unacked_len; in tcp_in()
3207 tcp_ca_fast_retransmit(conn); in tcp_in()
3208 if (tcp_window_full(conn)) { in tcp_in()
3209 (void)k_sem_take(&conn->tx_sem, K_NO_WAIT); in tcp_in()
3214 NET_ASSERT((conn->send_data_total == 0) || in tcp_in()
3215 k_work_delayable_is_pending(&conn->send_data_timer), in tcp_in()
3217 "of the send_data queue timer", conn); in tcp_in()
3219 if (net_tcp_seq_cmp(th_ack(th), conn->seq) > 0) { in tcp_in()
3220 uint32_t len_acked = th_ack(th) - conn->seq; in tcp_in()
3222 NET_DBG("conn: %p len_acked=%u", conn, len_acked); in tcp_in()
3224 if ((conn->send_data_total < len_acked) || in tcp_in()
3225 (tcp_pkt_pull(conn->send_data, in tcp_in()
3228 "(total=%zu)", conn, len_acked, in tcp_in()
3229 conn->send_data_total); in tcp_in()
3230 net_stats_update_tcp_seg_drop(conn->iface); in tcp_in()
3239 conn->dup_ack_cnt = 0; in tcp_in()
3241 tcp_ca_pkts_acked(conn, len_acked); in tcp_in()
3243 conn->send_data_total -= len_acked; in tcp_in()
3244 if (conn->unacked_len < len_acked) { in tcp_in()
3245 conn->unacked_len = 0; in tcp_in()
3247 conn->unacked_len -= len_acked; in tcp_in()
3250 if (!tcp_window_full(conn)) { in tcp_in()
3251 k_sem_give(&conn->tx_sem); in tcp_in()
3254 conn_seq(conn, + len_acked); in tcp_in()
3255 net_stats_update_tcp_seg_recv(conn->iface); in tcp_in()
3261 tcp_nbr_reachability_hint(conn); in tcp_in()
3263 conn_send_data_dump(conn); in tcp_in()
3265 if (conn->data_mode == TCP_DATA_MODE_RESEND) { in tcp_in()
3266 conn->unacked_len = 0; in tcp_in()
3267 tcp_derive_rto(conn); in tcp_in()
3269 conn->data_mode = TCP_DATA_MODE_SEND; in tcp_in()
3270 if (conn->send_data_total > 0) { in tcp_in()
3271 tcp_setup_retransmission(conn); in tcp_in()
3275 if (conn->in_close && conn->send_data_total == 0) { in tcp_in()
3279 &conn->fin_timer, in tcp_in()
3282 tcp_out(conn, FIN | ACK); in tcp_in()
3283 conn_seq(conn, + 1); in tcp_in()
3284 tcp_setup_retransmission(conn); in tcp_in()
3286 keep_alive_timer_stop(conn); in tcp_in()
3290 ret = tcp_send_queued_data(conn); in tcp_in()
3299 if (tcp_window_full(conn)) { in tcp_in()
3300 (void)k_sem_take(&conn->tx_sem, K_NO_WAIT); in tcp_in()
3304 if (th_seq(th) == conn->ack) { in tcp_in()
3311 verdict = tcp_data_received(conn, pkt, &len, psh); in tcp_in()
3320 } else if (net_tcp_seq_greater(conn->ack, th_seq(th))) { in tcp_in()
3324 int32_t new_len = tcp_compute_new_length(conn, th, len, false); in tcp_in()
3326 if (tcp_pkt_trim_data(conn, pkt, len, (size_t)(len - new_len)) == 0) { in tcp_in()
3330 tcp_out(conn, ACK); in tcp_in()
3334 tcp_out_of_order_data(conn, pkt, len, th_seq(th)); in tcp_in()
3337 tcp_out(conn, ACK); in tcp_in()
3344 if (conn->send_data_total == 0) { in tcp_in()
3345 k_work_cancel_delayable(&conn->send_data_timer); in tcp_in()
3349 if (tcp_window_full(conn)) { in tcp_in()
3350 (void)k_sem_take(&conn->tx_sem, K_NO_WAIT); in tcp_in()
3352 k_sem_give(&conn->tx_sem); in tcp_in()
3360 if (th_ack(th) == conn->seq) { in tcp_in()
3361 k_work_cancel_delayable(&conn->send_data_timer); in tcp_in()
3367 tcp_cancel_last_ack_timer(conn); in tcp_in()
3384 if (tcp_compute_new_length(conn, th, len, false) > 0) { in tcp_in()
3389 net_stats_update_tcp_seg_drop(conn->iface); in tcp_in()
3391 k_work_cancel_delayable(&conn->send_data_timer); in tcp_in()
3392 next = tcp_enter_time_wait(conn); in tcp_in()
3397 if (th_ack(th) == conn->seq) { in tcp_in()
3400 conn, conn->seq, conn->ack); in tcp_in()
3401 k_work_cancel_delayable(&conn->send_data_timer); in tcp_in()
3411 if (FL(&fl, &, FIN, net_tcp_seq_cmp(th_seq(th) + len, conn->ack) == 0)) { in tcp_in()
3412 conn_ack(conn, + 1); in tcp_in()
3417 NET_DBG("conn %p: FIN received, going to TIME WAIT", conn); in tcp_in()
3419 next = tcp_enter_time_wait(conn); in tcp_in()
3425 conn); in tcp_in()
3428 tcp_out(conn, ACK); in tcp_in()
3442 if (tcp_compute_new_length(conn, th, len, false) > 0) { in tcp_in()
3447 net_stats_update_tcp_seg_drop(conn->iface); in tcp_in()
3449 next = tcp_enter_time_wait(conn); in tcp_in()
3458 if (FL(&fl, &, FIN, net_tcp_seq_cmp(th_seq(th) + len, conn->ack) == 0)) { in tcp_in()
3459 conn_ack(conn, + 1); in tcp_in()
3460 NET_DBG("conn %p: FIN received, going to TIME WAIT", conn); in tcp_in()
3462 next = tcp_enter_time_wait(conn); in tcp_in()
3465 tcp_out(conn, ACK); in tcp_in()
3475 int32_t new_len = tcp_compute_new_length(conn, th, len, true); in tcp_in()
3483 conn, new_len); in tcp_in()
3484 net_stats_update_tcp_seg_drop(conn->iface); in tcp_in()
3486 k_work_cancel_delayable(&conn->send_data_timer); in tcp_in()
3487 next = tcp_enter_time_wait(conn); in tcp_in()
3493 if (th_ack(th) == conn->seq) { in tcp_in()
3496 conn, conn->seq, conn->ack); in tcp_in()
3498 k_work_cancel_delayable(&conn->send_data_timer); in tcp_in()
3499 next = tcp_enter_time_wait(conn); in tcp_in()
3506 int32_t new_len = tcp_compute_new_length(conn, th, len, true); in tcp_in()
3514 conn, new_len); in tcp_in()
3515 net_stats_update_tcp_seg_drop(conn->iface); in tcp_in()
3522 NET_ASSERT(false, "%s is unimplemented", tcp_state_to_str(conn->state, true)); in tcp_in()
3536 conn_state(conn, next); in tcp_in()
3539 keep_alive_timer_restart(conn); in tcp_in()
3545 conn->in_connect = false; in tcp_in()
3546 if (conn->connect_cb) { in tcp_in()
3547 conn->connect_cb(conn->context, 0, conn->context->user_data); in tcp_in()
3550 conn->connect_cb = NULL; in tcp_in()
3553 k_sem_give(&conn->connect_sem); in tcp_in()
3557 if (conn->context) { in tcp_in()
3561 conn_handler = (struct net_conn *)conn->context->conn_handler; in tcp_in()
3564 recv_user_data = conn->recv_user_data; in tcp_in()
3565 recv_data_fifo = &conn->recv_data; in tcp_in()
3567 k_mutex_unlock(&conn->lock); in tcp_in()
3573 while (conn_handler && atomic_get(&conn->ref_count) > 0 && in tcp_in()
3586 if (do_close && conn->state != TCP_UNUSED && conn->state != TCP_CLOSED) { in tcp_in()
3587 tcp_conn_close(conn, close_status); in tcp_in()
3596 struct tcp *conn = context->tcp; in net_tcp_put() local
3598 if (!conn) { in net_tcp_put()
3602 k_mutex_lock(&conn->lock, K_FOREVER); in net_tcp_put()
3604 NET_DBG("%s", conn ? tcp_conn_state(conn, NULL) : ""); in net_tcp_put()
3609 if (conn->state == TCP_ESTABLISHED || in net_tcp_put()
3610 conn->state == TCP_SYN_RECEIVED) { in net_tcp_put()
3612 if (conn->send_data_total > 0) { in net_tcp_put()
3613 NET_DBG("conn %p pending %zu bytes", conn, in net_tcp_put()
3614 conn->send_data_total); in net_tcp_put()
3615 conn->in_close = true; in net_tcp_put()
3620 &conn->send_data_timer, in net_tcp_put()
3627 &conn->fin_timer, in net_tcp_put()
3630 tcp_out(conn, FIN | ACK); in net_tcp_put()
3631 conn_seq(conn, + 1); in net_tcp_put()
3632 tcp_setup_retransmission(conn); in net_tcp_put()
3634 conn_state(conn, TCP_FIN_WAIT_1); in net_tcp_put()
3636 keep_alive_timer_stop(conn); in net_tcp_put()
3638 } else if (conn->in_connect) { in net_tcp_put()
3639 conn->in_connect = false; in net_tcp_put()
3640 k_sem_reset(&conn->connect_sem); in net_tcp_put()
3643 k_mutex_unlock(&conn->lock); in net_tcp_put()
3645 tcp_conn_unref(conn); in net_tcp_put()
3660 struct tcp *conn = context->tcp; in net_tcp_update_recv_wnd() local
3663 if (!conn) { in net_tcp_update_recv_wnd()
3668 k_mutex_lock(&conn->lock, K_FOREVER); in net_tcp_update_recv_wnd()
3672 k_mutex_unlock(&conn->lock); in net_tcp_update_recv_wnd()
3680 struct tcp *conn = context->tcp; in net_tcp_queue() local
3684 if (!conn || conn->state != TCP_ESTABLISHED) { in net_tcp_queue()
3688 k_mutex_lock(&conn->lock, K_FOREVER); in net_tcp_queue()
3694 if (tcp_window_full(conn)) { in net_tcp_queue()
3712 len = MIN(conn->send_win - conn->send_data_total, len); in net_tcp_queue()
3718 ret = tcp_pkt_append(conn->send_data, in net_tcp_queue()
3737 ret = tcp_pkt_append(conn->send_data, data, len); in net_tcp_queue()
3745 conn->send_data_total += queued_len; in net_tcp_queue()
3751 ret = tcp_send_queued_data(conn); in net_tcp_queue()
3753 tcp_conn_close(conn, ret); in net_tcp_queue()
3757 if (tcp_window_full(conn)) { in net_tcp_queue()
3758 (void)k_sem_take(&conn->tx_sem, K_NO_WAIT); in net_tcp_queue()
3763 k_mutex_unlock(&conn->lock); in net_tcp_queue()
3779 static int tcp_start_handshake(struct tcp *conn) in tcp_start_handshake() argument
3784 k_mutex_lock(&conn->lock, K_FOREVER); in tcp_start_handshake()
3785 tcp_check_sock_options(conn); in tcp_start_handshake()
3786 conn->send_options.mss_found = true; in tcp_start_handshake()
3787 ret = tcp_out_ext(conn, SYN, NULL /* no data */, conn->seq); in tcp_start_handshake()
3789 k_mutex_unlock(&conn->lock); in tcp_start_handshake()
3792 tcp_setup_retransmission(conn); in tcp_start_handshake()
3794 conn->send_options.mss_found = false; in tcp_start_handshake()
3795 conn_seq(conn, + 1); in tcp_start_handshake()
3796 conn_state(conn, TCP_SYN_SENT); in tcp_start_handshake()
3797 tcp_conn_ref(conn); in tcp_start_handshake()
3798 k_mutex_unlock(&conn->lock); in tcp_start_handshake()
3814 struct tcp *conn; in net_tcp_connect() local
3823 conn = context->tcp; in net_tcp_connect()
3824 conn->iface = net_context_get_iface(context); in net_tcp_connect()
3825 tcp_derive_rto(conn); in net_tcp_connect()
3837 memset(&conn->src, 0, sizeof(struct sockaddr_in)); in net_tcp_connect()
3838 memset(&conn->dst, 0, sizeof(struct sockaddr_in)); in net_tcp_connect()
3840 conn->src.sa.sa_family = AF_INET; in net_tcp_connect()
3841 conn->dst.sa.sa_family = AF_INET; in net_tcp_connect()
3843 conn->dst.sin.sin_port = remote_port; in net_tcp_connect()
3844 conn->src.sin.sin_port = local_port; in net_tcp_connect()
3855 net_ipaddr_copy(&conn->src.sin.sin_addr, ip4); in net_tcp_connect()
3857 net_ipaddr_copy(&conn->src.sin.sin_addr, in net_tcp_connect()
3860 net_ipaddr_copy(&conn->dst.sin.sin_addr, in net_tcp_connect()
3870 memset(&conn->src, 0, sizeof(struct sockaddr_in6)); in net_tcp_connect()
3871 memset(&conn->dst, 0, sizeof(struct sockaddr_in6)); in net_tcp_connect()
3873 conn->src.sin6.sin6_family = AF_INET6; in net_tcp_connect()
3874 conn->dst.sin6.sin6_family = AF_INET6; in net_tcp_connect()
3876 conn->dst.sin6.sin6_port = remote_port; in net_tcp_connect()
3877 conn->src.sin6.sin6_port = local_port; in net_tcp_connect()
3884 net_ipaddr_copy(&conn->src.sin6.sin6_addr, ip6); in net_tcp_connect()
3886 net_ipaddr_copy(&conn->src.sin6.sin6_addr, in net_tcp_connect()
3889 net_ipaddr_copy(&conn->dst.sin6.sin6_addr, in net_tcp_connect()
3899 conn->seq = tcp_init_isn(&conn->src.sa, &conn->dst.sa); in net_tcp_connect()
3902 NET_DBG("conn: %p src: %s, dst: %s", conn, in net_tcp_connect()
3903 net_sprint_addr(conn->src.sa.sa_family, in net_tcp_connect()
3904 (const void *)&conn->src.sin.sin_addr), in net_tcp_connect()
3905 net_sprint_addr(conn->dst.sa.sa_family, in net_tcp_connect()
3906 (const void *)&conn->dst.sin.sin_addr)); in net_tcp_connect()
3921 net_if_addr_ref(conn->iface, conn->src.sa.sa_family, in net_tcp_connect()
3922 conn->src.sa.sa_family == AF_INET ? in net_tcp_connect()
3923 (const void *)&conn->src.sin.sin_addr : in net_tcp_connect()
3924 (const void *)&conn->src.sin6.sin6_addr); in net_tcp_connect()
3925 conn->addr_ref_done = true; in net_tcp_connect()
3927 conn->connect_cb = cb; in net_tcp_connect()
3933 conn->in_connect = !IS_ENABLED(CONFIG_NET_TEST_PROTOCOL); in net_tcp_connect()
3935 ret = tcp_start_handshake(conn); in net_tcp_connect()
3941 if (conn->state == TCP_UNUSED || conn->state == TCP_CLOSED) { in net_tcp_connect()
3942 if (conn->rst_received) { in net_tcp_connect()
3948 } else if ((K_TIMEOUT_EQ(timeout, K_NO_WAIT)) && conn->state != TCP_ESTABLISHED) { in net_tcp_connect()
3951 } else if (k_sem_take(&conn->connect_sem, timeout) != 0 && in net_tcp_connect()
3952 conn->state != TCP_ESTABLISHED) { in net_tcp_connect()
3953 if (conn->in_connect) { in net_tcp_connect()
3954 conn->in_connect = false; in net_tcp_connect()
3955 tcp_conn_close(conn, -ETIMEDOUT); in net_tcp_connect()
3958 if (conn->rst_received) { in net_tcp_connect()
3965 conn->in_connect = false; in net_tcp_connect()
3969 NET_DBG("conn: %p, ret=%d", conn, ret); in net_tcp_connect()
3977 struct tcp *conn = context->tcp; in net_tcp_accept() local
3981 if (!conn) { in net_tcp_accept()
3985 NET_DBG("context: %p, tcp: %p, cb: %p", context, conn, cb); in net_tcp_accept()
3987 if (conn->state != TCP_LISTEN) { in net_tcp_accept()
3991 conn->accept_cb = cb; in net_tcp_accept()
4061 struct tcp *conn = context->tcp; in net_tcp_recv() local
4067 if (conn) { in net_tcp_recv()
4068 conn->recv_user_data = user_data; in net_tcp_recv()
4132 struct tcp *conn = tcp_conn_search(pkt); in tcp_input() local
4134 if (conn == NULL && SYN == th_flags(th)) { in tcp_input()
4139 conn = context->tcp; in tcp_input()
4140 tcp_endpoint_set(&conn->dst, pkt, TCP_EP_SRC); in tcp_input()
4141 tcp_endpoint_set(&conn->src, pkt, TCP_EP_DST); in tcp_input()
4145 tcp_conn_ref(conn); in tcp_input()
4148 if (conn) { in tcp_input()
4149 conn->iface = pkt->iface; in tcp_input()
4150 verdict = tcp_in(conn, pkt); in tcp_input()
4157 static size_t tp_tcp_recv_cb(struct tcp *conn, struct net_pkt *pkt) in tp_tcp_recv_cb() argument
4170 net_tcp_queue(conn->context, buf->data, buf->len); in tp_tcp_recv_cb()
4181 static void tp_init(struct tcp *conn, struct tp *tp) in tp_init() argument
4186 .state = tcp_state_to_str(conn->state, true), in tp_init()
4187 .seq = conn->seq, in tp_init()
4188 .ack = conn->ack, in tp_init()
4197 static void tcp_to_json(struct tcp *conn, void *data, size_t *data_len) in tcp_to_json() argument
4201 tp_init(conn, &tp); in tcp_to_json()
4214 struct tcp *conn = tcp_conn_search(pkt); in tp_input() local
4263 conn = context->tcp; in tp_input()
4264 tcp_endpoint_set(&conn->dst, pkt, TCP_EP_SRC); in tp_input()
4265 tcp_endpoint_set(&conn->src, pkt, TCP_EP_DST); in tp_input()
4266 conn->iface = pkt->iface; in tp_input()
4267 tcp_conn_ref(conn); in tp_input()
4269 conn->seq = tp->seq; in tp_input()
4271 if (tcp_start_handshake(conn) == 0) { in tp_input()
4280 conn = (void *)sys_slist_peek_head(&tcp_conns); in tp_input()
4281 context = conn->context; in tp_input()
4282 while (tcp_conn_close(conn, 0)) { in tp_input()
4292 struct tcp *conn = in tp_input() local
4294 net_tcp_put(conn->context); in tp_input()
4301 tp_init(conn, tp); in tp_input()
4310 struct tcp *conn = in tp_input() local
4317 net_tcp_queue(conn->context, buf, len); in tp_input()
4331 conn = (void *)sys_slist_peek_head(&tcp_conns); in tp_input()
4332 tcp_to_json(conn, buf, &json_len); in tp_input()
4378 struct tcp *conn; in net_tcp_foreach() local
4383 SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&tcp_conns, conn, tmp, next) { in net_tcp_foreach()
4384 if (atomic_get(&conn->ref_count) > 0) { in net_tcp_foreach()
4386 cb(conn, user_data); in net_tcp_foreach()
4446 uint16_t net_tcp_get_supported_mss(const struct tcp *conn) in net_tcp_get_supported_mss() argument
4448 sa_family_t family = net_context_get_family(conn->context); in net_tcp_get_supported_mss()
4451 struct net_if *iface = net_context_get_iface(conn->context); in net_tcp_get_supported_mss()
4454 dest_mtu = get_ipv4_destination_mtu(iface, &conn->dst.sin.sin_addr); in net_tcp_get_supported_mss()
4460 struct net_if *iface = net_context_get_iface(conn->context); in net_tcp_get_supported_mss()
4463 dest_mtu = get_ipv6_destination_mtu(iface, &conn->dst.sin6.sin6_addr); in net_tcp_get_supported_mss()
4478 static void testing_find_conn(struct tcp *conn, void *user_data) in testing_find_conn() argument
4483 net_ipv6_addr_cmp(&conn->dst.sin6.sin6_addr, in testing_find_conn()
4490 NET_DBG("Found connection %p mtu %u", conn, in testing_find_conn()
4491 net_tcp_get_supported_mss(conn) + NET_IPV6TCPH_LEN); in testing_find_conn()
4492 data->mtu = net_tcp_get_supported_mss(conn) + NET_IPV6TCPH_LEN; in testing_find_conn()
4497 net_ipv4_addr_cmp(&conn->dst.sin.sin_addr, in testing_find_conn()
4504 NET_DBG("Found connection %p mtu %u", conn, in testing_find_conn()
4505 net_tcp_get_supported_mss(conn) + NET_IPV4TCPH_LEN); in testing_find_conn()
4506 data->mtu = net_tcp_get_supported_mss(conn) + NET_IPV4TCPH_LEN; in testing_find_conn()
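net_tcp_get_supported_mss() above derives the MSS to advertise from the destination MTU, and the testing helper reverses it by adding NET_IPV4TCPH_LEN or NET_IPV6TCPH_LEN back. A trivial sketch of that relation, with the header lengths spelled out for headers without options:

#include <stdio.h>

#define NET_IPV4TCPH_LEN (20 + 20)      /* IPv4 header + TCP header, no options */
#define NET_IPV6TCPH_LEN (40 + 20)      /* IPv6 header + TCP header, no options */

int main(void)
{
        unsigned int mtu = 1500;

        printf("IPv4 MSS %u, IPv6 MSS %u\n",
               mtu - NET_IPV4TCPH_LEN, mtu - NET_IPV6TCPH_LEN);
        return 0;
}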
4532 struct tcp *conn = context->tcp; in net_tcp_set_option() local
4534 NET_ASSERT(conn); in net_tcp_set_option()
4536 k_mutex_lock(&conn->lock, K_FOREVER); in net_tcp_set_option()
4540 ret = set_tcp_nodelay(conn, value, len); in net_tcp_set_option()
4543 ret = set_tcp_keep_alive(conn, value, len); in net_tcp_set_option()
4546 ret = set_tcp_keep_idle(conn, value, len); in net_tcp_set_option()
4549 ret = set_tcp_keep_intvl(conn, value, len); in net_tcp_set_option()
4552 ret = set_tcp_keep_cnt(conn, value, len); in net_tcp_set_option()
4556 k_mutex_unlock(&conn->lock); in net_tcp_set_option()
4569 struct tcp *conn = context->tcp; in net_tcp_get_option() local
4571 NET_ASSERT(conn); in net_tcp_get_option()
4573 k_mutex_lock(&conn->lock, K_FOREVER); in net_tcp_get_option()
4577 ret = get_tcp_nodelay(conn, value, len); in net_tcp_get_option()
4580 ret = get_tcp_keep_alive(conn, value, len); in net_tcp_get_option()
4583 ret = get_tcp_keep_idle(conn, value, len); in net_tcp_get_option()
4586 ret = get_tcp_keep_intvl(conn, value, len); in net_tcp_get_option()
4589 ret = get_tcp_keep_cnt(conn, value, len); in net_tcp_get_option()
4593 k_mutex_unlock(&conn->lock); in net_tcp_get_option()
4605 struct tcp *conn = context->tcp; in net_tcp_tx_sem_get() local
4607 return &conn->tx_sem; in net_tcp_tx_sem_get()
4612 struct tcp *conn = context->tcp; in net_tcp_conn_sem_get() local
4614 return &conn->connect_sem; in net_tcp_conn_sem_get()