Lines Matching refs:req
663 struct request_sock *req; in inet_csk_accept() local
689 req = reqsk_queue_remove(queue, sk); in inet_csk_accept()
691 newsk = req->sk; in inet_csk_accept()
694 tcp_rsk(req)->tfo_listener) { in inet_csk_accept()
696 if (tcp_rsk(req)->tfo_listener) { in inet_csk_accept()
703 req->sk = NULL; in inet_csk_accept()
704 req = NULL; in inet_csk_accept()
735 if (req) in inet_csk_accept()
736 reqsk_put(req); in inet_csk_accept()
744 req = NULL; in inet_csk_accept()
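The inet_csk_accept() hits above follow one shape: the request is popped off the listener's accept queue by reqsk_queue_remove(), the established child is taken from req->sk, and for a TCP Fast Open listener the request is only detached (req->sk = NULL, req = NULL) so the trailing reqsk_put() is skipped and the child keeps the request alive. Below is a minimal user-space sketch of that detach-or-release decision; struct mini_req and the mini_* helpers are illustrative stand-ins, not the kernel definitions.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative stand-in for struct request_sock. */
    struct mini_req {
        int refcnt;
        void *sk;            /* the accepted child */
        bool tfo_listener;   /* Fast Open: the child still points back at req */
    };

    static void mini_reqsk_put(struct mini_req *req)
    {
        if (--req->refcnt == 0)
            free(req);
    }

    /* Shape of the accept path: hand out the child, decide who frees req. */
    static void *mini_accept(struct mini_req *req)
    {
        void *child = req->sk;

        if (req->tfo_listener) {
            /* Keep req alive for the Fast Open child; just detach it. */
            req->sk = NULL;
            req = NULL;
        }
        if (req)
            mini_reqsk_put(req);
        return child;
    }

    int main(void)
    {
        struct mini_req *req = calloc(1, sizeof(*req));

        req->refcnt = 1;
        req->sk = "child-socket";
        req->tfo_listener = false;
        printf("accepted: %s\n", (const char *)mini_accept(req));
        return 0;
    }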
797 const struct request_sock *req) in inet_csk_route_req() argument
799 const struct inet_request_sock *ireq = inet_rsk(req); in inet_csk_route_req()
813 security_req_classify_flow(req, flowi4_to_flowi_common(fl4)); in inet_csk_route_req()
832 const struct request_sock *req) in inet_csk_route_child_sock() argument
834 const struct inet_request_sock *ireq = inet_rsk(req); in inet_csk_route_child_sock()
850 security_req_classify_flow(req, flowi4_to_flowi_common(fl4)); in inet_csk_route_child_sock()
867 static void syn_ack_recalc(struct request_sock *req, in syn_ack_recalc() argument
873 *expire = req->num_timeout >= max_syn_ack_retries; in syn_ack_recalc()
877 *expire = req->num_timeout >= max_syn_ack_retries && in syn_ack_recalc()
878 (!inet_rsk(req)->acked || req->num_timeout >= rskq_defer_accept); in syn_ack_recalc()
883 *resend = !inet_rsk(req)->acked || in syn_ack_recalc()
884 req->num_timeout >= rskq_defer_accept - 1; in syn_ack_recalc()
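syn_ack_recalc() folds req->num_timeout, the SYN-ACK retry limit and the TCP_DEFER_ACCEPT setting into two outputs: whether the request has expired and whether another SYN-ACK should be sent. A compact user-space restatement of the decision logic indexed above, with plain parameters in place of the kernel structures (the mini_ name is illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * expire: drop the request entirely.
     * resend: retransmit a SYN-ACK for it.
     * With defer_accept, an already-ACKed request is kept alive and not
     * re-answered until the deferring period is nearly over.
     */
    static void mini_syn_ack_recalc(int num_timeout, bool acked,
                                    int max_syn_ack_retries, int defer_accept,
                                    bool *expire, bool *resend)
    {
        if (!defer_accept) {
            *expire = num_timeout >= max_syn_ack_retries;
            *resend = true;
            return;
        }
        *expire = num_timeout >= max_syn_ack_retries &&
                  (!acked || num_timeout >= defer_accept);
        *resend = !acked || num_timeout >= defer_accept - 1;
    }

    int main(void)
    {
        bool expire, resend;

        mini_syn_ack_recalc(3, true, 5, 6, &expire, &resend);
        printf("expire=%d resend=%d\n", expire, resend);
        return 0;
    }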
891 struct request_sock *req; in reqsk_alloc_noprof() local
893 req = kmem_cache_alloc_noprof(ops->slab, GFP_ATOMIC | __GFP_NOWARN); in reqsk_alloc_noprof()
894 if (!req) in reqsk_alloc_noprof()
896 req->rsk_listener = NULL; in reqsk_alloc_noprof()
899 kmem_cache_free(ops->slab, req); in reqsk_alloc_noprof()
902 req->rsk_listener = sk_listener; in reqsk_alloc_noprof()
904 req->rsk_ops = ops; in reqsk_alloc_noprof()
905 req_to_sk(req)->sk_prot = sk_listener->sk_prot; in reqsk_alloc_noprof()
906 sk_node_init(&req_to_sk(req)->sk_node); in reqsk_alloc_noprof()
907 sk_tx_queue_clear(req_to_sk(req)); in reqsk_alloc_noprof()
908 req->saved_syn = NULL; in reqsk_alloc_noprof()
909 req->syncookie = 0; in reqsk_alloc_noprof()
910 req->timeout = 0; in reqsk_alloc_noprof()
911 req->num_timeout = 0; in reqsk_alloc_noprof()
912 req->num_retrans = 0; in reqsk_alloc_noprof()
913 req->sk = NULL; in reqsk_alloc_noprof()
914 refcount_set(&req->rsk_refcnt, 0); in reqsk_alloc_noprof()
916 return req; in reqsk_alloc_noprof()
924 struct request_sock *req = reqsk_alloc(ops, sk_listener, in inet_reqsk_alloc() local
927 if (req) { in inet_reqsk_alloc()
928 struct inet_request_sock *ireq = inet_rsk(req); in inet_reqsk_alloc()
938 req->timeout = TCP_TIMEOUT_INIT; in inet_reqsk_alloc()
941 return req; in inet_reqsk_alloc()
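reqsk_alloc_noprof() and inet_reqsk_alloc() cover request allocation: the slab object is obtained, a reference on the listener is taken (with the object freed again if the listener is already going away), the bookkeeping fields (saved_syn, timeouts, retransmit counters, req->sk) are zeroed, and rsk_refcnt starts at 0 because references are only handed out once the request is hashed and its timer armed. A user-space sketch of that allocate-and-pin pattern, using calloc() for the zeroing and simplified stand-in types (mini_listener and mini_req are not the kernel structures):

    #include <stdlib.h>

    struct mini_listener { int refcnt; };

    struct mini_req {
        struct mini_listener *listener;
        void *saved_syn;
        int num_timeout;
        int num_retrans;
        int refcnt;   /* starts at 0; owners appear only once hashed/armed */
    };

    static struct mini_req *mini_reqsk_alloc(struct mini_listener *listener)
    {
        struct mini_req *req = calloc(1, sizeof(*req));

        if (!req)
            return NULL;
        /* stand-in for the refcount_inc_not_zero() check on the listener */
        if (listener->refcnt <= 0) {
            free(req);
            return NULL;
        }
        listener->refcnt++;
        req->listener = listener;
        return req;   /* calloc() already zeroed the bookkeeping fields */
    }

    int main(void)
    {
        struct mini_listener l = { .refcnt = 1 };
        struct mini_req *req = mini_reqsk_alloc(&l);

        free(req);
        return 0;
    }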
945 static struct request_sock *inet_reqsk_clone(struct request_sock *req, in inet_reqsk_clone() argument
951 nreq = kmem_cache_alloc(req->rsk_ops->slab, GFP_ATOMIC | __GFP_NOWARN); in inet_reqsk_clone()
960 req_sk = req_to_sk(req); in inet_reqsk_clone()
966 req->rsk_ops->obj_size - offsetof(struct sock, sk_dontcopy_end), in inet_reqsk_clone()
988 const struct request_sock *req) in reqsk_queue_migrated() argument
990 if (req->num_timeout == 0) in reqsk_queue_migrated()
995 static void reqsk_migrate_reset(struct request_sock *req) in reqsk_migrate_reset() argument
997 req->saved_syn = NULL; in reqsk_migrate_reset()
999 inet_rsk(req)->ipv6_opt = NULL; in reqsk_migrate_reset()
1000 inet_rsk(req)->pktopts = NULL; in reqsk_migrate_reset()
1002 inet_rsk(req)->ireq_opt = NULL; in reqsk_migrate_reset()
1007 static bool reqsk_queue_unlink(struct request_sock *req) in reqsk_queue_unlink() argument
1009 struct sock *sk = req_to_sk(req); in reqsk_queue_unlink()
1016 lock = inet_ehash_lockp(hashinfo, req->rsk_hash); in reqsk_queue_unlink()
1026 struct request_sock *req, in __inet_csk_reqsk_queue_drop() argument
1029 bool unlinked = reqsk_queue_unlink(req); in __inet_csk_reqsk_queue_drop()
1031 if (!from_timer && timer_delete_sync(&req->rsk_timer)) in __inet_csk_reqsk_queue_drop()
1032 reqsk_put(req); in __inet_csk_reqsk_queue_drop()
1035 reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req); in __inet_csk_reqsk_queue_drop()
1036 reqsk_put(req); in __inet_csk_reqsk_queue_drop()
1042 bool inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req) in inet_csk_reqsk_queue_drop() argument
1044 return __inet_csk_reqsk_queue_drop(sk, req, false); in inet_csk_reqsk_queue_drop()
1047 void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req) in inet_csk_reqsk_queue_drop_and_put() argument
1049 inet_csk_reqsk_queue_drop(sk, req); in inet_csk_reqsk_queue_drop_and_put()
1050 reqsk_put(req); in inet_csk_reqsk_queue_drop_and_put()
1056 struct request_sock *req = timer_container_of(req, t, rsk_timer); in reqsk_timer_handler() local
1057 struct request_sock *nreq = NULL, *oreq = req; in reqsk_timer_handler()
1058 struct sock *sk_listener = req->rsk_listener; in reqsk_timer_handler()
1067 nsk = reuseport_migrate_sock(sk_listener, req_to_sk(req), NULL); in reqsk_timer_handler()
1071 nreq = inet_reqsk_clone(req, nsk); in reqsk_timer_handler()
1082 reqsk_queue_migrated(&inet_csk(nsk)->icsk_accept_queue, req); in reqsk_timer_handler()
1084 req = nreq; in reqsk_timer_handler()
1121 syn_ack_recalc(req, max_syn_ack_retries, READ_ONCE(queue->rskq_defer_accept), in reqsk_timer_handler()
1123 req->rsk_ops->syn_ack_timeout(req); in reqsk_timer_handler()
1126 !tcp_rtx_synack(sk_listener, req) || in reqsk_timer_handler()
1127 inet_rsk(req)->acked)) { in reqsk_timer_handler()
1128 if (req->num_timeout++ == 0) in reqsk_timer_handler()
1130 mod_timer(&req->rsk_timer, jiffies + reqsk_timeout(req, TCP_RTO_MAX)); in reqsk_timer_handler()
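In the retransmit branch of reqsk_timer_handler(), a request that is neither expired nor already answered gets its SYN-ACK re-sent, num_timeout is bumped, and the timer is re-armed with reqsk_timeout(req, TCP_RTO_MAX), which scales the per-request base timeout exponentially with the retry count and clamps it at the maximum. A small sketch of that backoff arithmetic; the constants below (1 s base, 120 s cap, expressed in milliseconds) are illustrative assumptions, not the kernel's jiffies values:

    #include <stdio.h>

    #define MINI_TIMEOUT_INIT   1000UL   /* illustrative: 1 s base, in ms */
    #define MINI_RTO_MAX      120000UL   /* illustrative: 120 s cap, in ms */

    /* Exponential SYN-ACK backoff: base timeout doubled per retry, clamped. */
    static unsigned long mini_reqsk_timeout(unsigned long timeout,
                                            int num_timeout,
                                            unsigned long max_timeout)
    {
        unsigned long t = timeout << num_timeout;

        return t < max_timeout ? t : max_timeout;
    }

    int main(void)
    {
        for (int n = 0; n < 8; n++)
            printf("retry %d: next SYN-ACK timer in %lu ms\n",
                   n, mini_reqsk_timeout(MINI_TIMEOUT_INIT, n, MINI_RTO_MAX));
        return 0;
    }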
1167 static bool reqsk_queue_hash_req(struct request_sock *req, in reqsk_queue_hash_req() argument
1172 if (!inet_ehash_insert(req_to_sk(req), NULL, &found_dup_sk)) in reqsk_queue_hash_req()
1176 timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED); in reqsk_queue_hash_req()
1177 mod_timer(&req->rsk_timer, jiffies + timeout); in reqsk_queue_hash_req()
1183 refcount_set(&req->rsk_refcnt, 2 + 1); in reqsk_queue_hash_req()
1187 bool inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, in inet_csk_reqsk_queue_hash_add() argument
1190 if (!reqsk_queue_hash_req(req, timeout)) in inet_csk_reqsk_queue_hash_add()
1197 static void inet_clone_ulp(const struct request_sock *req, struct sock *newsk, in inet_clone_ulp() argument
1205 icsk->icsk_ulp_ops->clone(req, newsk, priority); in inet_clone_ulp()
1217 const struct request_sock *req, in inet_csk_clone_lock() argument
1230 ireq = inet_rsk(req); in inet_csk_clone_lock()
1255 newsk->sk_mark = inet_rsk(req)->ir_mark; in inet_csk_clone_lock()
1257 atomic64_read(&inet_rsk(req)->ir_cookie)); in inet_csk_clone_lock()
1270 inet_clone_ulp(req, newsk, priority); in inet_csk_clone_lock()
1272 security_inet_csk_clone(newsk, req); in inet_csk_clone_lock()
1366 static void inet_child_forget(struct sock *sk, struct request_sock *req, in inet_child_forget() argument
1375 if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) { in inet_child_forget()
1376 BUG_ON(rcu_access_pointer(tcp_sk(child)->fastopen_rsk) != req); in inet_child_forget()
1377 BUG_ON(sk != req->rsk_listener); in inet_child_forget()
1391 struct request_sock *req, in inet_csk_reqsk_queue_add() argument
1398 inet_child_forget(sk, req, child); in inet_csk_reqsk_queue_add()
1401 req->sk = child; in inet_csk_reqsk_queue_add()
1402 req->dl_next = NULL; in inet_csk_reqsk_queue_add()
1404 WRITE_ONCE(queue->rskq_accept_head, req); in inet_csk_reqsk_queue_add()
1406 queue->rskq_accept_tail->dl_next = req; in inet_csk_reqsk_queue_add()
1407 queue->rskq_accept_tail = req; in inet_csk_reqsk_queue_add()
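inet_csk_reqsk_queue_add() attaches the child to req->sk and appends the request to the accept queue, a singly linked FIFO threaded through dl_next with rskq_accept_head and rskq_accept_tail pointers; inet_csk_accept() later pops requests from the head via reqsk_queue_remove(). A self-contained sketch of that queue shape (mini_* types are simplified stand-ins, and locking is omitted):

    #include <stdio.h>

    struct mini_req {
        struct mini_req *dl_next;
        const char *child;
    };

    struct mini_accept_queue {
        struct mini_req *head;
        struct mini_req *tail;
    };

    /* Append at the tail, like inet_csk_reqsk_queue_add(). */
    static void mini_queue_add(struct mini_accept_queue *q, struct mini_req *req)
    {
        req->dl_next = NULL;
        if (!q->head)
            q->head = req;
        else
            q->tail->dl_next = req;
        q->tail = req;
    }

    /* Pop from the head, like the reqsk_queue_remove() call in inet_csk_accept(). */
    static struct mini_req *mini_queue_remove(struct mini_accept_queue *q)
    {
        struct mini_req *req = q->head;

        if (req) {
            q->head = req->dl_next;
            if (!q->head)
                q->tail = NULL;
        }
        return req;
    }

    int main(void)
    {
        struct mini_accept_queue q = { NULL, NULL };
        struct mini_req a = { NULL, "child-A" }, b = { NULL, "child-B" };
        struct mini_req *req;

        mini_queue_add(&q, &a);
        mini_queue_add(&q, &b);
        while ((req = mini_queue_remove(&q)))
            printf("accepted %s\n", req->child);
        return 0;
    }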
1416 struct request_sock *req, bool own_req) in inet_csk_complete_hashdance() argument
1419 inet_csk_reqsk_queue_drop(req->rsk_listener, req); in inet_csk_complete_hashdance()
1420 reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req); in inet_csk_complete_hashdance()
1422 if (sk != req->rsk_listener) { in inet_csk_complete_hashdance()
1432 nreq = inet_reqsk_clone(req, sk); in inet_csk_complete_hashdance()
1434 inet_child_forget(sk, req, child); in inet_csk_complete_hashdance()
1441 reqsk_migrate_reset(req); in inet_csk_complete_hashdance()
1442 reqsk_put(req); in inet_csk_complete_hashdance()
1449 } else if (inet_csk_reqsk_queue_add(sk, req, child)) { in inet_csk_complete_hashdance()
1468 struct request_sock *next, *req; in inet_csk_listen_stop() local
1478 while ((req = reqsk_queue_remove(queue, sk)) != NULL) { in inet_csk_listen_stop()
1479 struct sock *child = req->sk, *nsk; in inet_csk_listen_stop()
1489 nreq = inet_reqsk_clone(req, nsk); in inet_csk_listen_stop()
1496 reqsk_migrate_reset(req); in inet_csk_listen_stop()
1511 inet_child_forget(sk, req, child); in inet_csk_listen_stop()
1513 reqsk_put(req); in inet_csk_listen_stop()
1523 req = queue->fastopenq.rskq_rst_head; in inet_csk_listen_stop()
1526 while (req != NULL) { in inet_csk_listen_stop()
1527 next = req->dl_next; in inet_csk_listen_stop()
1528 reqsk_put(req); in inet_csk_listen_stop()
1529 req = next; in inet_csk_listen_stop()
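inet_csk_listen_stop() drains what is left when the listener shuts down: each request still in the accept queue has its child either re-homed through inet_reqsk_clone() plus reqsk_migrate_reset() or torn down via inet_child_forget(), and the Fast Open rskq_rst_head chain is then walked and released with reqsk_put(). That final loop is a plain walk-and-put over a singly linked list, sketched below with stand-in types (mini_*):

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for the rskq_rst_head chain of reset Fast Open requests. */
    struct mini_req {
        struct mini_req *dl_next;
        int refcnt;
    };

    static void mini_reqsk_put(struct mini_req *req)
    {
        if (--req->refcnt == 0)
            free(req);
    }

    /* Walk the chain, dropping each request's remaining reference. */
    static void mini_drain(struct mini_req *head)
    {
        struct mini_req *req = head, *next;

        while (req != NULL) {
            next = req->dl_next;
            mini_reqsk_put(req);
            req = next;
        }
    }

    int main(void)
    {
        struct mini_req *a = calloc(1, sizeof(*a));
        struct mini_req *b = calloc(1, sizeof(*b));

        a->refcnt = 1;
        b->refcnt = 1;
        a->dl_next = b;
        mini_drain(a);
        printf("drained\n");
        return 0;
    }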