Lines Matching refs:psock. Each entry below shows the source line number, the matching source line, the enclosing function, and whether psock is a function argument or a local variable on that line; the matches appear to come from net/core/skmsg.c.
413 int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg, in sk_msg_recvmsg() argument
421 msg_rx = sk_psock_peek_msg(psock); in sk_msg_recvmsg()
474 msg_rx = sk_psock_next_msg(psock, msg_rx); in sk_msg_recvmsg()
482 msg_rx = sk_psock_dequeue_msg(psock); in sk_msg_recvmsg()
485 msg_rx = sk_psock_peek_msg(psock); in sk_msg_recvmsg()
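The sk_msg_recvmsg() matches above are the walk over psock->ingress_msg that copies queued message data to user space. A rough sketch of that peek/advance/dequeue shape, assuming the kernel context of net/core/skmsg.c (the function name is mine, the copy loop and memory accounting are omitted, and msg_fully_consumed() is a hypothetical stand-in for the real "all sg entries drained" check):

static int recvmsg_walk_sketch(struct sock *sk, struct sk_psock *psock,
                               struct msghdr *msg, int len)
{
        struct sk_msg *msg_rx;
        int copied = 0;

        msg_rx = sk_psock_peek_msg(psock);
        while (copied != len && msg_rx) {
                /* ... copy from msg_rx->sg into msg and advance copied ... */

                if (msg_fully_consumed(msg_rx)) {       /* hypothetical helper */
                        /* Drained: unlink it, free it, look at the new head. */
                        msg_rx = sk_psock_dequeue_msg(psock);
                        kfree_sk_msg(msg_rx);
                        msg_rx = sk_psock_peek_msg(psock);
                } else {
                        /* Partially read: keep it queued, move down the list. */
                        msg_rx = sk_psock_next_msg(psock, msg_rx);
                }
        }
        return copied;
}
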
494 struct sk_psock *psock; in sk_msg_is_readable() local
498 psock = sk_psock(sk); in sk_msg_is_readable()
499 if (likely(psock)) in sk_msg_is_readable()
500 empty = list_empty(&psock->ingress_msg); in sk_msg_is_readable()
531 struct sk_psock *psock, in sk_psock_skb_ingress_enqueue() argument
559 psock->ingress_bytes += len; in sk_psock_skb_ingress_enqueue()
567 sk_psock_queue_msg(psock, msg); in sk_psock_skb_ingress_enqueue()
568 sk_psock_data_ready(sk, psock); in sk_psock_skb_ingress_enqueue()
572 static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb,
575 static int sk_psock_skb_ingress(struct sk_psock *psock, struct sk_buff *skb, in sk_psock_skb_ingress() argument
578 struct sock *sk = psock->sk; in sk_psock_skb_ingress()
587 return sk_psock_skb_ingress_self(psock, skb, off, len, true); in sk_psock_skb_ingress()
599 err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg, true); in sk_psock_skb_ingress()
609 static int sk_psock_skb_ingress_self(struct sk_psock *psock, struct sk_buff *skb, in sk_psock_skb_ingress_self() argument
613 struct sock *sk = psock->sk; in sk_psock_skb_ingress_self()
619 err = sk_psock_skb_ingress_enqueue(skb, off, len, psock, sk, msg, take_ref); in sk_psock_skb_ingress_self()
625 static int sk_psock_handle_skb(struct sk_psock *psock, struct sk_buff *skb, in sk_psock_handle_skb() argument
629 if (!sock_writeable(psock->sk)) in sk_psock_handle_skb()
631 return skb_send_sock(psock->sk, skb, off, len); in sk_psock_handle_skb()
634 return sk_psock_skb_ingress(psock, skb, off, len); in sk_psock_handle_skb()
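The sk_psock_handle_skb() matches are the fork between the two redirect directions. Filled in from those lines, the function is approximately the following (a sketch under an illustrative name, not a verbatim copy):

static int handle_skb_sketch(struct sk_psock *psock, struct sk_buff *skb,
                             u32 off, u32 len, bool ingress)
{
        if (!ingress) {
                /* Egress redirect: send through the socket, but back off with
                 * -EAGAIN when it has no write space so the backlog worker
                 * can retry later.
                 */
                if (!sock_writeable(psock->sk))
                        return -EAGAIN;
                return skb_send_sock(psock->sk, skb, off, len);
        }
        /* Ingress redirect: turn the skb into sk_msg data on the ingress queue. */
        return sk_psock_skb_ingress(psock, skb, off, len);
}
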
637 static void sk_psock_skb_state(struct sk_psock *psock, in sk_psock_skb_state() argument
641 spin_lock_bh(&psock->ingress_lock); in sk_psock_skb_state()
642 if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) { in sk_psock_skb_state()
646 spin_unlock_bh(&psock->ingress_lock); in sk_psock_skb_state()
652 struct sk_psock *psock = container_of(dwork, struct sk_psock, work); in sk_psock_backlog() local
653 struct sk_psock_work_state *state = &psock->work_state; in sk_psock_backlog()
663 if (!sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) in sk_psock_backlog()
671 if (!sk_psock_get(psock->sk)) in sk_psock_backlog()
673 mutex_lock(&psock->work_mutex); in sk_psock_backlog()
674 while ((skb = skb_peek(&psock->ingress_skb))) { in sk_psock_backlog()
694 if (!sock_flag(psock->sk, SOCK_DEAD)) in sk_psock_backlog()
695 ret = sk_psock_handle_skb(psock, skb, off, in sk_psock_backlog()
699 sk_psock_skb_state(psock, state, len, off); in sk_psock_backlog()
701 skb_bpf_set_redir(skb, psock->sk, ingress); in sk_psock_backlog()
705 if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) in sk_psock_backlog()
706 schedule_delayed_work(&psock->work, 1); in sk_psock_backlog()
710 sk_psock_report_error(psock, ret ? -ret : EPIPE); in sk_psock_backlog()
711 sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED); in sk_psock_backlog()
719 sk_psock_skb_state(psock, state, 0, 0); in sk_psock_backlog()
720 skb = skb_dequeue(&psock->ingress_skb); in sk_psock_backlog()
724 mutex_unlock(&psock->work_mutex); in sk_psock_backlog()
725 sk_psock_put(psock->sk, psock); in sk_psock_backlog()
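sk_psock_backlog() is the delayed worker that drains psock->ingress_skb. A condensed sketch of the drain loop, assuming the same kernel context (function name is mine; the saved work_state offsets, SOCK_DEAD check and error reporting of the real worker are left out):

static void backlog_drain_sketch(struct sk_psock *psock)
{
        struct sk_buff *skb;
        int ret;

        if (!sk_psock_get(psock->sk))           /* pin the socket while draining */
                return;

        mutex_lock(&psock->work_mutex);
        while ((skb = skb_peek(&psock->ingress_skb))) {
                bool ingress = skb_bpf_ingress(skb);

                ret = sk_psock_handle_skb(psock, skb, 0, skb->len, ingress);
                if (ret == -EAGAIN) {
                        /* Destination not writable yet: keep the skb queued and
                         * let the delayed work retry while TX stays enabled.
                         */
                        if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED))
                                schedule_delayed_work(&psock->work, 1);
                        break;
                }
                /* Consumed (errors are simply dropped in this sketch). */
                skb = skb_dequeue(&psock->ingress_skb);
                kfree_skb(skb);
        }
        mutex_unlock(&psock->work_mutex);
        sk_psock_put(psock->sk, psock);
}
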
730 struct sk_psock *psock; in sk_psock_init() local
736 psock = ERR_PTR(-EINVAL); in sk_psock_init()
741 psock = ERR_PTR(-EBUSY); in sk_psock_init()
745 psock = kzalloc_node(sizeof(*psock), GFP_ATOMIC | __GFP_NOWARN, node); in sk_psock_init()
746 if (!psock) { in sk_psock_init()
747 psock = ERR_PTR(-ENOMEM); in sk_psock_init()
752 psock->sk = sk; in sk_psock_init()
753 psock->eval = __SK_NONE; in sk_psock_init()
754 psock->sk_proto = prot; in sk_psock_init()
755 psock->saved_unhash = prot->unhash; in sk_psock_init()
756 psock->saved_destroy = prot->destroy; in sk_psock_init()
757 psock->saved_close = prot->close; in sk_psock_init()
758 psock->saved_write_space = sk->sk_write_space; in sk_psock_init()
760 INIT_LIST_HEAD(&psock->link); in sk_psock_init()
761 spin_lock_init(&psock->link_lock); in sk_psock_init()
763 INIT_DELAYED_WORK(&psock->work, sk_psock_backlog); in sk_psock_init()
764 mutex_init(&psock->work_mutex); in sk_psock_init()
765 INIT_LIST_HEAD(&psock->ingress_msg); in sk_psock_init()
766 spin_lock_init(&psock->ingress_lock); in sk_psock_init()
767 skb_queue_head_init(&psock->ingress_skb); in sk_psock_init()
769 sk_psock_set_state(psock, SK_PSOCK_TX_ENABLED); in sk_psock_init()
770 refcount_set(&psock->refcnt, 1); in sk_psock_init()
772 __rcu_assign_sk_user_data_with_flags(sk, psock, in sk_psock_init()
779 return psock; in sk_psock_init()
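sk_psock_init() allocates the psock, records the socket's original proto callbacks so they can be restored at teardown, initialises the link list, ingress queues, locks and delayed work, and publishes the psock through the socket's RCU-protected user data. A minimal caller-side sketch of the attach/release pattern (illustrative only; real users go through sock_map and hold the socket lock, none of which is shown):

#include <linux/err.h>
#include <linux/numa.h>
#include <linux/skmsg.h>

static int psock_attach_sketch(struct sock *sk)
{
        struct sk_psock *psock;

        psock = sk_psock_init(sk, NUMA_NO_NODE);        /* takes one reference */
        if (IS_ERR(psock))
                return PTR_ERR(psock);

        /* ... install parser/verdict programs, link the socket into a map ... */

        sk_psock_put(sk, psock);        /* drop the init reference when finished */
        return 0;
}
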
783 struct sk_psock_link *sk_psock_link_pop(struct sk_psock *psock) in sk_psock_link_pop() argument
787 spin_lock_bh(&psock->link_lock); in sk_psock_link_pop()
788 link = list_first_entry_or_null(&psock->link, struct sk_psock_link, in sk_psock_link_pop()
792 spin_unlock_bh(&psock->link_lock); in sk_psock_link_pop()
796 static void __sk_psock_purge_ingress_msg(struct sk_psock *psock) in __sk_psock_purge_ingress_msg() argument
800 list_for_each_entry_safe(msg, tmp, &psock->ingress_msg, list) { in __sk_psock_purge_ingress_msg()
803 atomic_sub(msg->sg.size, &psock->sk->sk_rmem_alloc); in __sk_psock_purge_ingress_msg()
804 sk_msg_free(psock->sk, msg); in __sk_psock_purge_ingress_msg()
809 static void __sk_psock_zap_ingress(struct sk_psock *psock) in __sk_psock_zap_ingress() argument
813 while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) { in __sk_psock_zap_ingress()
815 sock_drop(psock->sk, skb); in __sk_psock_zap_ingress()
817 __sk_psock_purge_ingress_msg(psock); in __sk_psock_zap_ingress()
820 static void sk_psock_link_destroy(struct sk_psock *psock) in sk_psock_link_destroy() argument
824 list_for_each_entry_safe(link, tmp, &psock->link, list) { in sk_psock_link_destroy()
830 void sk_psock_stop(struct sk_psock *psock) in sk_psock_stop() argument
832 spin_lock_bh(&psock->ingress_lock); in sk_psock_stop()
833 sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED); in sk_psock_stop()
834 sk_psock_cork_free(psock); in sk_psock_stop()
835 spin_unlock_bh(&psock->ingress_lock); in sk_psock_stop()
838 static void sk_psock_done_strp(struct sk_psock *psock);
842 struct sk_psock *psock = container_of(to_rcu_work(work), in sk_psock_destroy() local
846 sk_psock_done_strp(psock); in sk_psock_destroy()
848 cancel_delayed_work_sync(&psock->work); in sk_psock_destroy()
849 __sk_psock_zap_ingress(psock); in sk_psock_destroy()
850 mutex_destroy(&psock->work_mutex); in sk_psock_destroy()
852 psock_progs_drop(&psock->progs); in sk_psock_destroy()
854 sk_psock_link_destroy(psock); in sk_psock_destroy()
855 sk_psock_cork_free(psock); in sk_psock_destroy()
857 if (psock->sk_redir) in sk_psock_destroy()
858 sock_put(psock->sk_redir); in sk_psock_destroy()
859 if (psock->sk_pair) in sk_psock_destroy()
860 sock_put(psock->sk_pair); in sk_psock_destroy()
861 sock_put(psock->sk); in sk_psock_destroy()
862 kfree(psock); in sk_psock_destroy()
865 void sk_psock_drop(struct sock *sk, struct sk_psock *psock) in sk_psock_drop() argument
868 sk_psock_restore_proto(sk, psock); in sk_psock_drop()
870 if (psock->progs.stream_parser) in sk_psock_drop()
871 sk_psock_stop_strp(sk, psock); in sk_psock_drop()
872 else if (psock->progs.stream_verdict || psock->progs.skb_verdict) in sk_psock_drop()
873 sk_psock_stop_verdict(sk, psock); in sk_psock_drop()
876 sk_psock_stop(psock); in sk_psock_drop()
878 INIT_RCU_WORK(&psock->rwork, sk_psock_destroy); in sk_psock_drop()
879 queue_rcu_work(system_wq, &psock->rwork); in sk_psock_drop()
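The sk_psock_drop() matches show the teardown order: restore the socket's original proto ops, detach whichever data_ready hook was installed, stop the psock, then free it through RCU-deferred work so lockless readers of sk_psock(sk) never touch freed memory. Reassembled from those lines (whatever sits on the unmatched lines in between, such as locking and user-data clearing, is omitted):

static void psock_release_sketch(struct sock *sk, struct sk_psock *psock)
{
        sk_psock_restore_proto(sk, psock);      /* put the saved proto ops back */

        if (psock->progs.stream_parser)
                sk_psock_stop_strp(sk, psock);
        else if (psock->progs.stream_verdict || psock->progs.skb_verdict)
                sk_psock_stop_verdict(sk, psock);

        sk_psock_stop(psock);                   /* clear TX_ENABLED, free any cork */

        /* sk_psock_destroy() runs only after an RCU grace period has elapsed. */
        INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
        queue_rcu_work(system_wq, &psock->rwork);
}
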
896 int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock, in sk_psock_msg_verdict() argument
903 prog = READ_ONCE(psock->progs.msg_parser); in sk_psock_msg_verdict()
913 psock->apply_bytes = msg->apply_bytes; in sk_psock_msg_verdict()
915 if (psock->sk_redir) { in sk_psock_msg_verdict()
916 sock_put(psock->sk_redir); in sk_psock_msg_verdict()
917 psock->sk_redir = NULL; in sk_psock_msg_verdict()
923 psock->redir_ingress = sk_msg_to_ingress(msg); in sk_psock_msg_verdict()
924 psock->sk_redir = msg->sk_redir; in sk_psock_msg_verdict()
925 sock_hold(psock->sk_redir); in sk_psock_msg_verdict()
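sk_psock_msg_verdict() runs the msg_parser program over an sk_msg; the matches above are mostly the bookkeeping around a redirect verdict. A sketch of that bookkeeping under an illustrative name, assuming the program has already returned its verdict (the drop path taken when no redirect target was set is omitted):

static void record_msg_redirect_sketch(struct sk_psock *psock, struct sk_msg *msg)
{
        psock->apply_bytes = msg->apply_bytes;  /* bytes this verdict applies to */

        /* Release any previous redirect target before pinning the new one. */
        if (psock->sk_redir) {
                sock_put(psock->sk_redir);
                psock->sk_redir = NULL;
        }
        if (!msg->sk_redir)
                return;

        psock->redir_ingress = sk_msg_to_ingress(msg);  /* BPF_F_INGRESS set? */
        psock->sk_redir = msg->sk_redir;
        sock_hold(psock->sk_redir);             /* keep the target socket alive */
}
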
985 int sk_psock_tls_strp_read(struct sk_psock *psock, struct sk_buff *skb) in sk_psock_tls_strp_read() argument
991 prog = READ_ONCE(psock->progs.stream_verdict); in sk_psock_tls_strp_read()
993 skb->sk = psock->sk; in sk_psock_tls_strp_read()
1000 sk_psock_tls_verdict_apply(skb, psock, ret); in sk_psock_tls_strp_read()
1006 static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb, in sk_psock_verdict_apply() argument
1016 sk_other = psock->sk; in sk_psock_verdict_apply()
1018 !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) in sk_psock_verdict_apply()
1029 if (skb_queue_empty(&psock->ingress_skb)) { in sk_psock_verdict_apply()
1038 err = sk_psock_skb_ingress_self(psock, skb, off, len, false); in sk_psock_verdict_apply()
1041 spin_lock_bh(&psock->ingress_lock); in sk_psock_verdict_apply()
1042 if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) { in sk_psock_verdict_apply()
1043 skb_queue_tail(&psock->ingress_skb, skb); in sk_psock_verdict_apply()
1044 schedule_delayed_work(&psock->work, 0); in sk_psock_verdict_apply()
1047 spin_unlock_bh(&psock->ingress_lock); in sk_psock_verdict_apply()
1053 tcp_eat_skb(psock->sk, skb); in sk_psock_verdict_apply()
1054 err = sk_psock_skb_redirect(psock, skb); in sk_psock_verdict_apply()
1060 tcp_eat_skb(psock->sk, skb); in sk_psock_verdict_apply()
1061 sock_drop(psock->sk, skb); in sk_psock_verdict_apply()
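sk_psock_verdict_apply() turns a BPF verdict into an action on the skb. Condensed from the matches above into the three outcomes (function name is mine; the SOCK_DEAD guard on the pass path and the tcp_eat_skb() receive-queue accounting are not reproduced):

static int verdict_dispatch_sketch(struct sk_psock *psock, struct sk_buff *skb,
                                   u32 off, u32 len, int verdict)
{
        int err = 0;

        switch (verdict) {
        case __SK_PASS:
                /* Local delivery. Fast path: nothing queued yet, so convert the
                 * skb into sk_msg data right away; otherwise keep ordering by
                 * queueing behind the backlog and kicking the worker.
                 */
                if (skb_queue_empty(&psock->ingress_skb)) {
                        err = sk_psock_skb_ingress_self(psock, skb, off, len, false);
                } else {
                        spin_lock_bh(&psock->ingress_lock);
                        if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
                                skb_queue_tail(&psock->ingress_skb, skb);
                                schedule_delayed_work(&psock->work, 0);
                        } else {
                                /* Teardown already disabled TX: drop instead. */
                                sock_drop(psock->sk, skb);
                                err = -EIO;
                        }
                        spin_unlock_bh(&psock->ingress_lock);
                }
                break;
        case __SK_REDIRECT:
                err = sk_psock_skb_redirect(psock, skb);
                break;
        case __SK_DROP:
        default:
                sock_drop(psock->sk, skb);
                break;
        }
        return err;
}
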
1069 struct sk_psock *psock; in sk_psock_write_space() local
1073 psock = sk_psock(sk); in sk_psock_write_space()
1074 if (likely(psock)) { in sk_psock_write_space()
1075 if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) in sk_psock_write_space()
1076 schedule_delayed_work(&psock->work, 0); in sk_psock_write_space()
1077 write_space = psock->saved_write_space; in sk_psock_write_space()
1087 struct sk_psock *psock; in sk_psock_strp_read() local
1094 psock = sk_psock(sk); in sk_psock_strp_read()
1095 if (unlikely(!psock)) { in sk_psock_strp_read()
1099 prog = READ_ONCE(psock->progs.stream_verdict); in sk_psock_strp_read()
1109 sk_psock_verdict_apply(psock, skb, ret); in sk_psock_strp_read()
1121 struct sk_psock *psock = container_of(strp, struct sk_psock, strp); in sk_psock_strp_parse() local
1126 prog = READ_ONCE(psock->progs.stream_parser); in sk_psock_strp_parse()
1128 skb->sk = psock->sk; in sk_psock_strp_parse()
1139 struct sk_psock *psock; in sk_psock_strp_data_ready() local
1144 psock = sk_psock(sk); in sk_psock_strp_data_ready()
1145 if (likely(psock)) { in sk_psock_strp_data_ready()
1147 psock->saved_data_ready(sk); in sk_psock_strp_data_ready()
1150 strp_data_ready(&psock->strp); in sk_psock_strp_data_ready()
1157 int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock) in sk_psock_init_strp() argument
1167 ret = strp_init(&psock->strp, sk, &cb); in sk_psock_init_strp()
1169 sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED); in sk_psock_init_strp()
1172 psock->strp.cb.read_sock = tcp_bpf_strp_read_sock; in sk_psock_init_strp()
1173 psock->copied_seq = tcp_sk(sk)->copied_seq; in sk_psock_init_strp()
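sk_psock_init_strp() wires the psock into the stream parser. A sketch of that hookup using the callbacks visible in the matches (the real initializer may set further callbacks, and lines 1172-1173 show extra TCP read_sock/copied_seq plumbing not reproduced here):

static int init_strp_sketch(struct sock *sk, struct sk_psock *psock)
{
        static const struct strp_callbacks cb = {
                .rcv_msg   = sk_psock_strp_read,   /* run the verdict on each record */
                .parse_msg = sk_psock_strp_parse,  /* stream_parser finds record length */
        };
        int ret;

        ret = strp_init(&psock->strp, sk, &cb);
        if (!ret)
                sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED);
        return ret;
}
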
1178 void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock) in sk_psock_start_strp() argument
1180 if (psock->saved_data_ready) in sk_psock_start_strp()
1183 psock->saved_data_ready = sk->sk_data_ready; in sk_psock_start_strp()
1188 void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock) in sk_psock_stop_strp() argument
1190 psock_set_prog(&psock->progs.stream_parser, NULL); in sk_psock_stop_strp()
1192 if (!psock->saved_data_ready) in sk_psock_stop_strp()
1195 sk->sk_data_ready = psock->saved_data_ready; in sk_psock_stop_strp()
1196 psock->saved_data_ready = NULL; in sk_psock_stop_strp()
1197 strp_stop(&psock->strp); in sk_psock_stop_strp()
1200 static void sk_psock_done_strp(struct sk_psock *psock) in sk_psock_done_strp() argument
1203 if (sk_psock_test_state(psock, SK_PSOCK_RX_STRP_ENABLED)) in sk_psock_done_strp()
1204 strp_done(&psock->strp); in sk_psock_done_strp()
1207 static void sk_psock_done_strp(struct sk_psock *psock) in sk_psock_done_strp() argument
1214 struct sk_psock *psock; in sk_psock_verdict_recv() local
1220 psock = sk_psock(sk); in sk_psock_verdict_recv()
1221 if (unlikely(!psock)) { in sk_psock_verdict_recv()
1227 prog = READ_ONCE(psock->progs.stream_verdict); in sk_psock_verdict_recv()
1229 prog = READ_ONCE(psock->progs.skb_verdict); in sk_psock_verdict_recv()
1236 ret = sk_psock_verdict_apply(psock, skb, ret); in sk_psock_verdict_recv()
1259 struct sk_psock *psock; in sk_psock_verdict_data_ready() local
1262 psock = sk_psock(sk); in sk_psock_verdict_data_ready()
1263 if (psock) in sk_psock_verdict_data_ready()
1264 sk_psock_data_ready(sk, psock); in sk_psock_verdict_data_ready()
1269 void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock) in sk_psock_start_verdict() argument
1271 if (psock->saved_data_ready) in sk_psock_start_verdict()
1274 psock->saved_data_ready = sk->sk_data_ready; in sk_psock_start_verdict()
1279 void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock) in sk_psock_stop_verdict() argument
1281 psock_set_prog(&psock->progs.stream_verdict, NULL); in sk_psock_stop_verdict()
1282 psock_set_prog(&psock->progs.skb_verdict, NULL); in sk_psock_stop_verdict()
1284 if (!psock->saved_data_ready) in sk_psock_stop_verdict()
1287 sk->sk_data_ready = psock->saved_data_ready; in sk_psock_stop_verdict()
1288 psock->saved_data_ready = NULL; in sk_psock_stop_verdict()
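The start/stop verdict pair (and the strparser equivalents above) follow one pattern: start saves the socket's original sk_data_ready in psock->saved_data_ready and installs the BPF-aware callback; stop clears the verdict programs and restores whatever was saved. Reassembled from the stop-side matches (callers are expected to serialize this against other callback updates):

static void stop_verdict_sketch(struct sock *sk, struct sk_psock *psock)
{
        psock_set_prog(&psock->progs.stream_verdict, NULL);
        psock_set_prog(&psock->progs.skb_verdict, NULL);

        if (!psock->saved_data_ready)
                return;         /* start_verdict never ran; nothing to restore */

        sk->sk_data_ready = psock->saved_data_ready;
        psock->saved_data_ready = NULL;
}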