Lines Matching refs:svsk

147 struct svc_sock *svsk = in svc_set_cmsg_data() local
149 switch (svsk->sk_sk->sk_family) { in svc_set_cmsg_data()
185 static int svc_one_sock_name(struct svc_sock *svsk, char *buf, int remaining) in svc_one_sock_name() argument
187 const struct sock *sk = svsk->sk_sk; in svc_one_sock_name()
246 struct svc_sock *svsk = in svc_tcp_read_msg() local
254 clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); in svc_tcp_read_msg()
266 len = sock_recvmsg(svsk->sk_sock, &msg, MSG_DONTWAIT); in svc_tcp_read_msg()
274 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); in svc_tcp_read_msg()
282 static void svc_sock_setbufsize(struct svc_sock *svsk, unsigned int nreqs) in svc_sock_setbufsize() argument
284 unsigned int max_mesg = svsk->sk_xprt.xpt_server->sv_max_mesg; in svc_sock_setbufsize()
285 struct socket *sock = svsk->sk_sock; in svc_sock_setbufsize()
309 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; in svc_data_ready() local
313 if (svsk) { in svc_data_ready()
316 svsk->sk_odata(sk); in svc_data_ready()
317 trace_svcsock_data_ready(&svsk->sk_xprt, 0); in svc_data_ready()
318 if (!test_and_set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags)) in svc_data_ready()
319 svc_xprt_enqueue(&svsk->sk_xprt); in svc_data_ready()
328 struct svc_sock *svsk = (struct svc_sock *)(sk->sk_user_data); in svc_write_space() local
330 if (svsk) { in svc_write_space()
333 trace_svcsock_write_space(&svsk->sk_xprt, 0); in svc_write_space()
334 svsk->sk_owspace(sk); in svc_write_space()
335 svc_xprt_enqueue(&svsk->sk_xprt); in svc_write_space()
341 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); in svc_tcp_has_wspace() local
345 return !test_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); in svc_tcp_has_wspace()
350 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); in svc_tcp_kill_temp_xprt() local
352 sock_no_linger(svsk->sk_sock->sk); in svc_tcp_kill_temp_xprt()
422 struct svc_sock *svsk = in svc_udp_recvfrom() local
424 struct svc_serv *serv = svsk->sk_xprt.xpt_server; in svc_udp_recvfrom()
440 if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags)) in svc_udp_recvfrom()
449 svc_sock_setbufsize(svsk, serv->sv_nrthreads + 3); in svc_udp_recvfrom()
451 clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); in svc_udp_recvfrom()
452 err = kernel_recvmsg(svsk->sk_sock, &msg, NULL, in svc_udp_recvfrom()
456 skb = skb_recv_udp(svsk->sk_sk, MSG_DONTWAIT, &err); in svc_udp_recvfrom()
467 sock_write_timestamp(svsk->sk_sk, skb->tstamp); in svc_udp_recvfrom()
468 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */ in svc_udp_recvfrom()
472 trace_svcsock_udp_recv(&svsk->sk_xprt, len); in svc_udp_recvfrom()
518 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); in svc_udp_recvfrom()
520 trace_svcsock_udp_recv_err(&svsk->sk_xprt, err); in svc_udp_recvfrom()
547 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); in svc_udp_sendto() local
576 err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent); in svc_udp_sendto()
579 err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent); in svc_udp_sendto()
596 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); in svc_udp_has_wspace() local
604 set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); in svc_udp_has_wspace()
605 required = atomic_read(&svsk->sk_xprt.xpt_reserved) + serv->sv_max_mesg; in svc_udp_has_wspace()
606 if (required*2 > sock_wspace(svsk->sk_sk)) in svc_udp_has_wspace()
608 clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); in svc_udp_has_wspace()
651 static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv) in svc_udp_init() argument
653 svc_xprt_init(sock_net(svsk->sk_sock->sk), &svc_udp_class, in svc_udp_init()
654 &svsk->sk_xprt, serv); in svc_udp_init()
655 clear_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags); in svc_udp_init()
656 svsk->sk_sk->sk_data_ready = svc_data_ready; in svc_udp_init()
657 svsk->sk_sk->sk_write_space = svc_write_space; in svc_udp_init()
663 svc_sock_setbufsize(svsk, 3); in svc_udp_init()
666 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); in svc_udp_init()
667 set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags); in svc_udp_init()
670 switch (svsk->sk_sk->sk_family) { in svc_udp_init()
672 ip_sock_set_pktinfo(svsk->sk_sock->sk); in svc_udp_init()
675 ip6_sock_set_recvpktinfo(svsk->sk_sock->sk); in svc_udp_init()
688 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; in svc_tcp_listen_data_ready() local
692 if (svsk) { in svc_tcp_listen_data_ready()
695 svsk->sk_odata(sk); in svc_tcp_listen_data_ready()
709 if (svsk) { in svc_tcp_listen_data_ready()
710 set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags); in svc_tcp_listen_data_ready()
711 svc_xprt_enqueue(&svsk->sk_xprt); in svc_tcp_listen_data_ready()
721 struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data; in svc_tcp_state_change() local
723 if (svsk) { in svc_tcp_state_change()
726 svsk->sk_ostate(sk); in svc_tcp_state_change()
727 trace_svcsock_tcp_state(&svsk->sk_xprt, svsk->sk_sock); in svc_tcp_state_change()
729 svc_xprt_deferred_close(&svsk->sk_xprt); in svc_tcp_state_change()
738 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); in svc_tcp_accept() local
741 struct svc_serv *serv = svsk->sk_xprt.xpt_server; in svc_tcp_accept()
742 struct socket *sock = svsk->sk_sock; in svc_tcp_accept()
750 clear_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags); in svc_tcp_accept()
762 set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags); in svc_tcp_accept()
772 newsock->sk->sk_state_change = svsk->sk_ostate; in svc_tcp_accept()
773 newsock->sk->sk_data_ready = svsk->sk_odata; in svc_tcp_accept()
774 newsock->sk->sk_write_space = svsk->sk_owspace; in svc_tcp_accept()
806 static size_t svc_tcp_restore_pages(struct svc_sock *svsk, in svc_tcp_restore_pages() argument
809 size_t len = svsk->sk_datalen; in svc_tcp_restore_pages()
818 BUG_ON(svsk->sk_pages[i] == NULL); in svc_tcp_restore_pages()
819 rqstp->rq_pages[i] = svsk->sk_pages[i]; in svc_tcp_restore_pages()
820 svsk->sk_pages[i] = NULL; in svc_tcp_restore_pages()
826 static void svc_tcp_save_pages(struct svc_sock *svsk, struct svc_rqst *rqstp) in svc_tcp_save_pages() argument
830 if (svsk->sk_datalen == 0) in svc_tcp_save_pages()
832 len = svsk->sk_datalen; in svc_tcp_save_pages()
835 svsk->sk_pages[i] = rqstp->rq_pages[i]; in svc_tcp_save_pages()
840 static void svc_tcp_clear_pages(struct svc_sock *svsk) in svc_tcp_clear_pages() argument
844 if (svsk->sk_datalen == 0) in svc_tcp_clear_pages()
846 len = svsk->sk_datalen; in svc_tcp_clear_pages()
849 if (svsk->sk_pages[i] == NULL) { in svc_tcp_clear_pages()
853 put_page(svsk->sk_pages[i]); in svc_tcp_clear_pages()
854 svsk->sk_pages[i] = NULL; in svc_tcp_clear_pages()
857 svsk->sk_tcplen = 0; in svc_tcp_clear_pages()
858 svsk->sk_datalen = 0; in svc_tcp_clear_pages()
864 static ssize_t svc_tcp_read_marker(struct svc_sock *svsk, in svc_tcp_read_marker() argument
872 if (svsk->sk_tcplen < sizeof(rpc_fraghdr)) { in svc_tcp_read_marker()
876 want = sizeof(rpc_fraghdr) - svsk->sk_tcplen; in svc_tcp_read_marker()
877 iov.iov_base = ((char *)&svsk->sk_marker) + svsk->sk_tcplen; in svc_tcp_read_marker()
880 len = sock_recvmsg(svsk->sk_sock, &msg, MSG_DONTWAIT); in svc_tcp_read_marker()
883 svsk->sk_tcplen += len; in svc_tcp_read_marker()
888 trace_svcsock_marker(&svsk->sk_xprt, svsk->sk_marker); in svc_tcp_read_marker()
889 if (svc_sock_reclen(svsk) + svsk->sk_datalen > in svc_tcp_read_marker()
890 svsk->sk_xprt.xpt_server->sv_max_mesg) in svc_tcp_read_marker()
893 return svc_sock_reclen(svsk); in svc_tcp_read_marker()
897 __func__, svsk->sk_xprt.xpt_server->sv_name, in svc_tcp_read_marker()
898 svc_sock_reclen(svsk)); in svc_tcp_read_marker()
899 svc_xprt_deferred_close(&svsk->sk_xprt); in svc_tcp_read_marker()
904 static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp) in receive_cb_reply() argument
906 struct rpc_xprt *bc_xprt = svsk->sk_xprt.xpt_bc_xprt; in receive_cb_reply()
949 static void svc_tcp_fragment_received(struct svc_sock *svsk) in svc_tcp_fragment_received() argument
952 svsk->sk_tcplen = 0; in svc_tcp_fragment_received()
953 svsk->sk_marker = xdr_zero; in svc_tcp_fragment_received()
976 struct svc_sock *svsk = in svc_tcp_recvfrom() local
978 struct svc_serv *serv = svsk->sk_xprt.xpt_server; in svc_tcp_recvfrom()
984 clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); in svc_tcp_recvfrom()
985 len = svc_tcp_read_marker(svsk, rqstp); in svc_tcp_recvfrom()
989 base = svc_tcp_restore_pages(svsk, rqstp); in svc_tcp_recvfrom()
990 want = len - (svsk->sk_tcplen - sizeof(rpc_fraghdr)); in svc_tcp_recvfrom()
993 trace_svcsock_tcp_recv(&svsk->sk_xprt, len); in svc_tcp_recvfrom()
994 svsk->sk_tcplen += len; in svc_tcp_recvfrom()
995 svsk->sk_datalen += len; in svc_tcp_recvfrom()
997 if (len != want || !svc_sock_final_rec(svsk)) in svc_tcp_recvfrom()
999 if (svsk->sk_datalen < 8) in svc_tcp_recvfrom()
1002 rqstp->rq_arg.len = svsk->sk_datalen; in svc_tcp_recvfrom()
1012 if (test_bit(XPT_LOCAL, &svsk->sk_xprt.xpt_flags)) in svc_tcp_recvfrom()
1020 len = receive_cb_reply(svsk, rqstp); in svc_tcp_recvfrom()
1023 svsk->sk_datalen = 0; in svc_tcp_recvfrom()
1024 svc_tcp_fragment_received(svsk); in svc_tcp_recvfrom()
1029 svc_xprt_copy_addrs(rqstp, &svsk->sk_xprt); in svc_tcp_recvfrom()
1038 svc_tcp_save_pages(svsk, rqstp); in svc_tcp_recvfrom()
1042 svc_tcp_fragment_received(svsk); in svc_tcp_recvfrom()
1044 trace_svcsock_tcp_recv_short(&svsk->sk_xprt, in svc_tcp_recvfrom()
1045 svc_sock_reclen(svsk), in svc_tcp_recvfrom()
1046 svsk->sk_tcplen - sizeof(rpc_fraghdr)); in svc_tcp_recvfrom()
1051 trace_svcsock_tcp_recv_eagain(&svsk->sk_xprt, 0); in svc_tcp_recvfrom()
1054 svsk->sk_datalen = 0; in svc_tcp_recvfrom()
1056 trace_svcsock_tcp_recv_err(&svsk->sk_xprt, len); in svc_tcp_recvfrom()
1057 svc_xprt_deferred_close(&svsk->sk_xprt); in svc_tcp_recvfrom()
1158 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); in svc_tcp_sendto() local
1167 atomic_inc(&svsk->sk_sendqlen); in svc_tcp_sendto()
1171 tcp_sock_set_cork(svsk->sk_sk, true); in svc_tcp_sendto()
1172 err = svc_tcp_sendmsg(svsk->sk_sock, xdr, marker, &sent); in svc_tcp_sendto()
1177 if (atomic_dec_and_test(&svsk->sk_sendqlen)) in svc_tcp_sendto()
1178 tcp_sock_set_cork(svsk->sk_sk, false); in svc_tcp_sendto()
1183 atomic_dec(&svsk->sk_sendqlen); in svc_tcp_sendto()
1192 atomic_dec(&svsk->sk_sendqlen); in svc_tcp_sendto()
1238 static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv) in svc_tcp_init() argument
1240 struct sock *sk = svsk->sk_sk; in svc_tcp_init()
1242 svc_xprt_init(sock_net(svsk->sk_sock->sk), &svc_tcp_class, in svc_tcp_init()
1243 &svsk->sk_xprt, serv); in svc_tcp_init()
1244 set_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags); in svc_tcp_init()
1245 set_bit(XPT_CONG_CTRL, &svsk->sk_xprt.xpt_flags); in svc_tcp_init()
1247 strcpy(svsk->sk_xprt.xpt_remotebuf, "listener"); in svc_tcp_init()
1248 set_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags); in svc_tcp_init()
1250 set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags); in svc_tcp_init()
1256 svsk->sk_marker = xdr_zero; in svc_tcp_init()
1257 svsk->sk_tcplen = 0; in svc_tcp_init()
1258 svsk->sk_datalen = 0; in svc_tcp_init()
1259 memset(&svsk->sk_pages[0], 0, sizeof(svsk->sk_pages)); in svc_tcp_init()
1263 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); in svc_tcp_init()
1269 svc_xprt_deferred_close(&svsk->sk_xprt); in svc_tcp_init()
1280 struct svc_sock *svsk; in svc_sock_update_bufs() local
1283 list_for_each_entry(svsk, &serv->sv_permsocks, sk_xprt.xpt_list) in svc_sock_update_bufs()
1284 set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags); in svc_sock_update_bufs()
1296 struct svc_sock *svsk; in svc_setup_socket() local
1301 svsk = kzalloc(sizeof(*svsk), GFP_KERNEL); in svc_setup_socket()
1302 if (!svsk) in svc_setup_socket()
1314 kfree(svsk); in svc_setup_socket()
1318 svsk->sk_sock = sock; in svc_setup_socket()
1319 svsk->sk_sk = inet; in svc_setup_socket()
1320 svsk->sk_ostate = inet->sk_state_change; in svc_setup_socket()
1321 svsk->sk_odata = inet->sk_data_ready; in svc_setup_socket()
1322 svsk->sk_owspace = inet->sk_write_space; in svc_setup_socket()
1329 inet->sk_user_data = svsk; in svc_setup_socket()
1333 svc_udp_init(svsk, serv); in svc_setup_socket()
1335 svc_tcp_init(svsk, serv); in svc_setup_socket()
1338 return svsk; in svc_setup_socket()
1374 struct svc_sock *svsk = NULL; in svc_addsock() local
1394 svsk = svc_setup_socket(serv, so, SVC_SOCK_DEFAULTS); in svc_addsock()
1395 if (IS_ERR(svsk)) { in svc_addsock()
1397 err = PTR_ERR(svsk); in svc_addsock()
1400 salen = kernel_getsockname(svsk->sk_sock, sin); in svc_addsock()
1402 svc_xprt_set_local(&svsk->sk_xprt, sin, salen); in svc_addsock()
1403 svsk->sk_xprt.xpt_cred = get_cred(cred); in svc_addsock()
1404 svc_add_new_perm_xprt(serv, &svsk->sk_xprt); in svc_addsock()
1405 return svc_one_sock_name(svsk, name_return, len); in svc_addsock()
1421 struct svc_sock *svsk; in svc_create_socket() local
1477 svsk = svc_setup_socket(serv, sock, flags); in svc_create_socket()
1478 if (IS_ERR(svsk)) { in svc_create_socket()
1479 error = PTR_ERR(svsk); in svc_create_socket()
1482 svc_xprt_set_local(&svsk->sk_xprt, newsin, newlen); in svc_create_socket()
1483 return (struct svc_xprt *)svsk; in svc_create_socket()
1495 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); in svc_sock_detach() local
1496 struct sock *sk = svsk->sk_sk; in svc_sock_detach()
1500 sk->sk_state_change = svsk->sk_ostate; in svc_sock_detach()
1501 sk->sk_data_ready = svsk->sk_odata; in svc_sock_detach()
1502 sk->sk_write_space = svsk->sk_owspace; in svc_sock_detach()
1512 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); in svc_tcp_sock_detach() local
1517 svc_tcp_clear_pages(svsk); in svc_tcp_sock_detach()
1518 kernel_sock_shutdown(svsk->sk_sock, SHUT_RDWR); in svc_tcp_sock_detach()
1527 struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt); in svc_sock_free() local
1529 if (svsk->sk_sock->file) in svc_sock_free()
1530 sockfd_put(svsk->sk_sock); in svc_sock_free()
1532 sock_release(svsk->sk_sock); in svc_sock_free()
1533 kfree(svsk); in svc_sock_free()