Lines matching refs:kmsg

129 static void io_netmsg_iovec_free(struct io_async_msghdr *kmsg)  in io_netmsg_iovec_free()  argument
131 if (kmsg->free_iov) { in io_netmsg_iovec_free()
132 kfree(kmsg->free_iov); in io_netmsg_iovec_free()
133 kmsg->free_iov_nr = 0; in io_netmsg_iovec_free()
134 kmsg->free_iov = NULL; in io_netmsg_iovec_free()
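Read together, the hits at 129-134 are the whole helper apart from its braces: it frees the cached iovec and clears the bookkeeping so the async msghdr can be recycled. A minimal reconstruction; only the braces and comment are assumed, the rest is verbatim from the matches:

        static void io_netmsg_iovec_free(struct io_async_msghdr *kmsg)
        {
                /* drop the cached iovec allocation, if any, and reset bookkeeping */
                if (kmsg->free_iov) {
                        kfree(kmsg->free_iov);
                        kmsg->free_iov_nr = 0;
                        kmsg->free_iov = NULL;
                }
        }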
186 static int io_net_vec_assign(struct io_kiocb *req, struct io_async_msghdr *kmsg, in io_net_vec_assign() argument
191 kmsg->free_iov_nr = kmsg->msg.msg_iter.nr_segs; in io_net_vec_assign()
192 if (kmsg->free_iov) in io_net_vec_assign()
193 kfree(kmsg->free_iov); in io_net_vec_assign()
194 kmsg->free_iov = iov; in io_net_vec_assign()
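The hits at 186-194 show io_net_vec_assign caching a caller-supplied iovec in kmsg->free_iov after freeing any previous one. In the sketch below, the `if (iov)` guard, the REQ_F_NEED_CLEANUP marking, and the return value are filled in from context and are assumptions; they reference neither kmsg nor the search term, so they cannot appear in the match output:

        static int io_net_vec_assign(struct io_kiocb *req, struct io_async_msghdr *kmsg,
                                     struct iovec *iov)
        {
                if (iov) {
                        /* record segment count before stashing the new vector */
                        req->flags |= REQ_F_NEED_CLEANUP;
                        kmsg->free_iov_nr = kmsg->msg.msg_iter.nr_segs;
                        if (kmsg->free_iov)
                                kfree(kmsg->free_iov);
                        kmsg->free_iov = iov;
                }
                return 0;
        }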
200 struct io_async_msghdr *kmsg) in io_mshot_prep_retry() argument
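Only the signature of io_mshot_prep_retry (line 200) references kmsg, so none of its body is listed. As a sketch of what the multishot retry reset does: the field names sr->done_io, sr->len, sr->buf_group and the REQ_F_BL_EMPTY clear are assumptions drawn from upstream context, not from the matches above:

        static void io_mshot_prep_retry(struct io_kiocb *req,
                                        struct io_async_msghdr *kmsg)
        {
                struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

                req->flags &= ~REQ_F_BL_EMPTY;
                sr->done_io = 0;
                sr->len = 0;            /* next length comes from the provided buffer */
                req->buf_index = sr->buf_group;
        }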
362 struct io_async_msghdr *kmsg = req->async_data; in io_send_setup() local
365 kmsg->msg.msg_name = NULL; in io_send_setup()
366 kmsg->msg.msg_namelen = 0; in io_send_setup()
367 kmsg->msg.msg_control = NULL; in io_send_setup()
368 kmsg->msg.msg_controllen = 0; in io_send_setup()
369 kmsg->msg.msg_ubuf = NULL; in io_send_setup()
372 ret = move_addr_to_kernel(sr->addr, sr->addr_len, &kmsg->addr); in io_send_setup()
375 kmsg->msg.msg_name = &kmsg->addr; in io_send_setup()
376 kmsg->msg.msg_namelen = sr->addr_len; in io_send_setup()
380 &kmsg->msg.msg_iter); in io_send_setup()
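Lines 362-380 outline io_send_setup: reset the kernel msghdr, copy in a destination address when one was supplied, and import the user buffer up front unless buffer selection will do it at issue time. In this sketch, the two guards (`if (sr->addr)` and `if (!io_do_buffer_select(req))`) and the import_ubuf call feeding the msg_iter at line 380 are inferred glue, not part of the match output:

        static int io_send_setup(struct io_kiocb *req)
        {
                struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
                struct io_async_msghdr *kmsg = req->async_data;
                int ret;

                /* start from a clean msghdr; stale fields must not leak between uses */
                kmsg->msg.msg_name = NULL;
                kmsg->msg.msg_namelen = 0;
                kmsg->msg.msg_control = NULL;
                kmsg->msg.msg_controllen = 0;
                kmsg->msg.msg_ubuf = NULL;

                if (sr->addr) {
                        ret = move_addr_to_kernel(sr->addr, sr->addr_len, &kmsg->addr);
                        if (unlikely(ret < 0))
                                return ret;
                        kmsg->msg.msg_name = &kmsg->addr;
                        kmsg->msg.msg_namelen = sr->addr_len;
                }
                if (!io_do_buffer_select(req)) {
                        ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
                                          &kmsg->msg.msg_iter);
                        if (unlikely(ret < 0))
                                return ret;
                }
                return 0;
        }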
389 struct io_async_msghdr *kmsg; in io_sendmsg_prep_setup() local
392 kmsg = io_msg_alloc_async(req); in io_sendmsg_prep_setup()
393 if (unlikely(!kmsg)) in io_sendmsg_prep_setup()
397 ret = io_sendmsg_copy_hdr(req, kmsg); in io_sendmsg_prep_setup()
460 static int io_bundle_nbufs(struct io_async_msghdr *kmsg, int ret) in io_bundle_nbufs() argument
468 if (iter_is_ubuf(&kmsg->msg.msg_iter)) in io_bundle_nbufs()
471 iov = kmsg->free_iov; in io_bundle_nbufs()
473 iov = &kmsg->fast_iov; in io_bundle_nbufs()
476 if (!iov_iter_count(&kmsg->msg.msg_iter)) in io_bundle_nbufs()
477 return iter_iov(&kmsg->msg.msg_iter) - iov; in io_bundle_nbufs()
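io_bundle_nbufs (lines 460-477) works out how many provided buffers a bundle transfer consumed. The matches cover the fast paths: a ubuf iterator is always one segment, and a fully drained iterator reduces to pointer arithmetic against the iovec base. The short-transfer loop at the end touches only locals, so it is reconstructed here and should be read as a sketch:

        static int io_bundle_nbufs(struct io_async_msghdr *kmsg, int ret)
        {
                struct iovec *iov;
                int nbufs;

                /* no data is always zero segments, and a ubuf is always 1 segment */
                if (ret <= 0)
                        return 0;
                if (iter_is_ubuf(&kmsg->msg.msg_iter))
                        return 1;

                iov = kmsg->free_iov;
                if (!iov)
                        iov = &kmsg->fast_iov;

                /* if all data was transferred, it's basic pointer math */
                if (!iov_iter_count(&kmsg->msg.msg_iter))
                        return iter_iov(&kmsg->msg.msg_iter) - iov;

                /* short transfer: walk the vector, counting consumed segments */
                nbufs = 0;
                do {
                        int this_len = min_t(int, iov[nbufs].iov_len, ret);

                        nbufs++;
                        ret -= this_len;
                } while (ret);

                return nbufs;
        }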
492 struct io_async_msghdr *kmsg, in io_send_finish() argument
504 cflags = io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret), issue_flags); in io_send_finish()
514 io_mshot_prep_retry(req, kmsg); in io_send_finish()
528 struct io_async_msghdr *kmsg = req->async_data; in io_sendmsg() local
546 min_ret = iov_iter_count(&kmsg->msg.msg_iter); in io_sendmsg()
548 kmsg->msg.msg_control_user = sr->msg_control; in io_sendmsg()
550 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags); in io_sendmsg()
556 kmsg->msg.msg_controllen = 0; in io_sendmsg()
557 kmsg->msg.msg_control = NULL; in io_sendmsg()
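The io_sendmsg hits at 556-557 are the interesting detail: when a partial send is going to be retried, the control message has already gone out, so it is cleared from the msghdr to avoid transmitting it twice. A sketch of the surrounding retry tail; io_net_retry, sr->done_io, and REQ_F_BL_NO_RECYCLE are assumptions from upstream context:

        ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

        if (ret < min_ret) {
                if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
                        return -EAGAIN;
                if (ret > 0 && io_net_retry(sock, flags)) {
                        /* control data already sent; must not be resent on retry */
                        kmsg->msg.msg_controllen = 0;
                        kmsg->msg.msg_control = NULL;
                        sr->done_io += ret;
                        req->flags |= REQ_F_BL_NO_RECYCLE;
                        return -EAGAIN;
                }
                if (ret == -ERESTARTSYS)
                        ret = -EINTR;
                req_set_fail(req);
        }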
578 struct io_async_msghdr *kmsg = req->async_data; in io_send() local
599 .iovs = &kmsg->fast_iov, in io_send()
604 if (kmsg->free_iov) { in io_send()
605 arg.nr_iovs = kmsg->free_iov_nr; in io_send()
606 arg.iovs = kmsg->free_iov; in io_send()
619 if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) { in io_send()
620 kmsg->free_iov_nr = ret; in io_send()
621 kmsg->free_iov = arg.iovs; in io_send()
629 &kmsg->msg.msg_iter); in io_send()
633 iov_iter_init(&kmsg->msg.msg_iter, ITER_SOURCE, in io_send()
644 min_ret = iov_iter_count(&kmsg->msg.msg_iter); in io_send()
647 kmsg->msg.msg_flags = flags; in io_send()
648 ret = sock_sendmsg(sock, &kmsg->msg); in io_send()
669 if (!io_send_finish(req, &ret, kmsg, issue_flags)) in io_send()
741 struct io_async_msghdr *kmsg; in io_recvmsg_prep_setup() local
744 kmsg = io_msg_alloc_async(req); in io_recvmsg_prep_setup()
745 if (unlikely(!kmsg)) in io_recvmsg_prep_setup()
749 kmsg->msg.msg_name = NULL; in io_recvmsg_prep_setup()
750 kmsg->msg.msg_namelen = 0; in io_recvmsg_prep_setup()
751 kmsg->msg.msg_control = NULL; in io_recvmsg_prep_setup()
752 kmsg->msg.msg_get_inq = 1; in io_recvmsg_prep_setup()
753 kmsg->msg.msg_controllen = 0; in io_recvmsg_prep_setup()
754 kmsg->msg.msg_iocb = NULL; in io_recvmsg_prep_setup()
755 kmsg->msg.msg_ubuf = NULL; in io_recvmsg_prep_setup()
759 &kmsg->msg.msg_iter); in io_recvmsg_prep_setup()
766 ret = io_recvmsg_copy_hdr(req, kmsg); in io_recvmsg_prep_setup()
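Lines 741-766 sketch the recv-side prep: allocate the async msghdr, then either initialize the msghdr inline (note msg_get_inq = 1, which asks the protocol to report queued bytes for multishot decisions) or copy the full user msghdr. The opcode test separating the two paths is an assumption; only the kmsg-touching lines appear above:

        static int io_recvmsg_prep_setup(struct io_kiocb *req)
        {
                struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
                struct io_async_msghdr *kmsg;
                int ret;

                kmsg = io_msg_alloc_async(req);
                if (unlikely(!kmsg))
                        return -ENOMEM;

                if (req->opcode == IORING_OP_RECV) {
                        kmsg->msg.msg_name = NULL;
                        kmsg->msg.msg_namelen = 0;
                        kmsg->msg.msg_control = NULL;
                        kmsg->msg.msg_get_inq = 1;
                        kmsg->msg.msg_controllen = 0;
                        kmsg->msg.msg_iocb = NULL;
                        kmsg->msg.msg_ubuf = NULL;

                        if (!io_do_buffer_select(req)) {
                                ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
                                                  &kmsg->msg.msg_iter);
                                if (unlikely(ret))
                                        return ret;
                        }
                        return 0;
                }

                return io_recvmsg_copy_hdr(req, kmsg);
        }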
835 struct io_async_msghdr *kmsg, in io_recv_finish() argument
841 if (kmsg->msg.msg_inq > 0) in io_recv_finish()
845 cflags |= io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret), in io_recv_finish()
862 io_mshot_prep_retry(req, kmsg); in io_recv_finish()
864 if (cflags & IORING_CQE_F_SOCK_NONEMPTY || kmsg->msg.msg_inq < 0) { in io_recv_finish()
890 static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg, in io_recvmsg_prep_multishot() argument
897 hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen + in io_recvmsg_prep_multishot()
898 kmsg->controllen; in io_recvmsg_prep_multishot()
902 if (kmsg->controllen) { in io_recvmsg_prep_multishot()
903 unsigned long control = ubuf + hdr - kmsg->controllen; in io_recvmsg_prep_multishot()
905 kmsg->msg.msg_control_user = (void __user *) control; in io_recvmsg_prep_multishot()
906 kmsg->msg.msg_controllen = kmsg->controllen; in io_recvmsg_prep_multishot()
911 kmsg->payloadlen = *len = *len - hdr; in io_recvmsg_prep_multishot()
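io_recvmsg_prep_multishot (890-911) carves the front of the selected buffer out for metadata: an io_uring_recvmsg_out header, then name and control space, with the remainder left as payload. The length check and the sr->buf stash below are assumptions; the final line matches hit 911 verbatim:

        static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
                                             struct io_sr_msg *sr, void __user **buf,
                                             size_t *len)
        {
                unsigned long ubuf = (unsigned long) *buf;
                unsigned long hdr;

                hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
                        kmsg->controllen;
                if (*len < hdr)
                        return -EFAULT;

                if (kmsg->controllen) {
                        /* control data lands at the tail end of the metadata area */
                        unsigned long control = ubuf + hdr - kmsg->controllen;

                        kmsg->msg.msg_control_user = (void __user *) control;
                        kmsg->msg.msg_controllen = kmsg->controllen;
                }

                sr->buf = *buf;         /* stash for later copy */
                *buf = (void __user *) (ubuf + hdr);
                kmsg->payloadlen = *len = *len - hdr;
                return 0;
        }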
921 struct io_async_msghdr *kmsg, in io_recvmsg_multishot() argument
928 if (kmsg->namelen) in io_recvmsg_multishot()
929 kmsg->msg.msg_name = &hdr.addr; in io_recvmsg_multishot()
930 kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT); in io_recvmsg_multishot()
931 kmsg->msg.msg_namelen = 0; in io_recvmsg_multishot()
936 err = sock_recvmsg(sock, &kmsg->msg, flags); in io_recvmsg_multishot()
942 .controllen = kmsg->controllen - kmsg->msg.msg_controllen, in io_recvmsg_multishot()
943 .flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT in io_recvmsg_multishot()
947 if (err > kmsg->payloadlen) in io_recvmsg_multishot()
948 err = kmsg->payloadlen; in io_recvmsg_multishot()
951 if (kmsg->msg.msg_namelen > kmsg->namelen) in io_recvmsg_multishot()
952 copy_len += kmsg->namelen; in io_recvmsg_multishot()
954 copy_len += kmsg->msg.msg_namelen; in io_recvmsg_multishot()
960 hdr.msg.namelen = kmsg->msg.msg_namelen; in io_recvmsg_multishot()
970 return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen + in io_recvmsg_multishot()
971 kmsg->controllen + err; in io_recvmsg_multishot()
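The return expression at 970-971 (header + namelen + controllen + payload) pins down how io_recvmsg_multishot packs everything into one provided buffer. A sketch of that layout and of the helper struct that makes a single copy_to_user() possible; struct io_recvmsg_multishot_hdr is assumed to be defined alongside this function:

        /*
         * Provided-buffer layout implied by the matches above (a sketch):
         *
         *   | io_uring_recvmsg_out | name (kmsg->namelen) | control | payload |
         *
         * hdr.msg and hdr.addr sit back to back so the fixed header and the
         * source address can be copied out in one copy_to_user() call.
         */
        struct io_recvmsg_multishot_hdr {
                struct io_uring_recvmsg_out msg;
                struct sockaddr_storage addr;
        };

Note the truncation handling at 951-960: only min(msg_namelen, namelen) bytes of the name are copied, but hdr.msg.namelen still reports the untruncated length, per the POSIX rule the in-function comment cites ("fromlen shall refer to the value before truncation").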
977 struct io_async_msghdr *kmsg = req->async_data; in io_recvmsg() local
1006 ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len); in io_recvmsg()
1013 iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len); in io_recvmsg()
1016 kmsg->msg.msg_get_inq = 1; in io_recvmsg()
1017 kmsg->msg.msg_inq = -1; in io_recvmsg()
1019 ret = io_recvmsg_multishot(sock, sr, kmsg, flags, in io_recvmsg()
1023 if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen) in io_recvmsg()
1024 min_ret = iov_iter_count(&kmsg->msg.msg_iter); in io_recvmsg()
1026 ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg, in io_recvmsg()
1027 kmsg->uaddr, flags); in io_recvmsg()
1046 } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) { in io_recvmsg()
1057 if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags)) in io_recvmsg()
1063 static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg, in io_recv_buf_select() argument
1077 .iovs = &kmsg->fast_iov, in io_recv_buf_select()
1082 if (kmsg->free_iov) { in io_recv_buf_select()
1083 arg.nr_iovs = kmsg->free_iov_nr; in io_recv_buf_select()
1084 arg.iovs = kmsg->free_iov; in io_recv_buf_select()
1088 if (kmsg->msg.msg_inq > 0) in io_recv_buf_select()
1089 arg.max_len = min_not_zero(sr->len, kmsg->msg.msg_inq); in io_recv_buf_select()
1101 iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, arg.iovs, ret, in io_recv_buf_select()
1103 if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) { in io_recv_buf_select()
1104 kmsg->free_iov_nr = ret; in io_recv_buf_select()
1105 kmsg->free_iov = arg.iovs; in io_recv_buf_select()
1119 &kmsg->msg.msg_iter); in io_recv_buf_select()
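The stray import at 1119 belongs to the non-bundle branch of io_recv_buf_select: pick a single provided buffer and point the msg_iter at it. A reconstruction of that branch; io_buffer_select, the -ENOBUFS return, and the sr->buf/sr->len writeback are assumptions from context:

        } else {
                void __user *buf;

                *len = sr->len;
                buf = io_buffer_select(req, len, issue_flags);
                if (!buf)
                        return -ENOBUFS;
                sr->buf = buf;
                sr->len = *len;
                ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
                                  &kmsg->msg.msg_iter);
                if (unlikely(ret))
                        return ret;
        }

The bundle branch above it (1077-1105) mirrors io_send at 599-621: it reuses the cached free_iov when one exists and caps max_len with msg_inq so a bundle does not select more buffer space than the socket has queued.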
1130 struct io_async_msghdr *kmsg = req->async_data; in io_recv() local
1152 ret = io_recv_buf_select(req, kmsg, &len, issue_flags); in io_recv()
1154 kmsg->msg.msg_inq = -1; in io_recv()
1160 kmsg->msg.msg_flags = 0; in io_recv()
1161 kmsg->msg.msg_inq = -1; in io_recv()
1164 min_ret = iov_iter_count(&kmsg->msg.msg_iter); in io_recv()
1166 ret = sock_recvmsg(sock, &kmsg->msg, flags); in io_recv()
1186 } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) { in io_recv()
1199 if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags)) in io_recv()
1342 static int io_send_zc_import(struct io_kiocb *req, struct io_async_msghdr *kmsg) in io_send_zc_import() argument
1348 ret = io_import_fixed(ITER_SOURCE, &kmsg->msg.msg_iter, req->imu, in io_send_zc_import()
1352 kmsg->msg.sg_from_iter = io_sg_from_iter; in io_send_zc_import()
1354 ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter); in io_send_zc_import()
1360 kmsg->msg.sg_from_iter = io_sg_from_iter_iovec; in io_send_zc_import()
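Lines 1342-1360 give both halves of io_send_zc_import: fixed (registered) buffers go through io_import_fixed and use the page-aware io_sg_from_iter, while plain user buffers are imported with import_ubuf and use the iovec-copying io_sg_from_iter_iovec. The branch condition and the notification memory accounting are assumptions from context:

        static int io_send_zc_import(struct io_kiocb *req, struct io_async_msghdr *kmsg)
        {
                struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
                int ret;

                if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
                        ret = io_import_fixed(ITER_SOURCE, &kmsg->msg.msg_iter, req->imu,
                                              (u64)(uintptr_t)sr->buf, sr->len);
                        if (unlikely(ret))
                                return ret;
                        kmsg->msg.sg_from_iter = io_sg_from_iter;
                } else {
                        ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
                                          &kmsg->msg.msg_iter);
                        if (unlikely(ret))
                                return ret;
                        /* unregistered memory must be accounted to the notif */
                        ret = io_notif_account_mem(sr->notif, sr->len);
                        if (unlikely(ret))
                                return ret;
                        kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
                }
                return ret;
        }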
1369 struct io_async_msghdr *kmsg = req->async_data; in io_send_zc() local
1385 ret = io_send_zc_import(req, kmsg); in io_send_zc()
1394 min_ret = iov_iter_count(&kmsg->msg.msg_iter); in io_send_zc()
1397 kmsg->msg.msg_flags = msg_flags; in io_send_zc()
1398 kmsg->msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg; in io_send_zc()
1399 ret = sock_sendmsg(sock, &kmsg->msg); in io_send_zc()
1405 if (ret > 0 && io_net_retry(sock, kmsg->msg.msg_flags)) { in io_send_zc()
1437 struct io_async_msghdr *kmsg = req->async_data; in io_sendmsg_zc() local
1456 min_ret = iov_iter_count(&kmsg->msg.msg_iter); in io_sendmsg_zc()
1458 kmsg->msg.msg_control_user = sr->msg_control; in io_sendmsg_zc()
1459 kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg; in io_sendmsg_zc()
1460 kmsg->msg.sg_from_iter = io_sg_from_iter_iovec; in io_sendmsg_zc()
1461 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags); in io_sendmsg_zc()
1800 struct io_async_msghdr *kmsg = (struct io_async_msghdr *) entry; in io_netmsg_cache_free() local
1802 if (kmsg->free_iov) { in io_netmsg_cache_free()
1803 kasan_mempool_unpoison_object(kmsg->free_iov, in io_netmsg_cache_free()
1804 kmsg->free_iov_nr * sizeof(struct iovec)); in io_netmsg_cache_free()
1805 io_netmsg_iovec_free(kmsg); in io_netmsg_cache_free()
1807 kfree(kmsg); in io_netmsg_cache_free()
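The cast at 1800 implies the cache-free callback receives an untyped entry pointer; the `const void *entry` signature below is inferred from that. Reconstructed from hits 1800-1807, this unpoisons the KASAN-tracked iovec before freeing it through io_netmsg_iovec_free, then releases the msghdr itself:

        static void io_netmsg_cache_free(const void *entry)
        {
                struct io_async_msghdr *kmsg = (struct io_async_msghdr *) entry;

                if (kmsg->free_iov) {
                        /* cached iovecs are KASAN-poisoned while parked in the cache */
                        kasan_mempool_unpoison_object(kmsg->free_iov,
                                        kmsg->free_iov_nr * sizeof(struct iovec));
                        io_netmsg_iovec_free(kmsg);
                }
                kfree(kmsg);
        }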