// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/compat.h>
#include <net/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "kbuf.h"
#include "alloc_cache.h"
#include "net.h"
#include "notif.h"
#include "rsrc.h"

#if defined(CONFIG_NET)
struct io_shutdown {
	struct file *file;
	int how;
};

struct io_accept {
	struct file *file;
	struct sockaddr __user *addr;
	int __user *addr_len;
	int flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_socket {
	struct file *file;
	int domain;
	int type;
	int protocol;
	int flags;
	u32 file_slot;
	unsigned long nofile;
};

struct io_connect {
	struct file *file;
	struct sockaddr __user *addr;
	int addr_len;
	bool in_progress;
};

struct io_sr_msg {
	struct file *file;
	union {
		struct compat_msghdr __user *umsg_compat;
		struct user_msghdr __user *umsg;
		void __user *buf;
	};
	unsigned len;
	unsigned done_io;
	unsigned msg_flags;
	u16 flags;
	/* initialised and used only by !msg send variants */
	u16 addr_len;
	u16 buf_group;
	void __user *addr;
	/* used only for send zerocopy */
	struct io_kiocb *notif;
};

static inline bool io_check_multishot(struct io_kiocb *req,
				      unsigned int issue_flags)
{
	/*
	 * When ->locked_cq is set we only allow CQEs to be posted from the
	 * original task context. Usual request completions will be handled
	 * in other generic paths but multipoll may decide to post extra cqes.
	 */
	return !(issue_flags & IO_URING_F_IOWQ) ||
		!(issue_flags & IO_URING_F_MULTISHOT) ||
		!req->ctx->task_complete;
}

int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);

	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
		     sqe->buf_index || sqe->splice_fd_in))
		return -EINVAL;

	shutdown->how = READ_ONCE(sqe->len);
	req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
	struct socket *sock;
	int ret;

	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = __sys_shutdown_sock(sock, shutdown->how);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

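/*
 * Decide whether a partial transfer should be retried: only when the caller
 * asked for MSG_WAITALL and the socket provides a byte/record stream
 * (SOCK_STREAM or SOCK_SEQPACKET) does transparently retrying make sense.
 */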
static bool io_net_retry(struct socket *sock, int flags)
{
	if (!(flags & MSG_WAITALL))
		return false;
	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
}

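/*
 * Async msghdr allocations are recycled through ctx->netmsg_cache when the
 * ring is locked; entries that cannot be cached are left for the normal
 * cleanup path to free.
 */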
static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_async_msghdr *hdr = req->async_data;

	if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
		return;

	/* Let normal cleanup path reap it if we fail adding to the cache */
	if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
		req->async_data = NULL;
		req->flags &= ~REQ_F_ASYNC_DATA;
	}
}

static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
						  unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_cache_entry *entry;
	struct io_async_msghdr *hdr;

	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		entry = io_alloc_cache_get(&ctx->netmsg_cache);
		if (entry) {
			hdr = container_of(entry, struct io_async_msghdr, cache);
			hdr->free_iov = NULL;
			req->flags |= REQ_F_ASYNC_DATA;
			req->async_data = hdr;
			return hdr;
		}
	}

	if (!io_alloc_async_data(req)) {
		hdr = req->async_data;
		hdr->free_iov = NULL;
		return hdr;
	}
	return NULL;
}

static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
{
	/* ->prep_async is always called from the submission context */
	return io_msg_alloc_async(req, 0);
}

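/*
 * Stash the on-stack msghdr state into req->async_data so the request can be
 * retried later. Returns -EAGAIN on success (the caller re-issues the
 * request), or -ENOMEM if no async context could be allocated.
 */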
static int io_setup_async_msg(struct io_kiocb *req,
			      struct io_async_msghdr *kmsg,
			      unsigned int issue_flags)
{
	struct io_async_msghdr *async_msg;

	if (req_has_async_data(req))
		return -EAGAIN;
	async_msg = io_msg_alloc_async(req, issue_flags);
	if (!async_msg) {
		kfree(kmsg->free_iov);
		return -ENOMEM;
	}
	req->flags |= REQ_F_NEED_CLEANUP;
	memcpy(async_msg, kmsg, sizeof(*kmsg));
	if (async_msg->msg.msg_name)
		async_msg->msg.msg_name = &async_msg->addr;
	/* if we're using fast_iov, set it to the new one */
	if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) {
		size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov;
		async_msg->msg.msg_iter.iov = &async_msg->fast_iov[fast_idx];
	}

	return -EAGAIN;
}

static int io_sendmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	iomsg->msg.msg_name = &iomsg->addr;
	iomsg->free_iov = iomsg->fast_iov;
	return sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
				   &iomsg->free_iov);
}

int io_send_prep_async(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;
	int ret;

	if (!zc->addr || req_has_async_data(req))
		return 0;
	io = io_msg_alloc_async_prep(req);
	if (!io)
		return -ENOMEM;
	ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
	return ret;
}

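/*
 * Same idea as io_setup_async_msg(), but for the plain send/send-zc path
 * that only carries a destination address: copy it into async data so a
 * later retry sees a stable copy.
 */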
static int io_setup_async_addr(struct io_kiocb *req,
			       struct sockaddr_storage *addr_storage,
			       unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (!sr->addr || req_has_async_data(req))
		return -EAGAIN;
	io = io_msg_alloc_async(req, issue_flags);
	if (!io)
		return -ENOMEM;
	memcpy(&io->addr, addr_storage, sizeof(io->addr));
	return -EAGAIN;
}

int io_sendmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_sendmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
{
	struct io_async_msghdr *io = req->async_data;

	kfree(io->free_iov);
}

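/*
 * Prep for IORING_OP_SEND and IORING_OP_SENDMSG. The non-msg variant may
 * carry an optional destination address in sqe->addr2/addr_len; for sendmsg
 * those fields must be unused.
 */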
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->opcode == IORING_OP_SEND) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		sr->addr_len = READ_ONCE(sqe->addr_len);
	} else if (sqe->addr2 || sqe->file_index) {
		return -EINVAL;
	}

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov)
		kfree(kmsg->free_iov);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_send(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned flags;
	int min_ret = 0;
	int ret;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;
	msg.msg_ubuf = NULL;

	if (sr->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = sr->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &msg.msg_iter);
	if (unlikely(ret))
		return ret;

	flags = sr->msg_flags;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = flags;
	ret = sock_sendmsg(sock, &msg);
	if (ret < min_ret) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

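/*
 * For multishot recvmsg the name and control data are copied back into the
 * selected buffer behind a struct io_uring_recvmsg_out header; reject the
 * setup if those lengths cannot even be added up without overflowing.
 */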
static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
{
	int hdr;

	if (iomsg->namelen < 0)
		return true;
	if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
			       iomsg->namelen, &hdr))
		return true;
	if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
		return true;

	return false;
}

static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
				 struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct user_msghdr msg;
	int ret;

	if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
		return -EFAULT;

	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	if (req->flags & REQ_F_BUFFER_SELECT) {
		if (msg.msg_iovlen == 0) {
			sr->len = iomsg->fast_iov[0].iov_len = 0;
			iomsg->fast_iov[0].iov_base = NULL;
			iomsg->free_iov = NULL;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
				return -EFAULT;
			sr->len = iomsg->fast_iov[0].iov_len;
			iomsg->free_iov = NULL;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
				     &iomsg->free_iov, &iomsg->msg.msg_iter,
				     false);
		if (ret > 0)
			ret = 0;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
					struct io_async_msghdr *iomsg)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct compat_msghdr msg;
	struct compat_iovec __user *uiov;
	int ret;

	if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
		return -EFAULT;

	ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
	if (ret)
		return ret;

	uiov = compat_ptr(msg.msg_iov);
	if (req->flags & REQ_F_BUFFER_SELECT) {
		compat_ssize_t clen;

		iomsg->free_iov = NULL;
		if (msg.msg_iovlen == 0) {
			sr->len = 0;
		} else if (msg.msg_iovlen > 1) {
			return -EINVAL;
		} else {
			if (!access_ok(uiov, sizeof(*uiov)))
				return -EFAULT;
			if (__get_user(clen, &uiov->iov_len))
				return -EFAULT;
			if (clen < 0)
				return -EINVAL;
			sr->len = clen;
		}

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			iomsg->namelen = msg.msg_namelen;
			iomsg->controllen = msg.msg_controllen;
			if (io_recvmsg_multishot_overflow(iomsg))
				return -EOVERFLOW;
		}
	} else {
		iomsg->free_iov = iomsg->fast_iov;
		ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
				     UIO_FASTIOV, &iomsg->free_iov,
				     &iomsg->msg.msg_iter, true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
#endif

static int io_recvmsg_copy_hdr(struct io_kiocb *req,
			       struct io_async_msghdr *iomsg)
{
	iomsg->msg.msg_name = &iomsg->addr;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		return __io_compat_recvmsg_copy_hdr(req, iomsg);
#endif

	return __io_recvmsg_copy_hdr(req, iomsg);
}

int io_recvmsg_prep_async(struct io_kiocb *req)
{
	int ret;

	if (!io_msg_alloc_async_prep(req))
		return -ENOMEM;
	ret = io_recvmsg_copy_hdr(req, req->async_data);
	if (!ret)
		req->flags |= REQ_F_NEED_CLEANUP;
	return ret;
}

#define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)

int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (unlikely(sqe->file_index || sqe->addr2))
		return -EINVAL;

	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
	sr->len = READ_ONCE(sqe->len);
	sr->flags = READ_ONCE(sqe->ioprio);
	if (sr->flags & ~(RECVMSG_FLAGS))
		return -EINVAL;
	sr->msg_flags = READ_ONCE(sqe->msg_flags);
	if (sr->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;
	if (sr->msg_flags & MSG_ERRQUEUE)
		req->flags |= REQ_F_CLEAR_POLLIN;
	if (sr->flags & IORING_RECV_MULTISHOT) {
		if (!(req->flags & REQ_F_BUFFER_SELECT))
			return -EINVAL;
		if (sr->msg_flags & MSG_WAITALL)
			return -EINVAL;
		if (req->opcode == IORING_OP_RECV && sr->len)
			return -EINVAL;
		req->flags |= REQ_F_APOLL_MULTISHOT;
		/*
		 * Store the buffer group for this multishot receive separately,
		 * as if we end up doing an io-wq based issue that selects a
		 * buffer, it has to be committed immediately and that will
		 * clear ->buf_list. This means we lose the link to the buffer
		 * list, and the eventual buffer put on completion then cannot
		 * restore it.
		 */
		sr->buf_group = req->buf_index;
	}

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		sr->msg_flags |= MSG_CMSG_COMPAT;
#endif
	sr->done_io = 0;
	return 0;
}

static inline void io_recv_prep_retry(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	sr->done_io = 0;
	sr->len = 0; /* get from the provided buffer */
	req->buf_index = sr->buf_group;
}

/*
 * Finishes io_recv and io_recvmsg.
 *
 * Returns true if it is actually finished, or false if it should run
 * again (for multishot).
 */
static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
				  unsigned int cflags, bool mshot_finished,
				  unsigned issue_flags)
{
	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, *ret, cflags);
		*ret = IOU_OK;
		return true;
	}

	if (!mshot_finished) {
		if (io_aux_cqe(req->ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
			       req->cqe.user_data, *ret, cflags | IORING_CQE_F_MORE, true)) {
			io_recv_prep_retry(req);
			return false;
		}
		/* Otherwise stop multishot but use the current result. */
	}

	io_req_set_res(req, *ret, cflags);

	if (issue_flags & IO_URING_F_MULTISHOT)
		*ret = IOU_STOP_MULTISHOT;
	else
		*ret = IOU_OK;
	return true;
}

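/*
 * Carve up the selected buffer for multishot recvmsg: reserve room at the
 * front for struct io_uring_recvmsg_out plus the requested name and control
 * space, point the control destination into that area, and shrink *buf/*len
 * to the payload region that follows it.
 */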
static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
				     struct io_sr_msg *sr, void __user **buf,
				     size_t *len)
{
	unsigned long ubuf = (unsigned long) *buf;
	unsigned long hdr;

	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
		kmsg->controllen;
	if (*len < hdr)
		return -EFAULT;

	if (kmsg->controllen) {
		unsigned long control = ubuf + hdr - kmsg->controllen;

		kmsg->msg.msg_control_user = (void __user *) control;
		kmsg->msg.msg_controllen = kmsg->controllen;
	}

	sr->buf = *buf; /* stash for later copy */
	*buf = (void __user *) (ubuf + hdr);
	kmsg->payloadlen = *len = *len - hdr;
	return 0;
}

struct io_recvmsg_multishot_hdr {
	struct io_uring_recvmsg_out msg;
	struct sockaddr_storage addr;
};

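/*
 * Do one multishot receive and copy the io_uring_recvmsg_out header (plus
 * any received name) back to the front of the selected buffer. On success
 * the return value covers the header, the name/control reservation and the
 * received payload, matching the layout userspace parses.
 */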
static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
				struct io_async_msghdr *kmsg,
				unsigned int flags, bool *finished)
{
	int err;
	int copy_len;
	struct io_recvmsg_multishot_hdr hdr;

	if (kmsg->namelen)
		kmsg->msg.msg_name = &hdr.addr;
	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
	kmsg->msg.msg_namelen = 0;

	if (sock->file->f_flags & O_NONBLOCK)
		flags |= MSG_DONTWAIT;

	err = sock_recvmsg(sock, &kmsg->msg, flags);
	*finished = err <= 0;
	if (err < 0)
		return err;

	hdr.msg = (struct io_uring_recvmsg_out) {
		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
	};

	hdr.msg.payloadlen = err;
	if (err > kmsg->payloadlen)
		err = kmsg->payloadlen;

	copy_len = sizeof(struct io_uring_recvmsg_out);
	if (kmsg->msg.msg_namelen > kmsg->namelen)
		copy_len += kmsg->namelen;
	else
		copy_len += kmsg->msg.msg_namelen;

	/*
	 * "fromlen shall refer to the value before truncation.."
	 *			1003.1g
	 */
	hdr.msg.namelen = kmsg->msg.msg_namelen;

	/* ensure that there is no gap between hdr and sockaddr_storage */
	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
		     sizeof(struct io_uring_recvmsg_out));
	if (copy_to_user(io->buf, &hdr, copy_len)) {
		*finished = true;
		return -EFAULT;
	}

	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
			kmsg->controllen + err;
}

int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	bool mshot_finished = true;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_recvmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	if (!io_check_multishot(req, issue_flags))
		return io_setup_async_msg(req, kmsg, issue_flags);

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;
		size_t len = sr->len;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;

		if (req->flags & REQ_F_APOLL_MULTISHOT) {
			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
			if (ret) {
				io_kbuf_recycle(req, issue_flags);
				return ret;
			}
		}

		iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
	}

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_get_inq = 1;
	if (req->flags & REQ_F_APOLL_MULTISHOT)
		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
					   &mshot_finished);
	else
		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
					 kmsg->uaddr, flags);

	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			ret = io_setup_async_msg(req, kmsg, issue_flags);
			if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}
			return ret;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (kmsg->msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags))
		goto retry_multishot;

	if (mshot_finished) {
		/* fast path, check for non-NULL to avoid function call */
		if (kmsg->free_iov)
			kfree(kmsg->free_iov);
		io_netmsg_recycle(req, issue_flags);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}

	return ret;
}

int io_recv(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned int cflags;
	unsigned flags;
	int ret, min_ret = 0;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	size_t len = sr->len;

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return -EAGAIN;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;

retry_multishot:
	if (io_do_buffer_select(req)) {
		void __user *buf;

		buf = io_buffer_select(req, &len, issue_flags);
		if (!buf)
			return -ENOBUFS;
		sr->buf = buf;
	}

	ret = import_ubuf(ITER_DEST, sr->buf, len, &msg.msg_iter);
	if (unlikely(ret))
		goto out_free;

	msg.msg_name = NULL;
	msg.msg_namelen = 0;
	msg.msg_control = NULL;
	msg.msg_get_inq = 1;
	msg.msg_flags = 0;
	msg.msg_controllen = 0;
	msg.msg_iocb = NULL;
	msg.msg_ubuf = NULL;

	flags = sr->msg_flags;
	if (force_nonblock)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	ret = sock_recvmsg(sock, &msg, flags);
	if (ret < min_ret) {
		if (ret == -EAGAIN && force_nonblock) {
			if (issue_flags & IO_URING_F_MULTISHOT) {
				io_kbuf_recycle(req, issue_flags);
				return IOU_ISSUE_SKIP_COMPLETE;
			}

			return -EAGAIN;
		}
		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->len -= ret;
			sr->buf += ret;
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return -EAGAIN;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
out_free:
		req_set_fail(req);
	}

	if (ret > 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;
	else
		io_kbuf_recycle(req, issue_flags);

	cflags = io_put_kbuf(req, issue_flags);
	if (msg.msg_inq)
		cflags |= IORING_CQE_F_SOCK_NONEMPTY;

	if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags))
		goto retry_multishot;

	return ret;
}

void io_send_zc_cleanup(struct io_kiocb *req)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr *io;

	if (req_has_async_data(req)) {
		io = req->async_data;
		/* might be ->fast_iov if *msg_copy_hdr failed */
		if (io->free_iov != io->fast_iov)
			kfree(io->free_iov);
	}
	if (zc->notif) {
		io_notif_flush(zc->notif);
		zc->notif = NULL;
	}
}

#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
#define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)

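/*
 * Prep for IORING_OP_SEND_ZC and IORING_OP_SENDMSG_ZC: allocate the notif
 * request that will carry the IORING_CQE_F_NOTIF completion, and resolve a
 * fixed buffer index up front if IORING_RECVSEND_FIXED_BUF was requested.
 */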
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *notif;

	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
		return -EINVAL;
	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
	if (req->flags & REQ_F_CQE_SKIP)
		return -EINVAL;

	notif = zc->notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	notif->cqe.user_data = req->cqe.user_data;
	notif->cqe.res = 0;
	notif->cqe.flags = IORING_CQE_F_NOTIF;
	req->flags |= REQ_F_NEED_CLEANUP;

	zc->flags = READ_ONCE(sqe->ioprio);
	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
		if (zc->flags & ~IO_ZC_FLAGS_VALID)
			return -EINVAL;
		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
			io_notif_set_extended(notif);
			io_notif_to_data(notif)->zc_report = true;
		}
	}

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		unsigned idx = READ_ONCE(sqe->buf_index);

		if (unlikely(idx >= ctx->nr_user_bufs))
			return -EFAULT;
		idx = array_index_nospec(idx, ctx->nr_user_bufs);
		req->imu = READ_ONCE(ctx->user_bufs[idx]);
		io_req_set_rsrc_node(notif, ctx, 0);
	}

	if (req->opcode == IORING_OP_SEND_ZC) {
		if (READ_ONCE(sqe->__pad3[0]))
			return -EINVAL;
		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
		zc->addr_len = READ_ONCE(sqe->addr_len);
	} else {
		if (unlikely(sqe->addr2 || sqe->file_index))
			return -EINVAL;
		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
			return -EINVAL;
	}

	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	zc->len = READ_ONCE(sqe->len);
	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
	if (zc->msg_flags & MSG_DONTWAIT)
		req->flags |= REQ_F_NOWAIT;

	zc->done_io = 0;

#ifdef CONFIG_COMPAT
	if (req->ctx->compat)
		zc->msg_flags |= MSG_CMSG_COMPAT;
#endif
	return 0;
}

static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
				 struct iov_iter *from, size_t length)
{
	skb_zcopy_downgrade_managed(skb);
	return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
}

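/*
 * Fill skb frags directly from a bvec iterator without taking page
 * references (the pages come from a registered buffer and stay pinned).
 * Falls back to __zerocopy_sg_from_iter() if the skb already carries
 * unmanaged frags.
 */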
static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			   struct iov_iter *from, size_t length)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int frag = shinfo->nr_frags;
	int ret = 0;
	struct bvec_iter bi;
	ssize_t copied = 0;
	unsigned long truesize = 0;

	if (!frag)
		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
	else if (unlikely(!skb_zcopy_managed(skb)))
		return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);

	bi.bi_size = min(from->count, length);
	bi.bi_bvec_done = from->iov_offset;
	bi.bi_idx = 0;

	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);

		copied += v.bv_len;
		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
					   v.bv_offset, v.bv_len);
		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
	}
	if (bi.bi_size)
		ret = -EMSGSIZE;

	shinfo->nr_frags = frag;
	from->bvec += bi.bi_idx;
	from->nr_segs -= bi.bi_idx;
	from->count -= copied;
	from->iov_offset = bi.bi_bvec_done;

	skb->data_len += copied;
	skb->len += copied;
	skb->truesize += truesize;

	if (sk && sk->sk_type == SOCK_STREAM) {
		sk_wmem_queued_add(sk, truesize);
		if (!skb_zcopy_pure(skb))
			sk_mem_charge(sk, truesize);
	} else {
		refcount_add(truesize, &skb->sk->sk_wmem_alloc);
	}
	return ret;
}

int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct sockaddr_storage __address;
	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct msghdr msg;
	struct socket *sock;
	unsigned msg_flags;
	int ret, min_ret = 0;

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	msg.msg_name = NULL;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_namelen = 0;

	if (zc->addr) {
		if (req_has_async_data(req)) {
			struct io_async_msghdr *io = req->async_data;

			msg.msg_name = &io->addr;
		} else {
			ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
			if (unlikely(ret < 0))
				return ret;
			msg.msg_name = (struct sockaddr *)&__address;
		}
		msg.msg_namelen = zc->addr_len;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_addr(req, &__address, issue_flags);

	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
		ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
				      (u64)(uintptr_t)zc->buf, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter;
	} else {
		io_notif_set_extended(zc->notif);
		ret = import_ubuf(ITER_SOURCE, zc->buf, zc->len, &msg.msg_iter);
		if (unlikely(ret))
			return ret;
		ret = io_notif_account_mem(zc->notif, zc->len);
		if (unlikely(ret))
			return ret;
		msg.sg_from_iter = io_sg_from_iter_iovec;
	}

	msg_flags = zc->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		msg_flags |= MSG_DONTWAIT;
	if (msg_flags & MSG_WAITALL)
		min_ret = iov_iter_count(&msg.msg_iter);

	msg.msg_flags = msg_flags;
	msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
	ret = sock_sendmsg(sock, &msg);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_addr(req, &__address, issue_flags);

		if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
			zc->len -= ret;
			zc->buf += ret;
			zc->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_addr(req, &__address, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}

	if (ret >= 0)
		ret += zc->done_io;
	else if (zc->done_io)
		ret = zc->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(zc->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
	struct io_async_msghdr iomsg, *kmsg;
	struct socket *sock;
	unsigned flags;
	int ret, min_ret = 0;

	io_notif_set_extended(sr->notif);

	sock = sock_from_file(req->file);
	if (unlikely(!sock))
		return -ENOTSOCK;
	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
		return -EOPNOTSUPP;

	if (req_has_async_data(req)) {
		kmsg = req->async_data;
	} else {
		ret = io_sendmsg_copy_hdr(req, &iomsg);
		if (ret)
			return ret;
		kmsg = &iomsg;
	}

	if (!(req->flags & REQ_F_POLLED) &&
	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
		return io_setup_async_msg(req, kmsg, issue_flags);

	flags = sr->msg_flags | MSG_ZEROCOPY;
	if (issue_flags & IO_URING_F_NONBLOCK)
		flags |= MSG_DONTWAIT;
	if (flags & MSG_WAITALL)
		min_ret = iov_iter_count(&kmsg->msg.msg_iter);

	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);

	if (unlikely(ret < min_ret)) {
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return io_setup_async_msg(req, kmsg, issue_flags);

		if (ret > 0 && io_net_retry(sock, flags)) {
			sr->done_io += ret;
			req->flags |= REQ_F_PARTIAL_IO;
			return io_setup_async_msg(req, kmsg, issue_flags);
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	}
	/* fast path, check for non-NULL to avoid function call */
	if (kmsg->free_iov) {
		kfree(kmsg->free_iov);
		kmsg->free_iov = NULL;
	}

	io_netmsg_recycle(req, issue_flags);
	if (ret >= 0)
		ret += sr->done_io;
	else if (sr->done_io)
		ret = sr->done_io;

	/*
	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
	 * flushing notif to io_send_zc_cleanup()
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		io_notif_flush(sr->notif);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
	io_req_set_res(req, ret, IORING_CQE_F_MORE);
	return IOU_OK;
}

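/*
 * Common failure handler for the send/recv opcodes: report partial progress
 * instead of the error where some data was already transferred, and keep
 * IORING_CQE_F_MORE set for zerocopy sends that still owe a notification
 * CQE.
 */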
void io_sendrecv_fail(struct io_kiocb *req)
{
	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);

	if (req->flags & REQ_F_PARTIAL_IO)
		req->cqe.res = sr->done_io;

	if ((req->flags & REQ_F_NEED_CLEANUP) &&
	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
		req->cqe.flags |= IORING_CQE_F_MORE;
}

int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	unsigned flags;

	if (sqe->len || sqe->buf_index)
		return -EINVAL;

	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	accept->flags = READ_ONCE(sqe->accept_flags);
	accept->nofile = rlimit(RLIMIT_NOFILE);
	flags = READ_ONCE(sqe->ioprio);
	if (flags & ~IORING_ACCEPT_MULTISHOT)
		return -EINVAL;

	accept->file_slot = READ_ONCE(sqe->file_index);
	if (accept->file_slot) {
		if (accept->flags & SOCK_CLOEXEC)
			return -EINVAL;
		if (flags & IORING_ACCEPT_MULTISHOT &&
		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
			return -EINVAL;
	}
	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
	if (flags & IORING_ACCEPT_MULTISHOT)
		req->flags |= REQ_F_APOLL_MULTISHOT;
	return 0;
}

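/*
 * Accept one connection, installing the new file either into the normal fd
 * table or into a fixed file slot. In multishot mode, keep looping and
 * posting an IORING_CQE_F_MORE CQE per accepted connection until accept
 * would block or posting the CQE fails.
 */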
int io_accept(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
	bool fixed = !!accept->file_slot;
	struct file *file;
	int ret, fd;

	if (!io_check_multishot(req, issue_flags))
		return -EAGAIN;
retry:
	if (!fixed) {
		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
			 accept->flags);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && force_nonblock) {
			/*
			 * if it's multishot and polled, we don't need to
			 * return EAGAIN to arm the poll infra since it
			 * has already been done
			 */
			if (issue_flags & IO_URING_F_MULTISHOT)
				ret = IOU_ISSUE_SKIP_COMPLETE;
			return ret;
		}
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  accept->file_slot);
	}

	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
		io_req_set_res(req, ret, 0);
		return IOU_OK;
	}

	if (ret < 0)
		return ret;
	if (io_aux_cqe(ctx, issue_flags & IO_URING_F_COMPLETE_DEFER,
		       req->cqe.user_data, ret, IORING_CQE_F_MORE, true))
		goto retry;

	return -ECANCELED;
}

int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);

	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;

	sock->domain = READ_ONCE(sqe->fd);
	sock->type = READ_ONCE(sqe->off);
	sock->protocol = READ_ONCE(sqe->len);
	sock->file_slot = READ_ONCE(sqe->file_index);
	sock->nofile = rlimit(RLIMIT_NOFILE);

	sock->flags = sock->type & ~SOCK_TYPE_MASK;
	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
		return -EINVAL;
	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
		return -EINVAL;
	return 0;
}

int io_socket(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
	bool fixed = !!sock->file_slot;
	struct file *file;
	int ret, fd;

	if (!fixed) {
		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
		if (unlikely(fd < 0))
			return fd;
	}
	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
	if (IS_ERR(file)) {
		if (!fixed)
			put_unused_fd(fd);
		ret = PTR_ERR(file);
		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
			return -EAGAIN;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
		req_set_fail(req);
	} else if (!fixed) {
		fd_install(fd, file);
		ret = fd;
	} else {
		ret = io_fixed_fd_install(req, issue_flags, file,
					  sock->file_slot);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_connect_prep_async(struct io_kiocb *req)
{
	struct io_async_connect *io = req->async_data;
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
}

int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);

	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
	conn->addr_len = READ_ONCE(sqe->addr2);
	conn->in_progress = false;
	return 0;
}

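/*
 * Issue a connect. A nonblocking attempt that returns -EINPROGRESS is
 * remembered via ->in_progress, and the request is re-issued later to read
 * the final result from sock_error() rather than calling connect again.
 */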
int io_connect(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
	struct io_async_connect __io, *io;
	unsigned file_flags;
	int ret;
	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;

	if (connect->in_progress) {
		struct socket *socket;

		ret = -ENOTSOCK;
		socket = sock_from_file(req->file);
		if (socket)
			ret = sock_error(socket->sk);
		goto out;
	}

	if (req_has_async_data(req)) {
		io = req->async_data;
	} else {
		ret = move_addr_to_kernel(connect->addr,
					  connect->addr_len,
					  &__io.address);
		if (ret)
			goto out;
		io = &__io;
	}

	file_flags = force_nonblock ? O_NONBLOCK : 0;

	ret = __sys_connect_file(req->file, &io->address,
				 connect->addr_len, file_flags);
	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
		if (ret == -EINPROGRESS) {
			connect->in_progress = true;
		} else {
			if (req_has_async_data(req))
				return -EAGAIN;
			if (io_alloc_async_data(req)) {
				ret = -ENOMEM;
				goto out;
			}
			memcpy(req->async_data, &__io, sizeof(__io));
		}
		return -EAGAIN;
	}
	if (ret == -ERESTARTSYS)
		ret = -EINTR;
out:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

void io_netmsg_cache_free(struct io_cache_entry *entry)
{
	kfree(container_of(entry, struct io_async_msghdr, cache));
}
#endif