Lines matching refs:ret (each entry is the source line number followed by the matching line):

302 bool ret;

329 ret = io_alloc_cache_init(&ctx->apoll_cache, IO_POLL_ALLOC_CACHE_MAX,
331 ret |= io_alloc_cache_init(&ctx->netmsg_cache, IO_ALLOC_CACHE_MAX,
334 ret |= io_alloc_cache_init(&ctx->rw_cache, IO_ALLOC_CACHE_MAX,
337 ret |= io_alloc_cache_init(&ctx->cmd_cache, IO_ALLOC_CACHE_MAX,
341 ret |= io_alloc_cache_init(&ctx->msg_cache, IO_ALLOC_CACHE_MAX,
343 ret |= io_futex_cache_init(ctx);
344 ret |= io_rsrc_cache_init(ctx);
345 if (ret)
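
The block at lines 329-345 shows the first pattern in this listing: ret is a bool, each cache-init helper reports failure as true, and the results are OR-ed together so a single if (ret) covers every allocation. A minimal userspace sketch of that shape, assuming a hypothetical cache_init()/cache_free() pair rather than the kernel's io_alloc_cache API:

#include <stdbool.h>
#include <stdlib.h>

struct cache {
        void *slots;
};

/* hypothetical stand-in: reports failure as true, like the init calls above */
static bool cache_init(struct cache *c, size_t nr, size_t obj_size)
{
        c->slots = calloc(nr, obj_size);
        return !c->slots;
}

static void cache_free(struct cache *c)
{
        free(c->slots);
        c->slots = NULL;
}

struct ctx {
        struct cache a, b, c;
};

static int ctx_init(struct ctx *ctx)
{
        bool ret;

        /* OR each result so one branch handles every failure */
        ret  = cache_init(&ctx->a, 32, 64);
        ret |= cache_init(&ctx->b, 32, 128);
        ret |= cache_init(&ctx->c, 32, 256);
        if (ret) {
                cache_free(&ctx->a);
                cache_free(&ctx->b);
                cache_free(&ctx->c);
                return -1;
        }
        return 0;
}

int main(void)
{
        struct ctx ctx;

        return ctx_init(&ctx);
}

Using |= instead of returning early means every cache still gets initialised even if an earlier one failed, so the error path can free them all unconditionally.
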
1005 int ret;
1007 ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);
1013 if (unlikely(ret <= 0)) {
1017 ret = 1;
1020 percpu_ref_get_many(&ctx->refs, ret);
1021 ctx->nr_req_allocated += ret;
1023 while (ret--) {
1024 struct io_kiocb *req = reqs[ret];
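
Around lines 1005-1024, ret is a count rather than an error code: the bulk allocator reports how many objects it produced, a failed bulk call falls back to a single allocation and sets the count to one, and the tail loop walks exactly that many entries. A rough userspace sketch of the same shape, with malloc() and an invented alloc_bulk() standing in for kmem_cache_alloc_bulk():

#include <stdlib.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct req {
        int id;
};

/* invented bulk allocator: returns how many objects it managed to get */
static int alloc_bulk(unsigned int nr, void **out)
{
        unsigned int i;

        for (i = 0; i < nr; i++) {
                out[i] = malloc(sizeof(struct req));
                if (!out[i])
                        break;
        }
        return (int)i;
}

static int refill(void)
{
        void *reqs[8];
        int ret;

        ret = alloc_bulk(ARRAY_SIZE(reqs), reqs);
        if (ret <= 0) {
                /* bulk path produced nothing: fall back to a single object */
                reqs[0] = malloc(sizeof(struct req));
                if (!reqs[0])
                        return 0;
                ret = 1;
        }

        /* ret is a count: exactly that many slots of reqs[] are valid */
        while (ret--) {
                struct req *req = reqs[ret];

                req->id = ret;
                free(req);      /* a real cache would stash the object here */
        }
        return 1;
}

int main(void)
{
        return refill() ? 0 : 1;
}
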
1181 struct llist_node *ret;
1185 ret = tctx_task_work_run(tctx, UINT_MAX, &count);
1187 WARN_ON_ONCE(ret);
1324 int ret = 0;
1334 if (++ret >= events)
1338 return ret;
1346 int ret = 0;
1353 min_events -= ret;
1354 ret = __io_run_local_work_loop(&ctx->retry_llist.first, tw, max_events);
1363 ret += __io_run_local_work_loop(&node, tw, max_events - ret);
1367 if (io_run_local_work_continue(ctx, ret, min_events))
1371 if (io_run_local_work_continue(ctx, ret, min_events))
1374 trace_io_uring_local_work_run(ctx, ret, loops);
1375 return ret;
1393 int ret;
1396 ret = __io_run_local_work(ctx, ts, min_events, max_events);
1398 return ret;
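
Lines 1324-1398 use ret as a progress counter: the inner loop stops once it has handled its event budget, and the outer function adds the counts from a retry list and the main list, taking another pass when the caller asked for a minimum amount of progress. A compact sketch of that counting structure, with a plain singly linked list and a stub handler standing in for the kernel's llist and task-work items:

#include <stddef.h>

struct node {
        struct node *next;
};

/* hypothetical per-item handler */
static void handle(struct node *n)
{
        (void)n;
}

/* run at most max_events items from *head, return how many actually ran */
static int run_list(struct node **head, int max_events)
{
        int ret = 0;

        while (*head && ret < max_events) {
                struct node *n = *head;

                *head = n->next;
                handle(n);
                ret++;
        }
        return ret;
}

/* leftovers from the last run go first, then the main list; take one more
 * pass if the caller asked for a minimum amount of progress */
static int run_local_work(struct node **retry, struct node **list,
                          int min_events, int max_events)
{
        int ret;

        ret = run_list(retry, max_events);
        ret += run_list(list, max_events - ret);

        if (ret < min_events && *list)
                ret += run_list(list, max_events - ret);

        return ret;
}

int main(void)
{
        struct node a = { NULL }, b = { &a };
        struct node *retry = NULL, *list = &b;

        return run_local_work(&retry, &list, 1, 8) == 2 ? 0 : 1;
}
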
1418 void io_req_task_queue_fail(struct io_kiocb *req, int ret)
1420 io_req_set_res(req, ret, 0);
1598 int ret = 0;
1627 ret = io_do_iopoll(ctx, !min_events);
1628 if (unlikely(ret < 0))
1629 return ret;
1636 nr_events += ret;
1761 int ret;
1773 ret = def->issue(req, issue_flags);
1776 audit_uring_exit(!ret, ret);
1785 return ret;
1791 int ret;
1796 ret = __io_issue_sqe(req, issue_flags, def);
1798 if (ret == IOU_COMPLETE) {
1807 if (ret == IOU_ISSUE_SKIP_COMPLETE) {
1808 ret = 0;
1814 return ret;
1822 int ret;
1830 ret = __io_issue_sqe(req, issue_flags, &io_issue_defs[req->opcode]);
1832 WARN_ON_ONCE(ret == IOU_ISSUE_SKIP_COMPLETE);
1833 return ret;
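
Lines 1761-1833 show ret carrying sentinel values rather than plain errnos: the per-opcode handler can say "the result is ready now" or "the completion will arrive later", the caller translates those into either posting a result or returning 0, and any other value is passed back to the caller. A toy rendering of that dispatch; the IOU_* values and the handler here are made up for the sketch and are not the kernel's definitions:

#include <stdio.h>

/* made-up sentinel values for this sketch, not the kernel's definitions */
enum {
        IOU_COMPLETE            = -1000,  /* result is ready, post it now */
        IOU_ISSUE_SKIP_COMPLETE = -1001,  /* completion will be posted later */
};

struct req {
        int result;
        int completed;
};

/* hypothetical opcode handler: a no-op that finishes immediately */
static int issue_nop(struct req *req)
{
        req->result = 0;
        return IOU_COMPLETE;
}

static void post_completion(struct req *req)
{
        req->completed = 1;
        printf("completed with result %d\n", req->result);
}

static int issue(struct req *req)
{
        int ret;

        ret = issue_nop(req);
        if (ret == IOU_COMPLETE) {
                /* handler produced a result synchronously: post it here */
                post_completion(req);
                return 0;
        }
        if (ret == IOU_ISSUE_SKIP_COMPLETE)
                return 0;       /* someone else owns the completion */
        return ret;             /* anything else goes back to the caller */
}

int main(void)
{
        struct req req = { 0, 0 };

        return issue(&req);
}
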
1855 int ret = 0, err = -ECANCELED;
1908 ret = io_issue_sqe(req, issue_flags);
1909 if (ret != -EAGAIN)
1941 if (ret)
1942 io_req_task_queue_fail(req, ret);
1991 static void io_queue_async(struct io_kiocb *req, unsigned int issue_flags, int ret)
1994 if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
1996 io_req_defer_failed(req, ret);
2000 ret = io_req_sqe_copy(req, issue_flags);
2001 if (unlikely(ret))
2023 int ret;
2025 ret = io_issue_sqe(req, issue_flags);
2031 if (unlikely(ret))
2032 io_queue_async(req, issue_flags, ret);
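
At lines 1991-2032 the value of ret decides the request's fate: anything other than -EAGAIN, or -EAGAIN on a request that is not allowed to wait, fails it immediately, while a retryable -EAGAIN has its submission data copied and is handed off for asynchronous retry. A hedged sketch of that branch, with stub fail/copy/punt helpers in place of the kernel's machinery:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct req {
        bool nowait;    /* request must not be retried asynchronously */
};

static void fail_request(struct req *req, int err)
{
        (void)req;
        printf("failed: %d\n", err);
}

/* hypothetical helpers: copy submission data, then hand off to a worker */
static int copy_submission(struct req *req) { (void)req; return 0; }
static void punt_to_worker(struct req *req) { (void)req; printf("punted\n"); }

static void queue_async(struct req *req, int ret)
{
        if (ret != -EAGAIN || req->nowait) {
                /* hard failure, or the caller forbade blocking retries */
                fail_request(req, ret);
                return;
        }

        ret = copy_submission(req);
        if (ret) {
                fail_request(req, ret);
                return;
        }
        punt_to_worker(req);
}

int main(void)
{
        struct req req = { false };

        queue_async(&req, -EAGAIN);     /* retryable: gets punted */
        queue_async(&req, -EINVAL);     /* not retryable: fails */
        return 0;
}
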
2184 int ret;
2190 ret = security_uring_override_creds(req->creds);
2191 if (ret) {
2193 return io_init_fail_req(req, ret);
2202 struct io_kiocb *req, int ret)
2208 trace_io_uring_req_failed(sqe, req, ret);
2216 req_fail_link_node(req, ret);
2227 return ret;
2243 int ret;
2245 ret = io_init_req(ctx, req, sqe);
2246 if (unlikely(ret))
2247 return io_submit_fail_init(sqe, req, ret);
2373 int ret;
2378 ret = left = min(nr, entries);
2405 ret -= left;
2407 if (!ret && io_req_cache_empty(ctx))
2408 ret = -EAGAIN;
2415 return ret;
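
Lines 2373-2415 are submission-count bookkeeping: ret starts at the number of entries the loop intends to consume, whatever is left unconsumed is subtracted when the loop stops early, and "submitted nothing because requests ran out" is converted to -EAGAIN so the caller can retry. Roughly, with invented submit_one()/out_of_requests() stubs:

#include <errno.h>
#include <stdbool.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* invented per-entry step: returns false when it cannot go on */
static bool submit_one(int i)
{
        return i < 3;   /* pretend resources run out after three entries */
}

static bool out_of_requests(void)
{
        return true;    /* pretend the request cache is empty */
}

static int submit(int nr, int entries)
{
        int left, ret;

        ret = left = MIN(nr, entries);
        while (left) {
                if (!submit_one(ret - left))
                        break;
                left--;
        }

        ret -= left;    /* how many entries were actually consumed */
        if (!ret && out_of_requests())
                ret = -EAGAIN;
        return ret;
}

int main(void)
{
        return submit(8, 16) == 3 ? 0 : 1;
}
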
2551 int ret = 0;
2561 ret = io_cqring_schedule_timeout(iowq, ctx->clockid, start_time);
2565 return ret;
2598 int ret;
2635 ret = set_compat_user_sigmask((const compat_sigset_t __user *)ext_arg->sig,
2639 ret = set_user_sigmask(ext_arg->sig, ext_arg->argsz);
2641 if (ret)
2642 return ret;
2667 ret = io_cqring_wait_schedule(ctx, &iowq, ext_arg, start_time);
2689 if (ret < 0)
2698 ret = -EBADR;
2704 ret = 0;
2712 restore_saved_sigmask_unless(ret == -EINTR);
2714 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
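
The wait path around lines 2551-2714 treats ret as the wait's eventual result: a temporary signal mask may be installed before sleeping, it is restored afterwards unless the wait was interrupted, and the final line suppresses any error when completions are already sitting in the ring, since userspace can reap those regardless. A rough standalone rendering of that tail logic, with the sigmask handling reduced to a stub:

#include <errno.h>

struct ring {
        unsigned int head;      /* consumer position */
        unsigned int tail;      /* producer position */
};

/* hypothetical stand-in for restoring a temporarily installed sigmask */
static void restore_sigmask(void) { }

/* hypothetical wait primitive: returns 0, -ETIME or -EINTR */
static int wait_for_events(void) { return -ETIME; }

static int cq_wait(struct ring *cq)
{
        int ret;

        ret = wait_for_events();

        /* keep the temporary mask in place on -EINTR so the signal lands */
        if (ret != -EINTR)
                restore_sigmask();

        /*
         * If completions arrived anyway, the caller can reap them, so an
         * error from the wait itself is not worth reporting.
         */
        return cq->head == cq->tail ? ret : 0;
}

int main(void)
{
        struct ring empty = { 0, 0 };
        struct ring busy  = { 0, 2 };

        if (cq_wait(&empty) != -ETIME)
                return 1;
        return cq_wait(&busy);
}
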
2940 int ret;
3010 ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
3011 if (WARN_ON_ONCE(ret))
3109 bool ret = false;
3122 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
3126 return ret;
3136 bool ret = false;
3149 ret |= io_uring_try_cancel_iowq(ctx);
3157 ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
3165 ret = true;
3172 ret |= io_run_local_work(ctx, INT_MAX, INT_MAX) > 0;
3174 ret |= io_cancel_defer_files(ctx, tctx, cancel_all);
3175 ret |= io_poll_remove_all(ctx, tctx, cancel_all);
3176 ret |= io_waitid_remove_all(ctx, tctx, cancel_all);
3177 ret |= io_futex_remove_all(ctx, tctx, cancel_all);
3178 ret |= io_uring_try_cancel_uring_cmd(ctx, tctx, cancel_all);
3180 ret |= io_kill_timeouts(ctx, tctx, cancel_all);
3182 ret |= io_run_task_work() > 0;
3184 ret |= flush_delayed_work(&ctx->fallback_work);
3185 return ret;
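
In the cancellation code at lines 3109-3185, ret is a bool again, but with the opposite sense from the init block: each helper reports whether it cancelled or progressed anything, the results are OR-ed, and the caller keeps repeating the pass until a whole pass reports false. A sketch of that convergence loop with two stubbed subsystems:

#include <stdbool.h>
#include <stdio.h>

static int pending_polls = 2;
static int pending_timeouts = 1;

/* hypothetical per-subsystem helpers: true means "something was cancelled" */
static bool cancel_polls(void)
{
        if (!pending_polls)
                return false;
        pending_polls--;
        return true;
}

static bool cancel_timeouts(void)
{
        if (!pending_timeouts)
                return false;
        pending_timeouts--;
        return true;
}

static bool try_cancel_pass(void)
{
        bool ret = false;

        ret |= cancel_polls();
        ret |= cancel_timeouts();
        return ret;
}

int main(void)
{
        int loops = 0;

        /* keep running passes until one of them finds nothing to cancel */
        while (try_cancel_pass())
                loops++;

        printf("converged after %d passes\n", loops);
        return 0;
}

Returning "made progress" as a bool rather than an error code is what lets the caller loop to a fixed point instead of guessing how many passes teardown needs.
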
3404 long ret;
3431 ret = -EOPNOTSUPP;
3437 ret = -EBADFD;
3446 ret = 0;
3449 ret = -EOWNERDEAD;
3457 ret = to_submit;
3459 ret = io_uring_add_tctx_node(ctx);
3460 if (unlikely(ret))
3464 ret = io_submit_sqes(ctx, to_submit);
3465 if (ret != to_submit) {
3507 if (!ret) {
3508 ret = ret2;
3523 return ret;
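
The io_uring_enter() lines (3404-3523) make ret the syscall's return value: it starts out as an error for unsupported flags or a dead ring, becomes the number of entries actually submitted, and a later wait stage's status only replaces it when nothing was submitted, so a partial submission count is never lost. Schematically, with both stages stubbed:

#include <errno.h>

/* hypothetical stages: submit returns a count, wait returns 0 or -errno */
static int submit_entries(unsigned int to_submit)
{
        return (int)to_submit;          /* pretend everything went in */
}

static int wait_for_completions(void)
{
        return -EINTR;                  /* pretend the wait was interrupted */
}

static long ring_enter(unsigned int to_submit, int getevents)
{
        long ret;
        int ret2;

        ret = submit_entries(to_submit);
        if (ret < 0)
                return ret;

        if (getevents) {
                ret2 = wait_for_completions();
                /* only surface the wait status if nothing was submitted */
                if (!ret)
                        ret = ret2;
        }
        return ret;
}

int main(void)
{
        if (ring_enter(4, 1) != 4)      /* submission count survives -EINTR */
                return 1;
        return ring_enter(0, 1) == -EINTR ? 0 : 1;
}
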
3550 int ret;
3567 ret = io_create_region(ctx, &ctx->ring_region, &rd, IORING_OFF_CQ_RING);
3568 if (ret)
3569 return ret;
3594 ret = io_create_region(ctx, &ctx->sq_region, &rd, IORING_OFF_SQES);
3595 if (ret) {
3597 return ret;
3733 int ret;
3735 ret = io_uring_sanitise_params(p);
3736 if (ret)
3737 return ret;
3739 ret = io_uring_fill_params(entries, p);
3740 if (unlikely(ret))
3741 return ret;
3800 ret = io_allocate_scq_urings(ctx, p);
3801 if (ret)
3807 ret = io_sq_offload_create(ctx, p);
3808 if (ret)
3822 ret = -EFAULT;
3832 ret = PTR_ERR(file);
3836 ret = __io_uring_add_tctx_node(ctx);
3837 if (ret)
3846 ret = io_ring_add_registered_file(tctx, file, 0, IO_RINGFD_REG_MAX);
3848 ret = io_uring_install_fd(file);
3849 if (ret < 0)
3852 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
3853 return ret;
3856 return ret;
3859 return ret;
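
The create path at lines 3733-3859 strings together parameter checks, ring allocation, offload setup and fd installation, each step assigning ret and bailing out to cleanup on failure; the repeated return ret lines at the end of the listing are the success and error exit points. A generic unwind skeleton of the same construction, with invented step names:

#include <errno.h>
#include <stdlib.h>

struct ring {
        void *sq;
        void *cq;
};

/* invented setup steps, each returning 0 or -errno */
static int check_params(int entries)
{
        return entries > 0 ? 0 : -EINVAL;
}

static int alloc_rings(struct ring *r, int entries)
{
        r->sq = calloc((size_t)entries, 64);
        r->cq = calloc((size_t)entries, 16);
        if (!r->sq || !r->cq)
                return -ENOMEM;
        return 0;
}

static int install_fd(struct ring *r)
{
        (void)r;
        return 3;       /* pretend descriptor 3 was installed */
}

static int ring_create(int entries)
{
        struct ring *r;
        int ret;

        ret = check_params(entries);
        if (ret)
                return ret;

        r = calloc(1, sizeof(*r));
        if (!r)
                return -ENOMEM;

        ret = alloc_rings(r, entries);
        if (ret)
                goto err;

        ret = install_fd(r);
        if (ret < 0)
                goto err;
        return ret;             /* success: the new descriptor */
err:
        free(r->sq);
        free(r->cq);
        free(r);
        return ret;
}

int main(void)
{
        return ring_create(8) == 3 ? 0 : 1;
}
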
3918 int ret;
3920 ret = io_uring_allowed();
3921 if (ret)
3922 return ret;