Lines matching refs:io_ring_ctx (each entry gives the source line number, the matching line, and, where one exists, the enclosing function)
146 static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
179 static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx) in __io_cqring_events()
184 static inline unsigned int __io_cqring_events_user(struct io_ring_ctx *ctx) in __io_cqring_events_user()
215 struct io_ring_ctx *ctx = head->ctx; in io_match_task_safe()
233 static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx) in io_req_add_to_cache()
240 struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs); in io_ring_ctx_ref_free()
247 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, in io_fallback_req_func()
276 static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) in io_ring_ctx_alloc()
278 struct io_ring_ctx *ctx; in io_ring_ctx_alloc()
370 static void io_account_cq_overflow(struct io_ring_ctx *ctx) in io_account_cq_overflow()
381 struct io_ring_ctx *ctx = req->ctx; in req_need_defer()
465 struct io_ring_ctx *ctx = req->ctx; in io_prep_async_work()
500 struct io_ring_ctx *ctx = req->ctx; in io_prep_async_link()
550 static __cold void io_queue_deferred(struct io_ring_ctx *ctx) in io_queue_deferred()
564 void __io_commit_cqring_flush(struct io_ring_ctx *ctx) in __io_commit_cqring_flush()
579 static inline void __io_cq_lock(struct io_ring_ctx *ctx) in __io_cq_lock()
585 static inline void io_cq_lock(struct io_ring_ctx *ctx) in io_cq_lock()
591 static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx) in __io_cq_unlock_post()
604 static void io_cq_unlock_post(struct io_ring_ctx *ctx) in io_cq_unlock_post()
613 static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool dying) in __io_cqring_overflow_flush()
665 static void io_cqring_overflow_kill(struct io_ring_ctx *ctx) in io_cqring_overflow_kill()
671 static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx) in io_cqring_do_overflow_flush()
725 static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data, in io_cqring_event_overflow()
778 bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow) in io_cqe_cache_refill()
810 static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, in io_fill_cqe_aux()
838 static bool __io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, in __io_post_aux_cqe()
850 bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags) in io_post_aux_cqe()
864 void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags) in io_add_aux_cqe()
880 struct io_ring_ctx *ctx = req->ctx; in io_req_post_cqe()
895 struct io_ring_ctx *ctx = req->ctx; in io_req_complete_post()
946 static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx) in io_preinit_req()
962 __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx) in __io_alloc_req_refill()
1004 struct io_ring_ctx *ctx = req->ctx; in __io_req_find_next_prep()
1028 static void ctx_flush_and_put(struct io_ring_ctx *ctx, struct io_tw_state *ts) in ctx_flush_and_put()
1049 struct io_ring_ctx *ctx = NULL; in io_handle_tw_list()
1096 struct io_ring_ctx *last_ctx = NULL; in io_fallback_tw()
1159 struct io_ring_ctx *ctx, in io_req_local_work_add()
1232 struct io_ring_ctx *ctx = req->ctx; in io_req_normal_work_add()
1264 void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx, in io_req_task_work_add_remote()
1272 static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx) in io_move_task_work_from_local()
1286 static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events, in io_run_local_work_continue()
1298 static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts, in __io_run_local_work()
1337 static inline int io_run_local_work_locked(struct io_ring_ctx *ctx, in io_run_local_work_locked()
1347 static int io_run_local_work(struct io_ring_ctx *ctx, int min_events) in io_run_local_work()
1397 static void io_free_batch_list(struct io_ring_ctx *ctx, in io_free_batch_list()
1434 void __io_submit_flush_completions(struct io_ring_ctx *ctx) in __io_submit_flush_completions()
1465 static unsigned io_cqring_events(struct io_ring_ctx *ctx) in io_cqring_events()
1476 static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx) in io_iopoll_try_reap_events()
1500 static int io_iopoll_check(struct io_ring_ctx *ctx, long min) in io_iopoll_check()
1587 struct io_ring_ctx *ctx = req->ctx; in io_iopoll_req_issued()
1672 struct io_ring_ctx *ctx = req->ctx; in io_drain_req()
1888 struct io_ring_ctx *ctx = req->ctx; in io_file_get_fixed()
1987 static inline bool io_check_restriction(struct io_ring_ctx *ctx, in io_check_restriction()
2007 struct io_ring_ctx *ctx = req->ctx; in io_init_req_drain()
2031 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, in io_init_req()
2130 struct io_ring_ctx *ctx = req->ctx; in io_submit_fail_init()
2164 static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, in io_submit_sqe()
2216 static void io_submit_state_end(struct io_ring_ctx *ctx) in io_submit_state_end()
2241 static void io_commit_sqring(struct io_ring_ctx *ctx) in io_commit_sqring()
2261 static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe) in io_get_sqe()
2295 int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr) in io_submit_sqes()
2359 int io_run_task_work_sig(struct io_ring_ctx *ctx) in io_run_task_work_sig()
2400 struct io_ring_ctx *ctx = iowq->ctx; in io_cqring_min_timer_wakeup()
2463 static int __io_cqring_wait_schedule(struct io_ring_ctx *ctx, in __io_cqring_wait_schedule()
2485 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx, in io_cqring_wait_schedule()
2514 static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags, in io_cqring_wait()
2641 static void *io_rings_map(struct io_ring_ctx *ctx, unsigned long uaddr, in io_rings_map()
2648 static void *io_sqes_map(struct io_ring_ctx *ctx, unsigned long uaddr, in io_sqes_map()
2655 static void io_rings_free(struct io_ring_ctx *ctx) in io_rings_free()
2675 static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries, in rings_size()
2712 static void io_req_caches_free(struct io_ring_ctx *ctx) in io_req_caches_free()
2729 static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx) in io_ring_ctx_free()
2784 struct io_ring_ctx *ctx = container_of(cb, struct io_ring_ctx, in io_activate_pollwq_cb()
2799 __cold void io_activate_pollwq(struct io_ring_ctx *ctx) in io_activate_pollwq()
2823 struct io_ring_ctx *ctx = file->private_data; in io_uring_poll()
2861 struct io_ring_ctx *ctx;
2890 struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work); in io_ring_exit_work()
2983 static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) in io_ring_ctx_wait_and_kill()
3008 struct io_ring_ctx *ctx = file->private_data; in io_uring_release()
3028 static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx, in io_cancel_defer_files()
3055 static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx) in io_uring_try_cancel_iowq()
3079 static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx, in io_uring_try_cancel_requests()
3152 struct io_ring_ctx *ctx; in io_uring_cancel_generic()
3283 struct io_ring_ctx *ctx; in SYSCALL_DEFINE6()
3429 static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx, in io_allocate_scq_urings()
3499 static struct file *io_uring_get_file(struct io_ring_ctx *ctx) in io_uring_get_file()
3509 struct io_ring_ctx *ctx; in io_uring_create()
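
The listing itself carries no prose, but it does expose the two recurring idioms by which this code gets at the ctx pointer: most helpers either take struct io_ring_ctx *ctx as a parameter or load it from a request's back-pointer (struct io_ring_ctx *ctx = req->ctx;), while callbacks that are handed only an embedded member recover the enclosing ctx with container_of(), as in io_ring_ctx_ref_free() and io_fallback_req_func() above. The sketch below is a minimal, self-contained userspace illustration of both patterns; struct ring_ctx, struct request, and struct ref are invented stand-ins, not the kernel's definitions.

    /*
     * Minimal userspace sketch (not kernel code) of the two ctx-recovery
     * patterns visible in the listing above. All types here are invented
     * stand-ins for illustration; they are not the kernel's definitions.
     */
    #include <stddef.h>
    #include <stdio.h>

    /* The kernel's container_of(), reduced to its portable core. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct ref {                    /* stand-in for an embedded refcount */
            int count;
    };

    struct ring_ctx {               /* stand-in for struct io_ring_ctx */
            struct ref refs;        /* embedded member, like ctx->refs */
            int id;
    };

    struct request {                /* stand-in for struct io_kiocb */
            struct ring_ctx *ctx;   /* back-pointer, like req->ctx */
    };

    /*
     * Pattern 2: the callback receives only the embedded member, so it
     * recovers the enclosing object the way io_ring_ctx_ref_free() does.
     */
    static void ref_release(struct ref *ref)
    {
            struct ring_ctx *ctx = container_of(ref, struct ring_ctx, refs);

            printf("released ctx %d\n", ctx->id);
    }

    int main(void)
    {
            struct ring_ctx ctx = { .refs = { .count = 1 }, .id = 1 };
            struct request req = { .ctx = &ctx };

            /* Pattern 1: follow the request's cached back-pointer. */
            printf("req belongs to ctx %d\n", req.ctx->id);

            /* Pattern 2: recover the ctx from its embedded member. */
            ref_release(&ctx.refs);
            return 0;
    }

The container_of() pattern is what makes the embedded-member layout work: a refcount or deferred-work callback receives only the address of the member it was registered on, and pointer arithmetic against offsetof() recovers the surrounding object without needing an extra back-pointer field.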