Lines Matching refs:wait

154 wait_queue_head_t wait; member
187 struct wait_queue_entry wait; member
778 init_waitqueue_head(&ctx->wait); in ioctx_alloc()
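
By the function names these hits are from the Linux AIO implementation (fs/aio.c). The first three are the building blocks: a per-context wait_queue_head_t that readers sleep on, a wait_queue_entry embedded in each poll request, and the one-time head initialization in ioctx_alloc(). A minimal sketch of that setup, with a hypothetical my_ctx standing in for struct kioctx:

    #include <linux/wait.h>

    struct my_ctx {                                 /* hypothetical stand-in for struct kioctx */
            wait_queue_head_t wait;                 /* readers in read_events() sleep here */
    };

    static void my_ctx_init(struct my_ctx *ctx)
    {
            init_waitqueue_head(&ctx->wait);        /* as in ioctx_alloc(), line 778 */
    }
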
849 struct ctx_rq_wait *wait) in kill_ioctx() argument
865 wake_up_all(&ctx->wait); in kill_ioctx()
879 ctx->rq_wait = wait; in kill_ioctx()
895 struct ctx_rq_wait wait; in exit_aio() local
901 atomic_set(&wait.count, table->nr); in exit_aio()
902 init_completion(&wait.comp); in exit_aio()
922 kill_ioctx(mm, ctx, &wait); in exit_aio()
925 if (!atomic_sub_and_test(skipped, &wait.count)) { in exit_aio()
927 wait_for_completion(&wait.comp); in exit_aio()
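
exit_aio() drains every live context through struct ctx_rq_wait, a completion paired with an atomic count of contexts still being torn down; io_destroy() (lines 1455-1473 below) reuses the same structure with a count of one. A hedged sketch of the pattern; the my_table walk and my_kill_one() helper are assumptions, not the exact fs/aio.c code:

    #include <linux/completion.h>
    #include <linux/atomic.h>

    struct ctx_rq_wait {
            struct completion comp;
            atomic_t count;
    };

    struct my_table {                                       /* hypothetical */
            unsigned nr;
            struct my_ctx *ctx[];
    };

    /* Hypothetical teardown helper: decrements wait->count and completes it at zero. */
    void my_kill_one(struct my_ctx *ctx, struct ctx_rq_wait *wait);

    static void drain_all(struct my_table *table)
    {
            struct ctx_rq_wait wait;
            int i, skipped = 0;

            init_completion(&wait.comp);
            atomic_set(&wait.count, table->nr);

            for (i = 0; i < table->nr; i++) {
                    if (!table->ctx[i]) {
                            skipped++;                      /* empty slot, nothing to kill */
                            continue;
                    }
                    my_kill_one(table->ctx[i], &wait);
            }

            /* Subtract the skipped slots; sleep only if teardown is still in flight. */
            if (!atomic_sub_and_test(skipped, &wait.count))
                    wait_for_completion(&wait.comp);
    }
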
1192 if (waitqueue_active(&ctx->wait)) { in aio_complete()
1196 spin_lock_irqsave(&ctx->wait.lock, flags); in aio_complete()
1197 list_for_each_entry_safe(curr, next, &ctx->wait.head, w.entry) in aio_complete()
1202 spin_unlock_irqrestore(&ctx->wait.lock, flags); in aio_complete()
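
On the completion side (lines 1192-1202), aio_complete() peeks at the queue with waitqueue_active(), then walks ctx->wait under its lock and wakes only waiters whose minimum event count is already satisfied. A hedged sketch of that walk, reusing my_ctx from the sketch above; the my_waiter layout and the avail bookkeeping are assumptions filled in from the lines above:

    #include <linux/wait.h>
    #include <linux/sched.h>

    struct my_waiter {                                      /* hypothetical, cf. the w.entry use above */
            struct wait_queue_entry w;
            size_t min_nr;
    };

    static void wake_satisfied(struct my_ctx *ctx, size_t avail)
    {
            struct my_waiter *curr, *next;
            unsigned long flags;

            /* A barrier is needed before the lockless waitqueue_active() peek so
             * the just-published event is visible to a waiter racing with us. */
            smp_mb();
            if (!waitqueue_active(&ctx->wait))
                    return;

            spin_lock_irqsave(&ctx->wait.lock, flags);
            list_for_each_entry_safe(curr, next, &ctx->wait.head, w.entry)
                    if (avail >= curr->min_nr) {
                            list_del_init_careful(&curr->w.entry);
                            wake_up_process(curr->w.private);
                    }
            spin_unlock_irqrestore(&ctx->wait.lock, flags);
    }
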
1351 ret2 = prepare_to_wait_event(&ctx->wait, &w.w, TASK_INTERRUPTIBLE); in read_events()
1362 finish_wait(&ctx->wait, &w.w); in read_events()
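
read_events() is the sleeping side (lines 1351-1362): it open-codes a prepare_to_wait_event()/finish_wait() loop rather than using wait_event_interruptible(), presumably so it can recheck the ring and apply its own timeout and min_nr policy between sleeps. A hedged sketch, again reusing my_ctx; events_available() is a hypothetical stand-in for the real ring check:

    #include <linux/wait.h>

    bool events_available(struct my_ctx *ctx);              /* hypothetical ring check */

    static long wait_for_events(struct my_ctx *ctx)
    {
            struct wait_queue_entry wq;
            long ret;

            init_wait(&wq);
            for (;;) {
                    /* Returns -ERESTARTSYS if a signal is pending in TASK_INTERRUPTIBLE. */
                    ret = prepare_to_wait_event(&ctx->wait, &wq, TASK_INTERRUPTIBLE);
                    if (ret)
                            break;
                    if (events_available(ctx)) {
                            ret = 0;
                            break;
                    }
                    schedule();                             /* or schedule_timeout() for a deadline */
            }
            finish_wait(&ctx->wait, &wq);
            return ret;
    }
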
1455 struct ctx_rq_wait wait; in SYSCALL_DEFINE1() local
1458 init_completion(&wait.comp); in SYSCALL_DEFINE1()
1459 atomic_set(&wait.count, 1); in SYSCALL_DEFINE1()
1465 ret = kill_ioctx(current->mm, ioctx, &wait); in SYSCALL_DEFINE1()
1473 wait_for_completion(&wait.comp); in SYSCALL_DEFINE1()
1710 if (!list_empty(&req->wait.entry)) in poll_iocb_lock_wq()
1759 list_del_init(&req->wait.entry); in aio_poll_complete_work()
1787 static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync, in aio_poll_wake() argument
1790 struct poll_iocb *req = container_of(wait, struct poll_iocb, wait); in aio_poll_wake()
1816 list_del_init(&req->wait.entry); in aio_poll_wake()
1855 list_del_init(&req->wait.entry); in aio_poll_wake()
1891 add_wait_queue(head, &pt->iocb->poll.wait); in aio_poll_queue_proc()
1924 INIT_LIST_HEAD(&req->wait.entry); in aio_poll()
1925 init_waitqueue_func_entry(&req->wait, aio_poll_wake); in aio_poll()
1944 list_del_init(&req->wait.entry); in aio_poll()
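
Finally, the poll path (lines 1710-1944) embeds the wait_queue_entry in each request and registers a custom wake function, so a wakeup runs aio's completion logic in the waker's context instead of waking a sleeping task; container_of() recovers the request from the entry (line 1790). A hedged sketch of that shape; my_poll_req, my_handle_wake() and the single-shot removal are assumptions, not the exact fs/aio.c logic:

    #include <linux/wait.h>
    #include <linux/poll.h>

    struct my_poll_req {                                    /* hypothetical, cf. struct poll_iocb */
            struct wait_queue_entry wait;
            __poll_t events;
    };

    int my_handle_wake(struct my_poll_req *req, __poll_t mask);    /* hypothetical completion path */

    static int my_poll_wake(struct wait_queue_entry *wait, unsigned mode,
                            int sync, void *key)
    {
            struct my_poll_req *req = container_of(wait, struct my_poll_req, wait);
            __poll_t mask = key_to_poll(key);

            if (mask && !(mask & req->events))
                    return 0;                               /* not an event we asked for */

            list_del_init(&req->wait.entry);                /* single shot: leave the queue */
            return my_handle_wake(req, mask);
    }

    static void my_poll_arm(struct my_poll_req *req, wait_queue_head_t *head)
    {
            INIT_LIST_HEAD(&req->wait.entry);               /* as at line 1924 */
            init_waitqueue_func_entry(&req->wait, my_poll_wake);
            add_wait_queue(head, &req->wait);               /* as in aio_poll_queue_proc(), line 1891 */
    }
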