Lines Matching refs:poll

797 struct io_poll_iocb poll; member
826 struct io_poll_iocb poll; member
5306 static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll, in __io_async_wake() argument
5310 if (mask && !(mask & poll->events)) in __io_async_wake()
5315 list_del_init(&poll->wait.entry); in __io_async_wake()
5330 static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll) in io_poll_rewait() argument
5337 WRITE_ONCE(poll->canceled, true); in io_poll_rewait()
5339 if (!req->result && !READ_ONCE(poll->canceled)) { in io_poll_rewait()
5340 struct poll_table_struct pt = { ._key = poll->events }; in io_poll_rewait()
5342 req->result = vfs_poll(req->file, &pt) & poll->events; in io_poll_rewait()
5346 if (!req->result && !READ_ONCE(poll->canceled)) { in io_poll_rewait()
5347 add_wait_queue(poll->head, &poll->wait); in io_poll_rewait()
5365 return &req->poll; in io_poll_get_single()
5366 return &req->apoll->poll; in io_poll_get_single()
5372 struct io_poll_iocb *poll = io_poll_get_double(req); in io_poll_remove_double() local
5376 if (poll && poll->head) { in io_poll_remove_double()
5377 struct wait_queue_head *head = poll->head; in io_poll_remove_double()
5380 list_del_init(&poll->wait.entry); in io_poll_remove_double()
5381 if (poll->wait.private) in io_poll_remove_double()
5383 poll->head = NULL; in io_poll_remove_double()
5395 if (READ_ONCE(req->poll.canceled)) { in __io_poll_complete()
5397 req->poll.events |= EPOLLONESHOT; in __io_poll_complete()
5401 if (req->poll.events & EPOLLONESHOT) in __io_poll_complete()
5404 req->poll.events |= EPOLLONESHOT; in __io_poll_complete()
5418 if (io_poll_rewait(req, &req->poll)) { in io_poll_task_func()
5423 if (req->poll.done) { in io_poll_task_func()
5431 req->poll.done = true; in io_poll_task_func()
5434 add_wait_queue(req->poll.head, &req->poll.wait); in io_poll_task_func()
5452 struct io_poll_iocb *poll = io_poll_get_single(req); in io_poll_double_wake() local
5457 if (mask && !(mask & poll->events)) in io_poll_double_wake()
5459 if (!(poll->events & EPOLLONESHOT)) in io_poll_double_wake()
5460 return poll->wait.func(&poll->wait, mode, sync, key); in io_poll_double_wake()
5464 if (poll->head) { in io_poll_double_wake()
5467 spin_lock_irqsave(&poll->head->lock, flags); in io_poll_double_wake()
5468 done = list_empty(&poll->wait.entry); in io_poll_double_wake()
5470 list_del_init(&poll->wait.entry); in io_poll_double_wake()
5473 spin_unlock_irqrestore(&poll->head->lock, flags); in io_poll_double_wake()
5476 poll->wait.func(&poll->wait, mode, sync, key); in io_poll_double_wake()
5483 static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events, in io_init_poll_iocb() argument
5486 poll->head = NULL; in io_init_poll_iocb()
5487 poll->done = false; in io_init_poll_iocb()
5488 poll->canceled = false; in io_init_poll_iocb()
5491 poll->events = events | IO_POLL_UNMASK; in io_init_poll_iocb()
5492 INIT_LIST_HEAD(&poll->wait.entry); in io_init_poll_iocb()
5493 init_waitqueue_func_entry(&poll->wait, wake_func); in io_init_poll_iocb()
5496 static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, in __io_queue_proc() argument
5508 struct io_poll_iocb *poll_one = poll; in __io_queue_proc()
5526 poll = kmalloc(sizeof(*poll), GFP_ATOMIC); in __io_queue_proc()
5527 if (!poll) { in __io_queue_proc()
5531 io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake); in __io_queue_proc()
5533 poll->wait.private = req; in __io_queue_proc()
5535 *poll_ptr = poll; in __io_queue_proc()
5541 poll->head = head; in __io_queue_proc()
5543 if (poll->events & EPOLLEXCLUSIVE) in __io_queue_proc()
5544 add_wait_queue_exclusive(head, &poll->wait); in __io_queue_proc()
5546 add_wait_queue(head, &poll->wait); in __io_queue_proc()
5555 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll); in io_async_queue_proc()
5565 if (io_poll_rewait(req, &apoll->poll)) { in io_async_task_func()
5572 apoll->poll.done = true; in io_async_task_func()
5575 if (!READ_ONCE(apoll->poll.canceled)) in io_async_task_func()
5585 struct io_poll_iocb *poll = &req->apoll->poll; in io_async_wake() local
5590 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func); in io_async_wake()
5603 struct io_poll_iocb *poll, in __io_arm_poll_handler() argument
5612 io_init_poll_iocb(poll, mask, wake_func); in __io_arm_poll_handler()
5613 poll->file = req->file; in __io_arm_poll_handler()
5614 poll->wait.private = req; in __io_arm_poll_handler()
5621 mask = vfs_poll(req->file, &ipt->pt) & poll->events; in __io_arm_poll_handler()
5626 if (ipt->error || (mask && (poll->events & EPOLLONESHOT))) in __io_arm_poll_handler()
5628 if (likely(poll->head)) { in __io_arm_poll_handler()
5629 spin_lock_irq(&poll->head->lock); in __io_arm_poll_handler()
5630 if (unlikely(list_empty(&poll->wait.entry))) { in __io_arm_poll_handler()
5636 if ((mask && (poll->events & EPOLLONESHOT)) || ipt->error) in __io_arm_poll_handler()
5637 list_del_init(&poll->wait.entry); in __io_arm_poll_handler()
5639 WRITE_ONCE(poll->canceled, true); in __io_arm_poll_handler()
5640 else if (!poll->done) /* actually waiting for an event */ in __io_arm_poll_handler()
5642 spin_unlock_irq(&poll->head->lock); in __io_arm_poll_handler()
5687 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, in io_arm_poll_handler()
5694 mask, apoll->poll.events); in io_arm_poll_handler()
5699 struct io_poll_iocb *poll, bool do_cancel) in __io_poll_remove_one() argument
5704 if (!poll->head) in __io_poll_remove_one()
5706 spin_lock_irq(&poll->head->lock); in __io_poll_remove_one()
5708 WRITE_ONCE(poll->canceled, true); in __io_poll_remove_one()
5709 if (!list_empty(&poll->wait.entry)) { in __io_poll_remove_one()
5710 list_del_init(&poll->wait.entry); in __io_poll_remove_one()
5713 spin_unlock_irq(&poll->head->lock); in __io_poll_remove_one()
5847 struct io_poll_iocb *poll = &req->poll; in io_poll_wake() local
5849 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func); in io_poll_wake()
5857 __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data); in io_poll_queue_proc()
5862 struct io_poll_iocb *poll = &req->poll; in io_poll_add_prep() local
5874 poll->events = io_poll_parse_events(sqe, flags); in io_poll_add_prep()
5880 struct io_poll_iocb *poll = &req->poll; in io_poll_add() local
5888 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events, in io_poll_add()
5931 completing = !__io_poll_remove_one(preq, &preq->poll, false); in io_poll_update()
5932 if (completing && (preq->poll.events & EPOLLONESHOT)) { in io_poll_update()
5947 preq->poll.events &= ~0xffff; in io_poll_update()
5948 preq->poll.events |= req->poll_update.events & 0xffff; in io_poll_update()
5949 preq->poll.events |= IO_POLL_UNMASK; in io_poll_update()
10284 .poll = io_uring_poll,
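
All of the references above follow the same waitqueue pattern: io_uring registers a wait entry on the file's poll head via __io_queue_proc(), and the wake callback (__io_async_wake() behind io_poll_wake()/io_async_wake()) checks the ready mask against the requested events, detaches the entry, and defers completion to task context. A minimal sketch of that callback shape, assuming only the generic <linux/wait.h>/<linux/poll.h> API; the names below (my_poll, my_poll_wake) are illustrative placeholders, not the in-tree io_uring types:

#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/list.h>

struct my_poll {
	struct wait_queue_head	*head;		/* waitqueue we registered on */
	struct wait_queue_entry	wait;		/* our entry on that queue */
	__poll_t		events;		/* mask handed to vfs_poll() */
};

static int my_poll_wake(struct wait_queue_entry *wait, unsigned int mode,
			int sync, void *key)
{
	struct my_poll *p = container_of(wait, struct my_poll, wait);
	__poll_t mask = key_to_poll(key);

	/* Woken for an event we did not ask for: stay queued, ignore it. */
	if (mask && !(mask & p->events))
		return 0;

	/* Detach so a one-shot poll fires only once; the real code then
	 * punts the completion to task context via task_work. */
	list_del_init(&p->wait.entry);
	return 1;
}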