Lines matching refs: req (identifier cross-reference, fs/aio.c)

586 struct aio_kiocb *req; in kiocb_set_cancel_fn() local
597 req = container_of(iocb, struct aio_kiocb, rw); in kiocb_set_cancel_fn()
599 if (WARN_ON_ONCE(!list_empty(&req->ki_list))) in kiocb_set_cancel_fn()
602 ctx = req->ki_ctx; in kiocb_set_cancel_fn()
605 list_add_tail(&req->ki_list, &ctx->active_reqs); in kiocb_set_cancel_fn()
606 req->ki_cancel = cancel; in kiocb_set_cancel_fn()
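The kiocb_set_cancel_fn() lines above are the hook a driver uses to make an in-flight AIO request cancellable: the request is queued on its context's active_reqs list and ki_cancel is recorded (the WARN_ON_ONCE at line 599 guards against registering twice). A minimal caller sketch under my own assumptions; example_cancel() and example_read_iter() are illustrative names, not kernel code:

#include <linux/fs.h>
#include <linux/aio.h>
#include <linux/uio.h>

/* Hypothetical cancel callback: would abort the driver's in-flight
 * operation tied to this kiocb. */
static int example_cancel(struct kiocb *iocb)
{
	/* ... abort the hardware op, then finish via iocb->ki_complete() ... */
	return 0;
}

static ssize_t example_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	if (!is_sync_kiocb(iocb))
		kiocb_set_cancel_fn(iocb, example_cancel);
	/* queue the request; completion happens asynchronously */
	return -EIOCBQUEUED;
}

The free_ioctx_users() lines below are the consumer side: at context teardown, each request still on active_reqs has its ki_cancel() invoked and is unlinked.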
650 struct aio_kiocb *req; in free_ioctx_users() local
655 req = list_first_entry(&ctx->active_reqs, in free_ioctx_users()
657 req->ki_cancel(&req->rw); in free_ioctx_users()
658 list_del_init(&req->ki_list); in free_ioctx_users()
1056 struct aio_kiocb *req; in aio_get_req() local
1058 req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL); in aio_get_req()
1059 if (unlikely(!req)) in aio_get_req()
1063 kmem_cache_free(kiocb_cachep, req); in aio_get_req()
1068 req->ki_ctx = ctx; in aio_get_req()
1069 INIT_LIST_HEAD(&req->ki_list); in aio_get_req()
1070 refcount_set(&req->ki_refcnt, 2); in aio_get_req()
1071 req->ki_eventfd = NULL; in aio_get_req()
1072 return req; in aio_get_req()
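aio_get_req() allocates the request from kiocb_cachep and, notably, starts ki_refcnt at two; the kmem_cache_free() at line 1063 is the early-failure path before the request is ever visible. A lifecycle sketch in comment form, inferred from this listing rather than quoted from kernel documentation:

/*
 * refcount_set(&req->ki_refcnt, 2) hands out two references:
 *   - one for the submission path, dropped by iocb_put(req) in
 *     io_submit_one() (line 2056 below);
 *   - one for completion, dropped once the request finishes
 *     (e.g. from aio_complete_rw()).
 * If __io_submit_one() fails, iocb_destroy(req) at line 2064
 * additionally disposes of the now-broken request.
 */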
1510 static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb, int rw_type) in aio_prep_rw() argument
1514 req->ki_write_stream = 0; in aio_prep_rw()
1515 req->ki_complete = aio_complete_rw; in aio_prep_rw()
1516 req->private = NULL; in aio_prep_rw()
1517 req->ki_pos = iocb->aio_offset; in aio_prep_rw()
1518 req->ki_flags = req->ki_filp->f_iocb_flags | IOCB_AIO_RW; in aio_prep_rw()
1520 req->ki_flags |= IOCB_EVENTFD; in aio_prep_rw()
1533 req->ki_ioprio = iocb->aio_reqprio; in aio_prep_rw()
1535 req->ki_ioprio = get_current_ioprio(); in aio_prep_rw()
1537 ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags, rw_type); in aio_prep_rw()
1541 req->ki_flags &= ~IOCB_HIPRI; /* no one is going to poll for this I/O */ in aio_prep_rw()
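aio_prep_rw() is where userspace iocb fields become kiocb state: aio_offset seeds ki_pos, the submitter's aio_reqprio is honoured only when explicitly requested (falling back to get_current_ioprio()), aio_rw_flags goes through kiocb_set_rw_flags(), and IOCB_HIPRI is stripped since nothing will poll for completion. A userspace-side sketch of filling those fields; fill_prio_read() is a hypothetical helper and the header set is my assumption:

#include <linux/aio_abi.h>
#include <linux/fs.h>      /* RWF_DSYNC */
#include <linux/ioprio.h>  /* IOPRIO_PRIO_VALUE */
#include <string.h>

/* Hypothetical helper: fill the submission fields consumed by
 * aio_prep_rw() above. */
static void fill_prio_read(struct iocb *cb, int fd, void *buf, size_t len)
{
	memset(cb, 0, sizeof(*cb));
	cb->aio_fildes = fd;
	cb->aio_lio_opcode = IOCB_CMD_PREAD;
	cb->aio_buf = (unsigned long)buf;
	cb->aio_nbytes = len;
	cb->aio_offset = 0;                               /* becomes req->ki_pos */
	cb->aio_reqprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4);
	cb->aio_flags = IOCB_FLAG_IOPRIO;                 /* else current ioprio is used */
	cb->aio_rw_flags = RWF_DSYNC;                     /* parsed by kiocb_set_rw_flags() */
}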
1561 static inline void aio_rw_done(struct kiocb *req, ssize_t ret) in aio_rw_done() argument
1577 req->ki_complete(req, ret); in aio_rw_done()
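aio_rw_done() is the shim between ->read_iter()/->write_iter() return values and completion; the listing only shows its final ki_complete() call (line 1577), so this dispatch summary is a reconstruction, not quoted source:

/*
 * ret == -EIOCBQUEUED       -> nothing to do; the driver will call
 *                              ki_complete() itself later;
 * ret is a -ERESTART* code  -> mapped to -EINTR (a restart value must
 *                              never leak to userspace), then completed;
 * anything else             -> completed synchronously via
 *                              req->ki_complete(req, ret).
 */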
1581 static int aio_read(struct kiocb *req, const struct iocb *iocb, in aio_read() argument
1589 ret = aio_prep_rw(req, iocb, READ); in aio_read()
1592 file = req->ki_filp; in aio_read()
1601 ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter)); in aio_read()
1603 aio_rw_done(req, file->f_op->read_iter(req, &iter)); in aio_read()
1608 static int aio_write(struct kiocb *req, const struct iocb *iocb, in aio_write() argument
1616 ret = aio_prep_rw(req, iocb, WRITE); in aio_write()
1619 file = req->ki_filp; in aio_write()
1629 ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter)); in aio_write()
1632 kiocb_start_write(req); in aio_write()
1633 req->ki_flags |= IOCB_WRITE; in aio_write()
1634 aio_rw_done(req, file->f_op->write_iter(req, &iter)); in aio_write()
1651 static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb, in aio_fsync() argument
1658 if (unlikely(!req->file->f_op->fsync)) in aio_fsync()
1661 req->creds = prepare_creds(); in aio_fsync()
1662 if (!req->creds) in aio_fsync()
1665 req->datasync = datasync; in aio_fsync()
1666 INIT_WORK(&req->work, aio_fsync_work); in aio_fsync()
1667 schedule_work(&req->work); in aio_fsync()
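aio_fsync() runs out of line: it pins credentials, records the datasync flag, and defers the actual sync to a workqueue via aio_fsync_work. The two opcodes dispatched in __io_submit_one() below map onto that flag; a small sketch, with fill_fsync() a hypothetical name:

#include <linux/aio_abi.h>
#include <string.h>

/* Hypothetical helper: async fsync request. IOCB_CMD_FDSYNC reaches
 * aio_fsync() above with datasync == true, IOCB_CMD_FSYNC with false. */
static void fill_fsync(struct iocb *cb, int fd, int datasync)
{
	memset(cb, 0, sizeof(*cb));
	cb->aio_fildes = fd;
	cb->aio_lio_opcode = datasync ? IOCB_CMD_FDSYNC : IOCB_CMD_FSYNC;
}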
1673 struct poll_iocb *req = container_of(work, struct poll_iocb, work); in aio_poll_put_work() local
1674 struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll); in aio_poll_put_work()
1687 static bool poll_iocb_lock_wq(struct poll_iocb *req) in poll_iocb_lock_wq() argument
1707 head = smp_load_acquire(&req->head); in poll_iocb_lock_wq()
1710 if (!list_empty(&req->wait.entry)) in poll_iocb_lock_wq()
1718 static void poll_iocb_unlock_wq(struct poll_iocb *req) in poll_iocb_unlock_wq() argument
1720 spin_unlock(&req->head->lock); in poll_iocb_unlock_wq()
1726 struct poll_iocb *req = container_of(work, struct poll_iocb, work); in aio_poll_complete_work() local
1727 struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll); in aio_poll_complete_work()
1728 struct poll_table_struct pt = { ._key = req->events }; in aio_poll_complete_work()
1732 if (!READ_ONCE(req->cancelled)) in aio_poll_complete_work()
1733 mask = vfs_poll(req->file, &pt) & req->events; in aio_poll_complete_work()
1743 if (poll_iocb_lock_wq(req)) { in aio_poll_complete_work()
1744 if (!mask && !READ_ONCE(req->cancelled)) { in aio_poll_complete_work()
1749 if (req->work_need_resched) { in aio_poll_complete_work()
1750 schedule_work(&req->work); in aio_poll_complete_work()
1751 req->work_need_resched = false; in aio_poll_complete_work()
1753 req->work_scheduled = false; in aio_poll_complete_work()
1755 poll_iocb_unlock_wq(req); in aio_poll_complete_work()
1759 list_del_init(&req->wait.entry); in aio_poll_complete_work()
1760 poll_iocb_unlock_wq(req); in aio_poll_complete_work()
1773 struct poll_iocb *req = &aiocb->poll; in aio_poll_cancel() local
1775 if (poll_iocb_lock_wq(req)) { in aio_poll_cancel()
1776 WRITE_ONCE(req->cancelled, true); in aio_poll_cancel()
1777 if (!req->work_scheduled) { in aio_poll_cancel()
1779 req->work_scheduled = true; in aio_poll_cancel()
1781 poll_iocb_unlock_wq(req); in aio_poll_cancel()
1790 struct poll_iocb *req = container_of(wait, struct poll_iocb, wait); in aio_poll_wake() local
1791 struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll); in aio_poll_wake()
1796 if (mask && !(mask & req->events)) in aio_poll_wake()
1812 if (mask && !req->work_scheduled && in aio_poll_wake()
1816 list_del_init(&req->wait.entry); in aio_poll_wake()
1821 INIT_WORK(&req->work, aio_poll_put_work); in aio_poll_wake()
1822 schedule_work(&req->work); in aio_poll_wake()
1837 if (req->work_scheduled) { in aio_poll_wake()
1838 req->work_need_resched = true; in aio_poll_wake()
1840 schedule_work(&req->work); in aio_poll_wake()
1841 req->work_scheduled = true; in aio_poll_wake()
1854 WRITE_ONCE(req->cancelled, true); in aio_poll_wake()
1855 list_del_init(&req->wait.entry); in aio_poll_wake()
1863 smp_store_release(&req->head, NULL); in aio_poll_wake()
1897 struct poll_iocb *req = &aiocb->poll; in aio_poll() local
1909 INIT_WORK(&req->work, aio_poll_complete_work); in aio_poll()
1910 req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP; in aio_poll()
1912 req->head = NULL; in aio_poll()
1913 req->cancelled = false; in aio_poll()
1914 req->work_scheduled = false; in aio_poll()
1915 req->work_need_resched = false; in aio_poll()
1918 apt.pt._key = req->events; in aio_poll()
1924 INIT_LIST_HEAD(&req->wait.entry); in aio_poll()
1925 init_waitqueue_func_entry(&req->wait, aio_poll_wake); in aio_poll()
1927 mask = vfs_poll(req->file, &apt.pt) & req->events; in aio_poll()
1930 bool on_queue = poll_iocb_lock_wq(req); in aio_poll()
1932 if (!on_queue || req->work_scheduled) { in aio_poll()
1944 list_del_init(&req->wait.entry); in aio_poll()
1947 WRITE_ONCE(req->cancelled, true); in aio_poll()
1957 poll_iocb_unlock_wq(req); in aio_poll()
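Taken together, aio_poll() and the helpers above implement a one-shot poll: aio_poll_wake() is installed on the file's waitqueue, and either an immediate vfs_poll() hit at submission or the first matching wakeup completes the iocb, with work_scheduled/work_need_resched arbitrating between the wakeup path and the workqueue (cancellation routes through aio_poll_cancel() and the cancelled flag). From userspace it is just another opcode; fill_poll() is a hypothetical name:

#include <linux/aio_abi.h>
#include <poll.h>
#include <string.h>

/* Hypothetical helper: one-shot readiness request handled by
 * aio_poll() above. */
static void fill_poll(struct iocb *cb, int fd, unsigned int events)
{
	memset(cb, 0, sizeof(*cb));
	cb->aio_fildes = fd;
	cb->aio_lio_opcode = IOCB_CMD_POLL;
	cb->aio_buf = events;   /* e.g. POLLIN; read by demangle_poll() */
}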
1970 struct iocb __user *user_iocb, struct aio_kiocb *req, in __io_submit_one() argument
1973 req->ki_filp = fget(iocb->aio_fildes); in __io_submit_one()
1974 if (unlikely(!req->ki_filp)) in __io_submit_one()
1989 req->ki_eventfd = eventfd; in __io_submit_one()
1997 req->ki_res.obj = (u64)(unsigned long)user_iocb; in __io_submit_one()
1998 req->ki_res.data = iocb->aio_data; in __io_submit_one()
1999 req->ki_res.res = 0; in __io_submit_one()
2000 req->ki_res.res2 = 0; in __io_submit_one()
2004 return aio_read(&req->rw, iocb, false, compat); in __io_submit_one()
2006 return aio_write(&req->rw, iocb, false, compat); in __io_submit_one()
2008 return aio_read(&req->rw, iocb, true, compat); in __io_submit_one()
2010 return aio_write(&req->rw, iocb, true, compat); in __io_submit_one()
2012 return aio_fsync(&req->fsync, iocb, false); in __io_submit_one()
2014 return aio_fsync(&req->fsync, iocb, true); in __io_submit_one()
2016 return aio_poll(req, iocb); in __io_submit_one()
2026 struct aio_kiocb *req; in io_submit_one() local
2049 req = aio_get_req(ctx); in io_submit_one()
2050 if (unlikely(!req)) in io_submit_one()
2053 err = __io_submit_one(ctx, &iocb, user_iocb, req, compat); in io_submit_one()
2056 iocb_put(req); in io_submit_one()
2064 iocb_destroy(req); in io_submit_one()
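io_submit_one() is the per-iocb entry from the io_submit(2) syscall: copy in the user iocb, allocate via aio_get_req(), dispatch through __io_submit_one(), then drop the submission reference with iocb_put(), or dispose of a broken request with iocb_destroy(). A self-contained userspace sketch exercising this path with raw syscalls (no libaio; error handling elided, the file name is arbitrary):

#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	aio_context_t ctx = 0;
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };
	struct io_event ev;
	char buf[4096];
	int fd = open("/etc/hostname", O_RDONLY);

	syscall(SYS_io_setup, 128, &ctx);

	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = fd;
	cb.aio_lio_opcode = IOCB_CMD_PREAD;   /* dispatched to aio_read() */
	cb.aio_buf = (unsigned long)buf;
	cb.aio_nbytes = sizeof(buf);
	cb.aio_data = 0xdeadbeef;             /* returned in ev.data */

	syscall(SYS_io_submit, ctx, 1, cbs);              /* -> io_submit_one() */
	syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL);  /* reap ki_res */
	printf("res=%lld data=%llx\n",
	       (long long)ev.res, (unsigned long long)ev.data);

	syscall(SYS_io_destroy, ctx);
	return 0;
}

ev.obj comes back as the user iocb pointer (ki_res.obj, line 1997) and ev.data as aio_data (ki_res.data, line 1998), which is how completions are matched to submissions.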