Lines matching refs: req

41 struct fuse_req *req; in fuse_request_expired() local
43 req = list_first_entry_or_null(list, struct fuse_req, list); in fuse_request_expired()
44 if (!req) in fuse_request_expired()
46 return time_is_before_jiffies(req->create_time + fc->timeout.req_timeout); in fuse_request_expired()
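
The fuse_request_expired() lines above only ever look at the head of the list: requests are appended in creation order, so if the oldest one has not outlived fc->timeout.req_timeout, none of the others have either. A minimal userspace sketch of the same check, with invented types and CLOCK_MONOTONIC standing in for jiffies:

    /* Sketch only: the kernel compares req->create_time + fc->timeout.req_timeout
     * against jiffies via time_is_before_jiffies(); a monotonic clock and a plain
     * seconds value stand in for both here. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <time.h>

    struct req {
        struct req     *next;     /* pending list, oldest request first */
        struct timespec created;  /* stamped when the request was allocated */
    };

    static bool request_expired(const struct req *oldest, time_t timeout_sec)
    {
        struct timespec now;

        if (!oldest)              /* empty queue: nothing can have expired */
            return false;

        clock_gettime(CLOCK_MONOTONIC, &now);
        return now.tv_sec - oldest->created.tv_sec > timeout_sec;
    }
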
130 static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req) in fuse_request_init() argument
132 INIT_LIST_HEAD(&req->list); in fuse_request_init()
133 INIT_LIST_HEAD(&req->intr_entry); in fuse_request_init()
134 init_waitqueue_head(&req->waitq); in fuse_request_init()
135 refcount_set(&req->count, 1); in fuse_request_init()
136 __set_bit(FR_PENDING, &req->flags); in fuse_request_init()
137 req->fm = fm; in fuse_request_init()
138 req->create_time = jiffies; in fuse_request_init()
143 struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags); in fuse_request_alloc() local
144 if (req) in fuse_request_alloc()
145 fuse_request_init(fm, req); in fuse_request_alloc()
147 return req; in fuse_request_alloc()
150 static void fuse_request_free(struct fuse_req *req) in fuse_request_free() argument
152 kmem_cache_free(fuse_req_cachep, req); in fuse_request_free()
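
fuse_request_alloc() takes a zeroed request from the slab cache and fuse_request_init() fills in the non-zero state: list heads, the wait queue, an initial reference count of one, the FR_PENDING bit and the creation timestamp later used by the expiry check. A plain-C sketch of that zero-allocate-then-init pattern (the struct is an invented stand-in, not the kernel's struct fuse_req):

    #include <stdatomic.h>
    #include <stdlib.h>
    #include <time.h>

    #define REQ_PENDING (1u << 0)          /* stands in for FR_PENDING */

    struct req {
        atomic_uint   count;               /* refcount, starts at 1 for the caller */
        unsigned int  flags;               /* request state bits */
        time_t        create_time;         /* used later by the expiry check */
    };

    static struct req *request_alloc(void)
    {
        /* calloc() plays the role of kmem_cache_zalloc(): the request starts
         * fully zeroed, then only the non-zero fields are initialised. */
        struct req *r = calloc(1, sizeof(*r));

        if (r) {
            atomic_init(&r->count, 1);
            r->flags = REQ_PENDING;
            r->create_time = time(NULL);
        }
        return r;
    }

    static void request_free(struct req *r)
    {
        free(r);                           /* kmem_cache_free() in the kernel */
    }
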
155 static void __fuse_get_request(struct fuse_req *req) in __fuse_get_request() argument
157 refcount_inc(&req->count); in __fuse_get_request()
161 static void __fuse_put_request(struct fuse_req *req) in __fuse_put_request() argument
163 refcount_dec(&req->count); in __fuse_put_request()
193 static void fuse_put_request(struct fuse_req *req);
200 struct fuse_req *req; in fuse_get_req() local
225 req = fuse_request_alloc(fm, GFP_KERNEL); in fuse_get_req()
227 if (!req) { in fuse_get_req()
233 req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns); in fuse_get_req()
235 __set_bit(FR_WAITING, &req->flags); in fuse_get_req()
237 __set_bit(FR_BACKGROUND, &req->flags); in fuse_get_req()
250 req->in.h.uid = from_kuid(fc->user_ns, fsuid); in fuse_get_req()
251 req->in.h.gid = from_kgid(fc->user_ns, fsgid); in fuse_get_req()
253 if (no_idmap && unlikely(req->in.h.uid == ((uid_t)-1) || in fuse_get_req()
254 req->in.h.gid == ((gid_t)-1))) { in fuse_get_req()
255 fuse_put_request(req); in fuse_get_req()
259 return req; in fuse_get_req()
266 static void fuse_put_request(struct fuse_req *req) in fuse_put_request() argument
268 struct fuse_conn *fc = req->fm->fc; in fuse_put_request()
270 if (refcount_dec_and_test(&req->count)) { in fuse_put_request()
271 if (test_bit(FR_BACKGROUND, &req->flags)) { in fuse_put_request()
282 if (test_bit(FR_WAITING, &req->flags)) { in fuse_put_request()
283 __clear_bit(FR_WAITING, &req->flags); in fuse_put_request()
287 fuse_request_free(req); in fuse_put_request()
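
__fuse_get_request() and fuse_put_request() bracket every place a request may outlive its submitter: get is a bare refcount increment, and the final put drops the background/waiting accounting and frees the request. A reduced sketch of the get/put pair with C11 atomics (the accounting is omitted):

    #include <stdatomic.h>
    #include <stdlib.h>

    struct req { atomic_uint count; };

    static void req_get(struct req *r)
    {
        atomic_fetch_add_explicit(&r->count, 1, memory_order_relaxed);
    }

    static void req_put(struct req *r)
    {
        /* mirrors refcount_dec_and_test(): only the last reference frees */
        if (atomic_fetch_sub_explicit(&r->count, 1, memory_order_acq_rel) == 1)
            free(r);
    }
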
351 void fuse_dev_queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req) in fuse_dev_queue_interrupt() argument
354 if (list_empty(&req->intr_entry)) { in fuse_dev_queue_interrupt()
355 list_add_tail(&req->intr_entry, &fiq->interrupts); in fuse_dev_queue_interrupt()
361 if (test_bit(FR_FINISHED, &req->flags)) { in fuse_dev_queue_interrupt()
362 list_del_init(&req->intr_entry); in fuse_dev_queue_interrupt()
372 static void fuse_dev_queue_req(struct fuse_iqueue *fiq, struct fuse_req *req) in fuse_dev_queue_req() argument
376 if (req->in.h.opcode != FUSE_NOTIFY_REPLY) in fuse_dev_queue_req()
377 req->in.h.unique = fuse_get_unique_locked(fiq); in fuse_dev_queue_req()
378 list_add_tail(&req->list, &fiq->pending); in fuse_dev_queue_req()
382 req->out.h.error = -ENOTCONN; in fuse_dev_queue_req()
383 clear_bit(FR_PENDING, &req->flags); in fuse_dev_queue_req()
384 fuse_request_end(req); in fuse_dev_queue_req()
395 static void fuse_send_one(struct fuse_iqueue *fiq, struct fuse_req *req) in fuse_send_one() argument
397 req->in.h.len = sizeof(struct fuse_in_header) + in fuse_send_one()
398 fuse_len_args(req->args->in_numargs, in fuse_send_one()
399 (struct fuse_arg *) req->args->in_args); in fuse_send_one()
400 trace_fuse_request_send(req); in fuse_send_one()
401 fiq->ops->send_req(fiq, req); in fuse_send_one()
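
fuse_send_one() computes in.h.len as the input header plus the encoded argument lengths and hands the request to the queue's send_req op; the default fuse_dev_queue_req() then either assigns a unique ID and appends the request to fiq->pending or, once the connection is gone, fails it with -ENOTCONN and ends it. A simplified, mutex-protected sketch of that enqueue step (the connected flag and the plain incrementing counter are stand-ins):

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct req {
        struct req *next;
        uint64_t    unique;    /* matches the reply to the request later on */
        int         error;     /* out.h.error in the kernel */
    };

    struct input_queue {
        pthread_mutex_t lock;
        bool            connected;
        uint64_t        next_unique;
        struct req     *pending_head;   /* FIFO of not-yet-read requests */
        struct req    **pending_tail;   /* must start as &pending_head */
    };

    /* Returns 0 if queued, -ENOTCONN if the connection is already dead
     * (in which case the caller completes the request itself). */
    static int queue_req(struct input_queue *iq, struct req *r)
    {
        int err = 0;

        pthread_mutex_lock(&iq->lock);
        if (iq->connected) {
            r->unique = iq->next_unique++;
            r->next = NULL;
            *iq->pending_tail = r;          /* append: FIFO, oldest first */
            iq->pending_tail = &r->next;
        } else {
            r->error = err = -ENOTCONN;
        }
        pthread_mutex_unlock(&iq->lock);
        return err;
    }

In the kernel the unique counter advances in steps of two, leaving the lowest bit free to mark interrupt requests (the FUSE_INT_REQ_BIT usage visible in fuse_read_interrupt() further down).
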
421 struct fuse_req *req; in flush_bg_queue() local
423 req = list_first_entry(&fc->bg_queue, struct fuse_req, list); in flush_bg_queue()
424 list_del(&req->list); in flush_bg_queue()
426 fuse_send_one(fiq, req); in flush_bg_queue()
438 void fuse_request_end(struct fuse_req *req) in fuse_request_end() argument
440 struct fuse_mount *fm = req->fm; in fuse_request_end()
444 if (test_and_set_bit(FR_FINISHED, &req->flags)) in fuse_request_end()
447 trace_fuse_request_end(req); in fuse_request_end()
453 if (test_bit(FR_INTERRUPTED, &req->flags)) { in fuse_request_end()
455 list_del_init(&req->intr_entry); in fuse_request_end()
458 WARN_ON(test_bit(FR_PENDING, &req->flags)); in fuse_request_end()
459 WARN_ON(test_bit(FR_SENT, &req->flags)); in fuse_request_end()
460 if (test_bit(FR_BACKGROUND, &req->flags)) { in fuse_request_end()
462 clear_bit(FR_BACKGROUND, &req->flags); in fuse_request_end()
483 wake_up(&req->waitq); in fuse_request_end()
486 if (test_bit(FR_ASYNC, &req->flags)) in fuse_request_end()
487 req->args->end(fm, req->args, req->out.h.error); in fuse_request_end()
489 fuse_put_request(req); in fuse_request_end()
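
fuse_request_end() is the single completion point: FR_FINISHED is set with test_and_set_bit() so a request can only be ended once, an interrupted request is unlinked from the interrupt list, background accounting is dropped, waiters on req->waitq are woken, an FR_ASYNC request gets its args->end() callback, and the queue's reference is finally put. A condition-variable sketch of that once-only completion (locking details and the background bookkeeping are left out):

    #include <pthread.h>
    #include <stdbool.h>

    struct req {
        pthread_mutex_t lock;
        pthread_cond_t  waitq;
        bool            finished;                 /* FR_FINISHED */
        int             error;                    /* out.h.error */
        void          (*end)(struct req *, int);  /* set for async (background) requests */
    };

    static void request_end(struct req *r)
    {
        pthread_mutex_lock(&r->lock);
        if (r->finished) {                  /* test_and_set_bit(): end only once */
            pthread_mutex_unlock(&r->lock);
            return;
        }
        r->finished = true;
        pthread_cond_broadcast(&r->waitq);  /* wake_up(&req->waitq) */
        pthread_mutex_unlock(&r->lock);

        if (r->end)                         /* FR_ASYNC: tell the submitter */
            r->end(r, r->error);
        /* the kernel then drops its reference with fuse_put_request() */
    }
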
493 static int queue_interrupt(struct fuse_req *req) in queue_interrupt() argument
495 struct fuse_iqueue *fiq = &req->fm->fc->iq; in queue_interrupt()
498 if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) in queue_interrupt()
501 fiq->ops->send_interrupt(fiq, req); in queue_interrupt()
506 bool fuse_remove_pending_req(struct fuse_req *req, spinlock_t *lock) in fuse_remove_pending_req() argument
509 if (test_bit(FR_PENDING, &req->flags)) { in fuse_remove_pending_req()
514 list_del(&req->list); in fuse_remove_pending_req()
516 __fuse_put_request(req); in fuse_remove_pending_req()
517 req->out.h.error = -EINTR; in fuse_remove_pending_req()
524 static void request_wait_answer(struct fuse_req *req) in request_wait_answer() argument
526 struct fuse_conn *fc = req->fm->fc; in request_wait_answer()
532 err = wait_event_interruptible(req->waitq, in request_wait_answer()
533 test_bit(FR_FINISHED, &req->flags)); in request_wait_answer()
537 set_bit(FR_INTERRUPTED, &req->flags); in request_wait_answer()
540 if (test_bit(FR_SENT, &req->flags)) in request_wait_answer()
541 queue_interrupt(req); in request_wait_answer()
544 if (!test_bit(FR_FORCE, &req->flags)) { in request_wait_answer()
548 err = wait_event_killable(req->waitq, in request_wait_answer()
549 test_bit(FR_FINISHED, &req->flags)); in request_wait_answer()
553 if (test_bit(FR_URING, &req->flags)) in request_wait_answer()
554 removed = fuse_uring_remove_pending_req(req); in request_wait_answer()
556 removed = fuse_remove_pending_req(req, &fiq->lock); in request_wait_answer()
565 wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags)); in request_wait_answer()
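
request_wait_answer() waits in three stages: interruptibly (any signal marks the request FR_INTERRUPTED and, if the server has already read it, queues a FUSE_INTERRUPT), then killably (a fatal signal may still unlink a request the server has never seen and fail it with -EINTR), and finally unconditionally until FR_FINISHED. A much-reduced model of that ladder, with signals replaced by caller-supplied flags:

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>

    struct req {
        pthread_mutex_t lock;
        pthread_cond_t  waitq;
        bool finished;      /* FR_FINISHED    */
        bool pending;       /* FR_PENDING: not yet read by the server */
        bool interrupted;   /* FR_INTERRUPTED */
        int  error;
    };

    /* In the kernel these are real signals; here they are plain flags that the
     * signalling thread sets before broadcasting on req->waitq. */
    struct signals { bool any; bool fatal; };

    static int wait_for_answer(struct req *r, struct signals *sig)
    {
        pthread_mutex_lock(&r->lock);

        /* Stage 1: any signal ends the wait; remember the interruption
         * (and queue_interrupt() if the request was already sent). */
        while (!r->finished && !sig->any)
            pthread_cond_wait(&r->waitq, &r->lock);
        if (!r->finished)
            r->interrupted = true;

        /* Stage 2: only a fatal signal ends the wait; a request the server
         * has never dequeued can simply be failed right here. */
        while (!r->finished && !sig->fatal)
            pthread_cond_wait(&r->waitq, &r->lock);
        if (!r->finished && r->pending) {
            pthread_mutex_unlock(&r->lock);
            return -EINTR;
        }

        /* Stage 3: the server owns the request now; wait it out. */
        while (!r->finished)
            pthread_cond_wait(&r->waitq, &r->lock);

        pthread_mutex_unlock(&r->lock);
        return r->error;
    }
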
568 static void __fuse_request_send(struct fuse_req *req) in __fuse_request_send() argument
570 struct fuse_iqueue *fiq = &req->fm->fc->iq; in __fuse_request_send()
572 BUG_ON(test_bit(FR_BACKGROUND, &req->flags)); in __fuse_request_send()
576 __fuse_get_request(req); in __fuse_request_send()
577 fuse_send_one(fiq, req); in __fuse_request_send()
579 request_wait_answer(req); in __fuse_request_send()
617 static void fuse_force_creds(struct fuse_req *req) in fuse_force_creds() argument
619 struct fuse_conn *fc = req->fm->fc; in fuse_force_creds()
621 if (!req->fm->sb || req->fm->sb->s_iflags & SB_I_NOIDMAP) { in fuse_force_creds()
622 req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid()); in fuse_force_creds()
623 req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid()); in fuse_force_creds()
625 req->in.h.uid = FUSE_INVALID_UIDGID; in fuse_force_creds()
626 req->in.h.gid = FUSE_INVALID_UIDGID; in fuse_force_creds()
629 req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns); in fuse_force_creds()
632 static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args) in fuse_args_to_req() argument
634 req->in.h.opcode = args->opcode; in fuse_args_to_req()
635 req->in.h.nodeid = args->nodeid; in fuse_args_to_req()
636 req->args = args; in fuse_args_to_req()
638 req->in.h.total_extlen = args->in_args[args->ext_idx].size / 8; in fuse_args_to_req()
640 __set_bit(FR_ASYNC, &req->flags); in fuse_args_to_req()
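
fuse_args_to_req() copies the opcode and node ID out of the caller's fuse_args into the request's input header, stashes the args pointer for the copy helpers, and sets FR_ASYNC when a completion callback is attached. A tiny sketch with invented, pared-down types:

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical, pared-down versions of fuse_args and fuse_req. */
    struct args {
        uint32_t opcode;
        uint64_t nodeid;
        void   (*end)(void);     /* completion callback, if any */
    };

    struct req {
        struct { uint32_t opcode; uint64_t nodeid; } in_h;
        const struct args *args;
        bool async;              /* FR_ASYNC */
    };

    static void args_to_req(struct req *r, const struct args *a)
    {
        r->in_h.opcode = a->opcode;
        r->in_h.nodeid = a->nodeid;
        r->args  = a;
        r->async = a->end != NULL;   /* a completion callback makes it async */
    }
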
648 struct fuse_req *req; in __fuse_simple_request() local
653 req = fuse_request_alloc(fm, GFP_KERNEL | __GFP_NOFAIL); in __fuse_simple_request()
656 fuse_force_creds(req); in __fuse_simple_request()
658 __set_bit(FR_WAITING, &req->flags); in __fuse_simple_request()
659 __set_bit(FR_FORCE, &req->flags); in __fuse_simple_request()
662 req = fuse_get_req(idmap, fm, false); in __fuse_simple_request()
663 if (IS_ERR(req)) in __fuse_simple_request()
664 return PTR_ERR(req); in __fuse_simple_request()
669 fuse_args_to_req(req, args); in __fuse_simple_request()
672 __set_bit(FR_ISREPLY, &req->flags); in __fuse_simple_request()
673 __fuse_request_send(req); in __fuse_simple_request()
674 ret = req->out.h.error; in __fuse_simple_request()
679 fuse_put_request(req); in __fuse_simple_request()
686 struct fuse_req *req) in fuse_request_queue_background_uring() argument
690 req->in.h.unique = fuse_get_unique(fiq); in fuse_request_queue_background_uring()
691 req->in.h.len = sizeof(struct fuse_in_header) + in fuse_request_queue_background_uring()
692 fuse_len_args(req->args->in_numargs, in fuse_request_queue_background_uring()
693 (struct fuse_arg *) req->args->in_args); in fuse_request_queue_background_uring()
695 return fuse_uring_queue_bq_req(req); in fuse_request_queue_background_uring()
702 static int fuse_request_queue_background(struct fuse_req *req) in fuse_request_queue_background() argument
704 struct fuse_mount *fm = req->fm; in fuse_request_queue_background()
708 WARN_ON(!test_bit(FR_BACKGROUND, &req->flags)); in fuse_request_queue_background()
709 if (!test_bit(FR_WAITING, &req->flags)) { in fuse_request_queue_background()
710 __set_bit(FR_WAITING, &req->flags); in fuse_request_queue_background()
713 __set_bit(FR_ISREPLY, &req->flags); in fuse_request_queue_background()
717 return fuse_request_queue_background_uring(fc, req); in fuse_request_queue_background()
725 list_add_tail(&req->list, &fc->bg_queue); in fuse_request_queue_background()
737 struct fuse_req *req; in fuse_simple_background() local
741 req = fuse_request_alloc(fm, gfp_flags); in fuse_simple_background()
742 if (!req) in fuse_simple_background()
744 __set_bit(FR_BACKGROUND, &req->flags); in fuse_simple_background()
747 req = fuse_get_req(&invalid_mnt_idmap, fm, true); in fuse_simple_background()
748 if (IS_ERR(req)) in fuse_simple_background()
749 return PTR_ERR(req); in fuse_simple_background()
752 fuse_args_to_req(req, args); in fuse_simple_background()
754 if (!fuse_request_queue_background(req)) { in fuse_simple_background()
755 fuse_put_request(req); in fuse_simple_background()
766 struct fuse_req *req; in fuse_simple_notify_reply() local
769 req = fuse_get_req(&invalid_mnt_idmap, fm, false); in fuse_simple_notify_reply()
770 if (IS_ERR(req)) in fuse_simple_notify_reply()
771 return PTR_ERR(req); in fuse_simple_notify_reply()
773 __clear_bit(FR_ISREPLY, &req->flags); in fuse_simple_notify_reply()
774 req->in.h.unique = unique; in fuse_simple_notify_reply()
776 fuse_args_to_req(req, args); in fuse_simple_notify_reply()
778 fuse_send_one(fiq, req); in fuse_simple_notify_reply()
788 static int lock_request(struct fuse_req *req) in lock_request() argument
791 if (req) { in lock_request()
792 spin_lock(&req->waitq.lock); in lock_request()
793 if (test_bit(FR_ABORTED, &req->flags)) in lock_request()
796 set_bit(FR_LOCKED, &req->flags); in lock_request()
797 spin_unlock(&req->waitq.lock); in lock_request()
806 static int unlock_request(struct fuse_req *req) in unlock_request() argument
809 if (req) { in unlock_request()
810 spin_lock(&req->waitq.lock); in unlock_request()
811 if (test_bit(FR_ABORTED, &req->flags)) in unlock_request()
814 clear_bit(FR_LOCKED, &req->flags); in unlock_request()
815 spin_unlock(&req->waitq.lock); in unlock_request()
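
lock_request()/unlock_request() bracket every copy into or out of a request's pages: FR_LOCKED is flipped under the request's waitqueue lock, and if FR_ABORTED has already been set the copy is refused with -ENOENT, so an abort can never race with a half-finished copy. A small sketch of that handshake:

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>

    struct req {
        pthread_mutex_t lock;   /* req->waitq.lock in the kernel */
        bool aborted;           /* FR_ABORTED */
        bool locked;            /* FR_LOCKED  */
    };

    /* Take the "copy in progress" lock; fails if the request was aborted. */
    static int lock_request(struct req *r)
    {
        int err = 0;

        if (r) {
            pthread_mutex_lock(&r->lock);
            if (r->aborted)
                err = -ENOENT;
            else
                r->locked = true;
            pthread_mutex_unlock(&r->lock);
        }
        return err;
    }

    static int unlock_request(struct req *r)
    {
        int err = 0;

        if (r) {
            pthread_mutex_lock(&r->lock);
            if (r->aborted)
                err = -ENOENT;
            else
                r->locked = false;
            pthread_mutex_unlock(&r->lock);
        }
        return err;
    }
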
856 err = unlock_request(cs->req); in fuse_copy_fill()
906 return lock_request(cs->req); in fuse_copy_fill()
967 err = unlock_request(cs->req); in fuse_try_move_folio()
1025 spin_lock(&cs->req->waitq.lock); in fuse_try_move_folio()
1026 if (test_bit(FR_ABORTED, &cs->req->flags)) in fuse_try_move_folio()
1030 spin_unlock(&cs->req->waitq.lock); in fuse_try_move_folio()
1055 err = lock_request(cs->req); in fuse_try_move_folio()
1072 err = unlock_request(cs->req); in fuse_ref_folio()
1115 if (cs->req->args->user_pages) { in fuse_copy_folio()
1160 struct fuse_req *req = cs->req; in fuse_copy_folios() local
1161 struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args); in fuse_copy_folios()
1230 size_t nbytes, struct fuse_req *req) in fuse_read_interrupt() argument
1238 list_del_init(&req->intr_entry); in fuse_read_interrupt()
1243 ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT); in fuse_read_interrupt()
1244 arg.unique = req->in.h.unique; in fuse_read_interrupt()
1389 struct fuse_req *req; in fuse_dev_do_read() local
1433 req = list_entry(fiq->interrupts.next, struct fuse_req, in fuse_dev_do_read()
1435 return fuse_read_interrupt(fiq, cs, nbytes, req); in fuse_dev_do_read()
1446 req = list_entry(fiq->pending.next, struct fuse_req, list); in fuse_dev_do_read()
1447 clear_bit(FR_PENDING, &req->flags); in fuse_dev_do_read()
1448 list_del_init(&req->list); in fuse_dev_do_read()
1451 args = req->args; in fuse_dev_do_read()
1452 reqsize = req->in.h.len; in fuse_dev_do_read()
1456 req->out.h.error = -EIO; in fuse_dev_do_read()
1459 req->out.h.error = -E2BIG; in fuse_dev_do_read()
1460 fuse_request_end(req); in fuse_dev_do_read()
1469 req->out.h.error = err = -ECONNABORTED; in fuse_dev_do_read()
1473 list_add(&req->list, &fpq->io); in fuse_dev_do_read()
1475 cs->req = req; in fuse_dev_do_read()
1476 err = fuse_copy_one(cs, &req->in.h, sizeof(req->in.h)); in fuse_dev_do_read()
1482 clear_bit(FR_LOCKED, &req->flags); in fuse_dev_do_read()
1488 req->out.h.error = -EIO; in fuse_dev_do_read()
1491 if (!test_bit(FR_ISREPLY, &req->flags)) { in fuse_dev_do_read()
1495 hash = fuse_req_hash(req->in.h.unique); in fuse_dev_do_read()
1496 list_move_tail(&req->list, &fpq->processing[hash]); in fuse_dev_do_read()
1497 __fuse_get_request(req); in fuse_dev_do_read()
1498 set_bit(FR_SENT, &req->flags); in fuse_dev_do_read()
1502 if (test_bit(FR_INTERRUPTED, &req->flags)) in fuse_dev_do_read()
1503 queue_interrupt(req); in fuse_dev_do_read()
1504 fuse_put_request(req); in fuse_dev_do_read()
1509 if (!test_bit(FR_PRIVATE, &req->flags)) in fuse_dev_do_read()
1510 list_del_init(&req->list); in fuse_dev_do_read()
1512 fuse_request_end(req); in fuse_dev_do_read()
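
fuse_dev_do_read() is the daemon-facing dequeue path: queued interrupts go out first, then the oldest pending request is unlinked, FR_PENDING is cleared, the reader's buffer is checked against in.h.len, the header and arguments are copied out, and the request is parked on the processing hash keyed by its unique ID with FR_SENT set. A compressed single-threaded sketch of that dequeue-copy-park sequence (the real copy, the locking and the blocking wait are elided):

    #include <errno.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define NUM_HASH 256

    struct req {
        struct req *next;
        uint64_t    unique;
        size_t      len;        /* in.h.len: header + argument bytes */
        const void *payload;
        bool        pending, sent;
        int         error;
    };

    struct dev_queues {
        struct req *pending;               /* FIFO of not-yet-read requests */
        struct req *processing[NUM_HASH];  /* awaiting a reply, keyed by unique */
    };

    /* Hand the oldest pending request to the daemon's read buffer.
     * Returns bytes copied, or 0 if nothing is pending. */
    static long dev_read_one(struct dev_queues *q, void *buf, size_t bufsize)
    {
        struct req *r;

        while ((r = q->pending)) {
            q->pending = r->next;
            r->pending = false;

            if (r->len > bufsize) {
                /* Daemon buffer too small: fail this request and move on to
                 * the next, like the kernel's restart (it uses -EIO, or
                 * -E2BIG for an oversized SETXATTR); request_end() would
                 * run here. */
                r->error = -EIO;
                continue;
            }

            memcpy(buf, r->payload, r->len);

            /* park on the processing table until the reply arrives */
            r->next = q->processing[r->unique % NUM_HASH];
            q->processing[r->unique % NUM_HASH] = r;
            r->sent = true;
            return (long)r->len;
        }
        return 0;    /* nothing pending (the kernel would block instead) */
    }
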
1987 struct fuse_req *req, *next; in fuse_resend() local
2008 list_for_each_entry_safe(req, next, &to_queue, list) { in fuse_resend()
2009 set_bit(FR_PENDING, &req->flags); in fuse_resend()
2010 clear_bit(FR_SENT, &req->flags); in fuse_resend()
2012 req->in.h.unique |= FUSE_UNIQUE_RESEND; in fuse_resend()
2018 list_for_each_entry(req, &to_queue, list) in fuse_resend()
2019 clear_bit(FR_PENDING, &req->flags); in fuse_resend()
2088 struct fuse_req *req; in fuse_request_find() local
2090 list_for_each_entry(req, &fpq->processing[hash], list) { in fuse_request_find()
2091 if (req->in.h.unique == unique) in fuse_request_find()
2092 return req; in fuse_request_find()
2138 struct fuse_req *req; in fuse_dev_do_write() local
2167 req = NULL; in fuse_dev_do_write()
2169 req = fuse_request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT); in fuse_dev_do_write()
2172 if (!req) { in fuse_dev_do_write()
2179 __fuse_get_request(req); in fuse_dev_do_write()
2188 err = queue_interrupt(req); in fuse_dev_do_write()
2190 fuse_put_request(req); in fuse_dev_do_write()
2195 clear_bit(FR_SENT, &req->flags); in fuse_dev_do_write()
2196 list_move(&req->list, &fpq->io); in fuse_dev_do_write()
2197 req->out.h = oh; in fuse_dev_do_write()
2198 set_bit(FR_LOCKED, &req->flags); in fuse_dev_do_write()
2200 cs->req = req; in fuse_dev_do_write()
2201 if (!req->args->page_replace) in fuse_dev_do_write()
2207 err = fuse_copy_out_args(cs, req->args, nbytes); in fuse_dev_do_write()
2211 clear_bit(FR_LOCKED, &req->flags); in fuse_dev_do_write()
2215 req->out.h.error = -EIO; in fuse_dev_do_write()
2216 if (!test_bit(FR_PRIVATE, &req->flags)) in fuse_dev_do_write()
2217 list_del_init(&req->list); in fuse_dev_do_write()
2220 fuse_request_end(req); in fuse_dev_do_write()
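
fuse_dev_do_write() is the reply path: the unique from the daemon's out header (with the interrupt bit masked off) is looked up on the processing hash by fuse_request_find(), the reply and its arguments are copied into the request, and fuse_request_end() completes it. A compact lookup-and-complete sketch reusing the hash layout from the read sketch above (locking, interrupt replies and the argument copy are omitted):

    #include <stddef.h>
    #include <stdint.h>

    #define NUM_HASH 256

    struct req {
        struct req *next;
        uint64_t    unique;
        int         error;      /* becomes out.h.error */
        int         finished;
    };

    struct dev_queues {
        struct req *processing[NUM_HASH];
    };

    static struct req *request_find(struct dev_queues *q, uint64_t unique)
    {
        struct req *r;

        for (r = q->processing[unique % NUM_HASH]; r; r = r->next)
            if (r->unique == unique)
                return r;
        return NULL;             /* unknown unique: the kernel drops the reply */
    }

    /* Complete one reply from the daemon. */
    static int dev_write_reply(struct dev_queues *q, uint64_t unique, int error)
    {
        struct req *r = request_find(q, unique);

        if (!r)
            return -1;           /* -ENOENT in the kernel */
        /* the kernel also unlinks the request from the processing list here */
        r->error = error;        /* copy of out.h (and then the out args) */
        r->finished = 1;         /* fuse_request_end(r) in the kernel */
        return 0;
    }
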
2364 struct fuse_req *req; in fuse_dev_end_requests() local
2365 req = list_entry(head->next, struct fuse_req, list); in fuse_dev_end_requests()
2366 req->out.h.error = -ECONNABORTED; in fuse_dev_end_requests()
2367 clear_bit(FR_SENT, &req->flags); in fuse_dev_end_requests()
2368 list_del_init(&req->list); in fuse_dev_end_requests()
2369 fuse_request_end(req); in fuse_dev_end_requests()
2413 struct fuse_req *req, *next; in fuse_abort_conn() local
2431 list_for_each_entry_safe(req, next, &fpq->io, list) { in fuse_abort_conn()
2432 req->out.h.error = -ECONNABORTED; in fuse_abort_conn()
2433 spin_lock(&req->waitq.lock); in fuse_abort_conn()
2434 set_bit(FR_ABORTED, &req->flags); in fuse_abort_conn()
2435 if (!test_bit(FR_LOCKED, &req->flags)) { in fuse_abort_conn()
2436 set_bit(FR_PRIVATE, &req->flags); in fuse_abort_conn()
2437 __fuse_get_request(req); in fuse_abort_conn()
2438 list_move(&req->list, &to_end); in fuse_abort_conn()
2440 spin_unlock(&req->waitq.lock); in fuse_abort_conn()
2455 list_for_each_entry(req, &fiq->pending, list) in fuse_abort_conn()
2456 clear_bit(FR_PENDING, &req->flags); in fuse_abort_conn()
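
On abort every queued request is failed the same way: fuse_dev_end_requests() pops each entry, sets out.h.error to -ECONNABORTED, clears FR_SENT and ends it, while fuse_abort_conn() first marks in-flight FR_LOCKED requests FR_ABORTED so a concurrent copy backs off before they are ended. A small sketch of the drain loop (single list, no locking):

    #include <errno.h>
    #include <stddef.h>

    struct req {
        struct req *next;
        int         error;
        int         sent;
        void      (*end)(struct req *);   /* completion, e.g. request_end() */
    };

    /* Fail and complete every request still sitting on a list. */
    static void end_requests(struct req *head)
    {
        while (head) {
            struct req *r = head;

            head = r->next;
            r->next  = NULL;
            r->error = -ECONNABORTED;
            r->sent  = 0;
            if (r->end)
                r->end(r);
        }
    }
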