Lines matching refs:queue (cross-reference of the struct fuse_ring_queue uses in the FUSE io_uring code, grouped by containing function)
50 static void fuse_uring_flush_bg(struct fuse_ring_queue *queue) in fuse_uring_flush_bg() argument
52 struct fuse_ring *ring = queue->ring; in fuse_uring_flush_bg()
55 lockdep_assert_held(&queue->lock); in fuse_uring_flush_bg()
65 !queue->active_background) && in fuse_uring_flush_bg()
66 (!list_empty(&queue->fuse_req_bg_queue))) { in fuse_uring_flush_bg()
69 req = list_first_entry(&queue->fuse_req_bg_queue, in fuse_uring_flush_bg()
72 queue->active_background++; in fuse_uring_flush_bg()
74 list_move_tail(&req->list, &queue->fuse_req_queue); in fuse_uring_flush_bg()
81 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_req_end() local
82 struct fuse_ring *ring = queue->ring; in fuse_uring_req_end()
85 lockdep_assert_not_held(&queue->lock); in fuse_uring_req_end()
86 spin_lock(&queue->lock); in fuse_uring_req_end()
89 queue->active_background--; in fuse_uring_req_end()
91 fuse_uring_flush_bg(queue); in fuse_uring_req_end()
95 spin_unlock(&queue->lock); in fuse_uring_req_end()
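Refs 50-95 above (fuse_uring_flush_bg() and fuse_uring_req_end()) describe the per-queue background-credit scheme: under queue->lock, waiting requests are promoted from fuse_req_bg_queue to fuse_req_queue for as long as the budget allows, and every completion drops queue->active_background and re-runs the flush so the next waiter moves up. A minimal sketch of that flow; the connection-side counters (fc->active_background, fc->max_background) and the ring->fc link do not appear in this listing and are assumptions, and the extra locking around the connection-wide accounting is omitted.

/*
 * Sketch of the background flush implied by the refs above.
 * Caller holds queue->lock; the connection-wide accounting normally
 * needs its own lock as well, which is omitted here.
 */
static void sketch_flush_bg(struct fuse_ring_queue *queue)
{
	struct fuse_ring *ring = queue->ring;
	struct fuse_conn *fc = ring->fc;	/* assumed, not in this listing */

	lockdep_assert_held(&queue->lock);

	/* Promote background requests while the global budget allows;
	 * each queue keeps at least one in-flight background request
	 * even when the global limit has been reached. */
	while ((fc->active_background < fc->max_background ||
		!queue->active_background) &&
	       !list_empty(&queue->fuse_req_bg_queue)) {
		struct fuse_req *req;

		req = list_first_entry(&queue->fuse_req_bg_queue,
				       struct fuse_req, list);
		fc->active_background++;
		queue->active_background++;
		list_move_tail(&req->list, &queue->fuse_req_queue);
	}
}

On the completion side (fuse_uring_req_end, refs 81-95) the pattern is simply: take queue->lock, decrement queue->active_background, flush again, drop the lock.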
105 static void fuse_uring_abort_end_queue_requests(struct fuse_ring_queue *queue) in fuse_uring_abort_end_queue_requests() argument
110 spin_lock(&queue->lock); in fuse_uring_abort_end_queue_requests()
111 list_for_each_entry(req, &queue->fuse_req_queue, list) in fuse_uring_abort_end_queue_requests()
113 list_splice_init(&queue->fuse_req_queue, &req_list); in fuse_uring_abort_end_queue_requests()
114 spin_unlock(&queue->lock); in fuse_uring_abort_end_queue_requests()
123 struct fuse_ring_queue *queue; in fuse_uring_abort_end_requests() local
127 queue = READ_ONCE(ring->queues[qid]); in fuse_uring_abort_end_requests()
128 if (!queue) in fuse_uring_abort_end_requests()
131 queue->stopped = true; in fuse_uring_abort_end_requests()
134 spin_lock(&queue->lock); in fuse_uring_abort_end_requests()
136 fuse_uring_flush_bg(queue); in fuse_uring_abort_end_requests()
138 spin_unlock(&queue->lock); in fuse_uring_abort_end_requests()
139 fuse_uring_abort_end_queue_requests(queue); in fuse_uring_abort_end_requests()
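The abort path above (refs 105-139) uses the usual splice-then-complete pattern: after each queue is marked stopped and its background list flushed, pending requests are detached from fuse_req_queue under queue->lock with list_splice_init() and only completed once the lock has been dropped. A condensed sketch; sketch_end_requests() is a stand-in for the real completion helper, which is not visible in this listing, and the per-request bookkeeping done while walking the list is elided.

/* Stand-in for the completion helper invoked after the splice. */
static void sketch_end_requests(struct list_head *req_list);

static void sketch_abort_end_queue_requests(struct fuse_ring_queue *queue)
{
	LIST_HEAD(req_list);

	spin_lock(&queue->lock);
	/* (the real code also walks fuse_req_queue here for per-request
	 *  bookkeeping before splicing it away) */
	list_splice_init(&queue->fuse_req_queue, &req_list);
	spin_unlock(&queue->lock);

	/* complete everything with no spinlock held */
	sketch_end_requests(&req_list);
}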
161 struct fuse_ring_queue *queue; in fuse_uring_request_expired() local
168 queue = READ_ONCE(ring->queues[qid]); in fuse_uring_request_expired()
169 if (!queue) in fuse_uring_request_expired()
172 spin_lock(&queue->lock); in fuse_uring_request_expired()
173 if (fuse_request_expired(fc, &queue->fuse_req_queue) || in fuse_uring_request_expired()
174 fuse_request_expired(fc, &queue->fuse_req_bg_queue) || in fuse_uring_request_expired()
175 ent_list_request_expired(fc, &queue->ent_w_req_queue) || in fuse_uring_request_expired()
176 ent_list_request_expired(fc, &queue->ent_in_userspace)) { in fuse_uring_request_expired()
177 spin_unlock(&queue->lock); in fuse_uring_request_expired()
180 spin_unlock(&queue->lock); in fuse_uring_request_expired()
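Refs 161-180 show the timeout scan: each queue is taken in turn, and under queue->lock all four lists that can hold a request at some point of its life are checked; the scan stops at the first expired request. A condensed per-queue sketch using only the helpers named in the listing (the surrounding qid loop and the return type are assumptions):

/* Sketch: report whether any request on this queue has timed out. */
static bool sketch_queue_request_expired(struct fuse_conn *fc,
					 struct fuse_ring_queue *queue)
{
	bool expired;

	spin_lock(&queue->lock);
	expired = fuse_request_expired(fc, &queue->fuse_req_queue) ||
		  fuse_request_expired(fc, &queue->fuse_req_bg_queue) ||
		  ent_list_request_expired(fc, &queue->ent_w_req_queue) ||
		  ent_list_request_expired(fc, &queue->ent_in_userspace);
	spin_unlock(&queue->lock);

	return expired;
}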
195 struct fuse_ring_queue *queue = ring->queues[qid]; in fuse_uring_destruct() local
198 if (!queue) in fuse_uring_destruct()
201 WARN_ON(!list_empty(&queue->ent_avail_queue)); in fuse_uring_destruct()
202 WARN_ON(!list_empty(&queue->ent_w_req_queue)); in fuse_uring_destruct()
203 WARN_ON(!list_empty(&queue->ent_commit_queue)); in fuse_uring_destruct()
204 WARN_ON(!list_empty(&queue->ent_in_userspace)); in fuse_uring_destruct()
206 list_for_each_entry_safe(ent, next, &queue->ent_released, in fuse_uring_destruct()
212 kfree(queue->fpq.processing); in fuse_uring_destruct()
213 kfree(queue); in fuse_uring_destruct()
272 struct fuse_ring_queue *queue; in fuse_uring_create_queue() local
275 queue = kzalloc(sizeof(*queue), GFP_KERNEL_ACCOUNT); in fuse_uring_create_queue()
276 if (!queue) in fuse_uring_create_queue()
280 kfree(queue); in fuse_uring_create_queue()
284 queue->qid = qid; in fuse_uring_create_queue()
285 queue->ring = ring; in fuse_uring_create_queue()
286 spin_lock_init(&queue->lock); in fuse_uring_create_queue()
288 INIT_LIST_HEAD(&queue->ent_avail_queue); in fuse_uring_create_queue()
289 INIT_LIST_HEAD(&queue->ent_commit_queue); in fuse_uring_create_queue()
290 INIT_LIST_HEAD(&queue->ent_w_req_queue); in fuse_uring_create_queue()
291 INIT_LIST_HEAD(&queue->ent_in_userspace); in fuse_uring_create_queue()
292 INIT_LIST_HEAD(&queue->fuse_req_queue); in fuse_uring_create_queue()
293 INIT_LIST_HEAD(&queue->fuse_req_bg_queue); in fuse_uring_create_queue()
294 INIT_LIST_HEAD(&queue->ent_released); in fuse_uring_create_queue()
296 queue->fpq.processing = pq; in fuse_uring_create_queue()
297 fuse_pqueue_init(&queue->fpq); in fuse_uring_create_queue()
302 kfree(queue->fpq.processing); in fuse_uring_create_queue()
303 kfree(queue); in fuse_uring_create_queue()
310 WRITE_ONCE(ring->queues[qid], queue); in fuse_uring_create_queue()
313 return queue; in fuse_uring_create_queue()
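The initialisers in fuse_uring_create_queue() (refs 272-313), together with the teardown checks in fuse_uring_destruct() (refs 195-213), pin down most of struct fuse_ring_queue. A sketch of the structure as implied by this listing; the scalar types, field order, and any members not referenced here are assumptions:

/* Sketch of struct fuse_ring_queue, reconstructed from the refs above. */
struct fuse_ring_queue_sketch {
	unsigned int		qid;			/* index into ring->queues[] */
	struct fuse_ring	*ring;			/* owning ring */
	spinlock_t		lock;			/* protects all lists below */

	/* ring-entry lifecycle lists */
	struct list_head	ent_avail_queue;	/* idle entries, waiting for a request */
	struct list_head	ent_w_req_queue;	/* entry has a request, not yet sent */
	struct list_head	ent_in_userspace;	/* entry currently owned by the daemon */
	struct list_head	ent_commit_queue;	/* daemon committed a result */
	struct list_head	ent_released;		/* torn-down entries, freed in destruct */

	/* fuse request lists */
	struct list_head	fuse_req_queue;		/* foreground and promoted bg requests */
	struct list_head	fuse_req_bg_queue;	/* background requests awaiting budget */

	struct fuse_pqueue	fpq;			/* fpq.processing is allocated separately */
	unsigned int		active_background;	/* bg requests currently in flight */
	bool			stopped;		/* set on abort/teardown */
};

Creation allocates the queue with kzalloc(GFP_KERNEL_ACCOUNT), initialises every list head and the spinlock, installs the separately allocated fpq.processing array, and only then publishes the queue via WRITE_ONCE(ring->queues[qid], queue), which is why lookups elsewhere in the listing pair it with READ_ONCE().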
331 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_entry_teardown() local
333 spin_lock(&queue->lock); in fuse_uring_entry_teardown()
349 list_move(&ent->list, &queue->ent_released); in fuse_uring_entry_teardown()
351 spin_unlock(&queue->lock); in fuse_uring_entry_teardown()
361 struct fuse_ring_queue *queue, in fuse_uring_stop_list_entries() argument
364 struct fuse_ring *ring = queue->ring; in fuse_uring_stop_list_entries()
369 spin_lock(&queue->lock); in fuse_uring_stop_list_entries()
373 queue->qid, ent->state, exp_state); in fuse_uring_stop_list_entries()
380 spin_unlock(&queue->lock); in fuse_uring_stop_list_entries()
390 static void fuse_uring_teardown_entries(struct fuse_ring_queue *queue) in fuse_uring_teardown_entries() argument
392 fuse_uring_stop_list_entries(&queue->ent_in_userspace, queue, in fuse_uring_teardown_entries()
394 fuse_uring_stop_list_entries(&queue->ent_avail_queue, queue, in fuse_uring_teardown_entries()
407 struct fuse_ring_queue *queue = ring->queues[qid]; in fuse_uring_log_ent_state() local
409 if (!queue) in fuse_uring_log_ent_state()
412 spin_lock(&queue->lock); in fuse_uring_log_ent_state()
417 list_for_each_entry(ent, &queue->ent_w_req_queue, list) { in fuse_uring_log_ent_state()
421 list_for_each_entry(ent, &queue->ent_commit_queue, list) { in fuse_uring_log_ent_state()
425 spin_unlock(&queue->lock); in fuse_uring_log_ent_state()
438 struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]); in fuse_uring_async_stop_queues() local
440 if (!queue) in fuse_uring_async_stop_queues()
443 fuse_uring_teardown_entries(queue); in fuse_uring_async_stop_queues()
473 struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]); in fuse_uring_stop_queues() local
475 if (!queue) in fuse_uring_stop_queues()
478 fuse_uring_teardown_entries(queue); in fuse_uring_stop_queues()
502 struct fuse_ring_queue *queue; in fuse_uring_cancel() local
509 queue = ent->queue; in fuse_uring_cancel()
510 spin_lock(&queue->lock); in fuse_uring_cancel()
513 list_move_tail(&ent->list, &queue->ent_in_userspace); in fuse_uring_cancel()
517 spin_unlock(&queue->lock); in fuse_uring_cancel()
665 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_copy_to_ring() local
666 struct fuse_ring *ring = queue->ring; in fuse_uring_copy_to_ring()
672 queue->qid, ent, ent->state); in fuse_uring_copy_to_ring()
721 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_send_next_to_ring() local
729 spin_lock(&queue->lock); in fuse_uring_send_next_to_ring()
733 list_move_tail(&ent->list, &queue->ent_in_userspace); in fuse_uring_send_next_to_ring()
734 spin_unlock(&queue->lock); in fuse_uring_send_next_to_ring()
744 struct fuse_ring_queue *queue) in fuse_uring_ent_avail() argument
747 list_move(&ent->list, &queue->ent_avail_queue); in fuse_uring_ent_avail()
755 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_add_to_pq() local
756 struct fuse_pqueue *fpq = &queue->fpq; in fuse_uring_add_to_pq()
770 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_add_req_to_ring_ent() local
772 lockdep_assert_held(&queue->lock); in fuse_uring_add_req_to_ring_ent()
776 pr_warn("%s qid=%d state=%d\n", __func__, ent->queue->qid, in fuse_uring_add_req_to_ring_ent()
783 list_move_tail(&ent->list, &queue->ent_w_req_queue); in fuse_uring_add_req_to_ring_ent()
789 __must_hold(&queue->lock) in fuse_uring_ent_assign_req()
792 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_ent_assign_req() local
793 struct list_head *req_queue = &queue->fuse_req_queue; in fuse_uring_ent_assign_req()
795 lockdep_assert_held(&queue->lock); in fuse_uring_ent_assign_req()
813 struct fuse_ring *ring = ent->queue->ring; in fuse_uring_commit()
839 struct fuse_ring_queue *queue, in fuse_uring_next_fuse_req() argument
846 spin_lock(&queue->lock); in fuse_uring_next_fuse_req()
847 fuse_uring_ent_avail(ent, queue); in fuse_uring_next_fuse_req()
849 spin_unlock(&queue->lock); in fuse_uring_next_fuse_req()
860 struct fuse_ring_queue *queue = ent->queue; in fuse_ring_ent_set_commit() local
862 lockdep_assert_held(&queue->lock); in fuse_ring_ent_set_commit()
868 list_move(&ent->list, &queue->ent_commit_queue); in fuse_ring_ent_set_commit()
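Refs 744-868 describe how a ring entry migrates between the per-queue lists as its state changes: ent_avail_queue while idle, ent_w_req_queue once a fuse request has been attached, ent_in_userspace while the daemon owns it, ent_commit_queue when a commit arrives, and ent_released during teardown. Each transition is a plain list_move()/list_move_tail() under queue->lock. Two of those transitions as a minimal sketch (the request field on the entry is not shown in this listing and is only hinted at in a comment):

/* Sketch: stage an idle entry for sending once a request is attached
 * (cf. fuse_uring_add_req_to_ring_ent). */
static void sketch_attach_req(struct fuse_ring_ent *ent, struct fuse_req *req)
{
	struct fuse_ring_queue *queue = ent->queue;

	lockdep_assert_held(&queue->lock);
	/* record req on the entry here (field not part of this listing) */
	list_move_tail(&ent->list, &queue->ent_w_req_queue);
}

/* Sketch: park the entry on the commit list when the daemon answers
 * (cf. fuse_ring_ent_set_commit). */
static void sketch_set_commit(struct fuse_ring_ent *ent)
{
	struct fuse_ring_queue *queue = ent->queue;

	lockdep_assert_held(&queue->lock);
	list_move(&ent->list, &queue->ent_commit_queue);
}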
881 struct fuse_ring_queue *queue; in fuse_uring_commit_fetch() local
894 queue = ring->queues[qid]; in fuse_uring_commit_fetch()
895 if (!queue) in fuse_uring_commit_fetch()
897 fpq = &queue->fpq; in fuse_uring_commit_fetch()
899 if (!READ_ONCE(fc->connected) || READ_ONCE(queue->stopped)) in fuse_uring_commit_fetch()
902 spin_lock(&queue->lock); in fuse_uring_commit_fetch()
911 pr_info("qid=%d commit_id %llu not found\n", queue->qid, in fuse_uring_commit_fetch()
913 spin_unlock(&queue->lock); in fuse_uring_commit_fetch()
923 queue->qid, commit_id, ent->state); in fuse_uring_commit_fetch()
924 spin_unlock(&queue->lock); in fuse_uring_commit_fetch()
932 spin_unlock(&queue->lock); in fuse_uring_commit_fetch()
944 fuse_uring_next_fuse_req(ent, queue, issue_flags); in fuse_uring_commit_fetch()
951 struct fuse_ring_queue *queue; in is_ring_ready() local
958 queue = ring->queues[qid]; in is_ring_ready()
959 if (!queue) { in is_ring_ready()
964 spin_lock(&queue->lock); in is_ring_ready()
965 if (list_empty(&queue->ent_avail_queue)) in is_ring_ready()
967 spin_unlock(&queue->lock); in is_ring_ready()
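is_ring_ready() (refs 951-967) decides when requests may be routed over io_uring: every ring->queues[qid] must exist and have at least one idle entry parked on ent_avail_queue, checked under the queue lock. A sketch of that check; the queue count (nr_queues here) is not part of this listing and is an assumed field name:

/* Sketch: the ring is usable only once every queue has a registered,
 * currently idle entry. */
static bool sketch_is_ring_ready(struct fuse_ring *ring)
{
	unsigned int qid;

	for (qid = 0; qid < ring->nr_queues; qid++) {	/* nr_queues assumed */
		struct fuse_ring_queue *queue = READ_ONCE(ring->queues[qid]);
		bool has_avail;

		if (!queue)
			return false;

		spin_lock(&queue->lock);
		has_avail = !list_empty(&queue->ent_avail_queue);
		spin_unlock(&queue->lock);

		if (!has_avail)
			return false;
	}

	return true;
}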
980 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_do_register() local
981 struct fuse_ring *ring = queue->ring; in fuse_uring_do_register()
987 spin_lock(&queue->lock); in fuse_uring_do_register()
989 fuse_uring_ent_avail(ent, queue); in fuse_uring_do_register()
990 spin_unlock(&queue->lock); in fuse_uring_do_register()
993 bool ready = is_ring_ready(ring, queue->qid); in fuse_uring_do_register()
1031 struct fuse_ring_queue *queue) in fuse_uring_create_ring_ent() argument
1033 struct fuse_ring *ring = queue->ring; in fuse_uring_create_ring_ent()
1066 ent->queue = queue; in fuse_uring_create_ring_ent()
1083 struct fuse_ring_queue *queue; in fuse_uring_register() local
1100 queue = ring->queues[qid]; in fuse_uring_register()
1101 if (!queue) { in fuse_uring_register()
1102 queue = fuse_uring_create_queue(ring, qid); in fuse_uring_register()
1103 if (!queue) in fuse_uring_register()
1112 ent = fuse_uring_create_ring_ent(cmd, queue); in fuse_uring_register()
1195 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_send() local
1197 spin_lock(&queue->lock); in fuse_uring_send()
1199 list_move_tail(&ent->list, &queue->ent_in_userspace); in fuse_uring_send()
1201 spin_unlock(&queue->lock); in fuse_uring_send()
1215 struct fuse_ring_queue *queue = ent->queue; in fuse_uring_send_in_task() local
1221 fuse_uring_next_fuse_req(ent, queue, issue_flags); in fuse_uring_send_in_task()
1234 struct fuse_ring_queue *queue; in fuse_uring_task_to_queue() local
1243 queue = ring->queues[qid]; in fuse_uring_task_to_queue()
1244 WARN_ONCE(!queue, "Missing queue for qid %d\n", qid); in fuse_uring_task_to_queue()
1246 return queue; in fuse_uring_task_to_queue()
1262 struct fuse_ring_queue *queue; in fuse_uring_queue_fuse_req() local
1267 queue = fuse_uring_task_to_queue(ring); in fuse_uring_queue_fuse_req()
1268 if (!queue) in fuse_uring_queue_fuse_req()
1274 spin_lock(&queue->lock); in fuse_uring_queue_fuse_req()
1276 if (unlikely(queue->stopped)) in fuse_uring_queue_fuse_req()
1280 req->ring_queue = queue; in fuse_uring_queue_fuse_req()
1281 ent = list_first_entry_or_null(&queue->ent_avail_queue, in fuse_uring_queue_fuse_req()
1286 list_add_tail(&req->list, &queue->fuse_req_queue); in fuse_uring_queue_fuse_req()
1287 spin_unlock(&queue->lock); in fuse_uring_queue_fuse_req()
1295 spin_unlock(&queue->lock); in fuse_uring_queue_fuse_req()
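The foreground submission path (refs 1234-1295) maps the submitting task to a queue via fuse_uring_task_to_queue(), then under queue->lock: bails out if the queue has been stopped, records the queue on the request, and either hands the request to the first idle ring entry or appends it to fuse_req_queue until an entry becomes free. A condensed sketch; the entry-assignment and wake-up steps that happen after the unlock are not part of this listing and are represented only by a comment:

/* Sketch of the foreground submission flow implied by the refs above. */
static bool sketch_queue_fuse_req(struct fuse_ring *ring, struct fuse_req *req)
{
	struct fuse_ring_queue *queue;
	struct fuse_ring_ent *ent;

	queue = fuse_uring_task_to_queue(ring);
	if (!queue)
		return false;	/* caller fails the request */

	spin_lock(&queue->lock);
	if (unlikely(queue->stopped)) {
		spin_unlock(&queue->lock);
		return false;
	}

	req->ring_queue = queue;
	ent = list_first_entry_or_null(&queue->ent_avail_queue,
				       struct fuse_ring_ent, list);
	if (ent) {
		/* attach req to the idle entry and send it to the daemon;
		 * those steps are outside this listing */
	} else {
		/* no idle entry: park the request until one comes back */
		list_add_tail(&req->list, &queue->fuse_req_queue);
	}
	spin_unlock(&queue->lock);

	return true;
}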
1306 struct fuse_ring_queue *queue; in fuse_uring_queue_bq_req() local
1309 queue = fuse_uring_task_to_queue(ring); in fuse_uring_queue_bq_req()
1310 if (!queue) in fuse_uring_queue_bq_req()
1313 spin_lock(&queue->lock); in fuse_uring_queue_bq_req()
1314 if (unlikely(queue->stopped)) { in fuse_uring_queue_bq_req()
1315 spin_unlock(&queue->lock); in fuse_uring_queue_bq_req()
1320 req->ring_queue = queue; in fuse_uring_queue_bq_req()
1321 list_add_tail(&req->list, &queue->fuse_req_bg_queue); in fuse_uring_queue_bq_req()
1323 ent = list_first_entry_or_null(&queue->ent_avail_queue, in fuse_uring_queue_bq_req()
1329 fuse_uring_flush_bg(queue); in fuse_uring_queue_bq_req()
1337 req = list_first_entry_or_null(&queue->fuse_req_queue, struct fuse_req, in fuse_uring_queue_bq_req()
1341 spin_unlock(&queue->lock); in fuse_uring_queue_bq_req()
1345 spin_unlock(&queue->lock); in fuse_uring_queue_bq_req()
1353 struct fuse_ring_queue *queue = req->ring_queue; in fuse_uring_remove_pending_req() local
1355 return fuse_remove_pending_req(req, &queue->lock); in fuse_uring_remove_pending_req()