Lines matching refs:rings
184 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
189 return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head);
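
The two matches above are the CQ fill-level helpers: line 184 subtracts the shared head from a privately cached tail, line 189 reads both ends from shared ring memory. A minimal userspace sketch of the same unsigned-wraparound idea, using C11 relaxed atomic loads as a stand-in for the kernel's READ_ONCE (names here are illustrative, not the kernel's):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Sketch: subtracting two free-running 32-bit counters yields the
     * number of queued entries even after the counters wrap. */
    struct cq_ring {
        _Atomic uint32_t head;   /* consumer position */
        _Atomic uint32_t tail;   /* producer position */
    };

    static uint32_t cq_events(struct cq_ring *cq)
    {
        uint32_t head = atomic_load_explicit(&cq->head, memory_order_relaxed);
        uint32_t tail = atomic_load_explicit(&cq->tail, memory_order_relaxed);
        return tail - head;      /* well-defined modulo 2^32 */
    }

    int main(void)
    {
        struct cq_ring cq;
        atomic_init(&cq.head, UINT32_MAX - 1);   /* about to wrap */
        atomic_init(&cq.tail, UINT32_MAX + 2);   /* i.e. 1 after wrap */
        printf("pending CQEs: %u\n", (unsigned)cq_events(&cq));  /* prints 3 */
        return 0;
    }
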
583 /* IOPOLL rings only need to wake up if it's also SQPOLL */
647 atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
654 if (ctx->rings)
707 struct io_rings *r = ctx->rings;
720 atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
761 struct io_rings *rings = ctx->rings;
786 ctx->cqe_cached = &rings->cqes[off];
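
Line 786 refills the cached CQE cursor: the posting path keeps a pointer into rings->cqes so consecutive completions can be filled without redoing tail arithmetic every time. A sketch of that cached-cursor pattern, with illustrative structure names alongside the real cqe_cached/cqe_sentinel idea:

    #include <stddef.h>
    #include <stdint.h>

    struct io_uring_cqe { uint64_t user_data; int32_t res; uint32_t flags; };

    /* Sketch: hand out CQE slots from a cached cursor; the caller
     * refills cursor/sentinel from the ring tail when it runs dry. */
    struct cqe_cache {
        struct io_uring_cqe *cached;     /* next free slot */
        struct io_uring_cqe *sentinel;   /* one past the last cached slot */
    };

    static struct io_uring_cqe *next_cqe(struct cqe_cache *c)
    {
        if (c->cached == c->sentinel)
            return NULL;                 /* refill needed */
        return c->cached++;
    }
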
1072 atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1245 atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1270 atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1316 atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1351 atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
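
The atomic_or()/atomic_andnot() matches above all maintain bits in the shared sq_flags word: IORING_SQ_CQ_OVERFLOW tells the application that completions were diverted to the overflow list, IORING_SQ_TASKRUN that deferred task work is pending. A userspace-flavored sketch with C11 atomics (the flag values match the io_uring UAPI; C11 has no fetch_andnot, so clearing goes through fetch_and with the complement):

    #include <stdatomic.h>
    #include <stdint.h>

    #define IORING_SQ_CQ_OVERFLOW  (1U << 1)   /* CQ ring overflowed */
    #define IORING_SQ_TASKRUN      (1U << 2)   /* task work pending */

    /* Sketch: publish a status bit to a reader on the other side of
     * the shared mapping, as the kernel does on rings->sq_flags. */
    static void sq_flags_set(_Atomic uint32_t *flags, uint32_t mask)
    {
        atomic_fetch_or_explicit(flags, mask, memory_order_release);
    }

    static void sq_flags_clear(_Atomic uint32_t *flags, uint32_t mask)
    {
        atomic_fetch_and_explicit(flags, ~mask, memory_order_relaxed);
    }
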
2318 struct io_rings *rings = ctx->rings;
2325 smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2345 WRITE_ONCE(ctx->rings->sq_dropped,
2346 READ_ONCE(ctx->rings->sq_dropped) + 1);
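
Lines 2325 and 2345-2346 sit in the SQE consumption path: once the kernel has read a batch of SQEs it publishes the new head with a release store, and malformed entries are accounted to the shared sq_dropped counter rather than silently skipped. A sketch of both halves, assuming illustrative names:

    #include <stdatomic.h>
    #include <stdint.h>

    struct sq_ring {
        _Atomic uint32_t head;      /* advanced by the consumer */
        _Atomic uint32_t dropped;   /* invalid SQEs skipped */
    };

    /* Sketch: the release store orders all prior reads of the consumed
     * SQE slots before the head update, so a producer that observes the
     * new head may safely reuse those slots. Mirrors
     * smp_store_release(&rings->sq.head, ctx->cached_sq_head). */
    static void commit_sq_head(struct sq_ring *sq, uint32_t cached_head)
    {
        atomic_store_explicit(&sq->head, cached_head, memory_order_release);
    }

    /* sq_dropped has a single writer, hence the kernel's plain
     * WRITE_ONCE(READ_ONCE(...) + 1); relaxed atomics model that here. */
    static void note_dropped(struct sq_ring *sq)
    {
        uint32_t v = atomic_load_explicit(&sq->dropped, memory_order_relaxed);
        atomic_store_explicit(&sq->dropped, v + 1, memory_order_relaxed);
    }
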
2483 if (iowq->cq_min_tail != READ_ONCE(ctx->rings->cq.tail))
2596 struct io_rings *rings = ctx->rings;
2618 iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
2619 iowq.cq_min_tail = READ_ONCE(ctx->rings->cq.tail);
2655 READ_ONCE(ctx->rings->cq.tail);
2714 return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
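
The wait-path matches (2483, 2618-2619, 2655, 2714) all revolve around one trick: "wait for min_events more completions" is converted once, at wait entry, into an absolute tail target, and later checks compare live counters against that snapshot. A sketch of the target math, assuming the kernel's signed-distance comparison style:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: absolute target = head at wait entry + wanted events,
     * as lines 2618-2619 compute into iowq.cq_tail. */
    static uint32_t cq_wait_target(uint32_t head_at_entry, uint32_t min_events)
    {
        return head_at_entry + min_events;
    }

    /* Signed distance keeps the test correct across 32-bit wraparound:
     * it turns non-negative once the live tail reaches the target. */
    static bool cq_target_reached(uint32_t live_tail, uint32_t target)
    {
        return (int32_t)(live_tail - target) >= 0;
    }
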
2721 ctx->rings = NULL;
2728 struct io_rings *rings;
2731 off = struct_size(rings, cqes, cq_entries);
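
Line 2731 sizes the rings mapping as the io_rings header plus cq_entries trailing CQEs; struct_size() is the kernel's overflow-checked helper for flexible-array allocations. A userspace sketch of the equivalent arithmetic (the structure layout here is illustrative, and the plain expression lacks struct_size()'s overflow saturation):

    #include <stdint.h>
    #include <stdio.h>

    struct io_uring_cqe { uint64_t user_data; int32_t res; uint32_t flags; };

    struct rings_sketch {
        uint32_t cq_ring_mask;        /* ...header fields elided... */
        struct io_uring_cqe cqes[];   /* flexible array of CQEs */
    };

    int main(void)
    {
        size_t cq_entries = 256;
        /* equivalent of struct_size(rings, cqes, cq_entries) */
        size_t off = sizeof(struct rings_sketch)
                   + cq_entries * sizeof(struct io_uring_cqe);
        printf("rings allocation: %zu bytes\n", off);
        return 0;
    }
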
3050 * if we're exiting a ton of rings at the same time. It just adds
3145 if (!ctx->rings)
3152 * Cancels requests of all rings, not only @ctx, but
3548 struct io_rings *rings;
3570 ctx->rings = rings = io_region_get_ptr(&ctx->ring_region);
3573 ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
3574 rings->sq_ring_mask = p->sq_entries - 1;
3575 rings->cq_ring_mask = p->cq_entries - 1;
3576 rings->sq_ring_entries = p->sq_entries;
3577 rings->cq_ring_entries = p->cq_entries;
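
Lines 3574-3577 stash the ring geometry where userspace can see it; the masks only work because sq_entries and cq_entries are powers of two, so a free-running counter maps to a slot with a single AND instead of a modulo. A one-function sketch:

    #include <assert.h>
    #include <stdint.h>

    /* Sketch: ring_mask == entries - 1 for power-of-two entries, so
     * "counter & mask" is the slot index for any 32-bit counter. */
    static uint32_t ring_index(uint32_t counter, uint32_t entries)
    {
        assert((entries & (entries - 1)) == 0);   /* power of two */
        return counter & (entries - 1);
    }
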
3630 /* There is no way to mmap rings without a real fd */
3805 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
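
Line 3805 exports the SQ index array's position as a byte offset from the start of the rings structure, since userspace sees the mapping at its own address. A sketch of how an application turns that offset back into a pointer after mmap() (mirroring what liburing does during setup; names illustrative):

    #include <stdint.h>

    /* Sketch: rings_base is the mmap'd rings region; array_off is
     * p->sq_off.array as filled in by io_uring_setup(). */
    static uint32_t *sq_array_ptr(void *rings_base, uint32_t array_off)
    {
        return (uint32_t *)((char *)rings_base + array_off);
    }
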