io_uring: refactor __io_get_cqe()
Make __io_get_cqe simpler by not grabbing the cqe from the refilled
cache, but letting io_get_cqe() do it for us. That's cleaner and removes
some duplication.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/74dc8fdf2657e438b2e05e1d478a3596924604e9.1692916914.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
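Before the patch, __io_get_cqe() both refilled the ctx->cqe_cached/ctx->cqe_sentinel window and popped the first entry from it, duplicating the pop sequence that the inline fast path in io_get_cqe_overflow() already carries. After the patch the slow path, renamed io_cqe_cache_refill(), only refills the window, and a single pop sequence remains inline. A minimal stand-alone sketch of the resulting shape (the types, names, and ring size below are invented for illustration, not the kernel's definitions):

	/*
	 * Stand-alone model of the refactored flow; not kernel code.
	 */
	#include <stdbool.h>
	#include <stddef.h>

	#define RING_ENTRIES 8

	struct cqe { unsigned long long user_data; int res; unsigned int flags; };

	struct ring_ctx {
		struct cqe ring[RING_ENTRIES];
		unsigned int cached_tail;
		struct cqe *cqe_cached;		/* next entry in the cached window */
		struct cqe *cqe_sentinel;	/* one past the end of the window */
	};

	/* Slow path, like io_cqe_cache_refill(): only recomputes the window. */
	static bool cache_refill(struct ring_ctx *ctx)
	{
		unsigned int off = ctx->cached_tail & (RING_ENTRIES - 1);

		/* the kernel also handles overflow and clamps to a contiguous range */
		ctx->cqe_cached = &ctx->ring[off];
		ctx->cqe_sentinel = ctx->cqe_cached + (RING_ENTRIES - off);
		return true;
	}

	/* Fast path, like io_get_cqe_overflow(): the one place entries are popped. */
	static struct cqe *get_cqe(struct ring_ctx *ctx)
	{
		struct cqe *cqe;

		if (ctx->cqe_cached >= ctx->cqe_sentinel) {
			if (!cache_refill(ctx))
				return NULL;
		}
		cqe = ctx->cqe_cached;
		ctx->cached_tail++;
		ctx->cqe_cached++;
		return cqe;
	}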
This commit is contained in: commit 20d6b63387 (parent b24c5d7529)
io_uring/io_uring.c:

@@ -818,7 +818,7 @@ void io_req_cqe_overflow(struct io_kiocb *req)
 	 * control dependency is enough as we're using WRITE_ONCE to
 	 * fill the cq entry
 	 */
-struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
+bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow)
 {
 	struct io_rings *rings = ctx->rings;
 	unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
@@ -830,7 +830,7 @@ struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
 	 * Force overflow the completion.
 	 */
 	if (!overflow && (ctx->check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)))
-		return NULL;
+		return false;
 
 	/* userspace may cheat modifying the tail, be safe and do min */
 	queued = min(__io_cqring_events(ctx), ctx->cq_entries);
@@ -838,7 +838,7 @@ struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
 	/* we need a contiguous range, limit based on the current array offset */
 	len = min(free, ctx->cq_entries - off);
 	if (!len)
-		return NULL;
+		return false;
 
 	if (ctx->flags & IORING_SETUP_CQE32) {
 		off <<= 1;
@@ -847,12 +847,7 @@ struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
 
 	ctx->cqe_cached = &rings->cqes[off];
 	ctx->cqe_sentinel = ctx->cqe_cached + len;
-
-	ctx->cached_cq_tail++;
-	ctx->cqe_cached++;
-	if (ctx->flags & IORING_SETUP_CQE32)
-		ctx->cqe_cached++;
-	return &rings->cqes[off];
+	return true;
 }
 
 static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
io_uring/io_uring.h:

@@ -38,7 +38,7 @@ enum {
 	IOU_STOP_MULTISHOT = -ECANCELED,
 };
 
-struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow);
+bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
 void io_req_cqe_overflow(struct io_kiocb *req);
 int io_run_task_work_sig(struct io_ring_ctx *ctx);
 void io_req_defer_failed(struct io_kiocb *req, s32 res);
@@ -112,19 +112,20 @@ static inline void io_req_task_work_add(struct io_kiocb *req)
 static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
 						       bool overflow)
 {
+	struct io_uring_cqe *cqe;
+
 	io_lockdep_assert_cq_locked(ctx);
 
-	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
-		struct io_uring_cqe *cqe = ctx->cqe_cached;
-
-		ctx->cached_cq_tail++;
-		ctx->cqe_cached++;
-		if (ctx->flags & IORING_SETUP_CQE32)
-			ctx->cqe_cached++;
-		return cqe;
+	if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
+		if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
+			return NULL;
 	}
 
-	return __io_get_cqe(ctx, overflow);
+	cqe = ctx->cqe_cached;
+	ctx->cached_cq_tail++;
+	ctx->cqe_cached++;
+	if (ctx->flags & IORING_SETUP_CQE32)
+		ctx->cqe_cached++;
+	return cqe;
 }
 
 static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
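For context on how this path is consumed, here is a hedged sketch of a CQE poster sitting on top of io_get_cqe(), loosely modeled on io_fill_cqe_aux() from the same file; the helper name is made up and the real kernel code has additional error and overflow handling:

	/* Hypothetical caller: post one completion through the refactored path. */
	static bool post_cqe_sketch(struct io_ring_ctx *ctx, u64 user_data,
				    s32 res, u32 cflags)
	{
		struct io_uring_cqe *cqe;

		cqe = io_get_cqe(ctx);	/* may refill via io_cqe_cache_refill() */
		if (unlikely(!cqe))
			return false;	/* CQ full; real code falls back to overflow */

		/* WRITE_ONCE() pairs with the control-dependency comment above */
		WRITE_ONCE(cqe->user_data, user_data);
		WRITE_ONCE(cqe->res, res);
		WRITE_ONCE(cqe->flags, cflags);
		return true;
	}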