io_uring: dedup CQE flushing non-empty checks

We don't do io_submit_flush_completions() when there are no requests
enqueued, and every single caller checks for that first. Hide the check
inside the function itself, keeping it inlined so the fast path stays
cheap. That will also make it much easier to change the emptiness
condition in the future.
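
To illustrate the pattern outside the diff below: a cheap inline
wrapper performs the emptiness check and calls the out-of-line
double-underscore worker only when there is something to flush. What
follows is a minimal userspace sketch with stub stand-ins for the
kernel types, not the kernel's actual definitions:

#include <stdio.h>

/* Simplified stand-ins for the kernel structures (illustrative only). */
struct io_submit_state {
    unsigned int compl_nr;      /* number of completions batched up */
};

struct io_ring_ctx {
    struct io_submit_state submit_state;
};

/* Out-of-line slow path, reached only when work is actually queued. */
static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
{
    printf("flushing %u completions\n", ctx->submit_state.compl_nr);
    ctx->submit_state.compl_nr = 0;
}

/* Inline fast path: callers no longer open-code the emptiness check. */
static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
{
    if (ctx->submit_state.compl_nr)
        __io_submit_flush_completions(ctx);
}

int main(void)
{
    struct io_ring_ctx ctx = { .submit_state = { .compl_nr = 3 } };

    io_submit_flush_completions(&ctx);  /* flushes three completions */
    io_submit_flush_completions(&ctx);  /* no-op: nothing queued */
    return 0;
}

With the check centralised like this, changing the "anything to
flush?" condition later only touches the wrapper, not every call site.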

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/d7ff8cef5da1b38e8ea648f5aad9a315ddfc7b57.1631115443.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Pavel Begunkov 2021-09-08 16:40:52 +01:00 committed by Jens Axboe
parent d81499bfcd
commit c450178d9b

@@ -1092,7 +1092,7 @@ static void __io_queue_sqe(struct io_kiocb *req);
 static void io_rsrc_put_work(struct work_struct *work);
 
 static void io_req_task_queue(struct io_kiocb *req);
-static void io_submit_flush_completions(struct io_ring_ctx *ctx);
+static void __io_submit_flush_completions(struct io_ring_ctx *ctx);
 static int io_req_prep_async(struct io_kiocb *req);
 
 static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
@@ -1164,6 +1164,12 @@ static inline void req_ref_get(struct io_kiocb *req)
 	atomic_inc(&req->refs);
 }
 
+static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
+{
+	if (ctx->submit_state.compl_nr)
+		__io_submit_flush_completions(ctx);
+}
+
 static inline void __io_req_set_refcount(struct io_kiocb *req, int nr)
 {
 	if (!(req->flags & REQ_F_REFCOUNT)) {
@@ -1252,8 +1258,7 @@ static void io_fallback_req_func(struct work_struct *work)
 		req->io_task_work.func(req, &locked);
 
 	if (locked) {
-		if (ctx->submit_state.compl_nr)
-			io_submit_flush_completions(ctx);
+		io_submit_flush_completions(ctx);
 		mutex_unlock(&ctx->uring_lock);
 	}
 	percpu_ref_put(&ctx->refs);
@@ -2112,8 +2117,7 @@ static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked)
 	if (!ctx)
 		return;
 	if (*locked) {
-		if (ctx->submit_state.compl_nr)
-			io_submit_flush_completions(ctx);
+		io_submit_flush_completions(ctx);
 		mutex_unlock(&ctx->uring_lock);
 		*locked = false;
 	}
@@ -2130,7 +2134,7 @@ static void tctx_task_work(struct callback_head *cb)
 	while (1) {
 		struct io_wq_work_node *node;
 
-		if (!tctx->task_list.first && locked && ctx->submit_state.compl_nr)
+		if (!tctx->task_list.first && locked)
 			io_submit_flush_completions(ctx);
 
 		spin_lock_irq(&tctx->task_lock);
@@ -2315,7 +2319,7 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
 	list_add(&req->inflight_entry, &state->free_list);
 }
 
-static void io_submit_flush_completions(struct io_ring_ctx *ctx)
+static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 	__must_hold(&ctx->uring_lock)
 {
 	struct io_submit_state *state = &ctx->submit_state;
@@ -7195,8 +7199,7 @@ static void io_submit_state_end(struct io_submit_state *state,
 {
 	if (state->link.head)
 		io_queue_sqe(state->link.head);
-	if (state->compl_nr)
-		io_submit_flush_completions(ctx);
+	io_submit_flush_completions(ctx);
 	if (state->plug_started)
 		blk_finish_plug(&state->plug);
 }