From 2593553a01c803e01e7c5c2131993885879efbec Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Fri, 19 Mar 2021 17:22:40 +0000
Subject: [PATCH] io_uring: remove __io_req_task_cancel()

Both io_req_complete_failed() and __io_req_task_cancel() do the same
thing: set the failure flag, put both req refs and emit a CQE. The
former is a bit more advanced as it also puts the req back into the
req cache, so make it take over __io_req_task_cancel() and remove the
latter.

Signed-off-by: Pavel Begunkov
Signed-off-by: Jens Axboe
---
 fs/io_uring.c | 28 +++-------------------------
 1 file changed, 3 insertions(+), 25 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5eb12f45c6bc..9f9eb853a083 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1024,7 +1024,6 @@ static bool io_rw_reissue(struct io_kiocb *req);
 static void io_cqring_fill_event(struct io_kiocb *req, long res);
 static void io_put_req(struct io_kiocb *req);
 static void io_put_req_deferred(struct io_kiocb *req, int nr);
-static void io_double_put_req(struct io_kiocb *req);
 static void io_dismantle_req(struct io_kiocb *req);
 static void io_put_task(struct task_struct *task, int nr);
 static void io_queue_next(struct io_kiocb *req);
@@ -2019,20 +2018,6 @@ static void io_req_task_work_add_fallback(struct io_kiocb *req,
 	io_task_work_add_head(&req->ctx->exit_task_work, &req->task_work);
 }
 
-static void __io_req_task_cancel(struct io_kiocb *req, int error)
-{
-	struct io_ring_ctx *ctx = req->ctx;
-
-	spin_lock_irq(&ctx->completion_lock);
-	io_cqring_fill_event(req, error);
-	io_commit_cqring(ctx);
-	spin_unlock_irq(&ctx->completion_lock);
-
-	io_cqring_ev_posted(ctx);
-	req_set_fail_links(req);
-	io_double_put_req(req);
-}
-
 static void io_req_task_cancel(struct callback_head *cb)
 {
 	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
@@ -2040,7 +2025,7 @@ static void io_req_task_cancel(struct callback_head *cb)
 
 	/* ctx is guaranteed to stay alive while we hold uring_lock */
 	mutex_lock(&ctx->uring_lock);
-	__io_req_task_cancel(req, req->result);
+	io_req_complete_failed(req, req->result);
 	mutex_unlock(&ctx->uring_lock);
 }
 
@@ -2053,7 +2038,7 @@ static void __io_req_task_submit(struct io_kiocb *req)
 	if (!(current->flags & PF_EXITING) && !current->in_execve)
 		__io_queue_sqe(req);
 	else
-		__io_req_task_cancel(req, -EFAULT);
+		io_req_complete_failed(req, -EFAULT);
 	mutex_unlock(&ctx->uring_lock);
 }
 
@@ -2208,13 +2193,6 @@ static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
 		io_free_req_deferred(req);
 }
 
-static void io_double_put_req(struct io_kiocb *req)
-{
-	/* drop both submit and complete references */
-	if (req_ref_sub_and_test(req, 2))
-		io_free_req(req);
-}
-
 static unsigned io_cqring_events(struct io_ring_ctx *ctx)
 {
 	/* See comment at the top of this file */
@@ -5106,7 +5084,7 @@ static void io_async_task_func(struct callback_head *cb)
 	if (!READ_ONCE(apoll->poll.canceled))
 		__io_req_task_submit(req);
 	else
-		__io_req_task_cancel(req, -ECANCELED);
+		io_req_complete_failed(req, -ECANCELED);
 
 	kfree(apoll->double_poll);
 	kfree(apoll);
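
For reference only, not part of the diff: a rough sketch of the helper
that takes over, based on the commit message's description of setting
the fail flag, dropping both request references and posting a CQE. The
exact body and the io_req_complete_post() call are assumptions; they
are not shown in this patch.

static void io_req_complete_failed(struct io_kiocb *req, long res)
{
	/* mark the request failed so linked requests get cancelled */
	req_set_fail_links(req);
	/* drop the submit reference */
	io_put_req(req);
	/*
	 * Post the CQE and drop the completion reference; unlike the old
	 * __io_req_task_cancel(), this path can also recycle the request
	 * into the ctx's request cache (assumed behaviour).
	 */
	io_req_complete_post(req, res, 0);
}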