From cc6b09671d0c2d0172673885e16c74a2c54ad7ba Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Wed, 9 Aug 2023 13:21:41 +0100
Subject: [PATCH] io_uring: fix drain stalls by invalid SQE

[ Upstream commit cfdbaa3a291d6fd2cb4a1a70d74e63b4abc2f5ec ]

cq_extra is protected by ->completion_lock, which io_get_sqe() misses.
The bug is harmless as it doesn't happen in real life: it requires an
invalid SQ index array and racing with submission, and it only messes
up userspace, i.e. stalls request execution, but will be cleaned up on
ring destruction.

Fixes: 15641e427070f ("io_uring: don't cache number of dropped SQEs")
Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/66096d54651b1a60534bb2023f2947f09f50ef73.1691538547.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
Signed-off-by: Sasha Levin
---
 io_uring/io_uring.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 7c98a820c8dd..e4de493bcff4 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -7531,7 +7531,9 @@ static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
 		return &ctx->sq_sqes[head];
 
 	/* drop invalid entries */
+	spin_lock(&ctx->completion_lock);
 	ctx->cq_extra--;
+	spin_unlock(&ctx->completion_lock);
 	WRITE_ONCE(ctx->rings->sq_dropped,
 		   READ_ONCE(ctx->rings->sq_dropped) + 1);
 	return NULL;
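
The race matters for drain because drained requests wait until the CQ
accounting, which folds in cq_extra, catches up with their sequence; if the
unlocked cq_extra-- in io_get_sqe() races with updates done under
->completion_lock, an update can be lost and the drain condition may never
be satisfied, stalling the request until the ring is torn down (this reading
follows the subject line and commit message above, not code quoted in the
patch itself).

Below is a minimal user-space sketch of the same class of bug, not kernel
code: the names counter, counter_lock, racy_drop() and locked_add() are
invented for this example. One path updates a lock-protected counter
without taking the lock, so concurrent locked updates can be lost, which is
the shape of the bug the hunk above fixes by taking ->completion_lock
around the cq_extra decrement.

/*
 * Illustrative sketch only: a counter documented as protected by a lock,
 * with one path that forgets to take that lock.
 */
#include <pthread.h>
#include <stdio.h>

static long counter;                 /* stands in for ctx->cq_extra      */
static pthread_mutex_t counter_lock = PTHREAD_MUTEX_INITIALIZER;
                                     /* stands in for ->completion_lock  */

/* Buggy path: modifies the counter without the protecting lock. */
static void *racy_drop(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000000; i++)
		counter--;           /* like the old unlocked ctx->cq_extra-- */
	return NULL;
}

/* Correct path: every update happens under the lock. */
static void *locked_add(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000000; i++) {
		pthread_mutex_lock(&counter_lock);
		counter++;
		pthread_mutex_unlock(&counter_lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, racy_drop, NULL);
	pthread_create(&b, NULL, locked_add, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* With the race, this often prints a non-zero value: updates were lost. */
	printf("counter = %ld (expected 0)\n", counter);
	return 0;
}

Making the rarely taken "drop invalid entries" path take the lock, as the
patch does, applies the usual rule that every writer of a lock-protected
field uses the same lock as its other writers and readers; the cost is
negligible since the path only runs for invalid SQ index entries.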