io_uring: terminate multishot poll for CQ ring overflow
If we hit overflow and fail to allocate an overflow entry for the
completion, terminate the multishot poll mode.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent b2c3f7e171
commit 5082620fb2
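For context, a minimal userspace sketch of how this change is observed (not part of the commit; it assumes liburing with io_uring_prep_poll_multishot, and the helper names arm_poll/poll_loop are illustrative): a multishot poll request posts CQEs with IORING_CQE_F_MORE set while it stays armed, so once the kernel terminates multishot mode, e.g. on CQ ring overflow as above, the completion arrives without that flag and the request has to be re-armed.

/*
 * Illustrative sketch only (assumes liburing); not part of this commit.
 * Re-arm a multishot poll whenever a CQE arrives without
 * IORING_CQE_F_MORE, which is what userspace sees after the kernel
 * terminates multishot mode (for example on CQ ring overflow).
 */
#include <liburing.h>
#include <poll.h>

static int arm_poll(struct io_uring *ring, int fd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	io_uring_prep_poll_multishot(sqe, fd, POLLIN);
	return io_uring_submit(ring);
}

static int poll_loop(struct io_uring *ring, int fd)
{
	struct io_uring_cqe *cqe;
	int ret;

	if (arm_poll(ring, fd) < 0)
		return -1;
	for (;;) {
		ret = io_uring_wait_cqe(ring, &cqe);
		if (ret < 0)
			return ret;
		/* ... handle the readiness event (cqe->res) here ... */
		if (!(cqe->flags & IORING_CQE_F_MORE)) {
			/* multishot poll was terminated; arm it again */
			if (arm_poll(ring, fd) < 0)
				return -1;
		}
		io_uring_cqe_seen(ring, cqe);
	}
}
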
@@ -1044,6 +1044,7 @@ static void io_rsrc_put_work(struct work_struct *work);
 static void io_req_task_queue(struct io_kiocb *req);
 static void io_submit_flush_completions(struct io_comp_state *cs,
 					 struct io_ring_ctx *ctx);
+static bool io_poll_remove_waitqs(struct io_kiocb *req);
 static int io_req_prep_async(struct io_kiocb *req);
 
 static struct kmem_cache *req_cachep;
@@ -1501,7 +1502,7 @@ static inline void req_ref_get(struct io_kiocb *req)
 	atomic_inc(&req->refs);
 }
 
-static void __io_cqring_fill_event(struct io_kiocb *req, long res,
+static bool __io_cqring_fill_event(struct io_kiocb *req, long res,
 				   unsigned int cflags)
 {
 	struct io_ring_ctx *ctx = req->ctx;
@@ -1519,7 +1520,7 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res,
 		WRITE_ONCE(cqe->user_data, req->user_data);
 		WRITE_ONCE(cqe->res, res);
 		WRITE_ONCE(cqe->flags, cflags);
-		return;
+		return true;
 	}
 	if (!ctx->cq_overflow_flushed &&
 	    !atomic_read(&req->task->io_uring->in_idle)) {
@@ -1537,7 +1538,7 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res,
 		ocqe->cqe.res = res;
 		ocqe->cqe.flags = cflags;
 		list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
-		return;
+		return true;
 	}
 overflow:
 	/*
@@ -1546,6 +1547,7 @@ overflow:
 	 * on the floor.
 	 */
 	WRITE_ONCE(ctx->rings->cq_overflow, ++ctx->cached_cq_overflow);
+	return false;
 }
 
 static void io_cqring_fill_event(struct io_kiocb *req, long res)
@@ -4907,14 +4909,14 @@ static bool io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
 		error = -ECANCELED;
 		req->poll.events |= EPOLLONESHOT;
 	}
-	if (error || (req->poll.events & EPOLLONESHOT)) {
-		io_poll_remove_double(req);
+	if (!error)
+		error = mangle_poll(mask);
+	if (!__io_cqring_fill_event(req, error, flags) ||
+	    (req->poll.events & EPOLLONESHOT)) {
+		io_poll_remove_waitqs(req);
 		req->poll.done = true;
 		flags = 0;
 	}
-	if (!error)
-		error = mangle_poll(mask);
-	__io_cqring_fill_event(req, error, flags);
 	io_commit_cqring(ctx);
 	return !(flags & IORING_CQE_F_MORE);
 }
@@ -5205,6 +5207,8 @@ static bool __io_poll_remove_one(struct io_kiocb *req,
 {
 	bool do_complete = false;
 
+	if (!poll->head)
+		return false;
 	spin_lock(&poll->head->lock);
 	WRITE_ONCE(poll->canceled, true);
 	if (!list_empty(&poll->wait.entry)) {