io_uring: break out of iowq iopoll on teardown
io-wq will retry iopoll even when it failed with -EAGAIN. If that
races with task exit, which sets TIF_NOTIFY_SIGNAL for all its workers,
such workers might potentially infinitely spin retrying iopoll again and
again and each time failing on some allocation / waiting / etc. Don't
keep spinning if io-wq is dying.
Fixes: 561fb04a6a ("io_uring: replace workqueue usage with io-wq")
Cc: stable@vger.kernel.org
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in: 45500dc4e0 (parent: 76d3ccecfa)
@ -174,6 +174,16 @@ static void io_worker_ref_put(struct io_wq *wq)
|
||||
complete(&wq->worker_done);
|
||||
}
|
||||
|
||||
bool io_wq_worker_stopped(void)
|
||||
{
|
||||
struct io_worker *worker = current->worker_private;
|
||||
|
||||
if (WARN_ON_ONCE(!io_wq_current_is_worker()))
|
||||
return true;
|
||||
|
||||
return test_bit(IO_WQ_BIT_EXIT, &worker->wq->state);
|
||||
}
|
||||
|
||||
static void io_worker_cancel_cb(struct io_worker *worker)
|
||||
{
|
||||
struct io_wq_acct *acct = io_wq_get_acct(worker);
|
||||
|
@ -52,6 +52,7 @@ void io_wq_hash_work(struct io_wq_work *work, void *val);
|
||||
|
||||
int io_wq_cpu_affinity(struct io_uring_task *tctx, cpumask_var_t mask);
|
||||
int io_wq_max_workers(struct io_wq *wq, int *new_count);
|
||||
bool io_wq_worker_stopped(void);
|
||||
|
||||
static inline bool io_wq_is_hashed(struct io_wq_work *work)
|
||||
{
|
||||
|
@ -1975,6 +1975,8 @@ fail:
|
||||
if (!needs_poll) {
|
||||
if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
|
||||
break;
|
||||
if (io_wq_worker_stopped())
|
||||
break;
|
||||
cond_resched();
|
||||
continue;
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user