io_uring: process task work in io_uring_register()
We do this for CQ ring wait, in case task_work completions come in. We should do the same in io_uring_register(), to avoid a spurious -EINTR if the ring quiescing ends up having to process task_work to complete the operation.

Reported-by: Dan Melnic <dmm@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
parent
91d8f5191e
commit
af9c1a44f8
@@ -6950,6 +6950,22 @@ static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
|
|||||||
return autoremove_wake_function(curr, mode, wake_flags, key);
|
return autoremove_wake_function(curr, mode, wake_flags, key);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int io_run_task_work_sig(void)
|
||||||
|
{
|
||||||
|
if (io_run_task_work())
|
||||||
|
return 1;
|
||||||
|
if (!signal_pending(current))
|
||||||
|
return 0;
|
||||||
|
if (current->jobctl & JOBCTL_TASK_WORK) {
|
||||||
|
spin_lock_irq(¤t->sighand->siglock);
|
||||||
|
current->jobctl &= ~JOBCTL_TASK_WORK;
|
||||||
|
recalc_sigpending();
|
||||||
|
spin_unlock_irq(¤t->sighand->siglock);
|
||||||
|
return 1;
|
||||||
|
}
|
||||||
|
return -EINTR;
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Wait until events become available, if we don't already have some. The
|
* Wait until events become available, if we don't already have some. The
|
||||||
* application must reap them itself, as they reside on the shared cq ring.
|
* application must reap them itself, as they reside on the shared cq ring.
|
||||||
@@ -6995,19 +7011,11 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
|
|||||||
prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
|
prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
|
||||||
TASK_INTERRUPTIBLE);
|
TASK_INTERRUPTIBLE);
|
||||||
/* make sure we run task_work before checking for signals */
|
/* make sure we run task_work before checking for signals */
|
||||||
if (io_run_task_work())
|
ret = io_run_task_work_sig();
|
||||||
|
if (ret > 0)
|
||||||
continue;
|
continue;
|
||||||
if (signal_pending(current)) {
|
else if (ret < 0)
|
||||||
if (current->jobctl & JOBCTL_TASK_WORK) {
|
|
||||||
spin_lock_irq(&current->sighand->siglock);
|
|
||||||
current->jobctl &= ~JOBCTL_TASK_WORK;
|
|
||||||
recalc_sigpending();
|
|
||||||
spin_unlock_irq(&current->sighand->siglock);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
ret = -EINTR;
|
|
||||||
break;
|
break;
|
||||||
}
|
|
||||||
if (io_should_wake(&iowq, false))
|
if (io_should_wake(&iowq, false))
|
||||||
break;
|
break;
|
||||||
schedule();
|
schedule();
|
||||||
@@ -9666,8 +9674,16 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
|
|||||||
* after we've killed the percpu ref.
|
* after we've killed the percpu ref.
|
||||||
*/
|
*/
|
||||||
mutex_unlock(&ctx->uring_lock);
|
mutex_unlock(&ctx->uring_lock);
|
||||||
ret = wait_for_completion_interruptible(&ctx->ref_comp);
|
do {
|
||||||
|
ret = wait_for_completion_interruptible(&ctx->ref_comp);
|
||||||
|
if (!ret)
|
||||||
|
break;
|
||||||
|
if (io_run_task_work_sig() > 0)
|
||||||
|
continue;
|
||||||
|
} while (1);
|
||||||
|
|
||||||
mutex_lock(&ctx->uring_lock);
|
mutex_lock(&ctx->uring_lock);
|
||||||
|
|
||||||
if (ret) {
|
if (ret) {
|
||||||
percpu_ref_resurrect(&ctx->refs);
|
percpu_ref_resurrect(&ctx->refs);
|
||||||
ret = -EINTR;
|
ret = -EINTR;
|
||||||
|
Loading…
x
Reference in New Issue
Block a user