io_uring: rsrc ref lock needs to be IRQ safe
Nadav reports running into the below splat on re-enabling softirqs:

WARNING: CPU: 2 PID: 1777 at kernel/softirq.c:364 __local_bh_enable_ip+0xaa/0xe0
Modules linked in:
CPU: 2 PID: 1777 Comm: umem Not tainted 5.13.1+ #161
Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 07/22/2020
RIP: 0010:__local_bh_enable_ip+0xaa/0xe0
Code: a9 00 ff ff 00 74 38 65 ff 0d a2 21 8c 7a e8 ed 1a 20 00 fb 66 0f 1f 44 00 00 5b 41 5c 5d c3 65 8b 05 e6 2d 8c 7a 85 c0 75 9a <0f> 0b eb 96 e8 2d 1f 20 00 eb a5 4c 89 e7 e8 73 4f 0c 00 eb ae 65
RSP: 0018:ffff88812e58fcc8 EFLAGS: 00010046
RAX: 0000000000000000 RBX: 0000000000000201 RCX: dffffc0000000000
RDX: 0000000000000007 RSI: 0000000000000201 RDI: ffffffff8898c5ac
RBP: ffff88812e58fcd8 R08: ffffffff8575dbbf R09: ffffed1028ef14f9
R10: ffff88814778a7c3 R11: ffffed1028ef14f8 R12: ffffffff85c9e9ae
R13: ffff88814778a000 R14: ffff88814778a7b0 R15: ffff8881086db890
FS:  00007fbcfee17700(0000) GS:ffff8881e0300000(0000) knlGS:0000000000000000
CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 000000c0402a5008 CR3: 000000011c1ac003 CR4: 00000000003706e0
Call Trace:
 _raw_spin_unlock_bh+0x31/0x40
 io_rsrc_node_ref_zero+0x13e/0x190
 io_dismantle_req+0x215/0x220
 io_req_complete_post+0x1b8/0x720
 __io_complete_rw.isra.0+0x16b/0x1f0
 io_complete_rw+0x10/0x20

where it's clear we end up calling the percpu count release directly from
the completion path, as it's in atomic mode and we drop the last ref. For
file/block IO, this can be from IRQ context already, and the softirq
locking for rsrc isn't enough. Just make the lock fully IRQ safe, and
ensure we correctly save state from the release path, as we don't know
the full context there.

Reported-by: Nadav Amit <nadav.amit@gmail.com>
Tested-by: Nadav Amit <nadav.amit@gmail.com>
Link: https://lore.kernel.org/io-uring/C187C836-E78B-4A31-B24C-D16919ACA093@gmail.com/
Signed-off-by: Jens Axboe <axboe@kernel.dk>
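The underlying problem: spin_{lock,unlock}_bh() only disables and re-enables
softirq processing, and re-enabling softirqs from hard-IRQ context is what
trips the __local_bh_enable_ip() warning above. Since the percpu_ref release
can fire from the IRQ-driven completion path, the lock must disable hard IRQs,
and the release side must save and restore IRQ state because its calling
context is unknown. A minimal self-contained sketch of that pattern (the
demo_* names are hypothetical; the locking primitives are the real kernel API):

#include <linux/spinlock.h>
#include <linux/list.h>

static DEFINE_SPINLOCK(demo_lock);
static LIST_HEAD(demo_list);

/* Known process context with IRQs enabled: the plain _irq variants suffice. */
static void demo_add(struct list_head *entry)
{
	spin_lock_irq(&demo_lock);
	list_add_tail(entry, &demo_list);
	spin_unlock_irq(&demo_lock);
}

/* May be called from process, softirq, or hard-IRQ context: save and
 * restore the IRQ state instead of unconditionally re-enabling it.
 */
static void demo_release(struct list_head *entry)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	list_del(entry);
	spin_unlock_irqrestore(&demo_lock, flags);
}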
commit 4956b9eaad
parent 20c0b380f9
fs/io_uring.c

@@ -7138,16 +7138,6 @@ static void **io_alloc_page_table(size_t size)
 	return table;
 }
 
-static inline void io_rsrc_ref_lock(struct io_ring_ctx *ctx)
-{
-	spin_lock_bh(&ctx->rsrc_ref_lock);
-}
-
-static inline void io_rsrc_ref_unlock(struct io_ring_ctx *ctx)
-{
-	spin_unlock_bh(&ctx->rsrc_ref_lock);
-}
-
 static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
 {
 	percpu_ref_exit(&ref_node->refs);
@@ -7164,9 +7154,9 @@ static void io_rsrc_node_switch(struct io_ring_ctx *ctx,
 	struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
 
 	rsrc_node->rsrc_data = data_to_kill;
-	io_rsrc_ref_lock(ctx);
+	spin_lock_irq(&ctx->rsrc_ref_lock);
 	list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
-	io_rsrc_ref_unlock(ctx);
+	spin_unlock_irq(&ctx->rsrc_ref_lock);
 
 	atomic_inc(&data_to_kill->refs);
 	percpu_ref_kill(&rsrc_node->refs);
@@ -7674,9 +7664,10 @@ static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
 {
 	struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
 	struct io_ring_ctx *ctx = node->rsrc_data->ctx;
+	unsigned long flags;
 	bool first_add = false;
 
-	io_rsrc_ref_lock(ctx);
+	spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
 	node->done = true;
 
 	while (!list_empty(&ctx->rsrc_ref_list)) {
@@ -7688,7 +7679,7 @@ static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
 		list_del(&node->node);
 		first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
 	}
-	io_rsrc_ref_unlock(ctx);
+	spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);
 
 	if (first_add)
 		mod_delayed_work(system_wq, &ctx->rsrc_put_work, HZ);
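Note the asymmetry in the fix: io_rsrc_node_switch() takes the plain
spin_lock_irq()/spin_unlock_irq() pair because it only runs from process
context with IRQs enabled, while io_rsrc_node_ref_zero() needs
irqsave/irqrestore because, as a percpu_ref release callback, it inherits
whatever context the final percpu_ref_put() ran in. A hedged sketch of that
lifecycle (the demo_* names are hypothetical; the percpu_ref calls are the
real API):

#include <linux/percpu-refcount.h>

static void demo_ref_zero(struct percpu_ref *ref)
{
	/* Invoked synchronously by the final percpu_ref_put() once the
	 * ref is in atomic mode -- for block IO completions that can be
	 * hard-IRQ context, hence the spin_lock_irqsave() above.
	 */
}

/* Lifecycle:
 *   percpu_ref_init(&node->refs, demo_ref_zero, 0, GFP_KERNEL);
 *   percpu_ref_kill(&node->refs);  // switch the ref to atomic mode
 *   percpu_ref_put(&node->refs);   // last put calls demo_ref_zero()
 */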