loop: make autoclear operation asynchronous
syzbot is reporting a circular locking problem at __loop_clr_fd() [1]:
commit 87579e9b7d ("loop: use worker per cgroup instead of kworker")
made it call destroy_workqueue() with disk->open_mutex held.

This circular dependency cannot be broken unless we call __loop_clr_fd()
without holding disk->open_mutex. Therefore, defer __loop_clr_fd() from
lo_release() to a WQ context.
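Concretely, lo_release() now only schedules the teardown: it pins the module
and the bdev, then queues a work item on system_long_wq so that the actual
cleanup runs in WQ context, where disk->open_mutex is not held. A condensed
sketch of that pattern (illustrative only; the real code is in the diff below):

  /* Condensed illustration of the deferral added by this patch. */
  static void loop_rundown_workfn(struct work_struct *work)
  {
          struct loop_device *lo = container_of(work, struct loop_device,
                                                rundown_work);

          /*
           * WQ context: disk->open_mutex is not held here, so the
           * destroy_workqueue() call inside __loop_clr_fd() cannot
           * deadlock against it.
           */
          __loop_clr_fd(lo);
  }

  static void loop_schedule_rundown(struct loop_device *lo)
  {
          /* Pin the module and bdev so they survive until the work runs. */
          __module_get(lo->lo_disk->fops->owner);
          kobject_get(&lo->lo_device->bd_device.kobj);
          INIT_WORK(&lo->rundown_work, loop_rundown_workfn);
          queue_work(system_long_wq, &lo->rundown_work);
  }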
Link: https://syzkaller.appspot.com/bug?extid=643e4ce4b6ad1347d372 [1]
Reported-by: syzbot <syzbot+643e4ce4b6ad1347d372@syzkaller.appspotmail.com>
Suggested-by: Christoph Hellwig <hch@infradead.org>
Cc: Jan Kara <jack@suse.cz>
Tested-by: syzbot+643e4ce4b6ad1347d372@syzkaller.appspotmail.com
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/1ed7df28-ebd6-71fb-70e5-1c2972e05ddb@i-love.sakura.ne.jp
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 322c4293ec (parent c5eafd790e)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
@@ -1082,7 +1082,7 @@ out_putf:
 	return error;
 }
 
-static void __loop_clr_fd(struct loop_device *lo, bool release)
+static void __loop_clr_fd(struct loop_device *lo)
 {
 	struct file *filp;
 	gfp_t gfp = lo->old_gfp_mask;
@@ -1144,8 +1144,6 @@ static void __loop_clr_fd(struct loop_device *lo, bool release)
 	/* let user-space know about this change */
 	kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
 	mapping_set_gfp_mask(filp->f_mapping, gfp);
-	/* This is safe: open() is still holding a reference. */
-	module_put(THIS_MODULE);
 	blk_mq_unfreeze_queue(lo->lo_queue);
 
 	disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
@@ -1153,18 +1151,8 @@ static void __loop_clr_fd(struct loop_device *lo, bool release)
 	if (lo->lo_flags & LO_FLAGS_PARTSCAN) {
 		int err;
 
-		/*
-		 * open_mutex has been held already in release path, so don't
-		 * acquire it if this function is called in such case.
-		 *
-		 * If the reread partition isn't from release path, lo_refcnt
-		 * must be at least one and it can only become zero when the
-		 * current holder is released.
-		 */
-		if (!release)
-			mutex_lock(&lo->lo_disk->open_mutex);
+		mutex_lock(&lo->lo_disk->open_mutex);
 		err = bdev_disk_changed(lo->lo_disk, false);
-		if (!release)
-			mutex_unlock(&lo->lo_disk->open_mutex);
+		mutex_unlock(&lo->lo_disk->open_mutex);
 		if (err)
 			pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
@@ -1172,25 +1160,43 @@ static void __loop_clr_fd(struct loop_device *lo, bool release)
 		/* Device is gone, no point in returning error */
 	}
 
-	/*
-	 * lo->lo_state is set to Lo_unbound here after above partscan has
-	 * finished. There cannot be anybody else entering __loop_clr_fd() as
-	 * Lo_rundown state protects us from all the other places trying to
-	 * change the 'lo' device.
-	 */
 	lo->lo_flags = 0;
 	if (!part_shift)
 		lo->lo_disk->flags |= GENHD_FL_NO_PART;
+	fput(filp);
+}
+
+static void loop_rundown_completed(struct loop_device *lo)
+{
 	mutex_lock(&lo->lo_mutex);
 	lo->lo_state = Lo_unbound;
 	mutex_unlock(&lo->lo_mutex);
+	module_put(THIS_MODULE);
+}
+
+static void loop_rundown_workfn(struct work_struct *work)
+{
+	struct loop_device *lo = container_of(work, struct loop_device,
+					      rundown_work);
+	struct block_device *bdev = lo->lo_device;
+	struct gendisk *disk = lo->lo_disk;
 
-	/*
-	 * Need not hold lo_mutex to fput backing file. Calling fput holding
-	 * lo_mutex triggers a circular lock dependency possibility warning as
-	 * fput can take open_mutex which is usually taken before lo_mutex.
-	 */
-	fput(filp);
+	__loop_clr_fd(lo);
+	kobject_put(&bdev->bd_device.kobj);
+	module_put(disk->fops->owner);
+	loop_rundown_completed(lo);
+}
+
+static void loop_schedule_rundown(struct loop_device *lo)
+{
+	struct block_device *bdev = lo->lo_device;
+	struct gendisk *disk = lo->lo_disk;
+
+	__module_get(disk->fops->owner);
+	kobject_get(&bdev->bd_device.kobj);
+	INIT_WORK(&lo->rundown_work, loop_rundown_workfn);
+	queue_work(system_long_wq, &lo->rundown_work);
 }
 
 static int loop_clr_fd(struct loop_device *lo)
@@ -1222,7 +1228,8 @@ static int loop_clr_fd(struct loop_device *lo)
 	lo->lo_state = Lo_rundown;
 	mutex_unlock(&lo->lo_mutex);
 
-	__loop_clr_fd(lo, false);
+	__loop_clr_fd(lo);
+	loop_rundown_completed(lo);
 	return 0;
 }
 
@@ -1747,7 +1754,7 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
 		 * In autoclear mode, stop the loop thread
 		 * and remove configuration after last close.
 		 */
-		__loop_clr_fd(lo, true);
+		loop_schedule_rundown(lo);
 		return;
 	} else if (lo->lo_state == Lo_bound) {
 		/*
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
@@ -56,6 +56,7 @@ struct loop_device {
 	struct gendisk		*lo_disk;
 	struct mutex		lo_mutex;
 	bool			idr_visible;
+	struct work_struct	rundown_work;
 };
 
 struct loop_cmd {