dm: wait until embedded kobject is released before destroying a device
There may be other parts of the kernel holding a reference on the dm
kobject.  We must wait until all references are dropped before
deallocating the mapped_device structure.

The dm_kobject_release method signals, via a completion, that all
references have been dropped.  But dm_kobject_release doesn't free the
kobject (which is embedded in the mapped_device structure).

This is the sequence of operations:
* when destroying a DM device, call kobject_put from dm_sysfs_exit
* wait until all users stop using the kobject; when that happens, the
  release method is called
* the release method signals the completion and should return without
  delay
* the dm device removal code that waits on the completion continues
* the dm device removal code drops the dm_mod reference the device had
* the dm device removal code frees the mapped_device structure that
  contains the kobject

Using the kobject this way should avoid the module unload race that was
mentioned at the beginning of this thread: https://lkml.org/lkml/2014/1/4/83

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: stable@vger.kernel.org
parent 1ddd641ddc
commit be35f48610
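The sequence listed in the commit message is easiest to see in isolation. Below is a minimal sketch of the same pattern, using hypothetical names (my_device, my_device_kobj_release, my_device_destroy) rather than the actual dm code: the kobject release callback only signals a completion, and the teardown path waits on that completion before freeing the structure that embeds the kobject.

#include <linux/kobject.h>
#include <linux/completion.h>
#include <linux/slab.h>

struct my_device {
        struct kobject kobj;            /* embedded kobject */
        struct completion kobj_done;    /* completed by the release method */
};

/* Runs only after the last reference to kobj has been dropped. */
static void my_device_kobj_release(struct kobject *kobj)
{
        struct my_device *d = container_of(kobj, struct my_device, kobj);

        /* kobj is embedded in *d, so nothing is freed here; just signal. */
        complete(&d->kobj_done);
}

static struct kobj_type my_device_ktype = {
        .release = my_device_kobj_release,
};

static void my_device_destroy(struct my_device *d)
{
        kobject_put(&d->kobj);                  /* drop our own reference */
        wait_for_completion(&d->kobj_done);     /* wait for any other holders */
        kfree(d);                               /* only now is freeing safe */
}

The ordering in my_device_destroy() mirrors the bullet list above: put, wait, then free.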
@@ -79,6 +79,11 @@ static const struct sysfs_ops dm_sysfs_ops = {
 	.show	= dm_attr_show,
 };
 
+static void dm_kobject_release(struct kobject *kobj)
+{
+	complete(dm_get_completion_from_kobject(kobj));
+}
+
 /*
  * dm kobject is embedded in mapped_device structure
  * no need to define release function here
@@ -86,6 +91,7 @@ static const struct sysfs_ops dm_sysfs_ops = {
 static struct kobj_type dm_ktype = {
 	.sysfs_ops	= &dm_sysfs_ops,
 	.default_attrs	= dm_attrs,
+	.release	= dm_kobject_release,
 };
 
 /*
@@ -104,5 +110,7 @@ int dm_sysfs_init(struct mapped_device *md)
  */
 void dm_sysfs_exit(struct mapped_device *md)
 {
-	kobject_put(dm_kobject(md));
+	struct kobject *kobj = dm_kobject(md);
+	kobject_put(kobj);
+	wait_for_completion(dm_get_completion_from_kobject(kobj));
 }
@@ -203,6 +203,9 @@ struct mapped_device {
 	/* sysfs handle */
 	struct kobject kobj;
 
+	/* wait until the kobject is released */
+	struct completion kobj_completion;
+
 	/* zero-length flush that will be cloned and submitted to targets */
 	struct bio flush_bio;
 
@@ -2041,6 +2044,7 @@ static struct mapped_device *alloc_dev(int minor)
 	init_waitqueue_head(&md->wait);
 	INIT_WORK(&md->work, dm_wq_work);
 	init_waitqueue_head(&md->eventq);
+	init_completion(&md->kobj_completion);
 
 	md->disk->major = _major;
 	md->disk->first_minor = minor;
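For completeness, the creation side of the hypothetical sketch shown after the commit message (reusing struct my_device and my_device_ktype; this is not the actual alloc_dev()/dm_sysfs_init() code) initialises the completion before the kobject is registered and becomes visible through sysfs, matching the init_completion() call added to alloc_dev() here.

/* Hypothetical init path; on failure the caller must kobject_put(&d->kobj). */
static int my_device_create(struct my_device *d, struct kobject *parent)
{
        init_completion(&d->kobj_done); /* before anyone can reach kobj */
        return kobject_init_and_add(&d->kobj, &my_device_ktype, parent,
                                    "%s", "my_device");
}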
@@ -2919,6 +2923,13 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
 	return md;
 }
 
+struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
+{
+	struct mapped_device *md = container_of(kobj, struct mapped_device, kobj);
+
+	return &md->kobj_completion;
+}
+
 int dm_suspended_md(struct mapped_device *md)
 {
 	return test_bit(DMF_SUSPENDED, &md->flags);
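dm_get_from_kobject() (visible in the context above) and the new dm_get_completion_from_kobject() both use container_of() to step from the embedded kobject back to the mapped_device that contains it. The references that dm_sysfs_exit() now waits for are taken by other code pinning the object through that same embedded kobject; in terms of the hypothetical my_device sketch, such a user looks roughly like this.

/* Pin the containing object through its embedded kobject. */
static struct my_device *my_device_get(struct kobject *kobj)
{
        kobject_get(kobj);
        return container_of(kobj, struct my_device, kobj);
}

/* Drop the reference; the final put invokes my_device_kobj_release(). */
static void my_device_put(struct my_device *d)
{
        kobject_put(&d->kobj);
}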
@@ -15,6 +15,7 @@
 #include <linux/list.h>
 #include <linux/blkdev.h>
 #include <linux/hdreg.h>
+#include <linux/completion.h>
 
 #include "dm-stats.h"
 
@@ -152,6 +153,7 @@ int dm_sysfs_init(struct mapped_device *md);
 void dm_sysfs_exit(struct mapped_device *md);
 struct kobject *dm_kobject(struct mapped_device *md);
 struct mapped_device *dm_get_from_kobject(struct kobject *kobj);
+struct completion *dm_get_completion_from_kobject(struct kobject *kobj);
 
 /*
  * Targets for linear and striped mappings