// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Device Mapper Uevent Support (dm-uevent)
 *
 * Copyright IBM Corporation, 2007
 * 	Author: Mike Anderson <andmike@linux.vnet.ibm.com>
*/
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/kobject.h>
#include <linux/dm-ioctl.h>
#include <linux/export.h>

#include "dm.h"
#include "dm-uevent.h"

#define DM_MSG_PREFIX "uevent"

static const struct {
	enum dm_uevent_type type;
	enum kobject_action action;
	char *name;
} _dm_uevent_type_names[] = {
	{DM_UEVENT_PATH_FAILED, KOBJ_CHANGE, "PATH_FAILED"},
	{DM_UEVENT_PATH_REINSTATED, KOBJ_CHANGE, "PATH_REINSTATED"},
};

static struct kmem_cache *_dm_event_cache;

/*
 * One queued uevent: the originating mapped_device, the kobject action to
 * emit, the accumulated uevent environment and a list hook so events can be
 * batched per device.
 */
struct dm_uevent {
	struct mapped_device *md;
	enum kobject_action action;
	struct kobj_uevent_env ku_env;
	struct list_head elist;
	char name[DM_NAME_LEN];
	char uuid[DM_UUID_LEN];
};

static void dm_uevent_free(struct dm_uevent *event)
{
	kmem_cache_free(_dm_event_cache, event);
}

static struct dm_uevent *dm_uevent_alloc(struct mapped_device *md)
{
	struct dm_uevent *event;

	event = kmem_cache_zalloc(_dm_event_cache, GFP_ATOMIC);
	if (!event)
		return NULL;

	INIT_LIST_HEAD(&event->elist);
	event->md = md;

	return event;
}

/*
 * Allocate a dm_uevent and fill in the environment variables common to all
 * path events: target type, action, sequence number, path and the number
 * of valid paths remaining.
 */
static struct dm_uevent *dm_build_path_uevent(struct mapped_device *md,
					      struct dm_target *ti,
					      enum kobject_action action,
					      const char *dm_action,
					      const char *path,
					      unsigned nr_valid_paths)
{
	struct dm_uevent *event;

	event = dm_uevent_alloc(md);
	if (!event) {
		DMERR("%s: dm_uevent_alloc() failed", __func__);
		goto err_nomem;
	}

	event->action = action;

	if (add_uevent_var(&event->ku_env, "DM_TARGET=%s", ti->type->name)) {
		DMERR("%s: add_uevent_var() for DM_TARGET failed",
		      __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_ACTION=%s", dm_action)) {
		DMERR("%s: add_uevent_var() for DM_ACTION failed",
		      __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_SEQNUM=%u",
			   dm_next_uevent_seq(md))) {
		DMERR("%s: add_uevent_var() for DM_SEQNUM failed",
		      __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_PATH=%s", path)) {
		DMERR("%s: add_uevent_var() for DM_PATH failed", __func__);
		goto err_add;
	}

	if (add_uevent_var(&event->ku_env, "DM_NR_VALID_PATHS=%d",
			   nr_valid_paths)) {
		DMERR("%s: add_uevent_var() for DM_NR_VALID_PATHS failed",
		      __func__);
		goto err_add;
	}

	return event;

err_add:
	dm_uevent_free(event);
err_nomem:
	return ERR_PTR(-ENOMEM);
}

/**
* dm_send_uevents - send uevents for given list
*
 * @events:	list of events to send
 * @kobj:	kobject generating event
*
*/
void dm_send_uevents(struct list_head *events, struct kobject *kobj)
{
	int r;
	struct dm_uevent *event, *next;

	list_for_each_entry_safe(event, next, events, elist) {
		list_del_init(&event->elist);

		/*
		 * When a device is being removed this copy fails and we
		 * discard these unsent events.
		 */
		if (dm_copy_name_and_uuid(event->md, event->name,
					  event->uuid)) {
			DMINFO("%s: skipping sending uevent for lost device",
			       __func__);
			goto uevent_free;
		}

		if (add_uevent_var(&event->ku_env, "DM_NAME=%s", event->name)) {
			DMERR("%s: add_uevent_var() for DM_NAME failed",
			      __func__);
			goto uevent_free;
		}

		if (add_uevent_var(&event->ku_env, "DM_UUID=%s", event->uuid)) {
			DMERR("%s: add_uevent_var() for DM_UUID failed",
			      __func__);
			goto uevent_free;
		}

		r = kobject_uevent_env(kobj, event->action, event->ku_env.envp);
		if (r)
			DMERR("%s: kobject_uevent_env failed", __func__);
uevent_free:
		dm_uevent_free(event);
	}
}
EXPORT_SYMBOL_GPL(dm_send_uevents);
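
/*
 * Example (sketch): the expected consumer is dm core's event callback.  It
 * splices the mapped device's queued events onto a private list and passes
 * them here together with the gendisk's kobject.  'md', 'uevent_lock' and
 * 'uevent_list' refer to the mapped_device and its uevent fields; this is
 * an illustrative sketch of such a caller, not a verbatim copy:
 *
 *	LIST_HEAD(uevents);
 *
 *	spin_lock_irqsave(&md->uevent_lock, flags);
 *	list_splice_init(&md->uevent_list, &uevents);
 *	spin_unlock_irqrestore(&md->uevent_lock, flags);
 *
 *	dm_send_uevents(&uevents, &disk_to_dev(dm_disk(md))->kobj);
 */
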
/**
* dm_path_uevent - called to create a new path event and queue it
*
 * @event_type:	path event type enum
 * @ti:		pointer to a dm_target
 * @path:	string containing pathname
 * @nr_valid_paths:	number of valid paths remaining
*
*/
void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti,
		    const char *path, unsigned nr_valid_paths)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	struct dm_uevent *event;

	if (event_type >= ARRAY_SIZE(_dm_uevent_type_names)) {
		DMERR("%s: Invalid event_type %d", __func__, event_type);
		return;
	}

	event = dm_build_path_uevent(md, ti,
				     _dm_uevent_type_names[event_type].action,
				     _dm_uevent_type_names[event_type].name,
				     path, nr_valid_paths);
	if (IS_ERR(event))
		return;

	dm_uevent_add(md, &event->elist);
}
EXPORT_SYMBOL_GPL(dm_path_uevent);
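
/*
 * Example (sketch): dm-multipath is the intended caller.  On a path failure
 * a target would queue an event roughly as below, where 'm' and 'pgpath'
 * stand for assumed multipath-private structures holding the dm_target, the
 * failed path's underlying device name and the count of still-valid paths:
 *
 *	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
 *		       pgpath->path.dev->name, m->nr_valid_paths);
 *
 * The event is only queued on the mapped device here; it is delivered later
 * by dm_send_uevents() from dm core's event handling.
 */
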
int dm_uevent_init(void)
{
	_dm_event_cache = KMEM_CACHE(dm_uevent, 0);
	if (!_dm_event_cache)
		return -ENOMEM;

	DMINFO("version 1.0.3");

	return 0;
}

void dm_uevent_exit(void)
{
	kmem_cache_destroy(_dm_event_cache);
}