2001-10-09 20:05:34 +04:00
/*
2008-01-30 17:00:02 +03:00
* Copyright ( C ) 2001 - 2004 Sistina Software , Inc . All rights reserved .
2018-02-27 16:13:00 +03:00
* Copyright ( C ) 2004 - 2018 Red Hat , Inc . All rights reserved .
2001-10-09 20:05:34 +04:00
*
2004-03-30 23:35:44 +04:00
* This file is part of LVM2 .
*
* This copyrighted material is made available to anyone wishing to use ,
* modify , copy , or redistribute it subject to the terms and conditions
2007-08-21 00:55:30 +04:00
* of the GNU Lesser General Public License v .2 .1 .
2004-03-30 23:35:44 +04:00
*
2007-08-21 00:55:30 +04:00
* You should have received a copy of the GNU Lesser General Public License
2004-03-30 23:35:44 +04:00
* along with this program ; if not , write to the Free Software Foundation ,
2016-01-21 13:49:46 +03:00
* Inc . , 51 Franklin Street , Fifth Floor , Boston , MA 02110 - 1301 USA
2001-10-09 20:05:34 +04:00
*/
2018-05-14 12:30:20 +03:00
# include "lib/misc/lib.h"
# include "lib/metadata/metadata.h"
# include "lib/activate/activate.h"
# include "lib/mm/memlock.h"
# include "lib/display/display.h"
2001-11-12 15:20:58 +03:00
# include "fs.h"
2018-05-14 12:30:20 +03:00
# include "lib/misc/lvm-exec.h"
# include "lib/misc/lvm-file.h"
# include "lib/misc/lvm-string.h"
# include "lib/commands/toolcontext.h"
2002-02-26 14:49:17 +03:00
# include "dev_manager.h"
2018-05-14 12:30:20 +03:00
# include "lib/datastruct/str_list.h"
# include "lib/config/config.h"
# include "lib/metadata/segtype.h"
# include "lib/misc/sharedlib.h"
# include "lib/metadata/metadata.h"
2021-03-11 22:50:39 +03:00
# include "lib/misc/lvm-signal.h"
2002-01-21 14:06:32 +03:00
# include <limits.h>
2002-02-18 18:52:48 +03:00
# include <fcntl.h>
2003-07-05 02:34:56 +04:00
# include <unistd.h>
2002-01-21 14:06:32 +03:00
2002-02-11 18:48:34 +03:00
# define _skip(fmt, args...) log_very_verbose("Skipping: " fmt , ## args)
2001-10-09 20:05:34 +04:00
2006-10-03 21:55:20 +04:00
/*
 * Append the names of all kernel modules needed to activate 'seg' to
 * 'modules'.  Recurses into stacked LVs: snapshot COWs attached to an
 * origin, the snapshot segment of a COW, and any AREA_LV sub-areas.
 *
 * Returns 1 on success, 0 on (module string) allocation failure.
 */
int list_segment_modules(struct dm_pool *mem, const struct lv_segment *seg,
			 struct dm_list *modules)
{
	unsigned int s;
	struct lv_segment *seg2, *snap_seg;
	struct dm_list *snh;

	/* Modules required by this segment's own type */
	if (seg->segtype->ops->modules_needed &&
	    !seg->segtype->ops->modules_needed(mem, seg, modules)) {
		log_error("module string allocation failed");
		return 0;
	}

	/* An origin also needs the modules of every snapshot COW hanging off it */
	if (lv_is_origin(seg->lv))
		dm_list_iterate(snh, &seg->lv->snapshot_segs)
			if (!list_lv_modules(mem,
					     dm_list_struct_base(snh,
								 struct lv_segment,
								 origin_list)->cow,
					     modules))
				return_0;

	/* A COW needs the modules of its snapshot segment */
	if (lv_is_cow(seg->lv)) {
		snap_seg = find_snapshot(seg->lv);
		if (snap_seg->segtype->ops->modules_needed &&
		    !snap_seg->segtype->ops->modules_needed(mem, snap_seg,
							    modules)) {
			log_error("snap_seg module string allocation failed");
			return 0;
		}
	}

	/* Recurse into LVs stacked beneath this segment's areas */
	for (s = 0; s < seg->area_count; s++) {
		switch (seg_type(seg, s)) {
		case AREA_LV:
			seg2 = find_seg_by_le(seg_lv(seg, s), seg_le(seg, s));
			if (seg2 && !list_segment_modules(mem, seg2, modules))
				return_0;
			break;
		case AREA_PV:
		case AREA_UNASSIGNED:
			;	/* physical/unassigned areas need no modules */
		}
	}

	return 1;
}

/*
 * Append the kernel modules needed to activate every segment of 'lv'
 * to 'modules'.  Returns 1 on success, 0 on failure.
 */
int list_lv_modules(struct dm_pool *mem, const struct logical_volume *lv,
		    struct dm_list *modules)
{
	struct lv_segment *seg;

	dm_list_iterate_items(seg, &lv->segments)
		if (!list_segment_modules(mem, seg, modules))
			return_0;

	return 1;
}
2014-09-22 17:50:07 +04:00
static int _lv_passes_volumes_filter ( struct cmd_context * cmd , const struct logical_volume * lv ,
2013-09-27 15:58:55 +04:00
const struct dm_config_node * cn , const int cfg_id )
{
const struct dm_config_value * cv ;
const char * str ;
static char config_path [ PATH_MAX ] ;
2014-09-21 03:08:43 +04:00
size_t len = strlen ( lv - > vg - > name ) ;
2013-09-27 15:58:55 +04:00
config_def_get_path ( config_path , sizeof ( config_path ) , cfg_id ) ;
log_verbose ( " %s configuration setting defined: "
2015-11-25 18:06:31 +03:00
" Checking the list to match %s. " ,
config_path , display_lvname ( lv ) ) ;
2013-09-27 15:58:55 +04:00
for ( cv = cn - > v ; cv ; cv = cv - > next ) {
if ( cv - > type = = DM_CFG_EMPTY_ARRAY )
goto out ;
if ( cv - > type ! = DM_CFG_STRING ) {
2014-09-21 00:05:29 +04:00
log_print_unless_silent ( " Ignoring invalid string in config file %s. " ,
config_path ) ;
2013-09-27 15:58:55 +04:00
continue ;
}
str = cv - > v . str ;
if ( ! * str ) {
2014-09-21 00:05:29 +04:00
log_print_unless_silent ( " Ignoring empty string in config file %s. " ,
config_path ) ;
2013-09-27 15:58:55 +04:00
continue ;
}
/* Tag? */
if ( * str = = ' @ ' ) {
str + + ;
if ( ! * str ) {
2014-09-21 00:05:29 +04:00
log_print_unless_silent ( " Ignoring empty tag in config file %s " ,
config_path ) ;
2013-09-27 15:58:55 +04:00
continue ;
}
/* If any host tag matches any LV or VG tag, activate */
if ( ! strcmp ( str , " * " ) ) {
if ( str_list_match_list ( & cmd - > tags , & lv - > tags , NULL )
| | str_list_match_list ( & cmd - > tags ,
& lv - > vg - > tags , NULL ) )
return 1 ;
2017-07-20 00:12:48 +03:00
continue ;
2013-09-27 15:58:55 +04:00
}
/* If supplied tag matches LV or VG tag, activate */
if ( str_list_match_item ( & lv - > tags , str ) | |
str_list_match_item ( & lv - > vg - > tags , str ) )
return 1 ;
2017-07-20 00:12:48 +03:00
continue ;
2013-09-27 15:58:55 +04:00
}
2014-09-21 03:08:43 +04:00
/* If supplied name is vgname[/lvname] */
if ( ( strncmp ( str , lv - > vg - > name , len ) = = 0 ) & &
( ! str [ len ] | |
( ( str [ len ] = = ' / ' ) & &
! strcmp ( str + len + 1 , lv - > name ) ) ) )
2013-09-27 15:58:55 +04:00
return 1 ;
}
out :
2015-11-25 18:06:31 +03:00
log_verbose ( " No item supplied in %s configuration setting matches %s. " ,
config_path , display_lvname ( lv ) ) ;
2013-09-27 15:58:55 +04:00
return 0 ;
}
/*
 * Check 'lv' against activation/auto_activation_volume_list.
 * When the setting is undefined, every LV may be auto-activated.
 * Returns 1 if auto-activation is allowed, 0 otherwise.
 */
int lv_passes_auto_activation_filter(struct cmd_context *cmd, struct logical_volume *lv)
{
	const struct dm_config_node *cn;

	if (!(cn = find_config_tree_array(cmd, activation_auto_activation_volume_list_CFG, NULL))) {
		log_verbose("activation/auto_activation_volume_list configuration setting "
			    "not defined: All logical volumes will be auto-activated.");
		return 1;
	}

	return _lv_passes_volumes_filter(cmd, lv, cn, activation_auto_activation_volume_list_CFG);
}
2021-03-19 13:02:21 +03:00
/*
 * Check 'lv' against activation/read_only_volume_list.
 * Returns 1 when the LV must be activated read-only, 0 otherwise
 * (including when the setting is undefined).
 */
static int _passes_readonly_filter(struct cmd_context *cmd,
				   const struct logical_volume *lv)
{
	const struct dm_config_node *cn;

	if (!(cn = find_config_tree_array(cmd, activation_read_only_volume_list_CFG, NULL)))
		return 0;

	return _lv_passes_volumes_filter(cmd, lv, cn, activation_read_only_volume_list_CFG);
}

/* Public wrapper: should 'lv' be activated read-only? */
int lv_passes_readonly_filter(const struct logical_volume *lv)
{
	return _passes_readonly_filter(lv->vg->cmd, lv);
}
2003-01-09 01:44:07 +03:00
# ifndef DEVMAPPER_SUPPORT
2014-04-29 15:22:10 +04:00
/*
 * Build without device-mapper support: the activation entry points in
 * this section are no-op stubs.  Query functions report "nothing
 * known/present" by returning 0.
 */
void set_activation(int act, int silent)
{
	static int warned = 0;	/* warn only once per process */

	if (warned || !act)
		return;

	log_warn("WARNING: Compiled without libdevmapper support. "
		 "Can't enable activation.");

	warned = 1;
}
int activation(void)
{
	return 0;
}
int library_version(char *version, size_t size)
{
	return 0;
}
int driver_version(char *version, size_t size)
{
	return 0;
}
int target_version(const char *target_name, uint32_t *maj,
		   uint32_t *min, uint32_t *patchlevel)
{
	return 0;
}
int target_present(struct cmd_context *cmd, const char *target_name,
		   int use_modprobe)
{
	return 0;
}
int lvm_dm_prefix_check(int major, int minor, const char *prefix)
{
	return 0;
}
int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, int use_layer,
	    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	return 0;
}
int lv_info_with_seg_status(struct cmd_context *cmd,
			    const struct lv_segment *lv_seg,
			    struct lv_with_info_and_seg_status *status,
			    int with_open_count, int with_read_ahead)
{
	return 0;
}
int lv_cache_status(const struct logical_volume *cache_lv,
		    struct lv_status_cache **status)
{
	return 0;
}
int lv_check_not_in_use(const struct logical_volume *lv, int error_if_used)
{
	return 0;
}
int lv_snapshot_percent(const struct logical_volume *lv, dm_percent_t *percent)
{
	return 0;
}
int lv_mirror_percent(struct cmd_context *cmd, const struct logical_volume *lv,
		      int wait, dm_percent_t *percent, uint32_t *event_nr)
{
	return 0;
}
int lv_raid_percent(const struct logical_volume *lv, dm_percent_t *percent)
{
	return 0;
}
int lv_raid_data_offset(const struct logical_volume *lv, uint64_t *data_offset)
{
	return 0;
}
int lv_raid_dev_health(const struct logical_volume *lv, char **dev_health)
{
	return 0;
}
int lv_raid_dev_count(const struct logical_volume *lv, uint32_t *dev_cnt)
{
	return 0;
}
2013-04-12 00:33:59 +04:00
/* No-devmapper stubs continued: status queries return 0 ("unknown"). */
int lv_raid_mismatch_count(const struct logical_volume *lv, uint64_t *cnt)
{
	return 0;
}
int lv_raid_sync_action(const struct logical_volume *lv, char **sync_action)
{
	return 0;
}
int lv_raid_message(const struct logical_volume *lv, const char *msg)
{
	return 0;
}
int lv_raid_status(const struct logical_volume *lv, struct lv_status_raid **status)
{
	return 0;
}
int lv_writecache_message(const struct logical_volume *lv, const char *msg)
{
	return 0;
}
int lv_thin_pool_status(const struct logical_volume *lv, int flush,
			struct lv_status_thin_pool **thin_pool_status)
{
	return 0;
}
int lv_thin_status(const struct logical_volume *lv, int flush,
		   struct lv_status_thin **thin_status)
{
	return 0;
}
int lv_thin_device_id(const struct logical_volume *lv, uint32_t *device_id)
{
	return 0;
}
int lv_vdo_pool_status(const struct logical_volume *lv, int flush,
		       struct lv_status_vdo **vdo_status)
{
	return 0;
}
int lv_vdo_pool_percent(const struct logical_volume *lv, dm_percent_t *percent)
{
	return 0;
}
int lv_vdo_pool_size_config(const struct logical_volume *lv,
			    struct vdo_pool_size_config *cfg)
{
	return 0;
}
int lvs_in_vg_activated(const struct volume_group *vg)
{
	return 0;
}
int lvs_in_vg_opened(const struct volume_group *vg)
{
	return 0;
}
/* Suspend/resume/(de)activation stubs succeed trivially (return 1). */
int lv_suspend_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only, unsigned exclusive,
			 const struct logical_volume *lv, const struct logical_volume *lv_pre)
{
	return 1;
}
int lv_resume(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only, const struct logical_volume *lv)
{
	return 1;
}
int lv_resume_if_active(struct cmd_context *cmd, const char *lvid_s, unsigned origin_only,
			unsigned exclusive, unsigned revert, const struct logical_volume *lv)
{
	return 1;
}
int lv_deactivate(struct cmd_context *cmd, const char *lvid_s, const struct logical_volume *lv)
{
	return 1;
}
int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
			 int *activate_lv, const struct logical_volume *lv)
{
	return 1;
}
int lv_activate(struct cmd_context *cmd, const char *lvid_s, int exclusive, int noscan,
		int temporary, const struct logical_volume *lv)
{
	return 1;
}
int lv_activate_with_filter(struct cmd_context *cmd, const char *lvid_s, int exclusive,
			    int noscan, int temporary, const struct logical_volume *lv)
{
	return 1;
}
2003-11-12 22:16:48 +03:00
/* Remaining no-devmapper stubs. */
int lv_mknodes(struct cmd_context *cmd, const struct logical_volume *lv)
{
	return 1;
}
int lv_deactivate_any_missing_subdevs(const struct logical_volume *lv)
{
	return 1;
}
int pv_uses_vg(struct physical_volume *pv,
	       struct volume_group *vg)
{
	return 0;
}
void activation_release(void)
{
}
void activation_exit(void)
{
}

int raid4_is_supported(struct cmd_context *cmd, const struct segment_type *segtype)
{
	return 1;
}
int lv_is_active(const struct logical_volume *lv)
{
	return 0;
}
int lv_check_transient(struct logical_volume *lv)
{
	return 1;
}
int monitor_dev_for_events(struct cmd_context *cmd, const struct logical_volume *lv,
			   const struct lv_activate_opts *laopts, int monitor)
{
	return 1;
}
/* fs.c */
void fs_unlock(void)
{
}
/* dev_manager.c */
#include "lib/activate/targets.h"
int add_areas_line(struct dev_manager *dm, struct lv_segment *seg,
		   struct dm_tree_node *node, uint32_t start_area,
		   uint32_t areas)
{
	return 0;
}
int device_is_usable(struct cmd_context *cmd, struct device *dev, struct dev_usable_check_params check, int *is_lv)
{
	return 0;
}
int lv_has_target_type(struct dm_pool *mem, const struct logical_volume *lv,
		       const char *layer, const char *target_type)
{
	return 0;
}
2003-01-09 01:44:07 +03:00
# else /* DEVMAPPER_SUPPORT */
2002-11-18 17:01:16 +03:00
static int _activation = 1 ;
2014-04-18 05:46:34 +04:00
void set_activation ( int act , int silent )
2002-11-18 17:01:16 +03:00
{
2002-12-20 02:25:55 +03:00
if ( act = = _activation )
2002-11-18 17:01:16 +03:00
return ;
2002-12-20 02:25:55 +03:00
_activation = act ;
2002-11-18 17:01:16 +03:00
if ( _activation )
log_verbose ( " Activation enabled. Device-mapper kernel "
" driver will be used. " ) ;
2014-04-18 05:46:34 +04:00
else if ( ! silent )
2007-06-28 21:33:44 +04:00
log_warn ( " WARNING: Activation disabled. No device-mapper "
2005-12-22 19:13:38 +03:00
" interaction will be attempted. " ) ;
2014-04-18 05:46:34 +04:00
else
log_verbose ( " Activation disabled. No device-mapper "
" interaction will be attempted. " ) ;
2002-11-18 17:01:16 +03:00
}
2002-12-20 02:25:55 +03:00
/* Returns nonzero when activation (device-mapper use) is enabled. */
int activation(void)
{
	return _activation;
}
2012-01-12 05:51:56 +04:00
/*
 * Check 'lv' against activation/volume_list.  When the setting is
 * undefined, only host tags are consulted: no host tags at all means
 * activate everything; otherwise any host tag matching an LV or VG
 * tag activates.  Returns 1 to activate, 0 to skip.
 */
static int _passes_activation_filter(struct cmd_context *cmd,
				     const struct logical_volume *lv)
{
	const struct dm_config_node *cn;

	if (!(cn = find_config_tree_array(cmd, activation_volume_list_CFG, NULL))) {
		log_verbose("activation/volume_list configuration setting "
			    "not defined: Checking only host tags for %s.",
			    display_lvname(lv));

		/* If no host tags defined, activate */
		if (dm_list_empty(&cmd->tags))
			return 1;

		/* If any host tag matches any LV or VG tag, activate */
		if (str_list_match_list(&cmd->tags, &lv->tags, NULL) ||
		    str_list_match_list(&cmd->tags, &lv->vg->tags, NULL))
			return 1;

		log_verbose("No host tag matches %s", display_lvname(lv));

		/* Don't activate */
		return 0;
	}

	return _lv_passes_volumes_filter(cmd, lv, cn, activation_volume_list_CFG);
}
2002-01-17 19:39:24 +03:00
/* Copy the libdevmapper version into 'version'; 0 when activation is off. */
int library_version(char *version, size_t size)
{
	if (!activation())
		return 0;

	return dm_get_library_version(version, size);
}

/*
 * Copy the kernel device-mapper driver version into 'version'.
 * The first successful lookup is cached in a static buffer,
 * so the kernel is only queried once per process.
 */
int driver_version(char *version, size_t size)
{
	static char _vsn[80] = { 0 };	/* cached result of first query */

	if (!activation())
		return 0;

	log_very_verbose("Getting driver version");

	if (!_vsn[0] &&
	    !dm_driver_version(_vsn, sizeof(_vsn)))
		return_0;

	(void) dm_strncpy(version, _vsn, size);

	return 1;
}
2005-12-20 00:01:39 +03:00
/*
 * Look up the version of kernel target 'target_name' via the
 * DM_DEVICE_LIST_VERSIONS ioctl.  On success returns 1 and fills in
 * maj/min/patchlevel.  If the ioctl itself fails (old kernel without
 * LIST_VERSIONS) the version is reported as 0.0.0 with success.
 */
int target_version(const char *target_name, uint32_t *maj,
		   uint32_t *min, uint32_t *patchlevel)
{
	int r = 0;
	struct dm_task *dmt;
	struct dm_versions *target, *last_target;

	log_very_verbose("Getting target version for %s", target_name);
	if (!(dmt = dm_task_create(DM_DEVICE_LIST_VERSIONS)))
		return_0;

	if (activation_checks() && !dm_task_enable_checks(dmt))
		goto_out;

	if (!dm_task_run(dmt)) {
		log_debug_activation("Failed to get %s target version", target_name);
		/* Assume this was because LIST_VERSIONS isn't supported */
		*maj = 0;
		*min = 0;
		*patchlevel = 0;
		r = 1;
		goto out;
	}

	target = dm_task_get_versions(dmt);

	do {
		last_target = target;

		if (!strcmp(target_name, target->name)) {
			r = 1;
			*maj = target->version[0];
			*min = target->version[1];
			*patchlevel = target->version[2];
			goto out;
		}

		/* Entries are variable-length; 'next' is a byte offset.
		 * The list terminates when the offset no longer advances. */
		target = (struct dm_versions *)((char *) target + target->next);
	} while (last_target != target);

      out:
	if (r)
		log_very_verbose("Found %s target "
				 "v%" PRIu32 ".%" PRIu32 ".%" PRIu32 ".",
				 target_name, *maj, *min, *patchlevel);

	dm_task_destroy(dmt);

	return r;
}
2011-11-11 20:41:37 +04:00
/* Returns 1 if the DM device (major:minor) has a name starting with 'prefix'. */
int lvm_dm_prefix_check(int major, int minor, const char *prefix)
{
	return dev_manager_check_prefix_dm_major_minor(major, minor, prefix);
}
2024-02-08 16:58:32 +03:00
/* Search modules.builtin file for built-in kernel module */
static int _check_modules_builtin ( struct cmd_context * cmd , const char * target )
{
FILE * fp ;
char * line = NULL ;
size_t len ;
int r = 0 ;
char path [ PATH_MAX ] ;
if ( dm_snprintf ( path , sizeof ( path ) , " %s/%s/modules.builtin " ,
MODULES_PATH , cmd - > kernel_vsn ) < 0 ) {
log_debug ( " Modules path %s/%s/modules.builtin is too long. " ,
MODULES_PATH , cmd - > kernel_vsn ) ;
return 0 ;
}
if ( ! ( fp = fopen ( path , " r " ) ) ) {
if ( errno ! = ENOENT )
log_sys_debug ( " fopen " , path ) ;
return 0 ;
}
while ( getline ( & line , & len , fp ) > 0 )
if ( strstr ( line , target ) ) {
log_debug ( " Found %s as built-in kernel module. " , target ) ;
r = 1 ;
break ;
}
free ( line ) ;
if ( fclose ( fp ) )
log_sys_debug ( " fclose " , path ) ;
return r ;
}
2009-02-28 03:54:06 +03:00
/*
 * Check whether kernel module dm-<target_name> is available: first via
 * sysfs (already loaded, or listed as built-in), then by attempting
 * modprobe.  Returns 1 when present/loadable, 0 otherwise.
 */
int module_present(struct cmd_context *cmd, const char *target_name)
{
	int ret = 0;
#ifdef MODPROBE_CMD
	char module[128];
	const char *argv[] = { MODPROBE_CMD, module, NULL };
#endif
	struct stat st;
	char path[PATH_MAX];
	int i = dm_snprintf(path, sizeof(path), "%smodule/dm_%s",
			    dm_sysfs_dir(), target_name);

	if (i > 0) {
		/* Walk back to the last '/', rewriting '-' to '_' inside
		 * the module name (sysfs uses underscores). */
		while ((i > 0) && path[--i] != '/')	/* stop on dm_ */
			if (path[i] == '-')
				path[i] = '_'; /* replace '-' with '_' */

		if ((lstat(path, &st) == 0) && S_ISDIR(st.st_mode)) {
			log_debug_activation("Module directory %s exists.", path);
			return 1;
		}

		/* Not loaded: check modules.builtin for "dm_<name>" */
		if (path[i] == '/' && _check_modules_builtin(cmd, path + i + 1))
			return 1;
	}

#ifdef MODPROBE_CMD
	if (strcmp(target_name, TARGET_NAME_VDO) == 0)
		argv[1] = MODULE_NAME_VDO; /* ATM kvdo is without dm- prefix */
	else if (dm_snprintf(module, sizeof(module), "dm-%s", target_name) < 0) {
		log_error("module_present module name too long: %s",
			  target_name);
		return 0;
	}

	ret = exec_cmd(cmd, argv, NULL, 0);
#endif
	return ret;
}
2016-04-27 12:11:58 +03:00
/*
 * Like target_version(), optionally trying to load the kernel module
 * via modprobe first when 'use_modprobe' is set.  Must only be called
 * while activation is enabled.
 */
int target_present_version(struct cmd_context *cmd, const char *target_name,
			   int use_modprobe,
			   uint32_t *maj, uint32_t *min, uint32_t *patchlevel)
{
	if (!activation()) {
		log_error(INTERNAL_ERROR "Target present version called when activation is disabled.");
		return 0;
	}
#ifdef MODPROBE_CMD
	if (use_modprobe) {
		/* Fast path: target already registered with the kernel */
		if (target_version(target_name, maj, min, patchlevel))
			return 1;

		if (!module_present(cmd, target_name))
			return_0;
	}
#endif
	return target_version(target_name, maj, min, patchlevel);
}

/* Returns 1 when the kernel target is present (version discarded). */
int target_present(struct cmd_context *cmd, const char *target_name,
		   int use_modprobe)
{
	uint32_t maj, min, patchlevel;

	return target_present_version(cmd, target_name, use_modprobe,
				      &maj, &min, &patchlevel);
}
2021-12-15 13:24:31 +03:00
/*
 * Obtain the device-mapper device list in '*devs'; '*devs_features'
 * reports optional capabilities of the listing.  'vg' is currently
 * unused (NULL passed through).  Returns 0 when activation is off.
 */
int get_device_list(const struct volume_group *vg, struct dm_list **devs,
		    unsigned *devs_features)
{
	if (!activation())
		return 0;

	return dev_manager_get_device_list(NULL, devs, devs_features);
}
2016-12-01 16:53:35 +03:00
/*
 * When '*info' is NULL, returns 1 only when LV is active.
 * When '*info' != NULL, returns 1 when info structure is populated.
 *
 * 'use_layer' queries the layered device (e.g. -real/-tpool) instead of
 * the LV itself; 'seg'/'seg_status' optionally request segment status.
 */
static int _lv_info(struct cmd_context *cmd, const struct logical_volume *lv,
		    int use_layer, struct lvinfo *info,
		    const struct lv_segment *seg,
		    struct lv_seg_status *seg_status,
		    int with_open_count, int with_read_ahead, int with_name_check)
{
	struct dm_info dminfo;

	/*
	 * If open_count info is requested and we have to be sure our own udev
	 * transactions are finished
	 * For non-clustered locking type we are only interested for non-delete operation
	 * in progress - as only those could lead to opened files
	 */
	if (with_open_count) {
		if (fs_has_non_delete_ops())
			fs_unlock(); /* For non clustered - wait if there are non-delete ops */
	}

	/* New thin-pool has no layer, but -tpool suffix needs to be queried */
	if (!use_layer && lv_is_new_thin_pool(lv)) {
		/* Check if there isn't existing old thin pool mapping in the table */
		if (!dev_manager_info(cmd, lv, NULL, 0, 0, 0, &dminfo, NULL, NULL))
			return_0;
		if (!dminfo.exists)
			use_layer = 1;
	}

	if (seg_status) {
		/* TODO: for now it's mess with seg_status */
		seg_status->seg = seg;
	}

	if (!dev_manager_info(cmd, lv,
			      (use_layer) ? lv_layer(lv) : NULL,
			      with_open_count, with_read_ahead, with_name_check,
			      &dminfo,
			      (info) ? &info->read_ahead : NULL,
			      seg_status))
		return_0;

	/* Caller only wanted an active/inactive answer */
	if (!info)
		return dminfo.exists;

	info->exists = dminfo.exists;
	info->suspended = dminfo.suspended;
	info->open_count = dminfo.open_count;
	info->major = dminfo.major;
	info->minor = dminfo.minor;
	info->read_only = dminfo.read_only;
	info->live_table = dminfo.live_table;
	info->inactive_table = dminfo.inactive_table;

	return 1;
}
2002-02-11 20:42:02 +03:00
2014-11-04 17:00:32 +03:00
/*
 * Returns 1 if info structure populated, else 0 on failure.
 * When lvinfo* is NULL, it returns 1 if the device is locally active, 0 otherwise.
 */
int lv_info(struct cmd_context *cmd, const struct logical_volume *lv, int use_layer,
	    struct lvinfo *info, int with_open_count, int with_read_ahead)
{
	if (!activation())
		return 0;

	return _lv_info(cmd, lv, use_layer, info, NULL, NULL, with_open_count, with_read_ahead, 0);
}

/* As lv_info(), additionally validating the active device's name
 * (no open_count/read_ahead collection). */
int lv_info_with_name_check(struct cmd_context *cmd, const struct logical_volume *lv,
			    int use_layer, struct lvinfo *info)
{
	if (!activation())
		return 0;

	return _lv_info(cmd, lv, use_layer, info, NULL, NULL, 0, 0, 1);
}
2014-11-13 13:41:49 +03:00
/*
 * Returns 1 if lv_with_info_and_seg_status info structure populated,
 * else 0 on failure or if device not active locally.
 *
 * When seg_status parsing had troubles it will set type to SEG_STATUS_UNKNOWN.
 *
 * Using usually one ioctl to obtain info and status.
 * More complex segment do collect info from one device,
 * but status from another device.
 *
 * TODO: further improve with more statuses (i.e. snapshot's origin/merge)
 */
int lv_info_with_seg_status(struct cmd_context *cmd,
			    const struct lv_segment *lv_seg,
			    struct lv_with_info_and_seg_status *status,
			    int with_open_count, int with_read_ahead)
{
	const struct logical_volume *olv, *lv = status->lv = lv_seg->lv;

	if (!activation())
		return 0;

	if (lv_is_used_cache_pool(lv)) {
		/* INFO is not set as cache-pool cannot be active.
		 * STATUS is collected from cache LV */
		if (!(lv_seg = get_only_segment_using_this_lv(lv)))
			return_0;
		(void) _lv_info(cmd, lv_seg->lv, 1, NULL, lv_seg, &status->seg_status, 0, 0, 0);
		return 1;
	}

	if (lv_is_thin_pool(lv)) {
		/* Always collect status for '-tpool' */
		if (_lv_info(cmd, lv, 1, &status->info, lv_seg, &status->seg_status, 0, 0, 0) &&
		    (status->seg_status.type == SEG_STATUS_THIN_POOL)) {
			/* There is -tpool device, but query 'active' state of 'fake' thin-pool */
			if (!_lv_info(cmd, lv, 0, NULL, NULL, NULL, 0, 0, 0) &&
			    !status->seg_status.thin_pool->needs_check)
				status->info.exists = 0; /* So pool LV is not active */
		}

		return 1;
	}

	if (lv_is_external_origin(lv)) {
		if (!_lv_info(cmd, lv, 0, &status->info, NULL, NULL,
			      with_open_count, with_read_ahead, 0))
			return_0;

		(void) _lv_info(cmd, lv, 1, NULL, lv_seg, &status->seg_status, 0, 0, 0);
		return 1;
	}

	if (lv_is_origin(lv)) {
		/* Query segment status for 'layered' (-real) device most of the time,
		 * only for merging snapshot, query its progress.
		 * TODO: single LV may need couple status to be exposed at once....
		 *       but this needs more logical background
		 */
		/* Show INFO for actual origin and grab status for merging origin */
		if (!_lv_info(cmd, lv, 0, &status->info, lv_seg,
			      lv_is_merging_origin(lv) ? &status->seg_status : NULL,
			      with_open_count, with_read_ahead, 0))
			return_0;

		if (status->info.exists &&
		    (status->seg_status.type != SEG_STATUS_SNAPSHOT)) /* Not merging */
			/* Grab STATUS from layered -real */
			(void) _lv_info(cmd, lv, 1, NULL, lv_seg, &status->seg_status, 0, 0, 0);

		return 1;
	}

	if (lv_is_cow(lv)) {
		if (lv_is_merging_cow(lv)) {
			olv = origin_from_cow(lv);

			if (!_lv_info(cmd, olv, 0, &status->info, first_seg(olv), &status->seg_status,
				      with_open_count, with_read_ahead, 0))
				return_0;

			if (status->seg_status.type == SEG_STATUS_SNAPSHOT ||
			    (lv_is_thin_volume(olv) && (status->seg_status.type == SEG_STATUS_THIN))) {
				log_debug_activation("Snapshot merge is in progress, querying status of %s instead.",
						     display_lvname(lv));
				/*
				 * When merge is in progress, query merging origin LV instead.
				 * COW volume is already mapped as error target in this case.
				 */
				return 1;
			}

			/* Merge not yet started, still a snapshot... */
		}

		/* Handle fictional lvm2 snapshot and query snapshotX volume */
		lv_seg = find_snapshot(lv);
	}

	if (lv_is_vdo(lv)) {
		if (!_lv_info(cmd, lv, 0, &status->info, NULL, NULL,
			      with_open_count, with_read_ahead, 0))
			return_0;

		if (status->info.exists) {
			/* Status for VDO pool */
			(void) _lv_info(cmd, seg_lv(lv_seg, 0), 1, NULL,
					first_seg(seg_lv(lv_seg, 0)),
					&status->seg_status, 0, 0, 0);
			/* Use VDO pool segtype result for VDO segtype */
			status->seg_status.seg = lv_seg;
		}

		return 1;
	}

	if (lv_is_vdo_pool(lv)) {
		/* Always collect status for '-vpool' */
		if (_lv_info(cmd, lv, 1, &status->info, lv_seg, &status->seg_status, 0, 0, 0) &&
		    (status->seg_status.type == SEG_STATUS_VDO_POOL)) {
			/* There is -tpool device, but query 'active' state of 'fake' vdo-pool */
			if (!_lv_info(cmd, lv, 0, NULL, NULL, NULL, 0, 0, 0))
				status->info.exists = 0; /* So VDO pool LV is not active */
		}

		return 1;
	}

	return _lv_info(cmd, lv, 0, &status->info, lv_seg, &status->seg_status,
			with_open_count, with_read_ahead, 0);
}
2013-10-15 14:44:42 +04:00
# define OPEN_COUNT_CHECK_RETRIES 25
# define OPEN_COUNT_CHECK_USLEEP_DELAY 200000
2016-04-22 00:14:10 +03:00
/* Only report error if error_if_used is set */
2021-03-07 03:54:50 +03:00
/* Returns 0 if in use, 1 if it is unused, 2 when it is not present in table */
2016-04-22 00:14:10 +03:00
int lv_check_not_in_use ( const struct logical_volume * lv , int error_if_used )
2011-09-22 21:33:50 +04:00
{
2014-09-24 12:05:26 +04:00
struct lvinfo info ;
2013-10-15 14:44:42 +04:00
unsigned int open_count_check_retries ;
2021-03-10 16:05:03 +03:00
if ( ! lv_info ( lv - > vg - > cmd , lv , 0 , & info , 1 , 0 ) | | ! info . exists )
return 2 ;
else if ( ! info . open_count )
return 1 ;
2011-09-22 21:33:50 +04:00
/* If sysfs is not used, use open_count information only. */
2013-10-15 14:44:42 +04:00
if ( dm_sysfs_dir ( ) ) {
2014-09-24 12:05:26 +04:00
if ( dm_device_has_holders ( info . major , info . minor ) ) {
2016-04-22 00:14:10 +03:00
if ( error_if_used )
log_error ( " Logical volume %s is used by another device. " ,
display_lvname ( lv ) ) ;
else
log_debug_activation ( " Logical volume %s is used by another device. " ,
display_lvname ( lv ) ) ;
2011-09-26 14:17:51 +04:00
return 0 ;
}
2014-09-24 12:05:26 +04:00
if ( dm_device_has_mounted_fs ( info . major , info . minor ) ) {
2016-04-22 00:14:10 +03:00
if ( error_if_used )
log_error ( " Logical volume %s contains a filesystem in use. " ,
display_lvname ( lv ) ) ;
else
log_debug_activation ( " Logical volume %s contains a filesystem in use. " ,
display_lvname ( lv ) ) ;
2013-10-15 14:44:42 +04:00
return 0 ;
}
2011-09-22 21:33:50 +04:00
}
2013-10-15 14:44:42 +04:00
open_count_check_retries = retry_deactivation ( ) ? OPEN_COUNT_CHECK_RETRIES : 1 ;
2021-03-11 22:50:39 +03:00
while ( open_count_check_retries - - ) {
if ( interruptible_usleep ( OPEN_COUNT_CHECK_USLEEP_DELAY ) )
break ; /* interrupted */
2014-05-27 19:07:04 +04:00
2015-11-25 18:06:31 +03:00
log_debug_activation ( " Retrying open_count check for %s. " ,
display_lvname ( lv ) ) ;
2021-03-11 22:50:39 +03:00
if ( ! lv_info ( lv - > vg - > cmd , lv , 0 , & info , 1 , 0 ) | | ! info . exists ) {
2014-05-27 19:07:04 +04:00
stack ; /* device dissappeared? */
2021-03-11 22:50:39 +03:00
return 1 ;
} else if ( ! info . open_count )
return 1 ;
2011-09-22 21:33:50 +04:00
}
2021-03-11 22:50:39 +03:00
if ( error_if_used )
log_error ( " Logical volume %s in use. " , display_lvname ( lv ) ) ;
else
log_debug_activation ( " Logical volume %s in use. " , display_lvname ( lv ) ) ;
return 0 ;
2011-09-22 21:33:50 +04:00
}
2010-05-24 19:32:20 +04:00
/*
* Returns 1 if percent set , else 0 on failure .
*/
int lv_check_transient ( struct logical_volume * lv )
{
int r ;
struct dev_manager * dm ;
if ( ! activation ( ) )
return 0 ;
2015-11-25 18:06:31 +03:00
log_debug_activation ( " Checking transient status for LV %s. " ,
display_lvname ( lv ) ) ;
2011-06-14 02:28:04 +04:00
2011-06-11 04:03:06 +04:00
if ( ! ( dm = dev_manager_create ( lv - > vg - > cmd , lv - > vg - > name , 1 ) ) )
2010-05-24 19:32:20 +04:00
return_0 ;
if ( ! ( r = dev_manager_transient ( dm , lv ) ) )
stack ;
dev_manager_destroy ( dm ) ;
return r ;
}
2002-05-10 01:17:57 +04:00
/*
* Returns 1 if percent set , else 0 on failure .
*/
2014-06-09 14:08:27 +04:00
int lv_snapshot_percent ( const struct logical_volume * lv , dm_percent_t * percent )
2002-05-10 01:17:57 +04:00
{
int r ;
struct dev_manager * dm ;
2013-09-20 00:18:16 +04:00
if ( ! lv_info ( lv - > vg - > cmd , lv , 0 , NULL , 0 , 0 ) )
2002-11-18 17:01:16 +03:00
return 0 ;
2015-11-25 18:06:31 +03:00
log_debug_activation ( " Checking snapshot percent for LV %s. " ,
display_lvname ( lv ) ) ;
2011-06-14 02:28:04 +04:00
2011-06-11 04:03:06 +04:00
if ( ! ( dm = dev_manager_create ( lv - > vg - > cmd , lv - > vg - > name , 1 ) ) )
2005-11-09 01:52:26 +03:00
return_0 ;
2002-05-10 01:17:57 +04:00
2010-11-30 14:53:31 +03:00
if ( ! ( r = dev_manager_snapshot_percent ( dm , lv , percent ) ) )
2002-05-10 01:17:57 +04:00
stack ;
2002-05-22 18:03:45 +04:00
2002-05-10 01:17:57 +04:00
dev_manager_destroy ( dm ) ;
return r ;
}
2003-04-30 19:26:25 +04:00
/* FIXME Merge with snapshot_percent */
2011-02-18 17:47:28 +03:00
int lv_mirror_percent ( struct cmd_context * cmd , const struct logical_volume * lv ,
2014-06-09 14:08:27 +04:00
int wait , dm_percent_t * percent , uint32_t * event_nr )
2003-04-30 19:26:25 +04:00
{
int r ;
struct dev_manager * dm ;
2008-01-16 22:18:51 +03:00
/* If mirrored LV is temporarily shrinked to 1 area (= linear),
* it should be considered in - sync . */
2008-11-04 01:14:30 +03:00
if ( dm_list_size ( & lv - > segments ) = = 1 & & first_seg ( lv ) - > area_count = = 1 ) {
2014-06-09 14:08:27 +04:00
* percent = DM_PERCENT_100 ;
2008-01-16 22:18:51 +03:00
return 1 ;
}
2013-09-20 00:18:16 +04:00
if ( ! lv_info ( cmd , lv , 0 , NULL , 0 , 0 ) )
2003-04-30 19:26:25 +04:00
return 0 ;
2015-11-25 18:06:31 +03:00
log_debug_activation ( " Checking mirror percent for LV %s. " ,
display_lvname ( lv ) ) ;
2011-06-14 02:28:04 +04:00
2011-06-11 04:03:06 +04:00
if ( ! ( dm = dev_manager_create ( lv - > vg - > cmd , lv - > vg - > name , 1 ) ) )
2005-11-09 01:52:26 +03:00
return_0 ;
2003-04-30 19:26:25 +04:00
2010-11-30 14:53:31 +03:00
if ( ! ( r = dev_manager_mirror_percent ( dm , lv , wait , percent , event_nr ) ) )
2003-04-30 19:26:25 +04:00
stack ;
dev_manager_destroy ( dm ) ;
return r ;
}
2014-06-09 14:08:27 +04:00
int lv_raid_percent ( const struct logical_volume * lv , dm_percent_t * percent )
2011-08-11 22:24:40 +04:00
{
return lv_mirror_percent ( lv - > vg - > cmd , lv , 0 , percent , NULL ) ;
}
2017-02-24 02:50:00 +03:00
int lv_raid_data_offset ( const struct logical_volume * lv , uint64_t * data_offset )
{
2021-03-17 11:50:09 +03:00
struct lv_status_raid * raid_status ;
2017-02-24 02:50:00 +03:00
if ( ! lv_info ( lv - > vg - > cmd , lv , 0 , NULL , 0 , 0 ) )
return 0 ;
log_debug_activation ( " Checking raid data offset and dev sectors for LV %s/%s " ,
lv - > vg - > name , lv - > name ) ;
2021-03-17 11:50:09 +03:00
if ( ! lv_raid_status ( lv , & raid_status ) )
return_0 ;
2017-02-24 02:50:00 +03:00
2021-03-17 11:50:09 +03:00
* data_offset = raid_status - > raid - > data_offset ;
2017-02-24 02:50:00 +03:00
2021-03-17 11:50:09 +03:00
dm_pool_destroy ( raid_status - > mem ) ;
2017-02-24 02:50:00 +03:00
2021-03-17 11:50:09 +03:00
return 1 ;
2017-02-24 02:50:00 +03:00
}
2013-02-01 21:31:47 +04:00
int lv_raid_dev_health ( const struct logical_volume * lv , char * * dev_health )
{
2021-03-17 11:50:09 +03:00
int r = 1 ;
struct lv_status_raid * raid_status ;
2013-02-01 21:32:18 +04:00
2013-02-01 21:31:47 +04:00
* dev_health = NULL ;
2013-09-20 00:18:16 +04:00
if ( ! lv_info ( lv - > vg - > cmd , lv , 0 , NULL , 0 , 0 ) )
return 0 ;
2013-02-01 21:31:47 +04:00
2015-11-25 18:06:31 +03:00
log_debug_activation ( " Checking raid device health for LV %s. " ,
display_lvname ( lv ) ) ;
2013-02-01 21:31:47 +04:00
2021-03-17 11:50:09 +03:00
if ( ! lv_raid_status ( lv , & raid_status ) )
return_0 ;
2013-02-01 21:31:47 +04:00
2021-03-17 11:50:09 +03:00
if ( ! ( * dev_health = dm_pool_strdup ( lv - > vg - > cmd - > mem ,
raid_status - > raid - > dev_health ) ) ) {
stack ;
r = 0 ;
2013-04-09 00:04:08 +04:00
}
2013-02-01 21:31:47 +04:00
2021-03-17 11:50:09 +03:00
dm_pool_destroy ( raid_status - > mem ) ;
2013-02-01 21:31:47 +04:00
return r ;
}
2017-02-24 02:50:00 +03:00
int lv_raid_dev_count ( const struct logical_volume * lv , uint32_t * dev_cnt )
{
2021-03-17 11:50:09 +03:00
struct lv_status_raid * raid_status ;
2017-02-24 02:50:00 +03:00
* dev_cnt = 0 ;
if ( ! lv_info ( lv - > vg - > cmd , lv , 0 , NULL , 0 , 0 ) )
return 0 ;
log_debug_activation ( " Checking raid device count for LV %s/%s " ,
lv - > vg - > name , lv - > name ) ;
2021-03-17 11:50:09 +03:00
if ( ! lv_raid_status ( lv , & raid_status ) )
2017-02-24 02:50:00 +03:00
return_0 ;
2021-03-17 11:50:09 +03:00
* dev_cnt = raid_status - > raid - > dev_count ;
dm_pool_destroy ( raid_status - > mem ) ;
2017-02-24 02:50:00 +03:00
return 1 ;
}
2013-04-12 00:33:59 +04:00
int lv_raid_mismatch_count ( const struct logical_volume * lv , uint64_t * cnt )
{
2021-03-17 11:50:09 +03:00
struct lv_status_raid * raid_status ;
2013-04-12 00:33:59 +04:00
* cnt = 0 ;
2013-09-20 00:18:16 +04:00
if ( ! lv_info ( lv - > vg - > cmd , lv , 0 , NULL , 0 , 0 ) )
2013-04-12 00:33:59 +04:00
return 0 ;
2015-11-25 18:06:31 +03:00
log_debug_activation ( " Checking raid mismatch count for LV %s. " ,
display_lvname ( lv ) ) ;
2013-04-12 00:33:59 +04:00
2021-03-17 11:50:09 +03:00
if ( ! lv_raid_status ( lv , & raid_status ) )
2013-04-12 00:33:59 +04:00
return_0 ;
2021-03-17 11:50:09 +03:00
* cnt = raid_status - > raid - > mismatch_count ;
2013-04-12 00:33:59 +04:00
2021-03-17 11:50:09 +03:00
dm_pool_destroy ( raid_status - > mem ) ;
2013-04-12 00:33:59 +04:00
return 1 ;
}
int lv_raid_sync_action ( const struct logical_volume * lv , char * * sync_action )
{
2021-03-17 11:50:09 +03:00
struct lv_status_raid * raid_status ;
int r = 1 ;
2013-04-12 00:33:59 +04:00
* sync_action = NULL ;
2013-09-20 00:18:16 +04:00
if ( ! lv_info ( lv - > vg - > cmd , lv , 0 , NULL , 0 , 0 ) )
2013-04-12 00:33:59 +04:00
return 0 ;
2015-11-25 18:06:31 +03:00
log_debug_activation ( " Checking raid sync_action for LV %s. " ,
display_lvname ( lv ) ) ;
2013-04-12 00:33:59 +04:00
2021-03-17 11:50:09 +03:00
if ( ! lv_raid_status ( lv , & raid_status ) )
2013-04-12 00:33:59 +04:00
return_0 ;
2013-07-19 19:01:48 +04:00
/* status->sync_action can be NULL if dm-raid version < 1.5.0 */
2021-03-17 11:50:09 +03:00
if ( ! raid_status - > raid - > sync_action | |
! ( * sync_action = dm_pool_strdup ( lv - > vg - > cmd - > mem ,
raid_status - > raid - > sync_action ) ) ) {
stack ;
r = 0 ;
2013-04-12 00:33:59 +04:00
}
2021-03-17 11:50:09 +03:00
dm_pool_destroy ( raid_status - > mem ) ;
2013-04-12 00:33:59 +04:00
2021-03-17 11:50:09 +03:00
return r ;
2013-04-12 00:33:59 +04:00
}
int lv_raid_message ( const struct logical_volume * lv , const char * msg )
{
2021-03-17 11:50:09 +03:00
struct lv_status_raid * raid_status ;
struct dev_manager * dm = NULL ;
2013-04-12 00:33:59 +04:00
int r = 0 ;
2013-09-20 07:33:01 +04:00
if ( ! seg_is_raid ( first_seg ( lv ) ) ) {
2013-10-15 00:14:16 +04:00
/*
* Make it easier for user to know what to do when
* they are using thinpool .
*/
if ( lv_is_thin_pool ( lv ) & &
( lv_is_raid ( seg_lv ( first_seg ( lv ) , 0 ) ) | |
lv_is_raid ( first_seg ( lv ) - > metadata_lv ) ) ) {
2013-11-13 17:56:29 +04:00
log_error ( " Thin pool data or metadata volume "
2015-11-25 18:06:31 +03:00
" must be specified. (E.g. \" %s_tdata \" ) " ,
display_lvname ( lv ) ) ;
2013-10-15 00:14:16 +04:00
return 0 ;
}
2015-11-25 18:06:31 +03:00
log_error ( " %s must be a RAID logical volume to perform this action. " ,
display_lvname ( lv ) ) ;
2013-09-20 07:33:01 +04:00
return 0 ;
}
2013-09-20 00:18:16 +04:00
if ( ! lv_info ( lv - > vg - > cmd , lv , 0 , NULL , 0 , 0 ) ) {
2013-04-12 00:33:59 +04:00
log_error ( " Unable to send message to an inactive logical volume. " ) ;
return 0 ;
}
2021-03-17 11:50:09 +03:00
if ( ! lv_raid_status ( lv , & raid_status ) )
2013-04-12 00:33:59 +04:00
return_0 ;
2021-03-17 11:50:09 +03:00
if ( ! raid_status - > raid - > sync_action ) {
2013-04-12 00:33:59 +04:00
log_error ( " Kernel driver does not support this action: %s " , msg ) ;
goto out ;
}
/*
* Note that ' dev_manager_raid_message ' allows us to pass down any
* currently valid message . However , this function restricts the
* number of user available combinations to a minimum . Specifically ,
* " idle " - > " check "
* " idle " - > " repair "
* ( The state automatically switches to " idle " when a sync process is
* complete . )
*/
if ( strcmp ( msg , " check " ) & & strcmp ( msg , " repair " ) ) {
/*
* MD allows " frozen " to operate in a toggling fashion .
* We could allow this if we like . . .
*/
log_error ( " \" %s \" is not a supported sync operation. " , msg ) ;
goto out ;
}
2021-03-17 11:50:09 +03:00
if ( strcmp ( raid_status - > raid - > sync_action , " idle " ) ) {
2015-11-25 18:06:31 +03:00
log_error ( " %s state is currently \" %s \" . Unable to switch to \" %s \" . " ,
2021-03-17 11:50:09 +03:00
display_lvname ( lv ) , raid_status - > raid - > sync_action , msg ) ;
2013-04-12 00:33:59 +04:00
goto out ;
}
2021-03-17 11:50:09 +03:00
if ( ! ( dm = dev_manager_create ( lv - > vg - > cmd , lv - > vg - > name , 1 ) ) )
return_0 ;
2013-04-12 00:33:59 +04:00
r = dev_manager_raid_message ( dm , lv , msg ) ;
out :
2021-03-17 11:50:09 +03:00
if ( dm )
dev_manager_destroy ( dm ) ;
dm_pool_destroy ( raid_status - > mem ) ;
2013-04-12 00:33:59 +04:00
return r ;
}
2021-03-17 11:50:09 +03:00
int lv_raid_status ( const struct logical_volume * lv , struct lv_status_raid * * status )
{
struct dev_manager * dm ;
int exists ;
if ( ! ( dm = dev_manager_create ( lv - > vg - > cmd , lv - > vg - > name , 1 ) ) )
return_0 ;
if ( ! dev_manager_raid_status ( dm , lv , status , & exists ) ) {
dev_manager_destroy ( dm ) ;
if ( exists )
stack ;
return 0 ;
}
/* User has to call dm_pool_destroy(status->mem)! */
return 1 ;
}
2018-08-27 22:53:09 +03:00
int lv_writecache_message ( const struct logical_volume * lv , const char * msg )
{
int r = 0 ;
struct dev_manager * dm ;
if ( ! lv_info ( lv - > vg - > cmd , lv , 0 , NULL , 0 , 0 ) ) {
log_error ( " Unable to send message to an inactive logical volume. " ) ;
return 0 ;
}
if ( ! ( dm = dev_manager_create ( lv - > vg - > cmd , lv - > vg - > name , 1 ) ) )
return_0 ;
r = dev_manager_writecache_message ( dm , lv , msg ) ;
dev_manager_destroy ( dm ) ;
return r ;
}
2014-11-03 14:52:29 +03:00
/*
* Return dm_status_cache for cache volume , accept also cache pool
*
* As there are too many variable for cache volumes , and it hard
* to make good API - so let ' s obtain dm_status_cache and return
* all info we have - user just has to release struct after its use .
*/
int lv_cache_status ( const struct logical_volume * cache_lv ,
struct lv_status_cache * * status )
2014-01-28 22:24:51 +04:00
{
struct dev_manager * dm ;
struct lv_segment * cache_seg ;
2021-03-17 13:20:23 +03:00
int exists ;
2014-01-28 22:24:51 +04:00
2016-05-25 17:27:12 +03:00
if ( lv_is_cache_pool ( cache_lv ) ) {
if ( dm_list_empty ( & cache_lv - > segs_using_this_lv ) | |
! ( cache_seg = get_only_segment_using_this_lv ( cache_lv ) ) ) {
log_error ( INTERNAL_ERROR " Cannot check status for unused cache pool %s. " ,
display_lvname ( cache_lv ) ) ;
return 0 ;
}
2014-01-28 22:24:51 +04:00
cache_lv = cache_seg - > lv ;
}
2016-05-25 17:27:12 +03:00
if ( lv_is_pending_delete ( cache_lv ) ) {
log_error ( " Cannot check status for deleted cache volume %s. " ,
display_lvname ( cache_lv ) ) ;
2014-11-11 13:00:35 +03:00
return 0 ;
2016-05-25 17:27:12 +03:00
}
2014-11-11 13:00:35 +03:00
2014-01-28 22:24:51 +04:00
if ( ! ( dm = dev_manager_create ( cache_lv - > vg - > cmd , cache_lv - > vg - > name , 1 ) ) )
return_0 ;
2021-03-17 13:20:23 +03:00
if ( ! dev_manager_cache_status ( dm , cache_lv , status , & exists ) ) {
2014-01-28 22:24:51 +04:00
dev_manager_destroy ( dm ) ;
2021-03-17 13:20:23 +03:00
if ( exists )
stack ;
return 0 ;
2014-01-28 22:24:51 +04:00
}
2014-11-03 14:52:29 +03:00
/* User has to call dm_pool_destroy(status->mem)! */
2014-01-28 22:24:51 +04:00
return 1 ;
}
2020-09-27 02:11:47 +03:00
int lv_thin_pool_status ( const struct logical_volume * lv , int flush ,
struct lv_status_thin_pool * * thin_pool_status )
2011-12-21 17:10:05 +04:00
{
struct dev_manager * dm ;
2021-03-17 13:20:23 +03:00
int exists ;
2011-12-21 17:10:05 +04:00
if ( ! ( dm = dev_manager_create ( lv - > vg - > cmd , lv - > vg - > name , 1 ) ) )
return_0 ;
2021-03-17 13:20:23 +03:00
if ( ! dev_manager_thin_pool_status ( dm , lv , flush , thin_pool_status , & exists ) ) {
2020-09-27 02:11:47 +03:00
dev_manager_destroy ( dm ) ;
2021-03-17 13:20:23 +03:00
if ( exists )
stack ;
return 0 ;
2020-09-27 02:11:47 +03:00
}
2011-12-21 17:10:05 +04:00
2020-09-27 02:11:47 +03:00
/* User has to call dm_pool_destroy(thin_pool_status->mem)! */
2011-12-21 17:10:05 +04:00
2020-09-27 02:11:47 +03:00
return 1 ;
2011-12-21 17:10:05 +04:00
}
2020-09-27 02:11:47 +03:00
int lv_thin_status ( const struct logical_volume * lv , int flush ,
struct lv_status_thin * * thin_status )
2012-01-19 19:27:54 +04:00
{
struct dev_manager * dm ;
2021-03-17 13:20:23 +03:00
int exists ;
2012-01-19 19:27:54 +04:00
if ( ! ( dm = dev_manager_create ( lv - > vg - > cmd , lv - > vg - > name , 1 ) ) )
return_0 ;
2021-03-17 13:20:23 +03:00
if ( ! dev_manager_thin_status ( dm , lv , flush , thin_status , & exists ) ) {
2020-09-27 02:11:47 +03:00
dev_manager_destroy ( dm ) ;
2021-03-17 13:20:23 +03:00
if ( exists )
stack ;
return 0 ;
2020-09-27 02:11:47 +03:00
}
2012-01-25 12:48:42 +04:00
2020-09-27 02:11:47 +03:00
/* User has to call dm_pool_destroy(thin_status->mem)! */
2012-01-25 12:48:42 +04:00
2020-09-27 02:11:47 +03:00
return 1 ;
2012-01-25 12:48:42 +04:00
}
2013-12-04 16:57:27 +04:00
int lv_thin_device_id ( const struct logical_volume * lv , uint32_t * device_id )
{
struct dev_manager * dm ;
2021-03-17 13:20:23 +03:00
int exists ;
int r ;
2013-12-04 16:57:27 +04:00
if ( ! ( dm = dev_manager_create ( lv - > vg - > cmd , lv - > vg - > name , 1 ) ) )
return_0 ;
2021-03-17 13:20:23 +03:00
if ( ! ( r = dev_manager_thin_device_id ( dm , lv , device_id , & exists ) ) )
if ( exists )
stack ;
2013-12-04 16:57:27 +04:00
dev_manager_destroy ( dm ) ;
return r ;
}
2018-06-29 12:15:54 +03:00
/*
* lv_vdo_pool_status obtains status information about VDO pool
*
* If the ' params ' string has been already retrieved , use it .
* If the mempool already exists , use it .
*
*/
int lv_vdo_pool_status ( const struct logical_volume * lv , int flush ,
struct lv_status_vdo * * vdo_status )
{
struct dev_manager * dm ;
2021-03-17 13:20:23 +03:00
int exists ;
2018-06-29 12:15:54 +03:00
if ( ! ( dm = dev_manager_create ( lv - > vg - > cmd , lv - > vg - > name , ! lv_is_pvmove ( lv ) ) ) )
return_0 ;
2021-03-17 13:20:23 +03:00
if ( ! dev_manager_vdo_pool_status ( dm , lv , flush , vdo_status , & exists ) ) {
dev_manager_destroy ( dm ) ;
if ( exists )
stack ;
return 0 ;
}
2018-06-29 12:15:54 +03:00
2019-01-21 02:48:05 +03:00
/* User has to call dm_pool_destroy(vdo_status->mem) */
2018-06-29 12:15:54 +03:00
2021-03-17 13:20:23 +03:00
return 1 ;
2018-06-29 12:15:54 +03:00
}
2019-01-21 02:24:30 +03:00
int lv_vdo_pool_percent ( const struct logical_volume * lv , dm_percent_t * percent )
{
struct lv_status_vdo * vdo_status ;
if ( ! lv_vdo_pool_status ( lv , 0 , & vdo_status ) )
return_0 ;
* percent = vdo_status - > usage ;
dm_pool_destroy ( vdo_status - > mem ) ;
return 1 ;
}
2023-01-15 23:24:28 +03:00
/*
* lv_vdo_pool_size_config obtains size configuration from active VDO table line
*
* If the ' params ' string has been already retrieved , use it .
* If the mempool already exists , use it .
*
*/
int lv_vdo_pool_size_config ( const struct logical_volume * lv ,
struct vdo_pool_size_config * cfg )
{
struct dev_manager * dm ;
int r ;
if ( ! lv_info ( lv - > vg - > cmd , lv , 1 , NULL , 0 , 0 ) )
return 1 ; /* Inactive VDO pool -> no runtime config */
if ( ! ( dm = dev_manager_create ( lv - > vg - > cmd , lv - > vg - > name , ! lv_is_pvmove ( lv ) ) ) )
return_0 ;
r = dev_manager_vdo_pool_size_config ( dm , lv , cfg ) ;
dev_manager_destroy ( dm ) ;
return r ;
}
2012-02-24 02:41:57 +04:00
static int _lv_active ( struct cmd_context * cmd , const struct logical_volume * lv )
2001-11-07 18:02:07 +03:00
{
2003-01-09 01:44:07 +03:00
struct lvinfo info ;
2001-11-07 18:02:07 +03:00
2010-08-17 20:25:32 +04:00
if ( ! lv_info ( cmd , lv , 0 , & info , 0 , 0 ) ) {
2016-06-23 00:04:53 +03:00
log_debug ( " Cannot determine activation status of %s%s. " ,
display_lvname ( lv ) ,
activation ( ) ? " " : " (no device driver) " ) ;
return 0 ;
2001-11-07 18:02:07 +03:00
}
2002-02-26 14:49:17 +03:00
return info . exists ;
2001-11-07 18:02:07 +03:00
}
2014-09-22 17:50:07 +04:00
static int _lv_open_count ( struct cmd_context * cmd , const struct logical_volume * lv )
2002-02-18 18:52:48 +03:00
{
2003-01-09 01:44:07 +03:00
struct lvinfo info ;
2002-02-18 18:52:48 +03:00
2010-08-17 20:25:32 +04:00
if ( ! lv_info ( cmd , lv , 0 , & info , 1 , 0 ) ) {
2002-02-18 18:52:48 +03:00
stack ;
2002-02-26 14:49:17 +03:00
return - 1 ;
2002-02-18 18:52:48 +03:00
}
2002-02-26 14:49:17 +03:00
return info . open_count ;
2002-02-18 18:52:48 +03:00
}
2014-09-22 17:50:07 +04:00
static int _lv_activate_lv ( const struct logical_volume * lv , struct lv_activate_opts * laopts )
2001-10-09 20:05:34 +04:00
{
2002-02-11 18:48:34 +03:00
int r ;
2002-02-26 14:49:17 +03:00
struct dev_manager * dm ;
2001-10-09 20:05:34 +04:00
2014-09-16 00:33:53 +04:00
if ( ! ( dm = dev_manager_create ( lv - > vg - > cmd , lv - > vg - > name , ! lv_is_pvmove ( lv ) ) ) )
2005-11-09 01:52:26 +03:00
return_0 ;
2001-11-02 16:45:05 +03:00
2011-06-17 18:14:19 +04:00
if ( ! ( r = dev_manager_activate ( dm , lv , laopts ) ) )
2002-02-11 18:48:34 +03:00
stack ;
2001-11-02 16:45:05 +03:00
2002-02-26 14:49:17 +03:00
dev_manager_destroy ( dm ) ;
2001-11-02 16:45:05 +03:00
return r ;
2001-10-09 20:05:34 +04:00
}
2001-10-16 20:25:28 +04:00
2014-09-22 17:50:07 +04:00
static int _lv_preload ( const struct logical_volume * lv , struct lv_activate_opts * laopts ,
2011-06-17 18:14:19 +04:00
int * flush_required )
2001-10-31 20:59:52 +03:00
{
2012-01-12 05:51:56 +04:00
int r = 0 ;
2002-02-26 14:49:17 +03:00
struct dev_manager * dm ;
2012-01-12 05:51:56 +04:00
int old_readonly = laopts - > read_only ;
2014-09-16 00:33:53 +04:00
if ( ! ( dm = dev_manager_create ( lv - > vg - > cmd , lv - > vg - > name , ! lv_is_pvmove ( lv ) ) ) )
2012-01-12 05:51:56 +04:00
goto_out ;
2005-11-09 01:52:26 +03:00
2013-11-22 13:00:00 +04:00
laopts - > read_only = _passes_readonly_filter ( lv - > vg - > cmd , lv ) ;
2011-06-17 18:14:19 +04:00
if ( ! ( r = dev_manager_preload ( dm , lv , laopts , flush_required ) ) )
2002-02-11 18:48:34 +03:00
stack ;
2005-11-09 01:52:26 +03:00
dev_manager_destroy ( dm ) ;
2012-01-12 05:51:56 +04:00
laopts - > read_only = old_readonly ;
out :
2005-11-09 01:52:26 +03:00
return r ;
}
2014-09-22 17:50:07 +04:00
static int _lv_deactivate ( const struct logical_volume * lv )
2005-11-09 01:52:26 +03:00
{
int r ;
struct dev_manager * dm ;
2011-06-11 04:03:06 +04:00
if ( ! ( dm = dev_manager_create ( lv - > vg - > cmd , lv - > vg - > name , 1 ) ) )
2005-11-09 01:52:26 +03:00
return_0 ;
2001-11-07 14:51:42 +03:00
2002-02-26 14:49:17 +03:00
if ( ! ( r = dev_manager_deactivate ( dm , lv ) ) )
2001-11-07 14:51:42 +03:00
stack ;
2002-02-26 14:49:17 +03:00
dev_manager_destroy ( dm ) ;
return r ;
2001-11-07 14:51:42 +03:00
}
2014-09-22 17:50:07 +04:00
static int _lv_suspend_lv ( const struct logical_volume * lv , struct lv_activate_opts * laopts ,
2011-06-17 18:14:19 +04:00
int lockfs , int flush_required )
2002-01-11 02:21:07 +03:00
{
2002-03-14 18:36:07 +03:00
int r ;
struct dev_manager * dm ;
2001-11-28 21:03:11 +03:00
2012-01-12 05:51:56 +04:00
laopts - > read_only = _passes_readonly_filter ( lv - > vg - > cmd , lv ) ;
2011-06-11 04:03:06 +04:00
/*
* When we are asked to manipulate ( normally suspend / resume ) the PVMOVE
* device directly , we don ' t want to touch the devices that use it .
*/
2014-09-16 00:33:53 +04:00
if ( ! ( dm = dev_manager_create ( lv - > vg - > cmd , lv - > vg - > name , ! lv_is_pvmove ( lv ) ) ) )
2005-11-09 01:52:26 +03:00
return_0 ;
2001-10-31 20:59:52 +03:00
2011-06-17 18:14:19 +04:00
if ( ! ( r = dev_manager_suspend ( dm , lv , laopts , lockfs , flush_required ) ) )
2001-11-07 14:51:42 +03:00
stack ;
2001-10-31 20:59:52 +03:00
2002-03-14 18:36:07 +03:00
dev_manager_destroy ( dm ) ;
return r ;
2002-02-11 18:48:34 +03:00
}
2002-01-11 02:21:07 +03:00
2002-03-01 22:08:11 +03:00
/*
2004-05-11 22:18:14 +04:00
* These two functions return the number of visible LVs in the state ,
2011-06-14 02:28:04 +04:00
* or - 1 on error . FIXME Check this .
2002-03-01 22:08:11 +03:00
*/
2012-02-24 02:41:57 +04:00
int lvs_in_vg_activated ( const struct volume_group * vg )
2001-11-02 19:28:04 +03:00
{
2005-06-01 20:51:55 +04:00
struct lv_list * lvl ;
2001-11-08 19:15:58 +03:00
int count = 0 ;
2001-11-07 14:51:42 +03:00
2002-11-18 17:01:16 +03:00
if ( ! activation ( ) )
return 0 ;
2011-06-14 02:28:04 +04:00
dm_list_iterate_items ( lvl , & vg - > lvs )
2009-05-14 01:26:45 +04:00
if ( lv_is_visible ( lvl - > lv ) )
2010-02-24 23:00:56 +03:00
count + = ( _lv_active ( vg - > cmd , lvl - > lv ) = = 1 ) ;
2011-06-14 02:28:04 +04:00
2013-01-08 02:30:29 +04:00
log_debug_activation ( " Counted %d active LVs in VG %s " , count , vg - > name ) ;
2001-11-07 14:51:42 +03:00
return count ;
2001-11-02 19:28:04 +03:00
}
2001-11-07 18:02:07 +03:00
2007-08-07 13:06:05 +04:00
int lvs_in_vg_opened ( const struct volume_group * vg )
2001-11-07 18:02:07 +03:00
{
2007-08-07 13:06:05 +04:00
const struct lv_list * lvl ;
2001-11-08 19:15:58 +03:00
int count = 0 ;
2001-11-07 18:02:07 +03:00
2002-11-18 17:01:16 +03:00
if ( ! activation ( ) )
return 0 ;
2011-06-14 02:28:04 +04:00
dm_list_iterate_items ( lvl , & vg - > lvs )
2011-11-07 14:58:13 +04:00
if ( lv_is_visible ( lvl - > lv ) )
2005-10-17 22:00:02 +04:00
count + = ( _lv_open_count ( vg - > cmd , lvl - > lv ) > 0 ) ;
2011-06-14 02:28:04 +04:00
2016-06-14 15:56:17 +03:00
log_debug_activation ( " Counted %d open LVs in VG %s. " , count , vg - > name ) ;
2001-11-07 18:02:07 +03:00
return count ;
}
2002-02-25 15:56:16 +03:00
2016-10-27 12:38:16 +03:00
/*
* Check if " raid4 " @ segtype is supported by kernel .
*
* if segment type is not raid4 , return 1.
*/
int raid4_is_supported ( struct cmd_context * cmd , const struct segment_type * segtype )
{
2021-09-10 23:32:26 +03:00
unsigned attrs = 0 ;
2016-10-27 12:38:16 +03:00
if ( segtype_is_raid4 ( segtype ) & &
( ! segtype - > ops - > target_present | |
! segtype - > ops - > target_present ( cmd , NULL , & attrs ) | |
! ( attrs & RAID_FEATURE_RAID4 ) ) ) {
log_error ( " RAID module does not support RAID4. " ) ;
return 0 ;
}
return 1 ;
}
2018-06-05 21:21:28 +03:00
/*
* The VG lock must be held to call this function .
*
* Returns : 0 or 1
*/
2012-02-24 02:41:57 +04:00
int lv_is_active ( const struct logical_volume * lv )
2011-02-04 23:30:17 +03:00
{
2018-06-05 21:21:28 +03:00
return _lv_active ( lv - > vg - > cmd , lv ) ;
2008-04-11 01:34:18 +04:00
}
2010-08-17 02:54:35 +04:00
# ifdef DMEVENTD
static struct dm_event_handler * _create_dm_event_handler ( struct cmd_context * cmd , const char * dmuuid , const char * dso ,
const int timeout , enum dm_event_mask mask )
{
struct dm_event_handler * dmevh ;
if ( ! ( dmevh = dm_event_handler_create ( ) ) )
return_NULL ;
2018-02-10 01:38:02 +03:00
if ( ! cmd - > default_settings . dmeventd_executable )
cmd - > default_settings . dmeventd_executable = find_config_tree_str ( cmd , dmeventd_executable_CFG , NULL ) ;
if ( dm_event_handler_set_dmeventd_path ( dmevh , cmd - > default_settings . dmeventd_executable ) )
2010-08-17 02:54:35 +04:00
goto_bad ;
2016-08-23 04:24:30 +03:00
if ( dso & & dm_event_handler_set_dso ( dmevh , dso ) )
2010-08-17 02:54:35 +04:00
goto_bad ;
if ( dm_event_handler_set_uuid ( dmevh , dmuuid ) )
goto_bad ;
dm_event_handler_set_timeout ( dmevh , timeout ) ;
dm_event_handler_set_event_mask ( dmevh , mask ) ;
return dmevh ;
bad :
dm_event_handler_destroy ( dmevh ) ;
2018-01-29 18:28:57 +03:00
2010-08-17 02:54:35 +04:00
return NULL ;
}
2018-02-10 22:22:32 +03:00
char * get_monitor_dso_path ( struct cmd_context * cmd , int id )
2010-08-17 02:54:35 +04:00
{
2018-02-10 22:22:32 +03:00
const char * libpath = find_config_tree_str ( cmd , id , NULL ) ;
char path [ PATH_MAX ] ;
2010-08-17 02:54:35 +04:00
2018-02-10 22:22:32 +03:00
get_shared_library_path ( cmd , libpath , path , sizeof ( path ) ) ;
2010-08-17 02:54:35 +04:00
2018-06-08 15:40:53 +03:00
return strdup ( path ) ;
2010-08-17 02:54:35 +04:00
}
2014-09-22 17:50:07 +04:00
static char * _build_target_uuid ( struct cmd_context * cmd , const struct logical_volume * lv )
2011-12-21 17:08:11 +04:00
{
2023-11-09 17:51:51 +03:00
return build_dm_uuid ( cmd - > mem , lv , lv_layer ( lv ) ) ;
2011-12-21 17:08:11 +04:00
}
2018-01-29 18:28:57 +03:00
static int _device_registered_with_dmeventd ( struct cmd_context * cmd ,
const struct logical_volume * lv ,
const char * * dso ,
int * pending , int * monitored )
2016-08-23 04:24:30 +03:00
{
char * uuid ;
2018-01-29 18:28:57 +03:00
enum dm_event_mask evmask ;
2016-08-23 04:24:30 +03:00
struct dm_event_handler * dmevh ;
2018-01-29 18:28:57 +03:00
int r ;
2016-08-23 04:24:30 +03:00
* pending = 0 ;
2018-01-29 18:28:57 +03:00
* monitored = 0 ;
2016-08-23 04:24:30 +03:00
if ( ! ( uuid = _build_target_uuid ( cmd , lv ) ) )
return_0 ;
if ( ! ( dmevh = _create_dm_event_handler ( cmd , uuid , NULL , 0 , DM_EVENT_ALL_ERRORS ) ) )
return_0 ;
2018-01-29 18:28:57 +03:00
if ( ( r = dm_event_get_registered_device ( dmevh , 0 ) ) ) {
if ( r = = - ENOENT ) {
r = 1 ;
goto out ;
}
r = 0 ;
goto_out ;
}
/* FIXME: why do we care which 'dso' is monitoring? */
if ( dso & & ( * dso = dm_event_handler_get_dso ( dmevh ) ) & &
! ( * dso = dm_pool_strdup ( cmd - > mem , * dso ) ) ) {
r = 0 ;
goto_out ;
2016-08-23 04:24:30 +03:00
}
evmask = dm_event_handler_get_event_mask ( dmevh ) ;
if ( evmask & DM_EVENT_REGISTRATION_PENDING ) {
* pending = 1 ;
evmask & = ~ DM_EVENT_REGISTRATION_PENDING ;
}
2018-01-29 18:28:57 +03:00
* monitored = evmask ;
r = 1 ;
out :
2016-08-23 04:24:30 +03:00
dm_event_handler_destroy ( dmevh ) ;
2018-01-29 18:28:57 +03:00
return r ;
2016-08-23 04:24:30 +03:00
}
2010-08-17 05:16:41 +04:00
int target_registered_with_dmeventd ( struct cmd_context * cmd , const char * dso ,
2018-01-29 18:28:57 +03:00
const struct logical_volume * lv ,
int * pending , int * monitored )
2010-08-17 02:54:35 +04:00
{
char * uuid ;
2018-01-29 18:28:57 +03:00
enum dm_event_mask evmask ;
2010-08-17 02:54:35 +04:00
struct dm_event_handler * dmevh ;
2018-01-29 18:28:57 +03:00
int r ;
2010-08-17 02:54:35 +04:00
* pending = 0 ;
2018-01-29 18:28:57 +03:00
* monitored = 0 ;
2010-08-17 02:54:35 +04:00
if ( ! dso )
return_0 ;
2011-12-21 17:08:11 +04:00
if ( ! ( uuid = _build_target_uuid ( cmd , lv ) ) )
2010-08-17 02:54:35 +04:00
return_0 ;
if ( ! ( dmevh = _create_dm_event_handler ( cmd , uuid , dso , 0 , DM_EVENT_ALL_ERRORS ) ) )
return_0 ;
2018-01-29 18:28:57 +03:00
if ( ( r = dm_event_get_registered_device ( dmevh , 0 ) ) ) {
if ( r = = - ENOENT ) {
r = 1 ;
goto out ;
}
r = 0 ;
goto_out ;
2010-08-17 02:54:35 +04:00
}
evmask = dm_event_handler_get_event_mask ( dmevh ) ;
if ( evmask & DM_EVENT_REGISTRATION_PENDING ) {
* pending = 1 ;
evmask & = ~ DM_EVENT_REGISTRATION_PENDING ;
}
2018-01-29 18:28:57 +03:00
* monitored = evmask ;
r = 1 ;
out :
2010-08-17 02:54:35 +04:00
dm_event_handler_destroy ( dmevh ) ;
2018-01-29 18:28:57 +03:00
return r ;
2010-08-17 02:54:35 +04:00
}
2014-09-22 17:50:07 +04:00
int target_register_events ( struct cmd_context * cmd , const char * dso , const struct logical_volume * lv ,
2010-08-17 02:54:35 +04:00
int evmask __attribute__ ( ( unused ) ) , int set , int timeout )
{
char * uuid ;
struct dm_event_handler * dmevh ;
int r ;
if ( ! dso )
return_0 ;
2010-08-17 05:16:41 +04:00
/* We always monitor the "real" device, never the "snapshot-origin" itself. */
2011-12-21 17:08:11 +04:00
if ( ! ( uuid = _build_target_uuid ( cmd , lv ) ) )
2010-08-17 02:54:35 +04:00
return_0 ;
if ( ! ( dmevh = _create_dm_event_handler ( cmd , uuid , dso , timeout ,
DM_EVENT_ALL_ERRORS | ( timeout ? DM_EVENT_TIMEOUT : 0 ) ) ) )
return_0 ;
r = set ? dm_event_register_handler ( dmevh ) : dm_event_unregister_handler ( dmevh ) ;
dm_event_handler_destroy ( dmevh ) ;
if ( ! r )
return_0 ;
2018-02-10 01:40:37 +03:00
log_verbose ( " %s %s for events " , set ? " Monitored " : " Unmonitored " , uuid ) ;
2010-08-17 02:54:35 +04:00
return 1 ;
}
# endif
2006-05-12 23:16:48 +04:00
/*
2007-01-20 01:21:45 +03:00
* Returns 0 if an attempt to ( un ) monitor the device failed .
* Returns 1 otherwise .
2006-05-12 23:16:48 +04:00
*/
2014-09-22 17:50:07 +04:00
/*
 * Start or stop dmeventd monitoring for 'lv' and, recursively, for the
 * sub-LVs it depends on (snapshot COWs, mirror logs, pool/metadata/external
 * volumes, AREA_LV legs).
 *
 * \param laopts   Optional activation options (NULL means defaults).
 * \param monitor  Non-zero to monitor, zero to unmonitor.
 *
 * Returns 0 if an attempt to (un)monitor the device failed, 1 otherwise.
 */
int monitor_dev_for_events(struct cmd_context *cmd, const struct logical_volume *lv,
			   const struct lv_activate_opts *laopts, int monitor)
{
#ifdef DMEVENTD
	int i, pending = 0, monitored = 0;
	int r = 1;
	struct dm_list *snh, *snht;
	struct lv_segment *seg;
	struct lv_segment *log_seg;
	int (*monitor_fn) (struct lv_segment *s, int e);
	uint32_t s;
	static const struct lv_activate_opts zlaopts = { 0 };
	struct lv_activate_opts mirr_laopts = { .origin_only = 1 };
	struct lvinfo info;
	const char *dso = NULL;
	int new_unmonitor;

	if (!laopts)
		laopts = &zlaopts;
	else
		mirr_laopts.read_only = laopts->read_only;

	/* Skip dmeventd code altogether when explicitly ignored. */
	if (dmeventd_monitor_mode() == DMEVENTD_MONITOR_IGNORE)
		return 1;

	/* Nothing to do if dmeventd configured not to be used. */
	if (monitor && !dmeventd_monitor_mode())
		return 1;

	/*
	 * Activation of unused cache-pool activates metadata device as
	 * a public LV for clearing purpose.
	 * FIXME:
	 * As VG lock is held across whole operation unmonitored volume
	 * is usually OK since dmeventd couldn't do anything.
	 * However in case command would have crashed, such LV is
	 * left unmonitored and may potentially require dmeventd.
	 */
	if (lv_is_cache_pool_data(lv) || lv_is_cache_pool_metadata(lv)) {
		if (!(seg = find_pool_seg(first_seg(lv))))
			return_0;
		if (!lv_is_used_cache_pool(seg->lv)) {
			log_debug_activation("Skipping %smonitor of %s.%s",
					     (monitor) ? "" : "un", display_lvname(lv),
					     (monitor) ? " Cache pool activation for clearing only." : "");
			return 1;
		}
	}

	/*
	 * Allow to unmonitor thin pool via explicit pool unmonitor
	 * or unmonitor before the last thin pool user deactivation.
	 * Skip unmonitor, if invoked via deactivation of thin volume
	 * and there is another thin pool user (open_count > 1).
	 * FIXME think about watch ruler influence.
	 */
	if (laopts->skip_in_use && lv_is_thin_pool(lv) &&
	    lv_info(lv->vg->cmd, lv, 1, &info, 1, 0) && (info.open_count > 1)) {
		log_debug_activation("Skipping unmonitor of opened %s (open:%d)",
				     display_lvname(lv), info.open_count);
		return 1;
	}

	/* Do not monitor snapshot that already covers origin. */
	if (monitor && lv_is_cow_covering_origin(lv)) {
		log_debug_activation("Skipping monitor of snapshot larger "
				     "then origin %s.", display_lvname(lv));
		return 1;
	}

	/*
	 * In case of a snapshot device, we monitor lv->snapshot->lv,
	 * not the actual LV itself.
	 */
	if (lv_is_cow(lv) && (laopts->no_merging || !lv_is_merging_cow(lv) ||
			      lv_has_target_type(lv->vg->cmd->mem, lv, NULL, TARGET_NAME_SNAPSHOT))) {
		if (!(r = monitor_dev_for_events(cmd, lv->snapshot->lv, NULL, monitor)))
			stack;
		return r;
	}

	/*
	 * In case this LV is a snapshot origin, we instead monitor
	 * each of its respective snapshots.  The origin itself may
	 * also need to be monitored if it is a mirror, for example,
	 * so fall through to process it afterwards.
	 * Before monitoring snapshots verify origin is active as with
	 * external origin only read-only-real device can be active.
	 */
	if (!laopts->origin_only && lv_is_origin(lv) && lv_info(lv->vg->cmd, lv, 0, NULL, 0, 0))
		dm_list_iterate_safe(snh, snht, &lv->snapshot_segs)
			if (!monitor_dev_for_events(cmd, dm_list_struct_base(snh,
						    struct lv_segment, origin_list)->cow,
						    NULL, monitor)) {
				stack;
				r = 0;
			}

	/*
	 * If the volume is mirrored and its log is also mirrored, monitor
	 * the log volume as well.
	 */
	if ((seg = first_seg(lv)) != NULL && seg->log_lv != NULL &&
	    (log_seg = first_seg(seg->log_lv)) != NULL &&
	    seg_is_mirrored(log_seg))
		if (!monitor_dev_for_events(cmd, seg->log_lv, NULL, monitor)) {
			stack;
			r = 0;
		}

	dm_list_iterate_items(seg, &lv->segments) {
		/* Recurse for AREA_LV legs of this segment. */
		for (s = 0; s < seg->area_count; s++) {
			if (seg_type(seg, s) != AREA_LV)
				continue;
			if (!monitor_dev_for_events(cmd, seg_lv(seg, s), NULL,
						    monitor)) {
				stack;
				r = 0;
			}
		}

		/*
		 * If requested unmonitoring of thin volume, preserve skip_in_use flag.
		 *
		 * FIXME: code here looks like _lv_postorder()
		 */
		if (seg->pool_lv &&
		    !monitor_dev_for_events(cmd, seg->pool_lv,
					    (!monitor) ? laopts : NULL, monitor)) {
			stack;
			r = 0;
		}

		if (seg->external_lv &&
		    !monitor_dev_for_events(cmd, seg->external_lv,
					    (!monitor) ? laopts : NULL, monitor)) {
			stack;
			r = 0;
		}

		if (seg->metadata_lv &&
		    !monitor_dev_for_events(cmd, seg->metadata_lv, NULL, monitor)) {
			stack;
			r = 0;
		}

		if (!seg_monitored(seg) ||
		    (seg->status & PVMOVE) ||
		    !seg->segtype->ops->target_monitored) /* doesn't support registration */
			continue;

		if (!monitor) {
			/* When unmonitoring, obtain existing dso being used. */
			if (!_device_registered_with_dmeventd(cmd, seg_is_snapshot(seg) ? seg->cow : seg->lv,
							      &dso, &pending, &monitored)) {
				log_warn("WARNING: Failed to %smonitor %s.",
					 monitor ? "" : "un",
					 display_lvname(seg_is_snapshot(seg) ? seg->cow : seg->lv));
				return 0;
			}
		} else if (!seg->segtype->ops->target_monitored(seg, &pending, &monitored)) {
			log_warn("WARNING: Failed to %smonitor %s.",
				 monitor ? "" : "un",
				 display_lvname(seg->lv));
			return 0;
		}

		/* FIXME: We should really try again if pending */
		monitored = (pending) ? 0 : monitored;

		monitor_fn = NULL;
		new_unmonitor = 0;

		if (monitor) {
			if (monitored)
				log_verbose("%s already monitored.", display_lvname(lv));
			else if (seg->segtype->ops->target_monitor_events) {
				log_very_verbose("Monitoring %s with %s.%s", display_lvname(lv),
						 seg->segtype->dso,
						 test_mode() ? " [Test mode: skipping this]" : "");
				monitor_fn = seg->segtype->ops->target_monitor_events;
			}
		} else {
			if (!monitored)
				log_verbose("%s already not monitored.", display_lvname(lv));
			else if (dso && *dso) {
				/*
				 * Divert unmonitor away from code that depends on the new segment
				 * type instead of the existing one if it's changing.
				 */
				log_verbose("Not monitoring %s with %s%s", display_lvname(lv), dso, test_mode() ? " [Test mode: skipping this]" : "");
				new_unmonitor = 1;
			}
		}

		/* FIXME Test mode should really continue a bit further. */
		if (test_mode())
			continue;

		if (new_unmonitor) {
			/* Unregister via the dso found in the existing registration. */
			if (!target_register_events(cmd, dso, seg_is_snapshot(seg) ? seg->cow : lv, 0, 0, 10)) {
				log_warn("WARNING: %s: segment unmonitoring failed.",
					 display_lvname(lv));
				return 0;
			}
		} else if (monitor_fn) {
			/* FIXME specify events */
			if (!monitor_fn(seg, 0)) {
				log_warn("WARNING: %s: %s segment monitoring function failed.",
					 display_lvname(lv), lvseg_name(seg));
				return 0;
			}
		} else
			continue;

		if (!vg_write_lock_held() && lv_is_mirror(lv)) {
			mirr_laopts.exclusive = lv_is_active(lv) ? 1 : 0;
			/*
			 * Commands vgchange and lvchange do use read-only lock when changing
			 * monitoring (--monitor y|n). All other use cases hold 'write-lock'
			 * so they skip this dm mirror table refreshing step.
			 */
			if (!_lv_activate_lv(lv, &mirr_laopts)) {
				stack;
				r = 0;
			}
		}

		/* Check [un]monitor results. */
		/* Try a couple times if pending, but not forever... */
		for (i = 0;; i++) {
			pending = 0;
			if (!seg->segtype->ops->target_monitored(seg, &pending, &monitored)) {
				stack;
				r = 0;
				break;
			}
			if (!pending || i >= 40)
				break;
			log_very_verbose("%s %smonitoring still pending: waiting...",
					 display_lvname(lv), monitor ? "" : "un");
			usleep(10000 * i); /* back off a little longer on each retry */
		}

		if (r)
			r = (monitored && monitor) || (!monitored && !monitor);
	}

	if (!r && !error_message_produced())
		log_warn("WARNING: %sonitoring %s failed.", monitor ? "M" : "Not m",
			 display_lvname(lv));

	return r;
#else
	return 1;
#endif
}
2011-06-30 22:25:18 +04:00
/* Context passed to _preload_detached_lv() via for_each_sub_lv(). */
struct detached_lv_data {
	const struct logical_volume *lv_pre;	/* precommitted (new) metadata LV */
	struct lv_activate_opts *laopts;	/* activation options for preloads */
	int *flush_required;			/* out: set if any preload needs a flush */
};
2014-03-27 13:35:07 +04:00
static int _preload_detached_lv ( struct logical_volume * lv , void * data )
2011-06-30 22:25:18 +04:00
{
struct detached_lv_data * detached = data ;
2015-11-22 01:31:44 +03:00
struct logical_volume * lv_pre ;
2011-06-30 22:25:18 +04:00
2015-01-28 20:30:08 +03:00
/* Check and preload removed raid image leg or metadata */
2015-01-28 15:36:25 +03:00
if ( lv_is_raid_image ( lv ) ) {
2015-11-22 01:31:44 +03:00
if ( ( lv_pre = find_lv_in_vg_by_lvid ( detached - > lv_pre - > vg , & lv - > lvid ) ) & &
! lv_is_raid_image ( lv_pre ) & & lv_is_active ( lv ) & &
! _lv_preload ( lv_pre , detached - > laopts , detached - > flush_required ) )
2015-01-28 15:36:25 +03:00
return_0 ;
2015-01-28 20:30:08 +03:00
} else if ( lv_is_raid_metadata ( lv ) ) {
2015-11-22 01:31:44 +03:00
if ( ( lv_pre = find_lv_in_vg_by_lvid ( detached - > lv_pre - > vg , & lv - > lvid ) ) & &
! lv_is_raid_metadata ( lv_pre ) & & lv_is_active ( lv ) & &
! _lv_preload ( lv_pre , detached - > laopts , detached - > flush_required ) )
2015-01-28 15:36:25 +03:00
return_0 ;
2018-08-07 11:34:17 +03:00
} else if ( lv_is_mirror_image ( lv ) ) {
if ( ( lv_pre = find_lv_in_vg_by_lvid ( detached - > lv_pre - > vg , & lv - > lvid ) ) & &
! lv_is_mirror_image ( lv_pre ) & & lv_is_active ( lv ) & &
! _lv_preload ( lv_pre , detached - > laopts , detached - > flush_required ) )
return_0 ;
2015-01-28 15:36:25 +03:00
}
2018-04-20 12:55:22 +03:00
if ( ! lv_is_visible ( lv ) & & ( lv_pre = find_lv ( detached - > lv_pre - > vg , lv - > name ) ) & &
lv_is_visible ( lv_pre ) ) {
if ( ! _lv_preload ( lv_pre , detached - > laopts , detached - > flush_required ) )
return_0 ;
}
2015-11-23 01:04:11 +03:00
/* FIXME: condition here should be far more limiting to really
* detect detached LVs */
2015-11-22 01:31:44 +03:00
if ( ( lv_pre = find_lv ( detached - > lv_pre - > vg , lv - > name ) ) ) {
if ( lv_is_visible ( lv_pre ) & & lv_is_active ( lv ) & &
2015-11-23 01:04:11 +03:00
! lv_is_pool ( lv ) & &
2015-11-22 01:31:44 +03:00
( ! lv_is_cow ( lv ) | | ! lv_is_cow ( lv_pre ) ) & &
! _lv_preload ( lv_pre , detached - > laopts , detached - > flush_required ) )
2011-06-30 22:25:18 +04:00
return_0 ;
}
return 1 ;
}
2004-03-08 21:54:13 +03:00
static int _lv_suspend ( struct cmd_context * cmd , const char * lvid_s ,
2013-03-18 00:29:58 +04:00
struct lv_activate_opts * laopts , int error_if_not_suspended ,
2015-11-25 12:52:22 +03:00
const struct logical_volume * lv , const struct logical_volume * lv_pre )
2002-02-25 15:56:16 +03:00
{
2014-09-22 17:50:07 +04:00
const struct logical_volume * pvmove_lv = NULL ;
2017-11-30 15:24:41 +03:00
struct logical_volume * lv_pre_tmp , * lv_tmp ;
2011-06-11 04:03:06 +04:00
struct seg_list * sl ;
2016-04-06 11:28:02 +03:00
struct lv_segment * snap_seg ;
2003-01-09 01:44:07 +03:00
struct lvinfo info ;
2009-05-20 13:52:37 +04:00
int r = 0 , lockfs = 0 , flush_required = 0 ;
2011-06-30 22:25:18 +04:00
struct detached_lv_data detached ;
2017-11-24 15:51:17 +03:00
struct dm_pool * mem = NULL ;
struct dm_list suspend_lvs ;
struct lv_list * lvl ;
2017-11-30 15:24:41 +03:00
int found ;
2002-02-25 15:56:16 +03:00
2002-11-18 17:01:16 +03:00
if ( ! activation ( ) )
return 1 ;
2002-03-14 18:36:07 +03:00
if ( test_mode ( ) ) {
2015-11-25 12:52:22 +03:00
_skip ( " Suspending %s%s. " , display_lvname ( lv ) ,
2013-03-20 03:00:11 +04:00
laopts - > origin_only ? " origin without snapshots " : " " ) ;
2009-04-10 14:00:04 +04:00
r = 1 ;
goto out ;
2002-03-14 18:36:07 +03:00
}
2015-11-25 12:52:22 +03:00
if ( ! lv_info ( cmd , lv , laopts - > origin_only , & info , 0 , 0 ) )
2009-04-10 14:00:04 +04:00
goto_out ;
2002-03-11 22:02:28 +03:00
2009-04-10 14:00:04 +04:00
if ( ! info . exists | | info . suspended ) {
2009-12-03 22:23:40 +03:00
if ( ! error_if_not_suspended ) {
r = 1 ;
if ( info . suspended )
2011-06-11 04:03:06 +04:00
critical_section_inc ( cmd , " already suspended " ) ;
2009-12-03 22:23:40 +03:00
}
2009-04-10 14:00:04 +04:00
goto out ;
}
2003-07-05 02:34:56 +04:00
2015-11-25 12:52:22 +03:00
lv_calculate_readahead ( lv , NULL ) ;
2009-06-01 16:43:31 +04:00
2021-03-07 20:10:48 +03:00
/* Ignore origin_only unless LV is origin in both old and new metadata */
/* or LV is thin or thin pool volume */
if ( ! lv_is_thin_volume ( lv ) & & ! lv_is_thin_pool ( lv ) & &
! ( lv_is_origin ( lv ) & & lv_is_origin ( lv_pre ) ) )
laopts - > origin_only = 0 ;
2011-06-11 04:03:06 +04:00
/*
2011-07-05 22:36:37 +04:00
* Preload devices for the LV .
2011-06-11 04:03:06 +04:00
* If the PVMOVE LV is being removed , it ' s only present in the old
* metadata and not the new , so we must explicitly add the new
* tables for all the changed LVs here , as the relationships
* are not found by walking the new metadata .
*/
2015-11-25 12:52:22 +03:00
if ( lv_is_locked ( lv ) & & ! lv_is_locked ( lv_pre ) & &
( pvmove_lv = find_pvmove_lv_in_lv ( lv ) ) ) {
2011-07-05 22:36:37 +04:00
/* Preload all the LVs above the PVMOVE LV */
dm_list_iterate_items ( sl , & pvmove_lv - > segs_using_this_lv ) {
2015-11-25 12:52:22 +03:00
if ( ! ( lv_pre_tmp = find_lv ( lv_pre - > vg , sl - > seg - > lv - > name ) ) ) {
2015-11-25 18:06:31 +03:00
log_error ( INTERNAL_ERROR " LV %s missing from preload metadata. " ,
display_lvname ( sl - > seg - > lv ) ) ;
2011-06-11 04:03:06 +04:00
goto out ;
}
2015-11-25 12:52:22 +03:00
if ( ! _lv_preload ( lv_pre_tmp , laopts , & flush_required ) )
2011-06-11 04:03:06 +04:00
goto_out ;
2011-07-05 22:36:37 +04:00
}
/* Now preload the PVMOVE LV itself */
2015-11-25 12:52:22 +03:00
if ( ! ( lv_pre_tmp = find_lv ( lv_pre - > vg , pvmove_lv - > name ) ) ) {
2015-11-25 18:06:31 +03:00
log_error ( INTERNAL_ERROR " LV %s missing from preload metadata. " ,
display_lvname ( pvmove_lv ) ) ;
2011-07-05 22:36:37 +04:00
goto out ;
}
2015-11-25 12:52:22 +03:00
if ( ! _lv_preload ( lv_pre_tmp , laopts , & flush_required ) )
2011-07-05 22:36:37 +04:00
goto_out ;
2017-11-15 14:08:33 +03:00
/* Suspending 1st. LV above PVMOVE suspends whole tree */
dm_list_iterate_items ( sl , & pvmove_lv - > segs_using_this_lv ) {
lv = sl - > seg - > lv ;
break ;
}
2011-07-05 22:36:37 +04:00
} else {
2015-11-25 12:52:22 +03:00
if ( ! _lv_preload ( lv_pre , laopts , & flush_required ) )
2011-07-05 22:36:37 +04:00
/* FIXME Revert preloading */
goto_out ;
2011-06-30 22:25:18 +04:00
2011-07-05 22:36:37 +04:00
/*
* Search for existing LVs that have become detached and preload them .
*/
2015-11-25 12:52:22 +03:00
detached . lv_pre = lv_pre ;
2011-07-05 22:36:37 +04:00
detached . laopts = laopts ;
detached . flush_required = & flush_required ;
2011-06-30 22:25:18 +04:00
2015-11-25 12:52:22 +03:00
if ( ! for_each_sub_lv ( ( struct logical_volume * ) lv , & _preload_detached_lv , & detached ) )
2011-07-05 22:36:37 +04:00
goto_out ;
2011-07-08 16:48:41 +04:00
/*
* Preload any snapshots that are being removed .
*/
2015-11-25 12:52:22 +03:00
if ( ! laopts - > origin_only & & lv_is_origin ( lv ) ) {
dm_list_iterate_items_gen ( snap_seg , & lv - > snapshot_segs , origin_list ) {
if ( ! ( lv_pre_tmp = find_lv_in_vg_by_lvid ( lv_pre - > vg , & snap_seg - > cow - > lvid ) ) ) {
2015-11-25 18:06:31 +03:00
log_error ( INTERNAL_ERROR " LV %s (%s) missing from preload metadata. " ,
display_lvname ( snap_seg - > cow ) ,
snap_seg - > cow - > lvid . id [ 1 ] . uuid ) ;
2011-07-08 16:48:41 +04:00
goto out ;
}
2015-11-25 12:52:22 +03:00
if ( ! lv_is_cow ( lv_pre_tmp ) & &
! _lv_preload ( lv_pre_tmp , laopts , & flush_required ) )
2011-07-08 16:48:41 +04:00
goto_out ;
}
}
2005-11-09 01:52:26 +03:00
}
2016-04-06 11:29:05 +03:00
/* Flush is ATM required for the tested cases
* NOTE : Mirror repair requires noflush for proper repair !
* TODO : Relax this limiting condition further */
if ( ! flush_required & &
2017-11-15 14:07:47 +03:00
( lv_is_pvmove ( lv ) | | pvmove_lv | |
2022-08-19 15:48:01 +03:00
( ! lv_is_mirror ( lv ) & &
! lv_is_thin_volume ( lv ) & &
! lv_is_thin_pool ( lv ) & &
! lv_is_vdo ( lv ) & &
! lv_is_vdo_pool ( lv ) ) ) ) {
2016-04-06 11:29:05 +03:00
log_debug ( " Requiring flush for LV %s. " , display_lvname ( lv ) ) ;
flush_required = 1 ;
}
2015-11-25 12:52:22 +03:00
if ( ! monitor_dev_for_events ( cmd , lv , laopts , 0 ) )
2006-04-28 18:06:06 +04:00
/* FIXME Consider aborting here */
2006-01-27 21:38:14 +03:00
stack ;
2011-06-17 18:14:19 +04:00
if ( ! laopts - > origin_only & &
2015-11-25 12:52:22 +03:00
( lv_is_origin ( lv_pre ) | | lv_is_cow ( lv_pre ) ) )
2006-08-09 01:20:00 +04:00
lockfs = 1 ;
2013-02-05 14:26:27 +04:00
/* Converting non-thin LV to thin external origin ? */
2015-11-25 12:52:22 +03:00
if ( ! lv_is_thin_volume ( lv ) & & lv_is_thin_volume ( lv_pre ) )
2013-02-05 14:26:27 +04:00
lockfs = 1 ; /* Sync before conversion */
2015-11-25 12:52:22 +03:00
if ( laopts - > origin_only & & lv_is_thin_volume ( lv ) & & lv_is_thin_volume ( lv_pre ) )
2012-06-05 13:26:54 +04:00
lockfs = 1 ;
2017-11-24 15:51:17 +03:00
if ( ! lv_is_locked ( lv ) & & lv_is_locked ( lv_pre ) & &
( pvmove_lv = find_pvmove_lv_in_lv ( lv_pre ) ) ) {
/*
* When starting PVMOVE , suspend participating LVs first
* with committed metadata by looking at precommited pvmove list .
* In committed metadata these LVs are not connected in any way .
*
* TODO : prepare list of LVs needed to be suspended and pass them
* via ' struct laopts ' directly to _lv_suspend_lv ( ) and handle this
* with a single ' dmtree ' call .
*/
if ( ! ( mem = dm_pool_create ( " suspend_lvs " , 128 ) ) )
goto_out ;
/* Prepare list of all LVs for suspend ahead */
dm_list_init ( & suspend_lvs ) ;
dm_list_iterate_items ( sl , & pvmove_lv - > segs_using_this_lv ) {
2017-11-30 15:24:41 +03:00
lv_tmp = sl - > seg - > lv ;
if ( lv_is_cow ( lv_tmp ) )
/* Never suspend COW, always has to be origin */
lv_tmp = origin_from_cow ( lv_tmp ) ;
found = 0 ;
dm_list_iterate_items ( lvl , & suspend_lvs )
if ( strcmp ( lvl - > lv - > name , lv_tmp - > name ) = = 0 ) {
found = 1 ;
break ;
}
if ( found )
continue ; /* LV is already in the list */
2017-11-24 15:51:17 +03:00
if ( ! ( lvl = dm_pool_alloc ( mem , sizeof ( * lvl ) ) ) ) {
log_error ( " lv_list alloc failed. " ) ;
goto out ;
}
/* Look for precommitted LV name in commmitted VG */
2017-11-30 15:24:41 +03:00
if ( ! ( lvl - > lv = find_lv ( lv - > vg , lv_tmp - > name ) ) ) {
2017-11-24 15:51:17 +03:00
log_error ( INTERNAL_ERROR " LV %s missing from preload metadata. " ,
2017-11-30 15:24:41 +03:00
display_lvname ( lv_tmp ) ) ;
2017-11-24 15:51:17 +03:00
goto out ;
}
dm_list_add ( & suspend_lvs , & lvl - > list ) ;
}
2020-09-26 15:56:44 +03:00
critical_section_inc ( cmd , " suspending " ) ;
2017-11-24 15:51:17 +03:00
dm_list_iterate_items ( lvl , & suspend_lvs )
if ( ! _lv_suspend_lv ( lvl - > lv , laopts , lockfs , 1 ) ) {
critical_section_dec ( cmd , " failed suspend " ) ;
goto_out ; /* FIXME: resume on recovery path? */
}
2020-09-26 15:56:44 +03:00
} else { /* Standard suspend */
critical_section_inc ( cmd , " suspending " ) ;
2017-11-24 15:51:17 +03:00
if ( ! _lv_suspend_lv ( lv , laopts , lockfs , flush_required ) ) {
critical_section_dec ( cmd , " failed suspend " ) ;
goto_out ;
}
2020-09-26 15:56:44 +03:00
}
2002-03-01 22:08:11 +03:00
2009-04-10 14:00:04 +04:00
r = 1 ;
out :
2017-11-24 15:51:17 +03:00
if ( mem )
dm_pool_destroy ( mem ) ;
2009-04-10 14:00:04 +04:00
return r ;
2002-02-25 15:56:16 +03:00
}
2012-01-20 07:46:52 +04:00
/*
* In a cluster , set exclusive to indicate that only one node is using the
* device . Any preloaded tables may then use non - clustered targets .
*
* Returns success if the device is not active
*/
2014-09-22 17:50:07 +04:00
int lv_suspend_if_active ( struct cmd_context * cmd , const char * lvid_s , unsigned origin_only , unsigned exclusive ,
2015-11-25 12:52:22 +03:00
const struct logical_volume * lv , const struct logical_volume * lv_pre )
2004-03-08 21:54:13 +03:00
{
2012-01-20 04:27:18 +04:00
struct lv_activate_opts laopts = {
2020-08-29 22:37:39 +03:00
. exclusive = exclusive ,
. origin_only = origin_only
2012-01-20 04:27:18 +04:00
} ;
2011-06-17 18:14:19 +04:00
2015-11-25 12:52:22 +03:00
return _lv_suspend ( cmd , lvid_s , & laopts , 0 , lv , lv_pre ) ;
2004-03-08 21:54:13 +03:00
}
2017-11-10 23:15:50 +03:00
static int _check_suspended_lv ( struct logical_volume * lv , void * data )
{
struct lvinfo info ;
if ( lv_info ( lv - > vg - > cmd , lv , 0 , & info , 0 , 0 ) & & info . exists & & info . suspended ) {
log_debug ( " Found suspended LV %s in critical section(). " , display_lvname ( lv ) ) ;
return 0 ; /* There is suspended subLV in the tree */
}
2017-11-29 01:11:20 +03:00
if ( lv_layer ( lv ) & & lv_info ( lv - > vg - > cmd , lv , 1 , & info , 0 , 0 ) & & info . exists & & info . suspended ) {
log_debug ( " Found suspended layered LV %s in critical section(). " , display_lvname ( lv ) ) ;
return 0 ; /* There is suspended subLV in the tree */
}
2017-11-10 23:15:50 +03:00
return 1 ;
}
2004-03-08 21:54:13 +03:00
static int _lv_resume ( struct cmd_context * cmd , const char * lvid_s ,
2013-03-18 00:29:58 +04:00
struct lv_activate_opts * laopts , int error_if_not_active ,
2014-09-22 17:50:07 +04:00
const struct logical_volume * lv )
2002-02-25 15:56:16 +03:00
{
2017-11-29 01:11:20 +03:00
struct dm_list * snh ;
2003-01-09 01:44:07 +03:00
struct lvinfo info ;
2009-04-10 14:00:04 +04:00
int r = 0 ;
2002-02-25 15:56:16 +03:00
2002-11-18 17:01:16 +03:00
if ( ! activation ( ) )
return 1 ;
thin: move pool messaging from resume to suspend
Existing messaging intarface for thin-pool has a few 'weak' points:
* Message were posted with each 'resume' operation, thus not allowing
activation of thin-pool with the existing state.
* Acceleration skipped suspend step has not worked in cluster,
since clvmd resumes only nodes which are suspended (have proper lock
state).
* Resume may fail and code is not really designed to 'fail' in this
phase (generic rule here is resume DOES NOT fail unless something serious
is wrong and lvm2 tool usually doesn't handle recovery path in this case.)
* Full thin-pool suspend happened, when taken a thin-volume snapshot.
With this patch the new method relocates message passing into suspend
state.
This has a few drawbacks with current API, but overal it performs
better and gives are more posibilities to deal with errors.
Patch introduces a new logic for 'origin-only' suspend of thin-pool and
this also relates to thin-volume when taking snapshot.
When suspend_origin_only operation is invoked on a pool with
queued messages then only those messages are posted to thin-pool and
actual suspend of thin pool and data and metadata volume is skipped.
This makes taking a snapshot of thin-volume lighter operation and
avoids blocking of other unrelated active thin volumes.
Also fail now happens in 'suspend' state where the 'Fail' is more expected
and it is better handled through error paths.
Activation of thin-pool is now not sending any message and leaves upto a tool
to decided later how to finish unfinished double-commit transaction.
Problem which needs some API improvements relates to the lvm2 tree
construction. For the suspend tree we do not add target table line
into the tree, but only a device is inserted into a tree.
Current mechanism to attach messages for thin-pool requires the libdm
to know about thin-pool target, so lvm2 currently takes assumption, node
is really a thin-pool and fills in the table line for this node (which
should be ensured by the PRELOAD phase, but it's a misuse of internal API)
we would possibly need to be able to attach message to 'any' node.
Other thing to notice - current messaging interface in thin-pool
target requires to suspend thin volume origin first and then send
a create message, but this could not have any 'nice' solution on lvm2
side and IMHO we should introduce something like 'create_after_resume'
message.
Patch also changes the moment, where lvm2 transaction id is increased.
Now it happens only after successful finish of kernel transaction id
change. This change was needed to handle properly activation of pool,
which is in the middle of unfinished transaction, and also this corrects
usage of thin-pool by external apps like Docker.
2015-07-01 14:31:37 +03:00
if ( ! lv_is_origin ( lv ) & & ! lv_is_thin_volume ( lv ) & & ! lv_is_thin_pool ( lv ) )
2011-06-17 18:14:19 +04:00
laopts - > origin_only = 0 ;
2010-08-17 20:25:32 +04:00
2002-03-14 18:36:07 +03:00
if ( test_mode ( ) ) {
2015-11-25 18:06:31 +03:00
_skip ( " Resuming %s%s%s. " , display_lvname ( lv ) ,
laopts - > origin_only ? " without snapshots " : " " ,
2011-09-28 02:43:40 +04:00
laopts - > revert ? " (reverting) " : " " ) ;
2009-04-10 14:00:04 +04:00
r = 1 ;
goto out ;
2002-03-14 18:36:07 +03:00
}
2015-11-25 18:06:31 +03:00
log_debug_activation ( " Resuming LV %s%s%s%s. " , display_lvname ( lv ) ,
2013-01-08 02:30:29 +04:00
error_if_not_active ? " " : " if active " ,
2015-06-15 15:33:29 +03:00
laopts - > origin_only ?
( lv_is_thin_pool ( lv ) ? " pool only " :
lv_is_thin_volume ( lv ) ? " thin only " : " without snapshots " ) : " " ,
2013-01-08 02:30:29 +04:00
laopts - > revert ? " (reverting) " : " " ) ;
2011-06-14 02:28:04 +04:00
2020-09-22 14:35:27 +03:00
if ( laopts - > revert )
goto needs_resume ;
2011-06-17 18:14:19 +04:00
if ( ! lv_info ( cmd , lv , laopts - > origin_only , & info , 0 , 0 ) )
2009-04-10 14:00:04 +04:00
goto_out ;
2002-03-11 22:02:28 +03:00
thin: move pool messaging from resume to suspend
Existing messaging intarface for thin-pool has a few 'weak' points:
* Message were posted with each 'resume' operation, thus not allowing
activation of thin-pool with the existing state.
* Acceleration skipped suspend step has not worked in cluster,
since clvmd resumes only nodes which are suspended (have proper lock
state).
* Resume may fail and code is not really designed to 'fail' in this
phase (generic rule here is resume DOES NOT fail unless something serious
is wrong and lvm2 tool usually doesn't handle recovery path in this case.)
* Full thin-pool suspend happened, when taken a thin-volume snapshot.
With this patch the new method relocates message passing into suspend
state.
This has a few drawbacks with current API, but overal it performs
better and gives are more posibilities to deal with errors.
Patch introduces a new logic for 'origin-only' suspend of thin-pool and
this also relates to thin-volume when taking snapshot.
When suspend_origin_only operation is invoked on a pool with
queued messages then only those messages are posted to thin-pool and
actual suspend of thin pool and data and metadata volume is skipped.
This makes taking a snapshot of thin-volume lighter operation and
avoids blocking of other unrelated active thin volumes.
Also fail now happens in 'suspend' state where the 'Fail' is more expected
and it is better handled through error paths.
Activation of thin-pool is now not sending any message and leaves upto a tool
to decided later how to finish unfinished double-commit transaction.
Problem which needs some API improvements relates to the lvm2 tree
construction. For the suspend tree we do not add target table line
into the tree, but only a device is inserted into a tree.
Current mechanism to attach messages for thin-pool requires the libdm
to know about thin-pool target, so lvm2 currently takes assumption, node
is really a thin-pool and fills in the table line for this node (which
should be ensured by the PRELOAD phase, but it's a misuse of internal API)
we would possibly need to be able to attach message to 'any' node.
Other thing to notice - current messaging interface in thin-pool
target requires to suspend thin volume origin first and then send
a create message, but this could not have any 'nice' solution on lvm2
side and IMHO we should introduce something like 'create_after_resume'
message.
Patch also changes the moment, where lvm2 transaction id is increased.
Now it happens only after successful finish of kernel transaction id
change. This change was needed to handle properly activation of pool,
which is in the middle of unfinished transaction, and also this corrects
usage of thin-pool by external apps like Docker.
2015-07-01 14:31:37 +03:00
if ( ! info . exists | | ! info . suspended ) {
2010-07-08 16:24:04 +04:00
if ( error_if_not_active )
goto_out ;
2017-11-10 23:15:50 +03:00
2017-11-24 15:53:02 +03:00
/* ATM only thin-pool with origin-only suspend does not really suspend anything
* it ' s used only for message passing to thin - pool */
if ( laopts - > origin_only & & lv_is_thin_pool ( lv ) )
critical_section_dec ( cmd , " resumed " ) ;
2017-11-10 23:15:50 +03:00
if ( ! info . suspended & & critical_section ( ) ) {
2017-11-29 01:11:20 +03:00
/* Validation check if any subLV is suspended */
if ( ! laopts - > origin_only & & lv_is_origin ( lv ) ) {
/* Check all snapshots for this origin LV */
dm_list_iterate ( snh , & lv - > snapshot_segs )
if ( ! _check_suspended_lv ( dm_list_struct_base ( snh , struct lv_segment , origin_list ) - > cow , NULL ) )
goto needs_resume ; /* Found suspended snapshot */
2017-11-10 23:15:50 +03:00
}
2017-11-29 01:11:20 +03:00
if ( ( r = for_each_sub_lv ( ( struct logical_volume * ) lv , & _check_suspended_lv , NULL ) ) )
goto out ; /* Nothing was found suspended */
2017-11-10 23:15:50 +03:00
} else {
r = 1 ;
goto out ;
}
2009-04-10 14:00:04 +04:00
}
2017-11-29 01:11:20 +03:00
needs_resume :
2012-01-12 05:51:56 +04:00
laopts - > read_only = _passes_readonly_filter ( cmd , lv ) ;
2016-04-15 04:21:27 +03:00
laopts - > resuming = 1 ;
2012-01-12 05:51:56 +04:00
2011-06-17 18:14:19 +04:00
if ( ! _lv_activate_lv ( lv , laopts ) )
2009-10-30 16:07:49 +03:00
goto_out ;
2003-07-05 02:34:56 +04:00
2011-06-11 04:03:06 +04:00
critical_section_dec ( cmd , " resumed " ) ;
2002-02-25 15:56:16 +03:00
2011-06-17 18:14:19 +04:00
if ( ! monitor_dev_for_events ( cmd , lv , laopts , 1 ) )
2006-01-27 21:38:14 +03:00
stack ;
2005-12-02 23:35:07 +03:00
2009-04-10 14:00:04 +04:00
r = 1 ;
out :
return r ;
2002-02-25 15:56:16 +03:00
}
2012-01-20 07:46:52 +04:00
/*
* In a cluster , set exclusive to indicate that only one node is using the
* device . Any tables loaded may then use non - clustered targets .
*
2012-01-25 12:51:29 +04:00
* @ origin_only
* @ exclusive This parameter only has an affect in cluster - context .
* It forces local target type to be used ( instead of
* cluster - aware type ) .
2012-01-20 07:46:52 +04:00
* Returns success if the device is not active
*/
2011-02-18 03:36:04 +03:00
int lv_resume_if_active ( struct cmd_context * cmd , const char * lvid_s ,
2012-01-20 07:46:52 +04:00
unsigned origin_only , unsigned exclusive ,
2014-09-22 17:50:07 +04:00
unsigned revert , const struct logical_volume * lv )
2004-03-08 21:54:13 +03:00
{
2011-06-17 18:14:19 +04:00
struct lv_activate_opts laopts = {
2011-09-28 02:43:40 +04:00
. exclusive = exclusive ,
2020-08-29 22:37:39 +03:00
. origin_only = origin_only ,
2011-09-28 02:43:40 +04:00
. revert = revert
2011-06-17 18:14:19 +04:00
} ;
2013-03-18 00:29:58 +04:00
return _lv_resume ( cmd , lvid_s , & laopts , 0 , lv ) ;
2004-03-08 21:54:13 +03:00
}
2014-09-22 17:50:07 +04:00
int lv_resume ( struct cmd_context * cmd , const char * lvid_s , unsigned origin_only ,
const struct logical_volume * lv )
2004-03-08 21:54:13 +03:00
{
2011-06-17 18:14:19 +04:00
struct lv_activate_opts laopts = { . origin_only = origin_only , } ;
2013-03-18 00:29:58 +04:00
return _lv_resume ( cmd , lvid_s , & laopts , 1 , lv ) ;
2004-03-08 21:54:13 +03:00
}
2014-09-22 17:50:07 +04:00
static int _lv_has_open_snapshots ( const struct logical_volume * lv )
2009-09-29 22:50:28 +04:00
{
struct lv_segment * snap_seg ;
int r = 0 ;
2013-11-29 18:02:57 +04:00
dm_list_iterate_items_gen ( snap_seg , & lv - > snapshot_segs , origin_list )
2016-04-22 00:14:10 +03:00
if ( ! lv_check_not_in_use ( snap_seg - > cow , 1 ) )
2013-11-29 18:02:57 +04:00
r + + ;
2009-09-29 22:50:28 +04:00
2013-11-29 18:02:57 +04:00
if ( r )
2015-11-25 18:06:31 +03:00
log_error ( " LV %s has open %d snapshot(s), not deactivating. " ,
display_lvname ( lv ) , r ) ;
2009-09-29 22:50:28 +04:00
return r ;
}
2014-09-22 17:50:07 +04:00
int lv_deactivate ( struct cmd_context * cmd , const char * lvid_s , const struct logical_volume * lv )
2002-02-27 15:26:41 +03:00
{
2003-01-09 01:44:07 +03:00
struct lvinfo info ;
2013-09-07 04:46:48 +04:00
static const struct lv_activate_opts laopts = { . skip_in_use = 1 } ;
2014-11-05 13:53:11 +03:00
struct dm_list * snh ;
2009-04-10 14:00:04 +04:00
int r = 0 ;
2021-12-15 13:45:22 +03:00
unsigned tmp_state ;
2002-02-27 15:26:41 +03:00
2002-11-18 17:01:16 +03:00
if ( ! activation ( ) )
return 1 ;
2002-03-14 18:36:07 +03:00
if ( test_mode ( ) ) {
2015-11-25 18:06:31 +03:00
_skip ( " Deactivating %s. " , display_lvname ( lv ) ) ;
2009-04-10 14:00:04 +04:00
r = 1 ;
goto out ;
2002-03-14 18:36:07 +03:00
}
2015-11-25 18:06:31 +03:00
log_debug_activation ( " Deactivating %s. " , display_lvname ( lv ) ) ;
2011-06-14 02:28:04 +04:00
2021-03-07 03:54:50 +03:00
if ( lv_is_visible ( lv ) | | lv_is_virtual_origin ( lv ) | |
lv_is_merging_thin_snapshot ( lv ) ) {
switch ( lv_check_not_in_use ( lv , 1 ) ) {
case 0 : goto_out ;
case 2 : goto no_exists ;
2014-11-05 13:53:11 +03:00
}
2021-03-07 03:54:50 +03:00
if ( lv_is_origin ( lv ) & & _lv_has_open_snapshots ( lv ) )
goto_out ;
} else {
if ( ! lv_info ( cmd , lv , 0 , & info , 0 , 0 ) )
goto_out ;
2019-09-30 14:22:42 +03:00
2021-03-07 03:54:50 +03:00
if ( ! info . exists ) {
no_exists :
r = 1 ;
/* Check attached snapshot segments are also inactive */
dm_list_iterate ( snh , & lv - > snapshot_segs ) {
if ( ! lv_info ( cmd , dm_list_struct_base ( snh , struct lv_segment , origin_list ) - > cow ,
0 , & info , 0 , 0 ) )
goto_out ;
if ( info . exists ) {
r = 0 ; /* Snapshot left in table? */
break ;
}
}
2019-09-30 14:22:42 +03:00
2021-03-07 03:54:50 +03:00
if ( lv_is_vdo_pool ( lv ) ) {
/* If someone has remove 'linear' mapping over VDO device
* we may still be able to deactivate the rest of the tree
* i . e . in test - suite we simulate this via ' dmsetup remove ' */
if ( ! lv_info ( cmd , lv , 1 , & info , 1 , 0 ) )
goto_out ;
2002-02-27 15:26:41 +03:00
2021-03-07 03:54:50 +03:00
if ( info . exists & & ! info . open_count )
r = 0 ; /* Unused VDO device left in table? */
}
2011-09-22 21:33:50 +04:00
2021-03-07 03:54:50 +03:00
if ( r )
goto out ;
}
2003-10-22 02:00:36 +04:00
}
2013-09-07 04:46:48 +04:00
if ( ! monitor_dev_for_events ( cmd , lv , & laopts , 0 ) )
2006-01-27 21:38:14 +03:00
stack ;
2005-12-02 23:35:07 +03:00
2011-06-11 04:03:06 +04:00
critical_section_inc ( cmd , " deactivating " ) ;
2003-07-05 02:34:56 +04:00
r = _lv_deactivate ( lv ) ;
2017-12-05 20:48:06 +03:00
/*
* Remove any transiently activated error
* devices which arean ' t used any more .
*/
if ( r & & lv_is_raid ( lv ) & & ! lv_deactivate_any_missing_subdevs ( lv ) ) {
log_error ( " Failed to remove temporary SubLVs from %s " ,
display_lvname ( lv ) ) ;
r = 0 ;
}
2011-06-11 04:03:06 +04:00
critical_section_dec ( cmd , " deactivated " ) ;
2003-07-05 02:34:56 +04:00
2021-12-15 13:45:22 +03:00
tmp_state = cmd - > disable_dm_devs ;
cmd - > disable_dm_devs = 1 ;
2014-09-24 12:03:55 +04:00
if ( ! lv_info ( cmd , lv , 0 , & info , 0 , 0 ) | | info . exists ) {
/* Turn into log_error, but we do not log error */
log_debug_activation ( " Deactivated volume is still %s present. " ,
display_lvname ( lv ) ) ;
2009-09-29 19:17:54 +04:00
r = 0 ;
2014-09-24 12:03:55 +04:00
}
2021-12-15 13:45:22 +03:00
cmd - > disable_dm_devs = tmp_state ;
2009-04-10 14:00:04 +04:00
out :
2003-07-05 02:34:56 +04:00
return r ;
2002-02-27 15:26:41 +03:00
}
2004-03-08 21:54:13 +03:00
/* Test if LV passes filter */
int lv_activation_filter(struct cmd_context *cmd, const char *lvid_s,
			 int *activate, const struct logical_volume *lv)
{
	/* Without activation support every LV "passes". */
	if (!activation()) {
		*activate = 1;
		return 1;
	}

	*activate = _passes_activation_filter(cmd, lv) ? 1 : 0;

	if (!*activate)
		log_verbose("Not activating %s since it does not pass "
			    "activation filter.", display_lvname(lv));

	return 1;
}
2005-08-15 03:18:28 +04:00
static int _lv_activate ( struct cmd_context * cmd , const char * lvid_s ,
2013-03-18 00:29:58 +04:00
struct lv_activate_opts * laopts , int filter ,
2014-09-22 17:50:07 +04:00
const struct logical_volume * lv )
2002-02-27 15:26:41 +03:00
{
2003-01-09 01:44:07 +03:00
struct lvinfo info ;
2009-04-10 14:00:04 +04:00
int r = 0 ;
2002-02-27 15:26:41 +03:00
2018-01-17 17:15:43 +03:00
if ( ! activation ( ) )
return 1 ;
2004-03-08 21:54:13 +03:00
if ( filter & & ! _passes_activation_filter ( cmd , lv ) ) {
2015-11-25 18:06:31 +03:00
log_verbose ( " Not activating %s since it does not pass "
" activation filter. " , display_lvname ( lv ) ) ;
2013-11-01 13:28:42 +04:00
r = 1 ;
2009-04-10 14:00:04 +04:00
goto out ;
2004-03-08 21:13:22 +03:00
}
2019-11-21 01:07:27 +03:00
if ( ( cmd - > partial_activation | | cmd - > degraded_activation ) & &
lv_is_partial ( lv ) & & lv_is_raid ( lv ) & & lv_raid_has_integrity ( ( struct logical_volume * ) lv ) ) {
cmd - > partial_activation = 0 ;
cmd - > degraded_activation = 0 ;
2023-04-26 15:40:11 +03:00
log_print_unless_silent ( " No degraded or partial activation for raid with integrity. " ) ;
2019-11-21 01:07:27 +03:00
}
2016-03-02 22:59:03 +03:00
if ( ( ! lv - > vg - > cmd - > partial_activation ) & & lv_is_partial ( lv ) ) {
2014-07-23 19:13:12 +04:00
if ( ! lv_is_raid_type ( lv ) | | ! partial_raid_lv_supports_degraded_activation ( lv ) ) {
activation: Add "degraded" activation mode
Currently, we have two modes of activation, an unnamed nominal mode
(which I will refer to as "complete") and "partial" mode. The
"complete" mode requires that a volume group be 'complete' - that
is, no missing PVs. If there are any missing PVs, no affected LVs
are allowed to activate - even RAID LVs which might be able to
tolerate a failure. The "partial" mode allows anything to be
activated (or at least attempted). If a non-redundant LV is
missing a portion of its addressable space due to a device failure,
it will be replaced with an error target. RAID LVs will either
activate or fail to activate depending on how badly their
redundancy is compromised.
This patch adds a third option, "degraded" mode. This mode can
be selected via the '--activationmode {complete|degraded|partial}'
option to lvchange/vgchange. It can also be set in lvm.conf.
The "degraded" activation mode allows RAID LVs with a sufficient
level of redundancy to activate (e.g. a RAID5 LV with one device
failure, a RAID6 with two device failures, or RAID1 with n-1
failures). RAID LVs with too many device failures are not allowed
to activate - nor are any non-redundant LVs that may have been
affected. This patch also makes the "degraded" mode the default
activation mode.
The degraded activation mode does not yet work in a cluster. A
new cluster lock flag (LCK_DEGRADED_MODE) will need to be created
to make that work. Currently, there is limited space for this
extra flag and I am looking for possible solutions. One possible
solution is to usurp LCK_CONVERT, as it is not used. When the
locking_type is 3, the degraded mode flag simply gets dropped and
the old ("complete") behavior is exhibited.
2014-07-10 07:56:11 +04:00
log_error ( " Refusing activation of partial LV %s. "
" Use '--activationmode partial' to override. " ,
2014-07-22 23:50:29 +04:00
display_lvname ( lv ) ) ;
activation: Add "degraded" activation mode
Currently, we have two modes of activation, an unnamed nominal mode
(which I will refer to as "complete") and "partial" mode. The
"complete" mode requires that a volume group be 'complete' - that
is, no missing PVs. If there are any missing PVs, no affected LVs
are allowed to activate - even RAID LVs which might be able to
tolerate a failure. The "partial" mode allows anything to be
activated (or at least attempted). If a non-redundant LV is
missing a portion of its addressable space due to a device failure,
it will be replaced with an error target. RAID LVs will either
activate or fail to activate depending on how badly their
redundancy is compromised.
This patch adds a third option, "degraded" mode. This mode can
be selected via the '--activationmode {complete|degraded|partial}'
option to lvchange/vgchange. It can also be set in lvm.conf.
The "degraded" activation mode allows RAID LVs with a sufficient
level of redundancy to activate (e.g. a RAID5 LV with one device
failure, a RAID6 with two device failures, or RAID1 with n-1
failures). RAID LVs with too many device failures are not allowed
to activate - nor are any non-redundant LVs that may have been
affected. This patch also makes the "degraded" mode the default
activation mode.
The degraded activation mode does not yet work in a cluster. A
new cluster lock flag (LCK_DEGRADED_MODE) will need to be created
to make that work. Currently, there is limited space for this
extra flag and I am looking for possible solutions. One possible
solution is to usurp LCK_CONVERT, as it is not used. When the
locking_type is 3, the degraded mode flag simply gets dropped and
the old ("complete") behavior is exhibited.
2014-07-10 07:56:11 +04:00
goto out ;
2014-07-22 23:50:29 +04:00
}
if ( ! lv - > vg - > cmd - > degraded_activation ) {
activation: Add "degraded" activation mode
Currently, we have two modes of activation, an unnamed nominal mode
(which I will refer to as "complete") and "partial" mode. The
"complete" mode requires that a volume group be 'complete' - that
is, no missing PVs. If there are any missing PVs, no affected LVs
are allowed to activate - even RAID LVs which might be able to
tolerate a failure. The "partial" mode allows anything to be
activated (or at least attempted). If a non-redundant LV is
missing a portion of its addressable space due to a device failure,
it will be replaced with an error target. RAID LVs will either
activate or fail to activate depending on how badly their
redundancy is compromised.
This patch adds a third option, "degraded" mode. This mode can
be selected via the '--activationmode {complete|degraded|partial}'
option to lvchange/vgchange. It can also be set in lvm.conf.
The "degraded" activation mode allows RAID LVs with a sufficient
level of redundancy to activate (e.g. a RAID5 LV with one device
failure, a RAID6 with two device failures, or RAID1 with n-1
failures). RAID LVs with too many device failures are not allowed
to activate - nor are any non-redundant LVs that may have been
affected. This patch also makes the "degraded" mode the default
activation mode.
The degraded activation mode does not yet work in a cluster. A
new cluster lock flag (LCK_DEGRADED_MODE) will need to be created
to make that work. Currently, there is limited space for this
extra flag and I am looking for possible solutions. One possible
solution is to usurp LCK_CONVERT, as it is not used. When the
locking_type is 3, the degraded mode flag simply gets dropped and
the old ("complete") behavior is exhibited.
2014-07-10 07:56:11 +04:00
log_error ( " Refusing activation of partial LV %s. "
" Try '--activationmode degraded'. " ,
2014-07-22 23:50:29 +04:00
display_lvname ( lv ) ) ;
activation: Add "degraded" activation mode
Currently, we have two modes of activation, an unnamed nominal mode
(which I will refer to as "complete") and "partial" mode. The
"complete" mode requires that a volume group be 'complete' - that
is, no missing PVs. If there are any missing PVs, no affected LVs
are allowed to activate - even RAID LVs which might be able to
tolerate a failure. The "partial" mode allows anything to be
activated (or at least attempted). If a non-redundant LV is
missing a portion of its addressable space due to a device failure,
it will be replaced with an error target. RAID LVs will either
activate or fail to activate depending on how badly their
redundancy is compromised.
This patch adds a third option, "degraded" mode. This mode can
be selected via the '--activationmode {complete|degraded|partial}'
option to lvchange/vgchange. It can also be set in lvm.conf.
The "degraded" activation mode allows RAID LVs with a sufficient
level of redundancy to activate (e.g. a RAID5 LV with one device
failure, a RAID6 with two device failures, or RAID1 with n-1
failures). RAID LVs with too many device failures are not allowed
to activate - nor are any non-redundant LVs that may have been
affected. This patch also makes the "degraded" mode the default
activation mode.
The degraded activation mode does not yet work in a cluster. A
new cluster lock flag (LCK_DEGRADED_MODE) will need to be created
to make that work. Currently, there is limited space for this
extra flag and I am looking for possible solutions. One possible
solution is to usurp LCK_CONVERT, as it is not used. When the
locking_type is 3, the degraded mode flag simply gets dropped and
the old ("complete") behavior is exhibited.
2014-07-10 07:56:11 +04:00
goto out ;
}
2008-09-19 10:42:00 +04:00
}
2020-10-26 23:35:23 +03:00
if ( ( cmd - > partial_activation | | cmd - > degraded_activation ) & & lv_is_writecache ( lv ) ) {
struct logical_volume * lv_fast = first_seg ( lv ) - > writecache ;
if ( lv_is_partial ( lv ) | | ( lv_fast & & lv_is_partial ( lv_fast ) ) ) {
log_error ( " Cannot use partial or degraded activation with writecache. " ) ;
goto out ;
}
}
2009-10-16 21:41:49 +04:00
if ( lv_has_unknown_segments ( lv ) ) {
log_error ( " Refusing activation of LV %s containing "
2015-11-25 18:06:31 +03:00
" an unrecognised segment. " , display_lvname ( lv ) ) ;
2013-05-23 18:17:08 +04:00
goto out ;
2009-10-16 21:41:49 +04:00
}
2018-11-01 01:05:08 +03:00
if ( lv_raid_has_visible_sublvs ( lv ) ) {
log_error ( " Refusing activation of RAID LV %s with "
" visible SubLVs. " , display_lvname ( lv ) ) ;
goto out ;
}
2002-03-14 18:36:07 +03:00
if ( test_mode ( ) ) {
2015-11-25 18:06:31 +03:00
_skip ( " Activating %s. " , display_lvname ( lv ) ) ;
2009-04-10 14:00:04 +04:00
r = 1 ;
goto out ;
2002-03-14 18:36:07 +03:00
}
2018-02-28 19:16:17 +03:00
/* Component LV activation is enforced to be 'read-only' */
/* TODO: should not apply for LVs in maintenance mode */
if ( ! lv_is_visible ( lv ) & & lv_is_component ( lv ) ) {
laopts - > read_only = 1 ;
2018-02-28 19:22:09 +03:00
laopts - > component_lv = lv ;
2023-07-16 22:15:00 +03:00
} else if ( lv_is_pool_metadata_spare ( lv ) ) {
laopts - > component_lv = lv ;
2018-02-28 19:16:17 +03:00
} else if ( filter )
2012-01-12 05:51:56 +04:00
laopts - > read_only = _passes_readonly_filter ( cmd , lv ) ;
2015-11-25 18:06:31 +03:00
log_debug_activation ( " Activating %s%s%s%s%s. " , display_lvname ( lv ) ,
2013-01-08 02:30:29 +04:00
laopts - > exclusive ? " exclusively " : " " ,
2013-10-08 15:27:21 +04:00
laopts - > read_only ? " read-only " : " " ,
activation: flag temporary LVs internally
Add LV_TEMPORARY flag for LVs with limited existence during command
execution. Such LVs are temporary in way that they need to be activated,
some action done and then removed immediately. Such LVs are just like
any normal LV - the only difference is that they are removed during
LVM command execution. This is also the case for LVs representing
future pool metadata spare LVs which we need to initialize by using
the usual LV before they are declared as pool metadata spare.
We can optimize some other parts like udev to do a better job if
it knows that the LV is temporary and any processing on it is just
useless.
This flag is orthogonal to LV_NOSCAN flag introduced recently
as LV_NOSCAN flag is primarily used to mark an LV for the scanning
to be avoided before the zeroing of the device happens. The LV_TEMPORARY
flag makes a difference between a full-fledged LV visible in the system
and the LV just used as a temporary overlay for some action that needs to
be done on underlying PVs.
For example: lvcreate --thinpool POOL --zero n -L 1G vg
- first, the usual LV is created to do a clean up for pool metadata
spare. The LV is activated, zeroed, deactivated.
- between "activated" and "zeroed" stage, the LV_NOSCAN flag is used
to avoid any scanning in udev
- betwen "zeroed" and "deactivated" stage, we need to avoid the WATCH
udev rule, but since the LV is just a usual LV, we can't make a
difference. The LV_TEMPORARY internal LV flag helps here. If we
create the LV with this flag, the DM_UDEV_DISABLE_DISK_RULES
and DM_UDEV_DISABLE_OTHER_RULES flag are set (just like as it is
with "invisible" and non-top-level LVs) - udev is directed to
skip WATCH rule use.
- if the LV_TEMPORARY flag was not used, there would normally be
a WATCH event generated once the LV is closed after "zeroed"
stage. This will make problems with immediated deactivation that
follows.
2013-10-23 16:06:39 +04:00
laopts - > noscan ? " noscan " : " " ,
laopts - > temporary ? " temporary " : " " ) ;
2011-06-14 02:28:04 +04:00
2019-10-31 13:45:28 +03:00
if ( ! lv_info_with_name_check ( cmd , lv , 0 , & info ) )
2009-04-10 14:00:04 +04:00
goto_out ;
2002-03-01 22:08:11 +03:00
2012-01-12 05:51:56 +04:00
/*
* Nothing to do ?
*/
if ( info . exists & & ! info . suspended & & info . live_table & &
2018-03-08 12:27:04 +03:00
( info . read_only = = read_only_lv ( lv , laopts , NULL ) ) ) {
2009-04-10 14:00:04 +04:00
r = 1 ;
2016-04-15 00:41:25 +03:00
log_debug_activation ( " LV %s is already active. " , display_lvname ( lv ) ) ;
2009-04-10 14:00:04 +04:00
goto out ;
}
2002-02-27 15:26:41 +03:00
2009-06-01 16:43:31 +04:00
lv_calculate_readahead ( lv , NULL ) ;
2011-06-11 04:03:06 +04:00
critical_section_inc ( cmd , " activating " ) ;
2011-06-17 18:14:19 +04:00
if ( ! ( r = _lv_activate_lv ( lv , laopts ) ) )
2009-10-30 16:07:49 +03:00
stack ;
2011-06-11 04:03:06 +04:00
critical_section_dec ( cmd , " activated " ) ;
2003-07-05 02:34:56 +04:00
2011-06-17 18:14:19 +04:00
if ( r & & ! monitor_dev_for_events ( cmd , lv , laopts , 1 ) )
2006-01-27 21:38:14 +03:00
stack ;
2009-04-10 14:00:04 +04:00
out :
2003-07-05 02:34:56 +04:00
return r ;
2002-02-27 15:26:41 +03:00
}
2003-01-09 01:44:07 +03:00
2004-03-08 21:54:13 +03:00
/* Activate LV */
2013-10-08 15:27:21 +04:00
int lv_activate ( struct cmd_context * cmd , const char * lvid_s , int exclusive ,
2014-09-22 17:50:07 +04:00
int noscan , int temporary , const struct logical_volume * lv )
2004-03-08 21:54:13 +03:00
{
activation: flag temporary LVs internally
Add LV_TEMPORARY flag for LVs with limited existence during command
execution. Such LVs are temporary in way that they need to be activated,
some action done and then removed immediately. Such LVs are just like
any normal LV - the only difference is that they are removed during
LVM command execution. This is also the case for LVs representing
future pool metadata spare LVs which we need to initialize by using
the usual LV before they are declared as pool metadata spare.
We can optimize some other parts like udev to do a better job if
it knows that the LV is temporary and any processing on it is just
useless.
This flag is orthogonal to LV_NOSCAN flag introduced recently
as LV_NOSCAN flag is primarily used to mark an LV for the scanning
to be avoided before the zeroing of the device happens. The LV_TEMPORARY
flag makes a difference between a full-fledged LV visible in the system
and the LV just used as a temporary overlay for some action that needs to
be done on underlying PVs.
For example: lvcreate --thinpool POOL --zero n -L 1G vg
- first, the usual LV is created to do a clean up for pool metadata
spare. The LV is activated, zeroed, deactivated.
- between "activated" and "zeroed" stage, the LV_NOSCAN flag is used
to avoid any scanning in udev
- betwen "zeroed" and "deactivated" stage, we need to avoid the WATCH
udev rule, but since the LV is just a usual LV, we can't make a
difference. The LV_TEMPORARY internal LV flag helps here. If we
create the LV with this flag, the DM_UDEV_DISABLE_DISK_RULES
and DM_UDEV_DISABLE_OTHER_RULES flag are set (just like as it is
with "invisible" and non-top-level LVs) - udev is directed to
skip WATCH rule use.
- if the LV_TEMPORARY flag was not used, there would normally be
a WATCH event generated once the LV is closed after "zeroed"
stage. This will make problems with immediated deactivation that
follows.
2013-10-23 16:06:39 +04:00
struct lv_activate_opts laopts = { . exclusive = exclusive ,
. noscan = noscan ,
. temporary = temporary } ;
2011-06-17 18:14:19 +04:00
2013-03-18 00:29:58 +04:00
if ( ! _lv_activate ( cmd , lvid_s , & laopts , 0 , lv ) )
2009-10-30 16:07:49 +03:00
return_0 ;
return 1 ;
2004-03-08 21:54:13 +03:00
}
/* Activate LV only if it passes filter */
2013-10-08 15:27:21 +04:00
int lv_activate_with_filter ( struct cmd_context * cmd , const char * lvid_s , int exclusive ,
2014-09-22 17:50:07 +04:00
int noscan , int temporary , const struct logical_volume * lv )
2004-03-08 21:54:13 +03:00
{
activation: flag temporary LVs internally
Add LV_TEMPORARY flag for LVs with limited existence during command
execution. Such LVs are temporary in way that they need to be activated,
some action done and then removed immediately. Such LVs are just like
any normal LV - the only difference is that they are removed during
LVM command execution. This is also the case for LVs representing
future pool metadata spare LVs which we need to initialize by using
the usual LV before they are declared as pool metadata spare.
We can optimize some other parts like udev to do a better job if
it knows that the LV is temporary and any processing on it is just
useless.
This flag is orthogonal to LV_NOSCAN flag introduced recently
as LV_NOSCAN flag is primarily used to mark an LV for the scanning
to be avoided before the zeroing of the device happens. The LV_TEMPORARY
flag makes a difference between a full-fledged LV visible in the system
and the LV just used as a temporary overlay for some action that needs to
be done on underlying PVs.
For example: lvcreate --thinpool POOL --zero n -L 1G vg
- first, the usual LV is created to do a clean up for pool metadata
spare. The LV is activated, zeroed, deactivated.
- between "activated" and "zeroed" stage, the LV_NOSCAN flag is used
to avoid any scanning in udev
- betwen "zeroed" and "deactivated" stage, we need to avoid the WATCH
udev rule, but since the LV is just a usual LV, we can't make a
difference. The LV_TEMPORARY internal LV flag helps here. If we
create the LV with this flag, the DM_UDEV_DISABLE_DISK_RULES
and DM_UDEV_DISABLE_OTHER_RULES flag are set (just like as it is
with "invisible" and non-top-level LVs) - udev is directed to
skip WATCH rule use.
- if the LV_TEMPORARY flag was not used, there would normally be
a WATCH event generated once the LV is closed after "zeroed"
stage. This will make problems with immediated deactivation that
follows.
2013-10-23 16:06:39 +04:00
struct lv_activate_opts laopts = { . exclusive = exclusive ,
. noscan = noscan ,
. temporary = temporary } ;
2011-06-17 18:14:19 +04:00
2013-03-18 00:29:58 +04:00
if ( ! _lv_activate ( cmd , lvid_s , & laopts , 1 , lv ) )
2009-10-30 16:07:49 +03:00
return_0 ;
return 1 ;
2004-03-08 21:54:13 +03:00
}
2003-11-12 22:16:48 +03:00
int lv_mknodes ( struct cmd_context * cmd , const struct logical_volume * lv )
{
2014-08-19 16:19:11 +04:00
int r ;
2003-11-12 22:16:48 +03:00
2004-03-30 18:40:03 +04:00
if ( ! lv ) {
2005-10-17 03:03:59 +04:00
r = dm_mknodes ( NULL ) ;
2004-03-30 18:40:03 +04:00
fs_unlock ( ) ;
return r ;
}
2010-02-24 23:00:56 +03:00
if ( ! activation ( ) )
return 1 ;
2003-11-12 22:16:48 +03:00
2010-02-24 23:00:56 +03:00
r = dev_manager_mknodes ( lv ) ;
2003-11-12 22:16:48 +03:00
fs_unlock ( ) ;
return r ;
}
2016-12-23 05:35:13 +03:00
/* Remove any existing, closed mapped device by @name */
static int _remove_dm_dev_by_name(const char *name)
{
	struct dm_task *dmt;
	struct dm_info info;
	int r = 0;

	if (!(dmt = dm_task_create(DM_DEVICE_INFO)))
		return_0;

	/* Query whether the device exists at all. */
	if (dm_task_set_name(dmt, name) && dm_task_run(dmt) && dm_task_get_info(dmt, &info)) {
		dm_task_destroy(dmt);

		/* Nothing to do for a missing or still open dm device. */
		if (!info.exists || info.open_count)
			return 1;

		if (!(dmt = dm_task_create(DM_DEVICE_REMOVE)))
			return_0;

		if (dm_task_set_name(dmt, name))
			r = dm_task_run(dmt);
	}

	dm_task_destroy(dmt);

	return r;
}
/* Work all segments of @lv removing any existing, closed "*-missing_N_0" sub devices. */
static int _lv_remove_any_missing_subdevs ( struct logical_volume * lv )
{
2023-07-17 13:43:39 +03:00
char name [ NAME_LEN ] ;
struct lv_segment * seg ;
uint32_t seg_no = 0 ;
2016-12-23 05:35:13 +03:00
2023-07-17 13:43:39 +03:00
if ( lv ) {
2016-12-23 05:35:13 +03:00
dm_list_iterate_items ( seg , & lv - > segments ) {
if ( dm_snprintf ( name , sizeof ( name ) , " %s-%s-missing_%u_0 " , seg - > lv - > vg - > name , seg - > lv - > name , seg_no ) < 0 )
2017-12-05 20:48:06 +03:00
return_0 ;
2016-12-23 05:35:13 +03:00
if ( ! _remove_dm_dev_by_name ( name ) )
2023-07-17 13:44:04 +03:00
return_0 ;
2016-12-23 05:35:13 +03:00
seg_no + + ;
}
}
return 1 ;
}
/* Remove any "*-missing_*" sub devices added by the activation layer for an rmate/rimage missing PV mapping */
int lv_deactivate_any_missing_subdevs ( const struct logical_volume * lv )
{
uint32_t s ;
struct lv_segment * seg = first_seg ( lv ) ;
for ( s = 0 ; s < seg - > area_count ; s + + ) {
if ( seg_type ( seg , s ) = = AREA_LV & &
! _lv_remove_any_missing_subdevs ( seg_lv ( seg , s ) ) )
2023-07-17 13:44:04 +03:00
return_0 ;
2016-12-23 05:35:13 +03:00
if ( seg - > meta_areas & & seg_metatype ( seg , s ) = = AREA_LV & &
! _lv_remove_any_missing_subdevs ( seg_metalv ( seg , s ) ) )
2023-07-17 13:44:04 +03:00
return_0 ;
2016-12-23 05:35:13 +03:00
}
return 1 ;
}
2005-10-25 23:08:21 +04:00
/*
* Does PV use VG somewhere in its construction ?
* Returns 1 on failure .
*/
2006-05-11 21:58:58 +04:00
int pv_uses_vg ( struct physical_volume * pv ,
2006-05-12 23:16:48 +04:00
struct volume_group * vg )
2005-10-25 23:08:21 +04:00
{
2012-02-23 17:11:07 +04:00
if ( ! activation ( ) | | ! pv - > dev )
2005-10-25 23:08:21 +04:00
return 0 ;
if ( ! dm_is_dm_major ( MAJOR ( pv - > dev - > dev ) ) )
return 0 ;
2006-05-11 21:58:58 +04:00
return dev_manager_device_uses_vg ( pv - > dev , vg ) ;
2005-10-25 23:08:21 +04:00
}
2006-05-16 20:48:31 +04:00
/* Release activation resources held by the dev manager. */
void activation_release(void)
{
	if (critical_section())
		/* May leak stacked operation */
		log_error("Releasing activation in critical section.");

	fs_unlock(); /* Implicit dev_manager_release(); */
}
2003-07-05 02:34:56 +04:00
/* Full activation shutdown: release resources, then tear down dev manager. */
void activation_exit(void)
{
	activation_release();
	dev_manager_exit();
}
2003-01-09 01:44:07 +03:00
# endif
2018-02-27 16:13:00 +03:00
static int _component_cb ( struct logical_volume * lv , void * data )
{
struct logical_volume * * component_lv = ( struct logical_volume * * ) data ;
if ( lv_is_locked ( lv ) | | lv_is_pvmove ( lv ) | | /* ignoring */
/* thin-pool is special and it's using layered device */
2022-08-24 16:02:07 +03:00
( lv_is_thin_pool ( lv ) & & thin_pool_is_active ( lv ) ) )
2018-02-27 16:13:00 +03:00
return - 1 ;
2021-07-14 13:53:47 +03:00
/* External origin is activated through thinLV and uses -real suffix.
* Note : for old clustered logic we would need to check for all thins */
if ( ( lv_is_external_origin ( lv ) & & lv_info ( lv - > vg - > cmd , lv , 1 , NULL , 0 , 0 ) ) | |
lv_is_active ( lv ) ) {
2018-02-27 16:13:00 +03:00
if ( ! lv_is_component ( lv ) | | lv_is_visible ( lv ) )
return - 1 ; /* skip whole subtree */
log_debug_activation ( " Found active component LV %s. " , display_lvname ( lv ) ) ;
* component_lv = lv ;
return 0 ; /* break any further processing */
}
return 1 ;
}
/*
 * Finds out for any LV if any of its component LVs are active.
 * Function first checks if an existing LV is visible and active eventually
 * it's lock holding LV is already active.  In such case sub LV cannot be
 * activated alone and no further checking is needed.
 *
 * Returns active component LV if there is such.
 */
const struct logical_volume *lv_component_is_active(const struct logical_volume *lv)
{
	const struct logical_volume *component_lv = NULL;
	const struct logical_volume *holder_lv = lv_lock_holder(lv);

	if ((holder_lv != lv) && lv_is_active(holder_lv))
		return NULL; /* Lock holding LV is active, do not check components */

	/* Only descend when the callback says the LV itself is not blocking. */
	if (_component_cb((struct logical_volume *) lv, &holder_lv) == 1)
		(void) for_each_sub_lv((struct logical_volume *) lv, _component_cb,
				       (void *) &component_lv);

	return component_lv;
}
/*
* Finds out if any LV above is active , as stacked device tree can be composed of
* chained set of LVs .
*
* Returns active holder LV if there is such .
*/
const struct logical_volume * lv_holder_is_active ( const struct logical_volume * lv )
{
const struct logical_volume * holder ;
const struct seg_list * sl ;
if ( lv_is_locked ( lv ) | | lv_is_pvmove ( lv ) )
return NULL ; /* Skip pvmove/locked LV tracking */
dm_list_iterate_items ( sl , & lv - > segs_using_this_lv ) {
/* Recursive call for upper-stack holder */
if ( ( holder = lv_holder_is_active ( sl - > seg - > lv ) ) )
return holder ;
if ( lv_is_active ( sl - > seg - > lv ) ) {
log_debug_activation ( " Found active holder LV %s. " , display_lvname ( sl - > seg - > lv ) ) ;
return sl - > seg - > lv ;
}
}
return NULL ;
}
static int _deactivate_sub_lv_cb ( struct logical_volume * lv , void * data )
{
struct logical_volume * * slv = data ;
if ( lv_is_thin_pool ( lv ) | | lv_is_external_origin ( lv ) )
return - 1 ;
if ( ! deactivate_lv ( lv - > vg - > cmd , lv ) ) {
* slv = lv ;
return 0 ;
}
return 1 ;
}
/*
* Deactivates LV toghether with explicit deactivation call made also for all its component LVs .
*/
int deactivate_lv_with_sub_lv ( const struct logical_volume * lv )
{
2021-03-09 18:42:38 +03:00
struct logical_volume * flv = NULL ;
2018-02-27 16:13:00 +03:00
if ( ! deactivate_lv ( lv - > vg - > cmd , lv ) ) {
log_error ( " Cannot deactivate logical volume %s. " ,
display_lvname ( lv ) ) ;
return 0 ;
}
if ( ! for_each_sub_lv ( ( struct logical_volume * ) lv , _deactivate_sub_lv_cb , & flv ) ) {
log_error ( " Cannot deactivate subvolume %s of logical volume %s. " ,
2021-03-09 18:42:38 +03:00
( flv ) ? display_lvname ( flv ) : " " , display_lvname ( lv ) ) ;
2018-02-27 16:13:00 +03:00
return 0 ;
}
return 1 ;
}
2018-06-06 00:47:24 +03:00
int activate_lv ( struct cmd_context * cmd , const struct logical_volume * lv )
{
const struct logical_volume * active_lv ;
int ret ;
/*
* When trying activating component LV , make sure none of sub component
* LV or LVs that are using it are active .
*/
if ( ! lv_is_visible ( lv ) )
active_lv = lv_holder_is_active ( lv ) ;
else
active_lv = lv_component_is_active ( lv ) ;
if ( active_lv ) {
log_error ( " Activation of logical volume %s is prohibited while logical volume %s is active. " ,
display_lvname ( lv ) , display_lvname ( active_lv ) ) ;
ret = 0 ;
goto out ;
}
ret = lv_activate_with_filter ( cmd , NULL , 0 ,
( lv - > status & LV_NOSCAN ) ? 1 : 0 ,
( lv - > status & LV_TEMPORARY ) ? 1 : 0 ,
lv_committed ( lv ) ) ;
out :
return ret ;
}
/* Deactivate the committed form of the LV. */
int deactivate_lv(struct cmd_context *cmd, const struct logical_volume *lv)
{
	return lv_deactivate(cmd, NULL, lv_committed(lv));
}
/* Suspend the LV if active; enters the critical section that the
 * matching resume_lv() later leaves. */
int suspend_lv(struct cmd_context *cmd, const struct logical_volume *lv)
{
	critical_section_inc(cmd, "locking for suspend");

	return lv_suspend_if_active(cmd, NULL, 0, 0, lv_committed(lv), lv);
}
/* Suspend only the origin of the LV; enters the critical section that the
 * matching resume_lv_origin() later leaves. */
int suspend_lv_origin(struct cmd_context *cmd, const struct logical_volume *lv)
{
	critical_section_inc(cmd, "locking for suspend");

	return lv_suspend_if_active(cmd, NULL, 1, 0, lv_committed(lv), lv);
}
/* Resume the LV if active; leaves the critical section entered by suspend_lv(). */
int resume_lv(struct cmd_context *cmd, const struct logical_volume *lv)
{
	int r;

	r = lv_resume_if_active(cmd, NULL, 0, 0, 0, lv_committed(lv));

	critical_section_dec(cmd, "unlocking on resume");

	return r;
}
/* Resume only the origin of the LV; leaves the critical section entered by
 * suspend_lv_origin(). */
int resume_lv_origin(struct cmd_context *cmd, const struct logical_volume *lv)
{
	int r;

	r = lv_resume_if_active(cmd, NULL, 1, 0, 0, lv_committed(lv));

	critical_section_dec(cmd, "unlocking on resume");

	return r;
}
/* Resume with the revert flag set, discarding the precommitted metadata;
 * leaves the critical section entered by the preceding suspend. */
int revert_lv(struct cmd_context *cmd, const struct logical_volume *lv)
{
	int r;

	r = lv_resume_if_active(cmd, NULL, 0, 0, 1, lv_committed(lv));

	critical_section_dec(cmd, "unlocking on revert");

	return r;
}