/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "tools.h"
2010-08-17 02:54:35 +04:00
/*
* Increments * count by the number of _new_ monitored devices .
*/
2007-01-20 01:21:45 +03:00
static int _monitor_lvs_in_vg ( struct cmd_context * cmd ,
2010-08-17 02:54:35 +04:00
struct volume_group * vg , int reg , int * count )
2006-05-12 23:16:48 +04:00
{
struct lv_list * lvl ;
struct logical_volume * lv ;
struct lvinfo info ;
2010-10-30 01:15:23 +04:00
int r = 1 ;
2006-05-12 23:16:48 +04:00
2008-11-04 01:14:30 +03:00
dm_list_iterate_items ( lvl , & vg - > lvs ) {
2006-05-12 23:16:48 +04:00
lv = lvl - > lv ;
2012-03-23 13:58:04 +04:00
if ( ! lv_info ( cmd , lv , lv_is_thin_pool ( lv ) ? 1 : 0 ,
& info , 0 , 0 ) | |
! info . exists )
continue ;
2006-05-12 23:16:48 +04:00
/*
* FIXME : Need to consider all cases . . . PVMOVE , etc
*/
2012-03-23 13:58:04 +04:00
if ( lv - > status & PVMOVE )
2006-05-12 23:16:48 +04:00
continue ;
2010-08-17 20:25:32 +04:00
if ( ! monitor_dev_for_events ( cmd , lv , 0 , reg ) ) {
2010-10-30 01:15:23 +04:00
r = 0 ;
2006-05-12 23:16:48 +04:00
continue ;
2007-01-20 01:21:45 +03:00
} else
2010-08-17 02:54:35 +04:00
( * count ) + + ;
2006-05-12 23:16:48 +04:00
}
2010-08-17 02:54:35 +04:00
return r ;
2006-05-12 23:16:48 +04:00
}
2010-01-05 23:56:51 +03:00
static int _poll_lvs_in_vg ( struct cmd_context * cmd ,
struct volume_group * vg )
{
struct lv_list * lvl ;
struct logical_volume * lv ;
struct lvinfo info ;
int lv_active ;
int count = 0 ;
dm_list_iterate_items ( lvl , & vg - > lvs ) {
lv = lvl - > lv ;
2010-08-17 20:25:32 +04:00
if ( ! lv_info ( cmd , lv , 0 , & info , 0 , 0 ) )
2010-01-05 23:56:51 +03:00
lv_active = 0 ;
else
lv_active = info . exists ;
2010-01-13 04:50:34 +03:00
if ( lv_active & &
2010-01-13 04:56:18 +03:00
( lv - > status & ( PVMOVE | CONVERTING | MERGING ) ) ) {
2010-01-13 04:50:34 +03:00
lv_spawn_background_polling ( cmd , lv ) ;
count + + ;
}
2010-01-05 23:56:51 +03:00
}
/*
* returns the number of polled devices
* - there is no way to know if lv is already being polled
*/
return count ;
}
2012-06-27 16:59:34 +04:00
static int _activate_lvs_in_vg ( struct cmd_context * cmd , struct volume_group * vg ,
activation_change_t activate )
2002-03-01 22:08:11 +03:00
{
2003-10-16 00:02:46 +04:00
struct lv_list * lvl ;
2002-03-01 22:08:11 +03:00
struct logical_volume * lv ;
2009-11-24 19:08:49 +03:00
int count = 0 , expected_count = 0 ;
2002-03-01 22:08:11 +03:00
2011-09-07 12:41:47 +04:00
sigint_allow ( ) ;
2008-11-04 01:14:30 +03:00
dm_list_iterate_items ( lvl , & vg - > lvs ) {
2011-09-07 12:41:47 +04:00
if ( sigint_caught ( ) )
return_0 ;
2003-10-16 00:02:46 +04:00
lv = lvl - > lv ;
2002-03-01 22:08:11 +03:00
2009-11-18 20:20:18 +03:00
if ( ! lv_is_visible ( lv ) )
continue ;
2011-09-14 22:20:03 +04:00
/* If LV is sparse, activate origin instead */
if ( lv_is_cow ( lv ) & & lv_is_virtual_origin ( origin_from_cow ( lv ) ) )
lv = origin_from_cow ( lv ) ;
2003-05-06 16:14:36 +04:00
/* Only request activation of snapshot origin devices */
2005-04-07 16:39:44 +04:00
if ( ( lv - > status & SNAPSHOT ) | | lv_is_cow ( lv ) )
2002-03-18 16:09:27 +03:00
continue ;
2008-06-12 15:49:46 +04:00
/* Only request activation of mirror LV */
if ( ( lv - > status & MIRROR_IMAGE ) | | ( lv - > status & MIRROR_LOG ) )
continue ;
2010-05-24 13:03:39 +04:00
/* Only request activation of the first replicator-dev LV */
/* Avoids retry with all heads in case of failure */
if ( lv_is_replicator_dev ( lv ) & & ( lv ! = first_replicator_dev ( lv ) ) )
continue ;
2008-06-12 15:49:46 +04:00
/* Can't deactivate a pvmove LV */
2004-06-16 21:13:41 +04:00
/* FIXME There needs to be a controlled way of doing this */
if ( ( ( activate = = CHANGE_AN ) | | ( activate = = CHANGE_ALN ) ) & &
2008-06-12 15:49:46 +04:00
( ( lv - > status & PVMOVE ) ) )
2003-05-06 16:14:36 +04:00
continue ;
2011-02-04 23:30:17 +03:00
/*
* If the LV is active exclusive remotely ,
* then ignore it here
*/
if ( lv_is_active_exclusive_remotely ( lv ) ) {
log_verbose ( " %s/%s is exclusively active on "
" a remote node " , vg - > name , lv - > name ) ;
continue ;
}
2013-04-11 15:51:08 +04:00
if ( ( activate = = CHANGE_AAY ) & &
! lv_passes_auto_activation_filter ( cmd , lv ) )
2012-09-12 11:47:40 +04:00
continue ;
2012-06-27 18:21:15 +04:00
2009-11-24 19:08:49 +03:00
expected_count + + ;
2013-04-11 15:51:08 +04:00
if ( ! lv_change_activate ( cmd , lv , activate ) ) {
2010-01-06 00:08:34 +03:00
stack ;
2002-03-01 22:08:11 +03:00
continue ;
2010-01-06 00:08:34 +03:00
}
2002-03-01 22:08:11 +03:00
count + + ;
}
2011-09-07 12:41:47 +04:00
sigint_restore ( ) ;
2009-11-24 19:08:49 +03:00
if ( expected_count )
log_verbose ( " %s %d logical volumes in volume group %s " ,
2009-12-07 22:32:28 +03:00
( activate = = CHANGE_AN | | activate = = CHANGE_ALN ) ?
" Deactivated " : " Activated " , count , vg - > name ) ;
2009-11-24 19:08:49 +03:00
2010-10-30 01:15:23 +04:00
return ( expected_count ! = count ) ? 0 : 1 ;
2002-03-01 22:08:11 +03:00
}
2006-05-12 23:16:48 +04:00
static int _vgchange_monitoring ( struct cmd_context * cmd , struct volume_group * vg )
{
2010-10-30 01:15:23 +04:00
int r = 1 ;
2010-08-17 02:54:35 +04:00
int monitored = 0 ;
2006-05-12 23:16:48 +04:00
2010-07-26 23:03:29 +04:00
if ( lvs_in_vg_activated ( vg ) & &
2007-01-25 02:43:27 +03:00
dmeventd_monitor_mode ( ) ! = DMEVENTD_MONITOR_IGNORE ) {
2010-10-30 01:15:23 +04:00
if ( ! _monitor_lvs_in_vg ( cmd , vg , dmeventd_monitor_mode ( ) , & monitored ) )
r = 0 ;
config: add silent mode
Accept -q as the short form of --quiet.
Suppress non-essential standard output if -q is given twice.
Treat log/silent in lvm.conf as equivalent to -qq.
Review all log_print messages and change some to
log_print_unless_silent.
When silent, the following commands still produce output:
dumpconfig, lvdisplay, lvmdiskscan, lvs, pvck, pvdisplay,
pvs, version, vgcfgrestore -l, vgdisplay, vgs.
[Needs checking.]
Non-essential messages are shifted from log level 4 to log level 5
for syslog and lvm2_log_fn purposes.
2012-08-25 23:35:48 +04:00
log_print_unless_silent ( " %d logical volume(s) in volume group "
" \" %s \" %smonitored " ,
monitored , vg - > name , ( dmeventd_monitor_mode ( ) ) ? " " : " un " ) ;
2006-05-12 23:16:48 +04:00
}
2010-10-30 01:15:23 +04:00
return r ;
2006-05-12 23:16:48 +04:00
}
2010-01-05 23:56:51 +03:00
static int _vgchange_background_polling ( struct cmd_context * cmd , struct volume_group * vg )
{
int polled ;
if ( lvs_in_vg_activated ( vg ) & & background_polling ( ) ) {
polled = _poll_lvs_in_vg ( cmd , vg ) ;
2010-10-26 05:37:59 +04:00
if ( polled )
config: add silent mode
Accept -q as the short form of --quiet.
Suppress non-essential standard output if -q is given twice.
Treat log/silent in lvm.conf as equivalent to -qq.
Review all log_print messages and change some to
log_print_unless_silent.
When silent, the following commands still produce output:
dumpconfig, lvdisplay, lvmdiskscan, lvs, pvck, pvdisplay,
pvs, version, vgcfgrestore -l, vgdisplay, vgs.
[Needs checking.]
Non-essential messages are shifted from log level 4 to log level 5
for syslog and lvm2_log_fn purposes.
2012-08-25 23:35:48 +04:00
log_print_unless_silent ( " Background polling started for %d logical volume(s) "
" in volume group \" %s \" " ,
polled , vg - > name ) ;
2010-01-05 23:56:51 +03:00
}
2010-10-30 01:15:23 +04:00
return 1 ;
2010-01-05 23:56:51 +03:00
}
2012-06-27 16:59:34 +04:00
int vgchange_activate ( struct cmd_context * cmd , struct volume_group * vg ,
activation_change_t activate )
2001-10-16 20:25:28 +04:00
{
2012-06-27 16:59:34 +04:00
int lv_open , active , monitored = 0 , r = 1 , do_activate = 1 ;
if ( ( activate = = CHANGE_AN ) | | ( activate = = CHANGE_ALN ) )
do_activate = 0 ;
2004-05-24 17:44:10 +04:00
2009-07-15 09:47:55 +04:00
/*
* Safe , since we never write out new metadata here . Required for
* partial activation to work .
*/
2012-06-27 16:59:34 +04:00
cmd - > handles_missing_pvs = 1 ;
2004-06-24 18:48:01 +04:00
2001-10-16 20:25:28 +04:00
/* FIXME: Force argument to deactivate them? */
2012-06-27 16:59:34 +04:00
if ( ! do_activate & & ( lv_open = lvs_in_vg_opened ( vg ) ) ) {
2002-01-30 18:04:48 +03:00
log_error ( " Can't deactivate volume group \" %s \" with %d open "
2001-10-16 20:25:28 +04:00
" logical volume(s) " , vg - > name , lv_open ) ;
2010-10-30 01:15:23 +04:00
return 0 ;
2001-10-16 20:25:28 +04:00
}
2005-05-17 17:44:02 +04:00
/* FIXME Move into library where clvmd can use it */
2012-06-27 16:59:34 +04:00
if ( do_activate )
2005-05-17 17:44:02 +04:00
check_current_backup ( vg ) ;
2012-06-27 16:59:34 +04:00
if ( do_activate & & ( active = lvs_in_vg_activated ( vg ) ) ) {
2002-01-30 18:04:48 +03:00
log_verbose ( " %d logical volume(s) in volume group \" %s \" "
2002-03-01 22:08:11 +03:00
" already active " , active , vg - > name ) ;
2007-01-25 02:43:27 +03:00
if ( dmeventd_monitor_mode ( ) ! = DMEVENTD_MONITOR_IGNORE ) {
2010-10-30 01:15:23 +04:00
if ( ! _monitor_lvs_in_vg ( cmd , vg , dmeventd_monitor_mode ( ) , & monitored ) )
r = 0 ;
2007-01-25 02:43:27 +03:00
log_verbose ( " %d existing logical volume(s) in volume "
" group \" %s \" %smonitored " ,
monitored , vg - > name ,
dmeventd_monitor_mode ( ) ? " " : " un " ) ;
}
2006-05-12 23:16:48 +04:00
}
2001-11-21 22:32:35 +03:00
2012-06-27 16:59:34 +04:00
if ( ! _activate_lvs_in_vg ( cmd , vg , activate ) )
2010-10-30 01:15:23 +04:00
r = 0 ;
2001-10-16 20:25:28 +04:00
2010-05-24 12:59:29 +04:00
/* Print message only if there was not found a missing VG */
if ( ! vg - > cmd_missing_vgs )
config: add silent mode
Accept -q as the short form of --quiet.
Suppress non-essential standard output if -q is given twice.
Treat log/silent in lvm.conf as equivalent to -qq.
Review all log_print messages and change some to
log_print_unless_silent.
When silent, the following commands still produce output:
dumpconfig, lvdisplay, lvmdiskscan, lvs, pvck, pvdisplay,
pvs, version, vgcfgrestore -l, vgdisplay, vgs.
[Needs checking.]
Non-essential messages are shifted from log level 4 to log level 5
for syslog and lvm2_log_fn purposes.
2012-08-25 23:35:48 +04:00
log_print_unless_silent ( " %d logical volume(s) in volume group \" %s \" now active " ,
lvs_in_vg_activated ( vg ) , vg - > name ) ;
2010-10-30 01:15:23 +04:00
return r ;
}
static int _vgchange_refresh ( struct cmd_context * cmd , struct volume_group * vg )
{
log_verbose ( " Refreshing volume group \" %s \" " , vg - > name ) ;
if ( ! vg_refresh_visible ( cmd , vg ) ) {
stack ;
return 0 ;
}
return 1 ;
2001-10-16 20:25:28 +04:00
}
2004-05-19 02:12:53 +04:00
static int _vgchange_alloc ( struct cmd_context * cmd , struct volume_group * vg )
{
alloc_policy_t alloc ;
2012-02-28 18:24:57 +04:00
alloc = ( alloc_policy_t ) arg_uint_value ( cmd , alloc_ARG , ALLOC_NORMAL ) ;
2004-05-19 02:12:53 +04:00
2009-07-09 14:08:54 +04:00
/* FIXME: make consistent with vg_set_alloc_policy() */
2004-05-19 02:12:53 +04:00
if ( alloc = = vg - > alloc ) {
log_error ( " Volume group allocation policy is already %s " ,
get_alloc_string ( vg - > alloc ) ) ;
2010-10-30 01:15:23 +04:00
return 0 ;
2009-09-15 02:47:49 +04:00
}
2004-05-19 02:12:53 +04:00
2010-10-30 01:15:23 +04:00
if ( ! vg_set_alloc_policy ( vg , alloc ) )
return_0 ;
2004-05-19 02:12:53 +04:00
2010-10-30 01:15:23 +04:00
return 1 ;
2004-05-19 02:12:53 +04:00
}
2003-10-22 02:06:07 +04:00
static int _vgchange_resizeable ( struct cmd_context * cmd ,
struct volume_group * vg )
2001-10-16 20:25:28 +04:00
{
2002-02-12 00:00:35 +03:00
int resizeable = ! strcmp ( arg_str_value ( cmd , resizeable_ARG , " n " ) , " y " ) ;
2001-10-16 20:25:28 +04:00
2009-09-15 22:35:13 +04:00
if ( resizeable & & vg_is_resizeable ( vg ) ) {
2002-02-12 00:00:35 +03:00
log_error ( " Volume group \" %s \" is already resizeable " ,
vg - > name ) ;
2010-10-30 01:15:23 +04:00
return 0 ;
2001-10-16 20:25:28 +04:00
}
2009-09-15 22:35:13 +04:00
if ( ! resizeable & & ! vg_is_resizeable ( vg ) ) {
2002-01-30 18:04:48 +03:00
log_error ( " Volume group \" %s \" is already not resizeable " ,
2001-10-16 20:25:28 +04:00
vg - > name ) ;
2010-10-30 01:15:23 +04:00
return 0 ;
2009-09-15 02:47:49 +04:00
}
2002-01-09 16:17:14 +03:00
2002-01-10 18:09:51 +03:00
if ( resizeable )
vg - > status | = RESIZEABLE_VG ;
2001-10-16 20:25:28 +04:00
else
2002-01-10 18:09:51 +03:00
vg - > status & = ~ RESIZEABLE_VG ;
2001-10-16 20:25:28 +04:00
2010-10-30 01:15:23 +04:00
return 1 ;
2001-10-16 20:25:28 +04:00
}
2005-03-22 01:55:12 +03:00
static int _vgchange_clustered ( struct cmd_context * cmd ,
struct volume_group * vg )
{
int clustered = ! strcmp ( arg_str_value ( cmd , clustered_ARG , " n " ) , " y " ) ;
2008-04-10 21:09:32 +04:00
if ( clustered & & ( vg_is_clustered ( vg ) ) ) {
2005-03-22 01:55:12 +03:00
log_error ( " Volume group \" %s \" is already clustered " ,
vg - > name ) ;
2010-10-30 01:15:23 +04:00
return 0 ;
2005-03-22 01:55:12 +03:00
}
2008-04-10 21:09:32 +04:00
if ( ! clustered & & ! ( vg_is_clustered ( vg ) ) ) {
2005-03-22 01:55:12 +03:00
log_error ( " Volume group \" %s \" is already not clustered " ,
vg - > name ) ;
2010-10-30 01:15:23 +04:00
return 0 ;
2009-09-15 02:47:49 +04:00
}
2005-03-22 01:55:12 +03:00
2009-10-31 20:30:52 +03:00
if ( ! vg_set_clustered ( vg , clustered ) )
2010-10-30 01:15:23 +04:00
return_0 ;
2005-03-22 01:55:12 +03:00
2010-10-30 01:15:23 +04:00
return 1 ;
2005-03-22 01:55:12 +03:00
}
2003-10-22 02:06:07 +04:00
static int _vgchange_logicalvolume ( struct cmd_context * cmd ,
struct volume_group * vg )
2001-10-16 20:25:28 +04:00
{
2002-12-20 02:25:55 +03:00
uint32_t max_lv = arg_uint_value ( cmd , logicalvolume_ARG , 0 ) ;
2001-10-16 20:25:28 +04:00
2010-10-30 01:15:23 +04:00
if ( ! vg_set_max_lv ( vg , max_lv ) )
return_0 ;
2001-10-16 20:25:28 +04:00
2010-10-30 01:15:23 +04:00
return 1 ;
2001-10-16 20:25:28 +04:00
}
2002-11-18 17:04:08 +03:00
2006-08-16 18:41:42 +04:00
static int _vgchange_physicalvolumes ( struct cmd_context * cmd ,
struct volume_group * vg )
{
uint32_t max_pv = arg_uint_value ( cmd , maxphysicalvolumes_ARG , 0 ) ;
2010-10-30 01:15:23 +04:00
if ( ! vg_set_max_pv ( vg , max_pv ) )
return_0 ;
2006-08-16 18:41:42 +04:00
2010-10-30 01:15:23 +04:00
return 1 ;
2006-08-16 18:41:42 +04:00
}
2005-04-18 18:56:42 +04:00
static int _vgchange_pesize ( struct cmd_context * cmd , struct volume_group * vg )
{
uint32_t extent_size ;
2011-03-02 23:00:09 +03:00
if ( arg_uint64_value ( cmd , physicalextentsize_ARG , 0 ) > MAX_EXTENT_SIZE ) {
2012-10-16 12:14:41 +04:00
log_warn ( " Physical extent size cannot be larger than %s. " ,
display_size ( cmd , ( uint64_t ) MAX_EXTENT_SIZE ) ) ;
2011-03-02 23:00:09 +03:00
return 1 ;
}
2007-11-14 03:08:25 +03:00
extent_size = arg_uint_value ( cmd , physicalextentsize_ARG , 0 ) ;
2009-07-09 14:02:15 +04:00
/* FIXME: remove check - redundant with vg_change_pesize */
2005-04-18 18:56:42 +04:00
if ( extent_size = = vg - > extent_size ) {
2012-10-16 12:14:41 +04:00
log_warn ( " Physical extent size of VG %s is already %s. " ,
vg - > name , display_size ( cmd , ( uint64_t ) extent_size ) ) ;
2010-10-30 01:15:23 +04:00
return 1 ;
2005-04-18 18:56:42 +04:00
}
2010-10-30 01:15:23 +04:00
if ( ! vg_set_extent_size ( vg , extent_size ) )
return_0 ;
2005-04-18 18:56:42 +04:00
2010-10-30 01:15:23 +04:00
return 1 ;
2005-04-18 18:56:42 +04:00
}
2010-10-30 01:15:23 +04:00
static int _vgchange_addtag ( struct cmd_context * cmd , struct volume_group * vg )
{
2011-01-24 16:38:31 +03:00
return change_tag ( cmd , vg , NULL , NULL , addtag_ARG ) ;
2010-10-30 01:15:23 +04:00
}
2004-03-08 20:19:15 +03:00
2010-10-30 01:15:23 +04:00
static int _vgchange_deltag ( struct cmd_context * cmd , struct volume_group * vg )
{
2011-01-24 16:38:31 +03:00
return change_tag ( cmd , vg , NULL , NULL , deltag_ARG ) ;
2004-03-08 20:19:15 +03:00
}
2010-07-09 19:34:40 +04:00
static int _vgchange_uuid ( struct cmd_context * cmd __attribute__ ( ( unused ) ) ,
2006-05-10 01:23:51 +04:00
struct volume_group * vg )
2004-01-13 21:42:05 +03:00
{
struct lv_list * lvl ;
if ( lvs_in_vg_activated ( vg ) ) {
log_error ( " Volume group has active logical volumes " ) ;
2010-10-30 01:15:23 +04:00
return 0 ;
2009-09-15 02:47:49 +04:00
}
2004-01-13 21:42:05 +03:00
2005-01-20 21:11:53 +03:00
if ( ! id_create ( & vg - > id ) ) {
log_error ( " Failed to generate new random UUID for VG %s. " ,
vg - > name ) ;
2010-10-30 01:15:23 +04:00
return 0 ;
2005-01-20 21:11:53 +03:00
}
2004-01-13 21:42:05 +03:00
2008-11-04 01:14:30 +03:00
dm_list_iterate_items ( lvl , & vg - > lvs ) {
2004-01-13 21:42:05 +03:00
memcpy ( & lvl - > lv - > lvid , & vg - > id , sizeof ( vg - > id ) ) ;
}
2010-10-30 01:15:23 +04:00
return 1 ;
2008-12-22 12:00:51 +03:00
}
2010-06-29 00:37:37 +04:00
static int _vgchange_metadata_copies ( struct cmd_context * cmd ,
struct volume_group * vg )
{
2010-07-01 00:03:52 +04:00
uint32_t mda_copies = arg_uint_value ( cmd , vgmetadatacopies_ARG , DEFAULT_VGMETADATACOPIES ) ;
2010-06-29 00:37:37 +04:00
if ( mda_copies = = vg_mda_copies ( vg ) ) {
2010-07-01 00:03:52 +04:00
if ( vg_mda_copies ( vg ) = = VGMETADATACOPIES_UNMANAGED )
2012-10-16 12:14:41 +04:00
log_warn ( " Number of metadata copies for VG %s is already unmanaged. " ,
vg - > name ) ;
2010-07-01 00:03:52 +04:00
else
2012-10-16 12:14:41 +04:00
log_warn ( " Number of metadata copies for VG %s is already %u. " ,
vg - > name , mda_copies ) ;
2010-10-30 01:15:23 +04:00
return 1 ;
2010-06-29 00:37:37 +04:00
}
2010-10-30 01:15:23 +04:00
if ( ! vg_set_mda_copies ( vg , mda_copies ) )
return_0 ;
2010-06-29 00:37:37 +04:00
2010-10-30 01:15:23 +04:00
return 1 ;
2010-06-29 00:37:37 +04:00
}
2002-11-18 17:04:08 +03:00
static int vgchange_single ( struct cmd_context * cmd , const char * vg_name ,
2009-07-01 21:00:50 +04:00
struct volume_group * vg ,
2010-07-09 19:34:40 +04:00
void * handle __attribute__ ( ( unused ) ) )
2002-11-18 17:04:08 +03:00
{
2010-10-30 01:15:23 +04:00
int archived = 0 ;
int i ;
static struct {
int arg ;
int ( * fn ) ( struct cmd_context * cmd , struct volume_group * vg ) ;
} _vgchange_args [ ] = {
{ logicalvolume_ARG , & _vgchange_logicalvolume } ,
{ maxphysicalvolumes_ARG , & _vgchange_physicalvolumes } ,
{ resizeable_ARG , & _vgchange_resizeable } ,
{ deltag_ARG , & _vgchange_deltag } ,
{ addtag_ARG , & _vgchange_addtag } ,
{ physicalextentsize_ARG , & _vgchange_pesize } ,
{ uuid_ARG , & _vgchange_uuid } ,
{ alloc_ARG , & _vgchange_alloc } ,
{ clustered_ARG , & _vgchange_clustered } ,
{ vgmetadatacopies_ARG , & _vgchange_metadata_copies } ,
{ - 1 , NULL } ,
} ;
2003-10-22 02:06:07 +04:00
2009-09-14 23:44:15 +04:00
if ( vg_is_exported ( vg ) ) {
2002-11-18 17:04:08 +03:00
log_error ( " Volume group \" %s \" is exported " , vg_name ) ;
return ECMD_FAILED ;
}
2010-01-06 22:08:58 +03:00
/*
* FIXME : DEFAULT_BACKGROUND_POLLING should be " unspecified " .
* If - - poll is explicitly provided use it ; otherwise polling
* should only be started if the LV is not already active . So :
* 1 ) change the activation code to say if the LV was actually activated
* 2 ) make polling of an LV tightly coupled with LV activation
2010-05-06 15:15:55 +04:00
*
* Do not initiate any polling if - - sysinit option is used .
2010-01-06 22:08:58 +03:00
*/
2010-05-06 15:15:55 +04:00
init_background_polling ( arg_count ( cmd , sysinit_ARG ) ? 0 :
arg_int_value ( cmd , poll_ARG ,
DEFAULT_BACKGROUND_POLLING ) ) ;
2010-01-05 23:56:51 +03:00
2010-10-30 01:15:23 +04:00
for ( i = 0 ; _vgchange_args [ i ] . arg > = 0 ; i + + ) {
if ( arg_count ( cmd , _vgchange_args [ i ] . arg ) ) {
if ( ! archived & & ! archive ( vg ) ) {
stack ;
return ECMD_FAILED ;
}
archived = 1 ;
if ( ! _vgchange_args [ i ] . fn ( cmd , vg ) ) {
stack ;
return ECMD_FAILED ;
}
}
}
if ( archived ) {
if ( ! vg_write ( vg ) | | ! vg_commit ( vg ) ) {
stack ;
return ECMD_FAILED ;
}
backup ( vg ) ;
config: add silent mode
Accept -q as the short form of --quiet.
Suppress non-essential standard output if -q is given twice.
Treat log/silent in lvm.conf as equivalent to -qq.
Review all log_print messages and change some to
log_print_unless_silent.
When silent, the following commands still produce output:
dumpconfig, lvdisplay, lvmdiskscan, lvs, pvck, pvdisplay,
pvs, version, vgcfgrestore -l, vgdisplay, vgs.
[Needs checking.]
Non-essential messages are shifted from log level 4 to log level 5
for syslog and lvm2_log_fn purposes.
2012-08-25 23:35:48 +04:00
log_print_unless_silent ( " Volume group \" %s \" successfully changed " , vg - > name ) ;
2010-10-30 01:15:23 +04:00
}
2012-06-27 15:48:31 +04:00
if ( arg_count ( cmd , activate_ARG ) ) {
2012-08-21 17:49:23 +04:00
if ( ! vgchange_activate ( cmd , vg , ( activation_change_t )
arg_uint_value ( cmd , activate_ARG , CHANGE_AY ) ) )
2010-10-30 01:15:23 +04:00
return ECMD_FAILED ;
2010-10-26 05:37:59 +04:00
}
if ( arg_count ( cmd , refresh_ARG ) ) {
/* refreshes the visible LVs (which starts polling) */
2010-10-30 01:15:23 +04:00
if ( ! _vgchange_refresh ( cmd , vg ) )
return ECMD_FAILED ;
2010-10-26 05:37:59 +04:00
}
2002-11-18 17:04:08 +03:00
2012-06-27 15:48:31 +04:00
if ( ! arg_count ( cmd , activate_ARG ) & &
2010-10-26 05:37:59 +04:00
! arg_count ( cmd , refresh_ARG ) & &
arg_count ( cmd , monitor_ARG ) ) {
/* -ay* will have already done monitoring changes */
2010-10-30 01:15:23 +04:00
if ( ! _vgchange_monitoring ( cmd , vg ) )
return ECMD_FAILED ;
2010-10-26 05:37:59 +04:00
}
2006-05-12 23:16:48 +04:00
2010-10-26 05:37:59 +04:00
if ( ! arg_count ( cmd , refresh_ARG ) & &
2011-06-11 04:03:06 +04:00
background_polling ( ) )
2010-10-30 01:15:23 +04:00
if ( ! _vgchange_background_polling ( cmd , vg ) )
return ECMD_FAILED ;
2004-03-08 20:19:15 +03:00
2010-10-30 01:15:23 +04:00
return ECMD_PROCESSED ;
2002-11-18 17:04:08 +03:00
}
int vgchange ( struct cmd_context * cmd , int argc , char * * argv )
{
2010-10-30 01:15:23 +04:00
/* Update commands that can be combined */
[lv|vg]change: Allow limited metadata changes when PVs are missing
A while back, the behavior of LVM changed from allowing metadata changes
when PVs were missing to not allowing changes. Until recently, this
change was tolerated by HA-LVM by forcing a 'vgreduce --removemissing'
before trying (again) to add tags to an LV and then activate it. LVM
mirroring requires that failed devices are removed anyway, so this was
largely harmless. However, RAID LVs do not require devices to be removed
from the array in order to be activated. In fact, in an HA-LVM
environment this would be very undesirable. Device failures in such an
environment can often be transient and it would be much better to restore
the device to the array than synchronize an entirely new device.
There are two methods that can be used to setup an HA-LVM environment:
"clvm" or "tagging". For RAID LVs, "clvm" is out of the question because
RAID LVs are not supported in clustered VGs - not even in an exclusively
activated manner. That leaves "tagging". HA-LVM uses tagging - coupled
with 'volume_list' - to ensure that only one machine can have an LV active
at a time. If updates are not allowed when a PV is missing, it is
impossible to add or remove tags to allow for activation. This removes
one of the most basic functionalities of HA-LVM - site redundancy. If
mirroring or RAID is used to replicate the storage in two data centers
and one of them goes down, a server and a storage device are lost. When
the service fails-over to the alternate site, the VG will be "partial".
Unable to add a tag to the VG/LV, the RAID device will be unable to
activate.
The solution is to allow vgchange and lvchange to alter the LVM metadata
for a limited set of options - --[add|del]tag included. The set of
allowable options are ones that do not cause changes to the DM kernel
target (like --resync would) or could alter the structure of the LV
(like allocation or conversion).
2012-10-10 20:33:10 +04:00
int update_partial_safe =
arg_count ( cmd , deltag_ARG ) | |
arg_count ( cmd , addtag_ARG ) ;
int update_partial_unsafe =
2010-10-30 01:15:23 +04:00
arg_count ( cmd , logicalvolume_ARG ) | |
arg_count ( cmd , maxphysicalvolumes_ARG ) | |
arg_count ( cmd , resizeable_ARG ) | |
arg_count ( cmd , uuid_ARG ) | |
arg_count ( cmd , physicalextentsize_ARG ) | |
arg_count ( cmd , clustered_ARG ) | |
arg_count ( cmd , alloc_ARG ) | |
arg_count ( cmd , vgmetadatacopies_ARG ) ;
[lv|vg]change: Allow limited metadata changes when PVs are missing
A while back, the behavior of LVM changed from allowing metadata changes
when PVs were missing to not allowing changes. Until recently, this
change was tolerated by HA-LVM by forcing a 'vgreduce --removemissing'
before trying (again) to add tags to an LV and then activate it. LVM
mirroring requires that failed devices are removed anyway, so this was
largely harmless. However, RAID LVs do not require devices to be removed
from the array in order to be activated. In fact, in an HA-LVM
environment this would be very undesirable. Device failures in such an
environment can often be transient and it would be much better to restore
the device to the array than synchronize an entirely new device.
There are two methods that can be used to setup an HA-LVM environment:
"clvm" or "tagging". For RAID LVs, "clvm" is out of the question because
RAID LVs are not supported in clustered VGs - not even in an exclusively
activated manner. That leaves "tagging". HA-LVM uses tagging - coupled
with 'volume_list' - to ensure that only one machine can have an LV active
at a time. If updates are not allowed when a PV is missing, it is
impossible to add or remove tags to allow for activation. This removes
one of the most basic functionalities of HA-LVM - site redundancy. If
mirroring or RAID is used to replicate the storage in two data centers
and one of them goes down, a server and a storage device are lost. When
the service fails-over to the alternate site, the VG will be "partial".
Unable to add a tag to the VG/LV, the RAID device will be unable to
activate.
The solution is to allow vgchange and lvchange to alter the LVM metadata
for a limited set of options - --[add|del]tag included. The set of
allowable options are ones that do not cause changes to the DM kernel
target (like --resync would) or could alter the structure of the LV
(like allocation or conversion).
2012-10-10 20:33:10 +04:00
int update = update_partial_safe | | update_partial_unsafe ;
2010-10-30 01:15:23 +04:00
if ( ! update & &
2012-06-27 15:48:31 +04:00
! arg_count ( cmd , activate_ARG ) & &
2010-10-30 01:15:23 +04:00
! arg_count ( cmd , monitor_ARG ) & &
! arg_count ( cmd , poll_ARG ) & &
! arg_count ( cmd , refresh_ARG ) ) {
2010-01-05 23:56:51 +03:00
log_error ( " Need 1 or more of -a, -c, -l, -p, -s, -x, "
" --refresh, --uuid, --alloc, --addtag, --deltag, "
2010-06-29 00:37:37 +04:00
" --monitor, --poll, --vgmetadatacopies or "
" --metadatacopies " ) ;
2002-11-18 17:04:08 +03:00
return EINVALID_CMD_LINE ;
}
2012-06-27 15:48:31 +04:00
if ( arg_count ( cmd , activate_ARG ) & & arg_count ( cmd , refresh_ARG ) ) {
2010-10-26 05:37:59 +04:00
log_error ( " Only one of -a and --refresh permitted. " ) ;
return EINVALID_CMD_LINE ;
}
2010-05-06 15:15:55 +04:00
if ( ( arg_count ( cmd , ignorelockingfailure_ARG ) | |
2010-10-30 01:15:23 +04:00
arg_count ( cmd , sysinit_ARG ) ) & & update ) {
2010-06-23 14:22:59 +04:00
log_error ( " Only -a permitted with --ignorelockingfailure and --sysinit " ) ;
2010-05-06 15:15:55 +04:00
return EINVALID_CMD_LINE ;
}
2012-06-27 15:48:31 +04:00
if ( arg_count ( cmd , activate_ARG ) & &
2010-10-26 05:37:59 +04:00
( arg_count ( cmd , monitor_ARG ) | | arg_count ( cmd , poll_ARG ) ) ) {
2012-06-27 15:48:31 +04:00
int activate = arg_uint_value ( cmd , activate_ARG , 0 ) ;
2010-10-26 05:37:59 +04:00
if ( activate = = CHANGE_AN | | activate = = CHANGE_ALN ) {
log_error ( " Only -ay* allowed with --monitor or --poll. " ) ;
return EINVALID_CMD_LINE ;
}
}
2010-05-06 15:15:55 +04:00
if ( arg_count ( cmd , poll_ARG ) & & arg_count ( cmd , sysinit_ARG ) ) {
log_error ( " Only one of --poll and --sysinit permitted. " ) ;
2002-11-18 17:04:08 +03:00
return EINVALID_CMD_LINE ;
}
2012-06-27 15:48:31 +04:00
if ( arg_count ( cmd , activate_ARG ) = = 1
2002-11-18 17:04:08 +03:00
& & arg_count ( cmd , autobackup_ARG ) ) {
log_error ( " -A option not necessary with -a option " ) ;
return EINVALID_CMD_LINE ;
}
2010-10-30 01:15:23 +04:00
if ( arg_count ( cmd , maxphysicalvolumes_ARG ) & &
2012-02-28 18:24:57 +04:00
arg_sign_value ( cmd , maxphysicalvolumes_ARG , SIGN_NONE ) = = SIGN_MINUS ) {
2010-10-30 01:15:23 +04:00
log_error ( " MaxPhysicalVolumes may not be negative " ) ;
return EINVALID_CMD_LINE ;
}
if ( arg_count ( cmd , physicalextentsize_ARG ) & &
2012-02-28 18:24:57 +04:00
arg_sign_value ( cmd , physicalextentsize_ARG , SIGN_NONE ) = = SIGN_MINUS ) {
2010-10-30 01:15:23 +04:00
log_error ( " Physical extent size may not be negative " ) ;
return EINVALID_CMD_LINE ;
}
2012-07-10 15:49:46 +04:00
if ( arg_count ( cmd , sysinit_ARG ) & & lvmetad_active ( ) & &
arg_uint_value ( cmd , activate_ARG , 0 ) = = CHANGE_AAY ) {
log_warn ( " lvmetad is active while using --sysinit -a ay, "
" skipping manual activation " ) ;
return ECMD_PROCESSED ;
}
[lv|vg]change: Allow limited metadata changes when PVs are missing
A while back, the behavior of LVM changed from allowing metadata changes
when PVs were missing to not allowing changes. Until recently, this
change was tolerated by HA-LVM by forcing a 'vgreduce --removemissing'
before trying (again) to add tags to an LV and then activate it. LVM
mirroring requires that failed devices are removed anyway, so this was
largely harmless. However, RAID LVs do not require devices to be removed
from the array in order to be activated. In fact, in an HA-LVM
environment this would be very undesirable. Device failures in such an
environment can often be transient and it would be much better to restore
the device to the array than synchronize an entirely new device.
There are two methods that can be used to setup an HA-LVM environment:
"clvm" or "tagging". For RAID LVs, "clvm" is out of the question because
RAID LVs are not supported in clustered VGs - not even in an exclusively
activated manner. That leaves "tagging". HA-LVM uses tagging - coupled
with 'volume_list' - to ensure that only one machine can have an LV active
at a time. If updates are not allowed when a PV is missing, it is
impossible to add or remove tags to allow for activation. This removes
one of the most basic functionalities of HA-LVM - site redundancy. If
mirroring or RAID is used to replicate the storage in two data centers
and one of them goes down, a server and a storage device are lost. When
the service fails-over to the alternate site, the VG will be "partial".
Unable to add a tag to the VG/LV, the RAID device will be unable to
activate.
The solution is to allow vgchange and lvchange to alter the LVM metadata
for a limited set of options - --[add|del]tag included. The set of
allowable options are ones that do not cause changes to the DM kernel
target (like --resync would) or could alter the structure of the LV
(like allocation or conversion).
2012-10-10 20:33:10 +04:00
if ( ! update | | ! update_partial_unsafe )
cmd - > handles_missing_pvs = 1 ;
2010-10-30 01:15:23 +04:00
return process_each_vg ( cmd , argc , argv , update ? READ_FOR_UPDATE : 0 ,
NULL , & vgchange_single ) ;
2002-11-18 17:04:08 +03:00
}