/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2013 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "tools.h"
#include "lib/device/device_id.h"
#include "lib/label/hints.h"

struct vgchange_params {
	int lock_start_count;
	unsigned int lock_start_sanlock : 1;
	unsigned int vg_complete_to_activate : 1;
	char *root_dm_uuid;	/* dm uuid of LV under root fs */
};

/*
 * Increments *count by the number of _new_ monitored devices.
 */
static int _monitor_lvs_in_vg(struct cmd_context *cmd,
			      struct volume_group *vg, int reg, int *count)
{
	struct lv_list *lvl;
	struct logical_volume *lv;
	int r = 1;

	dm_list_iterate_items(lvl, &vg->lvs) {
		lv = lvl->lv;

		if (!lv_info(cmd, lv, lv_is_thin_pool(lv) ? 1 : 0,
			     NULL, 0, 0))
			continue;
		/*
		 * FIXME: Need to consider all cases... PVMOVE, etc
		 */
		if (lv_is_pvmove(lv))
			continue;

		if (!monitor_dev_for_events(cmd, lv, 0, reg)) {
			r = 0;
			continue;
		}

		(*count)++;
	}

	return r;
}
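
/*
 * Scans the VG for active LVs with an in-progress pvmove, conversion,
 * or snapshot merge, and spawns background polling for each one found.
 */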
static int _poll_lvs_in_vg(struct cmd_context *cmd,
			   struct volume_group *vg)
{
	struct lv_list *lvl;
	struct logical_volume *lv;
	int count = 0;

	dm_list_iterate_items(lvl, &vg->lvs) {
		lv = lvl->lv;

		if (lv_is_active(lv) &&
		    (lv_is_pvmove(lv) || lv_is_converting(lv) || lv_is_merging(lv))) {
			lv_spawn_background_polling(cmd, lv);
			count++;
		}
	}

	/*
	 * Returns the number of polled devices
	 * - there is no way to know if lv is already being polled
	 */

	return count;
}
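
/*
 * Activates or deactivates every eligible LV in the VG, skipping hidden
 * LVs (unless component LVs are being processed), snapshot COWs, mirror
 * sub-LVs, VDO pools, and LVs excluded by activation skip or the
 * autoactivation filters. Returns 0 if any LV failed to change state.
 */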
static int _activate_lvs_in_vg(struct cmd_context *cmd, struct volume_group *vg,
			       activation_change_t activate)
{
	struct lv_list *lvl;
	struct logical_volume *lv;
	int count = 0, expected_count = 0, r = 1;

	sigint_allow();
	dm_list_iterate_items(lvl, &vg->lvs) {
		if (sigint_caught())
			return_0;

		lv = lvl->lv;

		if (!lv_is_visible(lv) && (!cmd->process_component_lvs || !lv_is_component(lv)))
			continue;

		/* If LV is sparse, activate origin instead */
		if (lv_is_cow(lv) && lv_is_virtual_origin(origin_from_cow(lv)))
			lv = origin_from_cow(lv);

		/* Only request activation of snapshot origin devices */
		if (lv_is_snapshot(lv) || lv_is_cow(lv))
			continue;

		/* Only request activation of mirror LV */
		if (lv_is_mirror_image(lv) || lv_is_mirror_log(lv))
			continue;

		if (lv_is_vdo_pool(lv))
			continue;

		if (lv_activation_skip(lv, activate, arg_is_set(cmd, ignoreactivationskip_ARG)))
			continue;

		if ((activate == CHANGE_AAY) &&
		    !lv_passes_auto_activation_filter(cmd, lv))
			continue;

		/* vg NOAUTOACTIVATE flag was already checked */
		if ((activate == CHANGE_AAY) && (lv->status & LV_NOAUTOACTIVATE))
			continue;

		expected_count++;

		if (!lv_change_activate(cmd, lv, activate)) {
			stack;
			r = 0;
			continue;
		}

		count++;
	}

	sigint_restore();

	if (expected_count)
		log_verbose("%sctivated %d logical volumes in volume group %s.",
			    is_change_activating(activate) ? "A" : "Dea",
			    count, vg->name);

	/*
	 * After successful activation we need to initialise polling
	 * for all activated LVs in a VG. Possible enhancement would
	 * be adding --poll y|n cmdline option for pvscan and call
	 * init_background_polling routine in autoactivation handler.
	 */
	if (count && is_change_activating(activate) &&
	    !vgchange_background_polling(cmd, vg)) {
		stack;
		r = 0;
	}

	/* Wait until devices are available */
	if (!sync_local_dev_names(vg->cmd)) {
		log_error("Failed to sync local devices for VG %s.", vg->name);
		r = 0;
	}

	return r;
}

static int _vgchange_monitoring(struct cmd_context *cmd, struct volume_group *vg)
{
	int r = 1;
	int monitored = 0;

	if (lvs_in_vg_activated(vg) &&
	    dmeventd_monitor_mode() != DMEVENTD_MONITOR_IGNORE) {
		if (!_monitor_lvs_in_vg(cmd, vg, dmeventd_monitor_mode(), &monitored))
			r = 0;
log_print_unless_silent ( " %d logical volume(s) in volume group "
" \" %s \" %smonitored " ,
monitored , vg - > name , ( dmeventd_monitor_mode ( ) ) ? " " : " un " ) ;
2006-05-12 23:16:48 +04:00
}
2010-10-30 01:15:23 +04:00
return r ;
2006-05-12 23:16:48 +04:00
}
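
/*
 * Illustrative usage (hedged): dmeventd monitoring for the LVs of a VG
 * is toggled with the --monitor option handled in _vgchange_single below:
 *
 *   vgchange --monitor y vg0
 *   vgchange --monitor n vg0
 */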

int vgchange_background_polling(struct cmd_context *cmd, struct volume_group *vg)
{
	int polled;

	if (background_polling()) {
		log_debug_activation("Starting background polling for volume group \"%s\".", vg->name);
		polled = _poll_lvs_in_vg(cmd, vg);
		if (polled)
log_print_unless_silent ( " Background polling started for %d logical volume(s) "
" in volume group \" %s \" " ,
polled , vg - > name ) ;
2010-01-05 23:56:51 +03:00
}
2010-10-30 01:15:23 +04:00
return 1 ;
2010-01-05 23:56:51 +03:00
}
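
/*
 * Called for vgchange -a. For autoactivation (CHANGE_AAY) the VG's
 * NOAUTOACTIVATE flag is honored, and vg_complete_to_activate can require
 * that every PV be present before any LV is activated. root_dm_uuid, when
 * set, may trigger first-boot auto-creation of system.devices (see the
 * comment block at the end of this file).
 */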
int vgchange_activate(struct cmd_context *cmd, struct volume_group *vg,
		      activation_change_t activate, int vg_complete_to_activate, char *root_dm_uuid)
{
	int lv_open, active, monitored = 0, r = 1;
	const struct lv_list *lvl;
	struct pv_list *pvl;
	int do_activate = is_change_activating(activate);

	/*
	 * We can get here in the odd case where an LV is already active in
	 * a foreign VG, which allows the VG to be accessed by vgchange -a
	 * so the LV can be deactivated.
	 */
	if (vg->system_id && vg->system_id[0] &&
	    cmd->system_id && cmd->system_id[0] &&
	    strcmp(vg->system_id, cmd->system_id) &&
	    do_activate) {
		log_error("Cannot activate LVs in a foreign VG.");
		return 0;
	}
	if ((activate == CHANGE_AAY) && (vg->status & NOAUTOACTIVATE)) {
		log_debug("Autoactivation is disabled for VG %s.", vg->name);
		return 1;
	}

	if (do_activate && vg_complete_to_activate) {
		dm_list_iterate_items(pvl, &vg->pvs) {
			if (!pvl->pv->dev) {
				log_print("VG %s is incomplete.", vg->name);
				return 1;
			}
		}
	}

	/*
	 * Safe, since we never write out new metadata here. Required for
	 * partial activation to work.
	 */
	cmd->handles_missing_pvs = 1;

	/* FIXME: Force argument to deactivate them? */
	if (!do_activate) {
		dm_list_iterate_items(lvl, &vg->lvs)
			label_scan_invalidate_lv(cmd, lvl->lv);

		if ((lv_open = lvs_in_vg_opened(vg))) {
			dm_list_iterate_items(lvl, &vg->lvs) {
				if (lv_is_visible(lvl->lv) &&
				    !lv_is_vdo_pool(lvl->lv) && // FIXME: API skip flag missing
				    !lv_check_not_in_use(lvl->lv, 1)) {
					log_error("Can't deactivate volume group \"%s\" with %d open logical volume(s)",
						  vg->name, lv_open);
					return 0;
				}
			}
		}
	}

	/* FIXME Move into library where clvmd can use it */
	if (do_activate)
		check_current_backup(vg);
	else /* Component LVs might be active, support easy deactivation */
		cmd->process_component_lvs = 1;

	if (do_activate && (active = lvs_in_vg_activated(vg))) {
		log_verbose("%d logical volume(s) in volume group \"%s\" "
			    "already active", active, vg->name);
		if (dmeventd_monitor_mode() != DMEVENTD_MONITOR_IGNORE) {
			if (!_monitor_lvs_in_vg(cmd, vg, dmeventd_monitor_mode(), &monitored))
				r = 0;
			log_verbose("%d existing logical volume(s) in volume "
				    "group \"%s\" %smonitored",
				    monitored, vg->name,
				    dmeventd_monitor_mode() ? "" : "un");
		}
	}

	if (!_activate_lvs_in_vg(cmd, vg, activate)) {
		stack;
		r = 0;
	}

	/*
	 * Possibly trigger auto-generation of system.devices:
	 * - if root_dm_uuid contains vg->id, and
	 * - /etc/lvm/devices/auto-import-rootvg exists, and
	 * - /etc/lvm/devices/system.devices does not exist, then
	 * - create /run/lvm/lvm-devices-import to
	 *   trigger lvm-devices-import.path and .service
	 * - lvm-devices-import will run vgimportdevices --rootvg
	 *   to create system.devices
	 */
	if (root_dm_uuid) {
		char path[PATH_MAX];
		struct stat info;
		FILE *fp;

		/* An LV's dm uuid is "LVM-" + VG uuid + LV uuid; +4 skips
		   the "LVM-" prefix so the VG uuid portion is compared. */
		if (memcmp(root_dm_uuid + 4, &vg->id, ID_LEN))
			goto out;

		if (cmd->enable_devices_file || devices_file_exists(cmd))
			goto out;

		if (dm_snprintf(path, sizeof(path), "%s/devices/auto-import-rootvg", cmd->system_dir) < 0)
			goto out;

		if (stat(path, &info) < 0)
			goto out;

		log_debug("Found %s creating %s", path, DEVICES_IMPORT_PATH);

		if (!(fp = fopen(DEVICES_IMPORT_PATH, "w"))) {
			log_debug("failed to create %s", DEVICES_IMPORT_PATH);
			goto out;
		}
		if (fclose(fp))
			stack;
	}
out:
	/* Print message only if no missing VG was found */
	log_print_unless_silent("%d logical volume(s) in volume group \"%s\" now active",
				lvs_in_vg_activated(vg), vg->name);

	return r;
}

static int _vgchange_refresh(struct cmd_context *cmd, struct volume_group *vg)
{
	log_verbose("Refreshing volume group \"%s\"", vg->name);

	if (!vg_refresh_visible(cmd, vg))
		return_0;

	return 1;
}

static int _vgchange_alloc(struct cmd_context *cmd, struct volume_group *vg)
{
	alloc_policy_t alloc;

	alloc = (alloc_policy_t) arg_uint_value(cmd, alloc_ARG, ALLOC_NORMAL);

	/* FIXME: make consistent with vg_set_alloc_policy() */
	if (alloc == vg->alloc) {
		log_error("Volume group allocation policy is already %s",
			  get_alloc_string(vg->alloc));
		return 0;
	}

	if (!vg_set_alloc_policy(vg, alloc))
		return_0;

	return 1;
}

static int _vgchange_resizeable(struct cmd_context *cmd,
				struct volume_group *vg)
{
	int resizeable = arg_int_value(cmd, resizeable_ARG, 0);

	if (resizeable && vg_is_resizeable(vg)) {
		log_error("Volume group \"%s\" is already resizeable",
			  vg->name);
		return 0;
	}

	if (!resizeable && !vg_is_resizeable(vg)) {
		log_error("Volume group \"%s\" is already not resizeable",
			  vg->name);
		return 0;
	}

	if (resizeable)
		vg->status |= RESIZEABLE_VG;
	else
		vg->status &= ~RESIZEABLE_VG;

	return 1;
}

static int _vgchange_autoactivation(struct cmd_context *cmd,
				    struct volume_group *vg)
{
	int aa_no_arg = !arg_int_value(cmd, setautoactivation_ARG, 0);
	int aa_no_meta = (vg->status & NOAUTOACTIVATE) ? 1 : 0;

	if ((aa_no_arg && aa_no_meta) || (!aa_no_arg && !aa_no_meta)) {
		log_error("Volume group autoactivation is already %s.",
			  aa_no_arg ? "no" : "yes");
		return 0;
	}

	if (aa_no_arg)
		vg->status |= NOAUTOACTIVATE;
	else
		vg->status &= ~NOAUTOACTIVATE;

	return 1;
}
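
/*
 * Illustrative usage (from the feature's description): the property is
 * set with
 *
 *   vgchange --setautoactivation n vg0
 *
 * and can be reported with "-o autoactivation" in vgs/lvs, which shows
 * "enabled" or "" (disabled). The disabled state is stored as the
 * NOAUTOACTIVATE flag in the VG metadata, as manipulated above; older
 * lvm versions ignore the flag and may drop it when modifying the VG.
 */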

static int _vgchange_logicalvolume(struct cmd_context *cmd,
				   struct volume_group *vg)
{
	uint32_t max_lv = arg_uint_value(cmd, logicalvolume_ARG, 0);

	if (!vg_set_max_lv(vg, max_lv))
		return_0;

	return 1;
}

static int _vgchange_physicalvolumes(struct cmd_context *cmd,
				     struct volume_group *vg)
{
	uint32_t max_pv = arg_uint_value(cmd, maxphysicalvolumes_ARG, 0);

	if (!vg_set_max_pv(vg, max_pv))
		return_0;

	return 1;
}

static int _vgchange_pesize(struct cmd_context *cmd, struct volume_group *vg)
{
	uint32_t extent_size;

	if (arg_uint64_value(cmd, physicalextentsize_ARG, 0) > MAX_EXTENT_SIZE) {
		log_warn("WARNING: Physical extent size cannot be larger than %s.",
			 display_size(cmd, (uint64_t) MAX_EXTENT_SIZE));
		return 1;
	}

	extent_size = arg_uint_value(cmd, physicalextentsize_ARG, 0);

	/* FIXME: remove check - redundant with vg_change_pesize */
	if (extent_size == vg->extent_size) {
		log_warn("WARNING: Physical extent size of VG %s is already %s.",
			 vg->name, display_size(cmd, (uint64_t) extent_size));
		return 1;
	}

	if (!vg_set_extent_size(vg, extent_size))
		return_0;

	if (!vg_check_pv_dev_block_sizes(vg)) {
		log_error("Failed to change physical extent size for VG %s.",
			  vg->name);
		return 0;
	}

	return 1;
}

static int _vgchange_addtag(struct cmd_context *cmd, struct volume_group *vg)
{
	return change_tag(cmd, vg, NULL, NULL, addtag_ARG);
}

static int _vgchange_deltag(struct cmd_context *cmd, struct volume_group *vg)
{
	return change_tag(cmd, vg, NULL, NULL, deltag_ARG);
}

static int _vgchange_uuid(struct cmd_context *cmd __attribute__((unused)),
			  struct volume_group *vg)
{
	struct lv_list *lvl;
	struct id old_vg_id;

	if (lvs_in_vg_activated(vg)) {
		log_error("Volume group has active logical volumes.");
		return 0;
	}

	memcpy(&old_vg_id, &vg->id, ID_LEN);

	if (!id_create(&vg->id)) {
		log_error("Failed to generate new random UUID for VG %s.",
			  vg->name);
		return 0;
	}

	dm_list_iterate_items(lvl, &vg->lvs) {
		memcpy(&lvl->lv->lvid, &vg->id, sizeof(vg->id));
	}

	/*
	 * If any LVs in this VG have PVs stacked on them, then
	 * update the device_id of the stacked PV.
	 */
	device_id_update_vg_uuid(cmd, vg, &old_vg_id);

	return 1;
}

static int _vgchange_metadata_copies(struct cmd_context *cmd,
				     struct volume_group *vg)
{
	uint32_t mda_copies = arg_uint_value(cmd, vgmetadatacopies_ARG, DEFAULT_VGMETADATACOPIES);

	log_debug("vgchange_metadata_copies new %u vg_mda_copies %u D %u",
		  mda_copies, vg_mda_copies(vg), DEFAULT_VGMETADATACOPIES);

	if (mda_copies == vg_mda_copies(vg)) {
		if (vg_mda_copies(vg) == VGMETADATACOPIES_UNMANAGED)
			log_warn("WARNING: Number of metadata copies for VG %s is already unmanaged.",
				 vg->name);
		else
			log_warn("WARNING: Number of metadata copies for VG %s is already %u.",
				 vg->name, mda_copies);
		return 1;
	}

	if (!vg_set_mda_copies(vg, mda_copies))
		return_0;

	return 1;
}
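
/*
 * Illustrative usage (hedged; the "unmanaged" literal is inferred from
 * VGMETADATACOPIES_UNMANAGED and the warning text above): set a managed
 * number of metadata copies, or return mda placement to unmanaged:
 *
 *   vgchange --vgmetadatacopies 2 vg0
 *   vgchange --vgmetadatacopies unmanaged vg0
 */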

static int _vgchange_profile(struct cmd_context *cmd,
			     struct volume_group *vg)
{
	const char *old_profile_name, *new_profile_name;
	struct profile *new_profile;

	old_profile_name = vg->profile ? vg->profile->name : "(no profile)";

	if (arg_is_set(cmd, detachprofile_ARG)) {
		new_profile_name = "(no profile)";
		vg->profile = NULL;
	} else {
		if (arg_is_set(cmd, metadataprofile_ARG))
			new_profile_name = arg_str_value(cmd, metadataprofile_ARG, NULL);
		else
			new_profile_name = arg_str_value(cmd, profile_ARG, NULL);
		if (!(new_profile = add_profile(cmd, new_profile_name, CONFIG_PROFILE_METADATA)))
			return_0;
		vg->profile = new_profile;
	}

	log_verbose("Changing configuration profile for VG %s: %s -> %s.",
		    vg->name, old_profile_name, new_profile_name);

	return 1;
}
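
/*
 * Illustrative usage (hedged): attaching and detaching a metadata
 * profile. --profile is accepted as a backward-compatible alias for
 * --metadataprofile in vgchange:
 *
 *   vgchange --metadataprofile myprofile vg0
 *   vgchange --detachprofile vg0
 */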

/*
 * This function will not be called unless the local host is allowed to use the
 * VG. Either the VG has no system_id, or the VG and host have matching
 * system_ids, or the host has the VG's current system_id in its
 * extra_system_ids list. This function is not allowed to change the system_id
 * of a foreign VG (VG owned by another host).
 */
static int _vgchange_system_id(struct cmd_context *cmd, struct volume_group *vg)
{
	const char *system_id;
	const char *system_id_arg_str = arg_str_value(cmd, systemid_ARG, NULL);

	if (!(system_id = system_id_from_string(cmd, system_id_arg_str))) {
		log_error("Unable to set system ID.");
		return 0;
	}

	if (!strcmp(vg->system_id, system_id)) {
		log_error("Volume Group system ID is already \"%s\".", vg->system_id);
		return 0;
	}

	if (!*system_id && cmd->system_id && strcmp(system_id, cmd->system_id)) {
		log_warn("WARNING: Removing the system ID allows unsafe access from other hosts.");

		if (!arg_is_set(cmd, yes_ARG) &&
		    yes_no_prompt("Remove system ID %s from volume group %s? [y/n]: ",
				  vg->system_id, vg->name) == 'n') {
			log_error("System ID of volume group %s not changed.", vg->name);
			return 0;
		}
	}

	if (*system_id && (!cmd->system_id || strcmp(system_id, cmd->system_id))) {
		if (lvs_in_vg_activated(vg)) {
			log_error("Logical Volumes in VG %s must be deactivated before system ID can be changed.",
				  vg->name);
			return 0;
		}

		if (cmd->system_id)
			log_warn("WARNING: Requested system ID %s does not match local system ID %s.",
				 system_id, cmd->system_id ? : "");
		else
			log_warn("WARNING: No local system ID is set.");
		log_warn("WARNING: Volume group %s might become inaccessible from this machine.",
			 vg->name);

		if (!arg_is_set(cmd, yes_ARG) &&
		    yes_no_prompt("Set foreign system ID %s on volume group %s? [y/n]: ",
				  system_id, vg->name) == 'n') {
			log_error("Volume group %s system ID not changed.", vg->name);
			return 0;
		}
	}

	log_verbose("Changing system ID for VG %s from \"%s\" to \"%s\".",
		    vg->name, vg->system_id, system_id);

	vg->system_id = system_id;

	return 1;
}
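
/*
 * Illustrative lvm.conf sketch (hedged; setting names inferred from the
 * activation_lock_start_list_CFG / activation_auto_lock_start_list_CFG
 * config IDs used below). An undefined list means no restriction:
 *
 *   activation {
 *       lock_start_list = [ "vg0", "vg1" ]
 *       auto_lock_start_list = [ "vg0" ]
 *   }
 */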

static int _passes_lock_start_filter(struct cmd_context *cmd,
				     struct volume_group *vg,
				     const int cfg_id)
{
	const struct dm_config_node *cn;
	const struct dm_config_value *cv;
	const char *str;

	/* undefined list means no restrictions, all vg names pass */

	cn = find_config_tree_array(cmd, cfg_id, NULL);
	if (!cn)
		return 1;

	/* with a defined list, the vg name must be included to pass */

	for (cv = cn->v; cv; cv = cv->next) {
		if (cv->type == DM_CFG_EMPTY_ARRAY)
			break;
		if (cv->type != DM_CFG_STRING) {
			log_error("Ignoring invalid string in lock_start list.");
			continue;
		}
		str = cv->v.str;
		if (!*str) {
			log_error("Ignoring empty string in config file.");
			continue;
		}

		/* ignoring tags for now */

		if (!strcmp(str, vg->name))
			return 1;
	}

	return 0;
}

static int _vgchange_lock_start(struct cmd_context *cmd, struct volume_group *vg,
				struct vgchange_params *vp)
{
	const char *start_opt = arg_str_value(cmd, lockopt_ARG, NULL);
	int auto_opt = 0;
	int exists = 0;
	int r;

	if (!vg_is_shared(vg))
		return 1;

	if (arg_is_set(cmd, force_ARG))
		goto do_start;

	/*
	 * Recognize both "auto" and "autonowait" options.
	 * Any waiting is done at the end of vgchange.
	 */
	if (start_opt && !strncmp(start_opt, "auto", 4))
		auto_opt = 1;

	if (!_passes_lock_start_filter(cmd, vg, activation_lock_start_list_CFG)) {
		log_verbose("Not starting %s since it does not pass lock_start_list", vg->name);
		return 1;
	}

	if (auto_opt && !_passes_lock_start_filter(cmd, vg, activation_auto_lock_start_list_CFG)) {
		log_verbose("Not starting %s since it does not pass auto_lock_start_list", vg->name);
		return 1;
	}

do_start:
	r = lockd_start_vg(cmd, vg, 0, &exists);

	if (r)
		vp->lock_start_count++;
	else if (exists)
		vp->lock_start_count++;

	if (!strcmp(vg->lock_type, "sanlock"))
		vp->lock_start_sanlock = 1;

	return r;
}

static int _vgchange_lock_stop(struct cmd_context *cmd, struct volume_group *vg)
{
	return lockd_stop_vg(cmd, vg);
}

static int _vgchange_single(struct cmd_context *cmd, const char *vg_name,
			    struct volume_group *vg,
			    struct processing_handle *handle)
{
	struct vgchange_params *vp = (struct vgchange_params *) handle->custom_handle;
	int ret = ECMD_PROCESSED;
	unsigned i;
	activation_change_t activate;
	int changed = 0;

	static const struct {
		int arg;
		int (*fn)(struct cmd_context *cmd, struct volume_group *vg);
	} _vgchange_args[] = {
		{ logicalvolume_ARG, &_vgchange_logicalvolume },
		{ maxphysicalvolumes_ARG, &_vgchange_physicalvolumes },
		{ resizeable_ARG, &_vgchange_resizeable },
		{ setautoactivation_ARG, &_vgchange_autoactivation },
		{ deltag_ARG, &_vgchange_deltag },
		{ addtag_ARG, &_vgchange_addtag },
		{ physicalextentsize_ARG, &_vgchange_pesize },
		{ uuid_ARG, &_vgchange_uuid },
		{ alloc_ARG, &_vgchange_alloc },
		{ vgmetadatacopies_ARG, &_vgchange_metadata_copies },
		{ metadataprofile_ARG, &_vgchange_profile },
		{ profile_ARG, &_vgchange_profile },
		{ detachprofile_ARG, &_vgchange_profile },
	};

	/*
	 * FIXME: DEFAULT_BACKGROUND_POLLING should be "unspecified".
	 * If --poll is explicitly provided use it; otherwise polling
	 * should only be started if the LV is not already active. So:
	 * 1) change the activation code to say if the LV was actually activated
	 * 2) make polling of an LV tightly coupled with LV activation
	 *
	 * Do not initiate any polling if --sysinit option is used.
	 */
	init_background_polling(arg_is_set(cmd, sysinit_ARG) ? 0 :
				arg_int_value(cmd, poll_ARG,
					      DEFAULT_BACKGROUND_POLLING));

	for (i = 0; i < DM_ARRAY_SIZE(_vgchange_args); ++i) {
		if (arg_is_set(cmd, _vgchange_args[i].arg)) {
			if (!_vgchange_args[i].fn(cmd, vg))
				return_ECMD_FAILED;
			changed = 1;
		}
	}

	if (changed) {
		if (!vg_write(vg) || !vg_commit(vg))
			return_ECMD_FAILED;

		log_print_unless_silent("Volume group \"%s\" successfully changed.", vg->name);
	}

	if (arg_is_set(cmd, activate_ARG)) {
		activate = (activation_change_t) arg_uint_value(cmd, activate_ARG, 0);
		if (!vgchange_activate(cmd, vg, activate, vp->vg_complete_to_activate, vp->root_dm_uuid))
			return_ECMD_FAILED;
	} else if (arg_is_set(cmd, refresh_ARG)) {
		/* refreshes the visible LVs (which starts polling) */
		if (!_vgchange_refresh(cmd, vg))
			return_ECMD_FAILED;
	} else {
		/* -ay* will have already done monitoring changes */
		if (arg_is_set(cmd, monitor_ARG) &&
		    !_vgchange_monitoring(cmd, vg))
			return_ECMD_FAILED;

		/* When explicitly specified --poll */
		if (arg_is_set(cmd, poll_ARG) &&
		    !vgchange_background_polling(cmd, vg))
			return_ECMD_FAILED;
	}

	return ret;
}
/*
 * Automatic creation of system.devices for the root VG on first boot
 * is useful for OS images where the OS installer is not used to
 * customize the OS for the system.
 *
 * - OS image prep:
 *   . rm /etc/lvm/devices/system.devices (if it exists)
 *   . touch /etc/lvm/devices/auto-import-rootvg
 *   . enable lvm-devices-import.path
 *   . enable lvm-devices-import.service
 *
 * - lvchange -ay <rootvg>/<rootlv>
 *   . run by initrd so root fs can be mounted
 *   . does not use system.devices
 *   . name <rootvg>/<rootlv> comes from kernel command line rd.lvm
 *   . uses first device that appears containing the named root LV
 *
 * - vgchange -aay <rootvg>
 *   . triggered by udev when all PVs from the root VG are online
 *   . activates LVs in the root VG (in addition to the already active root LV)
 *   . checks for /etc/lvm/devices/auto-import-rootvg (found)
 *   . checks for /etc/lvm/devices/system.devices (not found)
 *   . creates /run/lvm/lvm-devices-import because
 *     auto-import-rootvg was found and system.devices was not found
 *
 * - lvm-devices-import.path
 *   . triggered by /run/lvm/lvm-devices-import
 *   . starts lvm-devices-import.service
 *
 * - lvm-devices-import.service
 *   . checks for /etc/lvm/devices/system.devices, does nothing if found
 *   . runs vgimportdevices --rootvg --auto
 *
 * - vgimportdevices --rootvg --auto
 *   . checks for /etc/lvm/devices/auto-import-rootvg (found)
 *   . checks for /etc/lvm/devices/system.devices (not found)
 *   . creates /etc/lvm/devices/system.devices for PVs in the root VG
 *   . removes /etc/lvm/devices/auto-import-rootvg
 *   . removes /run/lvm/lvm-devices-import
 *
 * On future startups, /etc/lvm/devices/system.devices will exist,
 * and /etc/lvm/devices/auto-import-rootvg will not exist, so
 * vgchange -aay <rootvg> will not create /run/lvm/lvm-devices-import,
 * and lvm-devices-import.path and lvm-devices-import.service will not run.
 *
 * lvm-devices-import.path:
 * [Path]
 * PathExists=/run/lvm/lvm-devices-import
 * Unit=lvm-devices-import.service
 * ConditionPathExists=!/etc/lvm/devices/system.devices
 *
 * lvm-devices-import.service:
 * [Service]
 * Type=oneshot
 * RemainAfterExit=no
 * ExecStart=/usr/sbin/vgimportdevices --rootvg --auto
 * ConditionPathExists=!/etc/lvm/devices/system.devices
 */

static void _get_rootvg_dev(struct cmd_context *cmd, char **dm_uuid_out)
{
	char path[PATH_MAX];
	struct stat info;

	if (cmd->enable_devices_file || devices_file_exists(cmd))
		return;

	if (dm_snprintf(path, sizeof(path), "%s/devices/auto-import-rootvg", cmd->system_dir) < 0)
		return;

	if (stat(path, &info) < 0)
		return;

	if (!get_rootvg_dev_uuid(cmd, dm_uuid_out))
		stack;
}
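
/*
 * Set up the pvs_online optimization for event-based autoactivation
 * (vgchange -aay --autoactivation event).  When the PV "online" files
 * under /run/lvm show that the VG is complete, the full device scan
 * and the normal VG locking sequence can be skipped.
 */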
static int _vgchange_autoactivation_setup(struct cmd_context *cmd,
					  struct vgchange_params *vp,
					  int *skip_command,
					  const char **vgname_ret,
					  uint32_t *flags)
{
	const char *aa;
	char *vgname = NULL;
	int vg_locked = 0;
	int found_none = 0, found_all = 0, found_incomplete = 0;

	if (!(aa = arg_str_value(cmd, autoactivation_ARG, NULL)))
		return_0;

	if (strcmp(aa, "event")) {
		log_print("Skip vgchange for unknown autoactivation value.");
		*skip_command = 1;
		return 1;
	}

	if (!find_config_tree_bool(cmd, global_event_activation_CFG, NULL)) {
		log_print("Skip vgchange for event and event_activation=0.");
		*skip_command = 1;
		return 1;
	}

	vp->vg_complete_to_activate = 1;
	cmd->use_hints = 0;

	/*
	 * Add an option to skip the pvs_online optimization?  e.g.
	 * "online_skip" in --autoactivation / auto_activation_settings
	 *
	 * if (online_skip)
	 *	return 1;
	 */

	/* Reads the devices file; does not populate dev-cache. */
	if (!setup_devices_for_online_autoactivation(cmd))
		return_0;

	get_single_vgname_cmd_arg(cmd, NULL, &vgname);

	_get_rootvg_dev(cmd, &vp->root_dm_uuid);

	/*
	 * Lock the VG before scanning the PVs so _vg_read can avoid the normal
	 * lock_vol+rescan (READ_WITHOUT_LOCK avoids the normal lock_vol and
	 * can_use_one_scan avoids the normal rescan.)  If this early lock_vol
	 * fails, continue and use the normal lock_vol in _vg_read.
	 */
	if (vgname) {
		if (!lock_vol(cmd, vgname, LCK_VG_WRITE, NULL)) {
			log_debug("Failed early VG locking for autoactivation.");
		} else {
			*flags |= READ_WITHOUT_LOCK;
			cmd->can_use_one_scan = 1;
			vg_locked = 1;
		}
	}

	/*
	 * Perform label_scan on PVs that are online (per /run/lvm files)
	 * for the given VG (or, when no VG name is given, all online PVs.)
	 * If this fails, the caller will do a normal process_each_vg without
	 * optimizations (which will do a full label_scan.)
	 */
	if (!label_scan_vg_online(cmd, vgname, &found_none, &found_all, &found_incomplete)) {
		log_print("PVs online error%s%s, using all devices.", vgname ? " for VG " : "", vgname ?: "");
		goto bad;
	}

	/*
	 * Not the expected usage: activate any VGs that are complete based on
	 * pvs_online.  Only online PVs are used.
	 */
	if (!vgname) {
		*flags |= PROCESS_SKIP_SCAN;
		return 1;
	}

	/*
	 * The expected and optimal usage, which is the purpose of
	 * this function.  We expect online files to be found for
	 * all PVs because the udev rule calls
	 * vgchange -aay --autoactivation event <vgname>
	 * only after all PVs for vgname are found online.
	 */
	if (found_all) {
		*flags |= PROCESS_SKIP_SCAN;
		*vgname_ret = vgname;
		return 1;
	}

	/*
	 * Not the expected usage: no online PVs for the vgname were found.
	 * The caller will fall back to process_each doing a full label_scan
	 * to look for the VG.  (No optimization used.)
	 */
	if (found_none) {
		log_print("PVs online not found for VG %s, using all devices.", vgname);
		goto bad;
	}

	/*
	 * Not the expected usage: only some online PVs for the vgname were
	 * found.  The caller will fall back to process_each doing a full
	 * label_scan to look for all PVs in the VG.  (No optimization used.)
	 */
	if (found_incomplete) {
		log_print("PVs online incomplete for VG %s, using all devices.", vgname);
		goto bad;
	}

	/*
	 * Shouldn't happen; the caller will fall back to standard
	 * process_each.  (No optimization used.)
	 */
	log_print("PVs online unknown for VG %s, using all devices.", vgname);

bad:
	/*
	 * The online scanning optimization didn't work, so undo the VG
	 * locking optimization before falling back to normal processing.
	 */
	if (vg_locked) {
		unlock_vg(cmd, NULL, vgname);
		*flags &= ~READ_WITHOUT_LOCK;
		cmd->can_use_one_scan = 0;
	}

	free(vgname);

	return 1;
}
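
/*
 * vgchange entry point: validates option combinations, then processes
 * each named VG (or all VGs) with _vgchange_single.
 */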
int vgchange(struct cmd_context *cmd, int argc, char **argv)
{
	struct vgchange_params vp = { 0 };
	struct processing_handle *handle;
	const char *vgname = NULL;
	uint32_t flags = 0;
	int ret;
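	/*
	 * Options that only activate, monitor, poll or refresh, and do
	 * not require a VG metadata update (no vg_write/vg_commit).
	 */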
	int noupdate =
		arg_is_set(cmd, activate_ARG) ||
		arg_is_set(cmd, monitor_ARG) ||
		arg_is_set(cmd, poll_ARG) ||
		arg_is_set(cmd, refresh_ARG);

	/*
	 * Metadata changes that remain safe when the VG has missing PVs,
	 * e.g. adding or removing tags for HA-LVM: none of these change
	 * a DM kernel target or alter the structure of an LV.
	 */
	int update_partial_safe =
		arg_is_set(cmd, deltag_ARG) ||
		arg_is_set(cmd, addtag_ARG) ||
		arg_is_set(cmd, metadataprofile_ARG) ||
		arg_is_set(cmd, profile_ARG) ||
		arg_is_set(cmd, detachprofile_ARG);
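	/*
	 * Metadata changes that cannot be made while the VG has
	 * missing PVs.
	 */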
	int update_partial_unsafe =
		arg_is_set(cmd, logicalvolume_ARG) ||
		arg_is_set(cmd, maxphysicalvolumes_ARG) ||
		arg_is_set(cmd, resizeable_ARG) ||
		arg_is_set(cmd, setautoactivation_ARG) ||
		arg_is_set(cmd, uuid_ARG) ||
		arg_is_set(cmd, physicalextentsize_ARG) ||
		arg_is_set(cmd, alloc_ARG) ||
		arg_is_set(cmd, vgmetadatacopies_ARG);
	int update = update_partial_safe || update_partial_unsafe;

	if (!update && !noupdate) {
		log_error("Need one or more command options.");
		return EINVALID_CMD_LINE;
	}
	if ((arg_is_set(cmd, profile_ARG) || arg_is_set(cmd, metadataprofile_ARG)) &&
	    arg_is_set(cmd, detachprofile_ARG)) {
		log_error("Only one of --metadataprofile and --detachprofile permitted.");
		return EINVALID_CMD_LINE;
	}
	if (arg_is_set(cmd, activate_ARG) && arg_is_set(cmd, refresh_ARG)) {
		log_error("Only one of -a and --refresh permitted.");
		return EINVALID_CMD_LINE;
	}

	if ((arg_is_set(cmd, ignorelockingfailure_ARG) ||
	     arg_is_set(cmd, sysinit_ARG)) && update) {
		log_error("Only -a permitted with --ignorelockingfailure and --sysinit.");
		return EINVALID_CMD_LINE;
	}

	if (arg_is_set(cmd, activate_ARG) &&
	    (arg_is_set(cmd, monitor_ARG) || arg_is_set(cmd, poll_ARG))) {
		if (!is_change_activating((activation_change_t) arg_uint_value(cmd, activate_ARG, 0))) {
			log_error("Only -ay* allowed with --monitor or --poll.");
			return EINVALID_CMD_LINE;
		}
	}

	if (arg_is_set(cmd, poll_ARG) && arg_is_set(cmd, sysinit_ARG)) {
		log_error("Only one of --poll and --sysinit permitted.");
		return EINVALID_CMD_LINE;
	}

	if (arg_is_set(cmd, maxphysicalvolumes_ARG) &&
	    arg_sign_value(cmd, maxphysicalvolumes_ARG, SIGN_NONE) == SIGN_MINUS) {
		log_error("MaxPhysicalVolumes may not be negative.");
		return EINVALID_CMD_LINE;
	}

	if (arg_is_set(cmd, physicalextentsize_ARG) &&
	    arg_sign_value(cmd, physicalextentsize_ARG, SIGN_NONE) == SIGN_MINUS) {
		log_error("Physical extent size may not be negative.");
		return EINVALID_CMD_LINE;
	}

	if (arg_is_set(cmd, clustered_ARG) && !argc && !arg_is_set(cmd, yes_ARG) &&
	    (yes_no_prompt("Change clustered property of all volume groups? [y/n]: ") == 'n')) {
		log_error("No volume groups changed.");
		return ECMD_FAILED;
	}
	if (!update || !update_partial_unsafe)
		cmd->handles_missing_pvs = 1;

	if (noupdate)
		cmd->ignore_device_name_mismatch = 1;

	/*
	 * If the devices file includes PVs stacked on LVs, then
	 * vgchange --uuid may need to update the devices file.
	 * No PV-on-LV stacking is done without scan_lvs set.
	 */
	if (arg_is_set(cmd, uuid_ARG) && cmd->scan_lvs)
		cmd->edit_devices_file = 1;

	/*
	 * Include foreign VGs that contain active LVs.
	 * That shouldn't happen in general, but if it does by some
	 * mistake, then we want to allow those LVs to be deactivated.
	 */
	if (arg_is_set(cmd, activate_ARG))
		cmd->include_active_foreign_vgs = 1;

	/* The default VG lock mode is ex, but these options only need sh. */
	if ((cmd->command->command_enum == vgchange_activate_CMD) ||
	    (cmd->command->command_enum == vgchange_refresh_CMD)) {
		cmd->lockd_vg_default_sh = 1;

		/* Allow deactivating if locks fail. */
		if (is_change_activating((activation_change_t) arg_uint_value(cmd, activate_ARG, CHANGE_AY)))
			cmd->lockd_vg_enforce_sh = 1;
	}

	if (arg_is_set(cmd, autoactivation_ARG)) {
		int skip_command = 0;

		if (!_vgchange_autoactivation_setup(cmd, &vp, &skip_command, &vgname, &flags))
			return ECMD_FAILED;

		if (skip_command)
			return ECMD_PROCESSED;
	}

	/*
	 * Do not use udev for device listing or device info because
	 * vgchange --monitor y is called during boot when udev is being
	 * initialized and is not yet ready to be used.
	 */
	if (arg_is_set(cmd, monitor_ARG) &&
	    arg_int_value(cmd, monitor_ARG, DEFAULT_DMEVENTD_MONITOR)) {
		init_obtain_device_list_from_udev(0);
		init_external_device_info_source(DEV_EXT_NONE);
	}

	if (update)
		flags |= READ_FOR_UPDATE;
	else if (arg_is_set(cmd, activate_ARG) ||
		 arg_is_set(cmd, refresh_ARG))
		flags |= READ_FOR_ACTIVATE;

	if (!(handle = init_processing_handle(cmd, NULL))) {
		log_error("Failed to initialize processing handle.");
		return ECMD_FAILED;
	}

	handle->custom_handle = &vp;

	ret = process_each_vg(cmd, argc, argv, vgname, NULL, flags, 0, handle, &_vgchange_single);

	destroy_processing_handle(cmd, handle);

	return ret;
}
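
/*
 * Change the VG lock type.  Changing directly between lockd types is not
 * allowed; the type must first be changed to "none", then to the new type.
 * A sanlock conversion is done in two stages (see below).
 */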
static int _vgchange_locktype(struct cmd_context *cmd, struct volume_group *vg, int *no_change)
{
	const char *lock_type = arg_str_value(cmd, locktype_ARG, NULL);
	const char *lockopt = arg_str_value(cmd, lockopt_ARG, NULL);
	struct lv_list *lvl;
	struct logical_volume *lv;
	int lv_lock_count = 0;

	/* Special recovery case. */
	if (lock_type && lockopt && !strcmp(lock_type, "none") && !strcmp(lockopt, "force")) {
		vg->status &= ~CLUSTERED;
		vg->lock_type = "none";
		vg->lock_args = NULL;

		dm_list_iterate_items(lvl, &vg->lvs)
			lvl->lv->lock_args = NULL;

		return 1;
	}

	if (!vg->lock_type) {
		if (vg_is_clustered(vg))
			vg->lock_type = "clvm";
		else
			vg->lock_type = "none";
	}

	if (lock_type && !strcmp(vg->lock_type, lock_type)) {
		log_warn("WARNING: New lock type %s matches the current lock type %s.",
			 lock_type, vg->lock_type);
		*no_change = 1;
		return 1;
	}

	if (is_lockd_type(vg->lock_type) && is_lockd_type(lock_type)) {
		log_error("Cannot change lock type directly from \"%s\" to \"%s\".",
			  vg->lock_type, lock_type);
		log_error("First change lock type to \"none\", then to \"%s\".",
			  lock_type);
		return 0;
	}

	/*
	 * When lvm is currently using lvmlockd, this function can:
	 * - change none to lockd type
	 * - change none to clvm (with warning about not being able to use it)
	 * - change lockd type to none
	 * - change lockd type to clvm (with warning about not being able to use it)
	 * - change clvm to none
	 * - change clvm to lockd type
	 */

	if (lvs_in_vg_activated(vg)) {
		log_error("Changing VG %s lock type not allowed with active LVs",
			  vg->name);
		return 0;
	}

	/* clvm to none */
	if (lock_type && !strcmp(vg->lock_type, "clvm") && !strcmp(lock_type, "none")) {
		vg->status &= ~CLUSTERED;
		vg->lock_type = "none";
		return 1;
	}

	/* clvm to ..., first undo clvm */
	if (!strcmp(vg->lock_type, "clvm")) {
		vg->status &= ~CLUSTERED;
	}

	/*
	 * lockd type to ..., first undo lockd type
	 */
	if (is_lockd_type(vg->lock_type)) {
		if (!lockd_free_vg_before(cmd, vg, 1, NULL, 0))
			return 0;

		lockd_free_vg_final(cmd, vg);

		vg->status &= ~CLUSTERED;
		vg->lock_type = "none";
		vg->lock_args = NULL;

		dm_list_iterate_items(lvl, &vg->lvs)
			lvl->lv->lock_args = NULL;
	}

	/* ... to lockd type */
	if (is_lockd_type(lock_type)) {
		/*
		 * For lock_type dlm, lockd_init_vg() will do a single
		 * vg_write() that sets lock_type, sets lock_args, clears
		 * system_id, and sets all LV lock_args to dlm.
		 * For lock_type sanlock, lockd_init_vg() needs to know
		 * how many LV locks are needed so that it can make the
		 * sanlock lv large enough.
		 */
		dm_list_iterate_items(lvl, &vg->lvs) {
			lv = lvl->lv;

			if (lockd_lv_uses_lock(lv)) {
				lv_lock_count++;

				if (!strcmp(lock_type, "dlm"))
					lv->lock_args = "dlm";
			}
		}

		/*
		 * See below.  We cannot set valid LV lock_args until stage 1
		 * of the change is done, so we need to skip the validation of
		 * the lock_args during stage 1.
		 */
		if (!strcmp(lock_type, "sanlock"))
			vg->skip_validate_lock_args = 1;

		vg->system_id = NULL;

		if (!lockd_init_vg(cmd, vg, lock_type, lv_lock_count)) {
			log_error("Failed to initialize lock args for lock type %s", lock_type);
			return 0;
		}

		/*
		 * For lock_type sanlock, there must be multiple steps
		 * because the VG needs an active lvmlock LV before
		 * LV lock areas can be allocated, which must be done
		 * before LV lock_args are written.  So, the LV lock_args
		 * remain unset during the first stage of the conversion.
		 *
		 * Stage 1:
		 * lockd_init_vg() creates and activates the lvmlock LV,
		 * then sets lock_type, sets lock_args, and clears system_id.
		 *
		 * Stage 2:
		 * We get here, and can now set LV lock_args.  This uses
		 * the standard code path for allocating LV locks in
		 * vg_write() by setting LV lock_args to "pending",
		 * which tells vg_write() to call lockd_init_lv()
		 * and sets the lv->lock_args value before writing the VG.
		 */
		if (!strcmp(lock_type, "sanlock")) {
			dm_list_iterate_items(lvl, &vg->lvs) {
				lv = lvl->lv;
				if (lockd_lv_uses_lock(lv))
					lv->lock_args = "pending";
			}

			vg->skip_validate_lock_args = 0;
		}

		return 1;
	}

	/* ... to none */
	if (lock_type && !strcmp(lock_type, "none")) {
		vg->lock_type = NULL;
		vg->system_id = cmd->system_id ? dm_pool_strdup(vg->vgmem, cmd->system_id) : NULL;
		return 1;
	}

	log_error("Cannot change to unknown lock type %s", lock_type);
	return 0;
}
static int _vgchange_locktype_single(struct cmd_context *cmd, const char *vg_name,
				     struct volume_group *vg,
				     struct processing_handle *handle)
{
	int no_change = 0;

	if (!_vgchange_locktype(cmd, vg, &no_change))
		return_ECMD_FAILED;

	if (no_change)
		return ECMD_PROCESSED;

	if (!vg_write(vg) || !vg_commit(vg))
		return_ECMD_FAILED;

	/*
	 * When init_vg_sanlock is called for vgcreate, the lockspace remains
	 * started and lvmlock remains active, but when called for
	 * vgchange --locktype sanlock, the lockspace is not started so the
	 * lvmlock LV should be deactivated at the end.  vg_write writes the
	 * new leases to lvmlock, so we need to wait until after vg_write to
	 * deactivate it.
	 */
	if (vg->lock_type && !strcmp(vg->lock_type, "sanlock") &&
	    (cmd->command->command_enum == vgchange_locktype_CMD)) {
		if (!deactivate_lv(cmd, vg->sanlock_lv)) {
			log_error("Failed to deactivate %s.",
				  display_lvname(vg->sanlock_lv));
			return ECMD_FAILED;
		}
	}

	log_print_unless_silent("Volume group \"%s\" successfully changed.", vg->name);

	return ECMD_PROCESSED;
}
int vgchange_locktype_cmd(struct cmd_context *cmd, int argc, char **argv)
{
	struct processing_handle *handle;
	const char *lock_type = arg_str_value(cmd, locktype_ARG, NULL);
	const char *lockopt = arg_str_value(cmd, lockopt_ARG, NULL);
	int ret;

	/*
	 * vgchange --locktype none --lockopt force VG
	 *
	 * This is a special/forced exception to change the lock type to none.
	 * It's needed for recovery cases and skips the normal steps of undoing
	 * the current lock type.  It's a way to forcibly get access to a VG
	 * when the normal locking mechanisms are not working.
	 *
	 * It ignores: the current lvm locking config, lvmlockd, the state of
	 * the vg on other hosts, etc.  It is meant to just remove any locking
	 * related metadata from the VG (cluster/lock_type flags, lock_type,
	 * lock_args).
	 *
	 * This can be necessary when manually recovering from certain failures,
	 * e.g. when a pv is lost containing the lvmlock lv (holding sanlock
	 * leases), the vg lock_type needs to be changed to none, and then
	 * back to sanlock, which recreates the lvmlock lv and leases.
	 *
	 * Set lockd_gl_disable, lockd_vg_disable, lockd_lv_disable to
	 * disable locking.  lockd_gl(), lockd_vg() and lockd_lv() will
	 * just return success when they see the disable flag set.
	 */
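	/*
	 * Sketch of the recovery sequence described above (the VG name
	 * is illustrative):
	 *   vgchange --locktype none --lockopt force <vgname>
	 *   vgchange --locktype sanlock <vgname>
	 */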
	if (lockopt && !strcmp(lockopt, "force")) {
		if (!arg_is_set(cmd, yes_ARG) &&
		    yes_no_prompt("Forcibly change VG lock type to %s? [y/n]: ", lock_type) == 'n') {
			log_error("VG lock type not changed.");
			return 0;
		}

		cmd->lockd_gl_disable = 1;
		cmd->lockd_vg_disable = 1;
		cmd->lockd_lv_disable = 1;
		cmd->handles_missing_pvs = 1;
		cmd->force_access_clustered = 1;
		goto process;
	}

	if (!lvmlockd_use()) {
		log_error("Using lock type requires lvmlockd.");
		return 0;
	}

	/*
	 * This is a special case where taking the global lock is
	 * not needed to protect global state, because the change is
	 * only to an existing VG.  But, taking the global lock ex is
	 * helpful in this case to trigger a global cache validation
	 * on other hosts, to cause them to see the new system_id or
	 * lock_type.
	 */
	if (!lockd_global(cmd, "ex"))
		return 0;

process:
	if (!(handle = init_processing_handle(cmd, NULL))) {
		log_error("Failed to initialize processing handle.");
		return ECMD_FAILED;
	}

	ret = process_each_vg(cmd, argc, argv, NULL, NULL, READ_FOR_UPDATE, 0, handle, &_vgchange_locktype_single);

	destroy_processing_handle(cmd, handle);

	return ret;
}
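
/*
 * Process one VG for vgchange --lockstart or --lockstop, starting or
 * stopping the VG's lockspace in lvmlockd.
 */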
static int _vgchange_lock_start_stop_single(struct cmd_context *cmd, const char *vg_name,
					    struct volume_group *vg,
					    struct processing_handle *handle)
{
	struct vgchange_params *vp = (struct vgchange_params *) handle->custom_handle;

	if (arg_is_set(cmd, lockstart_ARG)) {
		if (!_vgchange_lock_start(cmd, vg, vp))
			return_ECMD_FAILED;
	} else if (arg_is_set(cmd, lockstop_ARG)) {
		if (!_vgchange_lock_stop(cmd, vg))
			return_ECMD_FAILED;
	}

	return ECMD_PROCESSED;
}
int vgchange_lock_start_stop_cmd(struct cmd_context *cmd, int argc, char **argv)
{
	struct processing_handle *handle;
	struct vgchange_params vp = { 0 };
	int ret;

	if (!lvmlockd_use()) {
		log_error("Using lock start and lock stop requires lvmlockd.");
		return 0;
	}

	if (!(handle = init_processing_handle(cmd, NULL))) {
		log_error("Failed to initialize processing handle.");
		return ECMD_FAILED;
	}

	if (arg_is_set(cmd, lockstop_ARG))
		cmd->lockd_vg_default_sh = 1;

	/*
	 * Starting lockspaces.  For VGs not yet started, locks are not
	 * available to acquire, and for VGs already started, there's nothing
	 * to do, so disable VG locks.  Try to acquire the global lock sh to
	 * validate the cache (if no gl is available, lockd_gl will force a
	 * cache validation).  If the global lock is available, it can be
	 * beneficial to hold sh to serialize lock-start with vgremove of the
	 * same VG from another host.
	 */
	if (arg_is_set(cmd, lockstart_ARG)) {
		cmd->lockd_vg_disable = 1;
		if (!lockd_global(cmd, "sh"))
			log_debug("No global lock for lock start.");

		/* Disable the lockd_gl in process_each_vg. */
		cmd->lockd_gl_disable = 1;
	} else {
		/* If the VG was started when it was exported, allow it to be stopped. */
		cmd->include_exported_vgs = 1;
	}

	handle->custom_handle = &vp;

	ret = process_each_vg(cmd, argc, argv, NULL, NULL, 0, 0, handle, &_vgchange_lock_start_stop_single);

	/* Wait for lock-start ops that were initiated in vgchange_lockstart. */
	if (arg_is_set(cmd, lockstart_ARG) && vp.lock_start_count) {
		const char *start_opt = arg_str_value(cmd, lockopt_ARG, NULL);
		if (!lockd_global(cmd, "un"))
			stack;

		if (!start_opt || !strcmp(start_opt, "auto")) {
			if (vp.lock_start_sanlock)
				log_print_unless_silent("Starting locking. Waiting for sanlock may take 20 sec to 3 min...");
			else
				log_print_unless_silent("Starting locking. Waiting until locks are ready...");
			lockd_start_wait(cmd);
		} else if (!strcmp(start_opt, "nowait") || !strcmp(start_opt, "autonowait")) {
			log_print_unless_silent("Starting locking. VG can only be read until locks are ready.");
		}
	}

	destroy_processing_handle(cmd, handle);

	return ret;
}
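
/*
 * Change the VG system ID.  With --majoritypvs, the change is allowed
 * with missing PVs as long as more than half of the PVs are present.
 */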
static int _vgchange_systemid_single(struct cmd_context *cmd, const char *vg_name,
				     struct volume_group *vg,
				     struct processing_handle *handle)
{
	if (arg_is_set(cmd, majoritypvs_ARG)) {
		struct pv_list *pvl;
		int missing_pvs = 0;
		int found_pvs = 0;

		dm_list_iterate_items(pvl, &vg->pvs) {
			if (!pvl->pv->dev)
				missing_pvs++;
			else
				found_pvs++;
		}

		if (found_pvs <= missing_pvs) {
			log_error("Cannot change system ID without the majority of PVs (found %d of %d).",
				  found_pvs, found_pvs + missing_pvs);
			return ECMD_FAILED;
		}
	}

	if (!_vgchange_system_id(cmd, vg))
		return_ECMD_FAILED;

	if (!vg_write(vg) || !vg_commit(vg))
		return_ECMD_FAILED;

	log_print_unless_silent("Volume group \"%s\" successfully changed.", vg->name);

	return ECMD_PROCESSED;
}
int vgchange_systemid_cmd(struct cmd_context *cmd, int argc, char **argv)
{
	struct processing_handle *handle;
	int ret;

	/*
	 * This is a special case where taking the global lock is
	 * not needed to protect global state, because the change is
	 * only to an existing VG.  But, taking the global lock ex is
	 * helpful in this case to trigger a global cache validation
	 * on other hosts, to cause them to see the new system_id or
	 * lock_type.
	 */
	if (!lockd_global(cmd, "ex"))
		return 0;

	if (!(handle = init_processing_handle(cmd, NULL))) {
		log_error("Failed to initialize processing handle.");
		return ECMD_FAILED;
	}

	if (arg_is_set(cmd, majoritypvs_ARG))
		cmd->handles_missing_pvs = 1;

	ret = process_each_vg(cmd, argc, argv, NULL, NULL, READ_FOR_UPDATE, 0, handle, &_vgchange_systemid_single);

	destroy_processing_handle(cmd, handle);

	return ret;
}