/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "tools.h"
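
/*
 * State shared between the vgreduce command entry point and the per-VG
 * repair callback: force is set from the command line; fixed and
 * already_consistent are reported back by _vgreduce_repair_single().
 */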
struct vgreduce_params {
	int force;
	int fixed;
	int already_consistent;
};
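
/*
 * Drop an unused PV from the VG metadata.  Refuses to empty the VG
 * completely or to remove a PV that still has allocated extents;
 * adjusts the VG's free/total extent counts on success.
 */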
static int _remove_pv(struct volume_group *vg, struct pv_list *pvl, int silent)
{
	char uuid[64] __attribute__((aligned(8)));

	if (vg->pv_count == 1) {
		log_error("Volume Groups must always contain at least one PV.");
		return 0;
	}

	if (!id_write_format(&pvl->pv->id, uuid, sizeof(uuid)))
		return_0;

	log_verbose("Removing PV with UUID %s from VG %s.", uuid, vg->name);

	if (pvl->pv->pe_alloc_count) {
		if (!silent)
			log_error("LVs still present on PV with UUID %s: "
				  "Can't remove from VG %s.", uuid, vg->name);
		return 0;
	}

	vg->free_count -= pvl->pv->pe_count;
	vg->extent_count -= pvl->pv->pe_count;

	del_pvl_from_vgs(vg, pvl);
	free_pv_fid(pvl->pv);

	return 1;
}
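
/*
 * Warn about any remaining partial LVs, then, if none were found,
 * remove every missing or device-less PV that holds no extents.
 * Returns 0 if partial LVs are still present, 1 otherwise.
 */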
static int _consolidate_vg(struct cmd_context *cmd, struct volume_group *vg)
{
	struct pv_list *pvl;
	struct lv_list *lvl;
	int r = 1;

	dm_list_iterate_items(lvl, &vg->lvs)
		if (lv_is_partial(lvl->lv)) {
			log_warn("WARNING: Partial LV %s needs to be repaired "
				 "or removed.", lvl->lv->name);
			r = 0;
		}

	if (!r) {
		cmd->handles_missing_pvs = 1;
		log_error("There are still partial LVs in VG %s.", vg->name);
		log_error("To remove them unconditionally use: vgreduce --removemissing --force.");
		log_error("To remove them unconditionally from mirror LVs use: vgreduce "
			  "--removemissing --mirrorsonly --force.");
		log_warn("WARNING: Proceeding to remove empty missing PVs.");
	}

	dm_list_iterate_items(pvl, &vg->pvs) {
		if (pvl->pv->dev && !is_missing_pv(pvl->pv))
			continue;

		if (r && !_remove_pv(vg, pvl, 0))
			return_0;
	}

	return r;
}
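
/*
 * Used by --removemissing --force: strip missing devices out of RAID
 * and mirror LVs, remove any other partial LVs outright, then call
 * _consolidate_vg() to drop the now-empty missing PVs.  The LV scan
 * restarts from the top whenever an LV is modified or removed, since
 * the VG's LV list changes underneath the iterator.
 */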
static int _make_vg_consistent(struct cmd_context *cmd, struct volume_group *vg)
{
	struct lv_list *lvl;
	struct logical_volume *lv;

	cmd->partial_activation = 1;

 restart:
	vg_mark_partial_lvs(vg, 1);

	dm_list_iterate_items(lvl, &vg->lvs) {
		lv = lvl->lv;

		/* Are any segments of this LV on missing PVs? */
		if (lv_is_partial(lv)) {
			if (seg_is_raid(first_seg(lv))) {
				if (!lv_raid_remove_missing(lv))
					return_0;
				goto restart;
			}

			if (lv_is_mirror(lv)) {
				if (!mirror_remove_missing(cmd, lv, 1))
					return_0;
				goto restart;
			}

			if (arg_is_set(cmd, mirrorsonly_ARG) && !lv_is_mirrored(lv)) {
				log_error("Non-mirror-image LV %s found: can't remove.", lv->name);
				continue;
			}

			if (!lv_is_visible(lv))
				continue;

			log_warn("WARNING: Removing partial LV %s.", display_lvname(lv));

			if (!lv_remove_with_dependencies(cmd, lv, DONT_PROMPT, 0))
				return_0;

			goto restart;
		}
	}

	_consolidate_vg(cmd, vg);

	return 1;
}
/* Or take pv_name instead? */
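/*
 * process_each_pv() callback for the normal (non-repair) case: checks
 * that the VG is writable and resizeable, then hands the actual work
 * to vgreduce_single() in the library.
 */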
static int _vgreduce_single(struct cmd_context *cmd, struct volume_group *vg,
			    struct physical_volume *pv,
			    struct processing_handle *handle __attribute__((unused)))
{
	int r;

	if (!vg_check_status(vg, LVM_WRITE | RESIZEABLE_VG))
		return ECMD_FAILED;

	r = vgreduce_single(cmd, vg, pv, 1);
	if (!r)
		return ECMD_FAILED;

	return ECMD_PROCESSED;
}
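
/*
 * process_each_vg() callback for --removemissing: archives the VG,
 * makes it consistent (forcibly if --force was given), and commits
 * the updated metadata.  Results are reported back through the
 * vgreduce_params handle.
 */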
static int _vgreduce_repair_single(struct cmd_context *cmd, const char *vg_name,
				   struct volume_group *vg, struct processing_handle *handle)
{
	struct vgreduce_params *vp = (struct vgreduce_params *) handle->custom_handle;

	if (!vg_missing_pv_count(vg)) {
		vp->already_consistent = 1;
		return ECMD_PROCESSED;
	}

	if (!archive(vg))
		return_ECMD_FAILED;

	if (vp->force) {
		if (!_make_vg_consistent(cmd, vg))
			return_ECMD_FAILED;
		vp->fixed = 1;
	} else
		vp->fixed = _consolidate_vg(cmd, vg);

	if (!vg_write(vg) || !vg_commit(vg)) {
		log_error("Failed to write out a consistent VG for %s", vg_name);
		return ECMD_FAILED;
	}

	backup(vg);

	return ECMD_PROCESSED;
}
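
/*
 * Command entry point.  Typical invocations (illustrative; device and
 * VG names are examples only):
 *
 *   vgreduce vg00 /dev/sdc1          remove one unused PV from vg00
 *   vgreduce -a vg00                 remove all empty PVs from vg00
 *   vgreduce --removemissing vg00    repair vg00 by dropping missing PVs
 */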
int vgreduce(struct cmd_context *cmd, int argc, char **argv)
{
	struct processing_handle *handle;
	struct vgreduce_params vp = { 0 };
	const char *vg_name;
	int repairing = arg_is_set(cmd, removemissing_ARG);
	int saved_ignore_suspended_devices = ignore_suspended_devices();
	int ret;

	if (!argc && !repairing) {
		log_error("Please give volume group name and "
			  "physical volume paths.");
		return EINVALID_CMD_LINE;
	}

	if (!argc) { /* repairing */
		log_error("Please give volume group name.");
		return EINVALID_CMD_LINE;
	}

	if (arg_is_set(cmd, mirrorsonly_ARG) && !repairing) {
		log_error("--mirrorsonly requires --removemissing.");
		return EINVALID_CMD_LINE;
	}

	if (argc == 1 && !arg_is_set(cmd, all_ARG) && !repairing) {
		log_error("Please enter physical volume paths or option -a.");
		return EINVALID_CMD_LINE;
	}

	if (argc > 1 && arg_is_set(cmd, all_ARG)) {
		log_error("Option -a and physical volume paths mutually "
			  "exclusive.");
		return EINVALID_CMD_LINE;
	}

	if (argc > 1 && repairing) {
		log_error("Please only specify the volume group.");
		return EINVALID_CMD_LINE;
	}

	vg_name = skip_dev_dir(cmd, argv[0], NULL);
	argv++;
	argc--;
	if (!lock_global(cmd, "ex"))
		return_ECMD_FAILED;

	clear_hint_file(cmd);

	if (!(handle = init_processing_handle(cmd, NULL))) {
		log_error("Failed to initialize processing handle.");
		return ECMD_FAILED;
	}

	handle->custom_handle = &vp;

	if (!repairing) {
		/* FIXME: Pass private struct through to all these functions */
		/* and update in batch afterwards? */
		ret = process_each_pv(cmd, argc, argv, vg_name, 0, READ_FOR_UPDATE,
				      handle, _vgreduce_single);

		goto out;
	}
	/*
	 * VG repair (removemissing)
	 */

	vp.force = arg_count(cmd, force_ARG);

	cmd->handles_missing_pvs = 1;

	init_ignore_suspended_devices(1);

	process_each_vg(cmd, 0, NULL, vg_name, NULL, READ_FOR_UPDATE,
			0, handle, &_vgreduce_repair_single);

	if (vp.already_consistent) {
		log_print_unless_silent("Volume group \"%s\" is already consistent.", vg_name);
		ret = ECMD_PROCESSED;
	} else if (vp.fixed) {
		log_print_unless_silent("Wrote out consistent volume group %s.", vg_name);
		ret = ECMD_PROCESSED;
	} else
		ret = ECMD_FAILED;
out:
	init_ignore_suspended_devices(saved_ignore_suspended_devices);

	destroy_processing_handle(cmd, handle);

	return ret;
}