/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2010 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "lib/misc/lib.h"
#include "lib/metadata/metadata.h"
#include "lib/display/display.h"
#include "lib/activate/activate.h"
#include "lib/commands/toolcontext.h"
#include "lib/format_text/archiver.h"

struct volume_group *alloc_vg(const char *pool_name, struct cmd_context *cmd,
			      const char *vg_name)
{
	struct dm_pool *vgmem;
	struct volume_group *vg;

	if (!(vgmem = dm_pool_create(pool_name, VG_MEMPOOL_CHUNK)) ||
	    !(vg = dm_pool_zalloc(vgmem, sizeof(*vg)))) {
		log_error("Failed to allocate volume group structure");
		if (vgmem)
			dm_pool_destroy(vgmem);
		return NULL;
	}

	if (vg_name && !(vg->name = dm_pool_strdup(vgmem, vg_name))) {
		log_error("Failed to allocate VG name.");
		dm_pool_destroy(vgmem);
		return NULL;
	}

	vg->system_id = "";

	vg->cmd = cmd;
	vg->vgmem = vgmem;
	vg->alloc = ALLOC_NORMAL;

	if (!(vg->hostnames = dm_hash_create(14))) {
		log_error("Failed to allocate VG hostname hashtable.");
		dm_pool_destroy(vgmem);
		return NULL;
	}

	dm_list_init(&vg->pvs);
	dm_list_init(&vg->pv_write_list);
	dm_list_init(&vg->lvs);
	dm_list_init(&vg->historical_lvs);
	dm_list_init(&vg->tags);
	dm_list_init(&vg->removed_lvs);
	dm_list_init(&vg->removed_historical_lvs);
	dm_list_init(&vg->removed_pvs);
	dm_list_init(&vg->msg_list);

	log_debug_mem("Allocated VG %s at %p.", vg->name ? : "<no name>", (void *)vg);

	return vg;
}
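
/*
 * Illustrative sketch only (not taken from this file): a caller pairs
 * alloc_vg() with release_vg(), since the VG struct and everything
 * allocated from vg->vgmem live in one pool that is destroyed when the
 * VG is freed.  The pool and VG names below are made-up examples.
 *
 *	struct volume_group *vg;
 *
 *	if (!(vg = alloc_vg("example-pool", cmd, "vg_example")))
 *		return_0;
 *	// ... allocate VG-lifetime data from vg->vgmem ...
 *	release_vg(vg);
 */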

static void _free_vg(struct volume_group *vg)
{
	vg_set_fid(vg, NULL);

	if (vg->cmd && vg->vgmem == vg->cmd->mem) {
		log_error(INTERNAL_ERROR "global memory pool used for VG %s",
			  vg->name);
		return;
	}

	log_debug_mem("Freeing VG %s at %p.", vg->name ? : "<no name>", (void *)vg);

	if (vg->committed_cft)
		config_destroy(vg->committed_cft);

	dm_hash_destroy(vg->hostnames);

	dm_pool_destroy(vg->vgmem);
}

void release_vg(struct volume_group *vg)
{
	if (!vg || is_orphan_vg(vg->name))
		return;

	release_vg(vg->vg_committed);
	release_vg(vg->vg_precommitted);
	_free_vg(vg);
}

/*
 * FIXME Out of place, but the main (cmd) pool has already been
 * destroyed and touching the fid (also via release_vg) would crash the
 * program.
 *
 * For now this is a quick wrapper allowing an orphan VG to be destroyed.
 */
void free_orphan_vg(struct volume_group *vg)
{
	_free_vg(vg);
}

int link_lv_to_vg(struct volume_group *vg, struct logical_volume *lv)
{
	struct lv_list *lvl;

	if (vg_max_lv_reached(vg))
		stack;

	if (!(lvl = dm_pool_zalloc(vg->vgmem, sizeof(*lvl))))
		return_0;

	lvl->lv = lv;
	lv->vg = vg;
	dm_list_add(&vg->lvs, &lvl->list);
	lv->status &= ~LV_REMOVED;

	return 1;
}

int unlink_lv_from_vg(struct logical_volume *lv)
{
	struct lv_list *lvl;

	if (!(lvl = find_lv_in_vg(lv->vg, lv->name)))
		return_0;

	dm_list_move(&lv->vg->removed_lvs, &lvl->list);
	lv->status |= LV_REMOVED;

	return 1;
}
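
/*
 * Illustrative sketch only (assumed caller pattern, not taken from this
 * file): unlinking an LV moves its lv_list entry onto vg->removed_lvs and
 * sets LV_REMOVED, so the struct remains valid until the VG is released.
 *
 *	if (!unlink_lv_from_vg(lv))
 *		return_0;
 *	// lv->status now carries LV_REMOVED; the entry stays on
 *	// vg->removed_lvs until release_vg(vg) frees the whole pool.
 */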

int vg_max_lv_reached(struct volume_group *vg)
{
	if (!vg->max_lv)
		return 0;

	if (vg->max_lv > vg_visible_lvs(vg))
		return 0;

	log_verbose("Maximum number of logical volumes (%u) reached "
		    "in volume group %s", vg->max_lv, vg->name);

	return 1;
}

char *vg_fmt_dup(const struct volume_group *vg)
{
	if (!vg->fid || !vg->fid->fmt)
		return NULL;

	return dm_pool_strdup(vg->vgmem, vg->fid->fmt->name);
}

char *vg_name_dup(const struct volume_group *vg)
{
	return dm_pool_strdup(vg->vgmem, vg->name);
}

char *vg_system_id_dup(const struct volume_group *vg)
{
	return dm_pool_strdup(vg->vgmem, vg->system_id ? : "");
}

char *vg_lock_type_dup(const struct volume_group *vg)
{
	return dm_pool_strdup(vg->vgmem, vg->lock_type ? : "");
}

char *vg_lock_args_dup(const struct volume_group *vg)
{
	return dm_pool_strdup(vg->vgmem, vg->lock_args ? : "");
}

char *vg_uuid_dup(const struct volume_group *vg)
{
	return id_format_and_copy(vg->vgmem, &vg->id);
}

char *vg_tags_dup(const struct volume_group *vg)
{
	return tags_format_and_copy(vg->vgmem, &vg->tags);
}

uint32_t vg_seqno(const struct volume_group *vg)
{
	return vg->seqno;
}

uint64_t vg_status(const struct volume_group *vg)
{
	return vg->status;
}

uint64_t vg_size(const struct volume_group *vg)
{
	return (uint64_t) vg->extent_count * vg->extent_size;
}

uint64_t vg_free(const struct volume_group *vg)
{
	return (uint64_t) vg->free_count * vg->extent_size;
}

uint64_t vg_extent_size(const struct volume_group *vg)
{
	return (uint64_t) vg->extent_size;
}

uint64_t vg_extent_count(const struct volume_group *vg)
{
	return (uint64_t) vg->extent_count;
}

uint64_t vg_free_count(const struct volume_group *vg)
{
	return (uint64_t) vg->free_count;
}

uint64_t vg_pv_count(const struct volume_group *vg)
{
	return (uint64_t) vg->pv_count;
}

uint64_t vg_max_pv(const struct volume_group *vg)
{
	return (uint64_t) vg->max_pv;
}

uint64_t vg_max_lv(const struct volume_group *vg)
{
	return (uint64_t) vg->max_lv;
}

unsigned snapshot_count(const struct volume_group *vg)
{
	struct lv_list *lvl;
	unsigned num_snapshots = 0;

	dm_list_iterate_items(lvl, &vg->lvs)
		if (lv_is_cow(lvl->lv))
			num_snapshots++;

	return num_snapshots;
}

unsigned vg_visible_lvs(const struct volume_group *vg)
{
	struct lv_list *lvl;
	unsigned lv_count = 0;

	dm_list_iterate_items(lvl, &vg->lvs) {
		if (lv_is_visible(lvl->lv))
			lv_count++;
	}

	return lv_count;
}

uint32_t vg_mda_count(const struct volume_group *vg)
{
	return dm_list_size(&vg->fid->metadata_areas_in_use) +
	       dm_list_size(&vg->fid->metadata_areas_ignored);
}

uint32_t vg_mda_used_count(const struct volume_group *vg)
{
	uint32_t used_count = 0;
	struct metadata_area *mda;

	/*
	 * Ignored mdas could be on either list - the reason being the state
	 * may have changed from ignored to un-ignored and we need to write
	 * the state to disk.
	 */
	dm_list_iterate_items(mda, &vg->fid->metadata_areas_in_use)
		if (!mda_is_ignored(mda))
			used_count++;

	return used_count;
}

uint32_t vg_mda_copies(const struct volume_group *vg)
{
	return vg->mda_copies;
}

uint64_t vg_mda_size(const struct volume_group *vg)
{
	return find_min_mda_size(&vg->fid->metadata_areas_in_use);
}

uint64_t vg_mda_free(const struct volume_group *vg)
{
	uint64_t freespace = UINT64_MAX, mda_free;
	struct metadata_area *mda;

	dm_list_iterate_items(mda, &vg->fid->metadata_areas_in_use) {
		if (!mda->ops->mda_free_sectors)
			continue;
		mda_free = mda->ops->mda_free_sectors(mda);
		if (mda_free < freespace)
			freespace = mda_free;
	}

	if (freespace == UINT64_MAX)
		freespace = UINT64_C(0);

	return freespace;
}

int vg_set_mda_copies(struct volume_group *vg, uint32_t mda_copies)
{
	vg->mda_copies = mda_copies;

	/* FIXME Use log_verbose when this is due to specific cmdline request. */
	log_debug_metadata("Setting mda_copies to %" PRIu32 " for VG %s",
			   mda_copies, vg->name);

	return 1;
}

char *vg_profile_dup(const struct volume_group *vg)
{
	const char *profile_name = vg->profile ? vg->profile->name : "";
	return dm_pool_strdup(vg->vgmem, profile_name);
}

static int _recalc_extents(uint32_t *extents, const char *desc1,
			   const char *desc2, uint32_t old_extent_size,
			   uint32_t new_extent_size)
{
	uint64_t size = (uint64_t) old_extent_size * (*extents);

	if (size % new_extent_size) {
		log_error("New size %" PRIu64 " for %s%s not an exact number "
			  "of new extents.", size, desc1, desc2);
		return 0;
	}

	size /= new_extent_size;

	if (size > MAX_EXTENT_COUNT) {
		log_error("New extent count %" PRIu64 " for %s%s exceeds "
			  "32 bits.", size, desc1, desc2);
		return 0;
	}

	*extents = (uint32_t) size;

	return 1;
}
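
/*
 * Worked example (illustrative numbers, sizes in 512-byte sectors): with
 * old_extent_size = 8192 (4 MiB) and *extents = 100, the area covers
 * 8192 * 100 = 819200 sectors.  Converting to new_extent_size = 16384
 * (8 MiB) gives 819200 % 16384 == 0, so the call succeeds and stores
 * 819200 / 16384 = 50 in *extents.  A new_extent_size of 12288 (6 MiB)
 * would leave a remainder and make _recalc_extents() fail instead.
 */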

int vg_check_new_extent_size(const struct format_type *fmt, uint32_t new_extent_size)
{
	if (!new_extent_size) {
		log_error("Physical extent size may not be zero");
		return 0;
	}

	if ((fmt->features & FMT_NON_POWER2_EXTENTS)) {
		if (!is_power_of_2(new_extent_size) &&
		    (new_extent_size % MIN_NON_POWER2_EXTENT_SIZE)) {
			log_error("Physical Extent size must be a multiple of %s when not a power of 2.",
				  display_size(fmt->cmd, (uint64_t) MIN_NON_POWER2_EXTENT_SIZE));
			return 0;
		}
		return 1;
	}

	/* Apply original format1 restrictions */
	if (!is_power_of_2(new_extent_size)) {
		log_error("Metadata format only supports Physical Extent sizes that are powers of 2.");
		return 0;
	}

	if (new_extent_size > MAX_PE_SIZE || new_extent_size < MIN_PE_SIZE) {
		log_error("Extent size must be between %s and %s",
			  display_size(fmt->cmd, (uint64_t) MIN_PE_SIZE),
			  display_size(fmt->cmd, (uint64_t) MAX_PE_SIZE));
		return 0;
	}

	if (new_extent_size % MIN_PE_SIZE) {
		log_error("Extent size must be multiple of %s",
			  display_size(fmt->cmd, (uint64_t) MIN_PE_SIZE));
		return 0;
	}

	return 1;
}

int vg_set_extent_size(struct volume_group *vg, uint32_t new_extent_size)
{
	uint32_t old_extent_size = vg->extent_size;
	struct pv_list *pvl;
	struct lv_list *lvl;
	struct physical_volume *pv;
	struct logical_volume *lv;
	struct lv_segment *seg;
	struct pv_segment *pvseg;
	uint32_t s;

	if (!vg_is_resizeable(vg)) {
		log_error("Volume group \"%s\" must be resizeable "
			  "to change PE size", vg->name);
		return 0;
	}

	if (new_extent_size == vg->extent_size)
		return 1;

	if (!vg_check_new_extent_size(vg->fid->fmt, new_extent_size))
		return_0;

	if (new_extent_size > vg->extent_size) {
		if ((uint64_t) vg_size(vg) % new_extent_size) {
			/* FIXME Adjust used PV sizes instead */
			log_error("New extent size is not a perfect fit");
			return 0;
		}
	}

	vg->extent_size = new_extent_size;

	if (vg->fid->fmt->ops->vg_setup &&
	    !vg->fid->fmt->ops->vg_setup(vg->fid, vg))
		return_0;

	if (!_recalc_extents(&vg->extent_count, vg->name, "", old_extent_size,
			     new_extent_size))
		return_0;

	if (!_recalc_extents(&vg->free_count, vg->name, " free space",
			     old_extent_size, new_extent_size))
		return_0;

	/* foreach PV */
	dm_list_iterate_items(pvl, &vg->pvs) {
		pv = pvl->pv;
		pv->pe_size = new_extent_size;

		if (!_recalc_extents(&pv->pe_count, pv_dev_name(pv), "",
				     old_extent_size, new_extent_size))
			return_0;

		if (!_recalc_extents(&pv->pe_alloc_count, pv_dev_name(pv),
				     " allocated space", old_extent_size, new_extent_size))
			return_0;

		/* foreach free PV Segment */
		dm_list_iterate_items(pvseg, &pv->segments) {
			if (pvseg_is_allocated(pvseg))
				continue;

			if (!_recalc_extents(&pvseg->pe, pv_dev_name(pv),
					     " PV segment start", old_extent_size,
					     new_extent_size))
				return_0;
			if (!_recalc_extents(&pvseg->len, pv_dev_name(pv),
					     " PV segment length", old_extent_size,
					     new_extent_size))
				return_0;
		}
	}

	/* foreach LV */
	dm_list_iterate_items(lvl, &vg->lvs) {
		lv = lvl->lv;

		if (!_recalc_extents(&lv->le_count, lv->name, "", old_extent_size,
				     new_extent_size))
			return_0;

		dm_list_iterate_items(seg, &lv->segments) {
			if (!_recalc_extents(&seg->le, lv->name,
					     " segment start", old_extent_size,
					     new_extent_size))
				return_0;

			if (!_recalc_extents(&seg->len, lv->name,
					     " segment length", old_extent_size,
					     new_extent_size))
				return_0;

			if (!_recalc_extents(&seg->area_len, lv->name,
					     " area length", old_extent_size,
					     new_extent_size))
				return_0;

			if (!_recalc_extents(&seg->extents_copied, lv->name,
					     " extents moved", old_extent_size,
					     new_extent_size))
				return_0;

			if (!_recalc_extents(&seg->vdo_pool_virtual_extents, lv->name,
					     " virtual extents", old_extent_size,
					     new_extent_size))
				return_0;

			/* foreach area */
			for (s = 0; s < seg->area_count; s++) {
				switch (seg_type(seg, s)) {
				case AREA_PV:
					if (!_recalc_extents
					    (&seg_pe(seg, s),
					     lv->name,
					     " pvseg start", old_extent_size,
					     new_extent_size))
						return_0;
					if (!_recalc_extents
					    (&seg_pvseg(seg, s)->len,
					     lv->name,
					     " pvseg length", old_extent_size,
					     new_extent_size))
						return_0;
					break;
				case AREA_LV:
					if (!_recalc_extents
					    (&seg_le(seg, s), lv->name,
					     " area start", old_extent_size,
					     new_extent_size))
						return_0;
					break;
				case AREA_UNASSIGNED:
					log_error("Unassigned area %u found in "
						  "segment", s);
					return 0;
				}
			}
		}
	}

	return 1;
}

int vg_set_max_lv(struct volume_group *vg, uint32_t max_lv)
{
	if (!vg_is_resizeable(vg)) {
		log_error("Volume group \"%s\" must be resizeable "
			  "to change MaxLogicalVolume", vg->name);
		return 0;
	}

	if (!(vg->fid->fmt->features & FMT_UNLIMITED_VOLS)) {
		if (!max_lv)
			max_lv = 255;
		else if (max_lv > 255) {
			log_error("MaxLogicalVolume limit is 255");
			return 0;
		}
	}

	if (max_lv && max_lv < vg_visible_lvs(vg)) {
		log_error("MaxLogicalVolume is less than the current number "
			  "%d of LVs for %s", vg_visible_lvs(vg),
			  vg->name);
		return 0;
	}

	vg->max_lv = max_lv;

	return 1;
}

int vg_set_max_pv(struct volume_group *vg, uint32_t max_pv)
{
	if (!vg_is_resizeable(vg)) {
		log_error("Volume group \"%s\" must be resizeable "
			  "to change MaxPhysicalVolumes", vg->name);
		return 0;
	}

	if (!(vg->fid->fmt->features & FMT_UNLIMITED_VOLS)) {
		if (!max_pv)
			max_pv = 255;
		else if (max_pv > 255) {
			log_error("MaxPhysicalVolume limit is 255");
			return 0;
		}
	}

	if (max_pv && max_pv < vg->pv_count) {
		log_error("MaxPhysicalVolumes is less than the current number "
			  "%d of PVs for \"%s\"", vg->pv_count,
			  vg->name);
		return 0;
	}

	vg->max_pv = max_pv;

	return 1;
}

int vg_set_alloc_policy(struct volume_group *vg, alloc_policy_t alloc)
{
	if (alloc == ALLOC_INHERIT) {
		log_error("Volume Group allocation policy cannot inherit "
			  "from anything");
		return 0;
	}

	if (alloc == vg->alloc)
		return 1;

	vg->alloc = alloc;

	return 1;
}

/* The input string has already been validated. */
int vg_set_system_id(struct volume_group *vg, const char *system_id)
{
	if (!system_id || !*system_id) {
		vg->system_id = NULL;
		return 1;
	}

	if (!(vg->system_id = dm_pool_strdup(vg->vgmem, system_id))) {
		log_error("Failed to allocate memory for system_id in vg_set_system_id.");
		return 0;
	}

	return 1;
}

int vg_set_lock_type(struct volume_group *vg, const char *lock_type)
{
	if (!lock_type)
		lock_type = "none";

	if (!(vg->lock_type = dm_pool_strdup(vg->vgmem, lock_type))) {
		log_error("vg_set_lock_type %s no mem", lock_type);
		return 0;
	}

	return 1;
}

char *vg_attr_dup(struct dm_pool *mem, const struct volume_group *vg)
{
	char *repstr;

	if (!(repstr = dm_pool_zalloc(mem, 7))) {
		log_error("dm_pool_alloc failed");
		return NULL;
	}

	repstr[0] = (vg->status & LVM_WRITE) ? 'w' : 'r';
	repstr[1] = (vg_is_resizeable(vg)) ? 'z' : '-';
	repstr[2] = (vg_is_exported(vg)) ? 'x' : '-';
	repstr[3] = (vg_missing_pv_count(vg)) ? 'p' : '-';
	repstr[4] = alloc_policy_char(vg->alloc);

	if (vg_is_clustered(vg))
		repstr[5] = 'c';
	else if (vg_is_shared(vg))
		repstr[5] = 's';
	else
		repstr[5] = '-';

	return repstr;
}

int vgreduce_single(struct cmd_context *cmd, struct volume_group *vg,
		    struct physical_volume *pv, int commit)
{
	struct pv_list *pvl;
	struct volume_group *orphan_vg = NULL;
	int r = 0;
	const char *name = pv_dev_name(pv);

	if (!vg) {
		log_error(INTERNAL_ERROR "VG is NULL.");
		return r;
	}

	if (!pv->dev || dm_list_empty(&pv->dev->aliases)) {
		log_error("No device found for PV.");
		return r;
	}

	log_debug("vgreduce_single VG %s PV %s", vg->name, pv_dev_name(pv));

	if (pv_pe_alloc_count(pv)) {
		log_error("Physical volume \"%s\" still in use", name);
		return r;
	}

	if (vg->pv_count == 1) {
		log_error("Can't remove final physical volume \"%s\" from "
			  "volume group \"%s\"", name, vg->name);
		return r;
	}

	pvl = find_pv_in_vg(vg, name);

	log_verbose("Removing \"%s\" from volume group \"%s\"", name, vg->name);

	if (pvl)
		del_pvl_from_vgs(vg, pvl);

	pv->vg_name = vg->fid->fmt->orphan_vg_name;
	pv->status = ALLOCATABLE_PV;

	if (!dev_get_size(pv_dev(pv), &pv->size)) {
		log_error("%s: Couldn't get size.", pv_dev_name(pv));
		goto bad;
	}

	vg->free_count -= pv_pe_count(pv) - pv_pe_alloc_count(pv);
	vg->extent_count -= pv_pe_count(pv);

	/* FIXME: we don't need to vg_read the orphan vg here */
	orphan_vg = vg_read_orphans(cmd, vg->fid->fmt->orphan_vg_name);

	if (!orphan_vg)
		goto bad;

	if (!vg_split_mdas(cmd, vg, orphan_vg) || !vg->pv_count) {
		log_error("Cannot remove final metadata area on \"%s\" from \"%s\"",
			  name, vg->name);
		goto bad;
	}

	/*
	 * Only write out the needed changes if so requested by caller.
	 */
	if (commit) {
		if (!vg_write(vg) || !vg_commit(vg)) {
			log_error("Removal of physical volume \"%s\" from "
				  "\"%s\" failed", name, vg->name);
			goto bad;
		}

		if (!pv_write(cmd, pv, 0)) {
			log_error("Failed to clear metadata from physical "
				  "volume \"%s\" "
				  "after removal from \"%s\"", name, vg->name);
			goto bad;
		}

		log_print_unless_silent("Removed \"%s\" from volume group \"%s\"",
					name, vg->name);
	}

	r = 1;
bad:
	/* If we are committing here or we had an error then we will free fid */
	if (pvl && (commit || r != 1))
		free_pv_fid(pvl->pv);

	release_vg(orphan_vg);

	return r;
}
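
/*
 * Illustrative sketch only (assumed caller pattern, not taken from this
 * file): a command removing one PV would typically call this with
 * commit = 1 so the reduced VG metadata is written immediately; passing
 * commit = 0 lets the caller batch several removals and run
 * vg_write()/vg_commit() itself afterwards.
 *
 *	if (!vgreduce_single(cmd, vg, pv, 1))
 *		return 0;
 */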

void vg_backup_if_needed(struct volume_group *vg)
{
	if (!vg || !vg->needs_backup)
		return;

	vg->needs_backup = 0;
	backup(vg->vg_committed);
}