/*
 * Copyright (C) 2003-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2015 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "tools.h"

#include "lib/lvmpolld/polldaemon.h"
#include "lvm2cmdline.h"
#include "lib/lvmpolld/lvmpolld-client.h"

#include <time.h>
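
/* 100000000 ns = 0.1 s: the minimum wait used by _nanosleep() below when a
 * zero-length sleep is not allowed. */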
#define WAIT_AT_LEAST_NANOSECS 100000000
progress_t poll_mirror_progress(struct cmd_context *cmd,
				struct logical_volume *lv, const char *name,
				struct daemon_parms *parms)
{
	dm_percent_t segment_percent = DM_PERCENT_0, overall_percent = DM_PERCENT_0;
	uint32_t event_nr = 0;

	if (!lv_is_mirrored(lv) ||
	    !lv_mirror_percent(cmd, lv, !parms->interval, &segment_percent,
			       &event_nr) ||
	    (segment_percent == DM_PERCENT_INVALID)) {
		log_error("ABORTING: Mirror percentage check failed.");
		return PROGRESS_CHECK_FAILED;
	}

	overall_percent = copy_percent(lv);

	if (parms->progress_display)
		log_print_unless_silent("%s: %s: %s%%", name, parms->progress_title,
					display_percent(cmd, overall_percent));
	else
		log_verbose("%s: %s: %s%%", name, parms->progress_title,
			    display_percent(cmd, overall_percent));

	if (segment_percent != DM_PERCENT_100)
		return PROGRESS_UNFINISHED;

	if (overall_percent == DM_PERCENT_100)
		return PROGRESS_FINISHED_ALL;

	return PROGRESS_FINISHED_SEGMENT;
}
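
/*
 * A minimal sketch (not part of this file) of how a command could wire the
 * routine above into the polling machinery through the poll_fns member used
 * throughout this file.  The member names match the accesses made below;
 * the struct tag and the initializer shown are assumptions:
 *
 *	static struct poll_functions _mirror_fns = {
 *		.poll_progress = poll_mirror_progress,
 *		.finish_copy   = ...,
 *	};
 *
 *	struct daemon_parms parms = { .poll_fns = &_mirror_fns, ... };
 */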
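/*
 * Check one LV's poll progress.  Returns 0 on error.  On success, *finished
 * is set to 0 only when the poll reports PROGRESS_UNFINISHED or when another
 * segment remains; in every other case the caller should not retry.
 */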
static int _check_lv_status(struct cmd_context *cmd,
			    struct volume_group *vg,
			    struct logical_volume *lv,
			    const char *name, struct daemon_parms *parms,
			    int *finished)
{
	struct dm_list *lvs_changed;
	progress_t progress;

	/* By default, caller should not retry */
	*finished = 1;

	if (parms->aborting) {
		if (!(lvs_changed = lvs_using_lv(cmd, vg, lv))) {
			log_error("Failed to generate list of copied LVs: "
				  "can't abort.");
			return 0;
		}

		if (!parms->poll_fns->finish_copy(cmd, vg, lv, lvs_changed))
			return_0;

		return 1;
	}

	progress = parms->poll_fns->poll_progress(cmd, lv, name, parms);
	fflush(stdout);

	if (progress == PROGRESS_CHECK_FAILED)
		return_0;

	if (progress == PROGRESS_UNFINISHED) {
		/* The only case the caller *should* try again later */
		*finished = 0;
		return 1;
	}

	if (!(lvs_changed = lvs_using_lv(cmd, vg, lv))) {
		log_error("ABORTING: Failed to generate list of copied LVs");
		return 0;
	}

	/* Finished? Or progress to next segment? */
	if (progress == PROGRESS_FINISHED_ALL) {
		if (!parms->poll_fns->finish_copy(cmd, vg, lv, lvs_changed))
			return_0;
	} else {
		if (parms->poll_fns->update_metadata &&
		    !parms->poll_fns->update_metadata(cmd, vg, lv, lvs_changed, 0)) {
			log_error("ABORTING: Segment progression failed.");
			parms->poll_fns->finish_copy(cmd, vg, lv, lvs_changed);
			return 0;
		}
		*finished = 0;	/* Another segment */
	}

	return 1;
}
static void _nanosleep(unsigned secs, unsigned allow_zero_time)
{
	struct timespec wtime = {
		.tv_sec = secs,
	};

	if (!secs && !allow_zero_time)
		wtime.tv_nsec = WAIT_AT_LEAST_NANOSECS;

	sigint_allow();
	nanosleep(&wtime, &wtime);
	sigint_restore();
}
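
/*
 * SIGINT handling is enabled only for the duration of the nanosleep() call
 * above, so Ctrl-C can interrupt the wait; callers then test for this with
 * sigint_caught(), as _sleep_and_rescan_devices() does below.
 */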
static int _sleep_and_rescan_devices(struct cmd_context *cmd, struct daemon_parms *parms)
{
	if (!parms->aborting) {
		/*
		 * FIXME: do we really need to drop everything and then rescan
		 * everything between each iteration?  What change exactly does
		 * each iteration check for, and does seeing that require
		 * rescanning everything?
		 */
		lvmcache_destroy(cmd, 1, 0);
		label_scan_destroy(cmd);

		_nanosleep(parms->interval, 0);

		if (sigint_caught())
			return_0;

		if (!lvmcache_label_scan(cmd))
			stack;
	}

	return 1;
}
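
/*
 * Poll one LV until its operation finishes or aborts: each iteration
 * optionally sleeps and rescans devices, locks and rereads the VG (which
 * may have been renamed), relocates the LV, and calls _check_lv_status()
 * until it reports *finished.
 */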
int wait_for_single_lv(struct cmd_context *cmd, struct poll_operation_id *id,
		       struct daemon_parms *parms)
{
	struct volume_group *vg = NULL;
	struct logical_volume *lv;
	int finished = 0;
	uint32_t lockd_state = 0;
	uint32_t error_flags = 0;
	int is_lockd;
	int ret;
	unsigned wait_before_testing = parms->wait_before_testing;

	if (!wait_before_testing)
		if (!lvmcache_label_scan(cmd))
			stack;

	/* Poll for completion */
	while (!finished) {
		if (wait_before_testing &&
		    !_sleep_and_rescan_devices(cmd, parms)) {
			log_error("ABORTING: Polling interrupted for %s.", id->display_name);
			return 0;
		}

		is_lockd = lvmcache_vg_is_lockd_type(cmd, id->vg_name, NULL);

		/*
		 * An ex VG lock is needed because the check can call finish_copy
		 * which writes the VG.
		 */
		if (is_lockd && !lockd_vg(cmd, id->vg_name, "ex", 0, &lockd_state)) {
			log_error("ABORTING: Can't lock VG for %s.", id->display_name);
			return 0;
		}

		/* Locks the (possibly renamed) VG again */
		vg = vg_read(cmd, id->vg_name, NULL, READ_FOR_UPDATE, lockd_state, &error_flags, NULL);
		if (!vg) {
			/* What more could we do here? */
			if (error_flags & FAILED_NOTFOUND) {
				log_print_unless_silent("Can't find VG %s. No longer active.", id->display_name);
				ret = 1;
			} else {
				log_error("ABORTING: Can't reread VG for %s error flags %x.", id->display_name, error_flags);
				ret = 0;
			}
			goto out;
		}

		lv = find_lv(vg, id->lv_name);

		if (lv && id->uuid && strcmp(id->uuid, (char *) &lv->lvid))
			lv = NULL;

		if (lv && parms->lv_type && !(lv->status & parms->lv_type))
			lv = NULL;

		if (!lv) {
			if (parms->lv_type == PVMOVE)
				log_print_unless_silent("%s: No pvmove in progress - already finished or aborted.",
							id->display_name);
			else
				log_print_unless_silent("Can't find LV in %s for %s.",
							vg->name, id->display_name);
			ret = 1;
			goto out;
		}

		/*
		 * If the LV is not active locally, the kernel cannot be
		 * queried for its status.  We must exit in this case.
		 */
		if (!lv_is_active(lv)) {
			log_print_unless_silent("%s: Interrupted: No longer active.", id->display_name);
			ret = 1;
			goto out;
		}

		if (!_check_lv_status(cmd, vg, lv, id->display_name, parms, &finished)) {
			ret = 0;
			goto_out;
		}

		unlock_and_release_vg(cmd, vg, vg->name);

		if (is_lockd && !lockd_vg(cmd, id->vg_name, "un", 0, &lockd_state))
			stack;

		wait_before_testing = 1;
	}

	return 1;

out:
	if (vg)
		unlock_and_release_vg(cmd, vg, vg->name);

	if (is_lockd && !lockd_vg(cmd, id->vg_name, "un", 0, &lockd_state))
		stack;

	return ret;
}
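
/*
 * A minimal sketch of a caller, assuming an already-initialized cmd_context
 * and daemon_parms; the field values shown are illustrative only:
 *
 *	struct poll_operation_id id = {
 *		.vg_name = "vg0",
 *		.lv_name = "pvmove0",
 *		.display_name = "vg0/pvmove0",
 *		.uuid = ...,
 *	};
 *
 *	if (!wait_for_single_lv(cmd, &id, &parms))
 *		return ECMD_FAILED;
 */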
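/*
 * A poll_operation_id is copied into the command's memory pool so that the
 * list collected in _poll_vg() below outlives the VG metadata it was built
 * from while the entries are processed.
 */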
struct poll_id_list {
	struct dm_list list;
	struct poll_operation_id *id;
};

static struct poll_operation_id *_copy_poll_operation_id(struct dm_pool *mem,
							 const struct poll_operation_id *id)
{
	struct poll_operation_id *copy;

	if (!id || !id->vg_name || !id->lv_name || !id->display_name || !id->uuid) {
		log_error(INTERNAL_ERROR "Wrong params for _copy_poll_operation_id.");
		return NULL;
	}

	if (!(copy = dm_pool_alloc(mem, sizeof(*copy)))) {
		log_error("Poll operation ID allocation failed.");
		return NULL;
	}

	if (!(copy->display_name = dm_pool_strdup(mem, id->display_name)) ||
	    !(copy->lv_name = dm_pool_strdup(mem, id->lv_name)) ||
	    !(copy->vg_name = dm_pool_strdup(mem, id->vg_name)) ||
	    !(copy->uuid = dm_pool_strdup(mem, id->uuid))) {
		log_error("Failed to copy one or more poll_operation_id members.");
		dm_pool_free(mem, copy);
		return NULL;
	}

	return copy;
}

static struct poll_id_list *_poll_id_list_create(struct dm_pool *mem,
						 const struct poll_operation_id *id)
{
	struct poll_id_list *idl = (struct poll_id_list *) dm_pool_alloc(mem, sizeof(struct poll_id_list));

	if (!idl) {
		log_error("Poll ID list allocation failed.");
		return NULL;
	}

	if (!(idl->id = _copy_poll_operation_id(mem, id))) {
		dm_pool_free(mem, idl);
		return NULL;
	}

	return idl;
}
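
/*
 * process_each_vg() callback: first collect all LVs of the requested type
 * in this VG into a pool-allocated poll_id_list, then run one status check
 * on each collected entry.
 */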
static int _poll_vg(struct cmd_context *cmd, const char *vgname,
		    struct volume_group *vg, struct processing_handle *handle)
{
	struct daemon_parms *parms;
	struct lv_list *lvl;
	struct dm_list idls;
	struct poll_id_list *idl;
	struct poll_operation_id id;
	struct logical_volume *lv;
	int finished;

	if (!handle || !(parms = (struct daemon_parms *) handle->custom_handle)) {
		log_error(INTERNAL_ERROR "Handle is undefined.");
		return ECMD_FAILED;
	}

	dm_list_init(&idls);

	/*
	 * First iterate over all LVs in the VG and collect those suitable
	 * for polling (or for an abort), which takes place below.
	 */
	dm_list_iterate_items(lvl, &vg->lvs) {
		lv = lvl->lv;
		if (!(lv->status & parms->lv_type))
			continue;

		id.display_name = parms->poll_fns->get_copy_name_from_lv(lv);
		if (!id.display_name && !parms->aborting)
			continue;

		if (!id.display_name) {
			log_error("Device name for LV %s not found in metadata. "
				  "(unfinished pvmove mirror removal?)", display_lvname(lv));
			goto err;
		}

		/* FIXME Need to do the activation from _set_up_pvmove here
		 *	 if it's not running and we're not aborting. */
		if (!lv_is_active(lv)) {
			log_print_unless_silent("%s: Skipping inactive LV. Try lvchange or vgchange.", id.display_name);
			continue;
		}

		id.lv_name = lv->name;
		id.vg_name = vg->name;
		id.uuid = lv->lvid.s;

		idl = _poll_id_list_create(cmd->mem, &id);
		if (!idl) {
			log_error("Failed to create poll_id_list.");
			goto err;
		}

		dm_list_add(&idls, &idl->list);
	}

	/* Perform the poll operation on the LVs collected in the loop above. */
	dm_list_iterate_items(idl, &idls) {
		if (!(lv = find_lv(vg, idl->id->lv_name)))
			continue;
		if (idl->id->uuid && strcmp(idl->id->uuid, (char *) &lv->lvid))
			continue;
		if (parms->lv_type && !(lv->status & parms->lv_type))
			continue;
		if (_check_lv_status(cmd, vg, lv, idl->id->display_name, parms, &finished) && !finished)
			parms->outstanding_count++;
	}

err:
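	/* dm_pool_free() releases the given object and everything the pool
	 * allocated after it, so freeing the first item drops the whole list. */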
	if (!dm_list_empty(&idls))
		dm_pool_free(cmd->mem, dm_list_item(dm_list_first(&idls), struct poll_id_list));

	return ECMD_PROCESSED;
}

static void _poll_for_all_vgs(struct cmd_context *cmd,
			      struct processing_handle *handle)
{
	struct daemon_parms *parms = (struct daemon_parms *) handle->custom_handle;

	while (1) {
		parms->outstanding_count = 0;
		process_each_vg(cmd, 0, NULL, NULL, NULL, READ_FOR_UPDATE, 0, handle, _poll_vg);
		lock_global(cmd, "un");
		if (!parms->outstanding_count)
			break;
		_nanosleep(parms->interval, 1);
	}
}
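
/*
 * The code below is compiled only with lvmpolld support and backs the mode
 * where polling is delegated to the lvmpolld daemon; _report_progress()
 * reads the VG locally just to report the progress of an operation running
 * on this host (see the locking comment inside).
 */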
#ifdef LVMPOLLD_SUPPORT
typedef struct {
	struct daemon_parms *parms;
	struct dm_list idls;
} lvmpolld_parms_t;

static int _report_progress(struct cmd_context *cmd, struct poll_operation_id *id,
			    struct daemon_parms *parms)
{
	struct volume_group *vg;
	struct logical_volume *lv;
	uint32_t lockd_state = 0;
	uint32_t error_flags = 0;
	int ret;

	/*
	 * It's reasonable to expect a lockd_vg("sh") here, but it should not
	 * actually be needed, because we only report the progress on the same
	 * host where the pvmove/lvconvert is happening.  No VG lock is needed
	 * to protect anything here (we're just reading the VG), and no VG lock
	 * is needed to force a VG read from disk to get changes from other
	 * hosts, because the only change to the VG we're interested in is the
	 * change done locally.
	 */
improve reading and repairing vg metadata
The fact that vg repair is implemented as a part of vg read
has led to a messy and complicated implementation of vg_read,
and limited and uncontrolled repair capability. This splits
read and repair apart.
Summary
-------
- take all kinds of various repairs out of vg_read
- vg_read no longer writes anything
- vg_read now simply reads and returns vg metadata
- vg_read ignores bad or old copies of metadata
- vg_read proceeds with a single good copy of metadata
- improve error checks and handling when reading
- keep track of bad (corrupt) copies of metadata in lvmcache
- keep track of old (seqno) copies of metadata in lvmcache
- keep track of outdated PVs in lvmcache
- vg_write will do basic repairs
- new command vgck --updatemetdata will do all repairs
Details
-------
- In scan, do not delete dev from lvmcache if reading/processing fails;
the dev is still present, and removing it makes it look like the dev
is not there. Records are now kept about the problems with each PV
so they be fixed/repaired in the appropriate places.
- In scan, record a bad mda on failure, and delete the mda from
mda in use list so it will not be used by vg_read or vg_write,
only by repair.
- In scan, succeed if any good mda on a device is found, instead of
failing if any is bad. The bad/old copies of metadata should not
interfere with normal usage while good copies can be used.
- In scan, add a record of old mdas in lvmcache for later, do not repair
them while reading, and do not let them prevent us from finding and
using a good copy of metadata from elsewhere. One result is that
"inconsistent metadata" is no longer a read error, but instead a
record in lvmcache that can be addressed separate from the read.
- Treat a dev with no good mdas like a dev with no mdas, which is an
existing case we already handle.
- Don't use a fake vg "handle" for returning an error from vg_read,
or the vg_read_error function for getting that error number;
just return null if the vg cannot be read or used, and an error_flags
arg with flags set for the specific kind of error (which can be used
later for determining the kind of repair.)
- Saving an original copy of the vg metadata, for purposes of reverting
a write, is now done explicitly in vg_read instead of being hidden in
the vg_make_handle function.
- When a vg is not accessible due to "access restrictions" but is
otherwise fine, return the vg through the new error_vg arg so that
process_each_pv can skip the PVs in the VG while processing.
(This is a temporary accomodation for the way process_each_pv
tracks which devs have been looked at, and can be dropped later
when process_each_pv implementation dev tracking is changed.)
- vg_read does not try to fix or recover a vg, but now just reads the
metadata, checks access restrictions and returns it.
(Checking access restrictions might be better done outside of vg_read,
but this is a later improvement.)
- _vg_read now simply makes one attempt to read metadata from
each mda, and uses the most recent copy to return to the caller
in the form of a 'vg' struct.
(bad mdas were excluded during the scan and are not retried)
(old mdas were not excluded during scan and are retried here)
- vg_read uses _vg_read to get the latest copy of metadata from mdas,
and then makes various checks against it to produce warnings,
and to check if VG access is allowed (access restrictions include:
writable, foreign, shared, clustered, missing pvs).
- Things that were previously silently/automatically written by vg_read
that are now done by vg_write, based on the records made in lvmcache
during the scan and read:
. clearing the missing flag
. updating old copies of metadata
. clearing outdated pvs
. updating pv header flags
- Bad/corrupt metadata are now repaired; they were not before.
Test changes
------------
- A read command no longer writes the VG to repair it, so add a write
command to do a repair.
(inconsistent-metadata, unlost-pv)
- When a missing PV is removed from a VG, and then the device is
enabled again, vgck --updatemetadata is needed to clear the
outdated PV before it can be used again, where it wasn't before.
(lvconvert-repair-policy, lvconvert-repair-raid, lvconvert-repair,
mirror-vgreduce-removemissing, pv-ext-flags, unlost-pv)
Reading bad/old metadata
------------------------
- "bad metadata": the mda_header or metadata text has invalid fields
or can't be parsed by lvm. This is a form of corruption that would
not be caused by known failure scenarios. A checksum error is
typically included among the errors reported.
- "old metadata": a valid copy of the metadata that has a smaller seqno
than other copies of the metadata. This can happen if the device
failed, or io failed, or lvm failed while commiting new metadata
to all the metadata areas. Old metadata on a PV that has been
removed from the VG is the "outdated" case below.
When a VG has some PVs with bad/old metadata, lvm can simply ignore
the bad/old copies, and use a good copy. This is why there are
multiple copies of the metadata -- so it's available even when some
of the copies cannot be used. The bad/old copies do not have to be
repaired before the VG can be used (the repair can happen later.)
A PV with no good copies of the metadata simply falls back to being
treated like a PV with no mdas; a common and harmless configuration.
When bad/old metadata exists, lvm warns the user about it, and
suggests repairing it using a new metadata repair command.
Bad metadata in particular is something that users will want to
investigate and repair themselves, since it should not happen and
may indicate some other problem that needs to be fixed.
PVs with bad/old metadata are not the same as missing devices.
Missing devices will block various kinds of VG modification or
activation, but bad/old metadata will not.
Previously, lvm would attempt to repair bad/old metadata whenever
it was read. This was unnecessary since lvm does not require every
copy of the metadata to be used. It would also hide potential
problems that should be investigated by the user. It was also
dangerous in cases where the VG was on shared storage. The user
is now allowed to investigate potential problems and decide how
and when to repair them.
Repairing bad/old metadata
--------------------------
When label scan sees bad metadata in an mda, that mda is removed
from the lvmcache info->mdas list. This means that vg_read will
skip it, and not attempt to read/process it again. If it was
the only in-use mda on a PV, that PV is treated like a PV with
no mdas. It also means that vg_write will also skip the bad mda,
and not attempt to write new metadata to it. The only way to
repair bad metadata is with the metadata repair command.
When label scan sees old metadata in an mda, that mda is kept
in the lvmcache info->mdas list. This means that vg_read will
read/process it again, and likely see the same mismatch with
the other copies of the metadata. Like the label_scan, the
vg_read will simply ignore the old copy of the metadata and
use the latest copy. If the command is modifying the vg
(e.g. lvcreate), then vg_write, which writes new metadata to
every mda on info->mdas, will write the new metadata to the
mda that had the old version. If successful, this will resolve
the old metadata problem (without needing to run a metadata
repair command.)
Outdated PVs
------------
An outdated PV is a PV that has an old copy of VG metadata
that shows it is a member of the VG, but the latest copy of
the VG metadata does not include this PV. This happens if
the PV is disconnected, vgreduce --removemissing is run to
remove the PV from the VG, then the PV is reconnected.
In this case, the outdated PV needs to have its outdated metadata
removed and the PV used flag needs to be cleared. This repair
will be done by the subsequent repair command. It is also done
if vgremove is run on the VG.
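The test itself is just a membership check against the latest
copy of the metadata; a hypothetical standalone sketch:

  #include <stdio.h>
  #include <string.h>

  static int vg_lists_pv(const char **pv_uuids, int n, const char *uuid)
  {
          int i;

          for (i = 0; i < n; i++)
                  if (!strcmp(pv_uuids[i], uuid))
                          return 1;
          return 0;
  }

  int main(void)
  {
          /* PVs in the latest metadata, after vgreduce --removemissing */
          const char *current_pvs[] = { "pv-uuid-1", "pv-uuid-2" };
          /* a reconnected PV whose old metadata claims VG membership */
          const char *reconnected = "pv-uuid-3";

          if (!vg_lists_pv(current_pvs, 2, reconnected))
                  printf("%s: outdated PV - wipe its old metadata, clear the used flag\n",
                         reconnected);
          return 0;
  }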
MISSING PVs
-----------
When a device is missing, most commands will refuse to modify
the VG. This is the simple case. More complicated is when
a command is allowed to modify the VG while it is missing a
device.
When a VG is written while a device is missing for one of its PVs,
the VG metadata is written to disk with the MISSING flag on the PV
with the missing device. When the VG is next used, it is treated
as if the PV with the MISSING flag still has a missing device, even
if that device has reappeared.
If all LVs that were using a PV with the MISSING flag are removed
or repaired so that the MISSING PV is no longer used, then the
next time the VG metadata is written, the MISSING flag will be
dropped.
Alternative methods of clearing the MISSING flag are:
vgreduce --removemissing will remove PVs with missing devices,
or PVs with the MISSING flag where the device has reappeared.
vgextend --restoremissing will clear the MISSING flag on PVs
where the device has reappeared, allowing the VG to be used
normally. This must be done with caution since the reappeared
device may have old data that is inconsistent with data on other PVs.
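A small sketch of the flag lifecycle described above (hypothetical
fields, not the real metadata structures):

  #include <stdio.h>

  struct pv {
          int missing;      /* MISSING flag in the metadata */
          int dev_present;  /* device currently visible */
          int used_by_lvs;  /* any LV still uses this PV */
  };

  static void commit_metadata(struct pv *pv)
  {
          if (!pv->dev_present)
                  pv->missing = 1;   /* device gone: flag is set */
          else if (pv->missing && !pv->used_by_lvs)
                  pv->missing = 0;   /* unused and back: flag dropped */
          /* otherwise the flag stays until the LVs are removed or
           * repaired, or vgextend --restoremissing clears it */
  }

  int main(void)
  {
          struct pv pv = { .missing = 1, .dev_present = 1, .used_by_lvs = 1 };

          commit_metadata(&pv);
          printf("LVs still use the PV: MISSING=%d\n", pv.missing); /* 1 */

          pv.used_by_lvs = 0;  /* LVs removed or repaired */
          commit_metadata(&pv);
          printf("PV no longer used:  MISSING=%d\n", pv.missing);   /* 0 */
          return 0;
  }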
Bad mda repair
--------------
The new command:
vgck --updatemetadata VG
first uses vg_write to repair the basic issues mentioned above
(old metadata, outdated PVs, pv_header flags, MISSING_PV flags).
It then goes further and repairs
bad metadata:
. text metadata that has a bad checksum
. text metadata that is not parsable
. corrupt mda_header checksum and version fields
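The repair order can be sketched as two passes (hypothetical
helper names, not the real functions):

  #include <stdio.h>

  static void write_usable_mdas(void)
  {
          /* the normal vg_write pass: rewrites every usable mda,
           * resolving old metadata and stale flag state */
          printf("step 1: vg_write to all usable mdas\n");
  }

  static void rewrite_bad_mdas(void)
  {
          /* bad mdas were dropped from the usable list, so they
           * need an explicit rewrite: a fresh mda_header (checksum,
           * version) plus a new copy of the metadata text */
          printf("step 2: rewrite mda_header and text of bad mdas\n");
  }

  int main(void)
  {
          write_usable_mdas();
          rewrite_bad_mdas();
          return 0;
  }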
(To keep a clean diff, #if 0 is added around functions that
are replaced by new code. These commented functions are
removed by the following commit.)
2019-05-24 20:04:37 +03:00
vg = vg_read ( cmd , id - > vg_name , NULL , 0 , lockd_state , & error_flags , NULL ) ;
if ( ! vg ) {
log_error ( " Can't reread VG for %s error flags %x " , id - > display_name , error_flags ) ;
2015-03-05 23:00:44 +03:00
ret = 0 ;
goto out_ret ;
2015-05-09 02:59:18 +03:00
}
2015-05-19 16:08:50 +03:00
lv = find_lv ( vg , id - > lv_name ) ;
if ( lv & & id - > uuid & & strcmp ( id - > uuid , ( char * ) & lv - > lvid ) )
lv = NULL ;
2015-10-19 17:56:45 +03:00
/*
* CONVERTING is set only during mirror upconversion but we may need to
* read LV ' s progress info even when it ' s not converting ( linear - > mirror )
*/
if ( lv & & ( parms - > lv_type ^ CONVERTING ) & & ! ( lv - > status & parms - > lv_type ) )
2015-05-19 16:08:50 +03:00
lv = NULL ;
2015-05-09 02:59:18 +03:00
if ( ! lv ) {
2015-05-21 11:17:29 +03:00
if ( parms - > lv_type = = PVMOVE )
log_verbose ( " %s: No pvmove in progress - already finished or aborted. " ,
id - > display_name ) ;
else
log_verbose ( " Can't find LV in %s for %s. Already finished or removed. " ,
vg - > name , id - > display_name ) ;
2015-03-05 23:00:44 +03:00
ret = 1 ;
2015-05-21 11:17:29 +03:00
goto out ;
2015-05-09 02:59:18 +03:00
}
2018-06-05 21:21:28 +03:00
if ( ! lv_is_active ( lv ) ) {
2015-05-21 11:17:29 +03:00
log_verbose ( " %s: Interrupted: No longer active. " , id - > display_name ) ;
2015-03-05 23:00:44 +03:00
ret = 1 ;
2015-05-21 11:17:29 +03:00
goto out ;
2015-05-09 02:59:18 +03:00
}
if ( parms - > poll_fns - > poll_progress ( cmd , lv , id - > display_name , parms ) = = PROGRESS_CHECK_FAILED ) {
2015-03-05 23:00:44 +03:00
ret = 0 ;
goto out ;
2015-05-09 02:59:18 +03:00
}
2018-05-07 12:46:09 +03:00
fflush ( stdout ) ;
2015-05-09 02:59:18 +03:00
2015-03-05 23:00:44 +03:00
ret = 1 ;
2015-05-21 11:17:29 +03:00
out :
2015-05-09 02:59:18 +03:00
unlock_and_release_vg ( cmd , vg , vg - > name ) ;
2015-03-05 23:00:44 +03:00
out_ret :
return ret ;
2015-05-09 02:59:18 +03:00
}
static int _lvmpolld_init_poll_vg ( struct cmd_context * cmd , const char * vgname ,
struct volume_group * vg , struct processing_handle * handle )
{
int r ;
struct lv_list * lvl ;
struct logical_volume * lv ;
struct poll_id_list * idl ;
struct poll_operation_id id ;
lvmpolld_parms_t * lpdp = ( lvmpolld_parms_t * ) handle - > custom_handle ;
dm_list_iterate_items ( lvl , & vg - > lvs ) {
lv = lvl - > lv ;
if ( ! ( lv - > status & lpdp - > parms - > lv_type ) )
continue ;
id . display_name = lpdp - > parms - > poll_fns - > get_copy_name_from_lv ( lv ) ;
if ( ! id . display_name & & ! lpdp - > parms - > aborting )
continue ;
2015-08-04 10:51:16 +03:00
id . vg_name = lv - > vg - > name ;
id . lv_name = lv - > name ;
2015-07-08 16:08:39 +03:00
if ( ! * lv - > lvid . s ) {
2015-05-09 02:59:18 +03:00
log_print_unless_silent ( " Missing LV uuid within: %s/%s " , id . vg_name , id . lv_name ) ;
continue ;
}
id . uuid = lv - > lvid . s ;
r = lvmpolld_poll_init ( cmd , & id , lpdp - > parms ) ;
if ( r & & ! lpdp - > parms - > background ) {
2017-10-18 17:57:46 +03:00
if ( ! ( idl = _poll_id_list_create ( cmd - > mem , & id ) ) )
2015-05-09 02:59:18 +03:00
return ECMD_FAILED ;
dm_list_add ( & lpdp - > idls , & idl - > list ) ;
}
}
return ECMD_PROCESSED ;
}
static void _lvmpolld_poll_for_all_vgs ( struct cmd_context * cmd ,
struct daemon_parms * parms ,
struct processing_handle * handle )
{
int r ;
struct dm_list * first ;
struct poll_id_list * idl , * tlv ;
unsigned finished ;
lvmpolld_parms_t lpdp = {
. parms = parms
} ;
dm_list_init ( & lpdp . idls ) ;
handle - > custom_handle = & lpdp ;
2016-05-03 12:46:28 +03:00
process_each_vg ( cmd , 0 , NULL , NULL , NULL , 0 , 0 , handle , _lvmpolld_init_poll_vg ) ;
2015-05-09 02:59:18 +03:00
first = dm_list_first ( & lpdp . idls ) ;
while ( ! dm_list_empty ( & lpdp . idls ) ) {
dm_list_iterate_items_safe ( idl , tlv , & lpdp . idls ) {
r = lvmpolld_request_info ( idl - > id , lpdp . parms ,
& finished ) ;
if ( ! r | | finished )
dm_list_del ( & idl - > list ) ;
else if ( ! parms - > aborting )
2017-10-18 17:57:46 +03:00
_report_progress ( cmd , idl - > id , lpdp . parms ) ;
2015-05-09 02:59:18 +03:00
}
_nanosleep ( lpdp . parms - > interval , 0 ) ;
}
if ( first )
dm_pool_free ( cmd - > mem , dm_list_item ( first , struct poll_id_list ) ) ;
}
static int _lvmpoll_daemon ( struct cmd_context * cmd , struct poll_operation_id * id ,
struct daemon_parms * parms )
{
int r ;
struct processing_handle * handle = NULL ;
unsigned finished = 0 ;
if ( parms - > aborting )
parms - > interval = 0 ;
if ( id ) {
r = lvmpolld_poll_init ( cmd , id , parms ) ;
if ( r & & ! parms - > background ) {
while ( 1 ) {
if ( ! ( r = lvmpolld_request_info ( id , parms , & finished ) ) | |
finished | |
2017-10-18 17:57:46 +03:00
( ! parms - > aborting & & ! ( r = _report_progress ( cmd , id , parms ) ) ) )
2015-05-09 02:59:18 +03:00
break ;
_nanosleep ( parms - > interval , 0 ) ;
}
}
return r ? ECMD_PROCESSED : ECMD_FAILED ;
}
2017-07-19 17:16:12 +03:00
/* process all in-flight operations */
if ( ! ( handle = init_processing_handle ( cmd , NULL ) ) ) {
log_error ( " Failed to initialize processing handle. " ) ;
return ECMD_FAILED ;
}
_lvmpolld_poll_for_all_vgs ( cmd , parms , handle ) ;
destroy_processing_handle ( cmd , handle ) ;
return ECMD_PROCESSED ;
2015-05-09 02:59:18 +03:00
}
# else
# define _lvmpoll_daemon(cmd, id, parms) (ECMD_FAILED)
# endif /* LVMPOLLD_SUPPORT */
2010-01-11 22:19:17 +03:00
/*
* Only allow * one * return from poll_daemon ( ) ( the parent ) .
* If there is a child it must exit ( ignoring the memory leak messages ) .
* - ' background ' is advisory so a child polldaemon may not be used even
* if it was requested .
*/
2015-04-10 15:08:19 +03:00
static int _poll_daemon ( struct cmd_context * cmd , struct poll_operation_id * id ,
struct daemon_parms * parms )
2004-05-05 21:56:20 +04:00
{
2015-02-13 12:36:06 +03:00
struct processing_handle * handle = NULL ;
2010-01-11 22:19:17 +03:00
int daemon_mode = 0 ;
int ret = ECMD_PROCESSED ;
2004-05-05 21:56:20 +04:00
2015-03-17 20:40:43 +03:00
if ( parms - > background ) {
2013-09-03 18:06:16 +04:00
daemon_mode = become_daemon ( cmd , 0 ) ;
2010-01-11 22:19:17 +03:00
if ( daemon_mode = = 0 )
return ECMD_PROCESSED ; /* Parent */
2017-07-19 17:16:12 +03:00
if ( daemon_mode = = 1 )
2015-03-17 20:40:43 +03:00
parms - > progress_display = 0 ; /* Child */
2004-05-05 21:56:20 +04:00
/* FIXME Use wait_event (i.e. interval = 0) and */
/* fork one daemon per copy? */
}
2009-09-29 23:35:26 +04:00
/*
* Process one specific task or all incomplete tasks ?
*/
2018-04-26 00:23:55 +03:00
/* clear lvmcache/bcache/fds from the parent */
lvmcache_destroy ( cmd , 1 , 0 ) ;
label_scan_destroy ( cmd ) ;
2015-04-10 15:08:19 +03:00
if ( id ) {
2015-04-10 17:36:50 +03:00
if ( ! wait_for_single_lv ( cmd , id , parms ) ) {
2009-09-15 02:47:49 +04:00
stack ;
2010-01-11 22:19:17 +03:00
ret = ECMD_FAILED ;
2009-09-15 02:47:49 +04:00
}
2015-02-13 12:36:06 +03:00
} else {
2015-03-17 20:44:25 +03:00
if ( ! parms - > interval )
parms - > interval = find_config_tree_int ( cmd , activation_polling_interval_CFG , NULL ) ;
2018-04-26 00:23:55 +03:00
2016-05-31 13:24:05 +03:00
if ( ! ( handle = init_processing_handle ( cmd , NULL ) ) ) {
2015-02-13 12:36:06 +03:00
log_error ( " Failed to initialize processing handle. " ) ;
ret = ECMD_FAILED ;
} else {
2015-03-17 20:40:43 +03:00
handle - > custom_handle = parms ;
2015-02-13 12:36:06 +03:00
_poll_for_all_vgs ( cmd , handle ) ;
}
}
2004-05-05 21:56:20 +04:00
2015-03-17 20:40:43 +03:00
if ( parms - > background & & daemon_mode = = 1 ) {
2015-02-13 12:42:21 +03:00
destroy_processing_handle ( cmd , handle ) ;
2010-01-11 22:19:17 +03:00
/*
* child was successfully forked :
* background polldaemon must not return to the caller
* because it will redundantly continue performing the
* caller ' s task ( that the parent already performed )
*/
/* FIXME Attempt proper cleanup */
_exit ( lvm_return_code ( ret ) ) ;
}
2015-02-13 12:42:21 +03:00
destroy_processing_handle ( cmd , handle ) ;
2010-01-11 22:19:17 +03:00
return ret ;
2004-05-05 21:56:20 +04:00
}
2015-03-17 20:31:41 +03:00
2015-03-17 20:40:43 +03:00
static int _daemon_parms_init ( struct cmd_context * cmd , struct daemon_parms * parms ,
2024-05-03 15:01:59 +03:00
unsigned background , const struct poll_functions * poll_fns ,
2015-03-17 20:40:43 +03:00
const char * progress_title , uint64_t lv_type )
{
sign_t interval_sign ;
parms - > aborting = arg_is_set ( cmd , abort_ARG ) ;
parms - > background = background ;
interval_sign = arg_sign_value ( cmd , interval_ARG , SIGN_NONE ) ;
if ( interval_sign = = SIGN_MINUS ) {
log_error ( " Argument to --interval cannot be negative. " ) ;
return 0 ;
}
parms - > interval = arg_uint_value ( cmd , interval_ARG ,
find_config_tree_int ( cmd , activation_polling_interval_CFG , NULL ) ) ;
parms - > wait_before_testing = ( interval_sign = = SIGN_PLUS ) ;
parms - > progress_title = progress_title ;
parms - > lv_type = lv_type ;
parms - > poll_fns = poll_fns ;
if ( parms - > interval & & ! parms - > aborting )
log_verbose ( " Checking progress %s waiting every %u seconds. " ,
( parms - > wait_before_testing ? " after " : " before " ) ,
parms - > interval ) ;
2015-03-17 20:44:25 +03:00
parms - > progress_display = parms - > interval ? 1 : 0 ;
device usage based on devices file
The LVM devices file lists devices that lvm can use. The default
file is /etc/lvm/devices/system.devices, and the lvmdevices(8)
command is used to add or remove device entries. If the file
does not exist, or if lvm.conf includes use_devicesfile=0, then
lvm will not use a devices file. When the devices file is in use,
the regex filter is not used, and the filter settings in lvm.conf
or on the command line are ignored.
LVM records devices in the devices file using hardware-specific
IDs, such as the WWID, and attempts to use subsystem-specific
IDs for virtual device types. These device IDs are also written
in the VG metadata. When no hardware or virtual ID is available,
lvm falls back to using the unstable device name as the device ID.
When devnames are used, lvm performs extra scanning to find
devices if their devname changes, e.g. after reboot.
When proper device IDs are used, an lvm command will not look
at devices outside the devices file, but when devnames are used
as a fallback, lvm will scan devices outside the devices file
to locate PVs on renamed devices. A config setting
search_for_devnames can be used to control the scanning for
renamed devname entries.
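A sketch of the ID precedence this implies (hypothetical code;
the real ID types and their ordering live in lvm's device ID
handling). Only the unstable devname case triggers the extra
search_for_devnames scanning:

  #include <stdio.h>

  struct dev_info {
          const char *wwid;     /* stable hardware ID, may be NULL */
          const char *sys_id;   /* subsystem ID for virtual devices */
          const char *devname;  /* unstable kernel name, always set */
  };

  static const char *choose_device_id(const struct dev_info *d, int *unstable)
  {
          *unstable = 0;
          if (d->wwid)
                  return d->wwid;
          if (d->sys_id)
                  return d->sys_id;
          *unstable = 1;  /* devname fallback: may change after reboot */
          return d->devname;
  }

  int main(void)
  {
          struct dev_info sda = { "naa.5000c500a1b2c3d4", NULL, "/dev/sda" };
          struct dev_info vdb = { NULL, NULL, "/dev/vdb" };
          int unstable;

          printf("%s -> %s\n", sda.devname, choose_device_id(&sda, &unstable));
          printf("%s -> %s (unstable=%d)\n", vdb.devname,
                 choose_device_id(&vdb, &unstable), unstable);
          return 0;
  }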
Related to the devices file, the new command option
--devices <devnames> allows a list of devices to be specified for
the command to use, overriding the devices file. The listed
devices act as a sort of devices file in terms of limiting which
devices lvm will see and use. Devices that are not listed will
appear to be missing to the lvm command.
Multiple devices files can be kept in /etc/lvm/devices, which
allows lvm to be used with different sets of devices, e.g.
system devices do not need to be exposed to a specific application,
and the application can use lvm on its own set of devices that are
not exposed to the system. The option --devicesfile <filename> is
used to select the devices file to use with the command. Without
the option set, the default system devices file is used.
Setting --devicesfile "" causes lvm to not use a devices file.
An existing, empty devices file means lvm will see no devices.
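A sketch of the selection rule (hypothetical helper; the default
path is the one named above):

  #include <stdio.h>

  /* returns the devices file path to use, or NULL for "no devices file" */
  static const char *devices_file_path(const char *opt, char *buf, int len)
  {
          if (!opt) {  /* option not set: the system default */
                  snprintf(buf, len, "/etc/lvm/devices/system.devices");
                  return buf;
          }
          if (!*opt)   /* --devicesfile "" : no devices file at all */
                  return NULL;
          snprintf(buf, len, "/etc/lvm/devices/%s", opt);
          return buf;
  }

  int main(void)
  {
          char buf[128];
          const char *p;

          p = devices_file_path(NULL, buf, sizeof(buf));
          printf("default: %s\n", p);
          p = devices_file_path("app.devices", buf, sizeof(buf));
          printf("--devicesfile app.devices: %s\n", p);
          p = devices_file_path("", buf, sizeof(buf));
          printf("--devicesfile \"\": %s\n", p ? p : "(none)");
          return 0;
  }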
The new command vgimportdevices adds PVs from a VG to the devices
file and updates the VG metadata to include the device IDs.
vgimportdevices -a will import all VGs into the system devices file.
LVM commands run by dmeventd do not use a devices file by default,
and will look at all devices on the system. A devices file can
be created for dmeventd (/etc/lvm/devices/dmeventd.devices). If
this file exists, lvm commands run by dmeventd will use it.
Internal implementation:
- device_ids_read - read the devices file
. add struct dev_use (du) to cmd->use_devices for each devices file entry
- dev_cache_scan - get /dev entries
. add struct device (dev) to dev_cache for each device on the system
- device_ids_match - match devices file entries to /dev entries
. match each du on cmd->use_devices to a dev in dev_cache, using device ID
. on match, set du->dev, dev->id, dev->flags MATCHED_USE_ID
- label_scan - read lvm headers and metadata from devices
. filters are applied, those that do not need data from the device
. filter-deviceid skips devs without MATCHED_USE_ID, i.e.
skips /dev entries that are not listed in the devices file
. read lvm label from dev
. filters are applied, those that use data from the device
. read lvm metadata from dev
. add info/vginfo structs for PVs/VGs (info is "lvmcache")
- device_ids_find_renamed_devs - handle devices with unstable devname ID
where devname changed
. this step only needed when devs do not have proper device IDs,
and their dev names change, e.g. after reboot sdb becomes sdc.
. detect incorrect match because PVID in the devices file entry
does not match the PVID found when the device was read above
. undo incorrect match between du and dev above
. search system devices for new location of PVID
. update devices file with new devnames for PVIDs on renamed devices
. label_scan the renamed devs
- continue with command processing
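Condensing the matching and filtering steps above into a
standalone sketch (hypothetical structures; only the
MATCHED_USE_ID flag name comes from the list above):

  #include <stdio.h>
  #include <string.h>

  #define MATCHED_USE_ID 0x1

  struct device { const char *id; unsigned flags; };
  struct dev_use { const char *idname; const char *pvid; struct device *dev; };

  int main(void)
  {
          struct device devs[] = {
                  { "naa.5000c500a1b2c3d4", 0 },
                  { "/dev/sdc", 0 },            /* renamed, was /dev/sdb */
          };
          struct dev_use uses[] = {
                  { "naa.5000c500a1b2c3d4", "PVID-AAA", NULL },
                  { "/dev/sdb",             "PVID-BBB", NULL },
          };
          unsigned i, j;

          /* device_ids_match: pair each devices file entry (du)
           * with a scanned device by its device ID */
          for (i = 0; i < 2; i++)
                  for (j = 0; j < 2; j++)
                          if (!strcmp(uses[i].idname, devs[j].id)) {
                                  uses[i].dev = &devs[j];
                                  devs[j].flags |= MATCHED_USE_ID;
                          }

          /* filter-deviceid: devices no entry matched are skipped */
          for (j = 0; j < 2; j++)
                  if (!(devs[j].flags & MATCHED_USE_ID))
                          printf("%s: skipped by filter-deviceid\n", devs[j].id);

          /* a devname entry whose device was not found (renamed) is
           * searched for by PVID and the devices file updated */
          for (i = 0; i < 2; i++)
                  if (!uses[i].dev)
                          printf("%s: stale devname, search devices for %s\n",
                                 uses[i].idname, uses[i].pvid);
          return 0;
  }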
2020-06-23 21:25:41 +03:00
memset ( parms - > devicesfile , 0 , sizeof ( parms - > devicesfile ) ) ;
if ( cmd - > devicesfile ) {
2024-04-09 12:43:14 +03:00
if ( ! _dm_strncpy ( parms - > devicesfile , cmd - > devicesfile ,
sizeof ( parms - > devicesfile ) ) ) {
2020-06-23 21:25:41 +03:00
log_error ( " devicefile name too long for lvmpolld " ) ;
return 0 ;
}
}
2015-03-17 20:40:43 +03:00
return 1 ;
}
2015-04-10 15:08:19 +03:00
int poll_daemon ( struct cmd_context * cmd , unsigned background ,
2024-05-03 15:01:59 +03:00
uint64_t lv_type , const struct poll_functions * poll_fns ,
2015-04-10 15:08:19 +03:00
const char * progress_title , struct poll_operation_id * id )
2015-03-17 20:31:41 +03:00
{
2015-03-17 20:40:43 +03:00
struct daemon_parms parms ;
if ( ! _daemon_parms_init ( cmd , & parms , background , poll_fns , progress_title , lv_type ) )
return_EINVALID_CMD_LINE ;
2015-05-09 02:59:18 +03:00
if ( lvmpolld_use ( ) )
return _lvmpoll_daemon ( cmd , id , & parms ) ;
2017-07-19 17:16:12 +03:00
/* classical polling allows only PVMOVE or 0 values */
parms . lv_type & = PVMOVE ;
return _poll_daemon ( cmd , id , & parms ) ;
2015-03-17 20:31:41 +03:00
}