/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "base/memory/zalloc.h"
#include "lib/misc/lib.h"
#include "lib/cache/lvmcache.h"
#include "lib/commands/toolcontext.h"
#include "lib/device/dev-cache.h"
#include "lib/locking/locking.h"
#include "lib/metadata/metadata.h"
#include "lib/mm/memlock.h"
#include "lib/datastruct/str_list.h"
#include "lib/format_text/format-text.h"
#include "lib/config/config.h"

/* One per device */
struct lvmcache_info {
	struct dm_list list;	/* Join VG members together */
	struct dm_list mdas;	/* list head for metadata areas */
	struct dm_list das;	/* list head for data areas */
	struct dm_list bas;	/* list head for bootloader areas */
	struct dm_list bad_mdas;	/* list head for bad metadata areas */
	struct lvmcache_vginfo *vginfo;	/* NULL == unknown */
	struct label *label;
	const struct format_type *fmt;
	struct device *dev;
	uint64_t device_size;	/* Bytes */
	uint32_t ext_version;	/* Extension version */
	uint32_t ext_flags;	/* Extension flags */
	uint32_t status;
	bool mda1_bad;		/* label scan found bad metadata in mda1 */
	bool mda2_bad;		/* label scan found bad metadata in mda2 */
	bool summary_seqno_mismatch;	/* two mdas on this dev have mismatching metadata */
	int summary_seqno;	/* vg seqno found on this dev during scan */
	int mda1_seqno;
	int mda2_seqno;
};

/* One per VG */
struct lvmcache_vginfo {
	struct dm_list list;	/* Join these vginfos together */
	struct dm_list infos;	/* List head for lvmcache_infos */
	struct dm_list outdated_infos;	/* vg_read moves info from infos to outdated_infos */
	const struct format_type *fmt;
	char *vgname;		/* "" == orphan */
	uint32_t status;
	char vgid[ID_LEN + 1];
	char _padding[7];
	struct lvmcache_vginfo *next;	/* Another VG with same name? */
	char *creation_host;
	char *system_id;
	char *lock_type;
	uint32_t mda_checksum;
	size_t mda_size;
	int seqno;
	bool scan_summary_mismatch;	/* vgsummary from devs had mismatching seqno or checksum */
};

static struct dm_hash_table *_pvid_hash = NULL;
static struct dm_hash_table *_vgid_hash = NULL;
static struct dm_hash_table *_vgname_hash = NULL;

static DM_LIST_INIT(_vginfos);
static DM_LIST_INIT(_found_duplicate_devs);
static DM_LIST_INIT(_unused_duplicate_devs);
static DM_LIST_INIT(_prev_unused_duplicate_devs);

static int _vgs_locked = 0;
static int _found_duplicate_pvs = 0;	/* If we never see a duplicate PV we can skip checking for them later. */
static int _found_duplicate_vgnames = 0;

int lvmcache_init(struct cmd_context *cmd)
{
	/*
	 * FIXME add a proper lvmcache_locking_reset() that
	 * resets the cache so no previous locks are locked
	 */
	_vgs_locked = 0;

	dm_list_init(&_vginfos);
	dm_list_init(&_found_duplicate_devs);
	dm_list_init(&_unused_duplicate_devs);
	dm_list_init(&_prev_unused_duplicate_devs);

	if (!(_vgname_hash = dm_hash_create(128)))
		return 0;

	if (!(_vgid_hash = dm_hash_create(128)))
		return 0;

	if (!(_pvid_hash = dm_hash_create(128)))
		return 0;

	return 1;
}

void lvmcache_lock_vgname(const char *vgname, int read_only __attribute__((unused)))
{
	_vgs_locked++;
}

void lvmcache_unlock_vgname(const char *vgname)
{
	/* FIXME Do this per-VG */
	if (!--_vgs_locked) {
		dev_size_seqno_inc(); /* invalidate all cached dev sizes */
	}
}

int lvmcache_vgs_locked(void)
{
	return _vgs_locked;
}

/*
 * When lvmcache sees a duplicate PV, this is set.
 * process_each_pv() can avoid searching for duplicates
 * by checking this and seeing that no duplicate PVs exist.
 *
 * found_duplicate_pvs tells the process_each_pv code
 * to search the devices list for duplicates, so that
 * devices can be processed together with their
 * duplicates (while processing the VG, rather than
 * reporting pv->dev under the VG, and its duplicate
 * outside the VG context.)
 */
int lvmcache_found_duplicate_pvs(void)
{
	return _found_duplicate_pvs;
}
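
/*
 * Illustrative sketch, not part of the original file: process_each-style
 * code can use this flag as a cheap gate before doing any duplicate
 * handling.  _handle_duplicates() is a hypothetical stand-in for that work.
 */
#if 0
static void _example_gate_duplicate_handling(struct cmd_context *cmd)
{
	/* Set during label scan; zero means no duplicate PVIDs were seen. */
	if (!lvmcache_found_duplicate_pvs())
		return;

	_handle_duplicates(cmd);	/* hypothetical */
}
#endif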

int lvmcache_found_duplicate_vgnames(void)
{
	return _found_duplicate_vgnames;
}

int lvmcache_get_unused_duplicate_devs(struct cmd_context *cmd, struct dm_list *head)
{
	struct device_list *devl, *devl2;

	dm_list_iterate_items(devl, &_unused_duplicate_devs) {
		if (!(devl2 = dm_pool_alloc(cmd->mem, sizeof(*devl2)))) {
			log_error("device_list element allocation failed");
			return 0;
		}
		devl2->dev = devl->dev;
		dm_list_add(head, &devl2->list);
	}
	return 1;
}
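
/*
 * Illustrative sketch, not part of the original file: a caller copies the
 * unused duplicates into its own list and logs each one.  Only functions
 * defined in this file (plus dev_name) are used.
 */
#if 0
static void _example_list_duplicates(struct cmd_context *cmd)
{
	struct dm_list devs;
	struct device_list *devl;

	dm_list_init(&devs);

	if (!lvmcache_get_unused_duplicate_devs(cmd, &devs))
		return;

	dm_list_iterate_items(devl, &devs)
		log_debug("unused duplicate: %s", dev_name(devl->dev));
}
#endif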

void lvmcache_remove_unchosen_duplicate(struct device *dev)
{
	struct device_list *devl;

	dm_list_iterate_items(devl, &_unused_duplicate_devs) {
		if (devl->dev == dev) {
			dm_list_del(&devl->list);
			return;
		}
	}
}

static void _destroy_duplicate_device_list(struct dm_list *head)
{
	struct device_list *devl, *devl2;

	dm_list_iterate_items_safe(devl, devl2, head) {
		dm_list_del(&devl->list);
		free(devl);
	}
	dm_list_init(head);
}

bool lvmcache_has_bad_metadata(struct device *dev)
{
	struct lvmcache_info *info;

	if (!(info = lvmcache_info_from_pvid(dev->pvid, dev, 0))) {
		/* shouldn't happen */
		log_error("No lvmcache info for checking bad metadata on %s", dev_name(dev));
		return false;
	}

	if (info->mda1_bad || info->mda2_bad)
		return true;
	return false;
}

void lvmcache_save_bad_mda(struct lvmcache_info *info, struct metadata_area *mda)
{
	if (mda->mda_num == 1)
		info->mda1_bad = true;
	else if (mda->mda_num == 2)
		info->mda2_bad = true;
	dm_list_add(&info->bad_mdas, &mda->list);
}

void lvmcache_get_bad_mdas(struct cmd_context *cmd,
			   const char *vgname, const char *vgid,
			   struct dm_list *bad_mda_list)
{
	struct lvmcache_vginfo *vginfo;
	struct lvmcache_info *info;
	struct mda_list *mdal;
	struct metadata_area *mda, *mda2;

	if (!(vginfo = lvmcache_vginfo_from_vgname(vgname, vgid))) {
		log_error(INTERNAL_ERROR "lvmcache_get_bad_mdas no vginfo %s", vgname);
		return;
	}

	dm_list_iterate_items(info, &vginfo->infos) {
		dm_list_iterate_items_safe(mda, mda2, &info->bad_mdas) {
			if (!(mdal = zalloc(sizeof(*mdal))))
				continue;
			mdal->mda = mda;
			dm_list_add(bad_mda_list, &mdal->list);
		}
	}
}
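
/*
 * Illustrative sketch, not part of the original file: collecting the bad
 * mdas found by the label scan for one VG.  The vg->name/vg->id fields are
 * assumed from struct volume_group; a real caller would attempt repair on
 * each entry.
 */
#if 0
static void _example_count_bad_mdas(struct cmd_context *cmd, struct volume_group *vg)
{
	struct dm_list bad_mda_list;
	struct mda_list *mdal;
	unsigned count = 0;

	dm_list_init(&bad_mda_list);
	lvmcache_get_bad_mdas(cmd, vg->name, (const char *)&vg->id, &bad_mda_list);

	dm_list_iterate_items(mdal, &bad_mda_list)
		count++;

	log_warn("WARNING: VG %s has %u bad metadata area(s).", vg->name, count);
}
#endif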

static void _vginfo_attach_info(struct lvmcache_vginfo *vginfo,
				struct lvmcache_info *info)
{
	if (!vginfo)
		return;

	info->vginfo = vginfo;
	dm_list_add(&vginfo->infos, &info->list);
}

static void _vginfo_detach_info(struct lvmcache_info *info)
{
	if (!dm_list_empty(&info->list)) {
		dm_list_del(&info->list);
		dm_list_init(&info->list);
	}

	info->vginfo = NULL;
}

/* If vgid supplied, require a match. */
struct lvmcache_vginfo *lvmcache_vginfo_from_vgname(const char *vgname, const char *vgid)
{
	struct lvmcache_vginfo *vginfo;

	if (!vgname)
		return lvmcache_vginfo_from_vgid(vgid);

	if (!_vgname_hash) {
		log_debug_cache(INTERNAL_ERROR "Internal lvmcache is not yet initialized.");
		return NULL;
	}

	if (!(vginfo = dm_hash_lookup(_vgname_hash, vgname))) {
		log_debug_cache("lvmcache has no info for vgname \"%s\"%s" FMTVGID ".",
				vgname, (vgid) ? " with VGID " : "", (vgid) ? : "");
		return NULL;
	}

	if (vgid)
		do
			if (!strncmp(vgid, vginfo->vgid, ID_LEN))
				return vginfo;
		while ((vginfo = vginfo->next));

	if (!vginfo)
		log_debug_cache("lvmcache has not found vgname \"%s\"%s" FMTVGID ".",
				vgname, (vgid) ? " with VGID " : "", (vgid) ? : "");

	return vginfo;
}
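
/*
 * Illustrative sketch, not part of the original file: a name-only lookup
 * returns the first vginfo with that name; when duplicate VG names exist
 * (vginfo->next is set), repeating the lookup with a vgid disambiguates.
 */
#if 0
static struct lvmcache_vginfo *_example_lookup(const char *vgname, const char *vgid)
{
	struct lvmcache_vginfo *vginfo;

	if (!(vginfo = lvmcache_vginfo_from_vgname(vgname, NULL)))
		return NULL;

	if (vginfo->next)
		vginfo = lvmcache_vginfo_from_vgname(vgname, vgid);

	return vginfo;
}
#endif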

const struct format_type *lvmcache_fmt_from_vgname(struct cmd_context *cmd,
						   const char *vgname, const char *vgid,
						   unsigned revalidate_labels)
{
	struct lvmcache_vginfo *vginfo;
	struct lvmcache_info *info;
	struct dm_list *devh, *tmp;
	struct dm_list devs;
	struct device_list *devl;
	char vgid_found[ID_LEN + 1] __attribute__((aligned(8)));

	if (!(vginfo = lvmcache_vginfo_from_vgname(vgname, vgid))) {
		stack;
		return NULL;
	}

	/*
	 * If this function is called repeatedly, only the first one needs to revalidate.
	 */
	if (!revalidate_labels)
		goto out;

	/*
	 * This function is normally called before reading metadata so
	 * we check cached labels here.  Unfortunately vginfo is volatile.
	 */
	dm_list_init(&devs);
	dm_list_iterate_items(info, &vginfo->infos) {
		if (!(devl = malloc(sizeof(*devl)))) {
			log_error("device_list element allocation failed");
			return NULL;
		}
		devl->dev = info->dev;
		dm_list_add(&devs, &devl->list);
	}

	memcpy(vgid_found, vginfo->vgid, sizeof(vgid_found));

	dm_list_iterate_safe(devh, tmp, &devs) {
		devl = dm_list_item(devh, struct device_list);
		label_read(devl->dev);
		dm_list_del(&devl->list);
		free(devl);
	}

	/* If vginfo changed, caller needs to rescan */
	if (!(vginfo = lvmcache_vginfo_from_vgname(vgname, vgid_found)) ||
	    strncmp(vginfo->vgid, vgid_found, ID_LEN))
		return NULL;

out:
	return vginfo->fmt;
}

struct lvmcache_vginfo *lvmcache_vginfo_from_vgid(const char *vgid)
{
	struct lvmcache_vginfo *vginfo;
	char id[ID_LEN + 1] __attribute__((aligned(8)));

	if (!_vgid_hash || !vgid) {
		log_debug_cache(INTERNAL_ERROR "Internal cache cannot lookup vgid.");
		return NULL;
	}

	/* vgid not necessarily NULL-terminated */
	(void) dm_strncpy(id, vgid, sizeof(id));

	if (!(vginfo = dm_hash_lookup(_vgid_hash, id))) {
		log_debug_cache("lvmcache has no info for vgid \"%s\"", id);
		return NULL;
	}

	return vginfo;
}

const char *lvmcache_vgname_from_vgid(struct dm_pool *mem, const char *vgid)
{
	struct lvmcache_vginfo *vginfo;
	const char *vgname = NULL;

	if ((vginfo = lvmcache_vginfo_from_vgid(vgid)))
		vgname = vginfo->vgname;

	if (mem && vgname)
		return dm_pool_strdup(mem, vgname);

	return vgname;
}

const char *lvmcache_vgid_from_vgname(struct cmd_context *cmd, const char *vgname)
{
	struct lvmcache_vginfo *vginfo;

	if (!(vginfo = dm_hash_lookup(_vgname_hash, vgname)))
		return_NULL;

	if (!vginfo->next)
		return dm_pool_strdup(cmd->mem, vginfo->vgid);

	/*
	 * There are multiple VGs with this name to choose from.
	 * Return an error because we don't know which VG is intended.
	 */
	return NULL;
}
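
/*
 * Illustrative sketch, not part of the original file: translating between
 * vgname and vgid when a caller starts with only the name.
 */
#if 0
static void _example_translate(struct cmd_context *cmd, const char *vgname)
{
	const char *vgid, *name;

	/* Returns NULL if the name is unknown or ambiguous. */
	if (!(vgid = lvmcache_vgid_from_vgname(cmd, vgname)))
		return;

	/* Reverse lookup; passing a pool returns a stable copy. */
	if ((name = lvmcache_vgname_from_vgid(cmd->mem, vgid)))
		log_debug("VG %s has vgid %s.", name, vgid);
}
#endif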

/*
 * If valid_only is set, data will only be returned if the cached data is
 * known still to be valid.
 *
 * When the device being worked with is known, pass that dev as the second arg.
 * This ensures that when duplicates exist, the wrong dev isn't used.
 */
struct lvmcache_info *lvmcache_info_from_pvid(const char *pvid, struct device *dev, int valid_only)
{
	struct lvmcache_info *info;
	char id[ID_LEN + 1] __attribute__((aligned(8)));

	if (!_pvid_hash || !pvid)
		return NULL;

	(void) dm_strncpy(id, pvid, sizeof(id));

	if (!(info = dm_hash_lookup(_pvid_hash, id)))
		return NULL;

	/*
	 * When handling duplicate PVs, more than one device can have this pvid.
	 */
	if (dev && info->dev && (info->dev != dev)) {
		log_debug_cache("Ignoring lvmcache info for dev %s because dev %s was requested for PVID %s.",
				dev_name(info->dev), dev_name(dev), id);
		return NULL;
	}

	return info;
}
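
/*
 * Illustrative sketch, not part of the original file: why the dev argument
 * matters.  With duplicate PVs, a pvid-only lookup returns whichever device
 * lvmcache chose; passing the dev rejects a mismatched (unchosen) device.
 */
#if 0
static int _example_dev_matches_cache(struct device *dev)
{
	struct lvmcache_info *info;

	/* NULL if dev is an unchosen duplicate for this PVID. */
	if (!(info = lvmcache_info_from_pvid(dev->pvid, dev, 0)))
		return 0;

	return info->dev == dev;
}
#endif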

const struct format_type *lvmcache_fmt_from_info(struct lvmcache_info *info)
{
	return info->fmt;
}

const char *lvmcache_vgname_from_info(struct lvmcache_info *info)
{
	if (info->vginfo)
		return info->vginfo->vgname;

	return NULL;
}

/*
 * Check if any PVs in vg->pvs have the same PVID as any
 * entries in _unused_duplicate_devices.
 */
int vg_has_duplicate_pvs(struct volume_group *vg)
{
	struct pv_list *pvl;
	struct device_list *devl;

	dm_list_iterate_items(pvl, &vg->pvs) {
		dm_list_iterate_items(devl, &_unused_duplicate_devs) {
			if (id_equal(&pvl->pv->id, (const struct id *) devl->dev->pvid))
				return 1;
		}
	}
	return 0;
}
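
/*
 * Illustrative sketch, not part of the original file: command code might
 * use this check to refuse changes to a VG containing duplicate PVs, in
 * the spirit of devices/allow_changes_with_duplicate_pvs.
 */
#if 0
static int _example_vg_is_safe_to_change(struct volume_group *vg)
{
	if (!vg_has_duplicate_pvs(vg))
		return 1;

	log_error("Cannot change VG %s while duplicate PVs exist.", vg->name);
	return 0;
}
#endif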

int dev_in_device_list(struct device *dev, struct dm_list *head)
{
	struct device_list *devl;

	dm_list_iterate_items(devl, head) {
		if (devl->dev == dev)
			return 1;
	}
	return 0;
}

int lvmcache_dev_is_unchosen_duplicate(struct device *dev)
{
	return dev_in_device_list(dev, &_unused_duplicate_devs);
}

/*
 * Treat some duplicate devs as if they were filtered out by filters.
 * The actual filters are evaluated too early, before a complete
 * picture of all PVs is available, to eliminate these duplicates.
 *
 * By removing some duplicates from unused_duplicate_devs here, we remove
 * the restrictions that are placed on using duplicate devs or VGs with
 * duplicate devs.
 *
 * In cases where we know that two duplicates refer to the same underlying
 * storage, and we know which dev path to use, it's best for us to just
 * use that one preferred device path and ignore the others.  It is the cases
 * where we are unsure whether dups refer to the same underlying storage where
 * we need to keep the unused duplicate referenced in the
 * unused_duplicate_devs list, and restrict what we allow done with it.
 *
 * In the case of md components, we usually filter these out in filter-md,
 * but in the special case of md superblock version 1.0 where the superblock
 * is at the end of the device, filter-md doesn't always eliminate them
 * first, so we eliminate them here.
 *
 * There may be other kinds of duplicates that we want to eliminate at
 * this point (using the knowledge from the scan) that we couldn't
 * eliminate in the filters prior to the scan.
 */
static void _filter_duplicate_devs(struct cmd_context *cmd)
{
	struct dev_types *dt = cmd->dev_types;
	struct lvmcache_info *info;
	struct device_list *devl, *devl2;

	dm_list_iterate_items_safe(devl, devl2, &_unused_duplicate_devs) {
		if (!(info = lvmcache_info_from_pvid(devl->dev->pvid, NULL, 0)))
			continue;

		if (MAJOR(info->dev->dev) == dt->md_major) {
			log_debug_devs("Ignoring md component duplicate %s", dev_name(devl->dev));
			dm_list_del(&devl->list);
			free(devl);
		}
	}

	if (dm_list_empty(&_unused_duplicate_devs))
		_found_duplicate_pvs = 0;
}

static void _warn_duplicate_devs(struct cmd_context *cmd)
{
	char uuid[64] __attribute__((aligned(8)));
	struct lvmcache_info *info;
	struct device_list *devl, *devl2;

	dm_list_iterate_items_safe(devl, devl2, &_unused_duplicate_devs) {
		if (!id_write_format((const struct id *) devl->dev->pvid, uuid, sizeof(uuid)))
			stack;

		log_warn("WARNING: Not using device %s for PV %s.", dev_name(devl->dev), uuid);
	}

	dm_list_iterate_items_safe(devl, devl2, &_unused_duplicate_devs) {
		/* info for the preferred device that we're actually using */
		if (!(info = lvmcache_info_from_pvid(devl->dev->pvid, NULL, 0)))
			continue;

		if (!id_write_format((const struct id *) info->dev->pvid, uuid, sizeof(uuid)))
			stack;

		log_warn("WARNING: PV %s prefers device %s because %s.",
			 uuid, dev_name(info->dev), info->dev->duplicate_prefer_reason);
	}
}

/*
 * Compare _found_duplicate_devs entries with the corresponding duplicate dev
 * in lvmcache.  There may be multiple duplicates in _found_duplicate_devs for
 * a given pvid.  If a dev from _found_duplicate_devs is preferred over the dev
 * in lvmcache, then drop the dev in lvmcache and rescan the preferred dev to
 * add it to lvmcache.
 *
 * _found_duplicate_devs: duplicate devs found during initial scan.
 * These are compared to lvmcache devs to see if any are preferred.
 *
 * _unused_duplicate_devs: duplicate devs not chosen to be used.
 * These are _found_duplicate_devs entries that were not chosen,
 * or unpreferred lvmcache devs that were dropped.
 *
 * del_cache_devs: devices to drop from lvmcache
 * add_cache_devs: devices to scan to add to lvmcache
 */
static void _choose_preferred_devs(struct cmd_context *cmd,
				   struct dm_list *del_cache_devs,
				   struct dm_list *add_cache_devs)
{
	const char *reason;
	struct dm_list altdevs;
	struct dm_list new_unused;
	struct dev_types *dt = cmd->dev_types;
	struct device_list *devl, *devl_safe, *alt, *del;
	struct lvmcache_info *info;
	struct device *dev1, *dev2;
	uint32_t dev1_major, dev1_minor, dev2_major, dev2_minor;
	uint64_t info_size, dev1_size, dev2_size;
	int in_subsys1, in_subsys2;
	int is_dm1, is_dm2;
	int has_fs1, has_fs2;
	int has_lv1, has_lv2;
	int same_size1, same_size2;
	int prev_unchosen1, prev_unchosen2;
	int change;

	dm_list_init(&new_unused);

	/*
	 * Create a list of all alternate devs for the same pvid: altdevs.
	 */
next:
	dm_list_init(&altdevs);
	alt = NULL;

	dm_list_iterate_items_safe(devl, devl_safe, &_found_duplicate_devs) {
		if (!alt) {
			dm_list_move(&altdevs, &devl->list);
			alt = devl;
		} else {
			if (!strcmp(alt->dev->pvid, devl->dev->pvid))
				dm_list_move(&altdevs, &devl->list);
		}
	}

	if (!alt) {
		_destroy_duplicate_device_list(&_unused_duplicate_devs);
		dm_list_splice(&_unused_duplicate_devs, &new_unused);
		return;
	}

	/*
	 * Find the device for the pvid that's currently in lvmcache.
	 */
	if (!(info = lvmcache_info_from_pvid(alt->dev->pvid, NULL, 0))) {
		/*
		 * This will happen if a duplicate dev has been dropped already,
		 * e.g. it was found to be an md component.
		 */
		log_debug("PVID %s on duplicate device %s not found in cache.",
			  alt->dev->pvid, dev_name(alt->dev));
		goto next;
	}

	/*
	 * Compare devices for the given pvid to find one that's preferred.
	 * "dev1" is the currently preferred device, starting with the device
	 * currently in lvmcache.
	 */
	dev1 = info->dev;

	dm_list_iterate_items(devl, &altdevs) {
		dev2 = devl->dev;

		if (dev1 == dev2) {
			/* This shouldn't happen */
			log_warn("Same duplicate device repeated %s", dev_name(dev1));
			continue;
		}

		prev_unchosen1 = dev_in_device_list(dev1, &_unused_duplicate_devs);
		prev_unchosen2 = dev_in_device_list(dev2, &_unused_duplicate_devs);

		if (!prev_unchosen1 && !prev_unchosen2) {
			/*
			 * The prev list saves the unchosen preference across
			 * lvmcache_destroy.  Sometimes a single command will
			 * fill lvmcache, destroy it, and refill it, and we
			 * want the same duplicate preference to be preserved
			 * in each instance of lvmcache for a single command.
			 */
			prev_unchosen1 = dev_in_device_list(dev1, &_prev_unused_duplicate_devs);
			prev_unchosen2 = dev_in_device_list(dev2, &_prev_unused_duplicate_devs);
		}
dev1_major = MAJOR ( dev1 - > dev ) ;
dev1_minor = MINOR ( dev1 - > dev ) ;
dev2_major = MAJOR ( dev2 - > dev ) ;
dev2_minor = MINOR ( dev2 - > dev ) ;
2016-03-29 21:29:39 +03:00
if ( ! dev_get_size ( dev1 , & dev1_size ) )
dev1_size = 0 ;
if ( ! dev_get_size ( dev2 , & dev2_size ) )
dev2_size = 0 ;
has_lv1 = ( dev1 - > flags & DEV_USED_FOR_LV ) ? 1 : 0 ;
has_lv2 = ( dev2 - > flags & DEV_USED_FOR_LV ) ? 1 : 0 ;
2016-02-09 22:06:27 +03:00
in_subsys1 = dev_subsystem_part_major ( dt , dev1 ) ;
in_subsys2 = dev_subsystem_part_major ( dt , dev2 ) ;
is_dm1 = dm_is_dm_major ( dev1_major ) ;
is_dm2 = dm_is_dm_major ( dev2_major ) ;
has_fs1 = dm_device_has_mounted_fs ( dev1_major , dev1_minor ) ;
has_fs2 = dm_device_has_mounted_fs ( dev2_major , dev2_minor ) ;
2016-03-29 21:29:39 +03:00
info_size = info - > device_size > > SECTOR_SHIFT ;
same_size1 = ( dev1_size = = info_size ) ;
same_size2 = ( dev2_size = = info_size ) ;
log_debug_cache ( " PV %s compare duplicates: %s %u:%u. %s %u:%u. " ,
devl - > dev - > pvid ,
dev_name ( dev1 ) , dev1_major , dev1_minor ,
dev_name ( dev2 ) , dev2_major , dev2_minor ) ;
log_debug_cache ( " PV %s: wants size %llu. %s is %llu. %s is %llu. " ,
devl - > dev - > pvid ,
( unsigned long long ) info_size ,
dev_name ( dev1 ) , ( unsigned long long ) dev1_size ,
dev_name ( dev2 ) , ( unsigned long long ) dev2_size ) ;
2016-06-06 23:20:55 +03:00
log_debug_cache ( " PV %s: %s was prev %s. %s was prev %s. " ,
devl - > dev - > pvid ,
dev_name ( dev1 ) , prev_unchosen1 ? " not chosen " : " <none> " ,
dev_name ( dev2 ) , prev_unchosen2 ? " not chosen " : " <none> " ) ;
2016-03-29 21:29:39 +03:00
log_debug_cache ( " PV %s: %s %s subsystem. %s %s subsystem. " ,
devl - > dev - > pvid ,
dev_name ( dev1 ) , in_subsys1 ? " is in " : " is not in " ,
dev_name ( dev2 ) , in_subsys2 ? " is in " : " is not in " ) ;
log_debug_cache ( " PV %s: %s %s dm. %s %s dm. " ,
devl - > dev - > pvid ,
dev_name ( dev1 ) , is_dm1 ? " is " : " is not " ,
dev_name ( dev2 ) , is_dm2 ? " is " : " is not " ) ;
log_debug_cache ( " PV %s: %s %s mounted fs. %s %s mounted fs. " ,
devl - > dev - > pvid ,
dev_name ( dev1 ) , has_fs1 ? " has " : " has no " ,
dev_name ( dev2 ) , has_fs2 ? " has " : " has no " ) ;
log_debug_cache ( " PV %s: %s %s LV. %s %s LV. " ,
devl - > dev - > pvid ,
dev_name ( dev1 ) , has_lv1 ? " is used for " : " is not used for " ,
dev_name ( dev2 ) , has_lv2 ? " is used for " : " is not used for " ) ;
2016-02-09 22:06:27 +03:00
change = 0 ;
2016-06-06 23:20:55 +03:00
if ( prev_unchosen1 & & ! prev_unchosen2 ) {
/* change to 2 (NB when unchosen is set we unprefer) */
change = 1 ;
reason = " of previous preference " ;
} else if ( prev_unchosen2 & & ! prev_unchosen1 ) {
/* keep 1 (NB when unchosen is set we unprefer) */
reason = " of previous preference " ;
} else if ( has_lv1 & & ! has_lv2 ) {
2016-02-09 22:06:27 +03:00
/* keep 1 */
2016-03-29 21:29:39 +03:00
reason = " device is used by LV " ;
} else if ( has_lv2 & & ! has_lv1 ) {
/* change to 2 */
change = 1 ;
reason = " device is used by LV " ;
} else if ( same_size1 & & ! same_size2 ) {
/* keep 1 */
reason = " device size is correct " ;
} else if ( same_size2 & & ! same_size1 ) {
/* change to 2 */
change = 1 ;
reason = " device size is correct " ;
} else if ( has_fs1 & & ! has_fs2 ) {
/* keep 1 */
reason = " device has fs mounted " ;
2016-02-09 22:06:27 +03:00
} else if ( has_fs2 & & ! has_fs1 ) {
/* change to 2 */
change = 1 ;
2016-03-29 21:29:39 +03:00
reason = " device has fs mounted " ;
2016-02-09 22:06:27 +03:00
} else if ( is_dm1 & & ! is_dm2 ) {
/* keep 1 */
2016-03-29 21:29:39 +03:00
reason = " device is in dm subsystem " ;
2016-02-09 22:06:27 +03:00
} else if ( is_dm2 & & ! is_dm1 ) {
/* change to 2 */
change = 1 ;
2016-03-29 21:29:39 +03:00
reason = " device is in dm subsystem " ;
2016-02-09 22:06:27 +03:00
} else if ( in_subsys1 & & ! in_subsys2 ) {
/* keep 1 */
2016-03-29 21:29:39 +03:00
reason = " device is in subsystem " ;
2016-02-09 22:06:27 +03:00
} else if ( in_subsys2 & & ! in_subsys1 ) {
/* change to 2 */
change = 1 ;
2016-03-29 21:29:39 +03:00
reason = " device is in subsystem " ;
} else {
reason = " device was seen first " ;
2016-02-09 22:06:27 +03:00
}
if ( change ) {
dev1 = dev2 ;
alt = devl ;
}
2018-05-21 22:20:19 +03:00
dev1 - > duplicate_prefer_reason = reason ;
2016-03-29 21:29:39 +03:00
}
2016-02-09 22:06:27 +03:00
if ( dev1 ! = info - > dev ) {
2016-03-29 21:29:39 +03:00
log_debug_cache ( " PV %s: switching to device %s instead of device %s. " ,
dev1 - > pvid , dev_name ( dev1 ) , dev_name ( info - > dev ) ) ;
2016-02-09 22:06:27 +03:00
/*
* Move the preferred device from altdevs to add_cache_devs .
* Create a del_cache_devs entry for the current lvmcache
* device to drop .
*/
dm_list_move ( add_cache_devs , & alt - > list ) ;
2018-06-08 15:40:53 +03:00
if ( ( del = zalloc ( sizeof ( * del ) ) ) ) {
2016-02-09 22:06:27 +03:00
del - > dev = info - > dev ;
dm_list_add ( del_cache_devs , & del - > list ) ;
}
2016-03-29 21:29:39 +03:00
2016-02-09 22:06:27 +03:00
} else {
2016-03-29 21:29:39 +03:00
log_debug_cache ( " PV %s: keeping current device %s. " , dev1 - > pvid , dev_name ( info - > dev ) ) ;
2016-02-09 22:06:27 +03:00
}
/*
* alt devs not chosen are moved to _unused_duplicate_devs .
* del_cache_devs being dropped are moved to _unused_duplicate_devs
* after being dropped . So , _unused_duplicate_devs represents all
* duplicates not being used in lvmcache .
*/
2016-06-06 23:20:55 +03:00
dm_list_splice ( & new_unused , & altdevs ) ;
2016-02-09 22:06:27 +03:00
goto next ;
}
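/*
 * For reference, a hedged illustration (not from this file) of how the
 * devices/allow_changes_with_duplicate_pvs setting described above is
 * expressed in lvm.conf; the value shown is an example, not a claim
 * about the shipped default:
 *
 *   devices {
 *       allow_changes_with_duplicate_pvs = 0
 *   }
 */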
2018-02-07 22:14:08 +03:00
/*
* The initial label_scan at the start of the command is done without
* holding VG locks . Then for each VG identified during the label_scan ,
* vg_read ( vgname ) is called while holding the VG lock . The labels
* and metadata on this VG ' s devices could have changed between the
* initial unlocked label_scan and the current vg_read ( ) . So , we reread
* the labels / metadata for each device in the VG now that we hold the
* lock , and use this for processing the VG .
*
* A label scan is ultimately creating associations between devices
* and VGs so that when vg_read wants to get VG metadata , it knows
2018-04-19 00:29:42 +03:00
* which devices to read .
*
* It ' s possible that a VG is being modified during the first label
* scan , causing the scan to see inconsistent metadata on different
* devs in the VG . It ' s possible that those modifications are
* adding / removing devs from the VG , in which case the device / VG
* associations in lvmcache after the scan are not correct .
* NB . It ' s even possible the VG was removed completely between
* label scan and here , in which case we ' d not find the VG in
* lvmcache after this rescan .
*
* A scan will also create an incorrect / incomplete picture of a VG
* when devices have no metadata areas . The scan does not use
* VG metadata to figure out that a dev with no metadata belongs
* to a particular VG , so a device with no mdas will not be linked
* to that VG after a scan .
2018-02-07 22:14:08 +03:00
*/
int lvmcache_label_rescan_vg ( struct cmd_context * cmd , const char * vgname , const char * vgid )
{
struct dm_list devs ;
2018-05-14 21:38:16 +03:00
struct device_list * devl , * devl2 ;
2018-05-03 00:58:49 +03:00
struct lvmcache_vginfo * vginfo ;
2018-05-14 21:45:55 +03:00
struct lvmcache_info * info ;
2018-02-07 22:14:08 +03:00
dm_list_init ( & devs ) ;
if ( ! ( vginfo = lvmcache_vginfo_from_vgname ( vgname , vgid ) ) )
return_0 ;
dm_list_iterate_items ( info , & vginfo - > infos ) {
2018-06-08 15:40:53 +03:00
if ( ! ( devl = malloc ( sizeof ( * devl ) ) ) ) {
2018-02-07 22:14:08 +03:00
log_error ( " device_list element allocation failed " ) ;
return 0 ;
}
devl - > dev = info - > dev ;
dm_list_add ( & devs , & devl - > list ) ;
}
2018-05-14 21:45:55 +03:00
/* Delete info for each dev; deleting the last info will delete the vginfo. */
dm_list_iterate_items ( devl , & devs )
lvmcache_del_dev ( devl - > dev ) ;
2018-02-07 22:14:08 +03:00
2018-04-19 00:29:42 +03:00
/* Dropping the last info struct is supposed to drop vginfo. */
if ( ( vginfo = lvmcache_vginfo_from_vgname ( vgname , vgid ) ) )
log_warn ( " VG info not dropped before rescan of %s " , vgname ) ;
/* FIXME: should we also rescan unused_duplicate_devs for devs
being rescanned here and then repeat resolving the duplicates ? */
2018-05-04 01:12:07 +03:00
label_scan_devs ( cmd , cmd - > filter , & devs ) ;
2018-02-07 22:14:08 +03:00
2018-05-14 21:38:16 +03:00
dm_list_iterate_items_safe ( devl , devl2 , & devs ) {
dm_list_del ( & devl - > list ) ;
2018-06-08 15:40:53 +03:00
free ( devl ) ;
2018-05-14 21:38:16 +03:00
}
2018-05-01 00:48:53 +03:00
if ( ! ( vginfo = lvmcache_vginfo_from_vgname ( vgname , vgid ) ) ) {
log_warn ( " VG info not found after rescan of %s " , vgname ) ;
return 0 ;
}
2018-02-07 22:14:08 +03:00
return 1 ;
}
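/*
 * A minimal sketch (assumed caller, not code from this file) of the
 * sequence the comment above describes; the locking step is shown
 * abstractly because the exact lock call lives outside this file:
 *
 *   lvmcache_label_scan(cmd);                    // initial unlocked scan of all devs
 *   // ... acquire the VG lock for vgname ...
 *   lvmcache_label_rescan_vg(cmd, vgname, vgid); // reread labels under the lock
 *   // ... vg_read() can now use metadata consistent with the lock ...
 */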
2018-04-09 21:40:49 +03:00
/*
* Uses label_scan to populate lvmcache with ' vginfo ' struct for each VG
* and associated ' info ' structs for those VGs . Only VG summary information
* is used to assemble the vginfo / info during the scan , so the resulting
* representation of VG / PV state is incomplete and even incorrect .
* Specifically , PVs with no MDAs are considered orphans and placed in the
* orphan vginfo by lvmcache_label_scan . This is corrected during the
* processing phase as each vg_read ( ) uses VG metadata for each VG to correct
* the lvmcache state , i . e . it moves no - MDA PVs from the orphan vginfo onto
* the correct vginfo . Once vg_read ( ) is finished for all VGs , all of the
* incorrectly placed PVs should have been moved from the orphan vginfo
* onto their correct vginfos , and the orphan vginfo should ( in theory )
* represent only real orphan PVs . ( Note : if lvmcache_label_scan is run
* after vg_read updates the lvmcache state , then the lvmcache will be
* incorrect again , so do not run lvmcache_label_scan during the
* processing phase . )
*
* TODO : in this label scan phase , don ' t stash no - MDA PVs into the
* orphan VG . We know that ' s a fiction , and it can have harmful / damaging
* results . Instead , put them into a temporary list where they can be
* pulled from later when vg_read uses metadata to resolve which VG
* they actually belong to .
*/
2015-12-01 23:09:01 +03:00
int lvmcache_label_scan ( struct cmd_context * cmd )
2002-11-18 16:53:58 +03:00
{
2016-02-09 22:06:27 +03:00
struct dm_list del_cache_devs ;
struct dm_list add_cache_devs ;
struct lvmcache_info * info ;
2018-04-06 21:05:17 +03:00
struct lvmcache_vginfo * vginfo ;
2016-02-09 22:06:27 +03:00
struct device_list * devl ;
2018-04-06 21:05:17 +03:00
int vginfo_count = 0 ;
2002-11-18 16:53:58 +03:00
int r = 0 ;
2018-04-06 21:05:17 +03:00
log_debug_cache ( " Finding VG info " ) ;
2018-04-26 22:41:57 +03:00
/* FIXME: can this happen? */
2018-12-04 23:06:46 +03:00
if ( ! cmd - > filter ) {
log_error ( " label scan is missing filter " ) ;
2002-11-18 16:53:58 +03:00
goto out ;
}
2018-04-26 22:48:13 +03:00
if ( ! refresh_filters ( cmd ) )
log_error ( " Scan failed to refresh device filter. " ) ;
2016-06-06 23:20:55 +03:00
/*
* Duplicates found during this label scan are added to _found_duplicate_devs ( ) .
*/
_destroy_duplicate_device_list ( & _found_duplicate_devs ) ;
2018-02-07 22:14:08 +03:00
/*
* Do the actual scanning . This populates lvmcache
* with infos / vginfos based on reading headers from
* each device , and a vg summary from each mda .
*
* Note that this will * skip * scanning a device if
* an info struct already exists in lvmcache for
* the device .
*/
label_scan ( cmd ) ;
2015-12-11 23:02:36 +03:00
2016-02-09 22:06:27 +03:00
/*
* _choose_preferred_devs ( ) returns :
*
* . del_cache_devs : a list of devs currently in lvmcache that should
* be removed from lvmcache because they will be replaced with
* alternative devs for the same PV .
*
* . add_cache_devs : a list of devs that are preferred over devs in
* lvmcache for the same PV . These devices should be rescanned to
* populate lvmcache from them .
*
* First remove lvmcache info for the devs to be dropped , then rescan
* the devs that are preferred to add them to lvmcache .
*
* Keep a complete list of all devs that are unused by moving the
* del_cache_devs onto _unused_duplicate_devs .
*/
if ( ! dm_list_empty ( & _found_duplicate_devs ) ) {
dm_list_init ( & del_cache_devs ) ;
dm_list_init ( & add_cache_devs ) ;
2018-04-06 21:05:17 +03:00
log_debug_cache ( " Resolving duplicate devices " ) ;
2016-02-09 22:06:27 +03:00
_choose_preferred_devs ( cmd , & del_cache_devs , & add_cache_devs ) ;
dm_list_iterate_items ( devl , & del_cache_devs ) {
log_debug_cache ( " Drop duplicate device %s in lvmcache " , dev_name ( devl - > dev ) ) ;
2016-06-06 22:04:17 +03:00
if ( ( info = lvmcache_info_from_pvid ( devl - > dev - > pvid , NULL , 0 ) ) )
2016-02-09 22:06:27 +03:00
lvmcache_del ( info ) ;
}
dm_list_iterate_items ( devl , & add_cache_devs ) {
log_debug_cache ( " Rescan preferred device %s for lvmcache " , dev_name ( devl - > dev ) ) ;
2018-05-11 22:16:49 +03:00
label_read ( devl - > dev ) ;
2016-02-09 22:06:27 +03:00
}
dm_list_splice ( & _unused_duplicate_devs , & del_cache_devs ) ;
2018-05-11 23:52:22 +03:00
/*
2018-05-21 22:20:19 +03:00
* This may remove some entries from the unused_duplicates list for
* devs that we know are the same underlying dev .
2018-05-11 23:52:22 +03:00
*/
_filter_duplicate_devs ( cmd ) ;
2018-05-21 22:20:19 +03:00
/*
* Warn about remaining duplicates that may actually be separate copies of
* the same device .
*/
_warn_duplicate_devs ( cmd ) ;
2016-02-09 22:06:27 +03:00
}
2002-11-18 16:53:58 +03:00
r = 1 ;
out :
2018-04-06 21:05:17 +03:00
dm_list_iterate_items ( vginfo , & _vginfos ) {
if ( is_orphan_vg ( vginfo - > vgname ) )
continue ;
vginfo_count + + ;
}
log_debug_cache ( " Found VG info for %d VGs " , vginfo_count ) ;
2002-11-18 16:53:58 +03:00
return r ;
}
2018-05-21 22:20:19 +03:00
/*
2018-07-10 21:39:29 +03:00
* lvmcache_label_scan ( ) detects duplicates in the basic label_scan ( ) , then
* filters out some dups , and chooses preferred duplicates to use .
2018-05-21 22:20:19 +03:00
*/
void lvmcache_pvscan_duplicate_check ( struct cmd_context * cmd )
{
struct device_list * devl ;
/* Check if label_scan() detected any dups. */
if ( ! _found_duplicate_pvs )
return ;
/*
* Once all the dups are identified , they are moved from the
* " found " list to the " unused " list to sort out .
*/
dm_list_splice ( & _unused_duplicate_devs , & _found_duplicate_devs ) ;
/*
* Remove items from the dups list that we know are the same
* underlying dev , e . g . md components , that we want to just ignore .
*/
_filter_duplicate_devs ( cmd ) ;
/*
2018-07-10 21:39:29 +03:00
* no more dups after ignoring some
2018-05-21 22:20:19 +03:00
*/
if ( ! _found_duplicate_pvs )
return ;
2018-07-10 21:39:29 +03:00
/* Duplicates are found where we would have to pick one. */
2018-05-21 22:20:19 +03:00
dm_list_iterate_items ( devl , & _unused_duplicate_devs )
log_warn ( " WARNING: found device with duplicate %s " , dev_name ( devl - > dev ) ) ;
}
2015-05-06 00:24:50 +03:00
int lvmcache_get_vgnameids ( struct cmd_context * cmd , int include_internal ,
struct dm_list * vgnameids )
{
struct vgnameid_list * vgnl ;
struct lvmcache_vginfo * vginfo ;
dm_list_iterate_items ( vginfo , & _vginfos ) {
if ( ! include_internal & & is_orphan_vg ( vginfo - > vgname ) )
continue ;
if ( ! ( vgnl = dm_pool_alloc ( cmd - > mem , sizeof ( * vgnl ) ) ) ) {
log_error ( " vgnameid_list allocation failed. " ) ;
return 0 ;
}
vgnl - > vgid = dm_pool_strdup ( cmd - > mem , vginfo - > vgid ) ;
vgnl - > vg_name = dm_pool_strdup ( cmd - > mem , vginfo - > vgname ) ;
if ( ! vgnl - > vgid | | ! vgnl - > vg_name ) {
log_error ( " vgnameid_list member allocation failed. " ) ;
return 0 ;
}
dm_list_add ( vgnameids , & vgnl - > list ) ;
}
return 1 ;
}
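/*
 * Hedged usage sketch (illustrative only, not part of the original file):
 * collect and print the VG name/ID pairs after a scan, using only the
 * fields of struct vgnameid_list that the function above populates.
 */
#if 0
static void _example_list_vgnameids(struct cmd_context *cmd)
{
	struct dm_list vgnameids;
	struct vgnameid_list *vgnl;

	dm_list_init(&vgnameids);

	if (!lvmcache_get_vgnameids(cmd, 0, &vgnameids))
		return;

	dm_list_iterate_items(vgnl, &vgnameids)
		log_debug_cache("VG %s has vgid %s", vgnl->vg_name, vgnl->vgid);
}
#endif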
2008-11-04 01:14:30 +03:00
struct dm_list * lvmcache_get_pvids ( struct cmd_context * cmd , const char * vgname ,
2006-04-21 23:12:41 +04:00
const char * vgid )
{
2008-11-04 01:14:30 +03:00
struct dm_list * pvids ;
2006-04-21 23:12:41 +04:00
struct lvmcache_vginfo * vginfo ;
struct lvmcache_info * info ;
if ( ! ( pvids = str_list_create ( cmd - > mem ) ) ) {
log_error ( " pvids list allocation failed " ) ;
return NULL ;
}
2012-02-10 05:28:27 +04:00
if ( ! ( vginfo = lvmcache_vginfo_from_vgname ( vgname , vgid ) ) )
2006-04-21 23:12:41 +04:00
return pvids ;
2008-11-04 01:14:30 +03:00
dm_list_iterate_items ( info , & vginfo - > infos ) {
2008-01-30 17:00:02 +03:00
if ( ! str_list_add ( cmd - > mem , pvids ,
2006-04-21 23:12:41 +04:00
dm_pool_strdup ( cmd - > mem , info - > dev - > pvid ) ) ) {
log_error ( " strlist allocation failed " ) ;
return NULL ;
}
}
return pvids ;
}
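/*
 * Hedged usage sketch (illustrative only): iterate the PVID strings
 * returned above. Assumes the str_list entries are struct dm_str_list
 * with a 'str' member, following libdevmapper's str_list convention.
 */
#if 0
static void _example_list_pvids(struct cmd_context *cmd, const char *vgname, const char *vgid)
{
	struct dm_list *pvids;
	struct dm_str_list *sl;

	if (!(pvids = lvmcache_get_pvids(cmd, vgname, vgid)))
		return;

	dm_list_iterate_items(sl, pvids)
		log_debug_cache("PV %s in VG %s", sl->str, vgname);
}
#endif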
2018-02-07 22:14:08 +03:00
int lvmcache_get_vg_devs ( struct cmd_context * cmd ,
struct lvmcache_vginfo * vginfo ,
struct dm_list * devs )
2002-11-18 16:53:58 +03:00
{
2003-07-05 02:34:56 +04:00
struct lvmcache_info * info ;
2018-02-07 22:14:08 +03:00
struct device_list * devl ;
2002-11-18 16:53:58 +03:00
2018-02-07 22:14:08 +03:00
dm_list_iterate_items ( info , & vginfo - > infos ) {
if ( ! ( devl = dm_pool_zalloc ( cmd - > mem , sizeof ( * devl ) ) ) )
return_0 ;
2012-02-23 17:11:07 +04:00
2018-02-07 22:14:08 +03:00
devl - > dev = info - > dev ;
dm_list_add ( devs , & devl - > list ) ;
2002-11-18 16:53:58 +03:00
}
2018-02-07 22:14:08 +03:00
return 1 ;
2011-06-01 23:29:31 +04:00
}
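/*
 * Hedged usage sketch (illustrative only): gather the devices lvmcache
 * currently associates with a VG and print their names, using the
 * function above together with calls already visible in this file.
 */
#if 0
static void _example_list_vg_devs(struct cmd_context *cmd, struct lvmcache_vginfo *vginfo)
{
	struct dm_list devs;
	struct device_list *devl;

	dm_list_init(&devs);

	if (!lvmcache_get_vg_devs(cmd, vginfo, &devs))
		return;

	dm_list_iterate_items(devl, &devs)
		log_debug_cache("VG %s uses device %s", vginfo->vgname, dev_name(devl->dev));
}
#endif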
2018-02-07 22:14:08 +03:00
static struct device * _device_from_pvid ( const struct id * pvid , uint64_t * label_sector )
2011-06-01 23:29:31 +04:00
{
2018-02-07 22:14:08 +03:00
struct lvmcache_info * info ;
2002-11-18 16:53:58 +03:00
2018-02-07 22:14:08 +03:00
if ( ( info = lvmcache_info_from_pvid ( ( const char * ) pvid , NULL , 0 ) ) ) {
if ( info - > label & & label_sector )
* label_sector = info - > label - > sector ;
return info - > dev ;
}
2002-11-18 16:53:58 +03:00
2018-02-07 22:14:08 +03:00
return NULL ;
}
2003-07-05 02:34:56 +04:00
2018-02-07 22:14:08 +03:00
struct device * lvmcache_device_from_pvid ( struct cmd_context * cmd , const struct id * pvid , uint64_t * label_sector )
{
struct device * dev ;
2002-11-18 16:53:58 +03:00
2011-06-01 23:29:31 +04:00
dev = _device_from_pvid ( pvid , label_sector ) ;
if ( dev )
return dev ;
2002-11-18 16:53:58 +03:00
2018-02-07 22:14:08 +03:00
log_debug_devs ( " No device with uuid %s. " , ( const char * ) pvid ) ;
2002-11-18 16:53:58 +03:00
return NULL ;
}
2016-06-06 23:39:51 +03:00
int lvmcache_pvid_in_unchosen_duplicates ( const char * pvid )
{
struct device_list * devl ;
dm_list_iterate_items ( devl , & _unused_duplicate_devs ) {
if ( ! strncmp ( devl - > dev - > pvid , pvid , ID_LEN ) )
return 1 ;
}
return 0 ;
}
2010-05-19 15:52:07 +04:00
2008-05-19 23:49:56 +04:00
static int _free_vginfo ( struct lvmcache_vginfo * vginfo )
{
2008-06-09 20:22:33 +04:00
struct lvmcache_vginfo * primary_vginfo , * vginfo2 ;
2008-05-19 23:49:56 +04:00
int r = 1 ;
2012-02-10 05:28:27 +04:00
vginfo2 = primary_vginfo = lvmcache_vginfo_from_vgname ( vginfo - > vgname , NULL ) ;
2008-06-09 20:22:33 +04:00
if ( vginfo = = primary_vginfo ) {
2008-05-29 02:27:47 +04:00
dm_hash_remove ( _vgname_hash , vginfo - > vgname ) ;
if ( vginfo - > next & & ! dm_hash_insert ( _vgname_hash , vginfo - > vgname ,
vginfo - > next ) ) {
log_error ( " _vgname_hash re-insertion for %s failed " ,
vginfo - > vgname ) ;
r = 0 ;
}
2012-06-21 14:43:31 +04:00
} else
while ( vginfo2 ) {
if ( vginfo2 - > next = = vginfo ) {
vginfo2 - > next = vginfo - > next ;
break ;
}
vginfo2 = vginfo2 - > next ;
2008-06-09 20:22:33 +04:00
}
2008-05-19 23:49:56 +04:00
2018-06-08 15:40:53 +03:00
free ( vginfo - > system_id ) ;
free ( vginfo - > vgname ) ;
free ( vginfo - > creation_host ) ;
2008-05-19 23:49:56 +04:00
2008-06-06 16:43:40 +04:00
if ( * vginfo - > vgid & & _vgid_hash & &
2012-02-10 05:28:27 +04:00
lvmcache_vginfo_from_vgid ( vginfo - > vgid ) = = vginfo )
2008-05-19 23:49:56 +04:00
dm_hash_remove ( _vgid_hash , vginfo - > vgid ) ;
2008-11-04 01:14:30 +03:00
dm_list_del ( & vginfo - > list ) ;
2008-05-19 23:49:56 +04:00
2018-06-08 15:40:53 +03:00
free ( vginfo ) ;
2008-05-19 23:49:56 +04:00
return r ;
}
2008-04-08 16:49:21 +04:00
/*
* vginfo must be info - > vginfo unless info is NULL
*/
static int _drop_vginfo ( struct lvmcache_info * info , struct lvmcache_vginfo * vginfo )
2002-11-18 16:53:58 +03:00
{
2008-05-19 23:49:56 +04:00
if ( info )
_vginfo_detach_info ( info ) ;
2006-04-12 21:54:11 +04:00
2008-05-19 23:49:56 +04:00
/* vginfo still referenced? */
if ( ! vginfo | | is_orphan_vg ( vginfo - > vgname ) | |
2008-11-04 01:14:30 +03:00
! dm_list_empty ( & vginfo - > infos ) )
2008-05-19 23:49:56 +04:00
return 1 ;
2002-11-18 16:53:58 +03:00
2008-05-19 23:49:56 +04:00
if ( ! _free_vginfo ( vginfo ) )
return_0 ;
2006-04-12 21:54:11 +04:00
return 1 ;
2002-11-18 16:53:58 +03:00
}
2003-07-05 02:34:56 +04:00
void lvmcache_del ( struct lvmcache_info * info )
2002-11-18 16:53:58 +03:00
{
if ( info - > dev - > pvid [ 0 ] & & _pvid_hash )
2005-10-17 03:03:59 +04:00
dm_hash_remove ( _pvid_hash , info - > dev - > pvid ) ;
2002-11-18 16:53:58 +03:00
2008-04-08 16:49:21 +04:00
_drop_vginfo ( info , info - > vginfo ) ;
2002-11-18 16:53:58 +03:00
info - > label - > labeller - > ops - > destroy_label ( info - > label - > labeller ,
2014-06-09 00:57:04 +04:00
info - > label ) ;
2018-05-14 21:38:16 +03:00
label_destroy ( info - > label ) ;
2018-06-08 15:40:53 +03:00
free ( info ) ;
2014-06-09 00:57:04 +04:00
}
2002-11-18 16:53:58 +03:00
2018-03-01 19:20:34 +03:00
void lvmcache_del_dev ( struct device * dev )
{
struct lvmcache_info * info ;
if ( ( info = lvmcache_info_from_pvid ( ( const char * ) dev - > pvid , dev , 0 ) ) )
lvmcache_del ( info ) ;
}
2008-04-08 16:49:21 +04:00
/*
* vginfo must be info - > vginfo unless info is NULL ( orphans )
*/
static int _lvmcache_update_vgid ( struct lvmcache_info * info ,
struct lvmcache_vginfo * vginfo ,
const char * vgid )
2002-11-18 16:53:58 +03:00
{
2008-04-08 16:49:21 +04:00
if ( ! vgid | | ! vginfo | |
! strncmp ( vginfo - > vgid , vgid , ID_LEN ) )
2002-11-18 16:53:58 +03:00
return 1 ;
2008-04-08 16:49:21 +04:00
if ( vginfo & & * vginfo - > vgid )
dm_hash_remove ( _vgid_hash , vginfo - > vgid ) ;
2006-04-11 02:09:00 +04:00
if ( ! vgid ) {
2012-02-08 16:57:15 +04:00
/* FIXME: unreachable code path */
2013-01-08 02:30:29 +04:00
log_debug_cache ( " lvmcache: %s: clearing VGID " , info ? dev_name ( info - > dev ) : vginfo - > vgname ) ;
2002-11-18 16:53:58 +03:00
return 1 ;
2006-04-11 02:09:00 +04:00
}
2002-11-18 16:53:58 +03:00
2018-03-02 18:25:37 +03:00
( void ) dm_strncpy ( vginfo - > vgid , vgid , sizeof ( vginfo - > vgid ) ) ;
2008-04-08 16:49:21 +04:00
if ( ! dm_hash_insert ( _vgid_hash , vginfo - > vgid , vginfo ) ) {
2003-07-05 02:34:56 +04:00
log_error ( " _lvmcache_update: vgid hash insertion failed: %s " ,
2008-04-08 16:49:21 +04:00
vginfo - > vgid ) ;
2002-11-18 16:53:58 +03:00
return 0 ;
}
2008-04-08 16:49:21 +04:00
if ( ! is_orphan_vg ( vginfo - > vgname ) )
2016-04-12 14:06:16 +03:00
log_debug_cache ( " lvmcache %s: VG %s: set VGID to " FMTVGID " . " ,
2013-01-08 02:30:29 +04:00
( info ) ? dev_name ( info - > dev ) : " " ,
vginfo - > vgname , vginfo - > vgid ) ;
2006-04-11 02:09:00 +04:00
2002-11-18 16:53:58 +03:00
return 1 ;
}
static int _insert_vginfo(struct lvmcache_vginfo *new_vginfo, const char *vgid,
                          uint32_t vgstatus, const char *creation_host,
                          struct lvmcache_vginfo *primary_vginfo)
{
        struct lvmcache_vginfo *last_vginfo = primary_vginfo;
        char uuid_primary[64] __attribute__((aligned(8)));
        char uuid_new[64] __attribute__((aligned(8)));
        int use_new = 0;

        /* Pre-existing VG takes precedence. Unexported VG takes precedence. */
        if (primary_vginfo) {
                if (!id_write_format((const struct id *)vgid, uuid_new, sizeof(uuid_new)))
                        return_0;

                if (!id_write_format((const struct id *)&primary_vginfo->vgid, uuid_primary,
                                     sizeof(uuid_primary)))
                        return_0;

                _found_duplicate_vgnames = 1;

                /*
                 * A vginfo is kept for each VG with the same name.
                 * They are saved with the vginfo->next list.
                 * These checks just decide the ordering of
                 * that list.
                 *
                 * FIXME: it should no longer matter what order
                 * the vginfo's are kept in, so we can probably
                 * remove these comparisons and reordering entirely.
                 *
                 * If Primary not exported, new exported => keep
                 * Else Primary exported, new not exported => change
                 * Else Primary has hostname for this machine => keep
                 * Else Primary has no hostname, new has one => change
                 * Else New has hostname for this machine => change
                 * Else Keep primary.
                 */
                if (!(primary_vginfo->status & EXPORTED_VG) &&
                    (vgstatus & EXPORTED_VG))
                        log_verbose("Cache: Duplicate VG name %s: "
                                    "Existing %s takes precedence over "
                                    "exported %s", new_vginfo->vgname,
                                    uuid_primary, uuid_new);
                else if ((primary_vginfo->status & EXPORTED_VG) &&
                         !(vgstatus & EXPORTED_VG)) {
                        log_verbose("Cache: Duplicate VG name %s: "
                                    "%s takes precedence over exported %s",
                                    new_vginfo->vgname, uuid_new,
                                    uuid_primary);
                        use_new = 1;
                } else if (primary_vginfo->creation_host &&
                           !strcmp(primary_vginfo->creation_host,
                                   primary_vginfo->fmt->cmd->hostname))
                        log_verbose("Cache: Duplicate VG name %s: "
                                    "Existing %s (created here) takes precedence "
                                    "over %s", new_vginfo->vgname, uuid_primary,
                                    uuid_new);
                else if (!primary_vginfo->creation_host && creation_host) {
                        log_verbose("Cache: Duplicate VG name %s: "
                                    "%s (with creation_host) takes precedence over %s",
                                    new_vginfo->vgname, uuid_new,
                                    uuid_primary);
                        use_new = 1;
                } else if (creation_host &&
                           !strcmp(creation_host,
                                   primary_vginfo->fmt->cmd->hostname)) {
                        log_verbose("Cache: Duplicate VG name %s: "
                                    "%s (created here) takes precedence over %s",
                                    new_vginfo->vgname, uuid_new,
                                    uuid_primary);
                        use_new = 1;
                } else {
                        log_verbose("Cache: Duplicate VG name %s: "
                                    "Prefer existing %s vs new %s",
                                    new_vginfo->vgname, uuid_primary, uuid_new);
                }

                if (!use_new) {
                        while (last_vginfo->next)
                                last_vginfo = last_vginfo->next;
                        last_vginfo->next = new_vginfo;
                        return 1;
                }

                dm_hash_remove(_vgname_hash, primary_vginfo->vgname);
        }

        if (!dm_hash_insert(_vgname_hash, new_vginfo->vgname, new_vginfo)) {
                log_error("cache_update: vg hash insertion failed: %s",
                          new_vginfo->vgname);
                return 0;
        }

        if (primary_vginfo)
                new_vginfo->next = primary_vginfo;

        return 1;
}
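
/*
 * Illustrative only: _insert_vginfo() chains same-named VGs through
 * vginfo->next, with the vgname hash pointing at the head.  A hypothetical
 * walk over every vginfo sharing one name would look like this sketch.
 */
#if 0
static void _example_walk_duplicate_vgnames(struct lvmcache_vginfo *primary)
{
        struct lvmcache_vginfo *vginfo;

        for (vginfo = primary; vginfo; vginfo = vginfo->next)
                log_debug_cache("VG name %s has vgid %s",
                                vginfo->vgname, vginfo->vgid);
}
#endif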
static int _lvmcache_update_vgname(struct lvmcache_info *info,
                                   const char *vgname, const char *vgid,
                                   uint32_t vgstatus, const char *creation_host,
                                   const struct format_type *fmt)
{
        struct lvmcache_vginfo *vginfo, *primary_vginfo;
        char mdabuf[32];

        if (!vgname || (info && info->vginfo && !strcmp(info->vginfo->vgname, vgname)))
                return 1;

        /* Remove existing vginfo entry */
        if (info)
                _drop_vginfo(info, info->vginfo);

        if (!(vginfo = lvmcache_vginfo_from_vgname(vgname, vgid))) {
                /*
                 * Create a vginfo struct for this VG and put the vginfo
                 * into the hash table.
                 */

                if (!(vginfo = zalloc(sizeof(*vginfo)))) {
                        log_error("lvmcache_update_vgname: list alloc failed");
                        return 0;
                }
                if (!(vginfo->vgname = strdup(vgname))) {
                        free(vginfo);
                        log_error("cache vgname alloc failed for %s", vgname);
                        return 0;
                }
                dm_list_init(&vginfo->infos);
                dm_list_init(&vginfo->outdated_infos);

                /*
                 * A different VG (different uuid) can exist with the same name.
                 * In this case, the two VGs will have separate vginfo structs,
                 * but the second will be linked onto the existing vginfo->next,
                 * not in the hash.
                 */
                primary_vginfo = lvmcache_vginfo_from_vgname(vgname, NULL);

                if (!_insert_vginfo(vginfo, vgid, vgstatus, creation_host, primary_vginfo)) {
                        free(vginfo->vgname);
                        free(vginfo);
                        return 0;
                }

                /* Ensure orphans appear last on list_iterate */
                if (is_orphan_vg(vgname))
                        dm_list_add(&_vginfos, &vginfo->list);
                else
                        dm_list_add_h(&_vginfos, &vginfo->list);
        }

        if (info)
                _vginfo_attach_info(vginfo, info);
        else if (!_lvmcache_update_vgid(NULL, vginfo, vgid)) /* Orphans */
                return_0;

        /* FIXME Check consistency of list! */
        vginfo->fmt = fmt;

        if (info) {
                if (info->mdas.n)
                        sprintf(mdabuf, " with %u mda(s)", dm_list_size(&info->mdas));
                else
                        mdabuf[0] = '\0';
                log_debug_cache("lvmcache %s: now in VG %s%s%s%s%s.",
                                dev_name(info->dev),
                                vgname, vginfo->vgid[0] ? " (" : "",
                                vginfo->vgid[0] ? vginfo->vgid : "",
                                vginfo->vgid[0] ? ")" : "", mdabuf);
        } else
                log_debug_cache("lvmcache: Initialised VG %s.", vgname);

        return 1;
}
static int _lvmcache_update_vgstatus(struct lvmcache_info *info, uint32_t vgstatus,
                                     const char *creation_host, const char *lock_type,
                                     const char *system_id)
{
        if (!info || !info->vginfo)
                return 1;

        if ((info->vginfo->status & EXPORTED_VG) != (vgstatus & EXPORTED_VG))
                log_debug_cache("lvmcache %s: VG %s %s exported.",
                                dev_name(info->dev), info->vginfo->vgname,
                                vgstatus & EXPORTED_VG ? "now" : "no longer");

        info->vginfo->status = vgstatus;

        if (!creation_host)
                goto set_lock_type;

        if (info->vginfo->creation_host && !strcmp(creation_host,
                                                   info->vginfo->creation_host))
                goto set_lock_type;

        free(info->vginfo->creation_host);

        if (!(info->vginfo->creation_host = strdup(creation_host))) {
                log_error("cache creation host alloc failed for %s.",
                          creation_host);
                return 0;
        }

        log_debug_cache("lvmcache %s: VG %s: set creation host to %s.",
                        dev_name(info->dev), info->vginfo->vgname, creation_host);

set_lock_type:
        if (!lock_type)
                goto set_system_id;

        if (info->vginfo->lock_type && !strcmp(lock_type, info->vginfo->lock_type))
                goto set_system_id;

        free(info->vginfo->lock_type);

        if (!(info->vginfo->lock_type = strdup(lock_type))) {
                log_error("cache lock_type alloc failed for %s", lock_type);
                return 0;
        }

        log_debug_cache("lvmcache %s: VG %s: set lock_type to %s.",
                        dev_name(info->dev), info->vginfo->vgname, lock_type);

set_system_id:
        if (!system_id)
                goto out;

        if (info->vginfo->system_id && !strcmp(system_id, info->vginfo->system_id))
                goto out;

        free(info->vginfo->system_id);

        if (!(info->vginfo->system_id = strdup(system_id))) {
                log_error("cache system_id alloc failed for %s", system_id);
                return 0;
        }

        log_debug_cache("lvmcache %s: VG %s: set system_id to %s.",
                        dev_name(info->dev), info->vginfo->vgname, system_id);

out:
        return 1;
}
int lvmcache_add_orphan_vginfo(const char *vgname, struct format_type *fmt)
{
        return _lvmcache_update_vgname(NULL, vgname, vgname, 0, "", fmt);
}
/*
 * Returning 0 causes the caller to remove the info struct for this
 * device from lvmcache, which will make it look like a missing device.
 */
int lvmcache_update_vgname_and_id(struct lvmcache_info *info, struct lvmcache_vgsummary *vgsummary)
{
        const char *vgname = vgsummary->vgname;
        const char *vgid = (char *)&vgsummary->vgid;
        struct lvmcache_vginfo *vginfo;

        if (!vgname && !info->vginfo) {
                log_error(INTERNAL_ERROR "NULL vgname handed to cache");
                /* FIXME Remove this */
                vgname = info->fmt->orphan_vg_name;
                vgid = vgname;
        }

        /* If PV without mdas is already in a real VG, don't make it orphan */
        if (is_orphan_vg(vgname) && info->vginfo &&
            mdas_empty_or_ignored(&info->mdas) &&
            !is_orphan_vg(info->vginfo->vgname) && critical_section())
                return 1;

        /*
         * Creates a new vginfo struct for this vgname/vgid if none exists,
         * and attaches the info struct for the dev to the vginfo.
         * Puts the vginfo into the vgname hash table.
         */
        if (!_lvmcache_update_vgname(info, vgname, vgid, vgsummary->vgstatus, vgsummary->creation_host, info->fmt)) {
                /* shouldn't happen, internal error */
                log_error("Failed to update VG %s info in lvmcache.", vgname);
                return 0;
        }

        /*
         * Puts the vginfo into the vgid hash table.
         */
        if (!_lvmcache_update_vgid(info, info->vginfo, vgid)) {
                /* shouldn't happen, internal error */
                log_error("Failed to update VG %s info in lvmcache.", vgname);
                return 0;
        }

        /*
         * FIXME: identify which case this is and why this is needed, then
         * change that so it doesn't use this function and we can remove
         * this special case.
         * (I think this distinguishes the scan path, where these things
         * are set, from the vg_read path where lvmcache_update_vg() is
         * called, which calls this function without seqno/mda_size/mda_checksum.)
         */
        if (!vgsummary->seqno && !vgsummary->mda_size && !vgsummary->mda_checksum)
                return 1;

        /*
         * Keep track of which devs/mdas have old versions of the metadata.
         * The values we keep in vginfo are from the metadata with the largest
         * seqno.  One dev may have more recent metadata than another dev, and
         * one mda may have more recent metadata than the other mda on the same
         * device.
         *
         * When a device holds old metadata, the info struct for the device
         * remains in lvmcache, so the device is not treated as missing.
         * Also the mda struct containing the old metadata is kept on
         * info->mdas.  This means that vg_read will read metadata from
         * the mda again (and probably see the same old metadata).  It
         * also means that vg_write will use the mda to write new metadata
         * into the mda that currently has the old metadata.
         */
        if (vgsummary->mda_num == 1)
                info->mda1_seqno = vgsummary->seqno;
        else if (vgsummary->mda_num == 2)
                info->mda2_seqno = vgsummary->seqno;

        if (!info->summary_seqno)
                info->summary_seqno = vgsummary->seqno;
        else {
                if (info->summary_seqno == vgsummary->seqno) {
                        /* This mda has the same metadata as the prev mda on this dev. */
                        return 1;
                } else if (info->summary_seqno > vgsummary->seqno) {
                        /* This mda has older metadata than the prev mda on this dev. */
                        info->summary_seqno_mismatch = true;
                } else if (info->summary_seqno < vgsummary->seqno) {
                        /* This mda has newer metadata than the prev mda on this dev. */
                        info->summary_seqno_mismatch = true;
                        info->summary_seqno = vgsummary->seqno;
                }
        }

        /* this shouldn't happen */
        if (!(vginfo = info->vginfo))
                return 1;

        if (!vginfo->seqno) {
                vginfo->seqno = vgsummary->seqno;
                vginfo->mda_checksum = vgsummary->mda_checksum;
                vginfo->mda_size = vgsummary->mda_size;

                log_debug_cache("lvmcache %s mda%d VG %s set seqno %u checksum %x mda_size %zu",
                                dev_name(info->dev), vgsummary->mda_num, vgname,
                                vgsummary->seqno, vgsummary->mda_checksum, vgsummary->mda_size);
                goto update_vginfo;

        } else if (vgsummary->seqno < vginfo->seqno) {
                vginfo->scan_summary_mismatch = true;

                log_debug_cache("lvmcache %s mda%d VG %s older seqno %u checksum %x mda_size %zu",
                                dev_name(info->dev), vgsummary->mda_num, vgname,
                                vgsummary->seqno, vgsummary->mda_checksum, vgsummary->mda_size);
                return 1;

        } else if (vgsummary->seqno > vginfo->seqno) {
                vginfo->scan_summary_mismatch = true;

                /* Replace vginfo values with values from newer metadata. */
                vginfo->seqno = vgsummary->seqno;
                vginfo->mda_checksum = vgsummary->mda_checksum;
                vginfo->mda_size = vgsummary->mda_size;

                log_debug_cache("lvmcache %s mda%d VG %s newer seqno %u checksum %x mda_size %zu",
                                dev_name(info->dev), vgsummary->mda_num, vgname,
                                vgsummary->seqno, vgsummary->mda_checksum, vgsummary->mda_size);
                goto update_vginfo;
        } else {
                /*
                 * Same seqno as previous metadata we saw for this VG.
                 * If the metadata somehow has a different checksum or size,
                 * even though it has the same seqno, something has gone wrong.
                 * FIXME: test this case: VG has two PVs, first goes missing,
                 * second updated to seqno 4, first comes back and second goes
                 * missing, first updated to seqno 4, second comes back, now
                 * both are present with same seqno but different checksums.
                 */
                if ((vginfo->mda_size != vgsummary->mda_size) || (vginfo->mda_checksum != vgsummary->mda_checksum)) {
                        log_warn("WARNING: scan of VG %s from %s mda%d found mda_checksum %x mda_size %zu vs %x %zu",
                                 vgname, dev_name(info->dev), vgsummary->mda_num,
                                 vgsummary->mda_checksum, vgsummary->mda_size,
                                 vginfo->mda_checksum, vginfo->mda_size);
                        vginfo->scan_summary_mismatch = true;
                        return 0;
                }

                /*
                 * The seqno and checksum match what was previously seen;
                 * the summary values have already been saved in vginfo.
                 */
                return 1;
        }

update_vginfo:
        if (!_lvmcache_update_vgstatus(info, vgsummary->vgstatus, vgsummary->creation_host,
                                       vgsummary->lock_type, vgsummary->system_id)) {
                /*
                 * This shouldn't happen, it's an internal error, and we can leave
                 * the info in place without saving the summary values in vginfo.
                 */
                log_error("Failed to update VG %s info in lvmcache.", vgname);
        }

        return 1;
}
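
/*
 * To summarize the seqno handling above: per-mda seqnos are recorded on the
 * info struct, and vginfo keeps only the summary from the largest seqno seen.
 * An older summary is logged and dropped, a newer one replaces the saved
 * values, and an equal seqno must also match the saved checksum/size or the
 * scan is flagged as mismatched and rejected.
 */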
int lvmcache_update_vg(struct volume_group *vg, unsigned precommitted)
{
        struct pv_list *pvl;
        struct lvmcache_info *info;
        char pvid_s[ID_LEN + 1] __attribute__((aligned(8)));
        struct lvmcache_vgsummary vgsummary = {
                .vgname = vg->name,
                .vgstatus = vg->status,
                .vgid = vg->id,
                .system_id = vg->system_id,
                .lock_type = vg->lock_type
        };

        dm_list_iterate_items(pvl, &vg->pvs) {
                (void) dm_strncpy(pvid_s, (char *)&pvl->pv->id, sizeof(pvid_s));
                /* FIXME Could pvl->pv->dev->pvid ever be different? */
                if ((info = lvmcache_info_from_pvid(pvid_s, pvl->pv->dev, 0)) &&
                    !lvmcache_update_vgname_and_id(info, &vgsummary))
                        return_0;
        }

        return 1;
}
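
/*
 * Note: this path supplies no seqno/mda_size/mda_checksum in vgsummary, so
 * lvmcache_update_vgname_and_id() returns before the seqno bookkeeping above
 * (see the FIXME there distinguishing the scan path from this vg_read path).
 */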
/*
 * We can see multiple different devices with the
 * same pvid, i.e. duplicates.
 *
 * There may be different reasons for seeing two
 * devices with the same pvid:
 * - multipath showing two paths to the same thing
 * - one device copied to another, e.g. with dd,
 *   also referred to as cloned devices.
 * - a "subsystem" taking a device and creating
 *   another device of its own that represents the
 *   underlying device it is using, e.g. using dm
 *   to create an identity mapping of a PV.
 *
 * Given duplicate devices, we have to choose one
 * of them to be the "preferred" dev, i.e. the one
 * that will be referenced in lvmcache, by pv->dev.
 * We can keep the existing dev, that's currently
 * used in lvmcache, or we can replace the existing
 * dev with the new duplicate.
 *
 * Regardless of which device is preferred, we need
 * to print messages explaining which devices were
 * found so that a user can sort out for themselves
 * what has happened if the preferred device is not
 * the one they are interested in.
 *
 * If a user wants to use the non-preferred device,
 * they will need to filter out the device that
 * lvm is preferring.
 *
 * The dev_subsystem calls check if the major number
 * of the dev is part of a subsystem like DM/MD/DRBD.
 * A dev that's part of a subsystem is preferred over a
 * duplicate of that dev that is not part of a
 * subsystem.
 *
 * FIXME: there may be other reasons to prefer one
 * device over another:
 *
 * . are there other use/open counts we could check
 *   beyond the holders?
 *
 * . check if either is bad/usable and prefer
 *   the good one?
 *
 * . prefer the one with smaller minor number?
 *   Might avoid disturbing things due to a new
 *   transient duplicate?
 */
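
/*
 * Illustrative only: the shape of one such preference rule, under the
 * subsystem logic described above.  The helper name is hypothetical; the
 * actual selection happens elsewhere, when duplicates are compared after
 * scanning completes.
 */
#if 0
static int _example_prefer_dup(struct device *cur, struct device *dup)
{
        /* Prefer a device-mapper device over a non-dm duplicate. */
        if (dm_is_dm_major(MAJOR(dup->dev)) && !dm_is_dm_major(MAJOR(cur->dev)))
                return 1;

        /* Otherwise keep the device that was found first. */
        return 0;
}
#endif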
static struct lvmcache_info *_create_info(struct labeller *labeller, struct device *dev)
{
        struct lvmcache_info *info;
        struct label *label;

        if (!(label = label_create(labeller)))
                return_NULL;
        if (!(info = zalloc(sizeof(*info)))) {
                log_error("lvmcache_info allocation failed");
                label_destroy(label);
                return NULL;
        }

        info->dev = dev;
        info->fmt = labeller->fmt;

        label->info = info;
        info->label = label;

        dm_list_init(&info->list);
        lvmcache_del_mdas(info);
        lvmcache_del_das(info);
        lvmcache_del_bas(info);

        return info;
}
struct lvmcache_info *lvmcache_add(struct labeller *labeller,
                                   const char *pvid, struct device *dev,
                                   const char *vgname, const char *vgid, uint32_t vgstatus)
{
        char pvid_s[ID_LEN + 1] __attribute__((aligned(8)));
        char uuid[64] __attribute__((aligned(8)));
        struct lvmcache_vgsummary vgsummary = { 0 };
        struct lvmcache_info *info;
        struct lvmcache_info *info_lookup;
        struct device_list *devl;
        int created = 0;

        (void) dm_strncpy(pvid_s, pvid, sizeof(pvid_s));

        if (!id_write_format((const struct id *)&pvid_s, uuid, sizeof(uuid)))
                stack;
        /*
         * Find existing info struct in _pvid_hash or create a new one.
         *
         * Don't pass the known "dev" as an arg here.  The mismatching
         * devs for the duplicate case is checked below.
         */

        info = lvmcache_info_from_pvid(pvid_s, NULL, 0);
        if (!info)
                info = lvmcache_info_from_pvid(dev->pvid, NULL, 0);
        if (!info) {
                info = _create_info(labeller, dev);
                created = 1;
        }
        if (!info)
                return_NULL;

        /*
         * If an existing info struct was found, check if any values are new.
         */
        if (!created) {
                if (info->dev != dev) {
                        log_debug_cache("PV %s on %s was already found on %s.",
                                        uuid, dev_name(dev), dev_name(info->dev));

                        strncpy(dev->pvid, pvid_s, sizeof(dev->pvid));
                        /*
                         * Keep the existing PV/dev in lvmcache, and save the
                         * new duplicate in the list of duplicates.  After
                         * scanning is complete, compare the duplicate devs
                         * with those in lvmcache to check if one of the
                         * duplicates is preferred and if so switch lvmcache to
                         * use it.
                         */
                        if (!(devl = zalloc(sizeof(*devl))))
                                return_NULL;
                        devl->dev = dev;
                        dm_list_add(&_found_duplicate_devs, &devl->list);
                        _found_duplicate_pvs = 1;

                        return NULL;
                }
                if (info->dev->pvid[0] && pvid[0] && strcmp(pvid_s, info->dev->pvid)) {
                        /* This happens when running pvcreate on an existing PV. */
                        log_verbose("Changing pvid on dev %s from %s to %s",
                                    dev_name(info->dev), info->dev->pvid, pvid_s);
                }
                if (info->label->labeller != labeller) {
                        log_verbose("Changing labeller on dev %s from %s to %s",
                                    dev_name(info->dev),
                                    info->label->labeller->fmt->name,
                                    labeller->fmt->name);
                        label_destroy(info->label);
                        if (!(info->label = label_create(labeller)))
                                return_NULL;
                        info->label->info = info;
                }
        }
        /*
         * Add or update the _pvid_hash mapping, pvid to info.
         */
        info_lookup = dm_hash_lookup(_pvid_hash, pvid_s);
        if ((info_lookup == info) && !strcmp(info->dev->pvid, pvid_s))
                goto update_vginfo;

        if (info->dev->pvid[0])
                dm_hash_remove(_pvid_hash, info->dev->pvid);

        strncpy(info->dev->pvid, pvid_s, sizeof(info->dev->pvid));

        if (!dm_hash_insert(_pvid_hash, pvid_s, info)) {
                log_error("Adding pvid to hash failed %s", pvid_s);
                return NULL;
        }
update_vginfo:
        vgsummary.vgstatus = vgstatus;
        vgsummary.vgname = vgname;
        if (vgid)
                strncpy((char *)&vgsummary.vgid, vgid, sizeof(vgsummary.vgid));

        if (!lvmcache_update_vgname_and_id(info, &vgsummary)) {
                if (created) {
                        dm_hash_remove(_pvid_hash, pvid_s);
                        strcpy(info->dev->pvid, "");
                        free(info->label);
                        free(info);
                }
                return NULL;
        }

        return info;
}
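
/*
 * Note on lvmcache_add() return values: NULL means either an error or that
 * "dev" was recorded as a duplicate of an existing dev for this PVID; in the
 * duplicate case the existing info struct is deliberately left untouched.
 */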
static void _lvmcache_destroy_entry(struct lvmcache_info *info)
{
        _vginfo_detach_info(info);
        info->dev->pvid[0] = 0;
        label_destroy(info->label);
        free(info);
}

static void _lvmcache_destroy_vgnamelist(struct lvmcache_vginfo *vginfo)
{
        struct lvmcache_vginfo *next;

        do {
                next = vginfo->next;
                if (!_free_vginfo(vginfo))
                        stack;
        } while ((vginfo = next));
}
void lvmcache_destroy(struct cmd_context *cmd, int retain_orphans, int reset)
{
        log_debug_cache("Dropping VG info");

        if (_vgid_hash) {
                dm_hash_destroy(_vgid_hash);
                _vgid_hash = NULL;
        }

        if (_pvid_hash) {
                dm_hash_iter(_pvid_hash, (dm_hash_iterate_fn) _lvmcache_destroy_entry);
                dm_hash_destroy(_pvid_hash);
                _pvid_hash = NULL;
        }

        if (_vgname_hash) {
                dm_hash_iter(_vgname_hash,
                             (dm_hash_iterate_fn) _lvmcache_destroy_vgnamelist);
                dm_hash_destroy(_vgname_hash);
                _vgname_hash = NULL;
        }

        if (!dm_list_empty(&_vginfos))
                log_error(INTERNAL_ERROR "_vginfos list should be empty");
        dm_list_init(&_vginfos);

        /*
         * Move the current _unused_duplicate_devs to _prev_unused_duplicate_devs
         * before destroying _unused_duplicate_devs.
         *
         * One command can init/populate/destroy lvmcache multiple times.  Each
         * time it will encounter duplicates and choose the preferred devs.
         * We want the same preferred devices to be chosen each time, so save
         * the unpreferred devs here so that _choose_preferred_devs can use
         * this to make the same choice each time.
         */
        _destroy_duplicate_device_list(&_prev_unused_duplicate_devs);
        dm_list_splice(&_prev_unused_duplicate_devs, &_unused_duplicate_devs);
        _destroy_duplicate_device_list(&_unused_duplicate_devs);
        _destroy_duplicate_device_list(&_found_duplicate_devs); /* should be empty anyway */
        _found_duplicate_pvs = 0;

        if (retain_orphans) {
                struct format_type *fmt;

                if (!lvmcache_init(cmd))
                        stack;

                dm_list_iterate_items(fmt, &cmd->formats) {
                        if (!lvmcache_add_orphan_vginfo(fmt->orphan_vg_name, fmt))
                                stack;
                }
        }
}
int lvmcache_fid_add_mdas(struct lvmcache_info *info, struct format_instance *fid,
			  const char *id, int id_len)
{
	return fid_add_mdas(fid, &info->mdas, id, id_len);
}

int lvmcache_fid_add_mdas_pv(struct lvmcache_info *info, struct format_instance *fid)
{
	return lvmcache_fid_add_mdas(info, fid, info->dev->pvid, ID_LEN);
}

int lvmcache_fid_add_mdas_vg(struct lvmcache_vginfo *vginfo, struct format_instance *fid)
{
	struct lvmcache_info *info;

	dm_list_iterate_items(info, &vginfo->infos) {
		if (!lvmcache_fid_add_mdas_pv(info, fid))
			return_0;
	}
	return 1;
}
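/*
 * Hedged sketch of the reading path: when a format instance is set up
 * for a VG, every mda already discovered by the label scan is attached
 * to it before the metadata is read ("fid" is a hypothetical local):
 *
 *	if (!lvmcache_fid_add_mdas_vg(vginfo, fid))
 *		return_0;
 */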
int lvmcache_populate_pv_fields(struct lvmcache_info *info,
				struct volume_group *vg,
				struct physical_volume *pv)
{
	struct data_area_list *da;

	if (!info->label) {
		log_error("No cached label for orphan PV %s", pv_dev_name(pv));
		return 0;
	}

	pv->label_sector = info->label->sector;
	pv->dev = info->dev;
	pv->fmt = info->fmt;
	pv->size = info->device_size >> SECTOR_SHIFT;
	pv->vg_name = FMT_TEXT_ORPHAN_VG_NAME;
	memcpy(&pv->id, &info->dev->pvid, sizeof(pv->id));

	if (!pv->size) {
		log_error("PV %s size is zero.", dev_name(info->dev));
		return 0;
	}

	/* Currently only support exactly one data area */
	if (dm_list_size(&info->das) != 1) {
		log_error("Must be exactly one data area (found %d) on PV %s",
			  dm_list_size(&info->das), dev_name(info->dev));
		return 0;
	}

	/* Currently only support one bootloader area at most */
	if (dm_list_size(&info->bas) > 1) {
		log_error("Must be at most one bootloader area (found %d) on PV %s",
			  dm_list_size(&info->bas), dev_name(info->dev));
		return 0;
	}

	dm_list_iterate_items(da, &info->das)
		pv->pe_start = da->disk_locn.offset >> SECTOR_SHIFT;

	dm_list_iterate_items(da, &info->bas) {
		pv->ba_start = da->disk_locn.offset >> SECTOR_SHIFT;
		pv->ba_size = da->disk_locn.size >> SECTOR_SHIFT;
	}

	return 1;
}
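/*
 * Unit convention worth noting (a worked example, not new behavior):
 * cached sizes and offsets are in bytes, pv fields are in 512-byte
 * sectors, so everything crossing the boundary is shifted by
 * SECTOR_SHIFT (9).  For a 1 GiB device:
 *
 *	info->device_size = 1073741824;			// bytes
 *	pv->size = info->device_size >> SECTOR_SHIFT;	// 2097152 sectors
 */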
int lvmcache_check_format(struct lvmcache_info *info, const struct format_type *fmt)
{
	if (info->fmt != fmt) {
		log_error("PV %s is a different format (%s)",
			  dev_name(info->dev), info->fmt->name);
		return 0;
	}
	return 1;
}
void lvmcache_del_mdas(struct lvmcache_info *info)
{
	if (info->mdas.n)
		del_mdas(&info->mdas);
	dm_list_init(&info->mdas);

	if (info->bad_mdas.n)
		del_mdas(&info->bad_mdas);
	dm_list_init(&info->bad_mdas);
}

void lvmcache_del_das(struct lvmcache_info *info)
{
	if (info->das.n)
		del_das(&info->das);
	dm_list_init(&info->das);
}

void lvmcache_del_bas(struct lvmcache_info *info)
{
	if (info->bas.n)
		del_bas(&info->bas);
	dm_list_init(&info->bas);
}
int lvmcache_add_mda(struct lvmcache_info *info, struct device *dev,
		     uint64_t start, uint64_t size, unsigned ignored)
{
	return add_mda(info->fmt, NULL, &info->mdas, dev, start, size, ignored);
}

int lvmcache_add_da(struct lvmcache_info *info, uint64_t start, uint64_t size)
{
	return add_da(NULL, &info->das, start, size);
}

int lvmcache_add_ba(struct lvmcache_info *info, uint64_t start, uint64_t size)
{
	return add_ba(NULL, &info->bas, start, size);
}
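/*
 * Hedged scan-side sketch: while parsing a PV's on-disk headers, each
 * discovered area is recorded in the cache with byte offsets/sizes
 * ("offset" and "size" are hypothetical locals from that parser):
 *
 *	if (!lvmcache_add_da(info, offset, size))
 *		return_0;
 */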
void lvmcache_update_pv(struct lvmcache_info *info, struct physical_volume *pv,
			const struct format_type *fmt)
{
	info->device_size = pv->size << SECTOR_SHIFT;
	info->fmt = fmt;
}

int lvmcache_update_das(struct lvmcache_info *info, struct physical_volume *pv)
{
	struct data_area_list *da;

	if (info->das.n) {
		if (!pv->pe_start)
			dm_list_iterate_items(da, &info->das)
				pv->pe_start = da->disk_locn.offset >> SECTOR_SHIFT;
		del_das(&info->das);
	} else
		dm_list_init(&info->das);

	if (!add_da(NULL, &info->das, pv->pe_start << SECTOR_SHIFT, 0 /*pv->size << SECTOR_SHIFT*/))
		return_0;

	return 1;
}
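/*
 * Reading note (hedged): the rebuilt data-area entry is added with
 * size 0, so the cache records only where the data area starts; when
 * pv->pe_start is unset it is first recovered from the old cached
 * entry before the list is rebuilt:
 *
 *	lvmcache_update_das(info, pv);	// pv->pe_start in, da offset out
 */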
int lvmcache_update_bas(struct lvmcache_info *info, struct physical_volume *pv)
{
	struct data_area_list *ba;

	if (info->bas.n) {
		if (!pv->ba_start && !pv->ba_size)
			dm_list_iterate_items(ba, &info->bas) {
				pv->ba_start = ba->disk_locn.offset >> SECTOR_SHIFT;
				pv->ba_size = ba->disk_locn.size >> SECTOR_SHIFT;
			}
		del_bas(&info->bas);
	} else
		dm_list_init(&info->bas);

	if (!add_ba(NULL, &info->bas, pv->ba_start << SECTOR_SHIFT, pv->ba_size << SECTOR_SHIFT))
		return_0;

	return 1;
}
int lvmcache_foreach_pv(struct lvmcache_vginfo *vginfo,
			int (*fun)(struct lvmcache_info *, void *),
			void *baton)
{
	struct lvmcache_info *info;

	dm_list_iterate_items(info, &vginfo->infos) {
		if (!fun(info, baton))
			return_0;
	}

	return 1;
}
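/*
 * Callback sketch (hypothetical, not part of this file); the same
 * baton pattern applies to lvmcache_foreach_mda/_da/_ba below:
 *
 *	static int _count_pv(struct lvmcache_info *info, void *baton)
 *	{
 *		(*(unsigned *) baton)++;
 *		return 1;
 *	}
 *
 *	unsigned pv_count = 0;
 *	if (!lvmcache_foreach_pv(vginfo, _count_pv, &pv_count))
 *		stack;
 */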
int lvmcache_foreach_mda(struct lvmcache_info *info,
			 int (*fun)(struct metadata_area *, void *),
			 void *baton)
{
	struct metadata_area *mda;

	dm_list_iterate_items(mda, &info->mdas) {
		if (!fun(mda, baton))
			return_0;
	}

	return 1;
}
unsigned lvmcache_mda_count(struct lvmcache_info *info)
{
	return dm_list_size(&info->mdas);
}
int lvmcache_foreach_da(struct lvmcache_info *info,
			int (*fun)(struct disk_locn *, void *),
			void *baton)
{
	struct data_area_list *da;

	dm_list_iterate_items(da, &info->das) {
		if (!fun(&da->disk_locn, baton))
			return_0;
	}

	return 1;
}
int lvmcache_foreach_ba(struct lvmcache_info *info,
			int (*fun)(struct disk_locn *, void *),
			void *baton)
{
	struct data_area_list *ba;

	dm_list_iterate_items(ba, &info->bas) {
		if (!fun(&ba->disk_locn, baton))
			return_0;
	}

	return 1;
}
struct label *lvmcache_get_dev_label(struct device *dev)
{
	struct lvmcache_info *info;

	if ((info = lvmcache_info_from_pvid(dev->pvid, NULL, 0))) {
		/* dev would be different for a duplicate */
		if (info->dev == dev)
			return info->label;
	}
	return NULL;
}

int lvmcache_has_dev_info(struct device *dev)
{
	if (lvmcache_info_from_pvid(dev->pvid, NULL, 0))
		return 1;
	return 0;
}
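/*
 * Hedged sketch: callers use this to recover an already-scanned label
 * without touching the device again.  A NULL result means either the
 * device was never scanned, or another device won a duplicate-PVID
 * contest and info->dev points elsewhere:
 *
 *	struct label *label;
 *
 *	if (!(label = lvmcache_get_dev_label(dev)))
 *		// fall back to scanning dev
 */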
/*
 * The lifetime of the label returned is tied to the lifetime of the
 * lvmcache_info which is the same as lvmcache itself.
 */
struct label *lvmcache_get_label(struct lvmcache_info *info) {
	return info->label;
}

uint64_t lvmcache_device_size(struct lvmcache_info *info) {
	return info->device_size;
}

void lvmcache_set_device_size(struct lvmcache_info *info, uint64_t size) {
	info->device_size = size;
}

struct device *lvmcache_device(struct lvmcache_info *info) {
	return info->dev;
}
void lvmcache_set_ext_version(struct lvmcache_info *info, uint32_t version)
{
	info->ext_version = version;
}

uint32_t lvmcache_ext_version(struct lvmcache_info *info) {
	return info->ext_version;
}
void lvmcache_set_ext_flags(struct lvmcache_info *info, uint32_t flags) {
	info->ext_flags = flags;
}

uint32_t lvmcache_ext_flags(struct lvmcache_info *info) {
	return info->ext_flags;
}
int lvmcache_is_orphan(struct lvmcache_info *info) {
	if (!info->vginfo)
		return 1; /* FIXME? */
	return is_orphan_vg(info->vginfo->vgname);
}

int lvmcache_vgid_is_cached(const char *vgid) {
	struct lvmcache_vginfo *vginfo;

	vginfo = lvmcache_vginfo_from_vgid(vgid);

	if (!vginfo || !vginfo->vgname)
		return 0;

	if (is_orphan_vg(vginfo->vgname))
		return 0;

	return 1;
}
uint64_t lvmcache_smallest_mda_size(struct lvmcache_info *info)
{
	if (!info)
		return UINT64_C(0);

	return find_min_mda_size(&info->mdas);
}

const struct format_type *lvmcache_fmt(struct lvmcache_info *info) {
	return info->fmt;
}
int lvmcache_lookup_mda(struct lvmcache_vgsummary *vgsummary)
{
	struct lvmcache_vginfo *vginfo;

	if (!vgsummary->mda_size)
		return 0;

	/* FIXME Index the checksums */
	dm_list_iterate_items(vginfo, &_vginfos) {
		if (vgsummary->mda_checksum == vginfo->mda_checksum &&
		    vgsummary->mda_size == vginfo->mda_size &&
		    !is_orphan_vg(vginfo->vgname)) {
			vgsummary->vgname = vginfo->vgname;
			vgsummary->creation_host = vginfo->creation_host;
			vgsummary->vgstatus = vginfo->status;
			vgsummary->seqno = vginfo->seqno;

			/* vginfo->vgid is 1 byte larger than vgsummary->vgid */
			memcpy(&vgsummary->vgid, vginfo->vgid, sizeof(vgsummary->vgid));

			return 1;
		}
	}

	return 0;
}
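/*
 * Hedged sketch of the scan-side pattern (see the dev1..dev5
 * walkthrough further down): after reading an mda_header, a caller
 * first checks whether this checksum+size pair is already cached, and
 * only parses the metadata text when it is not:
 *
 *	vgsummary.mda_checksum = checksum;	// hypothetical locals
 *	vgsummary.mda_size = size;
 *	if (lvmcache_lookup_mda(&vgsummary))
 *		// reuse cached vgname/vgid/seqno, skip re-parsing
 */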
int lvmcache_contains_lock_type_sanlock(struct cmd_context *cmd)
{
	struct lvmcache_vginfo *vginfo;

	dm_list_iterate_items(vginfo, &_vginfos) {
		if (vginfo->lock_type && !strcmp(vginfo->lock_type, "sanlock"))
			return 1;
	}

	return 0;
}
void lvmcache_get_max_name_lengths(struct cmd_context *cmd,
				   unsigned *pv_max_name_len,
				   unsigned *vg_max_name_len)
{
	struct lvmcache_vginfo *vginfo;
	struct lvmcache_info *info;
	unsigned len;

	*vg_max_name_len = 0;
	*pv_max_name_len = 0;

	dm_list_iterate_items(vginfo, &_vginfos) {
		len = strlen(vginfo->vgname);
		if (*vg_max_name_len < len)
			*vg_max_name_len = len;

		dm_list_iterate_items(info, &vginfo->infos) {
			len = strlen(dev_name(info->dev));
			if (*pv_max_name_len < len)
				*pv_max_name_len = len;
		}
	}
}
int lvmcache_vg_is_foreign(struct cmd_context *cmd, const char *vgname, const char *vgid)
{
	struct lvmcache_vginfo *vginfo;
	int ret = 0;

	if ((vginfo = lvmcache_vginfo_from_vgid(vgid)))
		ret = !is_system_id_allowed(cmd, vginfo->system_id);

	return ret;
}
/*
 * Example of reading five devs in sequence from the same VG:
 *
 * dev1:
 *  lvmcache: creates vginfo with initial values
 *
 * dev2: all checksums match.
 *  mda_header checksum matches vginfo from dev1
 *  metadata checksum matches vginfo from dev1
 *  metadata is not parsed; the vgsummary values are copied from
 *  lvmcache (from dev1) and passed back to lvmcache for dev2.
 *  lvmcache: attach info for dev2 to existing vginfo
 *
 * dev3: mda_header and metadata have mismatched checksums.
 *  mda_header checksum matches vginfo from dev1
 *  metadata checksum doesn't match vginfo from dev1
 *  produces read error in config.c
 *  lvmcache: info for dev3 is deleted, FIXME: use a defective state
 *
 * dev4: mda_header and metadata have matching checksums, but they
 *  do not match the checksum in lvmcache from the previous devs.
 *  mda_header checksum doesn't match vginfo from dev1
 *  lvmcache_lookup_mda returns 0, no vgname, no checksum_only
 *  lvmcache: update_vgname_and_id sees checksum from dev4 does not
 *  match vginfo from dev1, so vginfo->scan_summary_mismatch is set.
 *  attach info for dev4 to existing vginfo
 *
 * dev5: config parsing error.
 *  lvmcache: info for dev5 is deleted, FIXME: use a defective state
 */
bool lvmcache_scan_mismatch(struct cmd_context *cmd, const char *vgname, const char *vgid)
{
	struct lvmcache_vginfo *vginfo;

	if (!vgname || !vgid)
		return true;

	if ((vginfo = lvmcache_vginfo_from_vgid(vgid)))
		return vginfo->scan_summary_mismatch;

	return true;
}
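/*
 * Hypothetical caller, per the walkthrough above: a command wanting
 * consistent metadata can ask whether the label scan saw mismatched
 * seqnos or checksums and, if so, re-read metadata from each dev:
 *
 *	if (lvmcache_scan_mismatch(cmd, vg_name, vg_id))
 *		// parse metadata from every dev instead of trusting
 *		// the cached summary
 */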
static uint64_t _max_metadata_size;

void lvmcache_save_metadata_size(uint64_t val)
{
	/* Track the largest metadata size seen (unset is 0). */
	if (_max_metadata_size < val)
		_max_metadata_size = val;
}

uint64_t lvmcache_max_metadata_size(void)
{
	return _max_metadata_size;
}
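/*
 * Hedged sketch: a caller creating or checking a metadata area can
 * compare its usable size against the largest metadata seen during the
 * scan ("mda_free_bytes" is a hypothetical local):
 *
 *	if (mda_free_bytes < lvmcache_max_metadata_size())
 *		log_warn("WARNING: metadata area is smaller than existing metadata.");
 */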
int lvmcache_vginfo_has_pvid(struct lvmcache_vginfo *vginfo, char *pvid)
{
	struct lvmcache_info *info;

	dm_list_iterate_items(info, &vginfo->infos) {
		if (!strcmp(info->dev->pvid, pvid))
			return 1;
	}
	return 0;
}
struct metadata_area *lvmcache_get_mda(struct cmd_context *cmd,
				       const char *vgname,
				       struct device *dev,
				       int use_mda_num)
{
	struct lvmcache_vginfo *vginfo;
	struct lvmcache_info *info;
	struct metadata_area *mda;

	if (!use_mda_num)
		use_mda_num = 1;

	if (!(vginfo = lvmcache_vginfo_from_vgname(vgname, NULL)))
		return NULL;

	dm_list_iterate_items(info, &vginfo->infos) {
		if (info->dev != dev)
			continue;

		dm_list_iterate_items(mda, &info->mdas) {
			if ((use_mda_num == 1) && (mda->status & MDA_PRIMARY))
				return mda;
			if ((use_mda_num == 2) && !(mda->status & MDA_PRIMARY))
				return mda;
		}
		return NULL;
	}
	return NULL;
}
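/*
 * Hedged note: use_mda_num selects between the two text-format mdas on
 * a device; mda1 carries MDA_PRIMARY and mda2 does not.  Hypothetical
 * use, fetching a device's second mda:
 *
 *	struct metadata_area *mda2;
 *
 *	if (!(mda2 = lvmcache_get_mda(cmd, vg_name, dev, 2)))
 *		// dev has no second metadata area
 */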
/*
 * This is used by the metadata repair command to check if
 * the metadata on a dev needs repair because it's old.
 */
bool lvmcache_has_old_metadata(struct cmd_context *cmd, const char *vgname, const char *vgid, struct device *dev)
{
	struct lvmcache_vginfo *vginfo;
	struct lvmcache_info *info;

	/* shouldn't happen */
	if (!vgname || !vgid)
		return false;

	/* shouldn't happen */
	if (!(vginfo = lvmcache_vginfo_from_vgid(vgid)))
		return false;

	/* shouldn't happen */
	if (!(info = lvmcache_info_from_pvid(dev->pvid, NULL, 0)))
		return false;

	/* writing to a new PV */
	if (!info->summary_seqno)
		return false;

	/* on same dev, one mda has newer metadata than the other */
	if (info->summary_seqno_mismatch)
		return true;

	/* one or both mdas on this dev has older metadata than another dev */
	if (vginfo->seqno > info->summary_seqno)
		return true;

	return false;
}
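/*
 * Hypothetical repair-path use (names are placeholders): decide per
 * device whether its copy of the VG metadata should be rewritten at
 * the current seqno:
 *
 *	if (lvmcache_has_old_metadata(cmd, vg->name, (const char *) &vg->id, dev))
 *		// queue dev for a metadata rewrite
 */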
void lvmcache_get_outdated_devs(struct cmd_context *cmd,
				const char *vgname, const char *vgid,
				struct dm_list *devs)
{
	struct lvmcache_vginfo *vginfo;
	struct lvmcache_info *info;
	struct device_list *devl;

	if (!(vginfo = lvmcache_vginfo_from_vgname(vgname, vgid))) {
		log_error(INTERNAL_ERROR "lvmcache_get_outdated_devs no vginfo %s", vgname);
		return;
	}

	dm_list_iterate_items(info, &vginfo->outdated_infos) {
		if (!(devl = zalloc(sizeof(*devl)))) {
			log_error("device_list element allocation failed");
			return;
		}
		devl->dev = info->dev;
		dm_list_add(devs, &devl->list);
	}
}
void lvmcache_del_outdated_devs(struct cmd_context *cmd,
				const char *vgname, const char *vgid)
{
	struct lvmcache_vginfo *vginfo;
	struct lvmcache_info *info, *info2;

	if (!(vginfo = lvmcache_vginfo_from_vgname(vgname, vgid))) {
		log_error(INTERNAL_ERROR "lvmcache_del_outdated_devs no vginfo");
		return;
	}

	dm_list_iterate_items_safe(info, info2, &vginfo->outdated_infos)
		lvmcache_del(info);
}
void lvmcache_get_outdated_mdas(struct cmd_context *cmd,
				const char *vgname, const char *vgid,
				struct device *dev,
				struct dm_list **mdas)
{
	struct lvmcache_vginfo *vginfo;
	struct lvmcache_info *info;

	*mdas = NULL;

	if (!(vginfo = lvmcache_vginfo_from_vgname(vgname, vgid))) {
		log_error(INTERNAL_ERROR "lvmcache_get_outdated_mdas no vginfo");
		return;
	}

	dm_list_iterate_items(info, &vginfo->outdated_infos) {
		if (info->dev != dev)
			continue;
		*mdas = &info->mdas;
		return;
	}
}
bool lvmcache_is_outdated_dev(struct cmd_context *cmd,
			      const char *vgname, const char *vgid,
			      struct device *dev)
{
	struct lvmcache_vginfo *vginfo;
	struct lvmcache_info *info;

	if (!(vginfo = lvmcache_vginfo_from_vgname(vgname, vgid))) {
		log_error(INTERNAL_ERROR "lvmcache_is_outdated_dev no vginfo");
		return false;
	}

	dm_list_iterate_items(info, &vginfo->outdated_infos) {
		if (info->dev == dev)
			return true;
	}

	return false;
}