/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
# ifndef _LVM_CACHE_H
# define _LVM_CACHE_H
# include "lib/device/dev-cache.h"
# include "lib/device/dev-type.h"
# include "lib/uuid/uuid.h"
# include "lib/label/label.h"
# include "lib/locking/locking.h"

/*
 * Orphan VGs are the internal containers for PVs not belonging to any VG.
 * ORPHAN_VG_NAME(fmt) appends "_" and fmt (presumably a format name --
 * confirm against callers) to the VG_ORPHANS prefix.
 */
#define ORPHAN_PREFIX VG_ORPHANS
#define ORPHAN_VG_NAME(fmt) ORPHAN_PREFIX "_" fmt

/* LVM specific per-volume info */
/* Eventual replacement for struct physical_volume perhaps? */

/*
 * Forward declarations: this header only passes pointers to these types,
 * so their full definitions are not required here.
 */
struct cmd_context;
struct format_type;
struct volume_group;
struct physical_volume;
struct dm_config_tree;
struct format_instance;
struct metadata_area;
struct disk_locn;

struct lvmcache_vginfo;
2002-11-18 16:53:58 +03:00
2015-07-24 23:20:37 +03:00
/*
* vgsummary represents a summary of the VG that is read
* without a lock . The info does not come through vg_read ( ) ,
* but through reading mdas . It provides information about
* the VG that is needed to lock the VG and then read it fully
* with vg_read ( ) , after which the VG summary should be checked
* against the full VG metadata to verify it was correct ( since
* it was read without a lock . )
*
* Once read , vgsummary information is saved in lvmcache_vginfo .
*/
2015-03-19 02:43:02 +03:00
struct lvmcache_vgsummary {
const char * vgname ;
struct id vgid ;
uint64_t vgstatus ;
char * creation_host ;
2015-11-30 20:32:17 +03:00
const char * system_id ;
2015-07-24 23:20:37 +03:00
const char * lock_type ;
2019-02-05 22:09:56 +03:00
uint32_t seqno ;
2015-03-19 02:43:02 +03:00
uint32_t mda_checksum ;
size_t mda_size ;
2019-02-05 22:09:56 +03:00
int mda_num ; /* 1 = summary from mda1, 2 = summary from mda2 */
unsigned mda_ignored : 1 ;
unsigned zero_offset : 1 ;
2015-03-19 02:43:02 +03:00
} ;
2018-05-03 00:58:49 +03:00
/* Set up / tear down the cache for one command invocation. */
int lvmcache_init(struct cmd_context *cmd);

void lvmcache_destroy(struct cmd_context *cmd, int retain_orphans, int reset);

/* Scan device labels to (re)populate the cache. */
int lvmcache_label_scan(struct cmd_context *cmd);
int lvmcache_label_rescan_vg(struct cmd_context *cmd, const char *vgname, const char *vgid);
int lvmcache_label_rescan_vg_rw(struct cmd_context *cmd, const char *vgname, const char *vgid);

/* Add/delete a device */
struct lvmcache_info *lvmcache_add(struct labeller *labeller, const char *pvid,
				   struct device *dev, uint64_t label_sector,
				   const char *vgname, const char *vgid,
				   uint32_t vgstatus, int *is_duplicate);
int lvmcache_add_orphan_vginfo(const char *vgname, struct format_type *fmt);
void lvmcache_del(struct lvmcache_info *info);
void lvmcache_del_dev(struct device *dev);

/* Update things */
int lvmcache_update_vgname_and_id(struct lvmcache_info *info,
				  struct lvmcache_vgsummary *vgsummary);
int lvmcache_update_vg_from_read(struct volume_group *vg, unsigned precommitted);
int lvmcache_update_vg_from_write(struct volume_group *vg);

void lvmcache_lock_vgname(const char *vgname, int read_only);
void lvmcache_unlock_vgname(const char *vgname);

/* Queries */
int lvmcache_lookup_mda(struct lvmcache_vgsummary *vgsummary);
struct lvmcache_vginfo *lvmcache_vginfo_from_vgname(const char *vgname,
						    const char *vgid);
struct lvmcache_vginfo *lvmcache_vginfo_from_vgid(const char *vgid);
struct lvmcache_info *lvmcache_info_from_pvid(const char *pvid, struct device *dev, int valid_only);
const char *lvmcache_vgname_from_vgid(struct dm_pool *mem, const char *vgid);
/*
 * vg_read: look up vgid from name.
 *
 * After recent changes to process_each, vg_read() is usually
 * given both the vgname and vgid for the intended VG.
 * However, in some cases vg_read() is given a vgid with
 * no vgname, or is given a vgname with no vgid.
 *
 * When given a vgid with no vgname, vg_read() uses lvmcache
 * to look up the vgname using the vgid.  If the vgname is
 * not found, vg_read() fails.
 *
 * When given a vgname with no vgid, vg_read() should also
 * use lvmcache to look up the vgid using the vgname.
 * If the vgid is not found, vg_read() fails.
 *
 * If the lvmcache lookup finds multiple vgids for the
 * vgname, then the lookup fails, causing vg_read() to fail
 * because the intended VG is uncertain.
 *
 * Usually, both vgname and vgid for the intended VG are passed
 * to vg_read(), which means the lvmcache translations
 * between vgname and vgid are not done.
 */
/*
 * Translate a VG name to its vgid via the cache; fails when the name is
 * unknown or maps to more than one vgid (duplicate VG names).
 */
const char *lvmcache_vgid_from_vgname(struct cmd_context *cmd, const char *vgname);

struct device *lvmcache_device_from_pvid(struct cmd_context *cmd, const struct id *pvid, uint64_t *label_sector);

const char *lvmcache_vgname_from_info(struct lvmcache_info *info);
/*
 * lvmcache: process duplicate PVs directly.
 *
 * Previously, duplicate PVs were processed as a side effect
 * of processing the "chosen" PV in lvmcache.  The duplicate
 * PV would be hacked into lvmcache temporarily in place of
 * the chosen PV.  In the old way, we had to always process the
 * "chosen" PV device, even if a duplicate of it was named on the
 * command line.  This meant we were processing a different device
 * than was asked for.  This could be worked around by naming
 * multiple duplicate devs on the command line in which case
 * they were swapped in and out of lvmcache for processing.
 *
 * Now, the duplicate devs are processed directly in their
 * own processing loop.  This means we can remove the old
 * hacks related to processing dups as a side effect of
 * processing the chosen device.  We can now simply process
 * the device that was named on the command line.
 *
 * When the same PVID exists on two or more devices, one device
 * is preferred and used in the VG, and the others are duplicates
 * and are not used in the VG.  The preferred device exists in
 * lvmcache as usual.  The duplicates exist in a special list
 * of unused duplicate devices.
 *
 * The duplicate devs have the "d" attribute and the "duplicate"
 * reporting field displays "duplicate" for them.
 * 'pvs' warns about duplicates, but the formal output only
 * includes the single preferred PV.
 * 'pvs -a' has the same warnings, and the duplicate devs are
 * included in the output.
 * 'pvs <path>' has the same warnings, and displays the named
 * device, whether it is preferred or a duplicate.
 */
const struct format_type *lvmcache_fmt_from_info(struct lvmcache_info *info);

int lvmcache_get_vgnameids(struct cmd_context *cmd,
			   struct dm_list *vgnameids,
			   const char *only_this_vgname,
			   int include_internal);

void lvmcache_drop_metadata(const char *vgname, int drop_precommitted);
void lvmcache_commit_metadata(const char *vgname);

/* Attach cached mdas to a format instance. */
int lvmcache_fid_add_mdas(struct lvmcache_info *info, struct format_instance *fid,
			  const char *id, int id_len);
int lvmcache_fid_add_mdas_pv(struct lvmcache_info *info, struct format_instance *fid);
int lvmcache_fid_add_mdas_vg(struct lvmcache_vginfo *vginfo, struct format_instance *fid);
int lvmcache_populate_pv_fields(struct lvmcache_info *info,
				struct volume_group *vg,
				struct physical_volume *pv);
int lvmcache_check_format(struct lvmcache_info *info, const struct format_type *fmt);

/* Manage metadata areas (mda), data areas (da) and bootloader areas (ba). */
void lvmcache_del_mdas(struct lvmcache_info *info);
void lvmcache_del_das(struct lvmcache_info *info);
void lvmcache_del_bas(struct lvmcache_info *info);
int lvmcache_add_mda(struct lvmcache_info *info, struct device *dev,
		     uint64_t start, uint64_t size, unsigned ignored,
		     struct metadata_area **mda_new);
int lvmcache_add_da(struct lvmcache_info *info, uint64_t start, uint64_t size);
int lvmcache_add_ba(struct lvmcache_info *info, uint64_t start, uint64_t size);

/* PV extension header version and flags. */
void lvmcache_set_ext_version(struct lvmcache_info *info, uint32_t version);
uint32_t lvmcache_ext_version(struct lvmcache_info *info);
void lvmcache_set_ext_flags(struct lvmcache_info *info, uint32_t flags);
uint32_t lvmcache_ext_flags(struct lvmcache_info *info);

const struct format_type *lvmcache_fmt(struct lvmcache_info *info);
struct label *lvmcache_get_label(struct lvmcache_info *info);
struct label *lvmcache_get_dev_label(struct device *dev);
int lvmcache_has_dev_info(struct device *dev);

void lvmcache_update_pv(struct lvmcache_info *info, struct physical_volume *pv,
			const struct format_type *fmt);
int lvmcache_update_das(struct lvmcache_info *info, struct physical_volume *pv);
int lvmcache_update_bas(struct lvmcache_info *info, struct physical_volume *pv);

/* Iterators: invoke fun() with baton for each cached mda/da/ba/PV. */
int lvmcache_foreach_mda(struct lvmcache_info *info,
			 int (*fun) (struct metadata_area *, void *),
			 void *baton);
int lvmcache_foreach_da(struct lvmcache_info *info,
			int (*fun) (struct disk_locn *, void *),
			void *baton);
int lvmcache_foreach_ba(struct lvmcache_info *info,
			int (*fun) (struct disk_locn *, void *),
			void *baton);
int lvmcache_foreach_pv(struct lvmcache_vginfo *vginfo,
			int (*fun) (struct lvmcache_info *, void *), void *baton);

uint64_t lvmcache_device_size(struct lvmcache_info *info);
void lvmcache_set_device_size(struct lvmcache_info *info, uint64_t size);
struct device *lvmcache_device(struct lvmcache_info *info);

unsigned lvmcache_mda_count(struct lvmcache_info *info);
uint64_t lvmcache_smallest_mda_size(struct lvmcache_info *info);

struct metadata_area *lvmcache_get_mda(struct cmd_context *cmd,
				       const char *vgname,
				       struct device *dev,
				       int use_mda_num);

/* Duplicate PV queries. */
bool lvmcache_has_duplicate_devs(void);
int lvmcache_found_duplicate_vgnames(void);
int lvmcache_get_unused_duplicates(struct cmd_context *cmd, struct dm_list *head);
/*
 * lvmcache: process duplicate PVs directly.
 *
 * When the same PVID exists on two or more devices, one device
 * is preferred and used in the VG, and the others are duplicates
 * and are not used in the VG.  The preferred device exists in
 * lvmcache as usual.  The duplicates exist in a special list
 * of unused duplicate devices.  The duplicate devs are processed
 * directly in their own processing loop, rather than being hacked
 * into lvmcache temporarily in place of the chosen PV, so the
 * device named on the command line is the one processed.
 *
 * The duplicate devs have the "d" attribute and the "duplicate"
 * reporting field displays "duplicate" for them.
 * 'pvs' warns about duplicates, but the formal output only
 * includes the single preferred PV.
 * 'pvs -a' has the same warnings, and the duplicate devs are
 * included in the output.
 * 'pvs <path>' has the same warnings, and displays the named
 * device, whether it is preferred or a duplicate.
 *
 * lvmcache: improve duplicate PV handling.
 *
 * Wait to compare and choose alternate duplicate devices until
 * after all devices are scanned.  During scanning, the first
 * duplicate dev is kept in lvmcache, and others are kept in a
 * new list (_found_duplicate_devs).
 *
 * After all devices are scanned, compare all the duplicates
 * available for a given PVID and decide which is best.
 * If the dev used in lvmcache is changed, drop the old dev
 * from lvmcache entirely and rescan the replacement dev.
 * Previously the VG metadata from the old dev was kept in
 * lvmcache and only the dev was replaced.
 *
 * A new config setting devices/allow_changes_with_duplicate_pvs
 * can be set to 0 which disallows modifying a VG or activating
 * LVs in it when the VG contains PVs with duplicate devices.
 * Set to 1 is the old behavior which allowed the VG to be
 * changed.
 *
 * The logic for which of two devs is preferred has changed.
 * The primary goal is to choose a device that is currently
 * in use if the other isn't, e.g. by an active LV.
 * . prefer dev with fs mounted if the other doesn't, else
 * . prefer dev that is dm if the other isn't, else
 * . prefer dev in subsystem if the other isn't
 * If neither device is preferred by these rules, then don't
 * change devices in lvmcache, leaving the one that was found
 * first.
 *
 * The previous logic for preferring a device was:
 * . prefer dev in subsystem if the other isn't, else
 * . prefer dev without holders if the other has holders, else
 * . prefer dev that is dm if the other isn't
 */
int vg_has_duplicate_pvs(struct volume_group *vg);

int lvmcache_contains_lock_type_sanlock(struct cmd_context *cmd);

void lvmcache_get_max_name_lengths(struct cmd_context *cmd,
				   unsigned *pv_max_name_len, unsigned *vg_max_name_len);

int lvmcache_vg_is_foreign(struct cmd_context *cmd, const char *vgname, const char *vgid);

/* Unused-duplicate device list queries and maintenance. */
bool lvmcache_dev_is_unused_duplicate(struct device *dev);
void lvmcache_del_dev_from_duplicates(struct device *dev);
int lvmcache_pvid_in_unused_duplicates(const char *pvid);

bool lvmcache_scan_mismatch(struct cmd_context *cmd, const char *vgname, const char *vgid);

int lvmcache_vginfo_has_pvid(struct lvmcache_vginfo *vginfo, char *pvid);

uint64_t lvmcache_max_metadata_size(void);
void lvmcache_save_metadata_size(uint64_t val);

int dev_in_device_list(struct device *dev, struct dm_list *head);

/* Bad/old/outdated metadata tracking. */
bool lvmcache_has_bad_metadata(struct device *dev);
bool lvmcache_has_old_metadata(struct cmd_context *cmd, const char *vgname, const char *vgid, struct device *dev);

void lvmcache_get_outdated_devs(struct cmd_context *cmd,
				const char *vgname, const char *vgid,
				struct dm_list *devs);
void lvmcache_get_outdated_mdas(struct cmd_context *cmd,
				const char *vgname, const char *vgid,
				struct device *dev,
				struct dm_list **mdas);
bool lvmcache_is_outdated_dev(struct cmd_context *cmd,
			      const char *vgname, const char *vgid,
			      struct device *dev);
void lvmcache_del_outdated_devs(struct cmd_context *cmd,
				const char *vgname, const char *vgid);

void lvmcache_save_bad_mda(struct lvmcache_info *info, struct metadata_area *mda);
void lvmcache_get_bad_mdas(struct cmd_context *cmd,
			   const char *vgname, const char *vgid,
			   struct dm_list *bad_mda_list);

#endif /* _LVM_CACHE_H */