/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "lib/misc/lib.h"
#include "lib/metadata/metadata.h"
#include "import-export.h"
#include "lib/display/display.h"
#include "lib/commands/toolcontext.h"
#include "lib/cache/lvmcache.h"
#include "lib/locking/lvmlockd.h"
#include "lib/metadata/lv_alloc.h"
#include "lib/metadata/pv_alloc.h"
#include "lib/metadata/segtype.h"
#include "lib/format_text/text_import.h"
#include "lib/config/defaults.h"
#include "lib/datastruct/str_list.h"
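
/*
 * Each top-level metadata section (physical_volumes, logical_volumes and
 * the historical LV sections) is imported by a function of this
 * signature, so the VG reader can dispatch over sections uniformly.
 */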
typedef int (*section_fn) (struct cmd_context *cmd,
			   struct format_type *fmt,
			   struct format_instance *fid,
			   struct dm_pool *mem,
			   struct volume_group *vg,
			   struct lvmcache_vgsummary *vgsummary,
			   const struct dm_config_node *pvn,
			   const struct dm_config_node *vgn,
			   struct dm_hash_table *pv_hash,
			   struct dm_hash_table *lv_hash);

#define _read_int32(root, path, result) \
	dm_config_get_uint32(root, path, (uint32_t *) (result))

#define _read_uint32(root, path, result) \
	dm_config_get_uint32(root, path, (result))

#define _read_uint64(root, path, result) \
	dm_config_get_uint64(root, path, (result))

/*
 * Logs an attempt to read an invalid format file.
 */
static void _invalid_format(const char *str)
{
	log_error("Can't process text format file - %s.", str);
}

/*
 * Checks that the config file contains vg metadata and that
 * we recognise the version number.
 */
static int _vsn1_check_version(const struct dm_config_tree *cft)
{
	const struct dm_config_node *cn;
	const struct dm_config_value *cv;

	/*
	 * Check the contents field.
	 */
	if (!(cn = dm_config_find_node(cft->root, CONTENTS_FIELD))) {
		_invalid_format("missing contents field");
		return 0;
	}

	cv = cn->v;
	if (!cv || cv->type != DM_CFG_STRING || strcmp(cv->v.str, CONTENTS_VALUE)) {
		_invalid_format("unrecognised contents field");
		return 0;
	}

	/*
	 * Check the version number.
	 */
	if (!(cn = dm_config_find_node(cft->root, FORMAT_VERSION_FIELD))) {
		_invalid_format("missing version number");
		return 0;
	}

	cv = cn->v;
	if (!cv || cv->type != DM_CFG_INT || cv->v.i != FORMAT_VERSION_VALUE) {
		_invalid_format("unrecognised version number");
		return 0;
	}

	return 1;
}

static int _is_converting(struct logical_volume *lv)
{
	struct lv_segment *seg;

	if (lv_is_mirrored(lv)) {
		seg = first_seg(lv);
		/* Can't use is_temporary_mirror() because the metadata for
		 * seg_lv may not be read in and flags may not be set yet. */
		if (seg_type(seg, 0) == AREA_LV &&
		    strstr(seg_lv(seg, 0)->name, MIRROR_SYNC_LAYER))
			return 1;
	}

	return 0;
}

static int _read_id(struct id *id, const struct dm_config_node *cn, const char *path)
{
	const char *uuid;

	if (!dm_config_get_str(cn, path, &uuid)) {
		log_error("Couldn't find uuid.");
		return 0;
	}

	if (!id_read_format(id, uuid)) {
		log_error("Invalid uuid.");
		return 0;
	}

	return 1;
}

static int _read_flag_config(const struct dm_config_node *n, uint64_t *status, enum pv_vg_lv_e type)
{
	const struct dm_config_value *cv;
	*status = 0;

	if (!dm_config_get_list(n, "status", &cv)) {
		log_error("Could not find status flags.");
		return 0;
	}

	/* For backward compatibility, accept both types of flags. */
	if (!(read_flags(status, type, STATUS_FLAG | SEGTYPE_FLAG, cv))) {
		log_error("Could not read status flags.");
		return 0;
	}

	if (dm_config_get_list(n, "flags", &cv)) {
		if (!(read_flags(status, type, COMPATIBLE_FLAG, cv))) {
			log_error("Could not read flags.");
			return 0;
		}
	}

	return 1;
}

static int _read_str_list(struct dm_pool *mem, struct dm_list *list, const struct dm_config_value *cv)
{
	if (cv->type == DM_CFG_EMPTY_ARRAY)
		return 1;

	do {
		if (cv->type != DM_CFG_STRING) {
			log_error("Found an item that is not a string");
			return 0;
		}

		if (!str_list_add(mem, list, dm_pool_strdup(mem, cv->v.str)))
			return_0;
	} while ((cv = cv->next));

	return 1;
}
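
/*
 * Import one "pvN" subsection of the physical_volumes section.  The
 * subsection key (pv0, pv1, ...) is registered in pv_hash so LV segment
 * areas can later refer to the PV by that name.  A pv section in the
 * text metadata looks roughly like this (values are illustrative):
 *
 *	pv0 {
 *		id = "PV-UUID"
 *		device = "/dev/sda"
 *		status = ["ALLOCATABLE"]
 *		dev_size = 262144
 *		pe_start = 2048
 *		pe_count = 31
 *	}
 */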
static int _read_pv(struct cmd_context *cmd,
		    struct format_type *fmt,
		    struct format_instance *fid,
		    struct dm_pool *mem,
		    struct volume_group *vg,
		    struct lvmcache_vgsummary *vgsummary,
		    const struct dm_config_node *pvn,
		    const struct dm_config_node *vgn __attribute__((unused)),
		    struct dm_hash_table *pv_hash,
		    struct dm_hash_table *lv_hash __attribute__((unused)))
{
	struct physical_volume *pv;
	struct pv_list *pvl;
	const struct dm_config_value *cv;
	const char *str;
	uint64_t size, ba_start;

	if (!(pvl = dm_pool_zalloc(mem, sizeof(*pvl))) ||
	    !(pvl->pv = dm_pool_zalloc(mem, sizeof(*pvl->pv))))
		return_0;

	pv = pvl->pv;

	/*
	 * Add the pv to the pv hash for quick lookup when we read
	 * the lv segments.
	 */
	if (!dm_hash_insert(pv_hash, pvn->key, pv))
		return_0;

	if (!(pvn = pvn->child)) {
		log_error("Empty pv section.");
		return 0;
	}

	if (!_read_id(&pv->id, pvn, "id")) {
		log_error("Couldn't read uuid for physical volume.");
		return 0;
	}

	pv->is_labelled = 1; /* All format_text PVs are labelled. */

	if (!(pv->vg_name = dm_pool_strdup(mem, vg->name)))
		return_0;

	/* Both are struct id. */
	memcpy(&pv->vg_id, &vg->id, sizeof(struct id));

	if (!_read_flag_config(pvn, &pv->status, PV_FLAGS)) {
		log_error("Couldn't read status flags for physical volume.");
		return 0;
	}

	/* Late addition */
	if (dm_config_has_node(pvn, "dev_size") &&
	    !_read_uint64(pvn, "dev_size", &pv->size)) {
		log_error("Couldn't read dev size for physical volume.");
		return 0;
	}

	if (dm_config_get_str(pvn, "device", &str)) {
		if (!(pv->device_hint = dm_pool_strdup(mem, str)))
			log_error("Failed to allocate memory for device hint in read_pv.");
	}

	if (dm_config_get_str(pvn, "device_id", &str)) {
		if (!(pv->device_id = dm_pool_strdup(mem, str)))
			log_error("Failed to allocate memory for device_id in read_pv.");
	}

	if (dm_config_get_str(pvn, "device_id_type", &str)) {
		if (!(pv->device_id_type = dm_pool_strdup(mem, str)))
			log_error("Failed to allocate memory for device_id_type in read_pv.");
	}

	if (!_read_uint64(pvn, "pe_start", &pv->pe_start)) {
		log_error("Couldn't read extent start value (pe_start) "
			  "for physical volume.");
		return 0;
	}

	if (!_read_int32(pvn, "pe_count", &pv->pe_count)) {
		log_error("Couldn't find extent count (pe_count) for "
			  "physical volume.");
		return 0;
	}

	/* Bootloader area is not compulsory - just log_debug for the record if found. */
	ba_start = size = 0;
	_read_uint64(pvn, "ba_start", &ba_start);
	_read_uint64(pvn, "ba_size", &size);
	if (ba_start && size) {
		log_debug_metadata("Found bootloader area specification for PV %s "
				   "in metadata: ba_start=%" PRIu64 ", ba_size=%" PRIu64 ".",
				   pv_dev_name(pv), ba_start, size);
		pv->ba_start = ba_start;
		pv->ba_size = size;
	} else if ((!ba_start && size) || (ba_start && !size)) {
		log_error("Found incomplete bootloader area specification "
			  "for PV %s in metadata.", pv_dev_name(pv));
		return 0;
	}

	dm_list_init(&pv->tags);
	dm_list_init(&pv->segments);

	/* Optional tags */
	if (dm_config_get_list(pvn, "tags", &cv) &&
	    !(_read_str_list(mem, &pv->tags, cv))) {
		log_error("Couldn't read tags for physical volume %s in %s.",
			  pv_dev_name(pv), vg->name);
		return 0;
	}

	pv->pe_size = vg->extent_size;
	pv->pe_alloc_count = 0;
	pv->pe_align = 0;
	pv->fmt = fmt;

	if (!alloc_pv_segment_whole_pv(mem, pv))
		return_0;

	vg->extent_count += pv->pe_count;
	vg->free_count += pv->pe_count;

	add_pvl_to_vgs(vg, pvl);

	return 1;
}
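
/*
 * Reduced variant of _read_pv() used when only a VG summary is wanted:
 * it fills a pv_list entry on vgsummary->pvsummaries and downgrades an
 * unreadable uuid or dev_size to a warning instead of failing.
 */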
static int _read_pvsummary(struct cmd_context *cmd,
			   struct format_type *fmt,
			   struct format_instance *fid,
			   struct dm_pool *mem,
			   struct volume_group *vg,
			   struct lvmcache_vgsummary *vgsummary,
			   const struct dm_config_node *pvn,
			   const struct dm_config_node *vgn __attribute__((unused)),
			   struct dm_hash_table *pv_hash __attribute__((unused)),
			   struct dm_hash_table *lv_hash __attribute__((unused)))
{
	struct physical_volume *pv;
	struct pv_list *pvl;
	const char *str;

	if (!(pvl = dm_pool_zalloc(mem, sizeof(*pvl))) ||
	    !(pvl->pv = dm_pool_zalloc(mem, sizeof(*pvl->pv))))
		return_0;

	pv = pvl->pv;

	if (!(pvn = pvn->child)) {
		log_error("Empty pv section.");
		return 0;
	}

	if (!_read_id(&pv->id, pvn, "id"))
		log_warn("Couldn't read uuid for physical volume.");

	if (dm_config_has_node(pvn, "dev_size") &&
	    !_read_uint64(pvn, "dev_size", &pv->size))
		log_warn("Couldn't read dev size for physical volume.");

	if (dm_config_get_str(pvn, "device", &str)) {
		if (!(pv->device_hint = dm_pool_strdup(mem, str)))
			log_error("Failed to allocate memory for device hint in read_pv_sum.");
	}

	if (dm_config_get_str(pvn, "device_id", &str)) {
		if (!(pv->device_id = dm_pool_strdup(mem, str)))
			log_error("Failed to allocate memory for device_id in read_pv_sum.");
	}

	if (dm_config_get_str(pvn, "device_id_type", &str)) {
		if (!(pv->device_id_type = dm_pool_strdup(mem, str)))
			log_error("Failed to allocate memory for device_id_type in read_pv_sum.");
	}

	dm_list_add(&vgsummary->pvsummaries, &pvl->list);

	return 1;
}
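
/*
 * Keep lv->segments ordered by starting logical extent; lv->le_count
 * grows by the length of each segment appended at the end of the list.
 */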
static void _insert_segment(struct logical_volume *lv, struct lv_segment *seg)
{
	struct lv_segment *comp;

	dm_list_iterate_items(comp, &lv->segments) {
		if (comp->le > seg->le) {
			dm_list_add(&comp->list, &seg->list);
			return;
		}
	}

	lv->le_count += seg->len;
	dm_list_add(&lv->segments, &seg->list);
}
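
/*
 * Import one segment subsection of an lv section.  A segment looks
 * roughly like this (values are illustrative; type-specific fields such
 * as stripes are handled by the segtype's own text_import method):
 *
 *	segment1 {
 *		start_extent = 0
 *		extent_count = 10
 *		type = "striped"
 *		stripe_count = 1
 *		stripes = ["pv0", 0]
 *	}
 */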
static int _read_segment(struct cmd_context *cmd,
			 struct format_type *fmt,
			 struct format_instance *fid,
			 struct dm_pool *mem,
			 struct logical_volume *lv, const struct dm_config_node *sn,
			 struct dm_hash_table *pv_hash,
			 struct dm_hash_table *lv_hash)
{
	uint32_t area_count = 0u;
	struct lv_segment *seg;
	const struct dm_config_node *sn_child = sn->child;
	const struct dm_config_value *cv;
	uint32_t area_extents, start_extent, extent_count, reshape_count, data_copies;
	struct segment_type *segtype;
	const char *segtype_str;
	char *segtype_with_flags;

	if (!sn_child) {
		log_error("Empty segment section.");
		return 0;
	}

	if (!_read_int32(sn_child, "start_extent", &start_extent)) {
		log_error("Couldn't read 'start_extent' for segment '%s' "
			  "of logical volume %s.", sn->key, lv->name);
		return 0;
	}

	if (!_read_int32(sn_child, "extent_count", &extent_count)) {
		log_error("Couldn't read 'extent_count' for segment '%s' "
			  "of logical volume %s.", sn->key, lv->name);
		return 0;
	}

	if (!_read_int32(sn_child, "reshape_count", &reshape_count))
		reshape_count = 0;

	if (!_read_int32(sn_child, "data_copies", &data_copies))
		data_copies = 1;

	segtype_str = SEG_TYPE_NAME_STRIPED;

	if (!dm_config_get_str(sn_child, "type", &segtype_str)) {
		log_error("Segment type must be a string.");
		return 0;
	}

	/* Locally duplicate to parse out status flag bits. */
	if (!(segtype_with_flags = dm_pool_strdup(mem, segtype_str))) {
		log_error("Cannot duplicate segtype string.");
		return 0;
	}

	if (!read_segtype_lvflags(&lv->status, segtype_with_flags)) {
		log_error("Couldn't read segtype for logical volume %s.",
			  display_lvname(lv));
		return 0;
	}

	if (!(segtype = get_segtype_from_string(cmd, segtype_with_flags)))
		return_0;

	/* Can drop the temporary string here as nothing has allocated from VGMEM meanwhile. */
	dm_pool_free(mem, segtype_with_flags);

	if (segtype->ops->text_import_area_count &&
	    !segtype->ops->text_import_area_count(sn_child, &area_count))
		return_0;

	area_extents = segtype->parity_devs ?
		       raid_rimage_extents(segtype, extent_count, area_count - segtype->parity_devs, data_copies) : extent_count;

	if (!(seg = alloc_lv_segment(segtype, lv, start_extent,
				     extent_count, reshape_count, 0, 0, NULL, area_count,
				     area_extents, data_copies, 0, 0, 0, NULL))) {
		log_error("Segment allocation failed");
		return 0;
	}

	if (seg->segtype->ops->text_import &&
	    !seg->segtype->ops->text_import(seg, sn_child, pv_hash, lv_hash))
		return_0;

	/* Optional tags */
	if (dm_config_get_list(sn_child, "tags", &cv) &&
	    !(_read_str_list(mem, &seg->tags, cv))) {
		log_error("Couldn't read tags for a segment of %s/%s.",
			  lv->vg->name, lv->name);
		return 0;
	}

	/*
	 * Insert into correct part of segment list.
	 */
	_insert_segment(lv, seg);

	if (seg_is_mirror(seg))
		lv->status |= MIRROR;

	if (seg_is_mirrored(seg))
		lv->status |= MIRRORED;

	if (seg_is_raid(seg))
		lv->status |= RAID;

	if (seg_is_virtual(seg))
		lv->status |= VIRTUAL;

	if (!seg_is_raid(seg) && _is_converting(lv))
		lv->status |= CONVERTING;

	return 1;
}
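
/*
 * Import the "areas" array of a segment.  Entries come in pairs of
 * volume name and starting extent, e.g. ["pv0", 0, "pv1", 1280].
 * Each name is looked up in pv_hash first; failing that, it is resolved
 * as another LV of the same VG (for stacked LVs).
 */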
int text_import_areas(struct lv_segment *seg, const struct dm_config_node *sn,
		      const struct dm_config_value *cv, struct dm_hash_table *pv_hash,
		      uint64_t status)
{
	unsigned int s;
	struct logical_volume *lv1;
	struct physical_volume *pv;
	const char *seg_name = dm_config_parent_name(sn);

	if (!seg->area_count) {
		log_error("Zero areas not allowed for segment %s", seg_name);
		return 0;
	}

	for (s = 0; cv && s < seg->area_count; s++, cv = cv->next) {

		/* first we read the pv */
		if (cv->type != DM_CFG_STRING) {
			log_error("Bad volume name in areas array for segment %s.", seg_name);
			return 0;
		}

		if (!cv->next) {
			log_error("Missing offset in areas array for segment %s.", seg_name);
			return 0;
		}

		if (cv->next->type != DM_CFG_INT) {
			log_error("Bad offset in areas array for segment %s.", seg_name);
			return 0;
		}

		/* FIXME Cope if LV not yet read in */
		if ((pv = dm_hash_lookup(pv_hash, cv->v.str))) {
			if (!set_lv_segment_area_pv(seg, s, pv, (uint32_t) cv->next->v.i))
				return_0;
		} else if ((lv1 = find_lv(seg->lv->vg, cv->v.str))) {
			if (!set_lv_segment_area_lv(seg, s, lv1,
						    (uint32_t) cv->next->v.i,
						    status))
				return_0;
		} else {
			log_error("Couldn't find volume '%s' "
				  "for segment '%s'.",
				  cv->v.str ? : "NULL", seg_name);
			return 0;
		}

		cv = cv->next;
	}

	/*
	 * Check we read the correct number of stripes.
	 */
	if (cv || (s < seg->area_count)) {
		log_error("Incorrect number of areas in area array "
			  "for segment '%s'.", seg_name);
		return 0;
	}

	return 1;
}
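
/*
 * Import all segment subsections of an lv section and cross-check the
 * number found against the section's segment_count field.
 */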
static int _read_segments(struct cmd_context *cmd,
			  struct format_type *fmt,
			  struct format_instance *fid,
			  struct dm_pool *mem,
			  struct logical_volume *lv, const struct dm_config_node *lvn,
			  struct dm_hash_table *pv_hash,
			  struct dm_hash_table *lv_hash)
{
	const struct dm_config_node *sn;
	int count = 0, seg_count;

	for (sn = lvn; sn; sn = sn->sib) {

		/*
		 * All sub-sections are assumed to be segments.
		 */
		if (!sn->v) {
			if (!_read_segment(cmd, fmt, fid, mem, lv, sn, pv_hash, lv_hash))
				return_0;

			count++;
		}
		/* FIXME Remove this restriction */
		if (lv_is_snapshot(lv) && count > 1) {
			log_error("Only one segment permitted for snapshot");
			return 0;
		}
	}

	if (!_read_int32(lvn, "segment_count", &seg_count)) {
		log_error("Couldn't read segment count for logical volume %s.",
			  lv->name);
		return 0;
	}

	if (seg_count != count) {
		log_error("segment_count and actual number of segments "
			  "disagree for logical volume %s.", lv->name);
		return 0;
	}

	/*
	 * Check there are no gaps or overlaps in the lv.
	 */
	if (!check_lv_segments_incomplete_vg(lv))
		return_0;

	/*
	 * Merge segments in case someone has been editing things by hand.
	 */
	if (!lv_merge_segments(lv))
		return_0;

	return 1;
}
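
/*
 * First pass over an lv section: create the LV, link it into the VG and
 * import everything except the segments, which are read later by
 * _read_lvsegs() once all LVs and PVs exist.  An lv section looks
 * roughly like this (values are illustrative):
 *
 *	lvol0 {
 *		id = "LV-UUID"
 *		status = ["READ", "WRITE", "VISIBLE"]
 *		creation_time = 1627053521
 *		creation_host = "host1"
 *		segment_count = 1
 *		...
 *	}
 */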
static int _read_lvnames(struct cmd_context *cmd,
			 struct format_type *fmt,
			 struct format_instance *fid __attribute__((unused)),
			 struct dm_pool *mem,
			 struct volume_group *vg,
			 struct lvmcache_vgsummary *vgsummary,
			 const struct dm_config_node *lvn,
			 const struct dm_config_node *vgn __attribute__((unused)),
			 struct dm_hash_table *pv_hash __attribute__((unused)),
			 struct dm_hash_table *lv_hash)
{
	struct logical_volume *lv;
	const char *str;
	const struct dm_config_value *cv;
	const char *hostname;
	uint64_t timestamp = 0, lvstatus;

	if (!(lv = alloc_lv(mem)))
		return_0;

	if (!link_lv_to_vg(vg, lv))
		return_0;

	if (!(lv->name = dm_pool_strdup(mem, lvn->key)))
		return_0;

	log_debug_metadata("Importing logical volume %s.", lv->name);

	if (!(lvn = lvn->child)) {
		log_error("Empty logical volume section for %s.",
			  display_lvname(lv));
		return 0;
	}

	if (!_read_flag_config(lvn, &lvstatus, LV_FLAGS)) {
		log_error("Couldn't read status flags for logical volume %s.",
			  display_lvname(lv));
		return 0;
	}

	if (lvstatus & LVM_WRITE_LOCKED) {
		lvstatus |= LVM_WRITE;
		lvstatus &= ~LVM_WRITE_LOCKED;
	}

	lv->status = lvstatus;

	if (dm_config_has_node(lvn, "creation_time")) {
		if (!_read_uint64(lvn, "creation_time", &timestamp)) {
			log_error("Invalid creation_time for logical volume %s.",
				  display_lvname(lv));
			return 0;
		}
		if (!dm_config_get_str(lvn, "creation_host", &hostname)) {
			log_error("Couldn't read creation_host for logical volume %s.",
				  display_lvname(lv));
			return 0;
		}
	} else if (dm_config_has_node(lvn, "creation_host")) {
		log_error("Missing creation_time for logical volume %s.",
			  display_lvname(lv));
		return 0;
	}

	/*
	 * The LV lock_args string is generated in lvmlockd, and the content
	 * depends on the lock_type.
	 *
	 * lock_type dlm does not use LV lock_args, so the LV lock_args field
	 * is just set to "dlm".
	 *
	 * lock_type sanlock uses the LV lock_args field to save the
	 * location on disk of that LV's sanlock lock.  The disk name is
	 * specified in the VG lock_args.  The lock_args string begins
	 * with a version number, e.g. 1.0.0, followed by a colon, followed
	 * by a number.  The number is the offset on disk where sanlock is
	 * told to find the LV's lock.
	 * e.g. lock_args = 1.0.0:70254592
	 * means that the lock is located at offset 70254592.
	 *
	 * The lvmlockd code for each specific lock manager also validates
	 * the lock_args before using it to access the lock manager.
	 */
	if (dm_config_get_str(lvn, "lock_args", &str)) {
		if (!(lv->lock_args = dm_pool_strdup(mem, str)))
			return_0;
	}

	if (dm_config_get_str(lvn, "allocation_policy", &str)) {
		lv->alloc = get_alloc_from_string(str);
		if (lv->alloc == ALLOC_INVALID) {
			log_warn("WARNING: Ignoring unrecognised allocation policy %s for LV %s.",
				 str, display_lvname(lv));
			lv->alloc = ALLOC_INHERIT;
		}
	} else
		lv->alloc = ALLOC_INHERIT;

	if (dm_config_get_str(lvn, "profile", &str)) {
		log_debug_metadata("Adding profile configuration %s for LV %s.",
				   str, display_lvname(lv));
		if (!(lv->profile = add_profile(cmd, str, CONFIG_PROFILE_METADATA))) {
			log_error("Failed to add configuration profile %s for LV %s.",
				  str, display_lvname(lv));
			return 0;
		}
	}

	if (!_read_int32(lvn, "read_ahead", &lv->read_ahead))
		/* If not present, choice of auto or none is configurable */
		lv->read_ahead = cmd->default_settings.read_ahead;
	else {
		switch (lv->read_ahead) {
		case 0:
			lv->read_ahead = DM_READ_AHEAD_AUTO;
			break;
		case UINT32_C(-1):
			lv->read_ahead = DM_READ_AHEAD_NONE;
			break;
		default:
			;
		}
	}

	/* Optional tags */
	if (dm_config_get_list(lvn, "tags", &cv) &&
	    !(_read_str_list(mem, &lv->tags, cv))) {
		log_error("Couldn't read tags for logical volume %s.",
			  display_lvname(lv));
		return 0;
	}

	if (!dm_hash_insert(lv_hash, lv->name, lv))
		return_0;

	if (timestamp && !lv_set_creation(lv, hostname, timestamp))
		return_0;

	if (!lv_is_visible(lv) && strstr(lv->name, "_pmspare")) {
		if (vg->pool_metadata_spare_lv) {
			log_error("Couldn't use another pool metadata spare "
				  "logical volume %s.", display_lvname(lv));
			return 0;
		}

		log_debug_metadata("Logical volume %s is pool metadata spare.",
				   display_lvname(lv));
		lv->status |= POOL_METADATA_SPARE;
		vg->pool_metadata_spare_lv = lv;
	}

	if (!lv_is_visible(lv) && !strcmp(lv->name, LOCKD_SANLOCK_LV_NAME)) {
		log_debug_metadata("Logical volume %s is sanlock lv.",
				   display_lvname(lv));
		lv->status |= LOCKD_SANLOCK_LV;
		vg->sanlock_lv = lv;
	}

	return 1;
}
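
/*
 * Import one removed (historical) LV entry: allocate the
 * generic_logical_volume wrapper and restore its name, uuid and
 * creation/removal timestamps.
 */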
static int _read_historical_lvnames(struct cmd_context *cmd,
				    struct format_type *fmt,
				    struct format_instance *fid __attribute__((unused)),
				    struct dm_pool *mem,
				    struct volume_group *vg,
				    struct lvmcache_vgsummary *vgsummary,
				    const struct dm_config_node *hlvn,
				    const struct dm_config_node *vgn __attribute__((unused)),
				    struct dm_hash_table *pv_hash __attribute__((unused)),
				    struct dm_hash_table *lv_hash __attribute__((unused)))
{
	struct generic_logical_volume *glv;
	struct glv_list *glvl;
	const char *str;
	uint64_t timestamp;

	if (!(glv = dm_pool_zalloc(mem, sizeof(struct generic_logical_volume))) ||
	    !(glv->historical = dm_pool_zalloc(mem, sizeof(struct historical_logical_volume))) ||
	    !(glvl = dm_pool_zalloc(mem, sizeof(struct glv_list)))) {
		log_error("Removed logical volume structure allocation failed");
		goto bad;
	}

	glv->is_historical = 1;
	glv->historical->vg = vg;
	dm_list_init(&glv->historical->indirect_glvs);

	if (!(glv->historical->name = dm_pool_strdup(mem, hlvn->key)))
		goto_bad;

	if (!(hlvn = hlvn->child)) {
		log_error("Empty removed logical volume section.");
		goto bad;
	}

	if (!_read_id(&glv->historical->lvid.id[1], hlvn, "id")) {
		log_error("Couldn't read uuid for removed logical volume %s in vg %s.",
			  glv->historical->name, vg->name);
		return 0;
	}

	memcpy(&glv->historical->lvid.id[0], &glv->historical->vg->id, sizeof(glv->historical->lvid.id[0]));

	if (dm_config_get_str(hlvn, "name", &str)) {
		if (!(glv->historical->name = dm_pool_strdup(mem, str)))
			goto_bad;
	}

	if (dm_config_has_node(hlvn, "creation_time")) {
		if (!_read_uint64(hlvn, "creation_time", &timestamp)) {
			log_error("Invalid creation_time for removed logical volume %s.", str);
			goto bad;
		}
		glv->historical->timestamp = timestamp;
	}

	if (dm_config_has_node(hlvn, "removal_time")) {
		if (!_read_uint64(hlvn, "removal_time", &timestamp)) {
			log_error("Invalid removal_time for removed logical volume %s.", str);
			goto bad;
		}
		glv->historical->timestamp_removed = timestamp;
	}

	glvl->glv = glv;
	dm_list_add(&vg->historical_lvs, &glvl->list);

	return 1;
bad:
	if (glv)
		dm_pool_free(mem, glv);
	return 0;
}
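
/*
 * Resolve the "origin" and "descendants" references of a historical LV
 * entry into pointers between live and historical LVs.  This pass must
 * run after all LVs and historical LVs have been imported.
 */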
2019-09-26 19:50:51 +03:00
static int _read_historical_lvnames_interconnections(struct cmd_context *cmd,
				    struct format_type *fmt,
				    struct format_instance *fid __attribute__((unused)),
				    struct dm_pool *mem,
				    struct volume_group *vg,
				    struct lvmcache_vgsummary *vgsummary,
				    const struct dm_config_node *hlvn,
				    const struct dm_config_node *vgn __attribute__((unused)),
				    struct dm_hash_table *pv_hash __attribute__((unused)),
				    struct dm_hash_table *lv_hash __attribute__((unused)))
{
	const char *historical_lv_name, *origin_name = NULL;
	struct generic_logical_volume *glv, *origin_glv, *descendant_glv;
	struct logical_volume *tmp_lv;
	struct glv_list *glvl = NULL;
	const struct dm_config_value *descendants = NULL;

	historical_lv_name = hlvn->key;
	hlvn = hlvn->child;

	if (!(glv = find_historical_glv(vg, historical_lv_name, 0, NULL))) {
		log_error("Unknown historical logical volume %s/%s%s",
			  vg->name, HISTORICAL_LV_PREFIX, historical_lv_name);
		goto bad;
	}

	if (dm_config_has_node(hlvn, "origin")) {
		if (!dm_config_get_str(hlvn, "origin", &origin_name)) {
			log_error("Couldn't read origin for historical logical "
				  "volume %s/%s%s", vg->name, HISTORICAL_LV_PREFIX, historical_lv_name);
			goto bad;
		}
	}

	if (dm_config_has_node(hlvn, "descendants")) {
		if (!dm_config_get_list(hlvn, "descendants", &descendants)) {
			log_error("Couldn't get descendants list for historical logical "
				  "volume %s/%s%s", vg->name, HISTORICAL_LV_PREFIX, historical_lv_name);
			goto bad;
		}
		if (descendants->type == DM_CFG_EMPTY_ARRAY) {
			log_error("Found empty descendants list for historical logical "
				  "volume %s/%s%s", vg->name, HISTORICAL_LV_PREFIX, historical_lv_name);
			goto bad;
		}
	}

	if (!origin_name && !descendants)
		/* no interconnections */
		return 1;

	if (origin_name) {
		if (!(glvl = dm_pool_zalloc(mem, sizeof(struct glv_list)))) {
			log_error("Failed to allocate list item for historical logical "
				  "volume %s/%s%s", vg->name, HISTORICAL_LV_PREFIX, historical_lv_name);
			goto bad;
		}
		glvl->glv = glv;

		if (!strncmp(origin_name, HISTORICAL_LV_PREFIX, strlen(HISTORICAL_LV_PREFIX))) {
			if (!(origin_glv = find_historical_glv(vg, origin_name + strlen(HISTORICAL_LV_PREFIX), 0, NULL))) {
				log_error("Unknown origin %s for historical logical volume %s/%s%s",
					  origin_name, vg->name, HISTORICAL_LV_PREFIX, historical_lv_name);
				goto bad;
			}
		} else {
			if (!(tmp_lv = find_lv(vg, origin_name))) {
				log_error("Unknown origin %s for historical logical volume %s/%s%s",
					  origin_name, vg->name, HISTORICAL_LV_PREFIX, historical_lv_name);
				goto bad;
			}

			if (!(origin_glv = get_or_create_glv(mem, tmp_lv, NULL)))
				goto bad;
		}

		glv->historical->indirect_origin = origin_glv;
		if (origin_glv->is_historical)
			dm_list_add(&origin_glv->historical->indirect_glvs, &glvl->list);
		else
			dm_list_add(&origin_glv->live->indirect_glvs, &glvl->list);
	}

	if (descendants) {
		do {
			if (descendants->type != DM_CFG_STRING) {
				log_error("Descendant value for historical logical volume %s/%s%s "
					  "is not a string.", vg->name, HISTORICAL_LV_PREFIX, historical_lv_name);
				goto bad;
			}

			if (!(tmp_lv = find_lv(vg, descendants->v.str))) {
				log_error("Failed to find descendant %s for historical LV %s.",
					  descendants->v.str, historical_lv_name);
				goto bad;
			}

			if (!(descendant_glv = get_or_create_glv(mem, tmp_lv, NULL)))
				goto bad;

			if (!add_glv_to_indirect_glvs(mem, glv, descendant_glv))
				goto bad;

			descendants = descendants->next;
		} while (descendants);
	}

	return 1;
bad:
	if (glvl)
		dm_pool_free(mem, glvl);
	return 0;
}

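/*
 * Illustrative effect of the pass above (hypothetical history): given
 * a historical entry "-lvol0" with origin = "lvol_base" and
 * descendants = ["lvol1"], the pass points -lvol0's indirect_origin at
 * lvol_base, adds -lvol0 to lvol_base's indirect_glvs list, and links
 * lvol1 and -lvol0 the same way via add_glv_to_indirect_glvs().
 */
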
static int _read_lvsegs(struct cmd_context *cmd,
			struct format_type *fmt,
			struct format_instance *fid,
			struct dm_pool *mem,
			struct volume_group *vg,
			struct lvmcache_vgsummary *vgsummary,
			const struct dm_config_node *lvn,
			const struct dm_config_node *vgn __attribute__((unused)),
			struct dm_hash_table *pv_hash,
			struct dm_hash_table *lv_hash)
{
	struct logical_volume *lv;

	if (!(lv = dm_hash_lookup(lv_hash, lvn->key))) {
		log_error("Lost logical volume reference %s", lvn->key);
		return 0;
	}

	if (!(lvn = lvn->child)) {
		log_error("Empty logical volume section.");
		return 0;
	}

	/* FIXME: read full lvid */
	if (!_read_id(&lv->lvid.id[1], lvn, "id")) {
		log_error("Couldn't read uuid for logical volume %s.",
			  display_lvname(lv));
		return 0;
	}

	memcpy(&lv->lvid.id[0], &lv->vg->id, sizeof(lv->lvid.id[0]));

	if (!_read_segments(cmd, fmt, fid, mem, lv, lvn, pv_hash, lv_hash))
		return_0;

	lv->size = (uint64_t) lv->le_count * (uint64_t) vg->extent_size;

	lv->minor = -1;
	lv->major = -1;

	if (lv->status & FIXED_MINOR) {
		if (!_read_int32(lvn, "minor", &lv->minor)) {
			log_error("Couldn't read minor number for logical volume %s.",
				  display_lvname(lv));
			return 0;
		}

		if (!dm_config_has_node(lvn, "major"))
			/* If major is missing, pick default */
			lv->major = cmd->dev_types->device_mapper_major;
		else if (!_read_int32(lvn, "major", &lv->major)) {
			log_warn("WARNING: Couldn't read major number for logical "
				 "volume %s.", display_lvname(lv));
			lv->major = cmd->dev_types->device_mapper_major;
		}

		if (!validate_major_minor(cmd, fmt, lv->major, lv->minor)) {
			log_warn("WARNING: Ignoring invalid major, minor number for "
				 "logical volume %s.", display_lvname(lv));
			lv->major = lv->minor = -1;
		}
	}

	return 1;
}

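/*
 * For reference: an LV's full identifier is the VG uuid followed by
 * the LV uuid, so _read_lvsegs() above reads only the LV half ("id")
 * from the metadata and copies vg->id into the first half (uuids
 * below are illustrative only):
 *
 *	lvid.id[0] = VG uuid, e.g. "0zd3nU-..."
 *	lvid.id[1] = LV uuid, e.g. "Lq2Dlo-..."
 */
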
static int _read_sections(struct cmd_context *cmd,
			  const struct format_type *fmt,
			  struct format_instance *fid,
			  struct dm_pool *mem,
			  const char *section, section_fn fn,
			  struct volume_group *vg,
			  struct lvmcache_vgsummary *vgsummary,
			  const struct dm_config_node *vgn,
			  struct dm_hash_table *pv_hash,
			  struct dm_hash_table *lv_hash,
			  int optional)
{
	const struct dm_config_node *n;

	if (!dm_config_get_section(vgn, section, &n)) {
		if (!optional) {
			log_error("Couldn't find section '%s'.", section);
			return 0;
		}
		return 1;
	}

	for (n = n->child; n; n = n->sib) {
		if (!fn(cmd, (struct format_type *) fmt, fid, mem, vg, vgsummary, n, vgn, pv_hash, lv_hash))
			return_0;
	}

	return 1;
}

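/*
 * Sketch of the config tree _read_sections() walks (section contents
 * abbreviated, values illustrative only):
 *
 *	vg0 {
 *		physical_volumes {
 *			pv0 { id = "..." ... }
 *			pv1 { id = "..." ... }
 *		}
 *		logical_volumes {
 *			lvol0 { id = "..." ... }
 *		}
 *	}
 *
 * Given section = "physical_volumes", fn() is invoked once per child
 * node (pv0, pv1, ...), with pv_hash/lv_hash carrying state between
 * the passes.
 */
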
static struct volume_group *_read_vg(struct cmd_context *cmd,
				     const struct format_type *fmt,
				     struct format_instance *fid,
				     const struct dm_config_tree *cft)
{
	struct dm_pool *mem;
	const struct dm_config_node *vgn;
	const struct dm_config_value *cv;
	const char *str, *format_str, *system_id;
	struct volume_group *vg;
	struct dm_hash_table *pv_hash = NULL, *lv_hash = NULL;
	uint64_t vgstatus;

	/* skip any top-level values */
	for (vgn = cft->root; (vgn && vgn->v); vgn = vgn->sib)
		;

	if (!vgn) {
		log_error("Couldn't find volume group in file.");
		return NULL;
	}

	if (!(vg = alloc_vg("read_vg", cmd, vgn->key)))
		return_NULL;

	mem = vg->vgmem;

	/*
	 * The pv hash maps pv section names to the
	 * struct physical_volume being built for each.
	 */
	if (!(pv_hash = dm_hash_create(59))) {
		log_error("Couldn't create pv hash table.");
		goto bad;
	}

	/*
	 * The lv hash maps lv section names to the
	 * struct logical_volume being built for each.
	 */
	if (!(lv_hash = dm_hash_create(1023))) {
		log_error("Couldn't create lv hash table.");
		goto bad;
	}
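
	/*
	 * Sketch of how the section parsers use these tables (hypothetical
	 * fragment, not the actual parser code): each name-reading pass
	 * inserts the structure it builds under its metadata section name,
	 * and later passes resolve cross-references by name:
	 *
	 *	if (!dm_hash_insert(lv_hash, lvn->key, lv))
	 *		return_0;
	 *	...
	 *	lv = dm_hash_lookup(lv_hash, lvn->key);
	 */
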
	vgn = vgn->child;

	/* A backup file might be a backup of a different format */
	if (dm_config_get_str(vgn, "format", &format_str) &&
	    !(vg->original_fmt = get_format_by_name(cmd, format_str))) {
		log_error("Unrecognised format %s for volume group %s.", format_str, vg->name);
		goto bad;
	}

	if (dm_config_get_str(vgn, "lock_type", &str)) {
		if (!(vg->lock_type = dm_pool_strdup(mem, str)))
			goto bad;
	}

	/*
	 * The VG lock_args string is generated in lvmlockd, and the content
	 * depends on the lock_type.  lvmlockd begins the lock_args string
	 * with a version number, e.g. 1.0.0, followed by a colon, followed
	 * by a string that depends on the lock manager.  The string after
	 * the colon is information needed to use the lock manager for the VG.
	 *
	 * For sanlock, the string is the name of the internal LV used to store
	 * sanlock locks.  lvmlockd needs to know where the locks are located
	 * so it can pass that location to sanlock which needs to access the locks.
	 * e.g. lock_args = 1.0.0:lvmlock
	 * means that the locks are located on the LV "lvmlock".
	 *
	 * For dlm, the string is the dlm cluster name.  lvmlockd needs to use
	 * a dlm lockspace in this cluster to use the VG.
	 * e.g. lock_args = 1.0.0:foo
	 * means that the host needs to be a member of the cluster "foo".
	 *
	 * The lvmlockd code for each specific lock manager also validates
	 * the lock_args before using it to access the lock manager.
	 */
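
	/*
	 * Minimal sketch of splitting a lock_args value (hypothetical
	 * fragment; the real parsing and validation happen in lvmlockd):
	 *
	 *	const char *colon = strchr(vg->lock_args, ':');
	 *
	 * The part before the colon is the version, e.g. "1.0.0"; the
	 * part after it (colon + 1) is the lock-manager-specific string,
	 * e.g. "lvmlock" (the sanlock LV name) or "foo" (the dlm cluster).
	 */
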
	if (dm_config_get_str(vgn, "lock_args", &str)) {
		if (!(vg->lock_args = dm_pool_strdup(mem, str)))
			goto bad;
	}

	if (!_read_id(&vg->id, vgn, "id")) {
		log_error("Couldn't read uuid for volume group %s.", vg->name);
		goto bad;
	}

	if (!_read_int32(vgn, "seqno", &vg->seqno)) {
		log_error("Couldn't read 'seqno' for volume group %s.",
			  vg->name);
		goto bad;
	}

	if (!_read_flag_config(vgn, &vgstatus, VG_FLAGS)) {
		log_error("Error reading flags of volume group %s.",
			  vg->name);
		goto bad;
	}

	if (dm_config_get_str(vgn, "system_id", &system_id)) {
		if (!(vg->system_id = dm_pool_strdup(mem, system_id))) {
			log_error("Failed to allocate memory for system_id in _read_vg.");
			goto bad;
		}
	}

	/*
	 * Metadata for a VG with a system_id or lock_type is written with
	 * the new WRITE_LOCKED flag in place of the old WRITE status flag,
	 * so that older lvm versions, which do not obey those restrictions,
	 * treat the VG as read-only.  Versions that recognise WRITE_LOCKED
	 * translate it back to WRITE here and enforce the system_id and
	 * lock_type rules.
	 */
	if (vgstatus & LVM_WRITE_LOCKED) {
		vgstatus |= LVM_WRITE;
		vgstatus &= ~LVM_WRITE_LOCKED;
	}
	vg->status = vgstatus;

	if (!_read_int32(vgn, "extent_size", &vg->extent_size)) {
		log_error("Couldn't read extent size for volume group %s.",
			  vg->name);
		goto bad;
	}

	/*
	 * 'extent_count' and 'free_count' get filled in
	 * implicitly when reading in the pv's and lv's.
	 */

	if (!_read_int32(vgn, "max_lv", &vg->max_lv)) {
		log_error("Couldn't read 'max_lv' for volume group %s.",
			  vg->name);
		goto bad;
	}

	if (!_read_int32(vgn, "max_pv", &vg->max_pv)) {
		log_error("Couldn't read 'max_pv' for volume group %s.",
			  vg->name);
		goto bad;
	}

	if (dm_config_get_str(vgn, "allocation_policy", &str)) {
		vg->alloc = get_alloc_from_string(str);
		if (vg->alloc == ALLOC_INVALID) {
			log_warn("WARNING: Ignoring unrecognised allocation policy %s for VG %s", str, vg->name);
			vg->alloc = ALLOC_NORMAL;
		}
	}

	if (dm_config_get_str(vgn, "profile", &str)) {
		log_debug_metadata("Adding profile configuration %s for VG %s.", str, vg->name);
		if (!(vg->profile = add_profile(cmd, str, CONFIG_PROFILE_METADATA))) {
			log_error("Failed to add configuration profile %s for VG %s", str, vg->name);
			goto bad;
		}
	}

	if (!_read_uint32(vgn, "metadata_copies", &vg->mda_copies))
		vg->mda_copies = DEFAULT_VGMETADATACOPIES;

	if (!_read_sections(cmd, fmt, fid, mem, "physical_volumes", _read_pv, vg, NULL,
			    vgn, pv_hash, lv_hash, 0)) {
		log_error("Couldn't find all physical volumes for volume "
			  "group %s.", vg->name);
		goto bad;
	}

	/* Optional tags */
	if (dm_config_get_list(vgn, "tags", &cv) &&
	    !(_read_str_list(mem, &vg->tags, cv))) {
		log_error("Couldn't read tags for volume group %s.", vg->name);
		goto bad;
	}

	if (!_read_sections(cmd, fmt, fid, mem, "logical_volumes", _read_lvnames, vg, NULL,
			    vgn, pv_hash, lv_hash, 1)) {
		log_error("Couldn't read all logical volume names for volume "
			  "group %s.", vg->name);
		goto bad;
	}

	if (!_read_sections(cmd, fmt, fid, mem, "historical_logical_volumes", _read_historical_lvnames, vg, NULL,
			    vgn, pv_hash, lv_hash, 1)) {
		log_error("Couldn't read all historical logical volumes for volume "
			  "group %s.", vg->name);
		goto bad;
	}

	if (!_read_sections(cmd, fmt, fid, mem, "logical_volumes", _read_lvsegs, vg, NULL,
			    vgn, pv_hash, lv_hash, 1)) {
		log_error("Couldn't read all logical volumes for "
			  "volume group %s.", vg->name);
		goto bad;
	}

	if (!_read_sections(cmd, fmt, fid, mem, "historical_logical_volumes", _read_historical_lvnames_interconnections,
			    vg, NULL, vgn, pv_hash, lv_hash, 1)) {
		log_error("Couldn't read all removed logical volume interconnections "
			  "for volume group %s.", vg->name);
		goto bad;
	}

	if (vg->fixup_imported_mirrors &&
	    !fixup_imported_mirrors(vg)) {
		log_error("Failed to fixup mirror pointers after import for "
			  "volume group %s.", vg->name);
		goto bad;
	}

	dm_hash_destroy(pv_hash);
	dm_hash_destroy(lv_hash);

	if (fid)
		vg_set_fid(vg, fid);

	/*
	 * Finished.
	 */
	return vg;

bad:
	if (pv_hash)
		dm_hash_destroy(pv_hash);

	if (lv_hash)
		dm_hash_destroy(lv_hash);

	release_vg(vg);

	return NULL;
}

static void _read_desc(struct dm_pool *mem,
		       const struct dm_config_tree *cft, time_t *when, char **desc)
{
	const char *str;
	unsigned int u = 0u;

	if (!dm_config_get_str(cft->root, "description", &str))
		str = "";
	*desc = dm_pool_strdup(mem, str);

	(void) dm_config_get_uint32(cft->root, "creation_time", &u);
	*when = u;
}

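/*
 * Illustrative header of a text-format metadata file as consumed by
 * _read_desc() and _read_vgsummary() (values are examples only):
 *
 *	contents = "Text Format Volume Group"
 *	version = 1
 *	description = "Created *before* executing 'lvremove vg0/lvol0'"
 *	creation_host = "host1"
 *	creation_time = 1440331098
 */
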
/*
 * Reads vgsummary information about a VG before locking and reading
 * the VG via vg_read().
 *
 * read_vgsummary: read VG metadata before the VG is locked
 *                 and save the data in struct lvmcache_vgsummary
 * read_vg: read VG metadata after the VG is locked
 *          and save the data in struct volume_group
 * FIXME: why are these separate?
 */
static int _read_vgsummary(const struct format_type *fmt, const struct dm_config_tree *cft,
			   struct lvmcache_vgsummary *vgsummary)
{
	const struct dm_config_node *vgn;
	struct dm_pool *mem = fmt->cmd->mem;
	const char *str;
	struct id id;

	if (!dm_config_get_str(cft->root, "creation_host", &str))
		str = "";
	if (!(vgsummary->creation_host = dm_pool_strdup(mem, str)))
		return_0;

	/* skip any top-level values */
	for (vgn = cft->root; (vgn && vgn->v); vgn = vgn->sib)
		;

	if (!vgn) {
		log_error("Couldn't find volume group in file.");
		return 0;
	}

	if (!(vgsummary->vgname = dm_pool_strdup(mem, vgn->key)))
		return_0;

	vgn = vgn->child;

	if (!_read_id(&id, vgn, "id")) {
		log_error("Couldn't read uuid for volume group %s.", vgsummary->vgname);
		return 0;
	}
	memcpy(vgsummary->vgid, &id, ID_LEN);

	if (!_read_flag_config(vgn, &vgsummary->vgstatus, VG_FLAGS)) {
		log_error("Couldn't find status flags for volume group %s.",
			  vgsummary->vgname);
		return 0;
	}

	if (dm_config_get_str(vgn, "system_id", &str) &&
	    !(vgsummary->system_id = dm_pool_strdup(mem, str)))
		return_0;

	if (dm_config_get_str(vgn, "lock_type", &str) &&
	    !(vgsummary->lock_type = dm_pool_strdup(mem, str)))
		return_0;

	if (!_read_int32(vgn, "seqno", &vgsummary->seqno)) {
		log_error("Couldn't read seqno for volume group %s.",
			  vgsummary->vgname);
		return 0;
	}

	if (!_read_sections(fmt->cmd, NULL, NULL, mem, "physical_volumes", _read_pvsummary, NULL, vgsummary,
			    vgn, NULL, NULL, 0)) {
		log_debug("Couldn't read pv summaries.");
	}

	return 1;
}

static const struct text_vg_version_ops _vsn1_ops = {
	.check_version = _vsn1_check_version,
	.read_vg = _read_vg,
	.read_desc = _read_desc,
	.read_vgsummary = _read_vgsummary
};

const struct text_vg_version_ops *text_vg_vsn1_init(void)
{
	return &_vsn1_ops;
}
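
/*
 * Usage sketch (hypothetical caller): the text import code obtains a
 * version-ops table and drives the whole parse through it.
 *
 *	const struct text_vg_version_ops *ops = text_vg_vsn1_init();
 *
 *	if (ops->check_version(cft))
 *		vg = ops->read_vg(cmd, fmt, fid, cft);
 */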