/* lvm2 (mirror of git://sourceware.org/git/lvm2.git) - lib/format_text/import_vsn1.c */
/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "lib/misc/lib.h"
#include "lib/metadata/metadata.h"
#include "import-export.h"
#include "lib/display/display.h"
#include "lib/commands/toolcontext.h"
#include "lib/cache/lvmcache.h"
#include "lib/locking/lvmlockd.h"
#include "lib/metadata/lv_alloc.h"
#include "lib/metadata/pv_alloc.h"
#include "lib/metadata/segtype.h"
#include "lib/format_text/text_import.h"
#include "lib/config/defaults.h"
#include "lib/datastruct/str_list.h"
/*
 * Reader callback invoked for each subsection of the vg metadata
 * (e.g. one call per pv or lv node); returns 1 on success, 0 on failure.
 * pv_hash/lv_hash map section keys to the structs already read, so later
 * sections can resolve references to earlier ones.
 */
typedef int (*section_fn) (struct cmd_context *cmd,
struct format_type *fmt,
struct format_instance *fid,
struct dm_pool *mem,
struct volume_group *vg,
struct lvmcache_vgsummary *vgsummary,
const struct dm_config_node *pvn,
const struct dm_config_node *vgn,
struct dm_hash_table *pv_hash,
struct dm_hash_table *lv_hash);
/*
 * Convenience wrappers for reading typed scalar values out of a config
 * node.  Each evaluates to non-zero on success.  (Extraction artifacts
 * that split the backslash-continued definitions have been removed.)
 */
#define _read_int32(root, path, result) \
	dm_config_get_uint32(root, path, (uint32_t *) (result))

#define _read_uint32(root, path, result) \
	dm_config_get_uint32(root, path, (result))

#define _read_uint64(root, path, result) \
	dm_config_get_uint64(root, path, (result))
/*
 * Report an attempt to read a text format file that is not valid.
 */
static void _invalid_format(const char *str)
{
	log_error("Can't process text format file - %s.", str);
}
/*
* Checks that the config file contains vg metadata, and that it
* we recognise the version number,
*/
static int _vsn1_check_version(const struct dm_config_tree *cft)
2002-11-18 17:04:08 +03:00
{
const struct dm_config_node *cn;
const struct dm_config_value *cv;
2002-11-18 17:04:08 +03:00
/*
* Check the contents field.
*/
if (!(cn = dm_config_find_node(cft->root, CONTENTS_FIELD))) {
2002-11-18 17:04:08 +03:00
_invalid_format("missing contents field");
return 0;
}
cv = cn->v;
if (!cv || cv->type != DM_CFG_STRING || strcmp(cv->v.str, CONTENTS_VALUE)) {
2002-11-18 17:04:08 +03:00
_invalid_format("unrecognised contents field");
return 0;
}
/*
* Check the version number.
*/
if (!(cn = dm_config_find_node(cft->root, FORMAT_VERSION_FIELD))) {
2002-11-18 17:04:08 +03:00
_invalid_format("missing version number");
return 0;
}
cv = cn->v;
if (!cv || cv->type != DM_CFG_INT || cv->v.i != FORMAT_VERSION_VALUE) {
2002-11-18 17:04:08 +03:00
_invalid_format("unrecognised version number");
return 0;
}
return 1;
}
/* Return 1 if this mirrored LV is mid-conversion, 0 otherwise. */
static int _is_converting(struct logical_volume *lv)
{
	struct lv_segment *seg;

	if (!lv_is_mirrored(lv))
		return 0;

	seg = first_seg(lv);

	/* Can't use is_temporary_mirror() because the metadata for
	 * seg_lv may not be read in and flags may not be set yet. */
	return (seg_type(seg, 0) == AREA_LV &&
		strstr(seg_lv(seg, 0)->name, MIRROR_SYNC_LAYER)) ? 1 : 0;
}
/*
 * Read the uuid string at 'path' beneath 'cn' and parse it into *id.
 * Returns 1 on success, 0 when the value is missing or malformed.
 */
static int _read_id(struct id *id, const struct dm_config_node *cn, const char *path)
{
	const char *uuid;

	if (!dm_config_get_str(cn, path, &uuid)) {
		log_error("Couldn't find uuid.");
		return 0;
	}

	if (!id_read_format(id, uuid)) {
		log_error("Invalid uuid.");
		return 0;
	}

	return 1;
}
/*
 * Read the mandatory "status" list and optional "flags" list from node 'n'
 * into *status.  Returns 1 on success, 0 on a missing or unparsable list.
 */
static int _read_flag_config(const struct dm_config_node *n, uint64_t *status, enum pv_vg_lv_e type)
{
	const struct dm_config_value *cv;

	*status = 0;

	if (!dm_config_get_list(n, "status", &cv)) {
		log_error("Could not find status flags.");
		return 0;
	}

	/* For backward compatible metadata accept both type of flags */
	if (!read_flags(status, type, STATUS_FLAG | SEGTYPE_FLAG, cv)) {
		log_error("Could not read status flags.");
		return 0;
	}

	/* The "flags" list is optional; only a parse failure is fatal. */
	if (dm_config_get_list(n, "flags", &cv) &&
	    !read_flags(status, type, COMPATIBLE_FLAG, cv)) {
		log_error("Could not read flags.");
		return 0;
	}

	return 1;
}
/*
 * Append every string value in the config list 'cv' to 'list',
 * duplicating each string from 'mem'.  An empty array is accepted;
 * any non-string item makes the whole list invalid.
 */
static int _read_str_list(struct dm_pool *mem, struct dm_list *list, const struct dm_config_value *cv)
{
	if (cv->type == DM_CFG_EMPTY_ARRAY)
		return 1;

	for (; cv; cv = cv->next) {
		if (cv->type != DM_CFG_STRING) {
			log_error("Found an item that is not a string");
			return 0;
		}

		if (!str_list_add(mem, list, dm_pool_strdup(mem, cv->v.str)))
			return_0;
	}

	return 1;
}
static int _read_pv(struct cmd_context *cmd,
struct format_type *fmt,
struct format_instance *fid,
struct dm_pool *mem,
struct volume_group *vg,
struct lvmcache_vgsummary *vgsummary,
const struct dm_config_node *pvn,
const struct dm_config_node *vgn __attribute__((unused)),
struct dm_hash_table *pv_hash,
struct dm_hash_table *lv_hash __attribute__((unused)))
2002-11-18 17:04:08 +03:00
{
struct physical_volume *pv;
struct pv_list *pvl;
const struct dm_config_value *cv;
device usage based on devices file The LVM devices file lists devices that lvm can use. The default file is /etc/lvm/devices/system.devices, and the lvmdevices(8) command is used to add or remove device entries. If the file does not exist, or if lvm.conf includes use_devicesfile=0, then lvm will not use a devices file. When the devices file is in use, the regex filter is not used, and the filter settings in lvm.conf or on the command line are ignored. LVM records devices in the devices file using hardware-specific IDs, such as the WWID, and attempts to use subsystem-specific IDs for virtual device types. These device IDs are also written in the VG metadata. When no hardware or virtual ID is available, lvm falls back using the unstable device name as the device ID. When devnames are used, lvm performs extra scanning to find devices if their devname changes, e.g. after reboot. When proper device IDs are used, an lvm command will not look at devices outside the devices file, but when devnames are used as a fallback, lvm will scan devices outside the devices file to locate PVs on renamed devices. A config setting search_for_devnames can be used to control the scanning for renamed devname entries. Related to the devices file, the new command option --devices <devnames> allows a list of devices to be specified for the command to use, overriding the devices file. The listed devices act as a sort of devices file in terms of limiting which devices lvm will see and use. Devices that are not listed will appear to be missing to the lvm command. Multiple devices files can be kept in /etc/lvm/devices, which allows lvm to be used with different sets of devices, e.g. system devices do not need to be exposed to a specific application, and the application can use lvm on its own set of devices that are not exposed to the system. The option --devicesfile <filename> is used to select the devices file to use with the command. 
Without the option set, the default system devices file is used. Setting --devicesfile "" causes lvm to not use a devices file. An existing, empty devices file means lvm will see no devices. The new command vgimportdevices adds PVs from a VG to the devices file and updates the VG metadata to include the device IDs. vgimportdevices -a will import all VGs into the system devices file. LVM commands run by dmeventd not use a devices file by default, and will look at all devices on the system. A devices file can be created for dmeventd (/etc/lvm/devices/dmeventd.devices) If this file exists, lvm commands run by dmeventd will use it. Internal implementaion: - device_ids_read - read the devices file . add struct dev_use (du) to cmd->use_devices for each devices file entry - dev_cache_scan - get /dev entries . add struct device (dev) to dev_cache for each device on the system - device_ids_match - match devices file entries to /dev entries . match each du on cmd->use_devices to a dev in dev_cache, using device ID . on match, set du->dev, dev->id, dev->flags MATCHED_USE_ID - label_scan - read lvm headers and metadata from devices . filters are applied, those that do not need data from the device . filter-deviceid skips devs without MATCHED_USE_ID, i.e. skips /dev entries that are not listed in the devices file . read lvm label from dev . filters are applied, those that use data from the device . read lvm metadata from dev . add info/vginfo structs for PVs/VGs (info is "lvmcache") - device_ids_find_renamed_devs - handle devices with unstable devname ID where devname changed . this step only needed when devs do not have proper device IDs, and their dev names change, e.g. after reboot sdb becomes sdc. . detect incorrect match because PVID in the devices file entry does not match the PVID found when the device was read above . undo incorrect match between du and dev above . search system devices for new location of PVID . 
update devices file with new devnames for PVIDs on renamed devices . label_scan the renamed devs - continue with command processing
2020-06-23 21:25:41 +03:00
const char *str;
uint64_t size, ba_start;
2002-11-18 17:04:08 +03:00
if (!(pvl = dm_pool_zalloc(mem, sizeof(*pvl))) ||
2008-01-30 16:19:47 +03:00
!(pvl->pv = dm_pool_zalloc(mem, sizeof(*pvl->pv))))
return_0;
2002-11-18 17:04:08 +03:00
pv = pvl->pv;
/*
* Add the pv to the pv hash for quick lookup when we read
* the lv segments.
*/
2008-01-30 16:19:47 +03:00
if (!dm_hash_insert(pv_hash, pvn->key, pv))
return_0;
2002-11-18 17:04:08 +03:00
if (!(pvn = pvn->child)) {
log_error("Empty pv section.");
return 0;
}
if (!_read_id(&pv->id, pvn, "id")) {
log_error("Couldn't read uuid for physical volume.");
2002-11-18 17:04:08 +03:00
return 0;
}
pv->is_labelled = 1; /* All format_text PVs are labelled. */
2008-01-30 16:19:47 +03:00
if (!(pv->vg_name = dm_pool_strdup(mem, vg->name)))
return_0;
2002-11-18 17:04:08 +03:00
/* both are struct id */
memcpy(&pv->vg_id, &vg->id, sizeof(struct id));
if (!_read_flag_config(pvn, &pv->status, PV_FLAGS)) {
2002-11-18 17:04:08 +03:00
log_error("Couldn't read status flags for physical volume.");
return 0;
}
/* Late addition */
if (dm_config_has_node(pvn, "dev_size") &&
!_read_uint64(pvn, "dev_size", &pv->size)) {
log_error("Couldn't read dev size for physical volume.");
return 0;
}
device usage based on devices file The LVM devices file lists devices that lvm can use. The default file is /etc/lvm/devices/system.devices, and the lvmdevices(8) command is used to add or remove device entries. If the file does not exist, or if lvm.conf includes use_devicesfile=0, then lvm will not use a devices file. When the devices file is in use, the regex filter is not used, and the filter settings in lvm.conf or on the command line are ignored. LVM records devices in the devices file using hardware-specific IDs, such as the WWID, and attempts to use subsystem-specific IDs for virtual device types. These device IDs are also written in the VG metadata. When no hardware or virtual ID is available, lvm falls back using the unstable device name as the device ID. When devnames are used, lvm performs extra scanning to find devices if their devname changes, e.g. after reboot. When proper device IDs are used, an lvm command will not look at devices outside the devices file, but when devnames are used as a fallback, lvm will scan devices outside the devices file to locate PVs on renamed devices. A config setting search_for_devnames can be used to control the scanning for renamed devname entries. Related to the devices file, the new command option --devices <devnames> allows a list of devices to be specified for the command to use, overriding the devices file. The listed devices act as a sort of devices file in terms of limiting which devices lvm will see and use. Devices that are not listed will appear to be missing to the lvm command. Multiple devices files can be kept in /etc/lvm/devices, which allows lvm to be used with different sets of devices, e.g. system devices do not need to be exposed to a specific application, and the application can use lvm on its own set of devices that are not exposed to the system. The option --devicesfile <filename> is used to select the devices file to use with the command. 
Without the option set, the default system devices file is used. Setting --devicesfile "" causes lvm to not use a devices file. An existing, empty devices file means lvm will see no devices. The new command vgimportdevices adds PVs from a VG to the devices file and updates the VG metadata to include the device IDs. vgimportdevices -a will import all VGs into the system devices file. LVM commands run by dmeventd not use a devices file by default, and will look at all devices on the system. A devices file can be created for dmeventd (/etc/lvm/devices/dmeventd.devices) If this file exists, lvm commands run by dmeventd will use it. Internal implementaion: - device_ids_read - read the devices file . add struct dev_use (du) to cmd->use_devices for each devices file entry - dev_cache_scan - get /dev entries . add struct device (dev) to dev_cache for each device on the system - device_ids_match - match devices file entries to /dev entries . match each du on cmd->use_devices to a dev in dev_cache, using device ID . on match, set du->dev, dev->id, dev->flags MATCHED_USE_ID - label_scan - read lvm headers and metadata from devices . filters are applied, those that do not need data from the device . filter-deviceid skips devs without MATCHED_USE_ID, i.e. skips /dev entries that are not listed in the devices file . read lvm label from dev . filters are applied, those that use data from the device . read lvm metadata from dev . add info/vginfo structs for PVs/VGs (info is "lvmcache") - device_ids_find_renamed_devs - handle devices with unstable devname ID where devname changed . this step only needed when devs do not have proper device IDs, and their dev names change, e.g. after reboot sdb becomes sdc. . detect incorrect match because PVID in the devices file entry does not match the PVID found when the device was read above . undo incorrect match between du and dev above . search system devices for new location of PVID . 
update devices file with new devnames for PVIDs on renamed devices . label_scan the renamed devs - continue with command processing
2020-06-23 21:25:41 +03:00
if (dm_config_get_str(pvn, "device", &str)) {
if (!(pv->device_hint = dm_pool_strdup(mem, str)))
log_error("Failed to allocate memory for device hint in read_pv.");
}
device usage based on devices file The LVM devices file lists devices that lvm can use. The default file is /etc/lvm/devices/system.devices, and the lvmdevices(8) command is used to add or remove device entries. If the file does not exist, or if lvm.conf includes use_devicesfile=0, then lvm will not use a devices file. When the devices file is in use, the regex filter is not used, and the filter settings in lvm.conf or on the command line are ignored. LVM records devices in the devices file using hardware-specific IDs, such as the WWID, and attempts to use subsystem-specific IDs for virtual device types. These device IDs are also written in the VG metadata. When no hardware or virtual ID is available, lvm falls back using the unstable device name as the device ID. When devnames are used, lvm performs extra scanning to find devices if their devname changes, e.g. after reboot. When proper device IDs are used, an lvm command will not look at devices outside the devices file, but when devnames are used as a fallback, lvm will scan devices outside the devices file to locate PVs on renamed devices. A config setting search_for_devnames can be used to control the scanning for renamed devname entries. Related to the devices file, the new command option --devices <devnames> allows a list of devices to be specified for the command to use, overriding the devices file. The listed devices act as a sort of devices file in terms of limiting which devices lvm will see and use. Devices that are not listed will appear to be missing to the lvm command. Multiple devices files can be kept in /etc/lvm/devices, which allows lvm to be used with different sets of devices, e.g. system devices do not need to be exposed to a specific application, and the application can use lvm on its own set of devices that are not exposed to the system. The option --devicesfile <filename> is used to select the devices file to use with the command. 
Without the option set, the default system devices file is used. Setting --devicesfile "" causes lvm to not use a devices file. An existing, empty devices file means lvm will see no devices. The new command vgimportdevices adds PVs from a VG to the devices file and updates the VG metadata to include the device IDs. vgimportdevices -a will import all VGs into the system devices file. LVM commands run by dmeventd not use a devices file by default, and will look at all devices on the system. A devices file can be created for dmeventd (/etc/lvm/devices/dmeventd.devices) If this file exists, lvm commands run by dmeventd will use it. Internal implementaion: - device_ids_read - read the devices file . add struct dev_use (du) to cmd->use_devices for each devices file entry - dev_cache_scan - get /dev entries . add struct device (dev) to dev_cache for each device on the system - device_ids_match - match devices file entries to /dev entries . match each du on cmd->use_devices to a dev in dev_cache, using device ID . on match, set du->dev, dev->id, dev->flags MATCHED_USE_ID - label_scan - read lvm headers and metadata from devices . filters are applied, those that do not need data from the device . filter-deviceid skips devs without MATCHED_USE_ID, i.e. skips /dev entries that are not listed in the devices file . read lvm label from dev . filters are applied, those that use data from the device . read lvm metadata from dev . add info/vginfo structs for PVs/VGs (info is "lvmcache") - device_ids_find_renamed_devs - handle devices with unstable devname ID where devname changed . this step only needed when devs do not have proper device IDs, and their dev names change, e.g. after reboot sdb becomes sdc. . detect incorrect match because PVID in the devices file entry does not match the PVID found when the device was read above . undo incorrect match between du and dev above . search system devices for new location of PVID . 
update devices file with new devnames for PVIDs on renamed devices . label_scan the renamed devs - continue with command processing
2020-06-23 21:25:41 +03:00
if (dm_config_get_str(pvn, "device_id", &str)) {
if (!(pv->device_id = dm_pool_strdup(mem, str)))
log_error("Failed to allocate memory for device_id in read_pv.");
}
if (dm_config_get_str(pvn, "device_id_type", &str)) {
if (!(pv->device_id_type = dm_pool_strdup(mem, str)))
log_error("Failed to allocate memory for device_id_type in read_pv.");
}
if (!_read_uint64(pvn, "pe_start", &pv->pe_start)) {
log_error("Couldn't read extent start value (pe_start) "
"for physical volume.");
2002-11-18 17:04:08 +03:00
return 0;
}
if (!_read_int32(pvn, "pe_count", &pv->pe_count)) {
2002-11-18 17:04:08 +03:00
log_error("Couldn't find extent count (pe_count) for "
"physical volume.");
return 0;
}
/* Bootloader area is not compulsory - just log_debug for the record if found. */
ba_start = size = 0;
_read_uint64(pvn, "ba_start", &ba_start);
_read_uint64(pvn, "ba_size", &size);
if (ba_start && size) {
log_debug_metadata("Found bootloader area specification for PV %s "
"in metadata: ba_start=%" PRIu64 ", ba_size=%" PRIu64 ".",
pv_dev_name(pv), ba_start, size);
pv->ba_start = ba_start;
pv->ba_size = size;
} else if ((!ba_start && size) || (ba_start && !size)) {
log_error("Found incomplete bootloader area specification "
"for PV %s in metadata.", pv_dev_name(pv));
return 0;
}
dm_list_init(&pv->tags);
dm_list_init(&pv->segments);
2004-03-08 20:19:15 +03:00
/* Optional tags */
if (dm_config_get_list(pvn, "tags", &cv) &&
!(_read_str_list(mem, &pv->tags, cv))) {
2004-03-08 20:19:15 +03:00
log_error("Couldn't read tags for physical volume %s in %s.",
pv_dev_name(pv), vg->name);
2004-03-08 20:19:15 +03:00
return 0;
}
2002-11-18 17:04:08 +03:00
pv->pe_size = vg->extent_size;
2002-11-18 17:04:08 +03:00
pv->pe_alloc_count = 0;
pv->pe_align = 0;
pv->fmt = fmt;
2002-11-18 17:04:08 +03:00
2008-01-30 16:19:47 +03:00
if (!alloc_pv_segment_whole_pv(mem, pv))
return_0;
2005-04-20 00:58:25 +04:00
vg->extent_count += pv->pe_count;
vg->free_count += pv->pe_count;
add_pvl_to_vgs(vg, pvl);
2002-11-18 17:04:08 +03:00
return 1;
}
static int _read_pvsummary(struct cmd_context *cmd,
struct format_type *fmt,
struct format_instance *fid,
struct dm_pool *mem,
struct volume_group *vg,
struct lvmcache_vgsummary *vgsummary,
const struct dm_config_node *pvn,
const struct dm_config_node *vgn __attribute__((unused)),
struct dm_hash_table *pv_hash __attribute__((unused)),
struct dm_hash_table *lv_hash __attribute__((unused)))
{
struct physical_volume *pv;
struct pv_list *pvl;
device usage based on devices file The LVM devices file lists devices that lvm can use. The default file is /etc/lvm/devices/system.devices, and the lvmdevices(8) command is used to add or remove device entries. If the file does not exist, or if lvm.conf includes use_devicesfile=0, then lvm will not use a devices file. When the devices file is in use, the regex filter is not used, and the filter settings in lvm.conf or on the command line are ignored. LVM records devices in the devices file using hardware-specific IDs, such as the WWID, and attempts to use subsystem-specific IDs for virtual device types. These device IDs are also written in the VG metadata. When no hardware or virtual ID is available, lvm falls back using the unstable device name as the device ID. When devnames are used, lvm performs extra scanning to find devices if their devname changes, e.g. after reboot. When proper device IDs are used, an lvm command will not look at devices outside the devices file, but when devnames are used as a fallback, lvm will scan devices outside the devices file to locate PVs on renamed devices. A config setting search_for_devnames can be used to control the scanning for renamed devname entries. Related to the devices file, the new command option --devices <devnames> allows a list of devices to be specified for the command to use, overriding the devices file. The listed devices act as a sort of devices file in terms of limiting which devices lvm will see and use. Devices that are not listed will appear to be missing to the lvm command. Multiple devices files can be kept in /etc/lvm/devices, which allows lvm to be used with different sets of devices, e.g. system devices do not need to be exposed to a specific application, and the application can use lvm on its own set of devices that are not exposed to the system. The option --devicesfile <filename> is used to select the devices file to use with the command. 
Without the option set, the default system devices file is used. Setting --devicesfile "" causes lvm to not use a devices file. An existing, empty devices file means lvm will see no devices. The new command vgimportdevices adds PVs from a VG to the devices file and updates the VG metadata to include the device IDs. vgimportdevices -a will import all VGs into the system devices file. LVM commands run by dmeventd not use a devices file by default, and will look at all devices on the system. A devices file can be created for dmeventd (/etc/lvm/devices/dmeventd.devices) If this file exists, lvm commands run by dmeventd will use it. Internal implementaion: - device_ids_read - read the devices file . add struct dev_use (du) to cmd->use_devices for each devices file entry - dev_cache_scan - get /dev entries . add struct device (dev) to dev_cache for each device on the system - device_ids_match - match devices file entries to /dev entries . match each du on cmd->use_devices to a dev in dev_cache, using device ID . on match, set du->dev, dev->id, dev->flags MATCHED_USE_ID - label_scan - read lvm headers and metadata from devices . filters are applied, those that do not need data from the device . filter-deviceid skips devs without MATCHED_USE_ID, i.e. skips /dev entries that are not listed in the devices file . read lvm label from dev . filters are applied, those that use data from the device . read lvm metadata from dev . add info/vginfo structs for PVs/VGs (info is "lvmcache") - device_ids_find_renamed_devs - handle devices with unstable devname ID where devname changed . this step only needed when devs do not have proper device IDs, and their dev names change, e.g. after reboot sdb becomes sdc. . detect incorrect match because PVID in the devices file entry does not match the PVID found when the device was read above . undo incorrect match between du and dev above . search system devices for new location of PVID . 
update devices file with new devnames for PVIDs on renamed devices . label_scan the renamed devs - continue with command processing
2020-06-23 21:25:41 +03:00
const char *str;
if (!(pvl = dm_pool_zalloc(mem, sizeof(*pvl))) ||
!(pvl->pv = dm_pool_zalloc(mem, sizeof(*pvl->pv))))
return_0;
pv = pvl->pv;
if (!(pvn = pvn->child)) {
log_error("Empty pv section.");
return 0;
}
if (!_read_id(&pv->id, pvn, "id"))
log_warn("Couldn't read uuid for physical volume.");
if (dm_config_has_node(pvn, "dev_size") &&
!_read_uint64(pvn, "dev_size", &pv->size))
log_warn("Couldn't read dev size for physical volume.");
device usage based on devices file The LVM devices file lists devices that lvm can use. The default file is /etc/lvm/devices/system.devices, and the lvmdevices(8) command is used to add or remove device entries. If the file does not exist, or if lvm.conf includes use_devicesfile=0, then lvm will not use a devices file. When the devices file is in use, the regex filter is not used, and the filter settings in lvm.conf or on the command line are ignored. LVM records devices in the devices file using hardware-specific IDs, such as the WWID, and attempts to use subsystem-specific IDs for virtual device types. These device IDs are also written in the VG metadata. When no hardware or virtual ID is available, lvm falls back using the unstable device name as the device ID. When devnames are used, lvm performs extra scanning to find devices if their devname changes, e.g. after reboot. When proper device IDs are used, an lvm command will not look at devices outside the devices file, but when devnames are used as a fallback, lvm will scan devices outside the devices file to locate PVs on renamed devices. A config setting search_for_devnames can be used to control the scanning for renamed devname entries. Related to the devices file, the new command option --devices <devnames> allows a list of devices to be specified for the command to use, overriding the devices file. The listed devices act as a sort of devices file in terms of limiting which devices lvm will see and use. Devices that are not listed will appear to be missing to the lvm command. Multiple devices files can be kept in /etc/lvm/devices, which allows lvm to be used with different sets of devices, e.g. system devices do not need to be exposed to a specific application, and the application can use lvm on its own set of devices that are not exposed to the system. The option --devicesfile <filename> is used to select the devices file to use with the command. 
Without the option set, the default system devices file is used. Setting --devicesfile "" causes lvm to not use a devices file. An existing, empty devices file means lvm will see no devices. The new command vgimportdevices adds PVs from a VG to the devices file and updates the VG metadata to include the device IDs. vgimportdevices -a will import all VGs into the system devices file. LVM commands run by dmeventd not use a devices file by default, and will look at all devices on the system. A devices file can be created for dmeventd (/etc/lvm/devices/dmeventd.devices) If this file exists, lvm commands run by dmeventd will use it. Internal implementaion: - device_ids_read - read the devices file . add struct dev_use (du) to cmd->use_devices for each devices file entry - dev_cache_scan - get /dev entries . add struct device (dev) to dev_cache for each device on the system - device_ids_match - match devices file entries to /dev entries . match each du on cmd->use_devices to a dev in dev_cache, using device ID . on match, set du->dev, dev->id, dev->flags MATCHED_USE_ID - label_scan - read lvm headers and metadata from devices . filters are applied, those that do not need data from the device . filter-deviceid skips devs without MATCHED_USE_ID, i.e. skips /dev entries that are not listed in the devices file . read lvm label from dev . filters are applied, those that use data from the device . read lvm metadata from dev . add info/vginfo structs for PVs/VGs (info is "lvmcache") - device_ids_find_renamed_devs - handle devices with unstable devname ID where devname changed . this step only needed when devs do not have proper device IDs, and their dev names change, e.g. after reboot sdb becomes sdc. . detect incorrect match because PVID in the devices file entry does not match the PVID found when the device was read above . undo incorrect match between du and dev above . search system devices for new location of PVID . 
update devices file with new devnames for PVIDs on renamed devices . label_scan the renamed devs - continue with command processing
2020-06-23 21:25:41 +03:00
if (dm_config_get_str(pvn, "device", &str)) {
if (!(pv->device_hint = dm_pool_strdup(mem, str)))
log_error("Failed to allocate memory for device hint in read_pv_sum.");
}
if (dm_config_get_str(pvn, "device_id", &str)) {
if (!(pv->device_id = dm_pool_strdup(mem, str)))
log_error("Failed to allocate memory for device_id in read_pv_sum.");
}
if (dm_config_get_str(pvn, "device_id_type", &str)) {
if (!(pv->device_id_type = dm_pool_strdup(mem, str)))
log_error("Failed to allocate memory for device_id_type in read_pv_sum.");
}
dm_list_add(&vgsummary->pvsummaries, &pvl->list);
return 1;
}
static void _insert_segment(struct logical_volume *lv, struct lv_segment *seg)
{
struct lv_segment *comp;
dm_list_iterate_items(comp, &lv->segments) {
2002-11-18 17:04:08 +03:00
if (comp->le > seg->le) {
dm_list_add(&comp->list, &seg->list);
2002-11-18 17:04:08 +03:00
return;
}
}
lv->le_count += seg->len;
dm_list_add(&lv->segments, &seg->list);
2002-11-18 17:04:08 +03:00
}
/*
 * Import one segment sub-section of an LV from the text metadata.
 *
 * Reads the mandatory start_extent/extent_count values, resolves the
 * segment type string (stripping LV status flag suffixes), allocates
 * the lv_segment, delegates type-specific parsing to the segtype's
 * text_import op, and inserts the segment into lv's ordered list.
 * Returns 1 on success, 0 on failure.
 */
static int _read_segment(struct cmd_context *cmd,
			 struct format_type *fmt,
			 struct format_instance *fid,
			 struct dm_pool *mem,
			 struct logical_volume *lv, const struct dm_config_node *sn,
			 struct dm_hash_table *pv_hash)
{
	uint32_t area_count = 0u;
	struct lv_segment *seg;
	const struct dm_config_node *sn_child = sn->child;
	const struct dm_config_value *cv;
	uint32_t area_extents, start_extent, extent_count, reshape_count, data_copies;
	struct segment_type *segtype;
	const char *segtype_str;
	char *segtype_with_flags;

	if (!sn_child) {
		log_error("Empty segment section.");
		return 0;
	}

	if (!_read_int32(sn_child, "start_extent", &start_extent)) {
		log_error("Couldn't read 'start_extent' for segment '%s' "
			  "of logical volume %s.", sn->key, lv->name);
		return 0;
	}

	if (!_read_int32(sn_child, "extent_count", &extent_count)) {
		log_error("Couldn't read 'extent_count' for segment '%s' "
			  "of logical volume %s.", sn->key, lv->name);
		return 0;
	}

	/* Optional reshape metadata: absent in pre-reshape metadata versions. */
	if (!_read_int32(sn_child, "reshape_count", &reshape_count))
		reshape_count = 0;

	if (!_read_int32(sn_child, "data_copies", &data_copies))
		data_copies = 1;

	/* Default when no "type" key is present. */
	segtype_str = SEG_TYPE_NAME_STRIPED;

	if (!dm_config_get_str(sn_child, "type", &segtype_str)) {
		log_error("Segment type must be a string.");
		return 0;
	}

	/* Locally duplicate to parse out status flag bits */
	if (!(segtype_with_flags = dm_pool_strdup(mem, segtype_str))) {
		log_error("Cannot duplicate segtype string.");
		return 0;
	}

	/* Strips flag suffixes from the copy and ORs them into lv->status. */
	if (!read_segtype_lvflags(&lv->status, segtype_with_flags)) {
		log_error("Couldn't read segtype for logical volume %s.",
			  display_lvname(lv));
		return 0;
	}

	if (!(segtype = get_segtype_from_string(cmd, segtype_with_flags)))
		return_0;

	/* Can drop temporary string here as nothing has allocated from VGMEM meanwhile */
	dm_pool_free(mem, segtype_with_flags);

	if (segtype->ops->text_import_area_count &&
	    !segtype->ops->text_import_area_count(sn_child, &area_count))
		return_0;

	/*
	 * RAID segtypes (parity_devs set) store per-rimage extents; others
	 * use the plain extent count.
	 */
	area_extents = segtype->parity_devs ?
		       raid_rimage_extents(segtype, extent_count, area_count - segtype->parity_devs, data_copies) : extent_count;
	if (!(seg = alloc_lv_segment(segtype, lv, start_extent,
				     extent_count, reshape_count, 0, 0, NULL, area_count,
				     area_extents, data_copies, 0, 0, 0, NULL))) {
		log_error("Segment allocation failed");
		return 0;
	}

	/* Type-specific fields (stripes, mirrors, thin metadata, ...). */
	if (seg->segtype->ops->text_import &&
	    !seg->segtype->ops->text_import(seg, sn_child, pv_hash))
		return_0;

	/* Optional tags */
	if (dm_config_get_list(sn_child, "tags", &cv) &&
	    !(_read_str_list(mem, &seg->tags, cv))) {
		log_error("Couldn't read tags for a segment of %s/%s.",
			  lv->vg->name, lv->name);
		return 0;
	}

	/*
	 * Insert into correct part of segment list.
	 */
	_insert_segment(lv, seg);

	/* Mirror LV status flags derived from the segment type. */
	if (seg_is_mirror(seg))
		lv->status |= MIRROR;

	if (seg_is_mirrored(seg))
		lv->status |= MIRRORED;

	if (seg_is_raid(seg))
		lv->status |= RAID;

	if (seg_is_virtual(seg))
		lv->status |= VIRTUAL;

	if (!seg_is_raid(seg) && _is_converting(lv))
		lv->status |= CONVERTING;

	return 1;
}
2002-11-18 17:04:08 +03:00
int text_import_areas(struct lv_segment *seg, const struct dm_config_node *sn,
const struct dm_config_value *cv, struct dm_hash_table *pv_hash,
uint64_t status)
2004-05-05 01:25:57 +04:00
{
unsigned int s;
struct logical_volume *lv1;
struct physical_volume *pv;
const char *seg_name = dm_config_parent_name(sn);
2002-11-18 17:04:08 +03:00
2004-05-05 01:25:57 +04:00
if (!seg->area_count) {
log_error("Zero areas not allowed for segment %s", seg_name);
2004-05-05 01:25:57 +04:00
return 0;
}
2002-11-18 17:04:08 +03:00
for (s = 0; cv && s < seg->area_count; s++, cv = cv->next) {
2002-11-18 17:04:08 +03:00
2004-05-05 01:25:57 +04:00
/* first we read the pv */
if (cv->type != DM_CFG_STRING) {
log_error("Bad volume name in areas array for segment %s.", seg_name);
2002-11-18 17:04:08 +03:00
return 0;
}
2004-05-05 01:25:57 +04:00
if (!cv->next) {
log_error("Missing offset in areas array for segment %s.", seg_name);
return 0;
}
if (cv->next->type != DM_CFG_INT) {
log_error("Bad offset in areas array for segment %s.", seg_name);
2003-04-30 19:22:36 +04:00
return 0;
}
2004-05-05 01:25:57 +04:00
/* FIXME Cope if LV not yet read in */
if ((pv = dm_hash_lookup(pv_hash, cv->v.str))) {
2008-01-30 16:19:47 +03:00
if (!set_lv_segment_area_pv(seg, s, pv, (uint32_t) cv->next->v.i))
return_0;
2004-05-05 01:25:57 +04:00
} else if ((lv1 = find_lv(seg->lv->vg, cv->v.str))) {
if (!set_lv_segment_area_lv(seg, s, lv1,
(uint32_t) cv->next->v.i,
status))
return_0;
2004-05-05 01:25:57 +04:00
} else {
log_error("Couldn't find volume '%s' "
"for segment '%s'.",
cv->v.str ? : "NULL", seg_name);
2002-11-18 17:04:08 +03:00
return 0;
}
2004-05-05 01:25:57 +04:00
cv = cv->next;
2002-11-18 17:04:08 +03:00
}
/*
2004-05-05 01:25:57 +04:00
* Check we read the correct number of stripes.
2002-11-18 17:04:08 +03:00
*/
2004-05-05 01:25:57 +04:00
if (cv || (s < seg->area_count)) {
log_error("Incorrect number of areas in area array "
"for segment '%s'.", seg_name);
return 0;
}
2002-11-18 17:04:08 +03:00
return 1;
}
static int _read_segments(struct cmd_context *cmd,
struct format_type *fmt,
struct format_instance *fid,
struct dm_pool *mem,
struct logical_volume *lv, const struct dm_config_node *lvn,
struct dm_hash_table *pv_hash)
2002-11-18 17:04:08 +03:00
{
const struct dm_config_node *sn;
2002-11-18 17:04:08 +03:00
int count = 0, seg_count;
for (sn = lvn; sn; sn = sn->sib) {
/*
* All sub-sections are assumed to be segments.
*/
if (!sn->v) {
if (!_read_segment(cmd, fmt, fid, mem, lv, sn, pv_hash))
2008-01-30 16:19:47 +03:00
return_0;
2002-11-18 17:04:08 +03:00
count++;
}
/* FIXME Remove this restriction */
2016-12-13 02:09:15 +03:00
if (lv_is_snapshot(lv) && count > 1) {
2002-11-18 17:04:08 +03:00
log_error("Only one segment permitted for snapshot");
return 0;
}
}
if (!_read_int32(lvn, "segment_count", &seg_count)) {
log_error("Couldn't read segment count for logical volume %s.",
lv->name);
2002-11-18 17:04:08 +03:00
return 0;
}
if (seg_count != count) {
log_error("segment_count and actual number of segments "
"disagree for logical volume %s.", lv->name);
2002-11-18 17:04:08 +03:00
return 0;
}
/*
* Check there are no gaps or overlaps in the lv.
*/
2008-01-30 16:19:47 +03:00
if (!check_lv_segments(lv, 0))
return_0;
2002-11-18 17:04:08 +03:00
/*
* Merge segments in case someones been editing things by hand.
*/
2008-01-30 16:19:47 +03:00
if (!lv_merge_segments(lv))
return_0;
2002-11-18 17:04:08 +03:00
return 1;
}
/*
 * First pass over the "logical_volumes" section: create the LV
 * structure for one section, link it into the VG, and import its
 * name-level properties (status flags, creation info, lock_args,
 * allocation policy, profile, read_ahead, tags).  Segments are read
 * later by _read_lvsegs().  The LV is registered in lv_hash so the
 * second pass can find it.  Returns 1 on success, 0 on failure.
 */
static int _read_lvnames(struct cmd_context *cmd,
			 struct format_type *fmt,
			 struct format_instance *fid __attribute__((unused)),
			 struct dm_pool *mem,
			 struct volume_group *vg,
			 struct lvmcache_vgsummary *vgsummary,
			 const struct dm_config_node *lvn,
			 const struct dm_config_node *vgn __attribute__((unused)),
			 struct dm_hash_table *pv_hash __attribute__((unused)),
			 struct dm_hash_table *lv_hash)
{
	struct logical_volume *lv;
	const char *str;
	const struct dm_config_value *cv;
	const char *hostname;
	uint64_t timestamp = 0, lvstatus;

	if (!(lv = alloc_lv(mem)))
		return_0;

	if (!link_lv_to_vg(vg, lv))
		return_0;

	/* The section key is the LV name. */
	if (!(lv->name = dm_pool_strdup(mem, lvn->key)))
		return_0;

	log_debug_metadata("Importing logical volume %s.", display_lvname(lv));

	if (!(lvn = lvn->child)) {
		log_error("Empty logical volume section for %s.",
			  display_lvname(lv));
		return 0;
	}

	if (!_read_flag_config(lvn, &lvstatus, LV_FLAGS)) {
		log_error("Couldn't read status flags for logical volume %s.",
			  display_lvname(lv));
		return 0;
	}

	/* WRITE_LOCKED in metadata means writable under lvmlockd control. */
	if (lvstatus & LVM_WRITE_LOCKED) {
		lvstatus |= LVM_WRITE;
		lvstatus &= ~LVM_WRITE_LOCKED;
	}
	lv->status = lvstatus;

	/*
	 * creation_time and creation_host must appear together:
	 * a host without a time is rejected below.
	 */
	if (dm_config_has_node(lvn, "creation_time")) {
		if (!_read_uint64(lvn, "creation_time", &timestamp)) {
			log_error("Invalid creation_time for logical volume %s.",
				  display_lvname(lv));
			return 0;
		}
		if (!dm_config_get_str(lvn, "creation_host", &hostname)) {
			log_error("Couldn't read creation_host for logical volume %s.",
				  display_lvname(lv));
			return 0;
		}
	} else if (dm_config_has_node(lvn, "creation_host")) {
		log_error("Missing creation_time for logical volume %s.",
			  display_lvname(lv));
		return 0;
	}

	/*
	 * The LV lock_args string is generated in lvmlockd, and the content
	 * depends on the lock_type.
	 *
	 * lock_type dlm does not use LV lock_args, so the LV lock_args field
	 * is just set to "dlm".
	 *
	 * lock_type sanlock uses the LV lock_args field to save the
	 * location on disk of that LV's sanlock lock.  The disk name is
	 * specified in the VG lock_args.  The lock_args string begins
	 * with a version number, e.g. 1.0.0, followed by a colon, followed
	 * by a number.  The number is the offset on disk where sanlock is
	 * told to find the LV's lock.
	 * e.g. lock_args = 1.0.0:70254592
	 * means that the lock is located at offset 70254592.
	 *
	 * The lvmlockd code for each specific lock manager also validates
	 * the lock_args before using it to access the lock manager.
	 */
	if (dm_config_get_str(lvn, "lock_args", &str)) {
		if (!(lv->lock_args = dm_pool_strdup(mem, str)))
			return_0;
	}

	/* Unrecognised policies are downgraded to a warning, not an error. */
	if (dm_config_get_str(lvn, "allocation_policy", &str)) {
		lv->alloc = get_alloc_from_string(str);
		if (lv->alloc == ALLOC_INVALID) {
			log_warn("WARNING: Ignoring unrecognised allocation policy %s for LV %s.",
				 str, display_lvname(lv));
			lv->alloc = ALLOC_INHERIT;
		}
	} else
		lv->alloc = ALLOC_INHERIT;

	if (dm_config_get_str(lvn, "profile", &str)) {
		log_debug_metadata("Adding profile configuration %s for LV %s.",
				   str, display_lvname(lv));
		if (!(lv->profile = add_profile(cmd, str, CONFIG_PROFILE_METADATA))) {
			log_error("Failed to add configuration profile %s for LV %s.",
				  str, display_lvname(lv));
			return 0;
		}
	}

	if (!_read_int32(lvn, "read_ahead", &lv->read_ahead))
		/* If not present, choice of auto or none is configurable */
		lv->read_ahead = cmd->default_settings.read_ahead;
	else {
		/* Map the metadata's 0 / -1 sentinels to the dm constants. */
		switch (lv->read_ahead) {
		case 0:
			lv->read_ahead = DM_READ_AHEAD_AUTO;
			break;
		case UINT32_C(-1):
			lv->read_ahead = DM_READ_AHEAD_NONE;
			break;
		default:
			;
		}
	}

	/* Optional tags */
	if (dm_config_get_list(lvn, "tags", &cv) &&
	    !(_read_str_list(mem, &lv->tags, cv))) {
		log_error("Couldn't read tags for logical volume %s.",
			  display_lvname(lv));
		return 0;
	}

	/* Register for the segment-reading pass (_read_lvsegs). */
	if (!dm_hash_insert(lv_hash, lv->name, lv))
		return_0;

	/* hostname is only read when timestamp was successfully parsed above. */
	if (timestamp && !lv_set_creation(lv, hostname, timestamp))
		return_0;

	/* Hidden "*_pmspare" LV: at most one pool metadata spare per VG. */
	if (!lv_is_visible(lv) && strstr(lv->name, "_pmspare")) {
		if (vg->pool_metadata_spare_lv) {
			log_error("Couldn't use another pool metadata spare "
				  "logical volume %s.", display_lvname(lv));
			return 0;
		}

		log_debug_metadata("Logical volume %s is pool metadata spare.",
				   display_lvname(lv));
		lv->status |= POOL_METADATA_SPARE;
		vg->pool_metadata_spare_lv = lv;
	}

	/* Hidden lvmlock LV holding sanlock leases for the VG. */
	if (!lv_is_visible(lv) && !strcmp(lv->name, LOCKD_SANLOCK_LV_NAME)) {
		log_debug_metadata("Logical volume %s is sanlock lv.",
				   display_lvname(lv));
		lv->status |= LOCKD_SANLOCK_LV;
		vg->sanlock_lv = lv;
	}

	return 1;
}
/*
 * Import one "historical" (removed) LV entry from the metadata:
 * allocate the generic/historical LV structures, read its id, optional
 * original name, and creation/removal timestamps, and add it to
 * vg->historical_lvs.  Interconnections (origin/descendants) are wired
 * up later by _read_historical_lvnames_interconnections().
 * Returns 1 on success, 0 on failure (pool allocations are released).
 *
 * Fixes vs. previous version:
 *  - the creation_time/removal_time error messages formatted 'str',
 *    which is uninitialized when the section has no "name" key (UB);
 *    use the always-valid glv->historical->name instead.
 *  - the _read_id failure path returned without the 'goto bad' cleanup
 *    every other error path performs; it now frees the allocations too.
 */
static int _read_historical_lvnames(struct cmd_context *cmd,
				    struct format_type *fmt,
				    struct format_instance *fid __attribute__((unused)),
				    struct dm_pool *mem,
				    struct volume_group *vg,
				    struct lvmcache_vgsummary *vgsummary,
				    const struct dm_config_node *hlvn,
				    const struct dm_config_node *vgn __attribute__((unused)),
				    struct dm_hash_table *pv_hash __attribute__((unused)),
				    struct dm_hash_table *lv_hash __attribute__((unused)))
{
	struct generic_logical_volume *glv;
	struct glv_list *glvl;
	const char *str;
	uint64_t timestamp;

	if (!(glv = dm_pool_zalloc(mem, sizeof(struct generic_logical_volume))) ||
	    !(glv->historical = dm_pool_zalloc(mem, sizeof(struct historical_logical_volume))) ||
	    !(glvl = dm_pool_zalloc(mem, sizeof(struct glv_list)))) {
		log_error("Removed logical volume structure allocation failed");
		goto bad;
	}

	glv->is_historical = 1;
	glv->historical->vg = vg;
	dm_list_init(&glv->historical->indirect_glvs);

	/* Section key is the historical LV's name (may be overridden below). */
	if (!(glv->historical->name = dm_pool_strdup(mem, hlvn->key)))
		goto_bad;

	if (!(hlvn = hlvn->child)) {
		log_error("Empty removed logical volume section.");
		goto bad;
	}

	if (!_read_id(&glv->historical->lvid.id[1], hlvn, "id")) {
		log_error("Couldn't read uuid for removed logical volume %s in vg %s.",
			  glv->historical->name, vg->name);
		goto bad;
	}
	/* lvid = VG uuid followed by LV uuid. */
	memcpy(&glv->historical->lvid.id[0], &glv->historical->vg->id, sizeof(glv->historical->lvid.id[0]));

	if (dm_config_get_str(hlvn, "name", &str)) {
		if (!(glv->historical->name = dm_pool_strdup(mem, str)))
			goto_bad;
	}

	if (dm_config_has_node(hlvn, "creation_time")) {
		if (!_read_uint64(hlvn, "creation_time", &timestamp)) {
			log_error("Invalid creation_time for removed logical volume %s.",
				  glv->historical->name);
			goto bad;
		}
		glv->historical->timestamp = timestamp;
	}

	if (dm_config_has_node(hlvn, "removal_time")) {
		if (!_read_uint64(hlvn, "removal_time", &timestamp)) {
			log_error("Invalid removal_time for removed logical volume %s.",
				  glv->historical->name);
			goto bad;
		}
		glv->historical->timestamp_removed = timestamp;
	}

	glvl->glv = glv;
	dm_list_add(&vg->historical_lvs, &glvl->list);

	return 1;
bad:
	/* Pool free releases glv and everything allocated after it. */
	if (glv)
		dm_pool_free(mem, glv);
	return 0;
}
/*
 * Second pass over the "historical_logical_volumes" section: wire up
 * the origin/descendant links between a historical LV (created by
 * _read_historical_lvnames) and the live or historical LVs it refers
 * to.  An origin name prefixed with HISTORICAL_LV_PREFIX refers to
 * another historical LV; otherwise it names a live LV in the VG.
 * Returns 1 on success, 0 on failure.
 */
static int _read_historical_lvnames_interconnections(struct cmd_context *cmd,
					struct format_type *fmt,
					struct format_instance *fid __attribute__((unused)),
					struct dm_pool *mem,
					struct volume_group *vg,
					struct lvmcache_vgsummary *vgsummary,
					const struct dm_config_node *hlvn,
					const struct dm_config_node *vgn __attribute__((unused)),
					struct dm_hash_table *pv_hash __attribute__((unused)),
					struct dm_hash_table *lv_hash __attribute__((unused)))
{
	const char *historical_lv_name, *origin_name = NULL;
	struct generic_logical_volume *glv, *origin_glv, *descendant_glv;
	struct logical_volume *tmp_lv;
	struct glv_list *glvl = NULL;
	const struct dm_config_value *descendants = NULL;

	historical_lv_name = hlvn->key;
	hlvn = hlvn->child;

	/* Must already exist from the first (creation) pass. */
	if (!(glv = find_historical_glv(vg, historical_lv_name, 0, NULL))) {
		log_error("Unknown historical logical volume %s/%s%s",
			  vg->name, HISTORICAL_LV_PREFIX, historical_lv_name);
		goto bad;
	}

	if (dm_config_has_node(hlvn, "origin")) {
		if (!dm_config_get_str(hlvn, "origin", &origin_name)) {
			log_error("Couldn't read origin for historical logical "
				  "volume %s/%s%s", vg->name, HISTORICAL_LV_PREFIX, historical_lv_name);
			goto bad;
		}
	}

	if (dm_config_has_node(hlvn, "descendants")) {
		if (!dm_config_get_list(hlvn, "descendants", &descendants)) {
			log_error("Couldn't get descendants list for historical logical "
				  "volume %s/%s%s", vg->name, HISTORICAL_LV_PREFIX, historical_lv_name);
			goto bad;
		}
		if (descendants->type == DM_CFG_EMPTY_ARRAY) {
			log_error("Found empty descendants list for historical logical "
				  "volume %s/%s%s", vg->name, HISTORICAL_LV_PREFIX, historical_lv_name);
			goto bad;
		}
	}

	if (!origin_name && !descendants)
		/* no interconnections */
		return 1;

	if (origin_name) {
		/* List item linking this glv into its origin's indirect list. */
		if (!(glvl = dm_pool_zalloc(mem, sizeof(struct glv_list)))) {
			log_error("Failed to allocate list item for historical logical "
				  "volume %s/%s%s", vg->name, HISTORICAL_LV_PREFIX, historical_lv_name);
			goto bad;
		}
		glvl->glv = glv;

		/* Prefixed origin name -> origin is itself a historical LV. */
		if (!strncmp(origin_name, HISTORICAL_LV_PREFIX, strlen(HISTORICAL_LV_PREFIX))) {
			if (!(origin_glv = find_historical_glv(vg, origin_name + strlen(HISTORICAL_LV_PREFIX), 0, NULL))) {
				log_error("Unknown origin %s for historical logical volume %s/%s%s",
					  origin_name, vg->name, HISTORICAL_LV_PREFIX, historical_lv_name);
				goto bad;
			}
		} else {
			if (!(tmp_lv = find_lv(vg, origin_name))) {
				log_error("Unknown origin %s for historical logical volume %s/%s%s",
					  origin_name, vg->name, HISTORICAL_LV_PREFIX, historical_lv_name);
				goto bad;
			}

			if (!(origin_glv = get_or_create_glv(mem, tmp_lv, NULL)))
				goto bad;
		}

		glv->historical->indirect_origin = origin_glv;
		/* The origin tracks its indirect descendants on the list
		 * matching its live/historical variant. */
		if (origin_glv->is_historical)
			dm_list_add(&origin_glv->historical->indirect_glvs, &glvl->list);
		else
			dm_list_add(&origin_glv->live->indirect_glvs, &glvl->list);
	}

	if (descendants) {
		/* Each descendant is a live LV name; link glv as its
		 * indirect origin. */
		do {
			if (descendants->type != DM_CFG_STRING) {
				log_error("Descendant value for historical logical volume %s/%s%s "
					  "is not a string.", vg->name, HISTORICAL_LV_PREFIX, historical_lv_name);
				goto bad;
			}

			if (!(tmp_lv = find_lv(vg, descendants->v.str))) {
				log_error("Failed to find descendant %s for historical LV %s.",
					  descendants->v.str, historical_lv_name);
				goto bad;
			}

			if (!(descendant_glv = get_or_create_glv(mem, tmp_lv, NULL)))
				goto bad;

			if (!add_glv_to_indirect_glvs(mem, glv, descendant_glv))
				goto bad;

			descendants = descendants->next;
		} while (descendants);
	}

	return 1;
bad:
	if (glvl)
		dm_pool_free(mem, glvl);
	return 0;
}
static int _read_lvsegs(struct cmd_context *cmd,
struct format_type *fmt,
struct format_instance *fid,
struct dm_pool *mem,
struct volume_group *vg,
struct lvmcache_vgsummary *vgsummary,
const struct dm_config_node *lvn,
const struct dm_config_node *vgn __attribute__((unused)),
struct dm_hash_table *pv_hash,
struct dm_hash_table *lv_hash)
{
struct logical_volume *lv;
if (!(lv = dm_hash_lookup(lv_hash, lvn->key))) {
log_error("Lost logical volume reference %s", lvn->key);
return 0;
}
if (!(lvn = lvn->child)) {
log_error("Empty logical volume section.");
return 0;
}
/* FIXME: read full lvid */
if (!_read_id(&lv->lvid.id[1], lvn, "id")) {
log_error("Couldn't read uuid for logical volume %s.",
display_lvname(lv));
return 0;
}
memcpy(&lv->lvid.id[0], &lv->vg->id, sizeof(lv->lvid.id[0]));
if (!_read_segments(cmd, fmt, fid, mem, lv, lvn, pv_hash))
2008-01-30 16:19:47 +03:00
return_0;
2002-11-18 17:04:08 +03:00
lv->size = (uint64_t) lv->le_count * (uint64_t) vg->extent_size;
2005-04-07 16:39:44 +04:00
lv->minor = -1;
lv->major = -1;
if (lv->status & FIXED_MINOR) {
if (!_read_int32(lvn, "minor", &lv->minor)) {
log_error("Couldn't read minor number for logical volume %s.",
display_lvname(lv));
return 0;
}
if (!dm_config_has_node(lvn, "major"))
/* If major is missing, pick default */
lv->major = cmd->dev_types->device_mapper_major;
else if (!_read_int32(lvn, "major", &lv->major)) {
log_warn("WARNING: Couldn't read major number for logical "
"volume %s.", display_lvname(lv));
lv->major = cmd->dev_types->device_mapper_major;
}
if (!validate_major_minor(cmd, fmt, lv->major, lv->minor)) {
log_warn("WARNING: Ignoring invalid major, minor number for "
"logical volume %s.", display_lvname(lv));
lv->major = lv->minor = -1;
}
2002-11-18 17:04:08 +03:00
}
return 1;
}
/*
 * Apply 'fn' to every child of the named sub-section of the VG config
 * node.  A missing section is an error unless 'optional' is set.
 */
static int _read_sections(struct cmd_context *cmd,
			  const struct format_type *fmt,
			  struct format_instance *fid,
			  struct dm_pool *mem,
			  const char *section, section_fn fn,
			  struct volume_group *vg,
			  struct lvmcache_vgsummary *vgsummary,
			  const struct dm_config_node *vgn,
			  struct dm_hash_table *pv_hash,
			  struct dm_hash_table *lv_hash,
			  int optional)
{
	const struct dm_config_node *n;

	if (!dm_config_get_section(vgn, section, &n)) {
		if (optional)
			return 1;
		log_error("Couldn't find section '%s'.", section);
		return 0;
	}

	for (n = n->child; n; n = n->sib)
		if (!fn(cmd, (struct format_type *) fmt, fid, mem, vg, vgsummary, n, vgn, pv_hash, lv_hash))
			return_0;

	return 1;
}
/*
 * Import a complete volume group from a parsed text metadata tree.
 *
 * Reads the VG-level properties, then runs the section readers in
 * dependency order: PVs first, LV names (structures) next, historical
 * LVs, then LV segments (which reference the PV and LV structures),
 * and finally historical LV interconnections.  On any failure the
 * temporary hashes are destroyed and the partially built VG released.
 * Returns the VG, or NULL on error.
 */
static struct volume_group *_read_vg(struct cmd_context *cmd,
				     const struct format_type *fmt,
				     struct format_instance *fid,
				     const struct dm_config_tree *cft)
{
	struct dm_pool *mem;
	const struct dm_config_node *vgn;
	const struct dm_config_value *cv;
	const char *str, *format_str, *system_id;
	struct volume_group *vg;
	struct dm_hash_table *pv_hash = NULL, *lv_hash = NULL;
	uint64_t vgstatus;

	/* skip any top-level values */
	for (vgn = cft->root; (vgn && vgn->v); vgn = vgn->sib)
		;

	if (!vgn) {
		log_error("Couldn't find volume group in file.");
		return NULL;
	}

	/* The first top-level section's key is the VG name. */
	if (!(vg = alloc_vg("read_vg", cmd, vgn->key)))
		return_NULL;

	mem = vg->vgmem;

	/*
	 * The pv hash memorises the pv section names -> pv
	 * structures.
	 */
	if (!(pv_hash = dm_hash_create(59))) {
		log_error("Couldn't create pv hash table.");
		goto bad;
	}

	/*
	 * The lv hash memorises the lv section names -> lv
	 * structures.
	 */
	if (!(lv_hash = dm_hash_create(1023))) {
		log_error("Couldn't create lv hash table.");
		goto bad;
	}

	vgn = vgn->child;

	/* A backup file might be a backup of a different format */
	if (dm_config_get_str(vgn, "format", &format_str) &&
	    !(vg->original_fmt = get_format_by_name(cmd, format_str))) {
		log_error("Unrecognised format %s for volume group %s.", format_str, vg->name);
		goto bad;
	}

	if (dm_config_get_str(vgn, "lock_type", &str)) {
		if (!(vg->lock_type = dm_pool_strdup(mem, str)))
			goto bad;
	}

	/*
	 * The VG lock_args string is generated in lvmlockd, and the content
	 * depends on the lock_type.  lvmlockd begins the lock_args string
	 * with a version number, e.g. 1.0.0, followed by a colon, followed
	 * by a string that depends on the lock manager.  The string after
	 * the colon is information needed to use the lock manager for the VG.
	 *
	 * For sanlock, the string is the name of the internal LV used to store
	 * sanlock locks.  lvmlockd needs to know where the locks are located
	 * so it can pass that location to sanlock which needs to access the locks.
	 * e.g. lock_args = 1.0.0:lvmlock
	 * means that the locks are located on the LV "lvmlock".
	 *
	 * For dlm, the string is the dlm cluster name.  lvmlockd needs to use
	 * a dlm lockspace in this cluster to use the VG.
	 * e.g. lock_args = 1.0.0:foo
	 * means that the host needs to be a member of the cluster "foo".
	 *
	 * The lvmlockd code for each specific lock manager also validates
	 * the lock_args before using it to access the lock manager.
	 */
	if (dm_config_get_str(vgn, "lock_args", &str)) {
		if (!(vg->lock_args = dm_pool_strdup(mem, str)))
			goto bad;
	}

	if (!_read_id(&vg->id, vgn, "id")) {
		log_error("Couldn't read uuid for volume group %s.", vg->name);
		goto bad;
	}

	if (!_read_int32(vgn, "seqno", &vg->seqno)) {
		log_error("Couldn't read 'seqno' for volume group %s.",
			  vg->name);
		goto bad;
	}

	if (!_read_flag_config(vgn, &vgstatus, VG_FLAGS)) {
		log_error("Error reading flags of volume group %s.",
			  vg->name);
		goto bad;
	}

	if (dm_config_get_str(vgn, "system_id", &system_id)) {
		if (!(vg->system_id = dm_pool_strdup(mem, system_id))) {
			log_error("Failed to allocate memory for system_id in _read_vg.");
			goto bad;
		}
	}

	/* WRITE_LOCKED in metadata means writable under lvmlockd control. */
	if (vgstatus & LVM_WRITE_LOCKED) {
		vgstatus |= LVM_WRITE;
		vgstatus &= ~LVM_WRITE_LOCKED;
	}
	vg->status = vgstatus;

	if (!_read_int32(vgn, "extent_size", &vg->extent_size)) {
		log_error("Couldn't read extent size for volume group %s.",
			  vg->name);
		goto bad;
	}

	/*
	 * 'extent_count' and 'free_count' get filled in
	 * implicitly when reading in the pv's and lv's.
	 */

	if (!_read_int32(vgn, "max_lv", &vg->max_lv)) {
		log_error("Couldn't read 'max_lv' for volume group %s.",
			  vg->name);
		goto bad;
	}

	if (!_read_int32(vgn, "max_pv", &vg->max_pv)) {
		log_error("Couldn't read 'max_pv' for volume group %s.",
			  vg->name);
		goto bad;
	}

	/* Unrecognised policies are downgraded to a warning, not an error. */
	if (dm_config_get_str(vgn, "allocation_policy", &str)) {
		vg->alloc = get_alloc_from_string(str);
		if (vg->alloc == ALLOC_INVALID) {
			log_warn("WARNING: Ignoring unrecognised allocation policy %s for VG %s", str, vg->name);
			vg->alloc = ALLOC_NORMAL;
		}
	}

	if (dm_config_get_str(vgn, "profile", &str)) {
		log_debug_metadata("Adding profile configuration %s for VG %s.", str, vg->name);
		vg->profile = add_profile(cmd, str, CONFIG_PROFILE_METADATA);
		if (!vg->profile) {
			log_error("Failed to add configuration profile %s for VG %s", str, vg->name);
			goto bad;
		}
	}

	if (!_read_uint32(vgn, "metadata_copies", &vg->mda_copies)) {
		vg->mda_copies = DEFAULT_VGMETADATACOPIES;
	}

	/* PVs are mandatory; all later sections are optional. */
	if (!_read_sections(cmd, fmt, fid, mem, "physical_volumes", _read_pv, vg, NULL,
			    vgn, pv_hash, lv_hash, 0)) {
		log_error("Couldn't find all physical volumes for volume "
			  "group %s.", vg->name);
		goto bad;
	}

	/* Optional tags */
	if (dm_config_get_list(vgn, "tags", &cv) &&
	    !(_read_str_list(mem, &vg->tags, cv))) {
		log_error("Couldn't read tags for volume group %s.", vg->name);
		goto bad;
	}

	/* Pass 1: create LV structures and name-level properties. */
	if (!_read_sections(cmd, fmt, fid, mem, "logical_volumes", _read_lvnames, vg, NULL,
			    vgn, pv_hash, lv_hash, 1)) {
		log_error("Couldn't read all logical volume names for volume "
			  "group %s.", vg->name);
		goto bad;
	}

	if (!_read_sections(cmd, fmt, fid, mem, "historical_logical_volumes", _read_historical_lvnames, vg, NULL,
			    vgn, pv_hash, lv_hash, 1)) {
		log_error("Couldn't read all historical logical volumes for volume "
			  "group %s.", vg->name);
		goto bad;
	}

	/* Pass 2: segments can now reference any LV or PV by name. */
	if (!_read_sections(cmd, fmt, fid, mem, "logical_volumes", _read_lvsegs, vg, NULL,
			    vgn, pv_hash, lv_hash, 1)) {
		log_error("Couldn't read all logical volumes for "
			  "volume group %s.", vg->name);
		goto bad;
	}

	if (!_read_sections(cmd, fmt, fid, mem, "historical_logical_volumes", _read_historical_lvnames_interconnections,
			    vg, NULL, vgn, pv_hash, lv_hash, 1)) {
		log_error("Couldn't read all removed logical volume interconnections "
			  "for volume group %s.", vg->name);
		goto bad;
	}

	if (!fixup_imported_mirrors(vg)) {
		log_error("Failed to fixup mirror pointers after import for "
			  "volume group %s.", vg->name);
		goto bad;
	}

	dm_hash_destroy(pv_hash);
	dm_hash_destroy(lv_hash);

	if (fid)
		vg_set_fid(vg, fid);

	/*
	 * Finished.
	 */
	return vg;

bad:
	if (pv_hash)
		dm_hash_destroy(pv_hash);

	if (lv_hash)
		dm_hash_destroy(lv_hash);

	release_vg(vg);
	return NULL;
}
/*
 * Extract the optional "description" string and "creation_time" stamp
 * from the top level of the metadata config tree.
 *
 * *desc is a pool-allocated copy of the description ("" if absent;
 * NULL only on pool allocation failure).  *when is the creation time,
 * or 0 if the field is missing.
 */
static void _read_desc(struct dm_pool *mem,
		       const struct dm_config_tree *cft, time_t *when, char **desc)
{
	unsigned int creation_time = 0u;
	const char *str;

	/* "creation_time" is optional; creation_time stays 0 if absent. */
	(void) dm_config_get_uint32(cft->root, "creation_time", &creation_time);
	*when = creation_time;

	/* "description" is optional; fall back to an empty string. */
	if (!dm_config_get_str(cft->root, "description", &str))
		str = "";

	*desc = dm_pool_strdup(mem, str);
}
/*
 * Reads summary information about a VG before the VG is locked and
 * read in full via vg_read().
 *
 * read_vgsummary: reads VG metadata before the VG is locked and
 *                 saves the data in struct lvmcache_vgsummary.
 * read_vg:        reads VG metadata after the VG is locked and
 *                 saves the data in struct volume_group.
 *
 * FIXME: why are these separate?
 */
static int _read_vgsummary(const struct format_type *fmt, const struct dm_config_tree *cft,
struct lvmcache_vgsummary *vgsummary)
{
const struct dm_config_node *vgn;
struct dm_pool *mem = fmt->cmd->mem;
const char *str;
struct id id;
if (!dm_config_get_str(cft->root, "creation_host", &str))
str = "";
if (!(vgsummary->creation_host = dm_pool_strdup(mem, str)))
return_0;
/* skip any top-level values */
for (vgn = cft->root; (vgn && vgn->v); vgn = vgn->sib) ;
if (!vgn) {
log_error("Couldn't find volume group in file.");
return 0;
}
if (!(vgsummary->vgname = dm_pool_strdup(mem, vgn->key)))
return_0;
vgn = vgn->child;
if (!_read_id(&id, vgn, "id")) {
log_error("Couldn't read uuid for volume group %s.", vgsummary->vgname);
return 0;
}
memcpy(vgsummary->vgid, &id, ID_LEN);
if (!_read_flag_config(vgn, &vgsummary->vgstatus, VG_FLAGS)) {
log_error("Couldn't find status flags for volume group %s.",
vgsummary->vgname);
return 0;
}
if (dm_config_get_str(vgn, "system_id", &str) &&
(!(vgsummary->system_id = dm_pool_strdup(mem, str))))
return_0;
if (dm_config_get_str(vgn, "lock_type", &str) &&
(!(vgsummary->lock_type = dm_pool_strdup(mem, str))))
return_0;
if (!_read_int32(vgn, "seqno", &vgsummary->seqno)) {
log_error("Couldn't read seqno for volume group %s.",
vgsummary->vgname);
return 0;
}
if (!_read_sections(fmt->cmd, NULL, NULL, mem, "physical_volumes", _read_pvsummary, NULL, vgsummary,
vgn, NULL, NULL, 0)) {
log_debug("Couldn't read pv summaries");
}
return 1;
}
2024-05-03 15:43:22 +03:00
static const struct text_vg_version_ops _vsn1_ops = {
2011-03-27 17:44:08 +04:00
.check_version = _vsn1_check_version,
.read_vg = _read_vg,
.read_desc = _read_desc,
.read_vgsummary = _read_vgsummary
2002-11-18 17:04:08 +03:00
};
2024-05-03 15:43:22 +03:00
const struct text_vg_version_ops *text_vg_vsn1_init(void)
2002-11-18 17:04:08 +03:00
{
return &_vsn1_ops;
}