lvm2/tools/pvcreate.c
/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "tools.h"

/*
 * Initial sanity checking of recovery-related command-line arguments.
 * These args are: --restorefile, --uuid, and --setphysicalvolumesize
 *
 * Output arguments:
 * pp: structure allocated by caller, fields written / validated here
 */
static int _pvcreate_restore_params_from_args(struct cmd_context *cmd, int argc,
					      struct pvcreate_params *pp)
{
	pp->restorefile = arg_str_value(cmd, restorefile_ARG, NULL);

	if (arg_is_set(cmd, restorefile_ARG) && !arg_is_set(cmd, uuidstr_ARG)) {
		log_error("--uuid is required with --restorefile");
		return 0;
	}

	if (!arg_is_set(cmd, restorefile_ARG) && arg_is_set(cmd, uuidstr_ARG)) {
		if (!arg_is_set(cmd, norestorefile_ARG) &&
		    find_config_tree_bool(cmd, devices_require_restorefile_with_uuid_CFG, NULL)) {
			log_error("--restorefile is required with --uuid");
			return 0;
		}
	}

	if (arg_is_set(cmd, uuidstr_ARG) && argc != 1) {
		log_error("Can only set uuid on one volume at a time");
		return 0;
	}

	if (arg_is_set(cmd, uuidstr_ARG)) {
		pp->uuid_str = arg_str_value(cmd, uuidstr_ARG, "");
		if (!id_read_format(&pp->pva.id, pp->uuid_str))
			return 0;
		pp->pva.idp = &pp->pva.id;
	}

	if (arg_sign_value(cmd, setphysicalvolumesize_ARG, SIGN_NONE) == SIGN_MINUS) {
		log_error("Physical volume size may not be negative");
		return 0;
	}
	pp->pva.size = arg_uint64_value(cmd, setphysicalvolumesize_ARG, UINT64_C(0));

	if (arg_is_set(cmd, restorefile_ARG) || arg_is_set(cmd, uuidstr_ARG))
		pp->zero = 0;

	return 1;
}
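
/*
 * Example invocations exercising the checks above (the device names,
 * file names, and uuid are illustrative, not taken from the source):
 *
 *   # accepted: --restorefile and --uuid given together, one device
 *   pvcreate --restorefile /etc/lvm/archive/vg0_00042-1234567890.vg \
 *            --uuid 56ogEk-OzLS-cKBc-z9vJ-XfHz-WJtl-vziUVl /dev/sdb
 *
 *   # rejected: --restorefile without --uuid
 *   pvcreate --restorefile /etc/lvm/archive/vg0_00042-1234567890.vg /dev/sdb
 *
 *   # rejected: uuid can only be set on one device at a time
 *   pvcreate --uuid 56ogEk-OzLS-cKBc-z9vJ-XfHz-WJtl-vziUVl /dev/sdb /dev/sdc
 */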

static int _pvcreate_restore_params_from_backup(struct cmd_context *cmd,
						struct pvcreate_params *pp)
{
	struct volume_group *vg;
	struct pv_list *existing_pvl;

	/*
	 * When restoring a PV, params need to be read from a backup file.
	 */
	if (!pp->restorefile)
		return 1;

	if (!(vg = backup_read_vg(cmd, NULL, pp->restorefile))) {
		log_error("Unable to read volume group from %s", pp->restorefile);
		return 0;
	}

	if (!(existing_pvl = find_pv_in_vg_by_uuid(vg, &pp->pva.id))) {
		release_vg(vg);
		log_error("Can't find uuid %s in backup file %s",
			  pp->uuid_str, pp->restorefile);
		return 0;
	}

	pp->pva.ba_start = pv_ba_start(existing_pvl->pv);
	pp->pva.ba_size = pv_ba_size(existing_pvl->pv);
	pp->pva.pe_start = pv_pe_start(existing_pvl->pv);
	pp->pva.extent_size = pv_pe_size(existing_pvl->pv);
	pp->pva.extent_count = pv_pe_count(existing_pvl->pv);

	release_vg(vg);
	return 1;
}
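
/*
 * The values copied from the backup above correspond to fields in the
 * pv section of the text-format backup file, e.g. (abridged sketch,
 * values made up for illustration):
 *
 *   physical_volumes {
 *       pv0 {
 *           id = "56ogEk-OzLS-cKBc-z9vJ-XfHz-WJtl-vziUVl"
 *           device = "/dev/sdb"
 *           pe_start = 2048
 *           pe_count = 2559
 *       }
 *   }
 *
 * ba_start / ba_size are present only when a bootloader area exists.
 */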

int pvcreate(struct cmd_context *cmd, int argc, char **argv)
{
	struct processing_handle *handle;
	struct pvcreate_params pp;
	int ret;

	/*
	 * Five kinds of pvcreate param values:
	 * 1. defaults
	 * 2. recovery-related command line args
	 * 3. recovery-related args from backup file
	 * 4. normal command line args
	 *    (this also checks some settings from 2 & 3)
	 * 5. argc/argv free args specifying devices
	 */

	pvcreate_params_set_defaults(&pp);

	if (!_pvcreate_restore_params_from_args(cmd, argc, &pp))
		return EINVALID_CMD_LINE;

	if (!_pvcreate_restore_params_from_backup(cmd, &pp))
		return EINVALID_CMD_LINE;

	if (!pvcreate_params_from_args(cmd, &pp))
		return EINVALID_CMD_LINE;

	/*
	 * If --metadatasize was not given with --restorefile, set it to pe_start.
	 * Later code treats this as a maximum size and reduces it to fit.
	 */
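	/*
	 * Illustrative numbers: a restorefile recording pe_start = 2048
	 * sectors (1 MiB) caps pvmetadatasize at 2048 sectors, so the
	 * metadata area ends where the restored data area begins.
	 */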
	if (!arg_is_set(cmd, metadatasize_ARG) && arg_is_set(cmd, restorefile_ARG))
		pp.pva.pvmetadatasize = pp.pva.pe_start;

	/* FIXME Also needs to check any 2nd metadata area isn't inside the data area! */

	pp.pv_count = argc;
	pp.pv_names = argv;

	/* Needed to change the set of orphan PVs. */
	if (!lock_global(cmd, "ex"))
		return_ECMD_FAILED;

	clear_hint_file(cmd);

	cmd->create_edit_devices_file = 1;

	if (!lvmcache_label_scan(cmd))
		return_ECMD_FAILED;

	if (!(handle = init_processing_handle(cmd, NULL))) {
		log_error("Failed to initialize processing handle.");
		return ECMD_FAILED;
	}

	if (!pvcreate_each_device(cmd, handle, &pp))
		ret = ECMD_FAILED;
	else
		ret = ECMD_PROCESSED;

	destroy_processing_handle(cmd, handle);

	return ret;
}