lvm2/tools/toollib.c
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "tools.h"
#include "lib/format_text/format-text.h"
#include "lib/label/hints.h"
#include "lib/device/device_id.h"
#include "lib/device/online.h"
#include <sys/stat.h>
#include <signal.h>
#include <sys/wait.h>
#include <sys/utsname.h>
#define report_log_ret_code(ret_code) report_current_object_cmdlog(REPORT_OBJECT_CMDLOG_NAME, \
((ret_code) == ECMD_PROCESSED) ? REPORT_OBJECT_CMDLOG_SUCCESS \
: REPORT_OBJECT_CMDLOG_FAILURE, (ret_code))
const char *command_name(struct cmd_context *cmd)
{
return cmd->command->name;
}
static void _sigchld_handler(int sig __attribute__((unused)))
{
while (wait4(-1, NULL, WNOHANG | WUNTRACED, NULL) > 0) ;
}
/*
* returns:
* -1 if the fork failed
* 0 if the parent
* 1 if the child
*/
int become_daemon(struct cmd_context *cmd, int skip_lvm)
{
static const char devnull[] = "/dev/null";
int null_fd;
pid_t pid;
struct sigaction act = {
.sa_handler = _sigchld_handler,
.sa_flags = SA_NOCLDSTOP,
};
log_verbose("Forking background process from command: %s", cmd->cmd_line);
if (sigaction(SIGCHLD, &act, NULL))
log_warn("WARNING: Failed to set SIGCHLD action.");
if (!skip_lvm)
if (!sync_local_dev_names(cmd)) { /* Flush ops and reset dm cookie */
log_error("Failed to sync local devices before forking.");
return -1;
}
if ((pid = fork()) == -1) {
log_error("fork failed: %s", strerror(errno));
return -1;
}
/* Parent */
if (pid > 0)
return 0;
/* Child */
if (setsid() == -1)
log_error("Background process failed to setsid: %s",
strerror(errno));
/* Set this to avoid discarding output from background process */
// #define DEBUG_CHILD
#ifndef DEBUG_CHILD
if ((null_fd = open(devnull, O_RDWR)) == -1) {
log_sys_error("open", devnull);
_exit(ECMD_FAILED);
}
/* coverity[leaked_handle] don't care */
if ((dup2(null_fd, STDIN_FILENO) < 0) || /* reopen stdin */
(dup2(null_fd, STDOUT_FILENO) < 0) || /* reopen stdout */
(dup2(null_fd, STDERR_FILENO) < 0)) { /* reopen stderr */
log_sys_error("dup2", "redirect");
(void) close(null_fd);
_exit(ECMD_FAILED);
}
if (null_fd > STDERR_FILENO)
(void) close(null_fd);
init_verbose(VERBOSE_BASE_LEVEL);
#endif /* DEBUG_CHILD */
strncpy(*cmd->argv, "(lvm2)", strlen(*cmd->argv));
if (!skip_lvm) {
reset_locking();
lvmcache_destroy(cmd, 1, 1);
if (!lvmcache_init(cmd))
/* FIXME Clean up properly here */
_exit(ECMD_FAILED);
}
/* coverity[leaked_handle] null_fd does not leak here */
return 1;
}
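/*
 * Illustrative sketch (hedged, not part of the build): how a hypothetical
 * caller might branch on the -1/0/1 return values documented above.
 * _example_spawn_background() is not a real LVM2 function.
 */
#if 0
static int _example_spawn_background(struct cmd_context *cmd)
{
    switch (become_daemon(cmd, 0)) {
    case -1:
        /* fork failed; still running in the original process */
        return 0;
    case 0:
        /* parent: carry on with the foreground command */
        return 1;
    default:
        /* child: do the background work, then exit without returning */
        _exit(ECMD_PROCESSED);
    }
}
#endif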
/*
* Strip dev_dir if present
*/
const char *skip_dev_dir(struct cmd_context *cmd, const char *vg_name,
unsigned *dev_dir_found)
{
size_t devdir_len = strlen(cmd->dev_dir);
const char *dmdir = dm_dir() + devdir_len;
size_t dmdir_len = strlen(dmdir), vglv_sz;
char *vgname = NULL, *lvname, *layer, *vglv;
/* FIXME Do this properly */
if (*vg_name == '/')
while (vg_name[1] == '/')
vg_name++;
if (strncmp(vg_name, cmd->dev_dir, devdir_len)) {
if (dev_dir_found)
*dev_dir_found = 0;
} else {
if (dev_dir_found)
*dev_dir_found = 1;
vg_name += devdir_len;
while (*vg_name == '/')
vg_name++;
/* Reformat string if /dev/mapper found */
if (!strncmp(vg_name, dmdir, dmdir_len) && vg_name[dmdir_len] == '/') {
vg_name += dmdir_len + 1;
while (*vg_name == '/')
vg_name++;
if (!dm_split_lvm_name(cmd->mem, vg_name, &vgname, &lvname, &layer) ||
*layer) {
log_error("skip_dev_dir: Couldn't split up device name %s.",
vg_name);
return vg_name;
}
vglv_sz = strlen(vgname) + strlen(lvname) + 2;
if (!(vglv = dm_pool_alloc(cmd->mem, vglv_sz)) ||
dm_snprintf(vglv, vglv_sz, "%s%s%s", vgname,
*lvname ? "/" : "",
lvname) < 0) {
log_error("vg/lv string alloc failed.");
return vg_name;
}
return vglv;
}
}
return vg_name;
}
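/*
 * Hedged sketch (not built): expected behaviour of skip_dev_dir() assuming
 * a dev_dir of "/dev".  Both "/dev/vg0/lv0" and "/dev/mapper/vg0-lv0" are
 * reduced to "vg0/lv0"; dev_dir_found reports whether the prefix was seen.
 * The VG/LV names are hypothetical.
 */
#if 0
static void _example_skip_dev_dir(struct cmd_context *cmd)
{
    unsigned found = 0;
    const char *name;

    name = skip_dev_dir(cmd, "/dev/vg0/lv0", &found);
    /* name is now "vg0/lv0", found == 1 */

    name = skip_dev_dir(cmd, "/dev/mapper/vg0-lv0", &found);
    /* name is now "vg0/lv0", found == 1 */

    name = skip_dev_dir(cmd, "vg0/lv0", &found);
    /* name is returned unchanged, found == 0 */
    (void) name;
}
#endif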
static int _printed_clustered_vg_advice = 0;
/*
* Three possible results:
* a) return 0, skip 0: take the VG, and cmd will end in success
* b) return 0, skip 1: skip the VG, and cmd will end in success
* c) return 1, skip *: skip the VG, and cmd will end in failure
*
* Case b is the special case, and includes the following:
* . The VG is inconsistent, and the command allows for inconsistent VGs.
* . The VG is clustered, the host cannot access clustered VG's,
* and the command option has been used to ignore clustered vgs.
*
* Case c covers the other errors returned when reading the VG.
* If *skip is 1, it's OK for the caller to read the list of PVs in the VG.
*/
static int _ignore_vg(struct cmd_context *cmd,
uint32_t error_flags, struct volume_group *error_vg,
const char *vg_name, struct dm_list *arg_vgnames,
uint32_t read_flags, int *skip, int *notfound)
{
uint32_t read_error = error_flags;
*skip = 0;
*notfound = 0;
if ((read_error & FAILED_NOTFOUND) && (read_flags & READ_OK_NOTFOUND)) {
*notfound = 1;
return 0;
}
if (read_error & FAILED_CLUSTERED) {
if (arg_vgnames && str_list_match_item(arg_vgnames, vg_name)) {
log_error("Cannot access clustered VG %s.", vg_name);
if (!_printed_clustered_vg_advice) {
_printed_clustered_vg_advice = 1;
log_error("See lvmlockd(8) for changing a clvm/clustered VG to a shared VG.");
}
return 1;
} else {
log_warn("WARNING: Skipping clustered VG %s.", vg_name);
if (!_printed_clustered_vg_advice) {
_printed_clustered_vg_advice = 1;
log_error("See lvmlockd(8) for changing a clvm/clustered VG to a shared VG.");
}
*skip = 1;
return 0;
}
}
if (read_error & FAILED_EXPORTED) {
if (arg_vgnames && str_list_match_item(arg_vgnames, vg_name)) {
log_error("Volume group %s is exported", vg_name);
return 1;
} else {
read_error &= ~FAILED_EXPORTED; /* Check for other errors */
log_verbose("Skipping exported volume group %s", vg_name);
*skip = 1;
}
}
/*
* Commands that operate on "all vgs" shouldn't be bothered by
* skipping a foreign VG, and the command shouldn't fail when
* one is skipped. But, if the command explicitly asked to
* operate on a foreign VG and it's skipped, then the command
* would expect to fail.
*/
if (read_error & FAILED_SYSTEMID) {
if (arg_vgnames && str_list_match_item(arg_vgnames, vg_name)) {
log_error("Cannot access VG %s with system ID %s with %slocal system ID%s%s.",
vg_name,
error_vg ? error_vg->system_id : "unknown ",
cmd->system_id ? "" : "unknown ",
cmd->system_id ? " " : "",
cmd->system_id ? cmd->system_id : "");
return 1;
} else {
read_error &= ~FAILED_SYSTEMID; /* Check for other errors */
log_verbose("Skipping foreign volume group %s", vg_name);
*skip = 1;
}
}
/*
* Accessing a lockd VG when lvmlockd is not used is similar
* to accessing a foreign VG.
* This is also the point where a command fails if it failed
* to acquire the necessary lock from lvmlockd.
* The two cases are distinguished by FAILED_LOCK_TYPE (the
* VG lock_type requires lvmlockd), and FAILED_LOCK_MODE (the
* command failed to acquire the necessary lock.)
*/
if (read_error & (FAILED_LOCK_TYPE | FAILED_LOCK_MODE)) {
if (arg_vgnames && str_list_match_item(arg_vgnames, vg_name)) {
if (read_error & FAILED_LOCK_TYPE)
log_error("Cannot access VG %s with lock type %s that requires lvmlockd.",
vg_name,
error_vg ? error_vg->lock_type : "unknown");
/* For FAILED_LOCK_MODE, the error is printed in vg_read. */
return 1;
} else {
read_error &= ~FAILED_LOCK_TYPE; /* Check for other errors */
read_error &= ~FAILED_LOCK_MODE;
log_verbose("Skipping volume group %s", vg_name);
*skip = 1;
}
}
if (read_error != SUCCESS) {
*skip = 0;
if (is_orphan_vg(vg_name))
log_error("Cannot process standalone physical volumes");
else
log_error("Cannot process volume group %s", vg_name);
return 1;
}
return 0;
}
/*
* This function updates the "selected" arg only if the last item processed
* is selected, which implements "the whole structure is selected if
* at least one of its items is selected".
*/
static void _update_selection_result(struct processing_handle *handle, int *selected)
{
if (!handle || !handle->selection_handle)
return;
if (handle->selection_handle->selected)
*selected = 1;
}
static void _set_final_selection_result(struct processing_handle *handle, int selected)
{
if (!handle || !handle->selection_handle)
return;
handle->selection_handle->selected = selected;
}
/*
* Metadata iteration functions
*/
int process_each_segment_in_pv(struct cmd_context *cmd,
struct volume_group *vg,
struct physical_volume *pv,
struct processing_handle *handle,
process_single_pvseg_fn_t process_single_pvseg)
{
struct pv_segment *pvseg;
int whole_selected = 0;
int ret_max = ECMD_PROCESSED;
int ret;
struct pv_segment _free_pv_segment = { .pv = pv };
if (dm_list_empty(&pv->segments)) {
ret = process_single_pvseg(cmd, NULL, &_free_pv_segment, handle);
if (ret != ECMD_PROCESSED)
stack;
if (ret > ret_max)
ret_max = ret;
} else {
dm_list_iterate_items(pvseg, &pv->segments) {
if (sigint_caught())
return_ECMD_FAILED;
ret = process_single_pvseg(cmd, vg, pvseg, handle);
_update_selection_result(handle, &whole_selected);
if (ret != ECMD_PROCESSED)
stack;
if (ret > ret_max)
ret_max = ret;
}
}
/* the PV is selected if at least one PV segment is selected */
_set_final_selection_result(handle, whole_selected);
return ret_max;
}
int process_each_segment_in_lv(struct cmd_context *cmd,
struct logical_volume *lv,
struct processing_handle *handle,
process_single_seg_fn_t process_single_seg)
{
struct lv_segment *seg;
int whole_selected = 0;
int ret_max = ECMD_PROCESSED;
int ret;
dm_list_iterate_items(seg, &lv->segments) {
if (sigint_caught())
return_ECMD_FAILED;
ret = process_single_seg(cmd, seg, handle);
_update_selection_result(handle, &whole_selected);
if (ret != ECMD_PROCESSED)
stack;
if (ret > ret_max)
ret_max = ret;
}
/* the LV is selected if at least one LV segment is selected */
_set_final_selection_result(handle, whole_selected);
return ret_max;
}
static const char *_extract_vgname(struct cmd_context *cmd, const char *lv_name,
const char **after)
{
const char *vg_name = lv_name;
char *st, *pos;
/* Strip dev_dir (optional) */
if (!(vg_name = skip_dev_dir(cmd, vg_name, NULL)))
return_0;
/* Require exactly one set of consecutive slashes */
if ((st = pos = strchr(vg_name, '/')))
while (*st == '/')
st++;
if (!st || strchr(st, '/')) {
log_error("\"%s\": Invalid path for Logical Volume.",
lv_name);
return 0;
}
if (!(vg_name = dm_pool_strndup(cmd->mem, vg_name, pos - vg_name))) {
log_error("Allocation of vg_name failed.");
return 0;
}
if (after)
*after = st;
return vg_name;
}
/*
* Extract default volume group name from environment
*/
static const char *_default_vgname(struct cmd_context *cmd)
{
const char *vg_path;
/* Take default VG from environment? */
vg_path = getenv("LVM_VG_NAME");
if (!vg_path)
return 0;
vg_path = skip_dev_dir(cmd, vg_path, NULL);
if (strchr(vg_path, '/')) {
log_error("\"%s\": Invalid environment var LVM_VG_NAME set for Volume Group.",
vg_path);
return 0;
}
return dm_pool_strdup(cmd->mem, vg_path);
}
/*
* Determine volume group name from a logical volume name
*/
const char *extract_vgname(struct cmd_context *cmd, const char *lv_name)
{
const char *vg_name = lv_name;
/* Path supplied? */
if (vg_name && strchr(vg_name, '/')) {
if (!(vg_name = _extract_vgname(cmd, lv_name, NULL)))
return_NULL;
return vg_name;
}
if (!(vg_name = _default_vgname(cmd))) {
if (lv_name)
log_error("Path required for Logical Volume \"%s\".",
lv_name);
return NULL;
}
return vg_name;
}
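/*
 * Hedged sketch (not built): extract_vgname() pulls the VG name out of a
 * "vg/lv" path; for a bare LV name it falls back to the LVM_VG_NAME
 * environment variable and returns NULL (with an error) if that is unset.
 * The names used here are hypothetical.
 */
#if 0
static void _example_extract_vgname(struct cmd_context *cmd)
{
    const char *vg;

    vg = extract_vgname(cmd, "vg0/lv0");       /* "vg0" */
    vg = extract_vgname(cmd, "/dev/vg0/lv0");  /* "vg0" */
    vg = extract_vgname(cmd, "lv0");           /* $LVM_VG_NAME or NULL */
    (void) vg;
}
#endif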
const char _pe_size_may_not_be_negative_msg[] = "Physical extent size may not be negative.";
int vgcreate_params_set_defaults(struct cmd_context *cmd,
struct vgcreate_params *vp_def,
struct volume_group *vg)
{
int64_t extent_size;
/* Only vgsplit sets vg */
if (vg) {
vp_def->vg_name = NULL;
vp_def->extent_size = vg->extent_size;
vp_def->max_pv = vg->max_pv;
vp_def->max_lv = vg->max_lv;
vp_def->alloc = vg->alloc;
vp_def->vgmetadatacopies = vg->mda_copies;
vp_def->system_id = vg->system_id; /* No need to clone this */
} else {
vp_def->vg_name = NULL;
extent_size = find_config_tree_int64(cmd,
allocation_physical_extent_size_CFG, NULL) * 2;
if (extent_size < 0) {
log_error(_pe_size_may_not_be_negative_msg);
return 0;
}
vp_def->extent_size = (uint32_t) extent_size;
vp_def->max_pv = DEFAULT_MAX_PV;
vp_def->max_lv = DEFAULT_MAX_LV;
vp_def->alloc = DEFAULT_ALLOC_POLICY;
vp_def->vgmetadatacopies = DEFAULT_VGMETADATACOPIES;
vp_def->system_id = cmd->system_id;
}
return 1;
}
/*
* Set members of struct vgcreate_params from cmdline arguments.
* Do preliminary validation with arg_*() interface.
* Further, more generic validation is done in validate_vgcreate_params().
* This function is to remain in tools directory.
*/
int vgcreate_params_set_from_args(struct cmd_context *cmd,
struct vgcreate_params *vp_new,
struct vgcreate_params *vp_def)
{
const char *system_id_arg_str;
const char *lock_type = NULL;
int use_lvmlockd;
lock_type_t lock_type_num;
if (arg_is_set(cmd, clustered_ARG)) {
log_error("The clustered option is deprecated, see --shared.");
return 0;
}
vp_new->vg_name = skip_dev_dir(cmd, vp_def->vg_name, NULL);
vp_new->max_lv = arg_uint_value(cmd, maxlogicalvolumes_ARG,
vp_def->max_lv);
vp_new->max_pv = arg_uint_value(cmd, maxphysicalvolumes_ARG,
vp_def->max_pv);
vp_new->alloc = (alloc_policy_t) arg_uint_value(cmd, alloc_ARG, vp_def->alloc);
/* Units of 512-byte sectors */
vp_new->extent_size =
arg_uint_value(cmd, physicalextentsize_ARG, vp_def->extent_size);
if (arg_sign_value(cmd, physicalextentsize_ARG, SIGN_NONE) == SIGN_MINUS) {
log_error(_pe_size_may_not_be_negative_msg);
return 0;
}
if (arg_uint64_value(cmd, physicalextentsize_ARG, 0) > MAX_EXTENT_SIZE) {
log_error("Physical extent size must be smaller than %s.",
display_size(cmd, (uint64_t) MAX_EXTENT_SIZE));
return 0;
}
if (arg_sign_value(cmd, maxlogicalvolumes_ARG, SIGN_NONE) == SIGN_MINUS) {
log_error("Max Logical Volumes may not be negative.");
return 0;
}
if (arg_sign_value(cmd, maxphysicalvolumes_ARG, SIGN_NONE) == SIGN_MINUS) {
log_error("Max Physical Volumes may not be negative.");
return 0;
}
if (arg_is_set(cmd, vgmetadatacopies_ARG))
vp_new->vgmetadatacopies = arg_int_value(cmd, vgmetadatacopies_ARG,
DEFAULT_VGMETADATACOPIES);
else
vp_new->vgmetadatacopies = find_config_tree_int(cmd, metadata_vgmetadatacopies_CFG, NULL);
    if ((system_id_arg_str = arg_str_value(cmd, systemid_ARG, NULL))) {
        vp_new->system_id = system_id_from_string(cmd, system_id_arg_str);
    } else {
        vp_new->system_id = vp_def->system_id;
    }
    /* FIXME Take local/extra_system_ids into account */
    if (system_id_arg_str) {
        if (!vp_new->system_id || !vp_new->system_id[0])
            log_warn("WARNING: A VG without a system ID allows unsafe access from other hosts.");
        if (vp_new->system_id && cmd->system_id &&
            strcmp(vp_new->system_id, cmd->system_id)) {
            log_warn("WARNING: VG with system ID %s might become inaccessible as local system ID is %s",
                     vp_new->system_id, cmd->system_id);
        }
    }
/*
* Locking: what kind of locking should be used for the
* new VG, and is it compatible with current lvm.conf settings.
*
* The end result is to set vp_new->lock_type to:
* none | clvm | dlm | sanlock | idm.
*
* If 'vgcreate --lock-type <arg>' is set, the answer is given
* directly by <arg> which is one of none|clvm|dlm|sanlock|idm.
*
* 'vgcreate --clustered y' is the way to create clvm VGs.
*
* 'vgcreate --shared' is the way to create lockd VGs.
* lock_type of sanlock, dlm or idm is selected based on
* which lock manager is running.
*
*
* 1. Using neither clvmd nor lvmlockd.
* ------------------------------------------------
* lvm.conf:
* global/use_lvmlockd = 0
* global/locking_type = 1
*
* - no locking is enabled
* - clvmd is not used
* - lvmlockd is not used
* - VGs with CLUSTERED set are ignored (requires clvmd)
* - VGs with lockd type are ignored (requires lvmlockd)
* - vgcreate can create new VGs with lock_type none
* - 'vgcreate --clustered y' fails
* - 'vgcreate --shared' fails
* - 'vgcreate' (neither option) creates a local VG
*
* 2. Using clvmd.
* ------------------------------------------------
* lvm.conf:
* global/use_lvmlockd = 0
* global/locking_type = 3
*
* - locking through clvmd is enabled (traditional clvm config)
* - clvmd is used
* - lvmlockd is not used
* - VGs with CLUSTERED set can be used
* - VGs with lockd type are ignored (requires lvmlockd)
* - vgcreate can create new VGs with CLUSTERED status flag
* - 'vgcreate --clustered y' works
* - 'vgcreate --shared' fails
* - 'vgcreate' (neither option) creates a clvm VG
*
* 3. Using lvmlockd.
* ------------------------------------------------
* lvm.conf:
* global/use_lvmlockd = 1
* global/locking_type = 1
*
* - locking through lvmlockd is enabled
* - clvmd is not used
* - lvmlockd is used
* - VGs with CLUSTERED set are ignored (requires clvmd)
* - VGs with lockd type can be used
* - vgcreate can create new VGs with lock_type sanlock, dlm or idm
* - 'vgcreate --clustered y' fails
* - 'vgcreate --shared' works
* - 'vgcreate' (neither option) creates a local VG
*/
use_lvmlockd = find_config_tree_bool(cmd, global_use_lvmlockd_CFG, NULL);
if (arg_is_set(cmd, locktype_ARG)) {
lock_type = arg_str_value(cmd, locktype_ARG, "");
if (arg_is_set(cmd, shared_ARG) && !is_lockd_type(lock_type)) {
log_error("The --shared option requires lock type sanlock, dlm or idm.");
return 0;
}
} else if (arg_is_set(cmd, shared_ARG)) {
int found_multiple = 0;
if (use_lvmlockd) {
if (!(lock_type = lockd_running_lock_type(cmd, &found_multiple))) {
if (found_multiple)
log_error("Found multiple lock managers, select one with --lock-type.");
else
log_error("Failed to detect a running lock manager to select lock type.");
return 0;
}
} else {
log_error("Using a shared lock type requires lvmlockd (lvm.conf use_lvmlockd.)");
return 0;
}
} else {
lock_type = "none";
}
/*
* Check that the lock_type is recognized, and is being
* used with the correct lvm.conf settings.
*/
lock_type_num = get_lock_type_from_string(lock_type);
switch (lock_type_num) {
case LOCK_TYPE_INVALID:
case LOCK_TYPE_CLVM:
log_error("lock_type %s is invalid", lock_type);
return 0;
case LOCK_TYPE_SANLOCK:
case LOCK_TYPE_DLM:
case LOCK_TYPE_IDM:
if (!use_lvmlockd) {
log_error("Using a shared lock type requires lvmlockd.");
return 0;
}
break;
case LOCK_TYPE_NONE:
break;
};
/*
* The vg is not owned by one host/system_id.
* Locking coordinates access from multiple hosts.
*/
if (lock_type_num == LOCK_TYPE_DLM || lock_type_num == LOCK_TYPE_SANLOCK)
vp_new->system_id = NULL;
vp_new->lock_type = lock_type;
log_debug("Setting lock_type to %s", vp_new->lock_type);
return 1;
}
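/*
 * Hedged sketch (not built): the two-step flow a command such as vgcreate
 * is assumed to follow - seed the parameters with defaults, then override
 * them from command-line arguments.  The VG name is hypothetical.
 */
#if 0
static int _example_vgcreate_params(struct cmd_context *cmd)
{
    struct vgcreate_params vp_def = { 0 };
    struct vgcreate_params vp_new = { 0 };

    if (!vgcreate_params_set_defaults(cmd, &vp_def, NULL))
        return 0;

    vp_def.vg_name = "vg0";

    if (!vgcreate_params_set_from_args(cmd, &vp_new, &vp_def))
        return 0;

    return 1;
}
#endif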
/* Shared code for changing activation state for vgchange/lvchange */
int lv_change_activate(struct cmd_context *cmd, struct logical_volume *lv,
activation_change_t activate)
{
int r = 1;
int integrity_recalculate;
struct logical_volume *snapshot_lv;
if (lv_is_cache_pool(lv)) {
if (is_change_activating(activate)) {
log_verbose("Skipping activation of cache pool %s.",
display_lvname(lv));
return 1;
}
if (!dm_list_empty(&lv->segs_using_this_lv)) {
log_verbose("Skipping deactivation of used cache pool %s.",
display_lvname(lv));
return 1;
}
/*
* Only deactivation of an unused cache pool is allowed to pass here.
* This is useful only for recovery after failed zeroing of the metadata LV.
*/
}
if (lv_is_merging_origin(lv)) {
/*
* For a merging origin, its snapshot must be inactive.
* If the snapshot is still active and cannot be deactivated,
* activation or deactivation of the origin fails!
*
* When the origin is being deactivated and the merging snapshot is thin,
* the origin itself can be deactivated, but an error is still reported,
* since the thin snapshot remains active.
*
* The user can then retry with another deactivation of the origin,
* which is the only visible LV.
*/
snapshot_lv = find_snapshot(lv)->lv;
if (lv_is_thin_type(snapshot_lv) && !deactivate_lv(cmd, snapshot_lv)) {
if (is_change_activating(activate)) {
log_error("Refusing to activate merging volume %s while "
"snapshot volume %s is still active.",
display_lvname(lv), display_lvname(snapshot_lv));
return 0;
}
log_error("Cannot fully deactivate merging origin volume %s while "
"snapshot volume %s is still active.",
display_lvname(lv), display_lvname(snapshot_lv));
r = 0; /* and continue to deactivate origin... */
}
}
if (is_change_activating(activate) &&
lvmcache_has_duplicate_devs() &&
vg_has_duplicate_pvs(lv->vg) &&
!find_config_tree_bool(cmd, devices_allow_changes_with_duplicate_pvs_CFG, NULL)) {
log_error("Cannot activate LVs in VG %s while PVs appear on duplicate devices.",
lv->vg->name);
return 0;
}
if ((integrity_recalculate = lv_has_integrity_recalculate_metadata(lv))) {
/* Don't want pvscan to write VG while running from systemd service. */
if (!strcmp(cmd->name, "pvscan")) {
log_error("Cannot activate uninitialized integrity LV %s from pvscan.",
display_lvname(lv));
return 0;
}
if (vg_is_shared(lv->vg)) {
uint32_t lockd_state = 0;
if (!lockd_vg(cmd, lv->vg->name, "ex", 0, &lockd_state)) {
log_error("Cannot activate uninitialized integrity LV %s without lock.",
display_lvname(lv));
return 0;
}
}
}
if (!lv_active_change(cmd, lv, activate))
return_0;
/* Write VG metadata to clear the integrity recalculate flag. */
if (integrity_recalculate && lv_is_active(lv)) {
log_print_unless_silent("Updating VG to complete initialization of integrity LV %s.",
display_lvname(lv));
lv_clear_integrity_recalculate_metadata(lv);
}
/*
* When LVs are deactivated, then autoactivation of the VG is
* "re-armed" by removing the vg online file. So, after deactivation
* of LVs, if PVs are disconnected and reconnected again, event
* activation will trigger autoactivation again. This secondary
* autoactivation is somewhat different from, and not as important as
* the initial autoactivation during system startup. The secondary
* autoactivation will happen to a VG on a running system and may be
* mixing with user commands, so the end result is unpredictable.
*
* It's possible that we might want a config setting for users to
* disable secondary autoactivations. Once a system is up, the
* user may want to take charge of activation changes to the VG
* and not have the system autoactivation interfere.
*/
if (!is_change_activating(activate) && cmd->event_activation &&
!cmd->online_vg_file_removed) {
cmd->online_vg_file_removed = 1;
online_vg_file_remove(lv->vg->name);
}
set_lv_notify(lv->vg->cmd);
return r;
}
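/*
 * Hedged sketch (not built): vgchange/lvchange are assumed to call this
 * helper with an activation_change_t value such as CHANGE_AY (activate)
 * or CHANGE_AN (deactivate); both constants are assumed to come from
 * metadata-exported.h.  The wrapper name is hypothetical.
 */
#if 0
static int _example_lv_change_activate(struct cmd_context *cmd,
                                       struct logical_volume *lv,
                                       int activating)
{
    return lv_change_activate(cmd, lv, activating ? CHANGE_AY : CHANGE_AN);
}
#endif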
int lv_refresh(struct cmd_context *cmd, struct logical_volume *lv)
{
struct logical_volume *snapshot_lv;
if (lv_is_merging_origin(lv)) {
snapshot_lv = find_snapshot(lv)->lv;
if (lv_is_thin_type(snapshot_lv) && !deactivate_lv(cmd, snapshot_lv))
log_print_unless_silent("Delaying merge for origin volume %s since "
"snapshot volume %s is still active.",
display_lvname(lv), display_lvname(snapshot_lv));
}
if (!lv_refresh_suspend_resume(lv))
return_0;
/*
* check if snapshot merge should be polled
* - unfortunately: even though the dev_manager will clear
* the lv's merge attributes if a merge is not possible;
* it is clearing a different instance of the lv (as
* retrieved with lv_from_lvid)
* - fortunately: polldaemon will immediately shutdown if the
* origin doesn't have a status with a snapshot percentage
*/
if (background_polling() && lv_is_merging_origin(lv) && lv_is_active(lv))
lv_spawn_background_polling(cmd, lv);
return 1;
}
int vg_refresh_visible(struct cmd_context *cmd, struct volume_group *vg)
{
struct lv_list *lvl;
int r = 1;
sigint_allow();
dm_list_iterate_items(lvl, &vg->lvs) {
if (sigint_caught()) {
r = 0;
stack;
break;
}
if (lv_is_visible(lvl->lv) &&
!(lv_is_cow(lvl->lv) && !lv_is_virtual_origin(origin_from_cow(lvl->lv))) &&
!lv_refresh(cmd, lvl->lv)) {
r = 0;
stack;
}
}
sigint_restore();
return r;
}
void lv_spawn_background_polling(struct cmd_context *cmd,
struct logical_volume *lv)
{
const char *pvname;
const struct logical_volume *lv_mirr = NULL;
/* Ensure there is nothing waiting on cookie */
if (!sync_local_dev_names(cmd))
log_warn("WARNING: Failed to sync local dev names.");
if (lv_is_pvmove(lv))
lv_mirr = lv;
else if (lv_is_locked(lv))
lv_mirr = find_pvmove_lv_in_lv(lv);
if (lv_mirr &&
(pvname = get_pvmove_pvname_from_lv_mirr(lv_mirr))) {
log_verbose("Spawning background pvmove process for %s.",
pvname);
pvmove_poll(cmd, pvname, lv_mirr->lvid.s, lv_mirr->vg->name, lv_mirr->name, 1);
}
if (lv_is_converting(lv) || lv_is_merging(lv)) {
log_verbose("Spawning background lvconvert process for %s.",
lv->name);
lvconvert_poll(cmd, lv, 1);
}
}
int get_activation_monitoring_mode(struct cmd_context *cmd,
int *monitoring_mode)
{
*monitoring_mode = DEFAULT_DMEVENTD_MONITOR;
if (arg_is_set(cmd, monitor_ARG) &&
(arg_is_set(cmd, ignoremonitoring_ARG) ||
arg_is_set(cmd, sysinit_ARG))) {
log_error("--ignoremonitoring or --sysinit option not allowed with --monitor option.");
return 0;
}
if (arg_is_set(cmd, monitor_ARG))
*monitoring_mode = arg_int_value(cmd, monitor_ARG,
DEFAULT_DMEVENTD_MONITOR);
else if (is_static() || arg_is_set(cmd, ignoremonitoring_ARG) ||
arg_is_set(cmd, sysinit_ARG) ||
!find_config_tree_bool(cmd, activation_monitoring_CFG, NULL))
*monitoring_mode = DMEVENTD_MONITOR_IGNORE;
return 1;
}
/*
* Read pool options from cmdline
*/
int get_pool_params(struct cmd_context *cmd,
const struct segment_type *segtype,
int *pool_data_vdo,
uint64_t *pool_metadata_size,
int *pool_metadata_spare,
uint32_t *chunk_size,
thin_discards_t *discards,
thin_zero_t *zero_new_blocks)
{
if ((*pool_data_vdo = arg_int_value(cmd, pooldatavdo_ARG, 0))) {
if (!(segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_VDO)))
return_0;
if (activation() && segtype->ops->target_present) {
if (!segtype->ops->target_present(cmd, NULL, NULL)) {
log_error("%s: Required device-mapper target(s) not detected in your kernel.",
segtype->name);
return_0;
}
}
}
if (segtype_is_thin_pool(segtype) || segtype_is_thin(segtype) || *pool_data_vdo) {
if (arg_is_set(cmd, zero_ARG)) {
*zero_new_blocks = arg_int_value(cmd, zero_ARG, 0) ? THIN_ZERO_YES : THIN_ZERO_NO;
log_very_verbose("%s pool zeroing.",
(*zero_new_blocks == THIN_ZERO_YES) ? "Enabling" : "Disabling");
} else
*zero_new_blocks = THIN_ZERO_UNSELECTED;
if (arg_is_set(cmd, discards_ARG)) {
*discards = (thin_discards_t) arg_uint_value(cmd, discards_ARG, 0);
log_very_verbose("Setting pool discards to %s.",
get_pool_discards_name(*discards));
} else
*discards = THIN_DISCARDS_UNSELECTED;
}
if (arg_from_list_is_negative(cmd, "may not be negative",
chunksize_ARG,
pooldatasize_ARG,
poolmetadatasize_ARG,
-1))
return_0;
if (arg_from_list_is_zero(cmd, "may not be zero",
chunksize_ARG,
pooldatasize_ARG,
poolmetadatasize_ARG,
-1))
return_0;
if (arg_is_set(cmd, chunksize_ARG)) {
*chunk_size = arg_uint_value(cmd, chunksize_ARG, 0);
if (!validate_pool_chunk_size(cmd, segtype, *chunk_size))
return_0;
log_very_verbose("Setting pool chunk size to %s.",
display_size(cmd, *chunk_size));
} else
*chunk_size = 0;
if (arg_is_set(cmd, poolmetadatasize_ARG)) {
if (arg_is_set(cmd, poolmetadata_ARG)) {
log_error("Please specify either metadata logical volume or its size.");
return 0;
}
*pool_metadata_size = arg_uint64_value(cmd, poolmetadatasize_ARG,
UINT64_C(0));
} else
*pool_metadata_size = 0;
/* TODO: default in lvm.conf and metadata profile ? */
*pool_metadata_spare = arg_int_value(cmd, poolmetadataspare_ARG,
DEFAULT_POOL_METADATA_SPARE);
return 1;
}
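/*
 * Hedged sketch (not built): how a pool-creating command might collect the
 * pool options parsed above.  The wrapper name is hypothetical; the
 * argument types mirror the prototype of get_pool_params().
 */
#if 0
static int _example_get_pool_params(struct cmd_context *cmd,
                                    const struct segment_type *segtype)
{
    int pool_data_vdo = 0, pool_metadata_spare = 0;
    uint64_t pool_metadata_size = 0;
    uint32_t chunk_size = 0;
    thin_discards_t discards;
    thin_zero_t zero_new_blocks;

    return get_pool_params(cmd, segtype, &pool_data_vdo,
                           &pool_metadata_size, &pool_metadata_spare,
                           &chunk_size, &discards, &zero_new_blocks);
}
#endif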
/*
* Generic stripe parameter checks.
*/
static int _validate_stripe_params(struct cmd_context *cmd, const struct segment_type *segtype,
uint32_t *stripes, uint32_t *stripe_size)
{
if (*stripes < 1 || *stripes > MAX_STRIPES) {
log_error("Number of stripes (%d) must be between %d and %d.",
*stripes, 1, MAX_STRIPES);
return 0;
}
if (!segtype_supports_stripe_size(segtype)) {
if (*stripe_size) {
log_print_unless_silent("Ignoring stripesize argument for %s devices.",
segtype->name);
*stripe_size = 0;
}
} else if (*stripes == 1) {
if (*stripe_size) {
log_print_unless_silent("Ignoring stripesize argument with single stripe.");
*stripe_size = 0;
}
} else {
if (!*stripe_size) {
*stripe_size = find_config_tree_int(cmd, metadata_stripesize_CFG, NULL) * 2;
log_print_unless_silent("Using default stripesize %s.",
display_size(cmd, (uint64_t) *stripe_size));
}
if (*stripe_size > STRIPE_SIZE_LIMIT * 2) {
log_error("Stripe size cannot be larger than %s.",
display_size(cmd, (uint64_t) STRIPE_SIZE_LIMIT));
return 0;
} else if (*stripe_size < STRIPE_SIZE_MIN || !is_power_of_2(*stripe_size)) {
log_error("Invalid stripe size %s.",
display_size(cmd, (uint64_t) *stripe_size));
return 0;
}
}
return 1;
}
/*
* The stripe size is limited by the size of a uint32_t, but since the
* value given by the user is doubled, and the final result must be a
* power of 2, we must divide UINT_MAX by four and add 1 (to round it
* up to a power of 2).
*/
int get_stripe_params(struct cmd_context *cmd, const struct segment_type *segtype,
uint32_t *stripes, uint32_t *stripe_size,
unsigned *stripes_supplied, unsigned *stripe_size_supplied)
{
/* stripes_long_ARG takes precedence (for lvconvert) */
/* FIXME Cope with relative +/- changes for lvconvert. */
if (arg_is_set(cmd, stripes_long_ARG)) {
*stripes = arg_uint_value(cmd, stripes_long_ARG, 0);
*stripes_supplied = 1;
} else if (arg_is_set(cmd, stripes_ARG)) {
*stripes = arg_uint_value(cmd, stripes_ARG, 0);
*stripes_supplied = 1;
} else {
/*
* FIXME add segtype parameter for min_stripes and remove logic for this
* from all other places
*/
if (segtype_is_any_raid6(segtype))
*stripes = 3;
else if (segtype_is_striped_raid(segtype))
*stripes = 2;
else
*stripes = 1;
*stripes_supplied = 0;
}
if ((*stripe_size = arg_uint_value(cmd, stripesize_ARG, 0))) {
if (arg_sign_value(cmd, stripesize_ARG, SIGN_NONE) == SIGN_MINUS) {
log_error("Negative stripesize is invalid.");
return 0;
}
}
*stripe_size_supplied = arg_is_set(cmd, stripesize_ARG);
return _validate_stripe_params(cmd, segtype, stripes, stripe_size);
}
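/*
 * Hedged sketch (not built): collecting stripe parameters; stripe_size is
 * handled in 512-byte sectors (the user-supplied value is doubled), as the
 * comment above notes.  The wrapper name is hypothetical.
 */
#if 0
static int _example_get_stripe_params(struct cmd_context *cmd,
                                      const struct segment_type *segtype)
{
    uint32_t stripes = 0, stripe_size = 0;
    unsigned stripes_supplied = 0, stripe_size_supplied = 0;

    return get_stripe_params(cmd, segtype, &stripes, &stripe_size,
                             &stripes_supplied, &stripe_size_supplied);
}
#endif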
static int _validate_cachepool_params(const char *policy_name, cache_mode_t cache_mode)
{
/*
* FIXME: it might be nice if cmd def rules could check option values,
* then a rule could do this.
*/
if ((cache_mode == CACHE_MODE_WRITEBACK) && policy_name && !strcmp(policy_name, "cleaner")) {
log_error("Cache mode \"writeback\" is not compatible with cache policy \"cleaner\".");
return 0;
}
return 1;
}
int get_cache_params(struct cmd_context *cmd,
uint32_t *chunk_size,
cache_metadata_format_t *cache_metadata_format,
cache_mode_t *cache_mode,
const char **name,
struct dm_config_tree **settings)
{
const char *str;
struct arg_value_group_list *group;
struct dm_config_tree *result = NULL, *prev = NULL, *current = NULL;
struct dm_config_node *cn;
int ok = 0;
if (arg_is_set(cmd, chunksize_ARG)) {
*chunk_size = arg_uint_value(cmd, chunksize_ARG, 0);
if (!validate_cache_chunk_size(cmd, *chunk_size))
return_0;
log_very_verbose("Setting pool chunk size to %s.",
display_size(cmd, *chunk_size));
}
*cache_metadata_format = (cache_metadata_format_t)
arg_uint_value(cmd, cachemetadataformat_ARG, CACHE_METADATA_FORMAT_UNSELECTED);
*cache_mode = (cache_mode_t) arg_uint_value(cmd, cachemode_ARG, CACHE_MODE_UNSELECTED);
*name = arg_str_value(cmd, cachepolicy_ARG, NULL);
if (!_validate_cachepool_params(*name, *cache_mode))
goto_out;
dm_list_iterate_items(group, &cmd->arg_value_groups) {
if (!grouped_arg_is_set(group->arg_values, cachesettings_ARG))
continue;
if (!(current = dm_config_create()))
goto_out;
if (prev)
current->cascade = prev;
prev = current;
if (!(str = grouped_arg_str_value(group->arg_values,
cachesettings_ARG,
NULL)))
goto_out;
if (!dm_config_parse_without_dup_node_check(current, str, str + strlen(str)))
goto_out;
}
if (current) {
if (!(result = dm_config_flatten(current)))
goto_out;
if (result->root) {
if (!(cn = dm_config_create_node(result, "policy_settings")))
goto_out;
cn->child = result->root;
result->root = cn;
}
}
ok = 1;
out:
if (!ok && result) {
dm_config_destroy(result);
result = NULL;
}
while (prev) {
current = prev->cascade;
dm_config_destroy(prev);
prev = current;
}
*settings = result;
return ok;
}
/*
* Compare a VDO option name, skipping any '_' in the name,
* and also allow it to be given without the vdo_[use_] prefix
*/
static int _compare_vdo_option(const char *b1, const char *b2)
{
int use_skipped = 0;
if (strncasecmp(b1, "vdo", 3) == 0) // skip vdo prefix
b1 += 3;
while (*b1 && *b2) {
if (tolower(*b1) == tolower(*b2)) {
++b1;
++b2;
continue; // matching char
}
if (*b1 == '_')
++b1; // skip to next char
else if (*b2 == '_')
++b2; // skip to next char
else {
if (!use_skipped++ && (strncmp(b2, "use_", 4) == 0)) {
b2 += 4; // try again with skipped prefix 'use_'
continue;
}
break; // mismatch
}
}
return (*b1 || *b2) ? 0 : 1;
}
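/*
 * Hedged examples of the matching rules above (not built): all the keys in
 * the first group are accepted spellings of the same --vdosettings option,
 * while a different option name is rejected.
 */
#if 0
static void _example_compare_vdo_option(void)
{
    /* Each of these returns 1 against option name "use_compression". */
    (void) _compare_vdo_option("use_compression", "use_compression");
    (void) _compare_vdo_option("compression", "use_compression");
    (void) _compare_vdo_option("vdo_use_compression", "use_compression");

    /* Underscores are optional: returns 1. */
    (void) _compare_vdo_option("blockmapcachesizemb", "block_map_cache_size_mb");

    /* A different setting name: returns 0. */
    (void) _compare_vdo_option("compression", "use_deduplication");
}
#endif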
#define CHECK_AND_SET(var, onoff) \
option = #var;\
if (_compare_vdo_option(cn->key, option)) {\
if (is_lvchange || !cn->v || (cn->v->type != DM_CFG_INT))\
goto err;\
if (vtp->var != cn->v->v.i) {\
vtp->var = cn->v->v.i;\
u |= onoff;\
}\
continue;\
}
#define DO_OFFLINE(var) \
CHECK_AND_SET(var, VDO_CHANGE_OFFLINE)
#define DO_ONLINE(var) \
CHECK_AND_SET(var, VDO_CHANGE_ONLINE)
int get_vdo_settings(struct cmd_context *cmd,
struct dm_vdo_target_params *vtp,
int *updated)
{
const char *str, *option = NULL;
struct arg_value_group_list *group;
struct dm_config_tree *result = NULL, *prev = NULL, *current = NULL;
struct dm_config_node *cn;
int r = 0, u = 0, is_lvchange;
int use_compression = vtp->use_compression;
int use_deduplication = vtp->use_deduplication;
int checked_lvchange;
if (updated)
*updated = 0;
// Group all --vdosettings
dm_list_iterate_items(group, &cmd->arg_value_groups) {
if (!grouped_arg_is_set(group->arg_values, vdosettings_ARG))
continue;
if (!(current = dm_config_create()))
goto_out;
if (prev)
current->cascade = prev;
prev = current;
if (!(str = grouped_arg_str_value(group->arg_values,
vdosettings_ARG,
NULL)))
goto_out;
if (!dm_config_parse_without_dup_node_check(current, str, str + strlen(str)))
goto_out;
}
if (current) {
if (!(result = dm_config_flatten(current)))
goto_out;
checked_lvchange = !strcmp(cmd->name, "lvchange");
/* Use all acceptable VDO options */
for (cn = result->root; cn; cn = cn->sib) {
is_lvchange = 0;
DO_OFFLINE(ack_threads);
DO_OFFLINE(bio_rotation);
DO_OFFLINE(bio_threads);
DO_OFFLINE(block_map_cache_size_mb);
DO_OFFLINE(block_map_era_length);
DO_OFFLINE(block_map_period); // alias for block_map_era_length
DO_OFFLINE(cpu_threads);
DO_OFFLINE(hash_zone_threads);
DO_OFFLINE(logical_threads);
DO_OFFLINE(max_discard);
DO_OFFLINE(physical_threads);
// Support also these - even when we have regular opts for them
DO_ONLINE(use_compression);
DO_ONLINE(use_deduplication);
// Settings below cannot be changed with the lvchange command
is_lvchange = checked_lvchange;
DO_OFFLINE(index_memory_size_mb);
DO_OFFLINE(minimum_io_size);
DO_OFFLINE(slab_size_mb);
DO_OFFLINE(use_metadata_hints);
DO_OFFLINE(use_sparse_index);
option = "write_policy";
if (_compare_vdo_option(cn->key, option)) {
if (is_lvchange || !cn->v || (cn->v->type != DM_CFG_STRING))
goto err;
if (!set_vdo_write_policy(&vtp->write_policy, cn->v->v.str))
goto_out;
u |= VDO_CHANGE_OFFLINE;
continue;
}
if (_compare_vdo_option(cn->key, "check_point_frequency")) {
log_verbose("Ignoring deprecated --vdosettings option \"%s\" and its value.", cn->key);
continue; /* Accept & ignore deprecated option */
}
log_error("Unknown VDO setting \"%s\".", cn->key);
goto out;
}
}
if (arg_is_set(cmd, compression_ARG)) {
vtp->use_compression = arg_int_value(cmd, compression_ARG, 0);
if (vtp->use_compression != use_compression)
u |= VDO_CHANGE_ONLINE;
}
if (arg_is_set(cmd, deduplication_ARG)) {
vtp->use_deduplication = arg_int_value(cmd, deduplication_ARG, 0);
if (vtp->use_deduplication != use_deduplication)
u |= VDO_CHANGE_ONLINE;
}
// validation of updated VDO option
if (!dm_vdo_validate_target_params(vtp, 0 /* vdo_size */))
goto_out;
if (updated)
*updated = u;
r = 1; // success
goto out;
err:
if (is_lvchange)
log_error("Cannot change VDO setting \"vdo_%s\" in existing VDO pool.",
option);
else
log_error("Invalid argument for VDO setting \"vdo_%s\".",
option);
out:
if (result)
dm_config_destroy(result);
while (prev) {
current = prev->cascade;
dm_config_destroy(prev);
prev = current;
}
return r;
}
static int _get_one_writecache_setting(struct cmd_context *cmd, struct writecache_settings *settings,
char *key, char *val, uint32_t *block_size_sectors)
{
/* special case: block_size is not a setting but is set with the --cachesettings option */
if (!strncmp(key, "block_size", strlen("block_size"))) {
uint32_t block_size = 0;
if (sscanf(val, "%u", &block_size) != 1)
goto_bad;
if (block_size == 512)
*block_size_sectors = 1;
else if (block_size == 4096)
*block_size_sectors = 8;
else
goto_bad;
return 1;
}
if (!strncmp(key, "high_watermark", strlen("high_watermark"))) {
if (sscanf(val, "%llu", (unsigned long long *)&settings->high_watermark) != 1)
goto_bad;
if (settings->high_watermark > 100)
goto_bad;
settings->high_watermark_set = 1;
return 1;
}
if (!strncmp(key, "low_watermark", strlen("low_watermark"))) {
if (sscanf(val, "%llu", (unsigned long long *)&settings->low_watermark) != 1)
goto_bad;
if (settings->low_watermark > 100)
goto_bad;
settings->low_watermark_set = 1;
return 1;
}
if (!strncmp(key, "writeback_jobs", strlen("writeback_jobs"))) {
if (sscanf(val, "%llu", (unsigned long long *)&settings->writeback_jobs) != 1)
goto_bad;
settings->writeback_jobs_set = 1;
return 1;
}
if (!strncmp(key, "autocommit_blocks", strlen("autocommit_blocks"))) {
if (sscanf(val, "%llu", (unsigned long long *)&settings->autocommit_blocks) != 1)
goto_bad;
settings->autocommit_blocks_set = 1;
return 1;
}
if (!strncmp(key, "autocommit_time", strlen("autocommit_time"))) {
if (sscanf(val, "%llu", (unsigned long long *)&settings->autocommit_time) != 1)
goto_bad;
settings->autocommit_time_set = 1;
return 1;
}
if (!strncmp(key, "fua", strlen("fua"))) {
if (settings->nofua_set) {
log_error("Setting fua and nofua cannot both be set.");
return 0;
}
if (sscanf(val, "%u", &settings->fua) != 1)
goto_bad;
settings->fua_set = 1;
return 1;
}
if (!strncmp(key, "nofua", strlen("nofua"))) {
if (settings->fua_set) {
log_error("Setting fua and nofua cannot both be set.");
return 0;
}
if (sscanf(val, "%u", &settings->nofua) != 1)
goto_bad;
settings->nofua_set = 1;
return 1;
}
if (!strncmp(key, "cleaner", strlen("cleaner"))) {
if (sscanf(val, "%u", &settings->cleaner) != 1)
goto_bad;
settings->cleaner_set = 1;
return 1;
}
if (!strncmp(key, "max_age", strlen("max_age"))) {
if (sscanf(val, "%u", &settings->max_age) != 1)
goto_bad;
settings->max_age_set = 1;
return 1;
}
if (!strncmp(key, "metadata_only", strlen("metadata_only"))) {
if (sscanf(val, "%u", &settings->metadata_only) != 1)
goto_bad;
settings->metadata_only_set = 1;
return 1;
}
if (!strncmp(key, "pause_writeback", strlen("pause_writeback"))) {
if (sscanf(val, "%u", &settings->pause_writeback) != 1)
goto_bad;
settings->pause_writeback_set = 1;
return 1;
}
if (settings->new_key) {
log_error("Setting %s is not recognized. Only one unrecognized setting is allowed.", key);
return 0;
}
log_warn("WARNING: Unrecognized writecache setting \"%s\" may cause activation failure.", key);
if (yes_no_prompt("Use unrecognized writecache setting? [y/n]: ") == 'n') {
log_error("Aborting writecache conversion.");
return 0;
}
log_warn("WARNING: Using unrecognized writecache setting: %s = %s.", key, val);
settings->new_key = dm_pool_strdup(cmd->mem, key);
settings->new_val = dm_pool_strdup(cmd->mem, val);
return 1;
bad:
log_error("Invalid setting: %s", key);
return 0;
}
int get_writecache_settings(struct cmd_context *cmd, struct writecache_settings *settings,
uint32_t *block_size_sectors)
{
const struct dm_config_node *cns, *cn1, *cn2;
struct arg_value_group_list *group;
const char *str;
char key[64];
char val[64];
int num;
unsigned pos;
int rn;
int found = 0;
/*
* "grouped" means that multiple --cachesettings options can be used.
* Each option is also allowed to contain multiple key = val pairs.
*/
dm_list_iterate_items(group, &cmd->arg_value_groups) {
if (!grouped_arg_is_set(group->arg_values, cachesettings_ARG))
continue;
if (!(str = grouped_arg_str_value(group->arg_values, cachesettings_ARG, NULL)))
break;
pos = 0;
while (pos < strlen(str)) {
/* scan for "key1=val1 key2 = val2 key3= val3" */
memset(key, 0, sizeof(key));
memset(val, 0, sizeof(val));
if (sscanf(str + pos, " %63[^=]=%63s %n", key, val, &num) != 2) {
log_error("Invalid setting at: %s", str+pos);
return 0;
}
pos += num;
if (!_get_one_writecache_setting(cmd, settings, key, val, block_size_sectors))
return_0;
}
found = 1;
}
if (found)
goto out;
/*
* If there were no settings on the command line, look for settings in
* lvm.conf
*
* TODO: support profiles
*/
if (!(cns = find_config_tree_node(cmd, allocation_cache_settings_CFG_SECTION, NULL)))
goto out;
for (cn1 = cns->child; cn1; cn1 = cn1->sib) {
if (!cn1->child)
continue; /* Ignore section without settings */
if (cn1->v || strcmp(cn1->key, "writecache") != 0)
continue; /* Ignore non-matching settings */
cn2 = cn1->child;
for (; cn2; cn2 = cn2->sib) {
memset(val, 0, sizeof(val));
if (cn2->v->type == DM_CFG_INT)
rn = dm_snprintf(val, sizeof(val), FMTd64, cn2->v->v.i);
else if (cn2->v->type == DM_CFG_STRING)
rn = dm_snprintf(val, sizeof(val), "%s", cn2->v->v.str);
else
rn = -1;
if (rn < 0) {
log_error("Invalid lvm.conf writecache setting value for %s.", cn2->key);
return 0;
}
if (!_get_one_writecache_setting(cmd, settings, (char *)cn2->key, val, block_size_sectors))
return_0;
}
}
out:
if (settings->high_watermark_set && settings->low_watermark_set &&
(settings->high_watermark <= settings->low_watermark)) {
log_error("High watermark must be greater than low watermark.");
return 0;
}
return 1;
}
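/*
 * Hedged sketch (not built): get_writecache_settings() consumes grouped
 * --cachesettings values such as "high_watermark=50 nofua=1", or falls
 * back to an allocation/cache_settings/writecache section in lvm.conf
 * (section names taken from the config lookups above).  The wrapper name
 * is hypothetical.
 */
#if 0
static int _example_get_writecache_settings(struct cmd_context *cmd)
{
    struct writecache_settings settings = { 0 };
    uint32_t block_size_sectors = 0;

    return get_writecache_settings(cmd, &settings, &block_size_sectors);
}
#endif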
/* FIXME move to lib */
static int _pv_change_tag(struct physical_volume *pv, const char *tag, int addtag)
{
if (addtag) {
if (!str_list_add(pv->fmt->cmd->mem, &pv->tags, tag)) {
log_error("Failed to add tag %s to physical volume %s.",
tag, pv_dev_name(pv));
return 0;
}
} else
str_list_del(&pv->tags, tag);
return 1;
}
/* Set exactly one of VG, LV or PV */
int change_tag(struct cmd_context *cmd, struct volume_group *vg,
struct logical_volume *lv, struct physical_volume *pv, int arg)
{
const char *tag;
struct arg_value_group_list *current_group;
dm_list_iterate_items(current_group, &cmd->arg_value_groups) {
if (!grouped_arg_is_set(current_group->arg_values, arg))
continue;
if (!(tag = grouped_arg_str_value(current_group->arg_values, arg, NULL))) {
log_error("Failed to get tag.");
return 0;
}
if (vg && !vg_change_tag(vg, tag, arg == addtag_ARG))
return_0;
else if (lv && !lv_change_tag(lv, tag, arg == addtag_ARG))
return_0;
else if (pv && !_pv_change_tag(pv, tag, arg == addtag_ARG))
return_0;
}
return 1;
}
/*
* FIXME: replace process_each_label() with process_each_vg() which is
* based on performing vg_read(), which provides a correct representation
* of VGs/PVs, that is not provided by lvmcache_label_scan().
*/
int process_each_label(struct cmd_context *cmd, int argc, char **argv,
struct processing_handle *handle,
process_single_label_fn_t process_single_label)
{
log_report_t saved_log_report_state = log_get_report_state();
struct label *label;
struct dev_iter *iter;
struct device *dev;
struct lvmcache_info *info;
struct dm_list process_duplicates;
struct device_list *devl;
int ret_max = ECMD_PROCESSED;
int ret;
int opt = 0;
dm_list_init(&process_duplicates);
log_set_report_object_type(LOG_REPORT_OBJECT_TYPE_LABEL);
if (!lvmcache_label_scan(cmd)) {
ret_max = ECMD_FAILED;
goto_out;
}
if (argc) {
for (; opt < argc; opt++) {
if (sigint_caught()) {
log_error("Interrupted.");
ret_max = ECMD_FAILED;
goto out;
}
if (!(dev = dev_cache_get_existing(cmd, argv[opt], cmd->filter))) {
log_error("Failed to find device "
"\"%s\".", argv[opt]);
ret_max = ECMD_FAILED;
continue;
}
if (!(label = lvmcache_get_dev_label(dev))) {
if (!lvmcache_dev_is_unused_duplicate(dev)) {
log_error("No physical volume label read from %s.", argv[opt]);
ret_max = ECMD_FAILED;
} else {
if (!(devl = malloc(sizeof(*devl))))
return_0;
devl->dev = dev;
dm_list_add(&process_duplicates, &devl->list);
}
continue;
}
log_set_report_object_name_and_id(dev_name(dev), NULL);
ret = process_single_label(cmd, label, handle);
report_log_ret_code(ret);
if (ret > ret_max)
ret_max = ret;
log_set_report_object_name_and_id(NULL, NULL);
}
dm_list_iterate_items(devl, &process_duplicates) {
if (sigint_caught()) {
log_error("Interrupted.");
ret_max = ECMD_FAILED;
goto out;
}
/*
* remove the existing dev for this pvid from lvmcache
* so that the duplicate dev can replace it.
*/
if ((info = lvmcache_info_from_pvid(devl->dev->pvid, NULL, 0)))
lvmcache_del(info);
/*
* add info to lvmcache from the duplicate dev.
*/
label_scan_dev(cmd, devl->dev);
/*
* the info/label should now be found because
* the label_read should have added it.
*/
if (!(label = lvmcache_get_dev_label(devl->dev)))
continue;
log_set_report_object_name_and_id(dev_name(devl->dev), NULL);
ret = process_single_label(cmd, label, handle);
report_log_ret_code(ret);
if (ret > ret_max)
ret_max = ret;
log_set_report_object_name_and_id(NULL, NULL);
}
goto out;
}
if (!(iter = dev_iter_create(cmd->filter, 1))) {
log_error("dev_iter creation failed.");
ret_max = ECMD_FAILED;
goto out;
}
while ((dev = dev_iter_get(cmd, iter))) {
if (sigint_caught()) {
log_error("Interrupted.");
ret_max = ECMD_FAILED;
break;
}
if (!(label = lvmcache_get_dev_label(dev)))
continue;
log_set_report_object_name_and_id(dev_name(label->dev), NULL);
ret = process_single_label(cmd, label, handle);
report_log_ret_code(ret);
if (ret > ret_max)
ret_max = ret;
log_set_report_object_name_and_id(NULL, NULL);
}
dev_iter_destroy(iter);
out:
log_restore_report_state(saved_log_report_state);
return ret_max;
}
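/*
 * Hedged sketch (not built): a hypothetical per-label callback and the
 * call that drives it through process_each_label().  The callback shape
 * follows the process_single_label() calls made above.
 */
#if 0
static int _example_single_label(struct cmd_context *cmd, struct label *label,
                                 struct processing_handle *handle)
{
    log_print_unless_silent("Found PV label on %s.", dev_name(label->dev));
    return ECMD_PROCESSED;
}

static int _example_each_label(struct cmd_context *cmd, int argc, char **argv,
                               struct processing_handle *handle)
{
    return process_each_label(cmd, argc, argv, handle, _example_single_label);
}
#endif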
/*
* Parse persistent major minor parameters.
*
* --persistent is unspecified => state is deduced
* from presence of options --minor or --major.
*
* -Mn => --minor or --major not allowed.
*
* -My => --minor is required (and also --major on <=2.4)
*/
int get_and_validate_major_minor(const struct cmd_context *cmd,
const struct format_type *fmt,
int32_t *major, int32_t *minor)
{
if (arg_count(cmd, minor_ARG) > 1) {
log_error("Option --minor may not be repeated.");
return 0;
}
if (arg_count(cmd, major_ARG) > 1) {
log_error("Option -j|--major may not be repeated.");
return 0;
}
/* Check with default 'y' */
if (!arg_int_value(cmd, persistent_ARG, 1)) { /* -Mn */
if (arg_is_set(cmd, minor_ARG) || arg_is_set(cmd, major_ARG)) {
log_error("Options --major and --minor are incompatible with -Mn.");
return 0;
}
*major = *minor = -1;
return 1;
}
/* -1 cannot be entered as an argument for --major, --minor */
*major = arg_int_value(cmd, major_ARG, -1);
*minor = arg_int_value(cmd, minor_ARG, -1);
if (arg_is_set(cmd, persistent_ARG)) { /* -My */
if (*minor == -1) {
log_error("Please specify minor number with --minor when using -My.");
return 0;
}
}
if (!strncmp(cmd->kernel_vsn, "2.4.", 4)) {
/* Major is required for 2.4 */
if (arg_is_set(cmd, persistent_ARG) && *major < 0) {
log_error("Please specify major number with --major when using -My.");
return 0;
}
} else {
if (*major != -1) {
log_warn("WARNING: Ignoring supplied major number %d - "
"kernel assigns major numbers dynamically. "
"Using major number %d instead.",
*major, cmd->dev_types->device_mapper_major);
}
/* Stay with dynamic major:minor if minor is not specified. */
*major = (*minor == -1) ? -1 : (int)cmd->dev_types->device_mapper_major;
}
if ((*minor != -1) && !validate_major_minor(cmd, fmt, *major, *minor))
return_0;
return 1;
}
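/*
 * Hedged sketch (not built): with -Mn both numbers come back as -1; with
 * -My a minor number is required, and on non-2.4 kernels the major is
 * forced to the device-mapper major, as described above.  The wrapper
 * name is hypothetical.
 */
#if 0
static int _example_major_minor(struct cmd_context *cmd,
                                const struct format_type *fmt)
{
    int32_t major = -1, minor = -1;

    return get_and_validate_major_minor(cmd, fmt, &major, &minor);
}
#endif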
/*
* Validate lvname parameter
*
* If the lvname contains a vgname, the vgname is extracted from it.
* If a vgname is also passed in, the two are checked to be the same name.
*/
int validate_lvname_param(struct cmd_context *cmd, const char **vg_name,
const char **lv_name)
{
const char *vgname;
const char *lvname;
if (!lv_name || !*lv_name)
return 1; /* NULL lvname is ok */
/* If contains VG name, extract it. */
if (strchr(*lv_name, (int) '/')) {
if (!(vgname = _extract_vgname(cmd, *lv_name, &lvname)))
return_0;
if (!*vg_name)
*vg_name = vgname;
else if (strcmp(vgname, *vg_name)) {
log_error("Please use a single volume group name "
"(\"%s\" or \"%s\").", vgname, *vg_name);
return 0;
}
*lv_name = lvname;
}
if (!validate_name(*lv_name)) {
log_error("Logical volume name \"%s\" is invalid.",
*lv_name);
return 0;
}
return 1;
}
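/*
 * Hedged sketch (not built): splitting a combined "vg/lv" argument; the
 * names are hypothetical.  On success vg_name points to "vg0" and lv_name
 * to "lv0".
 */
#if 0
static int _example_validate_lvname(struct cmd_context *cmd)
{
    const char *vg_name = NULL;
    const char *lv_name = "vg0/lv0";

    return validate_lvname_param(cmd, &vg_name, &lv_name);
}
#endif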
/*
* Validate lvname parameter
* This name must follow restriction rules on prefixes and suffixes.
*
* If the lvname contains a vgname, the vgname is extracted from it.
* If a vgname is also passed in, the two are checked to be the same name.
*/
int validate_restricted_lvname_param(struct cmd_context *cmd, const char **vg_name,
const char **lv_name)
{
if (!validate_lvname_param(cmd, vg_name, lv_name))
return_0;
if (lv_name && *lv_name && !apply_lvname_restrictions(*lv_name))
return_0;
return 1;
}
/*
* Extract list of VG names and list of tags from command line arguments.
*/
static int _get_arg_vgnames(struct cmd_context *cmd,
int argc, char **argv,
const char *one_vgname,
struct dm_list *use_vgnames,
struct dm_list *arg_vgnames,
struct dm_list *arg_tags)
{
int opt = 0;
int ret_max = ECMD_PROCESSED;
const char *vg_name;
if (one_vgname) {
if (!str_list_add(cmd->mem, arg_vgnames,
dm_pool_strdup(cmd->mem, one_vgname))) {
log_error("strlist allocation failed.");
return ECMD_FAILED;
}
return ret_max;
}
if (use_vgnames && !dm_list_empty(use_vgnames)) {
dm_list_splice(arg_vgnames, use_vgnames);
return ret_max;
}
for (; opt < argc; opt++) {
vg_name = argv[opt];
if (*vg_name == '@') {
if (!validate_tag(vg_name + 1)) {
log_error("Skipping invalid tag: %s", vg_name);
if (ret_max < EINVALID_CMD_LINE)
ret_max = EINVALID_CMD_LINE;
continue;
}
if (!str_list_add(cmd->mem, arg_tags,
dm_pool_strdup(cmd->mem, vg_name + 1))) {
log_error("strlist allocation failed.");
return ECMD_FAILED;
}
continue;
}
vg_name = skip_dev_dir(cmd, vg_name, NULL);
if (strchr(vg_name, '/')) {
log_error("Invalid volume group name %s.", vg_name);
if (ret_max < EINVALID_CMD_LINE)
ret_max = EINVALID_CMD_LINE;
continue;
}
if (!str_list_add(cmd->mem, arg_vgnames,
dm_pool_strdup(cmd->mem, vg_name))) {
log_error("strlist allocation failed.");
return ECMD_FAILED;
}
}
return ret_max;
}
struct processing_handle *init_processing_handle(struct cmd_context *cmd, struct processing_handle *parent_handle)
{
struct processing_handle *handle;
if (!(handle = dm_pool_zalloc(cmd->mem, sizeof(struct processing_handle)))) {
log_error("_init_processing_handle: failed to allocate memory for processing handle");
return NULL;
}
handle->parent = parent_handle;
/*
* For any reporting tool, the internal_report_for_select is reset to 0
* automatically because the internal reporting/selection is simply not
* needed - the reporting/selection is already a part of the code path
* used there.
*
* *The internal report for select is only needed for non-reporting tools!*
*/
handle->internal_report_for_select = arg_is_set(cmd, select_ARG);
handle->include_historical_lvs = cmd->include_historical_lvs;
if (!parent_handle && !cmd->cmd_report.report_group) {
if (!report_format_init(cmd)) {
dm_pool_free(cmd->mem, handle);
return NULL;
}
} else
cmd->cmd_report.saved_log_report_state = log_get_report_state();
log_set_report_context(LOG_REPORT_CONTEXT_PROCESSING);
return handle;
}
int init_selection_handle(struct cmd_context *cmd, struct processing_handle *handle,
report_type_t initial_report_type)
{
struct selection_handle *sh;
const char *selection;
if (!(sh = dm_pool_zalloc(cmd->mem, sizeof(struct selection_handle)))) {
log_error("_init_selection_handle: failed to allocate memory for selection handle");
return 0;
}
if (!report_get_single_selection(cmd, initial_report_type, &selection))
return_0;
sh->report_type = initial_report_type;
if (!(sh->selection_rh = report_init_for_selection(cmd, &sh->report_type, selection))) {
dm_pool_free(cmd->mem, sh);
return_0;
}
handle->selection_handle = sh;
return 1;
}
void destroy_processing_handle(struct cmd_context *cmd, struct processing_handle *handle)
{
if (handle) {
if (handle->selection_handle && handle->selection_handle->selection_rh)
dm_report_free(handle->selection_handle->selection_rh);
log_restore_report_state(cmd->cmd_report.saved_log_report_state);
/*
* Do not destroy current cmd->report_group and cmd->log_rh
* (the log report) yet if we're running interactively
* (== running in lvm shell) or if there's a parent handle
* (== we're executing nested processing, like it is when
* doing selection for parent's process_each_* processing).
*
* In both cases, there's still possible further processing
* to do outside the processing covered by the handle we are
* destroying here and for which we may still need to access
* the log report to cover the rest of the processing.
*
*/
if (!cmd->is_interactive && !handle->parent) {
if (!dm_report_group_destroy(cmd->cmd_report.report_group))
stack;
cmd->cmd_report.report_group = NULL;
if (cmd->cmd_report.log_rh) {
dm_report_free(cmd->cmd_report.log_rh);
cmd->cmd_report.log_rh = NULL;
}
}
/*
* TODO: think about better alternatives:
* handle mempool, dm_alloc for handle memory...
*/
memset(handle, 0, sizeof(*handle));
}
}
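/*
 * Hedged sketch (not built): the usual lifecycle of a processing handle in
 * a command - create it, attach a selection handle when internal selection
 * is needed, run one of the process_each_* iterators, then destroy it.
 * The wrapper name and the commented-out iterator call are placeholders.
 */
#if 0
static int _example_processing_handle(struct cmd_context *cmd)
{
    struct processing_handle *handle;
    int ret = ECMD_FAILED;

    if (!(handle = init_processing_handle(cmd, NULL)))
        return ECMD_FAILED;

    if (handle->internal_report_for_select &&
        !init_selection_handle(cmd, handle, VGS))
        goto out;

    /* ret = process_each_vg(...); */
    ret = ECMD_PROCESSED;
out:
    destroy_processing_handle(cmd, handle);
    return ret;
}
#endif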
int select_match_vg(struct cmd_context *cmd, struct processing_handle *handle,
struct volume_group *vg)
{
int r;
if (!handle->internal_report_for_select)
return 1;
handle->selection_handle->orig_report_type = VGS;
if (!(r = report_for_selection(cmd, handle, NULL, vg, NULL)))
log_error("Selection failed for VG %s.", vg->name);
handle->selection_handle->orig_report_type = 0;
return r;
}
int select_match_lv(struct cmd_context *cmd, struct processing_handle *handle,
struct volume_group *vg, struct logical_volume *lv)
{
int r;
if (!handle->internal_report_for_select)
return 1;
handle->selection_handle->orig_report_type = LVS;
if (!(r = report_for_selection(cmd, handle, NULL, vg, lv)))
log_error("Selection failed for LV %s.", lv->name);
handle->selection_handle->orig_report_type = 0;
return r;
}
int select_match_pv(struct cmd_context *cmd, struct processing_handle *handle,
struct volume_group *vg, struct physical_volume *pv)
{
int r;
if (!handle->internal_report_for_select)
return 1;
handle->selection_handle->orig_report_type = PVS;
if (!(r = report_for_selection(cmd, handle, pv, vg, NULL)))
log_error("Selection failed for PV %s.", dev_name(pv->dev));
handle->selection_handle->orig_report_type = 0;
return r;
}
static int _select_matches(struct processing_handle *handle)
{
if (!handle->internal_report_for_select)
return 1;
return handle->selection_handle->selected;
}
static int _process_vgnameid_list(struct cmd_context *cmd, uint32_t read_flags,
struct dm_list *vgnameids_to_process,
struct dm_list *arg_vgnames,
struct dm_list *arg_tags,
struct processing_handle *handle,
process_single_vg_fn_t process_single_vg)
{
log_report_t saved_log_report_state = log_get_report_state();
char uuid[64] __attribute__((aligned(8)));
struct volume_group *vg;
struct volume_group *error_vg = NULL;
struct vgnameid_list *vgnl;
const char *vg_name;
const char *vg_uuid;
uint32_t lockd_state = 0;
uint32_t error_flags = 0;
int whole_selected = 0;
int ret_max = ECMD_PROCESSED;
int ret;
int skip;
int notfound;
int process_all = 0;
int do_report_ret_code = 1;
log_set_report_object_type(LOG_REPORT_OBJECT_TYPE_VG);
/*
* If no VG names or tags were supplied, then process all VGs.
*/
if (dm_list_empty(arg_vgnames) && dm_list_empty(arg_tags))
process_all = 1;
/*
* FIXME If one_vgname, only proceed if exactly one VG matches tags or selection.
*/
dm_list_iterate_items(vgnl, vgnameids_to_process) {
vg_name = vgnl->vg_name;
vg_uuid = vgnl->vgid;
skip = 0;
notfound = 0;
uuid[0] = '\0';
if (is_orphan_vg(vg_name)) {
log_set_report_object_type(LOG_REPORT_OBJECT_TYPE_ORPHAN);
log_set_report_object_name_and_id(vg_name + sizeof(VG_ORPHANS), uuid);
} else {
if (vg_uuid && !id_write_format((const struct id*)vg_uuid, uuid, sizeof(uuid)))
stack;
log_set_report_object_name_and_id(vg_name, uuid);
}
if (sigint_caught()) {
ret_max = ECMD_FAILED;
goto_out;
}
log_very_verbose("Processing VG %s %s", vg_name, uuid);
if (!lockd_vg(cmd, vg_name, NULL, 0, &lockd_state)) {
stack;
ret_max = ECMD_FAILED;
report_log_ret_code(ret_max);
continue;
}
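		/* Read the VG; on failure, error_flags and error_vg describe what vg_read() found. */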
vg = vg_read(cmd, vg_name, vg_uuid, read_flags, lockd_state, &error_flags, &error_vg);
if (_ignore_vg(cmd, error_flags, error_vg, vg_name, arg_vgnames, read_flags, &skip, &notfound)) {
stack;
ret_max = ECMD_FAILED;
report_log_ret_code(ret_max);
if (error_vg)
unlock_and_release_vg(cmd, error_vg, vg_name);
goto endvg;
}
if (error_vg)
unlock_and_release_vg(cmd, error_vg, vg_name);
if (skip || notfound)
goto endvg;
/* Process this VG? */
if ((process_all ||
(!dm_list_empty(arg_vgnames) && str_list_match_item(arg_vgnames, vg_name)) ||
(!dm_list_empty(arg_tags) && str_list_match_list(arg_tags, &vg->tags, NULL))) &&
select_match_vg(cmd, handle, vg) && _select_matches(handle)) {
log_very_verbose("Running command for VG %s %s", vg_name, vg_uuid ? uuid : "");
ret = process_single_vg(cmd, vg_name, vg, handle);
_update_selection_result(handle, &whole_selected);
if (ret != ECMD_PROCESSED)
stack;
report_log_ret_code(ret);
if (ret > ret_max)
ret_max = ret;
}
unlock_vg(cmd, vg, vg_name);
endvg:
release_vg(vg);
if (!lockd_vg(cmd, vg_name, "un", 0, &lockd_state))
stack;
log_set_report_object_name_and_id(NULL, NULL);
}
/* the VG is selected if at least one LV is selected */
_set_final_selection_result(handle, whole_selected);
do_report_ret_code = 0;
out:
if (do_report_ret_code)
report_log_ret_code(ret_max);
log_restore_report_state(saved_log_report_state);
return ret_max;
}

/*
* Check if a command line VG name is ambiguous, i.e. there are multiple VGs on
* the system that have the given name. If *one* VG with the given name is
* local and the rest are foreign, then use the local VG (removing foreign VGs
* with the same name from the vgnameids_on_system list). If multiple VGs with
* the given name are local, we don't know which VG is intended, so remove the
* ambiguous name from the list of args.
*/
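/*
 * For example, if two VGs named "vg0" exist and only one is local, the
 * foreign "vg0" is dropped from vgnameids_on_system and the name keeps
 * referring to the local VG; if both are local, "vg0" is removed from
 * arg_vgnames and the command fails for that name.
 */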
static int _resolve_duplicate_vgnames(struct cmd_context *cmd,
struct dm_list *arg_vgnames,
struct dm_list *vgnameids_on_system)
{
struct dm_str_list *sl, *sl2;
struct vgnameid_list *vgnl, *vgnl2;
char uuid[64] __attribute__((aligned(8)));
int found;
int ret = ECMD_PROCESSED;
dm_list_iterate_items_safe(sl, sl2, arg_vgnames) {
found = 0;
dm_list_iterate_items(vgnl, vgnameids_on_system) {
if (strcmp(sl->str, vgnl->vg_name))
continue;
found++;
}
if (found < 2)
continue;
/*
		 * More than one VG matches the given name.
* If only one is local, use that one.
*/
found = 0;
dm_list_iterate_items_safe(vgnl, vgnl2, vgnameids_on_system) {
if (strcmp(sl->str, vgnl->vg_name))
continue;
/*
* label scan has already populated lvmcache vginfo with
* this information.
*/
if (lvmcache_vg_is_foreign(cmd, vgnl->vg_name, vgnl->vgid)) {
if (!id_write_format((const struct id*)vgnl->vgid, uuid, sizeof(uuid)))
stack;
dm_list_del(&vgnl->list);
} else {
found++;
}
}
if (found < 2)
continue;
/*
		 * More than one VG with this name is local, so the intended
		 * VG is unknown.
*/
log_error("Multiple VGs found with the same name: skipping %s", sl->str);
if (arg_is_valid_for_command(cmd, select_ARG))
log_error("Use --select vg_uuid=<uuid> in place of the VG name.");
else
log_error("Use VG uuid in place of the VG name.");
dm_list_del(&sl->list);
ret = ECMD_FAILED;
}
return ret;
}

/*
* For each arg_vgname, move the corresponding entry from
* vgnameids_on_system to vgnameids_to_process. If an
* item in arg_vgnames doesn't exist in vgnameids_on_system,
* then add a new entry for it to vgnameids_to_process.
*/
static void _choose_vgs_to_process(struct cmd_context *cmd,
struct dm_list *arg_vgnames,
struct dm_list *vgnameids_on_system,
struct dm_list *vgnameids_to_process)
{
char uuid[64] __attribute__((aligned(8)));
struct dm_str_list *sl, *sl2;
struct vgnameid_list *vgnl, *vgnl2;
struct id id;
int arg_is_uuid = 0;
int found;
dm_list_iterate_items_safe(sl, sl2, arg_vgnames) {
found = 0;
dm_list_iterate_items_safe(vgnl, vgnl2, vgnameids_on_system) {
if (strcmp(sl->str, vgnl->vg_name))
continue;
dm_list_del(&vgnl->list);
dm_list_add(vgnameids_to_process, &vgnl->list);
found = 1;
break;
}
/*
* If the VG name arg looks like a UUID, then check if it
* matches the UUID of a VG. (--select should generally
* be used to select a VG by uuid instead.)
*/
if (!found && (cmd->cname->flags & ALLOW_UUID_AS_NAME))
arg_is_uuid = id_read_format_try(&id, sl->str);
if (!found && arg_is_uuid) {
dm_list_iterate_items_safe(vgnl, vgnl2, vgnameids_on_system) {
if (!(id_write_format((const struct id*)vgnl->vgid, uuid, sizeof(uuid))))
continue;
if (strcmp(sl->str, uuid))
continue;
log_print("Processing VG %s because of matching UUID %s",
vgnl->vg_name, uuid);
dm_list_del(&vgnl->list);
dm_list_add(vgnameids_to_process, &vgnl->list);
/* Make the arg_vgnames entry use the actual VG name. */
sl->str = dm_pool_strdup(cmd->mem, vgnl->vg_name);
found = 1;
break;
}
}
/*
* If the name arg was not found in the list of all VGs, then
* it probably doesn't exist, but we want the "VG not found"
* failure to be handled by the existing vg_read() code for
* that error. So, create an entry with just the VG name so
* that the processing loop will attempt to process it and use
* the vg_read() error path.
*/
if (!found) {
log_verbose("VG name on command line not found in list of VGs: %s", sl->str);
if (!(vgnl = dm_pool_alloc(cmd->mem, sizeof(*vgnl))))
continue;
vgnl->vgid = NULL;
if (!(vgnl->vg_name = dm_pool_strdup(cmd->mem, sl->str)))
continue;
dm_list_add(vgnameids_to_process, &vgnl->list);
}
}
}

/*
* Call process_single_vg() for each VG selected by the command line arguments.
* If one_vgname is set, process only that VG and ignore argc/argv (which should be 0/NULL).
* If one_vgname is not set, get VG names to process from argc/argv.
*/
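/*
 * Minimal usage sketch; the callback below is illustrative only and not
 * part of this file:
 *
 *     static int _count_pvs_single(struct cmd_context *cmd, const char *vg_name,
 *                                  struct volume_group *vg,
 *                                  struct processing_handle *handle)
 *     {
 *             log_print("VG %s has %u PV(s).", vg_name, vg->pv_count);
 *             return ECMD_PROCESSED;
 *     }
 *
 *     ret = process_each_vg(cmd, argc, argv, NULL, NULL, 0, 0, NULL,
 *                           &_count_pvs_single);
 */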
int process_each_vg(struct cmd_context *cmd,
int argc, char **argv,
const char *one_vgname,
struct dm_list *use_vgnames,
uint32_t read_flags,
int include_internal,
struct processing_handle *handle,
process_single_vg_fn_t process_single_vg)
{
log_report_t saved_log_report_state = log_get_report_state();
int handle_supplied = handle != NULL;
struct dm_list arg_tags; /* str_list */
struct dm_list arg_vgnames; /* str_list */
struct dm_list vgnameids_on_system; /* vgnameid_list */
struct dm_list vgnameids_to_process; /* vgnameid_list */
int enable_all_vgs = (cmd->cname->flags & ALL_VGS_IS_DEFAULT);
int process_all_vgs_on_system = 0;
int ret_max = ECMD_PROCESSED;
int ret;
log_set_report_object_type(LOG_REPORT_OBJECT_TYPE_VG);
log_debug("Processing each VG");
/* Disable error in vg_read so we can print it from ignore_vg. */
cmd->vg_read_print_access_error = 0;
dm_list_init(&arg_tags);
dm_list_init(&arg_vgnames);
dm_list_init(&vgnameids_on_system);
dm_list_init(&vgnameids_to_process);
/*
* Find any VGs or tags explicitly provided on the command line.
*/
if ((ret = _get_arg_vgnames(cmd, argc, argv, one_vgname, use_vgnames, &arg_vgnames, &arg_tags)) != ECMD_PROCESSED) {
ret_max = ret;
goto_out;
}
/*
* Process all VGs on the system when:
* . tags are specified and all VGs need to be read to
* look for matching tags.
* . no VG names are specified and the command defaults
* to processing all VGs when none are specified.
*/
if ((dm_list_empty(&arg_vgnames) && enable_all_vgs) || !dm_list_empty(&arg_tags))
process_all_vgs_on_system = 1;
/*
* Needed for a current listing of the global VG namespace.
*/
if (process_all_vgs_on_system && !lock_global(cmd, "sh")) {
ret_max = ECMD_FAILED;
goto_out;
}
/*
* Scan all devices to populate lvmcache with initial
* list of PVs and VGs.
*/
if (!(read_flags & PROCESS_SKIP_SCAN)) {
if (!lvmcache_label_scan(cmd)) {
ret_max = ECMD_FAILED;
goto_out;
}
}
/*
* A list of all VGs on the system is needed when:
* . processing all VGs on the system
* . A VG name is specified which may refer to one
* of multiple VGs on the system with that name.
*/
log_very_verbose("Obtaining the complete list of VGs to process");
if (!lvmcache_get_vgnameids(cmd, &vgnameids_on_system, NULL, include_internal)) {
ret_max = ECMD_FAILED;
goto_out;
}
if (!dm_list_empty(&arg_vgnames)) {
/* This may remove entries from arg_vgnames or vgnameids_on_system. */
ret = _resolve_duplicate_vgnames(cmd, &arg_vgnames, &vgnameids_on_system);
if (ret > ret_max)
ret_max = ret;
if (dm_list_empty(&arg_vgnames) && dm_list_empty(&arg_tags)) {
ret_max = ECMD_FAILED;
goto out;
}
}
if (dm_list_empty(&arg_vgnames) && dm_list_empty(&vgnameids_on_system)) {
/* FIXME Should be log_print, but suppressed for reporting cmds */
log_verbose("No volume groups found.");
ret_max = ECMD_PROCESSED;
goto out;
}
if (dm_list_empty(&arg_vgnames))
read_flags |= READ_OK_NOTFOUND;
/*
* When processing all VGs, vgnameids_on_system simply becomes
* vgnameids_to_process.
* When processing only specified VGs, then for each item in
* arg_vgnames, move the corresponding entry from
* vgnameids_on_system to vgnameids_to_process.
*/
if (process_all_vgs_on_system)
dm_list_splice(&vgnameids_to_process, &vgnameids_on_system);
else
_choose_vgs_to_process(cmd, &arg_vgnames, &vgnameids_on_system, &vgnameids_to_process);
if (!handle && !(handle = init_processing_handle(cmd, NULL))) {
ret_max = ECMD_FAILED;
goto_out;
}
if (handle->internal_report_for_select && !handle->selection_handle &&
!init_selection_handle(cmd, handle, VGS)) {
ret_max = ECMD_FAILED;
goto_out;
}
ret = _process_vgnameid_list(cmd, read_flags, &vgnameids_to_process,
&arg_vgnames, &arg_tags, handle, process_single_vg);
if (ret > ret_max)
ret_max = ret;
out:
if (!handle_supplied)
destroy_processing_handle(cmd, handle);
log_restore_report_state(saved_log_report_state);
return ret_max;
}
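
/*
 * Return the list entry whose string is exactly @prefix immediately
 * followed by @str, or NULL if no such entry exists.
 */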
static struct dm_str_list *_str_list_match_item_with_prefix(const struct dm_list *sll, const char *prefix, const char *str)
{
struct dm_str_list *sl;
size_t prefix_len = strlen(prefix);
dm_list_iterate_items(sl, sll) {
if (!strncmp(prefix, sl->str, prefix_len) &&
!strcmp(sl->str + prefix_len, str))
return sl;
}
return NULL;
}

/*
* Dummy LV, segment type and segment to represent all historical LVs.
*/
static struct logical_volume _historical_lv = {
.name = "",
.major = -1,
.minor = -1,
.snapshot_segs = DM_LIST_HEAD_INIT(_historical_lv.snapshot_segs),
.segments = DM_LIST_HEAD_INIT(_historical_lv.segments),
.tags = DM_LIST_HEAD_INIT(_historical_lv.tags),
.segs_using_this_lv = DM_LIST_HEAD_INIT(_historical_lv.segs_using_this_lv),
.indirect_glvs = DM_LIST_HEAD_INIT(_historical_lv.indirect_glvs),
.hostname = "",
};

static struct segment_type _historical_segment_type = {
.name = "historical",
.flags = SEG_VIRTUAL | SEG_CANNOT_BE_ZEROED,
};

static struct lv_segment _historical_lv_segment = {
.lv = &_historical_lv,
.segtype = &_historical_segment_type,
.len = 0,
.tags = DM_LIST_HEAD_INIT(_historical_lv_segment.tags),
.origin_list = DM_LIST_HEAD_INIT(_historical_lv_segment.origin_list),
};
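
/*
 * Count how many of the option IDs in opts[] are set on the command line,
 * optionally returning the number set (match_count) and unset
 * (unmatch_count).  Returns 1 if at least one option is set.
 */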
int opt_in_list_is_set(struct cmd_context *cmd, int *opts, int count,
int *match_count, int *unmatch_count)
{
int match = 0;
int unmatch = 0;
int i;
for (i = 0; i < count; i++) {
if (arg_is_set(cmd, opts[i]))
match++;
else
unmatch++;
}
if (match_count)
*match_count = match;
if (unmatch_count)
*unmatch_count = unmatch;
return match ? 1 : 0;
}
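
/*
 * Write the long option names for the given option IDs into buf as a
 * space-separated, NUL-terminated string; names that do not fit are
 * silently dropped.
 */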
void opt_array_to_str(struct cmd_context *cmd, int *opts, int count,
char *buf, int len)
{
int pos = 0;
int ret;
int i;
for (i = 0; i < count; i++) {
ret = snprintf(buf + pos, len - pos, "%s ", arg_long_option_name(opts[i]));
if (ret >= len - pos)
break;
pos += ret;
}
buf[len - 1] = '\0';
}
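
/*
 * Render the LV property bits set in "bits" as a space-separated list
 * of property names in buf (truncated if buf is too small).
 */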
static void _lvp_bits_to_str(uint64_t bits, char *buf, int len)
{
const struct lv_prop *prop;
int lvp_enum;
int pos = 0;
int ret;
for (lvp_enum = 0; lvp_enum < LVP_COUNT; lvp_enum++) {
if (!(prop = get_lv_prop(lvp_enum)))
continue;
if (lvp_bit_is_set(bits, lvp_enum)) {
ret = snprintf(buf + pos, len - pos, "%s ", prop->name);
if (ret >= len - pos)
break;
pos += ret;
}
}
buf[len - 1] = '\0';
}
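
/*
 * Render the LV type bits set in "bits" as a space-separated list of
 * type names in buf (truncated if buf is too small).
 */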
static void _lvt_bits_to_str(uint64_t bits, char *buf, int len)
{
const struct lv_type *type;
int lvt_enum;
int pos = 0;
int ret;
for (lvt_enum = 0; lvt_enum < LVT_COUNT; lvt_enum++) {
if (!(type = get_lv_type(lvt_enum)))
continue;
if (lvt_bit_is_set(bits, lvt_enum)) {
ret = snprintf(buf + pos, len - pos, "%s ", type->name);
if (ret >= len - pos)
break;
pos += ret;
}
}
buf[len - 1] = '\0';
}

/*
* This is the lv_prop function pointer used for lv_is_foo() #defines.
* Alternatively, lv_is_foo() could all be turned into functions.
*/
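/*
 * For example, the is_mirror_image_LVP case below is evaluated by
 * calling lv_is_mirror_image(lv).
 */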
static int _lv_is_prop(struct cmd_context *cmd, struct logical_volume *lv, int lvp_enum)
{
switch (lvp_enum) {
case is_locked_LVP:
return lv_is_locked(lv);
case is_partial_LVP:
return lv_is_partial(lv);
case is_virtual_LVP:
return lv_is_virtual(lv);
case is_merging_LVP:
return lv_is_merging(lv);
case is_merging_origin_LVP:
return lv_is_merging_origin(lv);
case is_converting_LVP:
return lv_is_converting(lv);
case is_external_origin_LVP:
return lv_is_external_origin(lv);
case is_virtual_origin_LVP:
return lv_is_virtual_origin(lv);
case is_not_synced_LVP:
return lv_is_not_synced(lv);
case is_pending_delete_LVP:
return lv_is_pending_delete(lv);
case is_error_when_full_LVP:
return lv_is_error_when_full(lv);
case is_pvmove_LVP:
return lv_is_pvmove(lv);
case is_removed_LVP:
return lv_is_removed(lv);
case is_writable_LVP:
return lv_is_writable(lv);
case is_vg_writable_LVP:
return (lv->vg->status & LVM_WRITE) ? 1 : 0;
case is_thinpool_data_LVP:
return lv_is_thin_pool_data(lv);
case is_thinpool_metadata_LVP:
return lv_is_thin_pool_metadata(lv);
case is_cachepool_data_LVP:
return lv_is_cache_pool_data(lv);
case is_cachepool_metadata_LVP:
return lv_is_cache_pool_metadata(lv);
case is_mirror_image_LVP:
return lv_is_mirror_image(lv);
case is_mirror_log_LVP:
return lv_is_mirror_log(lv);
case is_raid_image_LVP:
return lv_is_raid_image(lv);
case is_raid_metadata_LVP:
return lv_is_raid_metadata(lv);
case is_origin_LVP: /* use lv_is_thick_origin */
return lv_is_origin(lv);
case is_thick_origin_LVP:
return lv_is_thick_origin(lv);
case is_thick_snapshot_LVP:
return lv_is_thick_snapshot(lv);
case is_thin_origin_LVP:
return lv_is_thin_origin(lv, NULL);
case is_thin_snapshot_LVP:
return lv_is_thin_snapshot(lv);
case is_cache_origin_LVP:
return lv_is_cache_origin(lv);
case is_merging_cow_LVP:
return lv_is_merging_cow(lv);
case is_cow_LVP:
return lv_is_cow(lv);
case is_cow_covering_origin_LVP:
return lv_is_cow_covering_origin(lv);
case is_visible_LVP:
return lv_is_visible(lv);
case is_error_LVP:
return lv_is_error(lv);
case is_zero_LVP:
return lv_is_zero(lv);
case is_historical_LVP:
return lv_is_historical(lv);
case is_raid_with_tracking_LVP:
return lv_is_raid_with_tracking(lv);
case is_raid_with_integrity_LVP:
return lv_raid_has_integrity(lv);
default:
log_error(INTERNAL_ERROR "unknown lv property value lvp_enum %d", lvp_enum);
}
return 0;
}

/*
* Check if an LV matches a given LV type enum.
*/
static int _lv_is_type(struct cmd_context *cmd, struct logical_volume *lv, int lvt_enum)
{
struct lv_segment *seg = first_seg(lv);
switch (lvt_enum) {
case striped_LVT:
return seg_is_striped(seg) && !lv_is_cow(lv);
case linear_LVT:
return seg_is_linear(seg) && !lv_is_cow(lv);
case snapshot_LVT:
return lv_is_cow(lv);
case thin_LVT:
return lv_is_thin_volume(lv);
case thinpool_LVT:
return lv_is_thin_pool(lv);
case cache_LVT:
return lv_is_cache(lv);
case cachepool_LVT:
return lv_is_cache_pool(lv);
case vdo_LVT:
return lv_is_vdo(lv);
case vdopool_LVT:
return lv_is_vdo_pool(lv);
case vdopooldata_LVT:
return lv_is_vdo_pool_data(lv);
case mirror_LVT