lvm2/tools/toollib.c
/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2017 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "tools.h"
#include "lib/format_text/format-text.h"
#include <sys/stat.h>
#include <signal.h>
#include <sys/wait.h>
#include <sys/utsname.h>
#define report_log_ret_code(ret_code) report_current_object_cmdlog(REPORT_OBJECT_CMDLOG_NAME, \
((ret_code) == ECMD_PROCESSED) ? REPORT_OBJECT_CMDLOG_SUCCESS \
: REPORT_OBJECT_CMDLOG_FAILURE, (ret_code))
struct device_id_list {
struct dm_list list;
struct device *dev;
char pvid[ID_LEN + 1];
};
const char *command_name(struct cmd_context *cmd)
{
return cmd->command->name;
}
static void _sigchld_handler(int sig __attribute__((unused)))
{
while (wait4(-1, NULL, WNOHANG | WUNTRACED, NULL) > 0) ;
}
/*
* returns:
* -1 if the fork failed
* 0 if the parent
* 1 if the child
*/
int become_daemon(struct cmd_context *cmd, int skip_lvm)
{
static const char devnull[] = "/dev/null";
int null_fd;
pid_t pid;
struct sigaction act = {
{_sigchld_handler},
.sa_flags = SA_NOCLDSTOP,
};
log_verbose("Forking background process from command: %s", cmd->cmd_line);
sigaction(SIGCHLD, &act, NULL);
if (!skip_lvm)
if (!sync_local_dev_names(cmd)) { /* Flush ops and reset dm cookie */
log_error("Failed to sync local devices before forking.");
return -1;
}
if ((pid = fork()) == -1) {
log_error("fork failed: %s", strerror(errno));
return -1;
}
/* Parent */
if (pid > 0)
return 0;
/* Child */
if (setsid() == -1)
log_error("Background process failed to setsid: %s",
strerror(errno));
/* Set this to avoid discarding output from background process */
// #define DEBUG_CHILD
#ifndef DEBUG_CHILD
if ((null_fd = open(devnull, O_RDWR)) == -1) {
log_sys_error("open", devnull);
_exit(ECMD_FAILED);
}
if ((dup2(null_fd, STDIN_FILENO) < 0) || /* reopen stdin */
(dup2(null_fd, STDOUT_FILENO) < 0) || /* reopen stdout */
(dup2(null_fd, STDERR_FILENO) < 0)) { /* reopen stderr */
log_sys_error("dup2", "redirect");
(void) close(null_fd);
_exit(ECMD_FAILED);
}
if (null_fd > STDERR_FILENO)
(void) close(null_fd);
init_verbose(VERBOSE_BASE_LEVEL);
#endif /* DEBUG_CHILD */
strncpy(*cmd->argv, "(lvm2)", strlen(*cmd->argv));
lvmetad_disconnect();
if (!skip_lvm) {
reset_locking();
lvmcache_destroy(cmd, 1, 1);
if (!lvmcache_init(cmd))
/* FIXME Clean up properly here */
_exit(ECMD_FAILED);
}
dev_close_all();
return 1;
}
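/*
 * Illustrative caller sketch (not part of this file): a tool that needs to
 * keep working in the background forks itself here and branches on the three
 * return values documented above.
 *
 *     switch (become_daemon(cmd, 0)) {
 *     case -1:
 *         return ECMD_FAILED;     // fork failed, report the error
 *     case 0:
 *         return ECMD_PROCESSED;  // parent: the foreground command is done
 *     default:
 *         break;                  // child: carry on with the background work
 *     }
 */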
/*
* Strip dev_dir if present
*/
const char *skip_dev_dir(struct cmd_context *cmd, const char *vg_name,
unsigned *dev_dir_found)
{
size_t devdir_len = strlen(cmd->dev_dir);
const char *dmdir = dm_dir() + devdir_len;
size_t dmdir_len = strlen(dmdir), vglv_sz;
char *vgname, *lvname, *layer, *vglv;
/* FIXME Do this properly */
if (*vg_name == '/')
while (vg_name[1] == '/')
vg_name++;
if (strncmp(vg_name, cmd->dev_dir, devdir_len)) {
if (dev_dir_found)
*dev_dir_found = 0;
} else {
if (dev_dir_found)
*dev_dir_found = 1;
vg_name += devdir_len;
while (*vg_name == '/')
vg_name++;
/* Reformat string if /dev/mapper found */
if (!strncmp(vg_name, dmdir, dmdir_len) && vg_name[dmdir_len] == '/') {
vg_name += dmdir_len + 1;
while (*vg_name == '/')
vg_name++;
if (!dm_split_lvm_name(cmd->mem, vg_name, &vgname, &lvname, &layer) ||
*layer) {
log_error("skip_dev_dir: Couldn't split up device name %s.",
vg_name);
return vg_name;
}
vglv_sz = strlen(vgname) + strlen(lvname) + 2;
if (!(vglv = dm_pool_alloc(cmd->mem, vglv_sz)) ||
dm_snprintf(vglv, vglv_sz, "%s%s%s", vgname,
*lvname ? "/" : "",
lvname) < 0) {
log_error("vg/lv string alloc failed.");
return vg_name;
}
return vglv;
}
}
return vg_name;
}
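/*
 * Illustrative examples (assuming cmd->dev_dir is "/dev/" and dm_dir()
 * returns "/dev/mapper"):
 *
 *     skip_dev_dir(cmd, "/dev/vg0/lv0", &found)        -> "vg0/lv0", found = 1
 *     skip_dev_dir(cmd, "/dev/mapper/vg0-lv0", &found) -> "vg0/lv0", found = 1
 *     skip_dev_dir(cmd, "vg0/lv0", &found)             -> "vg0/lv0", found = 0
 */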
/*
* Three possible results:
* a) return 0, skip 0: take the VG, and cmd will end in success
* b) return 0, skip 1: skip the VG, and cmd will end in success
* c) return 1, skip *: skip the VG, and cmd will end in failure
*
* Case b is the special case, and includes the following:
* . The VG is inconsistent, and the command allows for inconsistent VGs.
 * . The VG is clustered, the host cannot access clustered VGs,
 *   and the command option has been used to ignore clustered VGs.
*
* Case c covers the other errors returned when reading the VG.
* If *skip is 1, it's OK for the caller to read the list of PVs in the VG.
*/
static int _ignore_vg(struct volume_group *vg, const char *vg_name,
struct dm_list *arg_vgnames, uint32_t read_flags,
int *skip, int *notfound)
{
uint32_t read_error = vg_read_error(vg);
*skip = 0;
*notfound = 0;
if ((read_error & FAILED_NOTFOUND) && (read_flags & READ_OK_NOTFOUND)) {
*notfound = 1;
return 0;
}
if ((read_error & FAILED_INCONSISTENT) && (read_flags & READ_ALLOW_INCONSISTENT))
read_error &= ~FAILED_INCONSISTENT; /* Check for other errors */
if ((read_error & FAILED_CLUSTERED) && vg->cmd->ignore_clustered_vgs) {
read_error &= ~FAILED_CLUSTERED; /* Check for other errors */
log_verbose("Skipping volume group %s", vg_name);
*skip = 1;
}
/*
* Commands that operate on "all vgs" shouldn't be bothered by
* skipping a foreign VG, and the command shouldn't fail when
* one is skipped. But, if the command explicitly asked to
* operate on a foreign VG and it's skipped, then the command
* would expect to fail.
*/
if (read_error & FAILED_SYSTEMID) {
if (arg_vgnames && str_list_match_item(arg_vgnames, vg->name)) {
log_error("Cannot access VG %s with system ID %s with %slocal system ID%s%s.",
vg->name, vg->system_id, vg->cmd->system_id ? "" : "unknown ",
vg->cmd->system_id ? " " : "", vg->cmd->system_id ? vg->cmd->system_id : "");
return 1;
} else {
read_error &= ~FAILED_SYSTEMID; /* Check for other errors */
log_verbose("Skipping foreign volume group %s", vg_name);
*skip = 1;
}
}
/*
* Accessing a lockd VG when lvmlockd is not used is similar
* to accessing a foreign VG.
* This is also the point where a command fails if it failed
* to acquire the necessary lock from lvmlockd.
* The two cases are distinguished by FAILED_LOCK_TYPE (the
* VG lock_type requires lvmlockd), and FAILED_LOCK_MODE (the
* command failed to acquire the necessary lock.)
*/
if (read_error & (FAILED_LOCK_TYPE | FAILED_LOCK_MODE)) {
if (arg_vgnames && str_list_match_item(arg_vgnames, vg->name)) {
if (read_error & FAILED_LOCK_TYPE)
log_error("Cannot access VG %s with lock type %s that requires lvmlockd.",
vg->name, vg->lock_type);
/* For FAILED_LOCK_MODE, the error is printed in vg_read. */
return 1;
} else {
read_error &= ~FAILED_LOCK_TYPE; /* Check for other errors */
read_error &= ~FAILED_LOCK_MODE;
log_verbose("Skipping volume group %s", vg_name);
*skip = 1;
}
}
if (read_error == FAILED_CLUSTERED) {
*skip = 1;
stack; /* Error already logged */
return 1;
}
if (read_error != SUCCESS) {
*skip = 0;
if (is_orphan_vg(vg_name))
log_error("Cannot process standalone physical volumes");
else
log_error("Cannot process volume group %s", vg_name);
return 1;
}
return 0;
}
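/*
 * Sketch of the caller pattern used later in this file (see
 * _process_vgnameid_list): the return value decides whether the whole
 * command ends in failure, while *skip / *notfound merely cause this VG
 * to be passed over.
 *
 *     if (_ignore_vg(vg, vg_name, arg_vgnames, read_flags, &skip, &notfound)) {
 *         ret_max = ECMD_FAILED;   // case c: command will end in failure
 *         goto endvg;
 *     }
 *     if (skip || notfound)        // case b: VG skipped, command still succeeds
 *         goto endvg;
 */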
/*
 * This function updates the "selected" arg only if the last item processed
 * is selected, so this implements "the whole structure is selected if
 * at least one of its items is selected".
*/
static void _update_selection_result(struct processing_handle *handle, int *selected)
{
if (!handle || !handle->selection_handle)
return;
if (handle->selection_handle->selected)
*selected = 1;
}
static void _set_final_selection_result(struct processing_handle *handle, int selected)
{
if (!handle || !handle->selection_handle)
return;
handle->selection_handle->selected = selected;
}
/*
* Metadata iteration functions
*/
int process_each_segment_in_pv(struct cmd_context *cmd,
struct volume_group *vg,
struct physical_volume *pv,
struct processing_handle *handle,
process_single_pvseg_fn_t process_single_pvseg)
{
struct pv_segment *pvseg;
int whole_selected = 0;
int ret_max = ECMD_PROCESSED;
int ret;
struct pv_segment _free_pv_segment = { .pv = pv };
if (dm_list_empty(&pv->segments)) {
ret = process_single_pvseg(cmd, NULL, &_free_pv_segment, handle);
if (ret != ECMD_PROCESSED)
stack;
if (ret > ret_max)
ret_max = ret;
} else {
dm_list_iterate_items(pvseg, &pv->segments) {
if (sigint_caught())
return_ECMD_FAILED;
ret = process_single_pvseg(cmd, vg, pvseg, handle);
_update_selection_result(handle, &whole_selected);
if (ret != ECMD_PROCESSED)
stack;
if (ret > ret_max)
ret_max = ret;
}
}
/* the PV is selected if at least one PV segment is selected */
_set_final_selection_result(handle, whole_selected);
return ret_max;
}
int process_each_segment_in_lv(struct cmd_context *cmd,
struct logical_volume *lv,
struct processing_handle *handle,
process_single_seg_fn_t process_single_seg)
{
struct lv_segment *seg;
int whole_selected = 0;
int ret_max = ECMD_PROCESSED;
int ret;
dm_list_iterate_items(seg, &lv->segments) {
if (sigint_caught())
return_ECMD_FAILED;
ret = process_single_seg(cmd, seg, handle);
_update_selection_result(handle, &whole_selected);
if (ret != ECMD_PROCESSED)
stack;
if (ret > ret_max)
ret_max = ret;
}
/* the LV is selected if at least one LV segment is selected */
_set_final_selection_result(handle, whole_selected);
return ret_max;
}
static const char *_extract_vgname(struct cmd_context *cmd, const char *lv_name,
const char **after)
{
const char *vg_name = lv_name;
char *st, *pos;
/* Strip dev_dir (optional) */
if (!(vg_name = skip_dev_dir(cmd, vg_name, NULL)))
return_0;
/* Require exactly one set of consecutive slashes */
if ((st = pos = strchr(vg_name, '/')))
while (*st == '/')
st++;
if (!st || strchr(st, '/')) {
log_error("\"%s\": Invalid path for Logical Volume.",
lv_name);
return 0;
}
if (!(vg_name = dm_pool_strndup(cmd->mem, vg_name, pos - vg_name))) {
log_error("Allocation of vg_name failed.");
return 0;
}
if (after)
*after = st;
return vg_name;
}
/*
* Extract default volume group name from environment
*/
static const char *_default_vgname(struct cmd_context *cmd)
{
const char *vg_path;
/* Take default VG from environment? */
vg_path = getenv("LVM_VG_NAME");
if (!vg_path)
return 0;
vg_path = skip_dev_dir(cmd, vg_path, NULL);
if (strchr(vg_path, '/')) {
log_error("\"%s\": Invalid environment var LVM_VG_NAME set for Volume Group.",
vg_path);
return 0;
}
return dm_pool_strdup(cmd->mem, vg_path);
}
/*
* Determine volume group name from a logical volume name
*/
const char *extract_vgname(struct cmd_context *cmd, const char *lv_name)
{
const char *vg_name = lv_name;
/* Path supplied? */
if (vg_name && strchr(vg_name, '/')) {
if (!(vg_name = _extract_vgname(cmd, lv_name, NULL)))
return_NULL;
return vg_name;
}
if (!(vg_name = _default_vgname(cmd))) {
if (lv_name)
log_error("Path required for Logical Volume \"%s\".",
lv_name);
return NULL;
}
return vg_name;
}
/*
* Process physical extent range specifiers
*/
static int _add_pe_range(struct dm_pool *mem, const char *pvname,
struct dm_list *pe_ranges, uint32_t start, uint32_t count)
{
struct pe_range *per;
log_debug("Adding PE range: start PE " FMTu32 " length " FMTu32 " on %s.",
start, count, pvname);
/* Ensure no overlap with existing areas */
dm_list_iterate_items(per, pe_ranges) {
if (((start < per->start) && (start + count - 1 >= per->start)) ||
((start >= per->start) &&
(per->start + per->count - 1) >= start)) {
log_error("Overlapping PE ranges specified (" FMTu32
"-" FMTu32 ", " FMTu32 "-" FMTu32 ") on %s.",
start, start + count - 1, per->start,
per->start + per->count - 1, pvname);
return 0;
}
}
if (!(per = dm_pool_alloc(mem, sizeof(*per)))) {
log_error("Allocation of list failed.");
return 0;
}
per->start = start;
per->count = count;
dm_list_add(pe_ranges, &per->list);
return 1;
}
static int _xstrtouint32(const char *s, char **p, int base, uint32_t *result)
{
unsigned long ul;
errno = 0;
ul = strtoul(s, p, base);
if (errno || *p == s || ul > UINT32_MAX)
return 0;
*result = ul;
return 1;
}
static int _parse_pes(struct dm_pool *mem, char *c, struct dm_list *pe_ranges,
const char *pvname, uint32_t size)
{
char *endptr;
uint32_t start, end, len;
/* Default to whole PV */
if (!c) {
if (!_add_pe_range(mem, pvname, pe_ranges, UINT32_C(0), size))
return_0;
return 1;
}
while (*c) {
if (*c != ':')
goto error;
c++;
/* Disallow :: and :\0 */
if (*c == ':' || !*c)
goto error;
/* Default to whole range */
start = UINT32_C(0);
end = size - 1;
/* Start extent given? */
if (isdigit(*c)) {
if (!_xstrtouint32(c, &endptr, 10, &start))
goto error;
c = endptr;
/* Just one number given? */
if (!*c || *c == ':')
end = start;
}
/* Range? */
if (*c == '-') {
c++;
if (isdigit(*c)) {
if (!_xstrtouint32(c, &endptr, 10, &end))
goto error;
c = endptr;
}
} else if (*c == '+') { /* Length? */
c++;
if (isdigit(*c)) {
if (!_xstrtouint32(c, &endptr, 10, &len))
goto error;
c = endptr;
end = start + (len ? (len - 1) : 0);
}
}
if (*c && *c != ':')
goto error;
if ((start > end) || (end > size - 1)) {
log_error("PE range error: start extent %" PRIu32 " to "
"end extent %" PRIu32 ".", start, end);
return 0;
}
if (!_add_pe_range(mem, pvname, pe_ranges, start, end - start + 1))
return_0;
}
return 1;
error:
log_error("Physical extent parsing error at %s.", c);
return 0;
}
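/*
 * Illustrative examples of the extent-range syntax parsed above, as appended
 * to a PV name on the command line:
 *
 *     /dev/sda1              whole PV (every physical extent)
 *     /dev/sda1:0-99         PEs 0 through 99
 *     /dev/sda1:200+50       PEs 200 through 249 (start plus length)
 *     /dev/sda1:0-99:200     PEs 0 through 99 and the single PE 200
 */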
static int _create_pv_entry(struct dm_pool *mem, struct pv_list *pvl,
char *colon, int allocatable_only, struct dm_list *r)
{
const char *pvname;
struct pv_list *new_pvl = NULL, *pvl2;
struct dm_list *pe_ranges;
pvname = pv_dev_name(pvl->pv);
if (allocatable_only && !(pvl->pv->status & ALLOCATABLE_PV)) {
log_warn("WARNING: Physical volume %s not allocatable.", pvname);
return 1;
}
if (allocatable_only && is_missing_pv(pvl->pv)) {
log_warn("WARNING: Physical volume %s is missing.", pvname);
return 1;
}
if (allocatable_only &&
(pvl->pv->pe_count == pvl->pv->pe_alloc_count)) {
log_warn("WARNING: No free extents on physical volume \"%s\".", pvname);
return 1;
}
dm_list_iterate_items(pvl2, r)
if (pvl->pv->dev == pvl2->pv->dev) {
new_pvl = pvl2;
break;
}
if (!new_pvl) {
if (!(new_pvl = dm_pool_alloc(mem, sizeof(*new_pvl)))) {
log_error("Unable to allocate physical volume list.");
return 0;
}
memcpy(new_pvl, pvl, sizeof(*new_pvl));
if (!(pe_ranges = dm_pool_alloc(mem, sizeof(*pe_ranges)))) {
log_error("Allocation of pe_ranges list failed.");
return 0;
}
dm_list_init(pe_ranges);
new_pvl->pe_ranges = pe_ranges;
dm_list_add(r, &new_pvl->list);
}
/* Determine selected physical extents */
if (!_parse_pes(mem, colon, new_pvl->pe_ranges, pv_dev_name(pvl->pv),
pvl->pv->pe_count))
return_0;
return 1;
}
struct dm_list *create_pv_list(struct dm_pool *mem, struct volume_group *vg, int argc,
char **argv, int allocatable_only)
{
struct dm_list *r;
struct pv_list *pvl;
struct dm_list tagsl, arg_pvnames;
char *pvname = NULL;
char *colon, *at_sign, *tagname;
int i;
/* Build up list of PVs */
if (!(r = dm_pool_alloc(mem, sizeof(*r)))) {
log_error("Allocation of list failed.");
return NULL;
}
dm_list_init(r);
dm_list_init(&tagsl);
dm_list_init(&arg_pvnames);
for (i = 0; i < argc; i++) {
dm_unescape_colons_and_at_signs(argv[i], &colon, &at_sign);
if (at_sign && (at_sign == argv[i])) {
tagname = at_sign + 1;
if (!validate_tag(tagname)) {
log_error("Skipping invalid tag %s.", tagname);
continue;
}
dm_list_iterate_items(pvl, &vg->pvs) {
if (str_list_match_item(&pvl->pv->tags,
tagname)) {
if (!_create_pv_entry(mem, pvl, NULL,
allocatable_only,
r))
return_NULL;
}
}
continue;
}
pvname = argv[i];
if (colon && !(pvname = dm_pool_strndup(mem, pvname,
(unsigned) (colon - pvname)))) {
log_error("Failed to clone PV name.");
return NULL;
}
if (!(pvl = find_pv_in_vg(vg, pvname))) {
log_error("Physical Volume \"%s\" not found in "
"Volume Group \"%s\".", pvname, vg->name);
return NULL;
}
if (!_create_pv_entry(mem, pvl, colon, allocatable_only, r))
return_NULL;
}
if (dm_list_empty(r))
log_error("No specified PVs have space available.");
return dm_list_empty(r) ? NULL : r;
}
struct dm_list *clone_pv_list(struct dm_pool *mem, struct dm_list *pvsl)
{
struct dm_list *r;
struct pv_list *pvl, *new_pvl;
/* Build up list of PVs */
if (!(r = dm_pool_alloc(mem, sizeof(*r)))) {
log_error("Allocation of list failed.");
return NULL;
}
dm_list_init(r);
dm_list_iterate_items(pvl, pvsl) {
if (!(new_pvl = dm_pool_zalloc(mem, sizeof(*new_pvl)))) {
log_error("Unable to allocate physical volume list.");
return NULL;
}
memcpy(new_pvl, pvl, sizeof(*new_pvl));
dm_list_add(r, &new_pvl->list);
}
return r;
}
const char _pe_size_may_not_be_negative_msg[] = "Physical extent size may not be negative.";
int vgcreate_params_set_defaults(struct cmd_context *cmd,
struct vgcreate_params *vp_def,
struct volume_group *vg)
{
int64_t extent_size;
/* Only vgsplit sets vg */
if (vg) {
vp_def->vg_name = NULL;
vp_def->extent_size = vg->extent_size;
vp_def->max_pv = vg->max_pv;
vp_def->max_lv = vg->max_lv;
vp_def->alloc = vg->alloc;
vp_def->clustered = vg_is_clustered(vg);
vp_def->vgmetadatacopies = vg->mda_copies;
vp_def->system_id = vg->system_id; /* No need to clone this */
} else {
vp_def->vg_name = NULL;
extent_size = find_config_tree_int64(cmd,
allocation_physical_extent_size_CFG, NULL) * 2;
if (extent_size < 0) {
log_error(_pe_size_may_not_be_negative_msg);
return 0;
}
vp_def->extent_size = (uint32_t) extent_size;
vp_def->max_pv = DEFAULT_MAX_PV;
vp_def->max_lv = DEFAULT_MAX_LV;
vp_def->alloc = DEFAULT_ALLOC_POLICY;
vp_def->clustered = DEFAULT_CLUSTERED;
vp_def->vgmetadatacopies = DEFAULT_VGMETADATACOPIES;
vp_def->system_id = cmd->system_id;
}
return 1;
}
/*
* Set members of struct vgcreate_params from cmdline arguments.
* Do preliminary validation with arg_*() interface.
* Further, more generic validation is done in validate_vgcreate_params().
* This function is to remain in tools directory.
*/
int vgcreate_params_set_from_args(struct cmd_context *cmd,
struct vgcreate_params *vp_new,
struct vgcreate_params *vp_def)
{
const char *system_id_arg_str;
const char *lock_type = NULL;
int locking_type;
int use_lvmlockd;
int use_clvmd;
lock_type_t lock_type_num;
vp_new->vg_name = skip_dev_dir(cmd, vp_def->vg_name, NULL);
vp_new->max_lv = arg_uint_value(cmd, maxlogicalvolumes_ARG,
vp_def->max_lv);
vp_new->max_pv = arg_uint_value(cmd, maxphysicalvolumes_ARG,
vp_def->max_pv);
vp_new->alloc = (alloc_policy_t) arg_uint_value(cmd, alloc_ARG, vp_def->alloc);
/* Units of 512-byte sectors */
vp_new->extent_size =
arg_uint_value(cmd, physicalextentsize_ARG, vp_def->extent_size);
if (arg_sign_value(cmd, physicalextentsize_ARG, SIGN_NONE) == SIGN_MINUS) {
log_error(_pe_size_may_not_be_negative_msg);
return 0;
}
if (arg_uint64_value(cmd, physicalextentsize_ARG, 0) > MAX_EXTENT_SIZE) {
log_error("Physical extent size must be smaller than %s.",
display_size(cmd, (uint64_t) MAX_EXTENT_SIZE));
return 0;
}
if (arg_sign_value(cmd, maxlogicalvolumes_ARG, SIGN_NONE) == SIGN_MINUS) {
log_error("Max Logical Volumes may not be negative.");
return 0;
}
if (arg_sign_value(cmd, maxphysicalvolumes_ARG, SIGN_NONE) == SIGN_MINUS) {
log_error("Max Physical Volumes may not be negative.");
return 0;
}
if (arg_is_set(cmd, vgmetadatacopies_ARG))
vp_new->vgmetadatacopies = arg_int_value(cmd, vgmetadatacopies_ARG,
DEFAULT_VGMETADATACOPIES);
else
vp_new->vgmetadatacopies = find_config_tree_int(cmd, metadata_vgmetadatacopies_CFG, NULL);
if (!(system_id_arg_str = arg_str_value(cmd, systemid_ARG, NULL))) {
vp_new->system_id = vp_def->system_id;
} else {
if (!(vp_new->system_id = system_id_from_string(cmd, system_id_arg_str)))
return_0;
/* FIXME Take local/extra_system_ids into account */
if (vp_new->system_id && cmd->system_id &&
strcmp(vp_new->system_id, cmd->system_id)) {
if (*vp_new->system_id)
log_warn("VG with system ID %s might become inaccessible as local system ID is %s",
vp_new->system_id, cmd->system_id);
else
log_warn("WARNING: A VG without a system ID allows unsafe access from other hosts.");
}
}
/*
* Locking: what kind of locking should be used for the
* new VG, and is it compatible with current lvm.conf settings.
*
* The end result is to set vp_new->lock_type to:
* none | clvm | dlm | sanlock.
*
* If 'vgcreate --lock-type <arg>' is set, the answer is given
* directly by <arg> which is one of none|clvm|dlm|sanlock.
*
* 'vgcreate --clustered y' is the way to create clvm VGs.
*
* 'vgcreate --shared' is the way to create lockd VGs.
* lock_type of sanlock or dlm is selected based on
* which lock manager is running.
*
*
* 1. Using neither clvmd nor lvmlockd.
* ------------------------------------------------
* lvm.conf:
* global/use_lvmlockd = 0
* global/locking_type = 1
*
* - no locking is enabled
* - clvmd is not used
* - lvmlockd is not used
* - VGs with CLUSTERED set are ignored (requires clvmd)
* - VGs with lockd type are ignored (requires lvmlockd)
* - vgcreate can create new VGs with lock_type none
* - 'vgcreate --clustered y' fails
* - 'vgcreate --shared' fails
* - 'vgcreate' (neither option) creates a local VG
*
* 2. Using clvmd.
* ------------------------------------------------
* lvm.conf:
* global/use_lvmlockd = 0
* global/locking_type = 3
*
* - locking through clvmd is enabled (traditional clvm config)
* - clvmd is used
* - lvmlockd is not used
* - VGs with CLUSTERED set can be used
* - VGs with lockd type are ignored (requires lvmlockd)
* - vgcreate can create new VGs with CLUSTERED status flag
* - 'vgcreate --clustered y' works
* - 'vgcreate --shared' fails
* - 'vgcreate' (neither option) creates a clvm VG
*
* 3. Using lvmlockd.
* ------------------------------------------------
* lvm.conf:
* global/use_lvmlockd = 1
* global/locking_type = 1
*
* - locking through lvmlockd is enabled
* - clvmd is not used
* - lvmlockd is used
* - VGs with CLUSTERED set are ignored (requires clvmd)
* - VGs with lockd type can be used
* - vgcreate can create new VGs with lock_type sanlock or dlm
* - 'vgcreate --clustered y' fails
* - 'vgcreate --shared' works
* - 'vgcreate' (neither option) creates a local VG
*/
locking_type = find_config_tree_int(cmd, global_locking_type_CFG, NULL);
use_lvmlockd = find_config_tree_bool(cmd, global_use_lvmlockd_CFG, NULL);
use_clvmd = (locking_type == 3);
if (arg_is_set(cmd, locktype_ARG)) {
if (arg_is_set(cmd, clustered_ARG)) {
log_error("A lock type cannot be specified with --clustered.");
return 0;
}
lock_type = arg_str_value(cmd, locktype_ARG, "");
if (arg_is_set(cmd, shared_ARG) && !is_lockd_type(lock_type)) {
log_error("The --shared option requires lock type sanlock or dlm.");
return 0;
}
} else if (arg_is_set(cmd, clustered_ARG)) {
const char *arg_str = arg_str_value(cmd, clustered_ARG, "");
int clustery = strcmp(arg_str, "y") ? 0 : 1;
if (use_clvmd) {
lock_type = clustery ? "clvm" : "none";
} else if (use_lvmlockd) {
log_error("lvmlockd is configured, use --shared with lvmlockd, and --clustered with clvmd.");
return 0;
} else {
if (clustery) {
log_error("The --clustered option requires clvmd (locking_type=3).");
return 0;
}
lock_type = "none";
}
} else if (arg_is_set(cmd, shared_ARG)) {
int found_multiple = 0;
if (use_lvmlockd) {
if (!(lock_type = lockd_running_lock_type(cmd, &found_multiple))) {
if (found_multiple)
log_error("Found multiple lock managers, select one with --lock-type.");
else
log_error("Failed to detect a running lock manager to select lock type.");
return 0;
}
} else if (use_clvmd) {
log_error("Use --shared with lvmlockd, and --clustered with clvmd.");
return 0;
} else {
log_error("Using a shared lock type requires lvmlockd.");
return 0;
}
} else {
if (use_clvmd)
lock_type = locking_is_clustered() ? "clvm" : "none";
else
lock_type = "none";
}
/*
* Check that the lock_type is recognized, and is being
* used with the correct lvm.conf settings.
*/
lock_type_num = get_lock_type_from_string(lock_type);
switch (lock_type_num) {
case LOCK_TYPE_INVALID:
log_error("lock_type %s is invalid", lock_type);
return 0;
case LOCK_TYPE_SANLOCK:
case LOCK_TYPE_DLM:
if (!use_lvmlockd) {
log_error("Using a shared lock type requires lvmlockd.");
return 0;
}
break;
case LOCK_TYPE_CLVM:
if (!use_clvmd) {
log_error("Using clvm requires locking_type 3.");
return 0;
}
break;
case LOCK_TYPE_NONE:
break;
};
/*
* The vg is not owned by one host/system_id.
* Locking coordinates access from multiple hosts.
*/
if (lock_type_num == LOCK_TYPE_DLM || lock_type_num == LOCK_TYPE_SANLOCK || lock_type_num == LOCK_TYPE_CLVM)
vp_new->system_id = NULL;
vp_new->lock_type = lock_type;
if (lock_type_num == LOCK_TYPE_CLVM)
vp_new->clustered = 1;
else
vp_new->clustered = 0;
log_debug("Setting lock_type to %s", vp_new->lock_type);
return 1;
}
/* Shared code for changing activation state for vgchange/lvchange */
int lv_change_activate(struct cmd_context *cmd, struct logical_volume *lv,
activation_change_t activate)
{
int r = 1;
struct logical_volume *snapshot_lv;
if (lv_is_cache_pool(lv)) {
if (is_change_activating(activate)) {
log_verbose("Skipping activation of cache pool %s.",
display_lvname(lv));
return 1;
}
if (!dm_list_empty(&lv->segs_using_this_lv)) {
log_verbose("Skipping deactivation of used cache pool %s.",
display_lvname(lv));
return 1;
}
/*
 * Only deactivation of an unused cache pool is allowed to proceed here.
 * This is useful only for recovering from a failed zeroing of the metadata LV.
*/
}
if (lv_is_merging_origin(lv)) {
/*
 * For a merging origin, its snapshot must be inactive.
 * If the snapshot is still active and cannot be deactivated,
 * activation or deactivation of the origin fails!
 *
 * When the origin is deactivated and the merging snapshot is thin,
 * the origin can still be deactivated, but an error is reported
 * since the thin snapshot remains active.
 *
 * The user can retry by deactivating the origin again,
 * as the origin is the only visible LV.
*/
snapshot_lv = find_snapshot(lv)->lv;
if (lv_is_thin_type(snapshot_lv) && !deactivate_lv(cmd, snapshot_lv)) {
if (is_change_activating(activate)) {
log_error("Refusing to activate merging volume %s while "
"snapshot volume %s is still active.",
display_lvname(lv), display_lvname(snapshot_lv));
return 0;
}
log_error("Cannot fully deactivate merging origin volume %s while "
"snapshot volume %s is still active.",
display_lvname(lv), display_lvname(snapshot_lv));
r = 0; /* and continue to deactivate origin... */
}
}
if (is_change_activating(activate) &&
lvmcache_found_duplicate_pvs() &&
vg_has_duplicate_pvs(lv->vg) &&
!find_config_tree_bool(cmd, devices_allow_changes_with_duplicate_pvs_CFG, NULL)) {
log_error("Cannot activate LVs in VG %s while PVs appear on duplicate devices.",
lv->vg->name);
return 0;
}
if (!lv_active_change(cmd, lv, activate, 0))
return_0;
set_lv_notify(lv->vg->cmd);
return r;
}
int lv_refresh(struct cmd_context *cmd, struct logical_volume *lv)
{
struct logical_volume *snapshot_lv;
if (lv_is_merging_origin(lv)) {
snapshot_lv = find_snapshot(lv)->lv;
if (lv_is_thin_type(snapshot_lv) && !deactivate_lv(cmd, snapshot_lv))
log_print_unless_silent("Delaying merge for origin volume %s since "
"snapshot volume %s is still active.",
display_lvname(lv), display_lvname(snapshot_lv));
}
if (!lv_refresh_suspend_resume(lv))
return_0;
/*
* check if snapshot merge should be polled
* - unfortunately: even though the dev_manager will clear
* the lv's merge attributes if a merge is not possible;
* it is clearing a different instance of the lv (as
* retrieved with lv_from_lvid)
 * - fortunately: polldaemon will immediately shut down if the
* origin doesn't have a status with a snapshot percentage
*/
if (background_polling() && lv_is_merging_origin(lv) && lv_is_active_locally(lv))
lv_spawn_background_polling(cmd, lv);
return 1;
}
int vg_refresh_visible(struct cmd_context *cmd, struct volume_group *vg)
{
struct lv_list *lvl;
int r = 1;
sigint_allow();
dm_list_iterate_items(lvl, &vg->lvs) {
if (sigint_caught()) {
r = 0;
stack;
break;
}
if (lv_is_visible(lvl->lv) && !lv_refresh(cmd, lvl->lv)) {
r = 0;
stack;
}
}
sigint_restore();
return r;
}
void lv_spawn_background_polling(struct cmd_context *cmd,
struct logical_volume *lv)
{
const char *pvname;
const struct logical_volume *lv_mirr = NULL;
if (lv_is_pvmove(lv))
lv_mirr = lv;
else if (lv_is_locked(lv))
lv_mirr = find_pvmove_lv_in_lv(lv);
if (lv_mirr &&
(pvname = get_pvmove_pvname_from_lv_mirr(lv_mirr))) {
log_verbose("Spawning background pvmove process for %s.",
pvname);
pvmove_poll(cmd, pvname, lv_mirr->lvid.s, lv_mirr->vg->name, lv_mirr->name, 1);
}
if (lv_is_converting(lv) || lv_is_merging(lv)) {
log_verbose("Spawning background lvconvert process for %s.",
lv->name);
lvconvert_poll(cmd, lv, 1);
}
}
int get_activation_monitoring_mode(struct cmd_context *cmd,
int *monitoring_mode)
{
*monitoring_mode = DEFAULT_DMEVENTD_MONITOR;
if (arg_is_set(cmd, monitor_ARG) &&
(arg_is_set(cmd, ignoremonitoring_ARG) ||
arg_is_set(cmd, sysinit_ARG))) {
log_error("--ignoremonitoring or --sysinit option not allowed with --monitor option.");
return 0;
}
if (arg_is_set(cmd, monitor_ARG))
*monitoring_mode = arg_int_value(cmd, monitor_ARG,
DEFAULT_DMEVENTD_MONITOR);
else if (is_static() || arg_is_set(cmd, ignoremonitoring_ARG) ||
arg_is_set(cmd, sysinit_ARG) ||
!find_config_tree_bool(cmd, activation_monitoring_CFG, NULL))
*monitoring_mode = DMEVENTD_MONITOR_IGNORE;
return 1;
}
/*
* Read pool options from cmdline
*/
int get_pool_params(struct cmd_context *cmd,
const struct segment_type *segtype,
uint64_t *pool_metadata_size,
int *pool_metadata_spare,
uint32_t *chunk_size,
thin_discards_t *discards,
thin_zero_t *zero_new_blocks)
{
if (segtype_is_thin_pool(segtype) || segtype_is_thin(segtype)) {
if (arg_is_set(cmd, zero_ARG)) {
*zero_new_blocks = arg_int_value(cmd, zero_ARG, 0) ? THIN_ZERO_YES : THIN_ZERO_NO;
log_very_verbose("%s pool zeroing.",
(*zero_new_blocks == THIN_ZERO_YES) ? "Enabling" : "Disabling");
} else
*zero_new_blocks = THIN_ZERO_UNSELECTED;
if (arg_is_set(cmd, discards_ARG)) {
*discards = (thin_discards_t) arg_uint_value(cmd, discards_ARG, 0);
log_very_verbose("Setting pool discards to %s.",
get_pool_discards_name(*discards));
} else
*discards = THIN_DISCARDS_UNSELECTED;
}
if (arg_from_list_is_negative(cmd, "may not be negative",
chunksize_ARG,
pooldatasize_ARG,
poolmetadatasize_ARG,
-1))
return_0;
if (arg_from_list_is_zero(cmd, "may not be zero",
chunksize_ARG,
pooldatasize_ARG,
poolmetadatasize_ARG,
-1))
return_0;
if (arg_is_set(cmd, chunksize_ARG)) {
*chunk_size = arg_uint_value(cmd, chunksize_ARG, 0);
if (!validate_pool_chunk_size(cmd, segtype, *chunk_size))
return_0;
log_very_verbose("Setting pool chunk size to %s.",
display_size(cmd, *chunk_size));
} else
*chunk_size = 0;
if (arg_is_set(cmd, poolmetadatasize_ARG)) {
if (arg_is_set(cmd, poolmetadata_ARG)) {
log_error("Please specify either metadata logical volume or its size.");
return 0;
}
*pool_metadata_size = arg_uint64_value(cmd, poolmetadatasize_ARG,
UINT64_C(0));
} else
*pool_metadata_size = 0;
/* TODO: default in lvm.conf and metadata profile ? */
*pool_metadata_spare = arg_int_value(cmd, poolmetadataspare_ARG,
DEFAULT_POOL_METADATA_SPARE);
return 1;
}
/*
* Generic stripe parameter checks.
*/
static int _validate_stripe_params(struct cmd_context *cmd, const struct segment_type *segtype,
uint32_t *stripes, uint32_t *stripe_size)
{
if (*stripes < 1 || *stripes > MAX_STRIPES) {
log_error("Number of stripes (%d) must be between %d and %d.",
*stripes, 1, MAX_STRIPES);
return 0;
}
if (!segtype_supports_stripe_size(segtype)) {
if (*stripe_size) {
log_print_unless_silent("Ignoring stripesize argument for %s devices.",
segtype->name);
*stripe_size = 0;
}
} else if (*stripes == 1) {
if (*stripe_size) {
log_print_unless_silent("Ignoring stripesize argument with single stripe.");
*stripe_size = 0;
}
} else {
if (!*stripe_size) {
*stripe_size = find_config_tree_int(cmd, metadata_stripesize_CFG, NULL) * 2;
log_print_unless_silent("Using default stripesize %s.",
display_size(cmd, (uint64_t) *stripe_size));
}
if (*stripe_size > STRIPE_SIZE_LIMIT * 2) {
log_error("Stripe size cannot be larger than %s.",
display_size(cmd, (uint64_t) STRIPE_SIZE_LIMIT));
return 0;
} else if (*stripe_size < STRIPE_SIZE_MIN || !is_power_of_2(*stripe_size)) {
log_error("Invalid stripe size %s.",
display_size(cmd, (uint64_t) *stripe_size));
return 0;
}
}
return 1;
}
/*
* The stripe size is limited by the size of a uint32_t, but since the
* value given by the user is doubled, and the final result must be a
* power of 2, we must divide UINT_MAX by four and add 1 (to round it
* up to the power of 2)
*/
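/*
 * Concretely (assuming STRIPE_SIZE_LIMIT is defined elsewhere in the tree
 * as ((UINT_MAX >> 2) + 1)): UINT_MAX / 4 + 1 equals 2^30 sectors, so the
 * doubled limit checked above, STRIPE_SIZE_LIMIT * 2 = 2^31, still fits in
 * a uint32_t.
 */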
int get_stripe_params(struct cmd_context *cmd, const struct segment_type *segtype,
uint32_t *stripes, uint32_t *stripe_size,
unsigned *stripes_supplied, unsigned *stripe_size_supplied)
{
/* stripes_long_ARG takes precedence (for lvconvert) */
/* FIXME Cope with relative +/- changes for lvconvert. */
if (arg_is_set(cmd, stripes_long_ARG)) {
*stripes = arg_uint_value(cmd, stripes_long_ARG, 0);
*stripes_supplied = 1;
} else if (arg_is_set(cmd, stripes_ARG)) {
*stripes = arg_uint_value(cmd, stripes_ARG, 0);
*stripes_supplied = 1;
} else {
/*
* FIXME add segtype parameter for min_stripes and remove logic for this
* from all other places
*/
if (segtype_is_any_raid6(segtype))
*stripes = 3;
else if (segtype_is_striped_raid(segtype))
*stripes = 2;
else
*stripes = 1;
*stripes_supplied = 0;
}
if ((*stripe_size = arg_uint_value(cmd, stripesize_ARG, 0))) {
if (arg_sign_value(cmd, stripesize_ARG, SIGN_NONE) == SIGN_MINUS) {
log_error("Negative stripesize is invalid.");
return 0;
}
}
*stripe_size_supplied = arg_is_set(cmd, stripesize_ARG);
return _validate_stripe_params(cmd, segtype, stripes, stripe_size);
}
static int _validate_cachepool_params(const char *policy_name, cache_mode_t cache_mode)
{
/*
* FIXME: it might be nice if cmd def rules could check option values,
* then a rule could do this.
*/
if ((cache_mode == CACHE_MODE_WRITEBACK) && policy_name && !strcmp(policy_name, "cleaner")) {
log_error("Cache mode \"writeback\" is not compatible with cache policy \"cleaner\".");
return 0;
}
return 1;
}
int get_cache_params(struct cmd_context *cmd,
uint32_t *chunk_size,
cache_metadata_format_t *cache_metadata_format,
cache_mode_t *cache_mode,
const char **name,
struct dm_config_tree **settings)
{
const char *str;
struct arg_value_group_list *group;
struct dm_config_tree *result = NULL, *prev = NULL, *current = NULL;
struct dm_config_node *cn;
int ok = 0;
if (arg_is_set(cmd, chunksize_ARG)) {
*chunk_size = arg_uint_value(cmd, chunksize_ARG, 0);
if (!validate_cache_chunk_size(cmd, *chunk_size))
return_0;
log_very_verbose("Setting pool chunk size to %s.",
display_size(cmd, *chunk_size));
}
*cache_metadata_format = (cache_metadata_format_t)
arg_uint_value(cmd, cachemetadataformat_ARG, CACHE_METADATA_FORMAT_UNSELECTED);
*cache_mode = (cache_mode_t) arg_uint_value(cmd, cachemode_ARG, CACHE_MODE_UNSELECTED);
*name = arg_str_value(cmd, cachepolicy_ARG, NULL);
if (!_validate_cachepool_params(*name, *cache_mode))
goto_out;
dm_list_iterate_items(group, &cmd->arg_value_groups) {
if (!grouped_arg_is_set(group->arg_values, cachesettings_ARG))
continue;
if (!(current = dm_config_create()))
goto_out;
if (prev)
current->cascade = prev;
prev = current;
if (!(str = grouped_arg_str_value(group->arg_values,
cachesettings_ARG,
NULL)))
goto_out;
if (!dm_config_parse_without_dup_node_check(current, str, str + strlen(str)))
goto_out;
}
if (current) {
if (!(result = dm_config_flatten(current)))
goto_out;
if (result->root) {
if (!(cn = dm_config_create_node(result, "policy_settings")))
goto_out;
cn->child = result->root;
result->root = cn;
}
}
ok = 1;
out:
if (!ok && result) {
dm_config_destroy(result);
result = NULL;
}
while (prev) {
current = prev->cascade;
dm_config_destroy(prev);
prev = current;
}
*settings = result;
return ok;
}
/* FIXME move to lib */
static int _pv_change_tag(struct physical_volume *pv, const char *tag, int addtag)
{
if (addtag) {
if (!str_list_add(pv->fmt->cmd->mem, &pv->tags, tag)) {
log_error("Failed to add tag %s to physical volume %s.",
tag, pv_dev_name(pv));
return 0;
}
} else
str_list_del(&pv->tags, tag);
return 1;
}
/* Set exactly one of VG, LV or PV */
int change_tag(struct cmd_context *cmd, struct volume_group *vg,
struct logical_volume *lv, struct physical_volume *pv, int arg)
{
const char *tag;
struct arg_value_group_list *current_group;
dm_list_iterate_items(current_group, &cmd->arg_value_groups) {
if (!grouped_arg_is_set(current_group->arg_values, arg))
continue;
if (!(tag = grouped_arg_str_value(current_group->arg_values, arg, NULL))) {
log_error("Failed to get tag.");
return 0;
}
if (vg && !vg_change_tag(vg, tag, arg == addtag_ARG))
return_0;
else if (lv && !lv_change_tag(lv, tag, arg == addtag_ARG))
return_0;
else if (pv && !_pv_change_tag(pv, tag, arg == addtag_ARG))
return_0;
}
return 1;
}
/*
* FIXME: replace process_each_label() with process_each_vg() which is
* based on performing vg_read(), which provides a correct representation
* of VGs/PVs, that is not provided by lvmcache_label_scan().
*/
int process_each_label(struct cmd_context *cmd, int argc, char **argv,
struct processing_handle *handle,
process_single_label_fn_t process_single_label)
{
log_report_t saved_log_report_state = log_get_report_state();
struct label *label;
struct dev_iter *iter;
struct device *dev;
struct lvmcache_info *info;
struct dm_list process_duplicates;
struct device_list *devl;
int ret_max = ECMD_PROCESSED;
int ret;
int opt = 0;
dm_list_init(&process_duplicates);
log_set_report_object_type(LOG_REPORT_OBJECT_TYPE_LABEL);
lvmcache_label_scan(cmd);
lvmcache_seed_infos_from_lvmetad(cmd);
if (argc) {
for (; opt < argc; opt++) {
if (!(dev = dev_cache_get(argv[opt], cmd->full_filter))) {
log_error("Failed to find device "
"\"%s\".", argv[opt]);
ret_max = ECMD_FAILED;
continue;
}
if (!(label = lvmcache_get_dev_label(dev))) {
if (!lvmcache_dev_is_unchosen_duplicate(dev)) {
log_error("No physical volume label read from %s.", argv[opt]);
ret_max = ECMD_FAILED;
} else {
if (!(devl = dm_malloc(sizeof(*devl))))
return_0;
devl->dev = dev;
dm_list_add(&process_duplicates, &devl->list);
}
continue;
}
log_set_report_object_name_and_id(dev_name(dev), NULL);
ret = process_single_label(cmd, label, handle);
report_log_ret_code(ret);
if (ret > ret_max)
ret_max = ret;
log_set_report_object_name_and_id(NULL, NULL);
if (sigint_caught())
break;
}
dm_list_iterate_items(devl, &process_duplicates) {
/*
* remove the existing dev for this pvid from lvmcache
* so that the duplicate dev can replace it.
*/
if ((info = lvmcache_info_from_pvid(devl->dev->pvid, NULL, 0)))
lvmcache_del(info);
/*
* add info to lvmcache from the duplicate dev.
*/
label_read(devl->dev, NULL, 0);
/*
* the info/label should now be found because
* the label_read should have added it.
*/
if (!(label = lvmcache_get_dev_label(devl->dev)))
continue;
log_set_report_object_name_and_id(dev_name(devl->dev), NULL);
ret = process_single_label(cmd, label, handle);
report_log_ret_code(ret);
if (ret > ret_max)
ret_max = ret;
log_set_report_object_name_and_id(NULL, NULL);
if (sigint_caught())
break;
}
goto out;
}
if (!(iter = dev_iter_create(cmd->full_filter, 1))) {
log_error("dev_iter creation failed.");
ret_max = ECMD_FAILED;
goto out;
}
while ((dev = dev_iter_get(iter)))
{
if (!(label = lvmcache_get_dev_label(dev)))
continue;
log_set_report_object_name_and_id(dev_name(label->dev), NULL);
ret = process_single_label(cmd, label, handle);
report_log_ret_code(ret);
if (ret > ret_max)
ret_max = ret;
log_set_report_object_name_and_id(NULL, NULL);
if (sigint_caught())
break;
}
dev_iter_destroy(iter);
out:
log_restore_report_state(saved_log_report_state);
return ret_max;
}
/*
* Parse persistent major minor parameters.
*
* --persistent is unspecified => state is deduced
* from presence of options --minor or --major.
*
* -Mn => --minor or --major not allowed.
*
* -My => --minor is required (and also --major on <=2.4)
*/
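/*
 * Illustrative command lines covered by the checks below:
 *
 *     lvcreate -Mn -n lv0 vg0                  no persistent numbers
 *     lvcreate -My --minor 100 -n lv0 vg0      persistent minor; the major is
 *                                              assigned by the kernel (2.6+)
 *     lvcreate -My --major 254 --minor 100 -n lv0 vg0
 *                                              2.4 kernels also need --major
 */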
int get_and_validate_major_minor(const struct cmd_context *cmd,
const struct format_type *fmt,
int32_t *major, int32_t *minor)
{
if (arg_count(cmd, minor_ARG) > 1) {
log_error("Option --minor may not be repeated.");
return 0;
}
if (arg_count(cmd, major_ARG) > 1) {
log_error("Option -j|--major may not be repeated.");
return 0;
}
/* Check with default 'y' */
if (!arg_int_value(cmd, persistent_ARG, 1)) { /* -Mn */
if (arg_is_set(cmd, minor_ARG) || arg_is_set(cmd, major_ARG)) {
log_error("Options --major and --minor are incompatible with -Mn.");
return 0;
}
*major = *minor = -1;
return 1;
}
/* -1 cannot be entered as an argument for --major, --minor */
*major = arg_int_value(cmd, major_ARG, -1);
*minor = arg_int_value(cmd, minor_ARG, -1);
if (arg_is_set(cmd, persistent_ARG)) { /* -My */
if (*minor == -1) {
log_error("Please specify minor number with --minor when using -My.");
return 0;
}
}
if (!strncmp(cmd->kernel_vsn, "2.4.", 4)) {
/* Major is required for 2.4 */
if (arg_is_set(cmd, persistent_ARG) && *major < 0) {
log_error("Please specify major number with --major when using -My.");
return 0;
}
} else {
if (*major != -1) {
log_warn("WARNING: Ignoring supplied major number %d - "
"kernel assigns major numbers dynamically. "
"Using major number %d instead.",
*major, cmd->dev_types->device_mapper_major);
}
/* Stay with dynamic major:minor if minor is not specified. */
*major = (*minor == -1) ? -1 : cmd->dev_types->device_mapper_major;
}
if ((*minor != -1) && !validate_major_minor(cmd, fmt, *major, *minor))
return_0;
return 1;
}
/*
* Validate lvname parameter
*
 * If lvname contains a vgname, the vgname is extracted from it.
 * If a vgname is also passed in, the two names must match.
*/
int validate_lvname_param(struct cmd_context *cmd, const char **vg_name,
const char **lv_name)
{
const char *vgname;
const char *lvname;
if (!lv_name || !*lv_name)
return 1; /* NULL lvname is ok */
/* If contains VG name, extract it. */
if (strchr(*lv_name, (int) '/')) {
if (!(vgname = _extract_vgname(cmd, *lv_name, &lvname)))
return_0;
if (!*vg_name)
*vg_name = vgname;
else if (strcmp(vgname, *vg_name)) {
log_error("Please use a single volume group name "
"(\"%s\" or \"%s\").", vgname, *vg_name);
return 0;
}
*lv_name = lvname;
}
if (!validate_name(*lv_name)) {
log_error("Logical volume name \"%s\" is invalid.",
*lv_name);
return 0;
}
return 1;
}
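/*
 * Example (illustrative): with *vg_name NULL and *lv_name "vg00/lvol0", the
 * call below sets *vg_name to "vg00" and *lv_name to "lvol0"; had *vg_name
 * been "vg01", it would fail with an error instead.
 *
 *     if (!validate_lvname_param(cmd, &vg_name, &lv_name))
 *         return EINVALID_CMD_LINE;
 */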
/*
* Validate lvname parameter
* This name must follow restriction rules on prefixes and suffixes.
*
 * If lvname contains a vgname, the vgname is extracted from it.
 * If a vgname is also passed in, the two names must match.
*/
int validate_restricted_lvname_param(struct cmd_context *cmd, const char **vg_name,
const char **lv_name)
{
if (!validate_lvname_param(cmd, vg_name, lv_name))
return_0;
if (lv_name && *lv_name && !apply_lvname_restrictions(*lv_name))
return_0;
return 1;
}
/*
* Extract list of VG names and list of tags from command line arguments.
*/
static int _get_arg_vgnames(struct cmd_context *cmd,
int argc, char **argv,
const char *one_vgname,
struct dm_list *use_vgnames,
struct dm_list *arg_vgnames,
struct dm_list *arg_tags)
{
int opt = 0;
int ret_max = ECMD_PROCESSED;
const char *vg_name;
if (one_vgname) {
if (!str_list_add(cmd->mem, arg_vgnames,
dm_pool_strdup(cmd->mem, one_vgname))) {
log_error("strlist allocation failed.");
return ECMD_FAILED;
}
return ret_max;
}
if (use_vgnames && !dm_list_empty(use_vgnames)) {
dm_list_splice(arg_vgnames, use_vgnames);
return ret_max;
}
for (; opt < argc; opt++) {
vg_name = argv[opt];
if (*vg_name == '@') {
if (!validate_tag(vg_name + 1)) {
log_error("Skipping invalid tag: %s", vg_name);
if (ret_max < EINVALID_CMD_LINE)
ret_max = EINVALID_CMD_LINE;
continue;
}
if (!str_list_add(cmd->mem, arg_tags,
dm_pool_strdup(cmd->mem, vg_name + 1))) {
log_error("strlist allocation failed.");
return ECMD_FAILED;
}
continue;
}
vg_name = skip_dev_dir(cmd, vg_name, NULL);
if (strchr(vg_name, '/')) {
log_error("Invalid volume group name %s.", vg_name);
if (ret_max < EINVALID_CMD_LINE)
ret_max = EINVALID_CMD_LINE;
continue;
}
if (!str_list_add(cmd->mem, arg_vgnames,
dm_pool_strdup(cmd->mem, vg_name))) {
log_error("strlist allocation failed.");
return ECMD_FAILED;
}
}
return ret_max;
}
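/*
 * Illustrative example (assuming the default dev_dir "/dev/"): given the
 * command-line arguments
 *
 *     vg0 @backup /dev/vg1
 *
 * arg_vgnames ends up holding "vg0" and "vg1" (the dev_dir prefix is
 * stripped) and arg_tags holds "backup".
 */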
struct processing_handle *init_processing_handle(struct cmd_context *cmd, struct processing_handle *parent_handle)
{
struct processing_handle *handle;
if (!(handle = dm_pool_zalloc(cmd->mem, sizeof(struct processing_handle)))) {
log_error("_init_processing_handle: failed to allocate memory for processing handle");
return NULL;
}
handle->parent = parent_handle;
/*
* For any reporting tool, the internal_report_for_select is reset to 0
* automatically because the internal reporting/selection is simply not
* needed - the reporting/selection is already a part of the code path
* used there.
*
* *The internal report for select is only needed for non-reporting tools!*
*/
handle->internal_report_for_select = arg_is_set(cmd, select_ARG);
handle->include_historical_lvs = cmd->include_historical_lvs;
if (!parent_handle && !cmd->cmd_report.report_group) {
if (!report_format_init(cmd)) {
dm_pool_free(cmd->mem, handle);
return NULL;
}
} else
cmd->cmd_report.saved_log_report_state = log_get_report_state();
log_set_report_context(LOG_REPORT_CONTEXT_PROCESSING);
return handle;
}
int init_selection_handle(struct cmd_context *cmd, struct processing_handle *handle,
report_type_t initial_report_type)
{
struct selection_handle *sh;
const char *selection;
if (!(sh = dm_pool_zalloc(cmd->mem, sizeof(struct selection_handle)))) {
log_error("_init_selection_handle: failed to allocate memory for selection handle");
return 0;
}
if (!report_get_single_selection(cmd, initial_report_type, &selection))
return_0;
sh->report_type = initial_report_type;
if (!(sh->selection_rh = report_init_for_selection(cmd, &sh->report_type, selection))) {
dm_pool_free(cmd->mem, sh);
return_0;
}
handle->selection_handle = sh;
return 1;
}
void destroy_processing_handle(struct cmd_context *cmd, struct processing_handle *handle)
{
if (handle) {
if (handle->selection_handle && handle->selection_handle->selection_rh)
dm_report_free(handle->selection_handle->selection_rh);
log_restore_report_state(cmd->cmd_report.saved_log_report_state);
if (!cmd->is_interactive) {
if (!dm_report_group_destroy(cmd->cmd_report.report_group))
stack;
cmd->cmd_report.report_group = NULL;
if (cmd->cmd_report.log_rh) {
dm_report_free(cmd->cmd_report.log_rh);
cmd->cmd_report.log_rh = NULL;
}
}
/*
* TODO: think about better alternatives:
* handle mempool, dm_alloc for handle memory...
*/
memset(handle, 0, sizeof(*handle));
}
}
int select_match_vg(struct cmd_context *cmd, struct processing_handle *handle,
struct volume_group *vg)
{
int r;
if (!handle->internal_report_for_select)
return 1;
handle->selection_handle->orig_report_type = VGS;
if (!(r = report_for_selection(cmd, handle, NULL, vg, NULL)))
log_error("Selection failed for VG %s.", vg->name);
handle->selection_handle->orig_report_type = 0;
return r;
}
int select_match_lv(struct cmd_context *cmd, struct processing_handle *handle,
struct volume_group *vg, struct logical_volume *lv)
{
int r;
if (!handle->internal_report_for_select)
return 1;
handle->selection_handle->orig_report_type = LVS;
if (!(r = report_for_selection(cmd, handle, NULL, vg, lv)))
log_error("Selection failed for LV %s.", lv->name);
handle->selection_handle->orig_report_type = 0;
return r;
}
int select_match_pv(struct cmd_context *cmd, struct processing_handle *handle,
struct volume_group *vg, struct physical_volume *pv)
{
int r;
if (!handle->internal_report_for_select)
return 1;
handle->selection_handle->orig_report_type = PVS;
if (!(r = report_for_selection(cmd, handle, pv, vg, NULL)))
log_error("Selection failed for PV %s.", dev_name(pv->dev));
handle->selection_handle->orig_report_type = 0;
return r;
}
static int _select_matches(struct processing_handle *handle)
{
if (!handle->internal_report_for_select)
return 1;
return handle->selection_handle->selected;
}
static int _process_vgnameid_list(struct cmd_context *cmd, uint32_t read_flags,
struct dm_list *vgnameids_to_process,
struct dm_list *arg_vgnames,
struct dm_list *arg_tags,
struct processing_handle *handle,
process_single_vg_fn_t process_single_vg)
{
log_report_t saved_log_report_state = log_get_report_state();
char uuid[64] __attribute__((aligned(8)));
struct volume_group *vg;
struct vgnameid_list *vgnl;
const char *vg_name;
const char *vg_uuid;
uint32_t lockd_state = 0;
int whole_selected = 0;
int ret_max = ECMD_PROCESSED;
int ret;
int skip;
int notfound;
int process_all = 0;
int already_locked;
int do_report_ret_code = 1;
log_set_report_object_type(LOG_REPORT_OBJECT_TYPE_VG);
/*
* If no VG names or tags were supplied, then process all VGs.
*/
if (dm_list_empty(arg_vgnames) && dm_list_empty(arg_tags))
process_all = 1;
/*
* FIXME If one_vgname, only proceed if exactly one VG matches tags or selection.
*/
dm_list_iterate_items(vgnl, vgnameids_to_process) {
vg_name = vgnl->vg_name;
vg_uuid = vgnl->vgid;
skip = 0;
notfound = 0;
uuid[0] = '\0';
if (is_orphan_vg(vg_name)) {
log_set_report_object_type(LOG_REPORT_OBJECT_TYPE_ORPHAN);
log_set_report_object_name_and_id(vg_name + sizeof(VG_ORPHANS), uuid);
} else {
if (vg_uuid && !id_write_format((const struct id*)vg_uuid, uuid, sizeof(uuid)))
stack;
log_set_report_object_name_and_id(vg_name, uuid);
}
if (sigint_caught()) {
ret_max = ECMD_FAILED;
goto_out;
}
log_very_verbose("Processing VG %s %s", vg_name, uuid);
if (!lockd_vg(cmd, vg_name, NULL, 0, &lockd_state)) {
ret_max = ECMD_FAILED;
report_log_ret_code(ret_max);
continue;
}
already_locked = lvmcache_vgname_is_locked(vg_name);
vg = vg_read(cmd, vg_name, vg_uuid, read_flags, lockd_state);
if (_ignore_vg(vg, vg_name, arg_vgnames, read_flags, &skip, &notfound)) {
stack;
ret_max = ECMD_FAILED;
report_log_ret_code(ret_max);
goto endvg;
}
if (skip || notfound)
goto endvg;
/* Process this VG? */
if ((process_all ||
(!dm_list_empty(arg_vgnames) && str_list_match_item(arg_vgnames, vg_name)) ||
(!dm_list_empty(arg_tags) && str_list_match_list(arg_tags, &vg->tags, NULL))) &&
select_match_vg(cmd, handle, vg) && _select_matches(handle)) {
log_very_verbose("Running command for VG %s %s", vg_name, vg_uuid ? uuid : "");
ret = process_single_vg(cmd, vg_name, vg, handle);
_update_selection_result(handle, &whole_selected);
if (ret != ECMD_PROCESSED)
stack;
report_log_ret_code(ret);
if (ret > ret_max)
ret_max = ret;
}
if (!vg_read_error(vg) && !already_locked)
unlock_vg(cmd, vg, vg_name);
endvg:
release_vg(vg);
if (!lockd_vg(cmd, vg_name, "un", 0, &lockd_state))
stack;
log_set_report_object_name_and_id(NULL, NULL);
}
/* the VG is selected if at least one LV is selected */
_set_final_selection_result(handle, whole_selected);
do_report_ret_code = 0;
out:
if (do_report_ret_code)
report_log_ret_code(ret_max);
log_restore_report_state(saved_log_report_state);
return ret_max;
}
/*
* Check if a command line VG name is ambiguous, i.e. there are multiple VGs on
* the system that have the given name. If *one* VG with the given name is
* local and the rest are foreign, then use the local VG (removing foreign VGs
* with the same name from the vgnameids_on_system list). If multiple VGs with
* the given name are local, we don't know which VG is intended, so remove the
* ambiguous name from the list of args.
*/
static int _resolve_duplicate_vgnames(struct cmd_context *cmd,
struct dm_list *arg_vgnames,
struct dm_list *vgnameids_on_system)
{
struct dm_str_list *sl, *sl2;
struct vgnameid_list *vgnl, *vgnl2;
char uuid[64] __attribute__((aligned(8)));
int found;
int ret = ECMD_PROCESSED;
dm_list_iterate_items_safe(sl, sl2, arg_vgnames) {
found = 0;
dm_list_iterate_items(vgnl, vgnameids_on_system) {
if (strcmp(sl->str, vgnl->vg_name))
continue;
found++;
}
if (found < 2)
continue;
/*
* More than one VG match the given name.
* If only one is local, use that one.
*/
found = 0;
dm_list_iterate_items_safe(vgnl, vgnl2, vgnameids_on_system) {
if (strcmp(sl->str, vgnl->vg_name))
continue;
/*
* Without lvmetad, a label scan has already populated
* lvmcache vginfo with this information.
* With lvmetad, this function does vg_lookup on this
* name/vgid and checks system_id in the metadata.
*/
if (lvmcache_vg_is_foreign(cmd, vgnl->vg_name, vgnl->vgid)) {
if (!id_write_format((const struct id*)vgnl->vgid, uuid, sizeof(uuid)))
stack;
log_warn("WARNING: Ignoring foreign VG with matching name %s UUID %s.",
vgnl->vg_name, uuid);
dm_list_del(&vgnl->list);
} else {
found++;
}
}
if (found < 2)
continue;
/*
* More than one VG with this name is local so the intended VG
* is unknown.
*/
log_error("Multiple VGs found with the same name: skipping %s", sl->str);
log_error("Use --select vg_uuid=<uuid> in place of the VG name.");
dm_list_del(&sl->list);
ret = ECMD_FAILED;
}
return ret;
}
/*
* For each arg_vgname, move the corresponding entry from
* vgnameids_on_system to vgnameids_to_process. If an
* item in arg_vgnames doesn't exist in vgnameids_on_system,
* then add a new entry for it to vgnameids_to_process.
*/
static void _choose_vgs_to_process(struct cmd_context *cmd,
struct dm_list *arg_vgnames,
struct dm_list *vgnameids_on_system,
struct dm_list *vgnameids_to_process)
{
char uuid[64] __attribute__((aligned(8)));
struct dm_str_list *sl, *sl2;
struct vgnameid_list *vgnl, *vgnl2;
struct id id;
int arg_is_uuid = 0;
int found;
dm_list_iterate_items_safe(sl, sl2, arg_vgnames) {
found = 0;
dm_list_iterate_items_safe(vgnl, vgnl2, vgnameids_on_system) {
if (strcmp(sl->str, vgnl->vg_name))
continue;
dm_list_del(&vgnl->list);
dm_list_add(vgnameids_to_process, &vgnl->list);
found = 1;
break;
}
/*
* If the VG name arg looks like a UUID, then check if it
* matches the UUID of a VG. (--select should generally
* be used to select a VG by uuid instead.)
*/
if (!found && (cmd->cname->flags & ALLOW_UUID_AS_NAME))
arg_is_uuid = id_read_format_try(&id, sl->str);
if (!found && arg_is_uuid) {
dm_list_iterate_items_safe(vgnl, vgnl2, vgnameids_on_system) {
if (!(id_write_format((const struct id*)vgnl->vgid, uuid, sizeof(uuid))))
continue;
if (strcmp(sl->str, uuid))
continue;
log_print("Processing VG %s because of matching UUID %s",
vgnl->vg_name, uuid);
dm_list_del(&vgnl->list);
dm_list_add(vgnameids_to_process, &vgnl->list);
/* Make the arg_vgnames entry use the actual VG name. */
sl->str = dm_pool_strdup(cmd->mem, vgnl->vg_name);
found = 1;
break;
}
}
/*
* If the name arg was not found in the list of all VGs, then
* it probably doesn't exist, but we want the "VG not found"
* failure to be handled by the existing vg_read() code for
* that error. So, create an entry with just the VG name so
* that the processing loop will attempt to process it and use
* the vg_read() error path.
*/
if (!found) {
log_verbose("VG name on command line not found in list of VGs: %s", sl->str);
if (!(vgnl = dm_pool_alloc(cmd->mem, sizeof(*vgnl))))
continue;
vgnl->vgid = NULL;
if (!(vgnl->vg_name = dm_pool_strdup(cmd->mem, sl->str)))
continue;
dm_list_add(vgnameids_to_process, &vgnl->list);
}
}
}
/*
* Call process_single_vg() for each VG selected by the command line arguments.
* If one_vgname is set, process only that VG and ignore argc/argv (which should be 0/NULL).
* If one_vgname is not set, get VG names to process from argc/argv.
*/
int process_each_vg(struct cmd_context *cmd,
int argc, char **argv,
const char *one_vgname,
struct dm_list *use_vgnames,
uint32_t read_flags,
int include_internal,
struct processing_handle *handle,
process_single_vg_fn_t process_single_vg)
{
log_report_t saved_log_report_state = log_get_report_state();
int handle_supplied = handle != NULL;
struct dm_list arg_tags; /* str_list */
struct dm_list arg_vgnames; /* str_list */
struct dm_list vgnameids_on_system; /* vgnameid_list */
struct dm_list vgnameids_to_process; /* vgnameid_list */
int enable_all_vgs = (cmd->cname->flags & ALL_VGS_IS_DEFAULT);
int process_all_vgs_on_system = 0;
int ret_max = ECMD_PROCESSED;
int ret;
log_set_report_object_type(LOG_REPORT_OBJECT_TYPE_VG);
log_debug("Processing each VG");
/* Disable error in vg_read so we can print it from ignore_vg. */
cmd->vg_read_print_access_error = 0;
dm_list_init(&arg_tags);
dm_list_init(&arg_vgnames);
dm_list_init(&vgnameids_on_system);
dm_list_init(&vgnameids_to_process);
/*
* Find any VGs or tags explicitly provided on the command line.
*/
if ((ret = _get_arg_vgnames(cmd, argc, argv, one_vgname, use_vgnames, &arg_vgnames, &arg_tags)) != ECMD_PROCESSED) {
ret_max = ret;
goto_out;
}
/*
* Process all VGs on the system when:
* . tags are specified and all VGs need to be read to
* look for matching tags.
* . no VG names are specified and the command defaults
* to processing all VGs when none are specified.
*/
if ((dm_list_empty(&arg_vgnames) && enable_all_vgs) || !dm_list_empty(&arg_tags))
process_all_vgs_on_system = 1;
/*
* Needed for a current listing of the global VG namespace.
*/
if (process_all_vgs_on_system && !lockd_gl(cmd, "sh", 0)) {
ret_max = ECMD_FAILED;
goto_out;
}
/*
* Scan all devices to populate lvmcache with initial
* list of PVs and VGs.
*/
lvmcache_label_scan(cmd);
/*
* A list of all VGs on the system is needed when:
* . processing all VGs on the system
* . A VG name is specified which may refer to one
* of multiple VGs on the system with that name.
*/
log_very_verbose("Obtaining the complete list of VGs to process");
if (!get_vgnameids(cmd, &vgnameids_on_system, NULL, include_internal)) {
ret_max = ECMD_FAILED;
goto_out;
}
if (!dm_list_empty(&arg_vgnames)) {
/* This may remove entries from arg_vgnames or vgnameids_on_system. */
ret = _resolve_duplicate_vgnames(cmd, &arg_vgnames, &vgnameids_on_system);
if (ret > ret_max)
ret_max = ret;
if (dm_list_empty(&arg_vgnames) && dm_list_empty(&arg_tags)) {
ret_max = ECMD_FAILED;
goto out;
}
}
if (dm_list_empty(&arg_vgnames) && dm_list_empty(&vgnameids_on_system)) {
/* FIXME Should be log_print, but suppressed for reporting cmds */
log_verbose("No volume groups found.");
ret_max = ECMD_PROCESSED;
goto out;
}
if (dm_list_empty(&arg_vgnames))
read_flags |= READ_OK_NOTFOUND;
/*
* When processing all VGs, vgnameids_on_system simply becomes
* vgnameids_to_process.
* When processing only specified VGs, then for each item in
* arg_vgnames, move the corresponding entry from
* vgnameids_on_system to vgnameids_to_process.
*/
if (process_all_vgs_on_system)
dm_list_splice(&vgnameids_to_process, &vgnameids_on_system);
else
_choose_vgs_to_process(cmd, &arg_vgnames, &vgnameids_on_system, &vgnameids_to_process);
if (!handle && !(handle = init_processing_handle(cmd, NULL))) {
ret_max = ECMD_FAILED;
goto_out;
}
if (handle->internal_report_for_select && !handle->selection_handle &&
!init_selection_handle(cmd, handle, VGS)) {
ret_max = ECMD_FAILED;
goto_out;
}
ret = _process_vgnameid_list(cmd, read_flags, &vgnameids_to_process,
&arg_vgnames, &arg_tags, handle, process_single_vg);
if (ret > ret_max)
ret_max = ret;
out:
if (!handle_supplied)
destroy_processing_handle(cmd, handle);
log_restore_report_state(saved_log_report_state);
return ret_max;
}
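/*
 * Find a string list entry whose value is the concatenation of prefix and str.
 * Returns the matching entry, or NULL if none is found.
 */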
static struct dm_str_list *_str_list_match_item_with_prefix(const struct dm_list *sll, const char *prefix, const char *str)
{
struct dm_str_list *sl;
size_t prefix_len = strlen(prefix);
dm_list_iterate_items(sl, sll) {
if (!strncmp(prefix, sl->str, prefix_len) &&
!strcmp(sl->str + prefix_len, str))
return sl;
}
return NULL;
}
/*
* Dummy LV, segment type and segment to represent all historical LVs.
*/
static struct logical_volume _historical_lv = {
.name = "",
.major = -1,
.minor = -1,
.snapshot_segs = DM_LIST_HEAD_INIT(_historical_lv.snapshot_segs),
.segments = DM_LIST_HEAD_INIT(_historical_lv.segments),
.tags = DM_LIST_HEAD_INIT(_historical_lv.tags),
.segs_using_this_lv = DM_LIST_HEAD_INIT(_historical_lv.segs_using_this_lv),
.indirect_glvs = DM_LIST_HEAD_INIT(_historical_lv.indirect_glvs),
.hostname = "",
};
static struct segment_type _historical_segment_type = {
.name = "historical",
.flags = SEG_VIRTUAL | SEG_CANNOT_BE_ZEROED,
};
static struct lv_segment _historical_lv_segment = {
.lv = &_historical_lv,
.segtype = &_historical_segment_type,
.len = 0,
.tags = DM_LIST_HEAD_INIT(_historical_lv_segment.tags),
.origin_list = DM_LIST_HEAD_INIT(_historical_lv_segment.origin_list),
};
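/*
 * Return 1 if any of the count options in opts is set on the command line.
 * Optionally report how many of them are set (match_count) and how many
 * are not (unmatch_count).
 */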
int opt_in_list_is_set(struct cmd_context *cmd, int *opts, int count,
int *match_count, int *unmatch_count)
{
int match = 0;
int unmatch = 0;
int i;
for (i = 0; i < count; i++) {
if (arg_is_set(cmd, opts[i]))
match++;
else
unmatch++;
}
if (match_count)
*match_count = match;
if (unmatch_count)
*unmatch_count = unmatch;
return match ? 1 : 0;
}
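/*
 * Write the long names of the given options into buf as a space-separated
 * string.  The result is always null-terminated and truncated if buf is
 * too small.
 */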
void opt_array_to_str(struct cmd_context *cmd, int *opts, int count,
char *buf, int len)
{
int pos = 0;
int ret;
int i;
for (i = 0; i < count; i++) {
ret = snprintf(buf + pos, len - pos, "%s ", arg_long_option_name(opts[i]));
if (ret < 0 || ret >= len - pos)
break;
pos += ret;
}
buf[len - 1] = '\0';
}
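/*
 * Write the names of the LV properties whose bits are set in bits into buf
 * as a space-separated string.
 */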
static void _lvp_bits_to_str(uint64_t bits, char *buf, int len)
{
struct lv_prop *prop;
int lvp_enum;
int pos = 0;
int ret;
for (lvp_enum = 0; lvp_enum < LVP_COUNT; lvp_enum++) {
if (!(prop = get_lv_prop(lvp_enum)))
continue;
if (lvp_bit_is_set(bits, lvp_enum)) {
ret = snprintf(buf + pos, len - pos, "%s ", prop->name);
if (ret < 0 || ret >= len - pos)
break;
pos += ret;
}
}
buf[len - 1] = '\0';
}
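/*
 * Write the names of the LV types whose bits are set in bits into buf
 * as a space-separated string.
 */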
static void _lvt_bits_to_str(uint64_t bits, char *buf, int len)
{
struct lv_type *type;
int lvt_enum;
int pos = 0;
int ret;
for (lvt_enum = 0; lvt_enum < LVT_COUNT; lvt_enum++) {
if (!(type = get_lv_type(lvt_enum)))
continue;
if (lvt_bit_is_set(bits, lvt_enum)) {
ret = snprintf(buf + pos, len - pos, "%s ", type->name);
if (ret < 0 || ret >= len - pos)
break;
pos += ret;
}
}
buf[len - 1] = '\0';
}
/*
 * Used in place of a prop->fn function pointer for the lv_is_<prop>() checks
 * that are #defines rather than functions.  Alternatively, each lv_is_<prop>()
 * could be turned into a function.
 */
static int _lv_is_prop(struct cmd_context *cmd, struct logical_volume *lv, int lvp_enum)
{
switch (lvp_enum) {
case is_locked_LVP:
return lv_is_locked(lv);
case is_partial_LVP:
return lv_is_partial(lv);
case is_virtual_LVP:
return lv_is_virtual(lv);
case is_merging_LVP:
return lv_is_merging(lv);
case is_merging_origin_LVP:
return lv_is_merging_origin(lv);
case is_converting_LVP:
return lv_is_converting(lv);
case is_external_origin_LVP:
return lv_is_external_origin(lv);
case is_virtual_origin_LVP:
return lv_is_virtual_origin(lv);
case is_not_synced_LVP:
return lv_is_not_synced(lv);
case is_pending_delete_LVP:
return lv_is_pending_delete(lv);
case is_error_when_full_LVP:
return lv_is_error_when_full(lv);
case is_pvmove_LVP:
return lv_is_pvmove(lv);
case is_removed_LVP:
return lv_is_removed(lv);
case is_vg_writable_LVP:
return (lv->vg->status & LVM_WRITE) ? 1 : 0;
case is_thinpool_data_LVP:
return lv_is_thin_pool_data(lv);
case is_thinpool_metadata_LVP:
return lv_is_thin_pool_metadata(lv);
case is_cachepool_data_LVP:
return lv_is_cache_pool_data(lv);
case is_cachepool_metadata_LVP:
return lv_is_cache_pool_metadata(lv);
case is_mirror_image_LVP:
return lv_is_mirror_image(lv);
case is_mirror_log_LVP:
return lv_is_mirror_log(lv);
case is_raid_image_LVP:
return lv_is_raid_image(lv);
case is_raid_metadata_LVP:
return lv_is_raid_metadata(lv);
case is_origin_LVP: /* use lv_is_thick_origin */
return lv_is_origin(lv);
case is_thick_origin_LVP:
return lv_is_thick_origin(lv);
case is_thick_snapshot_LVP:
return lv_is_thick_snapshot(lv);
case is_thin_origin_LVP:
return lv_is_thin_origin(lv, NULL);
case is_thin_snapshot_LVP:
return lv_is_thin_snapshot(lv);
case is_cache_origin_LVP:
return lv_is_cache_origin(lv);
case is_merging_cow_LVP:
return lv_is_merging_cow(lv);
case is_cow_covering_origin_LVP:
return lv_is_cow_covering_origin(lv);
case is_visible_LVP:
return lv_is_visible(lv);
case is_historical_LVP:
return lv_is_historical(lv);
case is_raid_with_tracking_LVP:
return lv_is_raid_with_tracking(lv);
default:
log_error(INTERNAL_ERROR "unknown lv property value lvp_enum %d", lvp_enum);
}
return 0;
}
/*
* Check if an LV matches a given LV type enum.
*/
static int _lv_is_type(struct cmd_context *cmd, struct logical_volume *lv, int lvt_enum)
{
struct lv_segment *seg = first_seg(lv);
switch (lvt_enum) {
case striped_LVT:
return seg_is_striped(seg) && !lv_is_cow(lv);
case linear_LVT:
return seg_is_linear(seg) && !lv_is_cow(lv);
case snapshot_LVT:
return lv_is_cow(lv);
case thin_LVT:
return lv_is_thin_volume(lv);
case thinpool_LVT:
return lv_is_thin_pool(lv);
case cache_LVT:
return lv_is_cache(lv);
case cachepool_LVT:
return lv_is_cache_pool(lv);
case mirror_LVT:
return lv_is_mirror(lv);
case raid_LVT:
return lv_is_raid(lv);
case raid0_LVT:
return seg_is_any_raid0(seg);
case raid1_LVT:
return seg_is_raid1(seg);
case raid4_LVT:
return seg_is_raid4(seg);
case raid5_LVT:
return seg_is_any_raid5(seg);
case raid6_LVT:
return seg_is_any_raid6(seg);
case raid10_LVT:
return seg_is_raid10(seg);
case error_LVT:
return !strcmp(seg->segtype->name, SEG_TYPE_NAME_ERROR);
case zero_LVT:
return !strcmp(seg->segtype->name, SEG_TYPE_NAME_ZERO);
default:
log_error(INTERNAL_ERROR "unknown lv type value lvt_enum %d", lvt_enum);
}
return 0;
}
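/*
 * Return the <type>_LVT enum matching the given LV, or 0 if the LV matches
 * no known type.
 */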
int get_lvt_enum(struct logical_volume *lv)
{
struct lv_segment *seg = first_seg(lv);
/*
* The order these are checked is important, because a snapshot LV has
* a linear seg type.
*/
if (lv_is_cow(lv))
return snapshot_LVT;
if (seg_is_linear(seg))
return linear_LVT;
if (seg_is_striped(seg))
return striped_LVT;
if (lv_is_thin_volume(lv))
return thin_LVT;
if (lv_is_thin_pool(lv))
return thinpool_LVT;
if (lv_is_cache(lv))
return cache_LVT;
if (lv_is_cache_pool(lv))
return cachepool_LVT;
if (lv_is_mirror(lv))
return mirror_LVT;
if (lv_is_raid(lv))
return raid_LVT;
if (seg_is_any_raid0(seg))
return raid0_LVT;
if (seg_is_raid1(seg))
return raid1_LVT;
if (seg_is_raid4(seg))
return raid4_LVT;
if (seg_is_any_raid5(seg))
return raid5_LVT;
if (seg_is_any_raid6(seg))
return raid6_LVT;
if (seg_is_raid10(seg))
return raid10_LVT;
if (!strcmp(seg->segtype->name, SEG_TYPE_NAME_ERROR))
return error_LVT;
if (!strcmp(seg->segtype->name, SEG_TYPE_NAME_ZERO))
return zero_LVT;
return 0;
}
/*
* Call lv_is_<type> for each <type>_LVT bit set in lvt_bits.
* If lv matches one of the specified lv types, then return 1.
*/
static int _lv_types_match(struct cmd_context *cmd, struct logical_volume *lv, uint64_t lvt_bits,
uint64_t *match_bits, uint64_t *unmatch_bits)
{
struct lv_type *type;
int lvt_enum;
int found_a_match = 0;
int match;
if (match_bits)
*match_bits = 0;
if (unmatch_bits)
*unmatch_bits = 0;
for (lvt_enum = 1; lvt_enum < LVT_COUNT; lvt_enum++) {
if (!lvt_bit_is_set(lvt_bits, lvt_enum))
continue;
if (!(type = get_lv_type(lvt_enum)))
continue;
/*
 * All types are currently handled by _lv_is_type()
 * because the lv_is_<type>() checks are #defines and
 * are not exposed in tools.h.
 */
if (!type->fn)
match = _lv_is_type(cmd, lv, lvt_enum);
else
match = type->fn(cmd, lv);
if (match)
found_a_match = 1;
if (match_bits && match)
*match_bits |= lvt_enum_to_bit(lvt_enum);
if (unmatch_bits && !match)
*unmatch_bits |= lvt_enum_to_bit(lvt_enum);
}
return found_a_match;
}
/*
* Call lv_is_<prop> for each <prop>_LVP bit set in lvp_bits.
* If lv matches all of the specified lv properties, then return 1.
*/
static int _lv_props_match(struct cmd_context *cmd, struct logical_volume *lv, uint64_t lvp_bits,
uint64_t *match_bits, uint64_t *unmatch_bits)
{
struct lv_prop *prop;
int lvp_enum;
int found_a_mismatch = 0;
int match;
if (match_bits)
*match_bits = 0;
if (unmatch_bits)
*unmatch_bits = 0;
for (lvp_enum = 1; lvp_enum < LVP_COUNT; lvp_enum++) {
if (!lvp_bit_is_set(lvp_bits, lvp_enum))
continue;
if (!(prop = get_lv_prop(lvp_enum)))
continue;
if (!prop->fn)
match = _lv_is_prop(cmd, lv, lvp_enum);
else
match = prop->fn(cmd, lv);
if (!match)
found_a_mismatch = 1;
if (match_bits && match)
*match_bits |= lvp_enum_to_bit(lvp_enum);
if (unmatch_bits && !match)
*unmatch_bits |= lvp_enum_to_bit(lvp_enum);
}
return !found_a_mismatch;
}
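/*
 * Check that the LV's type is accepted for positional arg 'pos' in the
 * command definition (pos counts from 1; 0 means there is no LV position
 * to check).  Returns 1 if the type is accepted or no restriction applies.
 */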
static int _check_lv_types(struct cmd_context *cmd, struct logical_volume *lv, int pos)
{
int ret;
if (!pos)
return 1;
if (!cmd->command->required_pos_args[pos-1].def.lvt_bits)
return 1;
if (!val_bit_is_set(cmd->command->required_pos_args[pos-1].def.val_bits, lv_VAL)) {
log_error(INTERNAL_ERROR "Command %d:%s arg position %d does not permit an LV (%llx)",
cmd->command->command_index, cmd->command->command_id,
pos, (unsigned long long)cmd->command->required_pos_args[pos-1].def.val_bits);
return 0;
}
ret = _lv_types_match(cmd, lv, cmd->command->required_pos_args[pos-1].def.lvt_bits, NULL, NULL);
if (!ret) {
int lvt_enum = get_lvt_enum(lv);
struct lv_type *type = get_lv_type(lvt_enum);
log_warn("Command on LV %s does not accept LV type %s.",
display_lvname(lv), type ? type->name : "unknown");
}
return ret;
}
/* Check if LV passes each rule specified in command definition. */
static int _check_lv_rules(struct cmd_context *cmd, struct logical_volume *lv)
{
char buf[64];
struct cmd_rule *rule;
struct lv_type *lvtype = NULL;
uint64_t lv_props_match_bits = 0, lv_props_unmatch_bits = 0;
uint64_t lv_types_match_bits = 0, lv_types_unmatch_bits = 0;
int opts_match_count = 0, opts_unmatch_count = 0;
int lvt_enum;
int ret = 1;
int i;
lvt_enum = get_lvt_enum(lv);
if (lvt_enum)
lvtype = get_lv_type(lvt_enum);
for (i = 0; i < cmd->command->rule_count; i++) {
rule = &cmd->command->rules[i];
/*
* RULE: <conditions> INVALID|REQUIRE <checks>
*
* If all the conditions apply to the command+LV, then
* the checks are performed. If all conditions are zero
* (!opts_count, !lvt_bits, !lvp_bits), then the check
* is always performed.
*
* Conditions:
*
* 1. options (opts): if any of the specified options are set,
* then the checks may apply.
*
* 2. LV types (lvt_bits): if any of the specified LV types
* match the LV, then the checks may apply.
*
* 3. LV properties (lvp_bits): if all of the specified
* LV properties match the LV, then the checks may apply.
*
* If conditions 1, 2, 3 all pass, then the checks apply.
*
* Checks:
*
* 1. options (check_opts):
* INVALID: if any of the specified options are set,
* then the command fails.
* REQUIRE: if any of the specified options are not set,
* then the command fails.
*
* 2. LV types (check_lvt_bits):
* INVALID: if any of the specified LV types match the LV,
* then the command fails.
* REQUIRE: if none of the specified LV types match the LV,
* then the command fails.