1
0
mirror of git://sourceware.org/git/lvm2.git synced 2025-02-16 13:57:49 +03:00
lvm2/tools/lvconvert.c
Zdenek Kabelac 45d9b2c470 command: more static const declaration
Use static const for declared arrays.
Access to arrays through get_ functions().
2024-04-29 00:13:43 +02:00

6552 lines
184 KiB
C

/*
* Copyright (C) 2005-2023 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "tools.h"
#include "lib/lvmpolld/polldaemon.h"
#include "lib/metadata/lv_alloc.h"
#include "lib/metadata/metadata.h"
#include "lvconvert_poll.h"
/*
 * Broad category of the requested conversion; routes final argument
 * validation in _read_params().
 */
typedef enum {
	/* Split:
	 * For a mirrored or raid LV, split mirror into two mirrors, optionally tracking
	 * future changes to the main mirror to allow future recombination.
	 */
	CONV_SPLIT_MIRRORS = 2,

	/* Every other segment type or mirror log conversion we haven't separated out */
	CONV_OTHER = 3,
} conversion_type_t;
/*
 * Collected, validated command-line state for a single lvconvert invocation.
 * Filled in by _read_params() and consumed by the conversion helpers below.
 */
struct lvconvert_params {
	/* Exactly one command category is determined (see conversion_type_t) */
	int keep_mimages;		/* 2 */	/* --splitmirrors */
	/* other */			/* 3 */

	/* FIXME Eliminate all cases where more than one of the above are set then use conv_type instead */
	conversion_type_t conv_type;

	int track_changes;	/* CONV_SPLIT_MIRRORS is set */

	int corelog;		/* Equivalent to --mirrorlog core */
	int mirrorlog;		/* Only one of corelog and mirrorlog may be set */

	int mirrors_supplied;	/* When type_str is not set, this may be set with keep_mimages for --splitmirrors */

	const char *type_str;	/* When this is set, mirrors_supplied may optionally also be set */
				/* Holds what you asked for based on --type or other arguments, else "" */

	const struct segment_type *segtype;	/* Holds what segment type you will get */

	int force;		/* count of --force */
	int yes;		/* count of --yes */
	int zero;

	const char *lv_name;
	const char *lv_split_name;	/* --name for the LV detached by --splitmirrors */
	const char *lv_name_full;
	const char *vg_name;
	int wait_completion;	/* set unless --background was given */
	int need_polling;	/* conversion continues asynchronously; poll daemon needed */

	uint32_t region_size;
	unsigned region_size_supplied;	/* --regionsize given explicitly */

	uint32_t mirrors;	/* number of "copies"; may be relative (see mirrors_sign) */
	sign_t mirrors_sign;
	uint32_t stripes;
	uint32_t stripe_size;
	unsigned stripes_supplied;
	unsigned stripe_size_supplied;
	uint32_t read_ahead;

	unsigned target_attr;

	alloc_policy_t alloc;

	int pv_count;		/* PV arguments restricting allocation/removal */
	char **pvs;
	struct dm_list *pvh;

	struct logical_volume *lv_to_poll;
	struct dm_list idls;	/* list of convert_poll_id_list */

	const char *origin_name;
};
/* One queued poll operation; linked into lvconvert_params.idls. */
struct convert_poll_id_list {
	struct dm_list list;
	struct poll_operation_id *id;
	unsigned is_merging_origin:1;		/* LV is a merging snapshot origin */
	unsigned is_merging_origin_thin:1;	/* ...and the merging snapshot is thin */
};
/* FIXME Temporary function until the enum replaces the separate variables */
/* FIXME Temporary function until the enum replaces the separate variables */
static void _set_conv_type(struct lvconvert_params *lp, conversion_type_t conv_type)
{
	conversion_type_t previous = lp->conv_type;

	/* Record the category; complain if one was already decided. */
	lp->conv_type = conv_type;

	if (previous != CONV_OTHER)
		log_error(INTERNAL_ERROR "Changing conv_type from %d to %d.", previous, conv_type);
}
/* True when --type asked for "raid0" or "raid0_meta". */
static int _raid0_type_requested(const char *type_str)
{
	if (!strcmp(type_str, SEG_TYPE_NAME_RAID0))
		return 1;

	return !strcmp(type_str, SEG_TYPE_NAME_RAID0_META);
}
/* mirror/raid* (1,10,4,5,6 and their variants) reshape */
static int _mirror_or_raid_type_requested(struct cmd_context *cmd, const char *type_str)
{
	if (arg_is_set(cmd, mirrors_ARG))
		return 1;

	if (!strcmp(type_str, SEG_TYPE_NAME_MIRROR))
		return 1;

	/* Any raid* except the raid0 variants. */
	return !strncmp(type_str, SEG_TYPE_NAME_RAID, 4) &&
	       !_raid0_type_requested(type_str);
}
/* True when --type asked for "linear". */
static int _linear_type_requested(const char *type_str)
{
	return (strcmp(type_str, SEG_TYPE_NAME_LINEAR) == 0);
}
/* True when --type asked for "striped" or "linear". */
static int _striped_type_requested(const char *type_str)
{
	if (_linear_type_requested(type_str))
		return 1;

	return !strcmp(type_str, SEG_TYPE_NAME_STRIPED);
}
/*
 * Record the --type argument (or "") in lp->type_str and reject
 * segment types lvconvert cannot convert to.  Returns 1 if acceptable.
 */
static int _read_conversion_type(struct cmd_context *cmd,
				 struct lvconvert_params *lp)
{
	const char *type_str = arg_str_value(cmd, type_ARG, "");

	lp->type_str = type_str;
	if (!*type_str)
		return 1;	/* No --type given; nothing to validate. */

	/* FIXME: Check thin-pool and thin more thoroughly! */
	if (_striped_type_requested(type_str) ||
	    !strcmp(type_str, SEG_TYPE_NAME_SNAPSHOT) ||
	    !strncmp(type_str, SEG_TYPE_NAME_RAID, 4) ||
	    !strcmp(type_str, SEG_TYPE_NAME_MIRROR) ||
	    !strcmp(type_str, SEG_TYPE_NAME_CACHE_POOL) ||
	    !strcmp(type_str, SEG_TYPE_NAME_CACHE) ||
	    !strcmp(type_str, SEG_TYPE_NAME_THIN_POOL) ||
	    !strcmp(type_str, SEG_TYPE_NAME_THIN))
		return 1;

	log_error("Conversion using --type %s is not supported.", type_str);

	return 0;
}
/*
 * Parse lvconvert command-line options into @lp and reject mutually
 * exclusive combinations.  Returns 1 on success, 0 on invalid arguments.
 *
 * Fix: the --name fetch and validate_restricted_lvname_param() call for
 * --splitmirrors was duplicated (performed both before and after the
 * other --splitmirrors checks); the redundant first copy is removed.
 */
static int _read_params(struct cmd_context *cmd, struct lvconvert_params *lp)
{
	const char *vg_name = NULL;

	if (!_read_conversion_type(cmd, lp))
		return_0;

	/* Without --background we wait for the conversion to complete. */
	if (!arg_is_set(cmd, background_ARG))
		lp->wait_completion = 1;

	if (arg_is_set(cmd, corelog_ARG))
		lp->corelog = 1;

	if (arg_is_set(cmd, mirrorlog_ARG)) {
		if (lp->corelog) {
			log_error("--mirrorlog and --corelog are incompatible.");
			return 0;
		}
		lp->mirrorlog = 1;
	}

	if (arg_is_set(cmd, trackchanges_ARG))
		lp->track_changes = 1;

	/*
	 * The '--splitmirrors n' argument is equivalent to '--mirrors -n'
	 * (note the minus sign), except that it signifies the additional
	 * intent to keep the mimage that is detached, rather than
	 * discarding it.
	 */
	if (arg_is_set(cmd, splitmirrors_ARG)) {
		if (_mirror_or_raid_type_requested(cmd, lp->type_str)) {
			log_error("--mirrors/--type mirror/--type raid* and --splitmirrors are "
				  "mutually exclusive.");
			return 0;
		}

		if (!arg_is_set(cmd, name_ARG) && !lp->track_changes) {
			log_error("Please name the new logical volume using '--name'");
			return 0;
		}

		/* Validate the optional name for the split-off LV. */
		if ((lp->lv_split_name = arg_str_value(cmd, name_ARG, NULL))) {
			if (!validate_restricted_lvname_param(cmd, &vg_name, &lp->lv_split_name))
				return_0;
		}

		lp->keep_mimages = 1;
		_set_conv_type(lp, CONV_SPLIT_MIRRORS);
		lp->mirrors = arg_uint_value(cmd, splitmirrors_ARG, 0);
		lp->mirrors_sign = SIGN_MINUS;
	}

	/* If no other case was identified, then use of --stripes means --type striped */
	if (!arg_is_set(cmd, type_ARG) && !*lp->type_str &&
	    !lp->mirrorlog && !lp->corelog &&
	    (arg_is_set(cmd, stripes_long_ARG) || arg_is_set(cmd, stripesize_ARG)))
		lp->type_str = SEG_TYPE_NAME_STRIPED;

	if ((arg_is_set(cmd, stripes_long_ARG) || arg_is_set(cmd, stripesize_ARG)) &&
	    !(_mirror_or_raid_type_requested(cmd, lp->type_str) || _striped_type_requested(lp->type_str) ||
	      _raid0_type_requested(lp->type_str) || arg_is_set(cmd, thinpool_ARG))) {
		log_error("--stripes or --stripesize argument is only valid "
			  "with --mirrors/--type mirror/--type raid*/--type striped/--type linear, --repair and --thinpool");
		return 0;
	}

	if (arg_is_set(cmd, mirrors_ARG)) {
		/* --splitmirrors is the mechanism for detaching and keeping a mimage */
		lp->mirrors_supplied = 1;
		lp->mirrors = arg_uint_value(cmd, mirrors_ARG, 0);
		lp->mirrors_sign = arg_sign_value(cmd, mirrors_ARG, SIGN_NONE);
	}

	lp->alloc = (alloc_policy_t) arg_uint_value(cmd, alloc_ARG, ALLOC_INHERIT);

	/*
	 * Final checking of each case:
	 *   lp->keep_mimages
	 *   --type mirror|raid  lp->mirrorlog lp->corelog
	 *   --type raid0|striped
	 */
	switch (lp->conv_type) {
	case CONV_SPLIT_MIRRORS:
		break;

	case CONV_OTHER:
		if (arg_is_set(cmd, regionsize_ARG)) {
			lp->region_size = arg_uint_value(cmd, regionsize_ARG, 0);
			lp->region_size_supplied = 1;
		} else {
			lp->region_size = get_default_region_size(cmd);
			lp->region_size_supplied = 0;
		}

		if (_mirror_or_raid_type_requested(cmd, lp->type_str) ||
		    lp->mirrorlog || lp->corelog) { /* Mirrors (and some RAID functions) */
			if (arg_is_set(cmd, chunksize_ARG)) {
				log_error("--chunksize is only available with snapshots or pools.");
				return 0;
			}

			if (arg_is_set(cmd, zero_ARG)) {
				log_error("--zero is only available with snapshots or pools.");
				return 0;
			}

			/* FIXME man page says in one place that --type and --mirrors can't be mixed */
			if (lp->mirrors_supplied && !lp->mirrors)
				/* down-converting to linear/stripe? */
				lp->type_str = SEG_TYPE_NAME_STRIPED;

		} else if (_raid0_type_requested(lp->type_str) || _striped_type_requested(lp->type_str)) { /* striped or linear or raid0 */
			if (arg_from_list_is_set(cmd, "cannot be used with --type raid0 or --type striped or --type linear",
						 chunksize_ARG, corelog_ARG, mirrors_ARG, mirrorlog_ARG, zero_ARG,
						 -1))
				return_0;
		} /* else segtype will default to current type */
	}

	lp->force = arg_count(cmd, force_ARG);
	lp->yes = arg_count(cmd, yes_ARG);

	return 1;
}
/* Poll-daemon callbacks for the three background conversions lvconvert runs. */
static struct poll_functions _lvconvert_mirror_fns = {
	.poll_progress = poll_mirror_progress,
	.finish_copy = lvconvert_mirror_finish,
};

static struct poll_functions _lvconvert_merge_fns = {
	.poll_progress = poll_merge_progress,
	.finish_copy = lvconvert_merge_finish,
};

static struct poll_functions _lvconvert_thin_merge_fns = {
	.poll_progress = poll_thin_merge_progress,
	.finish_copy = lvconvert_merge_finish,
};
/*
 * Allocate a poll_operation_id ("vg/lv" + uuid) from cmd->mem.
 *
 * Returns NULL on bad arguments, name overflow, or allocation failure.
 * Note: id->lv_name is not a separate allocation - it points into
 * id->display_name, just past the '/' separator.
 */
static struct poll_operation_id *_create_id(struct cmd_context *cmd,
					    const char *vg_name,
					    const char *lv_name,
					    const char *uuid)
{
	struct poll_operation_id *id;
	char lv_full_name[NAME_LEN];

	if (!vg_name || !lv_name || !uuid) {
		log_error(INTERNAL_ERROR "Wrong params for lvconvert _create_id.");
		return NULL;
	}

	if (dm_snprintf(lv_full_name, sizeof(lv_full_name), "%s/%s", vg_name, lv_name) < 0) {
		log_error(INTERNAL_ERROR "Name \"%s/%s\" is too long.", vg_name, lv_name);
		return NULL;
	}

	if (!(id = dm_pool_alloc(cmd->mem, sizeof(*id)))) {
		log_error("Poll operation ID allocation failed.");
		return NULL;
	}

	if (!(id->display_name = dm_pool_strdup(cmd->mem, lv_full_name)) ||
	    !(id->lv_name = strchr(id->display_name, '/')) ||
	    !(id->vg_name = dm_pool_strdup(cmd->mem, vg_name)) ||
	    !(id->uuid = dm_pool_strdup(cmd->mem, uuid))) {
		log_error("Failed to copy one or more poll operation ID members.");
		dm_pool_free(cmd->mem, id);	/* releases everything allocated after id too */
		return NULL;
	}

	id->lv_name++; /* skip over '/' */

	return id;
}
/*
 * Hand the in-progress conversion to the poll daemon, choosing the
 * callback set by operation: thin merge, snapshot merge, or mirror sync.
 */
static int _lvconvert_poll_by_id(struct cmd_context *cmd, struct poll_operation_id *id,
				 unsigned background,
				 int is_merging_origin,
				 int is_merging_origin_thin)
{
	if (test_mode())
		return ECMD_PROCESSED;

	if (!is_merging_origin)
		return poll_daemon(cmd, background, CONVERTING,
				   &_lvconvert_mirror_fns, "Converted", id);

	if (is_merging_origin_thin)
		return poll_daemon(cmd, background, (MERGING | THIN_VOLUME),
				   &_lvconvert_thin_merge_fns, "Merged", id);

	return poll_daemon(cmd, background, (MERGING | SNAPSHOT),
			   &_lvconvert_merge_fns, "Merged", id);
}
/*
 * Build a poll identifier for @lv and start/continue polling its
 * conversion (mirror sync or snapshot/thin merge).
 */
int lvconvert_poll(struct cmd_context *cmd, struct logical_volume *lv,
		   unsigned background)
{
	int merging = 0;
	int merging_thin = 0;
	struct poll_operation_id *id;

	if (!(id = _create_id(cmd, lv->vg->name, lv->name, lv->lvid.s))) {
		log_error("Failed to allocate poll identifier for lvconvert.");
		return ECMD_FAILED;
	}

	/* FIXME: check this in polling instead */
	if (lv_is_merging_origin(lv)) {
		merging = 1;
		merging_thin = seg_is_thin_volume(find_snapshot(lv));
	}

	return _lvconvert_poll_by_id(cmd, id, background, merging, merging_thin);
}
/*
 * Insert a temporary MIRROR_SYNC_LAYER above @lv for resynchronization.
 *
 * The layer is numbered with the same suffix the next "<lv>_mimage_%d"
 * name would get, so layer and mimage names line up.
 * Returns 1 on success, 0 on failure.
 */
static int _insert_lvconvert_layer(struct cmd_context *cmd,
				   struct logical_volume *lv)
{
	char format[NAME_LEN], layer_name[NAME_LEN];
	int i;

	/*
	 * We would like to give the same number for this layer
	 * and the newly added mimage.
	 * However, LV name of newly added mimage is determined *after*
	 * the LV name of this layer is determined.
	 *
	 * So, use generate_lv_name() to generate mimage name first
	 * and take the number from it.
	 */
	if (dm_snprintf(format, sizeof(format), "%s_mimage_%%d", lv->name) < 0) {
		log_error("lvconvert: layer name creation failed.");
		return 0;
	}

	/* Generate the next free mimage name, then parse its number back
	 * out with the very same format string. */
	if (!generate_lv_name(lv->vg, format, layer_name, sizeof(layer_name)) ||
	    sscanf(layer_name, format, &i) != 1) {
		log_error("lvconvert: layer name generation failed.");
		return 0;
	}

	if (dm_snprintf(layer_name, sizeof(layer_name), MIRROR_SYNC_LAYER "_%d", i) < 0) {
		log_error("layer name creation failed.");
		return 0;
	}

	if (!insert_layer_for_lv(cmd, lv, 0, layer_name)) {
		log_error("Failed to insert resync layer");
		return 0;
	}

	return 1;
}
/*
 * Count failed (partial/missing) mirror images of @lv, recursing into
 * temporary mirror layers.  Returns -1 if a non-mirrored segment is met.
 */
static int _failed_mirrors_count(struct logical_volume *lv)
{
	struct lv_segment *seg;
	int count = 0;
	unsigned s;

	dm_list_iterate_items(seg, &lv->segments) {
		if (!seg_is_mirrored(seg))
			return -1;

		for (s = 0; s < seg->area_count; s++) {
			if (seg_type(seg, s) == AREA_LV) {
				if (is_temporary_mirror_layer(seg_lv(seg, s)))
					count += _failed_mirrors_count(seg_lv(seg, s));
				else if (lv_is_partial(seg_lv(seg, s)))
					count++;
			} else if (seg_type(seg, s) == AREA_PV &&
				   is_missing_pv(seg_pv(seg, s)))
				count++;
		}
	}

	return count;
}
static int _failed_logs_count(struct logical_volume *lv)
{
int ret = 0;
unsigned s;
struct logical_volume *log_lv = first_seg(lv)->log_lv;
if (log_lv && lv_is_partial(log_lv)) {
if (lv_is_mirrored(log_lv))
ret += _failed_mirrors_count(log_lv);
else
ret += 1;
}
for (s = 0; s < first_seg(lv)->area_count; s++) {
if (seg_type(first_seg(lv), s) == AREA_LV &&
is_temporary_mirror_layer(seg_lv(first_seg(lv), s)))
ret += _failed_logs_count(seg_lv(first_seg(lv), s));
}
return ret;
}
/*
 * Build a list (allocated from vg->vgmem) of missing PVs in @vg that
 * still have extents allocated.  Returns NULL on allocation failure.
 */
static struct dm_list *_failed_pv_list(struct volume_group *vg)
{
	struct dm_list *failed_pvs;
	struct pv_list *pvl, *new_pvl;

	if (!(failed_pvs = dm_pool_alloc(vg->vgmem, sizeof(*failed_pvs)))) {
		log_error("Allocation of list of failed_pvs failed.");
		return NULL;
	}

	dm_list_init(failed_pvs);

	dm_list_iterate_items(pvl, &vg->pvs) {
		if (!is_missing_pv(pvl->pv))
			continue;

		/*
		 * Finally, --repair will remove empty PVs.
		 * But we only want to remove those which become empty as
		 * output of the repair; do not count those which are
		 * already empty here.
		 * FIXME: code should traverse PV in LV not in whole VG.
		 * FIXME: layer violation? should it depend on vgreduce --removemissing?
		 */
		if (pvl->pv->pe_alloc_count == 0)
			continue;

		if (!(new_pvl = dm_pool_zalloc(vg->vgmem, sizeof(*new_pvl)))) {
			log_error("Allocation of failed_pvs list entry failed.");
			return NULL;
		}
		new_pvl->pv = pvl->pv;
		dm_list_add(failed_pvs, &new_pvl->list);
	}

	return failed_pvs;
}
/* Callback (e.g. for lv_remove_mirrors): is @lv missing underlying devices? */
static int _is_partial_lv(struct logical_volume *lv,
			  void *baton __attribute__((unused)))
{
	return lv_is_partial(lv);
}
/*
 * Walk down the stacked mirror LV to the original mirror LV.
 */
static struct logical_volume *_original_lv(struct logical_volume *lv)
{
	struct logical_volume *cur = lv;
	struct logical_volume *deeper;

	/* Keep descending while a temporary mirror layer exists below. */
	while ((deeper = find_temporary_mirror(cur)))
		cur = deeper;

	return cur;
}
/*
 * Decide whether failed mirror logs/images should be replaced.
 *
 * With --usepolicies the mirror_image/log_fault_policy settings decide:
 * any value other than "remove" requests replacement.  When --force was
 * given (force != PROMPT) nothing is replaced.  With --yes replacement is
 * assumed; otherwise the user is prompted per category.
 */
static void _lvconvert_mirrors_repair_ask(struct cmd_context *cmd,
					  int failed_log, int failed_mirrors,
					  int *replace_log, int *replace_mirrors)
{
	const char *leg_policy, *log_policy;
	int force = arg_count(cmd, force_ARG);
	int yes = arg_count(cmd, yes_ARG);

	if (arg_is_set(cmd, usepolicies_ARG)) {
		leg_policy = find_config_tree_str(cmd, activation_mirror_image_fault_policy_CFG, NULL);
		log_policy = find_config_tree_str(cmd, activation_mirror_log_fault_policy_CFG, NULL);
		/* strcmp() != 0 means the policy is not "remove" -> replace. */
		*replace_mirrors = strcmp(leg_policy, "remove");
		*replace_log = strcmp(log_policy, "remove");
		return;
	}

	if (force != PROMPT) {
		*replace_log = *replace_mirrors = 0;
		return;
	}

	*replace_log = *replace_mirrors = 1;

	if (yes)
		return;

	if (failed_log &&
	    yes_no_prompt("Attempt to replace failed mirror log? [y/n]: ") == 'n')
		*replace_log = 0;

	if (failed_mirrors &&
	    yes_no_prompt("Attempt to replace failed mirror images "
			  "(requires full device resync)? [y/n]: ") == 'n')
		*replace_mirrors = 0;
}
/*
 * _get_log_count
 * @lv: the mirror LV
 *
 * Get the number of on-disk copies of the log.
 *  0  = 'core'
 *  1  = 'disk'
 *  2+ = 'mirrored'
 */
static uint32_t _get_log_count(struct logical_volume *lv)
{
	struct logical_volume *log_lv = first_seg(_original_lv(lv))->log_lv;

	return log_lv ? lv_mirror_count(log_lv) : 0;
}
/*
 * Reduce a *mirrored* mirror log of @lv to @log_count images, removing
 * only images on @operable_pvs.
 *
 * No-op (returns 1) when log_count is 0 (the whole log is removed later),
 * when the log is absent or not mirrored, or when the count already matches.
 */
static int _lv_update_mirrored_log(struct logical_volume *lv,
				   struct dm_list *operable_pvs,
				   int log_count)
{
	int old_log_count;
	struct logical_volume *log_lv;

	/*
	 * When log_count is 0, mirrored log doesn't need to be
	 * updated here but it will be removed later.
	 */
	if (!log_count)
		return 1;

	log_lv = first_seg(_original_lv(lv))->log_lv;
	if (!log_lv || !lv_is_mirrored(log_lv))
		return 1;

	old_log_count = _get_log_count(lv);
	if (old_log_count == log_count)
		return 1;

	/* Reducing redundancy of the log */
	return remove_mirror_images(log_lv, log_count,
				    is_mirror_image_removable,
				    operable_pvs, 0U);
}
/*
 * Convert the mirror log of @lv to hold @log_count on-disk images.
 *
 * log_count == 0 removes the log entirely; a larger count than present
 * adds log redundancy (allocating from @operable_pvs); a smaller count
 * removes log images.  @lp may be NULL (repair path) - then the LV's own
 * region_size and allocation policy are used.  Returns 1 on success.
 */
static int _lv_update_log_type(struct cmd_context *cmd,
			       struct lvconvert_params *lp,
			       struct logical_volume *lv,
			       struct dm_list *operable_pvs,
			       int log_count)
{
	int old_log_count;
	uint32_t region_size = (lp) ? lp->region_size :
		first_seg(lv)->region_size;
	alloc_policy_t alloc = (lp) ? lp->alloc : lv->alloc;
	struct logical_volume *original_lv;
	struct logical_volume *log_lv;

	old_log_count = _get_log_count(lv);
	if (old_log_count == log_count)
		return 1;

	/* Operate on the bottom-most LV of any temporary mirror stack. */
	original_lv = _original_lv(lv);

	/* Remove an existing log completely */
	if (!log_count) {
		if (!remove_mirror_log(cmd, original_lv, operable_pvs,
				       arg_count(cmd, yes_ARG) ||
				       arg_count(cmd, force_ARG)))
			return_0;
		return 1;
	}

	log_lv = first_seg(original_lv)->log_lv;

	/* Adding redundancy to the log */
	if (old_log_count < log_count) {
		if (!(region_size = adjusted_mirror_region_size(cmd, lv->vg->extent_size,
								lv->le_count,
								region_size, 0,
								vg_is_clustered(lv->vg))))
			return_0;

		if (!add_mirror_log(cmd, original_lv, log_count,
				    region_size, operable_pvs, alloc))
			return_0;
		/*
		 * FIXME: This simple approach won't work in cluster mirrors,
		 *	  but it doesn't matter because we don't support
		 *	  mirrored logs in cluster mirrors.
		 */
		if (old_log_count &&
		    !lv_update_and_reload(log_lv))
			return_0;

		return 1;
	}

	/* Reducing redundancy of the log */
	return remove_mirror_images(log_lv, log_count,
				    is_mirror_image_removable, operable_pvs, 1U);
}
/*
 * Remove missing and empty PVs from the VG, if they are also in the
 * provided list.  Commits the metadata when anything was removed.
 */
static void _remove_missing_empty_pv(struct volume_group *vg, struct dm_list *remove_pvs)
{
	struct pv_list *pvl, *pvl_vg, *pvlt;
	int removed = 0;

	if (!remove_pvs)
		return;

	dm_list_iterate_items(pvl, remove_pvs) {
		dm_list_iterate_items_safe(pvl_vg, pvlt, &vg->pvs) {
			/* Only drop a PV that is missing AND fully unallocated. */
			if (!id_equal(&pvl->pv->id, &pvl_vg->pv->id) ||
			    !is_missing_pv(pvl_vg->pv) ||
			    pvl_vg->pv->pe_alloc_count != 0)
				continue;

			/* FIXME: duplication of vgreduce code, move this to library */
			vg->free_count -= pvl_vg->pv->pe_count;
			vg->extent_count -= pvl_vg->pv->pe_count;
			del_pvl_from_vgs(vg, pvl_vg);
			free_pv_fid(pvl_vg->pv);

			removed++;
		}
	}

	if (removed) {
		if (!vg_write(vg) || !vg_commit(vg)) {
			stack;
			return;
		}
		log_warn("WARNING: %d missing and now unallocated Physical Volumes removed from VG.", removed);
	}
}
/*
 * _lvconvert_mirrors_parse_params
 *
 * This function performs the following:
 *  1) Gets the old values of mimage and log counts
 *  2) Parses the CLI args to find the new desired values
 *  3) Adjusts 'lp->mirrors' to the appropriate absolute value.
 *     (Remember, 'lp->mirrors' is specified in terms of the number of "copies"
 *      vs. the number of mimages.  It can also be a relative value.)
 *  4) Sets 'lp->need_polling' if collapsing
 *  5) Validates other mirror params
 *
 * Returns: 1 on success, 0 on error
 */
static int _lvconvert_mirrors_parse_params(struct cmd_context *cmd,
					   struct logical_volume *lv,
					   struct lvconvert_params *lp,
					   uint32_t *old_mimage_count,
					   uint32_t *old_log_count,
					   uint32_t *new_mimage_count,
					   uint32_t *new_log_count)
{
	*old_mimage_count = lv_mirror_count(lv);
	*old_log_count = _get_log_count(lv);

	if (lv->vg->lock_type && !strcmp(lv->vg->lock_type, "sanlock") && lp->keep_mimages) {
		/* FIXME: we need to create a sanlock lock on disk for the new LV. */
		log_error("Unable to split mirrors in VG with lock_type %s", lv->vg->lock_type);
		return 0;
	}

	/*
	 * Adjusting mimage count?
	 * Resolve a possibly relative lp->mirrors to an absolute image count.
	 */
	if (!lp->mirrors_supplied && !lp->keep_mimages)
		lp->mirrors = *old_mimage_count;
	else if (lp->mirrors_sign == SIGN_PLUS)
		lp->mirrors = *old_mimage_count + lp->mirrors;
	else if (lp->mirrors_sign == SIGN_MINUS)
		lp->mirrors = (*old_mimage_count > lp->mirrors) ?
			*old_mimage_count - lp->mirrors: 0;
	else
		lp->mirrors += 1;	/* "copies" -> total image count */

	*new_mimage_count = lp->mirrors;

	/* Too many mimages? */
	if (lp->mirrors > DEFAULT_MIRROR_MAX_IMAGES) {
		log_error("Only up to %d images in mirror supported currently.",
			  DEFAULT_MIRROR_MAX_IMAGES);
		return 0;
	}

	/* Did the user try to subtract more legs than available? */
	if (lp->mirrors < 1) {
		log_error("Unable to reduce images by specified amount - only %d in %s",
			  *old_mimage_count, lv->name);
		return 0;
	}

	/*
	 * FIXME: It would be nice to say what we are adjusting to, but
	 * I really don't know whether to specify the # of copies or mimages.
	 */
	if (*old_mimage_count != *new_mimage_count)
		log_verbose("Adjusting mirror image count of %s", lv->name);

	/* If region size is not given by user - use value from mirror */
	if (lv_is_mirrored(lv) && !lp->region_size_supplied) {
		lp->region_size = first_seg(lv)->region_size;
		log_debug("Copying region size %s from existing mirror.",
			  display_size(lv->vg->cmd, lp->region_size));
	}

	/*
	 * Adjust log type
	 *
	 * If we are converting from a mirror to another mirror or simply
	 * changing the log type, we start by assuming they want the log
	 * type the same and then parse the given args.  OTOH, If we are
	 * converting from linear to mirror, then we start from the default
	 * position that the user would like a 'disk' log.
	 */
	*new_log_count = (*old_mimage_count > 1) ? *old_log_count : 1;
	if (!lp->corelog && !lp->mirrorlog)
		return 1;

	*new_log_count = arg_int_value(cmd, mirrorlog_ARG, lp->corelog ? MIRROR_LOG_CORE : DEFAULT_MIRRORLOG);

	log_verbose("Setting logging type to %s.", get_mirror_log_name(*new_log_count));

	/*
	 * Region size must not change on existing mirrors
	 */
	if (arg_is_set(cmd, regionsize_ARG) && lv_is_mirrored(lv) &&
	    (lp->region_size != first_seg(lv)->region_size)) {
		log_error("Mirror log region size cannot be changed on "
			  "an existing mirror.");
		return 0;
	}

	/*
	 * For the most part, we cannot handle multi-segment mirrors. Bail out
	 * early if we have encountered one.
	 */
	if (lv_is_mirrored(lv) && dm_list_size(&lv->segments) != 1) {
		log_error("Logical volume %s has multiple "
			  "mirror segments.", display_lvname(lv));
		return 0;
	}

	return 1;
}
/*
 * _lvconvert_mirrors_aux
 *
 * Add/remove mirror images and adjust log type.  'operable_pvs'
 * are the set of PVs open to removal or allocation - depending
 * on the operation being performed.
 */
static int _lvconvert_mirrors_aux(struct cmd_context *cmd,
				  struct logical_volume *lv,
				  struct lvconvert_params *lp,
				  struct dm_list *operable_pvs,
				  uint32_t new_mimage_count,
				  uint32_t new_log_count,
				  struct dm_list *pvh)
{
	uint32_t region_size;
	struct lv_segment *seg = first_seg(lv);
	struct logical_volume *layer_lv;
	uint32_t old_mimage_count = lv_mirror_count(lv);
	uint32_t old_log_count = _get_log_count(lv);

	/* Asking for a single image on a non-mirror: nothing to do. */
	if ((lp->mirrors == 1) && !lv_is_mirrored(lv)) {
		log_warn("WARNING: Logical volume %s is already not mirrored.",
			 display_lvname(lv));
		return 1;
	}

	/* Use the requested region size if set, else keep the current one. */
	if (!(region_size = adjusted_mirror_region_size(cmd, lv->vg->extent_size,
							lv->le_count,
							lp->region_size ? : seg->region_size, 0,
							vg_is_clustered(lv->vg))))
		return_0;

	if (lv_component_is_active(lv)) {
		log_error("Cannot convert logical volume %s with active component LV(s).",
			  display_lvname(lv));
		return 0;
	}

	if (!operable_pvs)
		operable_pvs = pvh;

	/*
	 * Up-convert from linear to mirror
	 */
	if (!lv_is_mirrored(lv)) {
		/* FIXME Share code with lvcreate */

		/*
		 * FIXME should we give not only pvh, but also all PVs
		 * currently taken by the mirror? Would make more sense from
		 * user perspective.
		 */
		if (!lv_add_mirrors(cmd, lv, new_mimage_count - 1, lp->stripes,
				    lp->stripe_size, region_size, new_log_count, operable_pvs,
				    lp->alloc, MIRROR_BY_LV))
			return_0;

		if (!arg_is_set(cmd, background_ARG))
			lp->need_polling = 1;

		goto out;
	}

	/*
	 * Up-convert m-way mirror to n-way mirror
	 */
	if (new_mimage_count > old_mimage_count) {
		if (lv_is_not_synced(lv)) {
			log_error("Can't add mirror to out-of-sync mirrored "
				  "LV: use lvchange --resync first.");
			return 0;
		}

		/*
		 * We allow snapshots of mirrors, but for now, we
		 * do not allow up converting mirrors that are under
		 * snapshots.  The layering logic is somewhat complex,
		 * and preliminary test show that the conversion can't
		 * seem to get the correct %'age of completion.
		 */
		if (lv_is_origin(lv)) {
			log_error("Can't add additional mirror images to "
				  "mirror %s which is under snapshots.",
				  display_lvname(lv));
			return 0;
		}

		/*
		 * Is there already a convert in progress?  We do not
		 * currently allow more than one.
		 */
		if (find_temporary_mirror(lv) || lv_is_converting(lv)) {
			log_error("%s is already being converted.  Unable to start another conversion.",
				  display_lvname(lv));
			return 0;
		}

		/*
		 * Log addition/removal should be done before the layer
		 * insertion to make the end result consistent with
		 * linear-to-mirror conversion.
		 */
		if (!_lv_update_log_type(cmd, lp, lv,
					 operable_pvs, new_log_count))
			return_0;

		/* Insert a temporary layer for syncing,
		 * only if the original lv is using disk log. */
		if (seg->log_lv && !_insert_lvconvert_layer(cmd, lv)) {
			log_error("Failed to insert resync layer.");
			return 0;
		}

		/* FIXME: can't have multiple mlogs. force corelog. */
		if (!lv_add_mirrors(cmd, lv,
				    new_mimage_count - old_mimage_count,
				    lp->stripes, lp->stripe_size,
				    region_size, 0U, operable_pvs, lp->alloc,
				    MIRROR_BY_LV)) {
			/* FIXME: failure inside library -> move error path processing into library. */
			/* Undo the temporary layer inserted above before giving up. */
			layer_lv = seg_lv(first_seg(lv), 0);
			if (!remove_layer_from_lv(lv, layer_lv) ||
			    (lv_is_active(lv) && !deactivate_lv(cmd, layer_lv)) ||
			    !lv_remove(layer_lv) ||
			    !vg_write(lv->vg) || !vg_commit(lv->vg)) {
				log_error("ABORTING: Failed to remove "
					  "temporary mirror layer %s.",
					  display_lvname(layer_lv));
				log_error("Manual cleanup with vgcfgrestore "
					  "and dmsetup may be required.");
				return 0;
			}
			return_0;
		}
		if (seg->log_lv)
			lv->status |= CONVERTING;
		lp->need_polling = 1;

		goto out_skip_log_convert;
	}

	/*
	 * Down-convert (reduce # of mimages).
	 */
	if (new_mimage_count < old_mimage_count) {
		uint32_t nmc = old_mimage_count - new_mimage_count;
		uint32_t nlc = (!new_log_count || lp->mirrors == 1) ? 1U : 0U;

		/* FIXME: Why did nlc used to be calculated that way? */

		/* Reduce number of mirrors */
		if (lp->keep_mimages) {
			if (lp->track_changes) {
				log_error("--trackchanges is not available "
					  "to 'mirror' segment type.");
				return 0;
			}
			if (!lv_split_mirror_images(lv, lp->lv_split_name,
						    nmc, operable_pvs))
				return_0;
		} else if (!lv_remove_mirrors(cmd, lv, nmc, nlc,
					      is_mirror_image_removable, operable_pvs, 0))
			return_0;

		goto out; /* Just in case someone puts code between */
	}

out:
	/*
	 * Converting the log type
	 */
	if (lv_is_mirrored(lv) && (old_log_count != new_log_count)) {
		if (!_lv_update_log_type(cmd, lp, lv,
					 operable_pvs, new_log_count))
			return_0;
	}

out_skip_log_convert:

	if (!lv_update_and_reload(lv))
		return_0;

	return 1;
}
/*
 * Remove failed mirror images and failed log devices from @lv.
 *
 * With @force set and every image failed, the whole LV is removed.
 * The log is adjusted first - otherwise the mirror can get stuck
 * during suspend.  Returns 1 on success, 0 on failure.
 *
 * Fix: _failed_mirrors_count(lv) was evaluated twice in the removal
 * condition; the result is now computed once into a local.
 */
int mirror_remove_missing(struct cmd_context *cmd,
			  struct logical_volume *lv, int force)
{
	struct dm_list *failed_pvs;
	int failed_mimages;
	int log_count = _get_log_count(lv) - _failed_logs_count(lv);

	if (!(failed_pvs = _failed_pv_list(lv->vg)))
		return_0;

	if (force && _failed_mirrors_count(lv) == (int) lv_mirror_count(lv)) {
		log_error("No usable images left in %s.", display_lvname(lv));
		return lv_remove_with_dependencies(cmd, lv, DONT_PROMPT, 0);
	}

	/*
	 * We must adjust the log first, or the entire mirror
	 * will get stuck during a suspend.
	 */
	if (!_lv_update_mirrored_log(lv, failed_pvs, log_count))
		return_0;

	failed_mimages = _failed_mirrors_count(lv);
	if (failed_mimages > 0 &&
	    !lv_remove_mirrors(cmd, lv, failed_mimages,
			       log_count ? 0U : 1U,
			       _is_partial_lv, NULL, 0))
		return_0;

	if (lv_is_mirrored(lv) &&
	    !_lv_update_log_type(cmd, NULL, lv, failed_pvs, log_count))
		return_0;

	if (!lv_update_and_reload(lv))
		return_0;

	return 1;
}
/*
 * _lvconvert_mirrors_repair
 *
 * This function operates in two phases.  First, all of the bad
 * devices are removed from the mirror.  Then, if desired by the
 * user, the devices are replaced.
 *
 * 'old_mimage_count' and 'old_log_count' are there so we know
 * what to convert to after the removal of devices.
 */
static int _lvconvert_mirrors_repair(struct cmd_context *cmd,
				     struct logical_volume *lv,
				     struct lvconvert_params *lp,
				     struct dm_list *pvh)
{
	int failed_logs;
	int failed_mimages;
	int replace_logs = 0;
	int replace_mimages = 0;
	uint32_t log_count;

	uint32_t original_mimages = lv_mirror_count(lv);
	uint32_t original_logs = _get_log_count(lv);

	/* Allow activation with missing devices while repairing. */
	cmd->partial_activation = 1;
	lp->need_polling = 0;

	lv_check_transient(lv); /* TODO check this in lib for all commands? */

	if (!lv_is_partial(lv)) {
		log_print_unless_silent("Volume %s is consistent. Nothing to repair.",
					display_lvname(lv));
		return 1;
	}

	failed_mimages = _failed_mirrors_count(lv);
	failed_logs = _failed_logs_count(lv);

	/* Retain existing region size in case we need it later */
	if (!lp->region_size)
		lp->region_size = first_seg(lv)->region_size;

	/* Phase one: strip the failed devices out of the mirror. */
	if (!mirror_remove_missing(cmd, lv, 0))
		return_0;

	if (failed_mimages)
		log_print_unless_silent("Mirror status: %d of %d images failed.",
					failed_mimages, original_mimages);

	/*
	 * Count the failed log devices
	 */
	if (failed_logs)
		log_print_unless_silent("Mirror log status: %d of %d images failed.",
					failed_logs, original_logs);

	/*
	 * Find out our policies
	 */
	_lvconvert_mirrors_repair_ask(cmd, failed_logs, failed_mimages,
				      &replace_logs, &replace_mimages);

	/*
	 * Second phase - replace faulty devices
	 */
	lp->mirrors = replace_mimages ? original_mimages : (original_mimages - failed_mimages);

	/*
	 * It does not make sense to replace the log if the volume is no longer
	 * a mirror.
	 */
	if (lp->mirrors == 1)
		replace_logs = 0;

	log_count = replace_logs ? original_logs : (original_logs - failed_logs);

	/* Retry with progressively smaller targets until one succeeds. */
	while (replace_mimages || replace_logs) {
		log_warn("WARNING: Trying to up-convert to %d images, %d logs.", lp->mirrors, log_count);
		if (_lvconvert_mirrors_aux(cmd, lv, lp, NULL,
					   lp->mirrors, log_count, pvh))
			break;

		if (lp->mirrors > 2)
			--lp->mirrors;
		else if (log_count > 0)
			--log_count;
		else
			break; /* nowhere to go, anymore... */
	}

	if (replace_mimages && lv_mirror_count(lv) != original_mimages)
		log_warn("WARNING: Failed to replace %d of %d images in volume %s.",
			 original_mimages - lv_mirror_count(lv), original_mimages,
			 display_lvname(lv));
	if (replace_logs && _get_log_count(lv) != original_logs)
		log_warn("WARNING: Failed to replace %d of %d logs in volume %s.",
			 original_logs - _get_log_count(lv), original_logs,
			 display_lvname(lv));

	/* if (!arg_is_set(cmd, use_policies_ARG) && (lp->mirrors != old_mimage_count
						   || log_count != old_log_count))
		return 0; */

	return 1;
}
/*
 * Reject conversions of thin pools and thin volumes to lp->segtype,
 * printing advice for converting a pool's data/metadata sub-LVs instead.
 * Returns 1 when @lv is not thin (conversion may proceed), 0 otherwise.
 */
static int _lvconvert_validate_thin(struct logical_volume *lv,
				    struct lvconvert_params *lp)
{
	int is_pool = lv_is_thin_pool(lv);

	if (!is_pool && !lv_is_thin_volume(lv))
		return 1;

	log_error("Converting thin%s segment type for %s to %s is not supported.",
		  is_pool ? " pool" : "",
		  display_lvname(lv), lp->segtype->name);

	if (lv_is_thin_volume(lv))
		return 0;

	/* Give advice for thin pool conversion */
	log_error("For pool data volume conversion use %s.",
		  display_lvname(seg_lv(first_seg(lv), 0)));
	log_error("For pool metadata volume conversion use %s.",
		  display_lvname(first_seg(lv)->metadata_lv));

	return 0;
}
/* Check for raid1 split trackchanges image to reject conversions on it. */
/*
 * Returns 1 when conversion may proceed, 0 when @lv is a tracking raid1
 * LV or a tracked raid1 sub-image.
 *
 * Fix: the dm_strncpy() size was off by one.  dm_strncpy() NUL-terminates
 * within the given size, so copying the RaidLV name prefix of length
 * (s - lv->name) requires a size of (s - lv->name + 1); the previous
 * size silently dropped the prefix's last character, breaking find_lv().
 */
static int _raid_split_image_conversion(struct logical_volume *lv)
{
	const char *s;
	char raidlv_name[NAME_LEN];
	const struct logical_volume *tmp_lv;

	if (lv_is_raid_with_tracking(lv)) {
		log_error("Conversion of tracking raid1 LV %s is not supported.",
			  display_lvname(lv));
		return 0;
	}

	if (lv_is_raid_image(lv) &&
	    (s = strstr(lv->name, "_rimage_"))) {
		/* Copy the parent RaidLV name (everything before "_rimage_"). */
		(void) dm_strncpy(raidlv_name, lv->name, s - lv->name + 1);

		if (!(tmp_lv = find_lv(lv->vg, raidlv_name))) {
			log_error("Failed to find RaidLV of RAID subvolume %s.",
				  display_lvname(lv));
			return 0;
		}

		if (lv_is_raid_with_tracking(tmp_lv)) {
			log_error("Conversion of tracked raid1 subvolume %s is not supported.",
				  display_lvname(lv));
			return 0;
		}
	}

	return 1;
}
/*
 * _lvconvert_mirrors
 *
 * Determine what is being done.  Are we doing a conversion, repair, or
 * collapsing a stack?  Once determined, call helper functions.
 */
static int _lvconvert_mirrors(struct cmd_context *cmd,
			      struct logical_volume *lv,
			      struct lvconvert_params *lp)
{
	uint32_t old_mimage_count = 0;
	uint32_t old_log_count = 0;
	uint32_t new_mimage_count = 0;
	uint32_t new_log_count = 0;

	if (!_raid_split_image_conversion(lv))
		return_0;

	if ((lp->corelog || lp->mirrorlog) && *lp->type_str && strcmp(lp->type_str, SEG_TYPE_NAME_MIRROR)) {
		log_error("--corelog and --mirrorlog are only compatible with mirror devices.");
		return 0;
	}

	if (!_lvconvert_validate_thin(lv, lp))
		return_0;

	if (lv_is_thin_type(lv)) {
		log_error("Mirror segment type cannot be used for thinpool%s.\n"
			  "Try \"%s\" segment type instead.",
			  lv_is_thin_pool_data(lv) ? "s" : " metadata",
			  SEG_TYPE_NAME_RAID1);
		return 0;
	}

	if (lv_is_cache_type(lv)) {
		log_error("Mirrors are not yet supported on cache LVs %s.",
			  display_lvname(lv));
		return 0;
	}

	/* --type linear means converting down to a single image. */
	if (_linear_type_requested(lp->type_str)) {
		if (arg_is_set(cmd, mirrors_ARG) && (arg_uint_value(cmd, mirrors_ARG, 0) != 0)) {
			log_error("Cannot specify mirrors with linear type.");
			return 0;
		}
		lp->mirrors_supplied = 1;
		lp->mirrors = 0;
	}

	/* Adjust mimage and/or log count */
	if (!_lvconvert_mirrors_parse_params(cmd, lv, lp,
					     &old_mimage_count, &old_log_count,
					     &new_mimage_count, &new_log_count))
		return_0;

	/* Growing images while shrinking logs (or vice versa) cannot honour
	 * a single user-supplied PV list for both allocation and freeing. */
	if (((old_mimage_count < new_mimage_count && old_log_count > new_log_count) ||
	     (old_mimage_count > new_mimage_count && old_log_count < new_log_count)) &&
	    lp->pv_count) {
		log_error("Cannot both allocate and free extents when "
			  "specifying physical volumes to use.");
		log_error("Please specify the operation in two steps.");
		return 0;
	}

	/* Nothing to do?  (Probably finishing collapse.) */
	if ((old_mimage_count == new_mimage_count) &&
	    (old_log_count == new_log_count))
		return 1;

	if (!_lvconvert_mirrors_aux(cmd, lv, lp, NULL,
				    new_mimage_count, new_log_count, lp->pvh))
		return_0;

	if (!lp->need_polling)
		log_print_unless_silent("Logical volume %s converted.",
					display_lvname(lv));
	else
		log_print_unless_silent("Logical volume %s being converted.",
					display_lvname(lv));

	return 1;
}
/*
 * Decide whether a direct conversion between the two segment types is
 * acceptable for the raid conversion path.  Returns 1 when acceptable,
 * 0 when the caller should fall back to takeover/reshape handling.
 */
static int _is_valid_raid_conversion(const struct segment_type *from_segtype,
				     const struct segment_type *to_segtype)
{
	if (!from_segtype)
		return 1;

	/* linear/striped/raid0 <-> striped/raid0/linear (restriping via raid) */
	if (segtype_is_striped(from_segtype) && segtype_is_striped(to_segtype))
		return 0;

	if (from_segtype == to_segtype)
		return 1;

	/* return_0 (with stack trace) - neither side involves RAID. */
	if (!segtype_is_raid(from_segtype) && !segtype_is_raid(to_segtype))
		return_0; /* Not converting to or from RAID? */

	return 1;
}
/* Check for dm-raid target supporting raid4 conversion properly. */
static int _raid4_conversion_supported(struct logical_volume *lv, struct lvconvert_params *lp)
{
int ret = 1;
struct lv_segment *seg = first_seg(lv);
if (seg_is_raid4(seg))
ret = raid4_is_supported(lv->vg->cmd, seg->segtype);
else if (segtype_is_raid4(lp->segtype))
ret = raid4_is_supported(lv->vg->cmd, lp->segtype);
if (ret)
return 1;
log_error("Cannot convert %s LV %s to %s.",
lvseg_name(seg), display_lvname(lv), lp->segtype->name);
return 0;
}
/*
 * Handle raid-related conversions: raid1 image count changes, image
 * splitting/tracking, and segment type changes (takeover/reshape).
 * Falls through to try_new_takeover_or_reshape for cases the direct
 * paths cannot handle.
 */
static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *lp)
{
	int image_count = 0;
	int images_reduced = 0;
	int type_enforced = 0;
	struct cmd_context *cmd = lv->vg->cmd;
	struct lv_segment *seg = first_seg(lv);

	if (!_raid_split_image_conversion(lv))
		return_0;

	/* "--type linear" is handled as a request for zero extra images. */
	if (_linear_type_requested(lp->type_str)) {
		if (arg_is_set(cmd, mirrors_ARG) && (arg_uint_value(cmd, mirrors_ARG, 0) != 0)) {
			log_error("Cannot specify mirrors with linear type.");
			return 0;
		}
		lp->mirrors_supplied = 1;
		lp->mirrors = 0;
	}

	if (!_lvconvert_validate_thin(lv, lp))
		return_0;

	if (!_is_valid_raid_conversion(seg->segtype, lp->segtype) &&
	    !lp->mirrors_supplied)
		goto try_new_takeover_or_reshape;

	if (seg_is_striped(seg) && !lp->mirrors_supplied)
		goto try_new_takeover_or_reshape;

	if (seg_is_linear(seg) && !lp->mirrors_supplied)
		goto try_new_takeover_or_reshape;

	/* Change number of RAID1 images */
	if (lp->mirrors_supplied || lp->keep_mimages) {
		/* Apply relative (+/-) or absolute image count request. */
		image_count = lv_raid_image_count(lv);
		if (lp->mirrors_sign == SIGN_PLUS)
			image_count += lp->mirrors;
		else if (lp->mirrors_sign == SIGN_MINUS)
			image_count -= lp->mirrors;
		else
			image_count = lp->mirrors + 1;

		images_reduced = (image_count < (int) lv_raid_image_count(lv));

		if (image_count < 1) {
			log_error("Unable to %s images by specified amount.",
				  lp->keep_mimages ? "split" : "reduce");
			return 0;
		}

		/* --trackchanges requires --splitmirrors which always has SIGN_MINUS */
		if (lp->track_changes && lp->mirrors != 1) {
			log_error("Exactly one image must be split off from %s when tracking changes.",
				  display_lvname(lv));
			return 0;
		}

		/* Without an explicit --type, image count changes imply raid1. */
		if (!*lp->type_str) {
			lp->type_str = SEG_TYPE_NAME_RAID1;
			if (!(lp->segtype = get_segtype_from_string(lv->vg->cmd, SEG_TYPE_NAME_RAID1)))
				return_0;
			type_enforced = 1;
		}
	}

	if ((lp->corelog || lp->mirrorlog) && strcmp(lp->type_str, SEG_TYPE_NAME_MIRROR)) {
		log_error("--corelog and --mirrorlog are only compatible with mirror devices");
		return 0;
	}

	if (lp->track_changes)
		return lv_raid_split_and_track(lv, lp->yes, lp->pvh);

	if (lp->keep_mimages)
		return lv_raid_split(lv, lp->yes, lp->lv_split_name, image_count, lp->pvh);

	if (lp->mirrors_supplied) {
		if (seg_is_linear(seg) || seg_is_raid1(seg)) { /* ??? */
			if (!*lp->type_str || !strcmp(lp->type_str, SEG_TYPE_NAME_RAID1) || !strcmp(lp->type_str, SEG_TYPE_NAME_LINEAR) ||
			    (!strcmp(lp->type_str, SEG_TYPE_NAME_STRIPED) && image_count == 1)) {
				if (image_count > DEFAULT_RAID1_MAX_IMAGES) {
					log_error("Only up to %u mirrors in %s LV %s supported currently.",
						  DEFAULT_RAID1_MAX_IMAGES, lp->segtype->name, display_lvname(lv));
					return 0;
				}

				if (!seg_is_raid1(seg) && lv_raid_has_integrity(lv)) {
					log_error("Cannot add raid images with integrity for this raid level.");
					return 0;
				}

				/* Keep existing region size unless one was supplied. */
				if (!lv_raid_change_image_count(lv, lp->yes, image_count,
								(lp->region_size_supplied || !seg->region_size) ?
								lp->region_size : seg->region_size , lp->pvh))
					return_0;

				/* Re-add integrity layer to newly added images. */
				if (lv_raid_has_integrity(lv) && !images_reduced) {
					struct integrity_settings *isettings = NULL;
					if (!lv_get_raid_integrity_settings(lv, &isettings))
						return_0;
					if (!lv_add_integrity_to_raid(lv, isettings, lp->pvh, NULL))
						return_0;
				}

				log_print_unless_silent("Logical volume %s successfully converted.",
							display_lvname(lv));

				return 1;
			}
		}
		goto try_new_takeover_or_reshape;
	}

	if ((seg_is_linear(seg) || seg_is_striped(seg) || seg_is_mirrored(seg) || lv_is_raid(lv)) &&
	    (lp->type_str && lp->type_str[0])) {
		/* Activation is required later which precludes existing supported raid0 segment */
		if ((seg_is_any_raid0(seg) || segtype_is_any_raid0(lp->segtype)) &&
		    !(lp->target_attr & RAID_FEATURE_RAID0)) {
			log_error("RAID module does not support RAID0.");
			return 0;
		}

		/* Activation is required later which precludes existing supported raid4 segment */
		if (!_raid4_conversion_supported(lv, lp))
			return_0;

		/* Activation is required later which precludes existing supported raid10 segment */
		if ((seg_is_raid10(seg) || segtype_is_raid10(lp->segtype)) &&
		    !(lp->target_attr & RAID_FEATURE_RAID10)) {
			log_error("RAID module does not support RAID10.");
			return 0;
		}

		if (lv_raid_has_integrity(lv)) {
			/* FIXME: which conversions are happening here? */
			log_error("This conversion is not supported for raid with integrity.");
			return 0;
		}

		/* FIXME This needs changing globally. */
		if (!arg_is_set(cmd, stripes_long_ARG))
			lp->stripes = 0;
		if (!type_enforced && !arg_is_set(cmd, type_ARG))
			lp->segtype = NULL;
		if (!arg_is_set(cmd, regionsize_ARG))
			lp->region_size = 0;

		if (!lv_raid_convert(lv, lp->segtype,
				     lp->yes, lp->force, lp->stripes, lp->stripe_size_supplied, lp->stripe_size,
				     lp->region_size, lp->pvh))
			return_0;

		log_print_unless_silent("Logical volume %s successfully converted.",
					display_lvname(lv));
		return 1;
	}

try_new_takeover_or_reshape:
	if (lv_raid_has_integrity(lv)) {
		/* FIXME: which conversions are happening here? */
		log_error("This conversion is not supported for raid with integrity.");
		return 0;
	}

	if (!_raid4_conversion_supported(lv, lp))
		return 0;

	/* FIXME This needs changing globally. */
	if (!arg_is_set(cmd, stripes_long_ARG))
		lp->stripes = 0;
	if (!type_enforced && !arg_is_set(cmd, type_ARG))
		lp->segtype = NULL;

	/* Keep existing region size unless one was supplied. */
	if (!lv_raid_convert(lv, lp->segtype,
			     lp->yes, lp->force, lp->stripes, lp->stripe_size_supplied, lp->stripe_size,
			     (lp->region_size_supplied || !seg->region_size) ?
			     lp->region_size : seg->region_size , lp->pvh))
		return_0;

	log_print_unless_silent("Logical volume %s successfully converted.",
				display_lvname(lv));
	return 1;
}
/*
* Functions called to perform a specific operation on a specific LV type.
*
* _convert_<lvtype>_<operation>
*
* For cases where an operation does not apply to the LV itself, but
* is implicitly redirected to a sub-LV, these functions locate the
* correct sub-LV and call the operation on that sub-LV. If a sub-LV
* of the proper type is not found, these functions report the error.
*
* FIXME: the _lvconvert_foo() functions can be cleaned up since they
* are now only called for valid combinations of LV type and operation.
* After that happens, the code remaining in those functions can be
* moved into the _convert_lvtype_operation() functions below.
*/
/*
 * Change the number of images in a mirror LV.
 * lvconvert --mirrors Number LV
 *
 * Thin wrapper: all mirror-segtype operations share _lvconvert_mirrors().
 */
static int _convert_mirror_number(struct cmd_context *cmd, struct logical_volume *lv,
				  struct lvconvert_params *lp)
{
	return _lvconvert_mirrors(cmd, lv, lp);
}
/*
 * Split images from a mirror LV and use them to create a new LV.
 * lvconvert --splitmirrors Number LV
 *
 * Required options:
 *   --name Name
 *
 * Thin wrapper: all mirror-segtype operations share _lvconvert_mirrors().
 */
static int _convert_mirror_splitmirrors(struct cmd_context *cmd, struct logical_volume *lv,
					struct lvconvert_params *lp)
{
	return _lvconvert_mirrors(cmd, lv, lp);
}
/*
 * Change the type of log used by a mirror LV.
 * lvconvert --mirrorlog Type LV
 *
 * Thin wrapper: all mirror-segtype operations share _lvconvert_mirrors().
 */
static int _convert_mirror_log(struct cmd_context *cmd, struct logical_volume *lv,
			       struct lvconvert_params *lp)
{
	return _lvconvert_mirrors(cmd, lv, lp);
}
/*
 * Convert mirror LV to linear LV.
 * lvconvert --type linear LV
 *
 * Alternate syntax:
 *   lvconvert --mirrors 0 LV
 *
 * Thin wrapper: all mirror-segtype operations share _lvconvert_mirrors().
 */
static int _convert_mirror_linear(struct cmd_context *cmd, struct logical_volume *lv,
				  struct lvconvert_params *lp)
{
	return _lvconvert_mirrors(cmd, lv, lp);
}
/*
 * Convert mirror LV to raid1 LV.
 * lvconvert --type raid1 LV
 *
 * Thin wrapper: the raid conversion path handles mirror -> raid takeover.
 */
static int _convert_mirror_raid(struct cmd_context *cmd, struct logical_volume *lv,
				struct lvconvert_params *lp)
{
	return _lvconvert_raid(lv, lp);
}
/*
 * Change the number of images in a raid1 LV.
 * lvconvert --mirrors Number LV
 *
 * Thin wrapper: all raid operations share _lvconvert_raid().
 */
static int _convert_raid_number(struct cmd_context *cmd, struct logical_volume *lv,
				struct lvconvert_params *lp)
{
	return _lvconvert_raid(lv, lp);
}
/*
 * Split images from a raid1 LV and use them to create a new LV.
 * lvconvert --splitmirrors Number LV
 *
 * Required options:
 *   --trackchanges | --name Name
 */
static int _convert_raid_splitmirrors(struct cmd_context *cmd, struct logical_volume *lv,
				      struct lvconvert_params *lp)
{
	/* FIXME: split the splitmirrors section out of _lvconvert_raid and call it here. */
	return _lvconvert_raid(lv, lp);
}
/*
 * Convert a raid* LV to use a different raid level.
 * lvconvert --type raid* LV
 *
 * Thin wrapper: all raid operations share _lvconvert_raid().
 */
static int _convert_raid_raid(struct cmd_context *cmd, struct logical_volume *lv,
			      struct lvconvert_params *lp)
{
	return _lvconvert_raid(lv, lp);
}
/*
 * Convert a raid* LV to a mirror LV.
 * lvconvert --type mirror LV
 *
 * Thin wrapper: all raid operations share _lvconvert_raid().
 */
static int _convert_raid_mirror(struct cmd_context *cmd, struct logical_volume *lv,
				struct lvconvert_params *lp)
{
	return _lvconvert_raid(lv, lp);
}
/*
 * Convert a raid* LV to a striped LV.
 * lvconvert --type striped LV
 *
 * Thin wrapper: all raid operations share _lvconvert_raid().
 */
static int _convert_raid_striped(struct cmd_context *cmd, struct logical_volume *lv,
				 struct lvconvert_params *lp)
{
	return _lvconvert_raid(lv, lp);
}
/*
 * Convert a raid* LV to a linear LV.
 * lvconvert --type linear LV
 *
 * Thin wrapper: all raid operations share _lvconvert_raid().
 */
static int _convert_raid_linear(struct cmd_context *cmd, struct logical_volume *lv,
				struct lvconvert_params *lp)
{
	return _lvconvert_raid(lv, lp);
}
/*
 * Convert a striped/linear LV to a mirror LV.
 * lvconvert --type mirror LV
 *
 * Required options:
 *   --mirrors Number
 *
 * Alternate syntax:
 * This is equivalent to above when global/mirror_segtype_default="mirror".
 *   lvconvert --mirrors Number LV
 *
 * Thin wrapper: all mirror-segtype operations share _lvconvert_mirrors().
 */
static int _convert_striped_mirror(struct cmd_context *cmd, struct logical_volume *lv,
				   struct lvconvert_params *lp)
{
	return _lvconvert_mirrors(cmd, lv, lp);
}
/*
 * Convert a striped/linear LV to a raid* LV.
 * lvconvert --type raid* LV
 *
 * Required options:
 *   --mirrors Number
 *
 * Alternate syntax:
 * This is equivalent to above when global/mirror_segtype_default="raid1".
 *   lvconvert --mirrors Number LV
 *
 * Thin wrapper: all raid operations share _lvconvert_raid().
 */
static int _convert_striped_raid(struct cmd_context *cmd, struct logical_volume *lv,
				 struct lvconvert_params *lp)
{
	return _lvconvert_raid(lv, lp);
}
/*
 * Dispatch an operation on a mirror LV to the matching helper.
 * The order of the checks below defines option precedence.
 */
static int _convert_mirror(struct cmd_context *cmd, struct logical_volume *lv,
			   struct lvconvert_params *lp)
{
	if (arg_is_set(cmd, mirrors_ARG))
		return _convert_mirror_number(cmd, lv, lp);

	if (arg_is_set(cmd, splitmirrors_ARG))
		return _convert_mirror_splitmirrors(cmd, lv, lp);

	if (arg_is_set(cmd, mirrorlog_ARG) || arg_is_set(cmd, corelog_ARG))
		return _convert_mirror_log(cmd, lv, lp);

	if (_linear_type_requested(lp->type_str))
		return _convert_mirror_linear(cmd, lv, lp);

	if (segtype_is_raid(lp->segtype))
		return _convert_mirror_raid(cmd, lv, lp);

	log_error("Unknown operation on mirror LV %s.", display_lvname(lv));
	return 0;
}
/*
 * Dispatch an operation on a raid LV to the matching helper.
 * The order of the checks below defines option precedence.
 */
static int _convert_raid(struct cmd_context *cmd, struct logical_volume *lv,
			 struct lvconvert_params *lp)
{
	if (arg_is_set(cmd, mirrors_ARG))
		return _convert_raid_number(cmd, lv, lp);

	if (arg_is_set(cmd, splitmirrors_ARG))
		return _convert_raid_splitmirrors(cmd, lv, lp);

	if (segtype_is_raid(lp->segtype))
		return _convert_raid_raid(cmd, lv, lp);

	if (segtype_is_mirror(lp->segtype))
		return _convert_raid_mirror(cmd, lv, lp);

	if (!strcmp(lp->type_str, SEG_TYPE_NAME_STRIPED))
		return _convert_raid_striped(cmd, lv, lp);

	if (_linear_type_requested(lp->type_str))
		return _convert_raid_linear(cmd, lv, lp);

	log_error("Unknown operation on raid LV %s.", display_lvname(lv));
	return 0;
}
/*
 * Dispatch an operation on a striped/linear LV.  --mirrors without --type
 * is resolved via the global/mirror_segtype_default config setting.
 */
static int _convert_striped(struct cmd_context *cmd, struct logical_volume *lv,
			    struct lvconvert_params *lp)
{
	const char *mirrors_type = find_config_tree_str(cmd, global_mirror_segtype_default_CFG, NULL);
	/* Any "raid*" type string selects the raid path. */
	int raid_type = *lp->type_str && !strncmp(lp->type_str, "raid", 4);

	if (!raid_type) {
		if (!strcmp(lp->type_str, SEG_TYPE_NAME_MIRROR))
			return _convert_striped_mirror(cmd, lv, lp);

		/* --mirrors can mean --type mirror or --type raid1 depending on config setting. */
		if (arg_is_set(cmd, mirrors_ARG) && mirrors_type && !strcmp(mirrors_type, SEG_TYPE_NAME_MIRROR))
			return _convert_striped_mirror(cmd, lv, lp);
	}

	if (arg_is_set(cmd, mirrors_ARG) && mirrors_type && !strcmp(mirrors_type, SEG_TYPE_NAME_RAID1))
		return _convert_striped_raid(cmd, lv, lp);

	if (segtype_is_striped(lp->segtype) || segtype_is_raid(lp->segtype))
		return _convert_striped_raid(cmd, lv, lp);

	log_error("Unknown operation on striped or linear LV %s.", display_lvname(lv));
	return 0;
}
/*
 * Top-level entry for mirror/raid/striped type conversions: validates the
 * requested segtype and stripe parameters, then dispatches on the current
 * LV type.  Returns ECMD_PROCESSED or ECMD_FAILED.
 */
static int _lvconvert_raid_types(struct cmd_context *cmd, struct logical_volume *lv,
				 struct lvconvert_params *lp)
{
	struct lv_segment *seg = first_seg(lv);
	int ret = 0;

	/* If LV is inactive here, ensure it's not active elsewhere. */
	if (!lockd_lv(cmd, lv, "ex", 0))
		return_ECMD_FAILED;

	/* Set up segtype either from type_str or else to match the existing one. */
	if (!*lp->type_str)
		lp->segtype = seg->segtype;
	else if (!(lp->segtype = get_segtype_from_string(cmd, lp->type_str)))
		goto_out;

	if (!strcmp(lp->type_str, SEG_TYPE_NAME_MIRROR)) {
		if (!lp->mirrors_supplied && !seg_is_raid1(seg)) {
			log_error("Conversions to --type mirror require -m/--mirrors");
			goto out;
		}
	}

	/* lv->segtype can't be NULL */
	if (activation() && lp->segtype->ops->target_present &&
	    !lp->segtype->ops->target_present(cmd, NULL, &lp->target_attr)) {
		log_error("%s: Required device-mapper target(s) not "
			  "detected in your kernel.", lp->segtype->name);
		goto out;
	}

	/* Process striping parameters */
	/* FIXME This is incomplete */
	if (_mirror_or_raid_type_requested(cmd, lp->type_str) || _raid0_type_requested(lp->type_str) ||
	    _striped_type_requested(lp->type_str) || lp->mirrorlog || lp->corelog) {
		if (!arg_is_set(cmd, type_ARG))
			lp->segtype = first_seg(lv)->segtype;

		/* FIXME Handle +/- adjustments too? */
		if (!get_stripe_params(cmd, lp->segtype, &lp->stripes, &lp->stripe_size, &lp->stripes_supplied, &lp->stripe_size_supplied))
			goto_out;

		if (_raid0_type_requested(lp->type_str) || _striped_type_requested(lp->type_str))
			/* FIXME Shouldn't need to override get_stripe_params which defaults to 1 stripe (i.e. linear)! */
			/* The default keeps existing number of stripes, handled inside the library code */
			if (!arg_is_set(cmd, stripes_long_ARG))
				lp->stripes = 0;
	}

	/* For a cached LV, operate on its origin (first sub LV). */
	if (lv_is_cache(lv))
		lv = seg_lv(first_seg(lv), 0);

	/* For a VDO pool, recurse onto its data sub LV. */
	if (lv_is_vdo_pool(lv))
		return _lvconvert_raid_types(cmd, seg_lv(first_seg(lv), 0), lp);

	if (lv_is_mirror(lv)) {
		ret = _convert_mirror(cmd, lv, lp);
		goto out;
	}

	if (lv_is_raid(lv)) {
		ret = _convert_raid(cmd, lv, lp);
		goto out;
	}

	/*
	 * FIXME: add lv_is_striped() and lv_is_linear()?
	 * This does not include raid0 which is caught by the test above.
	 * If operations differ between striped and linear, split this case.
	 */
	if (segtype_is_striped(seg->segtype) || segtype_is_linear(seg->segtype)) {
		ret = _convert_striped(cmd, lv, lp);
		goto out;
	}

	/*
	 * The intention is to explicitly check all cases above and never
	 * reach here, but this covers anything that was missed.
	 */
	log_error("Cannot convert LV %s.", display_lvname(lv));
out:
	return ret ? ECMD_PROCESSED : ECMD_FAILED;
}
/*
 * Split a COW snapshot LV from its origin, leaving two independent LVs.
 * Prompts before splitting an active, in-use snapshot unless forced.
 */
static int _lvconvert_splitsnapshot(struct cmd_context *cmd, struct logical_volume *cow)
{
	struct volume_group *vg = cow->vg;
	const char *cow_name = display_lvname(cow);

	if (!lv_is_cow(cow)) {
		log_error(INTERNAL_ERROR "Volume %s is not a COW.", cow_name);
		return 0;
	}

	if (lv_is_virtual_origin(origin_from_cow(cow))) {
		log_error("Unable to split off snapshot %s with virtual origin.", cow_name);
		return 0;
	}

	if (vg_is_shared(vg)) {
		/* FIXME: we need to create a lock for the new LV. */
		log_error("Unable to split snapshots in VG with lock_type %s.", vg->lock_type);
		return 0;
	}

	if (lv_is_active(cow)) {
		if (!lv_check_not_in_use(cow, 1))
			return_0;

		/* Only prompt when --force was not given and --yes is absent. */
		if ((arg_count(cmd, force_ARG) == PROMPT) &&
		    !arg_count(cmd, yes_ARG) &&
		    lv_is_visible(cow) &&
		    lv_is_active(cow)) {
			if (yes_no_prompt("Do you really want to split off active "
					  "logical volume %s? [y/n]: ", display_lvname(cow)) == 'n') {
				log_error("Logical volume %s not split.", display_lvname(cow));
				return 0;
			}
		}
	}

	log_verbose("Splitting snapshot %s from its origin.", display_lvname(cow));

	if (!vg_remove_snapshot(cow))
		return_0;

	log_print_unless_silent("Logical Volume %s split from its origin.", display_lvname(cow));

	return 1;
}
/*
 * Detach a cachevol from a cached LV, keeping the cachevol as an
 * independent LV (renamed to drop its "_cvol" suffix).
 * A writeback cache is flushed first unless forced detach applies.
 */
static int _lvconvert_split_and_keep_cachevol(struct cmd_context *cmd,
					      struct logical_volume *lv,
					      struct logical_volume *lv_fast)
{
	char cvol_name[NAME_LEN];
	struct lv_segment *cache_seg = first_seg(lv);
	int cache_mode = cache_seg->cache_mode;
	int direct_detach = 0;

	if (!archive(lv->vg))
		return_0;

	log_debug("Detaching cachevol %s from LV %s.", display_lvname(lv_fast), display_lvname(lv));

	/*
	 * Allow forcible detach without activating or flushing
	 * in case the cache is corrupt/damaged/invalid.
	 * This would generally be done to rescue data from
	 * the origin if the cache could not be repaired.
	 */
	if (!lv_is_active(lv) && arg_count(cmd, force_ARG))
		direct_detach = 1;

	/*
	 * Detaching a writeback cache generally requires flushing;
	 * doing otherwise can mean data loss/corruption.
	 * If the cache devices are missing, the cache can't be
	 * flushed, so require the user to use a force option to
	 * detach the cache in this case.
	 */
	if ((cache_mode != CACHE_MODE_WRITETHROUGH) && lv_is_partial(lv_fast)) {
		if (!arg_count(cmd, force_ARG)) {
			log_warn("WARNING: writeback cache on %s is not complete and cannot be flushed.", display_lvname(lv_fast));
			log_warn("WARNING: cannot detach writeback cache from %s without --force.", display_lvname(lv));
			log_error("Conversion aborted.");
			return 0;
		}
		direct_detach = 1;
	}

	if (direct_detach) {
		log_warn("WARNING: Data may be lost by detaching writeback cache without flushing.");

		if (!arg_count(cmd, yes_ARG) &&
		    yes_no_prompt("Detach writeback cache %s from %s without flushing data?",
				  display_lvname(lv_fast), display_lvname(lv)) == 'n') {
			log_error("Conversion aborted.");
			return 0;
		}

		/* Switch internally to WRITETHROUGH which does not require flushing */
		cache_seg->cache_mode = CACHE_MODE_WRITETHROUGH;
	}

	if (!lv_cache_remove(lv))
		return_0;

	/* Cut off suffix _cvol */
	if (!drop_lvname_suffix(cvol_name, lv_fast->name, "cvol")) {
		/* likely older instance of metadata */
		log_debug("LV %s has no suffix for cachevol (skipping rename).",
			  display_lvname(lv_fast));
	} else if (!lv_uniq_rename_update(cmd, lv_fast, cvol_name, 0))
		return_0;

	if (!vg_write(lv->vg) || !vg_commit(lv->vg))
		return_0;

	return 1;
}
/*
 * Detach a cachevol from a cached LV and then delete the detached
 * cachevol LV entirely (--uncache behaviour for cachevols).
 */
static int _lvconvert_split_and_remove_cachevol(struct cmd_context *cmd,
						struct logical_volume *lv,
						struct logical_volume *lv_fast)
{
	if (!_lvconvert_split_and_keep_cachevol(cmd, lv, lv_fast))
		return_0;

	if (lvremove_single(cmd, lv_fast, NULL) != ECMD_PROCESSED)
		return_0;

	return 1;
}
/*
 * Detach a cache pool from a cached LV, keeping the pool as an unused
 * LV (renamed to drop its "_cpool" suffix).  Refuses while PVs are
 * missing since the cache could not be cleanly flushed/detached.
 */
static int _lvconvert_split_and_keep_cachepool(struct cmd_context *cmd,
					       struct logical_volume *lv,
					       struct logical_volume *lv_fast)
{
	char name[NAME_LEN];

	if (!archive(lv->vg))
		return_0;

	log_debug("Detaching cachepool %s from LV %s.", display_lvname(lv_fast), display_lvname(lv));

	if (vg_missing_pv_count(lv->vg)) {
		log_error("Cannot split cache pool while PVs are missing, see --uncache to delete cache pool.");
		return 0;
	}

	if (!lv_cache_remove(lv))
		return_0;

	/* Cut off suffix _cpool */
	if (!drop_lvname_suffix(name, lv_fast->name, "cpool")) {
		/* likely older instance of metadata */
		log_debug("LV %s has no suffix for cachepool (skipping rename).",
			  display_lvname(lv_fast));
	} else if (!lv_uniq_rename_update(cmd, lv_fast, name, 0))
		return_0;

	if (!vg_write(lv->vg) || !vg_commit(lv->vg))
		return_0;

	log_print_unless_silent("Logical volume %s is not cached and %s is unused.",
				display_lvname(lv), display_lvname(lv_fast));

	return 1;
}
/*
 * Uncache an LV by removing its cache pool (and, when the origin itself
 * is missing, the whole cached LV).  Warns/prompts about missing
 * components and potential data loss from an unflushed writeback cache.
 */
static int _lvconvert_split_and_remove_cachepool(struct cmd_context *cmd,
						 struct logical_volume *lv,
						 struct logical_volume *cachepool_lv)
{
	struct lv_segment *seg;
	struct logical_volume *remove_lv;

	seg = first_seg(lv);

	if (lv_is_partial(seg_lv(seg, 0))) {
		log_warn("WARNING: Cache origin logical volume %s is missing.",
			 display_lvname(seg_lv(seg, 0)));
		remove_lv = lv; /* When origin is missing, drop everything */
	} else
		remove_lv = seg->pool_lv;

	if (lv_is_partial(seg_lv(first_seg(seg->pool_lv), 0)))
		log_warn("WARNING: Cache pool data logical volume %s is missing.",
			 display_lvname(seg_lv(first_seg(seg->pool_lv), 0)));

	if (lv_is_partial(first_seg(seg->pool_lv)->metadata_lv))
		log_warn("WARNING: Cache pool metadata logical volume %s is missing.",
			 display_lvname(first_seg(seg->pool_lv)->metadata_lv));

	/* TODO: Check for failed cache as well to get prompting? */
	if (lv_is_partial(lv)) {
		/* Unflushed writeback data may be lost - require --force. */
		if (first_seg(seg->pool_lv)->cache_mode != CACHE_MODE_WRITETHROUGH) {
			if (!arg_count(cmd, force_ARG)) {
				log_error("Conversion aborted.");
				log_error("Cannot uncache writeback cache volume %s without --force.",
					  display_lvname(lv));
				return 0;
			}
			log_warn("WARNING: Uncaching of partially missing %s cache volume %s might destroy your data.",
				 cache_mode_num_to_str(first_seg(seg->pool_lv)->cache_mode), display_lvname(lv));
		}

		if (!arg_count(cmd, yes_ARG) &&
		    yes_no_prompt("Do you really want to uncache %s with missing LVs? [y/n]: ",
				  display_lvname(lv)) == 'n') {
			log_error("Conversion aborted.");
			return 0;
		}
	}

	if (lvremove_single(cmd, remove_lv, NULL) != ECMD_PROCESSED)
		return_0;

	if (remove_lv != lv)
		log_print_unless_silent("Logical volume %s is not cached.", display_lvname(lv));

	return 1;
}
/*
 * Convert @lv into a snapshot COW (exception store) of @origin_name.
 * The LV's existing content is destroyed (optionally wiped) and the LV
 * is attached to the origin as an old-style snapshot.
 */
static int _lvconvert_snapshot(struct cmd_context *cmd,
			       struct logical_volume *lv,
			       const char *origin_name)
{
	struct logical_volume *org;
	const char *snap_name;
	uint32_t chunk_size;
	int zero;

	if (!(snap_name = dm_pool_strdup(lv->vg->vgmem, (display_lvname(lv) ? : ""))))
		return_0;

	if (strcmp(lv->name, origin_name) == 0) {
		log_error("Unable to use %s as both snapshot and origin.", snap_name);
		return 0;
	}

	/* Chunk size bounds 8..1024 correspond to the 4K..512K message
	 * below - presumably units of 512-byte sectors; TODO confirm. */
	chunk_size = arg_uint_value(cmd, chunksize_ARG, 8);
	if (chunk_size < 8 || chunk_size > 1024 || !is_power_of_2(chunk_size)) {
		log_error("Chunk size must be a power of 2 in the range 4K to 512K.");
		return 0;
	}

	if (!cow_has_min_chunks(lv->vg, lv->le_count, chunk_size))
		return_0;

	log_verbose("Setting chunk size to %s.", display_size(cmd, chunk_size));

	if (!(org = find_lv(lv->vg, origin_name))) {
		log_error("Couldn't find origin volume %s in Volume group %s.",
			  origin_name, lv->vg->name);
		return 0;
	}

	/*
	 * check_lv_rules() checks cannot be done via command definition
	 * rules because this LV is not processed by process_each_lv.
	 */

	/*
	 * check_lv_types() checks cannot be done via command definition
	 * LV_foo specification because this LV is not processed by process_each_lv.
	 */
	if (!validate_snapshot_origin(org))
		return_0;

	if (lv_component_is_active(org)) {
		log_error("Cannot use logical volume %s with active component LVs for snapshot origin.",
			  display_lvname(org));
		return 0;
	}

	log_warn("WARNING: Converting logical volume %s to snapshot exception store.",
		 snap_name);
	log_warn("THIS WILL DESTROY CONTENT OF LOGICAL VOLUME (filesystem etc.)");

	if (!arg_count(cmd, yes_ARG) &&
	    yes_no_prompt("Do you really want to convert %s? [y/n]: ",
			  snap_name) == 'n') {
		log_error("Conversion aborted.");
		return 0;
	}

	if (!deactivate_lv(cmd, lv)) {
		log_error("Couldn't deactivate logical volume %s.", snap_name);
		return 0;
	}

	/* Wipe the start of the LV unless zeroing is impossible or disabled. */
	if (first_seg(lv)->segtype->flags & SEG_CANNOT_BE_ZEROED)
		zero = 0;
	else
		zero = arg_int_value(cmd, zero_ARG, 1);

	if (!zero || !(lv->status & LVM_WRITE))
		log_warn("WARNING: %s not zeroed.", snap_name);
	else if (!activate_and_wipe_lv(lv, 0)) {
		log_error("Aborting. Failed to wipe snapshot exception store.");
		return 0;
	}

	if (!archive(lv->vg))
		return_0;

	if (!vg_add_snapshot(org, lv, NULL, org->le_count, chunk_size)) {
		log_error("Couldn't create snapshot.");
		return 0;
	}

	/* store vg on disk(s) */
	if (!lv_update_and_reload(org))
		return_0;

	log_print_unless_silent("Logical volume %s converted to snapshot.", snap_name);

	return 1;
}
/*
 * Merge an old-style (COW) snapshot back into its origin.  If the origin
 * or snapshot is in use, the merge is deferred until the origin's next
 * activation; otherwise merging starts immediately and *lv_to_poll is
 * set so the caller can poll merge progress.
 */
static int _lvconvert_merge_old_snapshot(struct cmd_context *cmd,
					 struct logical_volume *lv,
					 struct logical_volume **lv_to_poll)
{
	int merge_on_activate = 0;
	struct logical_volume *origin;
	struct lv_segment *snap_seg = find_snapshot(lv);
	struct lvinfo info;
	dm_percent_t snap_percent;

	if (!snap_seg)
		return_0;

	if (!(origin = origin_from_cow(lv))) {
		log_error(INTERNAL_ERROR "Cannot get origin from %s COW.",
			  display_lvname(lv));
		return 0;
	}

	/* Check if merge is possible */
	if (lv_is_merging_origin(origin)) {
		log_error("Cannot merge snapshot %s into the origin %s "
			  "with merging snapshot %s.",
			  display_lvname(lv), display_lvname(origin),
			  display_lvname(snap_seg->lv));
		return 0;
	}

	if (lv_is_external_origin(origin)) {
		log_error("Cannot merge snapshot %s into "
			  "the read-only external origin %s.",
			  display_lvname(lv), display_lvname(origin));
		return 0;
	}

	if (!(origin->status & LVM_WRITE)) {
		log_error("Cannot merge snapshot %s into "
			  "the read-only origin %s. (Use lvchange -p rw).",
			  display_lvname(lv), display_lvname(origin));
		return 0;
	}

	/* FIXME: test when snapshot is remotely active */
	/* An invalidated (overflowed) snapshot cannot be merged. */
	if (lv_info(cmd, lv, 0, &info, 1, 0)
	    && info.exists && info.live_table &&
	    (!lv_snapshot_percent(lv, &snap_percent) ||
	     snap_percent == DM_PERCENT_INVALID)) {
		log_error("Unable to merge invalidated snapshot LV %s.",
			  display_lvname(lv));
		return 0;
	}

	if (snap_seg->segtype->ops->target_present &&
	    !snap_seg->segtype->ops->target_present(cmd, snap_seg, NULL)) {
		log_error("Can't initialize snapshot merge. "
			  "Missing support in kernel?");
		return 0;
	}

	if (!archive(lv->vg))
		return_0;

	/*
	 * Prevent merge with open device(s) as it would likely lead
	 * to application/filesystem failure.  Merge on origin's next
	 * activation if either the origin or snapshot LV are currently
	 * open.
	 *
	 * FIXME testing open_count is racey; snapshot-merge target's
	 * constructor and DM should prevent appropriate devices from
	 * being open.
	 */
	if (lv_is_active(origin)) {
		if (!lv_check_not_in_use(origin, 0)) {
			log_print_unless_silent("Delaying merge since origin is open.");
			merge_on_activate = 1;
		} else if (!lv_check_not_in_use(lv, 0)) {
			log_print_unless_silent("Delaying merge since snapshot is open.");
			merge_on_activate = 1;
		}
	}

	init_snapshot_merge(snap_seg, origin);

	if (merge_on_activate) {
		/* Store and commit vg but skip starting the merge */
		if (!vg_write(lv->vg) || !vg_commit(lv->vg))
			return_0;
	} else {
		/* Perform merge */
		if (!lv_update_and_reload(origin))
			return_0;

		if (!lv_has_target_type(origin->vg->vgmem, origin, NULL,
					TARGET_NAME_SNAPSHOT_MERGE)) {
			/* Race during table reload prevented merging */
			merge_on_activate = 1;
		} else if (!lv_is_active(origin)) {
			log_print_unless_silent("Conversion starts after activation.");
			merge_on_activate = 1;
		} else {
			*lv_to_poll = origin;
		}
	}

	if (merge_on_activate)
		log_print_unless_silent("Merging of snapshot %s will occur on "
					"next activation of %s.",
					display_lvname(lv), display_lvname(origin));
	else
		log_print_unless_silent("Merging of volume %s started.",
					display_lvname(lv));

	return 1;
}
/*
 * Merge a thin snapshot into its origin.  When both LVs can be
 * deactivated the merge completes immediately by swapping the volumes;
 * otherwise it is deferred to the origin's next activation.
 */
static int _lvconvert_merge_thin_snapshot(struct cmd_context *cmd,
					  struct logical_volume *lv)
{
	int origin_is_active = 0;
	struct lv_segment *snap_seg = first_seg(lv);
	struct logical_volume *origin = snap_seg->origin;

	if (!origin) {
		log_error("%s is not a mergeable logical volume.",
			  display_lvname(lv));
		return 0;
	}

	/* Check if merge is possible */
	if (lv_is_merging_origin(origin)) {
		log_error("Cannot merge snapshot %s into the origin %s "
			  "with merging snapshot %s.",
			  display_lvname(lv), display_lvname(origin),
			  display_lvname(find_snapshot(origin)->lv));
		return 0;
	}

	if (lv_is_external_origin(origin)) {
		if (!(origin = origin_from_cow(lv)))
			log_error(INTERNAL_ERROR "%s is missing origin.",
				  display_lvname(lv));
		else
			log_error("%s is read-only external origin %s.",
				  display_lvname(lv), display_lvname(origin));
		return 0;
	}

	if (lv_is_origin(origin)) {
		log_error("Merging into the old snapshot origin %s is not supported.",
			  display_lvname(origin));
		return 0;
	}

	if (!archive(lv->vg))
		return_0;

	/*
	 * Prevent merge with open device(s) as it would likely lead
	 * to application/filesystem failure.  Merge on origin's next
	 * activation if either the origin or snapshot LV can't be
	 * deactivated.
	 */
	if (!deactivate_lv(cmd, lv))
		log_print_unless_silent("Delaying merge since snapshot is open.");
	else if ((origin_is_active = lv_is_active(origin)) &&
		 !deactivate_lv(cmd, origin))
		log_print_unless_silent("Delaying merge since origin volume is open.");
	else {
		/*
		 * Both thin snapshot and origin are inactive,
		 * replace the origin LV with its snapshot LV.
		 */
		if (!thin_merge_finish(cmd, origin, lv))
			return_0;

		log_print_unless_silent("Volume %s replaced origin %s.",
					display_lvname(origin), display_lvname(lv));

		/* After swap, @lv carries the origin's identity - reactivate it. */
		if (origin_is_active && !activate_lv(cmd, lv)) {
			log_error("Failed to reactivate origin %s.",
				  display_lvname(lv));
			return 0;
		}

		return 1;
	}

	init_snapshot_merge(snap_seg, origin);

	/* Commit vg, merge will start with next activation */
	if (!vg_write(lv->vg) || !vg_commit(lv->vg))
		return_0;

	log_print_unless_silent("Merging of thin snapshot %s will occur on "
				"next activation of %s.",
				display_lvname(lv), display_lvname(origin));
	return 1;
}
static void _swap_lv_uuid(struct logical_volume *lv1, struct logical_volume *lv2)
{
union lvid lvid;
if (lv1 && lv2) {
lvid = lv1->lvid;
lv1->lvid = lv2->lvid;
lv2->lvid = lvid;
}
}
/*
 * Repair the metadata of an inactive thin pool.
 *
 * Runs the configured thin_repair tool, reading from the pool's metadata
 * LV (_tmeta) and writing the repaired metadata into the VG's pool
 * metadata spare LV (_pmspare).  On success the repaired spare is swapped
 * in as the pool's new metadata LV, while the old (unrepaired) metadata
 * is kept as a visible, read-only backup LV named <pool>_meta%d for
 * manual inspection and later removal.
 *
 * cmd               - command context.
 * pool_lv           - thin pool LV to repair (must be inactive).
 * pvh               - PV list usable for allocating a (new) spare LV.
 * poolmetadataspare - whether to allocate a replacement spare afterwards.
 *
 * Returns 1 on success, 0 on failure.
 */
static int _lvconvert_thin_pool_repair(struct cmd_context *cmd,
				       struct logical_volume *pool_lv,
				       struct dm_list *pvh, int poolmetadataspare)
{
	const char *thin_dump =
		find_config_tree_str_allow_empty(cmd, global_thin_dump_executable_CFG, NULL);
	int ret = 0, status;
	int args = 0;
	const char *argv[DEFAULT_MAX_EXEC_ARGS + 7] = { /* Max supported args */
		find_config_tree_str_allow_empty(cmd, global_thin_repair_executable_CFG, NULL)
	};
	char *trans_id_str;
	char meta_path[PATH_MAX];
	char pms_path[PATH_MAX];
	uint64_t trans_id;
	struct logical_volume *pmslv;
	struct logical_volume *mlv = first_seg(pool_lv)->metadata_lv;
	struct pipe_data pdata;
	FILE *f;

	/* Repair is only possible when a thin_repair tool is configured. */
	if (!argv[0] || !*argv[0]) {
		log_error("Thin repair command is not configured. Repair is disabled.");
		return 0;
	}

	/* An active pool would keep the metadata device open/in use. */
	if (thin_pool_is_active(pool_lv)) {
		log_error("Cannot repair active pool %s. Use lvchange -an first.",
			  display_lvname(pool_lv));
		return 0;
	}

	pmslv = pool_lv->vg->pool_metadata_spare_lv;

	/* Check we have pool metadata spare LV */
	if (!handle_pool_metadata_spare(pool_lv->vg, 0, pvh, 1))
		return_0;

	/* Spare was just (re)created - commit metadata before using it. */
	if (pmslv != pool_lv->vg->pool_metadata_spare_lv) {
		if (!vg_write(pool_lv->vg) || !vg_commit(pool_lv->vg))
			return_0;
		pmslv = pool_lv->vg->pool_metadata_spare_lv;
	}

	/* Device paths for the repair tool: input _tmeta, output _pmspare. */
	if (dm_snprintf(meta_path, sizeof(meta_path), "%s%s/%s",
			cmd->dev_dir, mlv->vg->name, mlv->name) < 0) {
		log_error("Failed to build thin metadata path.");
		return 0;
	}

	if (dm_snprintf(pms_path, sizeof(pms_path), "%s%s/%s",
			cmd->dev_dir, pmslv->vg->name, pmslv->name) < 0) {
		log_error("Failed to build pool metadata spare path.");
		return 0;
	}

	/* Build: thin_repair [configured options] -i <_tmeta> -o <_pmspare> */
	if (!prepare_exec_args(cmd, argv, &args, global_thin_repair_options_CFG))
		return_0;

	argv[++args] = "-i";
	argv[++args] = meta_path;
	argv[++args] = "-o";
	argv[++args] = pms_path;

	if (!activate_lv(cmd, pmslv)) {
		log_error("Cannot activate pool metadata spare volume %s.",
			  pmslv->name);
		return 0;
	}

	if (!activate_lv(cmd, mlv)) {
		log_error("Cannot activate thin pool metadata volume %s.",
			  mlv->name);
		goto deactivate_pmslv;
	}

	if (!(ret = exec_cmd(cmd, (const char * const *)argv, &status, 1))) {
		log_error("Repair of thin metadata volume of thin pool %s failed (status:%d). "
			  "Manual repair required!",
			  display_lvname(pool_lv), status);
		goto deactivate_mlv;
	}

	/* Check matching transactionId when thin-pool is used by lvm2 (transactionId != 0) */
	if (first_seg(pool_lv)->transaction_id && thin_dump && thin_dump[0]) {
		/* Reuse argv to run: thin_dump <_pmspare> */
		argv[0] = thin_dump;
		argv[1] = pms_path;
		argv[2] = NULL;

		if (!(f = pipe_open(cmd, argv, 0, &pdata)))
			log_warn("WARNING: Cannot read output from %s %s.", thin_dump, pms_path);
		else {
			/*
			 * Scan only the 1st. line for transaction id.
			 * Watch out, if the thin_dump format changes.
			 * (meta_path is reused here as a scratch read buffer.)
			 */
			if (fgets(meta_path, sizeof(meta_path), f) &&
			    (trans_id_str = strstr(meta_path, "transaction=\"")) &&
			    (sscanf(trans_id_str + 13, FMTu64, &trans_id) == 1) &&
			    (trans_id != first_seg(pool_lv)->transaction_id) &&
			    ((trans_id - 1) != first_seg(pool_lv)->transaction_id)) {
				log_error("Transaction id " FMTu64 " from pool \"%s/%s\" "
					  "does not match repaired transaction id "
					  FMTu64 " from %s.",
					  first_seg(pool_lv)->transaction_id,
					  pool_lv->vg->name, pool_lv->name, trans_id,
					  pms_path);
				ret = 0;
			}

			(void) pipe_close(&pdata); /* killing pipe */
		}
	}

deactivate_mlv:
	if (!deactivate_lv(cmd, mlv)) {
		log_error("Cannot deactivate thin pool metadata volume %s.",
			  display_lvname(mlv));
		ret = 0;
	}

deactivate_pmslv:
	if (!deactivate_lv(cmd, pmslv)) {
		log_error("Cannot deactivate pool metadata spare volume %s.",
			  display_lvname(pmslv));
		ret = 0;
	}

	if (!ret)
		return 0;

	/* Pick a free <pool>_meta%d name for the unrepaired metadata backup.
	 * (meta_path reused as the name template, pms_path gets the result.) */
	if (dm_snprintf(meta_path, sizeof(meta_path), "%s_meta%%d", pool_lv->name) < 0) {
		log_error("Can't prepare new metadata name for %s.", pool_lv->name);
		return 0;
	}

	if (!generate_lv_name(pool_lv->vg, meta_path, pms_path, sizeof(pms_path))) {
		log_error("Can't generate new name for %s.", meta_path);
		return 0;
	}

	/* Release the spare from its special role so it can become _tmeta. */
	if (pmslv == pool_lv->vg->pool_metadata_spare_lv) {
		pool_lv->vg->pool_metadata_spare_lv = NULL;
		pmslv->status &= ~POOL_METADATA_SPARE;
		lv_set_visible(pmslv);
	}

	/* Try to allocate new pool metadata spare LV */
	if (!handle_pool_metadata_spare(pool_lv->vg, 0, pvh, poolmetadataspare))
		stack;

	if (!detach_pool_metadata_lv(first_seg(pool_lv), &mlv))
		return_0;

	/* TODO: change default to skip */
	lv_set_activation_skip(mlv, 1, arg_int_value(cmd, setactivationskip_ARG, 0));
	mlv->status &= ~LVM_WRITE; /* read-only metadata backup */

	/* Swap _pmspare and _tmeta name */
	if (!swap_lv_identifiers(cmd, mlv, pmslv))
		return_0;

	/* Repaired spare becomes the pool's metadata LV. */
	if (!attach_pool_metadata_lv(first_seg(pool_lv), pmslv))
		return_0;

	/* Used _tmeta (now _pmspare) becomes _meta%d */
	if (!lv_rename_update(cmd, mlv, pms_path, 0))
		return_0;

	/* Preserve UUID for _pmspare if possible */
	_swap_lv_uuid(mlv, mlv->vg->pool_metadata_spare_lv);

	if (!vg_write(pool_lv->vg) || !vg_commit(pool_lv->vg))
		return_0;

	log_warn("WARNING: LV %s holds a backup of the unrepaired metadata. Use lvremove when no longer required.",
		 display_lvname(mlv));

	if (dm_list_size(&pool_lv->vg->pvs) > 1)
		log_warn("WARNING: New metadata LV %s might use different PVs. Move it with pvmove if required.",
			 display_lvname(first_seg(pool_lv)->metadata_lv));

	return 1;
}
/* TODO: lots of similar code with thinpool repair
* investigate possible better code sharing...
*/
/*
 * Repair the metadata of an inactive cache or cache-pool LV.
 *
 * Mirrors _lvconvert_thin_pool_repair(): runs the configured cache_repair
 * tool, reading from the pool's metadata LV (_cmeta) and writing the
 * repaired metadata into the VG's pool metadata spare LV (_pmspare).
 * On success the repaired spare is swapped in as the new metadata LV and
 * the old (unrepaired) metadata is kept as a visible, read-only backup LV
 * named <pool>_meta%d.
 *
 * cmd               - command context.
 * cache_lv          - cache LV or cache-pool LV to repair (must be inactive).
 * pvh               - PV list usable for allocating a (new) spare LV.
 * poolmetadataspare - whether to allocate a replacement spare afterwards.
 *
 * Returns 1 on success, 0 on failure.
 */
static int _lvconvert_cache_repair(struct cmd_context *cmd,
				   struct logical_volume *cache_lv,
				   struct dm_list *pvh, int poolmetadataspare)
{
	int ret = 0, status;
	int args = 0;
	const char *argv[DEFAULT_MAX_EXEC_ARGS + 7] = { /* Max supported args */
		find_config_tree_str_allow_empty(cmd, global_cache_repair_executable_CFG, NULL)
	};
	char meta_path[PATH_MAX];
	char pms_path[PATH_MAX];
	struct logical_volume *pool_lv;
	struct logical_volume *pmslv;
	struct logical_volume *mlv;

	/* A cache using a cachevol has no separate metadata LV to repair here. */
	if (lv_is_cache(cache_lv) && lv_is_cache_vol(first_seg(cache_lv)->pool_lv)) {
		log_error("Manual repair required.");
		return 0;
	}

	if (lv_is_active(cache_lv)) {
		log_error("Only inactive cache can be repaired.");
		return 0;
	}

	/* Accept either the cache-pool itself or a cache LV using one. */
	pool_lv = lv_is_cache_pool(cache_lv) ? cache_lv : first_seg(cache_lv)->pool_lv;
	mlv = first_seg(pool_lv)->metadata_lv;

	/* Repair is only possible when a cache_repair tool is configured. */
	if (!argv[0] || !*argv[0]) {
		log_error("Cache repair command is not configured. Repair is disabled.");
		return 0; /* Checking disabled */
	}

	pmslv = cache_lv->vg->pool_metadata_spare_lv;

	/* Check we have pool metadata spare LV */
	if (!handle_pool_metadata_spare(cache_lv->vg, 0, pvh, 1))
		return_0;

	/* Spare was just (re)created - commit metadata before using it. */
	if (pmslv != cache_lv->vg->pool_metadata_spare_lv) {
		if (!vg_write(cache_lv->vg) || !vg_commit(cache_lv->vg))
			return_0;
		pmslv = cache_lv->vg->pool_metadata_spare_lv;
	}

	/* Device paths for the repair tool: input _cmeta, output _pmspare. */
	if (dm_snprintf(meta_path, sizeof(meta_path), "%s%s/%s",
			cmd->dev_dir, mlv->vg->name, mlv->name) < 0) {
		log_error("Failed to build cache metadata path.");
		return 0;
	}

	if (dm_snprintf(pms_path, sizeof(pms_path), "%s%s/%s",
			cmd->dev_dir, pmslv->vg->name, pmslv->name) < 0) {
		log_error("Failed to build pool metadata spare path.");
		return 0;
	}

	/* Build: cache_repair [configured options] -i <_cmeta> -o <_pmspare> */
	if (!prepare_exec_args(cmd, argv, &args, global_cache_repair_options_CFG))
		return_0;

	argv[++args] = "-i";
	argv[++args] = meta_path;
	argv[++args] = "-o";
	argv[++args] = pms_path;

	if (!activate_lv(cmd, pmslv)) {
		log_error("Cannot activate pool metadata spare volume %s.",
			  pmslv->name);
		return 0;
	}

	if (!activate_lv(cmd, mlv)) {
		log_error("Cannot activate cache pool metadata volume %s.",
			  mlv->name);
		goto deactivate_pmslv;
	}

	if (!(ret = exec_cmd(cmd, (const char * const *)argv, &status, 1))) {
		log_error("Repair of cache metadata volume of cache %s failed (status:%d). "
			  "Manual repair required!",
			  display_lvname(cache_lv), status);
		goto deactivate_mlv;
	}

	/* TODO: any active validation of cache-pool metadata? */

deactivate_mlv:
	if (!deactivate_lv(cmd, mlv)) {
		log_error("Cannot deactivate pool metadata volume %s.",
			  display_lvname(mlv));
		ret = 0;
	}

deactivate_pmslv:
	if (!deactivate_lv(cmd, pmslv)) {
		log_error("Cannot deactivate pool metadata spare volume %s.",
			  display_lvname(pmslv));
		ret = 0;
	}

	if (!ret)
		return 0;

	/* Pick a free <pool>_meta%d name for the unrepaired metadata backup.
	 * (meta_path reused as the name template, pms_path gets the result.) */
	if (dm_snprintf(meta_path, sizeof(meta_path), "%s_meta%%d", pool_lv->name) < 0) {
		log_error("Can't prepare new metadata name for %s.", display_lvname(pool_lv));
		return 0;
	}

	if (!generate_lv_name(cache_lv->vg, meta_path, pms_path, sizeof(pms_path))) {
		log_error("Can't generate new name for %s.", meta_path);
		return 0;
	}

	/* Release the spare from its special role so it can become _cmeta. */
	if (pmslv == cache_lv->vg->pool_metadata_spare_lv) {
		cache_lv->vg->pool_metadata_spare_lv = NULL;
		pmslv->status &= ~POOL_METADATA_SPARE;
		lv_set_visible(pmslv);
	}

	/* Try to allocate new pool metadata spare LV */
	if (!handle_pool_metadata_spare(cache_lv->vg, 0, pvh, poolmetadataspare))
		stack;

	if (!detach_pool_metadata_lv(first_seg(pool_lv), &mlv))
		return_0;

	/* TODO: change default to skip */
	lv_set_activation_skip(mlv, 1, arg_int_value(cmd, setactivationskip_ARG, 0));
	mlv->status &= ~LVM_WRITE; /* read-only metadata backup */

	/* Swap _pmspare and _cmeta name */
	if (!swap_lv_identifiers(cmd, mlv, pmslv))
		return_0;

	/* Repaired spare becomes the pool's metadata LV. */
	if (!attach_pool_metadata_lv(first_seg(pool_lv), pmslv))
		return_0;

	/* Used _cmeta (now _pmspare) becomes _meta%d */
	if (!lv_rename_update(cmd, mlv, pms_path, 0))
		return_0;

	/* Preserve UUID for _pmspare if possible */
	_swap_lv_uuid(mlv, mlv->vg->pool_metadata_spare_lv);

	if (!vg_write(cache_lv->vg) || !vg_commit(cache_lv->vg))
		return_0;

	/* FIXME: just as with thinpool repair - fix the warning
	 * where moving doesn't make any sense (same disk storage)
	 */
	log_warn("WARNING: If everything works, remove %s volume.",
		 display_lvname(mlv));

	log_warn("WARNING: Use pvmove command to move %s on the best fitting PV.",
		 display_lvname(first_seg(pool_lv)->metadata_lv));

	return 1;
}
static int _lvconvert_to_thin_with_external(struct cmd_context *cmd,
struct logical_volume *lv,
struct logical_volume *thinpool_lv)
{
struct volume_group *vg = lv->vg;
struct logical_volume *thin_lv;
const char *origin_name;
int lv_was_active;
struct lvcreate_params lvc = {
.activate = CHANGE_AEY,
.alloc = ALLOC_INHERIT,
.major = -1,
.minor = -1,
.suppress_zero_warn = 1, /* Suppress warning for this thin */
.permission = LVM_READ,
.pool_name = thinpool_lv->name,
.pvh = &vg->pvs,
.read_ahead = DM_READ_AHEAD_AUTO,
.stripes = 1,
.virtual_extents = lv->le_count,
.tags = DM_LIST_HEAD_INIT(lvc.tags),
};
if (!_raid_split_image_conversion(lv))
return_0;
if (lv == thinpool_lv) {
log_error("Can't use same LV %s for thin pool and thin volume.",
display_lvname(thinpool_lv));
return 0;
}
if ((origin_name = arg_str_value(cmd, originname_ARG, NULL)))
if (!validate_restricted_lvname_param(cmd, &vg->name, &origin_name))
return_0;
/*
* If NULL, an auto-generated 'lvol' name is used.
* If set, the lv create code checks the name isn't used.
*/
lvc.lv_name = origin_name;
if (vg_is_shared(vg)) {
/*
* FIXME: external origins don't work in lockd VGs.
* Prior to the lvconvert, there's a lock associated with
* the uuid of the external origin LV. After the convert,
* that uuid belongs to the new thin LV, and a new LV with
* a new uuid exists as the non-thin, readonly external LV.
* We'd need to remove the lock for the previous uuid
* (the new thin LV will have no lock), and create a new
* lock for the new LV uuid used by the external LV.
*/
log_error("Can't use lock_type %s LV as external origin.",
vg->lock_type);
return 0;
}
if (!thin_pool_supports_external_origin(first_seg(thinpool_lv), lv))
return_0;
if (!(lvc.segtype = get_segtype_from_string(cmd, SEG_TYPE_NAME_THIN)))
return_0;
lv_was_active = lv_is_active(lv);
/* When converted LV is not holding lock, but some other LV keeps it
* 'active' i.e. being an external origin for such LV, activate this LV
* so the reload of table can properly update device tree. */
if (!lv_was_active && (lv != lv_lock_holder(lv)) && !activate_lv(cmd, lv)) {
log_error("Failed to activate %s. Conversion cannot proceed."