1
0
mirror of git://sourceware.org/git/lvm2.git synced 2024-12-22 17:35:59 +03:00
lvm2/tools/lvchange.c
David Teigland 52c60b7e7b lvchange: make use of command definitions
Reorganize the lvchange code to take advantage of
the command definition, and remove the validation
that is done by the command definition rules.
2017-02-13 08:20:10 -06:00

1393 lines
36 KiB
C

/*
* Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
* Copyright (C) 2004-2016 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "tools.h"
#include "memlock.h"
/*
 * Change LV permission (--permission r|rw).
 *
 * If the requested permission already matches the metadata, the kernel
 * state is refreshed when it disagrees with the metadata; otherwise an
 * "already" error is logged.  Returns 1 on success, 0 on failure.
 */
static int _lvchange_permission(struct cmd_context *cmd,
				struct logical_volume *lv)
{
	uint32_t lv_access;
	struct lvinfo info;

	lv_access = arg_uint_value(cmd, permission_ARG, 0);

	if (!(lv_access & LVM_WRITE) && !(lv->status & LVM_WRITE)) {
		/* Refresh if it's read-only in metadata but read-write in kernel */
		if (lv_info(cmd, lv, 0, &info, 0, 0) && info.exists && !info.read_only) {
			log_print_unless_silent("Logical volume %s is already read-only. Refreshing kernel state.",
						display_lvname(lv));
			return lv_refresh(cmd, lv);
		}
		log_error("Logical volume \"%s\" is already read only.",
			  display_lvname(lv));
		return 0;
	}

	if ((lv_access & LVM_WRITE) && (lv->status & LVM_WRITE)) {
		/* Refresh if it's read-write in metadata but read-only in kernel */
		if (lv_info(cmd, lv, 0, &info, 0, 0) && info.exists && info.read_only) {
			log_print_unless_silent("Logical volume %s is already writable. Refreshing kernel state.",
						display_lvname(lv));
			return lv_refresh(cmd, lv);
		}
		log_error("Logical volume %s is already writable.",
			  display_lvname(lv));
		return 0;
	}

	/* Active clustered mirrors cannot change permission in place. */
	if (lv_is_mirrored(lv) && vg_is_clustered(lv->vg) &&
	    lv_info(cmd, lv, 0, &info, 0, 0) && info.exists) {
		log_error("Cannot change permissions of mirror %s while active.",
			  display_lvname(lv));
		return 0;
	}

	if (lv_access & LVM_WRITE) {
		lv->status |= LVM_WRITE;
		log_verbose("Setting logical volume %s read/write.",
			    display_lvname(lv));
	} else {
		lv->status &= ~LVM_WRITE;
		log_verbose("Setting logical volume %s read-only.",
			    display_lvname(lv));
	}

	/* Commit metadata and reload the active table. */
	if (!lv_update_and_reload(lv))
		return_0;

	return 1;
}
/*
 * Update thin-pool properties: --discards and/or --zero.
 *
 * Returns 1 if at least one property was changed and reloaded,
 * 0 otherwise (including "already set" cases, which log an error).
 */
static int _lvchange_pool_update(struct cmd_context *cmd,
				 struct logical_volume *lv)
{
	int update = 0;
	unsigned val;
	thin_discards_t discards;

	if (arg_is_set(cmd, discards_ARG)) {
		discards = (thin_discards_t) arg_uint_value(cmd, discards_ARG, THIN_DISCARDS_IGNORE);
		if (discards != first_seg(lv)->discards) {
			/* Switching to/from "ignore" needs an inactive pool. */
			if (((discards == THIN_DISCARDS_IGNORE) ||
			     (first_seg(lv)->discards == THIN_DISCARDS_IGNORE)) &&
			    pool_is_active(lv))
				log_error("Cannot change support for discards while pool volume %s is active.",
					  display_lvname(lv));
			else {
				first_seg(lv)->discards = discards;
				update++;
			}
		} else
			log_error("Logical volume %s already uses --discards %s.",
				  display_lvname(lv), get_pool_discards_name(discards));
	}

	if (arg_is_set(cmd, zero_ARG)) {
		val = arg_uint_value(cmd, zero_ARG, 1);
		if (val != first_seg(lv)->zero_new_blocks) {
			first_seg(lv)->zero_new_blocks = val;
			update++;
		} else
			log_error("Logical volume %s already %szero new blocks.",
				  display_lvname(lv), val ? "" : "does not ");
	}

	if (!update)
		return 0;

	if (!lv_update_and_reload(lv))
		return_0;

	return 1;
}
/*
 * Apply the requested dmeventd monitoring mode to an active LV.
 * Fails when the LV (for thin pools: its layered device) is not active.
 */
static int _lvchange_monitoring(struct cmd_context *cmd,
				struct logical_volume *lv)
{
	struct lvinfo info;
	int mode;

	/* Thin pools are queried with use_layer set. */
	if (!lv_info(cmd, lv, lv_is_thin_pool(lv) ? 1 : 0, &info, 0, 0) ||
	    !info.exists) {
		log_error("Logical volume %s is not active.", display_lvname(lv));
		return 0;
	}

	mode = dmeventd_monitor_mode();
	if ((mode != DMEVENTD_MONITOR_IGNORE) &&
	    !monitor_dev_for_events(cmd, lv, 0, mode))
		return_0;

	return 1;
}
/*
 * Spawn background polling (used for pvmove/convert/merge progress)
 * for an active LV, when polling is enabled.
 */
static int _lvchange_background_polling(struct cmd_context *cmd,
					struct logical_volume *lv)
{
	struct lvinfo info;

	/* Polling only makes sense for an active LV. */
	if (!lv_info(cmd, lv, 0, &info, 0, 0) || !info.exists) {
		log_error("Logical volume %s is not active.", display_lvname(lv));
		return 0;
	}

	if (background_polling())
		lv_spawn_background_polling(cmd, lv);

	return 1;
}
/*
 * Change LV activation state per --activate.
 *
 * Returns 1 on success (including intentional no-ops such as activation
 * skip and -aay filtering), 0 on failure.  Callers test the result as a
 * boolean (!_lvchange_activate(...)), so only 0/1 may be returned here —
 * never ECMD_* codes, which are non-zero even on failure.
 */
static int _lvchange_activate(struct cmd_context *cmd, struct logical_volume *lv)
{
	activation_change_t activate;

	activate = (activation_change_t) arg_uint_value(cmd, activate_ARG, CHANGE_AY);

	/*
	 * We can get here in the odd case where an LV is already active in
	 * a foreign VG, which allows the VG to be accessed by lvchange -a
	 * so the LV can be deactivated.
	 */
	if (lv->vg->system_id && lv->vg->system_id[0] &&
	    cmd->system_id && cmd->system_id[0] &&
	    strcmp(lv->vg->system_id, cmd->system_id) &&
	    is_change_activating(activate)) {
		log_error("Cannot activate LVs in a foreign VG.");
		/* Fix: was "return ECMD_FAILED" — non-zero, so callers using
		 * !_lvchange_activate() treated this failure as success. */
		return 0;
	}

	if (lv_activation_skip(lv, activate, arg_is_set(cmd, ignoreactivationskip_ARG)))
		return 1;

	/* A snapshot's activation is performed on its (real) origin. */
	if (lv_is_cow(lv) && !lv_is_virtual_origin(origin_from_cow(lv)))
		lv = origin_from_cow(lv);

	/* -aay only activates LVs passing the auto_activation filter. */
	if ((activate == CHANGE_AAY) &&
	    !lv_passes_auto_activation_filter(cmd, lv))
		return 1;

	if (!lv_change_activate(cmd, lv, activate))
		return_0;

	/*
	 * FIXME: lvchange should defer background polling in a similar
	 * way as vgchange does. First activate all relevant LVs, then
	 * initiate background polling later (for all actually
	 * activated LVs). So we can avoid duplicate background
	 * polling for pvmove (2 or more locked LVs on single pvmove
	 * LV).
	 */
	if (background_polling() && is_change_activating(activate) &&
	    (lv_is_pvmove(lv) || lv_is_locked(lv) || lv_is_converting(lv) ||
	     lv_is_merging(lv)))
		lv_spawn_background_polling(cmd, lv);

	return 1;
}
/*
 * Collect the metadata sub-LVs of @seg onto @list so they can be
 * activated and wiped separately:
 *  - raid with metadata areas: every metadata sub-LV (made visible),
 *  - mirror: the detached mirror log LV.
 * Returns 1 on success, 0 when there is nothing to detach or on OOM.
 *
 * NOTE(review): a raid segment *without* metadata areas would fall
 * through to the mirror-log path below — presumably callers only pass
 * mirrors or raid-with-meta segments here; confirm.
 */
static int detach_metadata_devices(struct lv_segment *seg, struct dm_list *list)
{
	uint32_t s;
	uint32_t num_meta_lvs;
	struct lv_list *lvl;

	num_meta_lvs = seg_is_raid(seg) ? seg->area_count : !!seg->log_lv;

	if (!num_meta_lvs)
		return_0;

	/* One lv_list node per metadata LV, from the VG memory pool. */
	if (!(lvl = dm_pool_alloc(seg->lv->vg->vgmem, sizeof(*lvl) * num_meta_lvs)))
		return_0;

	if (seg_is_raid_with_meta(seg)) {
		for (s = 0; s < seg->area_count; s++) {
			if (!seg_metalv(seg, s))
				return_0; /* Trap this future possibility */

			lvl[s].lv = seg_metalv(seg, s);
			lv_set_visible(lvl[s].lv);

			dm_list_add(list, &lvl[s].list);
		}
		return 1;
	}

	/* Mirror with a persistent log: detach and list the log LV. */
	lvl[0].lv = detach_mirror_log(seg);
	dm_list_add(list, &lvl[0].list);

	return 1;
}
/*
 * Reverse of detach_metadata_devices(): put the metadata sub-LVs on
 * @list back in place after clearing.
 */
static int attach_metadata_devices(struct lv_segment *seg, struct dm_list *list)
{
	struct lv_list *lvl;

	if (seg_is_raid(seg)) {
		/* Raid metadata LVs were only made visible; hide them again. */
		dm_list_iterate_items(lvl, list)
			lv_set_hidden(lvl->lv);
		return 1;
	}

	/* Mirror: reattach the single (first) log LV on the list. */
	dm_list_iterate_items(lvl, list)
		break; /* get first item */

	/* NOTE(review): assumes @list holds at least one entry here;
	 * callers guarantee it via detach_metadata_devices(). */
	if (!attach_mirror_log(seg, lvl->lv))
		return_0;

	return 1;
}
/*
 * Restore the activation state recorded before a deactivation:
 * a no-op when the LV was inactive, otherwise reactivate it
 * (exclusively-local when it was exclusively active before).
 */
static int _reactivate_lv(struct logical_volume *lv,
			  int active, int exclusive)
{
	struct cmd_context *cmd = lv->vg->cmd;

	if (!active)
		return 1;

	return exclusive ? activate_lv_excl_local(cmd, lv)
			 : activate_lv(cmd, lv);
}
/*
* lvchange_resync
* @cmd
* @lv
*
* Force a mirror or RAID array to undergo a complete initializing resync.
*/
static int _lvchange_resync(struct cmd_context *cmd, struct logical_volume *lv)
{
	int active = 0;
	int exclusive = 0;
	int monitored;
	struct lv_segment *seg = first_seg(lv);
	struct dm_list device_list;
	struct lv_list *lvl;

	dm_list_init(&device_list);

	/* The LV must be deactivated to resync; refuse if it is open and
	 * ask the user before deactivating, remembering the prior state. */
	if (lv_is_active_locally(lv)) {
		if (!lv_check_not_in_use(lv, 1)) {
			log_error("Can't resync open logical volume %s.",
				  display_lvname(lv));
			return 0;
		}

		if (!arg_is_set(cmd, yes_ARG) &&
		    yes_no_prompt("Do you really want to deactivate "
				  "logical volume %s to resync it? [y/n]: ",
				  display_lvname(lv)) == 'n') {
			log_error("Logical volume %s not resynced.",
				  display_lvname(lv));
			return 0;
		}

		active = 1;
		if (lv_is_active_exclusive_locally(lv))
			exclusive = 1;
	}

	if (seg_is_raid(seg) && active && !exclusive) {
		log_error("RAID logical volume %s cannot be active remotely.",
			  display_lvname(lv));
		return 0;
	}

	/* Activate exclusively to ensure no nodes still have LV active */
	monitored = dmeventd_monitor_mode();
	if (monitored != DMEVENTD_MONITOR_IGNORE)
		init_dmeventd_monitor(0); /* pause monitoring while we work */

	if (!deactivate_lv(cmd, lv)) {
		log_error("Unable to deactivate %s for resync.", display_lvname(lv));
		return 0;
	}

	/* Another cluster node may still hold the LV active. */
	if (vg_is_clustered(lv->vg) && lv_is_active(lv)) {
		log_error("Can't get exclusive access to clustered volume %s.",
			  display_lvname(lv));
		return 0;
	}

	if (monitored != DMEVENTD_MONITOR_IGNORE)
		init_dmeventd_monitor(monitored); /* restore previous mode */

	init_mirror_in_sync(0);

	log_very_verbose("Starting resync of %s%s%s%s %s.",
			 (active) ? "active " : "",
			 vg_is_clustered(lv->vg) ? "clustered " : "",
			 (seg->log_lv) ? "disk-logged " :
			 seg_is_raid(seg) ? "" : "core-logged ",
			 lvseg_name(seg), display_lvname(lv));

	/*
	 * If this mirror has a core log (i.e. !seg->log_lv),
	 * then simply deactivating/activating will cause
	 * it to reset the sync status. We only need to
	 * worry about persistent logs.
	 */
	if (!seg_is_raid(seg) && !seg->log_lv) {
		if (lv_is_not_synced(lv)) {
			lv->status &= ~LV_NOTSYNCED;

			log_very_verbose("Updating logical volume %s on disk(s).",
					 display_lvname(lv));

			if (!vg_write(lv->vg) || !vg_commit(lv->vg)) {
				log_error("Failed to update metadata on disk.");
				return 0;
			}
		}

		if (!_reactivate_lv(lv, active, exclusive)) {
			log_error("Failed to reactivate %s to resynchronize mirror.",
				  display_lvname(lv));
			return 0;
		}

		return 1;
	}

	/*
	 * Now we handle mirrors with log devices
	 */
	lv->status &= ~LV_NOTSYNCED;

	/* Separate mirror log or metadata devices so we can clear them */
	if (!detach_metadata_devices(seg, &device_list)) {
		log_error("Failed to clear %s %s for %s.",
			  lvseg_name(seg), seg_is_raid(seg) ?
			  "metadata area" : "mirror log", display_lvname(lv));
		return 0;
	}

	if (!vg_write(lv->vg) || !vg_commit(lv->vg)) {
		log_error("Failed to update intermediate VG metadata on disk.");
		if (!_reactivate_lv(lv, active, exclusive))
			stack;
		return 0;
	}

	/* No backup for intermediate metadata, so just unlock memory */
	memlock_unlock(lv->vg->cmd);

	/* Activate and zero each detached log/metadata LV in turn. */
	dm_list_iterate_items(lvl, &device_list) {
		if (!activate_lv_excl_local(cmd, lvl->lv)) {
			log_error("Unable to activate %s for %s clearing.",
				  display_lvname(lvl->lv), (seg_is_raid(seg)) ?
				  "metadata area" : "mirror log");
			return 0;
		}

		if (!wipe_lv(lvl->lv, (struct wipe_params)
			     { .do_zero = 1, .zero_sectors = lvl->lv->size })) {
			log_error("Unable to reset sync status for %s.",
				  display_lvname(lv));
			if (!deactivate_lv(cmd, lvl->lv))
				log_error("Failed to deactivate log LV after "
					  "wiping failed");
			return 0;
		}

		if (!deactivate_lv(cmd, lvl->lv)) {
			log_error("Unable to deactivate %s LV %s "
				  "after wiping for resync.",
				  (seg_is_raid(seg)) ? "metadata" : "log",
				  display_lvname(lvl->lv));
			return 0;
		}
	}

	/* Wait until devices are away */
	if (!sync_local_dev_names(lv->vg->cmd)) {
		log_error("Failed to sync local devices after updating %s.",
			  display_lvname(lv));
		return 0;
	}

	/* Put metadata sub-LVs back in place */
	if (!attach_metadata_devices(seg, &device_list)) {
		log_error("Failed to reattach %s device after clearing.",
			  (seg_is_raid(seg)) ? "metadata" : "log");
		return 0;
	}

	if (!vg_write(lv->vg) || !vg_commit(lv->vg)) {
		log_error("Failed to update metadata on disk for %s.",
			  display_lvname(lv));
		return 0;
	}

	if (!_reactivate_lv(lv, active, exclusive)) {
		backup(lv->vg);
		log_error("Failed to reactivate %s after resync.",
			  display_lvname(lv));
		return 0;
	}

	backup(lv->vg);

	return 1;
}
/*
 * Change allocation policy (--alloc, or --contiguous as a shorthand).
 * Metadata-only change: the active device table is left untouched.
 */
static int _lvchange_alloc(struct cmd_context *cmd, struct logical_volume *lv)
{
	int want_contiguous = arg_int_value(cmd, contiguous_ARG, 0);
	alloc_policy_t alloc = (alloc_policy_t)
		arg_uint_value(cmd, alloc_ARG, (want_contiguous)
			       ? ALLOC_CONTIGUOUS : ALLOC_INHERIT);

	if (alloc == lv->alloc) {
		log_error("Allocation policy of logical volume %s is already %s.",
			  display_lvname(lv), get_alloc_string(alloc));
		return 0;
	}

	lv->alloc = alloc;

	/* FIXME If contiguous, check existing extents already are */

	/* NOTE(review): message says "contiguous" even for other policies. */
	log_verbose("Setting contiguous allocation policy for %s to %s.",
		    display_lvname(lv), get_alloc_string(alloc));

	log_very_verbose("Updating logical volume %s on disk(s).", display_lvname(lv));

	/* No need to suspend LV for this change */
	if (!vg_write(lv->vg) || !vg_commit(lv->vg))
		return_0;

	backup(lv->vg);

	return 1;
}
/*
 * Toggle the "error when full" behaviour (--errorwhenfull y|n).
 * Errors out when the requested state is already in effect.
 */
static int _lvchange_errorwhenfull(struct cmd_context *cmd,
				   struct logical_volume *lv)
{
	unsigned ewf = arg_int_value(cmd, errorwhenfull_ARG, 0);

	if (lv_is_error_when_full(lv) == ewf) {
		log_error("Error when full is already %sset for %s.",
			  (ewf) ? "" : "un", display_lvname(lv));
		return 0;
	}

	if (!ewf)
		lv->status &= ~LV_ERROR_WHEN_FULL;
	else
		lv->status |= LV_ERROR_WHEN_FULL;

	/* Commit and reload so the kernel picks up the new behaviour. */
	if (!lv_update_and_reload(lv))
		return_0;

	return 1;
}
/*
 * Change read-ahead (--readahead), in sectors, or the special values
 * DM_READ_AHEAD_AUTO / DM_READ_AHEAD_NONE.
 */
static int _lvchange_readahead(struct cmd_context *cmd,
			       struct logical_volume *lv)
{
	unsigned read_ahead = 0;
	/* Page size expressed in 512-byte sectors. */
	unsigned pagesize = (unsigned) lvm_getpagesize() >> SECTOR_SHIFT;

	read_ahead = arg_uint_value(cmd, readahead_ARG, 0);

	/* Restricted metadata formats can only store values 2..120. */
	if (read_ahead != DM_READ_AHEAD_AUTO &&
	    (lv->vg->fid->fmt->features & FMT_RESTRICTED_READAHEAD) &&
	    (read_ahead < 2 || read_ahead > 120)) {
		log_error("Metadata only supports readahead values between 2 and 120.");
		return 0;
	}

	/* Round an explicit value to a whole multiple of the page size,
	 * rounding down but never below a single page. */
	if (read_ahead != DM_READ_AHEAD_AUTO &&
	    read_ahead != DM_READ_AHEAD_NONE && read_ahead % pagesize) {
		if (read_ahead < pagesize)
			read_ahead = pagesize;
		else
			read_ahead = (read_ahead / pagesize) * pagesize;
		log_warn("WARNING: Overriding readahead to %u sectors, a multiple "
			 "of %uK page size.", read_ahead, pagesize >> 1);
	}

	/* Compare only after rounding so equivalent requests are no-ops. */
	if (lv->read_ahead == read_ahead) {
		if (read_ahead == DM_READ_AHEAD_AUTO)
			log_error("Read ahead is already auto for %s.",
				  display_lvname(lv));
		else
			log_error("Read ahead is already %u for %s.",
				  read_ahead, display_lvname(lv));
		return 0;
	}

	lv->read_ahead = read_ahead;
	log_verbose("Setting read ahead to %u for %s.",
		    read_ahead, display_lvname(lv));

	if (!lv_update_and_reload(lv))
		return_0;

	return 1;
}
/*
 * Change persistent device number settings (--persistent/--major/--minor).
 *
 * Clearing (minor == -1) just drops the FIXED_MINOR flag; setting a
 * number requires the LV to be temporarily deactivated first and it is
 * reactivated afterwards.  Returns 1 on success, 0 on failure.
 */
static int _lvchange_persistent(struct cmd_context *cmd,
				struct logical_volume *lv)
{
	enum activation_change activate = CHANGE_AN;

	/* The LV lock in lvmlockd should remain as it is. */
	cmd->lockd_lv_disable = 1;

	/* Fills in lv->major / lv->minor from the command arguments. */
	if (!get_and_validate_major_minor(cmd, lv->vg->fid->fmt,
					  &lv->major, &lv->minor))
		return_0;

	if (lv->minor == -1) {
		if (!(lv->status & FIXED_MINOR)) {
			log_error("Minor number is already not persistent for %s.",
				  display_lvname(lv));
			return 0;
		}
		lv->status &= ~FIXED_MINOR;
		log_verbose("Disabling persistent device number for %s.",
			    display_lvname(lv));
	} else {
		if (lv_is_active(lv)) {
			if (!arg_is_set(cmd, force_ARG) &&
			    !arg_is_set(cmd, yes_ARG) &&
			    yes_no_prompt("Logical volume %s will be "
					  "deactivated temporarily. "
					  "Continue? [y/n]: ",
					  display_lvname(lv)) == 'n') {
				log_error("%s device number not changed.",
					  display_lvname(lv));
				return 0;
			}

			/* Remember to reactivate after the number change. */
			activate = CHANGE_AEY;
			if (vg_is_clustered(lv->vg) &&
			    locking_is_clustered() &&
			    locking_supports_remote_queries() &&
			    !lv_is_active_exclusive_locally(lv)) {
				/* Reliable reactivate only locally */
				log_print_unless_silent("Remotely active LV %s needs "
							"individual reactivation.",
							display_lvname(lv));
				activate = CHANGE_ALY;
			}
		}

		/* Ensuring LV is not active */
		if (!deactivate_lv(cmd, lv)) {
			log_error("Cannot deactivate %s.", display_lvname(lv));
			return 0;
		}

		lv->status |= FIXED_MINOR;
		log_verbose("Setting persistent device number to (%d, %d) for %s.",
			    lv->major, lv->minor, display_lvname(lv));
	}

	log_very_verbose("Updating logical volume %s on disk(s).",
			 display_lvname(lv));

	if (!vg_write(lv->vg) || !vg_commit(lv->vg))
		return_0;

	if (activate != CHANGE_AN) {
		log_verbose("Re-activating logical volume %s.", display_lvname(lv));
		if (!lv_active_change(cmd, lv, activate, 0)) {
			log_error("%s: reactivation failed.", display_lvname(lv));
			backup(lv->vg);
			return 0;
		}
	}

	backup(lv->vg);

	return 1;
}
/*
 * Change cache properties (--cachemode/--cachepolicy/--cachesettings).
 *
 * All failure paths route through 'out' so the settings config tree
 * allocated by get_cache_params() is always destroyed.  (Previously the
 * clean-check used return_0 / return 0 directly and leaked 'settings'.)
 * Returns 1 on success, 0 on failure.
 */
static int _lvchange_cache(struct cmd_context *cmd, struct logical_volume *lv)
{
	cache_mode_t mode;
	const char *name;
	struct dm_config_tree *settings = NULL;
	struct lv_segment *pool_seg = first_seg(lv);
	int r = 0, is_clean;

	/* For a cache LV, properties live on the cache-pool's segment. */
	if (lv_is_cache(lv))
		pool_seg = first_seg(pool_seg->pool_lv);

	if (!get_cache_params(cmd, &mode, &name, &settings))
		goto_out;

	/* Switching cache mode requires a clean (fully written-back) cache. */
	if ((mode != CACHE_MODE_UNDEFINED) &&
	    (mode != pool_seg->cache_mode) &&
	    lv_is_cache(lv)) {
		if (!lv_cache_wait_for_clean(lv, &is_clean))
			goto_out;

		if (!is_clean) {
			log_error("Cache %s is not clean, refusing to switch cache mode.",
				  display_lvname(lv));
			goto out;
		}
	}

	if (mode && !cache_set_cache_mode(first_seg(lv), mode))
		goto_out;

	if ((name || settings) &&
	    !cache_set_policy(first_seg(lv), name, settings))
		goto_out;

	if (!lv_update_and_reload(lv))
		goto_out;

	r = 1;
out:
	if (settings)
		dm_config_destroy(settings);

	return r;
}
/*
 * Add or delete a tag on @lv; 'arg' is the option enum
 * (addtag_ARG/deltag_ARG) telling change_tag() which operation to do.
 */
static int _lvchange_tag(struct cmd_context *cmd, struct logical_volume *lv, int arg)
{
	if (!change_tag(cmd, NULL, lv, NULL, arg))
		return_0;

	log_very_verbose("Updating logical volume %s on disk(s).", display_lvname(lv));

	/* No need to suspend LV for this change */
	if (!vg_write(lv->vg) || !vg_commit(lv->vg))
		return_0;

	backup(lv->vg);

	return 1;
}
/*
 * Rebuild raid sub-LVs located on the PVs named by --rebuild.
 *
 * Follows the 0 = failure / 1 = success convention: callers invoke it
 * as !_lvchange_rebuild(lv), so ECMD_* codes (non-zero even on failure)
 * must not be returned from here.
 */
static int _lvchange_rebuild(struct logical_volume *lv)
{
	int pv_count, i = 0;
	char **rebuild_pvs;
	const char *tmp_str;
	struct dm_list *rebuild_pvh = NULL;
	struct arg_value_group_list *group;
	struct volume_group *vg = lv->vg;
	struct cmd_context *cmd = vg->cmd;

	if (!(pv_count = arg_count(cmd, rebuild_ARG))) {
		log_error("No --rebuild found!");
		return 0;
	}

	if (!arg_is_set(cmd, yes_ARG) &&
	    yes_no_prompt("Do you really want to rebuild %u PVs "
			  "of logical volume %s [y/n]: ",
			  pv_count, display_lvname(lv)) == 'n') {
		log_error("Logical volume %s not rebuild.",
			  display_lvname(lv));
		return 0;
	}

	/* rebuild can be specified more than once */
	if (!(rebuild_pvs = dm_pool_alloc(vg->vgmem, sizeof(char *) * pv_count)))
		return_0;

	/* Collect every --rebuild PV name from the grouped arguments. */
	dm_list_iterate_items(group, &cmd->arg_value_groups) {
		if (!grouped_arg_is_set(group->arg_values, rebuild_ARG))
			continue;

		if (!(tmp_str = grouped_arg_str_value(group->arg_values,
						      rebuild_ARG, NULL)))
			return_0;

		if (!(rebuild_pvs[i++] = dm_pool_strdup(cmd->mem, tmp_str)))
			return_0;
	}

	/* Fix: was "return_ECMD_FAILED" — a non-zero ECMD code that the
	 * boolean-testing callers would have treated as success. */
	if (!(rebuild_pvh = create_pv_list(cmd->mem, vg,
					   pv_count, rebuild_pvs, 0)))
		return_0;

	/* Rebuild PVs listed on @rebuild_pvh */
	return lv_raid_rebuild(lv, rebuild_pvh);
}
/*
 * Handle --writemostly and --writebehind for a raid LV.
 *
 * Each --writemostly argument names a PV, optionally suffixed with
 * ':y' (set), ':n' (clear) or ':t' (toggle); without a suffix ':y'
 * is assumed.  The flag is applied to every data sub-LV residing on
 * the given PV.
 */
static int _lvchange_writemostly(struct logical_volume *lv)
{
	int s, pv_count, i = 0;
	char **pv_names;
	const char *tmp_str;
	size_t tmp_str_len;
	struct pv_list *pvl;
	struct arg_value_group_list *group;
	struct cmd_context *cmd = lv->vg->cmd;
	struct lv_segment *raid_seg = first_seg(lv);

	if (arg_is_set(cmd, writebehind_ARG))
		raid_seg->writebehind = arg_uint_value(cmd, writebehind_ARG, 0);

	if ((pv_count = arg_count(cmd, writemostly_ARG))) {
		/* writemostly can be specified more than once */
		pv_names = dm_pool_alloc(lv->vg->vgmem, sizeof(char *) * pv_count);
		if (!pv_names)
			return_0;

		dm_list_iterate_items(group, &cmd->arg_value_groups) {
			if (!grouped_arg_is_set(group->arg_values,
						writemostly_ARG))
				continue;

			if (!(tmp_str = grouped_arg_str_value(group->arg_values,
							      writemostly_ARG,
							      NULL)))
				return_0;

			/*
			 * Writemostly PV specifications can be:
			 *   <PV>   - Turn on writemostly
			 *   <PV>:t - Toggle writemostly
			 *   <PV>:n - Turn off writemostly
			 *   <PV>:y - Turn on writemostly
			 *
			 * We allocate strlen + 3 to add our own ':{t|n|y}' if
			 * not present plus the trailing '\0'.
			 */
			tmp_str_len = strlen(tmp_str);

			if (!(pv_names[i] = dm_pool_zalloc(lv->vg->vgmem, tmp_str_len + 3)))
				return_0;

			if ((tmp_str_len < 3) ||
			    (tmp_str[tmp_str_len - 2] != ':'))
				/* Default to 'y' if no mode specified */
				sprintf(pv_names[i], "%s:y", tmp_str);
			else
				sprintf(pv_names[i], "%s", tmp_str);
			i++;
		}

		/* Overwrite each ':' with '\0' so the name can be looked up
		 * as a plain PV name; the mode character stays one byte past
		 * the new terminator and is read back below. */
		for (i = 0; i < pv_count; i++)
			pv_names[i][strlen(pv_names[i]) - 2] = '\0';

		for (i = 0; i < pv_count; i++) {
			if (!(pvl = find_pv_in_vg(lv->vg, pv_names[i]))) {
				log_error("%s not found in volume group, %s",
					  pv_names[i], lv->vg->name);
				return 0;
			}

			for (s = 0; s < (int) raid_seg->area_count; s++) {
				/*
				 * We don't bother checking the metadata area,
				 * since writemostly only affects the data areas.
				 */
				if (seg_type(raid_seg, s) == AREA_UNASSIGNED)
					continue;

				if (lv_is_on_pv(seg_lv(raid_seg, s), pvl->pv)) {
					/* strlen() stops at the inserted '\0',
					 * so [strlen + 1] is the mode char. */
					if (pv_names[i][strlen(pv_names[i]) + 1] == 'y')
						seg_lv(raid_seg, s)->status |=
							LV_WRITEMOSTLY;
					else if (pv_names[i][strlen(pv_names[i]) + 1] == 'n')
						seg_lv(raid_seg, s)->status &=
							~LV_WRITEMOSTLY;
					else if (pv_names[i][strlen(pv_names[i]) + 1] == 't')
						seg_lv(raid_seg, s)->status ^=
							LV_WRITEMOSTLY;
					else
						return_0;
				}
			}
		}
	}

	if (!lv_update_and_reload(lv))
		return_0;

	return 1;
}
/*
 * Set raid min/max recovery rates (--minrecoveryrate/--maxrecoveryrate).
 * Values are halved before storing — presumably converting KiB/s to
 * 512-byte sectors/s; TODO confirm against the option definition.
 */
static int _lvchange_recovery_rate(struct logical_volume *lv)
{
	struct cmd_context *cmd = lv->vg->cmd;
	struct lv_segment *raid_seg = first_seg(lv);

	if (arg_is_set(cmd, minrecoveryrate_ARG))
		raid_seg->min_recovery_rate =
			arg_uint_value(cmd, minrecoveryrate_ARG, 0) / 2;

	if (arg_is_set(cmd, maxrecoveryrate_ARG))
		raid_seg->max_recovery_rate =
			arg_uint_value(cmd, maxrecoveryrate_ARG, 0) / 2;

	/* A max of 0 means no limit, so only then skip the comparison. */
	if (raid_seg->max_recovery_rate &&
	    (raid_seg->max_recovery_rate < raid_seg->min_recovery_rate)) {
		log_error("Minimum recovery rate cannot be higher than maximum.");
		return 0;
	}

	if (!lv_update_and_reload(lv))
		return_0;

	return 1;
}
/*
 * Attach (--profile/--metadataprofile) or detach (--detachprofile)
 * a metadata configuration profile on @lv and commit the metadata.
 */
static int _lvchange_profile(struct logical_volume *lv)
{
	struct cmd_context *cmd = lv->vg->cmd;
	struct profile *new_profile;
	const char *old_name, *new_name;

	old_name = lv->profile ? lv->profile->name : "(inherited)";

	if (arg_is_set(cmd, detachprofile_ARG)) {
		lv->profile = NULL;
		new_name = "(inherited)";
	} else {
		/* --metadataprofile takes precedence over --profile. */
		new_name = arg_is_set(cmd, metadataprofile_ARG)
			? arg_str_value(cmd, metadataprofile_ARG, NULL)
			: arg_str_value(cmd, profile_ARG, NULL);
		if (!(new_profile = add_profile(cmd, new_name, CONFIG_PROFILE_METADATA)))
			return_0;
		lv->profile = new_profile;
	}

	log_verbose("Changing configuration profile for LV %s: %s -> %s.",
		    display_lvname(lv), old_name, new_name);

	if (!vg_write(lv->vg) || !vg_commit(lv->vg))
		return_0;

	backup(lv->vg);

	return 1;
}
/*
 * Set or clear the activation-skip flag (--setactivationskip y|n)
 * and commit the metadata.  Returns 1 on success, 0 on failure.
 */
static int _lvchange_activation_skip(struct logical_volume *lv)
{
	int skip = arg_int_value(lv->vg->cmd, setactivationskip_ARG, 0);

	lv_set_activation_skip(lv, 1, skip);

	/* Fix: the two arguments were previously swapped relative to the
	 * format string, printing e.g.
	 * "Changing activation skip flag to vg/lv for LV enabled." */
	log_verbose("Changing activation skip flag to %s for LV %s.",
		    skip ? "enabled" : "disabled", display_lvname(lv));

	if (!vg_write(lv->vg) || !vg_commit(lv->vg))
		return_0;

	backup(lv->vg);

	return 1;
}
/*
* For each lvchange command definition:
*
* lvchange_foo_cmd(cmd, argc, argv);
* . set cmd fields that apply to "foo"
* . set any other things that affect behavior of process_each
* . process_each_lv(_lvchange_foo_single);
*
* _lvchange_foo_single(lv);
* . _lvchange_foo(lv);
* . (or all the code could live in the _single fn)
*/
/*
 * Apply the property-changing options of this command definition to @lv.
 *
 * Each set option is dispatched to its _lvchange_*() helper; 'docmds'
 * counts attempted changes and 'doit' successful ones, so any partial
 * failure is reported as ECMD_FAILED.
 */
static int _lvchange_properties_single(struct cmd_context *cmd,
				       struct logical_volume *lv,
				       struct processing_handle *handle)
{
	int doit = 0, docmds = 0;
	int i, opt_enum;

	/*
	 * If a persistent lv lock already exists from activation
	 * (with the needed mode or higher), this will be a no-op.
	 * Otherwise, the lv lock will be taken as non-persistent
	 * and released when this command exits.
	 */
	if (!lockd_lv(cmd, lv, "ex", 0)) {
		stack;
		return ECMD_FAILED;
	}

	for (i = 0; i < cmd->command->ro_count; i++) {
		opt_enum = cmd->command->required_opt_args[i].opt;

		if (!arg_is_set(cmd, opt_enum))
			continue;

		/* Archive the VG before each modification. */
		if (!archive(lv->vg))
			return_ECMD_FAILED;

		docmds++;

		switch (opt_enum) {
		case permission_ARG:
			doit += _lvchange_permission(cmd, lv);
			break;
		case alloc_ARG:
		case contiguous_ARG:
			doit += _lvchange_alloc(cmd, lv);
			break;
		case errorwhenfull_ARG:
			doit += _lvchange_errorwhenfull(cmd, lv);
			break;
		case readahead_ARG:
			doit += _lvchange_readahead(cmd, lv);
			break;
		case persistent_ARG:
			doit += _lvchange_persistent(cmd, lv);
			break;
		case discards_ARG:
		case zero_ARG:
			doit += _lvchange_pool_update(cmd, lv);
			break;
		case addtag_ARG:
		case deltag_ARG:
			doit += _lvchange_tag(cmd, lv, opt_enum);
			break;
		case writemostly_ARG:
		case writebehind_ARG:
			doit += _lvchange_writemostly(lv);
			break;
		case minrecoveryrate_ARG:
		case maxrecoveryrate_ARG:
			doit += _lvchange_recovery_rate(lv);
			break;
		case profile_ARG:
		case metadataprofile_ARG:
		case detachprofile_ARG:
			doit += _lvchange_profile(lv);
			break;
		case setactivationskip_ARG:
			doit += _lvchange_activation_skip(lv);
			break;
		case cachemode_ARG:
		case cachepolicy_ARG:
		case cachesettings_ARG:
			doit += _lvchange_cache(cmd, lv);
			break;
		default:
			/* Fix: name the option itself, not the loop index. */
			log_error(INTERNAL_ERROR "Failed to check for option %s",
				  arg_long_option_name(opt_enum));
		}
	}

	if (doit)
		log_print_unless_silent("Logical volume %s changed.", display_lvname(lv));

	if (doit != docmds)
		return_ECMD_FAILED;

	return ECMD_PROCESSED;
}
/*
 * process_each_lv check callback for property changes: reject hidden
 * LVs (complaining only when the user named one explicitly) and
 * clustered raid cache-origin LVs.
 */
static int _lvchange_properties_check(struct cmd_context *cmd,
				      struct logical_volume *lv,
				      struct processing_handle *handle,
				      int lv_is_named_arg)
{
	if (!lv_is_visible(lv)) {
		if (lv_is_named_arg)
			log_error("Operation not permitted (%s %d) on hidden LV %s.",
				  cmd->command->command_line_id,
				  cmd->command->command_line_enum,
				  display_lvname(lv));
		return 0;
	}

	if (lv_is_cache_origin(lv) && lv_is_raid(lv) && vg_is_clustered(lv->vg)) {
		log_error("Unable to change internal LV %s directly in a cluster.",
			  display_lvname(lv));
		return 0;
	}

	return 1;
}
/*
 * Entry point for lvchange commands that change LV properties.
 */
int lvchange_properties_cmd(struct cmd_context *cmd, int argc, char **argv)
{
	int ret;

	/*
	 * A command def rule allows only some options when LV is partial,
	 * so handles_missing_pvs will only affect those.
	 */
	cmd->handles_missing_pvs = 1;

	ret = process_each_lv(cmd, argc, argv, NULL, NULL, READ_FOR_UPDATE,
			      NULL, &_lvchange_properties_check, &_lvchange_properties_single);

	if (ret != ECMD_PROCESSED)
		return ret;

	/*
	 * Unfortunately, lvchange has previously allowed changing an LV
	 * property and changing LV activation in a single command. This was
	 * not a good idea because the behavior/results are hard to predict and
	 * not possible to sensibly describe. It's also unnecessary. So, this
	 * is here for the sake of compatibility.
	 *
	 * This is extremely ugly; activation should always be done separately.
	 * This is not the full-featured lvchange capability, just the basic
	 * (the advanced activate options are not provided.)
	 *
	 * FIXME: wrap this in a config setting that we can disable by default
	 * to phase this out?
	 */
	if (arg_is_set(cmd, activate_ARG)) {
		log_warn("WARNING: Combining activation change with other commands is not advised.");
		ret = lvchange_activate_cmd(cmd, argc, argv);
	}

	return ret;
}
/*
 * Per-LV worker for lvchange -a: confirm snapshot/origin interactions,
 * honour --sysinit with lvmetad, then perform the activation change.
 */
static int _lvchange_activate_single(struct cmd_context *cmd,
				     struct logical_volume *lv,
				     struct processing_handle *handle)
{
	struct logical_volume *origin;
	char snaps_msg[128];

	/* FIXME: untangle the proper logic for cow / sparse / virtual origin */

	/* If LV is sparse, activate origin instead */
	if (lv_is_cow(lv) && lv_is_virtual_origin(origin = origin_from_cow(lv)))
		lv = origin;

	/* Changing a snapshot also affects its origin (and any sibling
	 * snapshots): warn and confirm unless --yes was given. */
	if (lv_is_cow(lv)) {
		origin = origin_from_cow(lv);
		if (origin->origin_count < 2)
			snaps_msg[0] = '\0';
		else if (dm_snprintf(snaps_msg, sizeof(snaps_msg),
				     " and %u other snapshot(s)",
				     origin->origin_count - 1) < 0) {
			log_error("Failed to prepare message.");
			return ECMD_FAILED;
		}

		if (!arg_is_set(cmd, yes_ARG) &&
		    (yes_no_prompt("Change of snapshot %s will also change its "
				   "origin %s%s. Proceed? [y/n]: ",
				   display_lvname(lv), display_lvname(origin),
				   snaps_msg) == 'n')) {
			log_error("Logical volume %s not changed.", display_lvname(lv));
			return ECMD_FAILED;
		}
	}

	/*
	 * If --sysinit -aay is used and at the same time lvmetad is used,
	 * we want to rely on autoactivation to take place. Also, we
	 * need to take special care here as the lvmetad service does
	 * not need to be running at this moment yet - it could be
	 * just too early during system initialization time.
	 */
	if (arg_is_set(cmd, sysinit_ARG) && (arg_uint_value(cmd, activate_ARG, 0) == CHANGE_AAY)) {
		if (lvmetad_used()) {
			log_warn("WARNING: lvmetad is active, skipping direct activation during sysinit.");
			return ECMD_PROCESSED;
		}
	}

	if (!_lvchange_activate(cmd, lv))
		return_ECMD_FAILED;

	return ECMD_PROCESSED;
}
/*
 * process_each_lv check callback for lvchange -a: only visible LVs may
 * have activation changed; complain only for explicitly named ones.
 */
static int _lvchange_activate_check(struct cmd_context *cmd,
				    struct logical_volume *lv,
				    struct processing_handle *handle,
				    int lv_is_named_arg)
{
	if (lv_is_visible(lv))
		return 1;

	if (lv_is_named_arg)
		log_error("Operation not permitted (%s %d) on hidden LV %s.",
			  cmd->command->command_line_id,
			  cmd->command->command_line_enum,
			  display_lvname(lv));

	return 0;
}
/*
 * Entry point for lvchange -a (activation changes).
 */
int lvchange_activate_cmd(struct cmd_context *cmd, int argc, char **argv)
{
	cmd->handles_missing_pvs = 1;
	cmd->lockd_vg_default_sh = 1;

	/*
	 * Include foreign VGs that contain active LVs.
	 * That shouldn't happen in general, but if it does by some
	 * mistake, then we want to allow those LVs to be deactivated.
	 */
	cmd->include_active_foreign_vgs = 1;

	/* Allow deactivating if locks fail. */
	if (is_change_activating((activation_change_t)arg_uint_value(cmd, activate_ARG, CHANGE_AY)))
		cmd->lockd_vg_enforce_sh = 1;

	return process_each_lv(cmd, argc, argv, NULL, NULL, 0,
			       NULL, &_lvchange_activate_check, &_lvchange_activate_single);
}
/*
 * Per-LV worker for lvchange --refresh: reload the active table and
 * optionally restart background polling.
 */
static int _lvchange_refresh_single(struct cmd_context *cmd,
				    struct logical_volume *lv,
				    struct processing_handle *handle)
{
	log_verbose("Refreshing logical volume %s (if active).", display_lvname(lv));

	if (!lv_refresh(cmd, lv))
		return_ECMD_FAILED;

	/*
	 * FIXME: In some cases, the lv_refresh() starts polling without
	 * checking poll arg. Pull that out of lv_refresh.
	 */
	if (arg_is_set(cmd, poll_ARG) &&
	    !_lvchange_background_polling(cmd, lv))
		return_ECMD_FAILED;

	return ECMD_PROCESSED;
}
/*
 * process_each_lv check callback for --refresh: hidden LVs are not
 * refreshed directly; complain only for explicitly named ones.
 */
static int _lvchange_refresh_check(struct cmd_context *cmd,
				   struct logical_volume *lv,
				   struct processing_handle *handle,
				   int lv_is_named_arg)
{
	if (lv_is_visible(lv))
		return 1;

	if (lv_is_named_arg)
		log_error("Operation not permitted (%s %d) on hidden LV %s.",
			  cmd->command->command_line_id,
			  cmd->command->command_line_enum,
			  display_lvname(lv));

	return 0;
}
/*
 * Entry point for lvchange --refresh: allowed on VGs with missing PVs,
 * using the shared VG lock default.
 */
int lvchange_refresh_cmd(struct cmd_context *cmd, int argc, char **argv)
{
	cmd->handles_missing_pvs = 1;
	cmd->lockd_vg_default_sh = 1;

	return process_each_lv(cmd, argc, argv, NULL, NULL, 0, NULL,
			       &_lvchange_refresh_check,
			       &_lvchange_refresh_single);
}
/* Per-LV worker for lvchange --resync. */
static int _lvchange_resync_single(struct cmd_context *cmd,
				   struct logical_volume *lv,
				   struct processing_handle *handle)
{
	if (!_lvchange_resync(cmd, lv))
		return_ECMD_FAILED;

	return ECMD_PROCESSED;
}
/*
 * process_each_lv check callback for --resync: accept visible LVs,
 * and hidden LVs only when they were named explicitly.
 */
static int _lvchange_resync_check(struct cmd_context *cmd,
				  struct logical_volume *lv,
				  struct processing_handle *handle,
				  int lv_is_named_arg)
{
	return (lv_is_visible(lv) || lv_is_named_arg) ? 1 : 0;
}
/*
 * Entry point for lvchange --resync.
 */
int lvchange_resync_cmd(struct cmd_context *cmd, int argc, char **argv)
{
	int ret;

	ret = process_each_lv(cmd, argc, argv, NULL, NULL, READ_FOR_UPDATE,
			      NULL, &_lvchange_resync_check, &_lvchange_resync_single);

	if (ret != ECMD_PROCESSED)
		return ret;

	/*
	 * Unfortunately, lvchange has previously allowed resync and changing
	 * activation to be combined in one command. activate should be
	 * done separately, but this is here to avoid breaking commands that
	 * used this.
	 *
	 * FIXME: wrap this in a config setting that we can disable by default
	 * to phase this out?
	 */
	if (arg_is_set(cmd, activate_ARG)) {
		log_warn("WARNING: Combining activation change with other commands is not advised.");
		ret = lvchange_activate_cmd(cmd, argc, argv);
	}

	return ret;
}
/*
 * Per-LV worker for lvchange --syncaction: forward the requested
 * action string to the raid kernel target.
 */
static int _lvchange_syncaction_single(struct cmd_context *cmd,
				       struct logical_volume *lv,
				       struct processing_handle *handle)
{
	const char *action = arg_str_value(cmd, syncaction_ARG, NULL);

	if (!lv_raid_message(lv, action))
		return_ECMD_FAILED;

	return ECMD_PROCESSED;
}
/*
 * process_each_lv check callback for --syncaction: accept visible LVs,
 * and hidden LVs only when they were named explicitly.
 */
static int _lvchange_syncaction_check(struct cmd_context *cmd,
				      struct logical_volume *lv,
				      struct processing_handle *handle,
				      int lv_is_named_arg)
{
	return (lv_is_visible(lv) || lv_is_named_arg) ? 1 : 0;
}
/* Entry point for lvchange --syncaction. */
int lvchange_syncaction_cmd(struct cmd_context *cmd, int argc, char **argv)
{
	return process_each_lv(cmd, argc, argv, NULL, NULL, READ_FOR_UPDATE,
			       NULL, &_lvchange_syncaction_check, &_lvchange_syncaction_single);
}
/* Per-LV worker for lvchange --rebuild. */
static int _lvchange_rebuild_single(struct cmd_context *cmd,
				    struct logical_volume *lv,
				    struct processing_handle *handle)
{
	if (!_lvchange_rebuild(lv))
		return_ECMD_FAILED;

	return ECMD_PROCESSED;
}
/*
 * process_each_lv check callback for --rebuild: accept visible LVs,
 * and hidden LVs only when they were named explicitly.
 */
static int _lvchange_rebuild_check(struct cmd_context *cmd,
				   struct logical_volume *lv,
				   struct processing_handle *handle,
				   int lv_is_named_arg)
{
	return (lv_is_visible(lv) || lv_is_named_arg) ? 1 : 0;
}
/* Entry point for lvchange --rebuild (raid sub-LVs on the named PVs). */
int lvchange_rebuild_cmd(struct cmd_context *cmd, int argc, char **argv)
{
	return process_each_lv(cmd, argc, argv, NULL, NULL, READ_FOR_UPDATE,
			       NULL, &_lvchange_rebuild_check, &_lvchange_rebuild_single);
}
/*
 * Per-LV worker for lvchange --monitor/--poll.
 */
static int _lvchange_monitor_poll_single(struct cmd_context *cmd,
					 struct logical_volume *lv,
					 struct processing_handle *handle)
{
	/* --monitor: apply the dmeventd monitoring mode. */
	if (arg_is_set(cmd, monitor_ARG) && !_lvchange_monitoring(cmd, lv))
		return_ECMD_FAILED;

	/* --poll: kick off background polling for this LV. */
	if (arg_is_set(cmd, poll_ARG) && !_lvchange_background_polling(cmd, lv))
		return_ECMD_FAILED;

	return ECMD_PROCESSED;
}
/*
 * process_each_lv check callback for --monitor/--poll: accept visible
 * LVs, and hidden LVs only when they were named explicitly.
 */
static int _lvchange_monitor_poll_check(struct cmd_context *cmd,
					struct logical_volume *lv,
					struct processing_handle *handle,
					int lv_is_named_arg)
{
	return (lv_is_visible(lv) || lv_is_named_arg) ? 1 : 0;
}
/*
 * Entry point for lvchange --monitor/--poll (no metadata update:
 * process_each_lv is invoked without READ_FOR_UPDATE).
 */
int lvchange_monitor_poll_cmd(struct cmd_context *cmd, int argc, char **argv)
{
	cmd->handles_missing_pvs = 1;

	return process_each_lv(cmd, argc, argv, NULL, NULL, 0, NULL,
			       &_lvchange_monitor_poll_check,
			       &_lvchange_monitor_poll_single);
}
/* Per-LV worker for lvchange --persistent/--major/--minor. */
static int _lvchange_persistent_single(struct cmd_context *cmd,
				       struct logical_volume *lv,
				       struct processing_handle *handle)
{
	if (!_lvchange_persistent(cmd, lv))
		return_ECMD_FAILED;

	return ECMD_PROCESSED;
}
/*
 * process_each_lv check callback for --persistent: hidden LVs are
 * rejected; complain only for explicitly named ones.
 */
static int _lvchange_persistent_check(struct cmd_context *cmd,
				      struct logical_volume *lv,
				      struct processing_handle *handle,
				      int lv_is_named_arg)
{
	if (lv_is_visible(lv))
		return 1;

	if (lv_is_named_arg)
		log_error("Operation not permitted (%s %d) on hidden LV %s.",
			  cmd->command->command_line_id,
			  cmd->command->command_line_enum,
			  display_lvname(lv));

	return 0;
}
/*
 * Entry point for lvchange --persistent: modifies metadata, so the
 * VG is read for update.
 */
int lvchange_persistent_cmd(struct cmd_context *cmd, int argc, char **argv)
{
	cmd->handles_missing_pvs = 1;

	return process_each_lv(cmd, argc, argv, NULL, NULL, READ_FOR_UPDATE, NULL,
			       &_lvchange_persistent_check,
			       &_lvchange_persistent_single);
}
/*
 * Generic fallback entry point.  Every lvchange variant should be
 * dispatched through its own lvchange_*_cmd() function; reaching this
 * point means the command definition table lacks a function pointer.
 */
int lvchange(struct cmd_context *cmd, int argc, char **argv)
{
	log_error(INTERNAL_ERROR "Missing function for command definition %s.",
		  cmd->command->command_line_id);

	return ECMD_FAILED;
}