mirror of git://sourceware.org/git/lvm2.git
bb45e33518
Previously an explicit call to backup() was required (and was often either forgotten or over-used). With this patch, the need to store a backup is remembered at vg_commit(), and once the VG is unlocked the committed metadata is automatically stored in the backup file. This may change some messages printed by commands, since the backup is now taken later.
197 lines
5.0 KiB
C
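The commit message above describes a deferred-backup pattern: vg_commit() only records that a backup is due, and the backup file is written once the VG lock is dropped. A minimal, self-contained sketch of that pattern follows; all names in it (struct vg, vg_commit, vg_unlock, store_backup) are illustrative stand-ins, not the real LVM2 API.

        /*
         * Sketch of the deferred-backup pattern (illustrative names only,
         * not the real LVM2 internals): commit records *that* a backup is
         * needed; the backup itself is written when the lock is dropped.
         */
        #include <stdio.h>
        #include <stdbool.h>

        struct vg {
                const char *name;
                bool needs_backup;      /* set by commit, consumed on unlock */
        };

        static void store_backup(const struct vg *vg)
        {
                printf("writing backup file for VG %s\n", vg->name);
        }

        static void vg_commit(struct vg *vg)
        {
                /* Commit metadata, then just remember that a backup is due. */
                vg->needs_backup = true;
        }

        static void vg_unlock(struct vg *vg)
        {
                /* The backup happens here, after the commit, on unlock. */
                if (vg->needs_backup) {
                        store_backup(vg);
                        vg->needs_backup = false;
                }
        }

        int main(void)
        {
                struct vg vg = { .name = "vg0", .needs_backup = false };

                vg_commit(&vg);         /* no explicit backup() call needed */
                vg_unlock(&vg);         /* backup written automatically */
                return 0;
        }

The benefit is that command code such as the functions in this file can simply write and commit metadata without remembering to call backup() themselves.
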
/*
 * Copyright (C) 2001-2004 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "tools.h"

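/* Command-private state handed to the per-VG callbacks via handle->custom_handle. */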
struct vgextend_params {
        struct pvcreate_params pp;
};

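/*
 * Clear the MISSING_PV flag on a PV that has reappeared.  Returns 1 if
 * the flag was cleared, 0 if the PV is not in the VG, is still missing
 * on disk, or was never flagged as missing.
 */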
static int _restore_pv(struct volume_group *vg, const char *pv_name)
{
        struct pv_list *pvl = NULL;
        pvl = find_pv_in_vg(vg, pv_name);
        if (!pvl) {
                log_warn("WARNING: PV %s not found in VG %s", pv_name, vg->name);
                return 0;
        }

        if (!pvl->pv->dev) {
                log_warn("WARNING: The PV %s is still missing.", pv_name);
                return 0;
        }

        if (pvl->pv->status & MISSING_PV)
                goto clear_flag;

        /*
         * When the PV has no used PEs, vg_read clears the MISSING_PV flag
         * and sets this so we know.
         */
        if (pvl->pv->unused_missing_cleared)
                goto clear_flag;

        log_warn("WARNING: PV %s was not missing in VG %s", pv_name, vg->name);
        return 0;

clear_flag:
        pvl->pv->status &= ~MISSING_PV;
        return 1;
}

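/*
 * process_each_vg() callback for vgextend --restoremissing: clear the
 * MISSING_PV flag on each named PV that has come back, then write and
 * commit the updated metadata.
 */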
static int _vgextend_restoremissing(struct cmd_context *cmd __attribute__((unused)),
                                    const char *vg_name, struct volume_group *vg,
                                    struct processing_handle *handle)
{
        struct vgextend_params *vp = (struct vgextend_params *) handle->custom_handle;
        struct pvcreate_params *pp = &vp->pp;
        int fixed = 0;
        unsigned i;

        for (i = 0; i < pp->pv_count; i++)
                if (_restore_pv(vg, pp->pv_names[i]))
                        fixed++;

        if (!fixed) {
                log_error("No PV has been restored.");
                return ECMD_FAILED;
        }

        if (!vg_write(vg) || !vg_commit(vg))
                return_ECMD_FAILED;

        log_print_unless_silent("Volume group \"%s\" successfully extended", vg_name);

        return ECMD_PROCESSED;
}

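/*
 * process_each_vg() callback for the normal vgextend path: add each new
 * PV to the VG, optionally adjusting the preferred number of metadata
 * copies when --metadataignore changes how many are actually used, then
 * write and commit the metadata.
 */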
static int _vgextend_single(struct cmd_context *cmd, const char *vg_name,
                            struct volume_group *vg, struct processing_handle *handle)
{
        struct vgextend_params *vp = (struct vgextend_params *) handle->custom_handle;
        struct pvcreate_params *pp = &vp->pp;
        uint32_t mda_copies;
        uint32_t mda_used;
        int ret = ECMD_FAILED;

        if (arg_is_set(cmd, metadataignore_ARG) &&
            (pp->force == PROMPT) && !pp->yes &&
            (vg_mda_copies(vg) != VGMETADATACOPIES_UNMANAGED) &&
            (yes_no_prompt("Override preferred number of copies of VG %s metadata? [y/n]: ", vg_name) == 'n')) {
                log_error("Volume group %s not changed", vg_name);
                return ECMD_FAILED;
        }

        if (!vg_extend_each_pv(vg, pp))
                goto_out;

        if (arg_is_set(cmd, metadataignore_ARG)) {
                mda_copies = vg_mda_copies(vg);
                mda_used = vg_mda_used_count(vg);

                if ((mda_copies != VGMETADATACOPIES_UNMANAGED) &&
                    (mda_copies != mda_used)) {
                        log_warn("WARNING: Changing preferred number of copies of VG %s metadata from %" PRIu32 " to %" PRIu32,
                                 vg_name, mda_copies, mda_used);
                        vg_set_mda_copies(vg, mda_used);
                }
        }

        log_verbose("Volume group \"%s\" will be extended by %d new physical volumes", vg_name, pp->pv_count);

        if (!vg_write(vg) || !vg_commit(vg))
                goto_out;

        log_print_unless_silent("Volume group \"%s\" successfully extended", vg_name);
        ret = ECMD_PROCESSED;
out:
        return ret;
}

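/*
 * Entry point for the vgextend command: argv[0] names the VG and the
 * remaining arguments name the PVs to add (or, with --restoremissing,
 * the missing PVs to restore).
 */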
int vgextend(struct cmd_context *cmd, int argc, char **argv)
{
        struct processing_handle *handle;
        struct vgextend_params vp;
        struct pvcreate_params *pp = &vp.pp;
        unsigned restoremissing = arg_is_set(cmd, restoremissing_ARG);
        const char *vg_name;
        int ret;

        if (!argc) {
                log_error("Please enter volume group name and "
                          "physical volume(s)");
                return EINVALID_CMD_LINE;
        }

        vg_name = skip_dev_dir(cmd, argv[0], NULL);
        argc--;
        argv++;

        pvcreate_params_set_defaults(pp);

        if (!pvcreate_params_from_args(cmd, pp))
                return EINVALID_CMD_LINE;

        pp->pv_count = argc;
        pp->pv_names = argv;

        /* Don't create a new PV on top of an existing PV like pvcreate does. */
        pp->preserve_existing = 1;

        /* pvcreate within vgextend cannot be forced. */
        pp->force = PROMPT;

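        /* Metadata changes require the global lock held exclusively ("ex"). */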
        if (!lock_global(cmd, "ex"))
                return_ECMD_FAILED;

        clear_hint_file(cmd);

        cmd->edit_devices_file = 1;

        lvmcache_label_scan(cmd);

        if (!(handle = init_processing_handle(cmd, NULL))) {
                log_error("Failed to initialize processing handle.");
                return ECMD_FAILED;
        }

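        /*
         * Unless we are only restoring missing PVs, initialize each named
         * device as a PV (prompting or wiping according to the pvcreate
         * parameters gathered above).
         */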
        if (!restoremissing) {
                if (!pvcreate_each_device(cmd, handle, pp)) {
                        destroy_processing_handle(cmd, handle);
                        return_ECMD_FAILED;
                }
        }

        unlock_devices_file(cmd);

        /*
         * It is always ok to add new PVs to a VG - even if there are
         * missing PVs.  No LVs are affected by this operation, but
         * repair processes - particularly for RAID segtypes - can
         * be facilitated.
         */
        cmd->handles_missing_pvs = 1;

        handle->custom_handle = &vp;

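        /*
         * Lock and read the named VG, then run the appropriate callback
         * against it.
         */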
        ret = process_each_vg(cmd, 0, NULL, vg_name, NULL,
                              READ_FOR_UPDATE | PROCESS_SKIP_SCAN, 0, handle,
                              restoremissing ? &_vgextend_restoremissing : &_vgextend_single);

        destroy_processing_handle(cmd, handle);

        return ret;
}