mirror of git://sourceware.org/git/lvm2.git
RAID: Limit replacement of devices when array is not in-sync.
If a RAID array is not in-sync, replacing devices should not be allowed as a
general rule.  This is because the contents used to populate the incoming
device may be undefined, since the devices being read were not in-sync.  The
kernel enforces this rule, unless overridden, by not allowing the creation of
an array that is not in-sync and includes a device that needs to be rebuilt.

Since we cannot know the sync state of an LV if it is inactive, we must also
enforce the rule that an array must be active to replace devices.  That
leaves us with the following conditions:
1) never allow replacement or repair of devices if the LV is inactive
2) never allow replacement if the LV is not in-sync
3) allow repair if the LV is not in-sync, but warn that contents may not be
   recoverable.

In the case where a user performs the repair on the command line via
'lvconvert --repair', the warning is printed before the user is prompted
whether they would like to replace the device(s).  If the repair is automated
(i.e. via dmeventd with policy "allocate"), the device is replaced if
possible and the warning is printed.
This commit is contained in:
parent 0379c480e0
commit 970dfbcd69
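
Read on its own, the policy in the commit message condenses to a small decision function. The sketch below is only a stand-alone model for orientation: replace_allowed(), its boolean/percent arguments, and the plain stderr messages are illustrative and are not lvm2 API; the real checks are the ones added to lv_raid_replace() and lvconvert_raid() in the diff that follows.

/* Stand-alone model of the replacement/repair policy described above.
 * Names and signatures here are illustrative, not lvm2 internals. */
#include <stdbool.h>
#include <stdio.h>

static bool replace_allowed(bool lv_active, int sync_percent, bool is_repair)
{
	/* 1) never allow replacement or repair if the LV is inactive */
	if (!lv_active) {
		fprintf(stderr, "LV must be active to perform this operation.\n");
		return false;
	}

	/* An in-sync array may have devices replaced or repaired freely. */
	if (sync_percent == 100)
		return true;

	/* 2) never allow plain replacement if the LV is not in-sync */
	if (!is_repair) {
		fprintf(stderr, "Unable to replace devices while not in-sync.\n");
		return false;
	}

	/* 3) allow repair, but warn that contents may not be recoverable */
	fprintf(stderr, "WARNING: LV is not in-sync.\n");
	fprintf(stderr, "WARNING: Portions of the array may be unrecoverable.\n");
	return true;
}

int main(void)
{
	/* An active array at 80 percent sync: repair proceeds with warnings,
	 * plain replacement is refused. */
	printf("repair allowed: %d\n", replace_allowed(true, 80, true));
	printf("replace allowed: %d\n", replace_allowed(true, 80, false));
	return 0;
}
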
@@ -1,5 +1,7 @@
 Version 2.02.99 -
 ===================================
+  Limit RAID device replacement to repair only if LV is not in-sync.
+  Disallow RAID device replacement or repair on inactive LVs.
   Fix possible race while removing metadata from lvmetad.
   Fix possible deadlock when querying and updating lvmetad at the same time.
   Check lvmcache_info_from_pvid and recall only when needed in _pv_read.
@@ -1617,6 +1617,18 @@ int lv_raid_replace(struct logical_volume *lv,
 	dm_list_init(&new_meta_lvs);
 	dm_list_init(&new_data_lvs);
 
+	if (!lv_is_active(lv)) {
+		log_error("%s/%s must be active to perform this operation.",
+			  lv->vg->name, lv->name);
+		return 0;
+	}
+
+	if (!mirror_in_sync() && !_raid_in_sync(lv)) {
+		log_error("Unable to replace devices in %s/%s while it is"
+			  " not in-sync.", lv->vg->name, lv->name);
+		return 0;
+	}
+
 	/*
 	 * How many sub-LVs are being removed?
 	 */
@@ -1566,6 +1566,7 @@ static int lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *lp
 	struct dm_list *failed_pvs;
 	struct cmd_context *cmd = lv->vg->cmd;
 	struct lv_segment *seg = first_seg(lv);
+	percent_t sync_percent;
 
 	if (!arg_count(cmd, type_ARG))
 		lp->segtype = seg->segtype;
@@ -1623,6 +1624,32 @@ static int lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *lp
 		return lv_raid_replace(lv, lp->replace_pvh, lp->pvh);
 
 	if (arg_count(cmd, repair_ARG)) {
+		if (!lv_is_active(lv)) {
+			log_error("%s/%s must be active to perform"
+				  " this operation.", lv->vg->name, lv->name);
+			return 0;
+		}
+
+		if (!lv_raid_percent(lv, &sync_percent)) {
+			log_error("Unable to determine sync status of %s/%s.",
+				  lv->vg->name, lv->name);
+			return 0;
+		}
+
+		if (sync_percent != PERCENT_100) {
+			log_error("WARNING: %s/%s is not in-sync.",
+				  lv->vg->name, lv->name);
+			log_error("WARNING: Portions of the array may"
+				  " be unrecoverable.");
+
+			/*
+			 * The kernel will not allow a device to be replaced
+			 * in an array that is not in-sync unless we override
+			 * by forcing the array to be considered "in-sync".
+			 */
+			init_mirror_in_sync(1);
+		}
+
 		_lvconvert_raid_repair_ask(cmd, &replace);
 
 		if (replace) {
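
The comment in the last hunk is the crux of condition 3): on the 'lvconvert --repair' path the code warns and then calls init_mirror_in_sync(1), and that override is what lets the new !mirror_in_sync() && !_raid_in_sync(lv) guard in lv_raid_replace() fall through for a not-in-sync array. A stand-alone sketch of that interaction follows; the global flag and the two helpers only mimic the lvm2 functions of the same names, and raid_replace() stands in for lv_raid_replace().

#include <stdbool.h>
#include <stdio.h>

/* Mimics lvm2's init_mirror_in_sync()/mirror_in_sync() override flag. */
static bool _in_sync_override;
static void init_mirror_in_sync(bool v) { _in_sync_override = v; }
static bool mirror_in_sync(void) { return _in_sync_override; }

/* Stands in for the guard this commit adds to lv_raid_replace(). */
static bool raid_replace(bool raid_in_sync)
{
	if (!mirror_in_sync() && !raid_in_sync) {
		fprintf(stderr, "Unable to replace devices while not in-sync.\n");
		return false;
	}
	printf("replacing device(s)\n");
	return true;
}

int main(void)
{
	bool raid_in_sync = false;	/* array still rebuilding */

	raid_replace(raid_in_sync);	/* plain replacement: refused */

	/* Repair path: warn, then force the array to be treated as in-sync
	 * so the same guard no longer rejects the operation. */
	fprintf(stderr, "WARNING: Portions of the array may be unrecoverable.\n");
	init_mirror_in_sync(true);
	raid_replace(raid_in_sync);	/* repair: proceeds */
	return 0;
}

In the patch itself the override is set only after the warnings are printed and before _lvconvert_raid_repair_ask() prompts the user, which matches the ordering described in the commit message.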