mirror of git://sourceware.org/git/lvm2.git

pvmove: enhance accepted states of active LVs

Improve pvmove to accept 'locally' active LVs together with
exclusively active LVs.

In the first phase it now recognizes whether an exclusive pvmove is
needed. In that case only LVs that are active exclusively, or active
locally with no remote activation state, are acceptable; all others
are skipped.

While the pvmove mirror is being built up, 'activation' steps are
taken, so on any problem individual LVs can now be skipped from the
pvmove operation rather than the whole operation being given up.

Also, when pvmove is restarted, the need for an exclusive pvmove is
recognized again, and exclusive pvmove is used whenever there is an
LV that requires exclusive activation.
commit 552e60b3a1
parent a2d2fe3a8c
Author: Zdenek Kabelac
Date:   2018-02-15 13:39:58 +01:00
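
In outline, the per-LV logic this commit introduces in
_set_up_pvmove_lv() works as follows. This is a simplified sketch
paraphrasing the diff below, with error handling and segment-type
filtering omitted and the loop structure condensed; the names
needs_exclusive and holder come from the patch itself:

	int needs_exclusive = *exclusive;
	const struct logical_volume *holder;
	struct lv_list *lvl;

	dm_list_iterate_items(lvl, &vg->lvs) {
		struct logical_volume *lv = lvl->lv;

		/* 1. Any LV that demands exclusive activation turns
		 *    the whole pvmove exclusive. */
		if (seg_only_exclusive(first_seg(lv)) ||
		    lv_is_origin(lv) || lv_is_cow(lv))
			needs_exclusive = 1;

		/* 2. Activation problems now skip the LV instead of
		 *    aborting the whole operation. */
		holder = lv_lock_holder(lv);
		if (needs_exclusive) {
			if (lv_is_active_remotely(holder) ||
			    (!lv_is_active_locally(holder) &&
			     !activate_lv_excl_local(cmd, holder)))
				continue;	/* skip this LV */
		} else if (!activate_lv(cmd, holder))
			continue;		/* skip this LV */

		/* ... the LV joins the pvmove mirror ... */
	}

	/* 3. The outcome is reported back to the caller. */
	if (needs_exclusive)
		*exclusive = 1;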


@@ -340,8 +340,8 @@ static struct logical_volume *_set_up_pvmove_lv(struct cmd_context *cmd,
 	uint32_t log_count = 0;
 	int lv_found = 0;
 	int lv_skipped = 0;
-	int lv_active_count = 0;
-	int lv_exclusive_count = 0;
+	int needs_exclusive = *exclusive;
+	const struct logical_volume *holder;
 
 	/* FIXME Cope with non-contiguous => splitting existing segments */
 	if (!(lv_mirr = lv_create_empty("pvmove%d", NULL,
@@ -392,8 +392,13 @@ static struct logical_volume *_set_up_pvmove_lv(struct cmd_context *cmd,
 			return NULL;
 		}
 
-		if (seg_is_raid(first_seg(lv)) ||
-		    seg_is_mirrored(first_seg(lv))) {
+		seg = first_seg(lv);
+
+		/* Presence of an exclusive LV decides whether pvmove must also be exclusive */
+		if ((seg_only_exclusive(seg) || lv_is_origin(lv) || lv_is_cow(lv)))
+			needs_exclusive = 1;
+
+		if (seg_is_raid(seg) || seg_is_mirrored(seg)) {
 			dm_list_init(&trim_list);
 
 			if (!get_pv_list_for_lv(vg->cmd->mem, lv, &trim_list))
@@ -432,6 +437,14 @@ static struct logical_volume *_set_up_pvmove_lv(struct cmd_context *cmd,
 			lv_found = 1;
 		}
 
+		seg = first_seg(lv);
+
+		if (seg_is_cache(seg) || seg_is_cache_pool(seg) ||
+		    seg_is_mirrored(seg) || seg_is_raid(seg) ||
+		    seg_is_snapshot(seg) ||
+		    seg_is_thin(seg) || seg_is_thin_pool(seg))
+			continue; /* bottom-level LVs only... */
+
 		if (!lv_is_on_pvs(lv, source_pvl))
 			continue;
 
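
The filter added above restricts the main loop to bottom-level LVs:
top-level cache, mirror, raid, snapshot, and thin LVs are passed over
here, because their data actually lives in sub-LVs (for a raid1 LV,
its _rimage_/_rmeta_ sub-LVs), which the same loop visits on their own
iterations. The old code performed a similar pass-over further down
(removed in the next hunk); the new check runs earlier and also covers
snapshots.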
@@ -441,47 +454,36 @@ static struct logical_volume *_set_up_pvmove_lv(struct cmd_context *cmd,
 				continue;
 			}
 		}
 
-		if (vg_is_clustered(vg) && lv_is_visible(lv)) {
-			if (lv_is_active_exclusive_locally(lv)) {
-				if (lv_active_count) {
-					log_error("Cannot move in clustered VG %s "
-						  "if some LVs are activated "
-						  "exclusively while others don't.",
-						  vg->name);
-					return NULL;
-				}
-
-				lv_exclusive_count++;
-			} else if (lv_is_active(lv)) {
-				if (seg_only_exclusive(first_seg(lv))) {
-					lv_skipped = 1;
-					log_print_unless_silent("Skipping LV %s which is active, "
-								"but not locally exclusively.",
-								display_lvname(lv));
-					continue;
-				}
-
-				if (*exclusive) {
-					log_error("Cannot move in clustered VG %s, "
-						  "clustered mirror (cmirror) not detected "
-						  "and LVs are activated non-exclusively.",
-						  vg->name);
-					return NULL;
-				}
-
-				lv_active_count++;
-			}
-		}
-
-		seg = first_seg(lv);
-		if (seg_is_raid(seg) || seg_is_mirrored(seg) ||
-		    seg_is_cache(seg) || seg_is_cache_pool(seg) ||
-		    seg_is_thin(seg) || seg_is_thin_pool(seg))
-			/*
-			 * Pass over top-level LVs - they were handled.
-			 * Allow sub-LVs to proceed.
-			 */
-			continue;
+		holder = lv_lock_holder(lv);
+
+		if (needs_exclusive) {
+			/* With exclusive pvmove skip the LV when:
+			 * - it is active remotely
+			 * - it is not active locally and cannot be activated exclusively locally
+			 *
+			 * Note: lvm2 can proceed with an exclusive pvmove for 'just' locally active
+			 * LVs when the LV is NOT active anywhere else, since LOCKED LVs cannot be
+			 * activated by the user later.
+			 */
+			if (lv_is_active_remotely(holder) ||
+			    (!lv_is_active_locally(holder) && !activate_lv_excl_local(cmd, holder))) {
+				lv_skipped = 1;
+				log_print_unless_silent("Skipping LV %s which is not locally exclusive%s.",
+							display_lvname(lv),
+							/* Report missing cmirrord cases that mattered.
+							 * With exclusive LV types cmirrord would not help. */
+							(*exclusive &&
+							 !lv_is_origin(holder) &&
+							 !seg_only_exclusive(first_seg(holder))) ?
+							" and clustered mirror (cmirror) not detected" : "");
+				continue;
+			}
+		} else if (!activate_lv(cmd, holder)) {
+			lv_skipped = 1;
+			log_print_unless_silent("Skipping LV %s which cannot be activated.",
+						display_lvname(lv));
+			continue;
+		}
 
 		if (!_insert_pvmove_mirrors(cmd, lv_mirr, source_pvl, lv,
 					    *lvs_changed))
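
Note that both branches test the activation state of lv_lock_holder(lv)
rather than of the LV itself; for stacked LVs the activation lock is
held by a top-level LV (e.g. the thin pool for its thin volumes), so a
whole stack is treated as one unit. A tiny self-contained analogy of
that idea, using hypothetical stub types rather than the real lvm2
structures:

	#include <stdio.h>

	/* Hypothetical stand-in for a logical volume. */
	struct fake_lv {
		const char *name;
		struct fake_lv *holder;	/* NULL => holds its own lock */
	};

	/* Analogy of lv_lock_holder(): walk up to the LV whose lock
	 * governs activation of the whole stack. */
	static const struct fake_lv *fake_lock_holder(const struct fake_lv *lv)
	{
		while (lv->holder)
			lv = lv->holder;
		return lv;
	}

	int main(void)
	{
		struct fake_lv pool = { "pool", NULL };
		struct fake_lv thin = { "thin1", &pool };

		/* Activation checks for 'thin1' are made on 'pool'. */
		printf("%s -> %s\n", thin.name, fake_lock_holder(&thin)->name);
		return 0;
	}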
@@ -517,7 +519,7 @@ static struct logical_volume *_set_up_pvmove_lv(struct cmd_context *cmd,
 		return NULL;
 	}
 
-	if (lv_exclusive_count)
+	if (needs_exclusive)
 		*exclusive = 1;
 
 	return lv_mirr;
@@ -600,6 +602,8 @@ static int _pvmove_setup_single(struct cmd_context *cmd,
 	struct dm_list *lvs_changed;
 	struct logical_volume *lv_mirr;
 	struct logical_volume *lv = NULL;
+	struct lv_list *lvl;
+	const struct logical_volume *lvh;
 	const char *pv_name = pv_dev_name(pv);
 	unsigned flags = PVMOVE_FIRST_TIME;
 	unsigned exclusive;
@@ -661,6 +665,13 @@ static int _pvmove_setup_single(struct cmd_context *cmd,
 		goto out;
 	}
 
+	dm_list_iterate_items(lvl, lvs_changed) {
+		lvh = lv_lock_holder(lvl->lv);
+		/* An exclusive LV decides whether pvmove must also be exclusive */
+		if (lv_is_origin(lvh) || seg_only_exclusive(first_seg(lvh)))
+			exclusive = 1;
+	}
+
 	/* Ensure mirror LV is active */
 	if (!_activate_lv(cmd, lv_mirr, exclusive)) {
 		log_error("ABORTING: Temporary mirror activation failed.");