1
0
mirror of git://sourceware.org/git/lvm2.git synced 2025-03-10 16:58:47 +03:00

vgsplit: support for VDO volumes

Enable support and ensure VDO always moves with VDOPOOL.
This commit is contained in:
Zdenek Kabelac 2020-09-24 20:49:18 +02:00
parent 502b895bb4
commit fc9e732811
2 changed files with 45 additions and 0 deletions

View File

@ -1,5 +1,6 @@
Version 2.03.11 -
==================================
Enable vgsplit for VDO volumes.
Lvextend of vdo pool volumes ensures at least 1 new VDO slab is added.
Use revert_lv() on reload error path after vg_revert().
Configure --with-integrity enabled.

View File

@ -125,6 +125,10 @@ static int _move_lvs(struct volume_group *vg_from, struct volume_group *vg_to)
lv_is_thin_volume(lv))
continue;
if (lv_is_vdo_pool(lv) ||
lv_is_vdo(lv))
continue;
if (lv_is_cache(lv) || lv_is_cache_pool(lv))
/* further checks by _move_cache() */
continue;
@ -374,6 +378,42 @@ static int _move_thins(struct volume_group *vg_from,
return 1;
}
/*
 * Move VDO volumes and VDO pools from @vg_from to @vg_to when their
 * underlying data LV has allocations on any PV belonging to @vg_to.
 * Returns 1 on success, 0 on failure.
 */
static int _move_vdos(struct volume_group *vg_from,
		      struct volume_group *vg_to)
{
	struct dm_list *lvh, *lvht;
	struct logical_volume *lv, *vdo_data_lv;
	struct lv_segment *seg;

	/* Safe iteration: _move_one_lv() may unlink entries and adjust lvht. */
	dm_list_iterate_safe(lvh, lvht, &vg_from->lvs) {
		lv = dm_list_item(lvh, struct lv_list)->lv;

		if (!lv_is_vdo(lv) && !lv_is_vdo_pool(lv))
			continue;

		seg = first_seg(lv);
		/*
		 * For a VDO LV descend through its pool to the data LV;
		 * for a VDO pool the data LV is its direct sub-LV.
		 */
		vdo_data_lv = lv_is_vdo(lv)
			? seg_lv(first_seg(seg_lv(seg, 0)), 0)
			: seg_lv(seg, 0);

		/* Ignore, if no allocations on PVs of @vg_to */
		if (!lv_is_on_pvs(vdo_data_lv, &vg_to->pvs))
			continue;

		if (!_move_one_lv(vg_from, vg_to, lvh, &lvht))
			return_0;
	}

	return 1;
}
static int _move_cache(struct volume_group *vg_from,
struct volume_group *vg_to)
{
@ -621,6 +661,10 @@ int vgsplit(struct cmd_context *cmd, int argc, char **argv)
if (!(_move_thins(vg_from, vg_to)))
goto_bad;
/* Move required vdo pools across */
if (!(_move_vdos(vg_from, vg_to)))
goto_bad;
/* Move required cache LVs across */
if (!(_move_cache(vg_from, vg_to)))
goto_bad;