
vdo: enhance activation with layer -vpool

Enhance the 'activation' experience for a VDO pool to more closely match
what happens for thin-pools, where we use a 'fake' LV to keep the pool
running even when no thin LVs are active. This gives the user a choice
whether to keep the thin-pool running (avoiding a possibly lengthy
activation/deactivation process).

As we plan to support multiple VDO LVs mapped into a single VDO pool,
we want to give the user the same experience and 'use-pattern' as with thin-pools.

This patch adds the option to activate the VDO pool alone, without
activating the VDO LV.

Also, thanks to the 'fake' layering LV, we can protect the VDO pool from
commands like 'mkfs' that require exclusive access to the volume, since
such access is no longer possible.
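
As a rough illustration (a sketch only, not lvm2 code: the VG/LV names are
made up, and the 1024-sector header size comes from the note below), the
public pool name ends up as nothing more than a tiny read-only linear mapping
over the start of the private '-vpool' layer, so a stray mkfs on it cannot
reach the pool's data:

    /* sketch.c - print the dmsetup-style table the public VDO pool LV would
     * carry; "vg-vdopool" and "vg-vdopool-vpool" are hypothetical names. */
    #include <stdio.h>
    #include <inttypes.h>

    int main(void)
    {
            const uint64_t header_sectors = 1024; /* exposed 'empty' pool header */

            /* roughly what 'dmsetup table vg-vdopool' would show */
            printf("0 %" PRIu64 " linear /dev/mapper/vg-vdopool-vpool 0\n",
                   header_sectors);
            return 0;
    }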

Note: the VDO pool contains 1024 initial sectors as an 'empty' header; this
header is also exposed in the layered LV (as a read-only LV).
For blkid we are identified as an LV with a UUID suffix - thus a private DM
device of lvm2 - so we do not need to store any extra info in this
header space (zero is good enough).
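
For the blkid point above, here is a minimal sketch (plain C, not lvm2's
build_dm_uuid(); the lvid string is invented, only the 'LVM-' prefix plus
layer-suffix scheme matches what the diff below uses for the VDO pool) of how
the layered device's DM UUID gets a suffix marking it as lvm2-private:

    /* uuid_sketch.c - DM uuid layering scheme, illustrative only */
    #include <stdio.h>

    static void build_uuid(char *buf, size_t len,
                           const char *lvid, const char *layer)
    {
            if (layer)
                    snprintf(buf, len, "LVM-%s-%s", lvid, layer); /* private layered device */
            else
                    snprintf(buf, len, "LVM-%s", lvid);           /* public LV */
    }

    int main(void)
    {
            char uuid[140];

            /* hypothetical lvid; real ones are the VG uuid + LV uuid */
            build_uuid(uuid, sizeof(uuid), "EXAMPLEVGUUIDLVUUID", "pool");
            printf("%s\n", uuid); /* trailing "-pool" => blkid/udev treat it as private */
            return 0;
    }
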
Zdenek Kabelac 2019-09-14 01:13:33 +02:00
parent 66f69e766e
commit 6612d8dd5e
6 changed files with 45 additions and 12 deletions


@@ -1,5 +1,6 @@
 Version 2.03.06 -
 ================================
+  Allow standalone activation of VDO pool just like for thin-pools.
   Activate thin-pool layered volume as 'read-only' device.
   Ignore crypto devices with UUID signature CRYPT-SUBDEV.
   Enhance validation for thin and cache pool conversion and swapping.


@@ -794,6 +794,18 @@ int lv_info_with_seg_status(struct cmd_context *cmd,
                 return 1;
         }
 
+        if (lv_is_vdo_pool(lv)) {
+                /* Always collect status for '-vpool' */
+                if (_lv_info(cmd, lv, 1, &status->info, lv_seg, &status->seg_status, 0, 0) &&
+                    (status->seg_status.type == SEG_STATUS_VDO_POOL)) {
+                        /* There is -tpool device, but query 'active' state of 'fake' vdo-pool */
+                        if (!_lv_info(cmd, lv, 0, NULL, NULL, NULL, 0, 0))
+                                status->info.exists = 0; /* So VDO pool LV is not active */
+                }
+
+                return 1;
+        }
+
         return _lv_info(cmd, lv, 0, &status->info, lv_seg, &status->seg_status,
                         with_open_count, with_read_ahead);
 }
@@ -1342,7 +1354,7 @@ int lv_vdo_pool_status(const struct logical_volume *lv, int flush,
         int r = 0;
         struct dev_manager *dm;
 
-        if (!lv_info(lv->vg->cmd, lv, 0, NULL, 0, 0))
+        if (!lv_info(lv->vg->cmd, lv, 1, NULL, 0, 0))
                 return 0;
 
         log_debug_activation("Checking VDO pool status for LV %s.",


@@ -1991,7 +1991,7 @@ static uint16_t _get_udev_flags(struct dev_manager *dm, const struct logical_vol
                 /* New thin-pool is regular LV with -tpool UUID suffix. */
                 udev_flags |= DM_UDEV_DISABLE_DISK_RULES_FLAG |
                               DM_UDEV_DISABLE_OTHER_RULES_FLAG;
-        else if (layer || !lv_is_visible(lv) || lv_is_thin_pool(lv))
+        else if (layer || !lv_is_visible(lv) || lv_is_thin_pool(lv) || lv_is_vdo_pool(lv))
                 udev_flags |= DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG |
                               DM_UDEV_DISABLE_DISK_RULES_FLAG |
                               DM_UDEV_DISABLE_OTHER_RULES_FLAG;
@@ -2611,6 +2611,15 @@ static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
                 }
         }
 
+        if (lv_is_vdo_pool(lv)) {
+                /*
+                 * For both origin_only and !origin_only
+                 * skips test for -vpool-real and vpool-cow
+                 */
+                if (!_add_dev_to_dtree(dm, dtree, lv, lv_layer(lv)))
+                        return_0;
+        }
+
         if (lv_is_cache(lv)) {
                 if (!origin_only && !dm->activation && !dm->track_pending_delete) {
                         /* Setup callback for non-activation partial tree */
@@ -2682,7 +2691,8 @@ static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
                         if (seg_type(seg, s) == AREA_LV && seg_lv(seg, s) &&
                             /* origin only for cache without pending delete */
                             (!dm->track_pending_delete || !lv_is_cache(lv)) &&
-                            !_add_lv_to_dtree(dm, dtree, seg_lv(seg, s), 0))
+                            !_add_lv_to_dtree(dm, dtree, seg_lv(seg, s),
+                                              lv_is_vdo_pool(seg_lv(seg, s)) ? 1 : 0))
                                 return_0;
                         if (seg_is_raid_with_meta(seg) && seg->meta_areas && seg_metalv(seg, s) &&
                             !_add_lv_to_dtree(dm, dtree, seg_metalv(seg, s), 0))
@@ -2908,8 +2918,11 @@ static int _add_layer_target_to_dtree(struct dev_manager *dm,
         if (!(layer_dlid = build_dm_uuid(dm->mem, lv, lv_layer(lv))))
                 return_0;
 
         /* Add linear mapping over layered LV */
-        if (!add_linear_area_to_dtree(dnode, lv->size, lv->vg->extent_size,
+        /* From VDO layer expose ONLY vdo pool header, we would need to use virtual size otherwise */
+        if (!add_linear_area_to_dtree(dnode, lv_is_vdo_pool(lv) ? first_seg(lv)->vdo_pool_header_size : lv->size,
+                                      lv->vg->extent_size,
                                       lv->vg->cmd->use_linear_target,
                                       lv->vg->name, lv->name) ||
             !dm_tree_node_add_target_area(dnode, NULL, layer_dlid, 0))
@@ -3132,7 +3145,9 @@ static int _add_segment_to_dtree(struct dev_manager *dm,
                             /* origin only for cache without pending delete */
                             (!dm->track_pending_delete || !seg_is_cache(seg)) &&
                             !_add_new_lv_to_dtree(dm, dtree, seg_lv(seg, s),
-                                                  laopts, NULL))
+                                                  laopts,
+                                                  lv_is_vdo_pool(seg_lv(seg, s)) ?
+                                                  lv_layer(seg_lv(seg, s)) : NULL))
                                 return_0;
                         if (seg_is_raid_with_meta(seg) && seg->meta_areas && seg_metalv(seg, s) &&
                             !lv_is_raid_image_with_tracking(seg_lv(seg, s)) &&
@@ -3424,8 +3439,9 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
                 if (!_add_snapshot_target_to_dtree(dm, dnode, lv, laopts))
                         return_0;
         } else if (!layer && ((lv_is_thin_pool(lv) && !lv_is_new_thin_pool(lv)) ||
+                              lv_is_vdo_pool(lv) ||
                               lv_is_external_origin(lv))) {
-                /* External origin or 'used' Thin pool is using layer */
+                /* External origin or 'used' Thin pool or VDO pool is using layer */
                 if (!_add_new_lv_to_dtree(dm, dtree, lv, laopts, lv_layer(lv)))
                         return_0;
                 if (!_add_layer_target_to_dtree(dm, dnode, lv))
@@ -3438,6 +3454,10 @@ static int _add_new_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
                         if (max_stripe_size < seg->stripe_size * seg->area_count)
                                 max_stripe_size = seg->stripe_size * seg->area_count;
                 }
+
+                if (!layer && lv_is_vdo_pool(lv) &&
+                    !_add_layer_target_to_dtree(dm, dnode, lv))
+                        return_0;
         }
 
         /* Setup thin pool callback */
@@ -3705,7 +3725,10 @@ static int _tree_action(struct dev_manager *dm, const struct logical_volume *lv,
         /* Add all required new devices to tree */
         if (!_add_new_lv_to_dtree(dm, dtree, lv, laopts,
                                   (lv_is_origin(lv) && laopts->origin_only) ? "real" :
-                                  (lv_is_thin_pool(lv) && laopts->origin_only) ? "tpool" : NULL))
+                                  (laopts->origin_only &&
+                                   (lv_is_thin_pool(lv) ||
+                                    lv_is_vdo_pool(lv))) ?
+                                  lv_layer(lv) : NULL))
                 goto_out;
 
         /* Preload any devices required before any suspensions */


@@ -159,7 +159,7 @@ int parse_vdo_pool_status(struct dm_pool *mem, const struct logical_volume *vdo_
         status->data_usage = DM_PERCENT_INVALID;
 
         if (!(dm_name = dm_build_dm_name(mem, vdo_pool_lv->vg->name,
-                                         vdo_pool_lv->name, NULL))) {
+                                         vdo_pool_lv->name, lv_layer(vdo_pool_lv)))) {
                 log_error("Failed to build VDO DM name %s.",
                           display_lvname(vdo_pool_lv));
                 return 0;


@@ -259,7 +259,7 @@ char *build_dm_uuid(struct dm_pool *mem, const struct logical_volume *lv,
                 lv_is_thin_pool(lv) ? "pool" :
                 lv_is_thin_pool_data(lv) ? "tdata" :
                 lv_is_thin_pool_metadata(lv) ? "tmeta" :
-                lv_is_vdo_pool(lv) ? "vpool" :
+                lv_is_vdo_pool(lv) ? "pool" :
                 lv_is_vdo_pool_data(lv) ? "vdata" :
                 NULL;
 }


@@ -1400,9 +1400,6 @@ static int _lvchange_activate_check(struct cmd_context *cmd,
                 return 0;
         }
 
-        if (lv_is_vdo_pool(lv) && !lv_is_named_arg)
-                return 0; /* Skip VDO pool processing unless explicitely named */
-
         return 1;
 }