1
0
mirror of git://sourceware.org/git/lvm2.git synced 2025-01-03 05:18:29 +03:00

cache: warn if caching causes troubles

Certain stacks of cached LVs may have unexpected consequences.
So add a warning function called when LV is cached to detect
such cases and WARN user about them - the best we could do ATM.
This commit is contained in:
Zdenek Kabelac 2015-09-10 15:07:59 +02:00
parent e1edb5676e
commit 5911fa1d91
6 changed files with 32 additions and 7 deletions

View File

@ -1,5 +1,6 @@
Version 2.02.131 - Version 2.02.131 -
===================================== =====================================
Warn user when caching raid or thin pool data LV.
When layering LV, move LV flags with segments. When layering LV, move LV flags with segments.
Ignore persistent cache if configuration changed. (2.02.127) Ignore persistent cache if configuration changed. (2.02.127)
Fix devices/filter to be applied before disk-accessing filters. (2.02.112) Fix devices/filter to be applied before disk-accessing filters. (2.02.112)

View File

@ -1,5 +1,5 @@
/* /*
* Copyright (C) 2014 Red Hat, Inc. All rights reserved. * Copyright (C) 2014-2015 Red Hat, Inc. All rights reserved.
* *
* This file is part of LVM2. * This file is part of LVM2.
* *
@ -101,6 +101,23 @@ int cache_set_mode(struct lv_segment *seg, const char *str)
return 1; return 1;
} }
/*
 * Emit warnings for cache stacks that are known to cause trouble:
 * writeback caching on a raid origin, and caching of a thin pool's
 * data LV. Warning the user is the best we can do at the moment.
 */
void cache_check_for_warns(const struct lv_segment *seg)
{
	struct logical_volume *origin = seg_lv(seg, 0);

	/* Writeback cache on raid defeats the redundancy of the origin. */
	if (lv_is_raid(origin) &&
	    (first_seg(seg->pool_lv)->feature_flags & DM_CACHE_FEATURE_WRITEBACK)) {
		log_warn("WARNING: Data redundancy is lost with writeback "
			 "caching of raid logical volume!");
	}

	/* A cached thin pool data LV cannot be resized while cached. */
	if (lv_is_thin_pool_data(seg->lv)) {
		log_warn("WARNING: Cached thin pool's data cannot be currently "
			 "resized and require manual uncache before resize!");
	}
}
int update_cache_pool_params(const struct segment_type *segtype, int update_cache_pool_params(const struct segment_type *segtype,
struct volume_group *vg, unsigned attr, struct volume_group *vg, unsigned attr,
int passed_args, uint32_t pool_data_extents, int passed_args, uint32_t pool_data_extents,

View File

@ -7516,6 +7516,8 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
if (!cache_set_policy(first_seg(lv), lp->policy_name, lp->policy_settings)) if (!cache_set_policy(first_seg(lv), lp->policy_name, lp->policy_settings))
return_NULL; /* revert? */ return_NULL; /* revert? */
cache_check_for_warns(first_seg(lv));
if (!lv_update_and_reload(lv)) { if (!lv_update_and_reload(lv)) {
/* FIXME Do a better revert */ /* FIXME Do a better revert */
log_error("Aborting. Manual intervention required."); log_error("Aborting. Manual intervention required.");

View File

@ -1158,6 +1158,7 @@ int cache_mode_is_set(const struct lv_segment *seg);
int cache_set_mode(struct lv_segment *cache_seg, const char *str); int cache_set_mode(struct lv_segment *cache_seg, const char *str);
int cache_set_policy(struct lv_segment *cache_seg, const char *name, int cache_set_policy(struct lv_segment *cache_seg, const char *name,
const struct dm_config_tree *settings); const struct dm_config_tree *settings);
void cache_check_for_warns(const struct lv_segment *seg);
int update_cache_pool_params(const struct segment_type *segtype, int update_cache_pool_params(const struct segment_type *segtype,
struct volume_group *vg, unsigned attr, struct volume_group *vg, unsigned attr,
int passed_args, uint32_t pool_data_extents, int passed_args, uint32_t pool_data_extents,

View File

@ -24,7 +24,7 @@ aux prepare_vg 5 80
# Bug 1095843 # Bug 1095843
# lvcreate RAID1 origin, lvcreate cache-pool, and lvconvert to cache # lvcreate RAID1 origin, lvcreate cache-pool, and lvconvert to cache
lvcreate --type raid1 -m 1 -l 2 -n $lv1 $vg lvcreate --type raid1 -m 1 --nosync -l 2 -n $lv1 $vg
lvcreate --type cache-pool -l 1 -n ${lv1}_cachepool $vg lvcreate --type cache-pool -l 1 -n ${lv1}_cachepool $vg
lvconvert --cache --cachepool $vg/${lv1}_cachepool $vg/$lv1 lvconvert --cache --cachepool $vg/${lv1}_cachepool $vg/$lv1
check lv_exists $vg/${lv1}_corig_rimage_0 # ensure images are properly renamed check lv_exists $vg/${lv1}_corig_rimage_0 # ensure images are properly renamed
@ -33,17 +33,19 @@ lvremove -f $vg
# lvcreate RAID1 origin, lvcreate RAID1 cache-pool, and lvconvert to cache # lvcreate RAID1 origin, lvcreate RAID1 cache-pool, and lvconvert to cache
lvcreate --type raid1 -m 1 -l 2 -n $lv1 $vg lvcreate --type raid1 -m 1 --nosync -l 2 -n $lv1 $vg
lvcreate --type raid1 -m 1 -l 2 -n ${lv1}_cachepool $vg lvcreate --type raid1 -m 1 --nosync -l 2 -n ${lv1}_cachepool $vg
#should lvs -a $vg/${lv1}_cdata_rimage_0 # ensure images are properly renamed #should lvs -a $vg/${lv1}_cdata_rimage_0 # ensure images are properly renamed
lvconvert --yes --type cache --cachepool $vg/${lv1}_cachepool $vg/$lv1 lvconvert --yes --type cache --cachemode writeback --cachepool $vg/${lv1}_cachepool $vg/$lv1 2>&1 | tee out
grep "WARNING: Data redundancy is lost" out
check lv_exists $vg/${lv1}_corig_rimage_0 # ensure images are properly renamed check lv_exists $vg/${lv1}_corig_rimage_0 # ensure images are properly renamed
dmsetup table ${vg}-$lv1 | grep cache # ensure it is loaded in kernel dmsetup table ${vg}-$lv1 | grep cache # ensure it is loaded in kernel
lvremove -f $vg lvremove -f $vg
lvcreate -n corigin -m 1 --type raid1 -l 10 $vg lvcreate -n corigin -m 1 --type raid1 --nosync -l 10 $vg
lvcreate -n cpool --type cache $vg/corigin -l 10 lvcreate -n cpool --type cache $vg/corigin --cachemode writeback -l 10 2>&1 | tee out
grep "WARNING: Data redundancy is lost" out
lvconvert --splitmirrors 1 --name split $vg/corigin "$dev1" lvconvert --splitmirrors 1 --name split $vg/corigin "$dev1"
lvremove -f $vg lvremove -f $vg

View File

@ -3191,6 +3191,8 @@ static int _lvconvert_cache(struct cmd_context *cmd,
if (!cache_set_policy(first_seg(cache_lv), lp->policy_name, lp->policy_settings)) if (!cache_set_policy(first_seg(cache_lv), lp->policy_name, lp->policy_settings))
return_0; return_0;
cache_check_for_warns(first_seg(cache_lv));
if (!lv_update_and_reload(cache_lv)) if (!lv_update_and_reload(cache_lv))
return_0; return_0;