lvm2/test/shell/lvconvert-cache-raid.sh

#!/usr/bin/env bash

# Copyright (C) 2014-2015 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

# Exercise usage of stacked cache volume using raid volume

SKIP_WITH_LVMPOLLD=1

. lib/inittest

aux have_cache 1 3 0 || skip
aux have_raid 1 0 0 || skip
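
# smq is disabled via cache_disabled_features below, so lvm falls back
# to the older mq cache policy for this test.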
aux lvmconf 'global/cache_disabled_features = [ "policy_smq" ]'

aux prepare_vg 5 80

# Bug 1095843
# lvcreate RAID1 origin, lvcreate cache-pool, and lvconvert to cache
lvcreate --type raid1 -m 1 --nosync -l 2 -n $lv1 $vg
lvcreate --type cache-pool -l 1 -n ${lv1}_cachepool $vg
lvconvert --cache -Zy --cachepool $vg/${lv1}_cachepool $vg/$lv1
check lv_exists $vg/${lv1}_corig_rimage_0 # ensure images are properly renamed
dmsetup table ${vg}-$lv1 | grep cache # ensure it is loaded in kernel
lvremove -f $vg

# lvcreate RAID1 origin, lvcreate RAID1 cache-pool, and lvconvert to cache
lvcreate --type raid1 -m 1 --nosync -l 2 -n $lv1 $vg
lvcreate --type raid1 -m 1 --nosync -l 2 -n ${lv1}_cachepool $vg
#should lvs -a $vg/${lv1}_cdata_rimage_0 # ensure images are properly renamed
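
# Writeback cache mode risks losing data redundancy, so lvconvert must
# warn and proceed only with --yes; the warning text is checked below.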
lvconvert --yes --type cache --cachemode writeback --cachepool $vg/${lv1}_cachepool $vg/$lv1 2>&1 | tee out
grep "WARNING: Data redundancy could be lost" out
check lv_exists $vg/${lv1}_corig_rimage_0 # ensure images are properly renamed
dmsetup table ${vg}-$lv1 | grep cache # ensure it is loaded in kernel
lvremove -f $vg
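
# Create a writeback cache on a raid1 origin (warns again), then split
# one raid image off the cached origin; the split needs confirmation.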
lvcreate -n corigin -m 1 --type raid1 --nosync -l 10 $vg
lvcreate -n cpool --type cache $vg/corigin --cachemode writeback -l 10 2>&1 | tee out
grep "WARNING: Data redundancy could be lost" out
not lvconvert --splitmirrors 1 --name split $vg/corigin "$dev1"
lvconvert --yes --splitmirrors 1 --name split $vg/corigin "$dev1"
lvremove -f $vg
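
# Build a cache pool from two raid1 LVs; after the conversion cpool
# becomes the cdata subLV and cpool_meta the cmeta subLV.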
lvcreate -n cpool_meta -m 1 --type raid1 -l 10 $vg
lvcreate -n cpool -m 1 --type raid1 -l 10 $vg
aux wait_for_sync $vg cpool_meta
aux wait_for_sync $vg cpool
lvs -a -o+seg_pe_ranges $vg
lvconvert --yes --type cache-pool --poolmetadata $vg/cpool_meta $vg/cpool
lvcreate -n corigin --type cache --cachepool $vg/cpool -l 10
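
# Scrub the cache pool's raid1 subLVs via syncaction, then exercise
# lvconvert --repair on each of them.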
lvchange --syncaction repair $vg/cpool_cmeta
aux wait_for_sync $vg cpool_cmeta
lvchange --syncaction repair $vg/cpool_cdata
aux wait_for_sync $vg cpool_cdata
lvconvert -y --repair $vg/cpool_cmeta
lvconvert -y --repair $vg/cpool_cdata

# do not allow reserved names for *new* LVs
not lvconvert --splitmirrors 1 --name split_cmeta $vg/cpool_cmeta "$dev1"
not lvconvert --splitmirrors 1 --name split_cdata $vg/cpool_cdata "$dev1"
# but allow manipulating existing LVs with reserved names
aux wait_for_sync $vg cpool_cmeta
aux wait_for_sync $vg cpool_cdata
lvconvert --yes --splitmirrors 1 --name split_meta $vg/cpool_cmeta "$dev1"
lvconvert --yes --splitmirrors 1 --name split_data $vg/cpool_cdata "$dev1"
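# Only one image remains after the split, so repeating it must fail.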
not lvconvert --splitmirrors 1 --name split_data $vg/cpool_cdata "$dev1"

lvremove -f $vg

# Test up/down raid conversion of cache pool data and metadata
lvcreate -l 10 -n cp1 $vg
lvconvert -y --type cache-pool $vg/cp1
lvcreate -l 20 -n co1 $vg
lvconvert -y --type cache --cachepool cp1 $vg/co1
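
# Up-convert each cache-pool subLV from linear to two-image raid1 and
# verify the reported layout and role fields.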
lvconvert -y -m +1 --type raid1 $vg/cp1_cmeta
check lv_field $vg/cp1_cmeta layout "raid,raid1"
check lv_field $vg/cp1_cmeta role "private,cache,pool,metadata"
lvconvert -y -m +1 --type raid1 $vg/cp1_cdata
check lv_field $vg/cp1_cdata layout "raid,raid1"
check lv_field $vg/cp1_cdata role "private,cache,pool,data"
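
# Pause briefly, presumably to let the new raid images settle before
# inspecting the devices.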
sleep 5
lvs -a -o+devices $vg
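
# Down-convert back to linear: image removal is refused without -y.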
not lvconvert -m -1 $vg/cp1_cmeta
lvconvert -y -m -1 $vg/cp1_cmeta
check lv_field $vg/cp1_cmeta layout "linear"
lvconvert -y -m -1 $vg/cp1_cdata
check lv_field $vg/cp1_cdata layout "linear"

lvremove -f $vg
vgremove -f $vg