#!/usr/bin/env bash
# Copyright (C) 2013 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# no automatic extensions please
# Harness flags — presumably consumed when lib/inittest is sourced below,
# so they must be set first.
SKIP_WITH_LVMLOCKD=1
SKIP_WITH_LVMPOLLD=1

. lib/inittest

# Filesystem used to populate snapshot origins later in the test.
MKFS=mkfs.ext2
which $MKFS || skip
# fill SIZE [VG/LV]
# Write a single SIZE-sized block of zeroes with direct I/O to the given
# LV (defaults to $vg1/lvol0); die when the write does not fit, i.e. the
# snapshot overflowed.
fill() {
	dd if=/dev/zero of="$DM_DEV_DIR/${2:-$vg1/lvol0}" bs=$1 count=1 oflag=direct || \
		die "Snapshot does not fit $1"
}
# Kill any background device-holder process, reap it, remove the test VGs
# and tear down the test environment.  Installed as the EXIT trap later on.
cleanup_tail()
{
	test -z "$SLEEP_PID" || kill $SLEEP_PID || true
	wait

	# $vg1 lives on an LV inside $vg, so it must go first; tolerate
	# its absence when we exit before it was created.
	vgremove -ff $vg1 || true
	vgremove -ff $vg

	aux teardown
}
# Use a 15P virtual size when 16T+ devices are usable, 15T otherwise.
TSIZE=15P
aux can_use_16T || TSIZE=15T

# With different snapshot target driver we may obtain different results.
# Older targets have metadata leak bug which needs extra compensation.
# Ancient targets do not even provide separate info for metadata.
EXPECT1="16.00k"
EXPECT2="512.00k"
EXPECT3="32.00k"
EXPECT4="66.67"
if aux target_at_least dm-snapshot 1 10 0 ; then
	# Extra metadata size
	EXPECT4="0.00"
	if aux target_at_least dm-snapshot 1 12 0 ; then
		# When fixed leak, expect smaller sizes
		EXPECT1="12.00k"
		EXPECT2="384.00k"
		EXPECT3="28.00k"
	fi
fi
aux prepare_pvs 1

get_devs

vgcreate -s 4M "$vg" "${DEVICES[@]}"

# Play with 1 extent
lvcreate -aey -l1 -n $lv $vg
# 100%LV is not supported for snapshot
fail lvcreate -s -l 100%LV -n snap $vg/$lv 2>&1 | tee out
grep 'Please express size as %FREE, %ORIGIN, %PVS or %VG' out
# 100%ORIGIN needs to have enough space for all data and needs to round-up
lvcreate -s -l 100%ORIGIN -n $lv1 $vg/$lv
# everything needs to fit
fill 4M $vg/$lv1
lvremove -f $vg

# Automatically activates exclusively in cluster
lvcreate --type snapshot -s -l 100%FREE -n $lv $vg --virtualsize $TSIZE
aux extend_filter_LVMTEST

aux lvmconf "activation/snapshot_autoextend_percent = 20" \
	    "activation/snapshot_autoextend_threshold = 50"

# Check usability with smallest (1k) extent size ($lv has 15P)
pvcreate --yes --setphysicalvolumesize 4T "$DM_DEV_DIR/$vg/$lv"

# From here on, clean up background holders and both VGs on exit.
trap 'cleanup_tail' EXIT

vgcreate -s 1K $vg1 "$DM_DEV_DIR/$vg/$lv"
# Play with small 1k 128 extents
lvcreate -aey -L128K -n $lv $vg1
# 100%ORIGIN needs to have enough space for all data
lvcreate -s -l 100%ORIGIN -n snap100 $vg1/$lv
# everything needs to fit
fill 128k $vg1/snap100
# 50%ORIGIN needs to have enough space for 50% of data
lvcreate -s -l 50%ORIGIN -n snap50 $vg1/$lv
fill 64k $vg1/snap50
lvcreate -s -l 25%ORIGIN -n snap25 $vg1/$lv
fill 32k $vg1/snap25
# Check we do not provide too much extra space
not fill 33k $vg1/snap25
lvs -a $vg1
lvremove -f $vg1
# Test virtual snapshot over /dev/zero
lvcreate --type snapshot -V50 -L10 -n $lv1 -s $vg1

CHECK_ACTIVE="active"
test ! -e LOCAL_CLVMD || CHECK_ACTIVE="local exclusive"

check lv_field $vg1/$lv1 lv_active "$CHECK_ACTIVE"
lvchange -an $vg1

# On cluster snapshot gets exclusive activation
lvchange -ay $vg1
check lv_field $vg1/$lv1 lv_active "$CHECK_ACTIVE"

# Test removal of opened (but unmounted) snapshot (device busy) for a while
SLEEP_PID=$(aux hold_device_open $vg1 $lv1 60)

# Opened virtual snapshot device is not removable
# it should retry device removal for a few seconds
not lvremove -f $vg1/$lv1
kill $SLEEP_PID
SLEEP_PID=

# Wait for killed task, so there is no device holder
wait

lvremove -f $vg1/$lv1
check lv_not_exists $vg1 $lv1
# Check border size
lvcreate -aey -L4095G $vg1
lvcreate -s -L100K $vg1/lvol0
fill 1K
check lv_field $vg1/lvol1 data_percent "12.00"

lvremove -ff $vg1

# Create 1KB snapshot, does not need to be active here
lvcreate -an -Zn -l1 -n $lv1 $vg1
not lvcreate -s -l1 $vg1/$lv1
not lvcreate -s -l3 $vg1/$lv1
lvcreate -s -l30 -n $lv2 $vg1/$lv1
check lv_field $vg1/$lv2 size "$EXPECT1"

not lvcreate -s -c512 -l512 $vg1/$lv1
lvcreate -s -c128 -l1700 -n $lv3 $vg1/$lv1
# 3 * 128
check lv_field $vg1/$lv3 size "$EXPECT2"

lvremove -ff $vg1

lvcreate -aey -l20 $vg1
lvcreate -s -l12 $vg1/lvol0
# Fill 1KB -> 100% snapshot (1x 4KB chunk)
fill 1K
check lv_field $vg1/lvol1 data_percent "100.00"

# Check it resizes 100% full valid snapshot to fit threshold
lvextend --use-policies $vg1/lvol1
check lv_field $vg1/lvol1 data_percent "50.00"

fill 4K
lvextend --use-policies $vg1/lvol1
check lv_field $vg1/lvol1 size "24.00k"

lvextend -l+33 $vg1/lvol1
check lv_field $vg1/lvol1 size "$EXPECT3"

fill 20K
lvremove -f $vg1

# Check snapshot really deletes COW header for read-only snapshot
# Test needs special relation between chunk size and extent size
# This test expects extent size 1K
aux lvmconf "allocation/wipe_signatures_when_zeroing_new_lvs = 1"
lvcreate -aey -L4 -n $lv $vg1
lvcreate -c 8 -s -L1 -n snap $vg1/$lv
# Populate snapshot
#dd if=/dev/urandom of="$DM_DEV_DIR/$vg1/$lv" bs=4096 count=10
$MKFS "$DM_DEV_DIR/$vg1/$lv"
lvremove -f $vg1/snap
# Undeleted header would trigger attempt to access
# beyond end of COW device
# Fails to create when chunk size is different
lvcreate -s -pr -l12 -n snap $vg1/$lv
# When header is undelete, fails to read snapshot without read errors
#dd if="$DM_DEV_DIR/$vg1/snap" of=/dev/null bs=1M count=2
fsck -n "$DM_DEV_DIR/$vg1/snap"

# This test would trigger read of weird percentage for undeleted header
# And since older snapshot target counts with metadata sectors
# we have 2 valid results (unsure about correct version number)
check lv_field $vg1/snap data_percent "$EXPECT4"

vgremove -ff $vg1