mirror of git://sourceware.org/git/lvm2.git

tests: lowering disk usage

Correction for aux test result ([] -> if;then;fi); a short note on why this matters precedes the diff below.

Use issue_discards to lower memory demands on discardable test devices
(a standalone sketch of the discard effect follows the diff).
Use large devices directly through prepare_pvs.

I'm still observing more than 0.5G of data usage, though.

Particularly:

'lvcreate' followed by 'lvconvert' (which doesn't yet support the --nosync
option) is quite demanding, and resume returns quite 'late', when
a lot of data has already been written to the PV.
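
To make that last point concrete: a raid1 LV created with --nosync needs no initial
resynchronization, while building the same mirror via lvconvert forces a full copy of
the data onto the new image. A minimal sketch of the two approaches (hypothetical VG
named vg, not taken from this test):

# Heavy: the upconvert copies the whole content of lv1 to the new image,
# and lvconvert does not accept --nosync for this step.
lvcreate -L 200T -n lv1 vg
lvconvert --type raid1 -m 1 vg/lv1

# Cheap for test purposes: create the raid1 LV directly and skip the initial sync.
lvcreate --type raid1 -m 1 --nosync -L 200T -n lv1 vg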
Zdenek Kabelac 2016-09-09 17:12:10 +02:00
parent 454b891f6d
commit e5ec348d68
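
A note on the "[] -> if;then;fi" correction mentioned in the commit message: in shell,
wrapping a command in [ ... ] does not run it. [ is the test builtin, so the words
aux, have_raid, 1, 9, 0 become test arguments; test cannot parse them as an expression,
reports an error and exits non-zero, the && branch never runs, and the raid-version
check is silently skipped. The diff below therefore replaces the first form with the
second:

# Broken: the aux helper is never executed and the guarded check is skipped entirely.
[ aux have_raid 1 9 0 ] && not lvcreate --type raid6 -i 3 -L 750T -n $lv1 $vg1 --nosync

# Fixed: actually run the helper and guard the body on its exit status.
if aux have_raid 1 9 0 ; then
	not lvcreate --type raid6 -i 3 -L 750T -n $lv1 $vg1 --nosync
fi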


@@ -21,27 +21,17 @@ aux can_use_16T || skip
aux have_raid 1 3 0 || skip
aux prepare_vg 5 32
# Prepare 5x ~1P sized devices
aux prepare_pvs 5 1000000000
# Fake 5 PiB volume group $vg1 via snapshot LVs
for device in "$lv1" "$lv2" "$lv3" "$lv4" "$lv5"
vgcreate $vg1 $(< DEVICES)
aux lvmconf 'devices/issue_discards = 1'
# Delay PVs so that resynchronization doesn't fill too much space
for device in $(< DEVICES)
do
lvcreate --type snapshot -s -l 20%FREE -n $device $vg --virtualsize 1P
done
#FIXME this should be 1024T
#check lv_field $vg/$lv size "128.00m"
aux extend_filter_LVMTEST
pvcreate "$DM_DEV_DIR"/$vg/$lv[12345]
vgcreate -s 2M $vg1 "$DM_DEV_DIR"/$vg/$lv[12345]
# Delay PVs so that resynchronization doesn't fill
# the snapshots before removal of the RaidLV
for device in "$dev1" "$dev2" "$dev3" "$dev4" "$dev5"
do
aux delay_dev "$device" 0 1
aux delay_dev "$device" 0 10 $(get first_extent_sector "$device")
done
# bz837927 START
@@ -70,15 +60,30 @@ for segtype in raid4 raid5; do
lvremove -ff $vg1
done
# 750 TiB raid6 (with --nosync rejection check)
[ aux have_raid 1 9 0 ] && not lvcreate --type raid6 -i 3 -L 750T -n $lv1 $vg1 --nosync
#
# Extending large 200 TiB RAID LV to 400 TiB (belong in different script?)
#
lvcreate --type raid1 -m 1 -L 200T -n $lv1 $vg1 --nosync
check lv_field $vg1/$lv1 size "200.00t"
aux check_status_chars $vg1 $lv1 "AA"
lvextend -L +200T $vg1/$lv1
check lv_field $vg1/$lv1 size "400.00t"
aux check_status_chars $vg1 $lv1 "AA"
lvremove -ff $vg1
# Check --nosync is rejected for raid6
if aux have_raid 1 9 0 ; then
not lvcreate --type raid6 -i 3 -L 750T -n $lv1 $vg1 --nosync
fi
# 750 TiB raid6
lvcreate --type raid6 -i 3 -L 750T -n $lv1 $vg1
check lv_field $vg1/$lv1 size "750.00t"
aux check_status_chars $vg1 $lv1 "aaaaa"
lvremove -ff $vg1
# 1 PiB raid6 (with --nosync rejection check), then extend up to 2 PiB
[ aux have_raid 1 9 0 ] && not lvcreate --type raid6 -i 3 -L 1P -n $lv1 $vg1 --nosync
# 1 PiB raid6, then extend up to 2 PiB
lvcreate --type raid6 -i 3 -L 1P -n $lv1 $vg1
check lv_field $vg1/$lv1 size "1.00p"
aux check_status_chars $vg1 $lv1 "aaaaa"
@@ -96,18 +101,6 @@ check lv_field $vg1/$lv1 size "200.00t"
aux check_status_chars $vg1 $lv1 "aa"
lvremove -ff $vg1
#
# Extending large 200 TiB RAID LV to 400 TiB (belong in different script?)
#
lvcreate --type raid1 -m 1 -L 200T -n $lv1 $vg1 --nosync
check lv_field $vg1/$lv1 size "200.00t"
aux check_status_chars $vg1 $lv1 "AA"
lvextend -L +200T $vg1/$lv1
check lv_field $vg1/$lv1 size "400.00t"
aux check_status_chars $vg1 $lv1 "AA"
lvremove -ff $vg1
# bz837927 END
vgremove -ff $vg1
vgremove -ff $vg
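
On the disk-usage point from the commit message: with devices/issue_discards = 1,
removing an LV makes LVM discard the PV extents the LV occupied, and a discard-capable
sparse backing device (a loop device, for example) gives that space back to the host.
A minimal standalone sketch outside the lvm2 test harness; backing.img, testvg and lv1
are made-up names:

truncate -s 1T backing.img              # sparse file, almost no real space used
dev=$(losetup --show -f backing.img)    # loop devices turn discards into hole punches
pvcreate "$dev"
vgcreate testvg "$dev"
lvcreate -L 10G -n lv1 testvg
mkfs.ext4 /dev/testvg/lv1               # real writes make backing.img grow
du -h backing.img                       # space actually consumed so far
lvremove -f testvg/lv1                  # with devices/issue_discards = 1 in lvm.conf,
                                        # the freed extents are discarded on removal
du -h backing.img                       # usage drops back towards the starting point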