mirror of git://sourceware.org/git/lvm2.git
c7851b9c34
It looks like there is some kernel bug/limitation that may cause invalid table load processing:

dmsetup load LVMTEST-LV1
device-mapper: reload ioctl on LVMTEST-LV1 failed: Invalid argument
md/raid:mdX: reshape_position too early for auto-recovery - aborting.
md: pers->run() failed ...
device-mapper: table: 253:38: raid: Failed to run raid array (-EINVAL)
device-mapper: ioctl: error adding target to table

However, ATM there is not much we can do other than make the delays bigger.

TODO: fix md core...
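For context, the failing reload is a load of the raid LV's live device-mapper table while its md reshape is still in progress. A minimal sketch of that sequence (assuming the LV is mapped as LVMTEST-LV1, as in the log above; the test below runs the same cycle in a loop):

    dmsetup table LVMTEST-LV1 | dmsetup load LVMTEST-LV1   # on affected kernels the load fails with -EINVAL
    dmsetup suspend --noflush LVMTEST-LV1
    dmsetup resume LVMTEST-LV1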
104 lines
2.5 KiB
Bash
#!/usr/bin/env bash

# Copyright (C) 2017 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

SKIP_WITH_LVMPOLLD=1

. lib/inittest

# Test reshaping under io load

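# Skip unless md5sum, mkfs.ext4 and a new-enough dm-raid target (checked via have_raid 1 14) are available.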
which md5sum || skip
which mkfs.ext4 || skip
aux have_raid 1 14 || skip

mount_dir="mnt"
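
# On exit: unmount the test mount point (if still mounted) and tear down the test devices.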
cleanup_mounted_and_teardown()
{
    umount "$mount_dir" || true
    aux teardown
}

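# Print only the md5 digest of the given file.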
checksum_()
{
    md5sum "$1" | cut -f1 -d' '
}

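# Prepare 16 small test PVs (32M each) and build a VG with 1M extents across all of them.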
aux prepare_pvs 16 32

get_devs

vgcreate $SHARED -s 1M "$vg" "${DEVICES[@]}"

trap 'cleanup_mounted_and_teardown' EXIT

# Create 10-way striped raid5 (11 legs total)
lvcreate --yes --type raid5_ls --stripesize 64K --stripes 10 -L4 -n$lv1 $vg
check lv_first_seg_field $vg/$lv1 segtype "raid5_ls"
check lv_first_seg_field $vg/$lv1 stripesize "64.00k"
check lv_first_seg_field $vg/$lv1 data_stripes 10
check lv_first_seg_field $vg/$lv1 stripes 11
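
# Put an ext4 filesystem on the raid LV and mount it.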
wipefs -a "$DM_DEV_DIR/$vg/$lv1"
mkfs -t ext4 "$DM_DEV_DIR/$vg/$lv1"

mkdir -p "$mount_dir"
mount "$DM_DEV_DIR/$vg/$lv1" "$mount_dir"

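# Drop caches, write 4M of random data to the mounted fs and record its md5 for later comparison.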
echo 3 >/proc/sys/vm/drop_caches
# FIXME: This is filling up the ram disk. Use a sane amount of data, please! Rate-limit the data written!
dd if=/dev/urandom of="$mount_dir/random" bs=1M count=4 conv=fdatasync
checksum_ "$mount_dir/random" >MD5

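# Wait for the initial raid sync, then slow down I/O to $dev2 so the reshape below stays in flight while tables get reloaded.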
# FIXME: wait_for_sync - is this really testing anything under load?
aux wait_for_sync $vg $lv1
aux delay_dev "$dev2" 0 200

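# Grow from 10 to 15 data stripes (16 stripe images in total) while the filesystem stays mounted.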
# Reshape it to 15 data stripes
lvconvert --yes --stripes 15 $vg/$lv1
check lv_first_seg_field $vg/$lv1 segtype "raid5_ls"
check lv_first_seg_field $vg/$lv1 stripesize "64.00k"
check lv_first_seg_field $vg/$lv1 data_stripes 15
check lv_first_seg_field $vg/$lv1 stripes 16

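# Kernels known to reject the mid-reshape reload (see commit message) only log a skip;
# elsewhere, cycle table load/suspend/resume against the live LV a few times.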
# Reload table during reshape to test for data corruption
case "$(uname -r)" in
  5.[89]*|5.1[012].*|3.10.0-862*|4.18.0-*.el8*)
    should not echo "Skipping table reload test on an unfixed kernel!!!" ;;
  *)
    for i in {0..5}
    do
        dmsetup table $vg-$lv1 | dmsetup load $vg-$lv1
        dmsetup suspend --noflush $vg-$lv1
        dmsetup resume $vg-$lv1
        sleep .5
    done
    ;;
esac

aux delay_dev "$dev2" 0
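
# Stop any leftover background job (ignore if none is running) and wait for it.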
kill -9 %% || true
wait

checksum_ "$mount_dir/random" >MD5_new

umount "$mount_dir"

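# Run a forced read-only fsck; the test expects a clean filesystem here.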
fsck -fn "$DM_DEV_DIR/$vg/$lv1"

# Check that the checksums match
cat MD5 MD5_new
diff MD5 MD5_new

vgremove -ff $vg