lvm2/test/shell/integrity-large.sh
Commit d9e8895a96 by David Teigland (2020-04-15): Allow dm-integrity to be used for raid images

dm-integrity stores checksums of the data written to an
LV, and returns an error if data read from the LV does
not match the previously saved checksum.  When used on
raid images, dm-raid will correct the error by reading
the block from another image, and the device user sees
no error.  The integrity metadata (checksums) are stored
on an internal LV allocated by lvm for each linear image.
The internal LV is allocated on the same PV as the image.
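
As a hedged illustration of the correction (the device name and
offset below are hypothetical), damage one PV beneath a raid image,
reread the LV, and check the integritymismatches reporting field,
which counts errors detected by dm-integrity:

# overwrite 1MB on the PV under one image (device and offset are illustrative)
dd if=/dev/urandom of=/dev/sdb bs=1M seek=16 count=1 oflag=direct
# read the full LV; reads hitting the damaged image are corrected
dd if=/dev/foo/rr of=/dev/null bs=1M iflag=direct
lvs -o+integritymismatches foo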

Create a raid LV with an integrity layer over each
raid image (for raid levels 1,4,5,6,10):

lvcreate --type raidN --raidintegrity y [options]

Add an integrity layer to images of an existing raid LV:

lvconvert --raidintegrity y LV

Remove the integrity layer from images of a raid LV:

lvconvert --raidintegrity n LV

Settings

Use --raidintegritymode journal|bitmap (journal is default)
to configure the method used by dm-integrity to ensure
crash consistency.
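
For example, a raid1 LV using bitmap mode (an illustrative command
line, reusing the example names below):

lvcreate --type raid1 -m1 --raidintegrity y --raidintegritymode bitmap -n rr -L1G foo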

Initialization

When integrity is added to an LV, the kernel needs to
initialize the integrity metadata/checksums for all blocks
in the LV.  The data corruption checking performed by
dm-integrity will only operate on areas of the LV that
are already initialized.  The progress of integrity
initialization is reported by the "syncpercent" LV
reporting field (and in the Cpy%Sync column of lvs output).
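
For example, initialization progress can be watched with a command
like:

lvs -a -o name,sync_percent foo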

Example: create a raid1 LV with integrity:

$ lvcreate --type raid1 -m1 --raidintegrity y -n rr -L1G foo
  Creating integrity metadata LV rr_rimage_0_imeta with size 12.00 MiB.
  Logical volume "rr_rimage_0_imeta" created.
  Creating integrity metadata LV rr_rimage_1_imeta with size 12.00 MiB.
  Logical volume "rr_rimage_1_imeta" created.
  Logical volume "rr" created.
$ lvs -a foo
  LV                  VG  Attr       LSize  Origin              Cpy%Sync
  rr                  foo rwi-a-r---  1.00g                     4.93
  [rr_rimage_0]       foo gwi-aor---  1.00g [rr_rimage_0_iorig] 41.02
  [rr_rimage_0_imeta] foo ewi-ao---- 12.00m
  [rr_rimage_0_iorig] foo -wi-ao----  1.00g
  [rr_rimage_1]       foo gwi-aor---  1.00g [rr_rimage_1_iorig] 39.45
  [rr_rimage_1_imeta] foo ewi-ao---- 12.00m
  [rr_rimage_1_iorig] foo -wi-ao----  1.00g
  [rr_rmeta_0]        foo ewi-aor---  4.00m
  [rr_rmeta_1]        foo ewi-aor---  4.00m

#!/usr/bin/env bash
# Copyright (C) 2018 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# Test dm-integrity usage on raid images, including imeta extension
SKIP_WITH_LVMPOLLD=1
. lib/inittest
aux have_integrity 1 5 0 || skip
which mkfs.xfs || skip
mnt="mnt"
mkdir -p $mnt
# raid1 LV needs to be extended to 512MB to test imeta being extended
aux prepare_devs 4 600
# create 16KB files of a repeating character
for i in $(seq 1 16384); do echo -n "A" >> fileA; done
for i in $(seq 1 16384); do echo -n "B" >> fileB; done
for i in $(seq 1 16384); do echo -n "C" >> fileC; done
# generate random data
dd if=/dev/urandom of=randA bs=512K count=2
dd if=/dev/urandom of=randB bs=512K count=3
dd if=/dev/urandom of=randC bs=512K count=4
_prepare_vg() {
	vgcreate $SHARED $vg "$dev1" "$dev2"
	pvs
}
_add_data_to_lv() {
	mkfs.xfs -f "$DM_DEV_DIR/$vg/$lv1"
	mount "$DM_DEV_DIR/$vg/$lv1" $mnt

	# add original data
	cp randA $mnt
	cp randB $mnt
	cp randC $mnt
	mkdir $mnt/1
	cp fileA $mnt/1
	cp fileB $mnt/1
	cp fileC $mnt/1
	mkdir $mnt/2
	cp fileA $mnt/2
	cp fileB $mnt/2
	cp fileC $mnt/2

	umount $mnt
}
_verify_data_on_lv() {
	mount "$DM_DEV_DIR/$vg/$lv1" $mnt

	diff randA $mnt/randA
	diff randB $mnt/randB
	diff randC $mnt/randC
	diff fileA $mnt/1/fileA
	diff fileB $mnt/1/fileB
	diff fileC $mnt/1/fileC
	diff fileA $mnt/2/fileA
	diff fileB $mnt/2/fileB
	diff fileC $mnt/2/fileC

	umount $mnt
}
_sync_percent() {
	local checklv=$1

	get lv_field "$checklv" sync_percent | cut -d. -f1
}
_wait_recalc() {
	local checklv=$1

	for i in $(seq 1 10) ; do
		sync=$(_sync_percent "$checklv")
		echo "sync_percent is $sync"

		if test "$sync" = "100"; then
			return
		fi

		sleep 1
	done

	echo "timeout waiting for recalc"
	return 1
}
# lvextend to 512MB is needed for the imeta LV to
# be extended from 4MB to 8MB.
_prepare_vg
lvcreate --type raid1 -m1 -n $lv1 -l 8 $vg
lvchange -an $vg/$lv1
lvchange -ay $vg/$lv1
_add_data_to_lv
lvconvert --raidintegrity y $vg/$lv1
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
lvs -a -o+devices $vg
_verify_data_on_lv
lvchange -an $vg/$lv1
lvextend -L 512M $vg/$lv1
lvs -a -o+devices $vg
lvchange -ay $vg/$lv1
_verify_data_on_lv
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
lvs -a -o+devices $vg
check lv_field $vg/${lv1}_rimage_0_imeta size "8.00m"
check lv_field $vg/${lv1}_rimage_1_imeta size "8.00m"
# provide space to extend the images onto new devs
vgextend $vg "$dev3" "$dev4"
# extending the images is possible using dev3,dev4
# but extending imeta on the existing dev1,dev2 fails
not lvextend -L +512M $vg/$lv1
# removing integrity will permit extending the images
# using dev3,dev4 since imeta limitation is gone
lvconvert --raidintegrity n $vg/$lv1
lvextend -L +512M $vg/$lv1
lvs -a -o+devices $vg
# adding integrity again will allocate new 12MB imeta LVs
# on dev3,dev4
lvconvert --raidintegrity y $vg/$lv1
_wait_recalc $vg/${lv1}_rimage_0
_wait_recalc $vg/${lv1}_rimage_1
lvs -a -o+devices $vg
check lv_field $vg/${lv1}_rimage_0_imeta size "12.00m"
check lv_field $vg/${lv1}_rimage_1_imeta size "12.00m"
lvchange -an $vg/$lv1
lvremove $vg/$lv1
# this succeeds because dev1,dev2 can hold rmeta+rimage
lvcreate --type raid1 -n $lv1 -L 592M -an $vg "$dev1" "$dev2"
# this fails because dev1,dev2 can hold rmeta+rimage, but not imeta,
# and we require imeta to be on the same devs as rmeta/rimage
not lvcreate --type raid1 --raidintegrity y -n $lv1 -L 592M -an $vg "$dev1" "$dev2"
lvs -a -o+devices $vg
lvremove $vg/$lv1
# this could allocate from more devs, so the vg as a whole has enough
# space for imeta, but lvcreate still fails: rmeta+rimage are allocated
# from dev1,dev2, imeta is restricted to the same devs as rmeta/rimage,
# and dev1,dev2 cannot fit imeta
not lvcreate --type raid1 --raidintegrity y -n $lv1 -L 592M -an $vg
lvs -a -o+devices $vg
# counterintuitively, increasing the size lets lvcreate succeed:
# rmeta+rimage are now pushed onto dev1,dev2,dev3,dev4, which leaves
# imeta free to be allocated from dev3,dev4, which have plenty of space
lvcreate --type raid1 --raidintegrity y -n $lv1 -L 600M -an $vg
lvs -a -o+devices $vg
vgremove -ff $vg