#!/usr/bin/env bash
# Copyright (C) 2011-2017 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

. lib/utils

test -n "$BASH" && set -euE -o pipefail

run_valgrind() {
	# Execute script which may use $TESTNAME for creating individual
	# log files for each executed command
	exec "${VALGRIND:-valgrind}" "$@"
}
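
# Usage sketch (illustrative): run_valgrind is used as a command prefix, e.g.
#   run_valgrind dmeventd -f ...
# and the VALGRIND environment variable may point to a wrapper script instead
# of plain 'valgrind'.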

expect_failure() {
	echo "TEST EXPECT FAILURE"
}

check_daemon_in_builddir() {
	# skip if we don't have our own daemon...
	if test -z "${installed_testsuite+varset}"; then
		(which "$1" 2>/dev/null | grep "$abs_builddir" >/dev/null) || skip "$1 is not in executed path."
	fi
	rm -f debug.log strace.log
}

create_corosync_conf() {
	local COROSYNC_CONF="/etc/corosync/corosync.conf"
	local COROSYNC_NODE
	COROSYNC_NODE=$(hostname)

	if test -a "$COROSYNC_CONF"; then
		if ! grep "created by lvm test suite" "$COROSYNC_CONF"; then
			rm "$COROSYNC_CONF"
		else
			mv "$COROSYNC_CONF" "$COROSYNC_CONF.prelvmtest"
		fi
	fi

	sed -e "s/@LOCAL_NODE@/$COROSYNC_NODE/" lib/test-corosync-conf > "$COROSYNC_CONF"
	echo "created new $COROSYNC_CONF"
}

create_dlm_conf() {
	local DLM_CONF="/etc/dlm/dlm.conf"

	if test -a "$DLM_CONF"; then
		if ! grep "created by lvm test suite" "$DLM_CONF"; then
			rm "$DLM_CONF"
		else
			mv "$DLM_CONF" "$DLM_CONF.prelvmtest"
		fi
	fi

	mkdir -p "$(dirname "$DLM_CONF")"
	cp lib/test-dlm-conf "$DLM_CONF"
	echo "created new $DLM_CONF"
}

prepare_dlm() {
	pgrep dlm_controld && skip "Cannot run while existing dlm_controld process exists."
	pgrep corosync && skip "Cannot run while existing corosync process exists."

	create_corosync_conf
	create_dlm_conf

	systemctl start corosync
	sleep 1
	if ! pgrep corosync; then
		echo "Failed to start corosync."
		exit 1
	fi

	systemctl start dlm
	sleep 1
	if ! pgrep dlm_controld; then
		echo "Failed to start dlm."
		exit 1
	fi
}

create_sanlock_conf() {
	local SANLOCK_CONF="/etc/sanlock/sanlock.conf"

	if test -a "$SANLOCK_CONF"; then
		if ! grep "created by lvm test suite" "$SANLOCK_CONF"; then
			rm "$SANLOCK_CONF"
		else
			mv "$SANLOCK_CONF" "$SANLOCK_CONF.prelvmtest"
		fi
	fi

	mkdir -p "$(dirname "$SANLOCK_CONF")"
	cp lib/test-sanlock-conf "$SANLOCK_CONF"
	echo "created new $SANLOCK_CONF"
}

prepare_sanlock() {
	pgrep sanlock && skip "Cannot run while existing sanlock process exists"

	create_sanlock_conf

	systemctl start sanlock
	if ! pgrep sanlock; then
		echo "Failed to start sanlock"
		exit 1
	fi
}

prepare_idm() {
	pgrep seagate_ilm && skip "Cannot run while existing seagate_ilm process exists"

	seagate_ilm -D 0 -l 0 -L 7 -E 7 -S 7

	if ! pgrep seagate_ilm; then
		echo "Failed to start seagate_ilm"
		exit 1
	fi
}

prepare_lvmlockd() {
	pgrep lvmlockd && skip "Cannot run while existing lvmlockd process exists"

	if test -n "$LVM_TEST_LOCK_TYPE_SANLOCK"; then
		# make check_lvmlockd_sanlock
		echo "starting lvmlockd for sanlock"
		lvmlockd -o 2
	elif test -n "$LVM_TEST_LOCK_TYPE_DLM"; then
		# make check_lvmlockd_dlm
		echo "starting lvmlockd for dlm"
		lvmlockd
	elif test -n "$LVM_TEST_LOCK_TYPE_IDM"; then
		# make check_lvmlockd_idm
		echo "starting lvmlockd for idm"
		lvmlockd -g idm
	elif test -n "$LVM_TEST_LVMLOCKD_TEST_DLM"; then
		# make check_lvmlockd_test
		echo "starting lvmlockd --test (dlm)"
		lvmlockd --test -g dlm
	elif test -n "$LVM_TEST_LVMLOCKD_TEST_SANLOCK"; then
		# FIXME: add option for this combination of --test and sanlock
		echo "starting lvmlockd --test (sanlock)"
		lvmlockd --test -g sanlock -o 2
	elif test -n "$LVM_TEST_LVMLOCKD_TEST_IDM"; then
		# make check_lvmlockd_test
		echo "starting lvmlockd --test (idm)"
		lvmlockd --test -g idm
	else
		echo "not starting lvmlockd"
		exit 0
	fi

	sleep 1
	if ! pgrep lvmlockd >LOCAL_LVMLOCKD; then
		echo "Failed to start lvmlockd"
		exit 1
	fi
}

prepare_clvmd() {
	test "${LVM_TEST_LOCKING:-0}" -ne 3 && return # not needed

	if pgrep clvmd ; then
		skip "Cannot use fake cluster locking with real clvmd ($(pgrep clvmd)) running."
	fi

	check_daemon_in_builddir clvmd

	test -e "$DM_DEV_DIR/control" || dmsetup table >/dev/null # create control node
	# skip if singlenode is not compiled in
	(clvmd --help 2>&1 | grep "Available cluster managers" | grep "singlenode" >/dev/null) || \
		skip "Compiled clvmd does not support singlenode for testing."

#	lvmconf "activation/monitoring = 1"
	local run_valgrind=""
	test "${LVM_VALGRIND_CLVMD:-0}" -eq 0 || run_valgrind="run_valgrind"
	rm -f "$CLVMD_PIDFILE"
	echo "<======== Starting CLVMD ========>"
	echo -n "## preparing clvmd..."
	# lvs is executed from clvmd - use our version
	LVM_LOG_FILE_EPOCH=CLVMD LVM_LOG_FILE_MAX_LINES=1000000 $run_valgrind clvmd -Isinglenode -d 1 -f &
	echo $! > LOCAL_CLVMD

	for i in {200..0} ; do
		test "$i" -eq 0 && die "Startup of clvmd is too slow."
		test -e "$CLVMD_PIDFILE" && test -e "${CLVMD_PIDFILE%/*}/lvm/clvmd.sock" && break
		echo -n .
		sleep .1
	done
	echo ok
}

prepare_dmeventd() {
	if pgrep dmeventd ; then
		skip "Cannot test dmeventd with real dmeventd ($(pgrep dmeventd)) running."
	fi

	check_daemon_in_builddir dmeventd
	lvmconf "activation/monitoring = 1"

	local run_valgrind=""
	test "${LVM_VALGRIND_DMEVENTD:-0}" -eq 0 || run_valgrind="run_valgrind"

	echo -n "## preparing dmeventd..."
#	LVM_LOG_FILE_EPOCH=DMEVENTD $run_valgrind dmeventd -fddddl "$@" 2>&1 &
	LVM_LOG_FILE_EPOCH=DMEVENTD $run_valgrind dmeventd -fddddl "$@" >debug.log_DMEVENTD_out 2>&1 &
	echo $! > LOCAL_DMEVENTD

	# FIXME wait for pipe in /var/run instead
	for i in {200..0} ; do
		test "$i" -eq 0 && die "Startup of dmeventd is too slow."
		test -e "${DMEVENTD_PIDFILE}" && break
		echo -n .
		sleep .1
	done
	echo ok
}

prepare_lvmpolld() {
	test -e LOCAL_LVMPOLLD || lvmconf "global/use_lvmpolld = 1"

	local run_valgrind=""
	test "${LVM_VALGRIND_LVMPOLLD:-0}" -eq 0 || run_valgrind="run_valgrind"

	kill_sleep_kill_ LOCAL_LVMPOLLD "${LVM_VALGRIND_LVMPOLLD:-0}"

	echo -n "## preparing lvmpolld..."
	$run_valgrind lvmpolld -f "$@" -s "$TESTDIR/lvmpolld.socket" -B "$TESTDIR/lib/lvm" -l all &
	echo $! > LOCAL_LVMPOLLD
	for i in {200..0} ; do
		test -e "$TESTDIR/lvmpolld.socket" && break
		echo -n .
		sleep .1
	done # wait for the socket
	test "$i" -gt 0 || die "Startup of lvmpolld is too slow."
	echo ok
}

lvmpolld_talk() {
	local use=nc
	if type -p socat >&/dev/null; then
		use=socat
	elif echo | not nc -U "$TESTDIR/lvmpolld.socket"; then
		echo "WARNING: Neither socat nor nc -U seems to be available." 1>&2
		echo "## failed to contact lvmpolld."
		return 1
	fi

	if test "$use" = nc ; then
		nc -U "$TESTDIR/lvmpolld.socket"
	else
		socat "unix-connect:$TESTDIR/lvmpolld.socket" -
	fi | tee -a lvmpolld-talk.txt
}

lvmpolld_dump() {
	(echo 'request="dump"'; echo '##') | lvmpolld_talk "$@"
}

prepare_lvmdbusd() {
	local lvmdbusdebug=
	local daemon
	rm -f debug.log_LVMDBUSD_out

	kill_sleep_kill_ LOCAL_LVMDBUSD 0

	# FIXME: This is not correct! Daemon is auto started.
	echo -n "## checking lvmdbusd is NOT running..."
	if pgrep -f -l lvmdbusd | grep python3 || pgrep -x -l lvmdbusd ; then
		skip "Cannot run lvmdbusd while existing lvmdbusd process exists"
	fi
	echo ok

	# skip if we don't have our own lvmdbusd...
	echo -n "## find lvmdbusd to use..."
	if test -z "${installed_testsuite+varset}"; then
		# NOTE: this is always present - additional checks are needed:
		daemon="$abs_top_builddir/daemons/lvmdbusd/lvmdbusd"
		if test -x "$daemon" || chmod ugo+x "$daemon"; then
			echo "$daemon"
		else
			echo "Failed to make '$daemon' executable" >&2
			return 1
		fi
		# Setup the python path so we can run
		export PYTHONPATH="$abs_top_builddir/daemons"
	else
		daemon=$(which lvmdbusd || :)
		echo "$daemon"
	fi
	test -x "$daemon" || skip "The lvmdbusd daemon is missing"
	which python3 >/dev/null || skip "Missing python3"

	python3 -c "import pyudev, dbus, gi.repository" || skip "Missing python modules"
	python3 -c "from json.decoder import JSONDecodeError" || skip "Python json module is missing JSONDecodeError"

	# Copy the needed file to run on the system bus if it doesn't
	# already exist
	if [ ! -f /etc/dbus-1/system.d/com.redhat.lvmdbus1.conf ]; then
		install -m 644 "$abs_top_builddir/scripts/com.redhat.lvmdbus1.conf" /etc/dbus-1/system.d/
	fi

	echo "## preparing lvmdbusd..."
	lvmconf "global/notify_dbus = 1"

	test "${LVM_DEBUG_LVMDBUS:-0}" != "0" && lvmdbusdebug="--debug"

	# Currently do not interfere with lvmdbusd testing of the file logging
	unset LVM_LOG_FILE_EPOCH
	unset LVM_LOG_FILE_MAX_LINES
	unset LVM_EXPECTED_EXIT_STATUS

	"$daemon" $lvmdbusdebug > debug.log_LVMDBUSD_out 2>&1 &
	local pid=$!

	sleep 1
	echo -n "## checking lvmdbusd IS running..."
	comm=
	# TODO: Is there a better check than wait 1 second and check pid?
	if ! comm=$(ps -p $pid -o comm=) >/dev/null || [[ $comm != lvmdbusd ]]; then
		echo "Failed to start lvmdbusd daemon"
		return 1
	fi
	echo "$pid" > LOCAL_LVMDBUSD
	echo ok
}

#
# Temporary solution to create some occupied thin metadata.
# This heavily depends on the thin metadata output format staying as it is.
# Currently it expects 2MB thin metadata and 200MB data volume size.
# Argument specifies how many devices should be created.
#
prepare_thin_metadata() {
	local devices=$1
	local transaction_id=${2:-0}
	local data_block_size=${3:-128}
	local nr_data_blocks=${4:-3200}
	local i

	echo '<superblock uuid="" time="1" transaction="'"$transaction_id"'" data_block_size="'"$data_block_size"'" nr_data_blocks="'"$nr_data_blocks"'">'
	for i in $(seq 1 "$devices")
	do
		echo ' <device dev_id="'"$i"'" mapped_blocks="37" transaction="'"$i"'" creation_time="0" snap_time="1">'
		echo '  <range_mapping origin_begin="0" data_begin="0" length="37" time="0"/>'
		echo ' </device>'
	done
	echo "</superblock>"
}
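
# A minimal usage sketch (assumes thin_restore from thin-provisioning-tools and an
# already prepared metadata volume; the file and LV names are illustrative):
#   prepare_thin_metadata 2 0 > data.xml
#   thin_restore -i data.xml -o "$DM_DEV_DIR/mapper/$vg-$lv1"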

teardown_devs_prefixed() {
	local prefix=$1
	local stray=${2:-0}
	local IFS=$IFS_NL
	local once=1
	local dm

	rm -rf "${TESTDIR:?}/dev/$prefix*"

	# Send idle message to frozen raids (with hope to unfreeze them)
	for dm in $(dm_status | grep -E "$prefix.*raid.*frozen"); do
		echo "## unfreezing: dmsetup message \"${dm%:*}\""
		dmsetup message "${dm%:*}" 0 "idle" &
	done

	# Resume suspended devices first
	for dm in $(dm_info name -S "name=~$PREFIX&&suspended=Suspended"); do
		test "$dm" != "No devices found" || break
		echo "## resuming: dmsetup resume \"$dm\""
		dmsetup clear "$dm"
		dmsetup resume "$dm" &
	done

	wait

	local mounts
	mounts=( $(grep "$prefix" /proc/mounts | cut -d' ' -f1) ) || true
	if test ${#mounts[@]} -gt 0; then
		test "$stray" -eq 0 || echo "## removing stray mounted devices containing $prefix:" "${mounts[@]}"
		if umount -fl "${mounts[@]}"; then
			udev_wait
		fi
	fi

	# Remove devices, start with closed (sorted by open count)
	# Run 'dmsetup remove' in parallel
	rm -f REMOVE_FAILED
	#local listdevs=( $(dm_info name,open --sort open,name | grep "$prefix.*:0") )
	#dmsetup remove --deferred ${listdevs[@]%%:0} || touch REMOVE_FAILED

	# The 2nd loop tries --force removal, which can possibly 'unstick' some blocked operations
	for i in 0 1; do
		test "$i" = 1 && test "$stray" = 0 && break # no stray device removal
		local progress=1

		while :; do
			local sortby="name"

			# HACK: sort also by minors - so we try to close 'possibly later' created device first
			test "$i" = 0 || sortby="-minor"

			for dm in $(dm_info name,open --separator ';' --nameprefixes --unquoted --sort open,"$sortby" -S "name=~$prefix || uuid=~$prefix" --mangle none || true); do
				test "$dm" != "No devices found" || break 2
				eval "$dm"
				local force="-f"
				if test "$i" = 0; then
					if test "$once" = 1 ; then
						once=0
						echo "## removing stray mapped devices with names beginning with $prefix: "
					fi
					test "$DM_OPEN" = 0 || break # stop loop with 1st. opened device
					force=""
				fi

				# Successful 'remove' signals progress
				dmsetup remove $force "$DM_NAME" --mangle none && progress=1
			done

			test "$i" = 0 || break
			test "$progress" = 1 || break

			sleep .1
			udev_wait
			wait
			progress=0
		done # looping till there are some removed devices
	done
}

teardown_devs() {
	# Delete any remaining dm/udev semaphores
	teardown_udev_cookies
	restore_dm_mirror

	test ! -f MD_DEV || cleanup_md_dev
	if [ -f DEVICES ] || [ -f RAMDISK ] || [ -f SCSI_DEBUG_DEV ]; then
		teardown_devs_prefixed "$PREFIX"
	fi

	if test -f RAMDISK ; then
		for i in 1 2 ; do
			modprobe -r brd && { rm -f RAMDISK ; break ; }
			sleep .1
			udev_wait
		done
	fi

	# NOTE: SCSI_DEBUG_DEV test must come before the LOOP test because
	# prepare_scsi_debug_dev() also sets LOOP to short-circuit prepare_loop()
	if test -f SCSI_DEBUG_DEV; then
		udev_wait
		test "${LVM_TEST_PARALLEL:-0}" -eq 1 || {
			for i in 1 2 ; do
				modprobe -r scsi_debug && { rm -f SCSI_DEBUG_DEV ; break ; }
				sleep .1
				udev_wait
			done
		}
	else
		test ! -f LOOP || losetup -d "$(< LOOP)" || true
		test ! -f LOOPFILE || rm -f "$(< LOOPFILE)"
	fi

	not diff LOOP BACKING_DEV >/dev/null 2>&1 || rm -f BACKING_DEV
	rm -f DEVICES LOOP RAMDISK

	# Attempt to remove any loop devices that failed to get torn down if earlier tests aborted
	test "${LVM_TEST_PARALLEL:-0}" -eq 1 || test -z "$COMMON_PREFIX" || {
		local stray_loops
		stray_loops=( $(losetup -a | grep "$COMMON_PREFIX" | cut -d: -f1) ) || true
		test ${#stray_loops[@]} -eq 0 || {
			teardown_devs_prefixed "$COMMON_PREFIX" 1
			echo "## removing stray loop devices containing $COMMON_PREFIX:" "${stray_loops[@]}"
			for i in "${stray_loops[@]}" ; do test ! -b "$i" || losetup -d "$i" || true ; done
			# Leave test when udev processed all removed devices
			udev_wait
		}
	}
}

# Stop a daemon whose PID is stored in the given file: send SIGTERM, wait a bit,
# then SIGKILL while it is still running (a non-zero 'slow' selects longer waits,
# e.g. for valgrind runs).
kill_sleep_kill_() {
	local pidfile=$1
	local slow=$2

	if test -s "$pidfile" ; then
		pid=$(< "$pidfile")
		rm -f "$pidfile"
		kill -TERM "$pid" 2>/dev/null || return 0
		for i in {0..10} ; do
			ps "$pid" >/dev/null || return 0
			if test "$slow" -eq 0 ; then sleep .2 ; else sleep 1 ; fi
			kill -KILL "$pid" 2>/dev/null || true
		done
	fi
}

print_procs_by_tag_() {
	(ps -o pid,args ehax | grep -we"LVM_TEST_TAG=${1:-kill_me_$PREFIX}") || true
}

count_processes_with_tag() {
	print_procs_by_tag_ | wc -l
}

kill_tagged_processes() {
	local pid
	local wait

	# read uses all vars within pipe subshell
	local pids=()
	while read -r pid wait; do
		if test -n "$pid" ; then
			echo "## killing tagged process: $pid ${wait:0:120}..."
			kill -TERM "$pid" 2>/dev/null || true
		fi
		pids+=( "$pid" )
	done < <(print_procs_by_tag_ "$@")

	test ${#pids[@]} -eq 0 && return

	# wait for processes to exit, eventually send -KILL
	wait=0
	for pid in "${pids[@]}" ; do
		while ps "$pid" > /dev/null && test "$wait" -le 10; do
			sleep .2
			wait=$(( wait + 1 ))
		done
		test "$wait" -le 10 || kill -KILL "$pid" 2>/dev/null || true
	done
}

teardown() {
	local TEST_LEAKED_DEVICES=""
	echo -n "## teardown..."
	unset LVM_LOG_FILE_EPOCH

	if test -f TESTNAME ; then

		if test ! -f SKIP_THIS_TEST ; then
			# Evaluate left-over devices only for non-skipped tests
			TEST_LEAKED_DEVICES=$(dmsetup table | grep "$PREFIX" | \
				grep -Ev "${PREFIX}(pv|[0-9])" | \
				grep -v "$(cat ERR_DEV_NAME 2>/dev/null)" | \
				grep -v "$(cat ZERO_DEV_NAME 2>/dev/null)") || true
		fi

		kill_tagged_processes

		if test -n "$LVM_TEST_LVMLOCKD_TEST" ; then
			echo ""
			echo "## stopping lvmlockd in teardown"
			kill_sleep_kill_ LOCAL_LVMLOCKD 0
		fi

		dm_table | not grep -E -q "$vg|$vg1|$vg2|$vg3|$vg4" || {
			# Avoid activation of dmeventd if there is no pid
			cfg=$(test -s LOCAL_DMEVENTD || echo "--config activation{monitoring=0}")
			if dm_info suspended,name | grep "^Suspended:.*$PREFIX" >/dev/null ; then
				echo "## skipping vgremove, suspended devices detected."
			else
				vgremove -ff "$cfg" \
					"$vg" "$vg1" "$vg2" "$vg3" "$vg4" &>/dev/null || rm -f debug.log strace.log
			fi
		}

		kill_sleep_kill_ LOCAL_LVMDBUSD 0

		echo -n .

		kill_sleep_kill_ LOCAL_LVMPOLLD "${LVM_VALGRIND_LVMPOLLD:-0}"

		echo -n .

		kill_sleep_kill_ LOCAL_CLVMD "${LVM_VALGRIND_CLVMD:-0}"

		echo -n .

		kill_sleep_kill_ LOCAL_DMEVENTD "${LVM_VALGRIND_DMEVENTD:-0}"

		echo -n .

		test -d "$DM_DEV_DIR/mapper" && teardown_devs

		echo -n .
	fi

	test -z "$TEST_LEAKED_DEVICES" || {
		echo "## unexpected devices left in dm table:"
		echo "$TEST_LEAKED_DEVICES"
		return 1
	}

	if test "${LVM_TEST_PARALLEL:-0}" = 0 && test -z "$RUNNING_DMEVENTD"; then
		not pgrep dmeventd &>/dev/null # printed in STACKTRACE
	fi

	echo -n .

	test -n "$TESTDIR" && {
		cd "$TESTOLDPWD" || die "Failed to enter $TESTOLDPWD"
		# after this delete no further write is possible
		rm -rf "${TESTDIR:?}" || echo BLA
	}

	# Remove any dangling symlink in /dev/disk (our tests can confuse udev)
	test -d /dev/disk && {
		find /dev/disk -type l ! -exec /usr/bin/test -e {} \; -print0 | xargs -0 rm -f || true
	}

	# Remove any metadata archives and backups from this test on the system
	rm -f /etc/lvm/archive/"${PREFIX}"* /etc/lvm/backup/"${PREFIX}"*

	echo "ok"
}

prepare_loop() {
	local size=$1
	shift # all other params are directly passed to all 'losetup' calls
	local i
	local slash

	test -f LOOP && LOOP=$(< LOOP)
	echo -n "## preparing loop device..."

	# skip if prepare_scsi_debug_dev() was used
	if test -f SCSI_DEBUG_DEV && test -f LOOP ; then
		echo "(skipped)"
		return 0
	fi

	test ! -e LOOP
	test -n "$DM_DEV_DIR"

	for i in 0 1 2 3 4 5 6 7; do
		test -e "$DM_DEV_DIR/loop$i" || mknod "$DM_DEV_DIR/loop$i" b 7 $i
	done

	echo -n .

	local LOOPFILE="$PWD/test.img"
	rm -f "$LOOPFILE"
	dd if=/dev/zero of="$LOOPFILE" bs=$((1024*1024)) count=0 seek=$(( size + 1 )) 2> /dev/null

	if LOOP=$(losetup "$@" -s -f "$LOOPFILE" 2>/dev/null); then
		:
	elif LOOP=$(losetup -f) && losetup "$@" "$LOOP" "$LOOPFILE"; then
		# no -s support
		:
	else
		# no -f support
		# Iterate through $DM_DEV_DIR/loop{,/}{0,1,2,3,4,5,6,7}
		for slash in '' /; do
			for i in 0 1 2 3 4 5 6 7; do
				local dev="$DM_DEV_DIR/loop$slash$i"
				! losetup "$dev" >/dev/null 2>&1 || continue
				# got a free one
				losetup "$@" "$dev" "$LOOPFILE"
				LOOP=$dev
				break
			done
			test -z "$LOOP" || break
		done
	fi
	test -n "$LOOP" # confirm or fail

	touch NO_BLKDISCARD_Z # loop devices do not support WRITE_ZEROS

	BACKING_DEV=$LOOP
	echo "$LOOP" > LOOP
	echo "$LOOP" > BACKING_DEV
	echo "ok ($LOOP)"
}

prepare_ramdisk() {
	local size=$1

	# if brd is unused, remove and use it for the test
	modprobe -r brd || return 0

	echo -n "## preparing ramdisk device..."
	modprobe brd rd_size=$((size * 1024)) rd_nr=1 || return

	BACKING_DEV=/dev/ram0
	echo "ok ($BACKING_DEV)"
	touch RAMDISK
}

prepare_real_devs() {
	aux lvmconf 'devices/scan = "/dev"'

	touch REAL_DEVICES

	if test -n "$LVM_TEST_DEVICE_LIST"; then
		local count=0
		while read -r path; do
			REAL_DEVICES[count]=$path
			count=$(( count + 1 ))
			aux extend_filter "a|$path|"
			dd if=/dev/zero of="$path" bs=32k count=1
			wipefs -a "$path" 2>/dev/null || true
		done < "$LVM_TEST_DEVICE_LIST"
	fi
	printf "%s\\n" "${REAL_DEVICES[@]}" > REAL_DEVICES
}

# A drop-in replacement for prepare_loop() that uses scsi_debug to create
# a ramdisk-based SCSI device upon which all LVM devices will be created
# - scripts must take care not to use a DEV_SIZE that will induce OOM-killer
prepare_scsi_debug_dev() {
	local DEV_SIZE=$1
	shift # rest of params directly passed to modprobe
	local DEBUG_DEV

	rm -f debug.log strace.log
	test ! -f "SCSI_DEBUG_DEV" || return 0
	test ! -f LOOP
	test -n "$DM_DEV_DIR"

	# Skip test if scsi_debug module is unavailable or is already in use
	modprobe --dry-run scsi_debug || skip
	lsmod | not grep scsi_debug >/dev/null || skip

	# Create the scsi_debug device and determine the new scsi device's name
	# NOTE: it will _never_ make sense to pass num_tgts param;
	# last param wins.. so num_tgts=1 is imposed
	touch SCSI_DEBUG_DEV
	modprobe scsi_debug dev_size_mb="$(( DEV_SIZE + 2 ))" "$@" num_tgts=1 || skip

	for i in {1..20} ; do
		sleep .1 # allow for async Linux SCSI device registration
		DEBUG_DEV="/dev/$(grep -H scsi_debug /sys/block/sd*/device/model | cut -f4 -d /)"
		test -b "$DEBUG_DEV" && break
	done
	test -b "$DEBUG_DEV" || return 1 # should not happen

	# Create symlink to scsi_debug device in $DM_DEV_DIR
	SCSI_DEBUG_DEV="$DM_DEV_DIR/$(basename "$DEBUG_DEV")"
	echo "$SCSI_DEBUG_DEV" > SCSI_DEBUG_DEV
	echo "$SCSI_DEBUG_DEV" > BACKING_DEV
	# Setting $LOOP provides means for prepare_devs() override
	test "$DEBUG_DEV" = "$SCSI_DEBUG_DEV" || ln -snf "$DEBUG_DEV" "$SCSI_DEBUG_DEV"
}
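
# Usage sketch: the first argument is the device size in MB, anything after it is
# passed straight to 'modprobe scsi_debug' (the module parameter shown is only an
# illustrative assumption):
#   prepare_scsi_debug_dev 100 sector_size=4096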

cleanup_scsi_debug_dev() {
	teardown_devs
	rm -f LOOP
}

mdadm_create() {
	local devid
	local mddev

	which mdadm >/dev/null || skip "mdadm tool is missing!"

	cleanup_md_dev
	rm -f debug.log strace.log

	# try to find a free MD node
	# using the old naming /dev/mdXXX
	# if we need more MD arrays, the test suite most likely leaked them
	for devid in {127..150} ; do
		grep -q "md${devid}" /proc/mdstat || break
	done
	test "$devid" -lt "150" || skip "Cannot find free /dev/mdXXX node!"

	mddev=/dev/md${devid}

	mdadm --create "$mddev" "$@" || {
		# Some older 'mdadm' versions managed to open and close devices internally
		# and report non-exclusive access on such a device;
		# let's just skip the test if this happens.
		# Note: It's pretty complex to get rid of the consequences,
		# the following sequence avoids leaks on f19.
		# TODO: maybe try here to recreate few times....
		mdadm --stop "$mddev" || true
		udev_wait
		while [ "$#" -ne 0 ]; do
			case "$1" in
			*"$PREFIX"*) mdadm --zero-superblock "$1" || true ;;
			esac
			shift
		done
		udev_wait
		skip "Test skipped, unreliable mdadm detected!"
	}

	for i in {10..0} ; do
		test -e "$mddev" && break
		echo "Waiting for $mddev."
		sleep .5
	done
	test -b "$mddev" || skip "mdadm has not created device!"
	echo "$mddev" > MD_DEV

	# LVM/DM will see this device
	case "$DM_DEV_DIR" in
	"/dev") echo "$mddev" > MD_DEV_PV ;;
	*)	rm -f "$DM_DEV_DIR/md${devid}"
		cp -LR "$mddev" "$DM_DEV_DIR"
		echo "${DM_DEV_DIR}/md${devid}" > MD_DEV_PV ;;
	esac

	rm -f MD_DEVICES
	while [ "$#" -ne 0 ]; do
		case "$1" in
		*"$PREFIX"*) echo "$1" >> MD_DEVICES ;;
		esac
		shift
	done
}
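
# Usage sketch: everything after the auto-selected /dev/mdXXX node is passed
# straight to 'mdadm --create' (the options shown are only illustrative):
#   mdadm_create --metadata=1.0 --level=1 --raid-devices=2 "$dev1" "$dev2"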

mdadm_assemble() {
	STRACE=
	[ "$DM_DEV_DIR" = "/dev" ] && mdadm -V 2>&1 | grep " v3.2" && {
		# use this 'trick' to slow down mdadm which otherwise
		# is racing with the udev rule, since mdadm internally
		# opens and closes raid leg devices in RW mode and then
		# tries to get exclusive access to the leg device during
		# insertion to the kernel and fails during assembly
		# There can be some other affected versions of mdadm.
		STRACE="strace -f -o /dev/null"
	}

	$STRACE mdadm --assemble "$@"
	udev_wait
}

cleanup_md_dev() {
	local IFS=$IFS_NL
	local i
	local dev
	local base
	local mddev

	test -f MD_DEV || return 0

	mddev=$(< MD_DEV)
	base=$(basename "$mddev")

	# try to find and remove any DM device on top of the cleaned MD
	# assume /dev/mdXXX is 9:MINOR
	local minor=${mddev##/dev/md}
	for i in $(dmsetup table | grep 9:"$minor" | cut -d: -f1) ; do
		dmsetup remove "$i" || {
			dmsetup --force remove "$i" || true
		}
	done

	for i in {0..10} ; do
		grep -q "$base" /proc/mdstat || break
		test "$i" = 0 || {
			sleep .1
			echo "$mddev is still present, stopping again"
			cat /proc/mdstat
		}
		mdadm --stop "$mddev" || true
		udev_wait # wait till events are processed, do not zero too early
	done

	test "$DM_DEV_DIR" = "/dev" || rm -f "$(< MD_DEV_PV)"

	for dev in $(< MD_DEVICES); do
		mdadm --zero-superblock "$dev" 2>/dev/null || true
	done
	udev_wait
	rm -f MD_DEV MD_DEVICES MD_DEV_PV
}

wipefs_a() {
	local have_wipefs=

	if test -e HAVE_WIPEFS; then
		have_wipefs=$(< HAVE_WIPEFS)
	else
		wipefs -V >HAVE_WIPEFS 2>/dev/null && have_wipefs=yes
	fi

	udev_wait

	for dev in "$@"; do
		if test -n "$LVM_TEST_DEVICES_FILE"; then
			lvmdevices --deldev "$dev" || true
		fi

		if test -n "$have_wipefs"; then
			wipefs -a "$dev" || {
				echo "$dev: device in-use, retrying wipe again."
				sleep .1
				udev_wait
				wipefs -a "$dev"
			}
		else
			dd if=/dev/zero of="$dev" bs=4096 count=8 oflag=direct >/dev/null || true
			mdadm --zero-superblock "$dev" 2>/dev/null || true
		fi

		if test -n "$LVM_TEST_DEVICES_FILE"; then
			lvmdevices --adddev "$dev" || true
		fi
	done

	udev_wait
}

cleanup_idm_context() {
	local dev=$1

	if [ -n "$LVM_TEST_LOCK_TYPE_IDM" ]; then
		sg_dev=$(sg_map26 "${dev}")
		echo "Cleanup IDM context for drive ${dev} ($sg_dev)"
		sg_raw -v -r 512 -o idm_tmp_data.bin "$sg_dev" \
			88 00 01 00 00 00 00 20 FF 01 00 00 00 01 00 00
		sg_raw -v -s 512 -i idm_tmp_data.bin "$sg_dev" \
			8E 00 FF 00 00 00 00 00 00 00 00 00 00 01 00 00
		rm idm_tmp_data.bin
	fi
}

#
# clear device either with 'blkdiscard -z' or fall back to 'dd'
# $1 device_path
# TODO: add support for parametrized [OPTION] usage (Not usable ATM)
# TODO:  -bs     blocksize (defaults to 512K)
# TODO:  -count  count/length (defaults to whole device, otherwise in BS units)
# TODO:  -seek   offset/seek (defaults to 0, beginning of zeroing area in BS units)
clear_devs() {
	local bs=
	local count=
	local seek=

	while [ "$#" -ne 0 ]; do
		case "$1" in
		"") ;;
		"--bs") bs=$2; shift ;;
		"--count") count=$2; shift ;;
		"--seek") seek=$2; shift ;;
		*TEST*) # Protection: only test devices with TEST in their path name can be zeroed
			test -e NO_BLKDISCARD_Z || {
				if blkdiscard -f -z "$1"; then
					shift
					continue
				fi
				echo "Info: can't use 'blkdiscard -z', switching to 'dd'."
				touch NO_BLKDISCARD_Z
			}

			dd if=/dev/zero of="$1" bs=512K oflag=direct $seek $count || true
			;;
		esac
		shift
	done
}
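
# Usage sketch: option parsing is still a TODO (see above), so it is typically
# called with plain test device paths only:
#   clear_devs "$dev1" "$dev2"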

#
# corrupt device content
# $1  file_path
# $2  string/pattern to search for
# $3  string/pattern used as the corrupting replacement
corrupt_dev() {
	local a

	# search for the string in the file
	# Note: the returned string may possibly start with other ASCII chars
	# a[0] is the position in the file, a[1] is the actual string
	a=( $(strings -t d -n 64 "$1" | grep -m 1 "$2") ) || true
	test -n "${a[0]-}" || return 0

	# Seek to the sequence and replace it with the corruption pattern
	echo -n "${a[1]/$2/$3}" | LANG=C dd of="$1" bs=1 seek="${a[0]}" conv=fdatasync
}
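
# Usage sketch (the same-length search/replacement patterns are illustrative):
# overwrite the first matching metadata string directly on the device to
# simulate corruption:
#   corrupt_dev "$dev1" BADBABEBADBABE BADABEBADABEBA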

prepare_backing_dev() {
	local size=${1:-32}
	shift

	if test -n "$LVM_TEST_BACKING_DEVICE"; then
		IFS=',' read -r -a BACKING_DEVICE_ARRAY <<< "$LVM_TEST_BACKING_DEVICE"

		for d in "${BACKING_DEVICE_ARRAY[@]}"; do
			if test ! -b "$d"; then
				echo "Device $d doesn't exist!"
				return 1
			fi
		done
	fi

	if test -f BACKING_DEV; then
		BACKING_DEV=$(< BACKING_DEV)
		return 0
	elif test -n "$LVM_TEST_BACKING_DEVICE"; then
		BACKING_DEV=${BACKING_DEVICE_ARRAY[0]}
		echo "$BACKING_DEV" > BACKING_DEV
		return 0
	elif test "${LVM_TEST_PREFER_BRD-1}" = "1" && \
	     test ! -d /sys/block/ram0 && \
	     kernel_at_least 4 16 0 && \
	     test "$size" -lt 16384; then
		# try to use ramdisk if possible, but for
		# big allocs (>16G) do not try to use ramdisk
		# Also we can't use a BRD device prior to kernel 4.16,
		# since those were DAX based and lvm2 often relies
		# on safe table loading between the existing backend device
		# and a bio-based 'error' device.
		# However with a request-based DAX brd device we get this:
		# device-mapper: ioctl: can't change device type after initial table load.
		prepare_ramdisk "$size" "$@" && return
		echo "(failed)"
	fi
	prepare_loop "$size" "$@"
}

prepare_devs() {
	local n=${1:-3}
	local devsize=${2:-34}
	local pvname=${3:-pv}
	local header_shift=1 # shift header from begin & end of device by 1MiB

	# sanlock requires more space for the internal sanlock lv
	# This could probably be lower, but what are the units?
	if test -n "$LVM_TEST_LOCK_TYPE_SANLOCK" ; then
		devsize=1024
	fi

	touch DEVICES
	prepare_backing_dev $(( n * devsize + 2 * header_shift ))
	blkdiscard "$BACKING_DEV" 2>/dev/null || true
	echo -n "## preparing $n devices..."

	local size=$(( devsize * 2048 )) # sectors
	local count=0
	rm -f CREATE_FAILED
	init_udev_transaction
	for i in $(seq 1 "$n"); do
		local name="${PREFIX}$pvname$i"
		local dev="$DM_DEV_DIR/mapper/$name"
		DEVICES[count]=$dev
		count=$(( count + 1 ))
		# If the number of backing devices can meet the requirement for PV devices,
		# then allocate a dedicated backing device for each PV; otherwise, fall back
		# to using a single backing device for device-mapper.
		if [ -n "$LVM_TEST_BACKING_DEVICE" ] && [ "$n" -le ${#BACKING_DEVICE_ARRAY[@]} ]; then
			echo 0 $size linear "${BACKING_DEVICE_ARRAY[$(( count - 1 ))]}" $(( header_shift * 2048 )) > "$name.table"
		else
			echo 0 $size linear "$BACKING_DEV" $(( ( i - 1 ) * size + ( header_shift * 2048 ) )) > "$name.table"
		fi
		dmsetup create -u "TEST-$name" "$name" "$name.table" || touch CREATE_FAILED &
		test -f CREATE_FAILED && break;
	done
	wait
	finish_udev_transaction

	if test -f CREATE_FAILED ; then
		if test -z "$LVM_TEST_BACKING_DEVICE"; then
			echo "failed"
			return 1
		fi
		LVM_TEST_BACKING_DEVICE=
		rm -f BACKING_DEV CREATE_FAILED
		prepare_devs "$@"
		return $?
	fi

	if [ -n "$LVM_TEST_BACKING_DEVICE" ]; then
		for d in "${BACKING_DEVICE_ARRAY[@]}"; do
			cnt=$(( $(blockdev --getsize64 "$d") / 1024 / 1024 ))
			cnt=$(( cnt < 1000 ? cnt : 1000 ))
			dd if=/dev/zero of="$d" bs=1MB count=$cnt
			wipefs -a "$d" 2>/dev/null || true
			cleanup_idm_context "$d"
		done
	fi

	# non-ephemeral devices need to be cleared between tests
	test -f LOOP -o -f RAMDISK || for d in "${DEVICES[@]}"; do
		# ensure disk header is always zeroed
		dd if=/dev/zero of="$d" bs=32k count=1
		wipefs -a "$d" 2>/dev/null || true
	done

	if test -n "$LVM_TEST_DEVICES_FILE"; then
		mkdir -p "$TESTDIR/etc/lvm/devices" || true
		rm "$TESTDIR/etc/lvm/devices/system.devices" || true
		touch "$TESTDIR/etc/lvm/devices/system.devices"
		for d in "${DEVICES[@]}"; do
			lvmdevices --adddev "$d" || true
		done
	fi

	#for i in `seq 1 $n`; do
	#	local name="${PREFIX}$pvname$i"
	#	dmsetup info -c $name
	#done
	#for i in `seq 1 $n`; do
	#	local name="${PREFIX}$pvname$i"
	#	dmsetup table $name
	#done

	printf "%s\\n" "${DEVICES[@]}" > DEVICES
	# ( IFS=$'\n'; echo "${DEVICES[*]}" ) >DEVICES
	echo "ok"
}
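
# Usage sketch: prepare_devs [count] [device size in MiB] [pv name prefix], e.g.
#   prepare_devs 5 10
# creates five 10MiB mapped devices $DM_DEV_DIR/mapper/${PREFIX}pv1..5 and lists
# them in the DEVICES file.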

common_dev_() {
	local tgtype=$1
	local dev=$2
	local name=${dev##*/}
	shift 2
	local read_ms=${1-0}
	local write_ms=${2-0}

	case "$tgtype" in
	delay)
		test "$read_ms" -eq 0 && test "$write_ms" -eq 0 && {
			# zero delay is just equivalent to 'enable_dev'
			enable_dev "$dev"
			return
		}
		shift 2
		;;
	delayzero)
		shift 2
		# zero delay is just equivalent to 'zero_dev'
		test "$read_ms" -eq 0 && test "$write_ms" -eq 0 && tgtype="zero"
		;;
	# error|zero target does not take read_ms & write_ms, only an offset list
	esac

	local pos
	local size
	local type
	local pvdev
	local offset

	read -r pos size type pvdev offset < "$name.table"

	for fromlen in "${@-0:}"; do
		from=${fromlen%%:*}
		len=${fromlen##*:}
		if test "$len" = "$fromlen"; then
			# Missing the colon at the end: empty len
			len=
		fi
		test -n "$len" || len=$(( size - from ))
		diff=$(( from - pos ))
		if test $diff -gt 0 ; then
			echo "$pos $diff $type $pvdev $(( pos + offset ))"
			pos=$(( pos + diff ))
		elif test $diff -lt 0 ; then
			die "Position error"
		fi

		case "$tgtype" in
		delay)
			echo "$from $len delay $pvdev $(( pos + offset )) $read_ms $pvdev $(( pos + offset )) $write_ms" ;;
		writeerror)
			echo "$from $len delay $pvdev $(( pos + offset )) 0 $(cat ERR_DEV) 0 0" ;;
		delayzero)
			echo "$from $len delay $(cat ZERO_DEV) 0 $read_ms $(cat ZERO_DEV) 0 $write_ms" ;;
		error|zero)
			echo "$from $len $tgtype" ;;
		esac
		pos=$(( pos + len ))
	done > "$name.devtable"
	diff=$(( size - pos ))
	test "$diff" -gt 0 && echo "$pos $diff $type $pvdev $(( pos + offset ))" >> "$name.devtable"

	restore_from_devtable "$dev"
}

# Replace linear PV device with its 'delayed' version
# Could be used to more deterministically hit some problems.
# Parameters: {device path} [read delay ms] [write delay ms] [offset[:[size]]]...
# Original device is restored when both delay params are 0 (or missing).
# If the size is missing, the remaining portion of the device is taken
# i.e.  delay_dev "$dev1" 0 200 256:
delay_dev() {
	if test ! -f HAVE_DM_DELAY ; then
		target_at_least dm-delay 1 1 0 || return 0
		touch HAVE_DM_DELAY
	fi
	common_dev_ delay "$@"
}

disable_dev() {
	local dev
	local silent=""
	local error=""
	local notify=""

	while test -n "$1"; do
		if test "$1" = "--silent"; then
			silent=1
			shift
		elif test "$1" = "--error"; then
			error=1
			shift
		else
			break
		fi
	done

	udev_wait
	for dev in "$@"; do
		maj=$(($(stat -L --printf=0x%t "$dev")))
		min=$(($(stat -L --printf=0x%T "$dev")))
		echo "Disabling device $dev ($maj:$min)"
		notify="$notify $maj:$min"
		if test -n "$error"; then
			echo 0 10000000 error | dmsetup load "$dev"
			dmsetup resume "$dev"
		else
			dmsetup remove -f "$dev" 2>/dev/null || true
		fi
	done
}

enable_dev() {
	local dev
	local silent=""

	if test "$1" = "--silent"; then
		silent=1
		shift
	fi

	rm -f debug.log strace.log
	init_udev_transaction
	for dev in "$@"; do
		local name=${dev##*/}
		dmsetup create -u "TEST-$name" "$name" "$name.table" 2>/dev/null || \
			dmsetup load "$name" "$name.table"
		# using device name (since device path does not exist yet with udev)
		dmsetup resume "$name"
	done
	finish_udev_transaction
}

# Try to remove a list of DM devices from the table
remove_dm_devs() {
	local remove=( "$@" )
	local held
	local i

	for i in {1..50}; do
		held=()
		for d in "${remove[@]}"; do
			dmsetup remove "$d" 2>/dev/null || {
				dmsetup info -c "$d" 2>/dev/null && {
					held+=( "$d" )
					dmsetup status "$d"
				}
			}
		done
		test ${#held[@]} -eq 0 && {
			rm -f debug.log*
			return
		}
		remove=( "${held[@]}" )
	done
	die "Can't remove device(s) ${held[*]}"
}

# Throttle down performance of kcopyd when mirroring i.e. disk image
throttle_sys="/sys/module/dm_mirror/parameters/raid1_resync_throttle"
throttle_dm_mirror() {
	# if the kernel config file is present, validate whether the kernel uses HZ_1000
	# and return failure for this 'throttling' when it does NOT, as without this setting
	# the whole throttling is pointless on modern hardware
	local kconfig
	kconfig="/boot/config-$(uname -r)"
	if test -e "$kconfig"; then
		grep -q "CONFIG_HZ_1000=y" "$kconfig" 2>/dev/null || {
			echo "WARNING: CONFIG_HZ_1000=y is NOT set in $kconfig -> throttling is unusable"
			return 1
		}
	fi
	test -e "$throttle_sys" || return
	test -f THROTTLE || cat "$throttle_sys" > THROTTLE
	echo "${1-1}" > "$throttle_sys"
}

# Restore original kcopyd throttle value and have mirroring fast again
restore_dm_mirror() {
	test ! -f THROTTLE || {
		cat THROTTLE > "$throttle_sys"
		rm -f THROTTLE
	}
}

# Once there is a $name.devtable,
# this is a quick way to restore to this table entry
restore_from_devtable() {
	local dev
	local silent=""

	if test "$1" = "--silent"; then
		silent=1
		shift
	fi

	rm -f debug.log strace.log
	init_udev_transaction
	for dev in "$@"; do
		local name=${dev##*/}
		dmsetup load "$name" "$name.devtable"
		if not dmsetup resume "$name"; then
			dmsetup clear "$name"
			dmsetup resume "$name"
			finish_udev_transaction
			echo "Device $name has unusable table \"$(cat "$name.devtable")\""
			return 1
		fi
	done
	finish_udev_transaction
}

#
# Convert device to device with errors
# Takes the list of pairs of error segment from:len
# Combination with zero or delay is unsupported
# Original device table is replaced with multiple lines
# i.e.  error_dev "$dev1" 8:32 96:8
error_dev() {
	common_dev_ error "$@"
}

#
# Convert device to device with write errors but normal reads.
# For this a 'delay' device is used: it reroutes 'reads' back to the original device
# and routes writes to an extra new TEST-errordev (huge error target)
# i.e.  writeerror_dev "$dev1" 8:32
writeerror_dev() {
	local name=${PREFIX}-errordev

	if test ! -e ERR_DEV; then
		# delay target is used for error mapping
		if test ! -f HAVE_DM_DELAY ; then
			target_at_least dm-delay 1 1 0 || return 0
			touch HAVE_DM_DELAY
		fi
		dmsetup create -u "TEST-$name" "$name" --table "0 4611686018427387904 error"
		# Take major:minor of our error device
		echo "$name" > ERR_DEV_NAME
		dmsetup info -c --noheadings -o major,minor "$name" > ERR_DEV
	fi

	common_dev_ writeerror "$@"
}

#
# Convert device to device with sections of delayed zero reads and writes.
# For this a 'delay' device is used with an extra new TEST-zerodev (huge zero target)
# and it reroutes reads and writes
# i.e.  delayzero_dev "$dev1" 8:32
delayzero_dev() {
	local name=${PREFIX}-zerodev

	if test ! -e ZERO_DEV; then
		# delay target is used for error mapping
		if test ! -f HAVE_DM_DELAY ; then
			target_at_least dm-delay 1 1 0 || return 0
			touch HAVE_DM_DELAY
		fi
		dmsetup create -u "TEST-$name" "$name" --table "0 4611686018427387904 zero"
		# Take major:minor of our zero device
		echo "$name" > ZERO_DEV_NAME
		dmsetup info -c --noheadings -o major,minor "$name" > ZERO_DEV
	fi

	common_dev_ delayzero "$@"
}

#
# Convert existing device to a device with zero segments
# Takes the list of pairs of zero segment from:len
# Combination with error or delay is unsupported
# Original device table is replaced with multiple lines
# i.e.  zero_dev "$dev1" 8:32 96:8
zero_dev() {
	common_dev_ zero "$@"
}

backup_dev() {
	local dev

	for dev in "$@"; do
		dd if="$dev" of="${dev##*/}.backup" bs=16K conv=fdatasync || \
			die "Cannot backup device: \"$dev\" with size $(blockdev --getsize64 "$dev" || true) bytes."
	done
}

restore_dev() {
	local dev

	for dev in "$@"; do
		test -e "${dev##*/}.backup" || \
			die "Internal error: $dev not backed up, can't restore!"
		dd of="$dev" if="${dev##*/}.backup" bs=16K
	done
}

prepare_pvs() {
	prepare_devs "$@"
	pvcreate -ff "${DEVICES[@]}"
}

prepare_vg() {
	teardown_devs

	prepare_devs "$@"
	vgcreate $SHARED -s 512K "$vg" "${DEVICES[@]}"
}

extend_devices() {
	test -z "$LVM_TEST_DEVICES_FILE" && return

	for dev in "$@"; do
		lvmdevices --adddev "$dev"
	done
}

extend_filter() {
	local filter

	test -n "$LVM_TEST_DEVICES_FILE" && return

	filter=$(grep ^devices/global_filter CONFIG_VALUES | tail -n 1)
	for rx in "$@"; do
		filter=$(echo "$filter" | sed -e "s:\\[:[ \"$rx\", :")
	done
	lvmconf "$filter" "devices/scan_lvs = 1"
}
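
# Usage sketch (the regex is illustrative): accept additional device paths in the
# global_filter, e.g.
#   extend_filter "a|/dev/md|"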
2019-09-11 21:26:41 +03:00
extend_filter_md( ) {
local filter
2021-08-19 00:23:48 +03:00
test -n " $LVM_TEST_DEVICES_FILE " && return
2019-09-11 21:26:41 +03:00
filter = $( grep ^devices/global_filter CONFIG_VALUES | tail -n 1)
for rx in " $@ " ; do
filter = $( echo " $filter " | sed -e " s:\\[:[ \" $rx \", : " )
done
lvmconf " $filter "
lvmconf " devices/scan = [ \" $DM_DEV_DIR \", \"/dev\" ] "
}
2013-05-27 04:03:00 +04:00
extend_filter_LVMTEST( ) {
2018-05-24 12:12:17 +03:00
extend_filter " a| $DM_DEV_DIR / $PREFIX | " " $@ "
2013-05-27 04:03:00 +04:00
}
hide_dev() {
	local filter

	if test -n "$LVM_TEST_DEVICES_FILE"; then
		for dev in "$@"; do
			lvmdevices --deldev "$dev"
		done
	else
		filter=$(grep ^devices/global_filter CONFIG_VALUES | tail -n 1)
		for dev in "$@"; do
			filter=$(echo "$filter" | sed -e "s:\\[:[ \"r|$dev|\", :")
		done
		lvmconf "$filter"
	fi
}

unhide_dev() {
	local filter

	if test -n "$LVM_TEST_DEVICES_FILE"; then
		for dev in "$@"; do
			lvmdevices -y --adddev "$dev"
		done
	else
		filter=$(grep ^devices/global_filter CONFIG_VALUES | tail -n 1)
		for dev in "$@"; do
			filter=$(echo "$filter" | sed -e "s:\"r|$dev|\", ::")
		done
		lvmconf "$filter"
	fi
}
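# Usage sketch (comments only; "$dev2" and the "not" negation wrapper are the
# suite's usual conventions, assumed here):
#
#   hide_dev "$dev2"      # reject via devices file or global_filter
#   not pvs "$dev2"       # lvm should no longer see the PV
#   unhide_dev "$dev2"    # make it visible again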
mkdev_md5sum() {
	rm -f debug.log strace.log
	mkfs.ext2 "$DM_DEV_DIR/$1/$2" || return 1
	md5sum "$DM_DEV_DIR/$1/$2" > "md5.$1-$2"
}
generate_config() {
	if test -n "$profile_name"; then
		config_values="PROFILE_VALUES_$profile_name"
		config="PROFILE_$profile_name"
		touch "$config_values"
	else
		config_values=CONFIG_VALUES
		config=CONFIG
	fi

	LVM_TEST_LOCKING=${LVM_TEST_LOCKING:-1}
	LVM_TEST_LVMPOLLD=${LVM_TEST_LVMPOLLD:-0}
	LVM_TEST_LVMLOCKD=${LVM_TEST_LVMLOCKD:-0}
	LVM_TEST_DEVICES_FILE=${LVM_TEST_DEVICES_FILE:-0}
	# FIXME:dct: This is harmful! Variables are unused here and are tested not being empty elsewhere:
	#LVM_TEST_LOCK_TYPE_SANLOCK=${LVM_TEST_LOCK_TYPE_SANLOCK:-0}
	#LVM_TEST_LOCK_TYPE_DLM=${LVM_TEST_LOCK_TYPE_DLM:-0}
	if test "$DM_DEV_DIR" = "/dev"; then
		LVM_VERIFY_UDEV=${LVM_VERIFY_UDEV:-0}
	else
		LVM_VERIFY_UDEV=${LVM_VERIFY_UDEV:-1}
	fi
	test -f "$config_values" || {
		cat > "$config_values" <<-EOF
activation/checks = 1
activation/monitoring = 0
activation/polling_interval = 1
activation/retry_deactivation = 1
activation/snapshot_autoextend_percent = 50
activation/snapshot_autoextend_threshold = 50
activation/verify_udev_operations = $LVM_VERIFY_UDEV
activation/raid_region_size = 512
allocation/wipe_signatures_when_zeroing_new_lvs = 0
allocation/vdo_slab_size_mb = 128
allocation/zero_metadata = 0
backup/archive = 0
backup/backup = 0
devices/cache_dir = "$LVM_SYSTEM_DIR"
devices/default_data_alignment = 1
devices/dir = "$DM_DEV_DIR"
devices/md_component_detection = 0
devices/scan = "$DM_DEV_DIR"
devices/sysfs_scan = 1
devices/write_cache_state = 0
devices/use_devicesfile = $LVM_TEST_DEVICES_FILE
devices/filter = "a|.*|"
devices/global_filter = [ "a|$DM_DEV_DIR/mapper/${PREFIX}.*pv[0-9_]*$|", "r|.*|" ]
global/abort_on_internal_errors = 1
global/cache_check_executable = "$LVM_TEST_CACHE_CHECK_CMD"
global/cache_dump_executable = "$LVM_TEST_CACHE_DUMP_CMD"
global/cache_repair_executable = "$LVM_TEST_CACHE_REPAIR_CMD"
global/cache_restore_executable = "$LVM_TEST_CACHE_RESTORE_CMD"
global/detect_internal_vg_cache_corruption = 1
global/fallback_to_local_locking = 0
global/etc = "$LVM_SYSTEM_DIR"
global/locking_type=$LVM_TEST_LOCKING
global/notify_dbus = 0
global/si_unit_consistency = 1
global/thin_check_executable = "$LVM_TEST_THIN_CHECK_CMD"
global/thin_dump_executable = "$LVM_TEST_THIN_DUMP_CMD"
global/thin_repair_executable = "$LVM_TEST_THIN_REPAIR_CMD"
global/thin_restore_executable = "$LVM_TEST_THIN_RESTORE_CMD"
global/use_lvmpolld = $LVM_TEST_LVMPOLLD
global/use_lvmlockd = $LVM_TEST_LVMLOCKD
log/activation = 1
log/file = "$TESTDIR/debug.log"
log/indent = 1
log/level = 9
log/overwrite = 1
log/syslog = 0
log/verbose = 0
EOF
		# For 'rpm' builds use system-installed binaries and libraries,
		# the system locking dir and some more built-in defaults.
		# For a test suite run use binaries from the builddir.
		test -z "${abs_top_builddir+varset}" || {
			cat >> "$config_values" <<-EOF
dmeventd/executable = "$abs_top_builddir/test/lib/dmeventd"
activation/udev_rules = 1
activation/udev_sync = 1
global/fsadm_executable = "$abs_top_builddir/test/lib/fsadm"
global/library_dir = "$TESTDIR/lib"
global/locking_dir = "$TESTDIR/var/lock/lvm"
EOF
		}
	}

	# append all parameters  (avoid adding empty \n)
	local v
	test $# -gt 0 && printf "%s\\n" "$@" >> "$config_values"

	declare -A CONF 2>/dev/null || {
		# Associative arrays are not available
		local s
		for s in $(cut -f1 -d/ "$config_values" | sort | uniq); do
			echo "$s {"
			local k
			for k in $(grep ^"$s"/ "$config_values" | cut -f1 -d= | sed -e 's, *$,,' | sort | uniq); do
				grep "^${k}[ \t=]" "$config_values" | tail -n 1 | sed -e "s,^$s/,	," || true
			done
			echo "}"
			echo
		done | tee "$config" | sed -e "s,^,## LVMCONF: ,"
		return 0
	}

	local sec
	local last_sec=""

	# read sequential list and put into associative array
	while IFS= read -r v; do
		CONF["${v%%[={ ]*}"]=${v#*/}
	done < "$config_values"

	# sort by section and iterate through them
	printf "%s\\n" "${!CONF[@]}" | sort | while read -r v; do
		sec=${v%%/*} # split on section'/'param_name
		test "$sec" = "$last_sec" || {
			test -z "$last_sec" || echo "}"
			echo "$sec {"
			last_sec=$sec
		}
		echo "	${CONF[$v]}"
	done > "$config"
	echo "}" >> "$config"
	sed -e "s,^,## LVMCONF: ," "$config"
}
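# Illustrative sketch of the output format (comments only): CONFIG_VALUES holds
# flat "section/key = value" lines, while the generated CONFIG groups them by
# section in lvm.conf syntax, roughly:
#
#   activation {
#       monitoring = 0
#       ...
#   }
#   global {
#       locking_type=1
#       ...
#   }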
lvmconf() {
	local profile_name=""

	test $# -eq 0 || {
		# Compare if passed args aren't already all in generated lvm.conf
		local needed=0
		for i in "$@"; do
			val=$(grep "${i%%[={ ]*}" CONFIG_VALUES 2>/dev/null | tail -1) || { needed=1; break; }
			test "$val" = "$i" || { needed=1; break; }
		done
		test "$needed" -eq 0 && {
			echo "## Skipping reconfiguring for: (" "$@" ")"
			return 0 # not needed
		}
	}

	generate_config "$@"
	mv -f CONFIG "$LVM_SYSTEM_DIR/lvm.conf"
}
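# Usage sketch (comments only): tests tweak single options and rely on the
# "skip reconfiguring" shortcut when nothing actually changes, e.g.
#
#   lvmconf "activation/monitoring = 1"
#   lvmconf "activation/monitoring = 1"   # second call is a no-op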
profileconf() {
	local pdir="$LVM_SYSTEM_DIR/profile"
	local profile_name=$1

	shift
	generate_config "$@"
	mkdir -p "$pdir"
	mv -f "PROFILE_$profile_name" "$pdir/$profile_name.profile"
}

prepare_profiles() {
	local pdir="$LVM_SYSTEM_DIR/profile"
	local profile_name

	mkdir -p "$pdir"
	for profile_name in "$@"; do
		test -L "lib/$profile_name.profile" || skip
		cp "lib/$profile_name.profile" "$pdir/$profile_name.profile"
	done
}
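# Usage sketch (comments only; the profile name, option and the
# --metadataprofile switch are illustrative assumptions, not mandated by this
# helper):
#
#   profileconf myprofile "activation/thin_pool_autoextend_threshold = 70"
#   lvcreate ... --metadataprofile myprofile ...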
unittest() {
	test -x "$TESTOLDPWD/unit/unit-test" || skip
	"$TESTOLDPWD/unit/unit-test" "${@}"
}

mirror_recovery_works() {
	case "$(uname -r)" in
	  3.3.4-5.fc17.i686|3.3.4-5.fc17.x86_64) return 1 ;;
	esac
}
raid456_replace_works() {
	# The way kmem_cache aliasing is done in the kernel is broken.
	# It causes RAID 4/5/6 tests to fail.
	#
	# The problem with kmem_cache* is this:
	# *) Assume CONFIG_SLUB is set
	# 1) kmem_cache_create(name="foo-a")
	#    - creates new kmem_cache structure
	# 2) kmem_cache_create(name="foo-b")
	#    - If identical cache characteristics, it will be merged with the previously
	#      created cache associated with "foo-a".  The cache's refcount will be
	#      incremented and an alias will be created via sysfs_slab_alias().
	# 3) kmem_cache_destroy(<ptr>)
	#    - Attempting to destroy cache associated with "foo-a", but instead the
	#      refcount is simply decremented.  I don't even think the sysfs aliases are
	#      ever removed...
	# 4) kmem_cache_create(name="foo-a")
	#    - This FAILS because kmem_cache_sanity_check collides with the existing
	#      name ("foo-a") associated with the non-removed cache.
	#
	# This is a problem for RAID (specifically dm-raid) because the name used
	# for the kmem_cache_create is ("raid%d-%p", level, mddev).  If the cache
	# persists for long enough, the memory address of an old mddev will be
	# reused for a new mddev - causing an identical formulation of the cache
	# name.  Even though kmem_cache_destroy had long ago been used to delete
	# the old cache, the merging of caches has caused the name and cache of that
	# old instance to be preserved and causes a collision (and thus failure) in
	# kmem_cache_create().  I see this regularly in testing the following
	# kernels:
	#
	# This seems to be finally resolved with this patch:
	# http://www.redhat.com/archives/dm-devel/2014-March/msg00008.html
	# so we need to put an exclusion here for kernels which do trace SLUB
	#
	case "$(uname -r)" in
	  3.6.*.fc18.i686*|3.6.*.fc18.x86_64) return 1 ;;
	  3.9.*.fc19.i686*|3.9.*.fc19.x86_64) return 1 ;;
	  3.1[0123].*.fc18.i686*|3.1[0123].*.fc18.x86_64) return 1 ;;
	  3.1[01234].*.fc19.i686*|3.1[01234].*.fc19.x86_64) return 1 ;;
	  3.1[123].*.fc20.i686*|3.1[123].*.fc20.x86_64) return 1 ;;
	  3.14.*.fc21.i686*|3.14.*.fc21.x86_64) return 1 ;;
	  3.15.*rc6*.fc21.i686*|3.15.*rc6*.fc21.x86_64) return 1 ;;
	  3.16.*rc4*.fc21.i686*|3.16.*rc4*.fc21.x86_64) return 1 ;;
	esac
}
#
# Some 32bit kernels cannot pass the erroring magic which forces
# a thin-pool to fall into Error state.
#
# Skip test on such kernels (see: https://bugzilla.redhat.com/1310661)
#
thin_pool_error_works_32() {
	case "$(uname -r)" in
	  2.6.32-618.*.i686) return 1 ;;
	  2.6.32-623.*.i686) return 1 ;;
	  2.6.32-573.1[28].1.el6.i686) return 1 ;;
	esac
}

thin_restore_needs_more_volumes() {
	case $("$LVM_TEST_THIN_RESTORE_CMD" -V) in
	  # With older versions of thin tools we got slightly more compact metadata
	  0.[0-6]*|0.7.0*) return 0 ;;
	  0.8.5-2.el7) return 0 ;;
	esac
	return 1
}
udev_wait() {
	pgrep udev >/dev/null || return 0
	which udevadm &>/dev/null || return 0
	if test -n "${1-}"; then
		udevadm settle --exit-if-exists="$1" 2>/dev/null || true
	else
		udevadm settle --timeout=15 2>/dev/null || true
	fi
}
# wait_for_sync <VG/LV>
wait_for_sync() {
	local i

	for i in {1..100}; do
		check in_sync "$@" && return
		sleep .2
	done

	echo "Sync is taking too long - assume stuck"
	echo t >/proc/sysrq-trigger 2>/dev/null
	return 1
}
wait_recalc() {
	local checklv=$1

	for i in {1..100}; do
		sync=$(get lv_field "$checklv" sync_percent | cut -d. -f1)
		echo "sync_percent is $sync"

		test "$sync" = "100" && return

		sleep .1
	done

	# TODO: There is some strange bug, first leg of RAID with integrity
	# enabled never gets in sync. I saw this in BB, but not when executing
	# the commands manually
	#if test -z "$sync"; then
	#	echo "TEST WARNING: Resync of dm-integrity device '$checklv' failed"
	#	dmsetup status "$DM_DEV_DIR/mapper/${checklv/\//-}"
	#	exit
	#fi
	echo "Timeout waiting for recalc"
	dmsetup status "$DM_DEV_DIR/mapper/${checklv/\//-}"
	return 1
}
# Check if tests are running on 64bit architecture
can_use_16T() {
	test "$(getconf LONG_BIT)" -eq 64
}
# Check if 'major.minor.revision' string is 'at_least'
version_at_least() {
	local major
	local minor
	local revision

	IFS=".-" read -r major minor revision <<< "$1"
	shift

	test -n "${1:-}" || return 0
	test -n "$major" || return 1
	test "$major" -gt "$1" && return 0
	test "$major" -eq "$1" || return 1

	test -n "${2:-}" || return 0
	test -n "$minor" || return 1
	test "$minor" -gt "$2" && return 0
	test "$minor" -eq "$2" || return 1

	test -n "${3:-}" || return 0

	test "$revision" -ge "$3" 2>/dev/null || return 1
}
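# Illustrative comparisons (comments only):
#
#   version_at_least 1.9.0 1 8     # succeeds: 1.9 >= 1.8
#   version_at_least 1.9.0 1 9 1   # fails: revision 0 < 1
#   version_at_least 1.9.0 1       # succeeds: only major requested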
#
# Check whether the kernel [dm module] target exists
# at least in the expected version
#
# [dm-]target-name major minor revision
#
# i.e.   target_at_least  dm-thin-pool  1 0
target_at_least() {
	rm -f debug.log strace.log
	case "$1" in
	  dm-vdo) modprobe "kvdo" || true ;;
	  dm-*) modprobe "$1" || true ;;
	esac

	if test "$1" = dm-raid; then
		case "$(uname -r)" in
		  3.12.0*) return 1 ;;
		esac
	fi

	local version
	version=$(dmsetup targets 2>/dev/null | grep "^${1##dm-} " 2>/dev/null)
	version=${version##* v}

	version_at_least "$version" "${@:2}" || {
		echo "Found $1 version $version, but requested ${*:2}." >&2
		return 1
	}
}
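# Usage sketch (comments only): typically used to skip a test early when the
# kernel target is too old, e.g.
#
#   target_at_least dm-thin-pool 1 5 0 || skip "thin-pool target too old"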
# Check whether the kernel driver version is greater or equal
# to the specified version. This can be used to skip tests on
# kernels where they are known to not be supported.
#
# e.g. driver_at_least 4 33
#
driver_at_least() {
	local version
	version=$(dmsetup version | tail -1 2>/dev/null)
	version=${version##*:}
	version_at_least "$version" "$@" || {
		echo "Found driver version $version, but requested" "$@" "." >&2
		return 1
	}
}
have_thin() {
	lvm segtypes 2>/dev/null | grep thin$ >/dev/null || {
		echo "Thin is not built-in." >&2
		return 1
	}
	target_at_least dm-thin-pool "$@"

	declare -a CONF=()
	# disable thin_check if not present in system
	if test -n "$LVM_TEST_THIN_CHECK_CMD" && test ! -x "$LVM_TEST_THIN_CHECK_CMD"; then
		CONF[0]="global/thin_check_executable = \"\""
	fi
	if test -n "$LVM_TEST_THIN_DUMP_CMD" && test ! -x "$LVM_TEST_THIN_DUMP_CMD"; then
		CONF[1]="global/thin_dump_executable = \"\""
	fi
	if test -n "$LVM_TEST_THIN_REPAIR_CMD" && test ! -x "$LVM_TEST_THIN_REPAIR_CMD"; then
		CONF[2]="global/thin_repair_executable = \"\""
	fi
	if test ${#CONF[@]} -ne 0; then
		echo "TEST WARNING: Reconfiguring" "${CONF[@]}"
		lvmconf "${CONF[@]}"
	fi
}
have_vdo() {
	lvm segtypes 2>/dev/null | grep 'vdo$' >/dev/null || {
		echo "VDO is not built-in." >&2
		return 1
	}
	target_at_least dm-vdo "$@"
}

have_writecache() {
	lvm segtypes 2>/dev/null | grep 'writecache$' >/dev/null || {
		echo "writecache is not built-in." >&2
		return 1
	}
	target_at_least dm-writecache "$@"
}

have_integrity() {
	lvm segtypes 2>/dev/null | grep 'integrity$' >/dev/null || {
		echo "integrity is not built-in." >&2
		return 1
	}
	target_at_least dm-integrity "$@"
}
have_raid() {
	target_at_least dm-raid "$@"

	# some kernels have broken mdraid bitmaps, don't use them!
	# may oops kernel, we know for sure all FC24 are currently broken
	# in general any 4.1, 4.2 is likely useless unless patched
	case "$(uname -r)" in
	  4.[12].*fc24*) return 1 ;;
	esac
}

have_raid4() {
	local r=0

	have_raid 1 8 0 && r=1
	have_raid 1 9 1 && r=0

	return $r
}
have_cache() {
	lvm segtypes 2>/dev/null | grep ' cache-pool$' >/dev/null || {
		echo "Cache is not built-in." >&2
		return 1
	}
	target_at_least dm-cache "$@"

	declare -a CONF=()
	# disable cache_check if not present in system
	if test -n "$LVM_TEST_CACHE_CHECK_CMD" && test ! -x "$LVM_TEST_CACHE_CHECK_CMD"; then
		CONF[0]="global/cache_check_executable = \"\""
	fi
	if test -n "$LVM_TEST_CACHE_DUMP_CMD" && test ! -x "$LVM_TEST_CACHE_DUMP_CMD"; then
		CONF[1]="global/cache_dump_executable = \"\""
	fi
	if test -n "$LVM_TEST_CACHE_REPAIR_CMD" && test ! -x "$LVM_TEST_CACHE_REPAIR_CMD"; then
		CONF[2]="global/cache_repair_executable = \"\""
	fi
	if test ${#CONF[@]} -ne 0; then
		echo "TEST WARNING: Reconfiguring" "${CONF[@]}"
		lvmconf "${CONF[@]}"
	fi
}
have_tool_at_least() {
	local version
	version=$("$1" -V 2>/dev/null)
	version=${version%%-*}
	version=${version##* }
	shift

	version_at_least "$version" "$@"
}
# check if lvm shell is built-in  (needs readline)
have_readline() {
	echo version | lvm &>/dev/null
}
have_multi_core() {
	which nproc &>/dev/null || return 0
	[ "$(nproc)" -ne 1 ]
}

dmsetup_wrapped() {
	udev_wait
	dmsetup "$@"
}
awk_parse_init_count_in_lvmpolld_dump() {
	printf '%s' \
	\
	$'BEGINFILE { x=0; answ=0 }' \
	$'{' \
	$'if (/.*{$/) { x++ }' \
	$'else if (/.*}$/) { x-- }' \
	$'else if ( x == 2 && $1 ~ "[[:space:]]*"vkey) { value=substr($2, 2); value=substr(value, 1, length(value) - 1); }' \
	$'if ( x == 2 && value == vvalue && $1 ~ /[[:space:]]*init_requests_count/) { answ=$2 }' \
	$'if (answ > 0) { exit 0 }' \
	$'}' \
	$'END { printf "%d", answ }'
}

check_lvmpolld_init_rq_count() {
	local ret
	ret=$(awk -v vvalue="$2" -v vkey="${3:-lvname}" -F= "$(awk_parse_init_count_in_lvmpolld_dump)" lvmpolld_dump.txt)
	test "$ret" -eq "$1" || {
		die "check_lvmpolld_init_rq_count failed. Expected $1, got $ret"
	}
}
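# Usage sketch (comments only): wait_pvmove_lv_ready() below is the real
# caller; it dumps lvmpolld state first and then expects one init request per
# LV uuid (key "lvid", with the "LVM-" prefix stripped), roughly:
#
#   lvmpolld_dump > lvmpolld_dump.txt
#   check_lvmpolld_init_rq_count 1 "${lv_uuid##LVM-}" lvid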
wait_pvmove_lv_ready() {
	# given sleep .1 this is about 20 secs of waiting
	local lvid=()
	local all

	for i in {100..0}; do
		if [ -e LOCAL_LVMPOLLD ]; then
			if test "${#lvid[@]}" -eq "$#"; then
				lvmpolld_dump > lvmpolld_dump.txt
				all=1
				for l in "${lvid[@]}"; do
					check_lvmpolld_init_rq_count 1 "${l##LVM-}" lvid || all=0
				done
				test "$all" = 1 && return
			else
				# wait till wanted LV really appears
				lvid=( $(dmsetup info --noheadings -c -o uuid "$@" 2>/dev/null) ) || true
			fi
		else
			dmsetup info -c --noheadings -o tables_loaded "$@" >out 2>/dev/null || true
			test "$(grep -c Live out)" = "$#" && return
		fi
		sleep .1
	done

	test -e LOCAL_LVMPOLLD && die "Waiting for lvmpolld timed out"
	die "Waiting for pvmove LV to get activated has timed out"
}
# Holds device open with sleep which automatically expires after given timeout
# Prints PID of the holding sleep process running in background
hold_device_open() {
	local vgname=$1
	local lvname=$2
	local sec=${3-20} # default 20sec

	sleep "$sec" < "$DM_DEV_DIR/$vgname/$lvname" >/dev/null 2>&1 &
	SLEEP_PID=$!

	# wait till device is opened
	for i in $(seq 1 50); do
		if test "$(dmsetup info --noheadings -c -o open "$vgname"-"$lvname")" -ne 0; then
			echo "$SLEEP_PID"
			return
		fi
		sleep .1
	done

	die "$vgname-$lvname expected to be opened, but it's not!"
}
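# Usage sketch (comments only; "$lv1" and the "not" negation wrapper are the
# suite's usual conventions, assumed here):
#
#   pid=$(hold_device_open "$vg" "$lv1" 10)   # keep the LV open for up to 10s
#   not lvremove "$vg/$lv1"                   # removal should fail while held
#   kill "$pid"                               # or just let the sleep expire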
# return total memory size in kB units
total_mem() {
	local a
	local b

	while IFS=":" read -r a b; do
		case "$a" in MemTotal*) echo "${b%% kB}"; break ;; esac
	done < /proc/meminfo
}

kernel_at_least() {
	version_at_least "$(uname -r)" "$@"
}
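# Usage sketch (comments only): both helpers are typically used for early
# skips; the thresholds below are illustrative:
#
#   test "$(total_mem)" -gt 524288 || skip "not enough memory"
#   kernel_at_least 4 18 || skip "kernel too old"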
test "${LVM_TEST_AUX_TRACE-0}" = "0" || set -x

test -f DEVICES && devs=$(< DEVICES)

if test "$1" = "dmsetup"; then
	shift
	dmsetup_wrapped "$@"
else
	"$@"
fi