#!/usr/bin/env bash
# SPDX-License-Identifier: LGPL-2.1-or-later
# vi: ts=4 sw=4 tw=0 et:

set -eux
set -o pipefail

# Check if all symlinks under /dev/disk/ are valid
# shellcheck disable=SC2120
helper_check_device_symlinks() {(
    set +x

    local dev link path paths target

    [[ $# -gt 0 ]] && paths=("$@") || paths=("/dev/disk" "/dev/mapper")

    # Check if all given paths are valid
    for path in "${paths[@]}"; do
        if ! test -e "$path"; then
            echo >&2 "Path '$path' doesn't exist"
            return 1
        fi
    done

    while read -r link; do
        target="$(readlink -f "$link")"
        # Both checks should do virtually the same thing, but check both to be
        # on the safe side
        if [[ ! -e "$link" || ! -e "$target" ]]; then
            echo >&2 "ERROR: symlink '$link' points to '$target' which doesn't exist"
            return 1
        fi

        # Check if the symlink points to the correct device in /dev
        dev="/dev/$(udevadm info -q name "$link")"
        if [[ "$target" != "$dev" ]]; then
            echo >&2 "ERROR: symlink '$link' points to '$target' but '$dev' was expected"
            return 1
        fi
    done < <(find "${paths[@]}" -type l)
)}
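
# Usage sketch (for reference, not executed here): the helper defaults to
# /dev/disk and /dev/mapper, but can be pointed at any symlink tree, e.g.:
#   helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"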

helper_check_udev_watch() {(
    set +x

    local link target id dev

    while read -r link; do
        target="$(readlink "$link")"
        if [[ ! -L "/run/udev/watch/$target" ]]; then
            echo >&2 "ERROR: symlink /run/udev/watch/$target does not exist"
            return 1
        fi
        if [[ "$(readlink "/run/udev/watch/$target")" != "$(basename "$link")" ]]; then
            echo >&2 "ERROR: symlink target of /run/udev/watch/$target is inconsistent with $link"
            return 1
        fi

        if [[ "$target" =~ ^[0-9]+$ ]]; then
            # $link is ID -> wd
            id="$(basename "$link")"
        else
            # $link is wd -> ID
            id="$target"
        fi

        if [[ "${id:0:1}" == "b" ]]; then
            dev="/dev/block/${id:1}"
        elif [[ "${id:0:1}" == "c" ]]; then
            dev="/dev/char/${id:1}"
        else
            echo >&2 "ERROR: unexpected device ID '$id'"
            return 1
        fi

        if [[ ! -e "$dev" ]]; then
            echo >&2 "ERROR: device '$dev' corresponding to symlink '$link' does not exist"
            return 1
        fi
    done < <(find /run/udev/watch -type l)
)}
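
# For reference (illustrative values): udevd keeps its inotify watch state as
# pairs of symlinks under /run/udev/watch, one per direction, e.g.:
#   /run/udev/watch/b259:1 -> 3   (device ID -> watch descriptor)
#   /run/udev/watch/3 -> b259:1   (watch descriptor -> device ID)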

check_device_unit() {(
    set +x

    local log_level link links path syspath unit
    log_level="${1?}"
    path="${2?}"
    unit=$(systemd-escape --path --suffix=device "$path")

    [[ "$log_level" == 1 ]] && echo "INFO: check_device_unit($unit)"

    syspath=$(systemctl show --value --property SysFSPath "$unit" 2>/dev/null)
    if [[ -z "$syspath" ]]; then
        [[ "$log_level" == 1 ]] && echo >&2 "ERROR: $unit not found."
        return 1
    fi

    if [[ ! -L "$path" ]]; then
        if [[ ! -d "$syspath" ]]; then
            [[ "$log_level" == 1 ]] && echo >&2 "ERROR: $unit exists for $syspath but it does not exist."
            return 1
        fi
        return 0
    fi

    if [[ ! -b "$path" && ! -c "$path" ]]; then
        [[ "$log_level" == 1 ]] && echo >&2 "ERROR: invalid file type $path"
        return 1
    fi

    read -r -a links < <(udevadm info -q symlink "$syspath" 2>/dev/null)
    for link in "${links[@]}"; do
        if [[ "/dev/$link" == "$path" ]]; then # DEVLINKS= given by -q symlink are relative to /dev
            return 0
        fi
    done

    read -r -a links < <(udevadm info "$syspath" | sed -ne '/SYSTEMD_ALIAS=/ { s/^E: SYSTEMD_ALIAS=//; p }' 2>/dev/null)
    for link in "${links[@]}"; do
        if [[ "$link" == "$path" ]]; then # SYSTEMD_ALIAS= are absolute
            return 0
        fi
    done

    [[ "$log_level" == 1 ]] && echo >&2 "ERROR: $unit exists for $syspath but it does not have the corresponding DEVLINKS or SYSTEMD_ALIAS."
    return 1
)}
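
# For reference, the two properties checked above look roughly like this
# (illustrative values, not taken from a real run):
#   $ udevadm info -q symlink <syspath>
#   disk/by-partuuid/... disk/by-partlabel/...    # devlinks, relative to /dev
#   $ udevadm info <syspath> | grep SYSTEMD_ALIAS
#   E: SYSTEMD_ALIAS=/dev/...                     # alias, an absolute path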

check_device_units() {(
    set +x

    local log_level path paths unit
    log_level="${1?}"
    shift
    paths=("$@")

    for path in "${paths[@]}"; do
        if ! check_device_unit "$log_level" "$path"; then
            return 1
        fi
    done

    while read -r unit _; do
        path=$(systemd-escape --path --unescape "$unit")
        if ! check_device_unit "$log_level" "$path"; then
            return 1
        fi
    done < <(systemctl list-units --all --type=device --no-legend dev-* | awk '$1 !~ /dev-tty.+/ && $4 == "plugged" { print $1 }' | sed -e 's/\.device$//')

    return 0
)}

helper_check_device_units() {(
    set +x

    local i
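
    # Try quietly (log_level=0) for up to ~10 seconds, then make one final
    # verbose (log_level=1) attempt so a failure leaves useful output behind.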
    for i in {1..20}; do
        (( i > 1 )) && sleep 0.5

        if check_device_units 0 "$@"; then
            return 0
        fi
    done

    check_device_units 1 "$@"
)}

testcase_virtio_scsi_basic() {
    lsblk -S
    [[ "$(lsblk --scsi --noheadings | wc -l)" -ge 128 ]]
}

testcase_nvme_basic() {
    local expected_symlinks=()
    local i

    for (( i = 0; i < 5; i++ )); do
        expected_symlinks+=(
            # both replace modes provide the same devlink
            /dev/disk/by-id/nvme-QEMU_NVMe_Ctrl_deadbeef"$i"
            # with nsid
            /dev/disk/by-id/nvme-QEMU_NVMe_Ctrl_deadbeef"$i"_1
        )
    done
    for (( i = 5; i < 10; i++ )); do
        expected_symlinks+=(
            # old replace mode
            /dev/disk/by-id/nvme-QEMU_NVMe_Ctrl__deadbeef_"$i"
            # newer replace mode
            /dev/disk/by-id/nvme-QEMU_NVMe_Ctrl_____deadbeef__"$i"
            # with nsid
            /dev/disk/by-id/nvme-QEMU_NVMe_Ctrl_____deadbeef__"$i"_1
        )
    done
    for (( i = 10; i < 15; i++ )); do
        expected_symlinks+=(
            # old replace mode does not provide devlink, as serial contains "/"
            # newer replace mode
            /dev/disk/by-id/nvme-QEMU_NVMe_Ctrl_____dead_beef_"$i"
            # with nsid
            /dev/disk/by-id/nvme-QEMU_NVMe_Ctrl_____dead_beef_"$i"_1
        )
    done
    for (( i = 15; i < 20; i++ )); do
        expected_symlinks+=(
            # old replace mode does not provide devlink, as serial contains "/"
            # newer replace mode
            /dev/disk/by-id/nvme-QEMU_NVMe_Ctrl_dead_.._.._beef_"$i"
            # with nsid
            /dev/disk/by-id/nvme-QEMU_NVMe_Ctrl_dead_.._.._beef_"$i"_1
        )
    done
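
    # Note: the underscore runs above come from udev replacing characters that
    # are not safe in devlinks (spaces, slashes, leading/trailing whitespace)
    # with "_"; the old and the newer replace logic do this slightly
    # differently, hence both variants are listed where they differ.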

    udevadm settle
    ls /dev/disk/by-id
    for i in "${expected_symlinks[@]}"; do
        udevadm wait --settle --timeout=30 "$i"
    done

    lsblk --noheadings | grep "^nvme"
    [[ "$(lsblk --noheadings | grep -c "^nvme")" -ge 20 ]]
}

testcase_nvme_subsystem() {
    local expected_symlinks=(
        # Controller(s)
        /dev/disk/by-id/nvme-QEMU_NVMe_Ctrl_deadbeef
        /dev/disk/by-id/nvme-QEMU_NVMe_Ctrl_deadbeef_16
        /dev/disk/by-id/nvme-QEMU_NVMe_Ctrl_deadbeef_17
        # Shared namespaces
        /dev/disk/by-path/*pci*-nvme-16
        /dev/disk/by-path/*pci*-nvme-17
    )

    udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"
}

testcase_virtio_scsi_identically_named_partitions() {
    local num_part num_disk i j

    if [[ -v ASAN_OPTIONS || "$(systemd-detect-virt -v)" == "qemu" ]]; then
        num_part=4
        num_disk=4
    else
        num_part=8
        num_disk=16
    fi
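
    # 'udevadm lock' takes the BSD file lock on the device node for the
    # duration of the wrapped command, so udevd won't process uevents for a
    # disk that sfdisk is still writing to.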
    for (( i = 0; i < num_disk; i++ )); do
        udevadm lock --device "/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive$i" \
                     sfdisk "/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_drive$i" <<EOF
label: gpt
$(for (( j = 1; j <= num_part; j++ )); do echo 'name="Hello world", size=2M'; done)
EOF
    done

    udevadm settle

    lsblk --noheadings -a -o NAME,PARTLABEL
    [[ "$(lsblk --noheadings -a -o NAME,PARTLABEL | grep -c "Hello world")" -eq "$((num_part * num_disk))" ]]
}

testcase_multipath_basic_failover() {
    local dmpath i path wwid

    # Configure multipath
    cat >/etc/multipath.conf <<\EOF
defaults {
    # Use /dev/mapper/$WWN paths instead of /dev/mapper/mpathX
    user_friendly_names no
    find_multipaths yes
    enable_foreign "^$"
}
blacklist_exceptions {
    property "(SCSI_IDENT_|ID_WWN)"
}
blacklist {
}
EOF
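
    # With user_friendly_names disabled the multipath maps show up as
    # /dev/mapper/<WWID>, so the WWID-based paths used below stay predictable.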

    udevadm lock --device /dev/disk/by-id/wwn-0xdeaddeadbeef0000 \
                 sfdisk /dev/disk/by-id/wwn-0xdeaddeadbeef0000 <<EOF
label: gpt
name="first_partition", size=5M
uuid="deadbeef-dead-dead-beef-000000000000", name="failover_part", size=5M
EOF
    udevadm settle

    udevadm lock --device /dev/disk/by-id/wwn-0xdeaddeadbeef0000-part2 \
                 mkfs.ext4 -U "deadbeef-dead-dead-beef-111111111111" -L "failover_vol" /dev/disk/by-id/wwn-0xdeaddeadbeef0000-part2

    modprobe -v dm_multipath
    systemctl start multipathd.service
    systemctl status multipathd.service
    multipath -ll
    udevadm settle
    ls -l /dev/disk/by-id/

    for i in {0..15}; do
        wwid="deaddeadbeef$(printf "%.4d" "$i")"
        path="/dev/disk/by-id/wwn-0x$wwid"
        dmpath="$(readlink -f "$path")"

        lsblk "$path"
        multipath -C "$dmpath"
        # We should have 4 active paths for each multipath device
        [[ "$(multipath -l "$path" | grep -c running)" -eq 4 ]]
    done

    # Test failover (with the first multipath device that has a partitioned disk)
    echo "${FUNCNAME[0]}: test failover"
    local device expected link mpoint part
    local -a devices

    mkdir -p /mnt
    mpoint="$(mktemp -d /mnt/mpathXXX)"
    wwid="deaddeadbeef0000"
    path="/dev/disk/by-id/wwn-0x$wwid"

    # All of the following symlinks should exist and be valid
    local -a part_links=(
        "/dev/disk/by-id/wwn-0x$wwid-part2"
        "/dev/disk/by-partlabel/failover_part"
        "/dev/disk/by-partuuid/deadbeef-dead-dead-beef-000000000000"
        "/dev/disk/by-label/failover_vol"
        "/dev/disk/by-uuid/deadbeef-dead-dead-beef-111111111111"
    )
    udevadm wait --settle --timeout=30 "${part_links[@]}"
    helper_check_device_units "${part_links[@]}"

    # Choose a random symlink to the failover data partition each time, for
    # better coverage
    part="${part_links[$RANDOM % ${#part_links[@]}]}"

    # Get all devices attached to a specific multipath device (in H:C:T:L format)
    # and sort them in a random order, so we cut off different paths each time
    mapfile -t devices < <(multipath -l "$path" | grep -Eo '[0-9]+:[0-9]+:[0-9]+:[0-9]+' | sort -R)
    if [[ "${#devices[@]}" -ne 4 ]]; then
        echo "Expected 4 devices attached to WWID=$wwid, got ${#devices[@]} instead"
        return 1
    fi
    # Drop the last path from the array, since we want to leave at least one path active
    unset "devices[3]"
    # Mount the first multipath partition, write some data we can check later,
    # and then disconnect the remaining paths one by one while checking if we
    # can still read/write from the mount
    mount -t ext4 "$part" "$mpoint"
    expected=0
    echo -n "$expected" >"$mpoint/test"
    # Sanity check we actually wrote what we wanted
    [[ "$(<"$mpoint/test")" == "$expected" ]]

    for device in "${devices[@]}"; do
        echo offline >"/sys/class/scsi_device/$device/device/state"
        [[ "$(<"$mpoint/test")" == "$expected" ]]
        expected="$((expected + 1))"
        echo -n "$expected" >"$mpoint/test"

        # Make sure all symlinks are still valid
        udevadm wait --settle --timeout=30 "${part_links[@]}"
        helper_check_device_units "${part_links[@]}"
    done

    multipath -l "$path"
    # Three paths should now be marked as 'offline' and one as 'running'
    [[ "$(multipath -l "$path" | grep -c offline)" -eq 3 ]]
    [[ "$(multipath -l "$path" | grep -c running)" -eq 1 ]]

    umount "$mpoint"
    rm -fr "$mpoint"
}

testcase_simultaneous_events_1() {
    local disk expected i iterations key link num_part part partscript rule target timeout
    local -a devices symlinks
    local -A running

    if [[ -v ASAN_OPTIONS || "$(systemd-detect-virt -v)" == "qemu" ]]; then
        num_part=2
        iterations=10
        timeout=240
    else
        num_part=10
        iterations=100
        timeout=60
    fi

    for disk in {0..9}; do
        link="/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_deadbeeftest${disk}"
        target="$(readlink -f "$link")"
        if [[ ! -b "$target" ]]; then
            echo "ERROR: failed to find the test SCSI block device $link"
            return 1
        fi

        devices+=("$target")
    done

    for (( part = 1; part <= num_part; part++ )); do
        symlinks+=(
            "/dev/disk/by-partlabel/test${part}"
        )
    done

    partscript="$(mktemp)"
    cat >"$partscript" <<EOF
$(for (( part = 1; part <= num_part; part++ )); do printf 'name="test%d", size=2M\n' "$part"; done)
EOF

    rule=/run/udev/rules.d/50-test.rules
    mkdir -p "${rule%/*}"
    cat >"$rule" <<EOF
SUBSYSTEM=="block", KERNEL=="${devices[4]##*/}*|${devices[5]##*/}*", OPTIONS="link_priority=10"
EOF
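
    # The rule above gives the devlinks of devices[4] and devices[5] a higher
    # link_priority, so one of those two disks always owns the contested
    # /dev/disk/by-partlabel/testN symlinks; which of the two it is follows
    # from the delete/recreate parity of each iteration, as checked below.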

    udevadm control --reload

    # Initialize the partition tables
    for disk in {0..9}; do
        echo 'label: gpt' | udevadm lock --device="${devices[$disk]}" sfdisk -q "${devices[$disk]}"
    done

    # Delete the partitions, immediately recreate them, wait for udev to settle
    # down, and then check if we have any dangling symlinks in /dev/disk/. Rinse
    # and repeat.
    #
    # On unpatched udev versions the delete-recreate cycle may trigger a race
    # leading to dead symlinks in /dev/disk/
    for (( i = 1; i <= iterations; i++ )); do
        for disk in {0..9}; do
            if (( disk % 2 == i % 2 )); then
                udevadm lock --device="${devices[$disk]}" sfdisk -q --delete "${devices[$disk]}" &
            else
                udevadm lock --device="${devices[$disk]}" sfdisk -q -X gpt "${devices[$disk]}" <"$partscript" &
            fi
            running[$disk]=$!
        done

        for key in "${!running[@]}"; do
            wait "${running[$key]}"
            unset "running[$key]"
        done

        if (( i % 10 <= 1 )); then
            udevadm wait --settle --timeout="$timeout" "${devices[@]}" "${symlinks[@]}"
            helper_check_device_symlinks
            helper_check_udev_watch
            for (( part = 1; part <= num_part; part++ )); do
                link="/dev/disk/by-partlabel/test${part}"
                target="$(readlink -f "$link")"
                if (( i % 2 == 0 )); then
                    expected="${devices[5]}$part"
                else
                    expected="${devices[4]}$part"
                fi
                if [[ "$target" != "$expected" ]]; then
                    echo >&2 "ERROR: symlink '/dev/disk/by-partlabel/test${part}' points to '$target' but '$expected' was expected"
                    return 1
                fi
            done
        fi
    done

    helper_check_device_units

    rm -f "$rule" "$partscript"

    udevadm control --reload
}

testcase_simultaneous_events_2() {
    local disk expected i iterations key link num_part part script_dir target timeout
    local -a devices symlinks
    local -A running

    script_dir="$(mktemp --directory "/tmp/test-udev-storage.script.XXXXXXXXXX")"
    # shellcheck disable=SC2064
    trap "rm -rf '$script_dir'" RETURN

    if [[ -v ASAN_OPTIONS || "$(systemd-detect-virt -v)" == "qemu" ]]; then
        num_part=20
        iterations=1
        timeout=2400
    else
        num_part=100
        iterations=3
        timeout=300
    fi

    for disk in {0..9}; do
        link="/dev/disk/by-id/scsi-0QEMU_QEMU_HARDDISK_deadbeeftest${disk}"
        target="$(readlink -f "$link")"
        if [[ ! -b "$target" ]]; then
            echo "ERROR: failed to find the test SCSI block device $link"
            return 1
        fi

        devices+=("$target")
    done

    for (( i = 1; i <= iterations; i++ )); do
        cat >"$script_dir/partscript-$i" <<EOF
$(for (( part = 1; part <= num_part; part++ )); do printf 'name="testlabel-%d", size=1M\n' "$i"; done)
EOF
    done

    echo "## $iterations iterations start: $(date '+%H:%M:%S.%N')"
    for (( i = 1; i <= iterations; i++ )); do
        for disk in {0..9}; do
            udevadm lock --device="${devices[$disk]}" sfdisk -q --delete "${devices[$disk]}" &
            running[$disk]=$!
        done

        for key in "${!running[@]}"; do
            wait "${running[$key]}"
            unset "running[$key]"
        done

        for disk in {0..9}; do
            udevadm lock --device="${devices[$disk]}" sfdisk -q -X gpt "${devices[$disk]}" <"$script_dir/partscript-$i" &
            running[$disk]=$!
        done

        for key in "${!running[@]}"; do
            wait "${running[$key]}"
            unset "running[$key]"
        done

        udevadm wait --settle --timeout="$timeout" "${devices[@]}" "/dev/disk/by-partlabel/testlabel-$i"
    done
    echo "## $iterations iterations end: $(date '+%H:%M:%S.%N')"
}

testcase_simultaneous_events() {
    testcase_simultaneous_events_1
    testcase_simultaneous_events_2
}

testcase_lvm_basic() {
    local i iterations partitions part timeout
    local vgroup="MyTestGroup$RANDOM"
    local devices=(
        /dev/disk/by-id/scsi-0systemd_foobar_deadbeeflvm{0..3}
    )

    . /etc/os-release
    if [[ "$ID" == "ubuntu" ]]; then
        echo "LVM on Ubuntu is broken, skipping the test" | tee --append /skipped
        exit 77
    fi

    if [[ -v ASAN_OPTIONS || "$(systemd-detect-virt -v)" == "qemu" ]]; then
        timeout=180
    else
        timeout=30
    fi

    # Make sure all the necessary soon-to-be-LVM devices exist
    ls -l "${devices[@]}"

    # Add all test devices into a volume group, create two logical volumes,
    # and check if the necessary symlinks exist (and are valid)
    lvm pvcreate -y "${devices[@]}"
    lvm pvs
    lvm vgcreate "$vgroup" -y "${devices[@]}"
    lvm vgs
    lvm vgchange -ay "$vgroup"
    lvm lvcreate -y -L 4M "$vgroup" -n mypart1
    lvm lvcreate -y -L 32M "$vgroup" -n mypart2
    lvm lvs
    udevadm wait --settle --timeout="$timeout" "/dev/$vgroup/mypart1" "/dev/$vgroup/mypart2"
    mkfs.ext4 -L mylvpart1 "/dev/$vgroup/mypart1"
    udevadm trigger --settle "/dev/$vgroup/mypart1"
    udevadm wait --settle --timeout="$timeout" "/dev/disk/by-label/mylvpart1"
    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
    helper_check_device_units

    # Mount mypart1 through the by-label devlink
    mkdir -p /tmp/mypart1-mount-point
    mount /dev/disk/by-label/mylvpart1 /tmp/mypart1-mount-point
    timeout 30 bash -c "until systemctl -q is-active /tmp/mypart1-mount-point; do sleep .2; done"

    # Extend the partition and check if the device and mount units are still active.
    # See https://bugzilla.redhat.com/show_bug.cgi?id=2158628
    # Note: the test below may be unstable with LVM2 without the following patch:
    # https://github.com/lvmteam/lvm2/pull/105
    # That said, to reproduce the issue udevd must start processing the first
    # 'change' uevent before the volume extension has finished, and in most cases
    # the extension is fast enough.
    lvm lvextend -y --size 8M "/dev/$vgroup/mypart1"
    udevadm wait --settle --timeout="$timeout" "/dev/disk/by-label/mylvpart1"
    timeout 30 bash -c "until systemctl -q is-active '/dev/$vgroup/mypart1'; do sleep .2; done"
    timeout 30 bash -c "until systemctl -q is-active /tmp/mypart1-mount-point; do sleep .2; done"

    # Unmount the partition, otherwise the underlying device unit will stay in
    # the inactive state and not be collected, and helper_check_device_units() will fail.
    systemctl show /tmp/mypart1-mount-point
    umount /tmp/mypart1-mount-point

    # Rename partitions (see issue #24518)
    lvm lvrename "/dev/$vgroup/mypart1" renamed1
    lvm lvrename "/dev/$vgroup/mypart2" renamed2
    udevadm wait --settle --timeout="$timeout" --removed "/dev/$vgroup/mypart1" "/dev/$vgroup/mypart2"
    udevadm wait --settle --timeout="$timeout" "/dev/$vgroup/renamed1" "/dev/$vgroup/renamed2"
    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
    helper_check_device_units

    # Rename them back
    lvm lvrename "/dev/$vgroup/renamed1" mypart1
    lvm lvrename "/dev/$vgroup/renamed2" mypart2
    udevadm wait --settle --timeout="$timeout" --removed "/dev/$vgroup/renamed1" "/dev/$vgroup/renamed2"
    udevadm wait --settle --timeout="$timeout" "/dev/$vgroup/mypart1" "/dev/$vgroup/mypart2"
    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
    helper_check_device_units

    # Do not "unready" suspended encrypted devices w/o superblock info
    # See:
    #   - https://github.com/systemd/systemd/pull/24177
    #   - https://bugzilla.redhat.com/show_bug.cgi?id=1985288
    dd if=/dev/urandom of=/etc/lvm_keyfile bs=64 count=1 iflag=fullblock
    chmod 0600 /etc/lvm_keyfile
    # Intentionally use weaker cipher-related settings, since we don't care
    # about security here as it's a throwaway LUKS partition
    cryptsetup luksFormat -q --use-urandom --pbkdf pbkdf2 --pbkdf-force-iterations 1000 \
               "/dev/$vgroup/mypart2" /etc/lvm_keyfile
    # Open the LUKS partition & create a filesystem on it
    mkdir -p /tmp/lvmluksmnt
    cryptsetup open --key-file=/etc/lvm_keyfile "/dev/$vgroup/mypart2" "lvmluksmap"
    udevadm wait --settle --timeout="$timeout" "/dev/mapper/lvmluksmap"
    mkfs.ext4 -L lvmluksfs "/dev/mapper/lvmluksmap"
    udevadm trigger --settle "/dev/mapper/lvmluksmap"
    udevadm wait --settle --timeout="$timeout" "/dev/disk/by-label/lvmluksfs"
    # Make systemd "interested" in the mount by adding it to /etc/fstab
    echo "/dev/disk/by-label/lvmluksfs /tmp/lvmluksmnt ext4 defaults 0 2" >>/etc/fstab
    systemctl daemon-reload
    mount "/tmp/lvmluksmnt"
    mountpoint "/tmp/lvmluksmnt"
    # Temporarily suspend the LUKS device and trigger udev - basically what
    # `cryptsetup resize` does, but in a more deterministic way suitable for
    # a test/reproducer
    for _ in {0..5}; do
        dmsetup suspend "/dev/mapper/lvmluksmap"
        udevadm trigger -v --settle "/dev/mapper/lvmluksmap"
        dmsetup resume "/dev/mapper/lvmluksmap"
        # The mount should survive this sequence of events
        mountpoint "/tmp/lvmluksmnt"
    done
    # Cleanup
    umount "/tmp/lvmluksmnt"
    cryptsetup close "/dev/mapper/lvmluksmap"
    sed -i "/lvmluksfs/d" "/etc/fstab"
    systemctl daemon-reload

    # Disable the VG and check symlinks...
    lvm vgchange -an "$vgroup"
    udevadm wait --settle --timeout="$timeout" --removed "/dev/$vgroup" "/dev/disk/by-label/mylvpart1"
    helper_check_device_symlinks "/dev/disk"
    helper_check_device_units

    # Reenable the VG and check the symlinks again if all LVs are properly activated
    lvm vgchange -ay "$vgroup"
    udevadm wait --settle --timeout="$timeout" "/dev/$vgroup/mypart1" "/dev/$vgroup/mypart2" "/dev/disk/by-label/mylvpart1"
    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
    helper_check_device_units

    # Same as above, but now with more "stress"
    if [[ -v ASAN_OPTIONS || "$(systemd-detect-virt -v)" == "qemu" ]]; then
        iterations=10
    else
        iterations=50
    fi

    for (( i = 1; i <= iterations; i++ )); do
        lvm vgchange -an "$vgroup"
        lvm vgchange -ay "$vgroup"

        if (( i % 5 == 0 )); then
            udevadm wait --settle --timeout="$timeout" "/dev/$vgroup/mypart1" "/dev/$vgroup/mypart2" "/dev/disk/by-label/mylvpart1"
            helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
            helper_check_device_units
        fi
    done

    # Remove the first LV
    lvm lvremove -y "$vgroup/mypart1"
    udevadm wait --settle --timeout="$timeout" --removed "/dev/$vgroup/mypart1"
    udevadm wait --timeout=0 "/dev/$vgroup/mypart2"
    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
    helper_check_device_units

    # Create & remove LVs in a loop, i.e. with more "stress"
    if [[ -v ASAN_OPTIONS ]]; then
        iterations=8
        partitions=16
    elif [[ "$(systemd-detect-virt -v)" == "qemu" ]]; then
        iterations=8
        partitions=8
    else
        iterations=16
        partitions=16
    fi

    for (( i = 1; i <= iterations; i++ )); do
        # 1) Create some logical volumes
        for (( part = 0; part < partitions; part++ )); do
            lvm lvcreate -y -L 4M "$vgroup" -n "looppart$part"
        done

        # 2) Immediately remove them
        lvm lvremove -y $(seq -f "$vgroup/looppart%g" 0 "$((partitions - 1))")

        # 3) On every 4th iteration settle udev and check if all partitions are
        #    indeed gone, and if all symlinks are still valid
        if (( i % 4 == 0 )); then
            for (( part = 0; part < partitions; part++ )); do
                udevadm wait --settle --timeout="$timeout" --removed "/dev/$vgroup/looppart$part"
            done
            helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
            helper_check_device_units
        fi
    done
}

testcase_btrfs_basic() {
    local dev_stub i label mpoint uuid
    local devices=(
        /dev/disk/by-id/scsi-0systemd_foobar_deadbeefbtrfs{0..3}
    )

    if ! modinfo btrfs; then
        echo "This test requires the btrfs kernel module but it is not installed, skipping the test" | tee --append /skipped
        exit 77
    fi

    ls -l "${devices[@]}"

    echo "Single device: default settings"
    uuid="deadbeef-dead-dead-beef-000000000000"
    label="btrfs_root"
    udevadm lock --device="${devices[0]}" mkfs.btrfs -f -L "$label" -U "$uuid" "${devices[0]}"
    udevadm wait --settle --timeout=30 "${devices[0]}" "/dev/disk/by-uuid/$uuid" "/dev/disk/by-label/$label"
    btrfs filesystem show
    helper_check_device_symlinks
    helper_check_device_units
echo "Multiple devices: using partitions, data: single, metadata: raid1"
uuid = "deadbeef-dead-dead-beef-000000000001"
label = "btrfs_mpart"
2022-04-06 19:35:26 +03:00
udevadm lock --device= " ${ devices [0] } " sfdisk --wipe= always " ${ devices [0] } " <<EOF
2021-09-17 20:28:38 +03:00
label: gpt
name = "diskpart1" , size = 85M
name = "diskpart2" , size = 85M
name = "diskpart3" , size = 85M
name = "diskpart4" , size = 85M
EOF
2022-04-04 21:31:58 +03:00
udevadm wait --settle --timeout= 30 /dev/disk/by-partlabel/diskpart{ 1..4}
2023-03-09 15:50:15 +03:00
udevadm lock --device= " ${ devices [0] } " mkfs.btrfs -f -d single -m raid1 -L " $label " -U " $uuid " /dev/disk/by-partlabel/diskpart{ 1..4}
2022-04-04 21:31:58 +03:00
udevadm wait --settle --timeout= 30 " /dev/disk/by-uuid/ $uuid " " /dev/disk/by-label/ $label "
2021-09-17 20:28:38 +03:00
btrfs filesystem show
helper_check_device_symlinks
2022-08-31 21:48:02 +03:00
helper_check_device_units
2021-09-17 20:28:38 +03:00
wipefs -a -f " ${ devices [0] } "
2022-04-04 21:31:58 +03:00
udevadm wait --settle --timeout= 30 --removed /dev/disk/by-partlabel/diskpart{ 1..4}
2021-09-17 20:28:38 +03:00
echo "Multiple devices: using disks, data: raid10, metadata: raid10, mixed mode"
uuid = "deadbeef-dead-dead-beef-000000000002"
label = "btrfs_mdisk"
2022-04-04 21:31:58 +03:00
udevadm lock \
2024-05-13 16:04:16 +03:00
--device= /dev/disk/by-id/scsi-0systemd_foobar_deadbeefbtrfs0 \
--device= /dev/disk/by-id/scsi-0systemd_foobar_deadbeefbtrfs1 \
--device= /dev/disk/by-id/scsi-0systemd_foobar_deadbeefbtrfs2 \
--device= /dev/disk/by-id/scsi-0systemd_foobar_deadbeefbtrfs3 \
2023-03-09 15:50:15 +03:00
mkfs.btrfs -f -M -d raid10 -m raid10 -L " $label " -U " $uuid " " ${ devices [@] } "
2022-04-04 21:31:58 +03:00
udevadm wait --settle --timeout= 30 " /dev/disk/by-uuid/ $uuid " " /dev/disk/by-label/ $label "
2021-09-17 20:28:38 +03:00
btrfs filesystem show
helper_check_device_symlinks
2022-08-31 21:48:02 +03:00
helper_check_device_units
2021-09-17 20:28:38 +03:00
echo "Multiple devices: using LUKS encrypted disks, data: raid1, metadata: raid1, mixed mode"
uuid = "deadbeef-dead-dead-beef-000000000003"
label = "btrfs_mencdisk"
mpoint = " /btrfs_enc $RANDOM "
mkdir " $mpoint "
# Create a key-file
dd if = /dev/urandom of = /etc/btrfs_keyfile bs = 64 count = 1 iflag = fullblock
chmod 0600 /etc/btrfs_keyfile
# Encrypt each device and add it to /etc/crypttab, so it can be mounted
# automagically later
: >/etc/crypttab
for ( ( i = 0; i < ${# devices [@] } ; i++) ) ; do
# Intentionally use weaker cipher-related settings, since we don't care
# about security here as it's a throwaway LUKS partition
2024-05-21 23:24:05 +03:00
udevadm lock --device= " ${ devices [ $i ] } " \
cryptsetup luksFormat -q \
--use-urandom --pbkdf pbkdf2 --pbkdf-force-iterations 1000 \
--uuid " deadbeef-dead-dead-beef-11111111111 $i " --label " encdisk $i " " ${ devices [ $i ] } " /etc/btrfs_keyfile
2022-04-04 21:31:58 +03:00
udevadm wait --settle --timeout= 30 " /dev/disk/by-uuid/deadbeef-dead-dead-beef-11111111111 $i " " /dev/disk/by-label/encdisk $i "
2021-09-17 20:28:38 +03:00
# Add the device into /etc/crypttab, reload systemd, and then activate
# the device so we can create a filesystem on it later
2023-11-26 00:30:01 +03:00
echo " encbtrfs $i UUID=deadbeef-dead-dead-beef-11111111111 $i /etc/btrfs_keyfile luks " >>/etc/crypttab
2021-09-17 20:28:38 +03:00
systemctl daemon-reload
systemctl start " systemd-cryptsetup@encbtrfs $i "
done
helper_check_device_symlinks
2022-08-31 21:48:02 +03:00
helper_check_device_units
2021-09-17 20:28:38 +03:00
# Check if we have all necessary DM devices
ls -l /dev/mapper/encbtrfs{ 0..3}

    # Create a multi-device btrfs filesystem on the LUKS devices
    udevadm lock \
        --device=/dev/mapper/encbtrfs0 \
        --device=/dev/mapper/encbtrfs1 \
        --device=/dev/mapper/encbtrfs2 \
        --device=/dev/mapper/encbtrfs3 \
        mkfs.btrfs -f -M -d raid1 -m raid1 -L "$label" -U "$uuid" /dev/mapper/encbtrfs{0..3}
    udevadm wait --settle --timeout=30 "/dev/disk/by-uuid/$uuid" "/dev/disk/by-label/$label"
    btrfs filesystem show
    helper_check_device_symlinks
    helper_check_device_units

    # Mount it and write some data to it we can compare later
    mount -t btrfs /dev/mapper/encbtrfs0 "$mpoint"
    echo "hello there" >"$mpoint/test"
    # "Deconstruct" the btrfs device and check if we're in a sane state (symlink-wise)
    umount "$mpoint"
    systemctl stop systemd-cryptsetup@encbtrfs{0..3}
    udevadm wait --settle --timeout=30 --removed "/dev/disk/by-uuid/$uuid"
    helper_check_device_symlinks
    helper_check_device_units

    # Add the mount point to /etc/fstab and check if the device can be put together
    # automagically. The source device is the DM name of the first LUKS device
    # (from /etc/crypttab). We have to specify all LUKS devices manually, as
    # registering the necessary devices is usually initrd's job (via btrfs device scan)
    dev_stub="/dev/mapper/encbtrfs"
    echo "/dev/mapper/encbtrfs0 $mpoint btrfs device=${dev_stub}0,device=${dev_stub}1,device=${dev_stub}2,device=${dev_stub}3 0 2" >>/etc/fstab
    # Tell systemd about the new mount
    systemctl daemon-reload
    # Restart cryptsetup.target to trigger autounlock of partitions in /etc/crypttab
    systemctl restart cryptsetup.target
    # Start the corresponding mount unit and check if the btrfs device was reconstructed
    # correctly
    systemctl start "${mpoint##*/}.mount"
    udevadm wait --settle --timeout=30 "/dev/disk/by-uuid/$uuid" "/dev/disk/by-label/$label"
    btrfs filesystem show
    helper_check_device_symlinks
    helper_check_device_units
    grep "hello there" "$mpoint/test"

    # Cleanup
    systemctl stop "${mpoint##*/}.mount"
    systemctl stop systemd-cryptsetup@encbtrfs{0..3}
    sed -i "/${mpoint##*/}/d" /etc/fstab
    : >/etc/crypttab
    rm -fr "$mpoint"
    systemctl daemon-reload
    udevadm settle
}

testcase_iscsi_lvm() {
    local dev i label link lun_id mpoint target_name uuid
    local target_ip="127.0.0.1"
    local target_port="3260"
    local vgroup="iscsi_lvm$RANDOM"
    local expected_symlinks=()
    local devices=(
        /dev/disk/by-id/scsi-0systemd_foobar_deadbeefiscsi{0..3}
    )

    . /etc/os-release
    if [[ "$ID" == "ubuntu" ]]; then
        echo "LVM on Ubuntu is broken, skipping the test" | tee --append /skipped
        exit 77
    fi

    ls -l "${devices[@]}"

    # Start the target daemon (Debian names it tgt.service, so make sure we handle that)
    if systemctl list-unit-files tgt.service; then
        systemctl start tgt
        systemctl status tgt
    elif systemctl list-unit-files tgtd.service; then
        systemctl start tgtd
        systemctl status tgtd
    else
        echo "This test requires tgtd but it is not installed, skipping ..." | tee --append /skipped
        exit 77
    fi

    echo "iSCSI LUNs backed by devices"
    # See RFC3721 and RFC7143
    target_name="iqn.2021-09.com.example:iscsi.test"
    # Initialize a new iSCSI target <$target_name> consisting of 4 LUNs, each
    # backed by a device
    tgtadm --lld iscsi --op new --mode target --tid=1 --targetname "$target_name"
    for (( i = 0; i < ${#devices[@]}; i++ )); do
        # lun-0 is reserved by iSCSI
        lun_id="$((i + 1))"
        tgtadm --lld iscsi --op new --mode logicalunit --tid 1 --lun "$lun_id" -b "${devices[$i]}"
        tgtadm --lld iscsi --op update --mode logicalunit --tid 1 --lun "$lun_id"
        expected_symlinks+=(
            "/dev/disk/by-path/ip-$target_ip:$target_port-iscsi-$target_name-lun-$lun_id"
        )
    done
    tgtadm --lld iscsi --op bind --mode target --tid 1 -I ALL
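    # (binding the target with -I ALL permits connections from any initiator address)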

    # Configure the iSCSI initiator
    iscsiadm --mode discoverydb --type sendtargets --portal "$target_ip" --discover
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --login

    udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"
    helper_check_device_symlinks
    helper_check_device_units

    # Cleanup
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --logout
    tgtadm --lld iscsi --op delete --mode target --tid=1
echo "iSCSI LUNs backed by files + LVM"
# Note: we use files here to "trick" LVM the disks are indeed on a different
# host, so it doesn't automagically detect another path to the backing
# device once we disconnect the iSCSI devices
target_name = "iqn.2021-09.com.example:iscsi.lvm.test"
mpoint = " $( mktemp -d /iscsi_storeXXX) "
expected_symlinks = ( )
# Use the first device as it's configured with larger capacity
2024-05-28 10:35:32 +03:00
udevadm lock --device " ${ devices [0] } " mkfs.ext4 -L iscsi_store " ${ devices [0] } "
2022-04-04 21:31:58 +03:00
udevadm wait --settle --timeout= 30 " ${ devices [0] } "
2021-09-22 20:26:45 +03:00
mount " ${ devices [0] } " " $mpoint "
for i in { 1..4} ; do
dd if = /dev/zero of = " $mpoint /lun $i .img " bs = 1M count = 32
done
# Initialize a new iSCSI target <$target_name> consisting of 4 LUNs, each
# backed by a file
tgtadm --lld iscsi --op new --mode target --tid= 2 --targetname " $target_name "
# lun-0 is reserved by iSCSI
for i in { 1..4} ; do
tgtadm --lld iscsi --op new --mode logicalunit --tid 2 --lun " $i " -b " $mpoint /lun $i .img "
tgtadm --lld iscsi --op update --mode logicalunit --tid 2 --lun " $i "
expected_symlinks += (
" /dev/disk/by-path/ip- $target_ip : $target_port -iscsi- $target_name -lun- $i "
)
done
tgtadm --lld iscsi --op bind --mode target --tid 2 -I ALL
# Configure the iSCSI initiator
iscsiadm --mode discoverydb --type sendtargets --portal " $target_ip " --discover
iscsiadm --mode node --targetname " $target_name " --portal " $target_ip : $target_port " --login

    udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"
    helper_check_device_symlinks
    helper_check_device_units

    # Add all iSCSI devices into an LVM volume group, create two logical volumes,
    # and check if the necessary symlinks exist (and are valid)
    lvm pvcreate -y "${expected_symlinks[@]}"
    lvm pvs
    lvm vgcreate "$vgroup" -y "${expected_symlinks[@]}"
    lvm vgs
    lvm vgchange -ay "$vgroup"
    lvm lvcreate -y -L 4M "$vgroup" -n mypart1
    lvm lvcreate -y -L 8M "$vgroup" -n mypart2
    lvm lvs
    udevadm wait --settle --timeout=30 "/dev/$vgroup/mypart1" "/dev/$vgroup/mypart2"
    mkfs.ext4 -L mylvpart1 "/dev/$vgroup/mypart1"
    udevadm trigger --settle "/dev/$vgroup/mypart1"
    udevadm wait --settle --timeout=30 "/dev/disk/by-label/mylvpart1"
    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
    helper_check_device_units

    # Disconnect the iSCSI devices and check all the symlinks
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --logout
    # "Reset" the DM state, since we yanked the backing storage from under the LVM,
    # so the currently active VGs/LVs are invalid
    dmsetup remove_all --deferred
    # The LVM and iSCSI related symlinks should be gone
    udevadm wait --settle --timeout=30 --removed "/dev/$vgroup" "/dev/disk/by-label/mylvpart1" "${expected_symlinks[@]}"
    helper_check_device_symlinks "/dev/disk"
    helper_check_device_units

    # Reconnect the iSCSI devices and check if everything gets detected correctly
    iscsiadm --mode discoverydb --type sendtargets --portal "$target_ip" --discover
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --login
    udevadm wait --settle --timeout=30 "${expected_symlinks[@]}" "/dev/$vgroup/mypart1" "/dev/$vgroup/mypart2" "/dev/disk/by-label/mylvpart1"
    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
    helper_check_device_units

    # Cleanup
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --logout
    tgtadm --lld iscsi --op delete --mode target --tid=2
    umount "$mpoint"
    rm -rf "$mpoint"
}

testcase_long_sysfs_path() {
    local cursor dev logfile mpoint
    local expected_symlinks=(
        "/dev/disk/by-label/data_vol"
        "/dev/disk/by-label/swap_vol"
        "/dev/disk/by-partlabel/test_swap"
        "/dev/disk/by-partlabel/test_part"
        "/dev/disk/by-partuuid/deadbeef-dead-dead-beef-000000000000"
        "/dev/disk/by-uuid/deadbeef-dead-dead-beef-111111111111"
        "/dev/disk/by-uuid/deadbeef-dead-dead-beef-222222222222"
    )

    # Create a cursor file to skip messages generated by udevd in initrd, as it
    # might not be the same up-to-date version as we currently run (hence generating
    # messages we check for later and making the test fail)
    cursor="$(mktemp)"
    journalctl --cursor-file="${cursor:?}" -n0 -q

    # Make sure the test device is connected and show its "wonderful" path
    stat /sys/block/vda
    readlink -f /sys/block/vda/dev
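
    # (In this test's VM setup, vda presumably sits behind a deeply nested
    # chain of PCI bridges, which is what makes its sysfs path long enough to
    # exceed the unit-name length limit checked via the journal below.)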

    dev="/dev/vda"
    udevadm lock --device "$dev" sfdisk "$dev" <<EOF
label: gpt
name="test_swap", size=32M
uuid="deadbeef-dead-dead-beef-000000000000", name="test_part", size=5M
EOF
    udevadm settle
    udevadm lock --device "${dev}1" mkswap -U "deadbeef-dead-dead-beef-111111111111" -L "swap_vol" "${dev}1"
    udevadm lock --device "${dev}2" mkfs.ext4 -U "deadbeef-dead-dead-beef-222222222222" -L "data_vol" "${dev}2"
    udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"

    # Try to mount the data partition manually (using its label)
    mpoint="$(mktemp -d /logsysfsXXX)"
    mount LABEL=data_vol "$mpoint"
    touch "$mpoint/test"
    umount "$mpoint"

    # Do the same, but with UUID and using fstab
    echo "UUID=deadbeef-dead-dead-beef-222222222222 $mpoint ext4 defaults 0 0" >>/etc/fstab
    systemctl daemon-reload
    mount "$mpoint"
    timeout 30 bash -c "until systemctl -q is-active '$mpoint'; do sleep .2; done"
    test -e "$mpoint/test"
    umount "$mpoint"

    # Test out the swap partition
    swapon -v -L swap_vol
    swapoff -v -L swap_vol

    udevadm settle

    logfile="$(mktemp)"
    # Check state of affairs after https://github.com/systemd/systemd/pull/22759
    # Note: can't use `--cursor-file` here, since we don't want to update the cursor
    # after using it
    [[ "$(journalctl --after-cursor="$(<"$cursor")" -q --no-pager -o short-monotonic -p info --grep "Device path.*vda.?' too long to fit into unit name" | wc -l)" -eq 0 ]]
    [[ "$(journalctl --after-cursor="$(<"$cursor")" -q --no-pager -o short-monotonic --grep "Unit name .*vda.?\.device\" too long, falling back to hashed unit name" | wc -l)" -gt 0 ]]
    # Check if the respective "hashed" units exist and are active (plugged)
    systemctl status --no-pager "$(readlink -f /sys/block/vda/vda1)"
    systemctl status --no-pager "$(readlink -f /sys/block/vda/vda2)"

    # Make sure we don't unnecessarily spam the log
    { journalctl -b -q --no-pager -o short-monotonic -p info --grep "/sys/devices/.+/vda[0-9]?" _PID=1 + UNIT=systemd-udevd.service || :; } | tee "$logfile"
    [[ "$(wc -l <"$logfile")" -lt 10 ]]

    : >/etc/fstab
    rm -fr "${cursor:?}" "${logfile:?}" "${mpoint:?}"
}

testcase_mdadm_basic() {
    local i part_name raid_name raid_dev uuid
    local expected_symlinks=()
    local devices=(
        /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadm{0..4}
    )

    ls -l "${devices[@]}"

    echo "Mirror raid (RAID 1)"
    raid_name="mdmirror"
    raid_dev="/dev/md/$raid_name"
    part_name="${raid_name}_part"
    uuid="aaaaaaaa:bbbbbbbb:cccccccc:00000001"
    expected_symlinks=(
        "$raid_dev"
        "/dev/disk/by-id/md-name-H:$raid_name"
        "/dev/disk/by-id/md-uuid-$uuid"
        "/dev/disk/by-label/$part_name" # ext4 partition
    )
    # Create a simple RAID 1 with an ext4 filesystem
    echo y | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadm{0..1} -v -f --level=1 --raid-devices=2
    udevadm wait --settle --timeout=30 "$raid_dev"
    # udevd does not lock MD devices, hence we need to trigger a uevent after
    # creating the filesystem
    mkfs.ext4 -L "$part_name" "$raid_dev"
    udevadm trigger --settle "$raid_dev"
    udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"

    for i in {0..9}; do
        echo "Disassemble - reassemble loop, iteration #$i"
        mdadm -v --stop "$raid_dev"
        udevadm wait --settle --timeout=30 --removed "${expected_symlinks[@]}"
        mdadm --assemble "$raid_dev" --name "$raid_name" -v
        udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"
    done

    helper_check_device_symlinks
    helper_check_device_units

    # Cleanup
    mdadm -v --stop "$raid_dev"
    udevadm wait --settle --timeout=30 --removed "${expected_symlinks[@]}"
echo "Parity raid (RAID 5)"
raid_name = "mdparity"
raid_dev = " /dev/md/ $raid_name "
part_name = " ${ raid_name } _part "
uuid = "aaaaaaaa:bbbbbbbb:cccccccc:00000101"
expected_symlinks = (
" $raid_dev "
" /dev/disk/by-id/md-name-H: $raid_name "
" /dev/disk/by-id/md-uuid- $uuid "
" /dev/disk/by-label/ $part_name " # ext4 partition
)
# Create a simple RAID 5 with an ext4 filesystem
2024-05-13 16:04:16 +03:00
echo y | mdadm --create " $raid_dev " --name " $raid_name " --uuid " $uuid " /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadm{ 0..2} -v -f --level= 5 --raid-devices= 3
2022-04-14 13:30:42 +03:00
udevadm wait --settle --timeout= 30 " $raid_dev "
mkfs.ext4 -L " $part_name " " $raid_dev "
2024-05-28 10:35:32 +03:00
udevadm trigger --settle " $raid_dev "
2022-04-14 13:30:42 +03:00
udevadm wait --settle --timeout= 30 " ${ expected_symlinks [@] } "
2022-04-15 20:01:45 +03:00
for i in { 0..9} ; do
echo " Disassemble - reassemble loop, iteration # $i "
mdadm -v --stop " $raid_dev "
udevadm wait --settle --timeout= 30 --removed " ${ expected_symlinks [@] } "
mdadm --assemble " $raid_dev " --name " $raid_name " -v
udevadm wait --settle --timeout= 30 " ${ expected_symlinks [@] } "
done
2022-04-14 13:30:42 +03:00
helper_check_device_symlinks
2022-08-31 21:48:02 +03:00
helper_check_device_units
2022-04-14 13:30:42 +03:00
# Cleanup
mdadm -v --stop " $raid_dev "
2022-04-15 20:01:45 +03:00
udevadm wait --settle --timeout= 30 --removed " ${ expected_symlinks [@] } "
2022-08-31 21:48:02 +03:00
helper_check_device_units
2022-04-14 13:30:42 +03:00

    echo "Mirror + parity raid (RAID 10) + multiple partitions"
    raid_name="mdmirpar"
    raid_dev="/dev/md/$raid_name"
    part_name="${raid_name}_part"
    uuid="aaaaaaaa:bbbbbbbb:cccccccc:00001010"
    expected_symlinks=(
        "$raid_dev"
        "/dev/disk/by-id/md-name-H:$raid_name"
        "/dev/disk/by-id/md-uuid-$uuid"
        "/dev/disk/by-label/$part_name" # ext4 partition
        # Partitions
        "${raid_dev}1"
        "${raid_dev}2"
        "${raid_dev}3"
        "/dev/disk/by-id/md-name-H:$raid_name-part1"
        "/dev/disk/by-id/md-name-H:$raid_name-part2"
        "/dev/disk/by-id/md-name-H:$raid_name-part3"
        "/dev/disk/by-id/md-uuid-$uuid-part1"
        "/dev/disk/by-id/md-uuid-$uuid-part2"
        "/dev/disk/by-id/md-uuid-$uuid-part3"
    )
    # Create a simple RAID 10 with an ext4 filesystem
    echo y | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadm{0..3} -v -f --level=10 --raid-devices=4
    udevadm wait --settle --timeout=30 "$raid_dev"

    # Partition the raid device
    # Here, 'udevadm lock' is meaningless, as udevd does not lock MD devices.
    # We need to trigger uevents after sfdisk and mkfs.
    sfdisk --wipe=always "$raid_dev" <<EOF
label: gpt
uuid="deadbeef-dead-dead-beef-111111111111", name="mdpart1", size=8M
uuid="deadbeef-dead-dead-beef-222222222222", name="mdpart2", size=32M
uuid="deadbeef-dead-dead-beef-333333333333", name="mdpart3", size=16M
EOF
    udevadm trigger --settle --parent-match "$raid_dev"
    udevadm wait --settle --timeout=30 "/dev/disk/by-id/md-uuid-$uuid-part2"
    mkfs.ext4 -L "$part_name" "/dev/disk/by-id/md-uuid-$uuid-part2"
    udevadm trigger --settle "/dev/disk/by-id/md-uuid-$uuid-part2"
    udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"

    for i in {0..9}; do
        echo "Disassemble - reassemble loop, iteration #$i"
        mdadm -v --stop "$raid_dev"
        udevadm wait --settle --timeout=30 --removed "${expected_symlinks[@]}"
        mdadm --assemble "$raid_dev" --name "$raid_name" -v
        udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"
    done

    helper_check_device_symlinks
    helper_check_device_units

    # Cleanup
    mdadm -v --stop "$raid_dev"
    # Check if all expected symlinks were removed after the cleanup
    udevadm wait --settle --timeout=30 --removed "${expected_symlinks[@]}"
    helper_check_device_units
}

testcase_mdadm_lvm() {
    local part_name raid_name raid_dev uuid vgroup
    local expected_symlinks=()
    local devices=(
        /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadmlvm{0..4}
    )

    ls -l "${devices[@]}"

    raid_name="mdlvm"
    raid_dev="/dev/md/$raid_name"
    part_name="${raid_name}_part"
    vgroup="${raid_name}_vg"
    uuid="aaaaaaaa:bbbbbbbb:ffffffff:00001010"
    expected_symlinks=(
        "$raid_dev"
        "/dev/$vgroup/mypart1" # LVM partition
        "/dev/$vgroup/mypart2" # LVM partition
        "/dev/disk/by-id/md-name-H:$raid_name"
        "/dev/disk/by-id/md-uuid-$uuid"
        "/dev/disk/by-label/$part_name" # ext4 partition
    )
    # Create a RAID 10 with LVM + ext4
    echo y | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadmlvm{0..3} -v -f --level=10 --raid-devices=4
    udevadm wait --settle --timeout=30 "$raid_dev"
    # Create an LVM on the MD
    lvm pvcreate -y "$raid_dev"
    lvm pvs
    lvm vgcreate "$vgroup" -y "$raid_dev"
    lvm vgs
    lvm vgchange -ay "$vgroup"
    lvm lvcreate -y -L 4M "$vgroup" -n mypart1
    lvm lvcreate -y -L 8M "$vgroup" -n mypart2
    lvm lvs
    udevadm wait --settle --timeout=30 "/dev/$vgroup/mypart1" "/dev/$vgroup/mypart2"
    mkfs.ext4 -L "$part_name" "/dev/$vgroup/mypart2"
    udevadm trigger --settle "/dev/$vgroup/mypart2"
    udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"

    # Disassemble the array
    lvm vgchange -an "$vgroup"
    mdadm -v --stop "$raid_dev"
    udevadm wait --settle --timeout=30 --removed "${expected_symlinks[@]}"
    helper_check_device_symlinks
    helper_check_device_units

    # Reassemble it and check if all required symlinks exist
    mdadm --assemble "$raid_dev" --name "$raid_name" -v
    udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"
    helper_check_device_symlinks
    helper_check_device_units

    # Cleanup
    lvm vgchange -an "$vgroup"
    mdadm -v --stop "$raid_dev"
    # Check if all expected symlinks were removed after the cleanup
    udevadm wait --settle --timeout=30 --removed "${expected_symlinks[@]}"
    helper_check_device_units
}

udevadm settle
udevadm control --log-level debug
lsblk -a

echo "Check if all symlinks under /dev/disk/ are valid (pre-test)"
helper_check_device_symlinks

# TEST_FUNCTION_NAME is passed on the kernel command line via systemd.setenv=
# in the respective test.sh file
if ! command -v "${TEST_FUNCTION_NAME:?}"; then
    echo >&2 "Missing verification handler for test case '$TEST_FUNCTION_NAME'"
    exit 1
fi

echo "TEST_FUNCTION_NAME=$TEST_FUNCTION_NAME"
"$TEST_FUNCTION_NAME"
udevadm settle

echo "Check if all symlinks under /dev/disk/ are valid (post-test)"
helper_check_device_symlinks

udevadm control --log-level info

systemctl status systemd-udevd

touch /testok