#!/usr/bin/env bash
# SPDX-License-Identifier: LGPL-2.1-or-later
# vi: ts=4 sw=4 tw=0 et:
#
# Storage/udev integration test: each testcase_* function below exercises one
# storage stack (multipath, LVM, btrfs, iSCSI, ...) and verifies that udev
# keeps /dev/disk/ symlinks consistent. The testcase to run is selected via
# $TEST_FUNCTION_NAME (see the driver at the bottom of this file).

set -eux
set -o pipefail
# Check if all symlinks under /dev/disk/ are valid.
#
# Arguments: $@ - directories to scan for symlinks; defaults to
#                 /dev/disk and /dev/mapper when no argument is given
# Returns:   0 if every symlink resolves to an existing path that matches
#            the device name reported by udevadm; 1 otherwise
#
# Runs in a subshell so `set +x` (silencing the per-link loop) doesn't leak
# into the caller's shell options.
# shellcheck disable=SC2120
helper_check_device_symlinks() {(
    set +x

    local dev link path paths target

    # Use the given directories if any, otherwise fall back to the defaults
    [[ $# -gt 0 ]] && paths=("$@") || paths=("/dev/disk" "/dev/mapper")

    # Check if all given paths are valid
    for path in "${paths[@]}"; do
        if ! test -e "$path"; then
            echo >&2 "Path '$path' doesn't exist"
            return 1
        fi
    done

    while read -r link; do
        target="$(readlink -f "$link")"
        echo "$link -> $target"
        # Both checks should do virtually the same thing, but check both to be
        # on the safe side
        if [[ ! -e "$link" || ! -e "$target" ]]; then
            echo >&2 "ERROR: symlink '$link' points to '$target' which doesn't exist"
            return 1
        fi

        # Check if the symlink points to the correct device in /dev
        dev="/dev/$(udevadm info -q name "$link")"
        if [[ "$target" != "$dev" ]]; then
            echo >&2 "ERROR: symlink '$link' points to '$target' but '$dev' was expected"
            return 1
        fi
    done < <(find "${paths[@]}" -type l)
)}
# Sanity check for the emulated MegaRAID SAS controller: at least 128 SCSI
# disks must be visible.
testcase_megasas2_basic() {
    lsblk -S
    [[ "$(lsblk --scsi --noheadings | wc -l)" -ge 128 ]]
}
# Sanity check for the emulated NVMe devices: at least 28 nvme* block devices
# must be visible.
testcase_nvme_basic() {
    lsblk --noheadings | grep "^nvme"
    [[ "$(lsblk --noheadings | grep -c "^nvme")" -ge 28 ]]
}
# 16 virtio-scsi disks with 8 identically-labeled partitions each must all be
# enumerated (16 * 8 "Hello world" part labels).
testcase_virtio_scsi_identically_named_partitions() {
    lsblk --noheadings -a -o NAME,PARTLABEL
    [[ "$(lsblk --noheadings -a -o NAME,PARTLABEL | grep -c "Hello world")" -eq $((16 * 8)) ]]
}
# Set up multipathd over 64 WWID-addressed devices (4 paths each), then take
# paths of the first device offline one by one while verifying the mounted
# filesystem stays readable/writable and all udev symlinks remain valid.
testcase_multipath_basic_failover() {
    local dmpath i path wwid

    # Configure multipath
    cat >/etc/multipath.conf <<\EOF
defaults {
    # Use /dev/mapper/$WWN paths instead of /dev/mapper/mpathX
    user_friendly_names no
    find_multipaths yes
    enable_foreign "^$"
}

blacklist_exceptions {
    property "(SCSI_IDENT_|ID_WWN)"
}

blacklist {
}
EOF
    modprobe -v dm_multipath
    systemctl start multipathd.service
    systemctl status multipathd.service
    multipath -ll
    udevadm settle

    ls -l /dev/disk/by-id/

    for i in {0..63}; do
        wwid="deaddeadbeef$(printf "%.4d" "$i")"
        path="/dev/disk/by-id/wwn-0x$wwid"
        dmpath="$(readlink -f "$path")"

        lsblk "$path"
        multipath -C "$dmpath"
        # We should have 4 active paths for each multipath device
        [[ "$(multipath -l "$path" | grep -c running)" -eq 4 ]]
    done

    # Test failover (with the first multipath device that has a partitioned disk)
    echo "${FUNCNAME[0]}: test failover"
    local device expected link mpoint part
    local -a devices
    mpoint="$(mktemp -d /mnt/mpathXXX)"
    wwid="deaddeadbeef0000"
    path="/dev/disk/by-id/wwn-0x$wwid"

    # All following symlinks should exist and should be valid
    local -a part_links=(
        "/dev/disk/by-id/wwn-0x$wwid-part2"
        "/dev/disk/by-partlabel/failover_part"
        "/dev/disk/by-partuuid/deadbeef-dead-dead-beef-000000000000"
        "/dev/disk/by-label/failover_vol"
        "/dev/disk/by-uuid/deadbeef-dead-dead-beef-111111111111"
    )
    udevadm wait --settle --timeout=30 "${part_links[@]}"

    # Choose a random symlink to the failover data partition each time, for
    # a better coverage
    part="${part_links[$RANDOM % ${#part_links[@]}]}"

    # Get all devices attached to a specific multipath device (in H:C:T:L format)
    # and sort them in a random order, so we cut off different paths each time
    mapfile -t devices < <(multipath -l "$path" | grep -Eo '[0-9]+:[0-9]+:[0-9]+:[0-9]+' | sort -R)
    if [[ "${#devices[@]}" -ne 4 ]]; then
        echo "Expected 4 devices attached to WWID=$wwid, got ${#devices[@]} instead"
        return 1
    fi
    # Drop the last path from the array, since we want to leave at least one path active
    unset "devices[3]"

    # Mount the first multipath partition, write some data we can check later,
    # and then disconnect the remaining paths one by one while checking if we
    # can still read/write from the mount
    mount -t ext4 "$part" "$mpoint"
    expected=0
    echo -n "$expected" >"$mpoint/test"
    # Sanity check we actually wrote what we wanted
    [[ "$(<"$mpoint/test")" == "$expected" ]]

    for device in "${devices[@]}"; do
        echo offline >"/sys/class/scsi_device/$device/device/state"
        [[ "$(<"$mpoint/test")" == "$expected" ]]
        expected="$((expected + 1))"
        echo -n "$expected" >"$mpoint/test"

        # Make sure all symlinks are still valid
        udevadm wait --settle --timeout=30 "${part_links[@]}"
    done

    multipath -l "$path"
    # Three paths should be now marked as 'offline' and one as 'running'
    [[ "$(multipath -l "$path" | grep -c offline)" -eq 3 ]]
    [[ "$(multipath -l "$path" | grep -c running)" -eq 1 ]]

    umount "$mpoint"
    rm -fr "$mpoint"
}
# Stress udev with rapid partition-table delete/recreate cycles on one disk
# and check for dangling /dev/disk/ symlinks (regression test for a udev race).
testcase_simultaneous_events() {
    # NOTE: 'i' added to the locals so the loop counter doesn't leak globally
    local blockdev i partscript

    blockdev="$(readlink -f /dev/disk/by-id/scsi-*_deadbeeftest)"
    partscript="$(mktemp)"

    if [[ ! -b "$blockdev" ]]; then
        echo "ERROR: failed to find the test SCSI block device"
        return 1
    fi

    cat >"$partscript" <<EOF
$(printf 'name="test%d", size=2M\n' {1..50})
EOF

    # Initial partition table
    sfdisk -q -X gpt "$blockdev" <"$partscript"

    # Delete the partitions, immediately recreate them, wait for udev to settle
    # down, and then check if we have any dangling symlinks in /dev/disk/. Rinse
    # and repeat.
    #
    # On unpatched udev versions the delete-recreate cycle may trigger a race
    # leading to dead symlinks in /dev/disk/
    for i in {1..100}; do
        sfdisk -q --delete "$blockdev"
        sfdisk -q -X gpt "$blockdev" <"$partscript"

        if ((i % 10 == 0)); then
            udevadm wait --settle --timeout=30 "$blockdev"
            helper_check_device_symlinks
        fi
    done

    rm -f "$partscript"
}
# Build an LVM volume group over four test disks, repeatedly activate,
# deactivate, create and remove logical volumes, and verify udev symlinks
# after each phase.
testcase_lvm_basic() {
    local i part
    local vgroup="MyTestGroup$RANDOM"
    local devices=(
        /dev/disk/by-id/ata-foobar_deadbeeflvm{0..3}
    )

    # Make sure all the necessary soon-to-be-LVM devices exist
    ls -l "${devices[@]}"

    # Add all test devices into a volume group, create two logical volumes,
    # and check if necessary symlinks exist (and are valid)
    lvm pvcreate -y "${devices[@]}"
    lvm pvs
    lvm vgcreate "$vgroup" -y "${devices[@]}"
    lvm vgs
    lvm vgchange -ay "$vgroup"
    lvm lvcreate -y -L 4M "$vgroup" -n mypart1
    lvm lvcreate -y -L 8M "$vgroup" -n mypart2
    lvm lvs
    udevadm wait --settle --timeout=30 "/dev/$vgroup/mypart1" "/dev/$vgroup/mypart2"
    mkfs.ext4 -L mylvpart1 "/dev/$vgroup/mypart1"
    udevadm wait --settle --timeout=30 "/dev/disk/by-label/mylvpart1"
    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"

    # Disable the VG and check symlinks...
    lvm vgchange -an "$vgroup"
    udevadm wait --settle --timeout=30 --removed "/dev/$vgroup" "/dev/disk/by-label/mylvpart1"
    helper_check_device_symlinks "/dev/disk"

    # reenable the VG and check the symlinks again if all LVs are properly activated
    lvm vgchange -ay "$vgroup"
    udevadm wait --settle --timeout=30 "/dev/$vgroup/mypart1" "/dev/$vgroup/mypart2" "/dev/disk/by-label/mylvpart1"
    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"

    # Same as above, but now with more "stress"
    for i in {1..50}; do
        lvm vgchange -an "$vgroup"
        lvm vgchange -ay "$vgroup"

        if ((i % 5 == 0)); then
            udevadm wait --settle --timeout=30 "/dev/$vgroup/mypart1" "/dev/$vgroup/mypart2" "/dev/disk/by-label/mylvpart1"
            helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
        fi
    done

    # Remove the first LV
    lvm lvremove -y "$vgroup/mypart1"
    udevadm wait --settle --timeout=30 --removed "/dev/$vgroup/mypart1"
    udevadm wait --timeout=0 "/dev/$vgroup/mypart2"
    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"

    # Create & remove LVs in a loop, i.e. with more "stress"
    for i in {1..16}; do
        # 1) Create 16 logical volumes
        for part in {0..15}; do
            lvm lvcreate -y -L 4M "$vgroup" -n "looppart$part"
        done

        # 2) Immediately remove them
        lvm lvremove -y "$vgroup"/looppart{0..15}

        # 3) On every 4th iteration settle udev and check if all partitions are
        #    indeed gone, and if all symlinks are still valid
        if ((i % 4 == 0)); then
            for part in {0..15}; do
                udevadm wait --settle --timeout=30 --removed "/dev/$vgroup/looppart$part"
            done
            helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
        fi
    done
}
# Exercise btrfs in several configurations (single device, multi-partition,
# multi-disk RAID, and on top of LUKS-encrypted devices unlocked via
# /etc/crypttab), verifying udev symlinks after each step.
testcase_btrfs_basic() {
    local dev_stub i label mpoint uuid
    local devices=(
        /dev/disk/by-id/ata-foobar_deadbeefbtrfs{0..3}
    )

    ls -l "${devices[@]}"

    echo "Single device: default settings"
    uuid="deadbeef-dead-dead-beef-000000000000"
    label="btrfs_root"
    udevadm lock --device="${devices[0]}" mkfs.btrfs -L "$label" -U "$uuid" "${devices[0]}"
    udevadm wait --settle --timeout=30 "${devices[0]}" "/dev/disk/by-uuid/$uuid" "/dev/disk/by-label/$label"
    btrfs filesystem show
    helper_check_device_symlinks

    echo "Multiple devices: using partitions, data: single, metadata: raid1"
    uuid="deadbeef-dead-dead-beef-000000000001"
    label="btrfs_mpart"
    sfdisk --wipe=always "${devices[0]}" <<EOF
label: gpt
name="diskpart1", size=85M
name="diskpart2", size=85M
name="diskpart3", size=85M
name="diskpart4", size=85M
EOF
    udevadm wait --settle --timeout=30 /dev/disk/by-partlabel/diskpart{1..4}
    udevadm lock --device="${devices[0]}" mkfs.btrfs -d single -m raid1 -L "$label" -U "$uuid" /dev/disk/by-partlabel/diskpart{1..4}
    udevadm wait --settle --timeout=30 "/dev/disk/by-uuid/$uuid" "/dev/disk/by-label/$label"
    btrfs filesystem show
    helper_check_device_symlinks
    wipefs -a -f "${devices[0]}"
    udevadm wait --settle --timeout=30 --removed /dev/disk/by-partlabel/diskpart{1..4}

    echo "Multiple devices: using disks, data: raid10, metadata: raid10, mixed mode"
    uuid="deadbeef-dead-dead-beef-000000000002"
    label="btrfs_mdisk"
    udevadm lock \
        --device=/dev/disk/by-id/ata-foobar_deadbeefbtrfs0 \
        --device=/dev/disk/by-id/ata-foobar_deadbeefbtrfs1 \
        --device=/dev/disk/by-id/ata-foobar_deadbeefbtrfs2 \
        --device=/dev/disk/by-id/ata-foobar_deadbeefbtrfs3 \
        mkfs.btrfs -M -d raid10 -m raid10 -L "$label" -U "$uuid" "${devices[@]}"
    udevadm wait --settle --timeout=30 "/dev/disk/by-uuid/$uuid" "/dev/disk/by-label/$label"
    btrfs filesystem show
    helper_check_device_symlinks

    echo "Multiple devices: using LUKS encrypted disks, data: raid1, metadata: raid1, mixed mode"
    uuid="deadbeef-dead-dead-beef-000000000003"
    label="btrfs_mencdisk"
    mpoint="/btrfs_enc$RANDOM"
    mkdir "$mpoint"
    # Create a key-file
    dd if=/dev/urandom of=/etc/btrfs_keyfile bs=64 count=1 iflag=fullblock
    chmod 0600 /etc/btrfs_keyfile
    # Encrypt each device and add it to /etc/crypttab, so it can be mounted
    # automagically later
    : >/etc/crypttab
    for ((i = 0; i < ${#devices[@]}; i++)); do
        # Intentionally use weaker cipher-related settings, since we don't care
        # about security here as it's a throwaway LUKS partition
        cryptsetup luksFormat -q \
            --use-urandom --pbkdf pbkdf2 --pbkdf-force-iterations 1000 \
            --uuid "deadbeef-dead-dead-beef-11111111111$i" --label "encdisk$i" "${devices[$i]}" /etc/btrfs_keyfile
        udevadm wait --settle --timeout=30 "/dev/disk/by-uuid/deadbeef-dead-dead-beef-11111111111$i" "/dev/disk/by-label/encdisk$i"
        # Add the device into /etc/crypttab, reload systemd, and then activate
        # the device so we can create a filesystem on it later
        echo "encbtrfs$i UUID=deadbeef-dead-dead-beef-11111111111$i /etc/btrfs_keyfile luks,noearly" >>/etc/crypttab
        systemctl daemon-reload
        systemctl start "systemd-cryptsetup@encbtrfs$i"
    done
    helper_check_device_symlinks
    # Check if we have all necessary DM devices
    ls -l /dev/mapper/encbtrfs{0..3}
    # Create a multi-device btrfs filesystem on the LUKS devices
    udevadm lock \
        --device=/dev/mapper/encbtrfs0 \
        --device=/dev/mapper/encbtrfs1 \
        --device=/dev/mapper/encbtrfs2 \
        --device=/dev/mapper/encbtrfs3 \
        mkfs.btrfs -M -d raid1 -m raid1 -L "$label" -U "$uuid" /dev/mapper/encbtrfs{0..3}
    udevadm wait --settle --timeout=30 "/dev/disk/by-uuid/$uuid" "/dev/disk/by-label/$label"
    btrfs filesystem show
    helper_check_device_symlinks
    # Mount it and write some data to it we can compare later
    mount -t btrfs /dev/mapper/encbtrfs0 "$mpoint"
    echo "hello there" >"$mpoint/test"
    # "Deconstruct" the btrfs device and check if we're in a sane state (symlink-wise)
    umount "$mpoint"
    systemctl stop systemd-cryptsetup@encbtrfs{0..3}
    udevadm wait --settle --timeout=30 --removed "/dev/disk/by-uuid/$uuid"
    helper_check_device_symlinks
    # Add the mount point to /etc/fstab and check if the device can be put together
    # automagically. The source device is the DM name of the first LUKS device
    # (from /etc/crypttab). We have to specify all LUKS devices manually, as
    # registering the necessary devices is usually initrd's job (via btrfs device scan)
    dev_stub="/dev/mapper/encbtrfs"
    echo "/dev/mapper/encbtrfs0 $mpoint btrfs device=${dev_stub}0,device=${dev_stub}1,device=${dev_stub}2,device=${dev_stub}3 0 2" >>/etc/fstab
    # Tell systemd about the new mount
    systemctl daemon-reload
    # Restart cryptsetup.target to trigger autounlock of partitions in /etc/crypttab
    systemctl restart cryptsetup.target
    # Start the corresponding mount unit and check if the btrfs device was reconstructed
    # correctly
    systemctl start "${mpoint##*/}.mount"
    udevadm wait --settle --timeout=30 "/dev/disk/by-uuid/$uuid" "/dev/disk/by-label/$label"
    btrfs filesystem show
    helper_check_device_symlinks
    grep "hello there" "$mpoint/test"

    # Cleanup
    systemctl stop "${mpoint##*/}.mount"
    systemctl stop systemd-cryptsetup@encbtrfs{0..3}
    sed -i "/${mpoint##*/}/d" /etc/fstab
    : >/etc/crypttab
    rm -fr "$mpoint"
    systemctl daemon-reload
    udevadm settle
}
# Export test disks (and later files) as iSCSI LUNs via tgtd, log in with the
# initiator, layer LVM on top, then disconnect/reconnect the iSCSI session and
# verify that udev symlinks disappear and reappear correctly.
testcase_iscsi_lvm() {
    local dev i label link lun_id mpoint target_name uuid
    local target_ip="127.0.0.1"
    local target_port="3260"
    local vgroup="iscsi_lvm$RANDOM"
    local expected_symlinks=()
    local devices=(
        /dev/disk/by-id/ata-foobar_deadbeefiscsi{0..3}
    )

    ls -l "${devices[@]}"

    # Start the target daemon
    systemctl start tgtd
    systemctl status tgtd

    echo "iSCSI LUNs backed by devices"
    # See RFC3721 and RFC7143
    target_name="iqn.2021-09.com.example:iscsi.test"
    # Initialize a new iSCSI target <$target_name> consisting of 4 LUNs, each
    # backed by a device
    tgtadm --lld iscsi --op new --mode target --tid=1 --targetname "$target_name"
    for ((i = 0; i < ${#devices[@]}; i++)); do
        # lun-0 is reserved by iSCSI
        lun_id="$((i + 1))"
        tgtadm --lld iscsi --op new --mode logicalunit --tid 1 --lun "$lun_id" -b "${devices[$i]}"
        tgtadm --lld iscsi --op update --mode logicalunit --tid 1 --lun "$lun_id"
        expected_symlinks+=(
            "/dev/disk/by-path/ip-$target_ip:$target_port-iscsi-$target_name-lun-$lun_id"
        )
    done
    tgtadm --lld iscsi --op bind --mode target --tid 1 -I ALL
    # Configure the iSCSI initiator
    iscsiadm --mode discoverydb --type sendtargets --portal "$target_ip" --discover
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --login
    udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"
    helper_check_device_symlinks
    # Cleanup
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --logout
    tgtadm --lld iscsi --op delete --mode target --tid=1

    echo "iSCSI LUNs backed by files + LVM"
    # Note: we use files here to "trick" LVM the disks are indeed on a different
    # host, so it doesn't automagically detect another path to the backing
    # device once we disconnect the iSCSI devices
    target_name="iqn.2021-09.com.example:iscsi.lvm.test"
    mpoint="$(mktemp -d /iscsi_storeXXX)"
    expected_symlinks=()
    # Use the first device as it's configured with larger capacity
    mkfs.ext4 -L iscsi_store "${devices[0]}"
    udevadm wait --settle --timeout=30 "${devices[0]}"
    mount "${devices[0]}" "$mpoint"
    for i in {1..4}; do
        dd if=/dev/zero of="$mpoint/lun$i.img" bs=1M count=32
    done
    # Initialize a new iSCSI target <$target_name> consisting of 4 LUNs, each
    # backed by a file
    tgtadm --lld iscsi --op new --mode target --tid=2 --targetname "$target_name"
    # lun-0 is reserved by iSCSI
    for i in {1..4}; do
        tgtadm --lld iscsi --op new --mode logicalunit --tid 2 --lun "$i" -b "$mpoint/lun$i.img"
        tgtadm --lld iscsi --op update --mode logicalunit --tid 2 --lun "$i"
        expected_symlinks+=(
            "/dev/disk/by-path/ip-$target_ip:$target_port-iscsi-$target_name-lun-$i"
        )
    done
    tgtadm --lld iscsi --op bind --mode target --tid 2 -I ALL
    # Configure the iSCSI initiator
    iscsiadm --mode discoverydb --type sendtargets --portal "$target_ip" --discover
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --login
    udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"
    helper_check_device_symlinks
    # Add all iSCSI devices into a LVM volume group, create two logical volumes,
    # and check if necessary symlinks exist (and are valid)
    lvm pvcreate -y "${expected_symlinks[@]}"
    lvm pvs
    lvm vgcreate "$vgroup" -y "${expected_symlinks[@]}"
    lvm vgs
    lvm vgchange -ay "$vgroup"
    lvm lvcreate -y -L 4M "$vgroup" -n mypart1
    lvm lvcreate -y -L 8M "$vgroup" -n mypart2
    lvm lvs
    udevadm wait --settle --timeout=30 "/dev/$vgroup/mypart1" "/dev/$vgroup/mypart2"
    mkfs.ext4 -L mylvpart1 "/dev/$vgroup/mypart1"
    udevadm wait --settle --timeout=30 "/dev/disk/by-label/mylvpart1"
    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
    # Disconnect the iSCSI devices and check all the symlinks
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --logout
    # "Reset" the DM state, since we yanked the backing storage from under the LVM,
    # so the currently active VGs/LVs are invalid
    dmsetup remove_all --deferred
    # The LVM and iSCSI related symlinks should be gone
    udevadm wait --settle --timeout=30 --removed "/dev/$vgroup" "/dev/disk/by-label/mylvpart1" "${expected_symlinks[@]}"
    helper_check_device_symlinks "/dev/disk"
    # Reconnect the iSCSI devices and check if everything get detected correctly
    iscsiadm --mode discoverydb --type sendtargets --portal "$target_ip" --discover
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --login
    udevadm wait --settle --timeout=30 "${expected_symlinks[@]}" "/dev/$vgroup/mypart1" "/dev/$vgroup/mypart2" "/dev/disk/by-label/mylvpart1"
    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
    # Cleanup
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --logout
    tgtadm --lld iscsi --op delete --mode target --tid=2
    umount "$mpoint"
    rm -rf "$mpoint"
}
# Verify udev copes with a device whose sysfs path is too long to fit into a
# unit name: all symlinks must appear, the partitions must be mountable (by
# label and via fstab/UUID), swap must work, and the journal must not be
# spammed with path-too-long messages.
testcase_long_sysfs_path() {
    local link logfile mpoint
    local expected_symlinks=(
        "/dev/disk/by-label/data_vol"
        "/dev/disk/by-label/swap_vol"
        "/dev/disk/by-partlabel/test_swap"
        "/dev/disk/by-partlabel/test_part"
        "/dev/disk/by-partuuid/deadbeef-dead-dead-beef-000000000000"
        "/dev/disk/by-uuid/deadbeef-dead-dead-beef-111111111111"
        "/dev/disk/by-uuid/deadbeef-dead-dead-beef-222222222222"
    )

    # Make sure the test device is connected and show its "wonderful" path
    stat /sys/block/vda
    readlink -f /sys/block/vda/dev

    udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"

    # Try to mount the data partition manually (using its label)
    mpoint="$(mktemp -d /logsysfsXXX)"
    mount LABEL=data_vol "$mpoint"
    touch "$mpoint/test"
    umount "$mpoint"

    # Do the same, but with UUID and using fstab
    echo "UUID=deadbeef-dead-dead-beef-222222222222 $mpoint ext4 defaults 0 0" >>/etc/fstab
    systemctl daemon-reload
    mount "$mpoint"
    test -e "$mpoint/test"
    umount "$mpoint"

    # Test out the swap partition
    swapon -v -L swap_vol
    swapoff -v -L swap_vol

    udevadm settle

    logfile="$(mktemp)"
    journalctl -b -q --no-pager -o short-monotonic -p info --grep "Device path.*vda.?' too long to fit into unit name"
    # Make sure we don't unnecessarily spam the log
    journalctl -b -q --no-pager -o short-monotonic -p info --grep "/sys/devices/.+/vda[0-9]?" _PID=1 + UNIT=systemd-udevd.service | tee "$logfile"
    [[ "$(wc -l <"$logfile")" -lt 10 ]]

    : >/etc/fstab
    rm -fr "${logfile:?}" "${mpoint:?}"
}
2021-07-30 16:56:10 +02:00
: >/failed
udevadm settle
2021-10-26 05:06:49 +09:00
udevadm control --log-level debug
2021-07-30 16:56:10 +02:00
lsblk -a
2021-09-10 13:04:58 +02:00
echo "Check if all symlinks under /dev/disk/ are valid (pre-test)"
helper_check_device_symlinks
2021-07-30 16:56:10 +02:00
# TEST_FUNCTION_NAME is passed on the kernel command line via systemd.setenv=
# in the respective test.sh file
if ! command -v " ${ TEST_FUNCTION_NAME : ? } " ; then
echo >& 2 " Missing verification handler for test case ' $TEST_FUNCTION_NAME ' "
exit 1
fi
echo " TEST_FUNCTION_NAME= $TEST_FUNCTION_NAME "
" $TEST_FUNCTION_NAME "
2021-10-01 13:56:25 +02:00
udevadm settle
2021-07-30 16:56:10 +02:00
2021-09-10 13:04:58 +02:00
echo "Check if all symlinks under /dev/disk/ are valid (post-test)"
helper_check_device_symlinks
2021-10-26 05:06:49 +09:00
udevadm control --log-level info
2021-07-30 16:56:10 +02:00
systemctl status systemd-udevd
touch /testok
rm /failed