1
0
mirror of https://github.com/systemd/systemd.git synced 2025-03-19 22:50:17 +03:00

TEST-64-UDEV-STORAGE: Stop using mkosi configure scripts

Now that we have mkosi sandbox, meson runs with the mkosi tools tree
mounted (if one is used at all), so we can implement all the qemu feature
checks in meson itself, removing the need for mkosi configure scripts.

(cherry picked from commit ba29de84cf3967ac3b06707348493d5ddc65c7d8)
This commit is contained in:
Daan De Meyer 2025-02-24 17:14:01 +01:00
parent 985b2e7868
commit 2cbc67e476
13 changed files with 241 additions and 371 deletions

View File

@ -1,26 +0,0 @@
#!/usr/bin/python3
# SPDX-License-Identifier: LGPL-2.1-or-later
"""mkosi configure script for the btrfs_basic testcase.

Reads the mkosi machine configuration as JSON on stdin, attaches a
virtio-scsi controller plus four backing drives, and writes the updated
configuration as JSON to stdout.
"""
import json
import sys


def add_btrfs_basic_drives(config: dict) -> dict:
    """Add a virtio-scsi controller and four scsi-hd test drives to *config*.

    Mutates and returns *config* (keys "QemuArgs" and "Drives" must exist).
    """
    config["QemuArgs"] += ["-device", "virtio-scsi-pci,id=scsi0"]
    for i in range(4):
        # `drive_id`, not `id`, to avoid shadowing the builtin.
        drive_id = f"drivebtrfsbasic{i}"
        config["Drives"] += [
            {
                "Id": drive_id,
                "Size": "350M" if i == 0 else "128M",
                "Options": "cache=unsafe",
            }
        ]
        config["QemuArgs"] += [
            "-device",
            f"scsi-hd,drive={drive_id},vendor=systemd,product=foobar,serial=deadbeefbtrfs{i}",
        ]
    return config


if __name__ == "__main__":
    # Preserve the original stdin -> stdout filter behavior when run as a script.
    json.dump(add_btrfs_basic_drives(json.load(sys.stdin)), sys.stdout)

View File

@ -1,26 +0,0 @@
#!/usr/bin/python3
# SPDX-License-Identifier: LGPL-2.1-or-later
import json
import sys
config = json.load(sys.stdin)
config["QemuArgs"] += ["-device", "virtio-scsi-pci,id=scsi0"]
for i in range(4):
id = f"driveiscsibasic{i}"
config["Drives"] += [
{
"Id": id,
"Size": "150M" if i == 0 else "70M",
"Options": "cache=unsafe",
}
]
config["QemuArgs"] += [
"-device",
f"scsi-hd,drive={id},vendor=systemd,product=foobar,serial=deadbeefiscsi{i}",
]
json.dump(config, sys.stdout)

View File

@ -1,31 +0,0 @@
#!/usr/bin/python3
# SPDX-License-Identifier: LGPL-2.1-or-later
import json
import sys
config = json.load(sys.stdin)
config["Drives"] += [
{
"Id": "drive0",
"Size": "64M",
"Options": "cache=unsafe",
}
]
config["QemuArgs"] += ["-device", "pci-bridge,id=pci_bridge0,chassis_nr=64"]
# Create 25 additional PCI bridges, each one connected to the previous one
# (basically a really long extension cable), and attach a virtio drive to
# the last one. This should force udev into attempting to create a device
# unit with a _really_ long name.
for bridge in range(1, 26):
config["QemuArgs"] += [
"-device",
f"pci-bridge,id=pci_bridge{bridge},bus=pci_bridge{bridge - 1},chassis_nr={64 + bridge},addr=1",
]
config["QemuArgs"] += ["-device", f"virtio-blk-pci,drive=drive0,bus=pci_bridge25,addr=1,serial=long-sysfs-path"]
json.dump(config, sys.stdout)

View File

@ -1,25 +0,0 @@
#!/usr/bin/python3
# SPDX-License-Identifier: LGPL-2.1-or-later
import json
import sys
config = json.load(sys.stdin)
config["QemuArgs"] += ["-device", "virtio-scsi-pci,id=scsi0"]
for i in range(4):
id = f"drivelvmbasic{i}"
config["Drives"] += [
{
"Id": id,
"Size": "32M",
"Options": "cache=unsafe",
}
]
config["QemuArgs"] += [
"-device", f"scsi-hd,drive={id},vendor=systemd,product=foobar,serial=deadbeeflvm{i}",
]
json.dump(config, sys.stdout)

View File

@ -1,25 +0,0 @@
#!/usr/bin/python3
# SPDX-License-Identifier: LGPL-2.1-or-later
import json
import sys
config = json.load(sys.stdin)
config["QemuArgs"] += ["-device", "virtio-scsi-pci,id=scsi0"]
for i in range(5):
id = f"drivemdadmbasic{i}"
config["Drives"] += [
{
"Id": id,
"Size": "64M",
"Options": "cache=unsafe",
}
]
config["QemuArgs"] += [
"-device", f"scsi-hd,drive={id},vendor=systemd,product=foobar,serial=deadbeefmdadm{i}",
]
json.dump(config, sys.stdout)

View File

@ -1,25 +0,0 @@
#!/usr/bin/python3
# SPDX-License-Identifier: LGPL-2.1-or-later
import json
import sys
config = json.load(sys.stdin)
config["QemuArgs"] += ["-device", "virtio-scsi-pci,id=scsi0"]
for i in range(5):
id = f"drivemdadmlvm{i}"
config["Drives"] += [
{
"Id": id,
"Size": "64M",
"Options": "cache=unsafe",
}
]
config["QemuArgs"] += [
"-device", f"scsi-hd,drive={id},vendor=systemd,product=foobar,serial=deadbeefmdadmlvm{i}",
]
json.dump(config, sys.stdout)

View File

@ -11,31 +11,253 @@ unit = configure_file(
},
)
foreach testcase : [
'btrfs_basic',
'iscsi_lvm',
'long_sysfs_path',
'lvm_basic',
'mdadm_basic',
'mdadm_lvm',
'multipath_basic_failover',
'nvme_basic',
'nvme_subsystem',
'simultaneous_events',
'virtio_scsi_basic',
'virtio_scsi_identically_named_partitions',
]
udev_storage_tests = []
cmdline = []
qemu_args = ['-device', 'virtio-scsi-pci,id=scsi0']
foreach i : range(4)
id = f'drivebtrfsbasic@i@'
size = i == 0 ? '350M' : '128M'
cmdline += [f'--drive=@id@:@size@::cache=unsafe']
qemu_args += ['-device', f'scsi-hd,drive=@id@,vendor=systemd,product=foobar,serial=deadbeefbtrfs@i@']
endforeach
udev_storage_tests += {
'name' : 'btrfs_basic',
'cmdline' : cmdline,
'qemu-args' : qemu_args,
}
cmdline = []
qemu_args = ['-device', 'virtio-scsi-pci,id=scsi0']
foreach i : range(4)
id = f'driveiscsibasic@i@'
size = i == 0 ? '150M' : '70M'
cmdline += [f'--drive=@id@:@size@::cache=unsafe']
qemu_args += ['-device', f'scsi-hd,drive=@id@,vendor=systemd,product=foobar,serial=deadbeefiscsi@i@']
endforeach
udev_storage_tests += {
'name' : 'iscsi_lvm',
'cmdline' : cmdline,
'qemu-args' : qemu_args,
}
cmdline = ['--drive=drive0:64M::cache=unsafe']
qemu_args = ['-device', 'pci-bridge,id=pci_bridge0,chassis_nr=64']
# Create 25 additional PCI bridges, each one connected to the previous one
# (basically a really long extension cable), and attach a virtio drive to
# the last one. This should force udev into attempting to create a device
# unit with a _really_ long name.
foreach bridge : range(1, 26)
bus = bridge - 1
chassis = 64 + bridge
qemu_args += ['-device', f'pci-bridge,id=pci_bridge@bridge@,bus=pci_bridge@bus@,chassis_nr=@chassis@,addr=1']
endforeach
qemu_args += ['-device', 'virtio-blk-pci,drive=drive0,bus=pci_bridge25,addr=1,serial=long-sysfs-path']
udev_storage_tests += {
'name' : 'long_sysfs_path',
'cmdline' : cmdline,
'qemu-args' : qemu_args,
}
cmdline = []
qemu_args = ['-device', 'virtio-scsi-pci,id=scsi0']
foreach i : range(4)
id = f'drivelvmbasic@i@'
cmdline += [f'--drive=@id@:32M::cache=unsafe']
qemu_args += ['-device', f'scsi-hd,drive=@id@,vendor=systemd,product=foobar,serial=deadbeeflvm@i@']
endforeach
udev_storage_tests += {
'name' : 'lvm_basic',
'cmdline' : cmdline,
'qemu-args' : qemu_args,
}
cmdline = []
qemu_args = ['-device', 'virtio-scsi-pci,id=scsi0']
foreach i : range(5)
id = f'drivemdadmbasic@i@'
cmdline += [f'--drive=@id@:64M::cache=unsafe']
qemu_args += ['-device', f'scsi-hd,drive=@id@,vendor=systemd,product=foobar,serial=deadbeefmdadm@i@']
endforeach
udev_storage_tests += {
'name' : 'mdadm_basic',
'cmdline' : cmdline,
'qemu-args' : qemu_args,
}
cmdline = []
qemu_args = ['-device', 'virtio-scsi-pci,id=scsi0']
foreach i : range(5)
id = f'drivemdadmlvm@i@'
cmdline += [f'--drive=@id@:64M::cache=unsafe']
qemu_args += ['-device', f'scsi-hd,drive=@id@,vendor=systemd,product=foobar,serial=deadbeefmdadmlvm@i@']
endforeach
udev_storage_tests += {
'name' : 'mdadm_lvm',
'cmdline' : cmdline,
'qemu-args' : qemu_args,
}
cmdline = []
qemu_args = []
# Add 16 multipath devices, each backed by 4 paths
foreach ndisk : range(16)
ndiskfmt = ndisk >= 10 ? f'00@ndisk@' : f'000@ndisk@'
wwn = f'0xDEADDEADBEEF@ndiskfmt@'
size = ndisk == 0 ? '16M' : '1M'
foreach nback : range(4)
id = f'drive@ndisk@x@nback@'
cmdline += [f'--drive=@id@:@size@::cache=unsafe:@ndisk@']
qemu_args += ['-device', f'scsi-hd,drive=@id@,serial=MPIO@ndisk@,wwn=@wwn@']
endforeach
endforeach
udev_storage_tests += {
'name' : 'multipath_basic_failover',
'cmdline' : cmdline,
'qemu-args' : qemu_args,
}
cmdline = []
qemu_args = []
foreach i : range(10)
id = f'drivesimultaneousevents@i@'
cmdline += [f'--drive=@id@:128M::cache=unsafe']
qemu_args += ['-device', f'scsi-hd,drive=@id@,serial=deadbeeftest@i@']
endforeach
udev_storage_tests += {
'name' : 'simultaneous_events',
'cmdline' : cmdline,
'qemu-args' : qemu_args,
}
cmdline = []
qemu_args = []
foreach i : range(4)
qemu_args += ['-device', f'virtio-scsi-pci,id=scsi@i@']
endforeach
foreach i : range(128)
id = f'drive@i@'
cmdline += [f'--drive=@id@:1M::cache=unsafe']
div = i / 32
mod = i % 32
qemu_args += ['-device', f'scsi-hd,drive=@id@,bus=scsi@div@.0,channel=0,scsi-id=@mod@,lun=0']
endforeach
udev_storage_tests += {
'name' : 'virtio_scsi_basic',
'cmdline' : cmdline,
'qemu-args' : qemu_args,
}
qemu = find_program('qemu-system-@0@'.format(host_machine.cpu_family()), 'qemu-kvm', dirs : ['/usr/libexec'], native : true, required : false)
if qemu.found()
devices = run_command(qemu, '-device', 'help', check : true).stdout().strip()
if devices.contains('name "nvme"')
cmdline = []
qemu_args = []
foreach i : range(20)
cmdline += [f'--drive=nvme@i@:1M::cache=unsafe']
endforeach
foreach i : range(5)
qemu_args += ['-device', f'nvme,drive=nvme@i@,serial=deadbeef@i@,max_ioqpairs=8']
endforeach
foreach i : range(5, 10)
qemu_args += ['-device', f'"nvme,drive=nvme@i@,serial= deadbeef @i@ ,max_ioqpairs=8"']
endforeach
foreach i : range(10, 15)
qemu_args += ['-device', f'"nvme,drive=nvme@i@,serial= dead/beef/@i@ ,max_ioqpairs=8"']
endforeach
foreach i : range(15, 20)
qemu_args += ['-device', f'"nvme,drive=nvme@i@,serial=dead/../../beef/@i@,max_ioqpairs=8"']
endforeach
udev_storage_tests += {
'name' : 'nvme_basic',
'cmdline' : cmdline,
'qemu-args' : qemu_args,
}
cmdline = []
foreach id : ['nvme0', 'nvme1']
cmdline += [f'--drive=@id@:1M::cache=unsafe']
endforeach
qemu_args = [
# Create an NVM Subsystem Device
'-device', 'nvme-subsys,id=nvme-subsys-64,nqn=subsys64',
# Attach two NVM controllers to it
'-device', 'nvme,subsys=nvme-subsys-64,serial=deadbeef',
'-device', 'nvme,subsys=nvme-subsys-64,serial=deadbeef',
# And create two shared namespaces attached to both controllers
'-device', 'nvme-ns,drive=nvme0,nsid=16,shared=on',
'-device', 'nvme-ns,drive=nvme1,nsid=17,shared=on',
]
udev_storage_tests += {
'name' : 'nvme_subsystem',
'cmdline' : cmdline,
'qemu-args' : qemu_args,
}
endif
if devices.contains('name "virtio-scsi-pci"')
cmdline = []
qemu_args = ['-device', 'virtio-scsi-pci,id=scsi0,num_queues=4']
foreach i : range(16)
id = f'drive@i@'
cmdline += [f'--drive=@id@:40M::cache=unsafe']
qemu_args += ['-device', f'scsi-hd,drive=@id@,bus=scsi0.0,channel=0,scsi-id=0,lun=@i@']
endforeach
udev_storage_tests += {
'name' : 'virtio_scsi_identically_named_partitions',
'cmdline' : cmdline,
'qemu-args' : qemu_args,
}
endif
endif
foreach testcase : udev_storage_tests
qemu_args = ' '.join(testcase['qemu-args'])
cmdline = testcase['cmdline'] + [f'--qemu-args=@qemu_args@']
integration_tests += [
integration_test_template + {
'name' : '@0@-@1@'.format(name, testcase),
'name' : '@0@-@1@'.format(name, testcase['name']),
# Make sure the service is still named TEST-64-UDEV-STORAGE.service.
'unit' : unit,
'cmdline' : integration_test_template['cmdline'] + [
'systemd.setenv=TEST_FUNCTION_NAME=testcase_@0@'.format(testcase)
],
'mkosi-args' : integration_test_template['mkosi-args'] + [
'--configure-script', files('@0@.configure'.format(testcase)),
'systemd.setenv=TEST_FUNCTION_NAME=testcase_@0@'.format(testcase['name'])
],
'mkosi-args' : integration_test_template['mkosi-args'] + cmdline,
'priority' : 10,
'vm' : true,
# Suppress ASan error

View File

@ -1,31 +0,0 @@
#!/usr/bin/python3
# SPDX-License-Identifier: LGPL-2.1-or-later
"""mkosi configure script for the multipath_basic_failover testcase.

Reads the mkosi machine configuration as JSON on stdin, attaches 16
multipath devices (4 paths each), and writes the updated configuration as
JSON to stdout.
"""
import json
import sys


def add_multipath_drives(config: dict) -> dict:
    """Add 16 multipath devices, each backed by 4 paths, to *config*.

    We don't use --qemu-drive for this since the paths of one device have to
    share a backing file ("FileId"). Mutates and returns *config*.
    """
    for ndisk in range(16):
        wwn = f"0xDEADDEADBEEF{ndisk:04d}"
        # Conditional expression instead of the original four-line if/else.
        size = "16M" if ndisk == 0 else "1M"
        for nback in range(4):
            # `drive_id`, not `id`, to avoid shadowing the builtin.
            drive_id = f"drive{ndisk}x{nback}"
            config["Drives"] += [
                {
                    "Id": drive_id,
                    "Size": size,
                    "Options": "cache=unsafe",
                    "FileId": str(ndisk),
                }
            ]
            config["QemuArgs"] += ["-device", f"scsi-hd,drive={drive_id},serial=MPIO{ndisk},wwn={wwn}"]
    return config


if __name__ == "__main__":
    # Preserve the original stdin -> stdout filter behavior when run as a script.
    json.dump(add_multipath_drives(json.load(sys.stdin)), sys.stdout)

View File

@ -1,40 +0,0 @@
#!/usr/bin/python3
# SPDX-License-Identifier: LGPL-2.1-or-later
"""mkosi configure script for the nvme_basic testcase.

Reads the mkosi machine configuration as JSON on stdin, attaches 20 NVMe
drives with tricky serial numbers, and writes the updated configuration as
JSON to stdout. Exits 77 (skip) when qemu lacks the nvme device driver.
"""
import json
import os
import shutil
import subprocess
import sys


def nvme_available() -> bool:
    """Return True when the local qemu binary reports an "nvme" device driver."""
    qemu = shutil.which("/usr/libexec/qemu-kvm") or f"qemu-system-{os.environ['QEMU_ARCHITECTURE']}"
    result = subprocess.run([qemu, "-device", "help"], check=True, text=True, stdout=subprocess.PIPE)
    return 'name "nvme"' in result.stdout


def add_nvme_drives(config: dict) -> dict:
    """Add 20 1M NVMe drives whose serials exercise quoting edge cases.

    Mutates and returns *config* (keys "QemuArgs" and "Drives" must exist).
    Closure over *config* replaces the original `global config` hack.
    """
    def add_drive(i: int, serial: str) -> None:
        # `drive_id`, not `id`, to avoid shadowing the builtin.
        drive_id = f"nvme{i}"
        config["Drives"] += [
            {
                "Id": drive_id,
                "Size": "1M",
                "Options": "cache=unsafe",
            }
        ]
        config["QemuArgs"] += ["-device", f"nvme,drive={drive_id},serial={serial},max_ioqpairs=8"]

    for i in range(5):
        add_drive(i, serial=f"deadbeef{i}")
    for i in range(5, 10):
        # Serials with surrounding/embedded whitespace.
        add_drive(i, serial=f" deadbeef {i} ")
    for i in range(10, 15):
        # Serials with slashes and whitespace.
        add_drive(i, serial=f" dead/beef/{i} ")
    for i in range(15, 20):
        # Serials with path-traversal-looking components.
        add_drive(i, serial=f"dead/../../beef/{i}")
    return config


if __name__ == "__main__":
    config = json.load(sys.stdin)
    if not nvme_available():
        print("nvme device driver is not available, skipping test...", file=sys.stderr)
        # sys.exit() instead of the site-provided exit() builtin.
        sys.exit(77)
    json.dump(add_nvme_drives(config), sys.stdout)

View File

@ -1,40 +0,0 @@
#!/usr/bin/python3
# SPDX-License-Identifier: LGPL-2.1-or-later
"""mkosi configure script for the nvme_subsystem testcase.

Reads the mkosi machine configuration as JSON on stdin, sets up an NVM
subsystem with two controllers and two shared namespaces, and writes the
updated configuration as JSON to stdout. Exits 77 (skip) when qemu lacks
the nvme device driver.
"""
import json
import os
import shutil
import subprocess
import sys


def nvme_available() -> bool:
    """Return True when the local qemu binary reports an "nvme" device driver."""
    qemu = shutil.which("/usr/libexec/qemu-kvm") or f"qemu-system-{os.environ['QEMU_ARCHITECTURE']}"
    result = subprocess.run([qemu, "-device", "help"], check=True, text=True, stdout=subprocess.PIPE)
    return 'name "nvme"' in result.stdout


def add_nvme_subsystem_devices(config: dict) -> dict:
    """Add two backing drives and an NVM subsystem with shared namespaces.

    Mutates and returns *config* (keys "QemuArgs" and "Drives" must exist).
    """
    # `drive_id`, not `id`, to avoid shadowing the builtin.
    for drive_id in ("nvme0", "nvme1"):
        config["Drives"] += [
            {
                "Id": drive_id,
                "Size": "1M",
                "Options": "cache=unsafe",
            }
        ]
    config["QemuArgs"] += [
        # Create an NVM Subsystem Device
        "-device", "nvme-subsys,id=nvme-subsys-64,nqn=subsys64",
        # Attach two NVM controllers to it
        "-device", "nvme,subsys=nvme-subsys-64,serial=deadbeef",
        "-device", "nvme,subsys=nvme-subsys-64,serial=deadbeef",
        # And create two shared namespaces attached to both controllers
        "-device", "nvme-ns,drive=nvme0,nsid=16,shared=on",
        "-device", "nvme-ns,drive=nvme1,nsid=17,shared=on",
    ]
    return config


if __name__ == "__main__":
    config = json.load(sys.stdin)
    if not nvme_available():
        print("nvme device driver is not available, skipping test...", file=sys.stderr)
        # sys.exit() instead of the site-provided exit() builtin.
        sys.exit(77)
    json.dump(add_nvme_subsystem_devices(config), sys.stdout)

View File

@ -1,21 +0,0 @@
#!/usr/bin/python3
# SPDX-License-Identifier: LGPL-2.1-or-later
"""mkosi configure script for the simultaneous_events testcase.

Reads the mkosi machine configuration as JSON on stdin, attaches ten 128M
scsi-hd drives, and writes the updated configuration as JSON to stdout.
"""
import json
import sys


def add_simultaneous_events_drives(config: dict) -> dict:
    """Add ten 128M scsi-hd test drives to *config*.

    Mutates and returns *config* (keys "QemuArgs" and "Drives" must exist).
    """
    for i in range(10):
        # `drive_id`, not `id`, to avoid shadowing the builtin.
        drive_id = f"drivesimultaneousevents{i}"
        config["Drives"] += [
            {
                "Id": drive_id,
                "Size": "128M",
                "Options": "cache=unsafe",
            }
        ]
        config["QemuArgs"] += ["-device", f"scsi-hd,drive={drive_id},serial=deadbeeftest{i}"]
    return config


if __name__ == "__main__":
    # Preserve the original stdin -> stdout filter behavior when run as a script.
    json.dump(add_simultaneous_events_drives(json.load(sys.stdin)), sys.stdout)

View File

@ -1,28 +0,0 @@
#!/usr/bin/python3
# SPDX-License-Identifier: LGPL-2.1-or-later
"""mkosi configure script for the virtio_scsi_basic testcase.

Reads the mkosi machine configuration as JSON on stdin, attaches four
virtio-scsi controllers with 128 drives spread across them, and writes the
updated configuration as JSON to stdout.
"""
import json
import sys


def add_virtio_scsi_basic_drives(config: dict) -> dict:
    """Add 4 virtio-scsi controllers and 128 drives (32 per bus) to *config*.

    Mutates and returns *config* (keys "QemuArgs" and "Drives" must exist).
    """
    for i in range(4):
        config["QemuArgs"] += ["-device", f"virtio-scsi-pci,id=scsi{i}"]
    for i in range(128):
        # `drive_id`, not `id`, to avoid shadowing the builtin.
        drive_id = f"drive{i}"
        config["Drives"] += [
            {
                "Id": drive_id,
                "Size": "1M",
                "Options": "cache=unsafe",
            }
        ]
        # Bus index is i // 32, scsi-id is i % 32: 32 drives per controller.
        config["QemuArgs"] += [
            "-device",
            f"scsi-hd,drive={drive_id},bus=scsi{i // 32}.0,channel=0,scsi-id={i % 32},lun=0",
        ]
    return config


if __name__ == "__main__":
    # Preserve the original stdin -> stdout filter behavior when run as a script.
    json.dump(add_virtio_scsi_basic_drives(json.load(sys.stdin)), sys.stdout)

View File

@ -1,34 +0,0 @@
#!/usr/bin/python3
# SPDX-License-Identifier: LGPL-2.1-or-later
"""mkosi configure script for the virtio_scsi_identically_named_partitions testcase.

Reads the mkosi machine configuration as JSON on stdin, attaches 16 scsi-hd
drives on one virtio-scsi controller, and writes the updated configuration
as JSON to stdout. Exits 77 (skip) when qemu lacks the virtio-scsi-pci
device driver.
"""
import json
import os
import shutil
import subprocess
import sys


def virtio_scsi_available() -> bool:
    """Return True when the local qemu binary reports a "virtio-scsi-pci" driver."""
    qemu = shutil.which("/usr/libexec/qemu-kvm") or f"qemu-system-{os.environ['QEMU_ARCHITECTURE']}"
    result = subprocess.run([qemu, "-device", "help"], check=True, text=True, stdout=subprocess.PIPE)
    return 'name "virtio-scsi-pci"' in result.stdout


def add_identically_named_partition_drives(config: dict, num_disk: int = 16) -> dict:
    """Add one virtio-scsi controller and *num_disk* 40M drives to *config*.

    *num_disk* is parameterized (default 16, matching the original hard-coded
    value) so the drive count can be varied. Mutates and returns *config*.
    """
    config["QemuArgs"] += ["-device", "virtio-scsi-pci,id=scsi0,num_queues=4"]
    for i in range(num_disk):
        # `drive_id`, not `id`, to avoid shadowing the builtin.
        drive_id = f"drive{i}"
        config["Drives"] += [
            {
                "Id": drive_id,
                "Size": "40M",
                "Options": "cache=unsafe",
            }
        ]
        config["QemuArgs"] += ["-device", f"scsi-hd,drive={drive_id},bus=scsi0.0,channel=0,scsi-id=0,lun={i}"]
    return config


if __name__ == "__main__":
    config = json.load(sys.stdin)
    if not virtio_scsi_available():
        print("virtio-scsi-pci device driver is not available, skipping test...", file=sys.stderr)
        # sys.exit() instead of the site-provided exit() builtin.
        sys.exit(77)
    json.dump(add_identically_named_partition_drives(config), sys.stdout)