#!/usr/bin/env bash
# -*- mode: shell-script; indent-tabs-mode: nil; sh-basic-offset: 4; -*-
# SPDX-License-Identifier: LGPL-2.1-or-later
#
# shellcheck disable=SC2030,SC2031
# ex: ts=8 sw=4 sts=4 et filetype=sh tw=180
# Note: the shellcheck line above disables warning for variables which were
# modified in a subshell. In our case this behavior is expected, but
# `shellcheck` can't distinguish this because of poor variable tracking,
# which results in warning for every instance of such variable used
# throughout this file.
# See:
# * comment in function install_verity_minimal()
# * koalaman/shellcheck#280
set -o pipefail

PATH=/sbin:/bin:/usr/sbin:/usr/bin
export PATH

os_release=$(test -e /etc/os-release && echo /etc/os-release || echo /usr/lib/os-release)
# shellcheck source=/dev/null
source "$os_release"
[[ "$ID" = "debian" || " $ID_LIKE " = *" debian "* ]] && LOOKS_LIKE_DEBIAN=yes || LOOKS_LIKE_DEBIAN=""
[[ "$ID" = "arch" || " $ID_LIKE " = *" arch "* ]] && LOOKS_LIKE_ARCH=yes || LOOKS_LIKE_ARCH=""
[[ " $ID_LIKE " = *" suse "* ]] && LOOKS_LIKE_SUSE=yes || LOOKS_LIKE_SUSE=""

KERNEL_VER="${KERNEL_VER-$(uname -r)}"
QEMU_TIMEOUT="${QEMU_TIMEOUT:-1800}"
NSPAWN_TIMEOUT="${NSPAWN_TIMEOUT:-1800}"
TIMED_OUT= # will be 1 after run_* if *_TIMEOUT is set and test timed out
[[ "$LOOKS_LIKE_SUSE" ]] && FSTYPE="${FSTYPE:-btrfs}" || FSTYPE="${FSTYPE:-ext4}"
UNIFIED_CGROUP_HIERARCHY="${UNIFIED_CGROUP_HIERARCHY:-default}"
EFI_MOUNT="${EFI_MOUNT:-$(bootctl -x 2>/dev/null || echo /boot)}"
# Note that defining a different IMAGE_NAME in a test setup script will only result
# in default.img being copied and renamed. It can then be extended by defining
# a test_append_files() function. The $1 parameter will be the root directory.
# To force creating a new image from scratch (eg: to encrypt it), also define
# TEST_FORCE_NEWIMAGE=1 in the test setup script.
IMAGE_NAME=${IMAGE_NAME:-default}
STRIP_BINARIES="${STRIP_BINARIES:-yes}"
TEST_REQUIRE_INSTALL_TESTS="${TEST_REQUIRE_INSTALL_TESTS:-1}"
TEST_PARALLELIZE="${TEST_PARALLELIZE:-0}"
TEST_SUPPORTING_SERVICES_SHOULD_BE_MASKED="${TEST_SUPPORTING_SERVICES_SHOULD_BE_MASKED:-1}"
LOOPDEV=

# Simple wrapper to unify boolean checks.
# Note: this function needs to stay near the top of the file, so we can use it
# in code in the outermost scope.
get_bool() {
    # Make the value lowercase to make the regex matching simpler
    local _bool="${1,,}"

    # Consider empty value as "false"
    if [[ -z "$_bool" || "$_bool" =~ ^(0|no|false)$ ]]; then
        return 1
    elif [[ "$_bool" =~ ^(1|yes|true)$ ]]; then
        return 0
    else
        echo >&2 "Value '$_bool' is not a valid boolean value"
        exit 1
    fi
}

# Since in Bash we can have only one handler per signal, let's overcome this
# limitation by having one global handler for the EXIT signal which executes
# all registered handlers
_AT_EXIT_HANDLERS=()

_at_exit() {
    set +e

    # Run the EXIT handlers in reverse order
    for ((i = ${#_AT_EXIT_HANDLERS[@]} - 1; i >= 0; i--)); do
        ddebug "Running EXIT handler '${_AT_EXIT_HANDLERS[$i]}'"
        "${_AT_EXIT_HANDLERS[$i]}"
    done
}

trap _at_exit EXIT

add_at_exit_handler() {
    local handler="${1?}"

    if [[ "$(type -t "$handler")" != "function" ]]; then
        dfatal "'$handler' is not a function"
        exit 1
    fi

    _AT_EXIT_HANDLERS+=("$handler")
}

# Decide if we can (and want to) run qemu with KVM acceleration.
# Check if nested KVM is explicitly enabled (TEST_NESTED_KVM). If not,
# check if it's not explicitly disabled (TEST_NO_KVM) and we're not already
# running under KVM. If these conditions are met, enable KVM (and possibly
# nested KVM), otherwise disable it.
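# (Illustrative note, not used by the suite itself): both knobs are plain
# booleans understood by get_bool(), so a hypothetical invocation could look like
#   TEST_NO_KVM=1 make -C test/TEST-01-BASIC run        # force plain emulation
#   TEST_NESTED_KVM=1 make -C test/TEST-01-BASIC run    # allow KVM inside a VM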
if get_bool "${TEST_NESTED_KVM:=}" || (! get_bool "${TEST_NO_KVM:=}" && ! systemd-detect-virt -qv); then
    QEMU_KVM=yes
else
    QEMU_KVM=no
fi

if ! ROOTLIBDIR=$(pkg-config --variable=systemdutildir systemd); then
    echo "WARNING! Cannot determine rootlibdir from pkg-config, assuming /usr/lib/systemd" >&2
    ROOTLIBDIR=/usr/lib/systemd
fi

# The calling test.sh scripts have TEST_BASE_DIR set via their Makefile, but we don't need them to provide it
TEST_BASE_DIR=${TEST_BASE_DIR:-$(realpath "$(dirname "${BASH_SOURCE[0]}")")}
TEST_UNITS_DIR="$(realpath "$TEST_BASE_DIR/units")"
SOURCE_DIR=$(realpath "$TEST_BASE_DIR/..")
TOOLS_DIR="$SOURCE_DIR/tools"
# These variables are used by test scripts
export TEST_BASE_DIR TEST_UNITS_DIR SOURCE_DIR TOOLS_DIR

# note that find-build-dir.sh will return $BUILD_DIR if provided, else it will try to find it
if get_bool "${NO_BUILD:=}"; then
    BUILD_DIR="$SOURCE_DIR"
elif ! BUILD_DIR="$("$TOOLS_DIR"/find-build-dir.sh)"; then
    echo "ERROR: no build found, please set BUILD_DIR or use NO_BUILD" >&2
    exit 1
fi

PATH_TO_INIT="$ROOTLIBDIR/systemd"
SYSTEMD_JOURNALD="${SYSTEMD_JOURNALD:-$(command -v "$BUILD_DIR/systemd-journald" || command -v "$ROOTLIBDIR/systemd-journald")}"
SYSTEMD_JOURNAL_REMOTE="${SYSTEMD_JOURNAL_REMOTE:-$(command -v "$BUILD_DIR/systemd-journal-remote" || command -v "$ROOTLIBDIR/systemd-journal-remote" || echo "")}"
SYSTEMD="${SYSTEMD:-$(command -v "$BUILD_DIR/systemd" || command -v "$ROOTLIBDIR/systemd")}"
SYSTEMD_NSPAWN="${SYSTEMD_NSPAWN:-$(command -v "$BUILD_DIR/systemd-nspawn" || command -v systemd-nspawn)}"
JOURNALCTL="${JOURNALCTL:-$(command -v "$BUILD_DIR/journalctl" || command -v journalctl)}"
SYSTEMCTL="${SYSTEMCTL:-$(command -v "$BUILD_DIR/systemctl" || command -v systemctl)}"

TESTFILE="${BASH_SOURCE[1]}"
if [ -z "$TESTFILE" ]; then
    echo "ERROR: test-functions must be sourced from one of the TEST-*/test.sh scripts" >&2
    exit 1
fi
TESTNAME="$(basename "$(dirname "$(realpath "$TESTFILE")")")"
STATEDIR="$BUILD_DIR/test/$TESTNAME"
STATEFILE="$STATEDIR/.testdir"
IMAGESTATEDIR="$STATEDIR/.."
TESTLOG="$STATEDIR/test.log"

if ! [[ "$TESTNAME" =~ ^TEST\-([0-9]+)\-.+$ ]]; then
    echo "ERROR: Test name '$TESTNAME' is not in the expected format: TEST-[0-9]+-*" >&2
    exit 1
fi
TESTID="${BASH_REMATCH[1]:?}"

if [[ ! -f "$TEST_UNITS_DIR/testsuite-$TESTID.service" ]]; then
    echo "ERROR: Test '$TESTNAME' is missing its service file '$TEST_UNITS_DIR/testsuite-$TESTID.service" >&2
    exit 1
fi

BASICTOOLS=(
    awk base64 basename bash capsh cat chmod chown chroot cmp cryptsetup cut
    date dd diff dirname dmsetup echo env false flock getconf getent getfacl
    grep gunzip gzip head ionice ip jq killall ldd ln loadkeys login losetup
    lz4cat mkfifo mktemp modprobe mount mountpoint mv nc nproc pkill readlink
    realpath rev rm rmdir rmmod sed seq setfattr setfont setsid sfdisk sh
    sleep stat su sulogin sysctl tail tar tee test timeout touch tr true
    truncate umount uname unshare useradd userdel wc xargs xzcat
)

DEBUGTOOLS=(
    cp df dhclient dmesg du find free grep hostname id less ln ls mkdir ping
    ps route sort strace stty tty vi /usr/libexec/vi
)

is_built_with_asan() {
    local _bin="${1:?}"

    if ! type -P objdump >/dev/null; then
        ddebug "Failed to find objdump. Assuming systemd hasn't been built with ASAN."
        return 1
    fi

    # Borrowed from https://github.com/google/oss-fuzz/blob/cd9acd02f9d3f6e80011cc1e9549be526ce5f270/infra/base-images/base-runner/bad_build_check#L182
    local _asan_calls
    _asan_calls="$(objdump -dC "$_bin" | grep -E "(callq?|brasl?|bl)\s.+__asan" -c)"
    if ((_asan_calls < 1000)); then
        return 1
    else
        return 0
    fi
}

is_built_with_coverage() {
    if get_bool "${NO_BUILD:=}" || ! command -v meson >/dev/null; then
        return 1
    fi

    meson configure "${BUILD_DIR:?}" | grep 'b_coverage' | awk '{ print $2 }' | grep -q 'true'
}

IS_BUILT_WITH_ASAN=$(is_built_with_asan "$SYSTEMD_JOURNALD" && echo yes || echo no)
IS_BUILT_WITH_COVERAGE=$(is_built_with_coverage && echo yes || echo no)

if get_bool "$IS_BUILT_WITH_ASAN"; then
    STRIP_BINARIES=no
    SKIP_INITRD="${SKIP_INITRD:-yes}"
    PATH_TO_INIT=$ROOTLIBDIR/systemd-under-asan
    QEMU_MEM="${QEMU_MEM:-2G}"
    QEMU_SMP="${QEMU_SMP:-4}"

    # We need to correctly distinguish between gcc's and clang's ASan DSOs.
    if ASAN_RT_NAME="$(awk '/libasan.so/ {x=$1; exit} END {print x; exit x==""}' < <(ldd "$SYSTEMD"))"; then
        ASAN_COMPILER=gcc
        ASAN_RT_PATH="$(readlink -f "$(${CC:-gcc} --print-file-name "$ASAN_RT_NAME")")"
    elif ASAN_RT_NAME="$(awk '/libclang_rt.asan/ {x=$1; exit} END {print x; exit x==""}' < <(ldd "$SYSTEMD"))"; then
        ASAN_COMPILER=clang
        ASAN_RT_PATH="$(readlink -f "$(${CC:-clang} --print-file-name "$ASAN_RT_NAME")")"

        # As clang's ASan DSO is usually in a non-standard path, let's check if
        # the environment is set accordingly. If not, warn the user and exit.
        # We're not setting the LD_LIBRARY_PATH automagically here, because
        # user should encounter (and fix) the same issue when running the unit
        # tests (meson test)
        if ldd "$SYSTEMD" | grep -q "libclang_rt.asan.*not found"; then
            echo >&2 "clang's ASan DSO ($ASAN_RT_NAME) is not present in the runtime library path"
            echo >&2 "Consider setting LD_LIBRARY_PATH=${ASAN_RT_PATH%/*}"
            exit 1
        fi
    else
        echo >&2 "systemd is not linked against the ASan DSO"
        echo >&2 "gcc does this by default, for clang compile with -shared-libasan"
        exit 1
    fi

    echo "Detected ASan RT '$ASAN_RT_NAME' located at '$ASAN_RT_PATH'"
fi

find_qemu_bin() {
    QEMU_BIN="${QEMU_BIN:-""}"
    # SUSE and Red Hat call the binary qemu-kvm. Debian and Gentoo call it kvm.
    if get_bool "$QEMU_KVM"; then
        [[ -n "$QEMU_BIN" ]] || QEMU_BIN="$(command -v kvm qemu-kvm 2>/dev/null | grep '^/' -m1)"
    fi

    [[ -n "$ARCH" ]] || ARCH="$(uname -m)"
    case $ARCH in
        x86_64)
            # QEMU's own build system calls it qemu-system-x86_64
            [[ -n "$QEMU_BIN" ]] || QEMU_BIN="$(command -v qemu-system-x86_64 2>/dev/null | grep '^/' -m1)"
            ;;
        i*86)
            # new i386 version of QEMU
            [[ -n "$QEMU_BIN" ]] || QEMU_BIN="$(command -v qemu-system-i386 2>/dev/null | grep '^/' -m1)"

            # i386 version of QEMU
            [[ -n "$QEMU_BIN" ]] || QEMU_BIN="$(command -v qemu 2>/dev/null | grep '^/' -m1)"
            ;;
        ppc64*)
            [[ -n "$QEMU_BIN" ]] || QEMU_BIN="$(command -v qemu-system-ppc64 2>/dev/null | grep '^/' -m1)"
            ;;
    esac

    if [[ !
-e "$QEMU_BIN" ]]; then echo "Could not find a suitable qemu binary" >&2 return 1 fi } # Compares argument #1=X.Y.Z (X&Y&Z = numeric) to the version of the installed qemu # returns 0 if newer or equal # returns 1 if older # returns 2 if failing qemu_min_version() { find_qemu_bin || return 2 # get version from binary local qemu_ver qemu_ver="$("$QEMU_BIN" --version | awk '/^QEMU emulator version ([0-9]*\.[0-9]*\.[0-9]*)/ {print $4}')" # Check version string format echo "$qemu_ver" | grep -q '^[0-9]*\.[0-9]*\.[0-9]*$' || return 2 echo "$1" | grep -q '^[0-9]*\.[0-9]*\.[0-9]*$' || return 2 # compare as last command to return that value printf "%s\n%s\n" "$1" "$qemu_ver" | sort -V -C } # Return 0 if qemu did run (then you must check the result state/logs for actual # success), or 1 if qemu is not available. run_qemu() { # If the test provided its own initrd, use it (e.g. TEST-24) if [[ -z "$INITRD" && -f "${TESTDIR:?}/initrd.img" ]]; then INITRD="$TESTDIR/initrd.img" fi if [ -f /etc/machine-id ]; then read -r MACHINE_ID /dev/null 2>&1; then dfatal "mksquashfs not found" exit 1 fi if ! command -v veritysetup >/dev/null 2>&1; then dfatal "veritysetup not found" exit 1 fi # Local modifications of some global variables is intentional in this # subshell (SC2030) # shellcheck disable=SC2030 ( BASICTOOLS=( bash cat grep mount sleep ) oldinitdir="$initdir" rm -rfv "$TESTDIR/minimal" export initdir="$TESTDIR/minimal" # app0 will use TemporaryFileSystem=/var/lib, app1 will need the mount point in the base image mkdir -p "$initdir/usr/lib/systemd/system" "$initdir/usr/lib/extension-release.d" "$initdir/etc" "$initdir/var/tmp" "$initdir/opt" "$initdir/var/lib/app1" setup_basic_dirs install_basic_tools install_ld_so_conf # Shellcheck treats [[ -v VAR ]] as an assignment to avoid a different # issue, thus falsely triggering SC2030 in this case # See: koalaman/shellcheck#1409 if [[ -v ASAN_RT_PATH ]]; then # If we're compiled with ASan, install the ASan RT (and its dependencies) # into the verity images to get rid of the annoying errors about # missing $LD_PRELOAD libraries. 
inst_libs "$ASAN_RT_PATH" inst_library "$ASAN_RT_PATH" fi cp "$os_release" "$initdir/usr/lib/os-release" ln -s ../usr/lib/os-release "$initdir/etc/os-release" touch "$initdir/etc/machine-id" "$initdir/etc/resolv.conf" touch "$initdir/opt/some_file" echo MARKER=1 >>"$initdir/usr/lib/os-release" echo "PORTABLE_PREFIXES=app0 minimal minimal-app0" >>"$initdir/usr/lib/os-release" cat >"$initdir/usr/lib/systemd/system/minimal-app0.service" <"$oldinitdir/usr/share/minimal_0.roothash" sed -i "s/MARKER=1/MARKER=2/g" "$initdir/usr/lib/os-release" rm "$initdir/usr/lib/systemd/system/minimal-app0-foo.service" cp "$initdir/usr/lib/systemd/system/minimal-app0.service" "$initdir/usr/lib/systemd/system/minimal-app0-bar.service" mksquashfs "$initdir" "$oldinitdir/usr/share/minimal_1.raw" -noappend veritysetup format "$oldinitdir/usr/share/minimal_1.raw" "$oldinitdir/usr/share/minimal_1.verity" | \ grep '^Root hash:' | cut -f2 | tr -d '\n' >"$oldinitdir/usr/share/minimal_1.roothash" # Rolling distros like Arch do not set VERSION_ID local version_id="" if grep -q "^VERSION_ID=" "$os_release"; then version_id="$(grep "^VERSION_ID=" "$os_release")" fi export initdir="$TESTDIR/app0" mkdir -p "$initdir/usr/lib/extension-release.d" "$initdir/usr/lib/systemd/system" "$initdir/opt" grep "^ID=" "$os_release" >"$initdir/usr/lib/extension-release.d/extension-release.app0" echo "${version_id}" >>"$initdir/usr/lib/extension-release.d/extension-release.app0" cat >"$initdir/usr/lib/systemd/system/app0.service" <"$initdir/opt/script0.sh" <\${STATE_DIRECTORY}/foo cat /usr/lib/extension-release.d/extension-release.app0 EOF chmod +x "$initdir/opt/script0.sh" echo MARKER=1 >"$initdir/usr/lib/systemd/system/some_file" mksquashfs "$initdir" "$oldinitdir/usr/share/app0.raw" -noappend export initdir="$TESTDIR/app1" mkdir -p "$initdir/usr/lib/extension-release.d" "$initdir/usr/lib/systemd/system" "$initdir/opt" grep "^ID=" "$os_release" >"$initdir/usr/lib/extension-release.d/extension-release.app2" ( echo "${version_id}" echo "SYSEXT_SCOPE=portable" echo "PORTABLE_PREFIXES=app1" ) >>"$initdir/usr/lib/extension-release.d/extension-release.app2" setfattr -n user.extension-release.strict -v false "$initdir/usr/lib/extension-release.d/extension-release.app2" cat >"$initdir/usr/lib/systemd/system/app1.service" <"$initdir/opt/script1.sh" <\${STATE_DIRECTORY}/foo cat /usr/lib/extension-release.d/extension-release.app2 EOF chmod +x "$initdir/opt/script1.sh" echo MARKER=1 >"$initdir/usr/lib/systemd/system/other_file" mksquashfs "$initdir" "$oldinitdir/usr/share/app1.raw" -noappend export initdir="$TESTDIR/app-nodistro" mkdir -p "$initdir/usr/lib/extension-release.d" "$initdir/usr/lib/systemd/system" ( echo "ID=_any" echo "ARCHITECTURE=_any" ) >"$initdir/usr/lib/extension-release.d/extension-release.app-nodistro" echo MARKER=1 >"$initdir/usr/lib/systemd/system/some_file" mksquashfs "$initdir" "$oldinitdir/usr/share/app-nodistro.raw" -noappend ) } setup_basic_environment() { # create the basic filesystem layout setup_basic_dirs install_systemd install_missing_libraries install_config_files install_zoneinfo create_rc_local install_basic_tools install_libnss install_pam install_dbus install_fonts install_locales install_keymaps install_x11_keymaps install_terminfo install_execs install_fs_tools install_modules install_plymouth install_haveged install_debug_tools install_ld_so_conf install_testuser has_user_dbus_socket && install_user_dbus setup_selinux strip_binaries instmods veth install_depmod_files generate_module_dependencies if get_bool 
"$IS_BUILT_WITH_ASAN"; then create_asan_wrapper fi if get_bool "$TEST_INSTALL_VERITY_MINIMAL"; then install_verity_minimal fi } setup_selinux() { dinfo "Setup SELinux" # don't forget KERNEL_APPEND='... selinux=1 ...' if ! get_bool "$SETUP_SELINUX"; then dinfo "SETUP_SELINUX != yes, skipping SELinux configuration" return 0 fi local conf_dir=/etc/selinux local fixfiles_tools=(bash uname cat sort uniq awk grep egrep head expr find rm secon setfiles) # Make sure the following statement can't expand to "/" to prevent # a potential where-are-my-backups situation rm -rf "${initdir:?}/$conf_dir" if ! cp -ar "$conf_dir" "$initdir/$conf_dir"; then dfatal "Failed to copy $conf_dir" exit 1 fi touch "$initdir/.autorelabel" mkdir -p "$initdir/usr/lib/systemd/tests/testdata/units/basic.target.wants" ln -sf ../autorelabel.service "$initdir/usr/lib/systemd/tests/testdata/units/basic.target.wants/" image_install "${fixfiles_tools[@]}" image_install fixfiles image_install sestatus } install_valgrind() { if ! type -p valgrind; then dfatal "Failed to install valgrind" exit 1 fi local valgrind_bins valgrind_libs valgrind_dbg_and_supp readarray -t valgrind_bins < <(strace -e execve valgrind /bin/true 2>&1 >/dev/null | perl -lne 'print $1 if /^execve\("([^"]+)"/') image_install "${valgrind_bins[@]}" readarray -t valgrind_libs < <(LD_DEBUG=files valgrind /bin/true 2>&1 >/dev/null | perl -lne 'print $1 if m{calling init: (/.*vgpreload_.*)}') image_install "${valgrind_libs[@]}" readarray -t valgrind_dbg_and_supp < <( strace -e open valgrind /bin/true 2>&1 >/dev/null | perl -lne 'if (my ($fname) = /^open\("([^"]+).*= (?!-)\d+/) { print $fname if $fname =~ /debug|\.supp$/ }' ) image_install "${valgrind_dbg_and_supp[@]}" } create_valgrind_wrapper() { local valgrind_wrapper="$initdir/$ROOTLIBDIR/systemd-under-valgrind" ddebug "Create $valgrind_wrapper" cat >"$valgrind_wrapper" <"$asan_wrapper" <&2 "Couldn't find ASan RT at '$ASAN_RT_PATH', can't continue" exit 1 fi DEFAULT_ASAN_OPTIONS=${ASAN_OPTIONS:-strict_string_checks=1:detect_stack_use_after_return=1:check_initialization_order=1:strict_init_order=1} DEFAULT_UBSAN_OPTIONS=${UBSAN_OPTIONS:-print_stacktrace=1:print_summary=1:halt_on_error=1} DEFAULT_ENVIRONMENT="ASAN_OPTIONS=\$DEFAULT_ASAN_OPTIONS UBSAN_OPTIONS=\$DEFAULT_UBSAN_OPTIONS" # Create a simple environment file which can be included by systemd services # that need it (i.e. services that utilize DynamicUser=true and bash, etc.) cat >/usr/lib/systemd/systemd-asan-env < not found" issues in the future export PATH="/sbin:/bin:/usr/sbin:/usr/bin" mount -t proc proc /proc mount -t sysfs sysfs /sys mount -o remount,rw / DEFAULT_ENVIRONMENT="\$DEFAULT_ENVIRONMENT ASAN_RT_PATH=$ASAN_RT_PATH" if [[ "$ASAN_COMPILER" == "clang" ]]; then # Let's add the ASan DSO's path to the dynamic linker's cache. This is pretty # unnecessary for gcc & libasan, however, for clang this is crucial, as its # runtime ASan DSO is in a non-standard (library) path. echo "${ASAN_RT_PATH%/*}" >/etc/ld.so.conf.d/asan-path-override.conf ldconfig fi echo DefaultEnvironment=\$DEFAULT_ENVIRONMENT >>/etc/systemd/system.conf echo DefaultTimeoutStartSec=180s >>/etc/systemd/system.conf echo DefaultStandardOutput=journal+console >>/etc/systemd/system.conf # ASAN and syscall filters aren't compatible with each other. find / -name '*.service' -type f | xargs sed -i 's/^\\(MemoryDeny\\|SystemCall\\)/#\\1/' # The redirection of ASAN reports to a file prevents them from ending up in /dev/null. 
# But, apparently, sometimes it doesn't work: https://github.com/google/sanitizers/issues/886. JOURNALD_CONF_DIR=/etc/systemd/system/systemd-journald.service.d mkdir -p "\$JOURNALD_CONF_DIR" printf "[Service]\nEnvironment=ASAN_OPTIONS=\$DEFAULT_ASAN_OPTIONS:log_path=/systemd-journald.asan.log UBSAN_OPTIONS=\$DEFAULT_UBSAN_OPTIONS:log_path=/systemd-journald.ubsan.log\n" >"\$JOURNALD_CONF_DIR/env.conf" # Sometimes UBSan sends its reports to stderr regardless of what is specified in log_path # Let's try to catch them by redirecting stderr (and stdout just in case) to a file # See https://github.com/systemd/systemd/pull/12524#issuecomment-491108821 printf "[Service]\nStandardOutput=file:/systemd-journald.out\n" >"\$JOURNALD_CONF_DIR/out.conf" # 90s isn't enough for some services to finish when literally everything is run # under ASan+UBSan in containers, which, in turn, are run in VMs. # Let's limit which environments such services should be executed in. mkdir -p /etc/systemd/system/systemd-hwdb-update.service.d printf "[Unit]\nConditionVirtualization=container\n\n[Service]\nTimeoutSec=240s\n" >/etc/systemd/system/systemd-hwdb-update.service.d/env-override.conf # Let's override another hard-coded timeout that kicks in too early mkdir -p /etc/systemd/system/systemd-journal-flush.service.d printf "[Service]\nTimeoutSec=180s\n" >/etc/systemd/system/systemd-journal-flush.service.d/timeout.conf export ASAN_OPTIONS=\$DEFAULT_ASAN_OPTIONS:log_path=/systemd.asan.log UBSAN_OPTIONS=\$DEFAULT_UBSAN_OPTIONS exec "$ROOTLIBDIR/systemd" "\$@" EOF chmod 0755 "$asan_wrapper" } create_strace_wrapper() { local strace_wrapper="$initdir/$ROOTLIBDIR/systemd-under-strace" ddebug "Create $strace_wrapper" cat >"$strace_wrapper" <"${initdir:?}/etc/iscsi/iscsid.conf" # Since open-iscsi 2.1.2 [0] the initiator name should be generated via # a one-time service instead of distro package's post-install scripts. # However, some distros still use this approach even after this patch, # so prefer the already existing initiatorname.iscsi file if it exists. # # [0] https://github.com/open-iscsi/open-iscsi/commit/f37d5b653f9f251845db3f29b1a3dcb90ec89731 if [[ ! -e /etc/iscsi/initiatorname.iscsi ]]; then image_install "${ROOTLIBDIR:?}"/system/iscsi-init.service if get_bool "$IS_BUILT_WITH_ASAN"; then # The iscsi-init.service calls `sh` which might, in certain circumstances, # pull in instrumented systemd NSS modules causing `sh` to fail. Let's mitigate # this by pulling in an env file crafted by `create_asan_wrapper()` that # (among others) pre-loads ASan's DSO. mkdir -p "${initdir:?}/etc/systemd/system/iscsi-init.service.d/" printf "[Service]\nEnvironmentFile=/usr/lib/systemd/systemd-asan-env" >"${initdir:?}/etc/systemd/system/iscsi-init.service.d/asan-env.conf" fi else inst_simple "/etc/iscsi/initiatorname.iscsi" fi fi # Install server-side stuff ("target" in iSCSI jargon) - TGT in this case # (tgt on Debian, scsi-target-utils on Fedora, etc.) if [[ -z "$inst" || "$inst" =~ (server|target) ]]; then image_install tgt-admin tgt-setup-lun tgtadm tgtd tgtimg image_install -o /etc/sysconfig/tgtd image_install "${ROOTLIBDIR:?}"/system/tgtd.service mkdir -p "${initdir:?}/etc/tgt" touch "${initdir:?}"/etc/tgt/{tgtd,targets}.conf # Install perl modules required by tgt-admin # # Forgive me father for I have sinned. The monstrosity below appends # a perl snippet to the `tgt-admin` perl script on the fly, which # dumps a list of files (perl modules) required by `tgt-admin` at # the runtime plus any DSOs loaded via DynaLoader. 
This list is then # passed to `inst_simple` which installs the necessary files into the image # # shellcheck disable=SC2016 while read -r file; do inst_simple "$file" done < <(perl -- <(cat "$(command -v tgt-admin)" <(echo -e 'use DynaLoader; print map { "$_\n" } values %INC; print join("\n", @DynaLoader::dl_shared_objects)')) -p | awk '/^\// { print $1 }') fi } install_mdadm() { local unit local mdadm_units=( system/mdadm-grow-continue@.service system/mdadm-last-resort@.service system/mdadm-last-resort@.timer system/mdmon@.service system/mdmonitor-oneshot.service system/mdmonitor-oneshot.timer system/mdmonitor.service system-shutdown/mdadm.shutdown ) image_install mdadm mdmon inst_rules 01-md-raid-creating.rules 63-md-raid-arrays.rules 64-md-raid-assembly.rules 69-md-clustered-confirm-device.rules # Fedora/CentOS/RHEL ships this rule file [[ -f /lib/udev/rules.d/65-md-incremental.rules ]] && inst_rules 65-md-incremental.rules for unit in "${mdadm_units[@]}"; do image_install "${ROOTLIBDIR:?}/$unit" done } install_compiled_systemd() { dinfo "Install compiled systemd" local ninja_bin ninja_bin="$(type -P ninja || type -P ninja-build)" if [[ -z "$ninja_bin" ]]; then dfatal "ninja was not found" exit 1 fi (set -x; DESTDIR="$initdir" "$ninja_bin" -C "$BUILD_DIR" install) # If we are doing coverage runs, copy over the binary notes files, as lcov expects to # find them in the same directory as the runtime data counts if get_bool "$IS_BUILT_WITH_COVERAGE"; then mkdir -p "${initdir}/${BUILD_DIR:?}/" rsync -am --include='*/' --include='*.gcno' --exclude='*' "${BUILD_DIR:?}/" "${initdir}/${BUILD_DIR:?}/" # Set effective & default ACLs for the build dir so unprivileged # processes can write gcda files with coverage stats setfacl -R -m 'd:o:rwX' -m 'o:rwX' "${initdir}/${BUILD_DIR:?}/" fi } install_debian_systemd() { dinfo "Install debian systemd" local files while read -r deb; do files="$(dpkg-query -L "$deb" 2>/dev/null)" || continue ddebug "Install debian files from package $deb" for file in $files; do [ -e "$file" ] || continue [ -d "$file" ] && continue inst "$file" done done < <(grep -E '^Package:' "${SOURCE_DIR}/debian/control" | cut -d ':' -f 2) } install_suse_systemd() { local pkgs dinfo "Install SUSE systemd" pkgs=( systemd systemd-container systemd-coredump systemd-experimental systemd-journal-remote # Since commit fb6f25d7b979134a, systemd-resolved, which is shipped by # systemd-network sub-package on openSUSE, has its own testsuite. systemd-network systemd-portable udev ) for p in "${pkgs[@]}"; do rpm -q "$p" &>/dev/null || continue ddebug "Install files from package $p" while read -r f; do [ -e "$f" ] || continue [ -d "$f" ] && continue inst "$f" done < <(rpm -ql "$p") done # Embed the files needed by the extended testsuite at runtime. Also include # the unit tests needed by TEST-02-UNITTESTS. This is mostly equivalent to # what `ninja install` does for the tests when '-Dinstall-tests=true'. # # Why? openSUSE ships a package named 'systemd-testsuite' which contains # the minimal set of files that allows to run the testsuite on the host (as # long as it runs an equivalent version of systemd) getting rid of the # hassles of fetching, configuring, building the source code. dinfo "Install the files needed by the tests at runtime" image_install "${SOURCE_DIR}"/test-* inst_recursive "${SOURCE_DIR}/testdata" inst_recursive "${SOURCE_DIR}/manual" # On openSUSE, this directory is not created at package install, at least # for now. 
mkdir -p "$initdir/var/log/journal/remote" } install_distro_systemd() { dinfo "Install distro systemd" if get_bool "$LOOKS_LIKE_DEBIAN"; then install_debian_systemd elif get_bool "$LOOKS_LIKE_SUSE"; then install_suse_systemd else dfatal "NO_BUILD not supported for this distro" exit 1 fi } install_systemd() { dinfo "Install systemd" if get_bool "$NO_BUILD"; then install_distro_systemd else install_compiled_systemd fi # remove unneeded documentation rm -fr "${initdir:?}"/usr/share/{man,doc} # enable debug logging in PID1 echo LogLevel=debug >>"$initdir/etc/systemd/system.conf" if [[ -n "$TEST_SYSTEMD_LOG_LEVEL" ]]; then echo DefaultEnvironment=SYSTEMD_LOG_LEVEL="$TEST_SYSTEMD_LOG_LEVEL" >>"$initdir/etc/systemd/system.conf" fi # store coredumps in journal echo Storage=journal >>"$initdir/etc/systemd/coredump.conf" # Propagate SYSTEMD_UNIT_PATH to user systemd managers mkdir "$initdir/etc/systemd/system/user@.service.d/" echo -e "[Service]\nPassEnvironment=SYSTEMD_UNIT_PATH\n" >"$initdir/etc/systemd/system/user@.service.d/override.conf" # When built with gcov, disable ProtectSystem= and ProtectHome= in the test # images, since it prevents gcov to write the coverage reports (*.gcda # files) if get_bool "$IS_BUILT_WITH_COVERAGE"; then mkdir -p "$initdir/etc/systemd/system/service.d/" echo -e "[Service]\nProtectSystem=no\nProtectHome=no\n" >"$initdir/etc/systemd/system/service.d/99-gcov-override.conf" # Similarly, set ReadWritePaths= to the $BUILD_DIR in the test image # to make the coverage work with units utilizing DynamicUser=yes. Do # this only for services from TEST-20, as setting this system-wide # has many undesirable side-effects mkdir -p "$initdir/etc/systemd/system/test20-.service.d/" echo -e "[Service]\nReadWritePaths=${BUILD_DIR:?}\n" >"$initdir/etc/systemd/system/test20-.service.d/99-gcov-rwpaths-override.conf" fi # If we're built with -Dportabled=false, tests with systemd-analyze # --profile will fail. Since we need just the profile (text) files, let's # copy them into the image if they don't exist there. local portable_dir="${initdir:?}${ROOTLIBDIR:?}/portable" if [[ ! -d "$portable_dir/profile/strict" ]]; then dinfo "Couldn't find portable profiles in the test image" dinfo "Copying them directly from the source tree" mkdir -p "$portable_dir" cp -frv "${SOURCE_DIR:?}/src/portable/profile" "$portable_dir" fi } get_ldpath() { local rpath rpath="$(objdump -p "${1:?}" 2>/dev/null | awk "/R(UN)?PATH/ { print \"$initdir\" \$2 }" | paste -sd :)" if [ -z "$rpath" ] ; then echo "$BUILD_DIR" else echo "$rpath" fi } install_missing_libraries() { dinfo "Install missing libraries" # install possible missing libraries for i in "${initdir:?}"{,/usr}/{sbin,bin}/* "$initdir"{,/usr}/lib/systemd/{,tests/{,manual/,unsafe/}}*; do LD_LIBRARY_PATH="${LD_LIBRARY_PATH:+$LD_LIBRARY_PATH:}$(get_ldpath "$i")" inst_libs "$i" done # Install libgcc_s.so if available, since it's dlopen()ed by libpthread # and might cause unexpected failures during pthread_exit()/pthread_cancel() # if not present # See: https://github.com/systemd/systemd/pull/23858 while read -r libgcc_s; do [[ -e "$libgcc_s" ]] && inst_library "$libgcc_s" done < <(ldconfig -p | awk '/\/libgcc_s.so.1$/ { print $4 }') local lib path # A number of dependencies is now optional via dlopen, so the install # script will not pick them up, since it looks at linkage. 
for lib in libcryptsetup libidn libidn2 pwquality libqrencode tss2-esys tss2-rc tss2-mu tss2-tcti-device libfido2 libbpf libelf libdw xkbcommon p11-kit-1; do ddebug "Searching for $lib via pkg-config" if pkg-config --exists "$lib"; then path="$(pkg-config --variable=libdir "$lib")" if [ -z "${path}" ]; then ddebug "$lib.pc does not contain a libdir variable, skipping" continue fi if ! [[ ${lib} =~ ^lib ]]; then lib="lib${lib}" fi # p11-kit-1's .so doesn't have the API level in the name if [[ ${lib} =~ p11-kit-1$ ]]; then lib="libp11-kit" fi # Some pkg-config files are broken and give out the wrong paths # (eg: libcryptsetup), so just ignore them inst_libs "${path}/${lib}.so" || true inst_library "${path}/${lib}.so" || true if [[ "$lib" == "libxkbcommon" ]]; then install_x11_keymaps full fi else ddebug "$lib.pc not found, skipping" continue fi done # Install extra openssl 3 stuff path="$(pkg-config --variable=libdir libcrypto)" inst_simple "${path}/ossl-modules/legacy.so" || true inst_simple "${path}/ossl-modules/fips.so" || true inst_simple "${path}/engines-3/afalg.so" || true inst_simple "${path}/engines-3/capi.so" || true inst_simple "${path}/engines-3/loader_attic.so" || true inst_simple "${path}/engines-3/padlock.so" || true # Binaries from mtools depend on the gconv modules to translate between codepages. Because there's no # pkg-config file for these, we copy every gconv/ directory we can find in /usr/lib and /usr/lib64. # shellcheck disable=SC2046 inst_recursive $(find /usr/lib* -name gconv 2>/dev/null) } cleanup_loopdev() { if [ -n "${LOOPDEV:=}" ]; then ddebug "losetup -d $LOOPDEV" losetup -d "${LOOPDEV}" unset LOOPDEV fi } add_at_exit_handler cleanup_loopdev create_empty_image() { if [ -z "${IMAGE_NAME:=}" ]; then echo "create_empty_image: \$IMAGE_NAME not set" exit 1 fi # Partition sizes are in MiBs local root_size=1000 local data_size=50 if ! get_bool "$NO_BUILD"; then if meson configure "${BUILD_DIR:?}" | grep 'static-lib\|standalone-binaries' | awk '{ print $2 }' | grep -q 'true'; then root_size=$((root_size+=200)) fi if meson configure "${BUILD_DIR:?}" | grep 'link-.*-shared' | awk '{ print $2 }' | grep -q 'false'; then root_size=$((root_size+=200)) fi if get_bool "$IS_BUILT_WITH_COVERAGE"; then root_size=$((root_size+=250)) fi fi if ! get_bool "$STRIP_BINARIES"; then root_size=$((4 * root_size)) data_size=$((2 * data_size)) fi if [ "$IMAGE_NAME" = "repart" ]; then root_size=$((root_size+=1000)) fi echo "Setting up ${IMAGE_PUBLIC:?} (${root_size} MB)" rm -f "${IMAGE_PRIVATE:?}" "$IMAGE_PUBLIC" # Create the blank file to use as a root filesystem truncate -s "${root_size}M" "$IMAGE_PUBLIC" LOOPDEV=$(losetup --show -P -f "$IMAGE_PUBLIC") [ -b "$LOOPDEV" ] || return 1 # Create two partitions - a root one and a data one (utilized by some tests) sfdisk "$LOOPDEV" < undef, "dbus-broker-launch" => undef, ); } print $2 if /\s(\S*)\[(\d+)\]:\s*SUMMARY:\s+\w+Sanitizer/ && !exists $services_to_ignore{$1}' )" if [[ -n "$pids" ]]; then ret=$((ret+1)) for pid in $pids; do "$JOURNALCTL" -D "$root/var/log/journal" _PID="$pid" --no-pager done fi fi return $ret } check_coverage_reports() { local root="${1:?}" if get_bool "$NO_BUILD"; then return 0 fi if ! get_bool "$IS_BUILT_WITH_COVERAGE"; then return 0 fi if [ -n "${ARTIFACT_DIRECTORY}" ]; then dest="${ARTIFACT_DIRECTORY}/${testname:?}.coverage-info" else dest="${TESTDIR:?}/coverage-info" fi # Create a coverage report that will later be uploaded. Remove info about # system libraries/headers, as we don't really care about them. 
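    # (Illustrative, outside the scope of this function): the tracefile written
    # to $dest can later be rendered into a browsable HTML report with lcov's
    # genhtml, e.g. `genhtml --output-directory coverage-html "$dest"`.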
if [[ -f "$dest" ]]; then # If the destination report file already exists, don't overwrite it, but # dump the new report in a temporary file and then merge it with the already # present one - this usually happens when running both "parts" of a test # in one run (the qemu and the nspawn part). lcov --directory "${root}/${BUILD_DIR:?}" --capture --output-file "${dest}.new" lcov --remove "${dest}.new" -o "${dest}.new" '/usr/include/*' '/usr/lib/*' lcov --add-tracefile "${dest}" --add-tracefile "${dest}.new" -o "${dest}" rm -f "${dest}.new" else lcov --directory "${root}/${BUILD_DIR:?}" --capture --output-file "${dest}" lcov --remove "${dest}" -o "${dest}" '/usr/include/*' '/usr/lib/*' fi # If the test logs contain lines like: # # ...systemd-resolved[735885]: profiling:/systemd-meson-build/src/shared/libsystemd-shared-250.a.p/base-filesystem.c.gcda:Cannot open # # it means we're possibly missing some coverage since gcov can't write the stats, # usually due to the sandbox being too restrictive (e.g. ProtectSystem=yes, # ProtectHome=yes) or the $BUILD_DIR being inaccessible to non-root users - see # `setfacl` stuff in install_compiled_systemd(). if ! get_bool "${IGNORE_MISSING_COVERAGE:=}" && \ "${JOURNALCTL:?}" -q --no-pager -D "${root:?}/var/log/journal" --grep "profiling:.+?gcda:[Cc]annot open"; then derror "Detected possibly missing coverage, check the journal" return 1 fi return 0 } save_journal() { # Default to always saving journal local save="yes" if [ "${TEST_SAVE_JOURNAL}" = "no" ]; then save="no" elif [ "${TEST_SAVE_JOURNAL}" = "fail" ] && [ "$2" = "0" ]; then save="no" fi if [ -n "${ARTIFACT_DIRECTORY}" ]; then dest="${ARTIFACT_DIRECTORY}/${testname:?}.journal" else dest="${TESTDIR:?}/system.journal" fi for j in "${1:?}"/*; do if get_bool "$save"; then if [ "$SYSTEMD_JOURNAL_REMOTE" = "" ]; then cp -a "$j" "$dest" else "$SYSTEMD_JOURNAL_REMOTE" -o "$dest" --getter="$JOURNALCTL -o export -D $j" fi fi if [ -n "${TEST_SHOW_JOURNAL}" ]; then echo "---- $j ----" "$JOURNALCTL" --no-pager -o short-monotonic --no-hostname --priority="${TEST_SHOW_JOURNAL}" -D "$j" fi rm -r "$j" done if ! 
get_bool "$save"; then return 0 fi if [ -n "${SUDO_USER}" ]; then setfacl -m "user:${SUDO_USER:?}:r-X" "$dest"* fi # we want to print this sometime later, so save this in a variable JOURNAL_LIST="$(ls -l "$dest"*)" } check_result_common() { local workspace="${1:?}" local ret if [ -s "$workspace/failed" ]; then # Non-empty …/failed has highest priority cp -a "$workspace/failed" "${TESTDIR:?}/" if [ -n "${SUDO_USER}" ]; then setfacl -m "user:${SUDO_USER:?}:r-X" "${TESTDIR:?}/"failed fi ret=1 elif get_bool "$TIMED_OUT"; then echo "(timeout)" >"${TESTDIR:?}/failed" ret=2 elif [ -e "$workspace/testok" ]; then # …/testok always counts (but with lower priority than …/failed) ret=0 elif [ -e "$workspace/skipped" ]; then # …/skipped always counts (a message is expected) echo "${TESTNAME:?} was skipped:" cat "$workspace/skipped" ret=0 else echo "(failed; see logs)" >"${TESTDIR:?}/failed" ret=3 fi check_asan_reports "$workspace" || ret=4 check_coverage_reports "$workspace" || ret=5 save_journal "$workspace/var/log/journal" $ret if [ -d "${ARTIFACT_DIRECTORY}" ] && [ -f "$workspace/strace.out" ]; then cp "$workspace/strace.out" "${ARTIFACT_DIRECTORY}/" fi if [ ${ret:?} != 0 ] && [ -f "$TESTDIR/failed" ]; then echo -n "${TESTNAME:?}: " cat "$TESTDIR/failed" fi echo "${JOURNAL_LIST:-"No journals were saved"}" return ${ret:?} } check_result_nspawn() { local workspace="${1:?}" local ret # Run a test-specific checks if defined by check_result_nspawn_hook() if declare -F check_result_nspawn_hook >/dev/null; then if ! check_result_nspawn_hook "${workspace}"; then derror "check_result_nspawn_hook() returned with EC > 0" ret=4 fi fi check_result_common "${workspace}" ret=$? _umount_dir "${initdir:?}" return $ret } # can be overridden in specific test check_result_qemu() { local ret mount_initdir # Run a test-specific checks if defined by check_result_qemu_hook() if declare -F check_result_qemu_hook >/dev/null; then if ! check_result_qemu_hook "${initdir:?}"; then derror "check_result_qemu_hook() returned with EC > 0" ret=4 fi fi check_result_common "${initdir:?}" ret=$? _umount_dir "${initdir:?}" return $ret } check_result_nspawn_unittests() { local workspace="${1:?}" local ret=1 [[ -e "$workspace/testok" ]] && ret=0 if [[ -s "$workspace/failed" ]]; then ret=$((ret + 1)) echo "=== Failed test log ===" cat "$workspace/failed" else if [[ -s "$workspace/skipped" ]]; then echo "=== Skipped test log ==" cat "$workspace/skipped" # We might have only skipped tests - that should not fail the job ret=0 fi if [[ -s "$workspace/testok" ]]; then echo "=== Passed tests ===" cat "$workspace/testok" fi fi get_bool "${TIMED_OUT:=}" && ret=1 check_coverage_reports "$workspace" || ret=5 save_journal "$workspace/var/log/journal" $ret _umount_dir "${initdir:?}" return $ret } check_result_qemu_unittests() { local ret=1 mount_initdir [[ -e "${initdir:?}/testok" ]] && ret=0 if [[ -s "$initdir/failed" ]]; then ret=$((ret + 1)) echo "=== Failed test log ===" cat "$initdir/failed" else if [[ -s "$initdir/skipped" ]]; then echo "=== Skipped test log ==" cat "$initdir/skipped" # We might have only skipped tests - that should not fail the job ret=0 fi if [[ -s "$initdir/testok" ]]; then echo "=== Passed tests ===" cat "$initdir/testok" fi fi get_bool "${TIMED_OUT:=}" && ret=1 check_coverage_reports "$initdir" || ret=5 save_journal "$initdir/var/log/journal" $ret _umount_dir "$initdir" return $ret } strip_binaries() { dinfo "Strip binaries" if ! 
get_bool "$STRIP_BINARIES"; then dinfo "STRIP_BINARIES == no, keeping binaries unstripped" return 0 fi while read -r bin; do strip --strip-unneeded "$bin" |& grep -vi 'file format not recognized' | ddebug || : done < <(find "${initdir:?}" -executable -not -path '*/lib/modules/*.ko' -type f) } create_rc_local() { dinfo "Create rc.local" mkdir -p "${initdir:?}/etc/rc.d" cat >"$initdir/etc/rc.d/rc.local" <"$initdir/etc/sysusers.d/testuser.conf" <"${initdir:?}/etc/environment" : >"$initdir/etc/machine-id" : >"$initdir/etc/resolv.conf" # set the hostname echo 'H' >"$initdir/etc/hostname" # let's set up just one image with the traditional verbose output if [ "${IMAGE_NAME:?}" != "basic" ]; then mkdir -p "$initdir/etc/systemd/system.conf.d" echo -e '[Manager]\nStatusUnitFormat=name' >"$initdir/etc/systemd/system.conf.d/status.conf" fi } install_basic_tools() { dinfo "Install basic tools" image_install "${BASICTOOLS[@]}" image_install -o sushell # in Debian ldconfig is just a shell script wrapper around ldconfig.real image_install -o ldconfig.real } install_debug_tools() { dinfo "Install debug tools" image_install -o "${DEBUGTOOLS[@]}" if get_bool "$INTERACTIVE_DEBUG"; then # Set default TERM from vt220 to linux, so at least basic key shortcuts work local getty_override="${initdir:?}/etc/systemd/system/serial-getty@.service.d" mkdir -p "$getty_override" echo -e "[Service]\nEnvironment=TERM=linux" >"$getty_override/default-TERM.conf" echo 'export TERM=linux' >>"$initdir/etc/profile" if command -v resize >/dev/null; then image_install resize echo "resize" >>"$initdir/etc/profile" fi # Sometimes we might end up with plymouthd still running (especially # with the initrd -> asan_wrapper -> systemd transition), which will eat # our inputs and make debugging via tty impossible. Let's fix this by # killing plymouthd explicitly for the interactive sessions. # Note: we can't use pkill/pidof/etc. here due to a bug in libasan, see: # - https://github.com/llvm/llvm-project/issues/49223 # - https://bugzilla.redhat.com/show_bug.cgi?id=2098125 local plymouth_unit="${initdir:?}/etc/systemd/system/kill-plymouth.service" cat >"$plymouth_unit" <&1 >/dev/null | sed -n '/calling init: .*libnss_/ {s!^.* /!/!; p}') if [[ ${#NSS_LIBS[@]} -gt 0 ]]; then image_install "${NSS_LIBS[@]}" fi } install_dbus() { dinfo "Install dbus" inst "${ROOTLIBDIR:?}/system/dbus.socket" # Newer Fedora versions use dbus-broker by default. Let's install it if it's available. if [ -f "$ROOTLIBDIR/system/dbus-broker.service" ]; then inst "$ROOTLIBDIR/system/dbus-broker.service" inst_symlink /etc/systemd/system/dbus.service inst /usr/bin/dbus-broker inst /usr/bin/dbus-broker-launch elif [ -f "$ROOTLIBDIR/system/dbus-daemon.service" ]; then # Fedora rawhide replaced dbus.service with dbus-daemon.service inst "$ROOTLIBDIR/system/dbus-daemon.service" # Alias symlink inst_symlink /etc/systemd/system/dbus.service else inst "$ROOTLIBDIR/system/dbus.service" fi while read -r file; do inst "$file" done < <(find /etc/dbus-1 /usr/share/dbus-1 -xtype f 2>/dev/null) # setup policy for Type=dbus test mkdir -p "${initdir:?}/etc/dbus-1/system.d" cat >"$initdir/etc/dbus-1/system.d/systemd.test.ExecStopPost.conf" < EOF # If we run without KVM, bump the service start timeout if ! 
get_bool "$QEMU_KVM"; then cat >"$initdir/etc/dbus-1/system.d/service.timeout.conf" < 60000 EOF # Bump the client-side timeout in sd-bus as well mkdir -p "$initdir/etc/systemd/system.conf.d" echo -e '[Manager]\nDefaultEnvironment=SYSTEMD_BUS_TIMEOUT=60' >"$initdir/etc/systemd/system.conf.d/bus-timeout.conf" fi } install_user_dbus() { dinfo "Install user dbus" local userunitdir if ! userunitdir="$(pkg-config --variable=systemduserunitdir systemd)"; then dwarn "WARNING! Cannot determine userunitdir from pkg-config, assuming /usr/lib/systemd/user" userunitdir=/usr/lib/systemd/user fi inst "$userunitdir/dbus.socket" inst_symlink "$userunitdir/sockets.target.wants/dbus.socket" || inst_symlink /etc/systemd/user/sockets.target.wants/dbus.socket # Append the After= dependency on dbus in case it isn't already set up mkdir -p "${initdir:?}/etc/systemd/system/user@.service.d/" cat >"$initdir/etc/systemd/system/user@.service.d/dbus.conf" </dev/null; then paths+=("/lib/$(dpkg-architecture -qDEB_HOST_MULTIARCH)/security") else paths+=(/lib*/security) fi for d in /etc/pam.d /{usr/,}etc/security /usr/{etc,lib}/pam.d; do [ -d "$d" ] && paths+=("$d") done while read -r file; do inst "$file" done < <(find "${paths[@]}" -xtype f) # pam_unix depends on unix_chkpwd. # see http://www.linux-pam.org/Linux-PAM-html/sag-pam_unix.html image_install -o unix_chkpwd # set empty root password for easy debugging sed -i 's/^root:x:/root::/' "${initdir:?}/etc/passwd" # And make sure pam_unix will accept it by making sure that # the PAM module has the nullok option. for d in /etc/pam.d /usr/{etc,lib}/pam.d; do [ -d "$initdir/$d" ] || continue sed -i '/^auth.*pam_unix.so/s/$/ nullok/' "$initdir/$d"/* done } install_locales() { # install only C.UTF-8 and English locales dinfo "Install locales" if command -v meson >/dev/null \ && (meson configure "${BUILD_DIR:?}" | grep 'localegen-path */') \ || get_bool "$LOOKS_LIKE_DEBIAN"; then # locale-gen support image_install -o locale-gen localedef inst /etc/locale.gen || : inst /usr/share/i18n/SUPPORTED || : inst_recursive /usr/share/i18n/charmaps inst_recursive /usr/share/i18n/locales inst_recursive /usr/share/locale/en* inst_recursive /usr/share/locale/de* image_install /usr/share/locale/locale.alias # locale-gen might either generate each locale separately or merge them # into a single archive if ! (inst_recursive /usr/lib/locale/C.*8 /usr/lib/locale/en_*8 || image_install /usr/lib/locale/locale-archive); then dfatal "Failed to install required locales" exit 1 fi else inst_recursive /usr/lib/locale/C.*8 /usr/lib/locale/en_*8 fi } # shellcheck disable=SC2120 install_keymaps() { local i p local -a prefix=( "/usr/lib" "/usr/share" ) dinfo "Install console keymaps" if command -v meson >/dev/null \ && [[ "$(meson configure "${BUILD_DIR:?}" | grep 'split-usr' | awk '{ print $2 }')" == "true" ]] \ || [[ ! -L /lib ]]; then prefix+=( "/lib" ) fi if (( $# == 0 )); then for p in "${prefix[@]}"; do # The first three paths may be deprecated. # It seems now the last three paths are used by many distributions. for i in \ "$p"/kbd/keymaps/include/* \ "$p"/kbd/keymaps/i386/include/* \ "$p"/kbd/keymaps/i386/qwerty/us.* \ "$p"/kbd/keymaps/legacy/include/* \ "$p"/kbd/keymaps/legacy/i386/qwerty/us.* \ "$p"/kbd/keymaps/xkb/us*; do [[ -f "$i" ]] || continue inst "$i" done done else # When it takes any argument, then install more keymaps. 
for p in "${prefix[@]}"; do for i in \ "$p"/kbd/keymaps/include/* \ "$p"/kbd/keymaps/i386/*/* \ "$p"/kbd/keymaps/legacy/i386/*/* \ "$p"/kbd/keymaps/xkb/*; do [[ -f "$i" ]] || continue inst "$i" done done fi } install_x11_keymaps() { dinfo "Install x11 keymaps" if (( $# == 0 )); then # Install only keymap list. inst /usr/share/X11/xkb/rules/base.lst else # When it takes any argument, then install all keymaps. inst_recursive /usr/share/X11/xkb fi } install_zoneinfo() { dinfo "Install time zones" inst_any /usr/share/zoneinfo/Asia/Seoul inst_any /usr/share/zoneinfo/Asia/Vladivostok inst_any /usr/share/zoneinfo/Australia/Sydney inst_any /usr/share/zoneinfo/Europe/Berlin inst_any /usr/share/zoneinfo/Europe/Dublin inst_any /usr/share/zoneinfo/Europe/Kiev inst_any /usr/share/zoneinfo/Pacific/Auckland inst_any /usr/share/zoneinfo/Pacific/Honolulu inst_any /usr/share/zoneinfo/CET inst_any /usr/share/zoneinfo/EET inst_any /usr/share/zoneinfo/UTC } install_fonts() { dinfo "Install system fonts" for i in \ /usr/lib/kbd/consolefonts/eurlatgr* \ /usr/lib/kbd/consolefonts/latarcyrheb-sun16*; do [[ -f "$i" ]] || continue inst "$i" done } install_terminfo() { dinfo "Install terminfo files" local terminfodir for terminfodir in /lib/terminfo /etc/terminfo /usr/share/terminfo; do [ -f "${terminfodir}/l/linux" ] && break done image_install -o "${terminfodir}/l/linux" } has_user_dbus_socket() { if [ -f /usr/lib/systemd/user/dbus.socket ] || [ -f /etc/systemd/user/dbus.socket ]; then return 0 else echo "Per-user instances are not supported. Skipping..." return 1 fi } setup_nspawn_root_hook() { :;} setup_nspawn_root() { if [ -z "${initdir}" ]; then dfatal "\$initdir not defined" exit 1 fi rm -rf "${TESTDIR:?}/unprivileged-nspawn-root" if get_bool "$RUN_IN_UNPRIVILEGED_CONTAINER"; then ddebug "cp -ar $initdir $TESTDIR/unprivileged-nspawn-root" cp -ar "$initdir" "$TESTDIR/unprivileged-nspawn-root" fi setup_nspawn_root_hook } setup_basic_dirs() { mkdir -p "${initdir:?}/run" mkdir -p "$initdir/etc/systemd/system" mkdir -p "$initdir/var/log/journal" for d in usr/bin usr/sbin bin etc lib "${libdir:?}" sbin tmp usr var var/log var/tmp dev proc sys sysroot root run run/lock run/initramfs; do if [ -L "/$d" ]; then inst_symlink "/$d" else inst_dir "/$d" fi done ln -sfn /run "$initdir/var/run" ln -sfn /run/lock "$initdir/var/lock" } mask_supporting_services() { # mask some services that we do not want to run in these tests ln -fsv /dev/null "${initdir:?}/etc/systemd/system/systemd-hwdb-update.service" ln -fsv /dev/null "$initdir/etc/systemd/system/systemd-journal-catalog-update.service" ln -fsv /dev/null "$initdir/etc/systemd/system/systemd-networkd.service" ln -fsv /dev/null "$initdir/etc/systemd/system/systemd-networkd.socket" ln -fsv /dev/null "$initdir/etc/systemd/system/systemd-resolved.service" } inst_libs() { local bin="${1:?}" local so_regex='([^ ]*/lib[^/]*/[^ ]*\.so[^ ]*)' local file line while read -r line; do [[ "$line" = 'not a dynamic executable' ]] && break # Ignore errors about our own stuff missing. This is most likely caused # by ldd attempting to use the unprefixed RPATH. [[ "$line" =~ libsystemd.*\ not\ found ]] && continue if [[ "$line" =~ $so_regex ]]; then file="${BASH_REMATCH[1]}" [[ -e "${initdir:?}/$file" ]] && continue inst_library "$file" continue fi if [[ "$line" =~ not\ found ]]; then dfatal "Missing a shared library required by $bin." dfatal "Run \"ldd $bin\" to find out what it is." dfatal "$line" dfatal "Cannot create a test image." 
exit 1 fi done < <(LC_ALL=C ldd "$bin" 2>/dev/null) } import_testdir() { # make sure we don't get a stale LOOPDEV value from old times local _LOOPDEV="${LOOPDEV:=}" # We don't want shellcheck to follow & check the $STATEFILE # shellcheck source=/dev/null [[ -e "$STATEFILE" ]] && . "$STATEFILE" LOOPDEV="$_LOOPDEV" if [[ ! -d "$TESTDIR" ]]; then if [[ -z "$TESTDIR" ]]; then TESTDIR="$(mktemp --tmpdir=/var/tmp -d -t systemd-test.XXXXXX)" else mkdir -p "$TESTDIR" fi cat >"$STATEFILE" < # Prints the normalized path, where it removes any duplicated # and trailing slashes. # Example: # $ normalize_path ///test/test// # /test/test normalize_path() { shopt -q -s extglob set -- "${1//+(\/)//}" shopt -q -u extglob echo "${1%/}" } # convert_abs_rel # Prints the relative path, when creating a symlink to from . # Example: # $ convert_abs_rel /usr/bin/test /bin/test-2 # ../../bin/test-2 # $ ln -s $(convert_abs_rel /usr/bin/test /bin/test-2) /usr/bin/test convert_abs_rel() { local __current __absolute __abssize __cursize __newpath local -i __i __level set -- "$(normalize_path "${1:?}")" "$(normalize_path "${2:?}")" # corner case #1 - self looping link [[ "$1" == "$2" ]] && { echo "${1##*/}"; return; } # corner case #2 - own dir link [[ "${1%/*}" == "$2" ]] && { echo "."; return; } IFS="/" read -ra __current <<< "$1" IFS="/" read -ra __absolute <<< "$2" __abssize=${#__absolute[@]} __cursize=${#__current[@]} while [[ "${__absolute[__level]}" == "${__current[__level]}" ]]; do (( __level++ )) if (( __level > __abssize || __level > __cursize )) then break fi done for ((__i = __level; __i < __cursize-1; __i++)); do if ((__i > __level)) then __newpath=$__newpath"/" fi __newpath=$__newpath".." done for ((__i = __level; __i < __abssize; __i++)); do if [[ -n $__newpath ]] then __newpath=$__newpath"/" fi __newpath=$__newpath${__absolute[__i]} done echo "$__newpath" } # Install a directory, keeping symlinks as on the original system. # Example: if /lib points to /lib64 on the host, "inst_dir /lib/file" # will create ${initdir}/lib64, ${initdir}/lib64/file, # and a symlink ${initdir}/lib -> lib64. inst_dir() { local dir="${1:?}" local part="${dir%/*}" local file [[ -e "${initdir:?}/${dir}" ]] && return 0 # already there while [[ "$part" != "${part%/*}" ]] && ! [[ -e "${initdir}/${part}" ]]; do dir="$part $dir" part="${part%/*}" done # iterate over parent directories for file in $dir; do [[ -e "${initdir}/$file" ]] && continue if [[ -L $file ]]; then inst_symlink "$file" else # create directory mkdir -m 0755 "${initdir}/$file" || return 1 [[ -e "$file" ]] && chmod --reference="$file" "${initdir}/$file" chmod u+w "${initdir}/$file" fi done } # $1 = file to copy to ramdisk # $2 (optional) Name for the file on the ramdisk # Location of the image dir is assumed to be $initdir # We never overwrite the target if it exists. inst_simple() { [[ -f "${1:?}" ]] || return 1 strstr "$1" "/" || return 1 local src="$1" local target="${2:-$1}" if ! 
[[ -d ${initdir:?}/$target ]]; then [[ -e ${initdir}/$target ]] && return 0 [[ -L ${initdir}/$target ]] && return 0 [[ -d "${initdir}/${target%/*}" ]] || inst_dir "${target%/*}" fi # install checksum files also if [[ -e "${src%/*}/.${src##*/}.hmac" ]]; then inst "${src%/*}/.${src##*/}.hmac" "${target%/*}/.${target##*/}.hmac" fi ddebug "Installing $src" cp --sparse=always -pfL "$src" "${initdir}/$target" } # find symlinks linked to given library file # $1 = library file # Function searches for symlinks by stripping version numbers appended to # library filename, checks if it points to the same target and finally # prints the list of symlinks to stdout. # # Example: # rev_lib_symlinks libfoo.so.8.1 # output: libfoo.so.8 libfoo.so # (Only if libfoo.so.8 and libfoo.so exists on host system.) rev_lib_symlinks() { local fn="${1:?}" local links="" local orig orig="$(readlink -f "$1")" [[ "${fn}" =~ .*\.so\..* ]] || return 1 until [[ "${fn##*.}" == so ]]; do fn="${fn%.*}" [[ -L "${fn}" && "$(readlink -f "${fn}")" == "${orig}" ]] && links+=" ${fn}" done echo "${links}" } # Same as above, but specialized to handle dynamic libraries. # It handles making symlinks according to how the original library # is referenced. inst_library() { local src="${1:?}" local dest="${2:-$1}" local reallib symlink strstr "$1" "/" || return 1 [[ -e ${initdir:?}/$dest ]] && return 0 if [[ -L $src ]]; then # install checksum files also if [[ -e "${src%/*}/.${src##*/}.hmac" ]]; then inst "${src%/*}/.${src##*/}.hmac" "${dest%/*}/.${dest##*/}.hmac" fi reallib="$(readlink -f "$src")" inst_simple "$reallib" "$reallib" inst_dir "${dest%/*}" [[ -d "${dest%/*}" ]] && dest="$(readlink -f "${dest%/*}")/${dest##*/}" ddebug "Creating symlink $reallib -> $dest" ln -sfn -- "$(convert_abs_rel "${dest}" "${reallib}")" "${initdir}/${dest}" else inst_simple "$src" "$dest" fi # Create additional symlinks. See rev_symlinks description. for symlink in $(rev_lib_symlinks "$src") ${reallib:+$(rev_lib_symlinks "$reallib")}; do if [[ ! -e "$initdir/$symlink" ]]; then ddebug "Creating extra symlink: $symlink" inst_symlink "$symlink" fi done } # find a binary. If we were not passed the full path directly, # search in the usual places to find the binary. find_binary() { local bin="${1:?}" if [[ -z ${bin##/*} ]]; then if [[ -x "$bin" ]] || { strstr "$bin" ".so" && ldd "$bin" &>/dev/null; }; then echo "$bin" return 0 fi fi type -P "$bin" } # Same as above, but specialized to install binary executables. # Install binary executable, and all shared library dependencies, if any. inst_binary() { local bin="${1:?}" local path target # In certain cases we might attempt to install a binary which is already # present in the test image, yet it's missing from the host system. # In such cases, let's check if the binary indeed exists in the image # before doing any other checks. If it does, immediately return with # success. if [[ $# -eq 1 ]]; then for path in "" bin sbin usr/bin usr/sbin; do [[ -e "${initdir:?}${path:+/$path}/${bin}" ]] && return 0 done fi bin="$(find_binary "$bin")" || return 1 target="${2:-$bin}" [[ -e "${initdir:?}/$target" ]] && return 0 [[ -L "$bin" ]] && inst_symlink "$bin" "$target" && return 0 local file line local so_regex='([^ ]*/lib[^/]*/[^ ]*\.so[^ ]*)' # DSOs provided by systemd local systemd_so_regex='/(libudev|libsystemd.*|.+[\-_]systemd([\-_].+)?|libnss_(mymachines|myhostname|resolve)).so' local wrap_binary=0 # I love bash! 
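    # (Illustrative): the ldd output parsed below typically looks like
    #   libsystemd-shared-250.so => /usr/lib/systemd/libsystemd-shared-250.so (0x00007f...)
    #   libc.so.6 => /lib64/libc.so.6 (0x00007f...)
    # $so_regex captures the absolute library path for installation, while
    # $systemd_so_regex marks systemd's own DSOs that require the ASan wrapper.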
while read -r line; do [[ "$line" = 'not a dynamic executable' ]] && break # Ignore errors about our own stuff missing. This is most likely caused # by ldd attempting to use the unprefixed RPATH. [[ "$line" =~ libsystemd.*\ not\ found ]] && continue # We're built with ASan and the target binary loads one of the systemd's # DSOs, so we need to tweak the environment before executing the binary if get_bool "$IS_BUILT_WITH_ASAN" && [[ "$line" =~ $systemd_so_regex ]]; then wrap_binary=1 fi if [[ "$line" =~ $so_regex ]]; then file="${BASH_REMATCH[1]}" [[ -e "${initdir}/$file" ]] && continue inst_library "$file" continue fi if [[ "$line" =~ not\ found ]]; then dfatal "Missing a shared library required by $bin." dfatal "Run \"ldd $bin\" to find out what it is." dfatal "$line" dfatal "Cannot create a test image." exit 1 fi done < <(LC_ALL=C ldd "$bin" 2>/dev/null) # Same as above, but we need to wrap certain libraries unconditionally # # chown, getent, login, su, useradd, userdel - dlopen()s (not only) systemd's PAM modules # ls, stat - pulls in nss_systemd with certain options (like ls -l) when # nsswitch.conf uses [SUCCESS=merge] (like on Arch Linux) # delv, dig - pulls in nss_resolve if `resolve` is in nsswitch.conf # tar - called by machinectl in TEST-25 if get_bool "$IS_BUILT_WITH_ASAN" && [[ "$bin" =~ /(chown|delv|dig|getent|login|ls|stat|su|tar|useradd|userdel)$ ]]; then wrap_binary=1 fi # If the target binary is built with ASan support, we don't need to wrap # it, as it should handle everything by itself if get_bool "$wrap_binary" && ! is_built_with_asan "$bin"; then dinfo "Creating ASan-compatible wrapper for binary '$target'" # Install the target binary with a ".orig" suffix inst_simple "$bin" "${target}.orig" # Create a simple shell wrapper in place of the target binary, which # sets necessary ASan-related env variables and then exec()s the # suffixed target binary cat >"$initdir/$target" </dev/null || return 1 while read -r item; do if [[ -d "$item" ]]; then inst_dir "$item" elif [[ -f "$item" ]]; then inst_simple "$item" fi done < <(find "$p" 2>/dev/null) done } # image_install [-o ] [ ... ] # Install to the test image # -o optionally install the and don't fail, if it is not there image_install() { local optional=no local prog="${1:?}" if [[ "$prog" = '-o' ]]; then optional=yes shift fi for prog in "$@"; do if ! inst "$prog" ; then if get_bool "$optional"; then dinfo "Skipping program $prog as it cannot be found and is" \ "flagged to be optional" else dfatal "Failed to install $prog" exit 1 fi fi done } # Install a single kernel module along with any firmware it may require. # $1 = full path to kernel module to install install_kmod_with_fw() { local module="${1:?}" # no need to go further if the module is already installed [[ -e "${initdir:?}/lib/modules/${KERNEL_VER:?}/${module##*"/lib/modules/$KERNEL_VER/"}" ]] && return 0 [[ -e "$initdir/.kernelmodseen/${module##*/}" ]] && return 0 [ -d "$initdir/.kernelmodseen" ] && : >"$initdir/.kernelmodseen/${module##*/}" inst_simple "$module" "/lib/modules/$KERNEL_VER/${module##*"/lib/modules/$KERNEL_VER/"}" || return $? local modname="${module##*/}" local fwdir found fw modname="${modname%.ko*}" while read -r fw; do found= for fwdir in /lib/firmware/updates /lib/firmware; do if [[ -d "$fwdir" && -f "$fwdir/$fw" ]]; then inst_simple "$fwdir/$fw" "/lib/firmware/$fw" found=yes fi done if ! get_bool "$found"; then if ! 
            if ! grep -qe "\<${modname//-/_}\>" /proc/modules; then
                dinfo "Possible missing firmware \"${fw}\" for kernel module" \
                      "\"${modname}.ko\""
            else
                dwarn "Possible missing firmware \"${fw}\" for kernel module" \
                      "\"${modname}.ko\""
            fi
        fi
    done < <(modinfo -k "$KERNEL_VER" -F firmware "$module" 2>/dev/null)

    return 0
}

# Do something with all the dependencies of a kernel module.
# Note that kernel modules depend on themselves using the technique we use
# $1 = function to call for each dependency we find
#      It will be passed the full path to the found kernel module
# $2 = module to get dependencies for
# rest of args = arguments to modprobe
for_each_kmod_dep() {
    local func="${1:?}"
    local kmod="${2:?}"
    local found=0
    local cmd modpath
    shift 2

    while read -r cmd modpath _; do
        [[ "$cmd" = insmod ]] || continue
        "$func" "$modpath" || return $?
        found=1
    done < <(modprobe "$@" --ignore-install --show-depends "$kmod")

    ! get_bool "$found" && return 1
    return 0
}

# instmods [-c] <kernel module> [<kernel module> ... ]
# instmods [-c] <kernel subsystem>
# install kernel modules along with all their dependencies.
# <kernel subsystem> can be e.g. "=block" or "=drivers/usb/storage"
# FIXME(?): dracutdevs/dracut@f4e38c0da8d6bf3764c1ad753d9d52aef63050e5
instmods() {
    local check=no
    if [[ $# -ge 0 && "$1" = '-c' ]]; then
        check=yes
        shift
    fi

    inst1mod() {
        local mod="${1:?}"
        local ret=0
        local mod_dir="/lib/modules/${KERNEL_VER:?}/"

        case "$mod" in
            =*)
                if [ -f "${mod_dir}/modules.${mod#=}" ]; then
                    (
                        [[ "$mpargs" ]] && echo "$mpargs"
                        cat "${mod_dir}/modules.${mod#=}"
                    ) | instmods
                else
                    (
                        [[ "$mpargs" ]] && echo "$mpargs"
                        find "$mod_dir" -path "*/${mod#=}/*" -name "*.ko*" -type f -printf '%f\n'
                    ) | instmods
                fi
                ;;
            --*)
                mpargs+=" $mod"
                ;;
            i2o_scsi)
                # Do not load this diagnostic-only module
                return
                ;;
            *)
                mod=${mod##*/}
                # if the module is already installed, skip it and go on
                # to the next one.
                [[ -f "${initdir:?}/.kernelmodseen/${mod%.ko}.ko" ]] && return

                # We use the '-d' option in modprobe only if the modules prefix path
                # differs from the default '/'. This allows us to use dracut with
                # an old version of modprobe which doesn't have the '-d' option.
                local mod_dirname=${mod_dir%%/lib/modules/*}
                [[ -n ${mod_dirname} ]] && mod_dirname="-d ${mod_dirname}/"

                # ok, load the module, all its dependencies, and any firmware
                # it may require
                for_each_kmod_dep install_kmod_with_fw "$mod" \
                    --set-version "$KERNEL_VER" \
                    ${mod_dirname:+"$mod_dirname"} \
                    ${mpargs:+"$mpargs"}
                ((ret+=$?))
                ;;
        esac

        return "$ret"
    }

    local mod mpargs

    if [[ $# -eq 0 ]]; then  # filenames from stdin
        while read -r mod; do
            if ! inst1mod "${mod%.ko*}" && [ "$check" = "yes" ]; then
                dfatal "Failed to install $mod"
                return 1
            fi
        done
    fi

    for mod in "$@"; do  # filenames as arguments
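        # Each argument is handled by the inst1mod() helper defined above; a
        # failure aborts the run only when the '-c' (check) flag was passed.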
inst1mod "${mod%.ko*}" && [ "$check" = "yes" ]; then dfatal "Failed to install $mod" return 1 fi done return 0 } _umount_dir() { local mountpoint="${1:?}" if mountpoint -q "$mountpoint"; then ddebug "umount $mountpoint" umount "$mountpoint" fi } # can be overridden in specific test test_setup_cleanup() { cleanup_initdir } _test_cleanup() { # (post-test) cleanup should always ignore failure and cleanup as much as possible ( set +e [[ -n "$initdir" ]] && _umount_dir "$initdir" [[ -n "$IMAGE_PUBLIC" ]] && rm -vf "$IMAGE_PUBLIC" # If multiple setups/cleans are ran in parallel, this can cause a race if [[ -n "$IMAGESTATEDIR" && $TEST_PARALLELIZE -ne 1 ]]; then rm -vf "${IMAGESTATEDIR}/default.img" fi [[ -n "$TESTDIR" ]] && rm -vfr "$TESTDIR" [[ -n "$STATEFILE" ]] && rm -vf "$STATEFILE" ) || : } # can be overridden in specific test test_cleanup() { _test_cleanup } test_cleanup_again() { [ -n "$TESTDIR" ] || return rm -rf "$TESTDIR/unprivileged-nspawn-root" [[ -n "$initdir" ]] && _umount_dir "$initdir" } test_create_image() { create_empty_image_rootdir # Create what will eventually be our root filesystem onto an overlay ( LOG_LEVEL=5 setup_basic_environment ) } test_setup() { if get_bool "${TEST_REQUIRE_INSTALL_TESTS:?}" && \ command -v meson >/dev/null && \ [[ "$(meson configure "${BUILD_DIR:?}" | grep install-tests | awk '{ print $2 }')" != "true" ]]; then dfatal "$BUILD_DIR needs to be built with -Dinstall-tests=true" exit 1 fi if [ -e "${IMAGE_PRIVATE:?}" ]; then echo "Reusing existing image $IMAGE_PRIVATE → $(realpath "$IMAGE_PRIVATE")" mount_initdir else if [ ! -e "${IMAGE_PUBLIC:?}" ]; then # default.img is the base that every test uses and optionally appends to if [ ! -e "${IMAGESTATEDIR:?}/default.img" ] || [ -n "${TEST_FORCE_NEWIMAGE:=}" ]; then # Create the backing public image, but then completely unmount # it and drop the loopback device responsible for it, since we're # going to symlink/copy the image and mount it again from # elsewhere. local image_old="${IMAGE_PUBLIC}" if [ -z "${TEST_FORCE_NEWIMAGE}" ]; then IMAGE_PUBLIC="${IMAGESTATEDIR}/default.img" fi test_create_image test_setup_cleanup umount_loopback cleanup_loopdev IMAGE_PUBLIC="${image_old}" fi if [ "${IMAGE_NAME:?}" != "default" ] && ! get_bool "${TEST_FORCE_NEWIMAGE}"; then cp -v "$(realpath "${IMAGESTATEDIR}/default.img")" "$IMAGE_PUBLIC" fi fi local hook_defined declare -f -F test_append_files >/dev/null && hook_defined=yes || hook_defined=no echo "Reusing existing cached image $IMAGE_PUBLIC → $(realpath "$IMAGE_PUBLIC")" if get_bool "$TEST_PARALLELIZE" || get_bool "$hook_defined"; then cp -v -- "$(realpath "$IMAGE_PUBLIC")" "$IMAGE_PRIVATE" else ln -sv -- "$(realpath "$IMAGE_PUBLIC")" "$IMAGE_PRIVATE" fi mount_initdir if get_bool "${TEST_SUPPORTING_SERVICES_SHOULD_BE_MASKED}"; then dinfo "Masking supporting services" mask_supporting_services fi # Send stdout/stderr of testsuite-*.service units to both journal and # console to make debugging in CIs easier # Note: we can't use a dropin for `testsuite-.service`, since that also # overrides 'sub-units' of some tests that already use a specific # value for Standard(Output|Error)= # (e.g. test/units/testsuite-66-deviceisolation.service) if ! 
get_bool "$INTERACTIVE_DEBUG"; then local dropin_dir="${initdir:?}/etc/systemd/system/testsuite-${TESTID:?}.service.d" mkdir -p "$dropin_dir" printf '[Service]\nStandardOutput=journal+console\nStandardError=journal+console' >"$dropin_dir/99-stdout.conf" fi if get_bool "$hook_defined"; then test_append_files "${initdir:?}" fi fi setup_nspawn_root } test_run() { local test_id="${1:?}" mount_initdir if ! get_bool "${TEST_NO_QEMU:=}"; then if run_qemu "$test_id"; then check_result_qemu || { echo "qemu test failed"; return 1; } else dwarn "can't run qemu, skipping" fi fi if ! get_bool "${TEST_NO_NSPAWN:=}"; then mount_initdir if run_nspawn "${initdir:?}" "$test_id"; then check_result_nspawn "$initdir" || { echo "nspawn-root test failed"; return 1; } else dwarn "can't run systemd-nspawn, skipping" fi if get_bool "${RUN_IN_UNPRIVILEGED_CONTAINER:=}"; then dir="$TESTDIR/unprivileged-nspawn-root" if NSPAWN_ARGUMENTS="-U --private-network ${NSPAWN_ARGUMENTS:-}" run_nspawn "$dir" "$test_id"; then check_result_nspawn "$dir" || { echo "unprivileged-nspawn-root test failed"; return 1; } else dwarn "can't run systemd-nspawn, skipping" fi fi fi return 0 } do_test() { if [[ $UID != "0" ]]; then echo "TEST: $TEST_DESCRIPTION [SKIPPED]: not root" >&2 exit 0 fi if get_bool "${TEST_NO_QEMU:=}" && get_bool "${TEST_NO_NSPAWN:=}"; then echo "TEST: $TEST_DESCRIPTION [SKIPPED]: both qemu and nspawn disabled" >&2 exit 0 fi if get_bool "${TEST_QEMU_ONLY:=}" && ! get_bool "$TEST_NO_NSPAWN"; then echo "TEST: $TEST_DESCRIPTION [SKIPPED]: qemu-only tests requested" >&2 exit 0 fi if get_bool "${TEST_PREFER_NSPAWN:=}" && ! get_bool "$TEST_NO_NSPAWN"; then TEST_NO_QEMU=1 fi # Detect lib paths [[ "$libdir" ]] || for libdir in /lib64 /lib; do [[ -d $libdir ]] && libdirs+=" $libdir" && break done [[ "$usrlibdir" ]] || for usrlibdir in /usr/lib64 /usr/lib; do [[ -d $usrlibdir ]] && libdirs+=" $usrlibdir" && break done mkdir -p "$STATEDIR" import_testdir import_initdir if [ -n "${SUDO_USER}" ]; then ddebug "Making ${TESTDIR:?} readable for ${SUDO_USER} (acquired from sudo)" setfacl -m "user:${SUDO_USER:?}:r-X" "${TESTDIR:?}" fi testname="$(basename "$PWD")" while (($# > 0)); do case $1 in --run) echo "${testname} RUN: $TEST_DESCRIPTION" test_run "$TESTID" ret=$? if [ $ret -eq 0 ]; then echo "${testname} RUN: $TEST_DESCRIPTION [OK]" else echo "${testname} RUN: $TEST_DESCRIPTION [FAILED]" fi exit $ret ;; --setup) echo "${testname} SETUP: $TEST_DESCRIPTION" test_setup test_setup_cleanup ;; --clean) echo "${testname} CLEANUP: $TEST_DESCRIPTION" test_cleanup ;; --clean-again) echo "${testname} CLEANUP AGAIN: $TEST_DESCRIPTION" test_cleanup_again ;; --all) ret=0 echo -n "${testname}: $TEST_DESCRIPTION " # Do not use a subshell, otherwise cleanup variables (LOOPDEV) will be lost # and loop devices will leak test_setup "$TESTLOG" 2>&1 || ret=$? if [ $ret -eq 0 ]; then test_setup_cleanup >"$TESTLOG" 2>&1 || ret=$? fi if [ $ret -eq 0 ]; then test_run "$TESTID" >"$TESTLOG" 2>&1 || ret=$? fi test_cleanup if [ $ret -eq 0 ]; then rm "$TESTLOG" echo "[OK]" else echo "[FAILED]" echo "see $TESTLOG" fi exit $ret ;; *) break ;; esac shift done }