# NOTE(review): FILE CORRUPTION — this library (rpm-ostree's virtualized-test
# helpers, normally tests/common/libvm.sh) has had its newlines collapsed:
# many logical source lines are fused onto single physical lines. Since a
# shell '#' comment runs to end-of-line, the first inline '#' on each fused
# line comments out everything after it, so almost none of this code can
# actually execute as written. Every heredoc is also broken (the '<<EOF'
# operators and terminators are fused inline, and several heredoc BODIES are
# missing entirely: the generated ssh-config, the sysroot-rw remount snippet
# in vm_shell_inline, the gpgcheck stanza in vm_send_test_repo, and the
# vm_ostreeupdate_prepare_repo / vm_change_update_policy scripts).
# Do not attempt to "fix" this by reflowing — the lost heredoc content cannot
# be recovered from this text. Restore the file from upstream version control.
# The original bytes are preserved verbatim below; the added comment lines
# describe what each fused physical line contains.
#
# Line below: LGPLv2+ license header; sourcing of ${commondir}/libtest.sh when
# not already loaded; vm_setup() (exports VM/SSH_CONFIG/SSHOPTS/SSH/SCP using
# an ssh ControlMaster socket under /dev/shm, honoring an optional ssh-config
# via -F); and the start of vm_kola_spawn() (spawns a fresh VM via `kola
# spawn`, locating a qcow2 image in ${topsrcdir}/.cosa or via COSA_DIR).
# The trailing "if [ ! " is cut off mid-test by the line mangling.
# Source library for installed virtualized shell script tests # # Copyright (C) 2016 Jonathan Lebon # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the # Free Software Foundation, Inc., 59 Temple Place - Suite 330, # Boston, MA 02111-1307, USA. if test -z "${LIBTEST_SH:-}"; then . ${commondir}/libtest.sh fi # prepares the VM and library for action vm_setup() { export VM=${VM:-vmcheck} export SSH_CONFIG=${SSH_CONFIG:-${topsrcdir}/ssh-config} SSHOPTS="-o User=root -o ControlMaster=auto \ -o ControlPath=/dev/shm/ssh-$VM-$(date +%s%N).sock \ -o ControlPersist=yes" # If we're provided with an ssh-config, make sure we tell # ssh to pick it up. if [ -f "${SSH_CONFIG}" ]; then SSHOPTS="${SSHOPTS} -F ${SSH_CONFIG}" fi export SSHOPTS export SSH="ssh ${SSHOPTS} $VM" export SCP="scp ${SSHOPTS}" } # prepares a fresh VM for action via `kola spawn` vm_kola_spawn() { local outputdir=$1; shift exec 4> info.json mkdir kola-ssh local test_image if test -d ${topsrcdir}/.cosa; then test_image=$(echo ${topsrcdir}/.cosa/*.qcow2) else if test -n "${COSA_DIR:-}"; then test_image=$(cd ${COSA_DIR} && cosa meta --image-path qemu) fi fi if test -z "${test_image}"; then fatal "failed to find .cosa/*.qcow2 or COSA_DIR" fi if [ ! 
# Line below: remainder of vm_kola_spawn() — image-existence check, background
# `kola spawn` under setpriv --pdeathsig, busy-wait on info.json, parsing of
# public_ip via jq into ssh_ip/ssh_port. "cat > ssh-config <" is a truncated
# heredoc whose body (the generated ssh-config contents) is MISSING; the text
# then jumps mid-stream into vm_send()'s tail ("> ${f} vm_send ${f} ${1}"),
# whose definition header was swallowed by the same truncation. Also contains
# vm_shell_inline() (stages a "set -xeuo pipefail" script; the 'sysroot-rw'
# heredoc body is MISSING), vm_shell_inline_sysroot_rw(), vm_cmd_sysroot_rw(),
# vm_raw_rsync() (rsync -e "ssh -o User=root" [-F ssh-config]), and the start
# of vm_rsync() (syncs ${topsrcdir} to the VM unless .vagrant/using_sshfs).
-e "$test_image" ]; then if [ -L "$test_image" ]; then echo "$test_image is an invalid symlink" >&3 else echo "Cannot find $test_image" >&3 fi exit 1 fi setpriv --pdeathsig SIGKILL -- \ env MANTLE_SSH_DIR="$PWD/kola-ssh" kola spawn -p qemu-unpriv \ --qemu-image "$test_image" -v --idle \ --json-info-fd 4 --output-dir "$outputdir" & # hack; need cleaner API for async kola spawn while [ ! -s info.json ]; do sleep 1; done local ssh_ip_port ssh_ip ssh_port ssh_ip_port=$(jq -r .public_ip info.json) ssh_ip=${ssh_ip_port%:*} ssh_port=${ssh_ip_port#*:} cat > ssh-config < ${f} vm_send ${f} ${1} rm -f ${f} } # Takes on its stdin a shell script to run on the node. The special positional # argument `sysroot-rw` indicates that the script needs rw access to /sysroot. vm_shell_inline() { local script=$(mktemp -p $PWD) echo "set -xeuo pipefail" > ${script} if [ "${1:-}" = 'sysroot-rw' ]; then cat >> ${script} <> ${script} vm_send ${script} /tmp/$(basename ${script}) rm -f ${script} vm_cmd bash /tmp/$(basename ${script}) } # Shorthand for `vm_shell_inline sysroot-rw`. vm_shell_inline_sysroot_rw() { vm_shell_inline sysroot-rw } # Like `vm_cmd`, but for commands which need rw access to /sysroot vm_cmd_sysroot_rw() { vm_shell_inline_sysroot_rw <<< "$@" } # rsync wrapper that sets up authentication vm_raw_rsync() { local rsyncopts="ssh -o User=root" if [ -f "${SSH_CONFIG}" ]; then rsyncopts="$rsyncopts -F '${SSH_CONFIG}'" fi rsync -az --no-owner --no-group -e "$rsyncopts" "$@" } vm_rsync() { if ! test -f .vagrant/using_sshfs; then pushd ${topsrcdir} vm_raw_rsync --delete --exclude target/ --exclude .git/ . 
# Line below: tail of vm_rsync() (destination $VM:/var/roothome/sync), then
# vm_cmd_as() (ssh as a non-root user, deliberately NOT reusing root's
# ControlPath), vm_cmd() ($SSH "$@"), vm_clean_caches(), vm_rpmostree() (runs
# rpm-ostree with ASAN leak detection disabled), vm_send_test_repo() (rsyncs
# ${test_tmpdir}/yumrepo and writes vmcheck.repo — the "<< EOF [test-repo]…"
# heredoc is fused inline and the gpgcheck branch's heredoc body is MISSING),
# and most of vm_ssh_wait() (countdown loop comparing boot_id to detect a
# reboot, logging every 5s, ending in `false "Timed out…"`; its closing brace
# is on the next physical line).
$VM:/var/roothome/sync popd fi } # run command in vm as user # - $1 username # - $@ command to run vm_cmd_as() { local user=$1; shift # don't reuse root's ControlPath local sshopts="-o User=$user" if [ -f "${SSH_CONFIG}" ]; then sshopts="$sshopts -F ${SSH_CONFIG}" fi ssh $sshopts $VM "$@" } # run command in vm # - $@ command to run vm_cmd() { $SSH "$@" } # Delete anything which we might change between runs vm_clean_caches() { vm_cmd rm /ostree/repo/refs/heads/rpmostree/pkg/* -rf } # run rpm-ostree in vm # - $@ args vm_rpmostree() { vm_cmd env ASAN_OPTIONS=detect_leaks=false rpm-ostree "$@" } # copy the test repo to the vm # $1 - repo file mode: nogpgcheck (default), gpgcheck, skip (don't send) vm_send_test_repo() { mode=${1:-nogpgcheck} # note we use -c here because we might be called twice within a second vm_raw_rsync -c --delete ${test_tmpdir}/yumrepo $VM:/var/tmp/vmcheck if [[ $mode == skip ]]; then return fi cat > vmcheck.repo << EOF [test-repo] name=test-repo baseurl=file:///var/tmp/vmcheck/yumrepo EOF if [[ $mode == gpgcheck ]]; then cat >> vmcheck.repo <> vmcheck.repo fi vm_send vmcheck.repo /etc/yum.repos.d } # wait until ssh is available on the vm # - $1 timeout in second (optional) # - $2 previous bootid (optional) vm_ssh_wait() { local timeout=${1:-0}; shift local old_bootid=${1:-}; shift if ! vm_cmd true; then echo "Failed to log into VM, retrying with debug:" $SSH -o LogLevel=debug true || true fi while [ $timeout -gt 0 ]; do if bootid=$(vm_get_boot_id 2>/dev/null); then if [[ $bootid != $old_bootid ]]; then # if this is a reboot, display some info about new boot if [ -n "$old_bootid" ]; then vm_rpmostree status vm_rpmostree --version fi return 0 fi fi if test $(($timeout % 5)) == 0; then echo "Still failed to log into VM, retrying for $timeout seconds" fi timeout=$((timeout - 1)) sleep 1 done false "Timed out while waiting for SSH." 
# Line below: closing brace of vm_ssh_wait(); vm_get_boot_id() (reads
# /proc/sys/kernel/random/boot_id); vm_reboot_cmd() (sync, run the rebooting
# command, then vm_ssh_wait 120 with the old boot id); vm_reboot();
# vm_has_files()/vm_has_packages() existence checks; vm_get_deployment_info()
# (pipes `rpm-ostree status --json` through an embedded python3 -c script that
# resolves index -1 to the booted deployment and prints the requested key,
# joining list values with spaces — note the shell interpolates $idx/$key
# directly into the python source); vm_get_deployment_root();
# vm_get_booted_deployment_info(); vm_get_layered_packages();
# vm_get_requested_packages(); and the start of vm_get_local_packages(),
# whose argument continues on the next physical line.
} vm_get_boot_id() { vm_cmd cat /proc/sys/kernel/random/boot_id } # Run a command in the VM that will cause a reboot vm_reboot_cmd() { vm_cmd sync local bootid=$(vm_get_boot_id 2>/dev/null) vm_cmd "$@" || : vm_ssh_wait 120 $bootid } # reboot the vm vm_reboot() { vm_reboot_cmd systemctl reboot } # check that the given files/dirs exist on the VM # - $@ files/dirs to check for vm_has_files() { for file in "$@"; do if ! vm_cmd test -e $file; then return 1 fi done } # check that the packages are installed # - $@ packages to check for vm_has_packages() { for pkg in "$@"; do if ! vm_cmd rpm -q $pkg; then return 1 fi done } # retrieve info from a deployment # - $1 index of deployment (or -1 for booted) # - $2 key to retrieve vm_get_deployment_info() { local idx=$1 local key=$2 vm_rpmostree status --json | \ python3 -c " import sys, json deployments = json.load(sys.stdin)[\"deployments\"] idx = $idx if idx < 0: for i, depl in enumerate(deployments): if depl[\"booted\"]: idx = i if idx < 0: print(\"Failed to determine currently booted deployment\") exit(1) if idx >= len(deployments): print(\"Deployment index $idx is out of range\") exit(1) depl = deployments[idx] if \"$key\" in depl: data = depl[\"$key\"] if type(data) is list: print(\" \".join(data)) else: print(data) " } # retrieve the deployment root # - $1 index of deployment vm_get_deployment_root() { local idx=$1 local csum=$(vm_get_deployment_info $idx checksum) local serial=$(vm_get_deployment_info $idx serial) local osname=$(vm_get_deployment_info $idx osname) echo /ostree/deploy/$osname/deploy/$csum.$serial } # retrieve info from the booted deployment # - $1 key to retrieve vm_get_booted_deployment_info() { vm_get_deployment_info -1 $1 } # print the layered packages vm_get_layered_packages() { vm_get_booted_deployment_info packages } # print the requested packages vm_get_requested_packages() { vm_get_booted_deployment_info requested-packages } vm_get_local_packages() { vm_get_booted_deployment_info 
# Line below: tail of vm_get_local_packages() ("requested-local-packages");
# the vm_has_layered_packages / vm_has_requested_packages /
# vm_has_local_packages substring-membership checks (note: `*$pkg*` matches
# substrings, so "foo" also matches "foobar" — preexisting looseness);
# vm_has_dormant_packages(); vm_get_booted_stateroot(); vm_get_booted_csum();
# vm_get_pending_csum(); and the first half of vm_assert_layered_pkg(), which
# runs the four checks under `set +e` and folds them into pkg_present /
# pkg_absent status variables — `set -e` is restored on the next physical line.
requested-local-packages } # check that the packages are currently layered # - $@ packages to check for vm_has_layered_packages() { local pkgs=$(vm_get_layered_packages) for pkg in "$@"; do if [[ " $pkgs " != *$pkg* ]]; then return 1 fi done } # check that the packages are currently requested # - $@ packages to check for vm_has_requested_packages() { local pkgs=$(vm_get_requested_packages) for pkg in "$@"; do if [[ " $pkgs " != *$pkg* ]]; then return 1 fi done } vm_has_local_packages() { local pkgs=$(vm_get_local_packages) for pkg in "$@"; do if [[ " $pkgs " != *$pkg* ]]; then return 1 fi done } vm_has_dormant_packages() { vm_has_requested_packages "$@" && \ ! vm_has_layered_packages "$@" } vm_get_booted_stateroot() { vm_get_booted_deployment_info osname } # retrieve the checksum of the currently booted deployment vm_get_booted_csum() { vm_get_booted_deployment_info checksum } # retrieve the checksum of the pending deployment vm_get_pending_csum() { vm_get_deployment_info 0 checksum } # make multiple consistency checks on a test pkg # - $1 package to check for # - $2 either "present" or "absent" vm_assert_layered_pkg() { local pkg=$1; shift local policy=$1; shift set +e vm_has_packages $pkg; pkg_in_rpmdb=$? vm_has_layered_packages $pkg; pkg_is_layered=$? vm_has_local_packages $pkg; pkg_is_layered_local=$? vm_has_requested_packages $pkg; pkg_is_requested=$? [ $pkg_in_rpmdb == 0 ] && \ ( ( [ $pkg_is_layered == 0 ] && [ $pkg_is_requested == 0 ] ) || [ $pkg_is_layered_local == 0 ] ); pkg_present=$? [ $pkg_in_rpmdb != 0 ] && \ [ $pkg_is_layered != 0 ] && \ [ $pkg_is_layered_local != 0 ] && \ [ $pkg_is_requested != 0 ]; pkg_absent=$? 
# Line below: tail of vm_assert_layered_pkg() (asserts present/absent per
# policy, dumping `rpm-ostree status` on failure); vm_assert_status_jq()
# (status --json + assert_jq); vm_pending_is_staged() (jq -e on
# .deployments[0].staged); vm_build_rpm / vm_uinfo / vm_build_rpm_repo_mode /
# vm_build_selinux_rpm wrappers that rebuild and re-send the test yum repo;
# vm_get_journal_cursor(); vm_wait_content_after_cursor() — its
# "vm_shell_inline <" is a truncated "<<EOF" heredoc (a 60s polling loop over
# `journalctl --after-cursor`; the EOF terminator is fused inline so the
# heredoc cannot parse); vm_get_journal_after_cursor() (extra quoting layer
# for ssh); vm_assert_journal_has_content(); and the start of
# vm_run_container() — its "usage: --" comment lost the
# "<args> -- <container args>" part and the body resumes on the next line.
set -e if [ $policy == present ] && [ $pkg_present != 0 ]; then vm_cmd rpm-ostree status assert_not_reached "pkg $pkg is not present" fi if [ $policy == absent ] && [ $pkg_absent != 0 ]; then vm_cmd rpm-ostree status assert_not_reached "pkg $pkg is not absent" fi } # Takes a list of `jq` expressions, each of which should evaluate to a boolean, # and asserts that they are true. vm_assert_status_jq() { vm_rpmostree status --json > status.json assert_jq status.json "$@" } vm_pending_is_staged() { vm_rpmostree status --json > status-staged.json local rc=1 if jq -e ".deployments[0][\"staged\"]" < status-staged.json; then rc=0 fi rm -f status-staged.json return $rc } # Like build_rpm, but also sends it to the VM vm_build_rpm() { build_rpm "$@" vm_send_test_repo } # Like uinfo_cmd, but also sends it to the VM vm_uinfo() { uinfo_cmd "$@" vm_send_test_repo } # Like vm_build_rpm but takes a yumrepo mode vm_build_rpm_repo_mode() { mode=$1; shift build_rpm "$@" vm_send_test_repo $mode } vm_build_selinux_rpm() { build_selinux_rpm "$@" vm_send_test_repo } vm_get_journal_cursor() { vm_cmd journalctl -o json -n 1 | jq -r '.["__CURSOR"]' } # Wait for a message logged after $cursor matching a regexp to appear # $1 - cursor # $2 - regex to wait for vm_wait_content_after_cursor() { from_cursor=$1; shift regex=$1; shift vm_shell_inline < \${tmpf} if grep -q -e "${regex}" \${tmpf}; then exit 0 else cat \${tmpf} sleep 1 fi done echo "timed out after 60s" 1>&2 journalctl -u rpm-ostreed --after-cursor "${from_cursor}" | tail -100 exit 1 EOF } # Minor helper that makes sure to get quoting right vm_get_journal_after_cursor() { from_cursor=$1; shift to_file=$1; shift # add an extra helping of quotes for hungry ssh vm_cmd journalctl --after-cursor "'$from_cursor'" > $to_file } vm_assert_journal_has_content() { from_cursor=$1; shift vm_get_journal_after_cursor $from_cursor tmp-journal.txt assert_file_has_content tmp-journal.txt "$@" rm -f tmp-journal.txt } # usage: -- vm_run_container() { local 
# Line below: body of vm_run_container() (collects podman args up to "--",
# shares /var/cache/dnf, runs quay.io/fedora/fedora:32-x86_64);
# vm_start_httpd() / vm_stop_httpd() (python3 -m http.server in a privileged
# container, with an ERR trap + `set -E` for cleanup; note the readiness poll
# hardcodes port 8888 rather than using $port — preexisting quirk);
# vm_ostreeupdate_prepare_repo() — its "vm_shell_inline_sysroot_rw <" is a
# truncated heredoc whose body (ostree repo init/config under
# /ostree/repo/tmp/vmcheck-remote) is MISSING, and the text resumes
# mid-function at "cat > ostree-remote-config"-era content ("> status.txt");
# vm_change_update_policy() — another truncated heredoc (the sed edit of
# /etc/rpm-ostreed.conf is partially lost); and a top-level comment block
# introducing the server-history-building APIs.
podman_args= while [ $# -ne 0 ]; do local arg=$1; shift if [[ $arg == -- ]]; then break fi podman_args="$podman_args $arg" done [ $# -ne 0 ] || fatal "No container args provided" # just automatically always share dnf cache so we don't redownload each time # (use -n so this ssh invocation doesn't consume stdin) vm_cmd -n mkdir -p /var/cache/dnf vm_cmd podman run --rm -v /var/cache/dnf:/var/cache/dnf:z $podman_args \ quay.io/fedora/fedora:32-x86_64 "$@" } # $1 - service name # $2 - dir to serve # $3 - port to serve on vm_start_httpd() { local name=$1; shift local dir=$1; shift local port=$1; shift vm_cmd podman rm -f $name || true vm_run_container --net=host -d --name $name --privileged \ -v $dir:/srv --workdir /srv -- \ python3 -m http.server $port # NB: the EXIT trap is used by libtest, but not the ERR trap trap "vm_stop_httpd $name" ERR set -E # inherit trap # Ideally systemd-run would support .socket units or something vm_cmd 'while ! curl --head http://127.0.0.1:8888 &>/dev/null; do sleep 1; done' } # $1 - service name vm_stop_httpd() { local name=$1; shift vm_cmd podman rm -f $name set +E trap - ERR } # start up an ostree server to be used as an http remote vm_ostreeupdate_prepare_repo() { # Really testing this like a user requires a remote ostree server setup. # Let's start by setting up the repo. REMOTE_OSTREE=/ostree/repo/tmp/vmcheck-remote vm_shell_inline_sysroot_rw < status.txt assert_file_has_content_literal status.txt 'AutomaticUpdates: disabled' # start it up again since we rebooted vm_start_httpd ostree_server $REMOTE_OSTREE 8888 } vm_change_update_policy() { policy=$1; shift vm_shell_inline < /etc/rpm-ostreed.conf rpm-ostree reload EOF } # APIs to build up a history on the server. Rather than wasting time # composing trees for real, we just use client package layering to create new # trees that we then "lift" into the server before cleaning them up client-side. 
# NOTE(review): FILE CORRUPTION continues in this span (see upstream
# tests/common/libvm.sh to restore): logical lines are fused onto two physical
# lines, so the leading '#' comments below swallow the entire remainder of
# each physical line — none of this code currently parses as code. Bytes are
# preserved verbatim; the added comments describe the fused contents.
#
# Line below: vm_ostreeupdate_lift_commit() (pull-local a system-repo commit
# into $REMOTE_OSTREE and re-tag it as branch vmcheck_tmp/$name, since ostree
# has no tags); _commit_and_inject_pkglist() (generate-synthetic-upgrade at 5%
# onto the vmcheck ref, then inject-pkglist via vm_cmd_sysroot_rw);
# vm_ostreeupdate_create() / vm_ostreeupdate_create_noop() version helpers;
# vm_ostree_repo_commit_layered_as_base() — its
# "vm_shell_inline_sysroot_rw </dev/null EOF" is a truncated heredoc whose
# body (the ostree checkout/commit sequence through $repo/tmp/vmcheck_commit.tmp)
# is MISSING; vm_ostree_commit_layered_as_base() (same against /ostree/repo);
# and the start of vm_status_watch_start() (background loop appending
# `rpm-ostree status` to status-watch.txt every second, PID recorded in
# _status_watch_pid; its ERR trap and closing brace are on the next line).
# steal a commit from the system repo and tag it as a new version vm_ostreeupdate_lift_commit() { checksum=$1; shift # ostree doesn't support tags, so just shove it in a branch branch=vmcheck_tmp/$1; shift vm_cmd ostree pull-local --repo=$REMOTE_OSTREE --disable-fsync \ /ostree/repo $checksum vm_cmd ostree --repo=$REMOTE_OSTREE refs $branch --delete vm_cmd ostree --repo=$REMOTE_OSTREE refs $checksum --create=$branch } _commit_and_inject_pkglist() { local version=$1; shift local src_ref=$1; shift # Small percentage by default here; unshare to create a new mount namespace to make /sysroot writable vm_cmd unshare -m rpm-ostree testutils generate-synthetic-upgrade --percentage=5 --repo=$REMOTE_OSTREE --ref=vmcheck \ --srcref=$src_ref --commit-version=$version vm_cmd_sysroot_rw rpm-ostree testutils inject-pkglist $REMOTE_OSTREE vmcheck } # use a previously stolen commit to create an update on our vmcheck branch, # complete with version string and pkglist metadata vm_ostreeupdate_create() { version=$1; shift _commit_and_inject_pkglist $version vmcheck_tmp/$version } # create a new no-op update with version metadata $1 vm_ostreeupdate_create_noop() { version=$1; shift _commit_and_inject_pkglist $version vmcheck } # takes a layered commit, and makes it into a base vm_ostree_repo_commit_layered_as_base() { local repo=$1; shift local from_rev=$1; shift local to_ref=$1; shift local d=$repo/tmp/vmcheck_commit.tmp rm -rf $d vm_shell_inline_sysroot_rw </dev/null EOF } vm_ostree_commit_layered_as_base() { vm_ostree_repo_commit_layered_as_base /ostree/repo "$@" } vm_status_watch_start() { rm -rf status-watch.txt while sleep 1; do vm_rpmostree status >> status-watch.txt done & _status_watch_pid=$! 
# Line below: tail of vm_status_watch_start() (installs `kill $_status_watch_pid`
# as an ERR trap with `set -E` so the trap is inherited by functions, mirroring
# vm_start_httpd) and vm_status_watch_check() (asserts a watcher is running,
# kills it, clears the trap via `set +E`, then asserts status-watch.txt
# contains the expected literal content and removes it).
# NB: the EXIT trap is used by libtest, but not the ERR trap trap "kill $_status_watch_pid" ERR set -E # inherit trap } vm_status_watch_check() { [ -n "${_status_watch_pid:-}" ] kill $_status_watch_pid _status_watch_pid= set +E [ -f status-watch.txt ] assert_file_has_content_literal status-watch.txt "$@" rm -rf status-watch.txt }