Rework vmcheck to use kola spawn, move off of PAPR

There's a lot going on here, but essentially:

1. We change the `vmcheck` model so that it always operates on an
   immutable base image. It takes that image and dynamically launches a
   separate VM for each test using `kola spawn`. This means we can drop
   a lot of hacks around re-using the same VMs.
2. Following from 1., `vmoverlay` now takes as input a base image,
   overlays the built rpm-ostree bits, then creates a new base image. Of
   course, we don't have to do this in CI, because we build FCOS with
   the freshly built RPMs (so it uses `SKIP_VMOVERLAY=1`). `vmoverlay`
   then will be more for the developer case where one doesn't want to
   iterate via `cosa build` to test rpm-ostree changes. I say "will"
   because the functionality doesn't exist yet; I'd like to enhance
   `cosa dev-overlay` to do this. (Note `vmsync` should still work just
   as before too.)
3. `vmcheck` can be run without building the tree first, as
   `tests/vmcheck.sh`. The `make vmcheck` target still exists though for
   finger compatibility and better meshing with `vmoverlay` in the
   developer case (see the invocation sketch just after this list).
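
To make the split concrete, here's a rough sketch of the two ways to drive
this (image paths are illustrative; `VMIMAGE` and `SKIP_VMOVERLAY` come from
the Makefile and `overlay.sh` changes below, and the developer path depends
on the `cosa dev-overlay` work described above as not existing yet):

```sh
# CI-style: FCOS was already built with the freshly built RPMs, so just
# point the tests at that image and run the suite directly
fcos=$(ls builds/latest/*/*.qcow2)
ln -sf "$(realpath "${fcos}")" tests/vmcheck/image.qcow2
tests/vmcheck.sh

# developer case: overlay the locally built rpm-ostree bits onto a base
# image first (via the vmoverlay target), then run the tests on the result
VMIMAGE=/path/to/base.qcow2 make vmcheck
```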

What's really nice about using kola spawn is that it takes care of a lot
of things for us, such as the qemu command, journal and console
gathering, and SSH.

As with the compose testsuites, we use `parallel` here to run multiple
vmcheck tests at once. (On developer laptops, we cap parallelism at
`$(nproc) - 1`.)
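
For example, test selection and parallelism are driven by environment
variables (test names below are illustrative; the knobs come from
`tests/vmcheck.sh` and `tests/vmcheck/runtest.sh` further down):

```sh
# run just a couple of tests, four at a time, streaming verbose output
TESTS='basic db' NHOSTS=4 V=1 tests/vmcheck.sh

# on failure, keep the spawned VM around for interactive debugging
VMCHECK_DEBUG=1 TESTS=basic tests/vmcheck.sh
```
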
Jonathan Lebon 2019-12-13 12:23:41 -05:00 committed by OpenShift Merge Robot
parent d2a4372d4d
commit c7a9c3b1dd
15 changed files with 250 additions and 581 deletions

@@ -77,41 +77,38 @@ stage("Build FCOS") {
}
}
/*
stage("Test") {
parallel vmcheck: {
coreos.pod(image: 'quay.io/coreos-assembler/coreos-assembler:latest', runAsUser: 0, kvm: true) {
stage("Run vmcheck") {
def nhosts = 6
def mem = (nhosts * 1024) + 512
coreos.pod(image: COSA_IMAGE, runAsUser: 0, kvm: true, memory: "${mem}Mi", cpu: "${nhosts}") {
checkout scm
unstash 'rpms'
sh """
set -euo pipefail
ci/installdeps.sh # really, we just need test deps, but meh...
# install our built rpm-ostree
find packaging/ ! -name '*.src.rpm' -name '*.rpm' | xargs dnf install -y
rm -rf packaging
"""
unstash 'fcos'
sh """
set -euo pipefail
echo "standing up VMs"
find builds/ -name '*.qcow2'
"""
try {
timeout(time: 30, unit: 'MINUTES') {
sh """
set -xeuo pipefail
fcos=\$(ls builds/latest/*/*.qcow2) # */
ln -sf "\$(realpath \${fcos})" tests/vmcheck/image.qcow2
NHOSTS=${nhosts} tests/vmcheck.sh
"""
}
} finally {
sh """
if [ -d vmcheck-logs ]; then
tar -C vmcheck-logs -cf- . | xz -c9 > vmcheck-logs.tar.xz
fi
"""
archiveArtifacts allowEmptyArchive: true, artifacts: 'vmcheck-logs.tar.xz'
}
}
},
compose: {
coreos.pod(image: 'quay.io/coreos-assembler/coreos-assembler:latest', runAsUser: 0, kvm: true) {
checkout scm
unstash 'rpms'
sh """
set -euo pipefail
# install our built rpm-ostree
find packaging/ ! -name '*.src.rpm' -name '*.rpm' | xargs dnf install -y
rm -rf packaging
echo "starting compose tests in supermin"
"""
}
}}
*/
}

@@ -1,37 +1,7 @@
context: f29-primary
cluster:
hosts:
- name: vmcheck1
distro: fedora/29/atomic
- name: vmcheck2
distro: fedora/29/atomic
- name: vmcheck3
distro: fedora/29/atomic
container:
image: registry.fedoraproject.org/fedora:29
env:
HOSTS: vmcheck1 vmcheck2 vmcheck3
# TODO use -fsanitize=address
CFLAGS: '-fsanitize=undefined -fsanitize-undefined-trap-on-error -O2 -Wp,-D_FORTIFY_SOURCE=2'
ASAN_OPTIONS: 'detect_leaks=0' # Right now we're not fully clean, but this gets us use-after-free etc
tests:
- ci/build-check.sh
- ci/vmcheck-provision.sh
- make vmcheck
# make sure we're aware of any tests that were skipped
- "grep -nr '^SKIP: ' vmcheck/ || :"
timeout: 60m
artifacts:
- test-suite.log
- config.log
- vmcheck
---
branches:
- master
- auto
- try
# NB: when bumping 29 here, also bump compose script

@@ -18,7 +18,7 @@ if BUILDOPT_ASAN
AM_TESTS_ENVIRONMENT += BUILDOPT_ASAN=yes ASAN_OPTIONS=detect_leaks=false
endif
GITIGNOREFILES += ssh-config ansible-inventory.yml vmcheck/ test-compose-logs/
GITIGNOREFILES += ssh-config ansible-inventory.yml vmcheck-logs/ test-compose-logs/ tests/vmcheck/image.qcow2
testbin_cppflags = $(AM_CPPFLAGS) -I $(srcdir)/src/lib -I $(srcdir)/src/libpriv -I $(srcdir)/libglnx -I $(srcdir)/tests/common
testbin_cflags = $(AM_CFLAGS) -fvisibility=hidden $(PKGDEP_RPMOSTREE_CFLAGS)
@@ -74,8 +74,6 @@ check-local:
.PHONY: vmsync vmoverlay vmcheck testenv
HOSTS ?= "vmcheck"
vmsync:
@set -e; if [ -z "$(SKIP_INSTALL)" ]; then \
env $(BASE_TESTS_ENVIRONMENT) ./tests/vmcheck/install.sh; \
@@ -83,19 +81,16 @@ vmsync:
env $(BASE_TESTS_ENVIRONMENT) ./tests/vmcheck/sync.sh
vmoverlay:
@set -e; if [ -z "$(SKIP_VMOVERLAY)" ]; then \
if [ -z "$(SKIP_INSTALL)" ]; then \
env $(BASE_TESTS_ENVIRONMENT) ./tests/vmcheck/install.sh; \
fi; \
echo -n "$(HOSTS)" | xargs -P 0 -n 1 -d ' ' -I {} \
env $(BASE_TESTS_ENVIRONMENT) VM={} \
./tests/vmcheck/overlay.sh; \
fi
@set -e; \
if [ -z "$(SKIP_INSTALL)" ] && [ -z "$(SKIP_VMOVERLAY)" ]; then \
env $(BASE_TESTS_ENVIRONMENT) ./tests/vmcheck/install.sh; \
fi; \
env $(BASE_TESTS_ENVIRONMENT) ./tests/vmcheck/overlay.sh;
# set up test environment to somewhat resemble uninstalled tests
# One can run the vmcheck.sh script directly. The make target is useful for local
# development so that e.g. we automatically overlay.
vmcheck: vmoverlay
@env VMTESTS=1 $(BASE_TESTS_ENVIRONMENT) PYTHONUNBUFFERED=1 \
tests/vmcheck/multitest.py $(HOSTS)
@tests/vmcheck.sh
testenv:
@echo "===== ENTERING TESTENV ====="

@@ -52,7 +52,9 @@ _cleanup_tmpdir () {
# Create a tmpdir if we're running as a local test (i.e. through `make check`)
# or as a `vmcheck` test, which also needs some scratch space on the host.
if ( test -n "${UNINSTALLEDTESTS:-}" || test -n "${VMTESTS:-}" ) && ! test -f $PWD/.test; then
test_tmpdir=$(mktemp -d test.XXXXXX)
# Use --tmpdir to keep it in /tmp. This also keeps paths short; this is
# important if we want to create UNIX sockets under there.
test_tmpdir=$(mktemp -d test.XXXXXX --tmpdir)
touch ${test_tmpdir}/.test
trap _cleanup_tmpdir EXIT
cd ${test_tmpdir}

@@ -19,7 +19,6 @@
# prepares the VM and library for action
vm_setup() {
export VM=${VM:-vmcheck}
export SSH_CONFIG=${SSH_CONFIG:-${topsrcdir}/ssh-config}
SSHOPTS="-o User=root -o ControlMaster=auto \
@@ -37,6 +36,62 @@ vm_setup() {
export SCP="scp ${SSHOPTS}"
}
# prepares a fresh VM for action via `kola spawn`
vm_kola_spawn() {
local outputdir=$1; shift
exec 4> info.json
mkdir kola-ssh
setpriv --pdeathsig SIGKILL -- \
env MANTLE_SSH_DIR="$PWD/kola-ssh" kola spawn -p qemu-unpriv \
--qemu-image "${topsrcdir}/tests/vmcheck/image.qcow2" -v --idle \
--json-info-fd 4 --output-dir "$outputdir" &
# hack; need cleaner API for async kola spawn
while [ ! -s info.json ]; do sleep 1; done
local ssh_ip_port ssh_ip ssh_port
ssh_ip_port=$(jq -r .public_ip info.json)
ssh_ip=${ssh_ip_port%:*}
ssh_port=${ssh_ip_port#*:}
cat > ssh-config <<EOF
Host vmcheck
HostName ${ssh_ip}
Port ${ssh_port}
StrictHostKeyChecking no
UserKnownHostsFile /dev/null
EOF
SSH_CONFIG=$PWD/ssh-config
# XXX: should just have kola output the path to the socket
SSH_AUTH_SOCK=$(ls kola-ssh/agent.*)
export SSH_CONFIG SSH_AUTH_SOCK
# Hack around kola's Ignition config only setting up the core user; but we
# want to be able to ssh directly as root. We still want all the other goodies
# that kola injects in its Ignition config though, so we don't want to
# override it. `cosa run`'s merge semantics would do nicely.
ssh -o User=core -F "${SSH_CONFIG}" vmcheck 'sudo cp -RT {/home/core,/root}/.ssh'
vm_setup
# XXX: hack around https://github.com/systemd/systemd/issues/14328
vm_cmd systemctl mask --now systemd-logind
# Some tests expect the ref to be on `vmcheck`. We should drop that
# requirement, but for now let's just mangle the origin
local deployment_root
vm_cmd ostree refs --create vmcheck "$(vm_get_booted_csum)"
deployment_root=$(vm_get_deployment_root 0)
vm_cmd "sed -ie '/^refspec=/ s/=.*/=vmcheck/' ${deployment_root}.origin"
vm_cmd "sed -ie '/^baserefspec=/ s/=.*/=vmcheck/' ${deployment_root}.origin"
vm_cmd systemctl try-restart rpm-ostreed
# also move the default yum repos, we don't want em
vm_cmd mv /etc/yum.repos.d{,.bak}
vm_cmd mkdir /etc/yum.repos.d
}
# $1 - file to send
# $2 - destination path
vm_send() {
@@ -176,7 +231,7 @@ vm_get_boot_id() {
vm_reboot_cmd() {
vm_cmd sync
local bootid=$(vm_get_boot_id 2>/dev/null)
vm_cmd $@ || :
vm_cmd "$@" || :
vm_ssh_wait 120 $bootid
}

tests/vmcheck.sh Executable file
@@ -0,0 +1,60 @@
#!/bin/bash
set -euo pipefail
dn=$(cd "$(dirname "$0")" && pwd)
topsrcdir=$(cd "$dn/.." && pwd)
commondir=$(cd "$dn/common" && pwd)
export topsrcdir commondir
# https://github.com/coreos/coreos-assembler/pull/632
ncpus() {
if ! grep -q kubepods /proc/1/cgroup; then
# this might be a developer laptop; leave one cpu free to be nice
echo $(($(nproc) - 1))
return 0
fi
quota=$(cat /sys/fs/cgroup/cpu/cpu.cfs_quota_us)
period=$(cat /sys/fs/cgroup/cpu/cpu.cfs_period_us)
if [[ ${quota} != -1 ]] && [[ ${period} -gt 0 ]]; then
echo $(("${quota}" / "${period}"))
return 0
fi
# just fall back to 1
echo 1
}
# Just match 1:1 the number of processing units available. Ideally, we'd also
# cap based on memory available to us, but that's notoriously difficult to do
# for containers (see:
# https://fabiokung.com/2014/03/13/memory-inside-linux-containers/). We make an
# assumption here that we have at least 1G of RAM we can use per CPU available
# to us.
nhosts=${NHOSTS:-$(ncpus)}
nselected=0
ntotal=0
tests=()
for tf in $(find "${topsrcdir}/tests/vmcheck/" -name 'test-*.sh' | sort); do
ntotal=$((ntotal + 1))
tfbn=$(basename "$tf" .sh)
tfbn=" ${tfbn#test-} "
if [ -n "${TESTS+ }" ]; then
if [[ " $TESTS " != *$tfbn* ]]; then
continue
fi
fi
nselected=$((nselected + 1))
tests+=(${tfbn})
done
echo "Running ${nselected} out of ${ntotal} tests ${nhosts} at a time"
outputdir="${topsrcdir}/vmcheck-logs"
echo "Test results outputting to ${outputdir}/"
if [ "${#tests[*]}" -gt 0 ]; then
echo -n "${tests[*]}" | parallel -d' ' -j "${nhosts}" --line-buffer \
"${topsrcdir}/tests/vmcheck/runtest.sh" "${outputdir}"
fi

@@ -1,13 +0,0 @@
#!/bin/bash
set -euo pipefail
. ${commondir}/libvm.sh
vm_setup
if ! vm_ssh_wait 30; then
echo "WARNING: Failed to wait for VM to fetch journal" > ${JOURNAL_LOG}
else
echo "Saving ${JOURNAL_LOG}"
vm_cmd 'journalctl --no-pager || true' > ${JOURNAL_LOG}
fi

@@ -1,136 +0,0 @@
#!/bin/env python3
import os
import sys
import glob
import time
import subprocess
def main():
failed = False
hosts = []
for host in sys.argv[1:]:
hosts.append(Host(host))
if len(hosts) == 0:
print("error: no hosts provided")
sys.exit(1)
requested_tests_spec = os.environ.get('TESTS')
if requested_tests_spec is not None:
requested_tests = requested_tests_spec.split()
else:
requested_tests = None
tests = glob.iglob(os.path.join(sys.path[0], "test-*.sh"))
matched_tests = []
unmatched_tests = []
for test in tests:
testname = Host._strip_test(test)
if requested_tests is None or testname in requested_tests:
matched_tests.append(test)
else:
unmatched_tests.append(testname)
if len(matched_tests) == 0:
print("error: no tests match '{}': {}".format(requested_tests_spec, unmatched_tests))
sys.exit(1)
for test in matched_tests:
host = wait_for_next_available_host(hosts)
rc = host.flush()
failed = failed or rc != 0
host.dispatch(test)
if len(unmatched_tests) > 0:
print("NOTE: Skipping tests not matching {}: {}".format(requested_tests_spec, unmatched_tests))
for host in hosts:
rc = host.flush()
failed = failed or rc != 0
# fetch the journal from all the hosts which had a failure
fetcher = os.path.join(sys.path[0], "fetch-journal.sh")
for host in hosts:
if host.saw_fail:
fetcher_env = dict(os.environ)
fetcher_env.update({'VM': host.hostname,
'JOURNAL_LOG':
"vmcheck/%s.journal.log" % host.hostname})
subprocess.check_call([fetcher], env=fetcher_env)
return 1 if failed else 0
def wait_for_next_available_host(hosts):
while True:
for host in hosts:
if host.is_done():
return host
time.sleep(1)
class Host:
def __init__(self, hostname):
self.hostname = hostname
self.test = ""
self._p = None
self._starttime = None
self.saw_fail = False
def is_done(self):
if not self._p:
return True
return self._p.poll() is not None
def dispatch(self, test):
assert self.is_done()
test = self._strip_test(test)
env = dict(os.environ)
env.update({'TESTS': test,
'VM': self.hostname,
'JOURNAL_LOG': "", # we fetch the journal at the end
'LOG': "vmcheck/%s.out" % test})
if not os.path.isdir("vmcheck"):
os.mkdir("vmcheck")
testsh = os.path.join(sys.path[0], "test.sh")
self._starttime = time.time()
self._p = subprocess.Popen([testsh], env=env,
stdout=open("vmcheck/%s.log" % test, 'wb'),
stderr=subprocess.STDOUT)
self.test = test
print("INFO: scheduled", self.test, "on host", self.hostname)
def flush(self):
if not self._p:
return 0
print("WAITING: {} (pid={})".format(self.test, self._p.pid))
rc = self._p.wait()
endtime = time.time()
# just merge the two files
outfile = "vmcheck/{}.out".format(self.test)
if os.path.isfile(outfile):
with open(outfile) as f:
with open("vmcheck/%s.log" % self.test, 'a') as j:
j.write(f.read())
os.remove(outfile)
rcs = "PASS" if rc == 0 else ("FAIL (rc %d)" % rc)
print("{}: {} (took {}s)".format(rcs, self.test, int(endtime - self._starttime)))
self.test = ""
self._p = None
self.saw_fail = self.saw_fail or rc != 0
return rc
@staticmethod
def _strip_test(test):
test = os.path.basename(test)
assert test.startswith('test-') and test.endswith('.sh')
return test[5:-3]
if __name__ == '__main__':
sys.exit(main())

@@ -1,99 +1,33 @@
#!/bin/bash
set -xeuo pipefail
set -euo pipefail
# Execute this code path on the host
if test -z "${INSIDE_VM:-}"; then
. ${commondir}/libvm.sh
vm_setup
. ${commondir}/libvm.sh
if ! vm_ssh_wait 30; then
echo "ERROR: A running VM is required for 'make vmcheck'."
exit 1
fi
# Thin wrapper around `cosa dev-overlay`.
vm_rsync
vm_rpmostree status --json > ${VM}-out.json
commit=$(jq -r '.deployments[0]["checksum"]' < ${VM}-out.json)
origin=$(jq -r '.deployments[0]["origin"]' < ${VM}-out.json)
version=$(jq -r '.deployments[0]["version"]' < ${VM}-out.json)
timestamp=$(jq -r '.deployments[0]["timestamp"]' < ${VM}-out.json)
rm -f ${VM}-out.json
vm_cmd env \
RPMOSTREE_TEST_NO_OVERLAY="${RPMOSTREE_TEST_NO_OVERLAY:-}" \
INSIDE_VM=1 /var/roothome/sync/tests/vmcheck/overlay.sh \
$commit $origin $version $timestamp
vm_reboot
exit 0
fi
# And then this code path in the VM
# get details from the current default deployment
rpm-ostree status --json > json.txt
commit=$1; shift
origin=$1; shift
version=$1; shift
timestamp=$1; shift
[ -n "$timestamp" ]
timestamp=$(date -d "@$timestamp" "+%b %d %Y")
if [[ -z $commit ]] || ! ostree rev-parse $commit; then
echo "Error while determining current commit" >&2
exit 1
fi
cd /ostree/repo/tmp
rm vmcheck -rf
ostree checkout $commit vmcheck --fsync=0
rm vmcheck/etc -rf
# Now, overlay our built binaries & config files, unless
# explicitly requested not to (with the goal of testing the
# tree shipped as is with our existing tests).
if test -z "${RPMOSTREE_TEST_NO_OVERLAY}"; then
INSTTREE=/var/roothome/sync/insttree
rsync -rlv $INSTTREE/usr/ vmcheck/usr/
rsync -rlv $INSTTREE/etc/ vmcheck/usr/etc/
# First, we need to find the image to operate on.
if [ -n "${VMIMAGE:-}" ]; then
src_img=${VMIMAGE}
else
echo "Skipping overlay of built rpm-ostree"
fi
# ✀✀✀ BEGIN hack to get --keep-metadata
if ! ostree commit --help | grep -q -e --keep-metadata; then
# this is fine, rsync doesn't modify in place
mount -o rw,remount /usr
# don't overwrite /etc/ to not mess up 3-way merge
rsync -rlv --exclude '/etc/' vmcheck/usr/ /usr/
fi
# ✀✀✀ END hack to get --keep-metadata ✀✀✀
# if the commit already has pkglist metadata (i.e. the tree was composed with at
# least v2018.1), make sure it gets preserved, because it's useful for playing
# around (but note it's not a requirement for our tests)
commit_opts=
if ostree show $commit --raw | grep -q rpmostree.rpmdb.pkglist; then
commit_opts="${commit_opts} --keep-metadata=rpmostree.rpmdb.pkglist"
fi
source_opt= # make this its own var since it contains spaces
if [ $origin != vmcheck ]; then
source_title="${origin}"
if [[ $version != null ]]; then
source_title="${source_title} (${version}; $timestamp)"
else
source_title="${source_title} ($timestamp)"
basearch=$(cosa basearch)
cosa_builds=${COSA_BUILDS:-cosa-builds}
cosa_buildid=${COSA_BUILDID:-latest}
cosa_builddir=${cosa_builds}/${cosa_buildid}/${basearch}
if [ ! -e "${cosa_builddir}/meta.json" ]; then
fatal "No image provide (use VMIMAGE, or cosa-builds/ or COSA_BUILDS)"
fi
source_opt="--add-metadata-string=ostree.source-title=Dev overlay on ${source_title}"
commit_opts="${commit_opts} --add-metadata-string=rpmostree.original-origin=${origin}"
else
source_opt="--keep-metadata=ostree.source-title"
commit_opts="${commit_opts} --keep-metadata=rpmostree.original-origin"
cosa_qemu_path=$(jq -er '.images.qemu.path' "${cosa_builddir}/meta.json")
src_img=${cosa_builddir}/${cosa_qemu_path}
fi
ostree commit --parent=$commit -b vmcheck --consume --no-bindings \
--link-checkout-speedup ${commit_opts} "${source_opt}" \
--selinux-policy=vmcheck --tree=dir=vmcheck
if [ -z "${SKIP_VMOVERLAY:-}" ]; then
# XXX: to develop
cosa dev-overlay --src-image "${src_img}" --add-tree insttree/ \
--output-dir vmoverlay/ --output-ref vmcheck
target_img=vmoverlay/$(jq -er '.images.qemu.path' "vmoverlay/meta.json")
else
target_img=${src_img}
fi
ostree admin deploy vmcheck
ln -sf "$(realpath ${target_img})" tests/vmcheck/image.qcow2

tests/vmcheck/runtest.sh Executable file
@@ -0,0 +1,56 @@
#!/bin/bash
set -euo pipefail
if [ -n "${V:-}" ]; then
set -x
fi
outputdir=$1; shift
testname=$1; shift
outputdir="${outputdir}/${testname}"
rm -rf ${outputdir}/*
mkdir -p "${outputdir}"
# keep original stdout around; this propagates to the terminal
exec 3>&1
# but redirect everything else to a log file
exec 1>"${outputdir}/output.log"
exec 2>&1
# seed output log with current date
date
if [ -n "${V:-}" ]; then
setpriv --pdeathsig SIGKILL -- tail -f "${outputdir}/output.log" >&3 &
fi
# this will cause libtest.sh to allocate a tmpdir and cd to it
export VMTESTS=1
echo "EXEC: ${testname}" >&3
runtest() {
. ${commondir}/libtest.sh
. ${commondir}/libvm.sh
vm_kola_spawn "${outputdir}/kola"
"${topsrcdir}/tests/vmcheck/test-${testname}.sh"
}
if runtest; then
echo "PASS: ${testname}" >&3
else
echo "FAIL: ${testname}" >&3
if [ -z "${V:-}" ]; then
tail -n10 "${outputdir}/output.log" | sed "s/^/ ${testname}: /g" >&3
fi
if [ -n "${VMCHECK_DEBUG:-}" ]; then
echo "--- VMCHECK_DEBUG ---" >&3
echo "To try SSH:" "SSH_AUTH_SOCK=$(realpath ${SSH_AUTH_SOCK})" ${SSH:-} >&3
echo "Sleeping..." >&3
sleep infinity
fi
exit 1
fi

@@ -25,10 +25,7 @@ ostree admin unlock || :
# Now, overlay our built binaries & config files
INSTTREE=/var/roothome/sync/insttree
rsync -rlv $INSTTREE/usr/ /usr/
if [ -d $INSTTREE/etc ]; then # on CentOS, the dbus service file is in /usr
rsync -rlv $INSTTREE/etc/ /etc/
fi
rsync -rlv $INSTTREE/ /
restorecon -v /usr/bin/{rpm-,}ostree /usr/libexec/rpm-ostreed

@@ -216,15 +216,12 @@ vm_cmd systemctl restart rpm-ostreed
echo "ok cancel infinite post via `rpm-ostree cancel`"
# Test rm -rf /!
vm_shell_inline <<EOF
getent passwd testuser >/dev/null || useradd testuser
EOF
vm_cmd touch /home/testuser/somedata /tmp/sometmpfile /var/tmp/sometmpfile
vm_cmd touch /home/core/somedata /tmp/sometmpfile /var/tmp/sometmpfile
vm_build_rpm rmrf post "rm --no-preserve-root -rf / &>/dev/null || true"
if vm_rpmostree install rmrf 2>err.txt; then
assert_not_reached "rm -rf / worked? Uh oh."
fi
vm_cmd test -f /home/testuser/somedata -a -f /etc/fstab -a -f /tmp/sometmpfile -a -f /var/tmp/sometmpfile
vm_cmd test -f /home/core/somedata -a -f /etc/passwd -a -f /tmp/sometmpfile -a -f /var/tmp/sometmpfile
# This is the error today, we may improve it later
assert_file_has_content err.txt 'error: Sanity-checking final rootfs: Executing bwrap(/usr/bin/true)'
echo "ok impervious to rm -rf post"

@@ -51,12 +51,6 @@ vm_assert_status_jq \
'.deployments[0]["layered-commit-meta"]|not'
echo "ok empty pkg arrays, and commit meta correct in status json"
if test -z "${RPMOSTREE_TEST_NO_OVERLAY:-}"; then
vm_assert_status_jq \
'.deployments[0]["base-commit-meta"]["ostree.source-title"]|contains("overlay")'
echo "ok overlay found in commit meta"
fi
vm_rpmostree status --jsonpath '$.deployments[0].booted' > jsonpath.txt
assert_file_has_content_literal jsonpath.txt 'true'
echo "ok jsonpath"
@@ -80,23 +74,13 @@ fi
assert_file_has_content err.txt 'Unknown.*command'
echo "ok error on unknown command"
# Be sure an unprivileged user exists and that we can SSH into it. This is a bit
# underhanded, but we need a bona fide user session to verify non-priv status,
# and logging in through SSH is an easy way to achieve that.
vm_shell_inline <<EOF
getent passwd testuser >/dev/null || useradd testuser
mkdir -pm 0700 /home/testuser/.ssh
cp -a /root/.ssh/authorized_keys /home/testuser/.ssh
chown -R testuser:testuser /home/testuser/.ssh
EOF
# Make sure we can't do various operations as non-root
vm_build_rpm foo
if vm_cmd_as testuser rpm-ostree pkg-add foo &> err.txt; then
if vm_cmd_as core rpm-ostree pkg-add foo &> err.txt; then
assert_not_reached "Was able to install a package as non-root!"
fi
assert_file_has_content err.txt 'PkgChange not allowed for user'
if vm_cmd_as testuser rpm-ostree reload &> err.txt; then
if vm_cmd_as core rpm-ostree reload &> err.txt; then
assert_not_reached "Was able to reload as non-root!"
fi
assert_file_has_content err.txt 'ReloadConfig not allowed for user'
@@ -109,10 +93,10 @@ vm_shell_inline > coreos-rootfs.txt << EOF
lsattr -d /var/tmp/coreos-rootfs
rpm-ostree coreos-rootfs seal /var/tmp/coreos-rootfs
EOF
assert_file_has_content_literal coreos-rootfs.txt '----i-------------- /var/tmp/coreos-rootfs'
assert_file_has_content coreos-rootfs.txt '-*i-* /var/tmp/coreos-rootfs'
# Assert that we can do status as non-root
vm_cmd_as testuser rpm-ostree status
vm_cmd_as core rpm-ostree status
echo "ok status doesn't require root"
# StateRoot is only in --verbose
@@ -137,7 +121,11 @@ echo "ok reload"
stateroot=$(vm_get_booted_stateroot)
ospath=/org/projectatomic/rpmostree1/${stateroot//-/_}
vm_cmd dbus-send --system --dest=org.projectatomic.rpmostree1 --print-reply=literal $ospath org.projectatomic.rpmostree1.OSExperimental.Moo boolean:true > moo.txt
# related: https://github.com/coreos/fedora-coreos-config/issues/194
vm_cmd env LANG=C.UTF-8 gdbus call \
--system --dest org.projectatomic.rpmostree1 \
--object-path /org/projectatomic/rpmostree1/fedora_coreos \
--method org.projectatomic.rpmostree1.OSExperimental.Moo true > moo.txt
assert_file_has_content moo.txt '🐄'
echo "ok moo"

@@ -38,8 +38,7 @@ versionid=${versionid:11} # trim off VERSION_ID=
current=$(vm_get_booted_csum)
vm_cmd rpm-ostree db list "${current}" > current-dblist.txt
case $versionid in
29) kernel_release=4.18.16-300.fc29.x86_64;;
30) kernel_release=5.0.9-301.fc30.x86_64;;
31) kernel_release=5.3.7-301.fc31.x86_64;;
*) assert_not_reached "Unsupported Fedora version: $versionid";;
esac
assert_not_file_has_content current-dblist.txt $kernel_release

@@ -1,232 +0,0 @@
#!/bin/bash
set -euo pipefail
. ${commondir}/libvm.sh
LOG=${LOG:-vmcheck.log}
LOG=$(realpath $LOG)
# NB: allow JOURNAL_LOG to be empty, which means we never
# fetch it
JOURNAL_LOG=${JOURNAL_LOG-vmcheck-journal.txt}
if [ -n "$JOURNAL_LOG" ]; then
JOURNAL_LOG=$(realpath $JOURNAL_LOG)
fi
# create ssh-config if needed and export cmds
vm_setup
# stand up ssh connection and sanity check that it all works
if ! vm_ssh_wait 30; then
echo "ERROR: A running VM is required for 'make vmcheck'."
exit 1
fi
echo "VM is running."
# just error out if we're unlocked -- we use the current deployment as the
# fallback between each test, so we need to be sure it's in a state that works.
# also, the user might have forgotten that these tests are somewhat destructive
# and thus would wipe out unlocked changes, hotfix or not.
unlocked_cur=$(vm_get_booted_deployment_info unlocked)
if [[ $unlocked_cur != none ]]; then
echo "ERROR: VM is unlocked."
exit 1
fi
# remember the csum we're currently on and tag it so that ostree doesn't wipe it
csum_orig=$(vm_get_booted_csum)
vm_cmd ostree rev-parse $csum_orig &> /dev/null # validate
vm_cmd ostree refs vmcheck_orig --delete
vm_cmd ostree refs $csum_orig --create vmcheck_orig
# reboot onto the vmcheck ref if we're not already on it
origin=$(vm_get_booted_deployment_info origin)
if [[ $origin != vmcheck ]]; then
vm_cmd ostree refs vmcheck --delete
vm_cmd ostree refs vmcheck_orig --create vmcheck
vm_cmd ostree admin deploy vmcheck &>> ${LOG}
vm_reboot &>> ${LOG}
fi
# delete whatever tmp refs the previous testsuite runs may have created
vm_cmd ostree refs vmcheck_tmp vmcheck_remote --delete
# we bring our own test repo and test packages, so let's neuter any repo that
# comes with the distro to help speed up rpm-ostree metadata fetching since we
# don't cache it (e.g. on Fedora, it takes *forever* to fetch metadata, which we
# have to do dozens of times throughout the suite)
vm_cmd mkdir -p /etc/yum.repos.d/
if ! vm_cmd test -f /etc/yum.repos.d/.vmcheck; then
echo "Neutering /etc/yum.repos.d"
# Move the current one to .bak
vm_cmd mv /etc/yum.repos.d{,.bak}
# And create a new one with a .vmcheck as a stamp file so we recognize it
vm_cmd rm /etc/yum.repos.d.tmp -rf
vm_cmd mkdir /etc/yum.repos.d.tmp
vm_cmd touch /etc/yum.repos.d.tmp/.vmcheck
vm_cmd cp -r /etc/yum.repos.d{.tmp,}
else
echo "Keeping existing vmcheck /etc/yum.repos.d"
fi
# tests expect to run with the default config
# remember the original config, we restore it after the tests
if vm_cmd test -f /etc/rpm-ostreed.conf; then
vm_cmd mv -f /etc/rpm-ostreed.conf{,.bak}
fi
if vm_cmd test -f /usr/etc/rpm-ostreed.conf; then
vm_cmd cp -f /usr/etc/rpm-ostreed.conf /etc
fi
vm_cmd ostree remote delete --if-exists vmcheckmote
origdir=$(pwd)
echo -n '' > ${LOG}
testdir="$(dirname $(realpath $0))"
cd $testdir
colour_print() {
colour=$1; shift
[ ! -t 1 ] || echo -en "\e[${colour}m"
echo -n "$@"
[ ! -t 1 ] || echo -en "\e[0m"
echo
}
pass_print() {
colour_print 32 "$@" # green
}
fail_print() {
colour_print 31 "$@" # red
}
skip_print() {
colour_print 34 "$@" # blue
}
total=0
pass=0
fail=0
skip=0
notrun=0
for tf in $(find . -name 'test-*.sh' | sort); do
if [ -n "${TESTS+ }" ]; then
tfbn=$(basename "$tf" .sh)
tfbn=" ${tfbn#test-} "
if [[ " $TESTS " != *$tfbn* ]]; then
let "notrun += 1"
continue
fi
fi
let "total += 1"
bn=$(basename ${tf})
printf "Running $bn...\n"
printf "\n==== ${bn} ====\n" >> ${LOG}
vm_cmd logger "vmcheck: running $bn..."
# do some dirty piping to get some instant feedback and help debugging
if ${tf} |& tee -a ${LOG} \
| grep -e '^ok ' --line-buffered \
| xargs -d '\n' -n 1 echo " "; then
pass_print "PASS: $bn"
echo "PASS" >> ${LOG}
let "pass += 1"
else
if test $? = 77; then
skip_print "SKIP: $bn"
echo "SKIP" >> ${LOG}
let "skip += 1"
else
fail_print "FAIL: $bn"
echo "FAIL" >> ${LOG}
let "fail += 1"
fi
fi
vm_cmd logger "vmcheck: finished $bn..."
if test -n "${VMCHECK_DEBUG:-}"; then
echo "VMCHECK_DEBUG is set, skipping restoration of original deployment"
break
fi
# nuke all vmcheck and vmcheck_tmp refs and recreate vmcheck from orig
echo "Restoring original vmcheck commit" >> ${LOG}
vm_cmd ostree refs vmcheck vmcheck_tmp vmcheck_remote --delete
vm_cmd ostree refs vmcheck_orig --create vmcheck &>> ${LOG}
# restore the default config
vm_cmd cp -f /usr/etc/rpm-ostreed.conf /etc/
# go back to the original vmcheck deployment if needed
origin_cur=$(vm_get_booted_deployment_info origin)
csum_cur=$(vm_get_booted_csum)
unlocked_cur=$(vm_get_booted_deployment_info unlocked)
live_csum=$(vm_get_booted_deployment_info live-replaced)
if [[ $origin_cur != vmcheck ]] || \
[[ $csum_orig != $csum_cur ]] || \
[[ $unlocked_cur != none ]] || \
[ -n "${live_csum}" ]; then
# redeploy under the name 'vmcheck' so that tests can
# never modify the vmcheck_orig ref itself
vm_cmd ostree admin deploy vmcheck &>> ${LOG}
vm_reboot &>> ${LOG}
else
# make sure we're using the default config vals again
vm_cmd systemctl restart rpm-ostreed
fi
# make sure to clean up any pending & rollback deployments
vm_rpmostree cleanup -b -p -r -m || :
# and put back our tmp repo
vm_cmd rm /etc/yum.repos.d -rf
vm_cmd cp -r /etc/yum.repos.d{.tmp,}
# and clean up any leftovers from our tmp
osname=$(vm_get_booted_deployment_info osname)
vm_cmd rm -rf /ostree/deploy/$osname/var/tmp/vmcheck
vm_cmd ostree remote delete --if-exists vmcheckmote
done
if test -z "${VMCHECK_DEBUG:-}"; then
# put back the original yum repos
if vm_cmd test -f /etc/yum.repos.d/.vmcheck; then
echo "Restoring original /etc/yum.repos.d"
vm_cmd rm -rf /etc/yum.repos.d
vm_cmd mv /etc/yum.repos.d{.bak,}
fi
# put back the original config if any
vm_cmd rm -f /etc/rpm-ostreed.conf
if vm_cmd test -f /etc/rpm-ostreed.conf.bak; then
vm_cmd mv /etc/rpm-ostreed.conf{.bak,}
fi
fi
# Gather post-failure system logs if necessary
ALL_LOGS=$LOG
if [ ${fail} -ne 0 ] && [ -n "$JOURNAL_LOG" ]; then
./fetch-journal.sh
ALL_LOGS="$ALL_LOGS $JOURNAL_LOG"
fi
# tear down ssh connection if needed
if $SSH -O check &>/dev/null; then
$SSH -O exit &>/dev/null
fi
[ ${fail} -eq 0 ] && printer=pass || printer=fail
${printer}_print "TOTAL: $total PASS: $pass SKIP: $skip FAIL: $fail"
if test ${notrun} -gt 0; then
echo "NOTICE: Skipped ${notrun} tests not matching \"${TESTS}\""
fi
echo "See ${ALL_LOGS} for more information."
[ ${fail} -eq 0 ]