# Source library for installed virtualized shell script tests
#
# Copyright (C) 2016 Jonathan Lebon <jlebon@redhat.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.

if test -z "${LIBTEST_SH:-}"; then
  . ${commondir}/libtest.sh
fi

# prepares the VM and library for action
vm_setup() {
  export VM=${VM:-vmcheck}
  export SSH_CONFIG=${SSH_CONFIG:-${topsrcdir}/ssh-config}
  SSHOPTS="-o User=root -o ControlMaster=auto \
    -o ControlPath=/dev/shm/ssh-$VM-$(date +%s%N).sock \
    -o ControlPersist=yes"

  # If we're provided with an ssh-config, make sure we tell
  # ssh to pick it up.
  if [ -f "${SSH_CONFIG}" ]; then
    SSHOPTS="${SSHOPTS} -F ${SSH_CONFIG}"
  fi
  export SSHOPTS

  export SSH="ssh ${SSHOPTS} $VM"
  export SCP="scp ${SSHOPTS}"
}
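
# Illustrative usage (a sketch): after vm_setup, tests talk to the VM through
# the exported $SSH/$SCP wrappers (or the helpers below).
#
#   vm_setup
#   $SSH true                      # no-op command over the SSH control socket
#   $SCP ./local-file $VM:/root/   # "local-file" is a hypothetical path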

# prepares a fresh VM for action via `kola spawn`
vm_kola_spawn() {
  local outputdir=$1; shift

  exec 4> info.json
  mkdir kola-ssh
  setpriv --pdeathsig SIGKILL -- \
    env MANTLE_SSH_DIR="$PWD/kola-ssh" kola spawn -p qemu-unpriv \
      --qemu-image "${topsrcdir}/tests/vmcheck/image.qcow2" -v --idle \
      --json-info-fd 4 --output-dir "$outputdir" &
  # hack; need cleaner API for async kola spawn
  while [ ! -s info.json ]; do sleep 1; done

  local ssh_ip_port ssh_ip ssh_port
  ssh_ip_port=$(jq -r .public_ip info.json)
  ssh_ip=${ssh_ip_port%:*}
  ssh_port=${ssh_ip_port#*:}
  cat > ssh-config <<EOF
Host vmcheck
  HostName ${ssh_ip}
  Port ${ssh_port}
  StrictHostKeyChecking no
  UserKnownHostsFile /dev/null
EOF
  SSH_CONFIG=$PWD/ssh-config
  # XXX: should just have kola output the path to the socket
  SSH_AUTH_SOCK=$(ls kola-ssh/agent.*)
  export SSH_CONFIG SSH_AUTH_SOCK

  # Hack around kola's Ignition config only setting up the core user; but we
  # want to be able to ssh directly as root. We still want all the other goodies
  # that kola injects in its Ignition config though, so we don't want to
  # override it. `cosa run`'s merge semantics would do nicely.
  ssh -o User=core -F "${SSH_CONFIG}" vmcheck 'sudo cp -RT {/home/core,/root}/.ssh'

  vm_setup

  # XXX: hack around https://github.com/systemd/systemd/issues/14328
  vm_cmd systemctl mask --now systemd-logind

  # Some tests expect the ref to be on `vmcheck`. We should drop that
  # requirement, but for now let's just mangle the origin.
  local deployment_root
  vm_cmd ostree refs --create vmcheck "$(vm_get_booted_csum)"
  deployment_root=$(vm_get_deployment_root 0)
  vm_cmd "sed -ie '/^refspec=/ s/=.*/=vmcheck/' ${deployment_root}.origin"
  vm_cmd "sed -ie '/^baserefspec=/ s/=.*/=vmcheck/' ${deployment_root}.origin"
  vm_cmd systemctl try-restart rpm-ostreed

  # also move the default yum repos, we don't want em
  vm_cmd mv /etc/yum.repos.d{,.bak}
  vm_cmd mkdir /etc/yum.repos.d
}
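
# Illustrative flow (a sketch; the vmcheck harness normally drives this from a
# scratch directory, since vm_kola_spawn writes info.json/ssh-config there):
#
#   vm_kola_spawn "$PWD/kola-output"   # boot a throwaway VM, point $SSH at it
#   vm_cmd rpm-ostree status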

# $1 - file to send
# $2 - destination path
vm_send() {
  $SCP ${1} ${VM}:${2}
}

# $1 - destination path
vm_send_inline() {
  f=$(mktemp -p $PWD)
  cat > ${f}
  vm_send ${f} ${1}
  rm -f ${f}
}

# run a script provided on stdin inside the VM (with `set -xeuo pipefail`)
vm_shell_inline() {
  script=$(mktemp -p $PWD)
  echo "set -xeuo pipefail" > ${script}
  cat >> ${script}
  vm_send ${script} /tmp/$(basename ${script})
  rm -f ${script}
  vm_cmd bash /tmp/$(basename ${script})
}
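
# Illustrative usage (a sketch; the heredoc body runs in the VM under
# `set -xeuo pipefail`; paths and contents are hypothetical):
#
#   vm_shell_inline <<EOF
#     mkdir -p /var/tmp/scratch
#     echo hello > /var/tmp/scratch/hello.txt
#   EOF
#
#   vm_send_inline /etc/some.conf <<EOF
#   key=value
#   EOF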

# rsync wrapper that sets up authentication
vm_raw_rsync() {
  local rsyncopts="ssh -o User=root"
  if [ -f "${SSH_CONFIG}" ]; then
    rsyncopts="$rsyncopts -F '${SSH_CONFIG}'"
  fi
  rsync -az --no-owner --no-group -e "$rsyncopts" "$@"
}

vm_rsync() {
  if ! test -f .vagrant/using_sshfs; then
    pushd ${topsrcdir}
    vm_raw_rsync --delete --exclude target/ --exclude bindgen-target/ --exclude .git/ . $VM:/var/roothome/sync
    popd
  fi
}

# run command in vm as user
# - $1 username
# - $@ command to run
vm_cmd_as() {
  local user=$1; shift
  # don't reuse root's ControlPath
  local sshopts="-o User=$user"
  if [ -f "${SSH_CONFIG}" ]; then
    sshopts="$sshopts -F ${SSH_CONFIG}"
  fi
  ssh $sshopts $VM "$@"
}
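
# Illustrative usage (a sketch; `core` is the user set up by kola's Ignition
# config, as noted in vm_kola_spawn above):
#
#   vm_cmd_as core id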

# run command in vm
# - $@ command to run
vm_cmd() {
  $SSH "$@"
}

# Delete anything which we might change between runs
vm_clean_caches() {
  vm_cmd rm /ostree/repo/refs/heads/rpmostree/pkg/* -rf
}

# run rpm-ostree in vm
# - $@ args
vm_rpmostree() {
  vm_cmd env ASAN_OPTIONS=detect_leaks=false rpm-ostree "$@"
}
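
# Illustrative usage:
#
#   vm_cmd systemctl try-restart rpm-ostreed   # arbitrary command in the VM
#   vm_rpmostree status --json > status.json   # rpm-ostree, with leak checks off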

# copy the test repo to the vm
# $1 - repo file mode: nogpgcheck (default), gpgcheck, skip (don't send)
vm_send_test_repo() {
  mode=${1:-nogpgcheck}
  # note we use -c here because we might be called twice within a second
  vm_raw_rsync -c --delete ${test_tmpdir}/yumrepo $VM:/var/tmp/vmcheck

  if [[ $mode == skip ]]; then
    return
  fi

  cat > vmcheck.repo << EOF
[test-repo]
name=test-repo
baseurl=file:///var/tmp/vmcheck/yumrepo
EOF

  if [[ $mode == gpgcheck ]]; then
    cat >> vmcheck.repo <<EOF
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-25-primary
EOF
  else
    assert_streq "$mode" nogpgcheck
    echo "Enabling vmcheck.repo without GPG"
    echo 'gpgcheck=0' >> vmcheck.repo
  fi

  vm_send vmcheck.repo /etc/yum.repos.d
}

# wait until ssh is available on the vm
# - $1 timeout in seconds (optional)
# - $2 previous bootid (optional)
vm_ssh_wait() {
  local timeout=${1:-0}; shift
  local old_bootid=${1:-}; shift
  if ! vm_cmd true; then
    echo "Failed to log into VM, retrying with debug:"
    $SSH -o LogLevel=debug true || true
  fi
  while [ $timeout -gt 0 ]; do
    if bootid=$(vm_get_boot_id 2>/dev/null); then
      if [[ $bootid != $old_bootid ]]; then
        # if this is a reboot, display some info about the new boot
        if [ -n "$old_bootid" ]; then
          vm_rpmostree status
          vm_rpmostree --version
        fi
        return 0
      fi
    fi
    if test $(($timeout % 5)) == 0; then
      echo "Still failed to log into VM, retrying for $timeout seconds"
    fi
    timeout=$((timeout - 1))
    sleep 1
  done
  false "Timed out while waiting for SSH."
}

vm_get_boot_id() {
  vm_cmd cat /proc/sys/kernel/random/boot_id
}

# Run a command in the VM that will cause a reboot
vm_reboot_cmd() {
  vm_cmd sync
  local bootid=$(vm_get_boot_id 2>/dev/null)
  vm_cmd "$@" || :
  vm_ssh_wait 120 $bootid
}

# reboot the vm
vm_reboot() {
  vm_reboot_cmd systemctl reboot
}

# check that the given files/dirs exist on the VM
# - $@ files/dirs to check for
vm_has_files() {
  for file in "$@"; do
    if ! vm_cmd test -e $file; then
      return 1
    fi
  done
}

# check that the packages are installed
# - $@ packages to check for
vm_has_packages() {
  for pkg in "$@"; do
    if ! vm_cmd rpm -q $pkg; then
      return 1
    fi
  done
}

# retrieve info from a deployment
# - $1 index of deployment (or -1 for booted)
# - $2 key to retrieve
vm_get_deployment_info() {
  local idx=$1
  local key=$2
  vm_rpmostree status --json | \
    python3 -c "
import sys, json
deployments = json.load(sys.stdin)[\"deployments\"]
idx = $idx
if idx < 0:
  for i, depl in enumerate(deployments):
    if depl[\"booted\"]:
      idx = i
if idx < 0:
  print(\"Failed to determine currently booted deployment\")
  exit(1)
if idx >= len(deployments):
  print(\"Deployment index $idx is out of range\")
  exit(1)
depl = deployments[idx]
if \"$key\" in depl:
  data = depl[\"$key\"]
  if type(data) is list:
    print(\" \".join(data))
  else:
    print(data)
"
}
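
# Illustrative usage:
#
#   vm_get_deployment_info 0 checksum        # checksum of the pending deployment
#   vm_get_booted_deployment_info packages   # layered packages in the booted one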

# retrieve the deployment root
# - $1 index of deployment
vm_get_deployment_root() {
  local idx=$1
  local csum=$(vm_get_deployment_info $idx checksum)
  local serial=$(vm_get_deployment_info $idx serial)
  local osname=$(vm_get_deployment_info $idx osname)
  echo /ostree/deploy/$osname/deploy/$csum.$serial
}

# retrieve info from the booted deployment
# - $1 key to retrieve
vm_get_booted_deployment_info() {
  vm_get_deployment_info -1 $1
}

# print the layered packages
vm_get_layered_packages() {
  vm_get_booted_deployment_info packages
}

# print the requested packages
vm_get_requested_packages() {
  vm_get_booted_deployment_info requested-packages
}

vm_get_local_packages() {
  vm_get_booted_deployment_info requested-local-packages
}

# check that the packages are currently layered
# - $@ packages to check for
vm_has_layered_packages() {
  local pkgs=$(vm_get_layered_packages)
  for pkg in "$@"; do
    if [[ "$pkgs" != *$pkg* ]]; then
      return 1
    fi
  done
}

# check that the packages are currently requested
# - $@ packages to check for
vm_has_requested_packages() {
  local pkgs=$(vm_get_requested_packages)
  for pkg in "$@"; do
    if [[ "$pkgs" != *$pkg* ]]; then
      return 1
    fi
  done
}

vm_has_local_packages() {
  local pkgs=$(vm_get_local_packages)
  for pkg in "$@"; do
    if [[ "$pkgs" != *$pkg* ]]; then
      return 1
    fi
  done
}

vm_has_dormant_packages() {
  vm_has_requested_packages "$@" && \
    ! vm_has_layered_packages "$@"
}

vm_get_booted_stateroot() {
  vm_get_booted_deployment_info osname
}

# retrieve the checksum of the currently booted deployment
vm_get_booted_csum() {
  vm_get_booted_deployment_info checksum
}

# retrieve the checksum of the pending deployment
vm_get_pending_csum() {
  vm_get_deployment_info 0 checksum
}

# make multiple consistency checks on a test pkg
# - $1 package to check for
# - $2 either "present" or "absent"
vm_assert_layered_pkg() {
  local pkg=$1; shift
  local policy=$1; shift

  set +e
  vm_has_packages $pkg;           pkg_in_rpmdb=$?
  vm_has_layered_packages $pkg;   pkg_is_layered=$?
  vm_has_local_packages $pkg;     pkg_is_layered_local=$?
  vm_has_requested_packages $pkg; pkg_is_requested=$?
  [ $pkg_in_rpmdb == 0 ] && \
  ( ( [ $pkg_is_layered == 0 ] &&
      [ $pkg_is_requested == 0 ] ) ||
    [ $pkg_is_layered_local == 0 ] ); pkg_present=$?
  [ $pkg_in_rpmdb != 0 ] && \
  [ $pkg_is_layered != 0 ] && \
  [ $pkg_is_layered_local != 0 ] && \
  [ $pkg_is_requested != 0 ]; pkg_absent=$?
  set -e

  if [ $policy == present ] && [ $pkg_present != 0 ]; then
    vm_cmd rpm-ostree status
    assert_not_reached "pkg $pkg is not present"
  fi

  if [ $policy == absent ] && [ $pkg_absent != 0 ]; then
    vm_cmd rpm-ostree status
    assert_not_reached "pkg $pkg is not absent"
  fi
}
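
# Illustrative usage (a sketch; "foo" is a hypothetical test package):
#
#   vm_build_rpm foo
#   vm_rpmostree install foo
#   vm_reboot
#   vm_assert_layered_pkg foo present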

# Takes a list of `jq` expressions, each of which should evaluate to a boolean,
# and asserts that they are true.
vm_assert_status_jq() {
  vm_rpmostree status --json > status.json
  assert_jq status.json "$@"
}
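
# Illustrative usage (each expression must evaluate to true):
#
#   vm_assert_status_jq '.deployments[0]["booted"]' \
#                       '.deployments[0]["origin"] == "vmcheckmote:vmcheck"'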

vm_pending_is_staged() {
  vm_rpmostree status --json > status-staged.json
  local rc=1
  if jq -e ".deployments[0][\"staged\"]" < status-staged.json; then
    rc=0
  fi
  rm -f status-staged.json
  return $rc
}

# Like build_rpm, but also sends it to the VM
vm_build_rpm() {
  build_rpm "$@"
  vm_send_test_repo
}

# Like uinfo_cmd, but also sends it to the VM
vm_uinfo() {
  uinfo_cmd "$@"
  vm_send_test_repo
}

# Like vm_build_rpm but takes a yumrepo mode
vm_build_rpm_repo_mode() {
  mode=$1; shift
  build_rpm "$@"
  vm_send_test_repo $mode
}

vm_build_selinux_rpm() {
  build_selinux_rpm "$@"
  vm_send_test_repo
}
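
# Illustrative usage (a sketch; package names and versions are arbitrary):
#
#   vm_build_rpm foo version 1.4 release 7          # build foo-1.4-7, publish repo
#   vm_uinfo add VMCHECK-ENH enhancement            # add updateinfo metadata
#   vm_build_rpm bar version 2.0 uinfo VMCHECK-ENH  # attach bar to that advisory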

vm_get_journal_cursor() {
  vm_cmd journalctl -o json -n 1 | jq -r '.["__CURSOR"]'
}

# Wait for a message logged after $cursor matching a regexp to appear
# $1 - cursor
# $2 - regex to wait for
vm_wait_content_after_cursor() {
  from_cursor=$1; shift
  regex=$1; shift
  vm_shell_inline <<EOF
    tmpf=\$(mktemp /var/tmp/journal.XXXXXX)
    for x in \$(seq 60); do
      journalctl -u rpm-ostreed --after-cursor "${from_cursor}" > \${tmpf}
      if grep -q -e "${regex}" \${tmpf}; then
        exit 0
      else
        cat \${tmpf}
        sleep 1
      fi
    done
    echo "timed out after 60s" 1>&2
    journalctl -u rpm-ostreed --after-cursor "${from_cursor}" | tail -100
    exit 1
EOF
}

# Minor helper that makes sure to get quoting right
vm_get_journal_after_cursor() {
  from_cursor=$1; shift
  to_file=$1; shift
  # add an extra helping of quotes for hungry ssh
  vm_cmd journalctl --after-cursor "'$from_cursor'" > $to_file
}

vm_assert_journal_has_content() {
  from_cursor=$1; shift
  vm_get_journal_after_cursor $from_cursor tmp-journal.txt
  assert_file_has_content tmp-journal.txt "$@"
  rm -f tmp-journal.txt
}
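
# Illustrative usage (a sketch; the matched string is a hypothetical journal
# message, not a verified rpm-ostreed log line):
#
#   cursor=$(vm_get_journal_cursor)
#   vm_rpmostree reload
#   vm_wait_content_after_cursor $cursor 'Reloaded configuration'
#   vm_assert_journal_has_content $cursor 'Reloaded configuration'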

# usage: <podman args> -- <container args>
vm_run_container() {
  local podman_args=
  while [ $# -ne 0 ]; do
    local arg=$1; shift
    if [[ $arg == -- ]]; then
      break
    fi
    podman_args="$podman_args $arg"
  done
  [ $# -ne 0 ] || fatal "No container args provided"
  # just automatically always share dnf cache so we don't redownload each time
  # (use -n so this ssh invocation doesn't consume stdin)
  vm_cmd -n mkdir -p /var/cache/dnf
  vm_cmd podman run --rm -v /var/cache/dnf:/var/cache/dnf:z $podman_args \
    registry.fedoraproject.org/fedora:30 "$@"
}
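
# Illustrative usage (podman args go before `--`, the container command after;
# the port is arbitrary):
#
#   vm_run_container --net=host -- python3 -m http.server 8000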

# $1 - service name
# $2 - dir to serve
# $3 - port to serve on
vm_start_httpd() {
  local name=$1; shift
  local dir=$1; shift
  local port=$1; shift

  vm_cmd podman rm -f $name || true
  vm_run_container --net=host -d --name $name --privileged \
    -v $dir:/srv --workdir /srv -- \
    python3 -m http.server $port

  # NB: the EXIT trap is used by libtest, but not the ERR trap
  trap "vm_stop_httpd $name" ERR
  set -E # inherit trap

  # Ideally systemd-run would support .socket units or something
  vm_cmd 'while ! curl --head http://127.0.0.1:8888 &>/dev/null; do sleep 1; done'
}

# $1 - service name
vm_stop_httpd() {
  local name=$1; shift
  vm_cmd podman rm -f $name
  set +E
  trap - ERR
}

# start up an ostree server to be used as an http remote
vm_ostreeupdate_prepare_repo() {
  # Really testing this like a user requires a remote ostree server setup.
  # Let's start by setting up the repo.
  REMOTE_OSTREE=/ostree/repo/tmp/vmcheck-remote
  vm_cmd mkdir -p $REMOTE_OSTREE
  vm_cmd ostree init --repo=$REMOTE_OSTREE --mode=archive
  vm_start_httpd ostree_server $REMOTE_OSTREE 8888
}

# this is split out for the sole purpose of making iterating easier when hacking
# (see below for more details)
_init_updated_rpmmd_repo() {
  vm_build_rpm base-pkg-foo version 1.4 release 8 # upgraded
  vm_build_rpm base-pkg-bar version 0.9 release 3 # downgraded
  vm_build_rpm base-pkg-boo version 3.7 release 2.11 # added
  vm_uinfo add VMCHECK-ENH enhancement
  vm_uinfo add VMCHECK-SEC-NONE security none
  vm_uinfo add VMCHECK-SEC-LOW security low
  vm_uinfo add VMCHECK-SEC-CRIT security critical
  vm_build_rpm base-pkg-enh version 2.0 uinfo VMCHECK-ENH
  vm_build_rpm base-pkg-sec-none version 2.0 uinfo VMCHECK-SEC-NONE
  vm_build_rpm base-pkg-sec-low version 2.0 uinfo VMCHECK-SEC-LOW
  vm_build_rpm base-pkg-sec-crit version 2.0 uinfo VMCHECK-SEC-CRIT
  vm_uinfo add-ref VMCHECK-SEC-LOW 1 http://example.com/vuln1 "CVE-12-34 vuln1"
  vm_uinfo add-ref VMCHECK-SEC-LOW 2 http://example.com/vuln2 "CVE-12-34 vuln2"
  vm_uinfo add-ref VMCHECK-SEC-LOW 3 http://example.com/vuln3 "CVE-56-78 CVE-90-12 vuln3"
  vm_uinfo add-ref VMCHECK-SEC-LOW 4 http://example.com/vuln4 "CVE-12-JUNK CVE-JUNK vuln4"
}

# Start up a remote, and create two new commits (v1 and v2) which contain new
# pkgs. The 'vmcheck' ref on the remote is set at v1. You can then make a new
# update appear later using "vm_ostreeupdate_create v2".
vm_ostreeupdate_prepare() {
  # first, let's make sure the timer is disabled so it doesn't mess with our
  # tests
  vm_cmd systemctl disable --now rpm-ostreed-automatic.timer

  # Prepare an OSTree repo with updates
  vm_ostreeupdate_prepare_repo

  # (delete ref but don't prune for easier debugging)
  vm_cmd ostree refs --repo=$REMOTE_OSTREE vmcheck --delete

  # now let's build some pkgs that we'll jury-rig into a base update
  # this whole block can be commented out (except the _init_updated_rpmmd_repo
  # call) after the first run for a speed-up when iterating locally
  vm_build_rpm base-pkg-foo version 1.4 release 7
  vm_build_rpm base-pkg-bar
  vm_build_rpm base-pkg-baz version 1.1 release 1
  vm_build_rpm base-pkg-enh
  vm_build_rpm base-pkg-sec-none
  vm_build_rpm base-pkg-sec-low
  vm_build_rpm base-pkg-sec-crit
  vm_rpmostree install base-pkg-{foo,bar,baz,enh,sec-{none,low,crit}}
  vm_ostreeupdate_lift_commit $(vm_get_pending_csum) v1
  vm_rpmostree cleanup -p

  # ok, we don't need those RPMs anymore since they're part of the base
  rm -rf $test_tmpdir/yumrepo

  # create new versions of those RPMs that we install in v2; we keep the repo
  # around since that's where e.g. advisories are stored too when analyzing
  # the v2 ostree update
  _init_updated_rpmmd_repo
  vm_rpmostree install base-pkg-{foo,bar,boo,enh,sec-{none,low,crit}}
  vm_ostreeupdate_lift_commit $(vm_get_pending_csum) v2
  vm_rpmostree cleanup -p

  vm_ostreeupdate_create v1
  vm_cmd ostree remote add vmcheckmote --no-gpg-verify http://localhost:8888/
  vm_rpmostree reload
}
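
# Illustrative flow in a test (a sketch building on the comment above):
#
#   vm_ostreeupdate_prepare                  # remote now serves 'vmcheck' at v1
#   vm_rpmostree rebase vmcheckmote:vmcheck
#   vm_ostreeupdate_create v2                # a v2 update appears on the remote
#   vm_rpmostree upgrade --check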

vm_ostreeupdate_prepare_reboot() {
  vm_ostreeupdate_prepare
  vm_rpmostree rebase vmcheckmote:vmcheck
  vm_reboot
  vm_rpmostree cleanup -pr
  vm_assert_status_jq ".deployments[0][\"origin\"] == \"vmcheckmote:vmcheck\"" \
                      ".deployments[0][\"booted\"]" \
                      ".deployments[0][\"version\"] == \"v1\""
  vm_rpmostree status > status.txt
  assert_file_has_content_literal status.txt 'AutomaticUpdates: disabled'
  # start it up again since we rebooted
  vm_start_httpd ostree_server $REMOTE_OSTREE 8888
}

vm_change_update_policy() {
  policy=$1; shift
  vm_shell_inline <<EOF
    cp /usr/etc/rpm-ostreed.conf /etc
    echo -e "[Daemon]\nAutomaticUpdatePolicy=$policy" > /etc/rpm-ostreed.conf
    rpm-ostree reload
EOF
}

# APIs to build up a history on the server. Rather than wasting time
# composing trees for real, we just use client package layering to create new
# trees that we then "lift" into the server before cleaning them up client-side.

# steal a commit from the system repo and tag it as a new version
vm_ostreeupdate_lift_commit() {
  checksum=$1; shift
  # ostree doesn't support tags, so just shove it in a branch
  branch=vmcheck_tmp/$1; shift
  vm_cmd ostree pull-local --repo=$REMOTE_OSTREE --disable-fsync \
    /ostree/repo $checksum
  vm_cmd ostree --repo=$REMOTE_OSTREE refs $branch --delete
  vm_cmd ostree --repo=$REMOTE_OSTREE refs $checksum --create=$branch
}

_commit_and_inject_pkglist() {
  local version=$1; shift
  local src_ref=$1; shift
  vm_cmd ostree commit --repo=$REMOTE_OSTREE -b vmcheck --fsync=no \
    --tree=ref=$src_ref --add-metadata-string=version=$version
  vm_rpmostree testutils inject-pkglist $REMOTE_OSTREE vmcheck
}

# use a previously stolen commit to create an update on our vmcheck branch,
# complete with version string and pkglist metadata
vm_ostreeupdate_create() {
  version=$1; shift
  _commit_and_inject_pkglist $version vmcheck_tmp/$version
}

# create a new no-op update with version metadata $1
vm_ostreeupdate_create_noop() {
  version=$1; shift
  _commit_and_inject_pkglist $version vmcheck
}

# takes a layered commit, and makes it into a base
vm_ostree_repo_commit_layered_as_base() {
  local repo=$1; shift
  local from_rev=$1; shift
  local to_ref=$1; shift
  local d=$repo/tmp/vmcheck_commit.tmp
  rm -rf $d
  vm_cmd ostree checkout --repo=$repo -H --fsync=no $from_rev $d
  # need to update the base rpmdb
  vm_cmd mkdir -p $d/usr/lib/sysimage/rpm-ostree-base-db
  vm_cmd rsync -qa --delete $d/usr/share/rpm/ $d/usr/lib/sysimage/rpm-ostree-base-db
  vm_cmd ostree commit --repo=$repo -b $to_ref --link-checkout-speedup --fsync=no --consume $d
  # and inject pkglist metadata
  vm_rpmostree testutils inject-pkglist $repo $to_ref >/dev/null
}

vm_ostree_commit_layered_as_base() {
  vm_ostree_repo_commit_layered_as_base /ostree/repo "$@"
}

vm_status_watch_start() {
  rm -rf status-watch.txt
  while sleep 1; do
    vm_rpmostree status >> status-watch.txt
  done &
  _status_watch_pid=$!
  # NB: the EXIT trap is used by libtest, but not the ERR trap
  trap "kill $_status_watch_pid" ERR
  set -E # inherit trap
}

vm_status_watch_check() {
  [ -n "${_status_watch_pid:-}" ]
  kill $_status_watch_pid
  _status_watch_pid=
  set +E
  trap - ERR
  [ -f status-watch.txt ]
  assert_file_has_content_literal status-watch.txt "$@"
  rm -rf status-watch.txt
}
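
# Illustrative usage (a sketch; the expected substring is hypothetical and
# depends on what `rpm-ostree status` prints while the transaction runs):
#
#   vm_status_watch_start
#   vm_rpmostree install foo
#   vm_status_watch_check 'Transaction: install foo'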