# 2016-06-21 19:37:02 +03:00 (stray VCS timestamp from extraction; kept as a comment)
# Source library for installed virtualized shell script tests
#
# Copyright (C) 2016 Jonathan Lebon <jlebon@redhat.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
2016-07-15 20:30:56 +03:00
# prepares the VM and library for action
vm_setup( ) {
2017-03-11 01:43:23 +03:00
export VM = ${ VM :- vmcheck }
local sshopts = " -o User=root \
2016-07-15 20:30:56 +03:00
-o ControlMaster = auto \
2017-08-24 21:43:14 +03:00
-o ControlPath = /var/tmp/ssh-$VM -$( date +%s%N) .sock \
2016-07-15 20:30:56 +03:00
-o ControlPersist = yes"
2017-03-11 01:43:23 +03:00
# If we're provided with an ssh-config, make sure we tell
# ssh to pick it up.
if [ -f " ${ topsrcdir } /ssh-config " ] ; then
sshopts = " $sshopts -F ${ topsrcdir } /ssh-config "
fi
export SSH = " ssh $sshopts $VM "
2016-07-15 20:30:56 +03:00
export SCP = " scp $sshopts "
}
2017-10-05 22:29:57 +03:00
# rsync wrapper that sets up authentication
vm_raw_rsync( ) {
local rsyncopts = "ssh -o User=root"
if [ -f ${ topsrcdir } /ssh-config ] ; then
rsyncopts = " $rsyncopts -F ' ${ topsrcdir } /ssh-config' "
fi
rsync -az --no-owner --no-group -e " $rsyncopts " " $@ "
}
2016-07-15 20:30:56 +03:00
vm_rsync( ) {
2016-11-22 06:22:51 +03:00
if ! test -f .vagrant/using_sshfs; then
pushd ${ topsrcdir }
2017-12-15 20:01:46 +03:00
vm_raw_rsync --delete --exclude .git/ . $VM :/var/roothome/sync
2016-11-22 06:22:51 +03:00
popd
2016-11-14 20:07:43 +03:00
fi
2016-07-15 20:30:56 +03:00
}
2017-07-27 17:16:23 +03:00
# run command in vm as user
# - $1 username
# - $@ command to run
vm_cmd_as( ) {
local user = $1 ; shift
# don't reuse root's ControlPath
local sshopts = " -o User= $user "
if [ -f " ${ topsrcdir } /ssh-config " ] ; then
sshopts = " $sshopts -F ${ topsrcdir } /ssh-config "
fi
ssh $sshopts $VM " $@ "
}
2016-06-22 00:35:51 +03:00
# run command in vm
# - $@ command to run
2016-06-21 19:37:02 +03:00
vm_cmd( ) {
$SSH " $@ "
}
2016-06-22 00:35:51 +03:00
Introduce `ex livefs`
There are a few different use cases here. First, for layering new packages,
there's no good reason for us to force a reboot. Second, we want some support
for cherry-picking security updates and allowing admins to restart services. Finally,
at some point we should offer support for entirely replacing the running tree
if that's what the user wants.
Until now we've been very conservative, but there's a spectrum here. In
particular, this patch changes things so we push a rollback before we start
doing anything live. I think in practice, many use cases would be totally fine
with doing most changes live, and falling back to the rollback if something went
wrong.
This initial code drop *only* supports live layering of new packages. However,
a lot of the base infrastructure is laid for future work.
For now, this will be classified as an experimental feature, hence `ex livefs`.
Part of: https://github.com/projectatomic/rpm-ostree/issues/639
Closes: #652
Approved by: jlebon
2017-03-01 01:16:48 +03:00
# Copy argument (usually shell script) to VM, execute it there
vm_cmdfile( ) {
2017-07-27 17:16:23 +03:00
local bin = $1
Introduce `ex livefs`
There are a few different use cases here. First, for layering new packages,
there's no good reason for us to force a reboot. Second, we want some support
for cherry-picking security updates and allowing admins to restart services. Finally,
at some point we should offer support for entirely replacing the running tree
if that's what the user wants.
Until now we've been very conservative, but there's a spectrum here. In
particular, this patch changes things so we push a rollback before we start
doing anything live. I think in practice, many use cases would be totally fine
with doing most changes live, and falling back to the rollback if something went
wrong.
This initial code drop *only* supports live layering of new packages. However,
a lot of the base infrastructure is laid for future work.
For now, this will be classified as an experimental feature, hence `ex livefs`.
Part of: https://github.com/projectatomic/rpm-ostree/issues/639
Closes: #652
Approved by: jlebon
2017-03-01 01:16:48 +03:00
chmod a+x ${ bin }
2017-07-27 17:16:23 +03:00
local bn = $( basename ${ bin } )
Introduce `ex livefs`
There are a few different use cases here. First, for layering new packages,
there's no good reason for us to force a reboot. Second, we want some support
for cherry-picking security updates and allowing admins to restart services. Finally,
at some point we should offer support for entirely replacing the running tree
if that's what the user wants.
Until now we've been very conservative, but there's a spectrum here. In
particular, this patch changes things so we push a rollback before we start
doing anything live. I think in practice, many use cases would be totally fine
with doing most changes live, and falling back to the rollback if something went
wrong.
This initial code drop *only* supports live layering of new packages. However,
a lot of the base infrastructure is laid for future work.
For now, this will be classified as an experimental feature, hence `ex livefs`.
Part of: https://github.com/projectatomic/rpm-ostree/issues/639
Closes: #652
Approved by: jlebon
2017-03-01 01:16:48 +03:00
$SCP $1 $VM :/root/${ bn }
$SSH /root/${ bn }
}
2017-02-02 23:41:29 +03:00
# Delete anything which we might change between runs
vm_clean_caches( ) {
2017-11-20 19:45:55 +03:00
vm_cmd rm /ostree/repo/refs/heads/rpmostree/pkg/* -rf
2017-02-02 23:41:29 +03:00
}
2017-01-17 21:29:48 +03:00
# run rpm-ostree in vm
# - $@ args
vm_rpmostree( ) {
2017-07-27 17:16:23 +03:00
vm_cmd env ASAN_OPTIONS = detect_leaks = false rpm-ostree " $@ "
2017-01-17 21:29:48 +03:00
}
2016-06-22 00:35:51 +03:00
# copy files to a directory in the vm
# - $1 target directory
# - $2.. files & dirs to copy
vm_send( ) {
2017-07-27 17:16:23 +03:00
local dir = $1 ; shift
2016-06-22 00:35:51 +03:00
vm_cmd mkdir -p $dir
2017-03-11 01:43:23 +03:00
$SCP -r " $@ " $VM :$dir
2016-06-22 00:35:51 +03:00
}
2016-07-04 18:43:02 +03:00
# copy the test repo to the vm
2017-10-05 22:29:57 +03:00
# $1 - repo file mode: nogpgcheck (default), gpgcheck, skip (don't send)
2016-07-04 18:43:02 +03:00
vm_send_test_repo( ) {
2017-10-05 22:29:57 +03:00
mode = ${ 1 :- nogpgcheck }
2017-10-05 22:33:40 +03:00
# note we use -c here because we might be called twice within a second
vm_raw_rsync -c --delete ${ test_tmpdir } /yumrepo $VM :/tmp/vmcheck
2017-10-05 22:29:57 +03:00
if [ [ $mode = = skip ] ] ; then
return
fi
2016-07-04 18:43:02 +03:00
cat > vmcheck.repo << EOF
[ test-repo]
name = test-repo
2017-06-29 17:06:36 +03:00
baseurl = file:///tmp/vmcheck/yumrepo
2016-07-04 18:43:02 +03:00
EOF
2017-10-05 22:29:57 +03:00
if [ [ $mode = = gpgcheck ] ] ; then
2017-02-02 23:41:29 +03:00
cat >> vmcheck.repo <<EOF
gpgcheck = 1
gpgkey = file:///etc/pki/rpm-gpg/RPM-GPG-KEY-fedora-25-primary
EOF
else
2017-10-05 22:29:57 +03:00
assert_streq " $mode " nogpgcheck
2017-02-02 23:41:29 +03:00
echo "Enabling vmcheck.repo without GPG"
echo 'gpgcheck=0' >> vmcheck.repo
fi
2016-07-04 18:43:02 +03:00
vm_send /etc/yum.repos.d vmcheck.repo
}
2016-06-22 00:35:51 +03:00
# wait until ssh is available on the vm
2016-06-24 23:40:20 +03:00
# - $1 timeout in second (optional)
2016-12-07 23:14:54 +03:00
# - $2 previous bootid (optional)
2016-06-22 00:35:51 +03:00
vm_ssh_wait( ) {
2017-07-27 17:16:23 +03:00
local timeout = ${ 1 :- 0 } ; shift
local old_bootid = ${ 1 :- } ; shift
2017-01-09 21:09:43 +03:00
if ! vm_cmd true; then
echo "Failed to log into VM, retrying with debug:"
$SSH -o LogLevel = debug true || true
fi
2016-06-24 23:40:20 +03:00
while [ $timeout -gt 0 ] ; do
2016-12-07 23:14:54 +03:00
if bootid = $( vm_get_boot_id 2>/dev/null) ; then
if [ [ $bootid != $old_bootid ] ] ; then
2017-02-24 04:58:27 +03:00
# if this is a reboot, display some info about new boot
if [ -n " $old_bootid " ] ; then
vm_rpmostree status
vm_rpmostree --version
fi
2016-12-07 23:14:54 +03:00
return 0
fi
2016-06-24 23:40:20 +03:00
fi
2017-01-09 21:09:43 +03:00
if test $(( $timeout % 5 )) = = 0; then
echo " Still failed to log into VM, retrying for $timeout seconds "
fi
2016-06-24 23:40:20 +03:00
timeout = $(( timeout - 1 ))
2016-06-22 00:35:51 +03:00
sleep 1
done
2016-12-07 23:14:54 +03:00
false "Timed out while waiting for SSH."
}
vm_get_boot_id( ) {
2017-01-09 21:09:43 +03:00
vm_cmd cat /proc/sys/kernel/random/boot_id
2016-06-22 00:35:51 +03:00
}
2017-01-04 20:29:01 +03:00
# Run a command in the VM that will cause a reboot
vm_reboot_cmd( ) {
vm_cmd sync
2017-07-27 17:16:23 +03:00
local bootid = $( vm_get_boot_id 2>/dev/null)
2017-01-04 20:29:01 +03:00
vm_cmd $@ || :
vm_ssh_wait 120 $bootid
}
2016-06-22 00:35:51 +03:00
# reboot the vm
vm_reboot( ) {
2017-01-04 20:29:01 +03:00
vm_reboot_cmd systemctl reboot
2016-06-22 00:35:51 +03:00
}
2017-01-06 17:52:57 +03:00
# check that the given files/dirs exist on the VM
# - $@ files/dirs to check for
2016-06-22 00:35:51 +03:00
vm_has_files( ) {
for file in " $@ " ; do
if ! vm_cmd test -e $file ; then
return 1
fi
done
}
# check that the packages are installed
# - $@ packages to check for
vm_has_packages( ) {
for pkg in " $@ " ; do
if ! vm_cmd rpm -q $pkg ; then
return 1
fi
done
}
2017-03-07 20:08:44 +03:00
# retrieve info from a deployment
# - $1 index of deployment (or -1 for booted)
# - $2 key to retrieve
vm_get_deployment_info( ) {
2017-07-27 17:16:23 +03:00
local idx = $1
local key = $2
2017-01-17 21:29:48 +03:00
vm_rpmostree status --json | \
2016-06-29 00:23:53 +03:00
python -c "
2016-06-22 00:35:51 +03:00
import sys, json
2016-06-29 00:23:53 +03:00
deployments = json.load( sys.stdin) [ \" deployments\" ]
2017-03-07 20:08:44 +03:00
idx = $idx
if idx < 0:
for i, depl in enumerate( deployments) :
if depl[ \" booted\" ] :
idx = i
if idx < 0:
2016-06-29 00:23:53 +03:00
print \" Failed to determine currently booted deployment\"
exit( 1)
2017-03-07 20:08:44 +03:00
if idx >= len( deployments) :
print \" Deployment index $idx is out of range\"
exit( 1)
depl = deployments[ idx]
if \" $key \" in depl:
data = depl[ \" $key \" ]
2016-06-29 00:23:53 +03:00
if type( data) is list:
print \" \" .join( data)
else :
print data
"
}
2017-07-04 20:50:52 +03:00
# retrieve the deployment root
# - $1 index of deployment
vm_get_deployment_root( ) {
2017-07-27 17:16:23 +03:00
local idx = $1
local csum = $( vm_get_deployment_info $idx checksum)
local serial = $( vm_get_deployment_info $idx serial)
local osname = $( vm_get_deployment_info $idx osname)
2017-07-04 20:50:52 +03:00
echo /ostree/deploy/$osname /deploy/$csum .$serial
}
2017-03-07 20:08:44 +03:00
# retrieve info from the booted deployment
# - $1 key to retrieve
vm_get_booted_deployment_info( ) {
vm_get_deployment_info -1 $1
}
2016-06-29 00:23:53 +03:00
# print the layered packages
vm_get_layered_packages( ) {
vm_get_booted_deployment_info packages
2016-06-22 00:35:51 +03:00
}
2017-02-25 00:28:47 +03:00
# print the requested packages
vm_get_requested_packages( ) {
vm_get_booted_deployment_info requested-packages
}
2017-03-07 20:08:44 +03:00
vm_get_local_packages( ) {
vm_get_booted_deployment_info requested-local-packages
}
2016-06-22 00:35:51 +03:00
# check that the packages are currently layered
# - $@ packages to check for
vm_has_layered_packages( ) {
2017-07-27 17:16:23 +03:00
local pkgs = $( vm_get_layered_packages)
2016-06-22 00:35:51 +03:00
for pkg in " $@ " ; do
if [ [ " $pkgs " != *$pkg * ] ] ; then
return 1
fi
done
}
2016-06-29 00:23:53 +03:00
2017-02-25 00:28:47 +03:00
# check that the packages are currently requested
# - $@ packages to check for
vm_has_requested_packages( ) {
2017-07-27 17:16:23 +03:00
local pkgs = $( vm_get_requested_packages)
2017-02-25 00:28:47 +03:00
for pkg in " $@ " ; do
if [ [ " $pkgs " != *$pkg * ] ] ; then
return 1
fi
done
}
2017-03-07 20:08:44 +03:00
vm_has_local_packages( ) {
2017-07-27 17:16:23 +03:00
local pkgs = $( vm_get_local_packages)
2017-03-07 20:08:44 +03:00
for pkg in " $@ " ; do
if [ [ " $pkgs " != *$pkg * ] ] ; then
return 1
fi
done
}
2017-02-25 00:28:47 +03:00
vm_has_dormant_packages( ) {
vm_has_requested_packages " $@ " && \
! vm_has_layered_packages " $@ "
}
2016-06-29 00:23:53 +03:00
# retrieve the checksum of the currently booted deployment
vm_get_booted_csum( ) {
vm_get_booted_deployment_info checksum
}
2016-07-04 18:43:02 +03:00
2017-12-21 01:46:04 +03:00
# retrieve the checksum of the pending deployment
vm_get_pending_csum( ) {
vm_get_deployment_info 0 checksum
}
2016-07-04 18:43:02 +03:00
# make multiple consistency checks on a test pkg
# - $1 package to check for
# - $2 either "present" or "absent"
vm_assert_layered_pkg( ) {
2017-07-27 17:16:23 +03:00
local pkg = $1 ; shift
local policy = $1 ; shift
2016-07-04 18:43:02 +03:00
set +e
vm_has_packages $pkg ; pkg_in_rpmdb = $?
vm_has_layered_packages $pkg ; pkg_is_layered = $?
2017-03-07 20:08:44 +03:00
vm_has_local_packages $pkg ; pkg_is_layered_local = $?
2017-02-25 00:28:47 +03:00
vm_has_requested_packages $pkg ; pkg_is_requested = $?
[ $pkg_in_rpmdb = = 0 ] && \
2017-03-07 20:08:44 +03:00
( ( [ $pkg_is_layered = = 0 ] &&
[ $pkg_is_requested = = 0 ] ) ||
[ $pkg_is_layered_local = = 0 ] ) ; pkg_present = $?
2017-02-25 00:28:47 +03:00
[ $pkg_in_rpmdb != 0 ] && \
[ $pkg_is_layered != 0 ] && \
2017-03-07 20:08:44 +03:00
[ $pkg_is_layered_local != 0 ] && \
2017-02-25 00:28:47 +03:00
[ $pkg_is_requested != 0 ] ; pkg_absent = $?
2016-07-04 18:43:02 +03:00
set -e
if [ $policy = = present ] && [ $pkg_present != 0 ] ; then
daemon/upgrader: Rework layer tracking
Working on initramfs, I hit a subtle issue with the fact that
I was trying to "redeploy", but with the origin file changed
during the process.
Previously, it was a bit unclear which parts of the upgrader logic are operating
on the *new* origin versus the "original origin".
The package layering code in the upgrader explicitly carries a delta on top in
the "add/remove" hash sets, which means it isn't visible to
`rpmostree_origin_is_locally_assembled()`.
Whereas for initramfs, I set a new origin. This broke things since we were
expecting to find a parent commit, but the original origin wasn't locally
assembled.
When looking more at this, I realized there's a far simpler model -
rather than keeping track of commit + origin, and using the origin
to try to determine whether or not the commit is layered, we can
keep track of `base_revision` and `final_revision`, and the latter
is only set if we're doing layering.
The diff speaks for itself here - a lot of fragile logic looking at the origin
drops away.
The next step here is probably to drop away the package layering hash sets, but
I'm trying to not change everything at once.
Closes: #579
Approved by: jlebon
2017-01-17 20:25:28 +03:00
vm_cmd rpm-ostree status
2016-07-04 18:43:02 +03:00
assert_not_reached " pkg $pkg is not present "
fi
if [ $policy = = absent ] && [ $pkg_absent != 0 ] ; then
daemon/upgrader: Rework layer tracking
Working on initramfs, I hit a subtle issue with the fact that
I was trying to "redeploy", but with the origin file changed
during the process.
Previously, it was a bit unclear which parts of the upgrader logic are operating
on the *new* origin versus the "original origin".
The package layering code in the upgrader explicitly carries a delta on top in
the "add/remove" hash sets, which means it isn't visible to
`rpmostree_origin_is_locally_assembled()`.
Whereas for initramfs, I set a new origin. This broke things since we were
expecting to find a parent commit, but the original origin wasn't locally
assembled.
When looking more at this, I realized there's a far simpler model -
rather than keeping track of commit + origin, and using the origin
to try to determine whether or not the commit is layered, we can
keep track of `base_revision` and `final_revision`, and the latter
is only set if we're doing layering.
The diff speaks for itself here - a lot of fragile logic looking at the origin
drops away.
The next step here is probably to drop away the package layering hash sets, but
I'm trying to not change everything at once.
Closes: #579
Approved by: jlebon
2017-01-17 20:25:28 +03:00
vm_cmd rpm-ostree status
2016-07-04 18:43:02 +03:00
assert_not_reached " pkg $pkg is not absent "
fi
}
2017-02-08 01:49:20 +03:00
vm_assert_status_jq( ) {
vm_rpmostree status --json > status.json
assert_status_file_jq status.json " $@ "
}
2017-06-29 17:06:36 +03:00
# Like build_rpm, but also sends it to the VM
vm_build_rpm( ) {
2018-01-18 20:32:43 +03:00
build_rpm " $@ "
2017-10-05 22:29:57 +03:00
vm_send_test_repo
}
2018-02-14 17:27:06 +03:00
# Like uinfo_cmd, but also sends it to the VM
vm_uinfo( ) {
uinfo_cmd " $@ "
vm_send_test_repo
}
2017-10-05 22:29:57 +03:00
# Like vm_build_rpm but takes a yumrepo mode
vm_build_rpm_repo_mode( ) {
mode = $1 ; shift
2018-01-18 20:32:43 +03:00
build_rpm " $@ "
2017-10-05 22:29:57 +03:00
vm_send_test_repo $mode
2017-06-29 17:06:36 +03:00
}
2017-09-20 23:26:29 +03:00
2017-09-18 22:11:26 +03:00
vm_build_selinux_rpm( ) {
build_selinux_rpm " $@ "
2017-10-05 22:29:57 +03:00
vm_send_test_repo
2017-09-18 22:11:26 +03:00
}
2017-09-20 23:26:29 +03:00
vm_get_journal_cursor( ) {
vm_cmd journalctl -o json -n 1 | jq -r '.["__CURSOR"]'
}
2017-09-29 20:09:38 +03:00
# Wait for a message logged after $cursor matching a regexp to appear
2017-11-20 19:45:55 +03:00
# $1 - cursor
# $2 - regex to wait for
2017-09-29 20:09:38 +03:00
vm_wait_content_after_cursor( ) {
from_cursor = $1 ; shift
regex = $1 ; shift
cat > wait.sh <<EOF
#!/usr/bin/bash
set -xeuo pipefail
tmpf = \$ ( mktemp /var/tmp/journal.XXXXXX)
for x in \$ ( seq 60) ; do
journalctl -u rpm-ostreed --after-cursor " ${ from_cursor } " > \$ { tmpf}
if grep -q -e " ${ regex } " \$ { tmpf} ; then
exit 0
else
cat \$ { tmpf}
sleep 1
fi
done
echo "timed out after 60s" 1>& 2
journalctl -u rpm-ostreed --after-cursor " ${ from_cursor } " | tail -100
exit 1
EOF
vm_cmdfile wait.sh
}
2017-09-20 23:26:29 +03:00
vm_assert_journal_has_content( ) {
from_cursor = $1 ; shift
# add an extra helping of quotes for hungry ssh
vm_cmd journalctl --after-cursor " ' $from_cursor ' " > tmp-journal.txt
assert_file_has_content tmp-journal.txt " $@ "
rm -f tmp-journal.txt
}
2017-10-13 16:20:10 +03:00
# $1 - service name
# $2 - dir to serve
# $3 - port to serve on
vm_start_httpd( ) {
local name = $1 ; shift
local dir = $1 ; shift
local port = $1 ; shift
2018-03-01 20:31:13 +03:00
# just nuke the service of the same name if it exists and is also transient
if vm_cmd systemctl show $name | grep -q UnitFileState = transient; then
vm_cmd systemctl stop $name
fi
2017-10-13 16:20:10 +03:00
# CentOS systemd is too old for -p WorkingDirectory
vm_cmd systemd-run --unit $name sh -c \
" 'cd $dir && python -m SimpleHTTPServer $port ' "
# NB: the EXIT trap is used by libtest, but not the ERR trap
trap " vm_stop_httpd $name " ERR
set -E # inherit trap
# Ideally systemd-run would support .socket units or something
vm_cmd 'while ! curl --head http://127.0.0.1:8888 &>/dev/null; do sleep 1; done'
}
# $1 - service name
vm_stop_httpd( ) {
local name = $1 ; shift
vm_cmd systemctl stop $name
set +E
trap - ERR
}