# 2016-06-21 19:37:02 +03:00
# Source library for installed virtualized shell script tests
#
# Copyright (C) 2016 Jonathan Lebon <jlebon@redhat.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
# prepares the VM and library for action
#
# Reads:   $topsrcdir - top of the source tree, where ssh-config lives
# Exports: $SSH - ssh invocation targeting the "vmcheck" host
#          $SCP - matching scp invocation
# Exits the script if no ssh-config is present.
vm_setup() {

  # We assume that there's already a configured ssh-config
  # file available to tell us how to connect to the VM.
  if [ ! -f "${topsrcdir}/ssh-config" ]; then
    echo "ERROR: No ssh-config found." >&2
    exit 1
  fi

  # Use a control master so repeated $SSH/$SCP calls reuse one connection.
  local sshopts="-F ${topsrcdir}/ssh-config \
                 -o User=root \
                 -o ControlMaster=auto \
                 -o ControlPath=${topsrcdir}/ssh.sock \
                 -o ControlPersist=yes"
  export SSH="ssh $sshopts vmcheck"
  export SCP="scp $sshopts"
}
# sync the source tree into the VM at /var/roothome/sync
#
# Reads: $topsrcdir - directory to sync from
# Skipped entirely when .vagrant/using_sshfs exists (the tree is
# already shared into the VM via sshfs in that setup).
vm_rsync() {
  if ! test -f .vagrant/using_sshfs; then
    pushd "${topsrcdir}"
    rsync -az --no-owner --no-group -e "ssh -o User=root -F ssh-config" \
      --exclude .git/ . vmcheck:/var/roothome/sync
    popd
  fi
}
# run command in vm
# - $@ command to run
vm_cmd() {
  $SSH "$@"
}
# run rpm-ostree in vm
# - $@ args
# Leak detection is disabled since the daemon intentionally doesn't
# free memory on exit paths exercised by the tests.
vm_rpmostree() {
  $SSH env ASAN_OPTIONS=detect_leaks=false rpm-ostree "$@"
}
# copy files to a directory in the vm
# - $1 target directory
# - $2.. files & dirs to copy
vm_send() {
  dir=$1; shift
  vm_cmd mkdir -p "$dir"
  $SCP -r "$@" vmcheck:"$dir"
}
# copy the test repo to the vm
#
# Reads: $commondir - directory containing compose/yum/repo
# Wipes any previous /tmp/vmcheck in the VM, pushes the repo there,
# and installs a yum .repo file pointing at it.
vm_send_test_repo() {
  vm_cmd rm -rf /tmp/vmcheck
  vm_send /tmp/vmcheck "${commondir}/compose/yum/repo"

  cat > vmcheck.repo << EOF
[test-repo]
name=test-repo
baseurl=file:///tmp/vmcheck/repo
EOF

  vm_send /etc/yum.repos.d vmcheck.repo
}
# wait until ssh is available on the vm
# - $1 timeout in second (optional)
# - $2 previous bootid (optional)
#
# When $2 is given, waits until the VM reports a *different* boot id,
# i.e. until it has actually rebooted. Returns non-zero on timeout.
vm_ssh_wait() {
  # 'shift' on zero args fails; guard with '|| :' so callers under
  # 'set -e' can omit the optional arguments.
  timeout=${1:-0}; shift || :
  old_bootid=${1:-}; shift || :
  if ! vm_cmd true; then
    echo "Failed to log into VM, retrying with debug:"
    $SSH -o LogLevel=debug true || true
  fi
  while [ "$timeout" -gt 0 ]; do
    if bootid=$(vm_get_boot_id 2>/dev/null); then
      if [[ "$bootid" != "$old_bootid" ]]; then
        # if this is a reboot, display some info about new boot
        if [ -n "$old_bootid" ]; then
          vm_rpmostree status
          vm_rpmostree --version
        fi
        return 0
      fi
    fi
    # only log every 5 seconds to keep output readable
    if [ $((timeout % 5)) -eq 0 ]; then
      echo "Still failed to log into VM, retrying for $timeout seconds"
    fi
    timeout=$((timeout - 1))
    sleep 1
  done
  false "Timed out while waiting for SSH."
}
# print the kernel's random boot id for the current VM boot
# (changes on every reboot, so it can detect that a reboot happened)
vm_get_boot_id() {
  vm_cmd cat /proc/sys/kernel/random/boot_id
}
# Run a command in the VM that will cause a reboot
# - $@ command to run
# Syncs disks first, records the current boot id, then waits up to
# 120s for the VM to come back with a new boot id. The command itself
# is allowed to fail since the connection usually drops mid-reboot.
vm_reboot_cmd() {
  vm_cmd sync
  bootid=$(vm_get_boot_id 2>/dev/null)
  vm_cmd "$@" || :
  vm_ssh_wait 120 "$bootid"
}
# reboot the vm and wait for it to come back up
vm_reboot() {
  vm_reboot_cmd systemctl reboot
}
# check that the given files/dirs exist on the VM
# - $@ files/dirs to check for
# Returns 1 on the first missing path.
vm_has_files() {
  for file in "$@"; do
    if ! vm_cmd test -e "$file"; then
      return 1
    fi
  done
}
# check that the packages are installed
# - $@ packages to check for
# Returns 1 on the first package missing from the rpmdb.
vm_has_packages() {
  for pkg in "$@"; do
    if ! vm_cmd rpm -q "$pkg"; then
      return 1
    fi
  done
}
# retrieve info from the booted deployment
# - $1 key to retrieve
# Parses `rpm-ostree status --json`, finds the deployment marked
# "booted", and prints the value for $key (lists are space-joined).
# NOTE: print() / sys.exit() keep the inline script compatible with
# both python 2 and python 3.
vm_get_booted_deployment_info() {
  key=$1
  vm_rpmostree status --json | \
    python -c "
import sys, json
deployments = json.load(sys.stdin)[\"deployments\"]
booted = None
for deployment in deployments:
  if deployment[\"booted\"]:
    booted = deployment
    break
if not booted:
  print(\"Failed to determine currently booted deployment\")
  sys.exit(1)
if \"$key\" in booted:
  data = booted[\"$key\"]
  if type(data) is list:
    print(\" \".join(data))
  else:
    print(data)
"
}
# print the layered packages of the booted deployment
vm_get_layered_packages() {
  vm_get_booted_deployment_info packages
}
# print the requested packages of the booted deployment
vm_get_requested_packages() {
  vm_get_booted_deployment_info requested-packages
}
# check that the packages are currently layered
# - $@ packages to check for
# Substring match against the space-joined layered package list;
# returns 1 on the first package not found.
vm_has_layered_packages() {
  pkgs=$(vm_get_layered_packages)
  for pkg in "$@"; do
    if [[ "$pkgs" != *$pkg* ]]; then
      return 1
    fi
  done
}
# check that the packages are currently requested
# - $@ packages to check for
# Substring match against the space-joined requested package list;
# returns 1 on the first package not found.
vm_has_requested_packages() {
  pkgs=$(vm_get_requested_packages)
  for pkg in "$@"; do
    if [[ "$pkgs" != *$pkg* ]]; then
      return 1
    fi
  done
}
# check that packages are requested but not actually layered
# - $@ packages to check for
vm_has_dormant_packages() {
  vm_has_requested_packages "$@" && \
    ! vm_has_layered_packages "$@"
}
# retrieve the checksum of the currently booted deployment
vm_get_booted_csum() {
  vm_get_booted_deployment_info checksum
}
# make multiple consistency checks on a test pkg
# - $1 package to check for
# - $2 either "present" or "absent"
#
# "present" means the pkg is in the rpmdb AND layered AND requested;
# "absent" means it is none of the three. Anything in between (e.g.
# requested but not layered) fails both policies, which is the point:
# it catches inconsistent state. Dumps `rpm-ostree status` before
# failing to aid debugging.
vm_assert_layered_pkg() {
  pkg=$1; shift
  policy=$1; shift

  # temporarily disable errexit so we can collect all three statuses
  set +e
  vm_has_packages "$pkg";           pkg_in_rpmdb=$?
  vm_has_layered_packages "$pkg";   pkg_is_layered=$?
  vm_has_requested_packages "$pkg"; pkg_is_requested=$?
  [ $pkg_in_rpmdb == 0 ] && \
  [ $pkg_is_layered == 0 ] && \
  [ $pkg_is_requested == 0 ]; pkg_present=$?
  [ $pkg_in_rpmdb != 0 ] && \
  [ $pkg_is_layered != 0 ] && \
  [ $pkg_is_requested != 0 ]; pkg_absent=$?
  set -e

  if [ $policy == present ] && [ $pkg_present != 0 ]; then
    vm_cmd rpm-ostree status
    assert_not_reached "pkg $pkg is not present"
  fi

  if [ $policy == absent ] && [ $pkg_absent != 0 ]; then
    vm_cmd rpm-ostree status
    assert_not_reached "pkg $pkg is not absent"
  fi
}
# run jq assertions against the VM's rpm-ostree status
# - $@ jq expressions, each of which must evaluate to true
# Leaves status.json behind in the cwd for post-mortem inspection.
vm_assert_status_jq() {
  vm_rpmostree status --json > status.json
  assert_status_file_jq status.json "$@"
}