# Makefile for boot module
#
# Copyright (C) 2013 Colin Walters <walters@verbum.org>
#
# SPDX-License-Identifier: LGPL-2.0+
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <https://www.gnu.org/licenses/>.
if BUILDOPT_DRACUT
# Not using $(libdir) here is intentional, dracut modules go in prefix/lib
dracutmoddir = $(prefix)/lib/dracut/modules.d/98ostree
dracutmod_SCRIPTS = src/boot/dracut/module-setup.sh
endif
if BUILDOPT_DRACUT_CONF
dracutconfdir = $(sysconfdir)/dracut.conf.d
dracutconf_DATA = src/boot/dracut/ostree.conf
endif
if BUILDOPT_MKINITCPIO
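# mkinitcpio (Arch Linux initramfs generator) hook script and its configuration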
mkinitcpioinstalldir = $(prefix)/lib/initcpio/install
mkinitcpioinstall_SCRIPTS = src/boot/mkinitcpio/ostree
mkinitcpioconfdir = $(sysconfdir)
mkinitcpioconf_DATA = src/boot/mkinitcpio/ostree-mkinitcpio.conf
endif
if BUILDOPT_SYSTEMD
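# ostree-boot-complete.service re-emits (then clears) any finalization error
# recorded by ostree-finalize-staged.service during the previous boot, so
# staging failures are visible via `systemctl --failed`.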
systemdsystemunit_DATA = src/boot/ostree-prepare-root.service \
src/boot/ostree-remount.service \
src/boot/ostree-boot-complete.service \
src/boot/ostree-finalize-staged.service \
src/boot/ostree-finalize-staged.path \
src/boot/ostree-finalize-staged-hold.service \
$(NULL)
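# As with the dracut module above, tmpfiles.d snippets intentionally go under
# prefix/lib rather than $(libdir)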
systemdtmpfilesdir = $(prefix)/lib/tmpfiles.d
dist_systemdtmpfiles_DATA = src/boot/ostree-tmpfiles.conf
# Allow the distcheck install under $prefix test to pass
AM_DISTCHECK_CONFIGURE_FLAGS += --with-systemdsystemunitdir='$${libdir}/systemd/system'
endif
if !BUILDOPT_BUILTIN_GRUB2_MKCONFIG
# We're using the system grub2-mkconfig generator
pkglibexec_SCRIPTS += src/boot/grub2/grub2-15_ostree
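# Symlink the packaged script into the system grub.d directory so that
# grub2-mkconfig picks it up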
install-grub2-config-hook:
	mkdir -p $(DESTDIR)$(grub2configdir)
	ln -sf $(pkglibexecdir)/grub2-15_ostree $(DESTDIR)$(grub2configdir)/15_ostree
grub2configdir = $(sysconfdir)/grub.d
INSTALL_DATA_HOOKS += install-grub2-config-hook
else
# We're using our internal generator
ostree_boot_SCRIPTS += src/boot/grub2/ostree-grub-generator
endif
EXTRA_DIST += src/boot/dracut/module-setup.sh \
src/boot/dracut/ostree.conf \
src/boot/mkinitcpio \
src/boot/ostree-boot-complete.service \
src/boot/ostree-prepare-root.service \
src/boot/ostree-finalize-staged.path \
src/boot/ostree-remount.service \
src/boot/ostree-finalize-staged.service \
src/boot/ostree-finalize-staged-hold.service \
src/boot/grub2/grub2-15_ostree \
src/boot/grub2/ostree-grub-generator \
$(NULL)