#!/usr/bin/env bash
# SPDX-License-Identifier: LGPL-2.1-or-later
set -eux
set -o pipefail

# shellcheck source=test/units/util.sh
. "$(dirname "$0")"/util.sh
systemd-analyze log-level debug

# Ensure that the init.scope.d drop-in is applied on boot
test "$(cat /sys/fs/cgroup/init.scope/memory.high)" != "max"

# Loose checks to ensure the environment has the necessary features for systemd-oomd
[[ -e /proc/pressure ]] || echo "no PSI" >>/skipped
[[ "$(get_cgroup_hierarchy)" == "unified" ]] || echo "no cgroupsv2" >>/skipped
[[ -x /usr/lib/systemd/systemd-oomd ]] || echo "no oomd" >>/skipped
if [[ -s /skipped ]]; then
    # 77 is the conventional "test skipped" exit code
    exit 77
fi
# Drop any leftover override from a previous (partial) run of this test
rm -rf /run/systemd/system/testsuite-55-testbloat.service.d

# Activate swap file if we are in a VM
if systemd-detect-virt --vm --quiet; then
    # btrfs needs a specially-created (NOCOW, unfragmented) swapfile
    if [[ "$(findmnt -n -o FSTYPE /)" == btrfs ]]; then
        btrfs filesystem mkswapfile -s 64M /swapfile
    else
        dd if=/dev/zero of=/swapfile bs=1M count=64
        chmod 0600 /swapfile
        mkswap /swapfile
    fi
    swapon /swapfile
    swapon --show
fi
# Configure oomd explicitly to avoid conflicts with distro dropins
mkdir -p /run/systemd/oomd.conf.d/
cat >/run/systemd/oomd.conf.d/99-oomd-test.conf <<EOF
[OOM]
DefaultMemoryPressureDurationSec=2s
EOF

mkdir -p /run/systemd/system/-.slice.d/
cat >/run/systemd/system/-.slice.d/99-oomd-test.conf <<EOF
[Slice]
ManagedOOMSwap=auto
EOF

mkdir -p /run/systemd/system/user@.service.d/
cat >/run/systemd/system/user@.service.d/99-oomd-test.conf <<EOF
[Service]
ManagedOOMMemoryPressure=auto
ManagedOOMMemoryPressureLimit=0%
EOF

mkdir -p /run/systemd/system/systemd-oomd.service.d/
cat >/run/systemd/system/systemd-oomd.service.d/debug.conf <<EOF
[Service]
Environment=SYSTEMD_LOG_LEVEL=debug
EOF

systemctl daemon-reload

# enable the service to ensure dbus-org.freedesktop.oom1.service exists
# and D-Bus activation works
systemctl enable systemd-oomd.service

# if oomd is already running for some reason, restart it to make sure the above settings are applied
if systemctl is-active systemd-oomd.service; then
    systemctl restart systemd-oomd.service
fi
if [[ -v ASAN_OPTIONS || -v UBSAN_OPTIONS ]]; then
    # If we're running with sanitizers, sd-executor might pull in quite a significant chunk of shared
    # libraries, which in turn causes a lot of pressure that can put us in the front when sd-oomd decides to
    # go on a killing spree. This fact is exacerbated further on Arch Linux which ships unstripped gcc-libs,
    # so sd-executor pulls in over 30M of libs on startup. Let's make the MemoryHigh= limit a bit more
    # generous when running with sanitizers to make the test happy.
    systemctl edit --runtime --stdin --drop-in=99-MemoryHigh.conf testsuite-55-testchill.service <<EOF
[Service]
MemoryHigh=60M
EOF
    # Do the same for the user instance as well
    mkdir -p /run/systemd/user/
    cp -rfv /run/systemd/system/testsuite-55-testchill.service.d/ /run/systemd/user/
else
    # Ensure that we can start services even with a very low hard memory cap without oom-kills, but skip
    # under sanitizers as they balloon memory usage.
    systemd-run -t -p MemoryMax=10M -p MemorySwapMax=0 -p MemoryZSwapMax=0 /bin/true
fi
systemctl start testsuite-55-testchill.service
systemctl start testsuite-55-testbloat.service

# Verify systemd-oomd is monitoring the expected units
# Try to avoid racing the oomctl output check by checking in a loop with a timeout
timeout 1m bash -xec 'until oomctl | grep "/testsuite-55-workload.slice"; do sleep 1; done'
oomctl | grep "/testsuite-55-workload.slice"
oomctl | grep "20.00%"
oomctl | grep "Default Memory Pressure Duration: 2s"

systemctl status testsuite-55-testchill.service

# systemd-oomd watches for elevated pressure for 2 seconds before acting.
# It can take time to build up pressure so either wait 2 minutes or for the service to fail.
for _ in {0..59}; do
    if ! systemctl status testsuite-55-testbloat.service; then
        break
    fi
    oomctl
    sleep 2
done

# testbloat should be killed and testchill should be fine
if systemctl status testsuite-55-testbloat.service; then exit 42; fi
if ! systemctl status testsuite-55-testchill.service; then exit 24; fi
# Make sure we also work correctly on user units.
loginctl enable-linger testuser

systemctl start --machine "testuser@.host" --user testsuite-55-testchill.service
systemctl start --machine "testuser@.host" --user testsuite-55-testbloat.service

# Verify systemd-oomd is monitoring the expected units
# Try to avoid racing the oomctl output check by checking in a loop with a timeout
timeout 1m bash -xec 'until oomctl | grep "/testsuite-55-workload.slice"; do sleep 1; done'
oomctl | grep -E "/user.slice.*/testsuite-55-workload.slice"
oomctl | grep "20.00%"
oomctl | grep "Default Memory Pressure Duration: 2s"

systemctl --machine "testuser@.host" --user status testsuite-55-testchill.service

# systemd-oomd watches for elevated pressure for 2 seconds before acting.
# It can take time to build up pressure so either wait 2 minutes or for the service to fail.
for _ in {0..59}; do
    if ! systemctl --machine "testuser@.host" --user status testsuite-55-testbloat.service; then
        break
    fi
    oomctl
    sleep 2
done

# testbloat should be killed and testchill should be fine
if systemctl --machine "testuser@.host" --user status testsuite-55-testbloat.service; then exit 42; fi
if ! systemctl --machine "testuser@.host" --user status testsuite-55-testchill.service; then exit 24; fi

loginctl disable-linger testuser
2021-03-05 12:36:04 +03:00
# only run this portion of the test if we can set xattrs
2023-10-19 13:28:37 +03:00
if cgroupfs_supports_user_xattrs; then
2021-03-05 12:36:04 +03:00
sleep 120 # wait for systemd-oomd kill cool down and elevated memory pressure to come down
2023-06-17 02:01:24 +03:00
mkdir -p /run/systemd/system/testsuite-55-testbloat.service.d/
2023-06-17 02:06:38 +03:00
cat >/run/systemd/system/testsuite-55-testbloat.service.d/override.conf <<EOF
[ Service]
ManagedOOMPreference = avoid
EOF
2021-03-05 12:36:04 +03:00
systemctl daemon-reload
systemctl start testsuite-55-testchill.service
systemctl start testsuite-55-testmunch.service
systemctl start testsuite-55-testbloat.service
2024-02-09 20:53:19 +03:00
for _ in { 0..59} ; do
2021-03-05 12:36:04 +03:00
if ! systemctl status testsuite-55-testmunch.service; then
break
fi
2023-06-17 02:07:32 +03:00
oomctl
2021-09-12 10:02:31 +03:00
sleep 2
2021-03-05 12:36:04 +03:00
done
# testmunch should be killed since testbloat had the avoid xattr on it
if ! systemctl status testsuite-55-testbloat.service; then exit 25; fi
if systemctl status testsuite-55-testmunch.service; then exit 43; fi
if ! systemctl status testsuite-55-testchill.service; then exit 24; fi
fi
systemd-analyze log-level info
2023-07-12 16:49:55 +03:00
touch /testok