
Merge pull request #8205 from poettering/bpf-multi

bpf/cgroup improvements
Zbigniew Jędrzejewski-Szmek 2018-02-22 14:52:48 +01:00 committed by GitHub
commit 94be6463bd
15 changed files with 704 additions and 124 deletions

View File

@@ -28,6 +28,8 @@
#include "fd-util.h"
#include "log.h"
#include "missing.h"
#include "path-util.h"
#include "util.h"
int bpf_program_new(uint32_t prog_type, BPFProgram **ret) {
_cleanup_(bpf_program_unrefp) BPFProgram *p = NULL;
@@ -36,6 +38,7 @@ int bpf_program_new(uint32_t prog_type, BPFProgram **ret) {
if (!p)
return log_oom();
p->n_ref = 1;
p->prog_type = prog_type;
p->kernel_fd = -1;
@@ -44,12 +47,39 @@ int bpf_program_new(uint32_t prog_type, BPFProgram **ret) {
return 0;
}
BPFProgram *bpf_program_ref(BPFProgram *p) {
if (!p)
return NULL;
assert(p->n_ref > 0);
p->n_ref++;
return p;
}
BPFProgram *bpf_program_unref(BPFProgram *p) {
if (!p)
return NULL;
assert(p->n_ref > 0);
p->n_ref--;
if (p->n_ref > 0)
return NULL;
/* Unfortunately, the kernel currently doesn't implicitly detach BPF programs from their cgroups when the last
* fd to the BPF program is closed. This has nasty side-effects since this means that abnormally terminated
* programs that attached one of their BPF programs to a cgroup will leave these programs pinned for good with
* zero chance of recovery, until the cgroup is removed. This is particularly problematic if the cgroup in
* question is the root cgroup (or any other cgroup belonging to a service that cannot be restarted during
* operation, such as dbus), as the memory for the BPF program can only be reclaimed through a reboot. To
* counter this, we track closely to which cgroup a program was attached to and will detach it on our own
* whenever we close the BPF fd. */
(void) bpf_program_cgroup_detach(p);
safe_close(p->kernel_fd);
free(p->instructions);
free(p->attached_path);
return mfree(p);
}
@@ -58,6 +88,9 @@ int bpf_program_add_instructions(BPFProgram *p, const struct bpf_insn *instructi
assert(p);
if (p->kernel_fd >= 0) /* don't allow modification after we uploaded things to the kernel */
return -EBUSY;
if (!GREEDY_REALLOC(p->instructions, p->allocated, p->n_instructions + count))
return -ENOMEM;
@@ -72,8 +105,10 @@ int bpf_program_load_kernel(BPFProgram *p, char *log_buf, size_t log_size) {
assert(p);
if (p->kernel_fd >= 0)
return -EBUSY;
if (p->kernel_fd >= 0) { /* make this idempotent */
memzero(log_buf, log_size);
return 0;
}
attr = (union bpf_attr) {
.prog_type = p->prog_type,
@@ -93,13 +128,47 @@ int bpf_program_load_kernel(BPFProgram *p, char *log_buf, size_t log_size) {
}
int bpf_program_cgroup_attach(BPFProgram *p, int type, const char *path, uint32_t flags) {
_cleanup_free_ char *copy = NULL;
_cleanup_close_ int fd = -1;
union bpf_attr attr;
int r;
assert(p);
assert(type >= 0);
assert(path);
if (!IN_SET(flags, 0, BPF_F_ALLOW_OVERRIDE, BPF_F_ALLOW_MULTI))
return -EINVAL;
/* We need to track which cgroup the program is attached to, and we can only track one attachment, hence let's
* refuse this early. */
if (p->attached_path) {
if (!path_equal(p->attached_path, path))
return -EBUSY;
if (p->attached_type != type)
return -EBUSY;
if (p->attached_flags != flags)
return -EBUSY;
/* Here's a shortcut: if we previously attached this program already, then we don't have to do so
* again. Well, with one exception: if we are in BPF_F_ALLOW_OVERRIDE mode then someone else might have
* replaced our program since the last time, hence let's reattach it again, just to be safe. In flags
* == 0 mode this is not an issue since nobody else can replace our program in that case, and in flags
* == BPF_F_ALLOW_MULTI mode any other's program would be installed in addition to ours hence ours
* would remain in effect. */
if (flags != BPF_F_ALLOW_OVERRIDE)
return 0;
}
/* Ensure we have a kernel object for this. */
r = bpf_program_load_kernel(p, NULL, 0);
if (r < 0)
return r;
copy = strdup(path);
if (!copy)
return -ENOMEM;
fd = open(path, O_DIRECTORY|O_RDONLY|O_CLOEXEC);
if (fd < 0)
return -errno;
@@ -114,26 +183,43 @@ int bpf_program_cgroup_attach(BPFProgram *p, int type, const char *path, uint32_
if (bpf(BPF_PROG_ATTACH, &attr, sizeof(attr)) < 0)
return -errno;
free_and_replace(p->attached_path, copy);
p->attached_type = type;
p->attached_flags = flags;
return 0;
}
int bpf_program_cgroup_detach(int type, const char *path) {
_cleanup_close_ int fd = -1;
union bpf_attr attr;
assert(path);
fd = open(path, O_DIRECTORY|O_RDONLY|O_CLOEXEC);
if (fd < 0)
return -errno;
attr = (union bpf_attr) {
.attach_type = type,
.target_fd = fd,
};
if (bpf(BPF_PROG_DETACH, &attr, sizeof(attr)) < 0)
return -errno;
return 0;
}
int bpf_program_cgroup_detach(BPFProgram *p) {
_cleanup_close_ int fd = -1;
assert(p);
if (!p->attached_path)
return -EUNATCH;
fd = open(p->attached_path, O_DIRECTORY|O_RDONLY|O_CLOEXEC);
if (fd < 0) {
if (errno != ENOENT)
return -errno;
/* If the cgroup does not exist anymore, then we don't have to explicitly detach, it got detached
* implicitly by the removal, hence don't complain */
} else {
union bpf_attr attr;
attr = (union bpf_attr) {
.attach_type = p->attached_type,
.target_fd = fd,
.attach_bpf_fd = p->kernel_fd,
};
if (bpf(BPF_PROG_DETACH, &attr, sizeof(attr)) < 0)
return -errno;
}
p->attached_path = mfree(p->attached_path);
return 0;
}

View File

@@ -32,22 +32,29 @@
typedef struct BPFProgram BPFProgram;
struct BPFProgram {
unsigned n_ref;
int kernel_fd;
uint32_t prog_type;
size_t n_instructions;
size_t allocated;
struct bpf_insn *instructions;
char *attached_path;
int attached_type;
uint32_t attached_flags;
};
int bpf_program_new(uint32_t prog_type, BPFProgram **ret);
BPFProgram *bpf_program_unref(BPFProgram *p);
BPFProgram *bpf_program_ref(BPFProgram *p);
int bpf_program_add_instructions(BPFProgram *p, const struct bpf_insn *insn, size_t count);
int bpf_program_load_kernel(BPFProgram *p, char *log_buf, size_t log_size);
int bpf_program_cgroup_attach(BPFProgram *p, int type, const char *path, uint32_t flags);
int bpf_program_cgroup_detach(int type, const char *path);
int bpf_program_cgroup_detach(BPFProgram *p);
int bpf_map_new(enum bpf_map_type type, size_t key_size, size_t value_size, size_t max_entries, uint32_t flags);
int bpf_map_update_element(int fd, const void *key, void *value);
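For orientation, here is a minimal usage sketch (not part of the commit) of the reference-counted API declared above. It assumes systemd's internal headers and macros (bpf-program.h, ELEMENTSOF, and the BPF instruction macros used elsewhere in this PR); the helper name attach_accept_all() is hypothetical.

/* Hypothetical helper, for illustration only: load a trivial "return 1" (accept-all)
 * CGROUP_SKB filter and attach it to the given cgroup. The caller keeps the returned
 * reference; dropping it later with bpf_program_unref() also detaches the program,
 * thanks to the attachment tracking added in this commit. */
#include "bpf-program.h"

static BPFProgram *attach_accept_all(const char *cgroup_path) {
        const struct bpf_insn trivial[] = {
                BPF_MOV64_IMM(BPF_REG_0, 1),    /* r0 = 1: accept the packet */
                BPF_EXIT_INSN(),                /* return r0 */
        };
        BPFProgram *p = NULL;

        if (bpf_program_new(BPF_PROG_TYPE_CGROUP_SKB, &p) < 0)
                return NULL;

        /* bpf_program_cgroup_attach() loads the program into the kernel on demand and
         * records the attachment path/type/flags for the later implicit detach. */
        if (bpf_program_add_instructions(p, trivial, ELEMENTSOF(trivial)) < 0 ||
            bpf_program_cgroup_attach(p, BPF_CGROUP_INET_INGRESS, cgroup_path, 0) < 0)
                return bpf_program_unref(p); /* unref detaches again and returns NULL */

        return p;
}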

View File

@@ -453,9 +453,10 @@ static int bpf_firewall_prepare_access_maps(
return 0;
}
static int bpf_firewall_prepare_accounting_maps(bool enabled, int *fd_ingress, int *fd_egress) {
static int bpf_firewall_prepare_accounting_maps(Unit *u, bool enabled, int *fd_ingress, int *fd_egress) {
int r;
assert(u);
assert(fd_ingress);
assert(fd_egress);
@@ -476,9 +477,12 @@ static int bpf_firewall_prepare_accounting_maps(bool enabled, int *fd_ingress, i
*fd_egress = r;
}
} else {
*fd_ingress = safe_close(*fd_ingress);
*fd_egress = safe_close(*fd_egress);
zero(u->ip_accounting_extra);
}
return 0;
@@ -486,17 +490,30 @@ static int bpf_firewall_prepare_accounting_maps(bool enabled, int *fd_ingress, i
int bpf_firewall_compile(Unit *u) {
CGroupContext *cc;
int r;
int r, supported;
assert(u);
r = bpf_firewall_supported();
if (r < 0)
return r;
if (r == 0) {
cc = unit_get_cgroup_context(u);
if (!cc)
return -EINVAL;
supported = bpf_firewall_supported();
if (supported < 0)
return supported;
if (supported == BPF_FIREWALL_UNSUPPORTED) {
log_debug("BPF firewalling not supported on this manager, proceeding without.");
return -EOPNOTSUPP;
}
if (supported != BPF_FIREWALL_SUPPORTED_WITH_MULTI && u->type == UNIT_SLICE) {
/* If BPF_F_ALLOW_MULTI is not supported we don't support any BPF magic on inner nodes (i.e. on slice
* units), since that would mean leaf nodes couldn't do any BPF anymore at all. Under the assumption
* that BPF is more interesting on leaf nodes we hence avoid it on inner nodes in that case. This is
* consistent with old systemd behaviour from before v238, where BPF wasn't supported in inner nodes at
* all, either. */
log_debug("BPF_F_ALLOW_MULTI is not supported on this manager, not doing BPF firewall on slice units.");
return -EOPNOTSUPP;
}
/* Note that when we compile a new firewall we first flush out the access maps and the BPF programs themselves,
* but we reuse the accounting maps. That way the firewall in effect always maps to the actual
@@ -511,19 +528,23 @@ int bpf_firewall_compile(Unit *u) {
u->ipv6_allow_map_fd = safe_close(u->ipv6_allow_map_fd);
u->ipv6_deny_map_fd = safe_close(u->ipv6_deny_map_fd);
cc = unit_get_cgroup_context(u);
if (!cc)
return -EINVAL;
if (u->type != UNIT_SLICE) {
/* In inner nodes we only do accounting, we do not actually bother with access control. However, leaf
* nodes will incorporate all IP access rules set on all their parent nodes. This has the benefit that
* they can optionally cancel out system-wide rules. Since inner nodes can't contain processes this
* means that all configured IP access rules *will* take effect on processes, even though we never
* compile them for inner nodes. */
r = bpf_firewall_prepare_access_maps(u, ACCESS_ALLOWED, &u->ipv4_allow_map_fd, &u->ipv6_allow_map_fd);
if (r < 0)
return log_error_errno(r, "Preparation of eBPF allow maps failed: %m");
r = bpf_firewall_prepare_access_maps(u, ACCESS_DENIED, &u->ipv4_deny_map_fd, &u->ipv6_deny_map_fd);
if (r < 0)
return log_error_errno(r, "Preparation of eBPF deny maps failed: %m");
}
r = bpf_firewall_prepare_accounting_maps(cc->ip_accounting, &u->ip_accounting_ingress_map_fd, &u->ip_accounting_egress_map_fd);
r = bpf_firewall_prepare_accounting_maps(u, cc->ip_accounting, &u->ip_accounting_ingress_map_fd, &u->ip_accounting_egress_map_fd);
if (r < 0)
return log_error_errno(r, "Preparation of eBPF accounting maps failed: %m");
@@ -541,57 +562,58 @@ int bpf_firewall_compile(Unit *u) {
int bpf_firewall_install(Unit *u) {
_cleanup_free_ char *path = NULL;
CGroupContext *cc;
int r;
int r, supported;
uint32_t flags;
assert(u);
if (!u->cgroup_path)
return -EINVAL;
cc = unit_get_cgroup_context(u);
if (!cc)
return -EINVAL;
if (!u->cgroup_path)
return -EINVAL;
if (!u->cgroup_realized)
return -EINVAL;
r = bpf_firewall_supported();
if (r < 0)
return r;
if (r == 0) {
supported = bpf_firewall_supported();
if (supported < 0)
return supported;
if (supported == BPF_FIREWALL_UNSUPPORTED) {
log_debug("BPF firewalling not supported on this manager, proceeding without.");
return -EOPNOTSUPP;
}
if (supported != BPF_FIREWALL_SUPPORTED_WITH_MULTI && u->type == UNIT_SLICE) {
log_debug("BPF_F_ALLOW_MULTI is not supported on this manager, not doing BPF firewall on slice units.");
return -EOPNOTSUPP;
}
r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, u->cgroup_path, NULL, &path);
if (r < 0)
return log_error_errno(r, "Failed to determine cgroup path: %m");
if (u->ip_bpf_egress) {
r = bpf_program_load_kernel(u->ip_bpf_egress, NULL, 0);
if (r < 0)
return log_error_errno(r, "Kernel upload of egress BPF program failed: %m");
r = bpf_program_cgroup_attach(u->ip_bpf_egress, BPF_CGROUP_INET_EGRESS, path, unit_cgroup_delegate(u) ? BPF_F_ALLOW_OVERRIDE : 0);
if (r < 0)
return log_error_errno(r, "Attaching egress BPF program to cgroup %s failed: %m", path);
} else {
r = bpf_program_cgroup_detach(BPF_CGROUP_INET_EGRESS, path);
if (r < 0)
return log_full_errno(r == -ENOENT ? LOG_DEBUG : LOG_ERR, r,
"Detaching egress BPF program from cgroup failed: %m");
}
if (u->ip_bpf_ingress) {
r = bpf_program_load_kernel(u->ip_bpf_ingress, NULL, 0);
if (r < 0)
return log_error_errno(r, "Kernel upload of ingress BPF program failed: %m");
r = bpf_program_cgroup_attach(u->ip_bpf_ingress, BPF_CGROUP_INET_INGRESS, path, unit_cgroup_delegate(u) ? BPF_F_ALLOW_OVERRIDE : 0);
if (r < 0)
return log_error_errno(r, "Attaching ingress BPF program to cgroup %s failed: %m", path);
} else {
r = bpf_program_cgroup_detach(BPF_CGROUP_INET_INGRESS, path);
if (r < 0)
return log_full_errno(r == -ENOENT ? LOG_DEBUG : LOG_ERR, r,
"Detaching ingress BPF program from cgroup failed: %m");
}
flags = (supported == BPF_FIREWALL_SUPPORTED_WITH_MULTI &&
(u->type == UNIT_SLICE || unit_cgroup_delegate(u))) ? BPF_F_ALLOW_MULTI : 0;
/* Unref the old BPF program (which will implicitly detach it) right before attaching the new program, to
* minimize the time window when we don't account for IP traffic. */
u->ip_bpf_egress_installed = bpf_program_unref(u->ip_bpf_egress_installed);
u->ip_bpf_ingress_installed = bpf_program_unref(u->ip_bpf_ingress_installed);
if (u->ip_bpf_egress) {
r = bpf_program_cgroup_attach(u->ip_bpf_egress, BPF_CGROUP_INET_EGRESS, path, flags);
if (r < 0)
return log_error_errno(r, "Attaching egress BPF program to cgroup %s failed: %m", path);
/* Remember that this BPF program is installed now. */
u->ip_bpf_egress_installed = bpf_program_ref(u->ip_bpf_egress);
}
if (u->ip_bpf_ingress) {
r = bpf_program_cgroup_attach(u->ip_bpf_ingress, BPF_CGROUP_INET_INGRESS, path, flags);
if (r < 0)
return log_error_errno(r, "Attaching ingress BPF program to cgroup %s failed: %m", path);
u->ip_bpf_ingress_installed = bpf_program_ref(u->ip_bpf_ingress);
}
return 0;
@@ -640,7 +662,6 @@ int bpf_firewall_reset_accounting(int map_fd) {
return bpf_map_update_element(map_fd, &key, &value);
}
int bpf_firewall_supported(void) {
struct bpf_insn trivial[] = {
BPF_MOV64_IMM(BPF_REG_0, 1),
@@ -667,7 +688,7 @@ int bpf_firewall_supported(void) {
if (geteuid() != 0) {
log_debug("Not enough privileges, BPF firewalling is not supported.");
return supported = false;
return supported = BPF_FIREWALL_UNSUPPORTED;
}
r = cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER);
@@ -675,7 +696,7 @@ int bpf_firewall_supported(void) {
return log_error_errno(r, "Can't determine whether the unified hierarchy is used: %m");
if (r == 0) {
log_debug("Not running with unified cgroups, BPF firewalling is not supported.");
return supported = false;
return supported = BPF_FIREWALL_UNSUPPORTED;
}
fd = bpf_map_new(BPF_MAP_TYPE_LPM_TRIE,
@@ -685,26 +706,26 @@ int bpf_firewall_supported(void) {
BPF_F_NO_PREALLOC);
if (fd < 0) {
log_debug_errno(r, "Can't allocate BPF LPM TRIE map, BPF firewalling is not supported: %m");
return supported = false;
return supported = BPF_FIREWALL_UNSUPPORTED;
}
safe_close(fd);
if (bpf_program_new(BPF_PROG_TYPE_CGROUP_SKB, &program) < 0) {
log_debug_errno(r, "Can't allocate CGROUP SKB BPF program, BPF firewalling is not supported: %m");
return supported = false;
return supported = BPF_FIREWALL_UNSUPPORTED;
}
r = bpf_program_add_instructions(program, trivial, ELEMENTSOF(trivial));
if (r < 0) {
log_debug_errno(r, "Can't add trivial instructions to CGROUP SKB BPF program, BPF firewalling is not supported: %m");
return supported = false;
return supported = BPF_FIREWALL_UNSUPPORTED;
}
r = bpf_program_load_kernel(program, NULL, 0);
if (r < 0) {
log_debug_errno(r, "Can't load kernel CGROUP SKB BPF program, BPF firewalling is not supported: %m");
return supported = false;
return supported = BPF_FIREWALL_UNSUPPORTED;
}
/* Unfortunately the kernel allows us to create BPF_PROG_TYPE_CGROUP_SKB programs even when CONFIG_CGROUP_BPF
@@ -723,12 +744,44 @@ int bpf_firewall_supported(void) {
r = bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
if (r < 0) {
if (errno == EBADF) /* YAY! */
return supported = true;
log_debug_errno(errno, "Didn't get EBADF from BPF_PROG_ATTACH, BPF firewalling is not supported: %m");
} else
log_debug("Wut? kernel accepted our invalid BPF_PROG_ATTACH call? Something is weird, assuming BPF firewalling is broken and hence not supported.");
return supported = false;
if (errno != EBADF) {
log_debug_errno(errno, "Didn't get EBADF from BPF_PROG_ATTACH, BPF firewalling is not supported: %m");
return supported = BPF_FIREWALL_UNSUPPORTED;
}
/* YAY! */
} else {
log_debug("Wut? Kernel accepted our invalid BPF_PROG_ATTACH call? Something is weird, assuming BPF firewalling is broken and hence not supported.");
return supported = BPF_FIREWALL_UNSUPPORTED;
}
/* So now we know that the BPF program is generally available, let's see if BPF_F_ALLOW_MULTI is also supported
* (which was added in kernel 4.15). We use a similar logic as before, but this time we use
* BPF_F_ALLOW_MULTI. Since the flags are checked early in the system call we'll get EINVAL if it's not
* supported, and EBADF as before if it is available. */
attr = (union bpf_attr) {
.attach_type = BPF_CGROUP_INET_EGRESS,
.target_fd = -1,
.attach_bpf_fd = -1,
.attach_flags = BPF_F_ALLOW_MULTI,
};
r = bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
if (r < 0) {
if (errno == EBADF) {
log_debug_errno(errno, "Got EBADF when using BPF_F_ALLOW_MULTI, which indicates it is supported. Yay!");
return supported = BPF_FIREWALL_SUPPORTED_WITH_MULTI;
}
if (errno == EINVAL)
log_debug_errno(errno, "Got EINVAL error when using BPF_F_ALLOW_MULTI, which indicates it's not supported.");
else
log_debug_errno(errno, "Got unexpected error when using BPF_F_ALLOW_MULTI, assuming it's not supported: %m");
return supported = BPF_FIREWALL_SUPPORTED;
} else {
log_debug("Wut? Kernel accepted our invalid BPF_PROG_ATTACH+BPF_F_ALLOW_MULTI call? Something is weird, assuming BPF firewalling is broken and hence not supported.");
return supported = BPF_FIREWALL_UNSUPPORTED;
}
}
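The feature probe above can also be reproduced outside of systemd. The following standalone sketch (an illustration, not part of the commit) issues the same deliberately invalid BPF_PROG_ATTACH with BPF_F_ALLOW_MULTI through the raw bpf(2) syscall; it assumes kernel headers that already define BPF_F_ALLOW_MULTI (Linux 4.15+) and is best run as root, mirroring the geteuid() check above.

/* Standalone illustration of the BPF_F_ALLOW_MULTI probe: the flags are validated before
 * the file descriptors, so EINVAL means the flag is unknown to the kernel, while EBADF
 * means only the bogus fds were rejected and the flag itself is supported. */
#include <errno.h>
#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void) {
        union bpf_attr attr;
        long r;

        memset(&attr, 0, sizeof(attr));
        attr.attach_type = BPF_CGROUP_INET_EGRESS;
        attr.target_fd = (__u32) -1;
        attr.attach_bpf_fd = (__u32) -1;
        attr.attach_flags = BPF_F_ALLOW_MULTI;

        r = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
        if (r < 0 && errno == EBADF)
                puts("BPF_F_ALLOW_MULTI looks supported");
        else if (r < 0 && errno == EINVAL)
                puts("BPF_F_ALLOW_MULTI looks unsupported");
        else
                printf("unexpected result (r=%ld)\n", r);
        return 0;
}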

View File

@@ -24,6 +24,12 @@
#include "unit.h"
enum {
BPF_FIREWALL_UNSUPPORTED = 0,
BPF_FIREWALL_SUPPORTED = 1,
BPF_FIREWALL_SUPPORTED_WITH_MULTI = 2,
};
int bpf_firewall_supported(void);
int bpf_firewall_compile(Unit *u);

View File

@@ -693,20 +693,14 @@ static void cgroup_apply_unified_memory_limit(Unit *u, const char *file, uint64_
}
static void cgroup_apply_firewall(Unit *u) {
int r;
assert(u);
if (u->type == UNIT_SLICE) /* Skip this for slice units, they are inner cgroup nodes, and since bpf/cgroup is
* not recursive we don't ever touch the bpf on them */
return;
r = bpf_firewall_compile(u);
if (r < 0)
return;
(void) bpf_firewall_install(u);
return;
}
static void cgroup_apply_firewall(Unit *u) {
assert(u);
/* Best-effort: let's apply IP firewalling and/or accounting if that's enabled */
if (bpf_firewall_compile(u) < 0)
return;
(void) bpf_firewall_install(u);
}
static void cgroup_context_apply(
@@ -1227,11 +1221,6 @@ bool unit_get_needs_bpf(Unit *u) {
Unit *p;
assert(u);
/* We never attach BPF to slice units, as they are inner cgroup nodes and cgroup/BPF is not recursive at the
* moment. */
if (u->type == UNIT_SLICE)
return false;
c = unit_get_cgroup_context(u);
if (!c)
return false;
@@ -2564,13 +2553,6 @@ int unit_get_ip_accounting(
assert(metric < _CGROUP_IP_ACCOUNTING_METRIC_MAX);
assert(ret);
/* IP accounting is currently not recursive, and hence we refuse to return any data for slice nodes. Slices are
* inner cgroup nodes and hence have no processes directly attached, hence their counters would be zero
* anyway. And if we block this now we can later open this up, if the kernel learns recursive BPF cgroup
* filters. */
if (u->type == UNIT_SLICE)
return -ENODATA;
if (!UNIT_CGROUP_BOOL(u, ip_accounting))
return -ENODATA;

View File

@@ -1167,7 +1167,7 @@ int bus_cgroup_set_property(
r = bpf_firewall_supported();
if (r < 0)
return r;
if (r == 0) {
if (r == BPF_FIREWALL_UNSUPPORTED) {
static bool warned = false;
log_full(warned ? LOG_DEBUG : LOG_WARNING,

View File

@@ -156,7 +156,7 @@ int config_parse_ip_address_access(
r = bpf_firewall_supported();
if (r < 0)
return r;
if (r == 0) {
if (r == BPF_FIREWALL_UNSUPPORTED) {
static bool warned = false;
log_full(warned ? LOG_DEBUG : LOG_WARNING,

View File

@@ -98,15 +98,15 @@ static const MountPoint mount_table[] = {
#endif
{ "tmpfs", "/run", "tmpfs", "mode=755", MS_NOSUID|MS_NODEV|MS_STRICTATIME,
NULL, MNT_FATAL|MNT_IN_CONTAINER },
{ "cgroup", "/sys/fs/cgroup", "cgroup2", "nsdelegate", MS_NOSUID|MS_NOEXEC|MS_NODEV,
{ "cgroup2", "/sys/fs/cgroup", "cgroup2", "nsdelegate", MS_NOSUID|MS_NOEXEC|MS_NODEV,
cg_is_unified_wanted, MNT_IN_CONTAINER|MNT_CHECK_WRITABLE },
{ "cgroup", "/sys/fs/cgroup", "cgroup2", NULL, MS_NOSUID|MS_NOEXEC|MS_NODEV,
{ "cgroup2", "/sys/fs/cgroup", "cgroup2", NULL, MS_NOSUID|MS_NOEXEC|MS_NODEV,
cg_is_unified_wanted, MNT_IN_CONTAINER|MNT_CHECK_WRITABLE },
{ "tmpfs", "/sys/fs/cgroup", "tmpfs", "mode=755", MS_NOSUID|MS_NOEXEC|MS_NODEV|MS_STRICTATIME,
cg_is_legacy_wanted, MNT_FATAL|MNT_IN_CONTAINER },
{ "cgroup", "/sys/fs/cgroup/unified", "cgroup2", "nsdelegate", MS_NOSUID|MS_NOEXEC|MS_NODEV,
{ "cgroup2", "/sys/fs/cgroup/unified", "cgroup2", "nsdelegate", MS_NOSUID|MS_NOEXEC|MS_NODEV,
cg_is_hybrid_wanted, MNT_IN_CONTAINER|MNT_CHECK_WRITABLE },
{ "cgroup", "/sys/fs/cgroup/unified", "cgroup2", NULL, MS_NOSUID|MS_NOEXEC|MS_NODEV,
{ "cgroup2", "/sys/fs/cgroup/unified", "cgroup2", NULL, MS_NOSUID|MS_NOEXEC|MS_NODEV,
cg_is_hybrid_wanted, MNT_IN_CONTAINER|MNT_CHECK_WRITABLE },
{ "cgroup", "/sys/fs/cgroup/systemd", "cgroup", "none,name=systemd,xattr", MS_NOSUID|MS_NOEXEC|MS_NODEV,
cg_is_legacy_wanted, MNT_IN_CONTAINER },
@@ -118,6 +118,8 @@ static const MountPoint mount_table[] = {
{ "efivarfs", "/sys/firmware/efi/efivars", "efivarfs", NULL, MS_NOSUID|MS_NOEXEC|MS_NODEV,
is_efi_boot, MNT_NONE },
#endif
{ "bpf", "/sys/fs/bpf", "bpf", NULL, MS_NOSUID|MS_NOEXEC|MS_NODEV,
NULL, MNT_NONE, },
};
/* These are API file systems that might be mounted by other software,
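The new mount_table[] entry above is what makes /sys/fs/bpf appear at boot. As a rough standalone equivalent (an illustration, not systemd code), the same mount can be performed with mount(2):

/* Illustrative only: mount the BPF pseudo filesystem with the same options as the new
 * mount_table[] entry ("bpf" on /sys/fs/bpf, nosuid/noexec/nodev). Requires root. */
#include <stdio.h>
#include <sys/mount.h>

int main(void) {
        if (mount("bpf", "/sys/fs/bpf", "bpf", MS_NOSUID | MS_NOEXEC | MS_NODEV, NULL) < 0)
                perror("mount /sys/fs/bpf");
        return 0;
}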

View File

@@ -106,6 +106,7 @@ static const MountEntry protect_kernel_tunables_table[] = {
{ "/sys", READONLY, false },
{ "/sys/kernel/debug", READONLY, true },
{ "/sys/kernel/tracing", READONLY, true },
{ "/sys/fs/bpf", READONLY, true },
{ "/sys/fs/cgroup", READWRITE, false }, /* READONLY is set by ProtectControlGroups= option */
{ "/sys/fs/selinux", READWRITE, true },
};

View File

@@ -1521,7 +1521,7 @@ static int socket_address_listen_in_cgroup(
r = bpf_firewall_supported();
if (r < 0)
return r;
if (r == 0) /* If BPF firewalling isn't supported anyway — there's no point in this forking complexity */
if (r == BPF_FIREWALL_UNSUPPORTED) /* If BPF firewalling isn't supported anyway — there's no point in this forking complexity */
goto shortcut;
if (socketpair(AF_UNIX, SOCK_SEQPACKET|SOCK_CLOEXEC, 0, pair) < 0)
@@ -2865,7 +2865,7 @@ static int socket_accept_in_cgroup(Socket *s, SocketPort *p, int fd) {
r = bpf_firewall_supported();
if (r < 0)
return r;
if (r == 0)
if (r == BPF_FIREWALL_UNSUPPORTED)
goto shortcut;
if (socketpair(AF_UNIX, SOCK_SEQPACKET|SOCK_CLOEXEC, 0, pair) < 0)

View File

@@ -659,7 +659,9 @@ void unit_free(Unit *u) {
safe_close(u->ipv6_deny_map_fd);
bpf_program_unref(u->ip_bpf_ingress);
bpf_program_unref(u->ip_bpf_ingress_installed);
bpf_program_unref(u->ip_bpf_egress);
bpf_program_unref(u->ip_bpf_egress_installed);
condition_free_list(u->conditions);
condition_free_list(u->asserts);

View File

@@ -287,8 +287,8 @@ struct Unit {
int ipv4_deny_map_fd;
int ipv6_deny_map_fd;
BPFProgram *ip_bpf_ingress;
BPFProgram *ip_bpf_egress;
BPFProgram *ip_bpf_ingress, *ip_bpf_ingress_installed;
BPFProgram *ip_bpf_egress, *ip_bpf_egress_installed;
uint64_t ip_accounting_extra[_CGROUP_IP_ACCOUNTING_METRIC_MAX];

View File

@@ -1,11 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
#ifndef __LINUX_BPF_H__
#define __LINUX_BPF_H__
#ifndef _UAPI__LINUX_BPF_H__
#define _UAPI__LINUX_BPF_H__
#include <linux/types.h>
#include <linux/bpf_common.h>
@@ -16,7 +17,7 @@
#define BPF_ALU64 0x07 /* alu mode in double word width */
/* ld/ldx fields */
#define BPF_DW 0x18 /* double word */
#define BPF_DW 0x18 /* double word (64-bit) */
#define BPF_XADD 0xc0 /* exclusive add */
/* alu/jmp fields */
@@ -30,9 +31,14 @@
#define BPF_FROM_LE BPF_TO_LE
#define BPF_FROM_BE BPF_TO_BE
/* jmp encodings */
#define BPF_JNE 0x50 /* jump != */
#define BPF_JLT 0xa0 /* LT is unsigned, '<' */
#define BPF_JLE 0xb0 /* LE is unsigned, '<=' */
#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */
#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */
#define BPF_JSLT 0xc0 /* SLT is signed, '<' */
#define BPF_JSLE 0xd0 /* SLE is signed, '<=' */
#define BPF_CALL 0x80 /* function call */
#define BPF_EXIT 0x90 /* function return */
@@ -82,6 +88,12 @@ enum bpf_cmd {
BPF_PROG_ATTACH,
BPF_PROG_DETACH,
BPF_PROG_TEST_RUN,
BPF_PROG_GET_NEXT_ID,
BPF_MAP_GET_NEXT_ID,
BPF_PROG_GET_FD_BY_ID,
BPF_MAP_GET_FD_BY_ID,
BPF_OBJ_GET_INFO_BY_FD,
BPF_PROG_QUERY,
};
enum bpf_map_type {
@@ -99,6 +111,9 @@ enum bpf_map_type {
BPF_MAP_TYPE_LPM_TRIE,
BPF_MAP_TYPE_ARRAY_OF_MAPS,
BPF_MAP_TYPE_HASH_OF_MAPS,
BPF_MAP_TYPE_DEVMAP,
BPF_MAP_TYPE_SOCKMAP,
BPF_MAP_TYPE_CPUMAP,
};
enum bpf_prog_type {
@@ -115,22 +130,65 @@ enum bpf_prog_type {
BPF_PROG_TYPE_LWT_IN,
BPF_PROG_TYPE_LWT_OUT,
BPF_PROG_TYPE_LWT_XMIT,
BPF_PROG_TYPE_SOCK_OPS,
BPF_PROG_TYPE_SK_SKB,
BPF_PROG_TYPE_CGROUP_DEVICE,
};
enum bpf_attach_type {
BPF_CGROUP_INET_INGRESS,
BPF_CGROUP_INET_EGRESS,
BPF_CGROUP_INET_SOCK_CREATE,
BPF_CGROUP_SOCK_OPS,
BPF_SK_SKB_STREAM_PARSER,
BPF_SK_SKB_STREAM_VERDICT,
BPF_CGROUP_DEVICE,
__MAX_BPF_ATTACH_TYPE
};
#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
/* If BPF_F_ALLOW_OVERRIDE flag is used in BPF_PROG_ATTACH command
* to the given target_fd cgroup the descendent cgroup will be able to
* override effective bpf program that was inherited from this cgroup
/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
*
* NONE(default): No further bpf programs allowed in the subtree.
*
* BPF_F_ALLOW_OVERRIDE: If a sub-cgroup installs some bpf program,
* the program in this cgroup yields to sub-cgroup program.
*
* BPF_F_ALLOW_MULTI: If a sub-cgroup installs some bpf program,
* that cgroup program gets run in addition to the program in this cgroup.
*
* Only one program is allowed to be attached to a cgroup with
* NONE or BPF_F_ALLOW_OVERRIDE flag.
* Attaching another program on top of NONE or BPF_F_ALLOW_OVERRIDE will
* release old program and attach the new one. Attach flags has to match.
*
* Multiple programs are allowed to be attached to a cgroup with
* BPF_F_ALLOW_MULTI flag. They are executed in FIFO order
* (those that were attached first, run first)
* The programs of sub-cgroup are executed first, then programs of
* this cgroup and then programs of parent cgroup.
* When children program makes decision (like picking TCP CA or sock bind)
* parent program has a chance to override it.
*
* A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
* A cgroup with NONE doesn't allow any programs in sub-cgroups.
* Ex1:
* cgrp1 (MULTI progs A, B) ->
* cgrp2 (OVERRIDE prog C) ->
* cgrp3 (MULTI prog D) ->
* cgrp4 (OVERRIDE prog E) ->
* cgrp5 (NONE prog F)
* the event in cgrp5 triggers execution of F,D,A,B in that order.
* if prog F is detached, the execution is E,D,A,B
* if prog F and D are detached, the execution is E,A,B
* if prog F, E and D are detached, the execution is C,A,B
*
* All eligible programs are executed regardless of return code from
* earlier programs.
*/
#define BPF_F_ALLOW_OVERRIDE (1U << 0)
#define BPF_F_ALLOW_MULTI (1U << 1)
/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
* verifier will perform strict alignment checking as if the kernel
@@ -139,13 +197,20 @@ enum bpf_attach_type {
*/
#define BPF_F_STRICT_ALIGNMENT (1U << 0)
/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
#define BPF_PSEUDO_MAP_FD 1
/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
* offset to another bpf function
*/
#define BPF_PSEUDO_CALL 1
/* flags for BPF_MAP_UPDATE_ELEM command */
#define BPF_ANY 0 /* create new element or update existing */
#define BPF_NOEXIST 1 /* create new element if it didn't exist */
#define BPF_EXIST 2 /* update existing element */
/* flags for BPF_MAP_CREATE command */
#define BPF_F_NO_PREALLOC (1U << 0)
/* Instead of having one common LRU list in the
* BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list
@@ -154,6 +219,17 @@ enum bpf_attach_type {
* across different LRU lists.
*/
#define BPF_F_NO_COMMON_LRU (1U << 1)
/* Specify numa node during map creation */
#define BPF_F_NUMA_NODE (1U << 2)
/* flags for BPF_PROG_QUERY */
#define BPF_F_QUERY_EFFECTIVE (1U << 0)
#define BPF_OBJ_NAME_LEN 16U
/* Flags for accessing BPF object */
#define BPF_F_RDONLY (1U << 3)
#define BPF_F_WRONLY (1U << 4)
union bpf_attr {
struct { /* anonymous struct used by BPF_MAP_CREATE command */
@@ -161,8 +237,15 @@ union bpf_attr {
__u32 key_size; /* size of key in bytes */
__u32 value_size; /* size of value in bytes */
__u32 max_entries; /* max number of entries in a map */
__u32 map_flags; /* prealloc or not */
__u32 map_flags; /* BPF_MAP_CREATE related
* flags defined above.
*/
__u32 inner_map_fd; /* fd pointing to the inner map */
__u32 numa_node; /* numa node (effective only if
* BPF_F_NUMA_NODE is set).
*/
char map_name[BPF_OBJ_NAME_LEN];
__u32 map_ifindex; /* ifindex of netdev to create on */
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -185,11 +268,14 @@ union bpf_attr {
__aligned_u64 log_buf; /* user supplied buffer */
__u32 kern_version; /* checked when prog_type=kprobe */
__u32 prog_flags;
char prog_name[BPF_OBJ_NAME_LEN];
__u32 prog_ifindex; /* ifindex of netdev to prep for */
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
__aligned_u64 pathname;
__u32 bpf_fd;
__u32 file_flags;
};
struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */
@@ -209,6 +295,31 @@ union bpf_attr {
__u32 repeat;
__u32 duration;
} test;
struct { /* anonymous struct used by BPF_*_GET_*_ID */
union {
__u32 start_id;
__u32 prog_id;
__u32 map_id;
};
__u32 next_id;
__u32 open_flags;
};
struct { /* anonymous struct used by BPF_OBJ_GET_INFO_BY_FD */
__u32 bpf_fd;
__u32 info_len;
__aligned_u64 info;
} info;
struct { /* anonymous struct used by BPF_PROG_QUERY command */
__u32 target_fd; /* container object to query */
__u32 attach_type;
__u32 query_flags;
__u32 attach_flags;
__aligned_u64 prog_ids;
__u32 prog_cnt;
} query;
} __attribute__((aligned(8)));
/* BPF helper function descriptions:
@@ -272,7 +383,7 @@ union bpf_attr {
* jump into another BPF program
* @ctx: context pointer passed to next program
* @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY
* @index: index inside array that selects specific program to run
* @index: 32-bit index inside array that selects specific program to run
* Return: 0 on success or negative error
*
* int bpf_clone_redirect(skb, ifindex, flags)
@@ -313,26 +424,40 @@ union bpf_attr {
* @flags: room for future extensions
* Return: 0 on success or negative error
*
* u64 bpf_perf_event_read(&map, index)
* Return: Number events read or error code
* u64 bpf_perf_event_read(map, flags)
* read perf event counter value
* @map: pointer to perf_event_array map
* @flags: index of event in the map or bitmask flags
* Return: value of perf event counter read or error code
*
* int bpf_redirect(ifindex, flags)
* redirect to another netdev
* @ifindex: ifindex of the net device
* @flags: bit 0 - if set, redirect to ingress instead of egress
* other bits - reserved
* Return: TC_ACT_REDIRECT
* @flags:
* cls_bpf:
* bit 0 - if set, redirect to ingress instead of egress
* other bits - reserved
* xdp_bpf:
* all bits - reserved
* Return: cls_bpf: TC_ACT_REDIRECT on success or TC_ACT_SHOT on error
* xdp_bfp: XDP_REDIRECT on success or XDP_ABORT on error
* int bpf_redirect_map(map, key, flags)
* redirect to endpoint in map
* @map: pointer to dev map
* @key: index in map to lookup
* @flags: --
* Return: XDP_REDIRECT on success or XDP_ABORT on error
*
* u32 bpf_get_route_realm(skb)
* retrieve a dst's tclassid
* @skb: pointer to skb
* Return: realm if != 0
*
* int bpf_perf_event_output(ctx, map, index, data, size)
* int bpf_perf_event_output(ctx, map, flags, data, size)
* output perf raw sample
* @ctx: struct pt_regs*
* @map: pointer to perf_event_array map
* @index: index of event in the map
* @flags: index of event in the map or bitmask flags
* @data: data on stack to be output as raw data
* @size: size of data
* Return: 0 on success or negative error
@@ -490,6 +615,87 @@ union bpf_attr {
* Get the owner uid of the socket stored inside sk_buff.
* @skb: pointer to skb
* Return: uid of the socket owner on success or overflowuid if failed.
*
* u32 bpf_set_hash(skb, hash)
* Set full skb->hash.
* @skb: pointer to skb
* @hash: hash to set
*
* int bpf_setsockopt(bpf_socket, level, optname, optval, optlen)
* Calls setsockopt. Not all opts are available, only those with
* integer optvals plus TCP_CONGESTION.
* Supported levels: SOL_SOCKET and IPPROTO_TCP
* @bpf_socket: pointer to bpf_socket
* @level: SOL_SOCKET or IPPROTO_TCP
* @optname: option name
* @optval: pointer to option value
* @optlen: length of optval in bytes
* Return: 0 or negative error
*
* int bpf_getsockopt(bpf_socket, level, optname, optval, optlen)
* Calls getsockopt. Not all opts are available.
* Supported levels: IPPROTO_TCP
* @bpf_socket: pointer to bpf_socket
* @level: IPPROTO_TCP
* @optname: option name
* @optval: pointer to option value
* @optlen: length of optval in bytes
* Return: 0 or negative error
*
* int bpf_sock_ops_cb_flags_set(bpf_sock_ops, flags)
* Set callback flags for sock_ops
* @bpf_sock_ops: pointer to bpf_sock_ops_kern struct
* @flags: flags value
* Return: 0 for no error
* -EINVAL if there is no full tcp socket
* bits in flags that are not supported by current kernel
*
* int bpf_skb_adjust_room(skb, len_diff, mode, flags)
* Grow or shrink room in sk_buff.
* @skb: pointer to skb
* @len_diff: (signed) amount of room to grow/shrink
* @mode: operation mode (enum bpf_adj_room_mode)
* @flags: reserved for future use
* Return: 0 on success or negative error code
*
* int bpf_sk_redirect_map(map, key, flags)
* Redirect skb to a sock in map using key as a lookup key for the
* sock in map.
* @map: pointer to sockmap
* @key: key to lookup sock in map
* @flags: reserved for future use
* Return: SK_PASS
*
* int bpf_sock_map_update(skops, map, key, flags)
* @skops: pointer to bpf_sock_ops
* @map: pointer to sockmap to update
* @key: key to insert/update sock in map
* @flags: same flags as map update elem
*
* int bpf_xdp_adjust_meta(xdp_md, delta)
* Adjust the xdp_md.data_meta by delta
* @xdp_md: pointer to xdp_md
* @delta: An positive/negative integer to be added to xdp_md.data_meta
* Return: 0 on success or negative on error
*
* int bpf_perf_event_read_value(map, flags, buf, buf_size)
* read perf event counter value and perf event enabled/running time
* @map: pointer to perf_event_array map
* @flags: index of event in the map or bitmask flags
* @buf: buf to fill
* @buf_size: size of the buf
* Return: 0 on success or negative error code
*
* int bpf_perf_prog_read_value(ctx, buf, buf_size)
* read perf prog attached perf event counter and enabled/running time
* @ctx: pointer to ctx
* @buf: buf to fill
* @buf_size: size of the buf
* Return : 0 on success or negative error code
*
* int bpf_override_return(pt_regs, rc)
* @pt_regs: pointer to struct pt_regs
* @rc: the return value to set
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -539,7 +745,19 @@ union bpf_attr {
FN(xdp_adjust_head), \
FN(probe_read_str), \
FN(get_socket_cookie), \
FN(get_socket_uid),
FN(get_socket_uid), \
FN(set_hash), \
FN(setsockopt), \
FN(skb_adjust_room), \
FN(redirect_map), \
FN(sk_redirect_map), \
FN(sock_map_update), \
FN(xdp_adjust_meta), \
FN(perf_event_read_value), \
FN(perf_prog_read_value), \
FN(getsockopt), \
FN(override_return), \
FN(sock_ops_cb_flags_set),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -583,12 +801,19 @@ enum bpf_func_id {
#define BPF_F_ZERO_CSUM_TX (1ULL << 1)
#define BPF_F_DONT_FRAGMENT (1ULL << 2)
/* BPF_FUNC_perf_event_output and BPF_FUNC_perf_event_read flags. */
/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
* BPF_FUNC_perf_event_read_value flags.
*/
#define BPF_F_INDEX_MASK 0xffffffffULL
#define BPF_F_CURRENT_CPU BPF_F_INDEX_MASK
/* BPF_FUNC_perf_event_output for sk_buff input context. */
#define BPF_F_CTXLEN_MASK (0xfffffULL << 32)
/* Mode for BPF_FUNC_skb_adjust_room helper. */
enum bpf_adj_room_mode {
BPF_ADJ_ROOM_NET,
};
/* user accessible mirror of in-kernel sk_buff.
* new fields can only be added to the end of this structure
*/
@@ -611,6 +836,18 @@ struct __sk_buff {
__u32 data;
__u32 data_end;
__u32 napi_id;
/* Accessed by BPF_PROG_TYPE_sk_skb types from here to ... */
__u32 family;
__u32 remote_ip4; /* Stored in network byte order */
__u32 local_ip4; /* Stored in network byte order */
__u32 remote_ip6[4]; /* Stored in network byte order */
__u32 local_ip6[4]; /* Stored in network byte order */
__u32 remote_port; /* Stored in network byte order */
__u32 local_port; /* stored in host byte order */
/* ... here. */
__u32 data_meta;
};
struct bpf_tunnel_key {
@@ -646,20 +883,23 @@ struct bpf_sock {
__u32 family;
__u32 type;
__u32 protocol;
__u32 mark;
__u32 priority;
};
#define XDP_PACKET_HEADROOM 256
/* User return codes for XDP prog type.
* A valid XDP program must return one of these defined values. All other
* return codes are reserved for future use. Unknown return codes will result
* in packet drop.
* return codes are reserved for future use. Unknown return codes will
* result in packet drops and a warning via bpf_warn_invalid_xdp_action().
*/
enum xdp_action {
XDP_ABORTED = 0,
XDP_DROP,
XDP_PASS,
XDP_TX,
XDP_REDIRECT,
};
/* user accessible metadata for XDP packet hook
@@ -668,6 +908,202 @@ enum xdp_action {
struct xdp_md {
__u32 data;
__u32 data_end;
__u32 data_meta;
/* Below access go through struct xdp_rxq_info */
__u32 ingress_ifindex; /* rxq->dev->ifindex */
__u32 rx_queue_index; /* rxq->queue_index */
};
#endif /* __LINUX_BPF_H__ */
enum sk_action {
SK_DROP = 0,
SK_PASS,
};
#define BPF_TAG_SIZE 8
struct bpf_prog_info {
__u32 type;
__u32 id;
__u8 tag[BPF_TAG_SIZE];
__u32 jited_prog_len;
__u32 xlated_prog_len;
__aligned_u64 jited_prog_insns;
__aligned_u64 xlated_prog_insns;
__u64 load_time; /* ns since boottime */
__u32 created_by_uid;
__u32 nr_map_ids;
__aligned_u64 map_ids;
char name[BPF_OBJ_NAME_LEN];
__u32 ifindex;
__u64 netns_dev;
__u64 netns_ino;
} __attribute__((aligned(8)));
struct bpf_map_info {
__u32 type;
__u32 id;
__u32 key_size;
__u32 value_size;
__u32 max_entries;
__u32 map_flags;
char name[BPF_OBJ_NAME_LEN];
__u32 ifindex;
__u64 netns_dev;
__u64 netns_ino;
} __attribute__((aligned(8)));
/* User bpf_sock_ops struct to access socket values and specify request ops
* and their replies.
* Some of this fields are in network (bigendian) byte order and may need
* to be converted before use (bpf_ntohl() defined in samples/bpf/bpf_endian.h).
* New fields can only be added at the end of this structure
*/
struct bpf_sock_ops {
__u32 op;
union {
__u32 args[4]; /* Optionally passed to bpf program */
__u32 reply; /* Returned by bpf program */
__u32 replylong[4]; /* Optionally returned by bpf prog */
};
__u32 family;
__u32 remote_ip4; /* Stored in network byte order */
__u32 local_ip4; /* Stored in network byte order */
__u32 remote_ip6[4]; /* Stored in network byte order */
__u32 local_ip6[4]; /* Stored in network byte order */
__u32 remote_port; /* Stored in network byte order */
__u32 local_port; /* stored in host byte order */
__u32 is_fullsock; /* Some TCP fields are only valid if
* there is a full socket. If not, the
* fields read as zero.
*/
__u32 snd_cwnd;
__u32 srtt_us; /* Averaged RTT << 3 in usecs */
__u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */
__u32 state;
__u32 rtt_min;
__u32 snd_ssthresh;
__u32 rcv_nxt;
__u32 snd_nxt;
__u32 snd_una;
__u32 mss_cache;
__u32 ecn_flags;
__u32 rate_delivered;
__u32 rate_interval_us;
__u32 packets_out;
__u32 retrans_out;
__u32 total_retrans;
__u32 segs_in;
__u32 data_segs_in;
__u32 segs_out;
__u32 data_segs_out;
__u32 lost_out;
__u32 sacked_out;
__u32 sk_txhash;
__u64 bytes_received;
__u64 bytes_acked;
};
/* Definitions for bpf_sock_ops_cb_flags */
#define BPF_SOCK_OPS_RTO_CB_FLAG (1<<0)
#define BPF_SOCK_OPS_RETRANS_CB_FLAG (1<<1)
#define BPF_SOCK_OPS_STATE_CB_FLAG (1<<2)
#define BPF_SOCK_OPS_ALL_CB_FLAGS 0x7 /* Mask of all currently
* supported cb flags
*/
/* List of known BPF sock_ops operators.
* New entries can only be added at the end
*/
enum {
BPF_SOCK_OPS_VOID,
BPF_SOCK_OPS_TIMEOUT_INIT, /* Should return SYN-RTO value to use or
* -1 if default value should be used
*/
BPF_SOCK_OPS_RWND_INIT, /* Should return initial advertized
* window (in packets) or -1 if default
* value should be used
*/
BPF_SOCK_OPS_TCP_CONNECT_CB, /* Calls BPF program right before an
* active connection is initialized
*/
BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB, /* Calls BPF program when an
* active connection is
* established
*/
BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB, /* Calls BPF program when a
* passive connection is
* established
*/
BPF_SOCK_OPS_NEEDS_ECN, /* If connection's congestion control
* needs ECN
*/
BPF_SOCK_OPS_BASE_RTT, /* Get base RTT. The correct value is
* based on the path and may be
* dependent on the congestion control
* algorithm. In general it indicates
* a congestion threshold. RTTs above
* this indicate congestion
*/
BPF_SOCK_OPS_RTO_CB, /* Called when an RTO has triggered.
* Arg1: value of icsk_retransmits
* Arg2: value of icsk_rto
* Arg3: whether RTO has expired
*/
BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted.
* Arg1: sequence number of 1st byte
* Arg2: # segments
* Arg3: return value of
* tcp_transmit_skb (0 => success)
*/
BPF_SOCK_OPS_STATE_CB, /* Called when TCP changes state.
* Arg1: old_state
* Arg2: new_state
*/
};
/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
* changes between the TCP and BPF versions. Ideally this should never happen.
* If it does, we need to add code to convert them before calling
* the BPF sock_ops function.
*/
enum {
BPF_TCP_ESTABLISHED = 1,
BPF_TCP_SYN_SENT,
BPF_TCP_SYN_RECV,
BPF_TCP_FIN_WAIT1,
BPF_TCP_FIN_WAIT2,
BPF_TCP_TIME_WAIT,
BPF_TCP_CLOSE,
BPF_TCP_CLOSE_WAIT,
BPF_TCP_LAST_ACK,
BPF_TCP_LISTEN,
BPF_TCP_CLOSING, /* Now a valid state */
BPF_TCP_NEW_SYN_RECV,
BPF_TCP_MAX_STATES /* Leave at the end! */
};
#define TCP_BPF_IW 1001 /* Set TCP initial congestion window */
#define TCP_BPF_SNDCWND_CLAMP 1002 /* Set sndcwnd_clamp */
struct bpf_perf_event_value {
__u64 counter;
__u64 enabled;
__u64 running;
};
#define BPF_DEVCG_ACC_MKNOD (1ULL << 0)
#define BPF_DEVCG_ACC_READ (1ULL << 1)
#define BPF_DEVCG_ACC_WRITE (1ULL << 2)
#define BPF_DEVCG_DEV_BLOCK (1ULL << 0)
#define BPF_DEVCG_DEV_CHAR (1ULL << 1)
struct bpf_cgroup_dev_ctx {
/* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
__u32 access_type;
__u32 major;
__u32 minor;
};
#endif /* _UAPI__LINUX_BPF_H__ */

View File

@@ -71,12 +71,17 @@ int main(int argc, char *argv[]) {
}
r = bpf_firewall_supported();
if (r == 0) {
if (r == BPF_FIREWALL_UNSUPPORTED) {
log_notice("BPF firewalling not supported, skipping");
return EXIT_TEST_SKIP;
}
assert_se(r > 0);
if (r == BPF_FIREWALL_SUPPORTED_WITH_MULTI)
log_notice("BPF firewalling with BPF_F_ALLOW_MULTI supported. Yay!");
else
log_notice("BPF firewalling (though without BPF_F_ALLOW_MULTI) supported. Good.");
r = bpf_program_load_kernel(p, log_buf, ELEMENTSOF(log_buf));
assert(r >= 0);

View File

@@ -12,7 +12,7 @@ ExecStart=/bin/sh -c 'test -f /var/lib/private/waldo/yay'
ExecStart=/bin/sh -c 'test -f /var/lib/private/quux/pief/yayyay'
# Make sure that /var/lib/private/waldo is really the only writable directory besides the obvious candidates
ExecStart=/bin/sh -x -c 'test $$(find / -type d -writable 2> /dev/null | egrep -v -e \'^(/var/tmp$$|/tmp$$|/proc/|/dev/mqueue$$|/dev/shm$$)\' | sort -u | tr -d '\\\\n') = /var/lib/private/quux/pief/var/lib/private/waldo'
ExecStart=/bin/sh -x -c 'test $$(find / -type d -writable 2> /dev/null | egrep -v -e \'^(/var/tmp$$|/tmp$$|/proc/|/dev/mqueue$$|/dev/shm$$|/sys/fs/bpf$$)\' | sort -u | tr -d '\\\\n') = /var/lib/private/quux/pief/var/lib/private/waldo'
Type=oneshot
DynamicUser=yes