bpf-next-for-netdev
-----BEGIN PGP SIGNATURE-----
iHUEABYIAB0WIQTFp0I1jqZrAX+hPRXbK58LschIgwUCZDhSiwAKCRDbK58LschI
g8cbAQCH4xrquOeDmYyGXFQGchHZAIj++tKg8ABU4+hYeJtrlwEA6D4W6wjoSZRk
mLSptZ9qro8yZA86BvyPvlBT1h9ELQA=
=StAc
-----END PGP SIGNATURE-----

Daniel Borkmann says:

====================
pull-request: bpf-next 2023-04-13

We've added 260 non-merge commits during the last 36 day(s) which contain
a total of 356 files changed, 21786 insertions(+), 11275 deletions(-).

The main changes are:

1) Rework BPF verifier log behavior and implement it as a rotating log
   by default with the option to retain old-style fixed log behavior,
   from Andrii Nakryiko.

2) Adds support for using {FOU,GUE} encap with an ipip device operating
   in collect_md mode and add a set of BPF kfuncs for controlling encap
   params, from Christian Ehrig.

3) Allow BPF programs to detect at load time whether a particular kfunc
   exists or not, and also add support for this in light skeleton,
   from Alexei Starovoitov.

4) Optimize hashmap lookups when key size is multiple of 4,
   from Anton Protopopov.

5) Enable RCU semantics for task BPF kptrs and allow referenced kptr
   tasks to be stored in BPF maps, from David Vernet.

6) Add support for stashing local BPF kptr into a map value via
   bpf_kptr_xchg(). This is useful e.g. for rbtree node creation
   for new cgroups, from Dave Marchevsky.

7) Fix BTF handling of is_int_ptr to skip modifiers to work around
   tracing issues where a program cannot be attached, from Feng Zhou.

8) Migrate a big portion of test_verifier unit tests over to
   test_progs -a verifier_* via inline asm to ease {read,debug}ability,
   from Eduard Zingerman.

9) Several updates to the instruction-set.rst documentation which is
   subject to future IETF standardization
   (https://lwn.net/Articles/926882/), from Dave Thaler.

10) Fix BPF verifier in the __reg_bound_offset's 64->32 tnum sub-register
    known bits information propagation, from Daniel Borkmann.

11) Add skb bitfield compaction work related to BPF with the overall goal
    to make more of the sk_buff bits optional, from Jakub Kicinski.

12) BPF selftest cleanups for build id extraction which stand on its own
    from the upcoming integration work of build id into struct file object,
    from Jiri Olsa.

13) Add fixes and optimizations for xsk descriptor validation and several
    selftest improvements for xsk sockets, from Kal Conley.

14) Add BPF links for struct_ops and enable switching implementations
    of BPF TCP cong-ctls under a given name by replacing backing
    struct_ops map, from Kui-Feng Lee.

15) Remove a misleading BPF verifier env->bypass_spec_v1 check on variable
    offset stack read as earlier Spectre checks cover this,
    from Luis Gerhorst.

16) Fix issues in copy_from_user_nofault() for BPF and other tracers
    to resemble copy_from_user_nmi() from safety PoV, from Florian Lehner
    and Alexei Starovoitov.

17) Add --json-summary option to test_progs in order for CI tooling to
    ease parsing of test results, from Manu Bretelle.

18) Batch of improvements and refactoring to prep for upcoming
    bpf_local_storage conversion to bpf_mem_cache_{alloc,free} allocator,
    from Martin KaFai Lau.

19) Improve bpftool's visual program dump which produces the control
    flow graph in a DOT format by adding C source inline annotations,
    from Quentin Monnet.

20) Fix attaching fentry/fexit/fmod_ret/lsm to modules by extracting
    the module name from BTF of the target and searching kallsyms of
    the correct module, from Viktor Malik.

21) Improve BPF verifier handling of '<const> <cond> <non_const>'
    to better detect whether in particular jmp32 branches are taken,
    from Yonghong Song.

22) Allow BPF TCP cong-ctls to write app_limited of struct tcp_sock.
    A built-in cc or one from a kernel module is already able to write
    to app_limited, from Yixin Shen.

Conflicts:

Documentation/bpf/bpf_devel_QA.rst
  b7abcd9c656b ("bpf, doc: Link to submitting-patches.rst for general patch submission info")
  0f10f647f455 ("bpf, docs: Use internal linking for link to netdev subsystem doc")
https://lore.kernel.org/all/20230307095812.236eb1be@canb.auug.org.au/

include/net/ip_tunnels.h
  bc9d003dc48c3 ("ip_tunnel: Preserve pointer const in ip_tunnel_info_opts")
  ac931d4cdec3d ("ipip,ip_tunnel,sit: Add FOU support for externally controlled ipip devices")
https://lore.kernel.org/all/20230413161235.4093777-1-broonie@kernel.org/

net/bpf/test_run.c
  e5995bc7e2ba ("bpf, test_run: fix crashes due to XDP frame overwriting/corruption")
  294635a8165a ("bpf, test_run: fix &xdp_frame misplacement for LIVE_FRAMES")
https://lore.kernel.org/all/20230320102619.05b80a98@canb.auug.org.au/
====================

Link: https://lore.kernel.org/r/20230413191525.7295-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

@@ -128,7 +128,8 @@ into the bpf-next tree will make their way into net-next tree. net and
net-next are both run by David S. Miller. From there, they will go
into the kernel mainline tree run by Linus Torvalds. To read up on the
process of net and net-next being merged into the mainline tree, see
the `netdev-FAQ`_.
the documentation on netdev subsystem at
Documentation/process/maintainer-netdev.rst.

@@ -147,7 +148,8 @@ request)::
Q: How do I indicate which tree (bpf vs. bpf-next) my patch should be applied to?
---------------------------------------------------------------------------------

A: The process is the very same as described in the `netdev-FAQ`_,
A: The process is the very same as described in the netdev subsystem
documentation at Documentation/process/maintainer-netdev.rst,
so please read up on it. The subject line must indicate whether the
patch is a fix or rather "next-like" content in order to let the
maintainers know whether it is targeted at bpf or bpf-next.
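For example, the subject prefix names the target tree (the patch titles below
are made up purely for illustration)::

  [PATCH bpf]      bpf: Fix out-of-bounds read in ...
  [PATCH bpf-next] bpf, docs: Describe ...

Fixes go to ``bpf``, while new features, cleanups and documentation updates
go to ``bpf-next``.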

@@ -206,8 +208,9 @@ ii) run extensive BPF test suite and
Once the BPF pull request was accepted by David S. Miller, then
the patches end up in net or net-next tree, respectively, and
make their way from there further into mainline. Again, see the
`netdev-FAQ`_ for additional information e.g. on how often they are
merged to mainline.
documentation for netdev subsystem at
Documentation/process/maintainer-netdev.rst for additional information
e.g. on how often they are merged to mainline.

Q: How long do I need to wait for feedback on my BPF patches?
-------------------------------------------------------------

@@ -230,7 +233,8 @@ Q: Are patches applied to bpf-next when the merge window is open?
-----------------------------------------------------------------
A: For the time when the merge window is open, bpf-next will not be
processed. This is roughly analogous to net-next patch processing,
so feel free to read up on the `netdev-FAQ`_ about further details.
so feel free to read up on the netdev docs at
Documentation/process/maintainer-netdev.rst about further details.

During those two weeks of merge window, we might ask you to resend
your patch series once bpf-next is open again. Once Linus released

@@ -394,7 +398,8 @@ netdev kernel mailing list in Cc and ask for the fix to be queued up:
netdev@vger.kernel.org

The process in general is the same as on netdev itself, see also the
`netdev-FAQ`_.
the documentation on networking subsystem at
Documentation/process/maintainer-netdev.rst.

Q: Do you also backport to kernels not currently maintained as stable?
----------------------------------------------------------------------

@@ -410,7 +415,7 @@ Q: The BPF patch I am about to submit needs to go to stable as well
What should I do?

A: The same rules apply as with netdev patch submissions in general, see
the `netdev-FAQ`_.
the netdev docs at Documentation/process/maintainer-netdev.rst.

Never add "``Cc: stable@vger.kernel.org``" to the patch description, but
ask the BPF maintainers to queue the patches instead. This can be done

@@ -684,7 +689,6 @@ when:

.. Links
.. _netdev-FAQ: https://www.kernel.org/doc/html/latest/process/maintainer-netdev.html
.. _selftests:
   https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/tools/testing/selftests/bpf/

@@ -20,6 +20,12 @@ Arithmetic instructions
For CPU versions prior to 3, Clang v7.0 and later can enable ``BPF_ALU`` support with
``-Xclang -target-feature -Xclang +alu32``. In CPU version 3, support is automatically included.

Jump instructions
=================

If ``-O0`` is used, Clang will generate the ``BPF_CALL | BPF_X | BPF_JMP`` (0x8d)
instruction, which is not supported by the Linux kernel verifier.
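In practice this means BPF C code should be built with optimization enabled.
A typical invocation (the file names are only illustrative) is::

  $ clang -O2 -g -target bpf -c prog.bpf.c -o prog.bpf.o

With optimization enabled, calls to helpers are emitted as direct ``BPF_CALL``
instructions that the verifier accepts.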

Atomic operations
=================
@ -117,12 +117,7 @@ For example:
|
||||
As mentioned and illustrated above, these ``struct bpf_cpumask *`` objects can
|
||||
also be stored in a map and used as kptrs. If a ``struct bpf_cpumask *`` is in
|
||||
a map, the reference can be removed from the map with bpf_kptr_xchg(), or
|
||||
opportunistically acquired with bpf_cpumask_kptr_get():
|
||||
|
||||
.. kernel-doc:: kernel/bpf/cpumask.c
|
||||
:identifiers: bpf_cpumask_kptr_get
|
||||
|
||||
Here is an example of a ``struct bpf_cpumask *`` being retrieved from a map:
|
||||
opportunistically acquired using RCU:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
@ -144,7 +139,7 @@ Here is an example of a ``struct bpf_cpumask *`` being retrieved from a map:
|
||||
/**
|
||||
* A simple example tracepoint program showing how a
|
||||
* struct bpf_cpumask * kptr that is stored in a map can
|
||||
* be acquired using the bpf_cpumask_kptr_get() kfunc.
|
||||
* be passed to kfuncs using RCU protection.
|
||||
*/
|
||||
SEC("tp_btf/cgroup_mkdir")
|
||||
int BPF_PROG(cgrp_ancestor_example, struct cgroup *cgrp, const char *path)
|
||||
@ -158,26 +153,21 @@ Here is an example of a ``struct bpf_cpumask *`` being retrieved from a map:
|
||||
if (!v)
|
||||
return -ENOENT;
|
||||
|
||||
bpf_rcu_read_lock();
|
||||
/* Acquire a reference to the bpf_cpumask * kptr that's already stored in the map. */
|
||||
kptr = bpf_cpumask_kptr_get(&v->cpumask);
|
||||
if (!kptr)
|
||||
kptr = v->cpumask;
|
||||
if (!kptr) {
|
||||
/* If no bpf_cpumask was present in the map, it's because
|
||||
* we're racing with another CPU that removed it with
|
||||
* bpf_kptr_xchg() between the bpf_map_lookup_elem()
|
||||
* above, and our call to bpf_cpumask_kptr_get().
|
||||
* bpf_cpumask_kptr_get() internally safely handles this
|
||||
* race, and will return NULL if the cpumask is no longer
|
||||
* present in the map by the time we invoke the kfunc.
|
||||
* above, and our load of the pointer from the map.
|
||||
*/
|
||||
bpf_rcu_read_unlock();
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
/* Free the reference we just took above. Note that the
|
||||
* original struct bpf_cpumask * kptr is still in the map. It will
|
||||
* be freed either at a later time if another context deletes
|
||||
* it from the map, or automatically by the BPF subsystem if
|
||||
* it's still present when the map is destroyed.
|
||||
*/
|
||||
bpf_cpumask_release(kptr);
|
||||
bpf_cpumask_setall(kptr);
|
||||
bpf_rcu_read_unlock();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -11,7 +11,8 @@ Documentation conventions
|
||||
=========================
|
||||
|
||||
For brevity, this document uses the type notion "u64", "u32", etc.
|
||||
to mean an unsigned integer whose width is the specified number of bits.
|
||||
to mean an unsigned integer whose width is the specified number of bits,
|
||||
and "s32", etc. to mean a signed integer of the specified number of bits.
|
||||
|
||||
Registers and calling convention
|
||||
================================
|
||||
@ -242,28 +243,58 @@ Jump instructions
|
||||
otherwise identical operations.
|
||||
The 'code' field encodes the operation as below:
|
||||
|
||||
======== ===== ========================= ============
|
||||
code value description notes
|
||||
======== ===== ========================= ============
|
||||
BPF_JA 0x00 PC += off BPF_JMP only
|
||||
BPF_JEQ 0x10 PC += off if dst == src
|
||||
BPF_JGT 0x20 PC += off if dst > src unsigned
|
||||
BPF_JGE 0x30 PC += off if dst >= src unsigned
|
||||
BPF_JSET 0x40 PC += off if dst & src
|
||||
BPF_JNE 0x50 PC += off if dst != src
|
||||
BPF_JSGT 0x60 PC += off if dst > src signed
|
||||
BPF_JSGE 0x70 PC += off if dst >= src signed
|
||||
BPF_CALL 0x80 function call
|
||||
BPF_EXIT 0x90 function / program return BPF_JMP only
|
||||
BPF_JLT 0xa0 PC += off if dst < src unsigned
|
||||
BPF_JLE 0xb0 PC += off if dst <= src unsigned
|
||||
BPF_JSLT 0xc0 PC += off if dst < src signed
|
||||
BPF_JSLE 0xd0 PC += off if dst <= src signed
|
||||
======== ===== ========================= ============
|
||||
======== ===== === =========================================== =========================================
|
||||
code value src description notes
|
||||
======== ===== === =========================================== =========================================
|
||||
BPF_JA 0x0 0x0 PC += offset BPF_JMP only
|
||||
BPF_JEQ 0x1 any PC += offset if dst == src
|
||||
BPF_JGT 0x2 any PC += offset if dst > src unsigned
|
||||
BPF_JGE 0x3 any PC += offset if dst >= src unsigned
|
||||
BPF_JSET 0x4 any PC += offset if dst & src
|
||||
BPF_JNE 0x5 any PC += offset if dst != src
|
||||
BPF_JSGT 0x6 any PC += offset if dst > src signed
|
||||
BPF_JSGE 0x7 any PC += offset if dst >= src signed
|
||||
BPF_CALL 0x8 0x0 call helper function by address see `Helper functions`_
|
||||
BPF_CALL 0x8 0x1 call PC += offset see `Program-local functions`_
|
||||
BPF_CALL 0x8 0x2 call helper function by BTF ID see `Helper functions`_
|
||||
BPF_EXIT 0x9 0x0 return BPF_JMP only
|
||||
BPF_JLT 0xa any PC += offset if dst < src unsigned
|
||||
BPF_JLE 0xb any PC += offset if dst <= src unsigned
|
||||
BPF_JSLT 0xc any PC += offset if dst < src signed
|
||||
BPF_JSLE 0xd any PC += offset if dst <= src signed
|
||||
======== ===== === =========================================== =========================================
|
||||
|
||||
The eBPF program needs to store the return value into register R0 before doing a
|
||||
BPF_EXIT.
|
||||
``BPF_EXIT``.
|
||||
|
||||
Example:
|
||||
|
||||
``BPF_JSGE | BPF_X | BPF_JMP32`` (0x7e) means::
|
||||
|
||||
if (s32)dst s>= (s32)src goto +offset
|
||||
|
||||
where 's>=' indicates a signed '>=' comparison.
|
||||
|
||||
Helper functions
~~~~~~~~~~~~~~~~

Helper functions are a concept whereby BPF programs can call into a
set of function calls exposed by the underlying platform.

Historically, each helper function was identified by an address
encoded in the imm field. The available helper functions may differ
for each program type, but address values are unique across all program types.

Platforms that support the BPF Type Format (BTF) support identifying
a helper function by a BTF ID encoded in the imm field, where the BTF ID
identifies the helper name and type.
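As a sketch of the first (address/number based) form, using the C definitions
from the Linux UAPI header for concreteness (the helper-to-number mapping is
platform specific; on Linux the value 1 happens to identify
``bpf_map_lookup_elem``), a call to helper number 1 is a single instruction::

  #include <linux/bpf.h>          /* struct bpf_insn, BPF_JMP, BPF_CALL */

  const struct bpf_insn call_helper_1 = {
          .code    = BPF_JMP | BPF_CALL,  /* 0x85 */
          .dst_reg = 0,
          .src_reg = 0,                   /* 0x0: helper function by address/number */
          .off     = 0,
          .imm     = 1,                   /* which helper to call */
  };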

Program-local functions
~~~~~~~~~~~~~~~~~~~~~~~
Program-local functions are functions exposed by the same BPF program as the
caller, and are referenced by offset from the call instruction, similar to
``BPF_JA``. A ``BPF_EXIT`` within the program-local function will return to
the caller.
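Using the same informal pseudocode as the examples above, a small program whose
entry point calls a program-local function could look like this (the
instruction indices on the left are only illustrative)::

    0:  r1 = 1
    1:  call +1     # src = 0x1: call the program-local function at instruction 3
    2:  exit        # execution resumes here once the callee runs BPF_EXIT
    3:  r0 = r1     # body of the program-local function
    4:  exit        # returns to instruction 2

As with other jump instructions, the relative target is counted from the
instruction following the call.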
|
||||
|
||||
Load and store instructions
|
||||
===========================
|
||||
@ -385,14 +416,56 @@ and loaded back to ``R0``.
|
||||
-----------------------------
|
||||
|
||||
Instructions with the ``BPF_IMM`` 'mode' modifier use the wide instruction
|
||||
encoding for an extra imm64 value.
|
||||
encoding defined in `Instruction encoding`_, and use the 'src' field of the
|
||||
basic instruction to hold an opcode subtype.
|
||||
|
||||
There is currently only one such instruction.
|
||||
The following table defines a set of ``BPF_IMM | BPF_DW | BPF_LD`` instructions
|
||||
with opcode subtypes in the 'src' field, using new terms such as "map"
|
||||
defined further below:
|
||||
|
||||
``BPF_LD | BPF_DW | BPF_IMM`` means::
|
||||
========================= ====== === ========================================= =========== ==============
|
||||
opcode construction opcode src pseudocode imm type dst type
|
||||
========================= ====== === ========================================= =========== ==============
|
||||
BPF_IMM | BPF_DW | BPF_LD 0x18 0x0 dst = imm64 integer integer
|
||||
BPF_IMM | BPF_DW | BPF_LD 0x18 0x1 dst = map_by_fd(imm) map fd map
|
||||
BPF_IMM | BPF_DW | BPF_LD 0x18 0x2 dst = map_val(map_by_fd(imm)) + next_imm map fd data pointer
|
||||
BPF_IMM | BPF_DW | BPF_LD 0x18 0x3 dst = var_addr(imm) variable id data pointer
|
||||
BPF_IMM | BPF_DW | BPF_LD 0x18 0x4 dst = code_addr(imm) integer code pointer
|
||||
BPF_IMM | BPF_DW | BPF_LD 0x18 0x5 dst = map_by_idx(imm) map index map
|
||||
BPF_IMM | BPF_DW | BPF_LD 0x18 0x6 dst = map_val(map_by_idx(imm)) + next_imm map index data pointer
|
||||
========================= ====== === ========================================= =========== ==============
|
||||
|
||||
dst = imm64
|
||||
where
|
||||
|
||||
* map_by_fd(imm) means to convert a 32-bit file descriptor into an address of a map (see `Maps`_)
|
||||
* map_by_idx(imm) means to convert a 32-bit index into an address of a map
|
||||
* map_val(map) gets the address of the first value in a given map
|
||||
* var_addr(imm) gets the address of a platform variable (see `Platform Variables`_) with a given id
|
||||
* code_addr(imm) gets the address of the instruction at a specified relative offset in number of (64-bit) instructions
|
||||
* the 'imm type' can be used by disassemblers for display
|
||||
* the 'dst type' can be used for verification and JIT compilation purposes
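As a concrete sketch of the 'dst = map_by_fd(imm)' row using the Linux UAPI
definitions (the map file descriptor is a placeholder; on Linux this 'src'
value is known as BPF_PSEUDO_MAP_FD), a loader emits the two halves of the
wide instruction as two ``struct bpf_insn`` slots::

  #include <linux/bpf.h>

  /* dst = map_by_fd(imm): opcode 0x18, src 0x1, imm = map file descriptor */
  void emit_ld_map_fd(struct bpf_insn insn[2], int map_fd)
  {
          insn[0] = (struct bpf_insn) {
                  .code    = BPF_LD | BPF_DW | BPF_IMM,   /* 0x18 */
                  .dst_reg = BPF_REG_1,
                  .src_reg = 1,           /* 0x1: interpret imm as a map fd */
                  .imm     = map_fd,      /* low 32 bits of the pseudo value */
          };
          insn[1] = (struct bpf_insn) {
                  .imm     = 0,           /* next_imm: upper 32 bits, unused here */
          };
  }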
|
||||
|
||||
Maps
|
||||
~~~~
|
||||
|
||||
Maps are shared memory regions accessible by eBPF programs on some platforms.
|
||||
A map can have various semantics as defined in a separate document, and may or
|
||||
may not have a single contiguous memory region, but the 'map_val(map)' is
|
||||
currently only defined for maps that do have a single contiguous memory region.
|
||||
|
||||
Each map can have a file descriptor (fd) if supported by the platform, where
|
||||
'map_by_fd(imm)' means to get the map with the specified file descriptor. Each
|
||||
BPF program can also be defined to use a set of maps associated with the
|
||||
program at load time, and 'map_by_idx(imm)' means to get the map with the given
|
||||
index in the set associated with the BPF program containing the instruction.
|
||||
|
||||
Platform Variables
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Platform variables are memory regions, identified by integer ids, exposed by
|
||||
the runtime and accessible by BPF programs on some platforms. The
|
||||
'var_addr(imm)' operation means to get the address of the memory region
|
||||
identified by the given id.
|
||||
|
||||
Legacy BPF Packet access instructions
|
||||
-------------------------------------
|
||||
|
@@ -179,9 +179,10 @@ both are orthogonal to each other.
---------------------

The KF_RELEASE flag is used to indicate that the kfunc releases the pointer
passed in to it. There can be only one referenced pointer that can be passed in.
All copies of the pointer being released are invalidated as a result of invoking
kfunc with this flag.
passed in to it. There can be only one referenced pointer that can be passed
in. All copies of the pointer being released are invalidated as a result of
invoking kfunc with this flag. KF_RELEASE kfuncs automatically receive the
protection afforded by the KF_TRUSTED_ARGS flag described below.
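On the kernel side the flag is set where the kfunc is registered in its BTF ID
set. The following sketch is modeled on the task kfuncs; the set name and the
companion flags are illustrative rather than a quote of the actual source:

.. code-block:: c

        BTF_SET8_START(example_kfunc_btf_ids)
        BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
        BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
        BTF_SET8_END(example_kfunc_btf_ids)

        static const struct btf_kfunc_id_set example_kfunc_set = {
                .owner = THIS_MODULE,
                .set   = &example_kfunc_btf_ids,
        };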
|
||||
|
||||
2.4.4 KF_KPTR_GET flag
|
||||
----------------------
|
||||
@ -470,13 +471,50 @@ struct_ops callback arg. For example:
|
||||
struct task_struct *acquired;
|
||||
|
||||
acquired = bpf_task_acquire(task);
|
||||
if (acquired)
|
||||
/*
|
||||
* In a typical program you'd do something like store
|
||||
* the task in a map, and the map will automatically
|
||||
* release it later. Here, we release it manually.
|
||||
*/
|
||||
bpf_task_release(acquired);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
References acquired on ``struct task_struct *`` objects are RCU protected.
|
||||
Therefore, when in an RCU read region, you can obtain a pointer to a task
|
||||
embedded in a map value without having to acquire a reference:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
|
||||
private(TASK) static struct task_struct *global;
|
||||
|
||||
/**
|
||||
* A trivial example showing how to access a task stored
|
||||
* in a map using RCU.
|
||||
*/
|
||||
SEC("tp_btf/task_newtask")
|
||||
int BPF_PROG(task_rcu_read_example, struct task_struct *task, u64 clone_flags)
|
||||
{
|
||||
struct task_struct *local_copy;
|
||||
|
||||
bpf_rcu_read_lock();
|
||||
local_copy = global;
|
||||
if (local_copy)
|
||||
/*
|
||||
* We could also pass local_copy to kfuncs or helper functions here,
|
||||
* as we're guaranteed that local_copy will be valid until we exit
|
||||
* the RCU read region below.
|
||||
*/
|
||||
bpf_printk("Global task %s is valid", local_copy->comm);
|
||||
else
|
||||
bpf_printk("No global task found");
|
||||
bpf_rcu_read_unlock();
|
||||
|
||||
/* At this point we can no longer reference local_copy. */
|
||||
|
||||
/*
|
||||
* In a typical program you'd do something like store
|
||||
* the task in a map, and the map will automatically
|
||||
* release it later. Here, we release it manually.
|
||||
*/
|
||||
bpf_task_release(acquired);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -534,74 +572,6 @@ bpf_task_release() respectively, so we won't provide examples for them.
|
||||
|
||||
----
|
||||
|
||||
You may also acquire a reference to a ``struct cgroup`` kptr that's already
|
||||
stored in a map using bpf_cgroup_kptr_get():
|
||||
|
||||
.. kernel-doc:: kernel/bpf/helpers.c
|
||||
:identifiers: bpf_cgroup_kptr_get
|
||||
|
||||
Here's an example of how it can be used:
|
||||
|
||||
.. code-block:: c
|
||||
|
||||
/* struct containing the struct task_struct kptr which is actually stored in the map. */
|
||||
struct __cgroups_kfunc_map_value {
|
||||
struct cgroup __kptr * cgroup;
|
||||
};
|
||||
|
||||
/* The map containing struct __cgroups_kfunc_map_value entries. */
|
||||
struct {
|
||||
__uint(type, BPF_MAP_TYPE_HASH);
|
||||
__type(key, int);
|
||||
__type(value, struct __cgroups_kfunc_map_value);
|
||||
__uint(max_entries, 1);
|
||||
} __cgroups_kfunc_map SEC(".maps");
|
||||
|
||||
/* ... */
|
||||
|
||||
/**
|
||||
* A simple example tracepoint program showing how a
|
||||
* struct cgroup kptr that is stored in a map can
|
||||
* be acquired using the bpf_cgroup_kptr_get() kfunc.
|
||||
*/
|
||||
SEC("tp_btf/cgroup_mkdir")
|
||||
int BPF_PROG(cgroup_kptr_get_example, struct cgroup *cgrp, const char *path)
|
||||
{
|
||||
struct cgroup *kptr;
|
||||
struct __cgroups_kfunc_map_value *v;
|
||||
s32 id = cgrp->self.id;
|
||||
|
||||
/* Assume a cgroup kptr was previously stored in the map. */
|
||||
v = bpf_map_lookup_elem(&__cgroups_kfunc_map, &id);
|
||||
if (!v)
|
||||
return -ENOENT;
|
||||
|
||||
/* Acquire a reference to the cgroup kptr that's already stored in the map. */
|
||||
kptr = bpf_cgroup_kptr_get(&v->cgroup);
|
||||
if (!kptr)
|
||||
/* If no cgroup was present in the map, it's because
|
||||
* we're racing with another CPU that removed it with
|
||||
* bpf_kptr_xchg() between the bpf_map_lookup_elem()
|
||||
* above, and our call to bpf_cgroup_kptr_get().
|
||||
* bpf_cgroup_kptr_get() internally safely handles this
|
||||
* race, and will return NULL if the task is no longer
|
||||
* present in the map by the time we invoke the kfunc.
|
||||
*/
|
||||
return -EBUSY;
|
||||
|
||||
/* Free the reference we just took above. Note that the
|
||||
* original struct cgroup kptr is still in the map. It will
|
||||
* be freed either at a later time if another context deletes
|
||||
* it from the map, or automatically by the BPF subsystem if
|
||||
* it's still present when the map is destroyed.
|
||||
*/
|
||||
bpf_cgroup_release(kptr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
----
|
||||
|
||||
Other kfuncs available for interacting with ``struct cgroup *`` objects are
|
||||
bpf_cgroup_ancestor() and bpf_cgroup_from_id(), allowing callers to access
|
||||
the ancestor of a cgroup and find a cgroup by its ID, respectively. Both
|
||||
|
@ -2,23 +2,32 @@
|
||||
|
||||
.. _libbpf:
|
||||
|
||||
======
|
||||
libbpf
|
||||
======
|
||||
|
||||
If you are looking to develop BPF applications using the libbpf library, this
|
||||
directory contains important documentation that you should read.
|
||||
|
||||
To get started, it is recommended to begin with the :doc:`libbpf Overview
|
||||
<libbpf_overview>` document, which provides a high-level understanding of the
|
||||
libbpf APIs and their usage. This will give you a solid foundation to start
|
||||
exploring and utilizing the various features of libbpf to develop your BPF
|
||||
applications.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
libbpf_overview
|
||||
API Documentation <https://libbpf.readthedocs.io/en/latest/api.html>
|
||||
program_types
|
||||
libbpf_naming_convention
|
||||
libbpf_build
|
||||
|
||||
This is documentation for libbpf, a userspace library for loading and
|
||||
interacting with bpf programs.
|
||||
|
||||
All general BPF questions, including kernel functionality, libbpf APIs and
|
||||
their application, should be sent to bpf@vger.kernel.org mailing list.
|
||||
You can `subscribe <http://vger.kernel.org/vger-lists.html#bpf>`_ to the
|
||||
mailing list search its `archive <https://lore.kernel.org/bpf/>`_.
|
||||
Please search the archive before asking new questions. It very well might
|
||||
be that this was already addressed or answered before.
|
||||
All general BPF questions, including kernel functionality, libbpf APIs and their
|
||||
application, should be sent to bpf@vger.kernel.org mailing list. You can
|
||||
`subscribe <http://vger.kernel.org/vger-lists.html#bpf>`_ to the mailing list
|
||||
search its `archive <https://lore.kernel.org/bpf/>`_. Please search the archive
|
||||
before asking new questions. It may be that this was already addressed or
|
||||
answered before.
|
||||
|
228
Documentation/bpf/libbpf/libbpf_overview.rst
Normal file
228
Documentation/bpf/libbpf/libbpf_overview.rst
Normal file
@ -0,0 +1,228 @@
|
||||
.. SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
===============
|
||||
libbpf Overview
|
||||
===============
|
||||
|
||||
libbpf is a C-based library containing a BPF loader that takes compiled BPF
|
||||
object files and prepares and loads them into the Linux kernel. libbpf takes the
|
||||
heavy lifting of loading, verifying, and attaching BPF programs to various
|
||||
kernel hooks, allowing BPF application developers to focus only on BPF program
|
||||
correctness and performance.
|
||||
|
||||
The following are the high-level features supported by libbpf:
|
||||
|
||||
* Provides high-level and low-level APIs for user space programs to interact
|
||||
with BPF programs. The low-level APIs wrap all the bpf system call
|
||||
functionality, which is useful when users need more fine-grained control
|
||||
over the interactions between user space and BPF programs.
|
||||
* Provides overall support for the BPF object skeleton generated by bpftool.
|
||||
The skeleton file simplifies the process for the user space programs to access
|
||||
global variables and work with BPF programs.
|
||||
* Provides BPF-side APIS, including BPF helper definitions, BPF maps support,
|
||||
and tracing helpers, allowing developers to simplify BPF code writing.
|
||||
* Supports BPF CO-RE mechanism, enabling BPF developers to write portable
|
||||
BPF programs that can be compiled once and run across different kernel
|
||||
versions.
|
||||
|
||||
This document will delve into the above concepts in detail, providing a deeper
|
||||
understanding of the capabilities and advantages of libbpf and how it can help
|
||||
you develop BPF applications efficiently.
|
||||
|
||||
BPF App Lifecycle and libbpf APIs
|
||||
==================================
|
||||
|
||||
A BPF application consists of one or more BPF programs (either cooperating or
|
||||
completely independent), BPF maps, and global variables. The global
|
||||
variables are shared between all BPF programs, which allows them to cooperate on
|
||||
a common set of data. libbpf provides APIs that user space programs can use to
|
||||
manipulate the BPF programs by triggering different phases of a BPF application
|
||||
lifecycle.
|
||||
|
||||
The following section provides a brief overview of each phase in the BPF life
|
||||
cycle:
|
||||
|
||||
* **Open phase**: In this phase, libbpf parses the BPF
|
||||
object file and discovers BPF maps, BPF programs, and global variables. After
|
||||
a BPF app is opened, user space apps can make additional adjustments
|
||||
(setting BPF program types, if necessary; pre-setting initial values for
|
||||
global variables, etc.) before all the entities are created and loaded.
|
||||
|
||||
* **Load phase**: In the load phase, libbpf creates BPF
|
||||
maps, resolves various relocations, and verifies and loads BPF programs into
|
||||
the kernel. At this point, libbpf validates all the parts of a BPF application
|
||||
and loads the BPF program into the kernel, but no BPF program has yet been
|
||||
executed. After the load phase, it’s possible to set up the initial BPF map
|
||||
state without racing with the BPF program code execution.
|
||||
|
||||
* **Attachment phase**: In this phase, libbpf
|
||||
attaches BPF programs to various BPF hook points (e.g., tracepoints, kprobes,
|
||||
cgroup hooks, network packet processing pipeline, etc.). During this
|
||||
phase, BPF programs perform useful work such as processing
|
||||
packets, or updating BPF maps and global variables that can be read from user
|
||||
space.
|
||||
|
||||
* **Tear down phase**: In the tear down phase,
|
||||
libbpf detaches BPF programs and unloads them from the kernel. BPF maps are
|
||||
destroyed, and all the resources used by the BPF app are freed.
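These phases map directly onto libbpf's generic object APIs. A minimal sketch
(error handling trimmed; the object file and program names are placeholders):

.. code-block:: C

    #include <bpf/libbpf.h>

    struct bpf_object *obj;
    struct bpf_program *prog;
    struct bpf_link *link;

    obj = bpf_object__open_file("prog.bpf.o", NULL);       /* open phase */
    bpf_object__load(obj);                                 /* load phase */
    prog = bpf_object__find_program_by_name(obj, "handle_tp");
    link = bpf_program__attach(prog);                      /* attachment phase */
    /* ... BPF program is live: read maps, consume data, etc. ... */
    bpf_link__destroy(link);                               /* tear down phase */
    bpf_object__close(obj);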
|
||||
|
||||
BPF Object Skeleton File
|
||||
========================
|
||||
|
||||
BPF skeleton is an alternative interface to libbpf APIs for working with BPF
|
||||
objects. Skeleton code abstract away generic libbpf APIs to significantly
|
||||
simplify code for manipulating BPF programs from user space. Skeleton code
|
||||
includes a bytecode representation of the BPF object file, simplifying the
|
||||
process of distributing your BPF code. With BPF bytecode embedded, there are no
|
||||
extra files to deploy along with your application binary.
|
||||
|
||||
You can generate the skeleton header file ``(.skel.h)`` for a specific object
file by passing the BPF object to the bpftool. The generated BPF skeleton
provides the following custom functions that correspond to the BPF lifecycle,
each of them prefixed with the specific object name:

* ``<name>__open()`` – creates and opens BPF application (``<name>`` stands for
  the specific bpf object name)
* ``<name>__load()`` – instantiates, loads, and verifies BPF application parts
* ``<name>__attach()`` – attaches all auto-attachable BPF programs (it’s
  optional, you can have more control by using libbpf APIs directly)
* ``<name>__destroy()`` – detaches all BPF programs and
  frees up all used resources
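A minimal sketch of how these functions are typically used, assuming a
hypothetical object named ``myprog`` (so the generated header is
``myprog.skel.h``):

.. code-block:: C

    #include <stdio.h>
    #include "myprog.skel.h"   /* bpftool gen skeleton myprog.bpf.o > myprog.skel.h */

    int main(void)
    {
        struct myprog *skel;
        int err;

        skel = myprog__open();               /* parse the embedded BPF object */
        if (!skel)
            return 1;
        /* pre-load tweaks (program types, initial global values) go here */
        err = myprog__load(skel);            /* create maps, load and verify progs */
        if (!err)
            err = myprog__attach(skel);      /* attach auto-attachable programs */
        if (err) {
            fprintf(stderr, "skeleton setup failed: %d\n", err);
            myprog__destroy(skel);
            return 1;
        }
        /* ... application logic ... */
        myprog__destroy(skel);               /* detach programs, free resources */
        return 0;
    }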
|
||||
|
||||
Using the skeleton code is the recommended way to work with bpf programs. Keep
|
||||
in mind, BPF skeleton provides access to the underlying BPF object, so whatever
|
||||
was possible to do with generic libbpf APIs is still possible even when the BPF
|
||||
skeleton is used. It's an additive convenience feature, with no syscalls, and no
|
||||
cumbersome code.
|
||||
|
||||
Other Advantages of Using Skeleton File
|
||||
---------------------------------------
|
||||
|
||||
* BPF skeleton provides an interface for user space programs to work with BPF
|
||||
global variables. The skeleton code memory maps global variables as a struct
|
||||
into user space. The struct interface allows user space programs to initialize
|
||||
BPF programs before the BPF load phase and fetch and update data from user
|
||||
space afterward (see the short sketch after this list).
|
||||
|
||||
* The ``skel.h`` file reflects the object file structure by listing out the
|
||||
available maps, programs, etc. BPF skeleton provides direct access to all the
|
||||
BPF maps and BPF programs as struct fields. This eliminates the need for
|
||||
string-based lookups with ``bpf_object_find_map_by_name()`` and
|
||||
``bpf_object_find_program_by_name()`` APIs, reducing errors due to BPF source
|
||||
code and user-space code getting out of sync.
|
||||
|
||||
* The embedded bytecode representation of the object file ensures that the
|
||||
skeleton and the BPF object file are always in sync.
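A short sketch of the global-variable interface from the first point above,
assuming a skeleton named ``myprog`` with a zero-initialized global
``int my_counter`` exposed through the skeleton's ``bss`` member (all names
are illustrative):

.. code-block:: C

    struct myprog *skel = myprog__open();

    skel->bss->my_counter = 0;            /* set initial value before load */
    myprog__load(skel);
    myprog__attach(skel);
    /* ... later: read back whatever the BPF program wrote ... */
    printf("counter = %d\n", skel->bss->my_counter);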
|
||||
|
||||
BPF Helpers
===========

libbpf provides BPF-side APIs that BPF programs can use to interact with the
system. The BPF helpers definition allows developers to use them in BPF code as
any other plain C function. For example, there are helper functions to print
debugging messages, get the time since the system was booted, interact with BPF
maps, manipulate network packets, etc.

For a complete description of what the helpers do, the arguments they take, and
the return value, see the `bpf-helpers
<https://man7.org/linux/man-pages/man7/bpf-helpers.7.html>`_ man page.
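A small sketch of the BPF-side API in action; the tracepoint chosen here is
only an example, and the message goes to the kernel trace pipe:

.. code-block:: C

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    SEC("tracepoint/syscalls/sys_enter_execve")
    int log_execve(void *ctx)
    {
        /* bpf_ktime_get_ns(): nanoseconds since boot; bpf_printk(): debug output */
        bpf_printk("execve seen at %llu ns since boot", bpf_ktime_get_ns());
        return 0;
    }

    char LICENSE[] SEC("license") = "GPL";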
|
||||
|
||||
BPF CO-RE (Compile Once – Run Everywhere)
|
||||
=========================================
|
||||
|
||||
BPF programs work in the kernel space and have access to kernel memory and data
|
||||
structures. One limitation that BPF applications come across is the lack of
|
||||
portability across different kernel versions and configurations. `BCC
|
||||
<https://github.com/iovisor/bcc/>`_ is one of the solutions for BPF
|
||||
portability. However, it comes with runtime overhead and a large binary size
|
||||
from embedding the compiler with the application.
|
||||
|
||||
libbpf steps up the BPF program portability by supporting the BPF CO-RE concept.
|
||||
BPF CO-RE brings together BTF type information, libbpf, and the compiler to
|
||||
produce a single executable binary that you can run on multiple kernel versions
|
||||
and configurations.
|
||||
|
||||
To make BPF programs portable libbpf relies on the BTF type information of the
|
||||
running kernel. Kernel also exposes this self-describing authoritative BTF
|
||||
information through ``sysfs`` at ``/sys/kernel/btf/vmlinux``.
|
||||
|
||||
You can generate the BTF information for the running kernel with the following
|
||||
command:
|
||||
|
||||
::
|
||||
|
||||
$ bpftool btf dump file /sys/kernel/btf/vmlinux format c > vmlinux.h
|
||||
|
||||
The command generates a ``vmlinux.h`` header file with all kernel types
|
||||
(:doc:`BTF types <../btf>`) that the running kernel uses. Including
|
||||
``vmlinux.h`` in your BPF program eliminates dependency on system-wide kernel
|
||||
headers.
|
||||
|
||||
libbpf enables portability of BPF programs by looking at the BPF program’s
|
||||
recorded BTF type and relocation information and matching them to BTF
|
||||
information (vmlinux) provided by the running kernel. libbpf then resolves and
|
||||
matches all the types and fields, and updates necessary offsets and other
|
||||
relocatable data to ensure that BPF program’s logic functions correctly for a
|
||||
specific kernel on the host. BPF CO-RE concept thus eliminates overhead
|
||||
associated with BPF development and allows developers to write portable BPF
|
||||
applications without modifications and runtime source code compilation on the
|
||||
target machine.
|
||||
|
||||
The following code snippet shows how to read the parent field of a kernel
|
||||
``task_struct`` using BPF CO-RE and libbf. The basic helper to read a field in a
|
||||
CO-RE relocatable manner is ``bpf_core_read(dst, sz, src)``, which will read
|
||||
``sz`` bytes from the field referenced by ``src`` into the memory pointed to by
|
||||
``dst``.
|
||||
|
||||
.. code-block:: C
|
||||
:emphasize-lines: 6
|
||||
|
||||
//...
|
||||
struct task_struct *task = (void *)bpf_get_current_task();
|
||||
struct task_struct *parent_task;
|
||||
int err;
|
||||
|
||||
err = bpf_core_read(&parent_task, sizeof(void *), &task->parent);
|
||||
if (err) {
|
||||
/* handle error */
|
||||
}
|
||||
|
||||
/* parent_task contains the value of task->parent pointer */
|
||||
|
||||
In the code snippet, we first get a pointer to the current ``task_struct`` using
|
||||
``bpf_get_current_task()``. We then use ``bpf_core_read()`` to read the parent
|
||||
field of task struct into the ``parent_task`` variable. ``bpf_core_read()`` is
|
||||
just like ``bpf_probe_read_kernel()`` BPF helper, except it records information
|
||||
about the field that should be relocated on the target kernel. i.e, if the
|
||||
``parent`` field gets shifted to a different offset within
|
||||
``struct task_struct`` due to some new field added in front of it, libbpf will
|
||||
automatically adjust the actual offset to the proper value.
|
||||
|
||||
Getting Started with libbpf
|
||||
===========================
|
||||
|
||||
Check out the `libbpf-bootstrap <https://github.com/libbpf/libbpf-bootstrap>`_
|
||||
repository with simple examples of using libbpf to build various BPF
|
||||
applications.
|
||||
|
||||
See also `libbpf API documentation
|
||||
<https://libbpf.readthedocs.io/en/latest/api.html>`_.
|
||||
|
||||
libbpf and Rust
|
||||
===============
|
||||
|
||||
If you are building BPF applications in Rust, it is recommended to use the
|
||||
`Libbpf-rs <https://github.com/libbpf/libbpf-rs>`_ library instead of bindgen
|
||||
bindings directly to libbpf. Libbpf-rs wraps libbpf functionality in
|
||||
Rust-idiomatic interfaces and provides libbpf-cargo plugin to handle BPF code
|
||||
compilation and skeleton generation. Using Libbpf-rs will make building user
|
||||
space part of the BPF application easier. Note that the BPF program themselves
|
||||
must still be written in plain C.
|
||||
|
||||
Additional Documentation
|
||||
========================
|
||||
|
||||
* `Program types and ELF Sections <https://libbpf.readthedocs.io/en/latest/program_types.html>`_
|
||||
* `API naming convention <https://libbpf.readthedocs.io/en/latest/libbpf_naming_convention.html>`_
|
||||
* `Building libbpf <https://libbpf.readthedocs.io/en/latest/libbpf_build.html>`_
|
||||
* `API documentation Convention <https://libbpf.readthedocs.io/en/latest/libbpf_naming_convention.html#api-documentation-convention>`_
|
@ -12,6 +12,36 @@ Byte swap instructions
|
||||
|
||||
``BPF_FROM_LE`` and ``BPF_FROM_BE`` exist as aliases for ``BPF_TO_LE`` and ``BPF_TO_BE`` respectively.
|
||||
|
||||
Jump instructions
|
||||
=================
|
||||
|
||||
``BPF_CALL | BPF_X | BPF_JMP`` (0x8d), where the helper function
|
||||
integer would be read from a specified register, is not currently supported
|
||||
by the verifier. Any programs with this instruction will fail to load
|
||||
until such support is added.
|
||||
|
||||
Maps
|
||||
====
|
||||
|
||||
Linux only supports the 'map_val(map)' operation on array maps with a single element.
|
||||
|
||||
Linux uses an fd_array to store maps associated with a BPF program. Thus,
|
||||
map_by_idx(imm) uses the fd at that index in the array.
|
||||
|
||||
Variables
|
||||
=========
|
||||
|
||||
The following 64-bit immediate instruction specifies that a variable address,
|
||||
which corresponds to some integer stored in the 'imm' field, should be loaded:
|
||||
|
||||
========================= ====== === ========================================= =========== ==============
|
||||
opcode construction opcode src pseudocode imm type dst type
|
||||
========================= ====== === ========================================= =========== ==============
|
||||
BPF_IMM | BPF_DW | BPF_LD 0x18 0x3 dst = var_addr(imm) variable id data pointer
|
||||
========================= ====== === ========================================= =========== ==============
|
||||
|
||||
On Linux, this integer is a BTF ID.
|
||||
|
||||
Legacy BPF Packet access instructions
|
||||
=====================================
|
||||
|
||||
|
@ -342,9 +342,6 @@ hid_bpf_release_context(struct hid_bpf_ctx *ctx)
|
||||
{
|
||||
struct hid_bpf_ctx_kern *ctx_kern;
|
||||
|
||||
if (!ctx)
|
||||
return;
|
||||
|
||||
ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
|
||||
|
||||
kfree(ctx_kern);
|
||||
|
@ -96,11 +96,11 @@ struct bpf_map_ops {
|
||||
|
||||
/* funcs callable from userspace and from eBPF programs */
|
||||
void *(*map_lookup_elem)(struct bpf_map *map, void *key);
|
||||
int (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
|
||||
int (*map_delete_elem)(struct bpf_map *map, void *key);
|
||||
int (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
|
||||
int (*map_pop_elem)(struct bpf_map *map, void *value);
|
||||
int (*map_peek_elem)(struct bpf_map *map, void *value);
|
||||
long (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags);
|
||||
long (*map_delete_elem)(struct bpf_map *map, void *key);
|
||||
long (*map_push_elem)(struct bpf_map *map, void *value, u64 flags);
|
||||
long (*map_pop_elem)(struct bpf_map *map, void *value);
|
||||
long (*map_peek_elem)(struct bpf_map *map, void *value);
|
||||
void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu);
|
||||
|
||||
/* funcs called by prog_array and perf_event_array map */
|
||||
@ -139,7 +139,7 @@ struct bpf_map_ops {
|
||||
struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);
|
||||
|
||||
/* Misc helpers.*/
|
||||
int (*map_redirect)(struct bpf_map *map, u64 key, u64 flags);
|
||||
long (*map_redirect)(struct bpf_map *map, u64 key, u64 flags);
|
||||
|
||||
/* map_meta_equal must be implemented for maps that can be
|
||||
* used as an inner map. It is a runtime check to ensure
|
||||
@ -157,7 +157,7 @@ struct bpf_map_ops {
|
||||
int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env,
|
||||
struct bpf_func_state *caller,
|
||||
struct bpf_func_state *callee);
|
||||
int (*map_for_each_callback)(struct bpf_map *map,
|
||||
long (*map_for_each_callback)(struct bpf_map *map,
|
||||
bpf_callback_t callback_fn,
|
||||
void *callback_ctx, u64 flags);
|
||||
|
||||
@ -189,9 +189,14 @@ enum btf_field_type {
|
||||
BPF_RB_NODE | BPF_RB_ROOT,
|
||||
};
|
||||
|
||||
typedef void (*btf_dtor_kfunc_t)(void *);
|
||||
|
||||
struct btf_field_kptr {
|
||||
struct btf *btf;
|
||||
struct module *module;
|
||||
/* dtor used if btf_is_kernel(btf), otherwise the type is
|
||||
* program-allocated, dtor is NULL, and __bpf_obj_drop_impl is used
|
||||
*/
|
||||
btf_dtor_kfunc_t dtor;
|
||||
u32 btf_id;
|
||||
};
|
||||
@ -888,8 +893,7 @@ struct bpf_verifier_ops {
|
||||
struct bpf_prog *prog, u32 *target_size);
|
||||
int (*btf_struct_access)(struct bpf_verifier_log *log,
|
||||
const struct bpf_reg_state *reg,
|
||||
int off, int size, enum bpf_access_type atype,
|
||||
u32 *next_btf_id, enum bpf_type_flag *flag);
|
||||
int off, int size);
|
||||
};
|
||||
|
||||
struct bpf_prog_offload_ops {
|
||||
@ -1098,6 +1102,7 @@ struct bpf_trampoline {
|
||||
struct bpf_attach_target_info {
|
||||
struct btf_func_model fmodel;
|
||||
long tgt_addr;
|
||||
struct module *tgt_mod;
|
||||
const char *tgt_name;
|
||||
const struct btf_type *tgt_type;
|
||||
};
|
||||
@ -1401,6 +1406,7 @@ struct bpf_prog_aux {
|
||||
* main prog always has linfo_idx == 0
|
||||
*/
|
||||
u32 linfo_idx;
|
||||
struct module *mod;
|
||||
u32 num_exentries;
|
||||
struct exception_table_entry *extable;
|
||||
union {
|
||||
@ -1469,6 +1475,8 @@ struct bpf_link_ops {
|
||||
void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
|
||||
int (*fill_link_info)(const struct bpf_link *link,
|
||||
struct bpf_link_info *info);
|
||||
int (*update_map)(struct bpf_link *link, struct bpf_map *new_map,
|
||||
struct bpf_map *old_map);
|
||||
};
|
||||
|
||||
struct bpf_tramp_link {
|
||||
@ -1511,6 +1519,8 @@ struct bpf_struct_ops {
|
||||
void *kdata, const void *udata);
|
||||
int (*reg)(void *kdata);
|
||||
void (*unreg)(void *kdata);
|
||||
int (*update)(void *kdata, void *old_kdata);
|
||||
int (*validate)(void *kdata);
|
||||
const struct btf_type *type;
|
||||
const struct btf_type *value_type;
|
||||
const char *name;
|
||||
@ -1545,6 +1555,7 @@ static inline void bpf_module_put(const void *data, struct module *owner)
|
||||
else
|
||||
module_put(owner);
|
||||
}
|
||||
int bpf_struct_ops_link_create(union bpf_attr *attr);
|
||||
|
||||
#ifdef CONFIG_NET
|
||||
/* Define it here to avoid the use of forward declaration */
|
||||
@ -1585,6 +1596,11 @@ static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
static inline int bpf_struct_ops_link_create(union bpf_attr *attr)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM)
|
||||
@ -1617,8 +1633,12 @@ struct bpf_array {
|
||||
#define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 1M insns */
|
||||
#define MAX_TAIL_CALL_CNT 33
|
||||
|
||||
/* Maximum number of loops for bpf_loop */
|
||||
#define BPF_MAX_LOOPS BIT(23)
|
||||
/* Maximum number of loops for bpf_loop and bpf_iter_num.
|
||||
* It's enum to expose it (and thus make it discoverable) through BTF.
|
||||
*/
|
||||
enum {
|
||||
BPF_MAX_LOOPS = 8 * 1024 * 1024,
|
||||
};
|
||||
|
||||
#define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \
|
||||
BPF_F_RDONLY_PROG | \
|
||||
@ -1921,7 +1941,7 @@ void bpf_prog_free_id(struct bpf_prog *prog);
|
||||
void bpf_map_free_id(struct bpf_map *map);
|
||||
|
||||
struct btf_field *btf_record_find(const struct btf_record *rec,
|
||||
u32 offset, enum btf_field_type type);
|
||||
u32 offset, u32 field_mask);
|
||||
void btf_record_free(struct btf_record *rec);
|
||||
void bpf_map_free_record(struct bpf_map *map);
|
||||
struct btf_record *btf_record_dup(const struct btf_record *rec);
|
||||
@ -1934,6 +1954,7 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd);
|
||||
struct bpf_map *__bpf_map_get(struct fd f);
|
||||
void bpf_map_inc(struct bpf_map *map);
|
||||
void bpf_map_inc_with_uref(struct bpf_map *map);
|
||||
struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref);
|
||||
struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
|
||||
void bpf_map_put_with_uref(struct bpf_map *map);
|
||||
void bpf_map_put(struct bpf_map *map);
|
||||
@ -2154,7 +2175,7 @@ int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
|
||||
size_t actual_size);
|
||||
|
||||
/* verify correctness of eBPF program */
|
||||
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr);
|
||||
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size);
|
||||
|
||||
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
|
||||
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
|
||||
@ -2242,7 +2263,7 @@ static inline bool bpf_tracing_btf_ctx_access(int off, int size,
|
||||
int btf_struct_access(struct bpf_verifier_log *log,
|
||||
const struct bpf_reg_state *reg,
|
||||
int off, int size, enum bpf_access_type atype,
|
||||
u32 *next_btf_id, enum bpf_type_flag *flag);
|
||||
u32 *next_btf_id, enum bpf_type_flag *flag, const char **field_name);
|
||||
bool btf_struct_ids_match(struct bpf_verifier_log *log,
|
||||
const struct btf *btf, u32 id, int off,
|
||||
const struct btf *need_btf, u32 need_type_id,
|
||||
@ -2281,7 +2302,7 @@ struct bpf_core_ctx {
|
||||
|
||||
bool btf_nested_type_is_trusted(struct bpf_verifier_log *log,
|
||||
const struct bpf_reg_state *reg,
|
||||
int off, const char *suffix);
|
||||
const char *field_name, u32 btf_id, const char *suffix);
|
||||
|
||||
bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log,
|
||||
const struct btf *reg_btf, u32 reg_id,
|
||||
@ -2496,7 +2517,8 @@ static inline struct bpf_prog *bpf_prog_by_id(u32 id)
|
||||
static inline int btf_struct_access(struct bpf_verifier_log *log,
|
||||
const struct bpf_reg_state *reg,
|
||||
int off, int size, enum bpf_access_type atype,
|
||||
u32 *next_btf_id, enum bpf_type_flag *flag)
|
||||
u32 *next_btf_id, enum bpf_type_flag *flag,
|
||||
const char **field_name)
|
||||
{
|
||||
return -EACCES;
|
||||
}
|
||||
|
@ -13,6 +13,7 @@
|
||||
#include <linux/list.h>
|
||||
#include <linux/hash.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/bpf_mem_alloc.h>
|
||||
#include <uapi/linux/btf.h>
|
||||
|
||||
#define BPF_LOCAL_STORAGE_CACHE_SIZE 16
|
||||
@ -55,6 +56,9 @@ struct bpf_local_storage_map {
|
||||
u32 bucket_log;
|
||||
u16 elem_size;
|
||||
u16 cache_idx;
|
||||
struct bpf_mem_alloc selem_ma;
|
||||
struct bpf_mem_alloc storage_ma;
|
||||
bool bpf_ma;
|
||||
};
|
||||
|
||||
struct bpf_local_storage_data {
|
||||
@ -83,6 +87,7 @@ struct bpf_local_storage_elem {
|
||||
|
||||
struct bpf_local_storage {
|
||||
struct bpf_local_storage_data __rcu *cache[BPF_LOCAL_STORAGE_CACHE_SIZE];
|
||||
struct bpf_local_storage_map __rcu *smap;
|
||||
struct hlist_head list; /* List of bpf_local_storage_elem */
|
||||
void *owner; /* The object that owns the above "list" of
|
||||
* bpf_local_storage_elem.
|
||||
@ -121,14 +126,15 @@ int bpf_local_storage_map_alloc_check(union bpf_attr *attr);
|
||||
|
||||
struct bpf_map *
|
||||
bpf_local_storage_map_alloc(union bpf_attr *attr,
|
||||
struct bpf_local_storage_cache *cache);
|
||||
struct bpf_local_storage_cache *cache,
|
||||
bool bpf_ma);
|
||||
|
||||
struct bpf_local_storage_data *
|
||||
bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
|
||||
struct bpf_local_storage_map *smap,
|
||||
bool cacheit_lockit);
|
||||
|
||||
bool bpf_local_storage_unlink_nolock(struct bpf_local_storage *local_storage);
|
||||
void bpf_local_storage_destroy(struct bpf_local_storage *local_storage);
|
||||
|
||||
void bpf_local_storage_map_free(struct bpf_map *map,
|
||||
struct bpf_local_storage_cache *cache,
|
||||
@ -142,17 +148,19 @@ int bpf_local_storage_map_check_btf(const struct bpf_map *map,
|
||||
void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
|
||||
struct bpf_local_storage_elem *selem);
|
||||
|
||||
void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool use_trace_rcu);
|
||||
void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now);
|
||||
|
||||
void bpf_selem_link_map(struct bpf_local_storage_map *smap,
|
||||
struct bpf_local_storage_elem *selem);
|
||||
|
||||
void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem);
|
||||
|
||||
struct bpf_local_storage_elem *
|
||||
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner, void *value,
|
||||
bool charge_mem, gfp_t gfp_flags);
|
||||
|
||||
void bpf_selem_free(struct bpf_local_storage_elem *selem,
|
||||
struct bpf_local_storage_map *smap,
|
||||
bool reuse_now);
|
||||
|
||||
int
|
||||
bpf_local_storage_alloc(void *owner,
|
||||
struct bpf_local_storage_map *smap,
|
||||
@ -163,7 +171,6 @@ struct bpf_local_storage_data *
|
||||
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
|
||||
void *value, u64 map_flags, gfp_t gfp_flags);
|
||||
|
||||
void bpf_local_storage_free_rcu(struct rcu_head *rcu);
|
||||
u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map);
|
||||
|
||||
#endif /* _BPF_LOCAL_STORAGE_H */
|
||||
|
@ -31,5 +31,7 @@ void bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr);
|
||||
/* kmem_cache_alloc/free equivalent: */
|
||||
void *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma);
|
||||
void bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr);
|
||||
void bpf_mem_cache_raw_free(void *ptr);
|
||||
void *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags);
|
||||
|
||||
#endif /* _BPF_MEM_ALLOC_H */
|
||||
|
@ -59,6 +59,14 @@ struct bpf_active_lock {
|
||||
u32 id;
|
||||
};
|
||||
|
||||
#define ITER_PREFIX "bpf_iter_"
|
||||
|
||||
enum bpf_iter_state {
|
||||
BPF_ITER_STATE_INVALID, /* for non-first slot */
|
||||
BPF_ITER_STATE_ACTIVE,
|
||||
BPF_ITER_STATE_DRAINED,
|
||||
};
|
||||
|
||||
struct bpf_reg_state {
|
||||
/* Ordering of fields matters. See states_equal() */
|
||||
enum bpf_reg_type type;
|
||||
@ -103,6 +111,18 @@ struct bpf_reg_state {
|
||||
bool first_slot;
|
||||
} dynptr;
|
||||
|
||||
/* For bpf_iter stack slots */
|
||||
struct {
|
||||
/* BTF container and BTF type ID describing
|
||||
* struct bpf_iter_<type> of an iterator state
|
||||
*/
|
||||
struct btf *btf;
|
||||
u32 btf_id;
|
||||
/* packing following two fields to fit iter state into 16 bytes */
|
||||
enum bpf_iter_state state:2;
|
||||
int depth:30;
|
||||
} iter;
|
||||
|
||||
/* Max size from any of the above. */
|
||||
struct {
|
||||
unsigned long raw1;
|
||||
@ -141,6 +161,8 @@ struct bpf_reg_state {
|
||||
* same reference to the socket, to determine proper reference freeing.
|
||||
* For stack slots that are dynptrs, this is used to track references to
|
||||
* the dynptr to determine proper reference freeing.
|
||||
* Similarly to dynptrs, we use ID to track "belonging" of a reference
|
||||
* to a specific instance of bpf_iter.
|
||||
*/
|
||||
u32 id;
|
||||
/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
|
||||
@ -211,9 +233,11 @@ enum bpf_stack_slot_type {
|
||||
* is stored in bpf_stack_state->spilled_ptr.dynptr.type
|
||||
*/
|
||||
STACK_DYNPTR,
|
||||
STACK_ITER,
|
||||
};
|
||||
|
||||
#define BPF_REG_SIZE 8 /* size of eBPF register in bytes */
|
||||
|
||||
#define BPF_DYNPTR_SIZE sizeof(struct bpf_dynptr_kern)
|
||||
#define BPF_DYNPTR_NR_SLOTS (BPF_DYNPTR_SIZE / BPF_REG_SIZE)
|
||||
|
||||
@ -448,12 +472,17 @@ struct bpf_insn_aux_data {
|
||||
bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
|
||||
bool zext_dst; /* this insn zero extends dst reg */
|
||||
bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
|
||||
bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
|
||||
u8 alu_state; /* used in combination with alu_limit */
|
||||
|
||||
/* below fields are initialized once */
|
||||
unsigned int orig_idx; /* original instruction index */
|
||||
bool prune_point;
|
||||
bool jmp_point;
|
||||
bool prune_point;
|
||||
/* ensure we check state equivalence and save state checkpoint and
|
||||
* this instruction, regardless of any heuristics
|
||||
*/
|
||||
bool force_checkpoint;
|
||||
};
|
||||
|
||||
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
|
||||
@ -462,39 +491,36 @@ struct bpf_insn_aux_data {
|
||||
#define BPF_VERIFIER_TMP_LOG_SIZE 1024
|
||||
|
||||
struct bpf_verifier_log {
|
||||
u32 level;
|
||||
char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
|
||||
/* Logical start and end positions of a "log window" of the verifier log.
|
||||
* start_pos == 0 means we haven't truncated anything.
|
||||
* Once truncation starts to happen, start_pos + len_total == end_pos,
|
||||
* except during log reset situations, in which (end_pos - start_pos)
|
||||
* might get smaller than len_total (see bpf_vlog_reset()).
|
||||
* Generally, (end_pos - start_pos) gives number of useful data in
|
||||
* user log buffer.
|
||||
*/
|
||||
u64 start_pos;
|
||||
u64 end_pos;
|
||||
char __user *ubuf;
|
||||
u32 len_used;
|
||||
u32 level;
|
||||
u32 len_total;
|
||||
u32 len_max;
|
||||
char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
|
||||
};
|
||||
|
||||
static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
|
||||
{
|
||||
return log->len_used >= log->len_total - 1;
|
||||
}
|
||||
|
||||
#define BPF_LOG_LEVEL1 1
|
||||
#define BPF_LOG_LEVEL2 2
|
||||
#define BPF_LOG_STATS 4
|
||||
#define BPF_LOG_FIXED 8
|
||||
#define BPF_LOG_LEVEL (BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
|
||||
#define BPF_LOG_MASK (BPF_LOG_LEVEL | BPF_LOG_STATS)
|
||||
#define BPF_LOG_MASK (BPF_LOG_LEVEL | BPF_LOG_STATS | BPF_LOG_FIXED)
|
||||
#define BPF_LOG_KERNEL (BPF_LOG_MASK + 1) /* kernel internal flag */
|
||||
#define BPF_LOG_MIN_ALIGNMENT 8U
|
||||
#define BPF_LOG_ALIGNMENT 40U
|
||||
|
||||
static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
|
||||
{
|
||||
return log &&
|
||||
((log->level && log->ubuf && !bpf_verifier_log_full(log)) ||
|
||||
log->level == BPF_LOG_KERNEL);
|
||||
}
|
||||
|
||||
static inline bool
|
||||
bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log)
|
||||
{
|
||||
return log->len_total >= 128 && log->len_total <= UINT_MAX >> 2 &&
|
||||
log->level && log->ubuf && !(log->level & ~BPF_LOG_MASK);
|
||||
return log && log->level;
|
||||
}
|
||||
|
||||
#define BPF_MAX_SUBPROGS 256
|
||||
@ -574,7 +600,7 @@ struct bpf_verifier_env {
|
||||
u32 scratched_regs;
|
||||
/* Same as scratched_regs but for stack slots */
|
||||
u64 scratched_stack_slots;
|
||||
u32 prev_log_len, prev_insn_print_len;
|
||||
u64 prev_log_pos, prev_insn_print_pos;
|
||||
/* buffer used in reg_type_str() to generate reg_type string */
|
||||
char type_str_buf[TYPE_STR_BUF_LEN];
|
||||
};
|
||||
@ -585,6 +611,10 @@ __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
|
||||
const char *fmt, ...);
|
||||
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
|
||||
const char *fmt, ...);
|
||||
int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
|
||||
char __user *log_buf, u32 log_size);
|
||||
void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos);
|
||||
int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual);
|
||||
|
||||
static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
|
||||
{
|
||||
|
@ -71,6 +71,10 @@
|
||||
#define KF_SLEEPABLE (1 << 5) /* kfunc may sleep */
|
||||
#define KF_DESTRUCTIVE (1 << 6) /* kfunc performs destructive actions */
|
||||
#define KF_RCU (1 << 7) /* kfunc takes either rcu or trusted pointer arguments */
|
||||
/* only one of KF_ITER_{NEW,NEXT,DESTROY} could be specified per kfunc */
|
||||
#define KF_ITER_NEW (1 << 8) /* kfunc implements BPF iter constructor */
|
||||
#define KF_ITER_NEXT (1 << 9) /* kfunc implements BPF iter next method */
|
||||
#define KF_ITER_DESTROY (1 << 10) /* kfunc implements BPF iter destructor */
|
||||
|
||||
/*
|
||||
* Tag marking a kernel function as a kfunc. This is meant to minimize the
|
||||
@ -117,13 +121,11 @@ struct btf_struct_metas {
|
||||
struct btf_struct_meta types[];
|
||||
};
|
||||
|
||||
typedef void (*btf_dtor_kfunc_t)(void *);
|
||||
|
||||
extern const struct file_operations btf_fops;
|
||||
|
||||
void btf_get(struct btf *btf);
|
||||
void btf_put(struct btf *btf);
|
||||
int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr);
|
||||
int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_sz);
|
||||
struct btf *btf_get_by_fd(int fd);
|
||||
int btf_get_info_by_fd(const struct btf *btf,
|
||||
const union bpf_attr *attr,
|
||||
|
@ -571,8 +571,7 @@ DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
|
||||
extern struct mutex nf_conn_btf_access_lock;
|
||||
extern int (*nfct_btf_struct_access)(struct bpf_verifier_log *log,
|
||||
const struct bpf_reg_state *reg,
|
||||
int off, int size, enum bpf_access_type atype,
|
||||
u32 *next_btf_id, enum bpf_type_flag *flag);
|
||||
int off, int size);
|
||||
|
||||
typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx,
|
||||
const struct bpf_insn *insnsi,
|
||||
@ -1504,9 +1503,9 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
|
||||
}
|
||||
#endif /* IS_ENABLED(CONFIG_IPV6) */
|
||||
|
||||
static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u64 index,
|
||||
u64 flags, const u64 flag_mask,
|
||||
void *lookup_elem(struct bpf_map *map, u32 key))
|
||||
static __always_inline long __bpf_xdp_redirect_map(struct bpf_map *map, u64 index,
|
||||
u64 flags, const u64 flag_mask,
|
||||
void *lookup_elem(struct bpf_map *map, u32 key))
|
||||
{
|
||||
struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
|
||||
const u64 action_mask = XDP_ABORTED | XDP_DROP | XDP_PASS | XDP_TX;
|
||||
|
@ -608,14 +608,6 @@ static inline bool within_module(unsigned long addr, const struct module *mod)
|
||||
/* Search for module by name: must be in a RCU-sched critical section. */
|
||||
struct module *find_module(const char *name);
|
||||
|
||||
/* Returns 0 and fills in value, defined and namebuf, or -ERANGE if
|
||||
symnum out of range. */
|
||||
int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
|
||||
char *name, char *module_name, int *exported);
|
||||
|
||||
/* Look for this name: can be of form module:name. */
|
||||
unsigned long module_kallsyms_lookup_name(const char *name);
|
||||
|
||||
extern void __noreturn __module_put_and_kthread_exit(struct module *mod,
|
||||
long code);
|
||||
#define module_put_and_kthread_exit(code) __module_put_and_kthread_exit(THIS_MODULE, code)
|
||||
@ -662,17 +654,6 @@ static inline void __module_get(struct module *module)
|
||||
/* Dereference module function descriptor */
|
||||
void *dereference_module_function_descriptor(struct module *mod, void *ptr);
|
||||
|
||||
/* For kallsyms to ask for address resolution. namebuf should be at
|
||||
* least KSYM_NAME_LEN long: a pointer to namebuf is returned if
|
||||
* found, otherwise NULL. */
|
||||
const char *module_address_lookup(unsigned long addr,
|
||||
unsigned long *symbolsize,
|
||||
unsigned long *offset,
|
||||
char **modname, const unsigned char **modbuildid,
|
||||
char *namebuf);
|
||||
int lookup_module_symbol_name(unsigned long addr, char *symname);
|
||||
int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name);
|
||||
|
||||
int register_module_notifier(struct notifier_block *nb);
|
||||
int unregister_module_notifier(struct notifier_block *nb);
|
||||
|
||||
@ -763,39 +744,6 @@ static inline void module_put(struct module *module)
|
||||
|
||||
#define module_name(mod) "kernel"
|
||||
|
||||
/* For kallsyms to ask for address resolution. NULL means not found. */
|
||||
static inline const char *module_address_lookup(unsigned long addr,
|
||||
unsigned long *symbolsize,
|
||||
unsigned long *offset,
|
||||
char **modname,
|
||||
const unsigned char **modbuildid,
|
||||
char *namebuf)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline int lookup_module_symbol_name(unsigned long addr, char *symname)
|
||||
{
|
||||
return -ERANGE;
|
||||
}
|
||||
|
||||
static inline int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name)
|
||||
{
|
||||
return -ERANGE;
|
||||
}
|
||||
|
||||
static inline int module_get_kallsym(unsigned int symnum, unsigned long *value,
|
||||
char *type, char *name,
|
||||
char *module_name, int *exported)
|
||||
{
|
||||
return -ERANGE;
|
||||
}
|
||||
|
||||
static inline unsigned long module_kallsyms_lookup_name(const char *name)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int register_module_notifier(struct notifier_block *nb)
|
||||
{
|
||||
/* no events will happen anyway, so this can always succeed */
|
||||
@ -891,7 +839,36 @@ int module_kallsyms_on_each_symbol(const char *modname,
|
||||
int (*fn)(void *, const char *,
|
||||
struct module *, unsigned long),
|
||||
void *data);
|
||||
#else
|
||||
|
||||
/* For kallsyms to ask for address resolution. namebuf should be at
|
||||
* least KSYM_NAME_LEN long: a pointer to namebuf is returned if
|
||||
* found, otherwise NULL.
|
||||
*/
|
||||
const char *module_address_lookup(unsigned long addr,
|
||||
unsigned long *symbolsize,
|
||||
unsigned long *offset,
|
||||
char **modname, const unsigned char **modbuildid,
|
||||
char *namebuf);
|
||||
int lookup_module_symbol_name(unsigned long addr, char *symname);
|
||||
int lookup_module_symbol_attrs(unsigned long addr,
|
||||
unsigned long *size,
|
||||
unsigned long *offset,
|
||||
char *modname,
|
||||
char *name);
|
||||
|
||||
/* Returns 0 and fills in value, defined and namebuf, or -ERANGE if
|
||||
* symnum out of range.
|
||||
*/
|
||||
int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
|
||||
char *name, char *module_name, int *exported);
|
||||
|
||||
/* Look for this name: can be of form module:name. */
|
||||
unsigned long module_kallsyms_lookup_name(const char *name);
|
||||
|
||||
unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name);
|
||||
|
||||
#else /* CONFIG_MODULES && CONFIG_KALLSYMS */
|
||||
|
||||
static inline int module_kallsyms_on_each_symbol(const char *modname,
|
||||
int (*fn)(void *, const char *,
|
||||
struct module *, unsigned long),
|
||||
@ -899,6 +876,50 @@ static inline int module_kallsyms_on_each_symbol(const char *modname,
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
/* For kallsyms to ask for address resolution. NULL means not found. */
|
||||
static inline const char *module_address_lookup(unsigned long addr,
|
||||
unsigned long *symbolsize,
|
||||
unsigned long *offset,
|
||||
char **modname,
|
||||
const unsigned char **modbuildid,
|
||||
char *namebuf)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline int lookup_module_symbol_name(unsigned long addr, char *symname)
|
||||
{
|
||||
return -ERANGE;
|
||||
}
|
||||
|
||||
static inline int lookup_module_symbol_attrs(unsigned long addr,
|
||||
unsigned long *size,
|
||||
unsigned long *offset,
|
||||
char *modname,
|
||||
char *name)
|
||||
{
|
||||
return -ERANGE;
|
||||
}
|
||||
|
||||
static inline int module_get_kallsym(unsigned int symnum, unsigned long *value,
|
||||
char *type, char *name,
|
||||
char *module_name, int *exported)
|
||||
{
|
||||
return -ERANGE;
|
||||
}
|
||||
|
||||
static inline unsigned long module_kallsyms_lookup_name(const char *name)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline unsigned long find_kallsyms_symbol_value(struct module *mod,
|
||||
const char *name)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_MODULES && CONFIG_KALLSYMS */
|
||||
|
||||
#endif /* _LINUX_MODULE_H */
|
||||
|
@ -1318,11 +1318,6 @@ struct task_struct {
|
||||
|
||||
struct tlbflush_unmap_batch tlb_ubc;
|
||||
|
||||
union {
|
||||
refcount_t rcu_users;
|
||||
struct rcu_head rcu;
|
||||
};
|
||||
|
||||
/* Cache last used pipe for splice(): */
|
||||
struct pipe_inode_info *splice_pipe;
|
||||
|
||||
@ -1459,6 +1454,8 @@ struct task_struct {
|
||||
unsigned long saved_state_change;
|
||||
# endif
|
||||
#endif
|
||||
struct rcu_head rcu;
|
||||
refcount_t rcu_users;
|
||||
int pagefault_disabled;
|
||||
#ifdef CONFIG_MMU
|
||||
struct task_struct *oom_reaper_list;
|
||||
|
@ -938,6 +938,19 @@ struct sk_buff {
|
||||
__u8 ip_summed:2;
|
||||
__u8 ooo_okay:1;
|
||||
|
||||
/* private: */
|
||||
__u8 __mono_tc_offset[0];
|
||||
/* public: */
|
||||
__u8 mono_delivery_time:1; /* See SKB_MONO_DELIVERY_TIME_MASK */
|
||||
#ifdef CONFIG_NET_CLS_ACT
|
||||
__u8 tc_at_ingress:1; /* See TC_AT_INGRESS_MASK */
|
||||
__u8 tc_skip_classify:1;
|
||||
#endif
|
||||
__u8 remcsum_offload:1;
|
||||
__u8 csum_complete_sw:1;
|
||||
__u8 csum_level:2;
|
||||
__u8 dst_pending_confirm:1;
|
||||
|
||||
__u8 l4_hash:1;
|
||||
__u8 sw_hash:1;
|
||||
__u8 wifi_acked_valid:1;
|
||||
@ -947,19 +960,6 @@ struct sk_buff {
|
||||
__u8 encapsulation:1;
|
||||
__u8 encap_hdr_csum:1;
|
||||
__u8 csum_valid:1;
|
||||
|
||||
/* private: */
|
||||
__u8 __pkt_vlan_present_offset[0];
|
||||
/* public: */
|
||||
__u8 remcsum_offload:1;
|
||||
__u8 csum_complete_sw:1;
|
||||
__u8 csum_level:2;
|
||||
__u8 dst_pending_confirm:1;
|
||||
__u8 mono_delivery_time:1; /* See SKB_MONO_DELIVERY_TIME_MASK */
|
||||
#ifdef CONFIG_NET_CLS_ACT
|
||||
__u8 tc_skip_classify:1;
|
||||
__u8 tc_at_ingress:1; /* See TC_AT_INGRESS_MASK */
|
||||
#endif
|
||||
#ifdef CONFIG_IPV6_NDISC_NODETYPE
|
||||
__u8 ndisc_nodetype:2;
|
||||
#endif
|
||||
@ -1066,13 +1066,13 @@ struct sk_buff {
|
||||
* around, you also must adapt these constants.
|
||||
*/
|
||||
#ifdef __BIG_ENDIAN_BITFIELD
|
||||
#define TC_AT_INGRESS_MASK (1 << 0)
|
||||
#define SKB_MONO_DELIVERY_TIME_MASK (1 << 2)
|
||||
#define SKB_MONO_DELIVERY_TIME_MASK (1 << 7)
|
||||
#define TC_AT_INGRESS_MASK (1 << 6)
|
||||
#else
|
||||
#define TC_AT_INGRESS_MASK (1 << 7)
|
||||
#define SKB_MONO_DELIVERY_TIME_MASK (1 << 5)
|
||||
#define SKB_MONO_DELIVERY_TIME_MASK (1 << 0)
|
||||
#define TC_AT_INGRESS_MASK (1 << 1)
|
||||
#endif
|
||||
#define PKT_VLAN_PRESENT_OFFSET offsetof(struct sk_buff, __pkt_vlan_present_offset)
|
||||
#define SKB_BF_MONO_TC_OFFSET offsetof(struct sk_buff, __mono_tc_offset)
|
||||
|
||||
#ifdef __KERNEL__
|
||||
/*
|
||||
@ -5063,12 +5063,12 @@ static inline u64 skb_get_kcov_handle(struct sk_buff *skb)
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PAGE_POOL
|
||||
static inline void skb_mark_for_recycle(struct sk_buff *skb)
|
||||
{
|
||||
#ifdef CONFIG_PAGE_POOL
|
||||
skb->pp_recycle = 1;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* _LINUX_SKBUFF_H */
|
||||
|
@ -17,4 +17,6 @@ int __fou_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
|
||||
int __gue_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e,
|
||||
u8 *protocol, __be16 *sport, int type);
|
||||
|
||||
int register_fou_bpf(void);
|
||||
|
||||
#endif
|
||||
|
@ -57,6 +57,13 @@ struct ip_tunnel_key {
|
||||
__u8 flow_flags;
|
||||
};
|
||||
|
||||
struct ip_tunnel_encap {
|
||||
u16 type;
|
||||
u16 flags;
|
||||
__be16 sport;
|
||||
__be16 dport;
|
||||
};
|
||||
|
||||
/* Flags for ip_tunnel_info mode. */
|
||||
#define IP_TUNNEL_INFO_TX 0x01 /* represents tx tunnel parameters */
|
||||
#define IP_TUNNEL_INFO_IPV6 0x02 /* key contains IPv6 addresses */
|
||||
@ -75,6 +82,7 @@ struct ip_tunnel_key {
|
||||
|
||||
struct ip_tunnel_info {
|
||||
struct ip_tunnel_key key;
|
||||
struct ip_tunnel_encap encap;
|
||||
#ifdef CONFIG_DST_CACHE
|
||||
struct dst_cache dst_cache;
|
||||
#endif
|
||||
@ -92,13 +100,6 @@ struct ip_tunnel_6rd_parm {
|
||||
};
|
||||
#endif
|
||||
|
||||
struct ip_tunnel_encap {
|
||||
u16 type;
|
||||
u16 flags;
|
||||
__be16 sport;
|
||||
__be16 dport;
|
||||
};
|
||||
|
||||
struct ip_tunnel_prl_entry {
|
||||
struct ip_tunnel_prl_entry __rcu *next;
|
||||
__be32 addr;
|
||||
@ -299,6 +300,7 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
|
||||
__be32 remote, __be32 local,
|
||||
__be32 key);
|
||||
|
||||
void ip_tunnel_md_udp_encap(struct sk_buff *skb, struct ip_tunnel_info *info);
|
||||
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
|
||||
const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
|
||||
bool log_ecn_error);
|
||||
@ -377,22 +379,23 @@ static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
|
||||
return hlen;
|
||||
}
|
||||
|
||||
static inline int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
|
||||
static inline int ip_tunnel_encap(struct sk_buff *skb,
|
||||
struct ip_tunnel_encap *e,
|
||||
u8 *protocol, struct flowi4 *fl4)
|
||||
{
|
||||
const struct ip_tunnel_encap_ops *ops;
|
||||
int ret = -EINVAL;
|
||||
|
||||
if (t->encap.type == TUNNEL_ENCAP_NONE)
|
||||
if (e->type == TUNNEL_ENCAP_NONE)
|
||||
return 0;
|
||||
|
||||
if (t->encap.type >= MAX_IPTUN_ENCAP_OPS)
|
||||
if (e->type >= MAX_IPTUN_ENCAP_OPS)
|
||||
return -EINVAL;
|
||||
|
||||
rcu_read_lock();
|
||||
ops = rcu_dereference(iptun_encaps[t->encap.type]);
|
||||
ops = rcu_dereference(iptun_encaps[e->type]);
|
||||
if (likely(ops && ops->build_header))
|
||||
ret = ops->build_header(skb, &t->encap, protocol, fl4);
|
||||
ret = ops->build_header(skb, e, protocol, fl4);
|
||||
rcu_read_unlock();
|
||||
|
||||
return ret;
|
||||
|
@ -1117,6 +1117,9 @@ struct tcp_congestion_ops {
|
||||
|
||||
int tcp_register_congestion_control(struct tcp_congestion_ops *type);
|
||||
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);
|
||||
int tcp_update_congestion_control(struct tcp_congestion_ops *type,
|
||||
struct tcp_congestion_ops *old_type);
|
||||
int tcp_validate_congestion_control(struct tcp_congestion_ops *ca);
|
||||
|
||||
void tcp_assign_congestion_control(struct sock *sk);
|
||||
void tcp_init_congestion_control(struct sock *sk);
|
||||
|
@ -318,35 +318,6 @@ void xdp_flush_frame_bulk(struct xdp_frame_bulk *bq);
|
||||
void xdp_return_frame_bulk(struct xdp_frame *xdpf,
|
||||
struct xdp_frame_bulk *bq);
|
||||
|
||||
/* When sending xdp_frame into the network stack, then there is no
|
||||
* return point callback, which is needed to release e.g. DMA-mapping
|
||||
* resources with page_pool. Thus, have explicit function to release
|
||||
* frame resources.
|
||||
*/
|
||||
void __xdp_release_frame(void *data, struct xdp_mem_info *mem);
|
||||
static inline void xdp_release_frame(struct xdp_frame *xdpf)
|
||||
{
|
||||
struct xdp_mem_info *mem = &xdpf->mem;
|
||||
struct skb_shared_info *sinfo;
|
||||
int i;
|
||||
|
||||
/* Curr only page_pool needs this */
|
||||
if (mem->type != MEM_TYPE_PAGE_POOL)
|
||||
return;
|
||||
|
||||
if (likely(!xdp_frame_has_frags(xdpf)))
|
||||
goto out;
|
||||
|
||||
sinfo = xdp_get_shared_info_from_frame(xdpf);
|
||||
for (i = 0; i < sinfo->nr_frags; i++) {
|
||||
struct page *page = skb_frag_page(&sinfo->frags[i]);
|
||||
|
||||
__xdp_release_frame(page_address(page), mem);
|
||||
}
|
||||
out:
|
||||
__xdp_release_frame(xdpf->data, mem);
|
||||
}
|
||||
|
||||
static __always_inline unsigned int xdp_get_frame_len(struct xdp_frame *xdpf)
|
||||
{
|
||||
struct skb_shared_info *sinfo;
|
||||
|
@ -180,13 +180,8 @@ static inline bool xp_desc_crosses_non_contig_pg(struct xsk_buff_pool *pool,
|
||||
if (likely(!cross_pg))
|
||||
return false;
|
||||
|
||||
if (pool->dma_pages_cnt) {
|
||||
return !(pool->dma_pages[addr >> PAGE_SHIFT] &
|
||||
XSK_NEXT_PG_CONTIG_MASK);
|
||||
}
|
||||
|
||||
/* skb path */
|
||||
return addr + len > pool->addrs_cnt;
|
||||
return pool->dma_pages_cnt &&
|
||||
!(pool->dma_pages[addr >> PAGE_SHIFT] & XSK_NEXT_PG_CONTIG_MASK);
|
||||
}
|
||||
|
||||
static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
|
||||
|
@ -1033,6 +1033,7 @@ enum bpf_attach_type {
|
||||
BPF_PERF_EVENT,
|
||||
BPF_TRACE_KPROBE_MULTI,
|
||||
BPF_LSM_CGROUP,
|
||||
BPF_STRUCT_OPS,
|
||||
__MAX_BPF_ATTACH_TYPE
|
||||
};
|
||||
|
||||
@ -1108,7 +1109,7 @@ enum bpf_link_type {
|
||||
*/
|
||||
#define BPF_F_STRICT_ALIGNMENT (1U << 0)
|
||||
|
||||
/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the
|
||||
/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROG_LOAD command, the
|
||||
* verifier will allow any alignment whatsoever. On platforms
|
||||
* with strict alignment requirements for loads ands stores (such
|
||||
* as sparc and mips) the verifier validates that all loads and
|
||||
@ -1266,6 +1267,9 @@ enum {
|
||||
|
||||
/* Create a map that is suitable to be an inner map with dynamic max entries */
|
||||
BPF_F_INNER_MAP = (1U << 12),
|
||||
|
||||
/* Create a map that will be registered/unregesitered by the backed bpf_link */
|
||||
BPF_F_LINK = (1U << 13),
|
||||
};
|
||||
|
||||
/* Flags for BPF_PROG_QUERY. */
|
||||
@ -1403,6 +1407,11 @@ union bpf_attr {
|
||||
__aligned_u64 fd_array; /* array of FDs */
|
||||
__aligned_u64 core_relos;
|
||||
__u32 core_relo_rec_size; /* sizeof(struct bpf_core_relo) */
|
||||
/* output: actual total log contents size (including termintaing zero).
|
||||
* It could be both larger than original log_size (if log was
|
||||
* truncated), or smaller (if log buffer wasn't filled completely).
|
||||
*/
|
||||
__u32 log_true_size;
|
||||
};
|
||||
|
||||
struct { /* anonymous struct used by BPF_OBJ_* commands */
|
||||
@ -1488,6 +1497,11 @@ union bpf_attr {
|
||||
__u32 btf_size;
|
||||
__u32 btf_log_size;
|
||||
__u32 btf_log_level;
|
||||
/* output: actual total log contents size (including termintaing zero).
|
||||
* It could be both larger than original log_size (if log was
|
||||
* truncated), or smaller (if log buffer wasn't filled completely).
|
||||
*/
|
||||
__u32 btf_log_true_size;
|
||||
};
|
||||
|
||||
struct {
|
||||
@ -1507,7 +1521,10 @@ union bpf_attr {
|
||||
} task_fd_query;
|
||||
|
||||
struct { /* struct used by BPF_LINK_CREATE command */
|
||||
__u32 prog_fd; /* eBPF program to attach */
|
||||
union {
|
||||
__u32 prog_fd; /* eBPF program to attach */
|
||||
__u32 map_fd; /* struct_ops to attach */
|
||||
};
|
||||
union {
|
||||
__u32 target_fd; /* object to attach to */
|
||||
__u32 target_ifindex; /* target ifindex */
|
||||
@ -1548,12 +1565,23 @@ union bpf_attr {
|
||||
|
||||
struct { /* struct used by BPF_LINK_UPDATE command */
|
||||
__u32 link_fd; /* link fd */
|
||||
/* new program fd to update link with */
|
||||
__u32 new_prog_fd;
|
||||
union {
|
||||
/* new program fd to update link with */
|
||||
__u32 new_prog_fd;
|
||||
/* new struct_ops map fd to update link with */
|
||||
__u32 new_map_fd;
|
||||
};
|
||||
__u32 flags; /* extra flags */
|
||||
/* expected link's program fd; is specified only if
|
||||
* BPF_F_REPLACE flag is set in flags */
|
||||
__u32 old_prog_fd;
|
||||
union {
|
||||
/* expected link's program fd; is specified only if
|
||||
* BPF_F_REPLACE flag is set in flags.
|
||||
*/
|
||||
__u32 old_prog_fd;
|
||||
/* expected link's map fd; is specified only
|
||||
* if BPF_F_REPLACE flag is set.
|
||||
*/
|
||||
__u32 old_map_fd;
|
||||
};
|
||||
} link_update;
|
||||
|
||||
struct {
|
||||
@ -1647,17 +1675,17 @@ union bpf_attr {
|
||||
* Description
|
||||
* This helper is a "printk()-like" facility for debugging. It
|
||||
* prints a message defined by format *fmt* (of size *fmt_size*)
|
||||
* to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if
|
||||
* to file *\/sys/kernel/tracing/trace* from TraceFS, if
|
||||
* available. It can take up to three additional **u64**
|
||||
* arguments (as an eBPF helpers, the total number of arguments is
|
||||
* limited to five).
|
||||
*
|
||||
* Each time the helper is called, it appends a line to the trace.
|
||||
* Lines are discarded while *\/sys/kernel/debug/tracing/trace* is
|
||||
* open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this.
|
||||
* Lines are discarded while *\/sys/kernel/tracing/trace* is
|
||||
* open, use *\/sys/kernel/tracing/trace_pipe* to avoid this.
|
||||
* The format of the trace is customizable, and the exact output
|
||||
* one will get depends on the options set in
|
||||
* *\/sys/kernel/debug/tracing/trace_options* (see also the
|
||||
* *\/sys/kernel/tracing/trace_options* (see also the
|
||||
* *README* file under the same directory). However, it usually
|
||||
* defaults to something like:
|
||||
*
|
||||
@ -6379,6 +6407,9 @@ struct bpf_link_info {
|
||||
struct {
|
||||
__u32 ifindex;
|
||||
} xdp;
|
||||
struct {
|
||||
__u32 map_id;
|
||||
} struct_ops;
|
||||
};
|
||||
} __attribute__((aligned(8)));
|
||||
|
||||
@ -7112,4 +7143,12 @@ enum {
|
||||
BPF_F_TIMER_ABS = (1ULL << 0),
|
||||
};
|
||||
|
||||
/* BPF numbers iterator state */
|
||||
struct bpf_iter_num {
|
||||
/* opaque iterator state; having __u64 here allows to preserve correct
|
||||
* alignment requirements in vmlinux.h, generated from BTF
|
||||
*/
|
||||
__u64 __opaque[1];
|
||||
} __attribute__((aligned(8)));
|
||||
|
||||
#endif /* _UAPI__LINUX_BPF_H__ */
|
||||
|
@ -6,7 +6,8 @@ cflags-nogcse-$(CONFIG_X86)$(CONFIG_CC_IS_GCC) := -fno-gcse
|
||||
endif
|
||||
CFLAGS_core.o += $(call cc-disable-warning, override-init) $(cflags-nogcse-yy)
|
||||
|
||||
obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o bpf_iter.o map_iter.o task_iter.o prog_iter.o link_iter.o
|
||||
obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o log.o
|
||||
obj-$(CONFIG_BPF_SYSCALL) += bpf_iter.o map_iter.o task_iter.o prog_iter.o link_iter.o
|
||||
obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o bloom_filter.o
|
||||
obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o
|
||||
obj-$(CONFIG_BPF_SYSCALL) += bpf_local_storage.o bpf_task_storage.o
|
||||
|
@ -307,8 +307,8 @@ static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key
|
||||
}
|
||||
|
||||
/* Called from syscall or from eBPF program */
|
||||
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
|
||||
u64 map_flags)
|
||||
static long array_map_update_elem(struct bpf_map *map, void *key, void *value,
|
||||
u64 map_flags)
|
||||
{
|
||||
struct bpf_array *array = container_of(map, struct bpf_array, map);
|
||||
u32 index = *(u32 *)key;
|
||||
@ -386,7 +386,7 @@ int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
|
||||
}
|
||||
|
||||
/* Called from syscall or from eBPF program */
|
||||
static int array_map_delete_elem(struct bpf_map *map, void *key)
|
||||
static long array_map_delete_elem(struct bpf_map *map, void *key)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
@ -686,8 +686,8 @@ static const struct bpf_iter_seq_info iter_seq_info = {
|
||||
.seq_priv_size = sizeof(struct bpf_iter_seq_array_map_info),
|
||||
};
|
||||
|
||||
static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
|
||||
void *callback_ctx, u64 flags)
|
||||
static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
|
||||
void *callback_ctx, u64 flags)
|
||||
{
|
||||
u32 i, key, num_elems = 0;
|
||||
struct bpf_array *array;
|
||||
@ -871,7 +871,7 @@ int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
|
||||
static long fd_array_map_delete_elem(struct bpf_map *map, void *key)
|
||||
{
|
||||
struct bpf_array *array = container_of(map, struct bpf_array, map);
|
||||
void *old_ptr;
|
||||
|
@ -16,13 +16,6 @@ struct bpf_bloom_filter {
|
||||
struct bpf_map map;
|
||||
u32 bitset_mask;
|
||||
u32 hash_seed;
|
||||
/* If the size of the values in the bloom filter is u32 aligned,
|
||||
* then it is more performant to use jhash2 as the underlying hash
|
||||
* function, else we use jhash. This tracks the number of u32s
|
||||
* in an u32-aligned value size. If the value size is not u32 aligned,
|
||||
* this will be 0.
|
||||
*/
|
||||
u32 aligned_u32_count;
|
||||
u32 nr_hash_funcs;
|
||||
unsigned long bitset[];
|
||||
};
|
||||
@ -32,16 +25,15 @@ static u32 hash(struct bpf_bloom_filter *bloom, void *value,
|
||||
{
|
||||
u32 h;
|
||||
|
||||
if (bloom->aligned_u32_count)
|
||||
h = jhash2(value, bloom->aligned_u32_count,
|
||||
bloom->hash_seed + index);
|
||||
if (likely(value_size % 4 == 0))
|
||||
h = jhash2(value, value_size / 4, bloom->hash_seed + index);
|
||||
else
|
||||
h = jhash(value, value_size, bloom->hash_seed + index);
|
||||
|
||||
return h & bloom->bitset_mask;
|
||||
}
|
||||
|
||||
static int bloom_map_peek_elem(struct bpf_map *map, void *value)
|
||||
static long bloom_map_peek_elem(struct bpf_map *map, void *value)
|
||||
{
|
||||
struct bpf_bloom_filter *bloom =
|
||||
container_of(map, struct bpf_bloom_filter, map);
|
||||
@ -56,7 +48,7 @@ static int bloom_map_peek_elem(struct bpf_map *map, void *value)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bloom_map_push_elem(struct bpf_map *map, void *value, u64 flags)
|
||||
static long bloom_map_push_elem(struct bpf_map *map, void *value, u64 flags)
|
||||
{
|
||||
struct bpf_bloom_filter *bloom =
|
||||
container_of(map, struct bpf_bloom_filter, map);
|
||||
@ -73,12 +65,12 @@ static int bloom_map_push_elem(struct bpf_map *map, void *value, u64 flags)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bloom_map_pop_elem(struct bpf_map *map, void *value)
|
||||
static long bloom_map_pop_elem(struct bpf_map *map, void *value)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static int bloom_map_delete_elem(struct bpf_map *map, void *value)
|
||||
static long bloom_map_delete_elem(struct bpf_map *map, void *value)
|
||||
{
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
@ -152,11 +144,6 @@ static struct bpf_map *bloom_map_alloc(union bpf_attr *attr)
|
||||
bloom->nr_hash_funcs = nr_hash_funcs;
|
||||
bloom->bitset_mask = bitset_mask;
|
||||
|
||||
/* Check whether the value size is u32-aligned */
|
||||
if ((attr->value_size & (sizeof(u32) - 1)) == 0)
|
||||
bloom->aligned_u32_count =
|
||||
attr->value_size / sizeof(u32);
|
||||
|
||||
if (!(attr->map_flags & BPF_F_ZERO_SEED))
|
||||
bloom->hash_seed = get_random_u32();
|
||||
|
||||
@ -177,8 +164,8 @@ static void *bloom_map_lookup_elem(struct bpf_map *map, void *key)
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
static int bloom_map_update_elem(struct bpf_map *map, void *key,
|
||||
void *value, u64 flags)
|
||||
static long bloom_map_update_elem(struct bpf_map *map, void *key,
|
||||
void *value, u64 flags)
|
||||
{
|
||||
/* The eBPF program should use map_push_elem instead */
|
||||
return -EINVAL;
|
||||
|
@ -46,8 +46,6 @@ static struct bpf_local_storage __rcu **cgroup_storage_ptr(void *owner)
|
||||
void bpf_cgrp_storage_free(struct cgroup *cgroup)
|
||||
{
|
||||
struct bpf_local_storage *local_storage;
|
||||
bool free_cgroup_storage = false;
|
||||
unsigned long flags;
|
||||
|
||||
rcu_read_lock();
|
||||
local_storage = rcu_dereference(cgroup->bpf_cgrp_storage);
|
||||
@ -57,14 +55,9 @@ void bpf_cgrp_storage_free(struct cgroup *cgroup)
|
||||
}
|
||||
|
||||
bpf_cgrp_storage_lock();
|
||||
raw_spin_lock_irqsave(&local_storage->lock, flags);
|
||||
free_cgroup_storage = bpf_local_storage_unlink_nolock(local_storage);
|
||||
raw_spin_unlock_irqrestore(&local_storage->lock, flags);
|
||||
bpf_local_storage_destroy(local_storage);
|
||||
bpf_cgrp_storage_unlock();
|
||||
rcu_read_unlock();
|
||||
|
||||
if (free_cgroup_storage)
|
||||
kfree_rcu(local_storage, rcu);
|
||||
}
|
||||
|
||||
static struct bpf_local_storage_data *
|
||||
@ -100,8 +93,8 @@ static void *bpf_cgrp_storage_lookup_elem(struct bpf_map *map, void *key)
|
||||
return sdata ? sdata->data : NULL;
|
||||
}
|
||||
|
||||
static int bpf_cgrp_storage_update_elem(struct bpf_map *map, void *key,
|
||||
void *value, u64 map_flags)
|
||||
static long bpf_cgrp_storage_update_elem(struct bpf_map *map, void *key,
|
||||
void *value, u64 map_flags)
|
||||
{
|
||||
struct bpf_local_storage_data *sdata;
|
||||
struct cgroup *cgroup;
|
||||
@ -128,11 +121,11 @@ static int cgroup_storage_delete(struct cgroup *cgroup, struct bpf_map *map)
|
||||
if (!sdata)
|
||||
return -ENOENT;
|
||||
|
||||
bpf_selem_unlink(SELEM(sdata), true);
|
||||
bpf_selem_unlink(SELEM(sdata), false);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bpf_cgrp_storage_delete_elem(struct bpf_map *map, void *key)
|
||||
static long bpf_cgrp_storage_delete_elem(struct bpf_map *map, void *key)
|
||||
{
|
||||
struct cgroup *cgroup;
|
||||
int err, fd;
|
||||
@ -156,7 +149,7 @@ static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)
|
||||
|
||||
static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
|
||||
{
|
||||
return bpf_local_storage_map_alloc(attr, &cgroup_cache);
|
||||
return bpf_local_storage_map_alloc(attr, &cgroup_cache, true);
|
||||
}
|
||||
|
||||
static void cgroup_storage_map_free(struct bpf_map *map)
|
||||
@ -231,7 +224,7 @@ const struct bpf_func_proto bpf_cgrp_storage_get_proto = {
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
|
||||
.arg1_type = ARG_CONST_MAP_PTR,
|
||||
.arg2_type = ARG_PTR_TO_BTF_ID,
|
||||
.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
|
||||
.arg2_btf_id = &bpf_cgroup_btf_id[0],
|
||||
.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
|
||||
.arg4_type = ARG_ANYTHING,
|
||||
@ -242,6 +235,6 @@ const struct bpf_func_proto bpf_cgrp_storage_delete_proto = {
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_INTEGER,
|
||||
.arg1_type = ARG_CONST_MAP_PTR,
|
||||
.arg2_type = ARG_PTR_TO_BTF_ID,
|
||||
.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
|
||||
.arg2_btf_id = &bpf_cgroup_btf_id[0],
|
||||
};
|
||||
|
@ -57,7 +57,6 @@ static struct bpf_local_storage_data *inode_storage_lookup(struct inode *inode,
|
||||
void bpf_inode_storage_free(struct inode *inode)
|
||||
{
|
||||
struct bpf_local_storage *local_storage;
|
||||
bool free_inode_storage = false;
|
||||
struct bpf_storage_blob *bsb;
|
||||
|
||||
bsb = bpf_inode(inode);
|
||||
@ -72,13 +71,8 @@ void bpf_inode_storage_free(struct inode *inode)
|
||||
return;
|
||||
}
|
||||
|
||||
raw_spin_lock_bh(&local_storage->lock);
|
||||
free_inode_storage = bpf_local_storage_unlink_nolock(local_storage);
|
||||
raw_spin_unlock_bh(&local_storage->lock);
|
||||
bpf_local_storage_destroy(local_storage);
|
||||
rcu_read_unlock();
|
||||
|
||||
if (free_inode_storage)
|
||||
kfree_rcu(local_storage, rcu);
|
||||
}
|
||||
|
||||
static void *bpf_fd_inode_storage_lookup_elem(struct bpf_map *map, void *key)
|
||||
@ -97,8 +91,8 @@ static void *bpf_fd_inode_storage_lookup_elem(struct bpf_map *map, void *key)
|
||||
return sdata ? sdata->data : NULL;
|
||||
}
|
||||
|
||||
static int bpf_fd_inode_storage_update_elem(struct bpf_map *map, void *key,
|
||||
void *value, u64 map_flags)
|
||||
static long bpf_fd_inode_storage_update_elem(struct bpf_map *map, void *key,
|
||||
void *value, u64 map_flags)
|
||||
{
|
||||
struct bpf_local_storage_data *sdata;
|
||||
struct file *f;
|
||||
@ -128,12 +122,12 @@ static int inode_storage_delete(struct inode *inode, struct bpf_map *map)
|
||||
if (!sdata)
|
||||
return -ENOENT;
|
||||
|
||||
bpf_selem_unlink(SELEM(sdata), true);
|
||||
bpf_selem_unlink(SELEM(sdata), false);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bpf_fd_inode_storage_delete_elem(struct bpf_map *map, void *key)
|
||||
static long bpf_fd_inode_storage_delete_elem(struct bpf_map *map, void *key)
|
||||
{
|
||||
struct file *f;
|
||||
int fd, err;
|
||||
@ -205,7 +199,7 @@ static int notsupp_get_next_key(struct bpf_map *map, void *key,
|
||||
|
||||
static struct bpf_map *inode_storage_map_alloc(union bpf_attr *attr)
|
||||
{
|
||||
return bpf_local_storage_map_alloc(attr, &inode_cache);
|
||||
return bpf_local_storage_map_alloc(attr, &inode_cache, false);
|
||||
}
|
||||
|
||||
static void inode_storage_map_free(struct bpf_map *map)
|
||||
@ -235,7 +229,7 @@ const struct bpf_func_proto bpf_inode_storage_get_proto = {
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
|
||||
.arg1_type = ARG_CONST_MAP_PTR,
|
||||
.arg2_type = ARG_PTR_TO_BTF_ID,
|
||||
.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
|
||||
.arg2_btf_id = &bpf_inode_storage_btf_ids[0],
|
||||
.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
|
||||
.arg4_type = ARG_ANYTHING,
|
||||
@ -246,6 +240,6 @@ const struct bpf_func_proto bpf_inode_storage_delete_proto = {
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_INTEGER,
|
||||
.arg1_type = ARG_CONST_MAP_PTR,
|
||||
.arg2_type = ARG_PTR_TO_BTF_ID,
|
||||
.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
|
||||
.arg2_btf_id = &bpf_inode_storage_btf_ids[0],
|
||||
};
|
||||
|
@ -776,3 +776,73 @@ const struct bpf_func_proto bpf_loop_proto = {
|
||||
.arg3_type = ARG_PTR_TO_STACK_OR_NULL,
|
||||
.arg4_type = ARG_ANYTHING,
|
||||
};
|
||||
|
||||
struct bpf_iter_num_kern {
|
||||
int cur; /* current value, inclusive */
|
||||
int end; /* final value, exclusive */
|
||||
} __aligned(8);
|
||||
|
||||
__diag_push();
|
||||
__diag_ignore_all("-Wmissing-prototypes",
|
||||
"Global functions as their definitions will be in vmlinux BTF");
|
||||
|
||||
__bpf_kfunc int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end)
|
||||
{
|
||||
struct bpf_iter_num_kern *s = (void *)it;
|
||||
|
||||
BUILD_BUG_ON(sizeof(struct bpf_iter_num_kern) != sizeof(struct bpf_iter_num));
|
||||
BUILD_BUG_ON(__alignof__(struct bpf_iter_num_kern) != __alignof__(struct bpf_iter_num));
|
||||
|
||||
BTF_TYPE_EMIT(struct btf_iter_num);
|
||||
|
||||
/* start == end is legit, it's an empty range and we'll just get NULL
|
||||
* on first (and any subsequent) bpf_iter_num_next() call
|
||||
*/
|
||||
if (start > end) {
|
||||
s->cur = s->end = 0;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* avoid overflows, e.g., if start == INT_MIN and end == INT_MAX */
|
||||
if ((s64)end - (s64)start > BPF_MAX_LOOPS) {
|
||||
s->cur = s->end = 0;
|
||||
return -E2BIG;
|
||||
}
|
||||
|
||||
/* user will call bpf_iter_num_next() first,
|
||||
* which will set s->cur to exactly start value;
|
||||
* underflow shouldn't matter
|
||||
*/
|
||||
s->cur = start - 1;
|
||||
s->end = end;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
__bpf_kfunc int *bpf_iter_num_next(struct bpf_iter_num* it)
|
||||
{
|
||||
struct bpf_iter_num_kern *s = (void *)it;
|
||||
|
||||
/* check failed initialization or if we are done (same behavior);
|
||||
* need to be careful about overflow, so convert to s64 for checks,
|
||||
* e.g., if s->cur == s->end == INT_MAX, we can't just do
|
||||
* s->cur + 1 >= s->end
|
||||
*/
|
||||
if ((s64)(s->cur + 1) >= s->end) {
|
||||
s->cur = s->end = 0;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
s->cur++;
|
||||
|
||||
return &s->cur;
|
||||
}
|
||||
|
||||
__bpf_kfunc void bpf_iter_num_destroy(struct bpf_iter_num *it)
|
||||
{
|
||||
struct bpf_iter_num_kern *s = (void *)it;
|
||||
|
||||
s->cur = s->end = 0;
|
||||
}
|
||||
|
||||
__diag_pop();
|
||||
|
@ -80,8 +80,24 @@ bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
|
||||
if (charge_mem && mem_charge(smap, owner, smap->elem_size))
|
||||
return NULL;
|
||||
|
||||
selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
|
||||
gfp_flags | __GFP_NOWARN);
|
||||
if (smap->bpf_ma) {
|
||||
migrate_disable();
|
||||
selem = bpf_mem_cache_alloc_flags(&smap->selem_ma, gfp_flags);
|
||||
migrate_enable();
|
||||
if (selem)
|
||||
/* Keep the original bpf_map_kzalloc behavior
|
||||
* before started using the bpf_mem_cache_alloc.
|
||||
*
|
||||
* No need to use zero_map_value. The bpf_selem_free()
|
||||
* only does bpf_mem_cache_free when there is
|
||||
* no other bpf prog is using the selem.
|
||||
*/
|
||||
memset(SDATA(selem)->data, 0, smap->map.value_size);
|
||||
} else {
|
||||
selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
|
||||
gfp_flags | __GFP_NOWARN);
|
||||
}
|
||||
|
||||
if (selem) {
|
||||
if (value)
|
||||
copy_map_value(&smap->map, SDATA(selem)->data, value);
|
||||
@ -95,7 +111,8 @@ bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void bpf_local_storage_free_rcu(struct rcu_head *rcu)
|
||||
/* rcu tasks trace callback for bpf_ma == false */
|
||||
static void __bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
|
||||
{
|
||||
struct bpf_local_storage *local_storage;
|
||||
|
||||
@ -109,28 +126,66 @@ void bpf_local_storage_free_rcu(struct rcu_head *rcu)
|
||||
kfree_rcu(local_storage, rcu);
|
||||
}
|
||||
|
||||
static void bpf_selem_free_fields_rcu(struct rcu_head *rcu)
|
||||
static void bpf_local_storage_free_rcu(struct rcu_head *rcu)
|
||||
{
|
||||
struct bpf_local_storage_elem *selem;
|
||||
struct bpf_local_storage_map *smap;
|
||||
struct bpf_local_storage *local_storage;
|
||||
|
||||
selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
|
||||
/* protected by the rcu_barrier*() */
|
||||
smap = rcu_dereference_protected(SDATA(selem)->smap, true);
|
||||
bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
|
||||
kfree(selem);
|
||||
local_storage = container_of(rcu, struct bpf_local_storage, rcu);
|
||||
bpf_mem_cache_raw_free(local_storage);
|
||||
}
|
||||
|
||||
static void bpf_selem_free_fields_trace_rcu(struct rcu_head *rcu)
|
||||
static void bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
|
||||
{
|
||||
/* Free directly if Tasks Trace RCU GP also implies RCU GP */
|
||||
if (rcu_trace_implies_rcu_gp())
|
||||
bpf_selem_free_fields_rcu(rcu);
|
||||
bpf_local_storage_free_rcu(rcu);
|
||||
else
|
||||
call_rcu(rcu, bpf_selem_free_fields_rcu);
|
||||
call_rcu(rcu, bpf_local_storage_free_rcu);
|
||||
}
|
||||
|
||||
static void bpf_selem_free_trace_rcu(struct rcu_head *rcu)
|
||||
/* Handle bpf_ma == false */
|
||||
static void __bpf_local_storage_free(struct bpf_local_storage *local_storage,
|
||||
bool vanilla_rcu)
|
||||
{
|
||||
if (vanilla_rcu)
|
||||
kfree_rcu(local_storage, rcu);
|
||||
else
|
||||
call_rcu_tasks_trace(&local_storage->rcu,
|
||||
__bpf_local_storage_free_trace_rcu);
|
||||
}
|
||||
|
||||
static void bpf_local_storage_free(struct bpf_local_storage *local_storage,
|
||||
struct bpf_local_storage_map *smap,
|
||||
bool bpf_ma, bool reuse_now)
|
||||
{
|
||||
if (!local_storage)
|
||||
return;
|
||||
|
||||
if (!bpf_ma) {
|
||||
__bpf_local_storage_free(local_storage, reuse_now);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!reuse_now) {
|
||||
call_rcu_tasks_trace(&local_storage->rcu,
|
||||
bpf_local_storage_free_trace_rcu);
|
||||
return;
|
||||
}
|
||||
|
||||
if (smap) {
|
||||
migrate_disable();
|
||||
bpf_mem_cache_free(&smap->storage_ma, local_storage);
|
||||
migrate_enable();
|
||||
} else {
|
||||
/* smap could be NULL if the selem that triggered
|
||||
* this 'local_storage' creation had been long gone.
|
||||
* In this case, directly do call_rcu().
|
||||
*/
|
||||
call_rcu(&local_storage->rcu, bpf_local_storage_free_rcu);
|
||||
}
|
||||
}
|
||||
|
||||
/* rcu tasks trace callback for bpf_ma == false */
|
||||
static void __bpf_selem_free_trace_rcu(struct rcu_head *rcu)
|
||||
{
|
||||
struct bpf_local_storage_elem *selem;
|
||||
|
||||
@ -141,17 +196,66 @@ static void bpf_selem_free_trace_rcu(struct rcu_head *rcu)
|
||||
kfree_rcu(selem, rcu);
|
||||
}
|
||||
|
||||
/* Handle bpf_ma == false */
|
||||
static void __bpf_selem_free(struct bpf_local_storage_elem *selem,
|
||||
bool vanilla_rcu)
|
||||
{
|
||||
if (vanilla_rcu)
|
||||
kfree_rcu(selem, rcu);
|
||||
else
|
||||
call_rcu_tasks_trace(&selem->rcu, __bpf_selem_free_trace_rcu);
|
||||
}
|
||||
|
||||
static void bpf_selem_free_rcu(struct rcu_head *rcu)
|
||||
{
|
||||
struct bpf_local_storage_elem *selem;
|
||||
|
||||
selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
|
||||
bpf_mem_cache_raw_free(selem);
|
||||
}
|
||||
|
||||
static void bpf_selem_free_trace_rcu(struct rcu_head *rcu)
|
||||
{
|
||||
if (rcu_trace_implies_rcu_gp())
|
||||
bpf_selem_free_rcu(rcu);
|
||||
else
|
||||
call_rcu(rcu, bpf_selem_free_rcu);
|
||||
}
|
||||
|
||||
void bpf_selem_free(struct bpf_local_storage_elem *selem,
|
||||
struct bpf_local_storage_map *smap,
|
||||
bool reuse_now)
|
||||
{
|
||||
bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
|
||||
|
||||
if (!smap->bpf_ma) {
|
||||
__bpf_selem_free(selem, reuse_now);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!reuse_now) {
|
||||
call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu);
|
||||
} else {
|
||||
/* Instead of using the vanilla call_rcu(),
|
||||
* bpf_mem_cache_free will be able to reuse selem
|
||||
* immediately.
|
||||
*/
|
||||
migrate_disable();
|
||||
bpf_mem_cache_free(&smap->selem_ma, selem);
|
||||
migrate_enable();
|
||||
}
|
||||
}
|
||||
|
||||
/* local_storage->lock must be held and selem->local_storage == local_storage.
|
||||
* The caller must ensure selem->smap is still valid to be
|
||||
* dereferenced for its smap->elem_size and smap->cache_idx.
|
||||
*/
|
||||
static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
|
||||
struct bpf_local_storage_elem *selem,
|
||||
bool uncharge_mem, bool use_trace_rcu)
|
||||
bool uncharge_mem, bool reuse_now)
|
||||
{
|
||||
struct bpf_local_storage_map *smap;
|
||||
bool free_local_storage;
|
||||
struct btf_record *rec;
|
||||
void *owner;
|
||||
|
||||
smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
|
||||
@ -192,35 +296,55 @@ static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_stor
|
||||
SDATA(selem))
|
||||
RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);
|
||||
|
||||
/* A different RCU callback is chosen whenever we need to free
|
||||
* additional fields in selem data before freeing selem.
|
||||
* bpf_local_storage_map_free only executes rcu_barrier to wait for RCU
|
||||
* callbacks when it has special fields, hence we can only conditionally
|
||||
* dereference smap, as by this time the map might have already been
|
||||
* freed without waiting for our call_rcu callback if it did not have
|
||||
* any special fields.
|
||||
*/
|
||||
rec = smap->map.record;
|
||||
if (use_trace_rcu) {
|
||||
if (!IS_ERR_OR_NULL(rec))
|
||||
call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_fields_trace_rcu);
|
||||
else
|
||||
call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu);
|
||||
} else {
|
||||
if (!IS_ERR_OR_NULL(rec))
|
||||
call_rcu(&selem->rcu, bpf_selem_free_fields_rcu);
|
||||
else
|
||||
kfree_rcu(selem, rcu);
|
||||
}
|
||||
bpf_selem_free(selem, smap, reuse_now);
|
||||
|
||||
if (rcu_access_pointer(local_storage->smap) == smap)
|
||||
RCU_INIT_POINTER(local_storage->smap, NULL);
|
||||
|
||||
return free_local_storage;
|
||||
}
|
||||
|
||||
static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
|
||||
bool use_trace_rcu)
|
||||
static bool check_storage_bpf_ma(struct bpf_local_storage *local_storage,
|
||||
struct bpf_local_storage_map *storage_smap,
|
||||
struct bpf_local_storage_elem *selem)
|
||||
{
|
||||
|
||||
struct bpf_local_storage_map *selem_smap;
|
||||
|
||||
/* local_storage->smap may be NULL. If it is, get the bpf_ma
|
||||
* from any selem in the local_storage->list. The bpf_ma of all
|
||||
* local_storage and selem should have the same value
|
||||
* for the same map type.
|
||||
*
|
||||
* If the local_storage->list is already empty, the caller will not
|
||||
* care about the bpf_ma value also because the caller is not
|
||||
* responsibile to free the local_storage.
|
||||
*/
|
||||
|
||||
if (storage_smap)
|
||||
return storage_smap->bpf_ma;
|
||||
|
||||
if (!selem) {
|
||||
struct hlist_node *n;
|
||||
|
||||
n = rcu_dereference_check(hlist_first_rcu(&local_storage->list),
|
||||
bpf_rcu_lock_held());
|
||||
if (!n)
|
||||
return false;
|
||||
|
||||
selem = hlist_entry(n, struct bpf_local_storage_elem, snode);
|
||||
}
|
||||
selem_smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
|
||||
|
||||
return selem_smap->bpf_ma;
|
||||
}
|
||||
|
||||
static void bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
|
||||
bool reuse_now)
|
||||
{
|
||||
struct bpf_local_storage_map *storage_smap;
|
||||
struct bpf_local_storage *local_storage;
|
||||
bool free_local_storage = false;
|
||||
bool bpf_ma, free_local_storage = false;
|
||||
unsigned long flags;
|
||||
|
||||
if (unlikely(!selem_linked_to_storage_lockless(selem)))
|
||||
@ -229,19 +353,18 @@ static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
|
||||
|
||||
local_storage = rcu_dereference_check(selem->local_storage,
|
||||
bpf_rcu_lock_held());
|
||||
storage_smap = rcu_dereference_check(local_storage->smap,
|
||||
bpf_rcu_lock_held());
|
||||
bpf_ma = check_storage_bpf_ma(local_storage, storage_smap, selem);
|
||||
|
||||
raw_spin_lock_irqsave(&local_storage->lock, flags);
|
||||
if (likely(selem_linked_to_storage(selem)))
|
||||
free_local_storage = bpf_selem_unlink_storage_nolock(
|
||||
local_storage, selem, true, use_trace_rcu);
|
||||
local_storage, selem, true, reuse_now);
|
||||
raw_spin_unlock_irqrestore(&local_storage->lock, flags);
|
||||
|
||||
if (free_local_storage) {
|
||||
if (use_trace_rcu)
|
||||
call_rcu_tasks_trace(&local_storage->rcu,
|
||||
bpf_local_storage_free_rcu);
|
||||
else
|
||||
kfree_rcu(local_storage, rcu);
|
||||
}
|
||||
if (free_local_storage)
|
||||
bpf_local_storage_free(local_storage, storage_smap, bpf_ma, reuse_now);
|
||||
}
|
||||
|
||||
void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
|
||||
@ -251,7 +374,7 @@ void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
|
||||
hlist_add_head_rcu(&selem->snode, &local_storage->list);
|
||||
}
|
||||
|
||||
void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
|
||||
static void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
|
||||
{
|
||||
struct bpf_local_storage_map *smap;
|
||||
struct bpf_local_storage_map_bucket *b;
|
||||
@ -281,14 +404,14 @@ void bpf_selem_link_map(struct bpf_local_storage_map *smap,
|
||||
raw_spin_unlock_irqrestore(&b->lock, flags);
|
||||
}
|
||||
|
||||
void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool use_trace_rcu)
|
||||
void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now)
|
||||
{
|
||||
/* Always unlink from map before unlinking from local_storage
|
||||
* because selem will be freed after successfully unlinked from
|
||||
* the local_storage.
|
||||
*/
|
||||
bpf_selem_unlink_map(selem);
|
||||
__bpf_selem_unlink_storage(selem, use_trace_rcu);
|
||||
bpf_selem_unlink_storage(selem, reuse_now);
|
||||
}
|
||||
|
||||
/* If cacheit_lockit is false, this lookup function is lockless */
|
||||
@ -361,13 +484,21 @@ int bpf_local_storage_alloc(void *owner,
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
|
||||
gfp_flags | __GFP_NOWARN);
|
||||
if (smap->bpf_ma) {
|
||||
migrate_disable();
|
||||
storage = bpf_mem_cache_alloc_flags(&smap->storage_ma, gfp_flags);
|
||||
migrate_enable();
|
||||
} else {
|
||||
storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
|
||||
gfp_flags | __GFP_NOWARN);
|
||||
}
|
||||
|
||||
if (!storage) {
|
||||
err = -ENOMEM;
|
||||
goto uncharge;
|
||||
}
|
||||
|
||||
RCU_INIT_POINTER(storage->smap, smap);
|
||||
INIT_HLIST_HEAD(&storage->list);
|
||||
raw_spin_lock_init(&storage->lock);
|
||||
storage->owner = owner;
|
||||
@ -407,7 +538,7 @@ int bpf_local_storage_alloc(void *owner,
|
||||
return 0;
|
||||
|
||||
uncharge:
|
||||
kfree(storage);
|
||||
bpf_local_storage_free(storage, smap, smap->bpf_ma, true);
|
||||
mem_uncharge(smap, owner, sizeof(*storage));
|
||||
return err;
|
||||
}
|
||||
@ -451,7 +582,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
|
||||
|
||||
err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags);
|
||||
if (err) {
|
||||
kfree(selem);
|
||||
bpf_selem_free(selem, smap, true);
|
||||
mem_uncharge(smap, owner, smap->elem_size);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
@ -534,7 +665,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
|
||||
if (old_sdata) {
|
||||
bpf_selem_unlink_map(SELEM(old_sdata));
|
||||
bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
|
||||
false, true);
|
||||
false, false);
|
||||
}
|
||||
|
||||
unlock:
|
||||
@ -545,7 +676,7 @@ unlock_err:
|
||||
raw_spin_unlock_irqrestore(&local_storage->lock, flags);
|
||||
if (selem) {
|
||||
mem_uncharge(smap, owner, smap->elem_size);
|
||||
kfree(selem);
|
||||
bpf_selem_free(selem, smap, true);
|
||||
}
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
@ -601,40 +732,6 @@ int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct bpf_local_storage_map *__bpf_local_storage_map_alloc(union bpf_attr *attr)
|
||||
{
|
||||
struct bpf_local_storage_map *smap;
|
||||
unsigned int i;
|
||||
u32 nbuckets;
|
||||
|
||||
smap = bpf_map_area_alloc(sizeof(*smap), NUMA_NO_NODE);
|
||||
if (!smap)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
bpf_map_init_from_attr(&smap->map, attr);
|
||||
|
||||
nbuckets = roundup_pow_of_two(num_possible_cpus());
|
||||
/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
|
||||
nbuckets = max_t(u32, 2, nbuckets);
|
||||
smap->bucket_log = ilog2(nbuckets);
|
||||
|
||||
smap->buckets = bpf_map_kvcalloc(&smap->map, sizeof(*smap->buckets),
|
||||
nbuckets, GFP_USER | __GFP_NOWARN);
|
||||
if (!smap->buckets) {
|
||||
bpf_map_area_free(smap);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
for (i = 0; i < nbuckets; i++) {
|
||||
INIT_HLIST_HEAD(&smap->buckets[i].list);
|
||||
raw_spin_lock_init(&smap->buckets[i].lock);
|
||||
}
|
||||
|
||||
smap->elem_size = offsetof(struct bpf_local_storage_elem,
|
||||
sdata.data[attr->value_size]);
|
||||
|
||||
return smap;
|
||||
}
|
||||
|
||||
int bpf_local_storage_map_check_btf(const struct bpf_map *map,
|
||||
const struct btf *btf,
|
||||
const struct btf_type *key_type,
|
||||
@ -652,11 +749,16 @@ int bpf_local_storage_map_check_btf(const struct bpf_map *map,
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool bpf_local_storage_unlink_nolock(struct bpf_local_storage *local_storage)
|
||||
void bpf_local_storage_destroy(struct bpf_local_storage *local_storage)
|
||||
{
|
||||
struct bpf_local_storage_map *storage_smap;
|
||||
struct bpf_local_storage_elem *selem;
|
||||
bool free_storage = false;
|
||||
bool bpf_ma, free_storage = false;
|
||||
struct hlist_node *n;
|
||||
unsigned long flags;
|
||||
|
||||
storage_smap = rcu_dereference_check(local_storage->smap, bpf_rcu_lock_held());
|
||||
bpf_ma = check_storage_bpf_ma(local_storage, storage_smap, NULL);
|
||||
|
||||
/* Neither the bpf_prog nor the bpf_map's syscall
|
||||
* could be modifying the local_storage->list now.
|
||||
@ -667,6 +769,7 @@ bool bpf_local_storage_unlink_nolock(struct bpf_local_storage *local_storage)
|
||||
* when unlinking elem from the local_storage->list and
|
||||
* the map's bucket->list.
|
||||
*/
|
||||
raw_spin_lock_irqsave(&local_storage->lock, flags);
|
||||
hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
|
||||
/* Always unlink from map before unlinking from
|
||||
* local_storage.
|
||||
@ -679,10 +782,12 @@ bool bpf_local_storage_unlink_nolock(struct bpf_local_storage *local_storage)
|
||||
* of the loop will set the free_cgroup_storage to true.
|
||||
*/
|
||||
free_storage = bpf_selem_unlink_storage_nolock(
|
||||
local_storage, selem, false, false);
|
||||
local_storage, selem, false, true);
|
||||
}
|
||||
raw_spin_unlock_irqrestore(&local_storage->lock, flags);
|
||||
|
||||
return free_storage;
|
||||
if (free_storage)
|
||||
bpf_local_storage_free(local_storage, storage_smap, bpf_ma, true);
|
||||
}
|
||||
|
||||
u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map)
|
||||
@ -695,18 +800,71 @@ u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map)
|
||||
return usage;
|
||||
}
|
||||
|
||||
/* When bpf_ma == true, the bpf_mem_alloc is used to allocate and free memory.
|
||||
* A deadlock free allocator is useful for storage that the bpf prog can easily
|
||||
* get a hold of the owner PTR_TO_BTF_ID in any context. eg. bpf_get_current_task_btf.
|
||||
* The task and cgroup storage fall into this case. The bpf_mem_alloc reuses
|
||||
* memory immediately. To be reuse-immediate safe, the owner destruction
|
||||
* code path needs to go through a rcu grace period before calling
|
||||
* bpf_local_storage_destroy().
|
||||
*
|
||||
* When bpf_ma == false, the kmalloc and kfree are used.
|
||||
*/
|
||||
struct bpf_map *
|
||||
bpf_local_storage_map_alloc(union bpf_attr *attr,
|
||||
struct bpf_local_storage_cache *cache)
|
||||
struct bpf_local_storage_cache *cache,
|
||||
bool bpf_ma)
|
||||
{
|
||||
struct bpf_local_storage_map *smap;
|
||||
unsigned int i;
|
||||
u32 nbuckets;
|
||||
int err;
|
||||
|
||||
smap = __bpf_local_storage_map_alloc(attr);
|
||||
if (IS_ERR(smap))
|
||||
return ERR_CAST(smap);
|
||||
smap = bpf_map_area_alloc(sizeof(*smap), NUMA_NO_NODE);
|
||||
if (!smap)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
bpf_map_init_from_attr(&smap->map, attr);
|
||||
|
||||
nbuckets = roundup_pow_of_two(num_possible_cpus());
|
||||
/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
|
||||
nbuckets = max_t(u32, 2, nbuckets);
|
||||
smap->bucket_log = ilog2(nbuckets);
|
||||
|
||||
smap->buckets = bpf_map_kvcalloc(&smap->map, sizeof(*smap->buckets),
|
||||
nbuckets, GFP_USER | __GFP_NOWARN);
|
||||
if (!smap->buckets) {
|
||||
err = -ENOMEM;
|
||||
goto free_smap;
|
||||
}
|
||||
|
||||
for (i = 0; i < nbuckets; i++) {
|
||||
INIT_HLIST_HEAD(&smap->buckets[i].list);
|
||||
raw_spin_lock_init(&smap->buckets[i].lock);
|
||||
}
|
||||
|
||||
smap->elem_size = offsetof(struct bpf_local_storage_elem,
|
||||
sdata.data[attr->value_size]);
|
||||
|
||||
smap->bpf_ma = bpf_ma;
|
||||
if (bpf_ma) {
|
||||
err = bpf_mem_alloc_init(&smap->selem_ma, smap->elem_size, false);
|
||||
if (err)
|
||||
goto free_smap;
|
||||
|
||||
err = bpf_mem_alloc_init(&smap->storage_ma, sizeof(struct bpf_local_storage), false);
|
||||
if (err) {
|
||||
bpf_mem_alloc_destroy(&smap->selem_ma);
|
||||
goto free_smap;
|
||||
}
|
||||
}
|
||||
|
||||
smap->cache_idx = bpf_local_storage_cache_idx_get(cache);
|
||||
return &smap->map;
|
||||
|
||||
free_smap:
|
||||
kvfree(smap->buckets);
|
||||
bpf_map_area_free(smap);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
void bpf_local_storage_map_free(struct bpf_map *map,
|
||||
@@ -748,7 +906,7 @@ void bpf_local_storage_map_free(struct bpf_map *map,
|
||||
migrate_disable();
|
||||
this_cpu_inc(*busy_counter);
|
||||
}
|
||||
bpf_selem_unlink(selem, false);
|
||||
bpf_selem_unlink(selem, true);
|
||||
if (busy_counter) {
|
||||
this_cpu_dec(*busy_counter);
|
||||
migrate_enable();
|
||||
@@ -772,26 +930,10 @@ void bpf_local_storage_map_free(struct bpf_map *map,
|
||||
*/
|
||||
synchronize_rcu();
|
||||
|
||||
/* Only delay freeing of smap, buckets are not needed anymore */
|
||||
kvfree(smap->buckets);
|
||||
|
||||
/* When local storage has special fields, callbacks for
|
||||
* bpf_selem_free_fields_rcu and bpf_selem_free_fields_trace_rcu will
|
||||
* keep using the map BTF record, we need to execute an RCU barrier to
|
||||
* wait for them as the record will be freed right after our map_free
|
||||
* callback.
|
||||
*/
|
||||
if (!IS_ERR_OR_NULL(smap->map.record)) {
|
||||
rcu_barrier_tasks_trace();
|
||||
/* We cannot skip rcu_barrier() when rcu_trace_implies_rcu_gp()
|
||||
* is true, because while call_rcu invocation is skipped in that
|
||||
* case in bpf_selem_free_fields_trace_rcu (and all local
|
||||
* storage maps pass use_trace_rcu = true), there can be
|
||||
* call_rcu callbacks based on use_trace_rcu = false in the
|
||||
* while ((selem = ...)) loop above or when owner's free path
|
||||
* calls bpf_local_storage_unlink_nolock.
|
||||
*/
|
||||
rcu_barrier();
|
||||
if (smap->bpf_ma) {
|
||||
bpf_mem_alloc_destroy(&smap->selem_ma);
|
||||
bpf_mem_alloc_destroy(&smap->storage_ma);
|
||||
}
|
||||
kvfree(smap->buckets);
|
||||
bpf_map_area_free(smap);
|
||||
}
|
||||
|
@@ -11,11 +11,13 @@
|
||||
#include <linux/refcount.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/btf_ids.h>
|
||||
#include <linux/rcupdate_wait.h>
|
||||
|
||||
enum bpf_struct_ops_state {
|
||||
BPF_STRUCT_OPS_STATE_INIT,
|
||||
BPF_STRUCT_OPS_STATE_INUSE,
|
||||
BPF_STRUCT_OPS_STATE_TOBEFREE,
|
||||
BPF_STRUCT_OPS_STATE_READY,
|
||||
};
|
||||
|
||||
#define BPF_STRUCT_OPS_COMMON_VALUE \
|
||||
@@ -58,6 +60,13 @@ struct bpf_struct_ops_map {
|
||||
struct bpf_struct_ops_value kvalue;
|
||||
};
|
||||
|
||||
struct bpf_struct_ops_link {
|
||||
struct bpf_link link;
|
||||
struct bpf_map __rcu *map;
|
||||
};
|
||||
|
||||
static DEFINE_MUTEX(update_mutex);
|
||||
|
||||
#define VALUE_PREFIX "bpf_struct_ops_"
|
||||
#define VALUE_PREFIX_LEN (sizeof(VALUE_PREFIX) - 1)
|
||||
|
||||
@@ -249,6 +258,7 @@ int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
|
||||
struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
|
||||
struct bpf_struct_ops_value *uvalue, *kvalue;
|
||||
enum bpf_struct_ops_state state;
|
||||
s64 refcnt;
|
||||
|
||||
if (unlikely(*(u32 *)key != 0))
|
||||
return -ENOENT;
|
||||
@@ -267,7 +277,14 @@ int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
|
||||
uvalue = value;
|
||||
memcpy(uvalue, st_map->uvalue, map->value_size);
|
||||
uvalue->state = state;
|
||||
refcount_set(&uvalue->refcnt, refcount_read(&kvalue->refcnt));
|
||||
|
||||
/* This value offers the user space a general estimate of how
|
||||
* many sockets are still utilizing this struct_ops for TCP
|
||||
* congestion control. The number might not be exact, but it
|
||||
* should sufficiently meet our present goals.
|
||||
*/
|
||||
refcnt = atomic64_read(&map->refcnt) - atomic64_read(&map->usercnt);
|
||||
refcount_set(&uvalue->refcnt, max_t(s64, refcnt, 0));
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -349,8 +366,8 @@ int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks,
|
||||
model, flags, tlinks, NULL);
|
||||
}
|
||||
|
||||
static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
|
||||
void *value, u64 flags)
|
||||
static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
|
||||
void *value, u64 flags)
|
||||
{
|
||||
struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
|
||||
const struct bpf_struct_ops *st_ops = st_map->st_ops;
|
||||
@@ -491,12 +508,29 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
|
||||
*(unsigned long *)(udata + moff) = prog->aux->id;
|
||||
}
|
||||
|
||||
refcount_set(&kvalue->refcnt, 1);
|
||||
bpf_map_inc(map);
|
||||
if (st_map->map.map_flags & BPF_F_LINK) {
|
||||
err = st_ops->validate(kdata);
|
||||
if (err)
|
||||
goto reset_unlock;
|
||||
set_memory_rox((long)st_map->image, 1);
|
||||
/* Let bpf_link handle registration & unregistration.
|
||||
*
|
||||
* Pair with smp_load_acquire() during lookup_elem().
|
||||
*/
|
||||
smp_store_release(&kvalue->state, BPF_STRUCT_OPS_STATE_READY);
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
set_memory_rox((long)st_map->image, 1);
|
||||
err = st_ops->reg(kdata);
|
||||
if (likely(!err)) {
|
||||
/* This refcnt increment on the map here after
|
||||
* 'st_ops->reg()' is secure since the state of the
|
||||
* map must be set to INIT at this moment, and thus
|
||||
* bpf_struct_ops_map_delete_elem() can't unregister
|
||||
* or transition it to TOBEFREE concurrently.
|
||||
*/
|
||||
bpf_map_inc(map);
|
||||
/* Pair with smp_load_acquire() during lookup_elem().
|
||||
* It ensures the above udata updates (e.g. prog->aux->id)
|
||||
* can be seen once BPF_STRUCT_OPS_STATE_INUSE is set.
|
||||
@@ -512,7 +546,6 @@ static int bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key,
|
||||
*/
|
||||
set_memory_nx((long)st_map->image, 1);
|
||||
set_memory_rw((long)st_map->image, 1);
|
||||
bpf_map_put(map);
|
||||
|
||||
reset_unlock:
|
||||
bpf_struct_ops_map_put_progs(st_map);
|
||||
@@ -524,20 +557,22 @@ unlock:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
|
||||
static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key)
|
||||
{
|
||||
enum bpf_struct_ops_state prev_state;
|
||||
struct bpf_struct_ops_map *st_map;
|
||||
|
||||
st_map = (struct bpf_struct_ops_map *)map;
|
||||
if (st_map->map.map_flags & BPF_F_LINK)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
prev_state = cmpxchg(&st_map->kvalue.state,
|
||||
BPF_STRUCT_OPS_STATE_INUSE,
|
||||
BPF_STRUCT_OPS_STATE_TOBEFREE);
|
||||
switch (prev_state) {
|
||||
case BPF_STRUCT_OPS_STATE_INUSE:
|
||||
st_map->st_ops->unreg(&st_map->kvalue.data);
|
||||
if (refcount_dec_and_test(&st_map->kvalue.refcnt))
|
||||
bpf_map_put(map);
|
||||
bpf_map_put(map);
|
||||
return 0;
|
||||
case BPF_STRUCT_OPS_STATE_TOBEFREE:
|
||||
return -EINPROGRESS;
|
||||
@@ -570,7 +605,7 @@ static void bpf_struct_ops_map_seq_show_elem(struct bpf_map *map, void *key,
|
||||
kfree(value);
|
||||
}
|
||||
|
||||
static void bpf_struct_ops_map_free(struct bpf_map *map)
|
||||
static void __bpf_struct_ops_map_free(struct bpf_map *map)
|
||||
{
|
||||
struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
|
||||
|
||||
@@ -582,10 +617,32 @@ static void bpf_struct_ops_map_free(struct bpf_map *map)
|
||||
bpf_map_area_free(st_map);
|
||||
}
|
||||
|
||||
static void bpf_struct_ops_map_free(struct bpf_map *map)
|
||||
{
|
||||
/* The struct_ops's function may switch to another struct_ops.
|
||||
*
|
||||
* For example, bpf_tcp_cc_x->init() may switch to
|
||||
* another tcp_cc_y by calling
|
||||
* setsockopt(TCP_CONGESTION, "tcp_cc_y").
|
||||
* During the switch, bpf_struct_ops_put(tcp_cc_x) is called
|
||||
* and its refcount may reach 0 which then free its
|
||||
* trampoline image while tcp_cc_x is still running.
|
||||
*
|
||||
* A vanilla rcu gp is to wait for all bpf-tcp-cc prog
|
||||
* to finish. bpf-tcp-cc prog is non sleepable.
|
||||
* A rcu_tasks gp is to wait for the last few insn
|
||||
* in the trampoline image to finish before releasing
|
||||
* the trampoline image.
|
||||
*/
|
||||
synchronize_rcu_mult(call_rcu, call_rcu_tasks);
|
||||
|
||||
__bpf_struct_ops_map_free(map);
|
||||
}
|
||||
|
||||
static int bpf_struct_ops_map_alloc_check(union bpf_attr *attr)
|
||||
{
|
||||
if (attr->key_size != sizeof(unsigned int) || attr->max_entries != 1 ||
|
||||
attr->map_flags || !attr->btf_vmlinux_value_type_id)
|
||||
(attr->map_flags & ~BPF_F_LINK) || !attr->btf_vmlinux_value_type_id)
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
}
|
||||
@@ -609,6 +666,9 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
|
||||
if (attr->value_size != vt->size)
|
||||
return ERR_PTR(-EINVAL);
|
||||
|
||||
if (attr->map_flags & BPF_F_LINK && (!st_ops->validate || !st_ops->update))
|
||||
return ERR_PTR(-EOPNOTSUPP);
|
||||
|
||||
t = st_ops->type;
|
||||
|
||||
st_map_size = sizeof(*st_map) +
|
||||
@@ -630,7 +690,7 @@ static struct bpf_map *bpf_struct_ops_map_alloc(union bpf_attr *attr)
|
||||
NUMA_NO_NODE);
|
||||
st_map->image = bpf_jit_alloc_exec(PAGE_SIZE);
|
||||
if (!st_map->uvalue || !st_map->links || !st_map->image) {
|
||||
bpf_struct_ops_map_free(map);
|
||||
__bpf_struct_ops_map_free(map);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
@@ -676,41 +736,175 @@ const struct bpf_map_ops bpf_struct_ops_map_ops = {
|
||||
bool bpf_struct_ops_get(const void *kdata)
|
||||
{
|
||||
struct bpf_struct_ops_value *kvalue;
|
||||
struct bpf_struct_ops_map *st_map;
|
||||
struct bpf_map *map;
|
||||
|
||||
kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
|
||||
st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);
|
||||
|
||||
return refcount_inc_not_zero(&kvalue->refcnt);
|
||||
}
|
||||
|
||||
static void bpf_struct_ops_put_rcu(struct rcu_head *head)
|
||||
{
|
||||
struct bpf_struct_ops_map *st_map;
|
||||
|
||||
st_map = container_of(head, struct bpf_struct_ops_map, rcu);
|
||||
bpf_map_put(&st_map->map);
|
||||
map = __bpf_map_inc_not_zero(&st_map->map, false);
|
||||
return !IS_ERR(map);
|
||||
}
|
||||
|
||||
void bpf_struct_ops_put(const void *kdata)
|
||||
{
|
||||
struct bpf_struct_ops_value *kvalue;
|
||||
struct bpf_struct_ops_map *st_map;
|
||||
|
||||
kvalue = container_of(kdata, struct bpf_struct_ops_value, data);
|
||||
if (refcount_dec_and_test(&kvalue->refcnt)) {
|
||||
struct bpf_struct_ops_map *st_map;
|
||||
st_map = container_of(kvalue, struct bpf_struct_ops_map, kvalue);
|
||||
|
||||
st_map = container_of(kvalue, struct bpf_struct_ops_map,
|
||||
kvalue);
|
||||
/* The struct_ops's function may switch to another struct_ops.
|
||||
*
|
||||
* For example, bpf_tcp_cc_x->init() may switch to
|
||||
* another tcp_cc_y by calling
|
||||
* setsockopt(TCP_CONGESTION, "tcp_cc_y").
|
||||
* During the switch, bpf_struct_ops_put(tcp_cc_x) is called
|
||||
* and its map->refcnt may reach 0 which then free its
|
||||
* trampoline image while tcp_cc_x is still running.
|
||||
*
|
||||
* Thus, a rcu grace period is needed here.
|
||||
*/
|
||||
call_rcu(&st_map->rcu, bpf_struct_ops_put_rcu);
|
||||
}
|
||||
bpf_map_put(&st_map->map);
|
||||
}
|
||||
|
||||
static bool bpf_struct_ops_valid_to_reg(struct bpf_map *map)
|
||||
{
|
||||
struct bpf_struct_ops_map *st_map = (struct bpf_struct_ops_map *)map;
|
||||
|
||||
return map->map_type == BPF_MAP_TYPE_STRUCT_OPS &&
|
||||
map->map_flags & BPF_F_LINK &&
|
||||
/* Pair with smp_store_release() during map_update */
|
||||
smp_load_acquire(&st_map->kvalue.state) == BPF_STRUCT_OPS_STATE_READY;
|
||||
}
|
||||
|
||||
static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link)
|
||||
{
|
||||
struct bpf_struct_ops_link *st_link;
|
||||
struct bpf_struct_ops_map *st_map;
|
||||
|
||||
st_link = container_of(link, struct bpf_struct_ops_link, link);
|
||||
st_map = (struct bpf_struct_ops_map *)
|
||||
rcu_dereference_protected(st_link->map, true);
|
||||
if (st_map) {
|
||||
/* st_link->map can be NULL if
|
||||
* bpf_struct_ops_link_create() fails to register.
|
||||
*/
|
||||
st_map->st_ops->unreg(&st_map->kvalue.data);
|
||||
bpf_map_put(&st_map->map);
|
||||
}
|
||||
kfree(st_link);
|
||||
}
|
||||
|
||||
static void bpf_struct_ops_map_link_show_fdinfo(const struct bpf_link *link,
|
||||
struct seq_file *seq)
|
||||
{
|
||||
struct bpf_struct_ops_link *st_link;
|
||||
struct bpf_map *map;
|
||||
|
||||
st_link = container_of(link, struct bpf_struct_ops_link, link);
|
||||
rcu_read_lock();
|
||||
map = rcu_dereference(st_link->map);
|
||||
seq_printf(seq, "map_id:\t%d\n", map->id);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
|
||||
static int bpf_struct_ops_map_link_fill_link_info(const struct bpf_link *link,
|
||||
struct bpf_link_info *info)
|
||||
{
|
||||
struct bpf_struct_ops_link *st_link;
|
||||
struct bpf_map *map;
|
||||
|
||||
st_link = container_of(link, struct bpf_struct_ops_link, link);
|
||||
rcu_read_lock();
|
||||
map = rcu_dereference(st_link->map);
|
||||
info->struct_ops.map_id = map->id;
|
||||
rcu_read_unlock();
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map *new_map,
|
||||
struct bpf_map *expected_old_map)
|
||||
{
|
||||
struct bpf_struct_ops_map *st_map, *old_st_map;
|
||||
struct bpf_map *old_map;
|
||||
struct bpf_struct_ops_link *st_link;
|
||||
int err = 0;
|
||||
|
||||
st_link = container_of(link, struct bpf_struct_ops_link, link);
|
||||
st_map = container_of(new_map, struct bpf_struct_ops_map, map);
|
||||
|
||||
if (!bpf_struct_ops_valid_to_reg(new_map))
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&update_mutex);
|
||||
|
||||
old_map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex));
|
||||
if (expected_old_map && old_map != expected_old_map) {
|
||||
err = -EPERM;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
old_st_map = container_of(old_map, struct bpf_struct_ops_map, map);
|
||||
/* The new and old struct_ops must be the same type. */
|
||||
if (st_map->st_ops != old_st_map->st_ops) {
|
||||
err = -EINVAL;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
err = st_map->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data);
|
||||
if (err)
|
||||
goto err_out;
|
||||
|
||||
bpf_map_inc(new_map);
|
||||
rcu_assign_pointer(st_link->map, new_map);
|
||||
bpf_map_put(old_map);
|
||||
|
||||
err_out:
|
||||
mutex_unlock(&update_mutex);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static const struct bpf_link_ops bpf_struct_ops_map_lops = {
|
||||
.dealloc = bpf_struct_ops_map_link_dealloc,
|
||||
.show_fdinfo = bpf_struct_ops_map_link_show_fdinfo,
|
||||
.fill_link_info = bpf_struct_ops_map_link_fill_link_info,
|
||||
.update_map = bpf_struct_ops_map_link_update,
|
||||
};
|
||||
|
||||
int bpf_struct_ops_link_create(union bpf_attr *attr)
|
||||
{
|
||||
struct bpf_struct_ops_link *link = NULL;
|
||||
struct bpf_link_primer link_primer;
|
||||
struct bpf_struct_ops_map *st_map;
|
||||
struct bpf_map *map;
|
||||
int err;
|
||||
|
||||
map = bpf_map_get(attr->link_create.map_fd);
|
||||
if (IS_ERR(map))
|
||||
return PTR_ERR(map);
|
||||
|
||||
st_map = (struct bpf_struct_ops_map *)map;
|
||||
|
||||
if (!bpf_struct_ops_valid_to_reg(map)) {
|
||||
err = -EINVAL;
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
link = kzalloc(sizeof(*link), GFP_USER);
|
||||
if (!link) {
|
||||
err = -ENOMEM;
|
||||
goto err_out;
|
||||
}
|
||||
bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_map_lops, NULL);
|
||||
|
||||
err = bpf_link_prime(&link->link, &link_primer);
|
||||
if (err)
|
||||
goto err_out;
|
||||
|
||||
err = st_map->st_ops->reg(st_map->kvalue.data);
|
||||
if (err) {
|
||||
bpf_link_cleanup(&link_primer);
|
||||
link = NULL;
|
||||
goto err_out;
|
||||
}
|
||||
RCU_INIT_POINTER(link->map, map);
|
||||
|
||||
return bpf_link_settle(&link_primer);
|
||||
|
||||
err_out:
|
||||
bpf_map_put(map);
|
||||
kfree(link);
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@@ -72,8 +72,6 @@ task_storage_lookup(struct task_struct *task, struct bpf_map *map,
|
||||
void bpf_task_storage_free(struct task_struct *task)
|
||||
{
|
||||
struct bpf_local_storage *local_storage;
|
||||
bool free_task_storage = false;
|
||||
unsigned long flags;
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
@@ -84,14 +82,9 @@ void bpf_task_storage_free(struct task_struct *task)
|
||||
}
|
||||
|
||||
bpf_task_storage_lock();
|
||||
raw_spin_lock_irqsave(&local_storage->lock, flags);
|
||||
free_task_storage = bpf_local_storage_unlink_nolock(local_storage);
|
||||
raw_spin_unlock_irqrestore(&local_storage->lock, flags);
|
||||
bpf_local_storage_destroy(local_storage);
|
||||
bpf_task_storage_unlock();
|
||||
rcu_read_unlock();
|
||||
|
||||
if (free_task_storage)
|
||||
kfree_rcu(local_storage, rcu);
|
||||
}
|
||||
|
||||
static void *bpf_pid_task_storage_lookup_elem(struct bpf_map *map, void *key)
|
||||
@@ -127,8 +120,8 @@ out:
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
static int bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key,
|
||||
void *value, u64 map_flags)
|
||||
static long bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key,
|
||||
void *value, u64 map_flags)
|
||||
{
|
||||
struct bpf_local_storage_data *sdata;
|
||||
struct task_struct *task;
|
||||
@@ -175,12 +168,12 @@ static int task_storage_delete(struct task_struct *task, struct bpf_map *map,
|
||||
if (!nobusy)
|
||||
return -EBUSY;
|
||||
|
||||
bpf_selem_unlink(SELEM(sdata), true);
|
||||
bpf_selem_unlink(SELEM(sdata), false);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bpf_pid_task_storage_delete_elem(struct bpf_map *map, void *key)
|
||||
static long bpf_pid_task_storage_delete_elem(struct bpf_map *map, void *key)
|
||||
{
|
||||
struct task_struct *task;
|
||||
unsigned int f_flags;
|
||||
@@ -316,7 +309,7 @@ static int notsupp_get_next_key(struct bpf_map *map, void *key, void *next_key)
|
||||
|
||||
static struct bpf_map *task_storage_map_alloc(union bpf_attr *attr)
|
||||
{
|
||||
return bpf_local_storage_map_alloc(attr, &task_cache);
|
||||
return bpf_local_storage_map_alloc(attr, &task_cache, true);
|
||||
}
|
||||
|
||||
static void task_storage_map_free(struct bpf_map *map)
|
||||
@@ -345,7 +338,7 @@ const struct bpf_func_proto bpf_task_storage_get_recur_proto = {
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
|
||||
.arg1_type = ARG_CONST_MAP_PTR,
|
||||
.arg2_type = ARG_PTR_TO_BTF_ID,
|
||||
.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
|
||||
.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
|
||||
.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
|
||||
.arg4_type = ARG_ANYTHING,
|
||||
@@ -356,7 +349,7 @@ const struct bpf_func_proto bpf_task_storage_get_proto = {
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
|
||||
.arg1_type = ARG_CONST_MAP_PTR,
|
||||
.arg2_type = ARG_PTR_TO_BTF_ID,
|
||||
.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
|
||||
.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
|
||||
.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
|
||||
.arg4_type = ARG_ANYTHING,
|
||||
@@ -367,7 +360,7 @@ const struct bpf_func_proto bpf_task_storage_delete_recur_proto = {
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_INTEGER,
|
||||
.arg1_type = ARG_CONST_MAP_PTR,
|
||||
.arg2_type = ARG_PTR_TO_BTF_ID,
|
||||
.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
|
||||
.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
|
||||
};
|
||||
|
||||
@@ -376,6 +369,6 @@ const struct bpf_func_proto bpf_task_storage_delete_proto = {
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_INTEGER,
|
||||
.arg1_type = ARG_CONST_MAP_PTR,
|
||||
.arg2_type = ARG_PTR_TO_BTF_ID,
|
||||
.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
|
||||
.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
|
||||
};
|
||||
|
kernel/bpf/btf.c (285 lines changed)
@@ -3231,12 +3231,6 @@ static void btf_struct_log(struct btf_verifier_env *env,
|
||||
btf_verifier_log(env, "size=%u vlen=%u", t->size, btf_type_vlen(t));
|
||||
}
|
||||
|
||||
enum btf_field_info_type {
|
||||
BTF_FIELD_SPIN_LOCK,
|
||||
BTF_FIELD_TIMER,
|
||||
BTF_FIELD_KPTR,
|
||||
};
|
||||
|
||||
enum {
|
||||
BTF_FIELD_IGNORE = 0,
|
||||
BTF_FIELD_FOUND = 1,
|
||||
@@ -3562,7 +3556,10 @@ static int btf_parse_kptr(const struct btf *btf, struct btf_field *field,
|
||||
{
|
||||
struct module *mod = NULL;
|
||||
const struct btf_type *t;
|
||||
struct btf *kernel_btf;
|
||||
/* If a matching btf type is found in kernel or module BTFs, kptr_ref
|
||||
* is that BTF, otherwise it's program BTF
|
||||
*/
|
||||
struct btf *kptr_btf;
|
||||
int ret;
|
||||
s32 id;
|
||||
|
||||
@@ -3571,7 +3568,20 @@ static int btf_parse_kptr(const struct btf *btf, struct btf_field *field,
|
||||
*/
|
||||
t = btf_type_by_id(btf, info->kptr.type_id);
|
||||
id = bpf_find_btf_id(__btf_name_by_offset(btf, t->name_off), BTF_INFO_KIND(t->info),
|
||||
&kernel_btf);
|
||||
&kptr_btf);
|
||||
if (id == -ENOENT) {
|
||||
/* btf_parse_kptr should only be called w/ btf = program BTF */
|
||||
WARN_ON_ONCE(btf_is_kernel(btf));
|
||||
|
||||
/* Type exists only in program BTF. Assume that it's a MEM_ALLOC
|
||||
* kptr allocated via bpf_obj_new
|
||||
*/
|
||||
field->kptr.dtor = NULL;
|
||||
id = info->kptr.type_id;
|
||||
kptr_btf = (struct btf *)btf;
|
||||
btf_get(kptr_btf);
|
||||
goto found_dtor;
|
||||
}
|
||||
if (id < 0)
|
||||
return id;
|
||||
|
||||
@@ -3588,20 +3598,20 @@ static int btf_parse_kptr(const struct btf *btf, struct btf_field *field,
|
||||
* can be used as a referenced pointer and be stored in a map at
|
||||
* the same time.
|
||||
*/
|
||||
dtor_btf_id = btf_find_dtor_kfunc(kernel_btf, id);
|
||||
dtor_btf_id = btf_find_dtor_kfunc(kptr_btf, id);
|
||||
if (dtor_btf_id < 0) {
|
||||
ret = dtor_btf_id;
|
||||
goto end_btf;
|
||||
}
|
||||
|
||||
dtor_func = btf_type_by_id(kernel_btf, dtor_btf_id);
|
||||
dtor_func = btf_type_by_id(kptr_btf, dtor_btf_id);
|
||||
if (!dtor_func) {
|
||||
ret = -ENOENT;
|
||||
goto end_btf;
|
||||
}
|
||||
|
||||
if (btf_is_module(kernel_btf)) {
|
||||
mod = btf_try_get_module(kernel_btf);
|
||||
if (btf_is_module(kptr_btf)) {
|
||||
mod = btf_try_get_module(kptr_btf);
|
||||
if (!mod) {
|
||||
ret = -ENXIO;
|
||||
goto end_btf;
|
||||
@@ -3611,7 +3621,7 @@ static int btf_parse_kptr(const struct btf *btf, struct btf_field *field,
|
||||
/* We already verified dtor_func to be btf_type_is_func
|
||||
* in register_btf_id_dtor_kfuncs.
|
||||
*/
|
||||
dtor_func_name = __btf_name_by_offset(kernel_btf, dtor_func->name_off);
|
||||
dtor_func_name = __btf_name_by_offset(kptr_btf, dtor_func->name_off);
|
||||
addr = kallsyms_lookup_name(dtor_func_name);
|
||||
if (!addr) {
|
||||
ret = -EINVAL;
|
||||
@@ -3620,14 +3630,15 @@ static int btf_parse_kptr(const struct btf *btf, struct btf_field *field,
|
||||
field->kptr.dtor = (void *)addr;
|
||||
}
|
||||
|
||||
found_dtor:
|
||||
field->kptr.btf_id = id;
|
||||
field->kptr.btf = kernel_btf;
|
||||
field->kptr.btf = kptr_btf;
|
||||
field->kptr.module = mod;
|
||||
return 0;
|
||||
end_mod:
|
||||
module_put(mod);
|
||||
end_btf:
|
||||
btf_put(kernel_btf);
|
||||
btf_put(kptr_btf);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -5494,38 +5505,45 @@ static int btf_check_type_tags(struct btf_verifier_env *env,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct btf *btf_parse(bpfptr_t btf_data, u32 btf_data_size,
|
||||
u32 log_level, char __user *log_ubuf, u32 log_size)
|
||||
static int finalize_log(struct bpf_verifier_log *log, bpfptr_t uattr, u32 uattr_size)
|
||||
{
|
||||
struct btf_struct_metas *struct_meta_tab;
|
||||
struct btf_verifier_env *env = NULL;
|
||||
struct bpf_verifier_log *log;
|
||||
struct btf *btf = NULL;
|
||||
u8 *data;
|
||||
u32 log_true_size;
|
||||
int err;
|
||||
|
||||
if (btf_data_size > BTF_MAX_SIZE)
|
||||
err = bpf_vlog_finalize(log, &log_true_size);
|
||||
|
||||
if (uattr_size >= offsetofend(union bpf_attr, btf_log_true_size) &&
|
||||
copy_to_bpfptr_offset(uattr, offsetof(union bpf_attr, btf_log_true_size),
|
||||
&log_true_size, sizeof(log_true_size)))
|
||||
err = -EFAULT;
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct btf *btf_parse(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
|
||||
{
|
||||
bpfptr_t btf_data = make_bpfptr(attr->btf, uattr.is_kernel);
|
||||
char __user *log_ubuf = u64_to_user_ptr(attr->btf_log_buf);
|
||||
struct btf_struct_metas *struct_meta_tab;
|
||||
struct btf_verifier_env *env = NULL;
|
||||
struct btf *btf = NULL;
|
||||
u8 *data;
|
||||
int err, ret;
|
||||
|
||||
if (attr->btf_size > BTF_MAX_SIZE)
|
||||
return ERR_PTR(-E2BIG);
|
||||
|
||||
env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN);
|
||||
if (!env)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
log = &env->log;
|
||||
if (log_level || log_ubuf || log_size) {
|
||||
/* user requested verbose verifier output
|
||||
* and supplied buffer to store the verification trace
|
||||
*/
|
||||
log->level = log_level;
|
||||
log->ubuf = log_ubuf;
|
||||
log->len_total = log_size;
|
||||
|
||||
/* log attributes have to be sane */
|
||||
if (!bpf_verifier_log_attr_valid(log)) {
|
||||
err = -EINVAL;
|
||||
goto errout;
|
||||
}
|
||||
}
|
||||
/* user could have requested verbose verifier output
|
||||
* and supplied buffer to store the verification trace
|
||||
*/
|
||||
err = bpf_vlog_init(&env->log, attr->btf_log_level,
|
||||
log_ubuf, attr->btf_log_size);
|
||||
if (err)
|
||||
goto errout_free;
|
||||
|
||||
btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN);
|
||||
if (!btf) {
|
||||
@@ -5534,16 +5552,16 @@ static struct btf *btf_parse(bpfptr_t btf_data, u32 btf_data_size,
|
||||
}
|
||||
env->btf = btf;
|
||||
|
||||
data = kvmalloc(btf_data_size, GFP_KERNEL | __GFP_NOWARN);
|
||||
data = kvmalloc(attr->btf_size, GFP_KERNEL | __GFP_NOWARN);
|
||||
if (!data) {
|
||||
err = -ENOMEM;
|
||||
goto errout;
|
||||
}
|
||||
|
||||
btf->data = data;
|
||||
btf->data_size = btf_data_size;
|
||||
btf->data_size = attr->btf_size;
|
||||
|
||||
if (copy_from_bpfptr(data, btf_data, btf_data_size)) {
|
||||
if (copy_from_bpfptr(data, btf_data, attr->btf_size)) {
|
||||
err = -EFAULT;
|
||||
goto errout;
|
||||
}
|
||||
@@ -5566,7 +5584,7 @@ static struct btf *btf_parse(bpfptr_t btf_data, u32 btf_data_size,
|
||||
if (err)
|
||||
goto errout;
|
||||
|
||||
struct_meta_tab = btf_parse_struct_metas(log, btf);
|
||||
struct_meta_tab = btf_parse_struct_metas(&env->log, btf);
|
||||
if (IS_ERR(struct_meta_tab)) {
|
||||
err = PTR_ERR(struct_meta_tab);
|
||||
goto errout;
|
||||
@@ -5583,10 +5601,9 @@ static struct btf *btf_parse(bpfptr_t btf_data, u32 btf_data_size,
|
||||
}
|
||||
}
|
||||
|
||||
if (log->level && bpf_verifier_log_full(log)) {
|
||||
err = -ENOSPC;
|
||||
goto errout_meta;
|
||||
}
|
||||
err = finalize_log(&env->log, uattr, uattr_size);
|
||||
if (err)
|
||||
goto errout_free;
|
||||
|
||||
btf_verifier_env_free(env);
|
||||
refcount_set(&btf->refcnt, 1);
|
||||
@@ -5595,6 +5612,11 @@ static struct btf *btf_parse(bpfptr_t btf_data, u32 btf_data_size,
|
||||
errout_meta:
|
||||
btf_free_struct_meta_tab(btf);
|
||||
errout:
|
||||
/* overwrite err with -ENOSPC or -EFAULT */
|
||||
ret = finalize_log(&env->log, uattr, uattr_size);
|
||||
if (ret)
|
||||
err = ret;
|
||||
errout_free:
|
||||
btf_verifier_env_free(env);
|
||||
if (btf)
|
||||
btf_free(btf);
|
||||
@@ -5900,12 +5922,8 @@ struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)
|
||||
|
||||
static bool is_int_ptr(struct btf *btf, const struct btf_type *t)
|
||||
{
|
||||
/* t comes in already as a pointer */
|
||||
t = btf_type_by_id(btf, t->type);
|
||||
|
||||
/* allow const */
|
||||
if (BTF_INFO_KIND(t->info) == BTF_KIND_CONST)
|
||||
t = btf_type_by_id(btf, t->type);
|
||||
/* skip modifiers */
|
||||
t = btf_type_skip_modifiers(btf, t->type, NULL);
|
||||
|
||||
return btf_type_is_int(t);
|
||||
}
|
||||
@@ -6156,7 +6174,8 @@ enum bpf_struct_walk_result {
|
||||
|
||||
static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf,
|
||||
const struct btf_type *t, int off, int size,
|
||||
u32 *next_btf_id, enum bpf_type_flag *flag)
|
||||
u32 *next_btf_id, enum bpf_type_flag *flag,
|
||||
const char **field_name)
|
||||
{
|
||||
u32 i, moff, mtrue_end, msize = 0, total_nelems = 0;
|
||||
const struct btf_type *mtype, *elem_type = NULL;
|
||||
@@ -6385,6 +6404,8 @@ error:
|
||||
if (btf_type_is_struct(stype)) {
|
||||
*next_btf_id = id;
|
||||
*flag |= tmp_flag;
|
||||
if (field_name)
|
||||
*field_name = mname;
|
||||
return WALK_PTR;
|
||||
}
|
||||
}
|
||||
@@ -6411,7 +6432,8 @@ error:
|
||||
int btf_struct_access(struct bpf_verifier_log *log,
|
||||
const struct bpf_reg_state *reg,
|
||||
int off, int size, enum bpf_access_type atype __maybe_unused,
|
||||
u32 *next_btf_id, enum bpf_type_flag *flag)
|
||||
u32 *next_btf_id, enum bpf_type_flag *flag,
|
||||
const char **field_name)
|
||||
{
|
||||
const struct btf *btf = reg->btf;
|
||||
enum bpf_type_flag tmp_flag = 0;
|
||||
@@ -6443,7 +6465,7 @@ int btf_struct_access(struct bpf_verifier_log *log,
|
||||
|
||||
t = btf_type_by_id(btf, id);
|
||||
do {
|
||||
err = btf_struct_walk(log, btf, t, off, size, &id, &tmp_flag);
|
||||
err = btf_struct_walk(log, btf, t, off, size, &id, &tmp_flag, field_name);
|
||||
|
||||
switch (err) {
|
||||
case WALK_PTR:
|
||||
@@ -6518,7 +6540,7 @@ again:
|
||||
type = btf_type_by_id(btf, id);
|
||||
if (!type)
|
||||
return false;
|
||||
err = btf_struct_walk(log, btf, type, off, 1, &id, &flag);
|
||||
err = btf_struct_walk(log, btf, type, off, 1, &id, &flag, NULL);
|
||||
if (err != WALK_STRUCT)
|
||||
return false;
|
||||
|
||||
@@ -7199,15 +7221,12 @@ static int __btf_new_fd(struct btf *btf)
|
||||
return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
|
||||
}
|
||||
|
||||
int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr)
|
||||
int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
|
||||
{
|
||||
struct btf *btf;
|
||||
int ret;
|
||||
|
||||
btf = btf_parse(make_bpfptr(attr->btf, uattr.is_kernel),
|
||||
attr->btf_size, attr->btf_log_level,
|
||||
u64_to_user_ptr(attr->btf_log_buf),
|
||||
attr->btf_log_size);
|
||||
btf = btf_parse(attr, uattr, uattr_size);
|
||||
if (IS_ERR(btf))
|
||||
return PTR_ERR(btf);
|
||||
|
||||
@@ -7597,6 +7616,108 @@ BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE)
|
||||
BTF_TRACING_TYPE_xxx
|
||||
#undef BTF_TRACING_TYPE
|
||||
|
||||
static int btf_check_iter_kfuncs(struct btf *btf, const char *func_name,
|
||||
const struct btf_type *func, u32 func_flags)
|
||||
{
|
||||
u32 flags = func_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY);
|
||||
const char *name, *sfx, *iter_name;
|
||||
const struct btf_param *arg;
|
||||
const struct btf_type *t;
|
||||
char exp_name[128];
|
||||
u32 nr_args;
|
||||
|
||||
/* exactly one of KF_ITER_{NEW,NEXT,DESTROY} can be set */
|
||||
if (!flags || (flags & (flags - 1)))
|
||||
return -EINVAL;
|
||||
|
||||
/* any BPF iter kfunc should have `struct bpf_iter_<type> *` first arg */
|
||||
nr_args = btf_type_vlen(func);
|
||||
if (nr_args < 1)
|
||||
return -EINVAL;
|
||||
|
||||
arg = &btf_params(func)[0];
|
||||
t = btf_type_skip_modifiers(btf, arg->type, NULL);
|
||||
if (!t || !btf_type_is_ptr(t))
|
||||
return -EINVAL;
|
||||
t = btf_type_skip_modifiers(btf, t->type, NULL);
|
||||
if (!t || !__btf_type_is_struct(t))
|
||||
return -EINVAL;
|
||||
|
||||
name = btf_name_by_offset(btf, t->name_off);
|
||||
if (!name || strncmp(name, ITER_PREFIX, sizeof(ITER_PREFIX) - 1))
|
||||
return -EINVAL;
|
||||
|
||||
/* sizeof(struct bpf_iter_<type>) should be a multiple of 8 to
|
||||
* fit nicely in stack slots
|
||||
*/
|
||||
if (t->size == 0 || (t->size % 8))
|
||||
return -EINVAL;
|
||||
|
||||
/* validate bpf_iter_<type>_{new,next,destroy}(struct bpf_iter_<type> *)
|
||||
* naming pattern
|
||||
*/
|
||||
iter_name = name + sizeof(ITER_PREFIX) - 1;
|
||||
if (flags & KF_ITER_NEW)
|
||||
sfx = "new";
|
||||
else if (flags & KF_ITER_NEXT)
|
||||
sfx = "next";
|
||||
else /* (flags & KF_ITER_DESTROY) */
|
||||
sfx = "destroy";
|
||||
|
||||
snprintf(exp_name, sizeof(exp_name), "bpf_iter_%s_%s", iter_name, sfx);
|
||||
if (strcmp(func_name, exp_name))
|
||||
return -EINVAL;
|
||||
|
||||
/* only iter constructor should have extra arguments */
|
||||
if (!(flags & KF_ITER_NEW) && nr_args != 1)
|
||||
return -EINVAL;
|
||||
|
||||
if (flags & KF_ITER_NEXT) {
|
||||
/* bpf_iter_<type>_next() should return pointer */
|
||||
t = btf_type_skip_modifiers(btf, func->type, NULL);
|
||||
if (!t || !btf_type_is_ptr(t))
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (flags & KF_ITER_DESTROY) {
|
||||
/* bpf_iter_<type>_destroy() should return void */
|
||||
t = btf_type_by_id(btf, func->type);
|
||||
if (!t || !btf_type_is_void(t))
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int btf_check_kfunc_protos(struct btf *btf, u32 func_id, u32 func_flags)
|
||||
{
|
||||
const struct btf_type *func;
|
||||
const char *func_name;
|
||||
int err;
|
||||
|
||||
/* any kfunc should be FUNC -> FUNC_PROTO */
|
||||
func = btf_type_by_id(btf, func_id);
|
||||
if (!func || !btf_type_is_func(func))
|
||||
return -EINVAL;
|
||||
|
||||
/* sanity check kfunc name */
|
||||
func_name = btf_name_by_offset(btf, func->name_off);
|
||||
if (!func_name || !func_name[0])
|
||||
return -EINVAL;
|
||||
|
||||
func = btf_type_by_id(btf, func->type);
|
||||
if (!func || !btf_type_is_func_proto(func))
|
||||
return -EINVAL;
|
||||
|
||||
if (func_flags & (KF_ITER_NEW | KF_ITER_NEXT | KF_ITER_DESTROY)) {
|
||||
err = btf_check_iter_kfuncs(btf, func_name, func, func_flags);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Kernel Function (kfunc) BTF ID set registration API */
|
||||
|
||||
static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook,
|
||||
@@ -7773,7 +7894,7 @@ static int __register_btf_kfunc_id_set(enum btf_kfunc_hook hook,
|
||||
const struct btf_kfunc_id_set *kset)
|
||||
{
|
||||
struct btf *btf;
|
||||
int ret;
|
||||
int ret, i;
|
||||
|
||||
btf = btf_get_module_btf(kset->owner);
|
||||
if (!btf) {
|
||||
@@ -7790,7 +7911,15 @@ static int __register_btf_kfunc_id_set(enum btf_kfunc_hook hook,
|
||||
if (IS_ERR(btf))
|
||||
return PTR_ERR(btf);
|
||||
|
||||
for (i = 0; i < kset->set->cnt; i++) {
|
||||
ret = btf_check_kfunc_protos(btf, kset->set->pairs[i].id,
|
||||
kset->set->pairs[i].flags);
|
||||
if (ret)
|
||||
goto err_out;
|
||||
}
|
||||
|
||||
ret = btf_populate_kfunc_set(btf, hook, kset->set);
|
||||
err_out:
|
||||
btf_put(btf);
|
||||
return ret;
|
||||
}
|
||||
@@ -8368,16 +8497,15 @@ out:
|
||||
|
||||
bool btf_nested_type_is_trusted(struct bpf_verifier_log *log,
|
||||
const struct bpf_reg_state *reg,
|
||||
int off, const char *suffix)
|
||||
const char *field_name, u32 btf_id, const char *suffix)
|
||||
{
|
||||
struct btf *btf = reg->btf;
|
||||
const struct btf_type *walk_type, *safe_type;
|
||||
const char *tname;
|
||||
char safe_tname[64];
|
||||
long ret, safe_id;
|
||||
const struct btf_member *member, *m_walk = NULL;
|
||||
const struct btf_member *member;
|
||||
u32 i;
|
||||
const char *walk_name;
|
||||
|
||||
walk_type = btf_type_by_id(btf, reg->btf_id);
|
||||
if (!walk_type)
|
||||
@@ -8397,30 +8525,17 @@ bool btf_nested_type_is_trusted(struct bpf_verifier_log *log,
|
||||
if (!safe_type)
|
||||
return false;
|
||||
|
||||
for_each_member(i, walk_type, member) {
|
||||
u32 moff;
|
||||
|
||||
/* We're looking for the PTR_TO_BTF_ID member in the struct
|
||||
* type we're walking which matches the specified offset.
|
||||
* Below, we'll iterate over the fields in the safe variant of
|
||||
* the struct and see if any of them has a matching type /
|
||||
* name.
|
||||
*/
|
||||
moff = __btf_member_bit_offset(walk_type, member) / 8;
|
||||
if (off == moff) {
|
||||
m_walk = member;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (m_walk == NULL)
|
||||
return false;
|
||||
|
||||
walk_name = __btf_name_by_offset(btf, m_walk->name_off);
|
||||
for_each_member(i, safe_type, member) {
|
||||
const char *m_name = __btf_name_by_offset(btf, member->name_off);
|
||||
const struct btf_type *mtype = btf_type_by_id(btf, member->type);
|
||||
u32 id;
|
||||
|
||||
if (!btf_type_is_ptr(mtype))
|
||||
continue;
|
||||
|
||||
btf_type_skip_modifiers(btf, mtype->type, &id);
|
||||
/* If we match on both type and name, the field is considered trusted. */
|
||||
if (m_walk->type == member->type && !strcmp(walk_name, m_name))
|
||||
if (btf_id == id && !strcmp(field_name, m_name))
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@@ -540,7 +540,7 @@ static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
|
||||
}
|
||||
}
|
||||
|
||||
static int cpu_map_delete_elem(struct bpf_map *map, void *key)
|
||||
static long cpu_map_delete_elem(struct bpf_map *map, void *key)
|
||||
{
|
||||
struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
|
||||
u32 key_cpu = *(u32 *)key;
|
||||
@@ -553,8 +553,8 @@ static int cpu_map_delete_elem(struct bpf_map *map, void *key)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
|
||||
u64 map_flags)
|
||||
static long cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
|
||||
u64 map_flags)
|
||||
{
|
||||
struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
|
||||
struct bpf_cpumap_val cpumap_value = {};
|
||||
@@ -667,7 +667,7 @@ static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int cpu_map_redirect(struct bpf_map *map, u64 index, u64 flags)
|
||||
static long cpu_map_redirect(struct bpf_map *map, u64 index, u64 flags)
|
||||
{
|
||||
return __bpf_xdp_redirect_map(map, index, flags, 0,
|
||||
__cpu_map_lookup_elem);
|
||||
|
@@ -9,6 +9,7 @@
|
||||
/**
|
||||
* struct bpf_cpumask - refcounted BPF cpumask wrapper structure
|
||||
* @cpumask: The actual cpumask embedded in the struct.
|
||||
* @rcu: The RCU head used to free the cpumask with RCU safety.
|
||||
* @usage: Object reference counter. When the refcount goes to 0, the
|
||||
* memory is released back to the BPF allocator, which provides
|
||||
* RCU safety.
|
||||
@@ -24,6 +25,7 @@
|
||||
*/
|
||||
struct bpf_cpumask {
|
||||
cpumask_t cpumask;
|
||||
struct rcu_head rcu;
|
||||
refcount_t usage;
|
||||
};
|
||||
|
||||
@@ -80,32 +82,14 @@ __bpf_kfunc struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask)
|
||||
return cpumask;
|
||||
}
|
||||
|
||||
/**
|
||||
* bpf_cpumask_kptr_get() - Attempt to acquire a reference to a BPF cpumask
|
||||
* stored in a map.
|
||||
* @cpumaskp: A pointer to a BPF cpumask map value.
|
||||
*
|
||||
* Attempts to acquire a reference to a BPF cpumask stored in a map value. The
|
||||
* cpumask returned by this function must either be embedded in a map as a
|
||||
* kptr, or freed with bpf_cpumask_release(). This function may return NULL if
|
||||
* no BPF cpumask was found in the specified map value.
|
||||
*/
|
||||
__bpf_kfunc struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumaskp)
|
||||
static void cpumask_free_cb(struct rcu_head *head)
|
||||
{
|
||||
struct bpf_cpumask *cpumask;
|
||||
|
||||
/* The BPF memory allocator frees memory backing its caches in an RCU
|
||||
* callback. Thus, we can safely use RCU to ensure that the cpumask is
|
||||
* safe to read.
|
||||
*/
|
||||
rcu_read_lock();
|
||||
|
||||
cpumask = READ_ONCE(*cpumaskp);
|
||||
if (cpumask && !refcount_inc_not_zero(&cpumask->usage))
|
||||
cpumask = NULL;
|
||||
|
||||
rcu_read_unlock();
|
||||
return cpumask;
|
||||
cpumask = container_of(head, struct bpf_cpumask, rcu);
|
||||
migrate_disable();
|
||||
bpf_mem_cache_free(&bpf_cpumask_ma, cpumask);
|
||||
migrate_enable();
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -118,14 +102,8 @@ __bpf_kfunc struct bpf_cpumask *bpf_cpumask_kptr_get(struct bpf_cpumask **cpumas
|
||||
*/
|
||||
__bpf_kfunc void bpf_cpumask_release(struct bpf_cpumask *cpumask)
|
||||
{
|
||||
if (!cpumask)
|
||||
return;
|
||||
|
||||
if (refcount_dec_and_test(&cpumask->usage)) {
|
||||
migrate_disable();
|
||||
bpf_mem_cache_free(&bpf_cpumask_ma, cpumask);
|
||||
migrate_enable();
|
||||
}
|
||||
if (refcount_dec_and_test(&cpumask->usage))
|
||||
call_rcu(&cpumask->rcu, cpumask_free_cb);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -424,9 +402,8 @@ __diag_pop();
|
||||
|
||||
BTF_SET8_START(cpumask_kfunc_btf_ids)
|
||||
BTF_ID_FLAGS(func, bpf_cpumask_create, KF_ACQUIRE | KF_RET_NULL)
|
||||
BTF_ID_FLAGS(func, bpf_cpumask_release, KF_RELEASE | KF_TRUSTED_ARGS)
|
||||
BTF_ID_FLAGS(func, bpf_cpumask_release, KF_RELEASE)
|
||||
BTF_ID_FLAGS(func, bpf_cpumask_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
|
||||
BTF_ID_FLAGS(func, bpf_cpumask_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL)
|
||||
BTF_ID_FLAGS(func, bpf_cpumask_first, KF_RCU)
|
||||
BTF_ID_FLAGS(func, bpf_cpumask_first_zero, KF_RCU)
|
||||
BTF_ID_FLAGS(func, bpf_cpumask_set_cpu, KF_RCU)
|
||||
|
@@ -809,7 +809,7 @@ static void __dev_map_entry_free(struct rcu_head *rcu)
|
||||
kfree(dev);
|
||||
}
|
||||
|
||||
static int dev_map_delete_elem(struct bpf_map *map, void *key)
|
||||
static long dev_map_delete_elem(struct bpf_map *map, void *key)
|
||||
{
|
||||
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
|
||||
struct bpf_dtab_netdev *old_dev;
|
||||
@@ -826,7 +826,7 @@ static int dev_map_delete_elem(struct bpf_map *map, void *key)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dev_map_hash_delete_elem(struct bpf_map *map, void *key)
|
||||
static long dev_map_hash_delete_elem(struct bpf_map *map, void *key)
|
||||
{
|
||||
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
|
||||
struct bpf_dtab_netdev *old_dev;
|
||||
@@ -897,8 +897,8 @@ err_out:
|
||||
return ERR_PTR(-EINVAL);
|
||||
}
|
||||
|
||||
static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
|
||||
void *key, void *value, u64 map_flags)
|
||||
static long __dev_map_update_elem(struct net *net, struct bpf_map *map,
|
||||
void *key, void *value, u64 map_flags)
|
||||
{
|
||||
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
|
||||
struct bpf_dtab_netdev *dev, *old_dev;
|
||||
@@ -939,15 +939,15 @@ static int __dev_map_update_elem(struct net *net, struct bpf_map *map,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int dev_map_update_elem(struct bpf_map *map, void *key, void *value,
|
||||
u64 map_flags)
|
||||
static long dev_map_update_elem(struct bpf_map *map, void *key, void *value,
|
||||
u64 map_flags)
|
||||
{
|
||||
return __dev_map_update_elem(current->nsproxy->net_ns,
|
||||
map, key, value, map_flags);
|
||||
}
|
||||
|
||||
static int __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
|
||||
void *key, void *value, u64 map_flags)
|
||||
static long __dev_map_hash_update_elem(struct net *net, struct bpf_map *map,
|
||||
void *key, void *value, u64 map_flags)
|
||||
{
|
||||
struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
|
||||
struct bpf_dtab_netdev *dev, *old_dev;
|
||||
@@ -999,21 +999,21 @@ out_err:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
|
||||
u64 map_flags)
|
||||
static long dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
|
||||
u64 map_flags)
|
||||
{
|
||||
return __dev_map_hash_update_elem(current->nsproxy->net_ns,
|
||||
map, key, value, map_flags);
|
||||
}
|
||||
|
||||
static int dev_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
|
||||
static long dev_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
|
||||
{
|
||||
return __bpf_xdp_redirect_map(map, ifindex, flags,
|
||||
BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
|
||||
__dev_map_lookup_elem);
|
||||
}
|
||||
|
||||
static int dev_hash_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
|
||||
static long dev_hash_map_redirect(struct bpf_map *map, u64 ifindex, u64 flags)
|
||||
{
|
||||
return __bpf_xdp_redirect_map(map, ifindex, flags,
|
||||
BPF_F_BROADCAST | BPF_F_EXCLUDE_INGRESS,
|
||||
|
@@ -607,6 +607,8 @@ free_htab:
|
||||
|
||||
static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
|
||||
{
|
||||
if (likely(key_len % 4 == 0))
|
||||
return jhash2(key, key_len / 4, hashrnd);
|
||||
return jhash(key, key_len, hashrnd);
|
||||
}
|
||||
|
||||
@@ -1073,8 +1075,8 @@ static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
|
||||
}
|
||||
|
||||
/* Called from syscall or from eBPF program */
|
||||
static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
|
||||
u64 map_flags)
|
||||
static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
|
||||
u64 map_flags)
|
||||
{
|
||||
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
|
||||
struct htab_elem *l_new = NULL, *l_old;
|
||||
@@ -1175,8 +1177,8 @@ static void htab_lru_push_free(struct bpf_htab *htab, struct htab_elem *elem)
|
||||
bpf_lru_push_free(&htab->lru, &elem->lru_node);
|
||||
}
|
||||
|
||||
static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
|
||||
u64 map_flags)
|
||||
static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
|
||||
u64 map_flags)
|
||||
{
|
||||
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
|
||||
struct htab_elem *l_new, *l_old = NULL;
|
||||
@@ -1242,9 +1244,9 @@ err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
|
||||
void *value, u64 map_flags,
|
||||
bool onallcpus)
|
||||
static long __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
|
||||
void *value, u64 map_flags,
|
||||
bool onallcpus)
|
||||
{
|
||||
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
|
||||
struct htab_elem *l_new = NULL, *l_old;
|
||||
@@ -1297,9 +1299,9 @@ err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
|
||||
void *value, u64 map_flags,
|
||||
bool onallcpus)
|
||||
static long __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
|
||||
void *value, u64 map_flags,
|
||||
bool onallcpus)
|
||||
{
|
||||
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
|
||||
struct htab_elem *l_new = NULL, *l_old;
|
||||
@@ -1364,21 +1366,21 @@ err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
|
||||
void *value, u64 map_flags)
|
||||
static long htab_percpu_map_update_elem(struct bpf_map *map, void *key,
|
||||
void *value, u64 map_flags)
|
||||
{
|
||||
return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
|
||||
}
|
||||
|
||||
static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
|
||||
void *value, u64 map_flags)
|
||||
static long htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
|
||||
void *value, u64 map_flags)
|
||||
{
|
||||
return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
|
||||
false);
|
||||
}
|
||||
|
||||
/* Called from syscall or from eBPF program */
|
||||
static int htab_map_delete_elem(struct bpf_map *map, void *key)
|
||||
static long htab_map_delete_elem(struct bpf_map *map, void *key)
|
||||
{
|
||||
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
|
||||
struct hlist_nulls_head *head;
|
||||
@@ -1414,7 +1416,7 @@ static int htab_map_delete_elem(struct bpf_map *map, void *key)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
|
||||
static long htab_lru_map_delete_elem(struct bpf_map *map, void *key)
|
||||
{
|
||||
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
|
||||
struct hlist_nulls_head *head;
|
||||
@@ -2134,8 +2136,8 @@ static const struct bpf_iter_seq_info iter_seq_info = {
|
||||
.seq_priv_size = sizeof(struct bpf_iter_seq_hash_map_info),
|
||||
};
|
||||
|
||||
static int bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn,
|
||||
void *callback_ctx, u64 flags)
|
||||
static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_fn,
|
||||
void *callback_ctx, u64 flags)
|
||||
{
|
||||
struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
|
||||
struct hlist_nulls_head *head;
|
||||
|
@@ -18,6 +18,7 @@
|
||||
#include <linux/pid_namespace.h>
|
||||
#include <linux/poison.h>
|
||||
#include <linux/proc_ns.h>
|
||||
#include <linux/sched/task.h>
|
||||
#include <linux/security.h>
|
||||
#include <linux/btf_ids.h>
|
||||
#include <linux/bpf_mem_alloc.h>
|
||||
@@ -257,7 +258,7 @@ BPF_CALL_2(bpf_get_current_comm, char *, buf, u32, size)
|
||||
goto err_clear;
|
||||
|
||||
/* Verifier guarantees that size > 0 */
|
||||
strscpy(buf, task->comm, size);
|
||||
strscpy_pad(buf, task->comm, size);
|
||||
return 0;
|
||||
err_clear:
|
||||
memset(buf, 0, size);
|
||||
@@ -571,7 +572,7 @@ static const struct bpf_func_proto bpf_strncmp_proto = {
|
||||
.func = bpf_strncmp,
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_INTEGER,
|
||||
.arg1_type = ARG_PTR_TO_MEM,
|
||||
.arg1_type = ARG_PTR_TO_MEM | MEM_RDONLY,
|
||||
.arg2_type = ARG_CONST_SIZE,
|
||||
.arg3_type = ARG_PTR_TO_CONST_STR,
|
||||
};
|
||||
@@ -1896,14 +1897,19 @@ __bpf_kfunc void *bpf_obj_new_impl(u64 local_type_id__k, void *meta__ign)
|
||||
return p;
|
||||
}
|
||||
|
||||
void __bpf_obj_drop_impl(void *p, const struct btf_record *rec)
|
||||
{
|
||||
if (rec)
|
||||
bpf_obj_free_fields(rec, p);
|
||||
bpf_mem_free(&bpf_global_ma, p);
|
||||
}
|
||||
|
||||
__bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign)
|
||||
{
|
||||
struct btf_struct_meta *meta = meta__ign;
|
||||
void *p = p__alloc;
|
||||
|
||||
if (meta)
|
||||
bpf_obj_free_fields(meta->record, p);
|
||||
bpf_mem_free(&bpf_global_ma, p);
|
||||
__bpf_obj_drop_impl(p, meta ? meta->record : NULL);
|
||||
}
|
||||
|
||||
static void __bpf_list_add(struct bpf_list_node *node, struct bpf_list_head *head, bool tail)
|
||||
@@ -2008,73 +2014,8 @@ __bpf_kfunc struct bpf_rb_node *bpf_rbtree_first(struct bpf_rb_root *root)
|
||||
*/
|
||||
__bpf_kfunc struct task_struct *bpf_task_acquire(struct task_struct *p)
|
||||
{
|
||||
return get_task_struct(p);
|
||||
}
|
||||
|
||||
/**
|
||||
* bpf_task_acquire_not_zero - Acquire a reference to a rcu task object. A task
|
||||
* acquired by this kfunc which is not stored in a map as a kptr, must be
|
||||
* released by calling bpf_task_release().
|
||||
* @p: The task on which a reference is being acquired.
|
||||
*/
|
||||
__bpf_kfunc struct task_struct *bpf_task_acquire_not_zero(struct task_struct *p)
|
||||
{
|
||||
/* For the time being this function returns NULL, as it's not currently
|
||||
* possible to safely acquire a reference to a task with RCU protection
|
||||
* using get_task_struct() and put_task_struct(). This is due to the
|
||||
* slightly odd mechanics of p->rcu_users, and how task RCU protection
|
||||
* works.
|
||||
*
|
||||
* A struct task_struct is refcounted by two different refcount_t
|
||||
* fields:
|
||||
*
|
||||
* 1. p->usage: The "true" refcount field which tracks a task's
|
||||
* lifetime. The task is freed as soon as this
|
||||
* refcount drops to 0.
|
||||
*
|
||||
* 2. p->rcu_users: An "RCU users" refcount field which is statically
|
||||
* initialized to 2, and is co-located in a union with
|
||||
* a struct rcu_head field (p->rcu). p->rcu_users
|
||||
* essentially encapsulates a single p->usage
|
||||
* refcount, and when p->rcu_users goes to 0, an RCU
|
||||
* callback is scheduled on the struct rcu_head which
|
||||
* decrements the p->usage refcount.
|
||||
*
|
||||
* There are two important implications to this task refcounting logic
|
||||
* described above. The first is that
|
||||
* refcount_inc_not_zero(&p->rcu_users) cannot be used anywhere, as
|
||||
* after the refcount goes to 0, the RCU callback being scheduled will
|
||||
* cause the memory backing the refcount to again be nonzero due to the
|
||||
* fields sharing a union. The other is that we can't rely on RCU to
|
||||
* guarantee that a task is valid in a BPF program. This is because a
|
||||
* task could have already transitioned to being in the TASK_DEAD
|
||||
* state, had its rcu_users refcount go to 0, and its rcu callback
|
||||
* invoked in which it drops its single p->usage reference. At this
|
||||
* point the task will be freed as soon as the last p->usage reference
|
||||
* goes to 0, without waiting for another RCU gp to elapse. The only
|
||||
* way that a BPF program can guarantee that a task is valid is in this
|
||||
* scenario is to hold a p->usage refcount itself.
|
||||
*
|
||||
* Until we're able to resolve this issue, either by pulling
|
||||
* p->rcu_users and p->rcu out of the union, or by getting rid of
|
||||
* p->usage and just using p->rcu_users for refcounting, we'll just
|
||||
* return NULL here.
|
||||
*/
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* bpf_task_kptr_get - Acquire a reference on a struct task_struct kptr. A task
|
||||
* kptr acquired by this kfunc which is not subsequently stored in a map, must
|
||||
* be released by calling bpf_task_release().
|
||||
* @pp: A pointer to a task kptr on which a reference is being acquired.
|
||||
*/
|
||||
__bpf_kfunc struct task_struct *bpf_task_kptr_get(struct task_struct **pp)
|
||||
{
|
||||
/* We must return NULL here until we have clarity on how to properly
|
||||
* leverage RCU for ensuring a task's lifetime. See the comment above
|
||||
* in bpf_task_acquire_not_zero() for more details.
|
||||
*/
|
||||
if (refcount_inc_not_zero(&p->rcu_users))
|
||||
return p;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@@ -2084,10 +2025,7 @@ __bpf_kfunc struct task_struct *bpf_task_kptr_get(struct task_struct **pp)
|
||||
*/
|
||||
__bpf_kfunc void bpf_task_release(struct task_struct *p)
|
||||
{
|
||||
if (!p)
|
||||
return;
|
||||
|
||||
put_task_struct(p);
|
||||
put_task_struct_rcu_user(p);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_CGROUPS
|
||||
@@ -2099,39 +2037,7 @@ __bpf_kfunc void bpf_task_release(struct task_struct *p)
|
||||
*/
|
||||
__bpf_kfunc struct cgroup *bpf_cgroup_acquire(struct cgroup *cgrp)
|
||||
{
|
||||
cgroup_get(cgrp);
|
||||
return cgrp;
|
||||
}
|
||||
|
||||
/**
|
||||
* bpf_cgroup_kptr_get - Acquire a reference on a struct cgroup kptr. A cgroup
|
||||
* kptr acquired by this kfunc which is not subsequently stored in a map, must
|
||||
* be released by calling bpf_cgroup_release().
|
||||
* @cgrpp: A pointer to a cgroup kptr on which a reference is being acquired.
|
||||
*/
|
||||
__bpf_kfunc struct cgroup *bpf_cgroup_kptr_get(struct cgroup **cgrpp)
|
||||
{
|
||||
struct cgroup *cgrp;
|
||||
|
||||
rcu_read_lock();
|
||||
/* Another context could remove the cgroup from the map and release it
|
||||
* at any time, including after we've done the lookup above. This is
|
||||
* safe because we're in an RCU read region, so the cgroup is
|
||||
* guaranteed to remain valid until at least the rcu_read_unlock()
|
||||
* below.
|
||||
*/
|
||||
cgrp = READ_ONCE(*cgrpp);
|
||||
|
||||
if (cgrp && !cgroup_tryget(cgrp))
|
||||
/* If the cgroup had been removed from the map and freed as
|
||||
* described above, cgroup_tryget() will return false. The
|
||||
* cgroup will be freed at some point after the current RCU gp
|
||||
* has ended, so just return NULL to the user.
|
||||
*/
|
||||
cgrp = NULL;
|
||||
rcu_read_unlock();
|
||||
|
||||
return cgrp;
|
||||
return cgroup_tryget(cgrp) ? cgrp : NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -2143,9 +2049,6 @@ __bpf_kfunc struct cgroup *bpf_cgroup_kptr_get(struct cgroup **cgrpp)
|
||||
*/
|
||||
__bpf_kfunc void bpf_cgroup_release(struct cgroup *cgrp)
|
||||
{
|
||||
if (!cgrp)
|
||||
return;
|
||||
|
||||
cgroup_put(cgrp);
|
||||
}
|
||||
|
||||
@@ -2200,7 +2103,7 @@ __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid)
|
||||
rcu_read_lock();
|
||||
p = find_task_by_pid_ns(pid, &init_pid_ns);
|
||||
if (p)
|
||||
bpf_task_acquire(p);
|
||||
p = bpf_task_acquire(p);
|
||||
rcu_read_unlock();
|
||||
|
||||
return p;
|
||||
@@ -2372,17 +2275,14 @@ BTF_ID_FLAGS(func, bpf_list_push_front)
|
||||
BTF_ID_FLAGS(func, bpf_list_push_back)
|
||||
BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL)
|
||||
BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL)
|
||||
BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
|
||||
BTF_ID_FLAGS(func, bpf_task_acquire_not_zero, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
|
||||
BTF_ID_FLAGS(func, bpf_task_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL)
|
||||
BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
|
||||
BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
|
||||
BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE)
|
||||
BTF_ID_FLAGS(func, bpf_rbtree_add)
|
||||
BTF_ID_FLAGS(func, bpf_rbtree_first, KF_RET_NULL)
|
||||
|
||||
#ifdef CONFIG_CGROUPS
|
||||
BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
|
||||
BTF_ID_FLAGS(func, bpf_cgroup_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL)
|
||||
BTF_ID_FLAGS(func, bpf_cgroup_acquire, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
|
||||
BTF_ID_FLAGS(func, bpf_cgroup_release, KF_RELEASE)
|
||||
BTF_ID_FLAGS(func, bpf_cgroup_ancestor, KF_ACQUIRE | KF_RCU | KF_RET_NULL)
|
||||
BTF_ID_FLAGS(func, bpf_cgroup_from_id, KF_ACQUIRE | KF_RET_NULL)
|
||||
@ -2411,6 +2311,9 @@ BTF_ID_FLAGS(func, bpf_rcu_read_lock)
|
||||
BTF_ID_FLAGS(func, bpf_rcu_read_unlock)
|
||||
BTF_ID_FLAGS(func, bpf_dynptr_slice, KF_RET_NULL)
|
||||
BTF_ID_FLAGS(func, bpf_dynptr_slice_rdwr, KF_RET_NULL)
|
||||
BTF_ID_FLAGS(func, bpf_iter_num_new, KF_ITER_NEW)
|
||||
BTF_ID_FLAGS(func, bpf_iter_num_next, KF_ITER_NEXT | KF_RET_NULL)
|
||||
BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY)
|
||||
BTF_SET8_END(common_btf_ids)
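With bpf_task_acquire now registered as KF_ACQUIRE | KF_RCU | KF_RET_NULL (see the BTF_ID_FLAGS change above), a program must NULL-check the acquired pointer before using it. The following is a minimal BPF-side sketch, not part of this patch; the program name, the tracepoint section and the local kfunc declarations are assumptions made for illustration:

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/* kfuncs are not in UAPI headers; a program declares them locally */
struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym;
void bpf_task_release(struct task_struct *p) __ksym;

SEC("tp_btf/task_newtask")
int BPF_PROG(demo_task_acquire, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	/* KF_RET_NULL: the acquire may fail and must be checked */
	acquired = bpf_task_acquire(task);
	if (!acquired)
		return 0;

	/* ... use the referenced task ... */
	bpf_task_release(acquired);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";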
|
||||
|
||||
static const struct btf_kfunc_id_set common_kfunc_set = {
|
||||
|
@ -141,8 +141,8 @@ static void *cgroup_storage_lookup_elem(struct bpf_map *_map, void *key)
|
||||
return &READ_ONCE(storage->buf)->data[0];
|
||||
}
|
||||
|
||||
static int cgroup_storage_update_elem(struct bpf_map *map, void *key,
|
||||
void *value, u64 flags)
|
||||
static long cgroup_storage_update_elem(struct bpf_map *map, void *key,
|
||||
void *value, u64 flags)
|
||||
{
|
||||
struct bpf_cgroup_storage *storage;
|
||||
struct bpf_storage_buffer *new;
|
||||
@ -348,7 +348,7 @@ static void cgroup_storage_map_free(struct bpf_map *_map)
|
||||
bpf_map_area_free(map);
|
||||
}
|
||||
|
||||
static int cgroup_storage_delete_elem(struct bpf_map *map, void *key)
|
||||
static long cgroup_storage_delete_elem(struct bpf_map *map, void *key)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
kernel/bpf/log.c (new file, 330 lines)
@@ -0,0 +1,330 @@
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
|
||||
* Copyright (c) 2016 Facebook
|
||||
* Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
|
||||
*/
|
||||
#include <uapi/linux/btf.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/bpf.h>
|
||||
#include <linux/bpf_verifier.h>
|
||||
#include <linux/math64.h>
|
||||
|
||||
static bool bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log)
|
||||
{
|
||||
/* ubuf and len_total should both be specified (or not) together */
|
||||
if (!!log->ubuf != !!log->len_total)
|
||||
return false;
|
||||
/* log buf without log_level is meaningless */
|
||||
if (log->ubuf && log->level == 0)
|
||||
return false;
|
||||
if (log->level & ~BPF_LOG_MASK)
|
||||
return false;
|
||||
if (log->len_total > UINT_MAX >> 2)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
|
||||
char __user *log_buf, u32 log_size)
|
||||
{
|
||||
log->level = log_level;
|
||||
log->ubuf = log_buf;
|
||||
log->len_total = log_size;
|
||||
|
||||
/* log attributes have to be sane */
|
||||
if (!bpf_verifier_log_attr_valid(log))
|
||||
return -EINVAL;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void bpf_vlog_update_len_max(struct bpf_verifier_log *log, u32 add_len)
|
||||
{
|
||||
/* add_len includes terminal \0, so no need for +1. */
|
||||
u64 len = log->end_pos + add_len;
|
||||
|
||||
/* log->len_max could be larger than our current len due to
|
||||
* bpf_vlog_reset() calls, so we maintain the max of any length at any
|
||||
* previous point
|
||||
*/
|
||||
if (len > UINT_MAX)
|
||||
log->len_max = UINT_MAX;
|
||||
else if (len > log->len_max)
|
||||
log->len_max = len;
|
||||
}
|
||||
|
||||
void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
|
||||
va_list args)
|
||||
{
|
||||
u64 cur_pos;
|
||||
u32 new_n, n;
|
||||
|
||||
n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);
|
||||
|
||||
WARN_ONCE(n >= BPF_VERIFIER_TMP_LOG_SIZE - 1,
|
||||
"verifier log line truncated - local buffer too short\n");
|
||||
|
||||
if (log->level == BPF_LOG_KERNEL) {
|
||||
bool newline = n > 0 && log->kbuf[n - 1] == '\n';
|
||||
|
||||
pr_err("BPF: %s%s", log->kbuf, newline ? "" : "\n");
|
||||
return;
|
||||
}
|
||||
|
||||
n += 1; /* include terminating zero */
|
||||
bpf_vlog_update_len_max(log, n);
|
||||
|
||||
if (log->level & BPF_LOG_FIXED) {
|
||||
/* check if we have at least something to put into user buf */
|
||||
new_n = 0;
|
||||
if (log->end_pos < log->len_total) {
|
||||
new_n = min_t(u32, log->len_total - log->end_pos, n);
|
||||
log->kbuf[new_n - 1] = '\0';
|
||||
}
|
||||
|
||||
cur_pos = log->end_pos;
|
||||
log->end_pos += n - 1; /* don't count terminating '\0' */
|
||||
|
||||
if (log->ubuf && new_n &&
|
||||
copy_to_user(log->ubuf + cur_pos, log->kbuf, new_n))
|
||||
goto fail;
|
||||
} else {
|
||||
u64 new_end, new_start;
|
||||
u32 buf_start, buf_end, new_n;
|
||||
|
||||
new_end = log->end_pos + n;
|
||||
if (new_end - log->start_pos >= log->len_total)
|
||||
new_start = new_end - log->len_total;
|
||||
else
|
||||
new_start = log->start_pos;
|
||||
|
||||
log->start_pos = new_start;
|
||||
log->end_pos = new_end - 1; /* don't count terminating '\0' */
|
||||
|
||||
if (!log->ubuf)
|
||||
return;
|
||||
|
||||
new_n = min(n, log->len_total);
|
||||
cur_pos = new_end - new_n;
|
||||
div_u64_rem(cur_pos, log->len_total, &buf_start);
|
||||
div_u64_rem(new_end, log->len_total, &buf_end);
|
||||
/* new_end and buf_end are exclusive indices, so if buf_end is
|
||||
* exactly zero, then it actually points right to the end of
|
||||
* ubuf and there is no wrap around
|
||||
*/
|
||||
if (buf_end == 0)
|
||||
buf_end = log->len_total;
|
||||
|
||||
/* if buf_start > buf_end, we wrapped around;
|
||||
* if buf_start == buf_end, then we fill ubuf completely; we
|
||||
* can't have buf_start == buf_end to mean that there is
|
||||
* nothing to write, because we always write at least
|
||||
* something, even if terminal '\0'
|
||||
*/
|
||||
if (buf_start < buf_end) {
|
||||
/* message fits within contiguous chunk of ubuf */
|
||||
if (copy_to_user(log->ubuf + buf_start,
|
||||
log->kbuf + n - new_n,
|
||||
buf_end - buf_start))
|
||||
goto fail;
|
||||
} else {
|
||||
/* message wraps around the end of ubuf, copy in two chunks */
|
||||
if (copy_to_user(log->ubuf + buf_start,
|
||||
log->kbuf + n - new_n,
|
||||
log->len_total - buf_start))
|
||||
goto fail;
|
||||
if (copy_to_user(log->ubuf,
|
||||
log->kbuf + n - buf_end,
|
||||
buf_end))
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
|
||||
return;
|
||||
fail:
|
||||
log->ubuf = NULL;
|
||||
}
|
||||
|
||||
void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos)
|
||||
{
|
||||
char zero = 0;
|
||||
u32 pos;
|
||||
|
||||
if (WARN_ON_ONCE(new_pos > log->end_pos))
|
||||
return;
|
||||
|
||||
if (!bpf_verifier_log_needed(log) || log->level == BPF_LOG_KERNEL)
|
||||
return;
|
||||
|
||||
/* if position to which we reset is beyond current log window,
|
||||
* then we didn't preserve any useful content and should adjust
|
||||
* start_pos to end up with an empty log (start_pos == end_pos)
|
||||
*/
|
||||
log->end_pos = new_pos;
|
||||
if (log->end_pos < log->start_pos)
|
||||
log->start_pos = log->end_pos;
|
||||
|
||||
if (!log->ubuf)
|
||||
return;
|
||||
|
||||
if (log->level & BPF_LOG_FIXED)
|
||||
pos = log->end_pos + 1;
|
||||
else
|
||||
div_u64_rem(new_pos, log->len_total, &pos);
|
||||
|
||||
if (pos < log->len_total && put_user(zero, log->ubuf + pos))
|
||||
log->ubuf = NULL;
|
||||
}
|
||||
|
||||
static void bpf_vlog_reverse_kbuf(char *buf, int len)
|
||||
{
|
||||
int i, j;
|
||||
|
||||
for (i = 0, j = len - 1; i < j; i++, j--)
|
||||
swap(buf[i], buf[j]);
|
||||
}
|
||||
|
||||
static int bpf_vlog_reverse_ubuf(struct bpf_verifier_log *log, int start, int end)
|
||||
{
|
||||
/* we split log->kbuf into two equal parts for both ends of array */
|
||||
int n = sizeof(log->kbuf) / 2, nn;
|
||||
char *lbuf = log->kbuf, *rbuf = log->kbuf + n;
|
||||
|
||||
/* Read ubuf's section [start, end) two chunks at a time, from left
|
||||
* and right side; within each chunk, swap all the bytes; after that
|
||||
* reverse the order of lbuf and rbuf and write result back to ubuf.
|
||||
* This way we'll end up with swapped contents of specified
|
||||
* [start, end) ubuf segment.
|
||||
*/
|
||||
while (end - start > 1) {
|
||||
nn = min(n, (end - start ) / 2);
|
||||
|
||||
if (copy_from_user(lbuf, log->ubuf + start, nn))
|
||||
return -EFAULT;
|
||||
if (copy_from_user(rbuf, log->ubuf + end - nn, nn))
|
||||
return -EFAULT;
|
||||
|
||||
bpf_vlog_reverse_kbuf(lbuf, nn);
|
||||
bpf_vlog_reverse_kbuf(rbuf, nn);
|
||||
|
||||
/* we write lbuf to the right end of ubuf, while rbuf to the
|
||||
* left one to end up with properly reversed overall ubuf
|
||||
*/
|
||||
if (copy_to_user(log->ubuf + start, rbuf, nn))
|
||||
return -EFAULT;
|
||||
if (copy_to_user(log->ubuf + end - nn, lbuf, nn))
|
||||
return -EFAULT;
|
||||
|
||||
start += nn;
|
||||
end -= nn;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual)
|
||||
{
|
||||
u32 sublen;
|
||||
int err;
|
||||
|
||||
*log_size_actual = 0;
|
||||
if (!log || log->level == 0 || log->level == BPF_LOG_KERNEL)
|
||||
return 0;
|
||||
|
||||
if (!log->ubuf)
|
||||
goto skip_log_rotate;
|
||||
/* If we never truncated log, there is nothing to move around. */
|
||||
if (log->start_pos == 0)
|
||||
goto skip_log_rotate;
|
||||
|
||||
/* Otherwise we need to rotate log contents to make it start from the
|
||||
* buffer beginning and be a continuous zero-terminated string. Note
|
||||
* that if log->start_pos != 0 then we definitely filled up entire log
|
||||
* buffer with no gaps, and we just need to shift buffer contents to
|
||||
* the left by (log->start_pos % log->len_total) bytes.
|
||||
*
|
||||
* Unfortunately, user buffer could be huge and we don't want to
|
||||
* allocate temporary kernel memory of the same size just to shift
|
||||
* contents in a straightforward fashion. Instead, we'll be clever and
|
||||
* do in-place array rotation. This is a leetcode-style problem, which
|
||||
* could be solved by three rotations.
|
||||
*
|
||||
* Let's say we have log buffer that has to be shifted left by 7 bytes
|
||||
* (spaces and vertical bar is just for demonstrative purposes):
|
||||
* E F G H I J K | A B C D
|
||||
*
|
||||
* First, we reverse entire array:
|
||||
* D C B A | K J I H G F E
|
||||
*
|
||||
* Then we rotate first 4 bytes (DCBA) and separately last 7 bytes
|
||||
* (KJIHGFE), resulting in a properly rotated array:
|
||||
* A B C D | E F G H I J K
|
||||
*
|
||||
* We'll utilize log->kbuf to read user memory chunk by chunk, swap
|
||||
* bytes, and write them back. Doing it byte-by-byte would be
|
||||
* unnecessarily inefficient. Altogether we are going to read and
|
||||
* write each byte twice, for total 4 memory copies between kernel and
|
||||
* user space.
|
||||
*/
|
||||
|
||||
/* length of the chopped off part that will be the beginning;
|
||||
* len(ABCD) in the example above
|
||||
*/
|
||||
div_u64_rem(log->start_pos, log->len_total, &sublen);
|
||||
sublen = log->len_total - sublen;
|
||||
|
||||
err = bpf_vlog_reverse_ubuf(log, 0, log->len_total);
|
||||
err = err ?: bpf_vlog_reverse_ubuf(log, 0, sublen);
|
||||
err = err ?: bpf_vlog_reverse_ubuf(log, sublen, log->len_total);
|
||||
if (err)
|
||||
log->ubuf = NULL;
|
||||
|
||||
skip_log_rotate:
|
||||
*log_size_actual = log->len_max;
|
||||
|
||||
/* properly initialized log has either both ubuf!=NULL and len_total>0
|
||||
* or ubuf==NULL and len_total==0, so if this condition doesn't hold,
|
||||
* we got a fault somewhere along the way, so report it back
|
||||
*/
|
||||
if (!!log->ubuf != !!log->len_total)
|
||||
return -EFAULT;
|
||||
|
||||
/* did truncation actually happen? */
|
||||
if (log->ubuf && log->len_max > log->len_total)
|
||||
return -ENOSPC;
|
||||
|
||||
return 0;
|
||||
}
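The three-reversal rotation described in the comment inside bpf_vlog_finalize() can be checked in isolation. A standalone userspace sketch, illustration only and not kernel code, using the same example buffer:

#include <stdio.h>
#include <string.h>

static void reverse(char *buf, int start, int end)	/* reverses [start, end) */
{
	while (end - start > 1) {
		char tmp = buf[start];

		buf[start++] = buf[--end];
		buf[end] = tmp;
	}
}

int main(void)
{
	char buf[] = "EFGHIJKABCD";		/* wrapped log: "ABCD" belongs first */
	int len = (int)strlen(buf);
	int sublen = 4;				/* length of the chopped-off part "ABCD" */

	reverse(buf, 0, len);			/* DCBAKJIHGFE */
	reverse(buf, 0, sublen);		/* ABCDKJIHGFE */
	reverse(buf, sublen, len);		/* ABCDEFGHIJK */
	printf("%s\n", buf);
	return 0;
}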
|
||||
|
||||
/* log_level controls verbosity level of eBPF verifier.
|
||||
* bpf_verifier_log_write() is used to dump the verification trace to the log,
|
||||
* so the user can figure out what's wrong with the program
|
||||
*/
|
||||
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
|
||||
const char *fmt, ...)
|
||||
{
|
||||
va_list args;
|
||||
|
||||
if (!bpf_verifier_log_needed(&env->log))
|
||||
return;
|
||||
|
||||
va_start(args, fmt);
|
||||
bpf_verifier_vlog(&env->log, fmt, args);
|
||||
va_end(args);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
|
||||
|
||||
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
|
||||
const char *fmt, ...)
|
||||
{
|
||||
va_list args;
|
||||
|
||||
if (!bpf_verifier_log_needed(log))
|
||||
return;
|
||||
|
||||
va_start(args, fmt);
|
||||
bpf_verifier_vlog(log, fmt, args);
|
||||
va_end(args);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(bpf_log);
|
@ -300,8 +300,8 @@ static struct lpm_trie_node *lpm_trie_node_alloc(const struct lpm_trie *trie,
|
||||
}
|
||||
|
||||
/* Called from syscall or from eBPF program */
|
||||
static int trie_update_elem(struct bpf_map *map,
|
||||
void *_key, void *value, u64 flags)
|
||||
static long trie_update_elem(struct bpf_map *map,
|
||||
void *_key, void *value, u64 flags)
|
||||
{
|
||||
struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
|
||||
struct lpm_trie_node *node, *im_node = NULL, *new_node = NULL;
|
||||
@ -431,7 +431,7 @@ out:
|
||||
}
|
||||
|
||||
/* Called from syscall or from eBPF program */
|
||||
static int trie_delete_elem(struct bpf_map *map, void *_key)
|
||||
static long trie_delete_elem(struct bpf_map *map, void *_key)
|
||||
{
|
||||
struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
|
||||
struct bpf_lpm_trie_key *key = _key;
|
||||
|
@ -121,15 +121,8 @@ static struct llist_node notrace *__llist_del_first(struct llist_head *head)
|
||||
return entry;
|
||||
}
|
||||
|
||||
static void *__alloc(struct bpf_mem_cache *c, int node)
|
||||
static void *__alloc(struct bpf_mem_cache *c, int node, gfp_t flags)
|
||||
{
|
||||
/* Allocate, but don't deplete atomic reserves that typical
|
||||
* GFP_ATOMIC would do. irq_work runs on this cpu and kmalloc
|
||||
* will allocate from the current numa node which is what we
|
||||
* want here.
|
||||
*/
|
||||
gfp_t flags = GFP_NOWAIT | __GFP_NOWARN | __GFP_ACCOUNT;
|
||||
|
||||
if (c->percpu_size) {
|
||||
void **obj = kmalloc_node(c->percpu_size, flags, node);
|
||||
void *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);
|
||||
@ -185,7 +178,12 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
|
||||
*/
|
||||
obj = __llist_del_first(&c->free_by_rcu);
|
||||
if (!obj) {
|
||||
obj = __alloc(c, node);
|
||||
/* Allocate, but don't deplete atomic reserves that typical
|
||||
* GFP_ATOMIC would do. irq_work runs on this cpu and kmalloc
|
||||
* will allocate from the current numa node which is what we
|
||||
* want here.
|
||||
*/
|
||||
obj = __alloc(c, node, GFP_NOWAIT | __GFP_NOWARN | __GFP_ACCOUNT);
|
||||
if (!obj)
|
||||
break;
|
||||
}
|
||||
@ -676,3 +674,46 @@ void notrace bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr)
|
||||
|
||||
unit_free(this_cpu_ptr(ma->cache), ptr);
|
||||
}
|
||||
|
||||
/* Directly does a kfree() without putting 'ptr' back to the free_llist
|
||||
* for reuse and without waiting for a rcu_tasks_trace gp.
|
||||
* The caller must first go through the rcu_tasks_trace gp for 'ptr'
|
||||
* before calling bpf_mem_cache_raw_free().
|
||||
* It could be used when the rcu_tasks_trace callback does not have
|
||||
* a hold on the original bpf_mem_alloc object that allocated the
|
||||
* 'ptr'. This should only be used in the uncommon code path.
|
||||
* Otherwise, the bpf_mem_alloc's free_llist cannot be refilled
|
||||
* and may affect performance.
|
||||
*/
|
||||
void bpf_mem_cache_raw_free(void *ptr)
|
||||
{
|
||||
if (!ptr)
|
||||
return;
|
||||
|
||||
kfree(ptr - LLIST_NODE_SZ);
|
||||
}
|
||||
|
||||
/* When flags == GFP_KERNEL, it signals that the caller will not cause
|
||||
* deadlock when using kmalloc. bpf_mem_cache_alloc_flags() will use
|
||||
* kmalloc if the free_llist is empty.
|
||||
*/
|
||||
void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)
|
||||
{
|
||||
struct bpf_mem_cache *c;
|
||||
void *ret;
|
||||
|
||||
c = this_cpu_ptr(ma->cache);
|
||||
|
||||
ret = unit_alloc(c);
|
||||
if (!ret && flags == GFP_KERNEL) {
|
||||
struct mem_cgroup *memcg, *old_memcg;
|
||||
|
||||
memcg = get_memcg(c);
|
||||
old_memcg = set_active_memcg(memcg);
|
||||
ret = __alloc(c, NUMA_NO_NODE, GFP_KERNEL | __GFP_NOWARN | __GFP_ACCOUNT);
|
||||
set_active_memcg(old_memcg);
|
||||
mem_cgroup_put(memcg);
|
||||
}
|
||||
|
||||
return !ret ? NULL : ret + LLIST_NODE_SZ;
|
||||
}
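As a rough illustration of the comment above, a sleepable caller that cannot deadlock on the allocator might use the new entry point as below. This is a hypothetical caller; the bpf_mem_alloc 'ma' is assumed to have been initialized elsewhere with bpf_mem_alloc_init():

static int demo_mem_alloc_sleepable(struct bpf_mem_alloc *ma)
{
	void *obj;

	/* GFP_KERNEL signals that falling back to kmalloc() is safe here,
	 * so the allocation can succeed even with an empty free_llist.
	 */
	obj = bpf_mem_cache_alloc_flags(ma, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/* ... initialize and publish obj ... */

	bpf_mem_cache_free(ma, obj);	/* normal free path, unchanged */
	return 0;
}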
|
||||
|
@ -95,7 +95,7 @@ static void queue_stack_map_free(struct bpf_map *map)
|
||||
bpf_map_area_free(qs);
|
||||
}
|
||||
|
||||
static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
|
||||
static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
|
||||
{
|
||||
struct bpf_queue_stack *qs = bpf_queue_stack(map);
|
||||
unsigned long flags;
|
||||
@ -124,7 +124,7 @@ out:
|
||||
}
|
||||
|
||||
|
||||
static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
|
||||
static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
|
||||
{
|
||||
struct bpf_queue_stack *qs = bpf_queue_stack(map);
|
||||
unsigned long flags;
|
||||
@ -156,32 +156,32 @@ out:
|
||||
}
|
||||
|
||||
/* Called from syscall or from eBPF program */
|
||||
static int queue_map_peek_elem(struct bpf_map *map, void *value)
|
||||
static long queue_map_peek_elem(struct bpf_map *map, void *value)
|
||||
{
|
||||
return __queue_map_get(map, value, false);
|
||||
}
|
||||
|
||||
/* Called from syscall or from eBPF program */
|
||||
static int stack_map_peek_elem(struct bpf_map *map, void *value)
|
||||
static long stack_map_peek_elem(struct bpf_map *map, void *value)
|
||||
{
|
||||
return __stack_map_get(map, value, false);
|
||||
}
|
||||
|
||||
/* Called from syscall or from eBPF program */
|
||||
static int queue_map_pop_elem(struct bpf_map *map, void *value)
|
||||
static long queue_map_pop_elem(struct bpf_map *map, void *value)
|
||||
{
|
||||
return __queue_map_get(map, value, true);
|
||||
}
|
||||
|
||||
/* Called from syscall or from eBPF program */
|
||||
static int stack_map_pop_elem(struct bpf_map *map, void *value)
|
||||
static long stack_map_pop_elem(struct bpf_map *map, void *value)
|
||||
{
|
||||
return __stack_map_get(map, value, true);
|
||||
}
|
||||
|
||||
/* Called from syscall or from eBPF program */
|
||||
static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
|
||||
u64 flags)
|
||||
static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
|
||||
u64 flags)
|
||||
{
|
||||
struct bpf_queue_stack *qs = bpf_queue_stack(map);
|
||||
unsigned long irq_flags;
|
||||
@ -227,14 +227,14 @@ static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key)
|
||||
}
|
||||
|
||||
/* Called from syscall or from eBPF program */
|
||||
static int queue_stack_map_update_elem(struct bpf_map *map, void *key,
|
||||
void *value, u64 flags)
|
||||
static long queue_stack_map_update_elem(struct bpf_map *map, void *key,
|
||||
void *value, u64 flags)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Called from syscall or from eBPF program */
|
||||
static int queue_stack_map_delete_elem(struct bpf_map *map, void *key)
|
||||
static long queue_stack_map_delete_elem(struct bpf_map *map, void *key)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -59,7 +59,7 @@ static void *reuseport_array_lookup_elem(struct bpf_map *map, void *key)
|
||||
}
|
||||
|
||||
/* Called from syscall only */
|
||||
static int reuseport_array_delete_elem(struct bpf_map *map, void *key)
|
||||
static long reuseport_array_delete_elem(struct bpf_map *map, void *key)
|
||||
{
|
||||
struct reuseport_array *array = reuseport_array(map);
|
||||
u32 index = *(u32 *)key;
|
||||
|
@ -242,13 +242,13 @@ static void *ringbuf_map_lookup_elem(struct bpf_map *map, void *key)
|
||||
return ERR_PTR(-ENOTSUPP);
|
||||
}
|
||||
|
||||
static int ringbuf_map_update_elem(struct bpf_map *map, void *key, void *value,
|
||||
u64 flags)
|
||||
static long ringbuf_map_update_elem(struct bpf_map *map, void *key, void *value,
|
||||
u64 flags)
|
||||
{
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
|
||||
static int ringbuf_map_delete_elem(struct bpf_map *map, void *key)
|
||||
static long ringbuf_map_delete_elem(struct bpf_map *map, void *key)
|
||||
{
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
|
@ -618,14 +618,14 @@ static int stack_map_get_next_key(struct bpf_map *map, void *key,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int stack_map_update_elem(struct bpf_map *map, void *key, void *value,
|
||||
u64 map_flags)
|
||||
static long stack_map_update_elem(struct bpf_map *map, void *key, void *value,
|
||||
u64 map_flags)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Called from syscall or from eBPF program */
|
||||
static int stack_map_delete_elem(struct bpf_map *map, void *key)
|
||||
static long stack_map_delete_elem(struct bpf_map *map, void *key)
|
||||
{
|
||||
struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
|
||||
struct stack_map_bucket *old_bucket;
|
||||
|
@ -520,14 +520,14 @@ static int btf_field_cmp(const void *a, const void *b)
|
||||
}
|
||||
|
||||
struct btf_field *btf_record_find(const struct btf_record *rec, u32 offset,
|
||||
enum btf_field_type type)
|
||||
u32 field_mask)
|
||||
{
|
||||
struct btf_field *field;
|
||||
|
||||
if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & type))
|
||||
if (IS_ERR_OR_NULL(rec) || !(rec->field_mask & field_mask))
|
||||
return NULL;
|
||||
field = bsearch(&offset, rec->fields, rec->cnt, sizeof(rec->fields[0]), btf_field_cmp);
|
||||
if (!field || !(field->type & type))
|
||||
if (!field || !(field->type & field_mask))
|
||||
return NULL;
|
||||
return field;
|
||||
}
|
||||
@ -650,6 +650,8 @@ void bpf_obj_free_timer(const struct btf_record *rec, void *obj)
|
||||
bpf_timer_cancel_and_free(obj + rec->timer_off);
|
||||
}
|
||||
|
||||
extern void __bpf_obj_drop_impl(void *p, const struct btf_record *rec);
|
||||
|
||||
void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
|
||||
{
|
||||
const struct btf_field *fields;
|
||||
@ -659,8 +661,10 @@ void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
|
||||
return;
|
||||
fields = rec->fields;
|
||||
for (i = 0; i < rec->cnt; i++) {
|
||||
struct btf_struct_meta *pointee_struct_meta;
|
||||
const struct btf_field *field = &fields[i];
|
||||
void *field_ptr = obj + field->offset;
|
||||
void *xchgd_field;
|
||||
|
||||
switch (fields[i].type) {
|
||||
case BPF_SPIN_LOCK:
|
||||
@ -672,7 +676,22 @@ void bpf_obj_free_fields(const struct btf_record *rec, void *obj)
|
||||
WRITE_ONCE(*(u64 *)field_ptr, 0);
|
||||
break;
|
||||
case BPF_KPTR_REF:
|
||||
field->kptr.dtor((void *)xchg((unsigned long *)field_ptr, 0));
|
||||
xchgd_field = (void *)xchg((unsigned long *)field_ptr, 0);
|
||||
if (!xchgd_field)
|
||||
break;
|
||||
|
||||
if (!btf_is_kernel(field->kptr.btf)) {
|
||||
pointee_struct_meta = btf_find_struct_meta(field->kptr.btf,
|
||||
field->kptr.btf_id);
|
||||
WARN_ON_ONCE(!pointee_struct_meta);
|
||||
migrate_disable();
|
||||
__bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ?
|
||||
pointee_struct_meta->record :
|
||||
NULL);
|
||||
migrate_enable();
|
||||
} else {
|
||||
field->kptr.dtor(xchgd_field);
|
||||
}
|
||||
break;
|
||||
case BPF_LIST_HEAD:
|
||||
if (WARN_ON_ONCE(rec->spin_lock_off < 0))
|
||||
@ -1287,8 +1306,10 @@ struct bpf_map *bpf_map_get_with_uref(u32 ufd)
|
||||
return map;
|
||||
}
|
||||
|
||||
/* map_idr_lock should have been held */
|
||||
static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
|
||||
/* map_idr_lock should have been held or the map should have been
|
||||
* protected by rcu read lock.
|
||||
*/
|
||||
struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref)
|
||||
{
|
||||
int refold;
|
||||
|
||||
@ -2051,6 +2072,7 @@ static void __bpf_prog_put_noref(struct bpf_prog *prog, bool deferred)
|
||||
{
|
||||
bpf_prog_kallsyms_del_all(prog);
|
||||
btf_put(prog->aux->btf);
|
||||
module_put(prog->aux->mod);
|
||||
kvfree(prog->aux->jited_linfo);
|
||||
kvfree(prog->aux->linfo);
|
||||
kfree(prog->aux->kfunc_tab);
|
||||
@ -2479,9 +2501,9 @@ static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
|
||||
}
|
||||
|
||||
/* last field in 'union bpf_attr' used by this command */
|
||||
#define BPF_PROG_LOAD_LAST_FIELD core_relo_rec_size
|
||||
#define BPF_PROG_LOAD_LAST_FIELD log_true_size
|
||||
|
||||
static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr)
|
||||
static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
|
||||
{
|
||||
enum bpf_prog_type type = attr->prog_type;
|
||||
struct bpf_prog *prog, *dst_prog = NULL;
|
||||
@ -2631,7 +2653,7 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr)
|
||||
goto free_prog_sec;
|
||||
|
||||
/* run eBPF verifier */
|
||||
err = bpf_check(&prog, attr, uattr);
|
||||
err = bpf_check(&prog, attr, uattr, uattr_size);
|
||||
if (err < 0)
|
||||
goto free_used_maps;
|
||||
|
||||
@ -2806,16 +2828,19 @@ static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
|
||||
const struct bpf_prog *prog = link->prog;
|
||||
char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
|
||||
|
||||
bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
|
||||
seq_printf(m,
|
||||
"link_type:\t%s\n"
|
||||
"link_id:\t%u\n"
|
||||
"prog_tag:\t%s\n"
|
||||
"prog_id:\t%u\n",
|
||||
"link_id:\t%u\n",
|
||||
bpf_link_type_strs[link->type],
|
||||
link->id,
|
||||
prog_tag,
|
||||
prog->aux->id);
|
||||
link->id);
|
||||
if (prog) {
|
||||
bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
|
||||
seq_printf(m,
|
||||
"prog_tag:\t%s\n"
|
||||
"prog_id:\t%u\n",
|
||||
prog_tag,
|
||||
prog->aux->id);
|
||||
}
|
||||
if (link->ops->show_fdinfo)
|
||||
link->ops->show_fdinfo(link, m);
|
||||
}
|
||||
@ -3097,6 +3122,11 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
|
||||
if (err)
|
||||
goto out_unlock;
|
||||
|
||||
if (tgt_info.tgt_mod) {
|
||||
module_put(prog->aux->mod);
|
||||
prog->aux->mod = tgt_info.tgt_mod;
|
||||
}
|
||||
|
||||
tr = bpf_trampoline_get(key, &tgt_info);
|
||||
if (!tr) {
|
||||
err = -ENOMEM;
|
||||
@ -4290,7 +4320,8 @@ static int bpf_link_get_info_by_fd(struct file *file,
|
||||
|
||||
info.type = link->type;
|
||||
info.id = link->id;
|
||||
info.prog_id = link->prog->aux->id;
|
||||
if (link->prog)
|
||||
info.prog_id = link->prog->aux->id;
|
||||
|
||||
if (link->ops->fill_link_info) {
|
||||
err = link->ops->fill_link_info(link, &info);
|
||||
@ -4340,9 +4371,9 @@ static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
|
||||
return err;
|
||||
}
|
||||
|
||||
#define BPF_BTF_LOAD_LAST_FIELD btf_log_level
|
||||
#define BPF_BTF_LOAD_LAST_FIELD btf_log_true_size
|
||||
|
||||
static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr)
|
||||
static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
|
||||
{
|
||||
if (CHECK_ATTR(BPF_BTF_LOAD))
|
||||
return -EINVAL;
|
||||
@ -4350,7 +4381,7 @@ static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr)
|
||||
if (!bpf_capable())
|
||||
return -EPERM;
|
||||
|
||||
return btf_new_fd(attr, uattr);
|
||||
return btf_new_fd(attr, uattr, uattr_size);
|
||||
}
|
||||
|
||||
#define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
|
||||
@ -4553,6 +4584,9 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr)
|
||||
if (CHECK_ATTR(BPF_LINK_CREATE))
|
||||
return -EINVAL;
|
||||
|
||||
if (attr->link_create.attach_type == BPF_STRUCT_OPS)
|
||||
return bpf_struct_ops_link_create(attr);
|
||||
|
||||
prog = bpf_prog_get(attr->link_create.prog_fd);
|
||||
if (IS_ERR(prog))
|
||||
return PTR_ERR(prog);
|
||||
@ -4651,6 +4685,35 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int link_update_map(struct bpf_link *link, union bpf_attr *attr)
|
||||
{
|
||||
struct bpf_map *new_map, *old_map = NULL;
|
||||
int ret;
|
||||
|
||||
new_map = bpf_map_get(attr->link_update.new_map_fd);
|
||||
if (IS_ERR(new_map))
|
||||
return PTR_ERR(new_map);
|
||||
|
||||
if (attr->link_update.flags & BPF_F_REPLACE) {
|
||||
old_map = bpf_map_get(attr->link_update.old_map_fd);
|
||||
if (IS_ERR(old_map)) {
|
||||
ret = PTR_ERR(old_map);
|
||||
goto out_put;
|
||||
}
|
||||
} else if (attr->link_update.old_map_fd) {
|
||||
ret = -EINVAL;
|
||||
goto out_put;
|
||||
}
|
||||
|
||||
ret = link->ops->update_map(link, new_map, old_map);
|
||||
|
||||
if (old_map)
|
||||
bpf_map_put(old_map);
|
||||
out_put:
|
||||
bpf_map_put(new_map);
|
||||
return ret;
|
||||
}
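For context, a userspace caller could exercise this path with a raw bpf(2) call roughly as follows. Sketch only: the fds are assumed to come from earlier BPF_LINK_CREATE and struct_ops map setup, and error handling is omitted:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Swap the struct_ops map backing an existing struct_ops link. Pass
 * old_map_fd < 0 to skip the BPF_F_REPLACE check.
 */
static long struct_ops_link_update(int link_fd, int new_map_fd, int old_map_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_update.link_fd = link_fd;
	attr.link_update.new_map_fd = new_map_fd;
	if (old_map_fd >= 0) {
		/* kernel verifies old_map_fd is the currently attached map */
		attr.link_update.flags = BPF_F_REPLACE;
		attr.link_update.old_map_fd = old_map_fd;
	}

	return syscall(__NR_bpf, BPF_LINK_UPDATE, &attr, sizeof(attr));
}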
|
||||
|
||||
#define BPF_LINK_UPDATE_LAST_FIELD link_update.old_prog_fd
|
||||
|
||||
static int link_update(union bpf_attr *attr)
|
||||
@ -4671,6 +4734,11 @@ static int link_update(union bpf_attr *attr)
|
||||
if (IS_ERR(link))
|
||||
return PTR_ERR(link);
|
||||
|
||||
if (link->ops->update_map) {
|
||||
ret = link_update_map(link, attr);
|
||||
goto out_put_link;
|
||||
}
|
||||
|
||||
new_prog = bpf_prog_get(attr->link_update.new_prog_fd);
|
||||
if (IS_ERR(new_prog)) {
|
||||
ret = PTR_ERR(new_prog);
|
||||
@ -4991,7 +5059,7 @@ static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
|
||||
err = map_freeze(&attr);
|
||||
break;
|
||||
case BPF_PROG_LOAD:
|
||||
err = bpf_prog_load(&attr, uattr);
|
||||
err = bpf_prog_load(&attr, uattr, size);
|
||||
break;
|
||||
case BPF_OBJ_PIN:
|
||||
err = bpf_obj_pin(&attr);
|
||||
@ -5036,7 +5104,7 @@ static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
|
||||
err = bpf_raw_tracepoint_open(&attr);
|
||||
break;
|
||||
case BPF_BTF_LOAD:
|
||||
err = bpf_btf_load(&attr, uattr);
|
||||
err = bpf_btf_load(&attr, uattr, size);
|
||||
break;
|
||||
case BPF_BTF_GET_FD_BY_ID:
|
||||
err = bpf_btf_get_fd_by_id(&attr);
|
||||
|
@ -9,7 +9,6 @@
|
||||
#include <linux/btf.h>
|
||||
#include <linux/rcupdate_trace.h>
|
||||
#include <linux/rcupdate_wait.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/static_call.h>
|
||||
#include <linux/bpf_verifier.h>
|
||||
#include <linux/bpf_lsm.h>
|
||||
@ -172,26 +171,6 @@ out:
|
||||
return tr;
|
||||
}
|
||||
|
||||
static int bpf_trampoline_module_get(struct bpf_trampoline *tr)
|
||||
{
|
||||
struct module *mod;
|
||||
int err = 0;
|
||||
|
||||
preempt_disable();
|
||||
mod = __module_text_address((unsigned long) tr->func.addr);
|
||||
if (mod && !try_module_get(mod))
|
||||
err = -ENOENT;
|
||||
preempt_enable();
|
||||
tr->mod = mod;
|
||||
return err;
|
||||
}
|
||||
|
||||
static void bpf_trampoline_module_put(struct bpf_trampoline *tr)
|
||||
{
|
||||
module_put(tr->mod);
|
||||
tr->mod = NULL;
|
||||
}
|
||||
|
||||
static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
|
||||
{
|
||||
void *ip = tr->func.addr;
|
||||
@ -202,8 +181,6 @@ static int unregister_fentry(struct bpf_trampoline *tr, void *old_addr)
|
||||
else
|
||||
ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, old_addr, NULL);
|
||||
|
||||
if (!ret)
|
||||
bpf_trampoline_module_put(tr);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -238,9 +215,6 @@ static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
|
||||
tr->func.ftrace_managed = true;
|
||||
}
|
||||
|
||||
if (bpf_trampoline_module_get(tr))
|
||||
return -ENOENT;
|
||||
|
||||
if (tr->func.ftrace_managed) {
|
||||
ftrace_set_filter_ip(tr->fops, (unsigned long)ip, 0, 1);
|
||||
ret = register_ftrace_direct_multi(tr->fops, (long)new_addr);
|
||||
@ -248,8 +222,6 @@ static int register_fentry(struct bpf_trampoline *tr, void *new_addr)
|
||||
ret = bpf_arch_text_poke(ip, BPF_MOD_CALL, NULL, new_addr);
|
||||
}
|
||||
|
||||
if (ret)
|
||||
bpf_trampoline_module_put(tr);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
[file diff suppressed because it is too large]
@ -1465,8 +1465,18 @@ static struct cgroup *current_cgns_cgroup_dfl(void)
|
||||
{
|
||||
struct css_set *cset;
|
||||
|
||||
cset = current->nsproxy->cgroup_ns->root_cset;
|
||||
return __cset_cgroup_from_root(cset, &cgrp_dfl_root);
|
||||
if (current->nsproxy) {
|
||||
cset = current->nsproxy->cgroup_ns->root_cset;
|
||||
return __cset_cgroup_from_root(cset, &cgrp_dfl_root);
|
||||
} else {
|
||||
/*
|
||||
* NOTE: This function may be called from bpf_cgroup_from_id()
|
||||
* on a task which has already passed exit_task_namespaces() and
|
||||
* nsproxy == NULL. Fall back to cgrp_dfl_root which will make all
|
||||
* cgroups visible for lookups.
|
||||
*/
|
||||
return &cgrp_dfl_root.cgrp;
|
||||
}
|
||||
}
|
||||
|
||||
/* look up cgroup associated with given css_set on the specified hierarchy */
|
||||
|
@ -246,7 +246,6 @@ static inline void kmemleak_load_module(const struct module *mod,
|
||||
void init_build_id(struct module *mod, const struct load_info *info);
|
||||
void layout_symtab(struct module *mod, struct load_info *info);
|
||||
void add_kallsyms(struct module *mod, const struct load_info *info);
|
||||
unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name);
|
||||
|
||||
static inline bool sect_empty(const Elf_Shdr *sect)
|
||||
{
|
||||
|
@ -442,7 +442,7 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
|
||||
}
|
||||
|
||||
/* Given a module and name of symbol, find and return the symbol's value */
|
||||
unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name)
|
||||
static unsigned long __find_kallsyms_symbol_value(struct module *mod, const char *name)
|
||||
{
|
||||
unsigned int i;
|
||||
struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
|
||||
@ -466,7 +466,7 @@ static unsigned long __module_kallsyms_lookup_name(const char *name)
|
||||
if (colon) {
|
||||
mod = find_module_all(name, colon - name, false);
|
||||
if (mod)
|
||||
return find_kallsyms_symbol_value(mod, colon + 1);
|
||||
return __find_kallsyms_symbol_value(mod, colon + 1);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -475,7 +475,7 @@ static unsigned long __module_kallsyms_lookup_name(const char *name)
|
||||
|
||||
if (mod->state == MODULE_STATE_UNFORMED)
|
||||
continue;
|
||||
ret = find_kallsyms_symbol_value(mod, name);
|
||||
ret = __find_kallsyms_symbol_value(mod, name);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
@ -494,6 +494,16 @@ unsigned long module_kallsyms_lookup_name(const char *name)
|
||||
return ret;
|
||||
}
|
||||
|
||||
unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name)
|
||||
{
|
||||
unsigned long ret;
|
||||
|
||||
preempt_disable();
|
||||
ret = __find_kallsyms_symbol_value(mod, name);
|
||||
preempt_enable();
|
||||
return ret;
|
||||
}
|
||||
|
||||
int module_kallsyms_on_each_symbol(const char *modname,
|
||||
int (*fn)(void *, const char *,
|
||||
struct module *, unsigned long),
|
||||
|
mm/maccess.c (16 changed lines)
@ -5,6 +5,7 @@
|
||||
#include <linux/export.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/tlb.h>
|
||||
|
||||
bool __weak copy_from_kernel_nofault_allowed(const void *unsafe_src,
|
||||
size_t size)
|
||||
@ -113,11 +114,16 @@ Efault:
|
||||
long copy_from_user_nofault(void *dst, const void __user *src, size_t size)
|
||||
{
|
||||
long ret = -EFAULT;
|
||||
if (access_ok(src, size)) {
|
||||
pagefault_disable();
|
||||
ret = __copy_from_user_inatomic(dst, src, size);
|
||||
pagefault_enable();
|
||||
}
|
||||
|
||||
if (!__access_ok(src, size))
|
||||
return ret;
|
||||
|
||||
if (!nmi_uaccess_okay())
|
||||
return ret;
|
||||
|
||||
pagefault_disable();
|
||||
ret = __copy_from_user_inatomic(dst, src, size);
|
||||
pagefault_enable();
|
||||
|
||||
if (ret)
|
||||
return -EFAULT;
|
||||
|
@ -173,7 +173,7 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
|
||||
return;
|
||||
}
|
||||
|
||||
if (is_vmalloc_addr(ptr)) {
|
||||
if (is_vmalloc_addr(ptr) && !pagefault_disabled()) {
|
||||
struct vmap_area *area = find_vmap_area(addr);
|
||||
|
||||
if (!area)
|
||||
|
@ -173,14 +173,11 @@ static int bpf_dummy_ops_check_member(const struct btf_type *t,
|
||||
|
||||
static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log,
|
||||
const struct bpf_reg_state *reg,
|
||||
int off, int size, enum bpf_access_type atype,
|
||||
u32 *next_btf_id,
|
||||
enum bpf_type_flag *flag)
|
||||
int off, int size)
|
||||
{
|
||||
const struct btf_type *state;
|
||||
const struct btf_type *t;
|
||||
s32 type_id;
|
||||
int err;
|
||||
|
||||
type_id = btf_find_by_name_kind(reg->btf, "bpf_dummy_ops_state",
|
||||
BTF_KIND_STRUCT);
|
||||
@ -194,11 +191,12 @@ static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log,
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
err = btf_struct_access(log, reg, off, size, atype, next_btf_id, flag);
|
||||
if (err < 0)
|
||||
return err;
|
||||
if (off + size > sizeof(struct bpf_dummy_ops_state)) {
|
||||
bpf_log(log, "write access at off %d with size %d\n", off, size);
|
||||
return -EACCES;
|
||||
}
|
||||
|
||||
return atype == BPF_READ ? err : NOT_INIT;
|
||||
return NOT_INIT;
|
||||
}
|
||||
|
||||
static const struct bpf_verifier_ops bpf_dummy_verifier_ops = {
|
||||
|
@ -215,6 +215,16 @@ static void xdp_test_run_teardown(struct xdp_test_data *xdp)
|
||||
kfree(xdp->skbs);
|
||||
}
|
||||
|
||||
static bool frame_was_changed(const struct xdp_page_head *head)
|
||||
{
|
||||
/* xdp_scrub_frame() zeroes the data pointer, flags is the last field,
|
||||
* i.e. has the highest chances to be overwritten. If those two are
|
||||
* untouched, it's most likely safe to skip the context reset.
|
||||
*/
|
||||
return head->frame->data != head->orig_ctx.data ||
|
||||
head->frame->flags != head->orig_ctx.flags;
|
||||
}
|
||||
|
||||
static bool ctx_was_changed(struct xdp_page_head *head)
|
||||
{
|
||||
return head->orig_ctx.data != head->ctx.data ||
|
||||
@ -224,7 +234,7 @@ static bool ctx_was_changed(struct xdp_page_head *head)
|
||||
|
||||
static void reset_ctx(struct xdp_page_head *head)
|
||||
{
|
||||
if (likely(!ctx_was_changed(head)))
|
||||
if (likely(!frame_was_changed(head) && !ctx_was_changed(head)))
|
||||
return;
|
||||
|
||||
head->ctx.data = head->orig_ctx.data;
|
||||
@ -538,6 +548,11 @@ int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
|
||||
return (long)arg->a;
|
||||
}
|
||||
|
||||
__bpf_kfunc u32 bpf_fentry_test9(u32 *a)
|
||||
{
|
||||
return *a;
|
||||
}
|
||||
|
||||
__bpf_kfunc int bpf_modify_return_test(int a, int *b)
|
||||
{
|
||||
*b += 1;
|
||||
@ -567,6 +582,11 @@ long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
|
||||
return (long)a + (long)b + (long)c + d;
|
||||
}
|
||||
|
||||
int noinline bpf_fentry_shadow_test(int a)
|
||||
{
|
||||
return a + 1;
|
||||
}
|
||||
|
||||
struct prog_test_member1 {
|
||||
int a;
|
||||
};
|
||||
@ -598,6 +618,11 @@ bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
|
||||
return &prog_test_struct;
|
||||
}
|
||||
|
||||
__bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p)
|
||||
{
|
||||
WARN_ON_ONCE(1);
|
||||
}
|
||||
|
||||
__bpf_kfunc struct prog_test_member *
|
||||
bpf_kfunc_call_memb_acquire(void)
|
||||
{
|
||||
@ -607,9 +632,6 @@ bpf_kfunc_call_memb_acquire(void)
|
||||
|
||||
__bpf_kfunc void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
|
||||
{
|
||||
if (!p)
|
||||
return;
|
||||
|
||||
refcount_dec(&p->cnt);
|
||||
}
|
||||
|
||||
@ -795,6 +817,7 @@ BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
|
||||
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
|
||||
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
|
||||
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
|
||||
BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset)
|
||||
BTF_SET8_END(test_sk_check_kfunc_ids)
|
||||
|
||||
static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
|
||||
@ -844,7 +867,8 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog,
|
||||
bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
|
||||
bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
|
||||
bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
|
||||
bpf_fentry_test8(&arg) != 0)
|
||||
bpf_fentry_test8(&arg) != 0 ||
|
||||
bpf_fentry_test9(&retval) != 0)
|
||||
goto out;
|
||||
break;
|
||||
case BPF_MODIFY_RETURN:
|
||||
|
@ -40,7 +40,7 @@ static int bpf_sk_storage_del(struct sock *sk, struct bpf_map *map)
|
||||
if (!sdata)
|
||||
return -ENOENT;
|
||||
|
||||
bpf_selem_unlink(SELEM(sdata), true);
|
||||
bpf_selem_unlink(SELEM(sdata), false);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -49,7 +49,6 @@ static int bpf_sk_storage_del(struct sock *sk, struct bpf_map *map)
|
||||
void bpf_sk_storage_free(struct sock *sk)
|
||||
{
|
||||
struct bpf_local_storage *sk_storage;
|
||||
bool free_sk_storage = false;
|
||||
|
||||
rcu_read_lock();
|
||||
sk_storage = rcu_dereference(sk->sk_bpf_storage);
|
||||
@ -58,13 +57,8 @@ void bpf_sk_storage_free(struct sock *sk)
|
||||
return;
|
||||
}
|
||||
|
||||
raw_spin_lock_bh(&sk_storage->lock);
|
||||
free_sk_storage = bpf_local_storage_unlink_nolock(sk_storage);
|
||||
raw_spin_unlock_bh(&sk_storage->lock);
|
||||
bpf_local_storage_destroy(sk_storage);
|
||||
rcu_read_unlock();
|
||||
|
||||
if (free_sk_storage)
|
||||
kfree_rcu(sk_storage, rcu);
|
||||
}
|
||||
|
||||
static void bpf_sk_storage_map_free(struct bpf_map *map)
|
||||
@ -74,7 +68,7 @@ static void bpf_sk_storage_map_free(struct bpf_map *map)
|
||||
|
||||
static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
|
||||
{
|
||||
return bpf_local_storage_map_alloc(attr, &sk_cache);
|
||||
return bpf_local_storage_map_alloc(attr, &sk_cache, false);
|
||||
}
|
||||
|
||||
static int notsupp_get_next_key(struct bpf_map *map, void *key,
|
||||
@ -100,8 +94,8 @@ static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
static int bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
|
||||
void *value, u64 map_flags)
|
||||
static long bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
|
||||
void *value, u64 map_flags)
|
||||
{
|
||||
struct bpf_local_storage_data *sdata;
|
||||
struct socket *sock;
|
||||
@ -120,7 +114,7 @@ static int bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
|
||||
return err;
|
||||
}
|
||||
|
||||
static int bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
|
||||
static long bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
|
||||
{
|
||||
struct socket *sock;
|
||||
int fd, err;
|
||||
@ -203,7 +197,7 @@ int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
|
||||
} else {
|
||||
ret = bpf_local_storage_alloc(newsk, smap, copy_selem, GFP_ATOMIC);
|
||||
if (ret) {
|
||||
kfree(copy_selem);
|
||||
bpf_selem_free(copy_selem, smap, true);
|
||||
atomic_sub(smap->elem_size,
|
||||
&newsk->sk_omem_alloc);
|
||||
bpf_map_put(map);
|
||||
@ -418,7 +412,7 @@ const struct bpf_func_proto bpf_sk_storage_get_tracing_proto = {
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
|
||||
.arg1_type = ARG_CONST_MAP_PTR,
|
||||
.arg2_type = ARG_PTR_TO_BTF_ID,
|
||||
.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
|
||||
.arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
|
||||
.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
|
||||
.arg4_type = ARG_ANYTHING,
|
||||
@ -430,7 +424,7 @@ const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto = {
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_INTEGER,
|
||||
.arg1_type = ARG_CONST_MAP_PTR,
|
||||
.arg2_type = ARG_PTR_TO_BTF_ID,
|
||||
.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
|
||||
.arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
|
||||
.allowed = bpf_sk_storage_tracing_allowed,
|
||||
};
|
||||
|
@ -5002,7 +5002,7 @@ const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto = {
|
||||
.func = bpf_get_socket_ptr_cookie,
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_INTEGER,
|
||||
.arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
|
||||
.arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON | PTR_MAYBE_NULL,
|
||||
};
|
||||
|
||||
BPF_CALL_1(bpf_get_socket_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx)
|
||||
@ -8746,23 +8746,18 @@ EXPORT_SYMBOL_GPL(nf_conn_btf_access_lock);
|
||||
|
||||
int (*nfct_btf_struct_access)(struct bpf_verifier_log *log,
|
||||
const struct bpf_reg_state *reg,
|
||||
int off, int size, enum bpf_access_type atype,
|
||||
u32 *next_btf_id, enum bpf_type_flag *flag);
|
||||
int off, int size);
|
||||
EXPORT_SYMBOL_GPL(nfct_btf_struct_access);
|
||||
|
||||
static int tc_cls_act_btf_struct_access(struct bpf_verifier_log *log,
|
||||
const struct bpf_reg_state *reg,
|
||||
int off, int size, enum bpf_access_type atype,
|
||||
u32 *next_btf_id, enum bpf_type_flag *flag)
|
||||
int off, int size)
|
||||
{
|
||||
int ret = -EACCES;
|
||||
|
||||
if (atype == BPF_READ)
|
||||
return btf_struct_access(log, reg, off, size, atype, next_btf_id, flag);
|
||||
|
||||
mutex_lock(&nf_conn_btf_access_lock);
|
||||
if (nfct_btf_struct_access)
|
||||
ret = nfct_btf_struct_access(log, reg, off, size, atype, next_btf_id, flag);
|
||||
ret = nfct_btf_struct_access(log, reg, off, size);
|
||||
mutex_unlock(&nf_conn_btf_access_lock);
|
||||
|
||||
return ret;
|
||||
@ -8829,17 +8824,13 @@ EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
|
||||
|
||||
static int xdp_btf_struct_access(struct bpf_verifier_log *log,
|
||||
const struct bpf_reg_state *reg,
|
||||
int off, int size, enum bpf_access_type atype,
|
||||
u32 *next_btf_id, enum bpf_type_flag *flag)
|
||||
int off, int size)
|
||||
{
|
||||
int ret = -EACCES;
|
||||
|
||||
if (atype == BPF_READ)
|
||||
return btf_struct_access(log, reg, off, size, atype, next_btf_id, flag);
|
||||
|
||||
mutex_lock(&nf_conn_btf_access_lock);
|
||||
if (nfct_btf_struct_access)
|
||||
ret = nfct_btf_struct_access(log, reg, off, size, atype, next_btf_id, flag);
|
||||
ret = nfct_btf_struct_access(log, reg, off, size);
|
||||
mutex_unlock(&nf_conn_btf_access_lock);
|
||||
|
||||
return ret;
|
||||
@ -9189,7 +9180,7 @@ static struct bpf_insn *bpf_convert_tstamp_type_read(const struct bpf_insn *si,
|
||||
__u8 tmp_reg = BPF_REG_AX;
|
||||
|
||||
*insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg,
|
||||
PKT_VLAN_PRESENT_OFFSET);
|
||||
SKB_BF_MONO_TC_OFFSET);
|
||||
*insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg,
|
||||
SKB_MONO_DELIVERY_TIME_MASK, 2);
|
||||
*insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_TSTAMP_UNSPEC);
|
||||
@ -9236,7 +9227,7 @@ static struct bpf_insn *bpf_convert_tstamp_read(const struct bpf_prog *prog,
|
||||
/* AX is needed because src_reg and dst_reg could be the same */
|
||||
__u8 tmp_reg = BPF_REG_AX;
|
||||
|
||||
*insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, PKT_VLAN_PRESENT_OFFSET);
|
||||
*insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, SKB_BF_MONO_TC_OFFSET);
|
||||
*insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg,
|
||||
TC_AT_INGRESS_MASK | SKB_MONO_DELIVERY_TIME_MASK);
|
||||
*insn++ = BPF_JMP32_IMM(BPF_JNE, tmp_reg,
|
||||
@ -9271,14 +9262,14 @@ static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog,
|
||||
if (!prog->tstamp_type_access) {
|
||||
__u8 tmp_reg = BPF_REG_AX;
|
||||
|
||||
*insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, PKT_VLAN_PRESENT_OFFSET);
|
||||
*insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, SKB_BF_MONO_TC_OFFSET);
|
||||
/* Writing __sk_buff->tstamp as ingress, goto <clear> */
|
||||
*insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg, TC_AT_INGRESS_MASK, 1);
|
||||
/* goto <store> */
|
||||
*insn++ = BPF_JMP_A(2);
|
||||
/* <clear>: mono_delivery_time */
|
||||
*insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, ~SKB_MONO_DELIVERY_TIME_MASK);
|
||||
*insn++ = BPF_STX_MEM(BPF_B, skb_reg, tmp_reg, PKT_VLAN_PRESENT_OFFSET);
|
||||
*insn++ = BPF_STX_MEM(BPF_B, skb_reg, tmp_reg, SKB_BF_MONO_TC_OFFSET);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -437,7 +437,7 @@ static void sock_map_delete_from_link(struct bpf_map *map, struct sock *sk,
|
||||
__sock_map_delete(stab, sk, link_raw);
|
||||
}
|
||||
|
||||
static int sock_map_delete_elem(struct bpf_map *map, void *key)
|
||||
static long sock_map_delete_elem(struct bpf_map *map, void *key)
|
||||
{
|
||||
struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
|
||||
u32 i = *(u32 *)key;
|
||||
@ -587,8 +587,8 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int sock_map_update_elem(struct bpf_map *map, void *key,
|
||||
void *value, u64 flags)
|
||||
static long sock_map_update_elem(struct bpf_map *map, void *key,
|
||||
void *value, u64 flags)
|
||||
{
|
||||
struct sock *sk = (struct sock *)value;
|
||||
int ret;
|
||||
@ -925,7 +925,7 @@ static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
|
||||
raw_spin_unlock_bh(&bucket->lock);
|
||||
}
|
||||
|
||||
static int sock_hash_delete_elem(struct bpf_map *map, void *key)
|
||||
static long sock_hash_delete_elem(struct bpf_map *map, void *key)
|
||||
{
|
||||
struct bpf_shtab *htab = container_of(map, struct bpf_shtab, map);
|
||||
u32 hash, key_size = map->key_size;
|
||||
|
@ -531,21 +531,6 @@ out:
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(xdp_return_buff);
|
||||
|
||||
/* Only called for MEM_TYPE_PAGE_POOL see xdp.h */
|
||||
void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
|
||||
{
|
||||
struct xdp_mem_allocator *xa;
|
||||
struct page *page;
|
||||
|
||||
rcu_read_lock();
|
||||
xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
|
||||
page = virt_to_head_page(data);
|
||||
if (xa)
|
||||
page_pool_release_page(xa->page_pool, page);
|
||||
rcu_read_unlock();
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__xdp_release_frame);
|
||||
|
||||
void xdp_attachment_setup(struct xdp_attachment_info *info,
|
||||
struct netdev_bpf *bpf)
|
||||
{
|
||||
@ -658,8 +643,8 @@ struct sk_buff *__xdp_build_skb_from_frame(struct xdp_frame *xdpf,
|
||||
* - RX ring dev queue index (skb_record_rx_queue)
|
||||
*/
|
||||
|
||||
/* Until page_pool get SKB return path, release DMA here */
|
||||
xdp_release_frame(xdpf);
|
||||
if (xdpf->mem.type == MEM_TYPE_PAGE_POOL)
|
||||
skb_mark_for_recycle(skb);
|
||||
|
||||
/* Allow SKB to reuse area used by xdp_frame */
|
||||
xdp_scrub_frame(xdpf);
|
||||
|
@ -26,7 +26,7 @@ obj-$(CONFIG_IP_MROUTE) += ipmr.o
|
||||
obj-$(CONFIG_IP_MROUTE_COMMON) += ipmr_base.o
|
||||
obj-$(CONFIG_NET_IPIP) += ipip.o
|
||||
gre-y := gre_demux.o
|
||||
fou-y := fou_core.o fou_nl.o
|
||||
fou-y := fou_core.o fou_nl.o fou_bpf.o
|
||||
obj-$(CONFIG_NET_FOU) += fou.o
|
||||
obj-$(CONFIG_NET_IPGRE_DEMUX) += gre.o
|
||||
obj-$(CONFIG_NET_IPGRE) += ip_gre.o
|
||||
|
@ -72,15 +72,11 @@ static bool bpf_tcp_ca_is_valid_access(int off, int size,
|
||||
|
||||
static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
|
||||
const struct bpf_reg_state *reg,
|
||||
int off, int size, enum bpf_access_type atype,
|
||||
u32 *next_btf_id, enum bpf_type_flag *flag)
|
||||
int off, int size)
|
||||
{
|
||||
const struct btf_type *t;
|
||||
size_t end;
|
||||
|
||||
if (atype == BPF_READ)
|
||||
return btf_struct_access(log, reg, off, size, atype, next_btf_id, flag);
|
||||
|
||||
t = btf_type_by_id(reg->btf, reg->btf_id);
|
||||
if (t != tcp_sock_type) {
|
||||
bpf_log(log, "only read is supported\n");
|
||||
@ -113,6 +109,9 @@ static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
|
||||
case offsetof(struct tcp_sock, ecn_flags):
|
||||
end = offsetofend(struct tcp_sock, ecn_flags);
|
||||
break;
|
||||
case offsetof(struct tcp_sock, app_limited):
|
||||
end = offsetofend(struct tcp_sock, app_limited);
|
||||
break;
|
||||
default:
|
||||
bpf_log(log, "no write support to tcp_sock at off %d\n", off);
|
||||
return -EACCES;
|
||||
@ -239,8 +238,6 @@ static int bpf_tcp_ca_init_member(const struct btf_type *t,
|
||||
if (bpf_obj_name_cpy(tcp_ca->name, utcp_ca->name,
|
||||
sizeof(tcp_ca->name)) <= 0)
|
||||
return -EINVAL;
|
||||
if (tcp_ca_find(utcp_ca->name))
|
||||
return -EEXIST;
|
||||
return 1;
|
||||
}
|
||||
|
||||
@ -266,13 +263,25 @@ static void bpf_tcp_ca_unreg(void *kdata)
|
||||
tcp_unregister_congestion_control(kdata);
|
||||
}
|
||||
|
||||
static int bpf_tcp_ca_update(void *kdata, void *old_kdata)
|
||||
{
|
||||
return tcp_update_congestion_control(kdata, old_kdata);
|
||||
}
|
||||
|
||||
static int bpf_tcp_ca_validate(void *kdata)
|
||||
{
|
||||
return tcp_validate_congestion_control(kdata);
|
||||
}
|
||||
|
||||
struct bpf_struct_ops bpf_tcp_congestion_ops = {
|
||||
.verifier_ops = &bpf_tcp_ca_verifier_ops,
|
||||
.reg = bpf_tcp_ca_reg,
|
||||
.unreg = bpf_tcp_ca_unreg,
|
||||
.update = bpf_tcp_ca_update,
|
||||
.check_member = bpf_tcp_ca_check_member,
|
||||
.init_member = bpf_tcp_ca_init_member,
|
||||
.init = bpf_tcp_ca_init,
|
||||
.validate = bpf_tcp_ca_validate,
|
||||
.name = "tcp_congestion_ops",
|
||||
};
|
||||
|
||||
|
net/ipv4/fou_bpf.c (new file, 119 lines)
@@ -0,0 +1,119 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/* Unstable Fou Helpers for TC-BPF hook
|
||||
*
|
||||
* These are called from SCHED_CLS BPF programs. Note that it is
|
||||
* allowed to break compatibility for these functions since the interface they
|
||||
* are exposed through to BPF programs is explicitly unstable.
|
||||
*/
|
||||
|
||||
#include <linux/bpf.h>
|
||||
#include <linux/btf_ids.h>
|
||||
|
||||
#include <net/dst_metadata.h>
|
||||
#include <net/fou.h>
|
||||
|
||||
struct bpf_fou_encap {
|
||||
__be16 sport;
|
||||
__be16 dport;
|
||||
};
|
||||
|
||||
enum bpf_fou_encap_type {
|
||||
FOU_BPF_ENCAP_FOU,
|
||||
FOU_BPF_ENCAP_GUE,
|
||||
};
|
||||
|
||||
__diag_push();
|
||||
__diag_ignore_all("-Wmissing-prototypes",
|
||||
"Global functions as their definitions will be in BTF");
|
||||
|
||||
/* bpf_skb_set_fou_encap - Set FOU encap parameters
|
||||
*
|
||||
* This function allows for using GUE or FOU encapsulation together with an
|
||||
* ipip device in collect-metadata mode.
|
||||
*
|
||||
* It is meant to be used in BPF tc-hooks and after a call to the
|
||||
* bpf_skb_set_tunnel_key helper, responsible for setting IP addresses.
|
||||
*
|
||||
* Parameters:
|
||||
* @skb_ctx Pointer to ctx (__sk_buff) in TC program. Cannot be NULL
|
||||
* @encap Pointer to a `struct bpf_fou_encap` storing UDP src and
|
||||
* dst ports. If sport is set to 0 the kernel will auto-assign a
|
||||
* port. This is similar to using `encap-sport auto`.
|
||||
* Cannot be NULL
|
||||
* @type Encapsulation type for the packet. Their definitions are
|
||||
* specified in `enum bpf_fou_encap_type`
|
||||
*/
|
||||
__bpf_kfunc int bpf_skb_set_fou_encap(struct __sk_buff *skb_ctx,
|
||||
struct bpf_fou_encap *encap, int type)
|
||||
{
|
||||
struct sk_buff *skb = (struct sk_buff *)skb_ctx;
|
||||
struct ip_tunnel_info *info = skb_tunnel_info(skb);
|
||||
|
||||
if (unlikely(!encap))
|
||||
return -EINVAL;
|
||||
|
||||
if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX)))
|
||||
return -EINVAL;
|
||||
|
||||
switch (type) {
|
||||
case FOU_BPF_ENCAP_FOU:
|
||||
info->encap.type = TUNNEL_ENCAP_FOU;
|
||||
break;
|
||||
case FOU_BPF_ENCAP_GUE:
|
||||
info->encap.type = TUNNEL_ENCAP_GUE;
|
||||
break;
|
||||
default:
|
||||
info->encap.type = TUNNEL_ENCAP_NONE;
|
||||
}
|
||||
|
||||
if (info->key.tun_flags & TUNNEL_CSUM)
|
||||
info->encap.flags |= TUNNEL_ENCAP_FLAG_CSUM;
|
||||
|
||||
info->encap.sport = encap->sport;
|
||||
info->encap.dport = encap->dport;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* bpf_skb_get_fou_encap - Get FOU encap parameters
|
||||
*
|
||||
* This function allows for reading encap metadata from a packet received
|
||||
* on an ipip device in collect-metadata mode.
|
||||
*
|
||||
* Parameters:
|
||||
* @skb_ctx Pointer to ctx (__sk_buff) in TC program. Cannot be NULL
|
||||
* @encap Pointer to a struct bpf_fou_encap storing UDP source and
|
||||
* destination port. Cannot be NULL
|
||||
*/
|
||||
__bpf_kfunc int bpf_skb_get_fou_encap(struct __sk_buff *skb_ctx,
|
||||
struct bpf_fou_encap *encap)
|
||||
{
|
||||
struct sk_buff *skb = (struct sk_buff *)skb_ctx;
|
||||
struct ip_tunnel_info *info = skb_tunnel_info(skb);
|
||||
|
||||
if (unlikely(!info))
|
||||
return -EINVAL;
|
||||
|
||||
encap->sport = info->encap.sport;
|
||||
encap->dport = info->encap.dport;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
__diag_pop()
|
||||
|
||||
BTF_SET8_START(fou_kfunc_set)
|
||||
BTF_ID_FLAGS(func, bpf_skb_set_fou_encap)
|
||||
BTF_ID_FLAGS(func, bpf_skb_get_fou_encap)
|
||||
BTF_SET8_END(fou_kfunc_set)
|
||||
|
||||
static const struct btf_kfunc_id_set fou_bpf_kfunc_set = {
|
||||
.owner = THIS_MODULE,
|
||||
.set = &fou_kfunc_set,
|
||||
};
|
||||
|
||||
int register_fou_bpf(void)
|
||||
{
|
||||
return register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS,
|
||||
&fou_bpf_kfunc_set);
|
||||
}
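As a usage illustration (not part of this series), a SCHED_CLS program would drive these kfuncs roughly as below. This is a hedged sketch: the section name, peer address and UDP port are made up, and it assumes vmlinux.h exposes struct bpf_fou_encap and enum bpf_fou_encap_type from kernel BTF.

/* SPDX-License-Identifier: GPL-2.0 */
/* Illustrative only: request GUE encap from a tc/BPF program on a
 * collect_md ipip device.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

#define TC_ACT_OK	0
#define TC_ACT_SHOT	2

int bpf_skb_set_fou_encap(struct __sk_buff *skb_ctx,
			  struct bpf_fou_encap *encap, int type) __ksym;

SEC("tc")
int ipip_gue_set_encap(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {
		.remote_ipv4 = 0x0a000002,	/* 10.0.0.2, example peer */
	};
	struct bpf_fou_encap encap = {
		.sport = 0,			/* 0 == kernel picks, like "encap-sport auto" */
		.dport = bpf_htons(5555),	/* example GUE port */
	};

	/* Set the outer IP addresses first, then the FOU/GUE ports. */
	if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0) < 0)
		return TC_ACT_SHOT;

	if (bpf_skb_set_fou_encap(skb, &encap, FOU_BPF_ENCAP_GUE) < 0)
		return TC_ACT_SHOT;

	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";

On the receive path, bpf_skb_get_fou_encap() can be called the same way from a tc hook to read the outer UDP ports recorded by ip_tunnel_md_udp_encap() further down in this series.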
@@ -1236,10 +1236,15 @@ static int __init fou_init(void)
	if (ret < 0)
		goto unregister;

	ret = register_fou_bpf();
	if (ret < 0)
		goto kfunc_failed;

	ret = ip_tunnel_encap_add_fou_ops();
	if (ret == 0)
		return 0;

kfunc_failed:
	genl_unregister_family(&fou_nl_family);
unregister:
	unregister_pernet_device(&fou_net_ops);
@@ -359,6 +359,20 @@ err_dev_set_mtu:
	return ERR_PTR(err);
}

void ip_tunnel_md_udp_encap(struct sk_buff *skb, struct ip_tunnel_info *info)
{
	const struct iphdr *iph = ip_hdr(skb);
	const struct udphdr *udph;

	if (iph->protocol != IPPROTO_UDP)
		return;

	udph = (struct udphdr *)((__u8 *)iph + (iph->ihl << 2));
	info->encap.sport = udph->source;
	info->encap.dport = udph->dest;
}
EXPORT_SYMBOL(ip_tunnel_md_udp_encap);

int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
		  const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
		  bool log_ecn_error)
@@ -572,7 +586,11 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
			    tunnel_id_to_key32(key->tun_id), RT_TOS(tos),
			    dev_net(dev), 0, skb->mark, skb_get_hash(skb),
			    key->flow_flags);
	if (tunnel->encap.type != TUNNEL_ENCAP_NONE)

	if (!tunnel_hlen)
		tunnel_hlen = ip_encap_hlen(&tun_info->encap);

	if (ip_tunnel_encap(skb, &tun_info->encap, &proto, &fl4) < 0)
		goto tx_error;

	use_cache = ip_tunnel_dst_cache_usable(skb, tun_info);
@@ -732,7 +750,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
			    dev_net(dev), tunnel->parms.link,
			    tunnel->fwmark, skb_get_hash(skb), 0);

	if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0)
	if (ip_tunnel_encap(skb, &tunnel->encap, &protocol, &fl4) < 0)
		goto tx_error;

	if (connected && md) {
@@ -241,6 +241,7 @@ static int ipip_tunnel_rcv(struct sk_buff *skb, u8 ipproto)
			tun_dst = ip_tun_rx_dst(skb, 0, 0, 0);
			if (!tun_dst)
				return 0;
			ip_tunnel_md_udp_encap(skb, &tun_dst->u.tun_info);
		}
		skb_reset_mac_header(skb);
@@ -75,14 +75,8 @@ struct tcp_congestion_ops *tcp_ca_find_key(u32 key)
	return NULL;
}

/*
 * Attach new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
int tcp_validate_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret = 0;

	/* all algorithms must implement these */
	if (!ca->ssthresh || !ca->undo_cwnd ||
	    !(ca->cong_avoid || ca->cong_control)) {
@@ -90,6 +84,20 @@ int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
		return -EINVAL;
	}

	return 0;
}

/* Attach new congestion control algorithm to the list
 * of available options.
 */
int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
{
	int ret;

	ret = tcp_validate_congestion_control(ca);
	if (ret)
		return ret;

	ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));

	spin_lock(&tcp_cong_list_lock);
@@ -130,6 +138,50 @@ void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
}
EXPORT_SYMBOL_GPL(tcp_unregister_congestion_control);

/* Replace a registered old ca with a new one.
 *
 * The new ca must have the same name as the old one, that has been
 * registered.
 */
int tcp_update_congestion_control(struct tcp_congestion_ops *ca, struct tcp_congestion_ops *old_ca)
{
	struct tcp_congestion_ops *existing;
	int ret;

	ret = tcp_validate_congestion_control(ca);
	if (ret)
		return ret;

	ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));

	spin_lock(&tcp_cong_list_lock);
	existing = tcp_ca_find_key(old_ca->key);
	if (ca->key == TCP_CA_UNSPEC || !existing || strcmp(existing->name, ca->name)) {
		pr_notice("%s not registered or non-unique key\n",
			  ca->name);
		ret = -EINVAL;
	} else if (existing != old_ca) {
		pr_notice("invalid old congestion control algorithm to replace\n");
		ret = -EINVAL;
	} else {
		/* Add the new one before removing the old one to keep
		 * one implementation available all the time.
		 */
		list_add_tail_rcu(&ca->list, &tcp_cong_list);
		list_del_rcu(&existing->list);
		pr_debug("%s updated\n", ca->name);
	}
	spin_unlock(&tcp_cong_list_lock);

	/* Wait for outstanding readers to complete before the
	 * module or struct_ops gets removed entirely.
	 */
	if (!ret)
		synchronize_rcu();

	return ret;
}

u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca)
{
	const struct tcp_congestion_ops *ca;
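The replace-under-the-same-name semantics above are what BPF struct_ops links rely on when switching TCP congestion-control implementations (item 14 in this pull). A rough user-space sketch of driving the switch through libbpf's bpf_link_update(); it is not taken from this series' selftests, and link_fd, new_map_fd and old_map_fd are assumed to come from an already attached struct_ops link and two loaded struct_ops maps registering the same cc name:

/* Hypothetical helper, illustrative only. */
#include <bpf/bpf.h>

static int switch_cong_ctl(int link_fd, int new_map_fd, int old_map_fd)
{
	/* old_map_fd is only checked when BPF_F_REPLACE is set. */
	LIBBPF_OPTS(bpf_link_update_opts, opts,
		.flags = BPF_F_REPLACE,
		.old_map_fd = old_map_fd,
	);

	/* For struct_ops links the second argument lands in the new_map_fd
	 * member of the bpf_attr union extended later in this series (it
	 * aliases new_prog_fd).
	 */
	return bpf_link_update(link_fd, new_map_fd, &opts);
}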
@@ -1024,7 +1024,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
	ttl = iph6->hop_limit;
	tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));

	if (ip_tunnel_encap(skb, tunnel, &protocol, &fl4) < 0) {
	if (ip_tunnel_encap(skb, &tunnel->encap, &protocol, &fl4) < 0) {
		ip_rt_put(rt);
		goto tx_error;
	}
@ -192,8 +192,7 @@ BTF_ID(struct, nf_conn___init)
|
||||
/* Check writes into `struct nf_conn` */
|
||||
static int _nf_conntrack_btf_struct_access(struct bpf_verifier_log *log,
|
||||
const struct bpf_reg_state *reg,
|
||||
int off, int size, enum bpf_access_type atype,
|
||||
u32 *next_btf_id, enum bpf_type_flag *flag)
|
||||
int off, int size)
|
||||
{
|
||||
const struct btf_type *ncit, *nct, *t;
|
||||
size_t end;
|
||||
@ -401,8 +400,6 @@ __bpf_kfunc struct nf_conn *bpf_ct_insert_entry(struct nf_conn___init *nfct_i)
|
||||
*/
|
||||
__bpf_kfunc void bpf_ct_release(struct nf_conn *nfct)
|
||||
{
|
||||
if (!nfct)
|
||||
return;
|
||||
nf_ct_put(nfct);
|
||||
}
|
||||
|
||||
|
@ -1301,9 +1301,10 @@ static int xsk_mmap(struct file *file, struct socket *sock,
|
||||
loff_t offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
|
||||
unsigned long size = vma->vm_end - vma->vm_start;
|
||||
struct xdp_sock *xs = xdp_sk(sock->sk);
|
||||
int state = READ_ONCE(xs->state);
|
||||
struct xsk_queue *q = NULL;
|
||||
|
||||
if (READ_ONCE(xs->state) != XSK_READY)
|
||||
if (state != XSK_READY && state != XSK_BOUND)
|
||||
return -EBUSY;
|
||||
|
||||
if (offset == XDP_PGOFF_RX_RING) {
|
||||
@ -1314,9 +1315,11 @@ static int xsk_mmap(struct file *file, struct socket *sock,
|
||||
/* Matches the smp_wmb() in XDP_UMEM_REG */
|
||||
smp_rmb();
|
||||
if (offset == XDP_UMEM_PGOFF_FILL_RING)
|
||||
q = READ_ONCE(xs->fq_tmp);
|
||||
q = state == XSK_READY ? READ_ONCE(xs->fq_tmp) :
|
||||
READ_ONCE(xs->pool->fq);
|
||||
else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING)
|
||||
q = READ_ONCE(xs->cq_tmp);
|
||||
q = state == XSK_READY ? READ_ONCE(xs->cq_tmp) :
|
||||
READ_ONCE(xs->pool->cq);
|
||||
}
|
||||
|
||||
if (!q)
|
||||
|
@ -133,16 +133,12 @@ static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
|
||||
static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
|
||||
struct xdp_desc *desc)
|
||||
{
|
||||
u64 chunk, chunk_end;
|
||||
u64 offset = desc->addr & (pool->chunk_size - 1);
|
||||
|
||||
chunk = xp_aligned_extract_addr(pool, desc->addr);
|
||||
if (likely(desc->len)) {
|
||||
chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len - 1);
|
||||
if (chunk != chunk_end)
|
||||
return false;
|
||||
}
|
||||
if (offset + desc->len > pool->chunk_size)
|
||||
return false;
|
||||
|
||||
if (chunk >= pool->addrs_cnt)
|
||||
if (desc->addr >= pool->addrs_cnt)
|
||||
return false;
|
||||
|
||||
if (desc->options)
|
||||
@ -153,15 +149,12 @@ static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
|
||||
static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
|
||||
struct xdp_desc *desc)
|
||||
{
|
||||
u64 addr, base_addr;
|
||||
|
||||
base_addr = xp_unaligned_extract_addr(desc->addr);
|
||||
addr = xp_unaligned_add_offset_to_addr(desc->addr);
|
||||
u64 addr = xp_unaligned_add_offset_to_addr(desc->addr);
|
||||
|
||||
if (desc->len > pool->chunk_size)
|
||||
return false;
|
||||
|
||||
if (base_addr >= pool->addrs_cnt || addr >= pool->addrs_cnt ||
|
||||
if (addr >= pool->addrs_cnt || addr + desc->len > pool->addrs_cnt ||
|
||||
xp_desc_crosses_non_contig_pg(pool, addr, desc->len))
|
||||
return false;
|
||||
|
||||
|
@ -162,8 +162,8 @@ static void *xsk_map_lookup_elem_sys_only(struct bpf_map *map, void *key)
|
||||
return ERR_PTR(-EOPNOTSUPP);
|
||||
}
|
||||
|
||||
static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
|
||||
u64 map_flags)
|
||||
static long xsk_map_update_elem(struct bpf_map *map, void *key, void *value,
|
||||
u64 map_flags)
|
||||
{
|
||||
struct xsk_map *m = container_of(map, struct xsk_map, map);
|
||||
struct xdp_sock __rcu **map_entry;
|
||||
@ -223,7 +223,7 @@ out:
|
||||
return err;
|
||||
}
|
||||
|
||||
static int xsk_map_delete_elem(struct bpf_map *map, void *key)
|
||||
static long xsk_map_delete_elem(struct bpf_map *map, void *key)
|
||||
{
|
||||
struct xsk_map *m = container_of(map, struct xsk_map, map);
|
||||
struct xdp_sock __rcu **map_entry;
|
||||
@ -243,7 +243,7 @@ static int xsk_map_delete_elem(struct bpf_map *map, void *key)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int xsk_map_redirect(struct bpf_map *map, u64 index, u64 flags)
|
||||
static long xsk_map_redirect(struct bpf_map *map, u64 index, u64 flags)
|
||||
{
|
||||
return __bpf_xdp_redirect_map(map, index, flags, 0,
|
||||
__xsk_map_lookup_elem);
|
||||
|
@ -76,8 +76,8 @@ struct {
|
||||
|
||||
/*
|
||||
* The trace events for cpu_idle and cpu_frequency are taken from:
|
||||
* /sys/kernel/debug/tracing/events/power/cpu_idle/format
|
||||
* /sys/kernel/debug/tracing/events/power/cpu_frequency/format
|
||||
* /sys/kernel/tracing/events/power/cpu_idle/format
|
||||
* /sys/kernel/tracing/events/power/cpu_frequency/format
|
||||
*
|
||||
* These two events have same format, so define one common structure.
|
||||
*/
|
||||
|
@ -65,7 +65,7 @@ static void Usage(void);
|
||||
static void read_trace_pipe2(void);
|
||||
static void do_error(char *msg, bool errno_flag);
|
||||
|
||||
#define DEBUGFS "/sys/kernel/debug/tracing/"
|
||||
#define TRACEFS "/sys/kernel/tracing/"
|
||||
|
||||
static struct bpf_program *bpf_prog;
|
||||
static struct bpf_object *obj;
|
||||
@ -77,7 +77,7 @@ static void read_trace_pipe2(void)
|
||||
FILE *outf;
|
||||
char *outFname = "hbm_out.log";
|
||||
|
||||
trace_fd = open(DEBUGFS "trace_pipe", O_RDONLY, 0);
|
||||
trace_fd = open(TRACEFS "trace_pipe", O_RDONLY, 0);
|
||||
if (trace_fd < 0) {
|
||||
printf("Error opening trace_pipe\n");
|
||||
return;
|
||||
@ -315,6 +315,7 @@ static int run_bpf_prog(char *prog, int cg_id)
|
||||
fout = fopen(fname, "w");
|
||||
fprintf(fout, "id:%d\n", cg_id);
|
||||
fprintf(fout, "ERROR: Could not lookup queue_stats\n");
|
||||
fclose(fout);
|
||||
} else if (stats_flag && qstats.lastPacketTime >
|
||||
qstats.firstPacketTime) {
|
||||
long long delta_us = (qstats.lastPacketTime -
|
||||
|
@ -39,8 +39,8 @@ struct {
|
||||
/* Taken from the current format defined in
|
||||
* include/trace/events/ib_umad.h
|
||||
* and
|
||||
* /sys/kernel/debug/tracing/events/ib_umad/ib_umad_read/format
|
||||
* /sys/kernel/debug/tracing/events/ib_umad/ib_umad_write/format
|
||||
* /sys/kernel/tracing/events/ib_umad/ib_umad_read/format
|
||||
* /sys/kernel/tracing/events/ib_umad/ib_umad_write/format
|
||||
*/
|
||||
struct ib_umad_rw_args {
|
||||
u64 pad;
|
||||
|
@ -5,7 +5,7 @@ NS1=lwt_ns1
|
||||
VETH0=tst_lwt1a
|
||||
VETH1=tst_lwt1b
|
||||
BPF_PROG=lwt_len_hist.bpf.o
|
||||
TRACE_ROOT=/sys/kernel/debug/tracing
|
||||
TRACE_ROOT=/sys/kernel/tracing
|
||||
|
||||
function cleanup {
|
||||
# To reset saved histogram, remove pinned map
|
||||
|
@ -110,7 +110,7 @@ static inline int update_counts(void *ctx, u32 pid, u64 delta)
|
||||
}
|
||||
|
||||
#if 1
|
||||
/* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */
|
||||
/* taken from /sys/kernel/tracing/events/sched/sched_switch/format */
|
||||
struct sched_switch_args {
|
||||
unsigned long long pad;
|
||||
char prev_comm[TASK_COMM_LEN];
|
||||
|
@ -235,7 +235,7 @@ static int test_debug_fs_uprobe(char *binary_path, long offset, bool is_return)
|
||||
struct bpf_link *link;
|
||||
ssize_t bytes;
|
||||
|
||||
snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/%s_events",
|
||||
snprintf(buf, sizeof(buf), "/sys/kernel/tracing/%s_events",
|
||||
event_type);
|
||||
kfd = open(buf, O_WRONLY | O_TRUNC, 0);
|
||||
CHECK_PERROR_RET(kfd < 0);
|
||||
@ -252,7 +252,7 @@ static int test_debug_fs_uprobe(char *binary_path, long offset, bool is_return)
|
||||
close(kfd);
|
||||
kfd = -1;
|
||||
|
||||
snprintf(buf, sizeof(buf), "/sys/kernel/debug/tracing/events/%ss/%s/id",
|
||||
snprintf(buf, sizeof(buf), "/sys/kernel/tracing/events/%ss/%s/id",
|
||||
event_type, event_alias);
|
||||
efd = open(buf, O_RDONLY, 0);
|
||||
CHECK_PERROR_RET(efd < 0);
|
||||
|
@ -21,7 +21,7 @@ IP_LOCAL="192.168.99.1"
|
||||
|
||||
PROG_SRC="test_lwt_bpf.c"
|
||||
BPF_PROG="test_lwt_bpf.o"
|
||||
TRACE_ROOT=/sys/kernel/debug/tracing
|
||||
TRACE_ROOT=/sys/kernel/tracing
|
||||
CONTEXT_INFO=$(cat ${TRACE_ROOT}/trace_options | grep context)
|
||||
|
||||
function lookup_mac()
|
||||
|
@ -7,7 +7,7 @@
|
||||
#include "vmlinux.h"
|
||||
#include <bpf/bpf_helpers.h>
|
||||
|
||||
/* from /sys/kernel/debug/tracing/events/task/task_rename/format */
|
||||
/* from /sys/kernel/tracing/events/task/task_rename/format */
|
||||
struct task_rename {
|
||||
__u64 pad;
|
||||
__u32 pid;
|
||||
@ -21,7 +21,7 @@ int prog(struct task_rename *ctx)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* from /sys/kernel/debug/tracing/events/fib/fib_table_lookup/format */
|
||||
/* from /sys/kernel/tracing/events/fib/fib_table_lookup/format */
|
||||
struct fib_table_lookup {
|
||||
__u64 pad;
|
||||
__u32 tb_id;
|
||||
|
@ -383,7 +383,7 @@ class PrinterRST(Printer):
|
||||
.. Copyright (C) All BPF authors and contributors from 2014 to present.
|
||||
.. See git log include/uapi/linux/bpf.h in kernel tree for details.
|
||||
..
|
||||
.. SPDX-License-Identifier: Linux-man-pages-copyleft
|
||||
.. SPDX-License-Identifier: Linux-man-pages-copyleft
|
||||
..
|
||||
.. Please do not edit this file. It was generated from the documentation
|
||||
.. located in file include/uapi/linux/bpf.h of the Linux kernel sources
|
||||
|
@ -28,8 +28,8 @@ PROG COMMANDS
|
||||
=============
|
||||
|
||||
| **bpftool** **prog** { **show** | **list** } [*PROG*]
|
||||
| **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes** | **visual** | **linum**}]
|
||||
| **bpftool** **prog dump jited** *PROG* [{**file** *FILE* | **opcodes** | **linum**}]
|
||||
| **bpftool** **prog dump xlated** *PROG* [{ **file** *FILE* | [**opcodes**] [**linum**] [**visual**] }]
|
||||
| **bpftool** **prog dump jited** *PROG* [{ **file** *FILE* | [**opcodes**] [**linum**] }]
|
||||
| **bpftool** **prog pin** *PROG* *FILE*
|
||||
| **bpftool** **prog** { **load** | **loadall** } *OBJ* *PATH* [**type** *TYPE*] [**map** {**idx** *IDX* | **name** *NAME*} *MAP*] [**dev** *NAME*] [**pinmaps** *MAP_DIR*] [**autoattach**]
|
||||
| **bpftool** **prog attach** *PROG* *ATTACH_TYPE* [*MAP*]
|
||||
@ -88,7 +88,7 @@ DESCRIPTION
|
||||
programs. On such kernels bpftool will automatically emit this
|
||||
information as well.
|
||||
|
||||
**bpftool prog dump xlated** *PROG* [{ **file** *FILE* | **opcodes** | **visual** | **linum** }]
|
||||
**bpftool prog dump xlated** *PROG* [{ **file** *FILE* | [**opcodes**] [**linum**] [**visual**] }]
|
||||
Dump eBPF instructions of the programs from the kernel. By
|
||||
default, eBPF will be disassembled and printed to standard
|
||||
output in human-readable format. In this case, **opcodes**
|
||||
@ -106,11 +106,10 @@ DESCRIPTION
|
||||
CFG in DOT format, on standard output.
|
||||
|
||||
If the programs have line_info available, the source line will
|
||||
be displayed by default. If **linum** is specified,
|
||||
the filename, line number and line column will also be
|
||||
displayed on top of the source line.
|
||||
be displayed. If **linum** is specified, the filename, line
|
||||
number and line column will also be displayed.
|
||||
|
||||
**bpftool prog dump jited** *PROG* [{ **file** *FILE* | **opcodes** | **linum** }]
|
||||
**bpftool prog dump jited** *PROG* [{ **file** *FILE* | [**opcodes**] [**linum**] }]
|
||||
Dump jited image (host machine code) of the program.
|
||||
|
||||
If *FILE* is specified image will be written to a file,
|
||||
@ -120,9 +119,8 @@ DESCRIPTION
|
||||
**opcodes** controls if raw opcodes will be printed.
|
||||
|
||||
If the prog has line_info available, the source line will
|
||||
be displayed by default. If **linum** is specified,
|
||||
the filename, line number and line column will also be
|
||||
displayed on top of the source line.
|
||||
be displayed. If **linum** is specified, the filename, line
|
||||
number and line column will also be displayed.
|
||||
|
||||
**bpftool prog pin** *PROG* *FILE*
|
||||
Pin program *PROG* as *FILE*.
|
||||
|
@ -255,20 +255,23 @@ _bpftool_map_update_get_name()
|
||||
|
||||
_bpftool()
|
||||
{
|
||||
local cur prev words objword
|
||||
local cur prev words objword json=0
|
||||
_init_completion || return
|
||||
|
||||
# Deal with options
|
||||
if [[ ${words[cword]} == -* ]]; then
|
||||
local c='--version --json --pretty --bpffs --mapcompat --debug \
|
||||
--use-loader --base-btf'
|
||||
--use-loader --base-btf'
|
||||
COMPREPLY=( $( compgen -W "$c" -- "$cur" ) )
|
||||
return 0
|
||||
fi
|
||||
if _bpftool_search_list -j --json -p --pretty; then
|
||||
json=1
|
||||
fi
|
||||
|
||||
# Deal with simplest keywords
|
||||
case $prev in
|
||||
help|hex|opcodes|visual|linum)
|
||||
help|hex)
|
||||
return 0
|
||||
;;
|
||||
tag)
|
||||
@ -366,13 +369,16 @@ _bpftool()
|
||||
return 0
|
||||
;;
|
||||
*)
|
||||
_bpftool_once_attr 'file'
|
||||
if _bpftool_search_list 'xlated'; then
|
||||
COMPREPLY+=( $( compgen -W 'opcodes visual linum' -- \
|
||||
"$cur" ) )
|
||||
else
|
||||
COMPREPLY+=( $( compgen -W 'opcodes linum' -- \
|
||||
"$cur" ) )
|
||||
# "file" is not compatible with other keywords here
|
||||
if _bpftool_search_list 'file'; then
|
||||
return 0
|
||||
fi
|
||||
if ! _bpftool_search_list 'linum opcodes visual'; then
|
||||
_bpftool_once_attr 'file'
|
||||
fi
|
||||
_bpftool_once_attr 'linum opcodes'
|
||||
if _bpftool_search_list 'xlated' && [[ "$json" == 0 ]]; then
|
||||
_bpftool_once_attr 'visual'
|
||||
fi
|
||||
return 0
|
||||
;;
|
||||
@ -502,10 +508,7 @@ _bpftool()
|
||||
;;
|
||||
*)
|
||||
COMPREPLY=( $( compgen -W "map" -- "$cur" ) )
|
||||
_bpftool_once_attr 'type'
|
||||
_bpftool_once_attr 'dev'
|
||||
_bpftool_once_attr 'pinmaps'
|
||||
_bpftool_once_attr 'autoattach'
|
||||
_bpftool_once_attr 'type dev pinmaps autoattach'
|
||||
return 0
|
||||
;;
|
||||
esac
|
||||
@ -730,16 +733,10 @@ _bpftool()
|
||||
esac
|
||||
;;
|
||||
*)
|
||||
_bpftool_once_attr 'type'
|
||||
_bpftool_once_attr 'key'
|
||||
_bpftool_once_attr 'value'
|
||||
_bpftool_once_attr 'entries'
|
||||
_bpftool_once_attr 'name'
|
||||
_bpftool_once_attr 'flags'
|
||||
_bpftool_once_attr 'type key value entries name flags dev'
|
||||
if _bpftool_search_list 'array_of_maps' 'hash_of_maps'; then
|
||||
_bpftool_once_attr 'inner_map'
|
||||
fi
|
||||
_bpftool_once_attr 'dev'
|
||||
return 0
|
||||
;;
|
||||
esac
|
||||
@ -880,8 +877,7 @@ _bpftool()
|
||||
return 0
|
||||
;;
|
||||
*)
|
||||
_bpftool_once_attr 'cpu'
|
||||
_bpftool_once_attr 'index'
|
||||
_bpftool_once_attr 'cpu index'
|
||||
return 0
|
||||
;;
|
||||
esac
|
||||
|
@ -821,3 +821,86 @@ void btf_dump_linfo_json(const struct btf *btf,
|
||||
BPF_LINE_INFO_LINE_COL(linfo->line_col));
|
||||
}
|
||||
}
|
||||
|
||||
static void dotlabel_puts(const char *s)
|
||||
{
|
||||
for (; *s; ++s) {
|
||||
switch (*s) {
|
||||
case '\\':
|
||||
case '"':
|
||||
case '{':
|
||||
case '}':
|
||||
case '<':
|
||||
case '>':
|
||||
case '|':
|
||||
case ' ':
|
||||
putchar('\\');
|
||||
__fallthrough;
|
||||
default:
|
||||
putchar(*s);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static const char *shorten_path(const char *path)
|
||||
{
|
||||
const unsigned int MAX_PATH_LEN = 32;
|
||||
size_t len = strlen(path);
|
||||
const char *shortpath;
|
||||
|
||||
if (len <= MAX_PATH_LEN)
|
||||
return path;
|
||||
|
||||
/* Search for last '/' under the MAX_PATH_LEN limit */
|
||||
shortpath = strchr(path + len - MAX_PATH_LEN, '/');
|
||||
if (shortpath) {
|
||||
if (shortpath < path + strlen("..."))
|
||||
/* We removed a very short prefix, e.g. "/w", and we'll
|
||||
* make the path longer by prefixing with the ellipsis.
|
||||
* Not worth it, keep initial path.
|
||||
*/
|
||||
return path;
|
||||
return shortpath;
|
||||
}
|
||||
|
||||
/* File base name length is > MAX_PATH_LEN, search for last '/' */
|
||||
shortpath = strrchr(path, '/');
|
||||
if (shortpath)
|
||||
return shortpath;
|
||||
|
||||
return path;
|
||||
}
|
||||
|
||||
void btf_dump_linfo_dotlabel(const struct btf *btf,
|
||||
const struct bpf_line_info *linfo, bool linum)
|
||||
{
|
||||
const char *line = btf__name_by_offset(btf, linfo->line_off);
|
||||
|
||||
if (!line || !strlen(line))
|
||||
return;
|
||||
line = ltrim(line);
|
||||
|
||||
if (linum) {
|
||||
const char *file = btf__name_by_offset(btf, linfo->file_name_off);
|
||||
const char *shortfile;
|
||||
|
||||
/* More forgiving on file because linum option is
|
||||
* expected to provide more info than the already
|
||||
* available src line.
|
||||
*/
|
||||
if (!file)
|
||||
shortfile = "";
|
||||
else
|
||||
shortfile = shorten_path(file);
|
||||
|
||||
printf("; [%s", shortfile > file ? "..." : "");
|
||||
dotlabel_puts(shortfile);
|
||||
printf(" line:%u col:%u]\\l\\\n",
|
||||
BPF_LINE_INFO_LINE_NUM(linfo->line_col),
|
||||
BPF_LINE_INFO_LINE_COL(linfo->line_col));
|
||||
}
|
||||
|
||||
printf("; ");
|
||||
dotlabel_puts(line);
|
||||
printf("\\l\\\n");
|
||||
}
|
||||
|
@ -380,7 +380,9 @@ static void cfg_destroy(struct cfg *cfg)
|
||||
}
|
||||
}
|
||||
|
||||
static void draw_bb_node(struct func_node *func, struct bb_node *bb)
|
||||
static void
|
||||
draw_bb_node(struct func_node *func, struct bb_node *bb, struct dump_data *dd,
|
||||
bool opcodes, bool linum)
|
||||
{
|
||||
const char *shape;
|
||||
|
||||
@ -398,13 +400,10 @@ static void draw_bb_node(struct func_node *func, struct bb_node *bb)
|
||||
printf("EXIT");
|
||||
} else {
|
||||
unsigned int start_idx;
|
||||
struct dump_data dd = {};
|
||||
|
||||
printf("{");
|
||||
kernel_syms_load(&dd);
|
||||
printf("{\\\n");
|
||||
start_idx = bb->head - func->start;
|
||||
dump_xlated_for_graph(&dd, bb->head, bb->tail, start_idx);
|
||||
kernel_syms_destroy(&dd);
|
||||
dump_xlated_for_graph(dd, bb->head, bb->tail, start_idx,
|
||||
opcodes, linum);
|
||||
printf("}");
|
||||
}
|
||||
|
||||
@ -430,12 +429,14 @@ static void draw_bb_succ_edges(struct func_node *func, struct bb_node *bb)
|
||||
}
|
||||
}
|
||||
|
||||
static void func_output_bb_def(struct func_node *func)
|
||||
static void
|
||||
func_output_bb_def(struct func_node *func, struct dump_data *dd,
|
||||
bool opcodes, bool linum)
|
||||
{
|
||||
struct bb_node *bb;
|
||||
|
||||
list_for_each_entry(bb, &func->bbs, l) {
|
||||
draw_bb_node(func, bb);
|
||||
draw_bb_node(func, bb, dd, opcodes, linum);
|
||||
}
|
||||
}
|
||||
|
||||
@ -455,7 +456,8 @@ static void func_output_edges(struct func_node *func)
|
||||
func_idx, ENTRY_BLOCK_INDEX, func_idx, EXIT_BLOCK_INDEX);
|
||||
}
|
||||
|
||||
static void cfg_dump(struct cfg *cfg)
|
||||
static void
|
||||
cfg_dump(struct cfg *cfg, struct dump_data *dd, bool opcodes, bool linum)
|
||||
{
|
||||
struct func_node *func;
|
||||
|
||||
@ -463,14 +465,15 @@ static void cfg_dump(struct cfg *cfg)
|
||||
list_for_each_entry(func, &cfg->funcs, l) {
|
||||
printf("subgraph \"cluster_%d\" {\n\tstyle=\"dashed\";\n\tcolor=\"black\";\n\tlabel=\"func_%d ()\";\n",
|
||||
func->idx, func->idx);
|
||||
func_output_bb_def(func);
|
||||
func_output_bb_def(func, dd, opcodes, linum);
|
||||
func_output_edges(func);
|
||||
printf("}\n");
|
||||
}
|
||||
printf("}\n");
|
||||
}
|
||||
|
||||
void dump_xlated_cfg(void *buf, unsigned int len)
|
||||
void dump_xlated_cfg(struct dump_data *dd, void *buf, unsigned int len,
|
||||
bool opcodes, bool linum)
|
||||
{
|
||||
struct bpf_insn *insn = buf;
|
||||
struct cfg cfg;
|
||||
@ -479,7 +482,7 @@ void dump_xlated_cfg(void *buf, unsigned int len)
|
||||
if (cfg_build(&cfg, insn, len))
|
||||
return;
|
||||
|
||||
cfg_dump(&cfg);
|
||||
cfg_dump(&cfg, dd, opcodes, linum);
|
||||
|
||||
cfg_destroy(&cfg);
|
||||
}
|
||||
|
@ -4,6 +4,9 @@
|
||||
#ifndef __BPF_TOOL_CFG_H
|
||||
#define __BPF_TOOL_CFG_H
|
||||
|
||||
void dump_xlated_cfg(void *buf, unsigned int len);
|
||||
#include "xlated_dumper.h"
|
||||
|
||||
void dump_xlated_cfg(struct dump_data *dd, void *buf, unsigned int len,
|
||||
bool opcodes, bool linum);
|
||||
|
||||
#endif /* __BPF_TOOL_CFG_H */
|
||||
|
@ -75,7 +75,7 @@ static void jsonw_puts(json_writer_t *self, const char *str)
|
||||
fputs("\\b", self->out);
|
||||
break;
|
||||
case '\\':
|
||||
fputs("\\n", self->out);
|
||||
fputs("\\\\", self->out);
|
||||
break;
|
||||
case '"':
|
||||
fputs("\\\"", self->out);
|
||||
|
@ -14,6 +14,7 @@
|
||||
#include <stdbool.h>
|
||||
#include <stdint.h>
|
||||
#include <stdarg.h>
|
||||
#include <stdio.h>
|
||||
#include <linux/compiler.h>
|
||||
|
||||
/* Opaque class structure */
|
||||
|
@ -229,6 +229,8 @@ void btf_dump_linfo_plain(const struct btf *btf,
|
||||
const char *prefix, bool linum);
|
||||
void btf_dump_linfo_json(const struct btf *btf,
|
||||
const struct bpf_line_info *linfo, bool linum);
|
||||
void btf_dump_linfo_dotlabel(const struct btf *btf,
|
||||
const struct bpf_line_info *linfo, bool linum);
|
||||
|
||||
struct nlattr;
|
||||
struct ifinfomsg;
|
||||
|
@ -840,11 +840,6 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
|
||||
false))
|
||||
goto exit_free;
|
||||
}
|
||||
} else if (visual) {
|
||||
if (json_output)
|
||||
jsonw_null(json_wtr);
|
||||
else
|
||||
dump_xlated_cfg(buf, member_len);
|
||||
} else {
|
||||
kernel_syms_load(&dd);
|
||||
dd.nr_jited_ksyms = info->nr_jited_ksyms;
|
||||
@ -855,11 +850,11 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
|
||||
dd.prog_linfo = prog_linfo;
|
||||
|
||||
if (json_output)
|
||||
dump_xlated_json(&dd, buf, member_len, opcodes,
|
||||
linum);
|
||||
dump_xlated_json(&dd, buf, member_len, opcodes, linum);
|
||||
else if (visual)
|
||||
dump_xlated_cfg(&dd, buf, member_len, opcodes, linum);
|
||||
else
|
||||
dump_xlated_plain(&dd, buf, member_len, opcodes,
|
||||
linum);
|
||||
dump_xlated_plain(&dd, buf, member_len, opcodes, linum);
|
||||
kernel_syms_destroy(&dd);
|
||||
}
|
||||
|
||||
@ -910,37 +905,46 @@ static int do_dump(int argc, char **argv)
|
||||
if (nb_fds < 1)
|
||||
goto exit_free;
|
||||
|
||||
if (is_prefix(*argv, "file")) {
|
||||
NEXT_ARG();
|
||||
if (!argc) {
|
||||
p_err("expected file path");
|
||||
goto exit_close;
|
||||
}
|
||||
if (nb_fds > 1) {
|
||||
p_err("several programs matched");
|
||||
goto exit_close;
|
||||
}
|
||||
while (argc) {
|
||||
if (is_prefix(*argv, "file")) {
|
||||
NEXT_ARG();
|
||||
if (!argc) {
|
||||
p_err("expected file path");
|
||||
goto exit_close;
|
||||
}
|
||||
if (nb_fds > 1) {
|
||||
p_err("several programs matched");
|
||||
goto exit_close;
|
||||
}
|
||||
|
||||
filepath = *argv;
|
||||
NEXT_ARG();
|
||||
} else if (is_prefix(*argv, "opcodes")) {
|
||||
opcodes = true;
|
||||
NEXT_ARG();
|
||||
} else if (is_prefix(*argv, "visual")) {
|
||||
if (nb_fds > 1) {
|
||||
p_err("several programs matched");
|
||||
filepath = *argv;
|
||||
NEXT_ARG();
|
||||
} else if (is_prefix(*argv, "opcodes")) {
|
||||
opcodes = true;
|
||||
NEXT_ARG();
|
||||
} else if (is_prefix(*argv, "visual")) {
|
||||
if (nb_fds > 1) {
|
||||
p_err("several programs matched");
|
||||
goto exit_close;
|
||||
}
|
||||
|
||||
visual = true;
|
||||
NEXT_ARG();
|
||||
} else if (is_prefix(*argv, "linum")) {
|
||||
linum = true;
|
||||
NEXT_ARG();
|
||||
} else {
|
||||
usage();
|
||||
goto exit_close;
|
||||
}
|
||||
|
||||
visual = true;
|
||||
NEXT_ARG();
|
||||
} else if (is_prefix(*argv, "linum")) {
|
||||
linum = true;
|
||||
NEXT_ARG();
|
||||
}
|
||||
|
||||
if (argc) {
|
||||
usage();
|
||||
if (filepath && (opcodes || visual || linum)) {
|
||||
p_err("'file' is not compatible with 'opcodes', 'visual', or 'linum'");
|
||||
goto exit_close;
|
||||
}
|
||||
if (json_output && visual) {
|
||||
p_err("'visual' is not compatible with JSON output");
|
||||
goto exit_close;
|
||||
}
|
||||
|
||||
@ -1681,7 +1685,8 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
|
||||
}
|
||||
|
||||
bpf_program__set_ifindex(pos, ifindex);
|
||||
bpf_program__set_type(pos, prog_type);
|
||||
if (bpf_program__type(pos) != prog_type)
|
||||
bpf_program__set_type(pos, prog_type);
|
||||
bpf_program__set_expected_attach_type(pos, expected_attach_type);
|
||||
}
|
||||
|
||||
@ -2420,8 +2425,8 @@ static int do_help(int argc, char **argv)
|
||||
|
||||
fprintf(stderr,
|
||||
"Usage: %1$s %2$s { show | list } [PROG]\n"
|
||||
" %1$s %2$s dump xlated PROG [{ file FILE | opcodes | visual | linum }]\n"
|
||||
" %1$s %2$s dump jited PROG [{ file FILE | opcodes | linum }]\n"
|
||||
" %1$s %2$s dump xlated PROG [{ file FILE | [opcodes] [linum] [visual] }]\n"
|
||||
" %1$s %2$s dump jited PROG [{ file FILE | [opcodes] [linum] }]\n"
|
||||
" %1$s %2$s pin PROG FILE\n"
|
||||
" %1$s %2$s { load | loadall } OBJ PATH \\\n"
|
||||
" [type TYPE] [dev NAME] \\\n"
|
||||
|
@ -361,7 +361,8 @@ void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len,
|
||||
}
|
||||
|
||||
void dump_xlated_for_graph(struct dump_data *dd, void *buf_start, void *buf_end,
|
||||
unsigned int start_idx)
|
||||
unsigned int start_idx,
|
||||
bool opcodes, bool linum)
|
||||
{
|
||||
const struct bpf_insn_cbs cbs = {
|
||||
.cb_print = print_insn_for_graph,
|
||||
@ -369,14 +370,61 @@ void dump_xlated_for_graph(struct dump_data *dd, void *buf_start, void *buf_end,
|
||||
.cb_imm = print_imm,
|
||||
.private_data = dd,
|
||||
};
|
||||
const struct bpf_prog_linfo *prog_linfo = dd->prog_linfo;
|
||||
const struct bpf_line_info *last_linfo = NULL;
|
||||
struct bpf_func_info *record = dd->func_info;
|
||||
struct bpf_insn *insn_start = buf_start;
|
||||
struct bpf_insn *insn_end = buf_end;
|
||||
struct bpf_insn *cur = insn_start;
|
||||
struct btf *btf = dd->btf;
|
||||
bool double_insn = false;
|
||||
char func_sig[1024];
|
||||
|
||||
for (; cur <= insn_end; cur++) {
|
||||
printf("% 4d: ", (int)(cur - insn_start + start_idx));
|
||||
unsigned int insn_off;
|
||||
|
||||
if (double_insn) {
|
||||
double_insn = false;
|
||||
continue;
|
||||
}
|
||||
double_insn = cur->code == (BPF_LD | BPF_IMM | BPF_DW);
|
||||
|
||||
insn_off = (unsigned int)(cur - insn_start + start_idx);
|
||||
if (btf && record) {
|
||||
if (record->insn_off == insn_off) {
|
||||
btf_dumper_type_only(btf, record->type_id,
|
||||
func_sig,
|
||||
sizeof(func_sig));
|
||||
if (func_sig[0] != '\0')
|
||||
printf("; %s:\\l\\\n", func_sig);
|
||||
record = (void *)record + dd->finfo_rec_size;
|
||||
}
|
||||
}
|
||||
|
||||
if (prog_linfo) {
|
||||
const struct bpf_line_info *linfo;
|
||||
|
||||
linfo = bpf_prog_linfo__lfind(prog_linfo, insn_off, 0);
|
||||
if (linfo && linfo != last_linfo) {
|
||||
btf_dump_linfo_dotlabel(btf, linfo, linum);
|
||||
last_linfo = linfo;
|
||||
}
|
||||
}
|
||||
|
||||
printf("%d: ", insn_off);
|
||||
print_bpf_insn(&cbs, cur, true);
|
||||
|
||||
if (opcodes) {
|
||||
printf("\\ \\ \\ \\ ");
|
||||
fprint_hex(stdout, cur, 8, " ");
|
||||
if (double_insn && cur <= insn_end - 1) {
|
||||
printf(" ");
|
||||
fprint_hex(stdout, cur + 1, 8, " ");
|
||||
}
|
||||
printf("\\l\\\n");
|
||||
}
|
||||
|
||||
if (cur != insn_end)
|
||||
printf(" | ");
|
||||
printf("| ");
|
||||
}
|
||||
}
|
||||
|
@ -34,6 +34,7 @@ void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len,
|
||||
void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len,
|
||||
bool opcodes, bool linum);
|
||||
void dump_xlated_for_graph(struct dump_data *dd, void *buf, void *buf_end,
|
||||
unsigned int start_index);
|
||||
unsigned int start_index,
|
||||
bool opcodes, bool linum);
|
||||
|
||||
#endif
|
||||
|
@ -1033,6 +1033,7 @@ enum bpf_attach_type {
|
||||
BPF_PERF_EVENT,
|
||||
BPF_TRACE_KPROBE_MULTI,
|
||||
BPF_LSM_CGROUP,
|
||||
BPF_STRUCT_OPS,
|
||||
__MAX_BPF_ATTACH_TYPE
|
||||
};
|
||||
|
||||
@ -1108,7 +1109,7 @@ enum bpf_link_type {
|
||||
*/
|
||||
#define BPF_F_STRICT_ALIGNMENT (1U << 0)
|
||||
|
||||
/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROF_LOAD command, the
|
||||
/* If BPF_F_ANY_ALIGNMENT is used in BPF_PROG_LOAD command, the
|
||||
* verifier will allow any alignment whatsoever. On platforms
|
||||
* with strict alignment requirements for loads ands stores (such
|
||||
* as sparc and mips) the verifier validates that all loads and
|
||||
@ -1266,6 +1267,9 @@ enum {
|
||||
|
||||
/* Create a map that is suitable to be an inner map with dynamic max entries */
|
||||
BPF_F_INNER_MAP = (1U << 12),
|
||||
|
||||
/* Create a map that will be registered/unregesitered by the backed bpf_link */
|
||||
BPF_F_LINK = (1U << 13),
|
||||
};
|
||||
|
||||
/* Flags for BPF_PROG_QUERY. */
|
||||
@ -1403,6 +1407,11 @@ union bpf_attr {
|
||||
__aligned_u64 fd_array; /* array of FDs */
|
||||
__aligned_u64 core_relos;
|
||||
__u32 core_relo_rec_size; /* sizeof(struct bpf_core_relo) */
|
||||
/* output: actual total log contents size (including termintaing zero).
|
||||
* It could be both larger than original log_size (if log was
|
||||
* truncated), or smaller (if log buffer wasn't filled completely).
|
||||
*/
|
||||
__u32 log_true_size;
|
||||
};
|
||||
|
||||
struct { /* anonymous struct used by BPF_OBJ_* commands */
|
||||
@ -1488,6 +1497,11 @@ union bpf_attr {
|
||||
__u32 btf_size;
|
||||
__u32 btf_log_size;
|
||||
__u32 btf_log_level;
|
||||
/* output: actual total log contents size (including termintaing zero).
|
||||
* It could be both larger than original log_size (if log was
|
||||
* truncated), or smaller (if log buffer wasn't filled completely).
|
||||
*/
|
||||
__u32 btf_log_true_size;
|
||||
};
|
||||
|
||||
struct {
|
||||
@ -1507,7 +1521,10 @@ union bpf_attr {
|
||||
} task_fd_query;
|
||||
|
||||
struct { /* struct used by BPF_LINK_CREATE command */
|
||||
__u32 prog_fd; /* eBPF program to attach */
|
||||
union {
|
||||
__u32 prog_fd; /* eBPF program to attach */
|
||||
__u32 map_fd; /* struct_ops to attach */
|
||||
};
|
||||
union {
|
||||
__u32 target_fd; /* object to attach to */
|
||||
__u32 target_ifindex; /* target ifindex */
|
||||
@ -1548,12 +1565,23 @@ union bpf_attr {
|
||||
|
||||
struct { /* struct used by BPF_LINK_UPDATE command */
|
||||
__u32 link_fd; /* link fd */
|
||||
/* new program fd to update link with */
|
||||
__u32 new_prog_fd;
|
||||
union {
|
||||
/* new program fd to update link with */
|
||||
__u32 new_prog_fd;
|
||||
/* new struct_ops map fd to update link with */
|
||||
__u32 new_map_fd;
|
||||
};
|
||||
__u32 flags; /* extra flags */
|
||||
/* expected link's program fd; is specified only if
|
||||
* BPF_F_REPLACE flag is set in flags */
|
||||
__u32 old_prog_fd;
|
||||
union {
|
||||
/* expected link's program fd; is specified only if
|
||||
* BPF_F_REPLACE flag is set in flags.
|
||||
*/
|
||||
__u32 old_prog_fd;
|
||||
/* expected link's map fd; is specified only
|
||||
* if BPF_F_REPLACE flag is set.
|
||||
*/
|
||||
__u32 old_map_fd;
|
||||
};
|
||||
} link_update;
|
||||
|
||||
struct {
|
||||
@ -1647,17 +1675,17 @@ union bpf_attr {
|
||||
* Description
|
||||
* This helper is a "printk()-like" facility for debugging. It
|
||||
* prints a message defined by format *fmt* (of size *fmt_size*)
|
||||
* to file *\/sys/kernel/debug/tracing/trace* from DebugFS, if
|
||||
* to file *\/sys/kernel/tracing/trace* from TraceFS, if
|
||||
* available. It can take up to three additional **u64**
|
||||
* arguments (as an eBPF helpers, the total number of arguments is
|
||||
* limited to five).
|
||||
*
|
||||
* Each time the helper is called, it appends a line to the trace.
|
||||
* Lines are discarded while *\/sys/kernel/debug/tracing/trace* is
|
||||
* open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this.
|
||||
* Lines are discarded while *\/sys/kernel/tracing/trace* is
|
||||
* open, use *\/sys/kernel/tracing/trace_pipe* to avoid this.
|
||||
* The format of the trace is customizable, and the exact output
|
||||
* one will get depends on the options set in
|
||||
* *\/sys/kernel/debug/tracing/trace_options* (see also the
|
||||
* *\/sys/kernel/tracing/trace_options* (see also the
|
||||
* *README* file under the same directory). However, it usually
|
||||
* defaults to something like:
|
||||
*
|
||||
@ -6379,6 +6407,9 @@ struct bpf_link_info {
|
||||
struct {
|
||||
__u32 ifindex;
|
||||
} xdp;
|
||||
struct {
|
||||
__u32 map_id;
|
||||
} struct_ops;
|
||||
};
|
||||
} __attribute__((aligned(8)));
|
||||
|
||||
@ -7112,4 +7143,12 @@ enum {
|
||||
BPF_F_TIMER_ABS = (1ULL << 0),
|
||||
};
|
||||
|
||||
/* BPF numbers iterator state */
|
||||
struct bpf_iter_num {
|
||||
/* opaque iterator state; having __u64 here allows to preserve correct
|
||||
* alignment requirements in vmlinux.h, generated from BTF
|
||||
*/
|
||||
__u64 __opaque[1];
|
||||
} __attribute__((aligned(8)));
|
||||
|
||||
#endif /* _UAPI__LINUX_BPF_H__ */
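The bpf_iter_num state above backs the new open-coded numbers iterator kfuncs (bpf_iter_num_new/next/destroy). A minimal, hypothetical BPF-side sketch of driving it by hand; the section name and the printed sum are only illustrative:

/* SPDX-License-Identifier: GPL-2.0 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

/* Kernel kfuncs behind the numbers iterator, declared by hand here. */
extern int bpf_iter_num_new(struct bpf_iter_num *it, int start, int end) __ksym;
extern int *bpf_iter_num_next(struct bpf_iter_num *it) __ksym;
extern void bpf_iter_num_destroy(struct bpf_iter_num *it) __ksym;

SEC("raw_tp/sys_enter")
int sum_first_ten(void *ctx)
{
	struct bpf_iter_num it;
	int *v, sum = 0;

	bpf_iter_num_new(&it, 0, 10);
	while ((v = bpf_iter_num_next(&it)))	/* verifier tracks iterator convergence */
		sum += *v;
	bpf_iter_num_destroy(&it);

	bpf_printk("sum = %d", sum);
	return 0;
}

char _license[] SEC("license") = "GPL";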
|
||||
|
@ -230,9 +230,9 @@ alloc_zero_tailing_info(const void *orecord, __u32 cnt,
|
||||
int bpf_prog_load(enum bpf_prog_type prog_type,
|
||||
const char *prog_name, const char *license,
|
||||
const struct bpf_insn *insns, size_t insn_cnt,
|
||||
const struct bpf_prog_load_opts *opts)
|
||||
struct bpf_prog_load_opts *opts)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, fd_array);
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, log_true_size);
|
||||
void *finfo = NULL, *linfo = NULL;
|
||||
const char *func_info, *line_info;
|
||||
__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
|
||||
@ -290,10 +290,6 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
|
||||
|
||||
if (!!log_buf != !!log_size)
|
||||
return libbpf_err(-EINVAL);
|
||||
if (log_level > (4 | 2 | 1))
|
||||
return libbpf_err(-EINVAL);
|
||||
if (log_level && !log_buf)
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0);
|
||||
func_info = OPTS_GET(opts, func_info, NULL);
|
||||
@ -316,6 +312,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
|
||||
}
|
||||
|
||||
fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
|
||||
OPTS_SET(opts, log_true_size, attr.log_true_size);
|
||||
if (fd >= 0)
|
||||
return fd;
|
||||
|
||||
@ -356,6 +353,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
|
||||
}
|
||||
|
||||
fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
|
||||
OPTS_SET(opts, log_true_size, attr.log_true_size);
|
||||
if (fd >= 0)
|
||||
goto done;
|
||||
}
|
||||
@ -370,6 +368,7 @@ int bpf_prog_load(enum bpf_prog_type prog_type,
|
||||
attr.log_level = 1;
|
||||
|
||||
fd = sys_bpf_prog_load(&attr, attr_sz, attempts);
|
||||
OPTS_SET(opts, log_true_size, attr.log_true_size);
|
||||
}
|
||||
done:
|
||||
/* free() doesn't affect errno, so we don't need to restore it */
|
||||
@ -794,11 +793,17 @@ int bpf_link_update(int link_fd, int new_prog_fd,
|
||||
if (!OPTS_VALID(opts, bpf_link_update_opts))
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
if (OPTS_GET(opts, old_prog_fd, 0) && OPTS_GET(opts, old_map_fd, 0))
|
||||
return libbpf_err(-EINVAL);
|
||||
|
||||
memset(&attr, 0, attr_sz);
|
||||
attr.link_update.link_fd = link_fd;
|
||||
attr.link_update.new_prog_fd = new_prog_fd;
|
||||
attr.link_update.flags = OPTS_GET(opts, flags, 0);
|
||||
attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);
|
||||
if (OPTS_GET(opts, old_prog_fd, 0))
|
||||
attr.link_update.old_prog_fd = OPTS_GET(opts, old_prog_fd, 0);
|
||||
else if (OPTS_GET(opts, old_map_fd, 0))
|
||||
attr.link_update.old_map_fd = OPTS_GET(opts, old_map_fd, 0);
|
||||
|
||||
ret = sys_bpf(BPF_LINK_UPDATE, &attr, attr_sz);
|
||||
return libbpf_err_errno(ret);
|
||||
@ -1078,9 +1083,9 @@ int bpf_raw_tracepoint_open(const char *name, int prog_fd)
|
||||
return libbpf_err_errno(fd);
|
||||
}
|
||||
|
||||
int bpf_btf_load(const void *btf_data, size_t btf_size, const struct bpf_btf_load_opts *opts)
|
||||
int bpf_btf_load(const void *btf_data, size_t btf_size, struct bpf_btf_load_opts *opts)
|
||||
{
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, btf_log_level);
|
||||
const size_t attr_sz = offsetofend(union bpf_attr, btf_log_true_size);
|
||||
union bpf_attr attr;
|
||||
char *log_buf;
|
||||
size_t log_size;
|
||||
@ -1123,6 +1128,8 @@ int bpf_btf_load(const void *btf_data, size_t btf_size, const struct bpf_btf_loa
|
||||
attr.btf_log_level = 1;
|
||||
fd = sys_bpf_fd(BPF_BTF_LOAD, &attr, attr_sz);
|
||||
}
|
||||
|
||||
OPTS_SET(opts, log_true_size, attr.btf_log_true_size);
|
||||
return libbpf_err_errno(fd);
|
||||
}
|
||||
|
||||
|
@ -96,13 +96,20 @@ struct bpf_prog_load_opts {
|
||||
__u32 log_level;
|
||||
__u32 log_size;
|
||||
char *log_buf;
|
||||
/* output: actual total log contents size (including termintaing zero).
|
||||
* It could be both larger than original log_size (if log was
|
||||
* truncated), or smaller (if log buffer wasn't filled completely).
|
||||
* If kernel doesn't support this feature, log_size is left unchanged.
|
||||
*/
|
||||
__u32 log_true_size;
|
||||
size_t :0;
|
||||
};
|
||||
#define bpf_prog_load_opts__last_field log_buf
|
||||
#define bpf_prog_load_opts__last_field log_true_size
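A sketch of how the new log_true_size output field can be consumed (hypothetical helper, not libbpf code; the 64-byte first attempt is arbitrary): try the load with a small buffer, then retry with a buffer sized exactly from what the kernel reported.

#include <stdlib.h>
#include <bpf/bpf.h>

static int load_with_full_log(enum bpf_prog_type type, const char *name,
			      const struct bpf_insn *insns, size_t insn_cnt,
			      char **log_out)
{
	LIBBPF_OPTS(bpf_prog_load_opts, opts);
	char small[64];
	char *big;
	int fd;

	opts.log_buf = small;
	opts.log_size = sizeof(small);
	opts.log_level = 1;

	fd = bpf_prog_load(type, name, "GPL", insns, insn_cnt, &opts);
	/* log_true_size stays 0 on kernels without this feature. */
	if (fd >= 0 || !opts.log_true_size || opts.log_true_size <= sizeof(small)) {
		*log_out = NULL;
		return fd;
	}

	/* The kernel reported exactly how big the full log is. */
	big = malloc(opts.log_true_size);
	if (!big)
		return fd;
	opts.log_buf = big;
	opts.log_size = opts.log_true_size;

	fd = bpf_prog_load(type, name, "GPL", insns, insn_cnt, &opts);
	*log_out = big;		/* caller prints and frees */
	return fd;
}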
|
||||
|
||||
LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type,
|
||||
const char *prog_name, const char *license,
|
||||
const struct bpf_insn *insns, size_t insn_cnt,
|
||||
const struct bpf_prog_load_opts *opts);
|
||||
struct bpf_prog_load_opts *opts);
|
||||
|
||||
/* Flags to direct loading requirements */
|
||||
#define MAPS_RELAX_COMPAT 0x01
|
||||
@ -117,11 +124,18 @@ struct bpf_btf_load_opts {
|
||||
char *log_buf;
|
||||
__u32 log_level;
|
||||
__u32 log_size;
|
||||
/* output: actual total log contents size (including termintaing zero).
|
||||
* It could be both larger than original log_size (if log was
|
||||
* truncated), or smaller (if log buffer wasn't filled completely).
|
||||
* If kernel doesn't support this feature, log_size is left unchanged.
|
||||
*/
|
||||
__u32 log_true_size;
|
||||
size_t :0;
|
||||
};
|
||||
#define bpf_btf_load_opts__last_field log_size
|
||||
#define bpf_btf_load_opts__last_field log_true_size
|
||||
|
||||
LIBBPF_API int bpf_btf_load(const void *btf_data, size_t btf_size,
|
||||
const struct bpf_btf_load_opts *opts);
|
||||
struct bpf_btf_load_opts *opts);
|
||||
|
||||
LIBBPF_API int bpf_map_update_elem(int fd, const void *key, const void *value,
|
||||
__u64 flags);
|
||||
@ -336,8 +350,9 @@ struct bpf_link_update_opts {
|
||||
size_t sz; /* size of this struct for forward/backward compatibility */
|
||||
__u32 flags; /* extra flags */
|
||||
__u32 old_prog_fd; /* expected old program FD */
|
||||
__u32 old_map_fd; /* expected old map FD */
|
||||
};
|
||||
#define bpf_link_update_opts__last_field old_prog_fd
|
||||
#define bpf_link_update_opts__last_field old_map_fd
|
||||
|
||||
LIBBPF_API int bpf_link_update(int link_fd, int new_prog_fd,
|
||||
const struct bpf_link_update_opts *opts);
|
||||
|
@ -11,6 +11,7 @@ struct ksym_relo_desc {
|
||||
int insn_idx;
|
||||
bool is_weak;
|
||||
bool is_typeless;
|
||||
bool is_ld64;
|
||||
};
|
||||
|
||||
struct ksym_desc {
|
||||
@ -24,6 +25,7 @@ struct ksym_desc {
|
||||
bool typeless;
|
||||
};
|
||||
int insn;
|
||||
bool is_ld64;
|
||||
};
|
||||
|
||||
struct bpf_gen {
|
||||
@ -65,7 +67,7 @@ void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *value, __u
|
||||
void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx);
|
||||
void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *name, enum bpf_attach_type type);
|
||||
void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
|
||||
bool is_typeless, int kind, int insn_idx);
|
||||
bool is_typeless, bool is_ld64, int kind, int insn_idx);
|
||||
void bpf_gen__record_relo_core(struct bpf_gen *gen, const struct bpf_core_relo *core_relo);
|
||||
void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int key, int inner_map_idx);
|
||||
|
||||
|
@ -177,6 +177,11 @@ enum libbpf_tristate {
|
||||
#define __kptr_untrusted __attribute__((btf_type_tag("kptr_untrusted")))
|
||||
#define __kptr __attribute__((btf_type_tag("kptr")))
|
||||
|
||||
#define bpf_ksym_exists(sym) ({ \
|
||||
_Static_assert(!__builtin_constant_p(!!sym), #sym " should be marked as __weak"); \
|
||||
!!sym; \
|
||||
})
|
||||
|
||||
#ifndef ___bpf_concat
|
||||
#define ___bpf_concat(a, b) a ## b
|
||||
#endif
|
||||
|
@ -560,7 +560,7 @@ static void emit_find_attach_target(struct bpf_gen *gen)
|
||||
}
|
||||
|
||||
void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
|
||||
bool is_typeless, int kind, int insn_idx)
|
||||
bool is_typeless, bool is_ld64, int kind, int insn_idx)
|
||||
{
|
||||
struct ksym_relo_desc *relo;
|
||||
|
||||
@ -574,6 +574,7 @@ void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
|
||||
relo->name = name;
|
||||
relo->is_weak = is_weak;
|
||||
relo->is_typeless = is_typeless;
|
||||
relo->is_ld64 = is_ld64;
|
||||
relo->kind = kind;
|
||||
relo->insn_idx = insn_idx;
|
||||
gen->relo_cnt++;
|
||||
@ -586,9 +587,11 @@ static struct ksym_desc *get_ksym_desc(struct bpf_gen *gen, struct ksym_relo_des
|
||||
int i;
|
||||
|
||||
for (i = 0; i < gen->nr_ksyms; i++) {
|
||||
if (!strcmp(gen->ksyms[i].name, relo->name)) {
|
||||
gen->ksyms[i].ref++;
|
||||
return &gen->ksyms[i];
|
||||
kdesc = &gen->ksyms[i];
|
||||
if (kdesc->kind == relo->kind && kdesc->is_ld64 == relo->is_ld64 &&
|
||||
!strcmp(kdesc->name, relo->name)) {
|
||||
kdesc->ref++;
|
||||
return kdesc;
|
||||
}
|
||||
}
|
||||
kdesc = libbpf_reallocarray(gen->ksyms, gen->nr_ksyms + 1, sizeof(*kdesc));
|
||||
@ -603,6 +606,7 @@ static struct ksym_desc *get_ksym_desc(struct bpf_gen *gen, struct ksym_relo_des
|
||||
kdesc->ref = 1;
|
||||
kdesc->off = 0;
|
||||
kdesc->insn = 0;
|
||||
kdesc->is_ld64 = relo->is_ld64;
|
||||
return kdesc;
|
||||
}
|
||||
|
||||
@ -804,11 +808,13 @@ static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo,
|
||||
return;
|
||||
/* try to copy from existing ldimm64 insn */
|
||||
if (kdesc->ref > 1) {
|
||||
move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
|
||||
kdesc->insn + offsetof(struct bpf_insn, imm));
|
||||
move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
|
||||
kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
|
||||
/* jump over src_reg adjustment if imm is not 0, reuse BPF_REG_0 from move_blob2blob */
|
||||
move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
|
||||
kdesc->insn + offsetof(struct bpf_insn, imm));
|
||||
/* jump over src_reg adjustment if imm (btf_id) is not 0, reuse BPF_REG_0 from move_blob2blob
|
||||
* If btf_id is zero, clear BPF_PSEUDO_BTF_ID flag in src_reg of ld_imm64 insn
|
||||
*/
|
||||
emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3));
|
||||
goto clear_src_reg;
|
||||
}
|
||||
@ -831,7 +837,7 @@ static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo,
|
||||
emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7,
|
||||
sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
|
||||
/* skip src_reg adjustment */
|
||||
emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
|
||||
emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 3));
|
||||
clear_src_reg:
|
||||
/* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */
|
||||
reg_mask = src_reg_mask();
|
||||
@ -862,23 +868,17 @@ static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn
|
||||
{
|
||||
int insn;
|
||||
|
||||
pr_debug("gen: emit_relo (%d): %s at %d\n", relo->kind, relo->name, relo->insn_idx);
|
||||
pr_debug("gen: emit_relo (%d): %s at %d %s\n",
|
||||
relo->kind, relo->name, relo->insn_idx, relo->is_ld64 ? "ld64" : "call");
|
||||
insn = insns + sizeof(struct bpf_insn) * relo->insn_idx;
|
||||
emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_8, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, insn));
|
||||
switch (relo->kind) {
|
||||
case BTF_KIND_VAR:
|
||||
if (relo->is_ld64) {
|
||||
if (relo->is_typeless)
|
||||
emit_relo_ksym_typeless(gen, relo, insn);
|
||||
else
|
||||
emit_relo_ksym_btf(gen, relo, insn);
|
||||
break;
|
||||
case BTF_KIND_FUNC:
|
||||
} else {
|
||||
emit_relo_kfunc_btf(gen, relo, insn);
|
||||
break;
|
||||
default:
|
||||
pr_warn("Unknown relocation kind '%d'\n", relo->kind);
|
||||
gen->error = -EDOM;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
@ -901,18 +901,20 @@ static void cleanup_core_relo(struct bpf_gen *gen)
|
||||
|
||||
static void cleanup_relos(struct bpf_gen *gen, int insns)
|
||||
{
|
||||
struct ksym_desc *kdesc;
|
||||
int i, insn;
|
||||
|
||||
for (i = 0; i < gen->nr_ksyms; i++) {
|
||||
kdesc = &gen->ksyms[i];
|
||||
/* only close fds for typed ksyms and kfuncs */
|
||||
if (gen->ksyms[i].kind == BTF_KIND_VAR && !gen->ksyms[i].typeless) {
|
||||
if (kdesc->is_ld64 && !kdesc->typeless) {
|
||||
/* close fd recorded in insn[insn_idx + 1].imm */
|
||||
insn = gen->ksyms[i].insn;
|
||||
insn = kdesc->insn;
|
||||
insn += sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm);
|
||||
emit_sys_close_blob(gen, insn);
|
||||
} else if (gen->ksyms[i].kind == BTF_KIND_FUNC) {
|
||||
emit_sys_close_blob(gen, blob_fd_array_off(gen, gen->ksyms[i].off));
|
||||
if (gen->ksyms[i].off < MAX_FD_ARRAY_SZ)
|
||||
} else if (!kdesc->is_ld64) {
|
||||
emit_sys_close_blob(gen, blob_fd_array_off(gen, kdesc->off));
|
||||
if (kdesc->off < MAX_FD_ARRAY_SZ)
|
||||
gen->nr_fd_array--;
|
||||
}
|
||||
}
|
||||
|
@ -116,6 +116,7 @@ static const char * const attach_type_name[] = {
|
||||
[BPF_SK_REUSEPORT_SELECT_OR_MIGRATE] = "sk_reuseport_select_or_migrate",
|
||||
[BPF_PERF_EVENT] = "perf_event",
|
||||
[BPF_TRACE_KPROBE_MULTI] = "trace_kprobe_multi",
|
||||
[BPF_STRUCT_OPS] = "struct_ops",
|
||||
};
|
||||
|
||||
static const char * const link_type_name[] = {
|
||||
@ -215,9 +216,10 @@ static libbpf_print_fn_t __libbpf_pr = __base_pr;
|
||||
|
||||
libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
|
||||
{
|
||||
libbpf_print_fn_t old_print_fn = __libbpf_pr;
|
||||
libbpf_print_fn_t old_print_fn;
|
||||
|
||||
old_print_fn = __atomic_exchange_n(&__libbpf_pr, fn, __ATOMIC_RELAXED);
|
||||
|
||||
__libbpf_pr = fn;
|
||||
return old_print_fn;
|
||||
}
|
||||
|
||||
@ -226,8 +228,10 @@ void libbpf_print(enum libbpf_print_level level, const char *format, ...)
|
||||
{
|
||||
va_list args;
|
||||
int old_errno;
|
||||
libbpf_print_fn_t print_fn;
|
||||
|
||||
if (!__libbpf_pr)
|
||||
print_fn = __atomic_load_n(&__libbpf_pr, __ATOMIC_RELAXED);
|
||||
if (!print_fn)
|
||||
return;
|
||||
|
||||
old_errno = errno;
|
||||
@ -315,8 +319,8 @@ enum reloc_type {
|
||||
RELO_LD64,
|
||||
RELO_CALL,
|
||||
RELO_DATA,
|
||||
RELO_EXTERN_VAR,
|
||||
RELO_EXTERN_FUNC,
|
||||
RELO_EXTERN_LD64,
|
||||
RELO_EXTERN_CALL,
|
||||
RELO_SUBPROG_ADDR,
|
||||
RELO_CORE,
|
||||
};
|
||||
@ -467,6 +471,7 @@ struct bpf_struct_ops {
|
||||
#define KCONFIG_SEC ".kconfig"
|
||||
#define KSYMS_SEC ".ksyms"
|
||||
#define STRUCT_OPS_SEC ".struct_ops"
|
||||
#define STRUCT_OPS_LINK_SEC ".struct_ops.link"
|
||||
|
||||
enum libbpf_map_type {
|
||||
LIBBPF_MAP_UNSPEC,
|
||||
@ -596,6 +601,7 @@ struct elf_state {
|
||||
Elf64_Ehdr *ehdr;
|
||||
Elf_Data *symbols;
|
||||
Elf_Data *st_ops_data;
|
||||
Elf_Data *st_ops_link_data;
|
||||
size_t shstrndx; /* section index for section name strings */
|
||||
size_t strtabidx;
|
||||
struct elf_sec_desc *secs;
|
||||
@ -605,6 +611,7 @@ struct elf_state {
|
||||
int text_shndx;
|
||||
int symbols_shndx;
|
||||
int st_ops_shndx;
|
||||
int st_ops_link_shndx;
|
||||
};
|
||||
|
||||
struct usdt_manager;
|
||||
@ -1118,7 +1125,8 @@ static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
|
||||
static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
|
||||
int shndx, Elf_Data *data, __u32 map_flags)
|
||||
{
|
||||
const struct btf_type *type, *datasec;
|
||||
const struct btf_var_secinfo *vsi;
|
||||
@ -1129,15 +1137,15 @@ static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
|
||||
struct bpf_map *map;
|
||||
__u32 i;
|
||||
|
||||
if (obj->efile.st_ops_shndx == -1)
|
||||
if (shndx == -1)
|
||||
return 0;
|
||||
|
||||
btf = obj->btf;
|
||||
datasec_id = btf__find_by_name_kind(btf, STRUCT_OPS_SEC,
|
||||
datasec_id = btf__find_by_name_kind(btf, sec_name,
|
||||
BTF_KIND_DATASEC);
|
||||
if (datasec_id < 0) {
|
||||
pr_warn("struct_ops init: DATASEC %s not found\n",
|
||||
STRUCT_OPS_SEC);
|
||||
sec_name);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -1150,7 +1158,7 @@ static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
|
||||
type_id = btf__resolve_type(obj->btf, vsi->type);
|
||||
if (type_id < 0) {
|
||||
pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
|
||||
vsi->type, STRUCT_OPS_SEC);
|
||||
vsi->type, sec_name);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -1169,7 +1177,7 @@ static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
|
||||
if (IS_ERR(map))
|
||||
return PTR_ERR(map);
|
||||
|
||||
map->sec_idx = obj->efile.st_ops_shndx;
|
||||
map->sec_idx = shndx;
|
||||
map->sec_offset = vsi->offset;
|
||||
map->name = strdup(var_name);
|
||||
if (!map->name)
|
||||
@ -1179,6 +1187,7 @@ static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
|
||||
map->def.key_size = sizeof(int);
|
||||
map->def.value_size = type->size;
|
||||
map->def.max_entries = 1;
|
||||
map->def.map_flags = map_flags;
|
||||
|
||||
map->st_ops = calloc(1, sizeof(*map->st_ops));
|
||||
if (!map->st_ops)
|
||||
@ -1191,14 +1200,14 @@ static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
|
||||
if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
|
||||
return -ENOMEM;
|
||||
|
||||
if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) {
|
||||
if (vsi->offset + type->size > data->d_size) {
|
||||
pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
|
||||
var_name, STRUCT_OPS_SEC);
|
||||
var_name, sec_name);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
memcpy(st_ops->data,
|
||||
obj->efile.st_ops_data->d_buf + vsi->offset,
|
||||
data->d_buf + vsi->offset,
|
||||
type->size);
|
||||
st_ops->tname = tname;
|
||||
st_ops->type = type;
|
||||
@ -1211,6 +1220,19 @@ static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int bpf_object_init_struct_ops(struct bpf_object *obj)
|
||||
{
|
||||
int err;
|
||||
|
||||
err = init_struct_ops_maps(obj, STRUCT_OPS_SEC, obj->efile.st_ops_shndx,
|
||||
obj->efile.st_ops_data, 0);
|
||||
err = err ?: init_struct_ops_maps(obj, STRUCT_OPS_LINK_SEC,
|
||||
obj->efile.st_ops_link_shndx,
|
||||
obj->efile.st_ops_link_data,
|
||||
BPF_F_LINK);
|
||||
return err;
|
||||
}
|
||||
|
||||
static struct bpf_object *bpf_object__new(const char *path,
|
||||
const void *obj_buf,
|
||||
size_t obj_buf_sz,
|
||||
@@ -1247,6 +1269,7 @@ static struct bpf_object *bpf_object__new(const char *path,
 	obj->efile.obj_buf_sz = obj_buf_sz;
 	obj->efile.btf_maps_shndx = -1;
 	obj->efile.st_ops_shndx = -1;
+	obj->efile.st_ops_link_shndx = -1;
 	obj->kconfig_map_idx = -1;

 	obj->kern_version = get_kernel_version();
@@ -1264,6 +1287,7 @@ static void bpf_object__elf_finish(struct bpf_object *obj)
 	obj->efile.elf = NULL;
 	obj->efile.symbols = NULL;
 	obj->efile.st_ops_data = NULL;
+	obj->efile.st_ops_link_data = NULL;

 	zfree(&obj->efile.secs);
 	obj->efile.sec_cnt = 0;
@@ -2618,7 +2642,7 @@ static int bpf_object__init_maps(struct bpf_object *obj,
 	err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
 	err = err ?: bpf_object__init_global_data_maps(obj);
 	err = err ?: bpf_object__init_kconfig_map(obj);
-	err = err ?: bpf_object__init_struct_ops_maps(obj);
+	err = err ?: bpf_object_init_struct_ops(obj);

 	return err;
 }
@@ -2752,12 +2776,13 @@ static bool libbpf_needs_btf(const struct bpf_object *obj)
 {
 	return obj->efile.btf_maps_shndx >= 0 ||
 	       obj->efile.st_ops_shndx >= 0 ||
+	       obj->efile.st_ops_link_shndx >= 0 ||
 	       obj->nr_extern > 0;
 }

 static bool kernel_needs_btf(const struct bpf_object *obj)
 {
-	return obj->efile.st_ops_shndx >= 0;
+	return obj->efile.st_ops_shndx >= 0 || obj->efile.st_ops_link_shndx >= 0;
 }

 static int bpf_object__init_btf(struct bpf_object *obj,
@@ -3450,6 +3475,9 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
 		} else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
 			obj->efile.st_ops_data = data;
 			obj->efile.st_ops_shndx = idx;
+		} else if (strcmp(name, STRUCT_OPS_LINK_SEC) == 0) {
+			obj->efile.st_ops_link_data = data;
+			obj->efile.st_ops_link_shndx = idx;
 		} else {
 			pr_info("elf: skipping unrecognized data section(%d) %s\n",
 				idx, name);
@@ -3464,6 +3492,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
 		/* Only do relo for section with exec instructions */
 		if (!section_have_execinstr(obj, targ_sec_idx) &&
 		    strcmp(name, ".rel" STRUCT_OPS_SEC) &&
+		    strcmp(name, ".rel" STRUCT_OPS_LINK_SEC) &&
 		    strcmp(name, ".rel" MAPS_ELF_SEC)) {
 			pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
 				idx, name, targ_sec_idx,
@@ -4009,9 +4038,9 @@ static int bpf_program__record_reloc(struct bpf_program *prog,
 		pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
 			 prog->name, i, ext->name, ext->sym_idx, insn_idx);
 		if (insn->code == (BPF_JMP | BPF_CALL))
-			reloc_desc->type = RELO_EXTERN_FUNC;
+			reloc_desc->type = RELO_EXTERN_CALL;
 		else
-			reloc_desc->type = RELO_EXTERN_VAR;
+			reloc_desc->type = RELO_EXTERN_LD64;
 		reloc_desc->insn_idx = insn_idx;
 		reloc_desc->sym_off = i; /* sym_off stores extern index */
 		return 0;
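
[Illustration, not part of the pulled diff] The RELO_EXTERN_VAR/RELO_EXTERN_FUNC relocations become RELO_EXTERN_LD64/RELO_EXTERN_CALL because a kfunc extern can now also be referenced by an ld_imm64 (address-of), not only by a call (see the ld_imm64 comment added further down); that is what lets a program probe for a kfunc at load time. A BPF-side sketch of that pattern, assuming bpf_task_acquire()/bpf_task_release() are exposed as kfuncs on the target kernel and bpf_ksym_exists() is available from bpf_helpers.h:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

/* __weak + __ksym: the address is materialized via ld_imm64 and resolves
 * to 0 when the running kernel does not provide the kfunc.
 */
extern struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym __weak;
extern void bpf_task_release(struct task_struct *p) __ksym __weak;

SEC("tp_btf/task_newtask")
int BPF_PROG(handle_fork, struct task_struct *task, u64 clone_flags)
{
	struct task_struct *acquired;

	if (!bpf_ksym_exists(bpf_task_acquire))
		return 0;	/* kfunc missing on this kernel; bail out */

	acquired = bpf_task_acquire(task);
	if (acquired)
		bpf_task_release(acquired);
	return 0;
}
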
@@ -5855,7 +5884,7 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
 					       relo->map_idx, map);
 			}
 			break;
-		case RELO_EXTERN_VAR:
+		case RELO_EXTERN_LD64:
 			ext = &obj->externs[relo->sym_off];
 			if (ext->type == EXT_KCFG) {
 				if (obj->gen_loader) {
@@ -5877,7 +5906,7 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
 				}
 			}
 			break;
-		case RELO_EXTERN_FUNC:
+		case RELO_EXTERN_CALL:
 			ext = &obj->externs[relo->sym_off];
 			insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL;
 			if (ext->is_set) {
@@ -6115,7 +6144,7 @@ bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
 			continue;

 		relo = find_prog_insn_relo(prog, insn_idx);
-		if (relo && relo->type == RELO_EXTERN_FUNC)
+		if (relo && relo->type == RELO_EXTERN_CALL)
 			/* kfunc relocations will be handled later
 			 * in bpf_object__relocate_data()
 			 */
@@ -6610,7 +6639,7 @@ static int bpf_object__collect_relos(struct bpf_object *obj)
 			return -LIBBPF_ERRNO__INTERNAL;
 		}

-		if (idx == obj->efile.st_ops_shndx)
+		if (idx == obj->efile.st_ops_shndx || idx == obj->efile.st_ops_link_shndx)
 			err = bpf_object__collect_st_ops_relos(obj, shdr, data);
 		else if (idx == obj->efile.btf_maps_shndx)
 			err = bpf_object__collect_map_relos(obj, shdr, data);
@@ -7070,18 +7099,21 @@ static int bpf_program_record_relos(struct bpf_program *prog)
 	for (i = 0; i < prog->nr_reloc; i++) {
 		struct reloc_desc *relo = &prog->reloc_desc[i];
 		struct extern_desc *ext = &obj->externs[relo->sym_off];
+		int kind;

 		switch (relo->type) {
-		case RELO_EXTERN_VAR:
+		case RELO_EXTERN_LD64:
 			if (ext->type != EXT_KSYM)
 				continue;
+			kind = btf_is_var(btf__type_by_id(obj->btf, ext->btf_id)) ?
+				BTF_KIND_VAR : BTF_KIND_FUNC;
 			bpf_gen__record_extern(obj->gen_loader, ext->name,
 					       ext->is_weak, !ext->ksym.type_id,
-					       BTF_KIND_VAR, relo->insn_idx);
+					       true, kind, relo->insn_idx);
 			break;
-		case RELO_EXTERN_FUNC:
+		case RELO_EXTERN_CALL:
 			bpf_gen__record_extern(obj->gen_loader, ext->name,
-					       ext->is_weak, false, BTF_KIND_FUNC,
+					       ext->is_weak, false, false, BTF_KIND_FUNC,
 					       relo->insn_idx);
 			break;
 		case RELO_CORE: {
@@ -7533,6 +7565,12 @@ static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
 	ext->is_set = true;
 	ext->ksym.kernel_btf_id = kfunc_id;
 	ext->ksym.btf_fd_idx = mod_btf ? mod_btf->fd_array_idx : 0;
+	/* Also set kernel_btf_obj_fd to make sure that bpf_object__relocate_data()
+	 * populates FD into ld_imm64 insn when it's used to point to kfunc.
+	 * {kernel_btf_id, btf_fd_idx} -> fixup bpf_call.
+	 * {kernel_btf_id, kernel_btf_obj_fd} -> fixup ld_imm64.
+	 */
+	ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
 	pr_debug("extern (func ksym) '%s': resolved to kernel [%d]\n",
 		 ext->name, kfunc_id);

@@ -7677,6 +7715,37 @@ static int bpf_object__resolve_externs(struct bpf_object *obj,
 	return 0;
 }

+static void bpf_map_prepare_vdata(const struct bpf_map *map)
+{
+	struct bpf_struct_ops *st_ops;
+	__u32 i;
+
+	st_ops = map->st_ops;
+	for (i = 0; i < btf_vlen(st_ops->type); i++) {
+		struct bpf_program *prog = st_ops->progs[i];
+		void *kern_data;
+		int prog_fd;
+
+		if (!prog)
+			continue;
+
+		prog_fd = bpf_program__fd(prog);
+		kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
+		*(unsigned long *)kern_data = prog_fd;
+	}
+}
+
+static int bpf_object_prepare_struct_ops(struct bpf_object *obj)
+{
+	int i;
+
+	for (i = 0; i < obj->nr_maps; i++)
+		if (bpf_map__is_struct_ops(&obj->maps[i]))
+			bpf_map_prepare_vdata(&obj->maps[i]);
+
+	return 0;
+}
+
 static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
 {
 	int err, i;
@@ -7702,6 +7771,7 @@ static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const ch
 	err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
 	err = err ? : bpf_object__load_progs(obj, extra_log_level);
 	err = err ? : bpf_object_init_prog_arrays(obj);
+	err = err ? : bpf_object_prepare_struct_ops(obj);

 	if (obj->gen_loader) {
 		/* reset FDs */
@@ -8398,6 +8468,7 @@ int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
 		return libbpf_err(-EBUSY);

 	prog->type = type;
+	prog->sec_def = NULL;
 	return 0;
 }

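
[Illustration, not part of the pulled diff] The one-line addition above makes an explicit bpf_program__set_type() call also drop the SEC()-derived section handler, so load/attach follows the caller's override rather than the original section definition. A hypothetical user-space sketch of that call order; "prog.bpf.o" and "handler" are made-up names, and a real caller would keep the object around instead of closing it right away.

#include <errno.h>
#include <bpf/libbpf.h>

static int load_with_type_override(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	int err = 0;

	obj = bpf_object__open_file("prog.bpf.o", NULL);
	if (!obj)
		return -errno;

	prog = bpf_object__find_program_by_name(obj, "handler");
	if (!prog) {
		err = -ENOENT;
		goto out;
	}

	/* Override whatever SEC() implied; with the change above libbpf
	 * also forgets the section handler, so loading does not re-apply
	 * the old section's defaults on top of the override.
	 */
	err = bpf_program__set_type(prog, BPF_PROG_TYPE_RAW_TRACEPOINT);
	err = err ?: bpf_object__load(obj);
out:
	bpf_object__close(obj);
	return err;
}
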
@@ -8811,6 +8882,7 @@ const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t)
 }

 static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
+						     int sec_idx,
 						     size_t offset)
 {
 	struct bpf_map *map;
@@ -8820,7 +8892,8 @@ static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
 		map = &obj->maps[i];
 		if (!bpf_map__is_struct_ops(map))
 			continue;
-		if (map->sec_offset <= offset &&
+		if (map->sec_idx == sec_idx &&
+		    map->sec_offset <= offset &&
 		    offset - map->sec_offset < map->def.value_size)
 			return map;
 	}
@@ -8862,7 +8935,7 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
 		}

 		name = elf_sym_str(obj, sym->st_name) ?: "<?>";
-		map = find_struct_ops_map_by_offset(obj, rel->r_offset);
+		map = find_struct_ops_map_by_offset(obj, shdr->sh_info, rel->r_offset);
 		if (!map) {
 			pr_warn("struct_ops reloc: cannot find map at rel->r_offset %zu\n",
 				(size_t)rel->r_offset);
@@ -8929,8 +9002,9 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
 		}

 		/* struct_ops BPF prog can be re-used between multiple
-		 * .struct_ops as long as it's the same struct_ops struct
-		 * definition and the same function pointer field
+		 * .struct_ops & .struct_ops.link as long as it's the
+		 * same struct_ops struct definition and the same
+		 * function pointer field
 		 */
 		if (prog->attach_btf_id != st_ops->type_id ||
 		    prog->expected_attach_type != member_idx) {
@@ -9912,16 +9986,20 @@ static int append_to_file(const char *file, const char *fmt, ...)
 {
 	int fd, n, err = 0;
 	va_list ap;
+	char buf[1024];
+
+	va_start(ap, fmt);
+	n = vsnprintf(buf, sizeof(buf), fmt, ap);
+	va_end(ap);
+
+	if (n < 0 || n >= sizeof(buf))
+		return -EINVAL;

 	fd = open(file, O_WRONLY | O_APPEND | O_CLOEXEC, 0);
 	if (fd < 0)
 		return -errno;

-	va_start(ap, fmt);
-	n = vdprintf(fd, fmt, ap);
-	va_end(ap);
-
-	if (n < 0)
+	if (write(fd, buf, n) < 0)
 		err = -errno;

 	close(fd);
@@ -11566,22 +11644,30 @@ struct bpf_link *bpf_program__attach(const struct bpf_program *prog)
 	return link;
 }

+struct bpf_link_struct_ops {
+	struct bpf_link link;
+	int map_fd;
+};
+
 static int bpf_link__detach_struct_ops(struct bpf_link *link)
 {
+	struct bpf_link_struct_ops *st_link;
 	__u32 zero = 0;

-	if (bpf_map_delete_elem(link->fd, &zero))
-		return -errno;
+	st_link = container_of(link, struct bpf_link_struct_ops, link);

-	return 0;
+	if (st_link->map_fd < 0)
+		/* w/o a real link */
+		return bpf_map_delete_elem(link->fd, &zero);
+
+	return close(link->fd);
 }

 struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
 {
-	struct bpf_struct_ops *st_ops;
-	struct bpf_link *link;
-	__u32 i, zero = 0;
-	int err;
+	struct bpf_link_struct_ops *link;
+	__u32 zero = 0;
+	int err, fd;

 	if (!bpf_map__is_struct_ops(map) || map->fd == -1)
 		return libbpf_err_ptr(-EINVAL);
@@ -11590,31 +11676,72 @@ struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
 	if (!link)
 		return libbpf_err_ptr(-EINVAL);

-	st_ops = map->st_ops;
-	for (i = 0; i < btf_vlen(st_ops->type); i++) {
-		struct bpf_program *prog = st_ops->progs[i];
-		void *kern_data;
-		int prog_fd;
-
-		if (!prog)
-			continue;
-
-		prog_fd = bpf_program__fd(prog);
-		kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
-		*(unsigned long *)kern_data = prog_fd;
-	}
-
-	err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0);
-	if (err) {
-		err = -errno;
+	/* kern_vdata should be prepared during the loading phase. */
+	err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
+	/* It can be EBUSY if the map has been used to create or
+	 * update a link before. We don't allow updating the value of
+	 * a struct_ops once it is set. That ensures that the value
+	 * never changed. So, it is safe to skip EBUSY.
+	 */
+	if (err && (!(map->def.map_flags & BPF_F_LINK) || err != -EBUSY)) {
 		free(link);
 		return libbpf_err_ptr(err);
 	}

-	link->detach = bpf_link__detach_struct_ops;
-	link->fd = map->fd;
+	link->link.detach = bpf_link__detach_struct_ops;

-	return link;
+	if (!(map->def.map_flags & BPF_F_LINK)) {
+		/* w/o a real link */
+		link->link.fd = map->fd;
+		link->map_fd = -1;
+		return &link->link;
+	}
+
+	fd = bpf_link_create(map->fd, 0, BPF_STRUCT_OPS, NULL);
+	if (fd < 0) {
+		free(link);
+		return libbpf_err_ptr(fd);
+	}
+
+	link->link.fd = fd;
+	link->map_fd = map->fd;
+
+	return &link->link;
+}
+
+/*
+ * Swap the back struct_ops of a link with a new struct_ops map.
+ */
+int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map)
+{
+	struct bpf_link_struct_ops *st_ops_link;
+	__u32 zero = 0;
+	int err;
+
+	if (!bpf_map__is_struct_ops(map) || map->fd < 0)
+		return -EINVAL;
+
+	st_ops_link = container_of(link, struct bpf_link_struct_ops, link);
+	/* Ensure the type of a link is correct */
+	if (st_ops_link->map_fd < 0)
+		return -EINVAL;
+
+	err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
+	/* It can be EBUSY if the map has been used to create or
+	 * update a link before. We don't allow updating the value of
+	 * a struct_ops once it is set. That ensures that the value
+	 * never changed. So, it is safe to skip EBUSY.
+	 */
+	if (err && err != -EBUSY)
+		return err;
+
+	err = bpf_link_update(link->fd, map->fd, NULL);
+	if (err < 0)
+		return err;

+	st_ops_link->map_fd = map->fd;
+
+	return 0;
 }

 typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(struct perf_event_header *hdr,
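
[Illustration, not part of the pulled diff] A hedged user-space sketch of how the reworked attach path and the new bpf_link__update_map() might be used together. The skeleton type tcp_ca_skel and its ca_v1/ca_v2 maps are hypothetical; only the libbpf calls are the ones changed or added above, and both maps are assumed to come from SEC(".struct_ops.link") so they carry BPF_F_LINK.

#include <errno.h>
#include <stdio.h>
#include <bpf/libbpf.h>

/* tcp_ca_skel and its maps are a made-up skeleton for this sketch. */
static int switch_congestion_control(struct tcp_ca_skel *skel)
{
	struct bpf_link *link;
	int err;

	/* BPF_F_LINK map: this creates a real BPF link (bpf_link_create()
	 * with BPF_STRUCT_OPS) instead of only updating the map value.
	 */
	link = bpf_map__attach_struct_ops(skel->maps.ca_v1);
	if (!link)
		return -errno;

	/* Swap the struct_ops map backing the link without detaching it. */
	err = bpf_link__update_map(link, skel->maps.ca_v2);
	if (err)
		fprintf(stderr, "bpf_link__update_map: %d\n", err);

	bpf_link__destroy(link);
	return err;
}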