// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2019 Intel Corporation. */
#include <linux/hash.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/static_call.h>
/* The BPF dispatcher is a multiway branch code generator. The
 * dispatcher is a mechanism to avoid the performance penalty of an
 * indirect call, which is expensive when retpolines are enabled. A
 * dispatch client registers a BPF program into the dispatcher, and if
 * there is available room in the dispatcher a direct call to the BPF
 * program will be generated. All calls to the BPF programs called via
 * the dispatcher will then be a direct call, instead of an indirect
 * one. The dispatcher hijacks a trampoline function via the
 * __fentry__ of the trampoline. The trampoline function has the
 * following signature:
 *
 * unsigned int trampoline(const void *ctx, const struct bpf_insn *insnsi,
 *                         unsigned int (*bpf_func)(const void *,
 *                                                  const struct bpf_insn *));
 */
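
/* A minimal usage sketch (not part of this file; it assumes the
 * DEFINE_BPF_DISPATCHER() and BPF_DISPATCHER_PTR() helpers declared in
 * include/linux/bpf.h, and the 'example' names are placeholders): a
 * subsystem defines one dispatcher and swaps programs in and out with
 * bpf_dispatcher_change_prog(); passing NULL for either argument
 * attaches or detaches a single program:
 *
 *	DEFINE_BPF_DISPATCHER(example)
 *
 *	static void example_set_prog(struct bpf_prog *old, struct bpf_prog *new)
 *	{
 *		bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(example), old, new);
 *	}
 */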
static struct bpf_dispatcher_prog *bpf_dispatcher_find_prog(
	struct bpf_dispatcher *d, struct bpf_prog *prog)
{
	int i;

	for (i = 0; i < BPF_DISPATCHER_MAX; i++) {
		if (prog == d->progs[i].prog)
			return &d->progs[i];
	}
	return NULL;
}

static struct bpf_dispatcher_prog *bpf_dispatcher_find_free(
	struct bpf_dispatcher *d)
{
	return bpf_dispatcher_find_prog(d, NULL);
}

static bool bpf_dispatcher_add_prog(struct bpf_dispatcher *d,
				    struct bpf_prog *prog)
{
	struct bpf_dispatcher_prog *entry;

	if (!prog)
		return false;

	entry = bpf_dispatcher_find_prog(d, prog);
	if (entry) {
		refcount_inc(&entry->users);
		return false;
	}

	entry = bpf_dispatcher_find_free(d);
	if (!entry)
		return false;

	bpf_prog_inc(prog);
	entry->prog = prog;
	refcount_set(&entry->users, 1);
	d->num_progs++;
	return true;
}

static bool bpf_dispatcher_remove_prog(struct bpf_dispatcher *d,
				       struct bpf_prog *prog)
{
	struct bpf_dispatcher_prog *entry;

	if (!prog)
		return false;

	entry = bpf_dispatcher_find_prog(d, prog);
	if (!entry)
		return false;

	if (refcount_dec_and_test(&entry->users)) {
		entry->prog = NULL;
		bpf_prog_put(prog);
		d->num_progs--;
		return true;
	}
	return false;
}

int __weak arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs)
{
	return -ENOTSUPP;
}

static int bpf_dispatcher_prepare(struct bpf_dispatcher *d, void *image, void *buf)
{
	s64 ips[BPF_DISPATCHER_MAX] = {}, *ipsp = &ips[0];
	int i;

	for (i = 0; i < BPF_DISPATCHER_MAX; i++) {
		if (d->progs[i].prog)
			*ipsp++ = (s64)(uintptr_t)d->progs[i].prog->bpf_func;
	}
	return arch_prepare_bpf_dispatcher(image, buf, &ips[0], d->num_progs);
}
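
/* The dispatcher image is a single page used as two halves: the half
 * selected by d->image_off is live, the other half is spare. An update
 * emits the new branch code into the spare half, switches the static
 * call over to it, and only then records the new offset, so callers
 * never execute a partially written image.
 */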
static void bpf_dispatcher_update(struct bpf_dispatcher *d, int prev_num_progs)
{
	void *new, *tmp;
	u32 noff = 0;

	if (prev_num_progs)
		noff = d->image_off ^ (PAGE_SIZE / 2);

	new = d->num_progs ? d->image + noff : NULL;
	tmp = d->num_progs ? d->rw_image + noff : NULL;
	if (new) {
		/* Prepare the dispatcher in d->rw_image. Then use
		 * bpf_arch_text_copy to update d->image, which is RO+X.
		 */
		if (bpf_dispatcher_prepare(d, new, tmp))
			return;
		if (IS_ERR(bpf_arch_text_copy(new, tmp, PAGE_SIZE / 2)))
			return;
	}

	/* The explicit cast to void * matches the final argument of
	 * __static_call_update(), which __BPF_DISPATCHER_UPDATE() expands
	 * to, and avoids a clang -Wpointer-type-mismatch error in the
	 * conditional expression.
	 */
	__BPF_DISPATCHER_UPDATE(d, new ?: (void *)&bpf_dispatcher_nop_func);

	if (new)
		d->image_off = noff;
}

void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
				struct bpf_prog *to)
{
	bool changed = false;
	int prev_num_progs;

	if (from == to)
		return;

	mutex_lock(&d->mutex);
	if (!d->image) {
		d->image = bpf_prog_pack_alloc(PAGE_SIZE, bpf_jit_fill_hole_with_zero);
		if (!d->image)
			goto out;
		d->rw_image = bpf_jit_alloc_exec(PAGE_SIZE);
		if (!d->rw_image) {
			u32 size = PAGE_SIZE;

			bpf_arch_text_copy(d->image, &size, sizeof(size));
			bpf_prog_pack_free((struct bpf_binary_header *)d->image);
			d->image = NULL;
			goto out;
		}
		bpf_image_ksym_add(d->image, &d->ksym);
	}

	prev_num_progs = d->num_progs;
	changed |= bpf_dispatcher_remove_prog(d, from);
	changed |= bpf_dispatcher_add_prog(d, to);
	if (!changed)
		goto out;

	bpf_dispatcher_update(d, prev_num_progs);
out:
	mutex_unlock(&d->mutex);
}