// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2020 Facebook
 * Copyright 2020 Google LLC.
 */

#include <linux/pid.h>
#include <linux/sched.h>
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/bpf_local_storage.h>
#include <linux/filter.h>
#include <uapi/linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/fdtable.h>

DEFINE_BPF_STORAGE_CACHE(task_cache);

static DEFINE_PER_CPU(int, bpf_task_storage_busy);
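
/* Task-storage maps are exercised both from syscall context and from BPF
 * programs, including tracing programs that can fire while this file
 * already holds local_storage->lock on the same CPU. The per-CPU
 * bpf_task_storage_busy counter marks "this CPU is inside task-storage
 * code": the syscall paths take it unconditionally via
 * bpf_task_storage_lock(), while the BPF helpers use
 * bpf_task_storage_trylock() and back off (NULL or -EBUSY) instead of
 * deadlocking on recursion. migrate_disable() keeps the program on the
 * CPU so the per-CPU counter stays meaningful.
 */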
static void bpf_task_storage_lock(void)
{
	migrate_disable();
	__this_cpu_inc(bpf_task_storage_busy);
}

static void bpf_task_storage_unlock(void)
{
	__this_cpu_dec(bpf_task_storage_busy);
	migrate_enable();
}

static bool bpf_task_storage_trylock(void)
{
	migrate_disable();
	if (unlikely(__this_cpu_inc_return(bpf_task_storage_busy) != 1)) {
		__this_cpu_dec(bpf_task_storage_busy);
		migrate_enable();
		return false;
	}
	return true;
}
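
/* Callback for the generic bpf_local_storage infrastructure: returns the
 * location of the owner's storage pointer, which for task storage is
 * task_struct::bpf_storage.
 */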
static struct bpf_local_storage __rcu **task_storage_ptr(void *owner)
{
	struct task_struct *task = owner;

	return &task->bpf_storage;
}

static struct bpf_local_storage_data *
task_storage_lookup(struct task_struct *task, struct bpf_map *map,
		    bool cacheit_lockit)
{
	struct bpf_local_storage *task_storage;
	struct bpf_local_storage_map *smap;

	task_storage = rcu_dereference(task->bpf_storage);
	if (!task_storage)
		return NULL;

	smap = (struct bpf_local_storage_map *)map;
	return bpf_local_storage_lookup(task_storage, smap, cacheit_lockit);
}
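
/* Detach and free every storage element still attached to @task. The
 * caller lives outside this file, on the task teardown path, at a point
 * where neither a bpf_prog nor a bpf(2) syscall can still be touching
 * this task's storage list.
 */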
void bpf_task_storage_free(struct task_struct *task)
{
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage *local_storage;
	bool free_task_storage = false;
	struct hlist_node *n;
	unsigned long flags;

	rcu_read_lock();

	local_storage = rcu_dereference(task->bpf_storage);
	if (!local_storage) {
		rcu_read_unlock();
		return;
	}

	/* Neither the bpf_prog nor the bpf-map's syscall
	 * could be modifying the local_storage->list now.
	 * Thus, no elem can be added-to or deleted-from the
	 * local_storage->list by the bpf_prog or by the bpf-map's syscall.
	 *
	 * It is racing with bpf_local_storage_map_free() alone
	 * when unlinking elem from the local_storage->list and
	 * the map's bucket->list.
	 */
	bpf_task_storage_lock();
	raw_spin_lock_irqsave(&local_storage->lock, flags);
	hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
		/* Always unlink from map before unlinking from
		 * local_storage.
		 */
		bpf_selem_unlink_map(selem);
		free_task_storage = bpf_selem_unlink_storage_nolock(
			local_storage, selem, false);
	}
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	bpf_task_storage_unlock();
	rcu_read_unlock();

	/* free_task_storage should always be true as long as
	 * local_storage->list was non-empty.
	 */
	if (free_task_storage)
		kfree_rcu(local_storage, rcu);
}
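
/* The bpf(2) map handlers below take a pidfd as the map key and resolve
 * it to a task. Illustrative user-space usage (not part of this file;
 * map_fd and struct value are placeholders, error handling omitted):
 *
 *	int pidfd = syscall(SYS_pidfd_open, target_pid, 0);
 *	struct value v;
 *
 *	bpf_map_lookup_elem(map_fd, &pidfd, &v);
 */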
static void *bpf_pid_task_storage_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_local_storage_data *sdata;
	struct task_struct *task;
	unsigned int f_flags;
	struct pid *pid;
	int fd, err;

	fd = *(int *)key;
	pid = pidfd_get_pid(fd, &f_flags);
	if (IS_ERR(pid))
		return ERR_CAST(pid);

	/* We should be in an RCU read side critical section, it should be safe
	 * to call pid_task.
	 */
	WARN_ON_ONCE(!rcu_read_lock_held());
	task = pid_task(pid, PIDTYPE_PID);
	if (!task) {
		err = -ENOENT;
		goto out;
	}

	bpf_task_storage_lock();
	sdata = task_storage_lookup(task, map, true);
	bpf_task_storage_unlock();
	put_pid(pid);
	return sdata ? sdata->data : NULL;
out:
	put_pid(pid);
	return ERR_PTR(err);
}

static int bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key,
					    void *value, u64 map_flags)
{
	struct bpf_local_storage_data *sdata;
	struct task_struct *task;
	unsigned int f_flags;
	struct pid *pid;
	int fd, err;

	fd = *(int *)key;
	pid = pidfd_get_pid(fd, &f_flags);
	if (IS_ERR(pid))
		return PTR_ERR(pid);

	/* We should be in an RCU read side critical section, it should be safe
	 * to call pid_task.
	 */
	WARN_ON_ONCE(!rcu_read_lock_held());
	task = pid_task(pid, PIDTYPE_PID);
	if (!task) {
		err = -ENOENT;
		goto out;
	}

	bpf_task_storage_lock();
	sdata = bpf_local_storage_update(
		task, (struct bpf_local_storage_map *)map, value, map_flags);
	bpf_task_storage_unlock();

	err = PTR_ERR_OR_ZERO(sdata);
out:
	put_pid(pid);
	return err;
}
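
/* Common delete path shared by bpf_pid_task_storage_delete_elem() and
 * the bpf_task_storage_delete() helper; the caller must already hold the
 * per-CPU busy counter, via bpf_task_storage_lock() or a successful
 * bpf_task_storage_trylock().
 */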
static int task_storage_delete(struct task_struct *task, struct bpf_map *map)
{
	struct bpf_local_storage_data *sdata;

	sdata = task_storage_lookup(task, map, false);
	if (!sdata)
		return -ENOENT;

	bpf_selem_unlink(SELEM(sdata));

	return 0;
}

static int bpf_pid_task_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct task_struct *task;
	unsigned int f_flags;
	struct pid *pid;
	int fd, err;

	fd = *(int *)key;
	pid = pidfd_get_pid(fd, &f_flags);
	if (IS_ERR(pid))
		return PTR_ERR(pid);

	/* We should be in an RCU read side critical section, it should be safe
	 * to call pid_task.
	 */
	WARN_ON_ONCE(!rcu_read_lock_held());
	task = pid_task(pid, PIDTYPE_PID);
	if (!task) {
		err = -ENOENT;
		goto out;
	}

	bpf_task_storage_lock();
	err = task_storage_delete(task, map);
	bpf_task_storage_unlock();
out:
	put_pid(pid);
	return err;
}
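
/* Program-facing helper behind bpf_task_storage_get(). On any failure,
 * including a bpf_task_storage_trylock() collision, it returns NULL
 * rather than an error code, so programs must NULL-check the result.
 * Illustrative BPF-side usage (hypothetical map and value type, not from
 * this file):
 *
 *	struct val *v;
 *
 *	v = bpf_task_storage_get(&task_map, task, NULL,
 *				 BPF_LOCAL_STORAGE_GET_F_CREATE);
 *	if (v)
 *		__sync_fetch_and_add(&v->cnt, 1);
 */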
BPF_CALL_4(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
	   task, void *, value, u64, flags)
{
	struct bpf_local_storage_data *sdata;

	if (flags & ~(BPF_LOCAL_STORAGE_GET_F_CREATE))
		return (unsigned long)NULL;

	if (!task)
		return (unsigned long)NULL;

	if (!bpf_task_storage_trylock())
		return (unsigned long)NULL;

	sdata = task_storage_lookup(task, map, true);
	if (sdata)
		goto unlock;

	/* only allocate new storage, when the task is refcounted */
	if (refcount_read(&task->usage) &&
	    (flags & BPF_LOCAL_STORAGE_GET_F_CREATE))
		sdata = bpf_local_storage_update(
			task, (struct bpf_local_storage_map *)map, value,
			BPF_NOEXIST);

unlock:
	bpf_task_storage_unlock();
	return IS_ERR_OR_NULL(sdata) ? (unsigned long)NULL :
		(unsigned long)sdata->data;
}

BPF_CALL_2(bpf_task_storage_delete, struct bpf_map *, map, struct task_struct *,
	   task)
{
	int ret;

	if (!task)
		return -EINVAL;

	if (!bpf_task_storage_trylock())
		return -EBUSY;

	/* This helper must only be called from places where the lifetime of the task
	 * is guaranteed. Either by being refcounted or by being protected
	 * by an RCU read-side critical section.
	 */
	ret = task_storage_delete(task, map);
	bpf_task_storage_unlock();
	return ret;
}

static int notsupp_get_next_key(struct bpf_map *map, void *key,
				void *next_key)
{
	return -ENOTSUPP;
}
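
/* Each task-storage map claims a slot in the small per-owner cache kept
 * by the generic bpf_local_storage code, so lookups on recently used
 * maps can skip the list walk; the slot is given back in
 * task_storage_map_free().
 */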
static struct bpf_map *task_storage_map_alloc(union bpf_attr *attr)
{
	struct bpf_local_storage_map *smap;

	smap = bpf_local_storage_map_alloc(attr);
	if (IS_ERR(smap))
		return ERR_CAST(smap);

	smap->cache_idx = bpf_local_storage_cache_idx_get(&task_cache);
	return &smap->map;
}
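
/* The per-CPU busy counter is handed to the generic free path so that it
 * can coordinate with the trylock scheme above while unlinking any
 * remaining elements (see bpf_local_storage_map_free() for the details).
 */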
static void task_storage_map_free(struct bpf_map *map)
{
	struct bpf_local_storage_map *smap;

	smap = (struct bpf_local_storage_map *)map;
	bpf_local_storage_cache_idx_free(&task_cache, smap->cache_idx);
	bpf_local_storage_map_free(smap, &bpf_task_storage_busy);
}

static int task_storage_map_btf_id;
const struct bpf_map_ops task_storage_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bpf_local_storage_map_alloc_check,
	.map_alloc = task_storage_map_alloc,
	.map_free = task_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_pid_task_storage_lookup_elem,
	.map_update_elem = bpf_pid_task_storage_update_elem,
	.map_delete_elem = bpf_pid_task_storage_delete_elem,
	.map_check_btf = bpf_local_storage_map_check_btf,
	.map_btf_name = "bpf_local_storage_map",
	.map_btf_id = &task_storage_map_btf_id,
	.map_owner_storage_ptr = task_storage_ptr,
};

BTF_ID_LIST_SINGLE(bpf_task_storage_btf_ids, struct, task_struct)

const struct bpf_func_proto bpf_task_storage_get_proto = {
	.func = bpf_task_storage_get,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID,
	.arg2_btf_id = &bpf_task_storage_btf_ids[0],
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_task_storage_delete_proto = {
	.func = bpf_task_storage_delete,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID,
	.arg2_btf_id = &bpf_task_storage_btf_ids[0],
};