// SPDX-License-Identifier: GPL-2.0
/*
 * Performance events callchain code, extracted from core.c:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/perf_event.h>
#include <linux/slab.h>
#include <linux/sched/task_stack.h>

#include "internal.h"
struct callchain_cpus_entries {
	struct rcu_head			rcu_head;
	struct perf_callchain_entry	*cpu_entries[];
};

int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;
int sysctl_perf_event_max_contexts_per_stack __read_mostly = PERF_MAX_CONTEXTS_PER_STACK;
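
/*
 * Size of one perf_callchain_entry: the fixed header plus room for
 * sysctl_perf_event_max_stack real addresses and up to
 * sysctl_perf_event_max_contexts_per_stack PERF_CONTEXT_* markers.
 */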
static inline size_t perf_callchain_entry__sizeof(void)
{
	return (sizeof(struct perf_callchain_entry) +
		sizeof(__u64) * (sysctl_perf_event_max_stack +
				 sysctl_perf_event_max_contexts_per_stack));
}

static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
static struct callchain_cpus_entries *callchain_cpus_entries;
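
/*
 * Weak stubs, overridden by architectures that know how to unwind
 * kernel and/or user stacks.
 */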
__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
				  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
				struct pt_regs *regs)
{
}

static void release_callchain_buffers_rcu(struct rcu_head *head)
{
	struct callchain_cpus_entries *entries;
	int cpu;

	entries = container_of(head, struct callchain_cpus_entries, rcu_head);

	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);

	kfree(entries);
}
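
/*
 * Unpublish the buffers first so that no new reader can find them,
 * then free them after an RCU grace period via the callback above.
 */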
static void release_callchain_buffers(void)
{
	struct callchain_cpus_entries *entries;

	entries = callchain_cpus_entries;
	RCU_INIT_POINTER(callchain_cpus_entries, NULL);
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}
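
/*
 * Allocate one block of PERF_NR_CONTEXTS callchain entries for every
 * possible CPU and publish the result through callchain_cpus_entries.
 */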
static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	size = perf_callchain_entry__sizeof() * PERF_NR_CONTEXTS;

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
	}

	rcu_assign_pointer(callchain_cpus_entries, entries);

	return 0;

fail:
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}
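
/*
 * Take a reference on the callchain buffers, allocating them for the
 * first user. Must be paired with put_callchain_buffers().
 */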
int get_callchain_buffers(int event_max_stack)
{
	int err = 0;
	int count;

	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {
		err = -EINVAL;
		goto exit;
	}

	/*
	 * If requesting per event more than the global cap,
	 * return a different error to help userspace figure
	 * this out.
	 *
	 * And also do it here so that we have &callchain_mutex held.
	 */
	if (event_max_stack > sysctl_perf_event_max_stack) {
		err = -EOVERFLOW;
		goto exit;
	}

	if (count == 1)
		err = alloc_callchain_buffers();
exit:
	if (err)
		atomic_dec(&nr_callchain_events);

	mutex_unlock(&callchain_mutex);

	return err;
}
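
/*
 * Drop a reference taken by get_callchain_buffers(); the last one
 * releases the per-CPU buffers.
 */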
void put_callchain_buffers(void)
{
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}
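
/*
 * Grab this CPU's entry for the current recursion context, or NULL if
 * we are already recording a callchain in this context or the buffers
 * are gone.
 */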
static struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries)
		return NULL;

	cpu = smp_processor_id();

	return (((void *)entries->cpu_entries[cpu]) +
		(*rctx * perf_callchain_entry__sizeof()));
}

static void
put_callchain_entry(int rctx)
{
	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
}
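
/*
 * Fill one of the per-CPU entries with the kernel and/or user callchain
 * for @regs, bounded by @max_stack. Returns NULL when recursion within
 * the current context is detected or no buffer is available.
 */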
struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
		   u32 max_stack, bool crosstask, bool add_mark)
{
	struct perf_callchain_entry *entry;
	struct perf_callchain_entry_ctx ctx;
	int rctx;

	entry = get_callchain_entry(&rctx);
	if (rctx == -1)
		return NULL;

	if (!entry)
		goto exit_put;

	ctx.entry = entry;
	ctx.max_stack = max_stack;
	/*
	 * ctx.nr counts how many real addresses are in entry->ip[], so that
	 * it can be checked against max_stack, while entry->nr also includes
	 * the PERF_CONTEXT_{KERNEL,USER,...} markers stored alongside them.
	 */
	ctx.nr = entry->nr = init_nr;
	ctx.contexts = 0;
	ctx.contexts_maxed = false;

	if (kernel && !user_mode(regs)) {
		if (add_mark)
			perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(&ctx, regs);
	}

	if (user) {
		if (!user_mode(regs)) {
			if (current->mm)
				regs = task_pt_regs(current);
			else
				regs = NULL;
		}

		if (regs) {
			mm_segment_t fs;

			if (crosstask)
				goto exit_put;

			if (add_mark)
				perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);

			fs = get_fs();
			set_fs(USER_DS);
			perf_callchain_user(&ctx, regs);
			set_fs(fs);
		}
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}

/*
 * Used for sysctl_perf_event_max_stack and
 * sysctl_perf_event_max_contexts_per_stack.
 */
int perf_event_max_stack_handler(struct ctl_table *table, int write,
				 void *buffer, size_t *lenp, loff_t *ppos)
{
	int *value = table->data;
	int new_value = *value, ret;
	struct ctl_table new_table = *table;

	new_table.data = &new_value;
	ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	mutex_lock(&callchain_mutex);
	if (atomic_read(&nr_callchain_events))
		ret = -EBUSY;
	else
		*value = new_value;

	mutex_unlock(&callchain_mutex);

	return ret;
}