/*
 * Copyright 2010 ARM Ltd.
 * Copyright 2012 Advanced Micro Devices, Inc., Robert Richter
 *
 * Perf-events backend for OProfile.
 */
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/oprofile.h>
#include <linux/slab.h>

/*
 * Per performance monitor configuration as set via oprofilefs.
 */
struct op_counter_config {
	unsigned long count;
	unsigned long enabled;
	unsigned long event;
	unsigned long unit_mask;
	unsigned long kernel;
	unsigned long user;
	struct perf_event_attr attr;
};

static int oprofile_perf_enabled;
static DEFINE_MUTEX(oprofile_perf_mutex);
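
/*
 * counter_config holds one entry per hardware counter; each CPU's
 * perf_events array is indexed by the same counter number and holds
 * NULL while that counter has no active perf event.
 */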
static struct op_counter_config *counter_config;
static DEFINE_PER_CPU(struct perf_event **, perf_events);
static int num_counters;

/*
 * Overflow callback for oprofile.
 */
static void op_overflow_handler(struct perf_event *event,
			struct perf_sample_data *data, struct pt_regs *regs)
{
	int id;
	u32 cpu = smp_processor_id();

	/* Map the perf event back to its oprofile counter number. */
	for (id = 0; id < num_counters; ++id)
		if (per_cpu(perf_events, cpu)[id] == event)
			break;

	if (id != num_counters)
		oprofile_add_sample(regs, id);
	else
		pr_warning("oprofile: ignoring spurious overflow "
				"on cpu %u\n", cpu);
}

/*
 * Called by oprofile_perf_setup to create perf attributes to mirror the
 * oprofile settings in counter_config. Attributes are created as `pinned'
 * events and so are permanently scheduled on the PMU.
 */
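/*
 * The perf core moves a pinned event that cannot be put on the PMU into
 * an error state; op_create_counter() relies on this when it checks for
 * PERF_EVENT_STATE_ACTIVE after creating an event.
 */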
static void op_perf_setup(void)
{
	int i;
	u32 size = sizeof(struct perf_event_attr);
	struct perf_event_attr *attr;

	for (i = 0; i < num_counters; ++i) {
		attr = &counter_config[i].attr;
		memset(attr, 0, size);
		attr->type = PERF_TYPE_RAW;
		attr->size = size;
		attr->config = counter_config[i].event;
		attr->sample_period = counter_config[i].count;
		attr->pinned = 1;
	}
}
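
/*
 * Create and enable a kernel counter for one oprofile event on one CPU.
 * Nothing to do if the counter is disabled or the event already exists.
 */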
static int op_create_counter(int cpu, int event)
{
	struct perf_event *pevent;

	if (!counter_config[event].enabled || per_cpu(perf_events, cpu)[event])
		return 0;

	pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
						  cpu, NULL,
						  op_overflow_handler, NULL);

	if (IS_ERR(pevent))
		return PTR_ERR(pevent);

	/* The event was pinned but could not be scheduled on the PMU. */
	if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
		perf_event_release_kernel(pevent);
		pr_warning("oprofile: failed to enable event %d "
				"on CPU %d\n", event, cpu);
		return -EBUSY;
	}

	per_cpu(perf_events, cpu)[event] = pevent;

	return 0;
}
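
/* Release the event for one counter on one CPU, if it exists. */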
static void op_destroy_counter(int cpu, int event)
{
	struct perf_event *pevent = per_cpu(perf_events, cpu)[event];

	if (pevent) {
		perf_event_release_kernel(pevent);
		per_cpu(perf_events, cpu)[event] = NULL;
	}
}

/*
 * Called by oprofile_perf_start to create active perf events based on the
 * previously configured attributes.
 */
static int op_perf_start(void)
{
	int cpu, event, ret = 0;

	for_each_online_cpu(cpu) {
		for (event = 0; event < num_counters; ++event) {
			ret = op_create_counter(cpu, event);
			if (ret)
				return ret;
		}
	}

	return ret;
}

/*
 * Called by oprofile_perf_stop at the end of a profiling run.
 */
static void op_perf_stop(void)
{
	int cpu, event;

	for_each_online_cpu(cpu)
		for (event = 0; event < num_counters; ++event)
			op_destroy_counter(cpu, event);
}
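
/*
 * Create one oprofilefs directory per counter, each exposing the usual
 * oprofile control files (enabled, event, count, unit_mask, kernel, user).
 */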
static int oprofile_perf_create_files(struct dentry *root)
{
	unsigned int i;

	for (i = 0; i < num_counters; i++) {
		struct dentry *dir;
		char buf[4];

		snprintf(buf, sizeof buf, "%d", i);
		dir = oprofilefs_mkdir(root, buf);
		oprofilefs_create_ulong(dir, "enabled", &counter_config[i].enabled);
		oprofilefs_create_ulong(dir, "event", &counter_config[i].event);
		oprofilefs_create_ulong(dir, "count", &counter_config[i].count);
		oprofilefs_create_ulong(dir, "unit_mask", &counter_config[i].unit_mask);
		oprofilefs_create_ulong(dir, "kernel", &counter_config[i].kernel);
		oprofilefs_create_ulong(dir, "user", &counter_config[i].user);
	}

	return 0;
}
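
/*
 * oprofilefs_lock serializes op_perf_setup() against concurrent
 * oprofilefs writes that update counter_config.
 */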
static int oprofile_perf_setup(void)
{
	raw_spin_lock(&oprofilefs_lock);
	op_perf_setup();
	raw_spin_unlock(&oprofilefs_lock);
	return 0;
}

static int oprofile_perf_start(void)
{
	int ret = -EBUSY;

	mutex_lock(&oprofile_perf_mutex);
	if (!oprofile_perf_enabled) {
		ret = 0;
		op_perf_start();
		oprofile_perf_enabled = 1;
	}
	mutex_unlock(&oprofile_perf_mutex);

	return ret;
}

static void oprofile_perf_stop(void)
{
	mutex_lock(&oprofile_perf_mutex);
	if (oprofile_perf_enabled)
		op_perf_stop();
	oprofile_perf_enabled = 0;
	mutex_unlock(&oprofile_perf_mutex);
}
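
/*
 * Profiling is stopped across suspend and restarted on resume; if the
 * events cannot be re-created, profiling is left disabled.
 */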
#ifdef CONFIG_PM

static int oprofile_perf_suspend(struct platform_device *dev, pm_message_t state)
{
	mutex_lock(&oprofile_perf_mutex);
	if (oprofile_perf_enabled)
		op_perf_stop();
	mutex_unlock(&oprofile_perf_mutex);
	return 0;
}

static int oprofile_perf_resume(struct platform_device *dev)
{
	mutex_lock(&oprofile_perf_mutex);
	if (oprofile_perf_enabled && op_perf_start())
		oprofile_perf_enabled = 0;
	mutex_unlock(&oprofile_perf_mutex);
	return 0;
}

static struct platform_driver oprofile_driver = {
	.driver		= {
		.name		= "oprofile-perf",
	},
	.resume		= oprofile_perf_resume,
	.suspend	= oprofile_perf_suspend,
};

static struct platform_device *oprofile_pdev;

static int __init init_driverfs(void)
{
	int ret;

	ret = platform_driver_register(&oprofile_driver);
	if (ret)
		return ret;

	oprofile_pdev = platform_device_register_simple(
				oprofile_driver.driver.name, 0, NULL, 0);
	if (IS_ERR(oprofile_pdev)) {
		ret = PTR_ERR(oprofile_pdev);
		platform_driver_unregister(&oprofile_driver);
	}

	return ret;
}

static void exit_driverfs(void)
{
	platform_device_unregister(oprofile_pdev);
	platform_driver_unregister(&oprofile_driver);
}

#else

static inline int init_driverfs(void) { return 0; }
static inline void exit_driverfs(void) { }

#endif /* CONFIG_PM */
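
/*
 * Release any events that are still live, then free the per-CPU arrays
 * and the counter configuration. Also used to unwind a partially
 * completed oprofile_perf_init().
 */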
void oprofile_perf_exit(void)
{
	int cpu, id;
	struct perf_event *event;

	for_each_possible_cpu(cpu) {
		for (id = 0; id < num_counters; ++id) {
			event = per_cpu(perf_events, cpu)[id];
			if (event)
				perf_event_release_kernel(event);
		}

		kfree(per_cpu(perf_events, cpu));
	}

	kfree(counter_config);
	exit_driverfs();
}
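
/*
 * Set up the perf backend: allocate a per-CPU event array for every
 * possible CPU and point the oprofile_operations at the handlers above.
 */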
int __init oprofile_perf_init(struct oprofile_operations *ops)
{
	int cpu, ret = 0;

	ret = init_driverfs();
	if (ret)
		return ret;

	num_counters = perf_num_counters();
	if (num_counters <= 0) {
		pr_info("oprofile: no performance counters\n");
		ret = -ENODEV;
		goto out;
	}

	counter_config = kcalloc(num_counters,
			sizeof(struct op_counter_config), GFP_KERNEL);
	if (!counter_config) {
		pr_info("oprofile: failed to allocate %d "
				"counters\n", num_counters);
		ret = -ENOMEM;
		/* Keep oprofile_perf_exit() from walking the event arrays. */
		num_counters = 0;
		goto out;
	}

	for_each_possible_cpu(cpu) {
		per_cpu(perf_events, cpu) = kcalloc(num_counters,
				sizeof(struct perf_event *), GFP_KERNEL);
		if (!per_cpu(perf_events, cpu)) {
			pr_info("oprofile: failed to allocate %d perf events "
					"for cpu %d\n", num_counters, cpu);
			ret = -ENOMEM;
			goto out;
		}
	}

	ops->create_files	= oprofile_perf_create_files;
	ops->setup		= oprofile_perf_setup;
	ops->start		= oprofile_perf_start;
	ops->stop		= oprofile_perf_stop;
	ops->shutdown		= oprofile_perf_stop;
	ops->cpu_type		= op_name_from_perf_id();

	if (!ops->cpu_type)
		ret = -ENODEV;
	else
		pr_info("oprofile: using %s\n", ops->cpu_type);

out:
	if (ret)
		oprofile_perf_exit();

	return ret;
}