/**
 * @file oprof.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/oprofile.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/time.h>
#include <linux/mutex.h>

#include "oprof.h"
#include "event_buffer.h"
#include "cpu_buffer.h"
#include "buffer_sync.h"
#include "oprofile_stats.h"

struct oprofile_operations oprofile_ops;

unsigned long oprofile_started;
unsigned long oprofile_backtrace_depth;
static unsigned long is_setup;
static DEFINE_MUTEX(start_mutex);

/* timer
   0 - use performance monitoring hardware if available
   1 - use the timer int mechanism regardless
 */
static int timer = 0;
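
/*
 * oprofile_setup() allocates the per-CPU and event buffers, runs the
 * backend's setup hook and starts the sync code, all under start_mutex.
 * On failure, each step already taken is unwound via the out* labels.
 */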
int oprofile_setup(void)
{
	int err;

	mutex_lock(&start_mutex);

	if ((err = alloc_cpu_buffers()))
		goto out;

	if ((err = alloc_event_buffer()))
		goto out1;

	if (oprofile_ops.setup && (err = oprofile_ops.setup()))
		goto out2;

	/* Note even though this starts part of the
	 * profiling overhead, it's necessary to prevent
	 * us missing task deaths and eventually oopsing
	 * when trying to process the event buffer.
	 */
	if (oprofile_ops.sync_start) {
		int sync_ret = oprofile_ops.sync_start();
		switch (sync_ret) {
		case 0:
			goto post_sync;
		case 1:
			goto do_generic;
		case -1:
			goto out3;
		default:
			goto out3;
		}
	}
do_generic:
	if ((err = sync_start()))
		goto out3;

post_sync:
	is_setup = 1;
	mutex_unlock(&start_mutex);
	return 0;

out3:
	if (oprofile_ops.shutdown)
		oprofile_ops.shutdown();
out2:
	free_event_buffer();
out1:
	free_cpu_buffers();
out:
	mutex_unlock(&start_mutex);
	return err;
}
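
/*
 * Event multiplexing: if the backend implements switch_events(), a
 * delayed work item rotates the active event set every
 * oprofile_time_slice jiffies so that more events can be profiled
 * than the hardware has counters for.
 */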
#ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX

static void switch_worker(struct work_struct *work);
static DECLARE_DELAYED_WORK(switch_work, switch_worker);

static void start_switch_worker(void)
{
	if (oprofile_ops.switch_events)
		schedule_delayed_work(&switch_work, oprofile_time_slice);
}

static void stop_switch_worker(void)
{
	cancel_delayed_work_sync(&switch_work);
}

static void switch_worker(struct work_struct *work)
{
	if (oprofile_ops.switch_events())
		return;

	atomic_inc(&oprofile_stats.multiplex_counter);
	start_switch_worker();
}

/* User inputs in ms, converts to jiffies */
int oprofile_set_timeout(unsigned long val_msec)
{
	int err = 0;
	unsigned long time_slice;

	mutex_lock(&start_mutex);

	if (oprofile_started) {
		err = -EBUSY;
		goto out;
	}

	if (!oprofile_ops.switch_events) {
		err = -EINVAL;
		goto out;
	}

	time_slice = msecs_to_jiffies(val_msec);
	if (time_slice == MAX_JIFFY_OFFSET) {
		err = -EINVAL;
		goto out;
	}

	oprofile_time_slice = time_slice;

out:
	mutex_unlock(&start_mutex);
	return err;
}

#else

static inline void start_switch_worker(void) { }
static inline void stop_switch_worker(void) { }

#endif

/* Actually start profiling (echo 1>/dev/oprofile/enable) */
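/*
 * Profiling may only be started once oprofile_setup() has succeeded:
 * the statistics are reset and the backend's start hook runs before
 * the multiplexing worker is scheduled.
 */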
int oprofile_start(void)
{
	int err = -EINVAL;

	mutex_lock(&start_mutex);

	if (!is_setup)
		goto out;

	err = 0;

	if (oprofile_started)
		goto out;

	oprofile_reset_stats();

	if ((err = oprofile_ops.start()))
		goto out;

	start_switch_worker();

	oprofile_started = 1;
out:
	mutex_unlock(&start_mutex);
	return err;
}
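
/*
 * Stopping does not free any buffers: the event buffer is kept around
 * so the daemon can drain what remains, hence the final wakeup.
 */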
/* echo 0>/dev/oprofile/enable */
void oprofile_stop(void)
{
	mutex_lock(&start_mutex);
	if (!oprofile_started)
		goto out;
	oprofile_ops.stop();
	oprofile_started = 0;

	stop_switch_worker();

	/* wake up the daemon to read what remains */
	wake_up_buffer_waiter();
out:
	mutex_unlock(&start_mutex);
}
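
/*
 * oprofile_shutdown() is the inverse of oprofile_setup(): it stops the
 * sync code, calls the backend's shutdown hook and frees the event and
 * CPU buffers.
 */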
void oprofile_shutdown(void)
{
	mutex_lock(&start_mutex);
	if (oprofile_ops.sync_stop) {
		int sync_ret = oprofile_ops.sync_stop();
		switch (sync_ret) {
		case 0:
			goto post_sync;
		case 1:
			goto do_generic;
		default:
			goto post_sync;
		}
	}
do_generic:
	sync_stop();
post_sync:
	if (oprofile_ops.shutdown)
		oprofile_ops.shutdown();
	is_setup = 0;
	free_event_buffer();
	free_cpu_buffers();
	mutex_unlock(&start_mutex);
}
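
/*
 * Helper for updating an unsigned long setting: the value may only be
 * changed while profiling is stopped.
 */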
int oprofile_set_ulong(unsigned long *addr, unsigned long val)
{
	int err = -EBUSY;

	mutex_lock(&start_mutex);
	if (!oprofile_started) {
		*addr = val;
		err = 0;
	}
	mutex_unlock(&start_mutex);

	return err;
}

static int timer_mode;
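
/*
 * Initialization always calls oprofile_arch_init() first so that
 * backtrace support gets set up.  If the architecture backend cannot
 * be used, the module falls back to timer mode: an NMI-based timer
 * where available, or the plain timer interrupt.  Setting the
 * oprofile.timer parameter forces the plain timer interrupt.
 */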
static int __init oprofile_init(void)
{
	int err;

	/* always init architecture to setup backtrace support */
	timer_mode = 0;
	err = oprofile_arch_init(&oprofile_ops);
	if (!err) {
		if (!timer && !oprofilefs_register())
			return 0;
		oprofile_arch_exit();
	}

	/* setup timer mode: */
	timer_mode = 1;
	/* no nmi timer mode if oprofile.timer is set */
	if (timer || op_nmi_timer_init(&oprofile_ops)) {
		err = oprofile_timer_init(&oprofile_ops);
		if (err)
			return err;
	}

	return oprofilefs_register();
}


static void __exit oprofile_exit(void)
{
	oprofilefs_unregister();
	if (!timer_mode)
		oprofile_arch_exit();
}


module_init(oprofile_init);
module_exit(oprofile_exit);

module_param_named(timer, timer, int, 0644);
MODULE_PARM_DESC(timer, "force use of timer interrupt");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Levon <levon@movementarian.org>");
MODULE_DESCRIPTION("OProfile system profiler");