/**
 * @file event_buffer.c
 *
 * @remark Copyright 2002 OProfile authors
 * @remark Read the file COPYING
 *
 * @author John Levon <levon@movementarian.org>
 *
 * This is the global event buffer that the user-space
 * daemon reads from. The event buffer is an untyped array
 * of unsigned longs. Entries are prefixed by the
 * escape value ESCAPE_CODE followed by an identifying code.
 */
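
/*
 * Entry format sketch (illustrative, not part of this file; the
 * identifying codes are defined in event_buffer.h). A producer such
 * as the buffer-sync code might emit a CPU switch record as:
 *
 *	add_event_entry(ESCAPE_CODE);
 *	add_event_entry(CPU_SWITCH_CODE);
 *	add_event_entry(cpu);
 *
 * Plain sample words carry no escape prefix.
 */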

#include <linux/vmalloc.h>
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/dcookies.h>
#include <linux/fs.h>
#include <asm/uaccess.h>

#include "oprof.h"
#include "event_buffer.h"
#include "oprofile_stats.h"

DEFINE_MUTEX(buffer_mutex);

static unsigned long buffer_opened;
static DECLARE_WAIT_QUEUE_HEAD(buffer_wait);
static unsigned long *event_buffer;
static unsigned long buffer_size;
static unsigned long buffer_watershed;
static size_t buffer_pos;

/* atomic_t because wait_event checks it outside of buffer_mutex */
static atomic_t buffer_ready = ATOMIC_INIT(0);

/*
 * Add an entry to the event buffer. When we get near to the end we
 * wake up the process sleeping on the read() of the file. To protect
 * the event_buffer this function may only be called when buffer_mutex
 * is held.
 */
void add_event_entry(unsigned long value)
{
	/*
	 * This shouldn't happen since all workqueues or handlers are
	 * canceled or flushed before the event buffer is freed.
	 */
	if (!event_buffer) {
		WARN_ON_ONCE(1);
		return;
	}

	if (buffer_pos == buffer_size) {
		atomic_inc(&oprofile_stats.event_lost_overflow);
		return;
	}

	event_buffer[buffer_pos] = value;
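	/*
	 * The watershed leaves headroom: readers are woken once the
	 * buffer is within buffer_watershed entries of being full, so
	 * the daemon can drain it before entries start being dropped.
	 */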
	if (++buffer_pos == buffer_size - buffer_watershed) {
		atomic_set(&buffer_ready, 1);
		wake_up(&buffer_wait);
	}
}

/* Wake up the waiting process if any. This happens
 * on "echo 0 >/dev/oprofile/enable" so the daemon
 * processes the data remaining in the event buffer.
 */
void wake_up_buffer_waiter(void)
{
	mutex_lock(&buffer_mutex);
	atomic_set(&buffer_ready, 1);
	wake_up(&buffer_wait);
	mutex_unlock(&buffer_mutex);
}

int alloc_event_buffer(void)
{
	unsigned long flags;
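
	/*
	 * Snapshot the user-configurable size and watershed under
	 * oprofilefs_lock so the two values form a consistent pair.
	 */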
	raw_spin_lock_irqsave(&oprofilefs_lock, flags);
	buffer_size = oprofile_buffer_size;
	buffer_watershed = oprofile_buffer_watershed;
	raw_spin_unlock_irqrestore(&oprofilefs_lock, flags);

	if (buffer_watershed >= buffer_size)
		return -EINVAL;

	buffer_pos = 0;
	event_buffer = vmalloc(sizeof(unsigned long) * buffer_size);
	if (!event_buffer)
		return -ENOMEM;

	return 0;
}

void free_event_buffer(void)
{
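	/*
	 * Take buffer_mutex so a reader blocked in event_buffer_read()
	 * sees event_buffer == NULL instead of a freed pointer.
	 */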
	mutex_lock(&buffer_mutex);
	vfree(event_buffer);
	buffer_pos = 0;
	event_buffer = NULL;
	mutex_unlock(&buffer_mutex);
}

static int event_buffer_open(struct inode *inode, struct file *file)
{
	int err = -EPERM;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
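
	/* Allow only one opener at a time; the bit is dropped on release. */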
	if (test_and_set_bit_lock(0, &buffer_opened))
		return -EBUSY;

	/* Register as a user of dcookies
	 * to ensure they persist for the lifetime of
	 * the open event file
	 */
	err = -EINVAL;
	file->private_data = dcookie_register();
	if (!file->private_data)
		goto out;

	if ((err = oprofile_setup()))
		goto fail;

	/* NB: the actual start happens from userspace
	 * echo 1 >/dev/oprofile/enable
	 */

	return nonseekable_open(inode, file);

fail:
	dcookie_unregister(file->private_data);
out:
	__clear_bit_unlock(0, &buffer_opened);
	return err;
}

static int event_buffer_release(struct inode *inode, struct file *file)
{
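	/* Stop profiling and discard anything still left in the buffer. */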
	oprofile_stop();
	oprofile_shutdown();
	dcookie_unregister(file->private_data);
	buffer_pos = 0;
	atomic_set(&buffer_ready, 0);
	__clear_bit_unlock(0, &buffer_opened);
	return 0;
}

static ssize_t event_buffer_read(struct file *file, char __user *buf,
				 size_t count, loff_t *offset)
{
	int retval = -EINVAL;
	size_t const max = buffer_size * sizeof(unsigned long);

	/* handling partial reads is more trouble than it's worth */
	if (count != max || *offset)
		return -EINVAL;
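
	/*
	 * Usage sketch (illustrative): the daemon always asks for the
	 * whole buffer, e.g.
	 *
	 *	read(fd, buf, buffer_size * sizeof(unsigned long));
	 *
	 * and the short return count tells it how many words were pending.
	 */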

	wait_event_interruptible(buffer_wait, atomic_read(&buffer_ready));

	if (signal_pending(current))
		return -EINTR;

	/* can't currently happen */
	if (!atomic_read(&buffer_ready))
		return -EAGAIN;

	mutex_lock(&buffer_mutex);

	/* May happen if the buffer is freed during pending reads. */
	if (!event_buffer) {
		retval = -EINTR;
		goto out;
	}

	atomic_set(&buffer_ready, 0);

	retval = -EFAULT;

	count = buffer_pos * sizeof(unsigned long);

	if (copy_to_user(buf, event_buffer, count))
		goto out;

	retval = count;
	buffer_pos = 0;

out:
	mutex_unlock(&buffer_mutex);
	return retval;
}

const struct file_operations event_buffer_fops = {
	.open		= event_buffer_open,
	.release	= event_buffer_release,
	.read		= event_buffer_read,
	.llseek		= no_llseek,
};