/*
 * Workqueue statistical tracer.
*
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
*
*/
#include <trace/events/workqueue.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/kref.h>
#include "trace_stat.h"
#include "trace.h"

/* A cpu workqueue thread */
struct cpu_workqueue_stats {
	struct list_head	list;
	struct kref		kref;
	int			cpu;
	pid_t			pid;
	/* Can be inserted from interrupt or user context, need to be atomic */
	atomic_t		inserted;
	/*
	 * Don't need to be atomic, works are serialized in a single workqueue
	 * thread on a single CPU.
	 */
	unsigned int		executed;
};

/* List of workqueue threads on one cpu */
struct workqueue_global_stats {
	struct list_head	list;
	spinlock_t		lock;
};

/* Don't need a global lock because allocated before the workqueues, and
 * never freed.
 */
static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);

#define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu))
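
/* kref release callback: free a per-cpu stats entry once its last reference is dropped */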
static void cpu_workqueue_stat_free(struct kref *kref)
{
	kfree(container_of(kref, struct cpu_workqueue_stats, kref));
}

/* Insertion of a work */
static void
probe_workqueue_insertion(struct task_struct *wq_thread,
			  struct work_struct *work)
{
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
		if (node->pid == wq_thread->pid) {
			atomic_inc(&node->inserted);
			goto found;
		}
	}
	pr_debug("trace_workqueue: entry not found\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

/* Execution of a work */
static void
probe_workqueue_execution(struct task_struct *wq_thread,
			  struct work_struct *work)
{
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
		if (node->pid == wq_thread->pid) {
			node->executed++;
			goto found;
		}
	}
	pr_debug("trace_workqueue: entry not found\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

/* Creation of a cpu workqueue thread */
static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
{
	struct cpu_workqueue_stats *cws;
	unsigned long flags;

	WARN_ON(cpu < 0);

	/* Workqueues are sometimes created in atomic context */
	cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC);
	if (!cws) {
		pr_warning("trace_workqueue: not enough memory\n");
		return;
	}
	INIT_LIST_HEAD(&cws->list);
	kref_init(&cws->kref);
	cws->cpu = cpu;
	cws->pid = wq_thread->pid;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list);
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}

/* Destruction of a cpu workqueue thread */
static void probe_workqueue_destruction(struct task_struct *wq_thread)
{
	/* A workqueue thread only executes on one cpu */
	int cpu = cpumask_first(&wq_thread->cpus_allowed);
	struct cpu_workqueue_stats *node, *next;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
							list) {
		if (node->pid == wq_thread->pid) {
			list_del(&node->list);
			kref_put(&node->kref, cpu_workqueue_stat_free);
			goto found;
		}
	}
	pr_debug("trace_workqueue: don't find workqueue to destroy\n");
found:
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
}
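
/* Return the first stats entry for @cpu with a reference held, or NULL if the list is empty */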
static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
{
	unsigned long flags;
	struct cpu_workqueue_stats *ret = NULL;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);

	if (!list_empty(&workqueue_cpu_stat(cpu)->list)) {
		ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
				 struct cpu_workqueue_stats, list);
		kref_get(&ret->kref);
	}

	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	return ret;
}
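
/* stat_start callback: return the first stats entry found across all possible cpus */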
static void *workqueue_stat_start(struct tracer_stat *trace)
{
	int cpu;
	void *ret = NULL;

	for_each_possible_cpu(cpu) {
		ret = workqueue_stat_start_cpu(cpu);
		if (ret)
			return ret;
	}

	return NULL;
}
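
/* stat_next callback: advance to the next stats entry, moving on to the next cpu when needed */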
static void *workqueue_stat_next(void *prev, int idx)
{
	struct cpu_workqueue_stats *prev_cws = prev;
	struct cpu_workqueue_stats *ret;
	int cpu = prev_cws->cpu;
	unsigned long flags;

	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
	if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
		spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
		do {
			cpu = cpumask_next(cpu, cpu_possible_mask);
			if (cpu >= nr_cpu_ids)
				return NULL;
		} while (!(ret = workqueue_stat_start_cpu(cpu)));
		return ret;
	} else {
		ret = list_entry(prev_cws->list.next,
				 struct cpu_workqueue_stats, list);
		kref_get(&ret->kref);
	}
	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);

	return ret;
}
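
/* stat_show callback: print one row (cpu, inserted, executed, thread name) for a stats entry */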
static int workqueue_stat_show(struct seq_file *s, void *p)
{
	struct cpu_workqueue_stats *cws = p;
	struct pid *pid;
	struct task_struct *tsk;

	pid = find_get_pid(cws->pid);
	if (pid) {
		tsk = get_pid_task(pid, PIDTYPE_PID);
		if (tsk) {
			seq_printf(s, "%3d %6d     %6u       %s\n", cws->cpu,
				   atomic_read(&cws->inserted), cws->executed,
				   tsk->comm);
			put_task_struct(tsk);
		}
		put_pid(pid);
	}

	return 0;
}
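
/* stat_release callback: drop the reference taken by stat_start/stat_next */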
static void workqueue_stat_release(void *stat)
{
	struct cpu_workqueue_stats *node = stat;

	kref_put(&node->kref, cpu_workqueue_stat_free);
}
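
/* stat_headers callback: print the column headers for the stat file */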
static int workqueue_stat_headers(struct seq_file *s)
{
	seq_printf(s, "# CPU  INSERTED  EXECUTED   NAME\n");
	seq_printf(s, "# |      |         |          |\n");

	return 0;
}
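
/* Hook the callbacks above into the generic statistical tracing framework */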
struct tracer_stat workqueue_stats __read_mostly = {
	.name = "workqueues",
	.stat_start = workqueue_stat_start,
	.stat_next = workqueue_stat_next,
	.stat_show = workqueue_stat_show,
	.stat_release = workqueue_stat_release,
	.stat_headers = workqueue_stat_headers
};
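
/* Register the workqueue stat tracer; runs as an fs_initcall */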
int __init stat_workqueue_init(void)
{
	if (register_stat_tracer(&workqueue_stats)) {
		pr_warning("Unable to register workqueue stat tracer\n");
		return 1;
	}

	return 0;
}
fs_initcall(stat_workqueue_init);

/*
 * Workqueues are created very early, just after pre-smp initcalls.
 * So we must register our tracepoints at this stage.
*/
int __init trace_workqueue_early_init(void)
{
	int ret, cpu;

	ret = register_trace_workqueue_insertion(probe_workqueue_insertion);
	if (ret)
		goto out;

	ret = register_trace_workqueue_execution(probe_workqueue_execution);
	if (ret)
		goto no_insertion;

	ret = register_trace_workqueue_creation(probe_workqueue_creation);
	if (ret)
		goto no_execution;

	ret = register_trace_workqueue_destruction(probe_workqueue_destruction);
	if (ret)
		goto no_creation;

	for_each_possible_cpu(cpu) {
		spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
		INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
	}

	return 0;

no_creation:
	unregister_trace_workqueue_creation(probe_workqueue_creation);
no_execution:
	unregister_trace_workqueue_execution(probe_workqueue_execution);
no_insertion:
	unregister_trace_workqueue_insertion(probe_workqueue_insertion);
out:
	pr_warning("trace_workqueue: unable to trace workqueues\n");

	return 1;
}
early_initcall(trace_workqueue_early_init);