/*
 * kernel/time/sched_debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>

/*
 * This allows printing both to /proc/sched_debug and
 * to the console
 */
#define SEQ_printf(m, x...)                     \
 do {                                           \
        if (m)                                  \
                seq_printf(m, x);               \
        else                                    \
                printk(x);                      \
 } while (0)
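
/*
 * Print one line of state for a single task: an "R" marker when it is
 * rq->curr, followed by its CFS tree key, wait_runtime and switch count,
 * plus the accumulated runtime/wait/sleep statistics when
 * CONFIG_SCHEDSTATS is enabled.
 */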
static void
print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
{
        if (rq->curr == p)
                SEQ_printf(m, "R");
        else
                SEQ_printf(m, " ");

        SEQ_printf(m, "%15s %5d %15Ld %13Ld %13Ld %9Ld %5d ",
                p->comm, p->pid,
                (long long)p->se.fair_key,
                (long long)(p->se.fair_key - rq->cfs.fair_clock),
                (long long)p->se.wait_runtime,
                (long long)(p->nvcsw + p->nivcsw),
                p->prio);
#ifdef CONFIG_SCHEDSTATS
        SEQ_printf(m, "%15Ld %15Ld %15Ld %15Ld %15Ld %15Ld\n",
                (long long)p->se.vruntime,
                (long long)p->se.sum_exec_runtime,
                (long long)p->se.sum_wait_runtime,
                (long long)p->se.sum_sleep_runtime,
                (long long)p->se.wait_runtime_overruns,
                (long long)p->se.wait_runtime_underruns);
#else
        SEQ_printf(m, "%15Ld %15Ld %15Ld %15Ld %15Ld\n",
                0LL, 0LL, 0LL, 0LL, 0LL);
#endif
}
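
/*
 * Walk every thread in the system (under tasklist_lock) and print the
 * ones that are currently runnable on the given CPU.
 */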
static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
{
        struct task_struct *g, *p;

        SEQ_printf(m,
        "\nrunnable tasks:\n"
        "           task   PID        tree-key         delta       waiting"
        "  switches  prio"
        "        exec-runtime        sum-exec        sum-wait       sum-sleep"
        "    wait-overrun   wait-underrun\n"
        "------------------------------------------------------------------"
        "--------------------------------"
        "------------------------------------------------"
        "--------------------------------\n");

        read_lock_irq(&tasklist_lock);

        do_each_thread(g, p) {
                if (!p->se.on_rq || task_cpu(p) != rq_cpu)
                        continue;

                print_task(m, rq, p);
        } while_each_thread(g, p);

        read_unlock_irq(&tasklist_lock);
}
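
/*
 * Sum wait_runtime over all tasks currently queued in this CPU's CFS
 * rbtree, holding rq->lock while walking the tree.
 */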
static void
print_cfs_rq_runtime_sum(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
        s64 wait_runtime_rq_sum = 0;
        struct task_struct *p;
        struct rb_node *curr;
        unsigned long flags;
        struct rq *rq = &per_cpu(runqueues, cpu);

        spin_lock_irqsave(&rq->lock, flags);
        curr = first_fair(cfs_rq);
        while (curr) {
                p = rb_entry(curr, struct task_struct, se.run_node);
                wait_runtime_rq_sum += p->se.wait_runtime;

                curr = rb_next(curr);
        }
        spin_unlock_irqrestore(&rq->lock, flags);

        SEQ_printf(m, "  .%-30s: %Ld\n", "wait_runtime_rq_sum",
                (long long)wait_runtime_rq_sum);
}

void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
{
        s64 MIN_vruntime = -1, max_vruntime = -1, spread;
        struct rq *rq = &per_cpu(runqueues, cpu);
        struct sched_entity *last;
        unsigned long flags;

        SEQ_printf(m, "\ncfs_rq\n");

#define P(x) \
        SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(cfs_rq->x))

        P(fair_clock);
        P(exec_clock);
        P(min_vruntime);

        spin_lock_irqsave(&rq->lock, flags);
        if (cfs_rq->rb_leftmost)
                MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime;
        last = __pick_last_entity(cfs_rq);
        if (last)
                max_vruntime = last->vruntime;
        spin_unlock_irqrestore(&rq->lock, flags);
        SEQ_printf(m, "  .%-30s: %Ld\n", "MIN_vruntime",
                        (long long)MIN_vruntime);
        SEQ_printf(m, "  .%-30s: %Ld\n", "max_vruntime",
                        (long long)max_vruntime);
        spread = max_vruntime - MIN_vruntime;
        SEQ_printf(m, "  .%-30s: %Ld\n", "spread",
                        (long long)spread);

        P(wait_runtime);
        P(wait_runtime_overruns);
        P(wait_runtime_underruns);
        P(sleeper_bonus);
#undef P

        print_cfs_rq_runtime_sum(m, cpu, cfs_rq);
}
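
/*
 * Dump per-runqueue state for one CPU: the rq counters, clock and
 * cpu_load[] history, then the CFS stats and the runnable tasks.
 */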
static void print_cpu(struct seq_file *m, int cpu)
{
        struct rq *rq = &per_cpu(runqueues, cpu);

#ifdef CONFIG_X86
        {
                unsigned int freq = cpu_khz ? : 1;

                SEQ_printf(m, "\ncpu#%d, %u.%03u MHz\n",
                           cpu, freq / 1000, (freq % 1000));
        }
#else
        SEQ_printf(m, "\ncpu#%d\n", cpu);
#endif

#define P(x) \
        SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x))

        P(nr_running);
        SEQ_printf(m, "  .%-30s: %lu\n", "load",
                   rq->ls.load.weight);
        P(nr_switches);
        P(nr_load_updates);
        P(nr_uninterruptible);
        SEQ_printf(m, "  .%-30s: %lu\n", "jiffies", jiffies);
        P(next_balance);
        P(curr->pid);
        P(clock);
        P(idle_clock);
        P(prev_clock_raw);
        P(clock_warps);
        P(clock_overflows);
        P(clock_deep_idle_events);
        P(clock_max_delta);
        P(cpu_load[0]);
        P(cpu_load[1]);
        P(cpu_load[2]);
        P(cpu_load[3]);
        P(cpu_load[4]);
#undef P

        print_cfs_stats(m, cpu);

        print_rq(m, rq, cpu);
}
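
/*
 * seq_file show callback: print the version/uname header, then one
 * section per online CPU.
 */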
static int sched_debug_show(struct seq_file *m, void *v)
{
        u64 now = ktime_to_ns(ktime_get());
        int cpu;

        SEQ_printf(m, "Sched Debug Version: v0.05-v20, %s %.*s\n",
                init_utsname()->release,
                (int)strcspn(init_utsname()->version, " "),
                init_utsname()->version);

        SEQ_printf(m, "now at %Lu nsecs\n", (unsigned long long)now);

        for_each_online_cpu(cpu)
                print_cpu(m, cpu);

        SEQ_printf(m, "\n");

        return 0;
}

static void sysrq_sched_debug_show(void)
{
        sched_debug_show(NULL, NULL);
}

static int sched_debug_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, sched_debug_show, NULL);
}

static struct file_operations sched_debug_fops = {
        .open           = sched_debug_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init init_sched_debug_procfs(void)
{
        struct proc_dir_entry *pe;

        pe = create_proc_entry("sched_debug", 0644, NULL);
        if (!pe)
                return -ENOMEM;

        pe->proc_fops = &sched_debug_fops;

        return 0;
}

__initcall(init_sched_debug_procfs);
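
/*
 * Print the scheduler state of a single task; backs /proc/<pid>/sched.
 */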
void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
{
        unsigned long flags;
        int num_threads = 1;

        rcu_read_lock();
        if (lock_task_sighand(p, &flags)) {
                num_threads = atomic_read(&p->signal->count);
                unlock_task_sighand(p, &flags);
        }
        rcu_read_unlock();

        SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, num_threads);
        SEQ_printf(m, "----------------------------------------------\n");
#define P(F) \
        SEQ_printf(m, "%-25s:%20Ld\n", #F, (long long)p->F)

        P(se.wait_runtime);
        P(se.wait_start_fair);
        P(se.exec_start);
        P(se.sleep_start_fair);
        P(se.vruntime);
        P(se.sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
        P(se.wait_start);
        P(se.sleep_start);
        P(se.block_start);
        P(se.sleep_max);
        P(se.block_max);
        P(se.exec_max);
        P(se.slice_max);
        P(se.wait_max);
        P(se.wait_runtime_overruns);
        P(se.wait_runtime_underruns);
        P(se.sum_wait_runtime);
#endif
        SEQ_printf(m, "%-25s:%20Ld\n",
                   "nr_switches", (long long)(p->nvcsw + p->nivcsw));
        P(se.load.weight);
        P(policy);
        P(prio);
#undef P

        {
                u64 t0, t1;

                t0 = sched_clock();
                t1 = sched_clock();
                SEQ_printf(m, "%-25s:%20Ld\n",
                           "clock-delta", (long long)(t1 - t0));
        }
}
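
/*
 * Reset the task's accumulated scheduler statistics; triggered by a write
 * to /proc/<pid>/sched.
 */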
void proc_sched_set_task(struct task_struct *p)
{
#ifdef CONFIG_SCHEDSTATS
        p->se.sleep_max                 = 0;
        p->se.block_max                 = 0;
        p->se.exec_max                  = 0;
        p->se.slice_max                 = 0;
        p->se.wait_max                  = 0;
        p->se.wait_runtime_overruns     = 0;
        p->se.wait_runtime_underruns    = 0;
#endif
        p->se.sum_exec_runtime          = 0;
        p->se.prev_sum_exec_runtime     = 0;
}