/*
 * kernel/time/sched_debug.c
 *
 * Print the CFS rbtree
 *
 * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
/*
 * This allows printing both to /proc/sched_debug and
 * to the console: with a live seq_file the output goes through
 * seq_printf(); with m == NULL it falls back to printk().
 */
#define SEQ_printf(m, x...)			\
 do {						\
	if (m)					\
		seq_printf(m, x);		\
	else					\
		printk(x);			\
 } while (0)
/*
 * Ease the printing of nsec fields:
 */
/*
 * Whole-millisecond part of a nanosecond value that is really a
 * (possibly negative) s64 stored in a u64; the sign is preserved.
 */
static long long nsec_high(unsigned long long nsec)
{
	long long sign = 1;

	if ((long long)nsec < 0) {
		/* divide the magnitude, restore the sign afterwards */
		sign = -1;
		nsec = -nsec;
	}
	/* do_div(): portable 64/32 division, quotient lands in nsec */
	do_div(nsec, 1000000);

	return sign * (long long)nsec;
}
/*
 * Companion of nsec_high(): the sub-millisecond remainder (always
 * non-negative), suitable for printing as the ".%06ld" fraction.
 */
static unsigned long nsec_low(unsigned long long nsec)
{
	if ((long long)nsec < 0)
		nsec = -nsec;	/* remainder is taken from the magnitude */

	/* do_div() returns the remainder of the division */
	return do_div(nsec, 1000000);
}
# define SPLIT_NS(x) nsec_high(x), nsec_low(x)
#ifdef CONFIG_FAIR_GROUP_SCHED
/* Dump the per-cpu sched_entity statistics of a task group. */
static void print_cfs_group_stats(struct seq_file *m, int cpu,
		struct task_group *tg)
{
	struct sched_entity *se = tg->se[cpu];

	/* the root task group has no per-cpu entity */
	if (!se)
		return;

/* #F stringifies the argument, so each line is labelled by the field name */
#define P(F) \
	SEQ_printf(m, "  .%-30s: %lld\n", #F, (long long)F)
#define PN(F) \
	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))

	PN(se->exec_start);
	PN(se->vruntime);
	PN(se->sum_exec_runtime);
#ifdef CONFIG_SCHEDSTATS
	PN(se->wait_start);
	PN(se->sleep_start);
	PN(se->block_start);
	PN(se->sleep_max);
	PN(se->block_max);
	PN(se->exec_max);
	PN(se->slice_max);
	PN(se->wait_max);
	PN(se->wait_sum);
	P(se->wait_count);
#endif
	P(se->load.weight);
#undef PN
#undef P
}
#endif
2007-07-09 20:52:00 +04:00
static void
2007-08-09 13:16:51 +04:00
print_task ( struct seq_file * m , struct rq * rq , struct task_struct * p )
2007-07-09 20:52:00 +04:00
{
if ( rq - > curr = = p )
SEQ_printf ( m , " R " ) ;
else
SEQ_printf ( m , " " ) ;
2007-10-15 19:00:08 +04:00
SEQ_printf ( m , " %15s %5d %9Ld.%06ld %9Ld %5d " ,
2007-07-09 20:52:00 +04:00
p - > comm , p - > pid ,
2007-10-15 19:00:08 +04:00
SPLIT_NS ( p - > se . vruntime ) ,
2007-07-09 20:52:00 +04:00
( long long ) ( p - > nvcsw + p - > nivcsw ) ,
2007-08-06 07:26:59 +04:00
p - > prio ) ;
2007-08-02 19:41:40 +04:00
# ifdef CONFIG_SCHEDSTATS
2008-04-19 21:45:00 +04:00
SEQ_printf ( m , " %9Ld.%06ld %9Ld.%06ld %9Ld.%06ld " ,
2007-10-15 19:00:08 +04:00
SPLIT_NS ( p - > se . vruntime ) ,
SPLIT_NS ( p - > se . sum_exec_runtime ) ,
SPLIT_NS ( p - > se . sum_sleep_runtime ) ) ;
2007-08-02 19:41:40 +04:00
# else
2008-04-19 21:45:00 +04:00
SEQ_printf ( m , " %15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld " ,
2007-10-15 19:00:08 +04:00
0LL , 0LL , 0LL , 0L , 0LL , 0L , 0LL , 0L ) ;
2007-08-02 19:41:40 +04:00
# endif
2008-04-19 21:45:00 +04:00
# ifdef CONFIG_CGROUP_SCHED
{
char path [ 64 ] ;
cgroup_path ( task_group ( p ) - > css . cgroup , path , sizeof ( path ) ) ;
SEQ_printf ( m , " %s " , path ) ;
}
# endif
SEQ_printf ( m , " \n " ) ;
2007-07-09 20:52:00 +04:00
}
2007-08-09 13:16:51 +04:00
static void print_rq ( struct seq_file * m , struct rq * rq , int rq_cpu )
2007-07-09 20:52:00 +04:00
{
struct task_struct * g , * p ;
2007-10-25 16:02:45 +04:00
unsigned long flags ;
2007-07-09 20:52:00 +04:00
SEQ_printf ( m ,
" \n runnable tasks: \n "
2007-10-15 19:00:08 +04:00
" task PID tree-key switches prio "
" exec-runtime sum-exec sum-sleep \n "
2007-10-15 19:00:08 +04:00
" ------------------------------------------------------ "
2007-10-15 19:00:08 +04:00
" ---------------------------------------------------- \n " ) ;
2007-07-09 20:52:00 +04:00
2007-10-25 16:02:45 +04:00
read_lock_irqsave ( & tasklist_lock , flags ) ;
2007-07-09 20:52:00 +04:00
do_each_thread ( g , p ) {
if ( ! p - > se . on_rq | | task_cpu ( p ) ! = rq_cpu )
continue ;
2007-08-09 13:16:51 +04:00
print_task ( m , rq , p ) ;
2007-07-09 20:52:00 +04:00
} while_each_thread ( g , p ) ;
2007-10-25 16:02:45 +04:00
read_unlock_irqrestore ( & tasklist_lock , flags ) ;
2007-07-09 20:52:00 +04:00
}
#if defined(CONFIG_CGROUP_SCHED) && \
	(defined(CONFIG_FAIR_GROUP_SCHED) || defined(CONFIG_RT_GROUP_SCHED))
/* Copy the cgroup path of @tg into @buf (empty string if unavailable). */
static void task_group_path(struct task_group *tg, char *buf, int buflen)
{
	if (tg->css.cgroup)
		cgroup_path(tg->css.cgroup, buf, buflen);
	else
		buf[0] = '\0'; /* underlying cgroup isn't fully created yet */
}
#endif
2007-08-09 13:16:47 +04:00
void print_cfs_rq ( struct seq_file * m , int cpu , struct cfs_rq * cfs_rq )
2007-07-09 20:52:00 +04:00
{
2007-10-15 19:00:06 +04:00
s64 MIN_vruntime = - 1 , min_vruntime , max_vruntime = - 1 ,
spread , rq0_min_vruntime , spread0 ;
2009-06-17 17:20:55 +04:00
struct rq * rq = cpu_rq ( cpu ) ;
2007-10-15 19:00:05 +04:00
struct sched_entity * last ;
unsigned long flags ;
2008-06-19 16:22:24 +04:00
# if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
2009-01-10 10:43:15 +03:00
char path [ 128 ] ;
2008-04-19 21:45:00 +04:00
struct task_group * tg = cfs_rq - > tg ;
2009-01-10 10:43:15 +03:00
task_group_path ( tg , path , sizeof ( path ) ) ;
2008-04-19 21:45:00 +04:00
SEQ_printf ( m , " \n cfs_rq[%d]:%s \n " , cpu , path ) ;
2008-12-01 18:19:05 +03:00
# elif defined(CONFIG_USER_SCHED) && defined(CONFIG_FAIR_GROUP_SCHED)
{
uid_t uid = cfs_rq - > tg - > uid ;
SEQ_printf ( m , " \n cfs_rq[%d] for UID: %u \n " , cpu , uid ) ;
}
2008-06-19 16:22:24 +04:00
# else
SEQ_printf ( m , " \n cfs_rq[%d]: \n " , cpu ) ;
2008-04-19 21:45:00 +04:00
# endif
2007-10-15 19:00:08 +04:00
SEQ_printf ( m , " .%-30s: %Ld.%06ld \n " , " exec_clock " ,
SPLIT_NS ( cfs_rq - > exec_clock ) ) ;
2007-10-15 19:00:05 +04:00
spin_lock_irqsave ( & rq - > lock , flags ) ;
if ( cfs_rq - > rb_leftmost )
MIN_vruntime = ( __pick_next_entity ( cfs_rq ) ) - > vruntime ;
last = __pick_last_entity ( cfs_rq ) ;
if ( last )
max_vruntime = last - > vruntime ;
2008-11-10 12:46:32 +03:00
min_vruntime = cfs_rq - > min_vruntime ;
2009-06-17 17:20:55 +04:00
rq0_min_vruntime = cpu_rq ( 0 ) - > cfs . min_vruntime ;
2007-10-15 19:00:05 +04:00
spin_unlock_irqrestore ( & rq - > lock , flags ) ;
2007-10-15 19:00:08 +04:00
SEQ_printf ( m , " .%-30s: %Ld.%06ld \n " , " MIN_vruntime " ,
SPLIT_NS ( MIN_vruntime ) ) ;
SEQ_printf ( m , " .%-30s: %Ld.%06ld \n " , " min_vruntime " ,
SPLIT_NS ( min_vruntime ) ) ;
SEQ_printf ( m , " .%-30s: %Ld.%06ld \n " , " max_vruntime " ,
SPLIT_NS ( max_vruntime ) ) ;
2007-10-15 19:00:05 +04:00
spread = max_vruntime - MIN_vruntime ;
2007-10-15 19:00:08 +04:00
SEQ_printf ( m , " .%-30s: %Ld.%06ld \n " , " spread " ,
SPLIT_NS ( spread ) ) ;
2007-10-15 19:00:06 +04:00
spread0 = min_vruntime - rq0_min_vruntime ;
2007-10-15 19:00:08 +04:00
SEQ_printf ( m , " .%-30s: %Ld.%06ld \n " , " spread0 " ,
SPLIT_NS ( spread0 ) ) ;
2007-10-15 19:00:09 +04:00
SEQ_printf ( m , " .%-30s: %ld \n " , " nr_running " , cfs_rq - > nr_running ) ;
SEQ_printf ( m , " .%-30s: %ld \n " , " load " , cfs_rq - > load . weight ) ;
2008-06-27 15:41:17 +04:00
2008-11-10 12:46:32 +03:00
SEQ_printf ( m , " .%-30s: %d \n " , " nr_spread_over " ,
2007-10-15 19:00:10 +04:00
cfs_rq - > nr_spread_over ) ;
2008-06-27 15:41:14 +04:00
# ifdef CONFIG_FAIR_GROUP_SCHED
# ifdef CONFIG_SMP
SEQ_printf ( m , " .%-30s: %lu \n " , " shares " , cfs_rq - > shares ) ;
# endif
2008-11-10 19:04:09 +03:00
print_cfs_group_stats ( m , cpu , cfs_rq - > tg ) ;
2008-06-27 15:41:14 +04:00
# endif
2007-07-09 20:52:00 +04:00
}
2008-06-19 16:22:24 +04:00
void print_rt_rq ( struct seq_file * m , int cpu , struct rt_rq * rt_rq )
{
# if defined(CONFIG_CGROUP_SCHED) && defined(CONFIG_RT_GROUP_SCHED)
2009-01-10 10:43:15 +03:00
char path [ 128 ] ;
2008-06-19 16:22:24 +04:00
struct task_group * tg = rt_rq - > tg ;
2009-01-10 10:43:15 +03:00
task_group_path ( tg , path , sizeof ( path ) ) ;
2008-06-19 16:22:24 +04:00
SEQ_printf ( m , " \n rt_rq[%d]:%s \n " , cpu , path ) ;
# else
SEQ_printf ( m , " \n rt_rq[%d]: \n " , cpu ) ;
# endif
# define P(x) \
SEQ_printf ( m , " .%-30s: %Ld \n " , # x , ( long long ) ( rt_rq - > x ) )
# define PN(x) \
SEQ_printf ( m , " .%-30s: %Ld.%06ld \n " , # x , SPLIT_NS ( rt_rq - > x ) )
P ( rt_nr_running ) ;
P ( rt_throttled ) ;
PN ( rt_time ) ;
PN ( rt_runtime ) ;
# undef PN
# undef P
}
2007-08-09 13:16:51 +04:00
static void print_cpu ( struct seq_file * m , int cpu )
2007-07-09 20:52:00 +04:00
{
2009-06-17 17:20:55 +04:00
struct rq * rq = cpu_rq ( cpu ) ;
2007-07-09 20:52:00 +04:00
# ifdef CONFIG_X86
{
unsigned int freq = cpu_khz ? : 1 ;
SEQ_printf ( m , " \n cpu#%d, %u.%03u MHz \n " ,
cpu , freq / 1000 , ( freq % 1000 ) ) ;
}
# else
SEQ_printf ( m , " \n cpu#%d \n " , cpu ) ;
# endif
# define P(x) \
SEQ_printf ( m , " .%-30s: %Ld \n " , # x , ( long long ) ( rq - > x ) )
2007-10-15 19:00:08 +04:00
# define PN(x) \
SEQ_printf ( m , " .%-30s: %Ld.%06ld \n " , # x , SPLIT_NS ( rq - > x ) )
2007-07-09 20:52:00 +04:00
P ( nr_running ) ;
SEQ_printf ( m , " .%-30s: %lu \n " , " load " ,
2007-10-15 19:00:06 +04:00
rq - > load . weight ) ;
2007-07-09 20:52:00 +04:00
P ( nr_switches ) ;
P ( nr_load_updates ) ;
P ( nr_uninterruptible ) ;
2007-10-15 19:00:08 +04:00
PN ( next_balance ) ;
2007-07-09 20:52:00 +04:00
P ( curr - > pid ) ;
2007-10-15 19:00:08 +04:00
PN ( clock ) ;
2007-07-09 20:52:00 +04:00
P ( cpu_load [ 0 ] ) ;
P ( cpu_load [ 1 ] ) ;
P ( cpu_load [ 2 ] ) ;
P ( cpu_load [ 3 ] ) ;
P ( cpu_load [ 4 ] ) ;
# undef P
2007-10-15 19:00:08 +04:00
# undef PN
2007-07-09 20:52:00 +04:00
2008-11-10 12:46:32 +03:00
# ifdef CONFIG_SCHEDSTATS
# define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n);
P ( yld_count ) ;
P ( sched_switch ) ;
P ( sched_count ) ;
P ( sched_goidle ) ;
P ( ttwu_count ) ;
P ( ttwu_local ) ;
P ( bkl_count ) ;
# undef P
# endif
2007-08-09 13:16:47 +04:00
print_cfs_stats ( m , cpu ) ;
2008-06-19 16:22:24 +04:00
print_rt_stats ( m , cpu ) ;
2007-07-09 20:52:00 +04:00
2007-08-09 13:16:51 +04:00
print_rq ( m , rq , cpu ) ;
2007-07-09 20:52:00 +04:00
}
static int sched_debug_show ( struct seq_file * m , void * v )
{
u64 now = ktime_to_ns ( ktime_get ( ) ) ;
int cpu ;
2009-03-25 01:10:02 +03:00
SEQ_printf ( m , " Sched Debug Version: v0.09, %s %.*s \n " ,
2007-07-09 20:52:00 +04:00
init_utsname ( ) - > release ,
( int ) strcspn ( init_utsname ( ) - > version , " " ) ,
init_utsname ( ) - > version ) ;
2007-10-15 19:00:08 +04:00
SEQ_printf ( m , " now at %Lu.%06ld msecs \n " , SPLIT_NS ( now ) ) ;
2007-07-09 20:52:00 +04:00
2007-10-15 19:00:10 +04:00
# define P(x) \
2007-10-15 19:00:10 +04:00
SEQ_printf ( m , " .%-40s: %Ld \n " , # x , ( long long ) ( x ) )
2007-10-15 19:00:10 +04:00
# define PN(x) \
2007-10-15 19:00:10 +04:00
SEQ_printf ( m , " .%-40s: %Ld.%06ld \n " , # x , SPLIT_NS ( x ) )
2009-03-18 03:04:25 +03:00
P ( jiffies ) ;
2007-10-15 19:00:10 +04:00
PN ( sysctl_sched_latency ) ;
2007-11-10 00:39:37 +03:00
PN ( sysctl_sched_min_granularity ) ;
2007-10-15 19:00:10 +04:00
PN ( sysctl_sched_wakeup_granularity ) ;
PN ( sysctl_sched_child_runs_first ) ;
P ( sysctl_sched_features ) ;
# undef PN
# undef P
2007-07-09 20:52:00 +04:00
for_each_online_cpu ( cpu )
2007-08-09 13:16:51 +04:00
print_cpu ( m , cpu ) ;
2007-07-09 20:52:00 +04:00
SEQ_printf ( m , " \n " ) ;
return 0 ;
}
2007-07-26 15:40:43 +04:00
static void sysrq_sched_debug_show ( void )
2007-07-09 20:52:00 +04:00
{
sched_debug_show ( NULL , NULL ) ;
}
/* open() handler for /proc/sched_debug: single-shot seq_file. */
static int sched_debug_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, sched_debug_show, NULL);
}
2007-10-15 19:00:19 +04:00
static const struct file_operations sched_debug_fops = {
2007-07-09 20:52:00 +04:00
. open = sched_debug_open ,
. read = seq_read ,
. llseek = seq_lseek ,
2007-07-31 11:38:50 +04:00
. release = single_release ,
2007-07-09 20:52:00 +04:00
} ;
/* Register /proc/sched_debug (read-only) at boot. */
static int __init init_sched_debug_procfs(void)
{
	struct proc_dir_entry *pe;

	pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
	if (!pe)
		return -ENOMEM;
	return 0;
}

__initcall(init_sched_debug_procfs);
void proc_sched_show_task ( struct task_struct * p , struct seq_file * m )
{
2007-10-15 19:00:18 +04:00
unsigned long nr_switches ;
2007-07-09 20:52:00 +04:00
unsigned long flags ;
int num_threads = 1 ;
if ( lock_task_sighand ( p , & flags ) ) {
num_threads = atomic_read ( & p - > signal - > count ) ;
unlock_task_sighand ( p , & flags ) ;
}
SEQ_printf ( m , " %s (%d, #threads: %d) \n " , p - > comm , p - > pid , num_threads ) ;
2007-10-15 19:00:18 +04:00
SEQ_printf ( m ,
" --------------------------------------------------------- \n " ) ;
2007-10-15 19:00:18 +04:00
# define __P(F) \
SEQ_printf ( m , " %-35s:%21Ld \n " , # F , ( long long ) F )
2007-07-09 20:52:00 +04:00
# define P(F) \
2007-10-15 19:00:18 +04:00
SEQ_printf ( m , " %-35s:%21Ld \n " , # F , ( long long ) p - > F )
2007-10-15 19:00:18 +04:00
# define __PN(F) \
SEQ_printf ( m , " %-35s:%14Ld.%06ld \n " , # F , SPLIT_NS ( ( long long ) F ) )
2007-10-15 19:00:08 +04:00
# define PN(F) \
2007-10-15 19:00:18 +04:00
SEQ_printf ( m , " %-35s:%14Ld.%06ld \n " , # F , SPLIT_NS ( ( long long ) p - > F ) )
2007-07-09 20:52:00 +04:00
2007-10-15 19:00:08 +04:00
PN ( se . exec_start ) ;
PN ( se . vruntime ) ;
PN ( se . sum_exec_runtime ) ;
2008-03-19 03:42:00 +03:00
PN ( se . avg_overlap ) ;
2009-01-14 14:39:18 +03:00
PN ( se . avg_wakeup ) ;
2007-08-02 19:41:40 +04:00
2007-10-15 19:00:18 +04:00
nr_switches = p - > nvcsw + p - > nivcsw ;
2007-08-02 19:41:40 +04:00
# ifdef CONFIG_SCHEDSTATS
2007-10-15 19:00:08 +04:00
PN ( se . wait_start ) ;
PN ( se . sleep_start ) ;
PN ( se . block_start ) ;
PN ( se . sleep_max ) ;
PN ( se . block_max ) ;
PN ( se . exec_max ) ;
PN ( se . slice_max ) ;
PN ( se . wait_max ) ;
2008-01-25 23:08:35 +03:00
PN ( se . wait_sum ) ;
P ( se . wait_count ) ;
2007-10-15 19:00:12 +04:00
P ( sched_info . bkl_count ) ;
2007-10-15 19:00:18 +04:00
P ( se . nr_migrations ) ;
P ( se . nr_migrations_cold ) ;
P ( se . nr_failed_migrations_affine ) ;
P ( se . nr_failed_migrations_running ) ;
P ( se . nr_failed_migrations_hot ) ;
P ( se . nr_forced_migrations ) ;
P ( se . nr_forced2_migrations ) ;
P ( se . nr_wakeups ) ;
P ( se . nr_wakeups_sync ) ;
P ( se . nr_wakeups_migrate ) ;
P ( se . nr_wakeups_local ) ;
P ( se . nr_wakeups_remote ) ;
P ( se . nr_wakeups_affine ) ;
P ( se . nr_wakeups_affine_attempts ) ;
P ( se . nr_wakeups_passive ) ;
P ( se . nr_wakeups_idle ) ;
{
u64 avg_atom , avg_per_cpu ;
avg_atom = p - > se . sum_exec_runtime ;
if ( nr_switches )
do_div ( avg_atom , nr_switches ) ;
else
avg_atom = - 1LL ;
avg_per_cpu = p - > se . sum_exec_runtime ;
2007-11-28 17:52:56 +03:00
if ( p - > se . nr_migrations ) {
2008-05-01 15:34:28 +04:00
avg_per_cpu = div64_u64 ( avg_per_cpu ,
p - > se . nr_migrations ) ;
2007-11-28 17:52:56 +03:00
} else {
2007-10-15 19:00:18 +04:00
avg_per_cpu = - 1LL ;
2007-11-28 17:52:56 +03:00
}
2007-10-15 19:00:18 +04:00
__PN ( avg_atom ) ;
__PN ( avg_per_cpu ) ;
}
2007-08-02 19:41:40 +04:00
# endif
2007-10-15 19:00:18 +04:00
__P ( nr_switches ) ;
2007-10-15 19:00:18 +04:00
SEQ_printf ( m , " %-35s:%21Ld \n " ,
2007-10-15 19:00:18 +04:00
" nr_voluntary_switches " , ( long long ) p - > nvcsw ) ;
SEQ_printf ( m , " %-35s:%21Ld \n " ,
" nr_involuntary_switches " , ( long long ) p - > nivcsw ) ;
2007-07-09 20:52:00 +04:00
P ( se . load . weight ) ;
P ( policy ) ;
P ( prio ) ;
2007-10-15 19:00:08 +04:00
# undef PN
2007-10-15 19:00:18 +04:00
# undef __PN
# undef P
# undef __P
2007-07-09 20:52:00 +04:00
{
2008-11-16 10:07:15 +03:00
unsigned int this_cpu = raw_smp_processor_id ( ) ;
2007-07-09 20:52:00 +04:00
u64 t0 , t1 ;
2008-11-16 10:07:15 +03:00
t0 = cpu_clock ( this_cpu ) ;
t1 = cpu_clock ( this_cpu ) ;
2007-10-15 19:00:18 +04:00
SEQ_printf ( m , " %-35s:%21Ld \n " ,
2007-07-09 20:52:00 +04:00
" clock-delta " , ( long long ) ( t1 - t0 ) ) ;
}
}
void proc_sched_set_task ( struct task_struct * p )
{
2007-08-02 19:41:40 +04:00
# ifdef CONFIG_SCHEDSTATS
2007-10-15 19:00:18 +04:00
p - > se . wait_max = 0 ;
2008-01-25 23:08:35 +03:00
p - > se . wait_sum = 0 ;
p - > se . wait_count = 0 ;
2007-10-15 19:00:18 +04:00
p - > se . sleep_max = 0 ;
p - > se . sum_sleep_runtime = 0 ;
p - > se . block_max = 0 ;
p - > se . exec_max = 0 ;
p - > se . slice_max = 0 ;
p - > se . nr_migrations = 0 ;
p - > se . nr_migrations_cold = 0 ;
p - > se . nr_failed_migrations_affine = 0 ;
p - > se . nr_failed_migrations_running = 0 ;
p - > se . nr_failed_migrations_hot = 0 ;
p - > se . nr_forced_migrations = 0 ;
p - > se . nr_forced2_migrations = 0 ;
p - > se . nr_wakeups = 0 ;
p - > se . nr_wakeups_sync = 0 ;
p - > se . nr_wakeups_migrate = 0 ;
p - > se . nr_wakeups_local = 0 ;
p - > se . nr_wakeups_remote = 0 ;
p - > se . nr_wakeups_affine = 0 ;
p - > se . nr_wakeups_affine_attempts = 0 ;
p - > se . nr_wakeups_passive = 0 ;
p - > se . nr_wakeups_idle = 0 ;
p - > sched_info . bkl_count = 0 ;
2007-08-02 19:41:40 +04:00
# endif
2007-10-15 19:00:18 +04:00
p - > se . sum_exec_runtime = 0 ;
p - > se . prev_sum_exec_runtime = 0 ;
p - > nvcsw = 0 ;
p - > nivcsw = 0 ;
2007-07-09 20:52:00 +04:00
}