#ifdef CONFIG_SCHEDSTATS
/*
 * bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
#define SCHEDSTAT_VERSION 15

static int show_schedstat(struct seq_file *seq, void *v)
{
	int cpu;
	int mask_len = DIV_ROUND_UP(NR_CPUS, 32) * 9;
	char *mask_str = kmalloc(mask_len, GFP_KERNEL);

	if (mask_str == NULL)
		return -ENOMEM;

	seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
	seq_printf(seq, "timestamp %lu\n", jiffies);
	for_each_online_cpu(cpu) {
		struct rq *rq = cpu_rq(cpu);
#ifdef CONFIG_SMP
		struct sched_domain *sd;
		int dcount = 0;
#endif

		/* runqueue-specific stats */
		seq_printf(seq,
		    "cpu%d %u %u %u %u %u %u %llu %llu %lu",
		    cpu, rq->yld_count,
		    rq->sched_switch, rq->sched_count, rq->sched_goidle,
		    rq->ttwu_count, rq->ttwu_local,
		    rq->rq_cpu_time,
		    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount);

		seq_printf(seq, "\n");

#ifdef CONFIG_SMP
		/* domain-specific stats */
		preempt_disable();
		for_each_domain(cpu, sd) {
			enum cpu_idle_type itype;

			cpumask_scnprintf(mask_str, mask_len,
					  sched_domain_span(sd));
			seq_printf(seq, "domain%d %s", dcount++, mask_str);
			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
					itype++) {
				seq_printf(seq, " %u %u %u %u %u %u %u %u",
				    sd->lb_count[itype],
				    sd->lb_balanced[itype],
				    sd->lb_failed[itype],
				    sd->lb_imbalance[itype],
				    sd->lb_gained[itype],
				    sd->lb_hot_gained[itype],
				    sd->lb_nobusyq[itype],
				    sd->lb_nobusyg[itype]);
			}
			seq_printf(seq,
				   " %u %u %u %u %u %u %u %u %u %u %u %u\n",
			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
			    sd->ttwu_wake_remote, sd->ttwu_move_affine,
			    sd->ttwu_move_balance);
		}
		preempt_enable();
#endif
	}
	kfree(mask_str);
	return 0;
}
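
A quick way to see what this produces is to read /proc/schedstat from userspace. The sketch below is a minimal, illustrative reader (plain C, not part of the kernel); it assumes only the format emitted above, printing the version line and the first per-cpu counter (yld_count). A real consumer should compare the version against SCHEDSTAT_VERSION before parsing further, exactly as the comment at the top advises.

/* Minimal /proc/schedstat reader (userspace sketch, not kernel code). */
#include <stdio.h>

int main(void)
{
	char line[4096];
	FILE *f = fopen("/proc/schedstat", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		unsigned int version, yld_count;
		int cpu;

		if (sscanf(line, "version %u", &version) == 1)
			printf("schedstat version %u\n", version);
		else if (sscanf(line, "cpu%d %u", &cpu, &yld_count) == 2)
			printf("cpu%d yld_count=%u\n", cpu, yld_count);
	}
	fclose(f);
	return 0;
}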

static int schedstat_open(struct inode *inode, struct file *file)
{
	/* one page, plus another page for every 32 online cpus' output */
	unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
	char *buf = kmalloc(size, GFP_KERNEL);
	struct seq_file *m;
	int res;

	if (!buf)
		return -ENOMEM;
	res = single_open(file, show_schedstat, NULL);
	if (!res) {
		m = file->private_data;
		m->buf = buf;
		m->size = size;
	} else
		kfree(buf);
	return res;
}

static const struct file_operations proc_schedstat_operations = {
	.open    = schedstat_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static int __init proc_schedstat_init(void)
{
	proc_create("schedstat", 0, NULL, &proc_schedstat_operations);
	return 0;
}
module_init(proc_schedstat_init);

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{
	if (rq) {
		rq->rq_sched_info.run_delay += delta;
		rq->rq_sched_info.pcount++;
	}
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_cpu_time += delta;
}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{
	if (rq)
		rq->rq_sched_info.run_delay += delta;
}

#define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
#define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
#define schedstat_set(var, val)		do { var = (val); } while (0)
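
These macros compile to nothing when CONFIG_SCHEDSTATS is off (see the stubs in the #else branch below), so call sites pay no cost in that configuration. As a hedged illustration of the call shape, the helper below is hypothetical, not the scheduler's actual code; yld_count is one of the rq counters printed by show_schedstat() above.

/* Hypothetical call site (illustrative): count a yield on this runqueue. */
static inline void note_yield(struct rq *rq)
{
	schedstat_inc(rq, yld_count);	/* expands to rq->yld_count++ here */
}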

#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long long delta)
{}
#define schedstat_inc(rq, field)	do { } while (0)
#define schedstat_add(rq, field, amt)	do { } while (0)
#define schedstat_set(var, val)		do { } while (0)
#endif

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
static inline void sched_info_reset_dequeued(struct task_struct *t)
{
	t->sched_info.last_queued = 0;
}

/*
 * Called when a process is dequeued from the active array and given
 * the cpu.  We should note that with the exception of interactive
 * tasks, the expired queue will become the active queue after the active
 * queue is empty, without explicitly dequeuing and requeuing tasks in the
 * expired queue.  (Interactive tasks may be requeued directly to the
 * active queue, thus delaying tasks in the expired queue from running;
 * see scheduler_tick()).
 *
 * Though we are interested in knowing how long it was from the *first* time a
 * task was queued to the time that it finally hit a cpu, we call this routine
 * from dequeue_task() to account for possible rq->clock skew across cpus. The
 * delta taken on each cpu would annul the skew.
 */
static inline void sched_info_dequeued(struct task_struct *t)
{
	unsigned long long now = task_rq(t)->clock, delta = 0;

	if (unlikely(sched_info_on()))
		if (t->sched_info.last_queued)
			delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;

	rq_sched_info_dequeued(task_rq(t), delta);
}
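
To make the skew argument concrete: because each wait interval is opened and closed against the clock of the runqueue the task is on at that moment, the per-cpu deltas sum correctly even when the two clocks disagree wildly. Below is a self-contained userspace sketch of that arithmetic (illustrative only, not kernel code):

/* Simulation of per-rq delta accounting across a migration. */
#include <stdio.h>

int main(void)
{
	/* two runqueue clocks, heavily skewed relative to each other */
	unsigned long long cpu0_clock = 1000, cpu1_clock = 5000;
	unsigned long long run_delay = 0, last_queued;

	/* enqueue on cpu0; wait d1 = 40 ticks; dequeue for migration */
	last_queued = cpu0_clock;
	cpu0_clock += 40;
	run_delay += cpu0_clock - last_queued;	/* d1, measured on cpu0 only */

	/* enqueue on cpu1; wait d2 = 30 ticks; finally run */
	last_queued = cpu1_clock;
	cpu1_clock += 30;
	run_delay += cpu1_clock - last_queued;	/* d2, measured on cpu1 only */

	/* prints 70 regardless of the 4000-tick skew between the clocks */
	printf("run_delay = %llu\n", run_delay);
	return 0;
}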

/*
 * Called when a task finally hits the cpu.  We can now calculate how
 * long it was waiting to run.  We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct task_struct *t)
{
	unsigned long long now = task_rq(t)->clock, delta = 0;

	if (t->sched_info.last_queued)
		delta = now - t->sched_info.last_queued;
	sched_info_reset_dequeued(t);
	t->sched_info.run_delay += delta;
	t->sched_info.last_arrival = now;
	t->sched_info.pcount++;

	rq_sched_info_arrive(task_rq(t), delta);
}
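
The run_delay/pcount pair maintained here is also exported per task through procfs. The userspace sketch below assumes the conventional three-field layout of /proc/<pid>/schedstat (time on cpu, run_delay, pcount); treat that layout as an assumption, and note this is not part of this file:

/* Userspace sketch: per-task wait stats (assumed procfs layout). */
#include <stdio.h>

int main(void)
{
	unsigned long long exec_ns, delay_ns;
	unsigned long pcount;
	FILE *f = fopen("/proc/self/schedstat", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%llu %llu %lu", &exec_ns, &delay_ns, &pcount) == 3)
		printf("ran %llu ns, waited %llu ns, %lu arrivals\n",
		       exec_ns, delay_ns, pcount);
	fclose(f);
	return 0;
}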

/*
 * Called when a process is queued into either the active or expired
 * array.  The time is noted and later used to determine how long the
 * task had to wait to reach the cpu.  Since the expired queue will
 * become the active queue after the active queue is empty, without
 * dequeuing and requeuing any tasks, we are interested in queuing to
 * either.  It is unusual but not impossible for tasks to be dequeued
 * and immediately requeued in the same or another array: this can
 * happen in sched_yield(), set_user_nice(), and even load_balance()
 * as it moves tasks from runqueue to runqueue.
 *
 * This function is only called from enqueue_task(), but also only updates
 * the timestamp if it is not already set.  It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct task_struct *t)
{
	if (unlikely(sched_info_on()))
		if (!t->sched_info.last_queued)
			t->sched_info.last_queued = task_rq(t)->clock;
}

/*
 * Called when a process ceases being the currently-running process, either
 * voluntarily or involuntarily.  Now we can calculate how long we ran.
 * Also, if the process is still in the TASK_RUNNING state, call
 * sched_info_queued() to mark that it has now again started waiting on
 * the runqueue.
 */
static inline void sched_info_depart(struct task_struct *t)
{
	unsigned long long delta = task_rq(t)->clock -
					t->sched_info.last_arrival;

	rq_sched_info_depart(task_rq(t), delta);

	if (t->state == TASK_RUNNING)
		sched_info_queued(t);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice.  (This may also be called when switching to or from
 * the idle task.)  We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	struct rq *rq = task_rq(prev);

	/*
	 * prev now departs the cpu.  It's not interesting to record
	 * stats about how efficient we were at scheduling the idle
	 * process, however.
	 */
	if (prev != rq->idle)
		sched_info_depart(prev);

	if (next != rq->idle)
		sched_info_arrive(next);
}
static inline void
sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
	if (unlikely(sched_info_on()))
		__sched_info_switch(prev, next);
}
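
For orientation, this hook runs once per context switch, with prev != next guaranteed by the caller. A hedged sketch of that caller shape follows; the helper is hypothetical, not the kernel's actual schedule() code:

/* Hypothetical caller shape (illustrative): switch-time bookkeeping. */
static void do_switch_bookkeeping(struct task_struct *prev,
				  struct task_struct *next)
{
	if (prev != next) {
		sched_info_switch(prev, next);
		/* ... the actual context switch would follow here ... */
	}
}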
#else
#define sched_info_queued(t)		do { } while (0)
#define sched_info_reset_dequeued(t)	do { } while (0)
#define sched_info_dequeued(t)		do { } while (0)
#define sched_info_switch(t, next)	do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */

/*
 * The following are functions that support scheduler-internal time accounting.
 * These functions are generally called at the timer tick.  None of this depends
 * on CONFIG_SCHEDSTATS.
 */

/**
 * account_group_user_time - Maintain utime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the utime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the utime field there.
 */
static inline void account_group_user_time(struct task_struct *tsk,
					   cputime_t cputime)
{
	struct thread_group_cputimer *cputimer;

	/* tsk == current, ensure it is safe to use ->signal */
	if (unlikely(tsk->exit_state))
		return;

	cputimer = &tsk->signal->cputimer;

	if (!cputimer->running)
		return;

	spin_lock(&cputimer->lock);
	cputimer->cputime.utime =
		cputime_add(cputimer->cputime.utime, cputime);
	spin_unlock(&cputimer->lock);
}

/**
 * account_group_system_time - Maintain stime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @cputime:	Time value by which to increment the stime field of the
 *		thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the stime field there.
 */
static inline void account_group_system_time(struct task_struct *tsk,
					     cputime_t cputime)
{
	struct thread_group_cputimer *cputimer;

	/* tsk == current, ensure it is safe to use ->signal */
	if (unlikely(tsk->exit_state))
		return;

	cputimer = &tsk->signal->cputimer;
	if (!cputimer->running)
		return;

	spin_lock(&cputimer->lock);
	cputimer->cputime.stime =
		cputime_add(cputimer->cputime.stime, cputime);
	spin_unlock(&cputimer->lock);
}

/**
 * account_group_exec_runtime - Maintain exec runtime for a thread group.
 *
 * @tsk:	Pointer to task structure.
 * @ns:		Time value by which to increment the sum_exec_runtime field
 *		of the thread_group_cputime structure.
 *
 * If thread group time is being maintained, get the structure for the
 * running CPU and update the sum_exec_runtime field there.
 */
static inline void account_group_exec_runtime(struct task_struct *tsk,
					      unsigned long long ns)
{
	struct thread_group_cputimer *cputimer;
	struct signal_struct *sig;

	sig = tsk->signal;
	/* see __exit_signal()->task_rq_unlock_wait() */
	barrier();
	if (unlikely(!sig))
		return;

	cputimer = &sig->cputimer;
	if (!cputimer->running)
		return;

	spin_lock(&cputimer->lock);
	cputimer->cputime.sum_exec_runtime += ns;
	spin_unlock(&cputimer->lock);
}