// SPDX-License-Identifier: GPL-2.0-or-later
/* delayacct.c - per-task delay accounting
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 */

#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/cputime.h>
#include <linux/slab.h>
#include <linux/taskstats.h>
#include <linux/time.h>
#include <linux/sysctl.h>
#include <linux/delayacct.h>
#include <linux/module.h>

int delayacct_on __read_mostly = 1;	/* Delay accounting turned on/off */
EXPORT_SYMBOL_GPL(delayacct_on);
struct kmem_cache *delayacct_cache;
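
/* Boot with "nodelayacct" to disable delay accounting at startup. */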
static int __init delayacct_setup_disable(char *str)
{
	delayacct_on = 0;
	return 1;
}
__setup("nodelayacct", delayacct_setup_disable);
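
/* Create the slab cache for struct task_delay_info and set up init_task. */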
void delayacct_init(void)
{
	delayacct_cache = KMEM_CACHE(task_delay_info, SLAB_PANIC|SLAB_ACCOUNT);
	delayacct_tsk_init(&init_task);
}
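
/* Allocate and initialize a task's delay accounting structure. */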
void __delayacct_tsk_init(struct task_struct *tsk)
{
	tsk->delays = kmem_cache_zalloc(delayacct_cache, GFP_KERNEL);
	if (tsk->delays)
		raw_spin_lock_init(&tsk->delays->lock);
}

/*
 * Finish delay accounting for a statistic using its timestamps (@start),
 * accumulator (@total) and @count
 */
static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total,
			  u32 *count)
{
	s64 ns = ktime_get_ns() - *start;
	unsigned long flags;

	if (ns > 0) {
		raw_spin_lock_irqsave(lock, flags);
		*total += ns;
		(*count)++;
		raw_spin_unlock_irqrestore(lock, flags);
	}
}
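
/* Mark the start of a block I/O (or swapin) delay for the current task. */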
void __delayacct_blkio_start(void)
{
	current->delays->blkio_start = ktime_get_ns();
}

/*
 * We cannot rely on the `current` macro, as we haven't yet switched back to
 * the process being woken.
 */
void __delayacct_blkio_end(struct task_struct *p)
{
	struct task_delay_info *delays = p->delays;
	u64 *total;
	u32 *count;

	if (p->delays->flags & DELAYACCT_PF_SWAPIN) {
		total = &delays->swapin_delay;
		count = &delays->swapin_count;
	} else {
		total = &delays->blkio_delay;
		count = &delays->blkio_count;
	}

	delayacct_end(&delays->lock, &delays->blkio_start, total, count);
}
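
/*
 * Accumulate @tsk's delays into the taskstats structure @d. Each total is
 * reset to zero when it would overflow, so a zero total together with a
 * non-zero count means the statistic wrapped.
 */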
int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
{
	u64 utime, stime, stimescaled, utimescaled;
	unsigned long long t2, t3;
	unsigned long flags, t1;
	s64 tmp;

	task_cputime(tsk, &utime, &stime);
	tmp = (s64)d->cpu_run_real_total;
	tmp += utime + stime;
	d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;

	task_cputime_scaled(tsk, &utimescaled, &stimescaled);
	tmp = (s64)d->cpu_scaled_run_real_total;
	tmp += utimescaled + stimescaled;
	d->cpu_scaled_run_real_total =
		(tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;

	/*
	 * No locking available for sched_info (and too expensive to add one)
	 * Mitigate by taking snapshot of values
	 */
	t1 = tsk->sched_info.pcount;
	t2 = tsk->sched_info.run_delay;
	t3 = tsk->se.sum_exec_runtime;

	d->cpu_count += t1;

	tmp = (s64)d->cpu_delay_total + t2;
	d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp;

	tmp = (s64)d->cpu_run_virtual_total + t3;
	d->cpu_run_virtual_total =
		(tmp < (s64)d->cpu_run_virtual_total) ? 0 : tmp;

	/* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */

	raw_spin_lock_irqsave(&tsk->delays->lock, flags);
	tmp = d->blkio_delay_total + tsk->delays->blkio_delay;
	d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp;
	tmp = d->swapin_delay_total + tsk->delays->swapin_delay;
	d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp;
	tmp = d->freepages_delay_total + tsk->delays->freepages_delay;
	d->freepages_delay_total = (tmp < d->freepages_delay_total) ? 0 : tmp;
	tmp = d->thrashing_delay_total + tsk->delays->thrashing_delay;
	d->thrashing_delay_total = (tmp < d->thrashing_delay_total) ? 0 : tmp;
	d->blkio_count += tsk->delays->blkio_count;
	d->swapin_count += tsk->delays->swapin_count;
	d->freepages_count += tsk->delays->freepages_count;
	d->thrashing_count += tsk->delays->thrashing_count;
	raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);

	return 0;
}
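
/*
 * Return @tsk's accumulated block I/O plus swapin delay, converted from
 * nanoseconds to clock ticks.
 */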
__u64 __delayacct_blkio_ticks(struct task_struct *tsk)
{
	__u64 ret;
	unsigned long flags;

	raw_spin_lock_irqsave(&tsk->delays->lock, flags);
	ret = nsec_to_clock_t(tsk->delays->blkio_delay +
				tsk->delays->swapin_delay);
	raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);
	return ret;
}
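
/* Delay accounting for time spent in memory reclaim ("freepages"). */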
void __delayacct_freepages_start(void)
{
	current->delays->freepages_start = ktime_get_ns();
}

void __delayacct_freepages_end(void)
{
	delayacct_end(
		&current->delays->lock,
		&current->delays->freepages_start,
		&current->delays->freepages_delay,
		&current->delays->freepages_count);
}
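
/* Delay accounting for time spent waiting on thrashing page cache. */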
void __delayacct_thrashing_start(void)
{
	current->delays->thrashing_start = ktime_get_ns();
}

void __delayacct_thrashing_end(void)
{
	delayacct_end(&current->delays->lock,
		      &current->delays->thrashing_start,
		      &current->delays->thrashing_delay,
		      &current->delays->thrashing_count);
}