// SPDX-License-Identifier: GPL-2.0-only
/*
 * latencytop.c: Latency display infrastructure
 *
 * (C) Copyright 2008 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */

/*
* CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is
 * used by the "latencytop" userspace tool. The latency that is tracked is not
 * the 'traditional' interrupt latency (which is primarily caused by something
 * else consuming CPU), but instead, it is the latency an application encounters
 * because the kernel sleeps on its behalf for various reasons.
 *
 * This code tracks 2 levels of statistics:
 * 1) System level latency
 * 2) Per process latency
 *
 * The latency is stored in fixed sized data structures in an accumulated form;
 * if the "same" latency cause is hit twice, this will be tracked as one entry
 * in the data structure. Both the count, total accumulated latency and maximum
 * latency are tracked in this data structure. When the fixed size structure is
 * full, no new causes are tracked until the buffer is flushed by writing to
 * the /proc file; the userspace tool does this on a regular basis.
 *
 * A latency cause is identified by a stringified backtrace at the point that
 * the scheduler gets invoked. The userland tool will use this string to
 * identify the cause of the latency in human readable form.
 *
 * The information is exported via /proc/latency_stats and /proc/<pid>/latency.
 * These files look like this:
 *
 * Latency Top version : v0.1
 * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl
 * |    |    |    |
 * |    |    |    +----> the stringified backtrace
 * |    |    +---------> The maximum latency for this entry in microseconds
 * |    +--------------> The accumulated latency for this entry (microseconds)
 * +-------------------> The number of times this entry is hit
 *
 * (note: the average latency is the accumulated latency divided by the number
 * of times)
 */
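
/*
 * As an illustration only (not part of this file): a userspace consumer
 * could recover the three numeric fields and the backtrace from one
 * /proc/latency_stats line, and derive the average, roughly like this
 * ("line" is a hypothetical buffer holding one such line):
 *
 *	unsigned long count, total, max;
 *	char trace[256];
 *
 *	if (sscanf(line, "%lu %lu %lu %255[^\n]",
 *		   &count, &total, &max, trace) == 4 && count > 0)
 *		printf("avg %lu usec, max %lu usec: %s\n",
 *		       total / count, max, trace);
 */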

#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/latencytop.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/stat.h>
#include <linux/list.h>
#include <linux/stacktrace.h>

static DEFINE_RAW_SPINLOCK(latency_lock);

#define MAXLR 128
static struct latency_record latency_record[MAXLR];

int latencytop_enabled;
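
/*
 * For reference, struct latency_record (from <linux/latencytop.h>) holds
 * roughly the following per tracked cause; field names as in that header,
 * times in microseconds:
 *
 *	struct latency_record {
 *		unsigned long	backtrace[LT_BACKTRACEDEPTH];
 *		unsigned int	count;
 *		unsigned long	time;
 *		unsigned long	max;
 *	};
 */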

void clear_tsk_latency_tracing(struct task_struct *p)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&latency_lock, flags);
	memset(&p->latency_record, 0, sizeof(p->latency_record));
	p->latency_record_count = 0;
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

static void clear_global_latency_tracing(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&latency_lock, flags);
	memset(&latency_record, 0, sizeof(latency_record));
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

static void __sched
account_global_scheduler_latency(struct task_struct *tsk,
				 struct latency_record *lat)
{
	int firstnonnull = MAXLR + 1;
	int i;

	/* skip kernel threads for now */
	if (!tsk->mm)
		return;

	for (i = 0; i < MAXLR; i++) {
		int q, same = 1;

		/* Nothing stored: */
		if (!latency_record[i].backtrace[0]) {
			if (firstnonnull > i)
				firstnonnull = i;
			continue;
		}
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat->backtrace[q];

			if (latency_record[i].backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 entry marks end of backtrace: */
			if (!record)
				break;
		}
		if (same) {
			latency_record[i].count++;
			latency_record[i].time += lat->time;
			if (lat->time > latency_record[i].max)
				latency_record[i].max = lat->time;
			return;
		}
	}

	i = firstnonnull;
	if (i >= MAXLR - 1)
		return;

	/* Allocated a new one: */
	memcpy(&latency_record[i], lat, sizeof(struct latency_record));
}

/**
 * __account_scheduler_latency - record an occurred latency
 * @tsk: the task struct of the task hitting the latency
 * @usecs: the duration of the latency in microseconds
 * @inter: 1 if the sleep was interruptible, 0 if uninterruptible
 *
 * This function is the main entry point for recording latency entries
 * as called by the scheduler.
 *
 * This function has a few special cases to deal with normal 'non-latency'
 * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
 * since this usually is caused by waiting for events via select() and co.
 *
 * Negative latencies (caused by time going backwards) are also explicitly
 * skipped.
 */
void __sched
__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
{
	unsigned long flags;
	int i, q;
	struct latency_record lat;

	/* Long interruptible waits are generally user requested... */
	if (inter && usecs > 5000)
		return;

	/* Negative sleeps are time going backwards */
	/* Zero-time sleeps are non-interesting */
	if (usecs <= 0)
		return;

	memset(&lat, 0, sizeof(lat));
	lat.count = 1;
	lat.time = usecs;
	lat.max = usecs;

	stack_trace_save_tsk(tsk, lat.backtrace, LT_BACKTRACEDEPTH, 0);

	raw_spin_lock_irqsave(&latency_lock, flags);

	account_global_scheduler_latency(tsk, &lat);

	for (i = 0; i < tsk->latency_record_count; i++) {
		struct latency_record *mylat;
		int same = 1;

		mylat = &tsk->latency_record[i];
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat.backtrace[q];

			if (mylat->backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 entry is end of backtrace */
			if (!record)
				break;
		}
		if (same) {
			mylat->count++;
			mylat->time += lat.time;
			if (lat.time > mylat->max)
				mylat->max = lat.time;
			goto out_unlock;
		}
	}

	/*
	 * Short term hack: if we're over LT_SAVECOUNT entries we stop;
	 * in the future we should recycle:
	 */
	if (tsk->latency_record_count >= LT_SAVECOUNT)
		goto out_unlock;

	/* Allocated a new one: */
	i = tsk->latency_record_count++;
	memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));

out_unlock:
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}
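
/*
 * Note that the scheduler does not call __account_scheduler_latency()
 * directly: it goes through the account_scheduler_latency() inline
 * wrapper in <linux/latencytop.h>, which (roughly) only forwards here
 * when the feature is enabled:
 *
 *	static inline void
 *	account_scheduler_latency(struct task_struct *task, int usecs, int inter)
 *	{
 *		if (unlikely(latencytop_enabled))
 *			__account_scheduler_latency(task, usecs, inter);
 *	}
 */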

static int lstats_show(struct seq_file *m, void *v)
{
	int i;

	seq_puts(m, "Latency Top version : v0.1\n");

	for (i = 0; i < MAXLR; i++) {
		struct latency_record *lr = &latency_record[i];

		if (lr->backtrace[0]) {
			int q;

			seq_printf(m, "%i %lu %lu",
				   lr->count, lr->time, lr->max);
			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
				unsigned long bt = lr->backtrace[q];

				if (!bt)
					break;

				seq_printf(m, " %ps", (void *)bt);
			}
			seq_puts(m, "\n");
		}
	}
	return 0;
}

static ssize_t
lstats_write(struct file *file, const char __user *buf, size_t count,
	     loff_t *offs)
{
	clear_global_latency_tracing();
	return count;
}
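
/*
 * As documented in the header comment, any write to /proc/latency_stats
 * flushes the system-wide table; the userspace tool does this on a
 * regular basis, and it can also be done by hand, e.g.:
 *
 *	echo > /proc/latency_stats
 */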

static int lstats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, lstats_show, NULL);
}

static const struct file_operations lstats_fops = {
	.open		= lstats_open,
	.read		= seq_read,
	.write		= lstats_write,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init init_lstats_procfs(void)
{
	proc_create("latency_stats", 0644, NULL, &lstats_fops);
	return 0;
}
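
/*
 * With the proc file registered, the accumulated system-wide data can be
 * inspected directly, e.g.:
 *
 *	cat /proc/latency_stats
 */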

int sysctl_latencytop(struct ctl_table *table, int write,
		      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err;

	err = proc_dointvec(table, write, buffer, lenp, ppos);
	if (latencytop_enabled)
		force_schedstat_enabled();

	return err;
}
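
/*
 * Illustrative usage, assuming the standard sysctl wiring for
 * kernel.latencytop: enabling the feature at runtime also forces
 * schedstats on via force_schedstat_enabled() above.
 *
 *	sysctl kernel.latencytop=1
 *	# or: echo 1 > /proc/sys/kernel/latencytop
 */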

device_initcall(init_lstats_procfs);