/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */

/*
 * Collect locking event counts
 */
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/fs.h>

#include "lock_events.h"

#undef LOCK_EVENT
#define LOCK_EVENT(name)	[LOCKEVENT_ ## name] = #name,

#define LOCK_EVENTS_DIR		"lock_event_counts"

/*
 * When CONFIG_LOCK_EVENT_COUNTS is enabled, event counts of different
 * types of locks will be reported under the <debugfs>/lock_event_counts/
 * directory. See lock_events_list.h for the list of available locking
 * events.
 *
 * Writing to the special ".reset_counts" file will reset all the above
 * locking event counts. This is a very slow operation and so should not
 * be done frequently.
 *
 * These event counts are implemented as per-cpu variables which are
 * summed and computed whenever the corresponding debugfs files are read.
 * This minimizes added overhead making the counts usable even in a
 * production environment.
 */
static const char * const lockevent_names[lockevent_num + 1] = {

#include "lock_events_list.h"

	[LOCKEVENT_reset_cnts] = ".reset_counts",
};
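
/*
 * Note: the array above is filled in via the X-macro pattern.
 * lock_events_list.h is simply a list of LOCK_EVENT(name) entries, and the
 * LOCK_EVENT() definition above turns each entry into a designated
 * initializer.  As a sketch, a hypothetical LOCK_EVENT(lock_pending) line
 * would expand here to:
 *
 *	[LOCKEVENT_lock_pending] = "lock_pending",
 *
 * lock_events.h presumably redefines LOCK_EVENT() again to generate the
 * matching LOCKEVENT_* enum values, which keeps the array indices and the
 * name strings in sync.
 */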

/*
 * Per-cpu counts
 */
DEFINE_PER_CPU(unsigned long, lockevents[lockevent_num]);
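
/*
 * A minimal sketch of how a locking code path is expected to bump one of
 * these per-cpu slots, assuming the lockevent_inc() helper declared in
 * lock_events.h and a hypothetical event name:
 *
 *	lockevent_inc(lock_pending);
 *
 * which presumably amounts to a this_cpu increment of
 * lockevents[LOCKEVENT_lock_pending].  Keeping the hot path to a cheap
 * per-cpu increment is the point; the expensive cross-CPU summation is
 * deferred to lockevent_read() below.
 */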

/*
 * The lockevent_read() function can be overridden.
 */
ssize_t __weak lockevent_read(struct file *file, char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	char buf[64];
	int cpu, id, len;
	u64 sum = 0;

	/*
	 * Get the counter ID stored in file->f_inode->i_private
	 */
	id = (long)file_inode(file)->i_private;

	if (id >= lockevent_num)
		return -EBADF;

	for_each_possible_cpu(cpu)
		sum += per_cpu(lockevents[id], cpu);
	len = snprintf(buf, sizeof(buf) - 1, "%llu\n", sum);

	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
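
/*
 * Usage sketch (root only; assumes debugfs is mounted at /sys/kernel/debug
 * and uses a hypothetical event name):
 *
 *	# cat /sys/kernel/debug/lock_event_counts/lock_pending
 *
 * Each read sums the per-cpu slots across all possible CPUs, so the value
 * returned is a snapshot computed at read time rather than a continuously
 * maintained total.
 */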

/*
 * Function to handle write request
 *
 * When idx = reset_cnts, reset all the counts.
 */
static ssize_t lockevent_write(struct file *file, const char __user *user_buf,
			       size_t count, loff_t *ppos)
{
	int cpu;

	/*
	 * Get the counter ID stored in file->f_inode->i_private
	 */
	if ((long)file_inode(file)->i_private != LOCKEVENT_reset_cnts)
		return count;

	for_each_possible_cpu(cpu) {
		int i;
		unsigned long *ptr = per_cpu_ptr(lockevents, cpu);

		for (i = 0; i < lockevent_num; i++)
			WRITE_ONCE(ptr[i], 0);
	}
	return count;
}
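
/*
 * Usage sketch for resetting the counts (root only; same path assumption as
 * above).  The written data is ignored; any write to the ".reset_counts"
 * file zeroes every event counter on every CPU:
 *
 *	# echo 1 > /sys/kernel/debug/lock_event_counts/.reset_counts
 */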

/*
 * Debugfs data structures
 */
static const struct file_operations fops_lockevent = {
	.read = lockevent_read,
	.write = lockevent_write,
	.llseek = default_llseek,
};

#ifdef CONFIG_PARAVIRT_SPINLOCKS
#include <asm/paravirt.h>

static bool __init skip_lockevent(const char *name)
{
	static int pv_on __initdata = -1;

	if (pv_on < 0)
		pv_on = !pv_is_native_spin_unlock();
	/*
	 * Skip PV qspinlock events on bare metal.
	 */
	if (!pv_on && !memcmp(name, "pv_", 3))
		return true;
	return false;
}
#else
static inline bool skip_lockevent(const char *name)
{
	return false;
}
#endif

/*
 * Initialize debugfs for the locking event counts.
 */
static int __init init_lockevent_counts(void)
{
	struct dentry *d_counts = debugfs_create_dir(LOCK_EVENTS_DIR, NULL);
	int i;

	if (!d_counts)
		goto out;

	/*
	 * Create the debugfs files
	 *
	 * As reading from and writing to the stat files can be slow, only
	 * root is allowed to do the read/write to limit impact to system
	 * performance.
	 */
	for (i = 0; i < lockevent_num; i++) {
		if (skip_lockevent(lockevent_names[i]))
			continue;
		if (!debugfs_create_file(lockevent_names[i], 0400, d_counts,
					 (void *)(long)i, &fops_lockevent))
			goto fail_undo;
	}

	if (!debugfs_create_file(lockevent_names[LOCKEVENT_reset_cnts], 0200,
				 d_counts, (void *)(long)LOCKEVENT_reset_cnts,
				 &fops_lockevent))
		goto fail_undo;

	return 0;
fail_undo:
	debugfs_remove_recursive(d_counts);
out:
	pr_warn("Could not create '%s' debugfs entries\n", LOCK_EVENTS_DIR);
	return -ENOMEM;
}
fs_initcall(init_lockevent_counts);