// SPDX-License-Identifier: GPL-2.0
/*
 * KCSAN debugfs interface.
 *
 * Copyright (C) 2019, Google LLC.
 */

#define pr_fmt(fmt) "kcsan: " fmt

#include <linux/atomic.h>
#include <linux/bsearch.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/string.h>
#include <linux/uaccess.h>

#include "kcsan.h"

atomic_long_t kcsan_counters[KCSAN_COUNTER_COUNT];
static const char * const counter_names[] = {
	[KCSAN_COUNTER_USED_WATCHPOINTS] = "used_watchpoints",
	[KCSAN_COUNTER_SETUP_WATCHPOINTS] = "setup_watchpoints",
	[KCSAN_COUNTER_DATA_RACES] = "data_races",
	[KCSAN_COUNTER_ASSERT_FAILURES] = "assert_failures",
	[KCSAN_COUNTER_NO_CAPACITY] = "no_capacity",
	[KCSAN_COUNTER_REPORT_RACES] = "report_races",
	[KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN] = "races_unknown_origin",
	[KCSAN_COUNTER_UNENCODABLE_ACCESSES] = "unencodable_accesses",
	[KCSAN_COUNTER_ENCODING_FALSE_POSITIVES] = "encoding_false_positives",
};
static_assert(ARRAY_SIZE(counter_names) == KCSAN_COUNTER_COUNT);

/*
 * Addresses for filtering functions from reporting. This list can be used as a
 * whitelist or blacklist.
 */
static struct {
	unsigned long	*addrs;		/* array of addresses */
	size_t		size;		/* current size */
	int		used;		/* number of elements used */
	bool		sorted;		/* if elements are sorted */
	bool		whitelist;	/* if list is a blacklist or whitelist */
} report_filterlist = {
	.addrs		= NULL,
	.size		= 8,		/* small initial size */
	.used		= 0,
	.sorted		= false,
	.whitelist	= false,	/* default is blacklist */
};
static DEFINE_SPINLOCK(report_filterlist_lock);

/*
 * The microbenchmark allows benchmarking KCSAN core runtime only. To run
 * multiple threads, pipe 'microbench=<iters>' from multiple tasks into the
 * debugfs file. This will not generate any conflicts, and tests fast-path only.
 */
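/*
 * Example (assuming debugfs is mounted at /sys/kernel/debug; the iteration
 * count below is arbitrary):
 *
 *	echo microbench=1000000 > /sys/kernel/debug/kcsan
 */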
static noinline void microbenchmark(unsigned long iters)
{
	const struct kcsan_ctx ctx_save = current->kcsan_ctx;
	const bool was_enabled = READ_ONCE(kcsan_enabled);
	cycles_t cycles;

	/* We may have been called from an atomic region; reset context. */
	memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
	/*
	 * Disable to benchmark fast-path for all accesses, and (expected
	 * negligible) call into slow-path, but never set up watchpoints.
	 */
	WRITE_ONCE(kcsan_enabled, false);

	pr_info("%s begin | iters: %lu\n", __func__, iters);

	cycles = get_cycles();
	while (iters--) {
		unsigned long addr = iters & ((PAGE_SIZE << 8) - 1);
		int type = !(iters & 0x7f) ? KCSAN_ACCESS_ATOMIC :
				(!(iters & 0xf) ? KCSAN_ACCESS_WRITE : 0);
		__kcsan_check_access((void *)addr, sizeof(long), type);
	}
	cycles = get_cycles() - cycles;

	pr_info("%s end | cycles: %llu\n", __func__, cycles);

	WRITE_ONCE(kcsan_enabled, was_enabled);
	/* restore context */
	current->kcsan_ctx = ctx_save;
}
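
/* Comparator for sort() and bsearch() over the filter-list addresses. */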
static int cmp_filterlist_addrs(const void *rhs, const void *lhs)
{
	const unsigned long a = *(const unsigned long *)rhs;
	const unsigned long b = *(const unsigned long *)lhs;

	return a < b ? -1 : a == b ? 0 : 1;
}
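
/*
 * Return true if reports for the function containing @func_addr should be
 * skipped, according to the filter list and whether it is interpreted as a
 * whitelist or blacklist.
 */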
bool kcsan_skip_report_debugfs(unsigned long func_addr)
{
	unsigned long symbolsize, offset;
	unsigned long flags;
	bool ret = false;

	if (!kallsyms_lookup_size_offset(func_addr, &symbolsize, &offset))
		return false;
	func_addr -= offset; /* Get function start */

	spin_lock_irqsave(&report_filterlist_lock, flags);
	if (report_filterlist.used == 0)
		goto out;

	/* Sort array if it is unsorted, and then do a binary search. */
	if (!report_filterlist.sorted) {
		sort(report_filterlist.addrs, report_filterlist.used,
		     sizeof(unsigned long), cmp_filterlist_addrs, NULL);
		report_filterlist.sorted = true;
	}
	ret = !!bsearch(&func_addr, report_filterlist.addrs,
			report_filterlist.used, sizeof(unsigned long),
			cmp_filterlist_addrs);
	if (report_filterlist.whitelist)
		ret = !ret;

out:
	spin_unlock_irqrestore(&report_filterlist_lock, flags);
	return ret;
}
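
/* Switch the filter list between whitelist and blacklist mode. */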
static void set_report_filterlist_whitelist(bool whitelist)
{
	unsigned long flags;

	spin_lock_irqsave(&report_filterlist_lock, flags);
	report_filterlist.whitelist = whitelist;
	spin_unlock_irqrestore(&report_filterlist_lock, flags);
}

/*
 * Add the function with name @func to the report filter list, looking up its
 * address via kallsyms. Returns 0 on success, error-code otherwise.
 */
static ssize_t insert_report_filterlist(const char *func)
{
	unsigned long flags;
	unsigned long addr = kallsyms_lookup_name(func);
	ssize_t ret = 0;

	if (!addr) {
		pr_err("could not find function: '%s'\n", func);
		return -ENOENT;
	}

	spin_lock_irqsave(&report_filterlist_lock, flags);

	if (report_filterlist.addrs == NULL) {
		/* initial allocation */
		report_filterlist.addrs =
			kmalloc_array(report_filterlist.size,
				      sizeof(unsigned long), GFP_ATOMIC);
		if (report_filterlist.addrs == NULL) {
			ret = -ENOMEM;
			goto out;
		}
	} else if (report_filterlist.used == report_filterlist.size) {
		/* resize filterlist */
		size_t new_size = report_filterlist.size * 2;
		unsigned long *new_addrs =
			krealloc(report_filterlist.addrs,
				 new_size * sizeof(unsigned long), GFP_ATOMIC);

		if (new_addrs == NULL) {
			/* leave filterlist itself untouched */
			ret = -ENOMEM;
			goto out;
		}

		report_filterlist.size = new_size;
		report_filterlist.addrs = new_addrs;
	}

	/* Note: deduplicating should be done in userspace. */
	report_filterlist.addrs[report_filterlist.used++] =
		kallsyms_lookup_name(func);
	report_filterlist.sorted = false;

out:
	spin_unlock_irqrestore(&report_filterlist_lock, flags);

	return ret;
}
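
/* seq_file show handler: print the enabled state, counters, and filter list. */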
static int show_info(struct seq_file *file, void *v)
{
	int i;
	unsigned long flags;

	/* show stats */
	seq_printf(file, "enabled: %i\n", READ_ONCE(kcsan_enabled));
	for (i = 0; i < KCSAN_COUNTER_COUNT; ++i) {
		seq_printf(file, "%s: %ld\n", counter_names[i],
			   atomic_long_read(&kcsan_counters[i]));
	}

	/* show filter functions, and filter type */
	spin_lock_irqsave(&report_filterlist_lock, flags);
	seq_printf(file, "\n%s functions: %s\n",
		   report_filterlist.whitelist ? "whitelisted" : "blacklisted",
		   report_filterlist.used == 0 ? "none" : "");
	for (i = 0; i < report_filterlist.used; ++i)
		seq_printf(file, " %ps\n", (void *)report_filterlist.addrs[i]);
	spin_unlock_irqrestore(&report_filterlist_lock, flags);

	return 0;
}

static int debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_info, NULL);
}
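
/*
 * Parse commands written to the debugfs file:
 *   on                 - enable KCSAN
 *   off                - disable KCSAN
 *   microbench=<iters> - run the microbenchmark with <iters> iterations
 *   whitelist          - interpret the filter list as a whitelist
 *   blacklist          - interpret the filter list as a blacklist
 *   !<function>        - add <function> to the report filter list
 */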
static ssize_t
debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
{
	char kbuf[KSYM_NAME_LEN];
	char *arg;
	int read_len = count < (sizeof(kbuf) - 1) ? count : (sizeof(kbuf) - 1);

	if (copy_from_user(kbuf, buf, read_len))
		return -EFAULT;
	kbuf[read_len] = '\0';
	arg = strstrip(kbuf);

	if (!strcmp(arg, "on")) {
		WRITE_ONCE(kcsan_enabled, true);
	} else if (!strcmp(arg, "off")) {
		WRITE_ONCE(kcsan_enabled, false);
	} else if (str_has_prefix(arg, "microbench=")) {
		unsigned long iters;

		if (kstrtoul(&arg[strlen("microbench=")], 0, &iters))
			return -EINVAL;
		microbenchmark(iters);
	} else if (!strcmp(arg, "whitelist")) {
		set_report_filterlist_whitelist(true);
	} else if (!strcmp(arg, "blacklist")) {
		set_report_filterlist_whitelist(false);
	} else if (arg[0] == '!') {
		ssize_t ret = insert_report_filterlist(&arg[1]);

		if (ret < 0)
			return ret;
	} else {
		return -EINVAL;
	}

	return count;
}

static const struct file_operations debugfs_ops =
{
	.read	 = seq_read,
	.open	 = debugfs_open,
	.write	 = debugfs_write,
	.release = single_release
};
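
/*
 * Create the "kcsan" debugfs file at the debugfs root (usually mounted at
 * /sys/kernel/debug), which exposes the interface above.
 */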
static void __init kcsan_debugfs_init(void)
{
	debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops);
}

late_initcall(kcsan_debugfs_init);