// SPDX-License-Identifier: GPL-2.0+
//
// Torture test for smp_call_function() and friends.
//
// Copyright (C) Facebook, 2020.
//
// Author: Paul E. McKenney <paulmck@kernel.org>
#define pr_fmt(fmt) fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/types.h>

#define SCFTORT_STRING "scftorture"
#define SCFTORT_FLAG SCFTORT_STRING ": "
#define VERBOSE_SCFTORTOUT(s, x...) \
	do { if (verbose) pr_alert(SCFTORT_FLAG s "\n", ## x); } while (0)

#define SCFTORTOUT_ERRSTRING(s, x...) pr_alert(SCFTORT_FLAG "!!! " s "\n", ## x)
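// Usage sketch (illustrative, "tid" is a made-up variable, not from this
// file): VERBOSE_SCFTORTOUT("thread %d started", tid) prints only when the
// verbose module parameter is set, while SCFTORTOUT_ERRSTRING("out of memory")
// always prints, prefixed with "!!!" so test harnesses can grep for failures.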
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@kernel.org>");
// Wait until there are multiple CPUs before starting test.
torture_param(int, holdoff, IS_BUILTIN(CONFIG_SCF_TORTURE_TEST) ? 10 : 0,
	      "Holdoff time before test start (s)");
torture_param(int, longwait, 0, "Include ridiculously long waits? (seconds)");
torture_param(int, nthreads, -1, "# threads, defaults to -1 for all CPUs.");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s.");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(bool, use_cpus_read_lock, 0, "Use cpus_read_lock() to exclude CPU hotplug.");
torture_param(int, verbose, 0, "Enable verbose debugging printk()s");
torture_param(int, weight_resched, -1, "Testing weight for resched_cpu() operations.");
torture_param(int, weight_single, -1, "Testing weight for single-CPU no-wait operations.");
torture_param(int, weight_single_rpc, -1, "Testing weight for single-CPU RPC operations.");
torture_param(int, weight_single_wait, -1, "Testing weight for single-CPU wait operations.");
torture_param(int, weight_many, -1, "Testing weight for multi-CPU no-wait operations.");
torture_param(int, weight_many_wait, -1, "Testing weight for multi-CPU wait operations.");
torture_param(int, weight_all, -1, "Testing weight for all-CPU no-wait operations.");
torture_param(int, weight_all_wait, -1, "Testing weight for all-CPU wait operations.");

static char *torture_type = "";

#ifdef MODULE
#define SCFTORT_SHUTDOWN 0
#else
#define SCFTORT_SHUTDOWN 1
#endif

torture_param(bool, shutdown, SCFTORT_SHUTDOWN, "Shutdown at end of torture test.");
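
// For illustration only, a hypothetical invocation exercising mostly
// waited-on single-CPU calls on a 4-CPU guest (the parameter names are the
// module parameters defined above; the values are made up):
//
//	modprobe scftorture nthreads=4 weight_single_wait=3 weight_all=1 \
//		stat_interval=15 verbose=1
//
// Weights are relative, so this requests roughly a 3:1 mix of waited-on
// single-CPU calls and no-wait all-CPU broadcasts.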
struct scf_statistics {
	struct task_struct *task;
	int cpu;
	long long n_resched;
	long long n_single;
	long long n_single_ofl;
	long long n_single_rpc;
	long long n_single_rpc_ofl;
	long long n_single_wait;
	long long n_single_wait_ofl;
	long long n_many;
	long long n_many_wait;
	long long n_all;
	long long n_all_wait;
};

static struct scf_statistics *scf_stats_p;
static struct task_struct *scf_torture_stats_task;
static DEFINE_PER_CPU(long long, scf_invoked_count);

// Data for random primitive selection
#define SCF_PRIM_RESCHED	0
#define SCF_PRIM_SINGLE		1
#define SCF_PRIM_SINGLE_RPC	2
#define SCF_PRIM_MANY		3
#define SCF_PRIM_ALL		4
#define SCF_NPRIMS		8 // Need wait and no-wait versions of each,
				  //  except for SCF_PRIM_RESCHED and
				  //  SCF_PRIM_SINGLE_RPC.

static char *scf_prim_name[] = {
	"resched_cpu",
	"smp_call_function_single",
	"smp_call_function_single_rpc",
	"smp_call_function_many",
	"smp_call_function",
};

struct scf_selector {
	unsigned long scfs_weight;
	int scfs_prim;
	bool scfs_wait;
};
static struct scf_selector scf_sel_array[SCF_NPRIMS];
static int scf_sel_array_len;
static unsigned long scf_sel_totweight;

// Communicate between caller and handler.
struct scf_check {
	bool scfc_in;
	bool scfc_out;
	int scfc_cpu; // -1 for not _single().
	bool scfc_wait;
	bool scfc_rpc;
	struct completion scfc_completion;
};
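
// Illustrative handshake, assuming one caller and one handler: the caller
// sets scfc_in = true (after a barrier()) just before sending the IPI, and
// scf_handler() increments n_mb_in_errs if it does not observe that store.
// On the way out, a waited-on handler sets scfc_out = true, and the caller
// increments n_mb_out_errs if that store is not visible once the wait (or,
// for RPC calls, scfc_completion) returns.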

// Use to wait for all threads to start.
static atomic_t n_started;
static atomic_t n_errs;
static atomic_t n_mb_in_errs;
static atomic_t n_mb_out_errs;
static atomic_t n_alloc_errs;
static bool scfdone;
static char *bangstr = "";

static DEFINE_TORTURE_RANDOM_PERCPU(scf_torture_rand);

extern void resched_cpu(int cpu); // An alternative IPI vector.

// Print torture statistics.  Caller must ensure serialization.
static void scf_torture_stats_print(void)
{
	int cpu;
	int i;
	long long invoked_count = 0;
	bool isdone = READ_ONCE(scfdone);
	struct scf_statistics scfs = {};

	for_each_possible_cpu(cpu)
		invoked_count += data_race(per_cpu(scf_invoked_count, cpu));
	for (i = 0; i < nthreads; i++) {
		scfs.n_resched += scf_stats_p[i].n_resched;
		scfs.n_single += scf_stats_p[i].n_single;
		scfs.n_single_ofl += scf_stats_p[i].n_single_ofl;
		scfs.n_single_rpc += scf_stats_p[i].n_single_rpc;
		scfs.n_single_wait += scf_stats_p[i].n_single_wait;
		scfs.n_single_wait_ofl += scf_stats_p[i].n_single_wait_ofl;
		scfs.n_many += scf_stats_p[i].n_many;
		scfs.n_many_wait += scf_stats_p[i].n_many_wait;
		scfs.n_all += scf_stats_p[i].n_all;
		scfs.n_all_wait += scf_stats_p[i].n_all_wait;
	}
	if (atomic_read(&n_errs) || atomic_read(&n_mb_in_errs) ||
	    atomic_read(&n_mb_out_errs) || atomic_read(&n_alloc_errs))
		bangstr = "!!! ";
	pr_alert("%s %sscf_invoked_count %s: %lld resched: %lld single: %lld/%lld single_ofl: %lld/%lld single_rpc: %lld single_rpc_ofl: %lld many: %lld/%lld all: %lld/%lld ",
		 SCFTORT_FLAG, bangstr, isdone ? "VER" : "ver", invoked_count, scfs.n_resched,
		 scfs.n_single, scfs.n_single_wait, scfs.n_single_ofl, scfs.n_single_wait_ofl,
		 scfs.n_single_rpc, scfs.n_single_rpc_ofl,
		 scfs.n_many, scfs.n_many_wait, scfs.n_all, scfs.n_all_wait);
	torture_onoff_stats();
	pr_cont("ste: %d stnmie: %d stnmoe: %d staf: %d\n", atomic_read(&n_errs),
		atomic_read(&n_mb_in_errs), atomic_read(&n_mb_out_errs),
		atomic_read(&n_alloc_errs));
}
// Periodically prints torture statistics, if periodic statistics printing
// was specified via the stat_interval module parameter.
static int
scf_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("scf_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		scf_torture_stats_print();
		torture_shutdown_absorb("scf_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("scf_torture_stats");
	return 0;
}

// Add a primitive to the scf_sel_array[].
static void scf_sel_add(unsigned long weight, int prim, bool wait)
{
	struct scf_selector *scfsp = &scf_sel_array[scf_sel_array_len];

	// If no weight, if array would overflow, if computing three-place
	// percentages would overflow, or if the scf_prim_name[] array would
	// overflow, don't bother.  In the last three cases, complain.
	if (!weight ||
	    WARN_ON_ONCE(scf_sel_array_len >= ARRAY_SIZE(scf_sel_array)) ||
	    WARN_ON_ONCE(0 - 100000 * weight <= 100000 * scf_sel_totweight) ||
	    WARN_ON_ONCE(prim >= ARRAY_SIZE(scf_prim_name)))
		return;
	scf_sel_totweight += weight;
	scfsp->scfs_weight = scf_sel_totweight;
	scfsp->scfs_prim = prim;
	scfsp->scfs_wait = wait;
	scf_sel_array_len++;
}
// Dump out weighting percentages for scf_prim_name[] array.
static void scf_sel_dump(void)
{
	int i;
	unsigned long oldw = 0;
	struct scf_selector *scfsp;
	unsigned long w;

	for (i = 0; i < scf_sel_array_len; i++) {
		scfsp = &scf_sel_array[i];
		w = (scfsp->scfs_weight - oldw) * 100000 / scf_sel_totweight;
		pr_info("%s: %3lu.%03lu %s(%s)\n", __func__, w / 1000, w % 1000,
			scf_prim_name[scfsp->scfs_prim],
			scfsp->scfs_wait ? "wait" : "nowait");
		oldw = scfsp->scfs_weight;
	}
}
// Randomly pick a primitive and wait/nowait, based on weightings.
static struct scf_selector *scf_sel_rand(struct torture_random_state *trsp)
{
	int i;
	unsigned long w = torture_random(trsp) % (scf_sel_totweight + 1);

	for (i = 0; i < scf_sel_array_len; i++)
		if (scf_sel_array[i].scfs_weight >= w)
			return &scf_sel_array[i];
	WARN_ON_ONCE(1);
	return &scf_sel_array[0];
}
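
// Worked example (illustrative weights, not this module's defaults): after
// scf_sel_add(2, SCF_PRIM_SINGLE, false) and scf_sel_add(1, SCF_PRIM_ALL,
// false), the cumulative scfs_weight values are 2 and 3 and scf_sel_totweight
// is 3.  scf_sel_rand() then draws w in [0, 3]; w in {0, 1, 2} selects
// SCF_PRIM_SINGLE and w == 3 selects SCF_PRIM_ALL.  With such tiny weights
// the "+ 1" in the modulus skews the split (3:1 here rather than 2:1), but
// with the much larger default weights the bias is negligible.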

// Update statistics and occasionally burn up mass quantities of CPU time,
// if told to do so via scftorture.longwait.  Otherwise, occasionally burn
// a little bit.
static void scf_handler(void *scfc_in)
{
	int i;
	int j;
	unsigned long r = torture_random(this_cpu_ptr(&scf_torture_rand));
	struct scf_check *scfcp = scfc_in;

	if (likely(scfcp)) {
		WRITE_ONCE(scfcp->scfc_out, false); // For multiple receivers.
		if (WARN_ON_ONCE(unlikely(!READ_ONCE(scfcp->scfc_in))))
			atomic_inc(&n_mb_in_errs);
	}
	this_cpu_inc(scf_invoked_count);
	if (longwait <= 0) {
		if (!(r & 0xffc0)) {
			udelay(r & 0x3f);
			goto out;
		}
	}
	if (r & 0xfff)
		goto out;
	r = (r >> 12);
	if (longwait <= 0) {
		udelay((r & 0xff) + 1);
		goto out;
	}
	r = r % longwait + 1;
	for (i = 0; i < r; i++) {
		for (j = 0; j < 1000; j++) {
			udelay(1000);
			cpu_relax();
		}
	}
out:
	if (unlikely(!scfcp))
		return;
	if (scfcp->scfc_wait) {
		WRITE_ONCE(scfcp->scfc_out, true);
		if (scfcp->scfc_rpc)
			complete(&scfcp->scfc_completion);
	} else {
		kfree(scfcp);
	}
}
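
// Rough odds implied by the random bits above, assuming torture_random()
// output is approximately uniform: with longwait <= 0, about one call in
// 1024 does a short udelay() of at most 63 microseconds and about one in
// 4096 delays up to 256 microseconds; with longwait > 0, about one call in
// 4096 busy-waits between 1 and longwait seconds.  Everything else returns
// through "out" without delaying.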

// As above, but check for correct CPU.
static void scf_handler_1(void *scfc_in)
{
	struct scf_check *scfcp = scfc_in;

	if (likely(scfcp) && WARN_ONCE(smp_processor_id() != scfcp->scfc_cpu,
				       "%s: Wanted CPU %d got CPU %d\n",
				       __func__, scfcp->scfc_cpu, smp_processor_id())) {
		atomic_inc(&n_errs);
	}
	scf_handler(scfcp);
}

// Randomly do an smp_call_function*() invocation.
static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_random_state *trsp)
{
	uintptr_t cpu;
	int ret = 0;
	struct scf_check *scfcp = NULL;
	struct scf_selector *scfsp = scf_sel_rand(trsp);

	if (use_cpus_read_lock)
		cpus_read_lock();
	else
		preempt_disable();
	if (scfsp->scfs_prim == SCF_PRIM_SINGLE || scfsp->scfs_wait) {
		scfcp = kmalloc(sizeof(*scfcp), GFP_ATOMIC);
		if (WARN_ON_ONCE(!scfcp)) {
			atomic_inc(&n_alloc_errs);
		} else {
			scfcp->scfc_cpu = -1;
			scfcp->scfc_wait = scfsp->scfs_wait;
			scfcp->scfc_out = false;
			scfcp->scfc_rpc = false;
		}
	}
	switch (scfsp->scfs_prim) {
	case SCF_PRIM_RESCHED:
		if (IS_BUILTIN(CONFIG_SCF_TORTURE_TEST)) {
			cpu = torture_random(trsp) % nr_cpu_ids;
			scfp->n_resched++;
			resched_cpu(cpu);
			this_cpu_inc(scf_invoked_count);
		}
		break;
	case SCF_PRIM_SINGLE:
		cpu = torture_random(trsp) % nr_cpu_ids;
		if (scfsp->scfs_wait)
			scfp->n_single_wait++;
		else
			scfp->n_single++;
		if (scfcp) {
			scfcp->scfc_cpu = cpu;
			barrier(); // Prevent race-reduction compiler optimizations.
			scfcp->scfc_in = true;
		}
		ret = smp_call_function_single(cpu, scf_handler_1, (void *)scfcp, scfsp->scfs_wait);
		if (ret) {
			if (scfsp->scfs_wait)
				scfp->n_single_wait_ofl++;
			else
				scfp->n_single_ofl++;
			kfree(scfcp);
			scfcp = NULL;
		}
		break;
	case SCF_PRIM_SINGLE_RPC:
		if (!scfcp)
			break;
		cpu = torture_random(trsp) % nr_cpu_ids;
		scfp->n_single_rpc++;
		scfcp->scfc_cpu = cpu;
		scfcp->scfc_wait = true;
		init_completion(&scfcp->scfc_completion);
		scfcp->scfc_rpc = true;
		barrier(); // Prevent race-reduction compiler optimizations.
		scfcp->scfc_in = true;
		ret = smp_call_function_single(cpu, scf_handler_1, (void *)scfcp, 0);
		if (!ret) {
			if (use_cpus_read_lock)
				cpus_read_unlock();
			else
				preempt_enable();
			wait_for_completion(&scfcp->scfc_completion);
			if (use_cpus_read_lock)
				cpus_read_lock();
			else
				preempt_disable();
		} else {
			scfp->n_single_rpc_ofl++;
			kfree(scfcp);
			scfcp = NULL;
		}
		break;
	case SCF_PRIM_MANY:
		if (scfsp->scfs_wait)
			scfp->n_many_wait++;
		else
			scfp->n_many++;
		if (scfcp) {
			barrier(); // Prevent race-reduction compiler optimizations.
			scfcp->scfc_in = true;
		}
		smp_call_function_many(cpu_online_mask, scf_handler, scfcp, scfsp->scfs_wait);
		break;
	case SCF_PRIM_ALL:
		if (scfsp->scfs_wait)
			scfp->n_all_wait++;
		else
			scfp->n_all++;
		if (scfcp) {
			barrier(); // Prevent race-reduction compiler optimizations.
			scfcp->scfc_in = true;
		}
		smp_call_function(scf_handler, scfcp, scfsp->scfs_wait);
		break;
	default:
		WARN_ON_ONCE(1);
		if (scfcp)
			scfcp->scfc_out = true;
	}
	if (scfcp && scfsp->scfs_wait) {
		if (WARN_ON_ONCE((num_online_cpus() > 1 || scfsp->scfs_prim == SCF_PRIM_SINGLE) &&
				 !scfcp->scfc_out)) {
			pr_warn("%s: Memory-ordering failure, scfs_prim: %d.\n", __func__, scfsp->scfs_prim);
			atomic_inc(&n_mb_out_errs); // Leak rather than trash!
		} else {
			kfree(scfcp);
		}
		barrier(); // Prevent race-reduction compiler optimizations.
	}
	if (use_cpus_read_lock)
		cpus_read_unlock();
	else
		preempt_enable();
	if (!(torture_random(trsp) & 0xfff))
		schedule_timeout_uninterruptible(1);
}
// SCF test kthread. Repeatedly does calls to members of the
// smp_call_function() family of functions.
static int scftorture_invoker(void *arg)
{
	int cpu;
	int curcpu;
	DEFINE_TORTURE_RANDOM(rand);
	struct scf_statistics *scfp = (struct scf_statistics *)arg;
	bool was_offline = false;

	VERBOSE_SCFTORTOUT("scftorture_invoker %d: task started", scfp->cpu);
	cpu = scfp->cpu % nr_cpu_ids;
	WARN_ON_ONCE(set_cpus_allowed_ptr(current, cpumask_of(cpu)));
	set_user_nice(current, MAX_NICE);
	if (holdoff)
		schedule_timeout_interruptible(holdoff * HZ);

	VERBOSE_SCFTORTOUT("scftorture_invoker %d: Waiting for all SCF torturers from cpu %d", scfp->cpu, raw_smp_processor_id());

	// Make sure that the CPU is affinitized appropriately during testing.
	curcpu = raw_smp_processor_id();
	WARN_ONCE(curcpu != scfp->cpu % nr_cpu_ids,
		  "%s: Wanted CPU %d, running on %d, nr_cpu_ids = %d\n",
		  __func__, scfp->cpu, curcpu, nr_cpu_ids);

	if (!atomic_dec_return(&n_started))
		while (atomic_read_acquire(&n_started)) {
			if (torture_must_stop()) {
				VERBOSE_SCFTORTOUT("scftorture_invoker %d ended before starting", scfp->cpu);
				goto end;
			}
			schedule_timeout_uninterruptible(1);
		}

	VERBOSE_SCFTORTOUT("scftorture_invoker %d started", scfp->cpu);

	do {
		scftorture_invoke_one(scfp, &rand);
		while (cpu_is_offline(cpu) && !torture_must_stop()) {
			schedule_timeout_interruptible(HZ / 5);
			was_offline = true;
		}
		if (was_offline) {
			set_cpus_allowed_ptr(current, cpumask_of(cpu));
			was_offline = false;
		}
		cond_resched();
		stutter_wait("scftorture_invoker");
	} while (!torture_must_stop());

	VERBOSE_SCFTORTOUT("scftorture_invoker %d ended", scfp->cpu);
end:
	torture_kthread_stopping("scftorture_invoker");
	return 0;
}
static void
scftorture_print_module_parms(const char *tag)
{
	pr_alert(SCFTORT_FLAG
		 "--- %s: verbose=%d holdoff=%d longwait=%d nthreads=%d onoff_holdoff=%d onoff_interval=%d shutdown_secs=%d stat_interval=%d stutter=%d use_cpus_read_lock=%d, weight_resched=%d, weight_single=%d, weight_single_rpc=%d, weight_single_wait=%d, weight_many=%d, weight_many_wait=%d, weight_all=%d, weight_all_wait=%d\n", tag,
		 verbose, holdoff, longwait, nthreads, onoff_holdoff, onoff_interval, shutdown_secs, stat_interval, stutter, use_cpus_read_lock, weight_resched, weight_single, weight_single_rpc, weight_single_wait, weight_many, weight_many_wait, weight_all, weight_all_wait);
}
static void scf_cleanup_handler(void *unused)
{
}

static void scf_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	WRITE_ONCE(scfdone, true);
	if (nthreads && scf_stats_p)
		for (i = 0; i < nthreads; i++)
			torture_stop_kthread("scftorture_invoker", scf_stats_p[i].task);
	else
		goto end;
	smp_call_function(scf_cleanup_handler, NULL, 0);
	torture_stop_kthread(scf_torture_stats, scf_torture_stats_task);
	scf_torture_stats_print();	// -After- the stats thread is stopped!
	kfree(scf_stats_p);	// -After- the last stats print has completed!
	scf_stats_p = NULL;

	if (atomic_read(&n_errs) || atomic_read(&n_mb_in_errs) || atomic_read(&n_mb_out_errs))
		scftorture_print_module_parms("End of test: FAILURE");
	else if (torture_onoff_failures())
		scftorture_print_module_parms("End of test: LOCK_HOTPLUG");
	else
		scftorture_print_module_parms("End of test: SUCCESS");

end:
	torture_cleanup_end();
}
static int __init scf_torture_init(void)
{
	long i;
	int firsterr = 0;
	unsigned long weight_resched1 = weight_resched;
	unsigned long weight_single1 = weight_single;
	unsigned long weight_single_rpc1 = weight_single_rpc;
	unsigned long weight_single_wait1 = weight_single_wait;
	unsigned long weight_many1 = weight_many;
	unsigned long weight_many_wait1 = weight_many_wait;
	unsigned long weight_all1 = weight_all;
	unsigned long weight_all_wait1 = weight_all_wait;

	if (!torture_init_begin(SCFTORT_STRING, verbose))
		return -EBUSY;

	scftorture_print_module_parms("Start of test");

	if (weight_resched <= 0 &&
	    weight_single <= 0 && weight_single_rpc <= 0 && weight_single_wait <= 0 &&
	    weight_many <= 0 && weight_many_wait <= 0 &&
	    weight_all <= 0 && weight_all_wait <= 0) {
		weight_resched1 = weight_resched == 0 ? 0 : 2 * nr_cpu_ids;
		weight_single1 = weight_single == 0 ? 0 : 2 * nr_cpu_ids;
		weight_single_rpc1 = weight_single_rpc == 0 ? 0 : 2 * nr_cpu_ids;
		weight_single_wait1 = weight_single_wait == 0 ? 0 : 2 * nr_cpu_ids;
		weight_many1 = weight_many == 0 ? 0 : 2;
		weight_many_wait1 = weight_many_wait == 0 ? 0 : 2;
		weight_all1 = weight_all == 0 ? 0 : 1;
		weight_all_wait1 = weight_all_wait == 0 ? 0 : 1;
	} else {
		if (weight_resched == -1)
			weight_resched1 = 0;
		if (weight_single == -1)
			weight_single1 = 0;
		if (weight_single_rpc == -1)
			weight_single_rpc1 = 0;
		if (weight_single_wait == -1)
			weight_single_wait1 = 0;
		if (weight_many == -1)
			weight_many1 = 0;
		if (weight_many_wait == -1)
			weight_many_wait1 = 0;
		if (weight_all == -1)
			weight_all1 = 0;
		if (weight_all_wait == -1)
			weight_all_wait1 = 0;
	}
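	// Worked example of the defaulting above (hypothetical 8-CPU system,
	// all weights left at -1): the effective weights become 16 for each
	// of resched, single, single_rpc, and single_wait, 2 for many and
	// many_wait, and 1 for all and all_wait, so single-CPU operations
	// dominate the mix.  (weight_resched matters only when the test is
	// built in; see the IS_BUILTIN() check below.)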
	if (weight_resched1 == 0 && weight_single1 == 0 && weight_single_rpc1 == 0 &&
	    weight_single_wait1 == 0 && weight_many1 == 0 && weight_many_wait1 == 0 &&
	    weight_all1 == 0 && weight_all_wait1 == 0) {
		SCFTORTOUT_ERRSTRING("all zero weights makes no sense");
		firsterr = -EINVAL;
		goto unwind;
	}
	if (IS_BUILTIN(CONFIG_SCF_TORTURE_TEST))
		scf_sel_add(weight_resched1, SCF_PRIM_RESCHED, false);
	else if (weight_resched1)
		SCFTORTOUT_ERRSTRING("built as module, weight_resched ignored");
	scf_sel_add(weight_single1, SCF_PRIM_SINGLE, false);
	scf_sel_add(weight_single_rpc1, SCF_PRIM_SINGLE_RPC, true);
	scf_sel_add(weight_single_wait1, SCF_PRIM_SINGLE, true);
	scf_sel_add(weight_many1, SCF_PRIM_MANY, false);
	scf_sel_add(weight_many_wait1, SCF_PRIM_MANY, true);
	scf_sel_add(weight_all1, SCF_PRIM_ALL, false);
	scf_sel_add(weight_all_wait1, SCF_PRIM_ALL, true);
	scf_sel_dump();

	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval, NULL);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs, scf_torture_cleanup);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter, stutter);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	// Worker tasks invoking smp_call_function().
	if (nthreads < 0)
		nthreads = num_online_cpus();
	scf_stats_p = kcalloc(nthreads, sizeof(scf_stats_p[0]), GFP_KERNEL);
	if (!scf_stats_p) {
		SCFTORTOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}

	VERBOSE_SCFTORTOUT("Starting %d smp_call_function() threads", nthreads);

	atomic_set(&n_started, nthreads);
	for (i = 0; i < nthreads; i++) {
		scf_stats_p[i].cpu = i;
		firsterr = torture_create_kthread(scftorture_invoker, (void *)&scf_stats_p[i],
						  scf_stats_p[i].task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(scf_torture_stats, NULL, scf_torture_stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	scf_torture_cleanup();
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_SCF_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}
module_init(scf_torture_init);
module_exit(scf_torture_cleanup);