// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based performance-test facility
 *
 * Copyright (C) IBM Corporation, 2015
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#define pr_fmt(fmt) fmt
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>

#include "rcu.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

#define PERF_FLAG "-perf:"
#define PERFOUT_STRING(s) \
	pr_alert("%s" PERF_FLAG " %s\n", perf_type, s)
#define VERBOSE_PERFOUT_STRING(s) \
	do { if (verbose) pr_alert("%s" PERF_FLAG " %s\n", perf_type, s); } while (0)
#define VERBOSE_PERFOUT_ERRSTRING(s) \
	do { if (verbose) pr_alert("%s" PERF_FLAG "!!! %s\n", perf_type, s); } while (0)
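
/*
 * For illustration only: with the default perf_type of "rcu",
 * PERFOUT_STRING("Test complete") prints "rcu-perf: Test complete",
 * and VERBOSE_PERFOUT_ERRSTRING("out of memory") prints
 * "rcu-perf:!!! out of memory" (the VERBOSE_* variants print only
 * when the verbose module parameter is set).
 */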

/*
 * The intended use cases for the nreaders and nwriters module parameters
 * are as follows:
 *
 * 1.	Specify only the nr_cpus kernel boot parameter.  This will
 *	set both nreaders and nwriters to the value specified by
 *	nr_cpus for a mixed reader/writer test.
 *
 * 2.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuperf.nreaders to zero.  This will set nwriters to the
 *	value specified by nr_cpus for an update-only test.
 *
 * 3.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuperf.nwriters to zero.  This will set nreaders to the
 *	value specified by nr_cpus for a read-only test.
 *
 * Various other use cases may of course be specified.
 */
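
/*
 * For example (illustrative values only): booting with
 * "nr_cpus=8 rcuperf.nwriters=0" corresponds to case 3 above,
 * giving a read-only test with eight reader kthreads (assuming
 * all eight CPUs come online) and no writer kthreads.
 */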

#ifdef MODULE
# define RCUPERF_SHUTDOWN 0
#else
# define RCUPERF_SHUTDOWN 1
#endif

torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
torture_param(int, gp_async_max, 1000, "Max # outstanding waits per reader");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, RCUPERF_SHUTDOWN,
	      "Shutdown at end of performance tests.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");

static char *perf_type = "rcu";
module_param(perf_type, charp, 0444);
MODULE_PARM_DESC(perf_type, "Type of RCU to performance-test (rcu, srcu, ...)");

static int nrealreaders;
static int nrealwriters;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *shutdown_task;

static u64 **writer_durations;
static int *writer_n_durations;
static atomic_t n_rcu_perf_reader_started;
static atomic_t n_rcu_perf_writer_started;
static atomic_t n_rcu_perf_writer_finished;
static wait_queue_head_t shutdown_wq;
static u64 t_rcu_perf_writer_started;
static u64 t_rcu_perf_writer_finished;
static unsigned long b_rcu_perf_writer_started;
static unsigned long b_rcu_perf_writer_finished;
static DEFINE_PER_CPU(atomic_t, n_async_inflight);

#define MAX_MEAS 10000
#define MIN_MEAS 100
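
/*
 * Each writer collects at least MIN_MEAS grace-period measurements
 * before declaring itself done, and its duration array holds at most
 * MAX_MEAS of them.
 */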

/*
 * Operations vector for selecting different types of tests.
 */
struct rcu_perf_ops {
	int ptype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	unsigned long (*exp_completed)(void);
	void (*async)(struct rcu_head *head, rcu_callback_t func);
	void (*gp_barrier)(void);
	void (*sync)(void);
	void (*exp_sync)(void);
	const char *name;
};

static struct rcu_perf_ops *cur_ops;

/*
 * Definitions for rcu perf testing.
 */

static int rcu_perf_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_perf_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static unsigned long __maybe_unused rcu_no_completed(void)
{
	return 0;
}

static void rcu_sync_perf_init(void)
{
}

static struct rcu_perf_ops rcu_ops = {
	.ptype		= RCU_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= rcu_perf_read_lock,
	.readunlock	= rcu_perf_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= rcu_exp_batches_completed,
	.async		= call_rcu,
	.gp_barrier	= rcu_barrier,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.name		= "rcu"
};

/*
 * Definitions for srcu perf testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl_perf);
static struct srcu_struct *srcu_ctlp = &srcu_ctl_perf;

static int srcu_perf_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void srcu_perf_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_perf_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_rcu_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_perf_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_perf_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_perf_ops srcu_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= srcu_perf_read_lock,
	.readunlock	= srcu_perf_read_unlock,
	.get_gp_seq	= srcu_perf_completed,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= srcu_perf_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_perf_synchronize,
	.exp_sync	= srcu_perf_synchronize_expedited,
	.name		= "srcu"
};

static struct srcu_struct srcud;

static void srcu_sync_perf_init(void)
{
	srcu_ctlp = &srcud;
	init_srcu_struct(srcu_ctlp);
}

static void srcu_sync_perf_cleanup(void)
{
	cleanup_srcu_struct(srcu_ctlp);
}

static struct rcu_perf_ops srcud_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= srcu_sync_perf_init,
	.cleanup	= srcu_sync_perf_cleanup,
	.readlock	= srcu_perf_read_lock,
	.readunlock	= srcu_perf_read_unlock,
	.get_gp_seq	= srcu_perf_completed,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= srcu_perf_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_perf_synchronize,
	.exp_sync	= srcu_perf_synchronize_expedited,
	.name		= "srcud"
};
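
/*
 * A sketch of how a flavor is selected: rcu_perf_init() matches the
 * perf_type module parameter against each ops vector's .name field,
 * so for example booting with "rcuperf.perf_type=srcud" (or loading
 * the module with perf_type=srcud) would exercise this dynamically
 * allocated SRCU variant.
 */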

/*
 * Definitions for RCU-tasks perf testing.
 */

static int tasks_perf_read_lock(void)
{
	return 0;
}

static void tasks_perf_read_unlock(int idx)
{
}

static struct rcu_perf_ops tasks_ops = {
	.ptype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= tasks_perf_read_lock,
	.readunlock	= tasks_perf_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.gp_diff	= rcu_seq_diff,
	.async		= call_rcu_tasks,
	.gp_barrier	= rcu_barrier_tasks,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.name		= "tasks"
};

static unsigned long rcuperf_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

/*
 * If performance tests complete, wait for shutdown to commence.
 */
static void rcu_perf_wait_shutdown(void)
{
	cond_resched_tasks_rcu_qs();
	if (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters)
		return;
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);
}

/*
 * RCU perf reader kthread.  Repeatedly does empty RCU read-side
 * critical section, minimizing update-side interference.
 */
static int
rcu_perf_reader(void *arg)
{
	unsigned long flags;
	int idx;
	long me = (long)arg;

	VERBOSE_PERFOUT_STRING("rcu_perf_reader task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_rcu_perf_reader_started);

	do {
		local_irq_save(flags);
		idx = cur_ops->readlock();
		cur_ops->readunlock(idx);
		local_irq_restore(flags);
		rcu_perf_wait_shutdown();
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_perf_reader");
	return 0;
}

/*
 * Callback function for asynchronous grace periods from rcu_perf_writer().
 */
static void rcu_perf_async_cb(struct rcu_head *rhp)
{
	atomic_dec(this_cpu_ptr(&n_async_inflight));
	kfree(rhp);
}

/*
 * RCU perf writer kthread.  Repeatedly does a grace period.
 */
static int
rcu_perf_writer(void *arg)
{
	int i = 0;
	int i_max;
	long me = (long)arg;
	struct rcu_head *rhp = NULL;
	struct sched_param sp;
	bool started = false, done = false, alldone = false;
	u64 t;
	u64 *wdp;
	u64 *wdpp = writer_durations[me];

	VERBOSE_PERFOUT_STRING("rcu_perf_writer task started");
	WARN_ON(!wdpp);
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	sp.sched_priority = 1;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);

	if (holdoff)
		schedule_timeout_uninterruptible(holdoff * HZ);

	/*
	 * Wait until rcu_end_inkernel_boot() is called for normal GP tests
	 * so that RCU is not always expedited for normal GP tests.
	 * The system_state test is approximate, but works well in practice.
	 */
	while (!gp_exp && system_state != SYSTEM_RUNNING)
		schedule_timeout_uninterruptible(1);

	t = ktime_get_mono_fast_ns();
	if (atomic_inc_return(&n_rcu_perf_writer_started) >= nrealwriters) {
		t_rcu_perf_writer_started = t;
		if (gp_exp) {
			b_rcu_perf_writer_started =
				cur_ops->exp_completed() / 2;
		} else {
			b_rcu_perf_writer_started = cur_ops->get_gp_seq();
		}
	}
	do {
		if (writer_holdoff)
			udelay(writer_holdoff);
		wdp = &wdpp[i];
		*wdp = ktime_get_mono_fast_ns();
		if (gp_async) {
retry:
			if (!rhp)
				rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
			if (rhp && atomic_read(this_cpu_ptr(&n_async_inflight)) < gp_async_max) {
				atomic_inc(this_cpu_ptr(&n_async_inflight));
				cur_ops->async(rhp, rcu_perf_async_cb);
				rhp = NULL;
			} else if (!kthread_should_stop()) {
				cur_ops->gp_barrier();
				goto retry;
			} else {
				kfree(rhp); /* Because we are stopping. */
			}
		} else if (gp_exp) {
			cur_ops->exp_sync();
		} else {
			cur_ops->sync();
		}
		t = ktime_get_mono_fast_ns();
		*wdp = t - *wdp;
		i_max = i;
		if (!started &&
		    atomic_read(&n_rcu_perf_writer_started) >= nrealwriters)
			started = true;
		if (!done && i >= MIN_MEAS) {
			done = true;
			sp.sched_priority = 0;
			sched_setscheduler_nocheck(current,
						   SCHED_NORMAL, &sp);
			pr_alert("%s%s rcu_perf_writer %ld has %d measurements\n",
				 perf_type, PERF_FLAG, me, MIN_MEAS);
			if (atomic_inc_return(&n_rcu_perf_writer_finished) >=
			    nrealwriters) {
				schedule_timeout_interruptible(10);
				rcu_ftrace_dump(DUMP_ALL);
				PERFOUT_STRING("Test complete");
				t_rcu_perf_writer_finished = t;
				if (gp_exp) {
					b_rcu_perf_writer_finished =
						cur_ops->exp_completed() / 2;
				} else {
					b_rcu_perf_writer_finished =
						cur_ops->get_gp_seq();
				}
				if (shutdown) {
					smp_mb(); /* Assign before wake. */
					wake_up(&shutdown_wq);
				}
			}
		}
		if (done && !alldone &&
		    atomic_read(&n_rcu_perf_writer_finished) >= nrealwriters)
			alldone = true;
		if (started && !alldone && i < MAX_MEAS - 1)
			i++;
		rcu_perf_wait_shutdown();
	} while (!torture_must_stop());
	if (gp_async) {
		cur_ops->gp_barrier();
	}
	writer_n_durations[me] = i_max;
	torture_kthread_stopping("rcu_perf_writer");
	return 0;
}

static void
rcu_perf_print_module_parms(struct rcu_perf_ops *cur_ops, const char *tag)
{
	pr_alert("%s" PERF_FLAG
		 "--- %s: nreaders=%d nwriters=%d verbose=%d shutdown=%d\n",
		 perf_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
}

static void
rcu_perf_cleanup(void)
{
	int i;
	int j;
	int ngps = 0;
	u64 *wdp;
	u64 *wdpp;

	/*
	 * Would like warning at start, but everything is expedited
	 * during the mid-boot phase, so have to wait till the end.
	 */
	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
		VERBOSE_PERFOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
	if (rcu_gp_is_normal() && gp_exp)
		VERBOSE_PERFOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
	if (gp_exp && gp_async)
		VERBOSE_PERFOUT_ERRSTRING("No expedited async GPs, so went with async!");

	if (torture_cleanup_begin())
		return;
	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_perf_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}

	if (writer_tasks) {
		for (i = 0; i < nrealwriters; i++) {
			torture_stop_kthread(rcu_perf_writer,
					     writer_tasks[i]);
			if (!writer_n_durations)
				continue;
			j = writer_n_durations[i];
			pr_alert("%s%s writer %d gps: %d\n",
				 perf_type, PERF_FLAG, i, j);
			ngps += j;
		}
		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
			 perf_type, PERF_FLAG,
			 t_rcu_perf_writer_started, t_rcu_perf_writer_finished,
			 t_rcu_perf_writer_finished -
			 t_rcu_perf_writer_started,
			 ngps,
			 rcuperf_seq_diff(b_rcu_perf_writer_finished,
					  b_rcu_perf_writer_started));
		for (i = 0; i < nrealwriters; i++) {
			if (!writer_durations)
				break;
			if (!writer_n_durations)
				continue;
			wdpp = writer_durations[i];
			if (!wdpp)
				continue;
			for (j = 0; j <= writer_n_durations[i]; j++) {
				wdp = &wdpp[j];
				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
					 perf_type, PERF_FLAG,
					 i, j, *wdp);
				if (j % 100 == 0)
					schedule_timeout_uninterruptible(1);
			}
			kfree(writer_durations[i]);
		}
		kfree(writer_tasks);
		kfree(writer_durations);
		kfree(writer_n_durations);
	}

	/* Do torture-type-specific cleanup operations. */
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}

/*
 * Return the number if non-negative.  If -1, the number of CPUs.
 * If less than -1, that much less than the number of CPUs, but
 * at least one.
 */
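/*
 * For example (assuming eight CPUs online): compute_real(8) == 8,
 * compute_real(-1) == 8, compute_real(-3) == 6, and compute_real(-100)
 * is clamped up to 1.
 */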
static int compute_real(int n)
{
	int nr;

	if (n >= 0) {
		nr = n;
	} else {
		nr = num_online_cpus() + 1 + n;
		if (nr <= 0)
			nr = 1;
	}
	return nr;
}

/*
 * RCU perf shutdown kthread.  Just waits to be awakened, then shuts
 * down system.
 */
static int
rcu_perf_shutdown(void *arg)
{
	do {
		wait_event(shutdown_wq,
			   atomic_read(&n_rcu_perf_writer_finished) >=
			   nrealwriters);
	} while (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters);
	smp_mb(); /* Wake before output. */
	rcu_perf_cleanup();
	kernel_power_off();
	return -EINVAL;
}

static int __init
rcu_perf_init(void)
{
	long i;
	int firsterr = 0;
	static struct rcu_perf_ops *perf_ops[] = {
		&rcu_ops, &srcu_ops, &srcud_ops, &tasks_ops,
	};

	if (!torture_init_begin(perf_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the perf'er is on the job. */
	for (i = 0; i < ARRAY_SIZE(perf_ops); i++) {
		cur_ops = perf_ops[i];
		if (strcmp(perf_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(perf_ops)) {
		pr_alert("rcu-perf: invalid perf type: \"%s\"\n", perf_type);
		pr_alert("rcu-perf types: ");
		for (i = 0; i < ARRAY_SIZE(perf_ops); i++)
			pr_cont(" %s", perf_ops[i]->name);
		pr_cont("\n");
		WARN_ON(!IS_MODULE(CONFIG_RCU_PERF_TEST));
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->init)
		cur_ops->init();

	nrealwriters = compute_real(nwriters);
	nrealreaders = compute_real(nreaders);
	atomic_set(&n_rcu_perf_reader_started, 0);
	atomic_set(&n_rcu_perf_writer_started, 0);
	atomic_set(&n_rcu_perf_writer_finished, 0);
	rcu_perf_print_module_parms(cur_ops, "Start of test");

	/* Start up the kthreads. */

	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(rcu_perf_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_PERFOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_perf_reader, (void *)i,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	while (atomic_read(&n_rcu_perf_reader_started) < nrealreaders)
		schedule_timeout_uninterruptible(1);
	writer_tasks = kcalloc(nrealwriters, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations),
				   GFP_KERNEL);
	writer_n_durations =
		kcalloc(nrealwriters, sizeof(*writer_n_durations),
			GFP_KERNEL);
	if (!writer_tasks || !writer_durations || !writer_n_durations) {
		VERBOSE_PERFOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealwriters; i++) {
		writer_durations[i] =
			kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
				GFP_KERNEL);
		if (!writer_durations[i]) {
			firsterr = -ENOMEM;
			goto unwind;
		}
		firsterr = torture_create_kthread(rcu_perf_writer, (void *)i,
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_perf_cleanup();
	return firsterr;
}

module_init(rcu_perf_init);
module_exit(rcu_perf_cleanup);