/*
 * Read-Copy Update module-based performance-test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2015
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.vnet.ibm.com>");

#define PERF_FLAG "-perf:"
#define PERFOUT_STRING(s) \
        pr_alert("%s" PERF_FLAG " %s\n", perf_type, s)
#define VERBOSE_PERFOUT_STRING(s) \
        do { if (verbose) pr_alert("%s" PERF_FLAG " %s\n", perf_type, s); } while (0)
#define VERBOSE_PERFOUT_ERRSTRING(s) \
        do { if (verbose) pr_alert("%s" PERF_FLAG "!!! %s\n", perf_type, s); } while (0)

/*
 * The intended use cases for the nreaders and nwriters module parameters
 * are as follows:
 *
 * 1.  Specify only the nr_cpus kernel boot parameter.  This will
 *     set both nreaders and nwriters to the value specified by
 *     nr_cpus for a mixed reader/writer test.
 *
 * 2.  Specify the nr_cpus kernel boot parameter, but set
 *     rcuperf.nreaders to zero.  This will set nwriters to the
 *     value specified by nr_cpus for an update-only test.
 *
 * 3.  Specify the nr_cpus kernel boot parameter, but set
 *     rcuperf.nwriters to zero.  This will set nreaders to the
 *     value specified by nr_cpus for a read-only test.
 *
 * Various other use cases may of course be specified.
*/
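
/*
 * For example (illustrative only): booting with "nr_cpus=8" and
 * "rcuperf.nwriters=0" corresponds to case 3 above, giving a
 * read-only run with eight reader kthreads and no updaters.
 */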

torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
torture_param(int, gp_async_max, 1000, "Max # outstanding waits per reader");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, !IS_ENABLED(MODULE),
              "Shutdown at end of performance tests.");
torture_param(bool, verbose, true, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");

static char *perf_type = "rcu";
module_param(perf_type, charp, 0444);
MODULE_PARM_DESC(perf_type, "Type of RCU to performance-test (rcu, rcu_bh, ...)");

static int nrealreaders;
static int nrealwriters;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *shutdown_task;

static u64 **writer_durations;
static int *writer_n_durations;
static atomic_t n_rcu_perf_reader_started;
static atomic_t n_rcu_perf_writer_started;
static atomic_t n_rcu_perf_writer_finished;
static wait_queue_head_t shutdown_wq;
static u64 t_rcu_perf_writer_started;
static u64 t_rcu_perf_writer_finished;
static unsigned long b_rcu_perf_writer_started;
static unsigned long b_rcu_perf_writer_finished;
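
/*
 * Per-CPU count of asynchronous callbacks posted by rcu_perf_writer()
 * but not yet invoked, compared against gp_async_max to throttle
 * callback posting in gp_async mode.
 */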
static DEFINE_PER_CPU(atomic_t, n_async_inflight);

static int rcu_perf_writer_state;
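
/* Values of rcu_perf_writer_state, tracking each writer's progress. */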
#define RTWS_INIT	0
#define RTWS_ASYNC	1
#define RTWS_BARRIER	2
#define RTWS_EXP_SYNC	3
#define RTWS_SYNC	4
#define RTWS_IDLE	5
#define RTWS_STOPPING	6

#define MAX_MEAS 10000
#define MIN_MEAS 100
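
/*
 * Each writer records at least MIN_MEAS and at most MAX_MEAS
 * grace-period duration measurements before declaring itself done.
 */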

/*
 * Operations vector for selecting different types of tests.
 */
struct rcu_perf_ops {
        int ptype;
        void (*init)(void);
        void (*cleanup)(void);
        int (*readlock)(void);
        void (*readunlock)(int idx);
        unsigned long (*started)(void);
        unsigned long (*completed)(void);
        unsigned long (*exp_completed)(void);
        void (*async)(struct rcu_head *head, rcu_callback_t func);
        void (*gp_barrier)(void);
        void (*sync)(void);
        void (*exp_sync)(void);
        const char *name;
};

static struct rcu_perf_ops *cur_ops;

/*
 * Definitions for rcu perf testing.
 */

static int rcu_perf_read_lock(void) __acquires(RCU)
{
        rcu_read_lock();
        return 0;
}

static void rcu_perf_read_unlock(int idx) __releases(RCU)
{
        rcu_read_unlock();
}

static unsigned long __maybe_unused rcu_no_completed(void)
{
        return 0;
}

static void rcu_sync_perf_init(void)
{
}

static struct rcu_perf_ops rcu_ops = {
        .ptype = RCU_FLAVOR,
        .init = rcu_sync_perf_init,
        .readlock = rcu_perf_read_lock,
        .readunlock = rcu_perf_read_unlock,
        .started = rcu_batches_started,
        .completed = rcu_batches_completed,
        .exp_completed = rcu_exp_batches_completed,
        .async = call_rcu,
        .gp_barrier = rcu_barrier,
        .sync = synchronize_rcu,
        .exp_sync = synchronize_rcu_expedited,
        .name = "rcu"
};

/*
 * Definitions for rcu_bh perf testing.
 */

static int rcu_bh_perf_read_lock(void) __acquires(RCU_BH)
{
        rcu_read_lock_bh();
        return 0;
}

static void rcu_bh_perf_read_unlock(int idx) __releases(RCU_BH)
{
        rcu_read_unlock_bh();
}

static struct rcu_perf_ops rcu_bh_ops = {
        .ptype = RCU_BH_FLAVOR,
        .init = rcu_sync_perf_init,
        .readlock = rcu_bh_perf_read_lock,
        .readunlock = rcu_bh_perf_read_unlock,
        .started = rcu_batches_started_bh,
        .completed = rcu_batches_completed_bh,
        .exp_completed = rcu_exp_batches_completed_sched,
        .async = call_rcu_bh,
        .gp_barrier = rcu_barrier_bh,
        .sync = synchronize_rcu_bh,
        .exp_sync = synchronize_rcu_bh_expedited,
        .name = "rcu_bh"
};

/*
 * Definitions for srcu perf testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl_perf);
static struct srcu_struct *srcu_ctlp = &srcu_ctl_perf;

static int srcu_perf_read_lock(void) __acquires(srcu_ctlp)
{
        return srcu_read_lock(srcu_ctlp);
}

static void srcu_perf_read_unlock(int idx) __releases(srcu_ctlp)
{
        srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_perf_completed(void)
{
        return srcu_batches_completed(srcu_ctlp);
}

static void srcu_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
        call_srcu(srcu_ctlp, head, func);
}

static void srcu_rcu_barrier(void)
{
        srcu_barrier(srcu_ctlp);
}

static void srcu_perf_synchronize(void)
{
        synchronize_srcu(srcu_ctlp);
}

static void srcu_perf_synchronize_expedited(void)
{
        synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_perf_ops srcu_ops = {
        .ptype = SRCU_FLAVOR,
        .init = rcu_sync_perf_init,
        .readlock = srcu_perf_read_lock,
        .readunlock = srcu_perf_read_unlock,
        .started = NULL,
        .completed = srcu_perf_completed,
        .exp_completed = srcu_perf_completed,
        .async = srcu_call_rcu,
        .gp_barrier = srcu_rcu_barrier,
        .sync = srcu_perf_synchronize,
        .exp_sync = srcu_perf_synchronize_expedited,
        .name = "srcu"
};

static struct srcu_struct srcud;

static void srcu_sync_perf_init(void)
{
        srcu_ctlp = &srcud;
        init_srcu_struct(srcu_ctlp);
}

static void srcu_sync_perf_cleanup(void)
{
        cleanup_srcu_struct(srcu_ctlp);
}

static struct rcu_perf_ops srcud_ops = {
        .ptype = SRCU_FLAVOR,
        .init = srcu_sync_perf_init,
        .cleanup = srcu_sync_perf_cleanup,
        .readlock = srcu_perf_read_lock,
        .readunlock = srcu_perf_read_unlock,
        .started = NULL,
        .completed = srcu_perf_completed,
        .exp_completed = srcu_perf_completed,
        .async = srcu_call_rcu,
        .gp_barrier = srcu_rcu_barrier,
        .sync = srcu_perf_synchronize,
        .exp_sync = srcu_perf_synchronize_expedited,
        .name = "srcud"
};

/*
 * Definitions for sched perf testing.
 */

static int sched_perf_read_lock(void)
{
        preempt_disable();
        return 0;
}

static void sched_perf_read_unlock(int idx)
{
        preempt_enable();
}

static struct rcu_perf_ops sched_ops = {
        .ptype = RCU_SCHED_FLAVOR,
        .init = rcu_sync_perf_init,
        .readlock = sched_perf_read_lock,
        .readunlock = sched_perf_read_unlock,
        .started = rcu_batches_started_sched,
        .completed = rcu_batches_completed_sched,
        .exp_completed = rcu_exp_batches_completed_sched,
        .async = call_rcu_sched,
        .gp_barrier = rcu_barrier_sched,
        .sync = synchronize_sched,
        .exp_sync = synchronize_sched_expedited,
        .name = "sched"
};

/*
 * Definitions for RCU-tasks perf testing.
 */

static int tasks_perf_read_lock(void)
{
        return 0;
}

static void tasks_perf_read_unlock(int idx)
{
}

static struct rcu_perf_ops tasks_ops = {
        .ptype = RCU_TASKS_FLAVOR,
        .init = rcu_sync_perf_init,
        .readlock = tasks_perf_read_lock,
        .readunlock = tasks_perf_read_unlock,
        .started = rcu_no_completed,
        .completed = rcu_no_completed,
        .async = call_rcu_tasks,
        .gp_barrier = rcu_barrier_tasks,
        .sync = synchronize_rcu_tasks,
        .exp_sync = synchronize_rcu_tasks,
        .name = "tasks"
};

static bool __maybe_unused torturing_tasks(void)
{
        return cur_ops == &tasks_ops;
}

/*
 * If performance tests complete, wait for shutdown to commence.
 */
static void rcu_perf_wait_shutdown(void)
{
        cond_resched_tasks_rcu_qs();
        if (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters)
                return;
        while (!torture_must_stop())
                schedule_timeout_uninterruptible(1);
}

/*
 * RCU perf reader kthread.  Repeatedly does empty RCU read-side
 * critical section, minimizing update-side interference.
 */
static int
rcu_perf_reader(void *arg)
{
        unsigned long flags;
        int idx;
        long me = (long)arg;

        VERBOSE_PERFOUT_STRING("rcu_perf_reader task started");
        set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
        set_user_nice(current, MAX_NICE);
        atomic_inc(&n_rcu_perf_reader_started);

        do {
                local_irq_save(flags);
                idx = cur_ops->readlock();
                cur_ops->readunlock(idx);
                local_irq_restore(flags);
                rcu_perf_wait_shutdown();
        } while (!torture_must_stop());
        torture_kthread_stopping("rcu_perf_reader");
        return 0;
}

/*
 * Callback function for asynchronous grace periods from rcu_perf_writer().
 */
static void rcu_perf_async_cb(struct rcu_head *rhp)
{
        atomic_dec(this_cpu_ptr(&n_async_inflight));
        kfree(rhp);
}

/*
 * RCU perf writer kthread.  Repeatedly does a grace period.
 */
static int
rcu_perf_writer(void *arg)
{
        int i = 0;
        int i_max;
        long me = (long)arg;
        struct rcu_head *rhp = NULL;
        struct sched_param sp;
        bool started = false, done = false, alldone = false;
        u64 t;
        u64 *wdp;
        u64 *wdpp = writer_durations[me];

        VERBOSE_PERFOUT_STRING("rcu_perf_writer task started");
        WARN_ON(!wdpp);
        set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
        sp.sched_priority = 1;
        sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);

        if (holdoff)
                schedule_timeout_uninterruptible(holdoff * HZ);

        t = ktime_get_mono_fast_ns();
        if (atomic_inc_return(&n_rcu_perf_writer_started) >= nrealwriters) {
                t_rcu_perf_writer_started = t;
                if (gp_exp) {
                        b_rcu_perf_writer_started =
                                cur_ops->exp_completed() / 2;
                } else {
                        b_rcu_perf_writer_started =
                                cur_ops->completed();
                }
        }

        do {
                if (writer_holdoff)
                        udelay(writer_holdoff);
                wdp = &wdpp[i];
                *wdp = ktime_get_mono_fast_ns();
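                /*
                 * In gp_async mode, post a callback only while fewer than
                 * gp_async_max callbacks are outstanding on this CPU;
                 * otherwise wait for the flavor's barrier operation to
                 * drain them, then retry.
                 */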
                if (gp_async) {
retry:
                        if (!rhp)
                                rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
                        if (rhp && atomic_read(this_cpu_ptr(&n_async_inflight)) < gp_async_max) {
                                rcu_perf_writer_state = RTWS_ASYNC;
                                atomic_inc(this_cpu_ptr(&n_async_inflight));
                                cur_ops->async(rhp, rcu_perf_async_cb);
                                rhp = NULL;
                        } else if (!kthread_should_stop()) {
                                rcu_perf_writer_state = RTWS_BARRIER;
                                cur_ops->gp_barrier();
                                goto retry;
                        } else {
                                kfree(rhp); /* Because we are stopping. */
                        }
                } else if (gp_exp) {
                        rcu_perf_writer_state = RTWS_EXP_SYNC;
                        cur_ops->exp_sync();
                } else {
                        rcu_perf_writer_state = RTWS_SYNC;
                        cur_ops->sync();
                }
                rcu_perf_writer_state = RTWS_IDLE;
                t = ktime_get_mono_fast_ns();
                *wdp = t - *wdp;
                i_max = i;
                if (!started &&
                    atomic_read(&n_rcu_perf_writer_started) >= nrealwriters)
                        started = true;
                if (!done && i >= MIN_MEAS) {
                        done = true;
                        sp.sched_priority = 0;
                        sched_setscheduler_nocheck(current,
                                                   SCHED_NORMAL, &sp);
                        pr_alert("%s%s rcu_perf_writer %ld has %d measurements\n",
                                 perf_type, PERF_FLAG, me, MIN_MEAS);
                        if (atomic_inc_return(&n_rcu_perf_writer_finished) >=
                            nrealwriters) {
                                schedule_timeout_interruptible(10);
                                rcu_ftrace_dump(DUMP_ALL);
                                PERFOUT_STRING("Test complete");
                                t_rcu_perf_writer_finished = t;
                                if (gp_exp) {
                                        b_rcu_perf_writer_finished =
                                                cur_ops->exp_completed() / 2;
                                } else {
                                        b_rcu_perf_writer_finished =
                                                cur_ops->completed();
                                }
                                if (shutdown) {
                                        smp_mb(); /* Assign before wake. */
                                        wake_up(&shutdown_wq);
                                }
                        }
                }
                if (done && !alldone &&
                    atomic_read(&n_rcu_perf_writer_finished) >= nrealwriters)
                        alldone = true;
                if (started && !alldone && i < MAX_MEAS - 1)
                        i++;
                rcu_perf_wait_shutdown();
        } while (!torture_must_stop());
        if (gp_async) {
                rcu_perf_writer_state = RTWS_BARRIER;
                cur_ops->gp_barrier();
        }
        rcu_perf_writer_state = RTWS_STOPPING;
        writer_n_durations[me] = i_max;
        torture_kthread_stopping("rcu_perf_writer");
        return 0;
}

static inline void
rcu_perf_print_module_parms(struct rcu_perf_ops *cur_ops, const char *tag)
{
        pr_alert("%s" PERF_FLAG
                 "--- %s: nreaders=%d nwriters=%d verbose=%d shutdown=%d\n",
                 perf_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
}

static void
rcu_perf_cleanup(void)
{
        int i;
        int j;
        int ngps = 0;
        u64 *wdp;
        u64 *wdpp;

        /*
         * Would like warning at start, but everything is expedited
         * during the mid-boot phase, so have to wait till the end.
         */
        if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
                VERBOSE_PERFOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
        if (rcu_gp_is_normal() && gp_exp)
                VERBOSE_PERFOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
        if (gp_exp && gp_async)
                VERBOSE_PERFOUT_ERRSTRING("No expedited async GPs, so went with async!");

        if (torture_cleanup_begin())
                return;

        if (reader_tasks) {
                for (i = 0; i < nrealreaders; i++)
                        torture_stop_kthread(rcu_perf_reader,
                                             reader_tasks[i]);
                kfree(reader_tasks);
        }

        if (writer_tasks) {
                for (i = 0; i < nrealwriters; i++) {
                        torture_stop_kthread(rcu_perf_writer,
                                             writer_tasks[i]);
                        if (!writer_n_durations)
                                continue;
                        j = writer_n_durations[i];
                        pr_alert("%s%s writer %d gps: %d\n",
                                 perf_type, PERF_FLAG, i, j);
                        ngps += j;
                }
                pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
                         perf_type, PERF_FLAG,
                         t_rcu_perf_writer_started, t_rcu_perf_writer_finished,
                         t_rcu_perf_writer_finished -
                         t_rcu_perf_writer_started,
                         ngps,
                         b_rcu_perf_writer_finished -
                         b_rcu_perf_writer_started);
                for (i = 0; i < nrealwriters; i++) {
                        if (!writer_durations)
                                break;
                        if (!writer_n_durations)
                                continue;
                        wdpp = writer_durations[i];
                        if (!wdpp)
                                continue;
                        for (j = 0; j <= writer_n_durations[i]; j++) {
                                wdp = &wdpp[j];
                                pr_alert("%s%s %4d writer-duration: %5d %llu\n",
                                         perf_type, PERF_FLAG,
                                         i, j, *wdp);
                                if (j % 100 == 0)
                                        schedule_timeout_uninterruptible(1);
                        }
                        kfree(writer_durations[i]);
                }
                kfree(writer_tasks);
                kfree(writer_durations);
                kfree(writer_n_durations);
        }

        /* Do flavor-specific cleanup operations. */
        if (cur_ops->cleanup != NULL)
                cur_ops->cleanup();

        torture_cleanup_end();
}

/*
 * Return the number if non-negative.  If -1, the number of CPUs.  If
 * less than -1, that much less than the number of CPUs, but at least
 * one.
 */
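/*
 * For example (illustrative values only): with eight CPUs online,
 * a value of 4 yields four kthreads, the default of -1 yields eight,
 * and anything negative enough to go non-positive is clamped to a
 * single kthread.
 */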
static int compute_real(int n)
{
        int nr;

        if (n >= 0) {
                nr = n;
        } else {
                nr = num_online_cpus() + 1 + n;
                if (nr <= 0)
                        nr = 1;
        }
        return nr;
}

/*
 * RCU perf shutdown kthread.  Just waits to be awakened, then shuts
 * down system.
 */
static int
rcu_perf_shutdown(void *arg)
{
        do {
                wait_event(shutdown_wq,
                           atomic_read(&n_rcu_perf_writer_finished) >=
                           nrealwriters);
        } while (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters);
        smp_mb(); /* Wake before output. */
        rcu_perf_cleanup();
        kernel_power_off();
        return -EINVAL;
}

static int __init
rcu_perf_init(void)
{
        long i;
        int firsterr = 0;
        static struct rcu_perf_ops *perf_ops[] = {
                &rcu_ops, &rcu_bh_ops, &srcu_ops, &srcud_ops, &sched_ops,
                &tasks_ops,
        };

        if (!torture_init_begin(perf_type, verbose))
                return -EBUSY;

        /* Process args and tell the world that the perf'er is on the job. */
        for (i = 0; i < ARRAY_SIZE(perf_ops); i++) {
                cur_ops = perf_ops[i];
                if (strcmp(perf_type, cur_ops->name) == 0)
                        break;
        }
        if (i == ARRAY_SIZE(perf_ops)) {
                pr_alert("rcu-perf: invalid perf type: \"%s\"\n",
                         perf_type);
                pr_alert("rcu-perf types: ");
                for (i = 0; i < ARRAY_SIZE(perf_ops); i++)
                        pr_alert(" %s", perf_ops[i]->name);
                pr_alert("\n");
                firsterr = -EINVAL;
                goto unwind;
        }
        if (cur_ops->init)
                cur_ops->init();

        nrealwriters = compute_real(nwriters);
        nrealreaders = compute_real(nreaders);
        atomic_set(&n_rcu_perf_reader_started, 0);
        atomic_set(&n_rcu_perf_writer_started, 0);
        atomic_set(&n_rcu_perf_writer_finished, 0);
        rcu_perf_print_module_parms(cur_ops, "Start of test");

        /* Start up the kthreads. */

        if (shutdown) {
                init_waitqueue_head(&shutdown_wq);
                firsterr = torture_create_kthread(rcu_perf_shutdown, NULL,
                                                  shutdown_task);
                if (firsterr)
                        goto unwind;
                schedule_timeout_uninterruptible(1);
        }
        reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
                               GFP_KERNEL);
        if (reader_tasks == NULL) {
                VERBOSE_PERFOUT_ERRSTRING("out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }
        for (i = 0; i < nrealreaders; i++) {
                firsterr = torture_create_kthread(rcu_perf_reader, (void *)i,
                                                  reader_tasks[i]);
                if (firsterr)
                        goto unwind;
        }
        while (atomic_read(&n_rcu_perf_reader_started) < nrealreaders)
                schedule_timeout_uninterruptible(1);
        writer_tasks = kcalloc(nrealwriters, sizeof(reader_tasks[0]),
                               GFP_KERNEL);
        writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations),
                                   GFP_KERNEL);
        writer_n_durations =
                kcalloc(nrealwriters, sizeof(*writer_n_durations),
                        GFP_KERNEL);
        if (!writer_tasks || !writer_durations || !writer_n_durations) {
                VERBOSE_PERFOUT_ERRSTRING("out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }
        for (i = 0; i < nrealwriters; i++) {
                writer_durations[i] =
                        kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
                                GFP_KERNEL);
                if (!writer_durations[i]) {
                        firsterr = -ENOMEM;
                        goto unwind;
                }
                firsterr = torture_create_kthread(rcu_perf_writer, (void *)i,
                                                  writer_tasks[i]);
                if (firsterr)
                        goto unwind;
        }
        torture_init_end();
        return 0;

unwind:
        torture_init_end();
        rcu_perf_cleanup();
        return firsterr;
}

module_init(rcu_perf_init);
module_exit(rcu_perf_cleanup);