// SPDX-License-Identifier: GPL-2.0+
//
// Scalability test comparing RCU vs other mechanisms
// for acquiring references on objects.
//
// Copyright (C) Google, 2020.
//
// Author: Joel Fernandes <joel@joelfernandes.org>
#define pr_fmt(fmt) fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/reboot.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/types.h>

#include "rcu.h"

#define PERF_FLAG "-ref-perf: "

#define PERFOUT(s, x...) \
	pr_alert("%s" PERF_FLAG s, perf_type, ## x)

#define VERBOSE_PERFOUT(s, x...) \
	do { if (verbose) pr_alert("%s" PERF_FLAG s, perf_type, ## x); } while (0)

#define VERBOSE_PERFOUT_ERRSTRING(s, x...) \
	do { if (verbose) pr_alert("%s" PERF_FLAG "!!! " s, perf_type, ## x); } while (0)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joel Fernandes (Google) <joel@joelfernandes.org>");

static char *perf_type = "rcu";
module_param(perf_type, charp, 0444);
MODULE_PARM_DESC(perf_type, "Type of test (rcu, srcu, rcu-tasks, rcu-trace, refcnt, rwlock, rwsem).");

torture_param(int, verbose, 0, "Enable verbose debugging printk()s");
// Wait until there are multiple CPUs before starting test.
torture_param(int, holdoff, IS_BUILTIN(CONFIG_RCU_REF_SCALE_TEST) ? 10 : 0,
	      "Holdoff time before test start (s)");
// Number of loops per experiment, all readers execute operations concurrently.
torture_param(long, loops, 10000, "Number of loops per experiment.");
// Number of readers, with -1 defaulting to about 75% of the CPUs.
torture_param(int, nreaders, -1, "Number of readers, -1 for 75% of CPUs.");
// Number of runs.
torture_param(int, nruns, 30, "Number of experiments to run.");
// Reader delay in nanoseconds, 0 for no delay.
torture_param(int, readdelay, 0, "Read-side delay in nanoseconds.");

#ifdef MODULE
#define REFPERF_SHUTDOWN 0
#else
#define REFPERF_SHUTDOWN 1
#endif

torture_param(bool, shutdown, REFPERF_SHUTDOWN,
	      "Shutdown at end of performance tests.");
struct reader_task {
	struct task_struct *task;
	int start_reader;		// Set by main_func to release this reader.
	wait_queue_head_t wq;		// Reader waits here for the start signal.
	u64 last_duration_ns;		// Duration of this reader's timed pass.
};
static struct task_struct *shutdown_task;
static wait_queue_head_t shutdown_wq;

static struct task_struct *main_task;
static wait_queue_head_t main_wq;
static int shutdown_start;

static struct reader_task *reader_tasks;

// Number of readers that are part of the current experiment.
static atomic_t nreaders_exp;

// Use to wait for all threads to start.
static atomic_t n_init;
// Readers that have not yet checked in for the current experiment.
static atomic_t n_started;
// Readers that have not yet finished the cache-warming pass.
static atomic_t n_warmedup;
// Readers that have not yet finished the timed measurement pass.
static atomic_t n_cooleddown;

// Track which experiment is currently running.
static int exp_idx;
// Operations vector for selecting different types of tests.
struct ref_perf_ops {
	void (*init)(void);
	void (*cleanup)(void);
	void (*readsection)(const int nloops);
	void (*delaysection)(const int nloops, const int udl, const int ndl);
	const char *name;
};

static struct ref_perf_ops *cur_ops;
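
// To benchmark another synchronization mechanism, supply read-side hooks and
// an ops structure, then add it to the perf_ops[] array in ref_perf_init().
// A minimal sketch (hypothetical "lock" type, not part of this module):
//
//	static DEFINE_SPINLOCK(test_lock);
//
//	static void ref_lock_section(const int nloops)
//	{
//		int i;
//
//		for (i = nloops; i >= 0; i--) {
//			spin_lock(&test_lock);
//			spin_unlock(&test_lock);
//		}
//	}
//
//	static struct ref_perf_ops lock_ops = {
//		.readsection	= ref_lock_section,
//		.delaysection	= ref_lock_delay_section,  // analogous, with un_delay()
//		.name		= "lock"
//	};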
static void un_delay(const int udl, const int ndl)
{
	if (udl)
		udelay(udl);
	if (ndl)
		ndelay(ndl);
}
static void ref_rcu_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock();
		rcu_read_unlock();
	}
}
static void ref_rcu_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock();
		un_delay(udl, ndl);
		rcu_read_unlock();
	}
}
static void rcu_sync_perf_init(void)
{
}

static struct ref_perf_ops rcu_ops = {
	.init		= rcu_sync_perf_init,
	.readsection	= ref_rcu_read_section,
	.delaysection	= ref_rcu_delay_section,
	.name		= "rcu"
};
// Definitions for SRCU ref perf testing.
DEFINE_STATIC_SRCU(srcu_refctl_perf);
static struct srcu_struct *srcu_ctlp = &srcu_refctl_perf;
static void srcu_ref_perf_read_section(const int nloops)
{
	int i;
	int idx;

	for (i = nloops; i >= 0; i--) {
		idx = srcu_read_lock(srcu_ctlp);
		srcu_read_unlock(srcu_ctlp, idx);
	}
}
static void srcu_ref_perf_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;
	int idx;

	for (i = nloops; i >= 0; i--) {
		idx = srcu_read_lock(srcu_ctlp);
		un_delay(udl, ndl);
		srcu_read_unlock(srcu_ctlp, idx);
	}
}
static struct ref_perf_ops srcu_ops = {
	.init		= rcu_sync_perf_init,
	.readsection	= srcu_ref_perf_read_section,
	.delaysection	= srcu_ref_perf_delay_section,
	.name		= "srcu"
};
// Definitions for RCU Tasks ref perf testing: Empty read markers.
// These definitions also work for RCU Rude readers.
static void rcu_tasks_ref_perf_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--)
		continue;
}

static void rcu_tasks_ref_perf_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--)
		un_delay(udl, ndl);
}

static struct ref_perf_ops rcu_tasks_ops = {
	.init		= rcu_sync_perf_init,
	.readsection	= rcu_tasks_ref_perf_read_section,
	.delaysection	= rcu_tasks_ref_perf_delay_section,
	.name		= "rcu-tasks"
};
// Definitions for RCU Tasks Trace ref perf testing.
static void rcu_trace_ref_perf_read_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock_trace();
		rcu_read_unlock_trace();
	}
}

static void rcu_trace_ref_perf_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		rcu_read_lock_trace();
		un_delay(udl, ndl);
		rcu_read_unlock_trace();
	}
}

static struct ref_perf_ops rcu_trace_ops = {
	.init		= rcu_sync_perf_init,
	.readsection	= rcu_trace_ref_perf_read_section,
	.delaysection	= rcu_trace_ref_perf_delay_section,
	.name		= "rcu-trace"
};
// Definitions for reference count
static atomic_t refcnt;

static void ref_refcnt_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		atomic_inc(&refcnt);
		atomic_dec(&refcnt);
	}
}
static void ref_refcnt_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		atomic_inc(&refcnt);
		un_delay(udl, ndl);
		atomic_dec(&refcnt);
	}
}
static struct ref_perf_ops refcnt_ops = {
	.init		= rcu_sync_perf_init,
	.readsection	= ref_refcnt_section,
	.delaysection	= ref_refcnt_delay_section,
	.name		= "refcnt"
};
// Definitions for rwlock
static rwlock_t test_rwlock;

static void ref_rwlock_init(void)
{
	rwlock_init(&test_rwlock);
}

static void ref_rwlock_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		read_lock(&test_rwlock);
		read_unlock(&test_rwlock);
	}
}
static void ref_rwlock_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		read_lock(&test_rwlock);
		un_delay(udl, ndl);
		read_unlock(&test_rwlock);
	}
}
static struct ref_perf_ops rwlock_ops = {
	.init		= ref_rwlock_init,
	.readsection	= ref_rwlock_section,
	.delaysection	= ref_rwlock_delay_section,
	.name		= "rwlock"
};
// Definitions for rwsem
static struct rw_semaphore test_rwsem;

static void ref_rwsem_init(void)
{
	init_rwsem(&test_rwsem);
}

static void ref_rwsem_section(const int nloops)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		down_read(&test_rwsem);
		up_read(&test_rwsem);
	}
}
static void ref_rwsem_delay_section(const int nloops, const int udl, const int ndl)
{
	int i;

	for (i = nloops; i >= 0; i--) {
		down_read(&test_rwsem);
		un_delay(udl, ndl);
		up_read(&test_rwsem);
	}
}
static struct ref_perf_ops rwsem_ops = {
	.init		= ref_rwsem_init,
	.readsection	= ref_rwsem_section,
	.delaysection	= ref_rwsem_delay_section,
	.name		= "rwsem"
};
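
// Each reader pass does "loops" iterations of the selected read-side
// primitive.  A non-zero readdelay (nanoseconds) is split into a microsecond
// part and a nanosecond remainder for un_delay(), so for example
// readdelay=1500 results in udelay(1) followed by ndelay(500) per iteration.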
static void rcu_perf_one_reader(void)
{
	if (readdelay <= 0)
		cur_ops->readsection(loops);
	else
		cur_ops->delaysection(loops, readdelay / 1000, readdelay % 1000);
}
// Reader kthread.  Repeatedly does empty read-side critical sections of the
// selected type, minimizing update-side interference.
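// For each experiment, a reader waits for the start signal, checks in via
// n_started, does a cache-warming pass and checks in via n_warmedup, runs
// the timed pass with interrupts disabled, checks in via n_cooleddown, and
// finally records its duration, waking main_func once the last reader is done.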
static int
ref_perf_reader(void *arg)
{
	unsigned long flags;
	long me = (long)arg;
	struct reader_task *rt = &(reader_tasks[me]);
	u64 start;
	s64 duration;

	VERBOSE_PERFOUT("ref_perf_reader %ld: task started", me);
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_init);
	if (holdoff)
		schedule_timeout_interruptible(holdoff * HZ);
repeat:
	VERBOSE_PERFOUT("ref_perf_reader %ld: waiting to start next experiment on cpu %d", me, smp_processor_id());

	// Wait for signal that this reader can start.
	wait_event(rt->wq, (atomic_read(&nreaders_exp) && smp_load_acquire(&rt->start_reader)) ||
		   torture_must_stop());

	if (torture_must_stop())
		goto end;

	// Make sure that the CPU is affinitized appropriately during testing.
	WARN_ON_ONCE(smp_processor_id() != me);

	WRITE_ONCE(rt->start_reader, 0);
	if (!atomic_dec_return(&n_started))
		while (atomic_read_acquire(&n_started))
			cpu_relax();

	VERBOSE_PERFOUT("ref_perf_reader %ld: experiment %d started", me, exp_idx);

	// To reduce noise, do an initial cache-warming invocation, check
	// in, and then keep warming until everyone has checked in.
	rcu_perf_one_reader();
	if (!atomic_dec_return(&n_warmedup))
		while (atomic_read_acquire(&n_warmedup))
			rcu_perf_one_reader();

	// Also keep interrupts disabled.  This also has the effect
	// of preventing entries into slow path for rcu_read_unlock().
	local_irq_save(flags);
	start = ktime_get_mono_fast_ns();

	rcu_perf_one_reader();

	duration = ktime_get_mono_fast_ns() - start;
	local_irq_restore(flags);

	rt->last_duration_ns = WARN_ON_ONCE(duration < 0) ? 0 : duration;

	// To reduce runtime-skew noise, do maintain-load invocations until
	// everyone is done.
	if (!atomic_dec_return(&n_cooleddown))
		while (atomic_read_acquire(&n_cooleddown))
			rcu_perf_one_reader();

	if (atomic_dec_and_test(&nreaders_exp))
		wake_up(&main_wq);

	VERBOSE_PERFOUT("ref_perf_reader %ld: experiment %d ended, (readers remaining=%d)",
			me, exp_idx, atomic_read(&nreaders_exp));

	if (!torture_must_stop())
		goto repeat;
end:
	torture_kthread_stopping("ref_perf_reader");
	return 0;
}
static void reset_readers(void)
{
	int i;
	struct reader_task *rt;

	for (i = 0; i < nreaders; i++) {
		rt = &(reader_tasks[i]);
		rt->last_duration_ns = 0;
	}
}
// Print the results of each reader and return the sum of all their durations.
static u64 process_durations(int n)
{
	int i;
	struct reader_task *rt;
	char buf1[64];
	char *buf;
	u64 sum = 0;

	buf = kmalloc(128 + nreaders * 32, GFP_KERNEL);
	if (!buf)
		return 0;
	buf[0] = 0;
	sprintf(buf, "Experiment #%d (Format: <THREAD-NUM>:<Total loop time in ns>)",
		exp_idx);

	for (i = 0; i < n && !torture_must_stop(); i++) {
		rt = &(reader_tasks[i]);
		sprintf(buf1, "%d: %llu\t", i, rt->last_duration_ns);

		if (i % 5 == 0)
			strcat(buf, "\n");
		strcat(buf, buf1);

		sum += rt->last_duration_ns;
	}
	strcat(buf, "\n");

	PERFOUT("%s\n", buf);

	kfree(buf);
	return sum;
}
// The main_func is the main orchestrator, it performs a bunch of
// experiments.  For every experiment, it orders all the readers involved
// to start and waits for them to finish the experiment.  It then reads
// their timestamps and starts the next experiment.  Each experiment runs
// all nreaders readers concurrently, and this is repeated nruns times,
// after which the per-experiment averages are printed.
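//
// Each experiment's result is the average time per loop iteration across
// all readers, in nanoseconds:
//
//	result_avg[exp] = 1000 * sum(reader durations) / (nreaders * loops)
//
// The extra factor of 1000 preserves three decimal places for printing.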
static int main_func(void *arg)
{
	bool errexit = false;
	int exp, r;
	char buf1[64];
	char *buf;
	u64 *result_avg;

	set_cpus_allowed_ptr(current, cpumask_of(nreaders % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);

	VERBOSE_PERFOUT("main_func task started");
	result_avg = kzalloc(nruns * sizeof(*result_avg), GFP_KERNEL);
	buf = kzalloc(64 + nruns * 32, GFP_KERNEL);
	if (!result_avg || !buf) {
		VERBOSE_PERFOUT_ERRSTRING("out of memory");
		errexit = true;
	}
	if (holdoff)
		schedule_timeout_interruptible(holdoff * HZ);

	// Wait for all threads to start.
	atomic_inc(&n_init);
	while (atomic_read(&n_init) < nreaders + 1)
		schedule_timeout_uninterruptible(1);

	// Start exp readers up per experiment
	for (exp = 0; exp < nruns && !torture_must_stop(); exp++) {
		if (errexit)
			break;
		if (torture_must_stop())
			goto end;

		reset_readers();
		atomic_set(&nreaders_exp, nreaders);
		atomic_set(&n_started, nreaders);
		atomic_set(&n_warmedup, nreaders);
		atomic_set(&n_cooleddown, nreaders);

		exp_idx = exp;

		for (r = 0; r < nreaders; r++) {
			smp_store_release(&reader_tasks[r].start_reader, 1);
			wake_up(&reader_tasks[r].wq);
		}

		VERBOSE_PERFOUT("main_func: experiment started, waiting for %d readers",
				nreaders);

		wait_event(main_wq,
			   !atomic_read(&nreaders_exp) || torture_must_stop());

		VERBOSE_PERFOUT("main_func: experiment ended");

		if (torture_must_stop())
			goto end;

		result_avg[exp] = div_u64(1000 * process_durations(nreaders), nreaders * loops);
	}
	// Print the average of all experiments
	PERFOUT("END OF TEST. Calculating average duration per loop (nanoseconds)...\n");

	buf[0] = 0;
	strcat(buf, "\n");
	strcat(buf, "Runs\tTime(ns)\n");

	for (exp = 0; exp < nruns; exp++) {
		u64 avg;
		u32 rem;

		if (errexit)
			break;
		avg = div_u64_rem(result_avg[exp], 1000, &rem);
		sprintf(buf1, "%d\t%llu.%03u\n", exp + 1, avg, rem);
		strcat(buf, buf1);
	}

	if (!errexit)
		PERFOUT("%s", buf);

	// This will shutdown everything including us.
	if (shutdown) {
		shutdown_start = 1;
		wake_up(&shutdown_wq);
	}

	// Wait for torture to stop us
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);

end:
	torture_kthread_stopping("main_func");
	kfree(result_avg);
	kfree(buf);
	return 0;
}
static void
ref_perf_print_module_parms(struct ref_perf_ops *cur_ops, const char *tag)
{
	pr_alert("%s" PERF_FLAG
		 "--- %s: verbose=%d shutdown=%d holdoff=%d loops=%ld nreaders=%d nruns=%d readdelay=%d\n", perf_type, tag,
		 verbose, shutdown, holdoff, loops, nreaders, nruns, readdelay);
}
static void
ref_perf_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	if (reader_tasks) {
		for (i = 0; i < nreaders; i++)
			torture_stop_kthread("ref_perf_reader",
					     reader_tasks[i].task);
	}
	kfree(reader_tasks);

	torture_stop_kthread("main_task", main_task);
	kfree(main_task);

	// Do perf-type-specific cleanup operations.
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}
// Shutdown kthread. Just waits to be awakened, then shuts down system.
static int
ref_perf_shutdown(void *arg)
{
	wait_event(shutdown_wq, shutdown_start);

	smp_mb(); // Wake before output.
	ref_perf_cleanup();
	kernel_power_off();

	return -EINVAL;
}
static int __init
ref_perf_init(void)
{
	long i;
	int firsterr = 0;
	static struct ref_perf_ops *perf_ops[] = {
		&rcu_ops, &srcu_ops, &rcu_trace_ops, &rcu_tasks_ops,
		&refcnt_ops, &rwlock_ops, &rwsem_ops,
	};

	if (!torture_init_begin(perf_type, verbose))
		return -EBUSY;

	for (i = 0; i < ARRAY_SIZE(perf_ops); i++) {
		cur_ops = perf_ops[i];
		if (strcmp(perf_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(perf_ops)) {
		pr_alert("rcu-perf: invalid perf type: \"%s\"\n", perf_type);
		pr_alert("rcu-perf types: ");
		for (i = 0; i < ARRAY_SIZE(perf_ops); i++)
			pr_cont(" %s", perf_ops[i]->name);
		pr_cont("\n");
		WARN_ON(!IS_MODULE(CONFIG_RCU_REF_SCALE_TEST));
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->init)
		cur_ops->init();

	ref_perf_print_module_parms(cur_ops, "Start of test");

	// Shutdown task
	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(ref_perf_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}
	// Reader tasks (default to ~75% of online CPUs).
	if (nreaders < 0)
		nreaders = (num_online_cpus() >> 1) + (num_online_cpus() >> 2); // 50% + 25% of CPUs.
	reader_tasks = kcalloc(nreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (!reader_tasks) {
		VERBOSE_PERFOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}

	VERBOSE_PERFOUT("Starting %d reader threads\n", nreaders);

	for (i = 0; i < nreaders; i++) {
		firsterr = torture_create_kthread(ref_perf_reader, (void *)i,
						  reader_tasks[i].task);
		if (firsterr)
			goto unwind;

		init_waitqueue_head(&(reader_tasks[i].wq));
	}

	// Main Task
	init_waitqueue_head(&main_wq);
	firsterr = torture_create_kthread(main_func, NULL, main_task);
	if (firsterr)
		goto unwind;

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	ref_perf_cleanup();
	return firsterr;
}
module_init(ref_perf_init);
module_exit(ref_perf_cleanup);