// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based scalability-test facility
 *
 * Copyright (C) IBM Corporation, 2015
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/rcupdate_trace.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

#define SCALE_FLAG "-scale:"
#define SCALEOUT_STRING(s) \
	pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s)
#define VERBOSE_SCALEOUT_STRING(s) \
	do { if (verbose) pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s); } while (0)
#define SCALEOUT_ERRSTRING(s) \
	pr_alert("%s" SCALE_FLAG "!!! %s\n", scale_type, s)

/*
 * The intended use cases for the nreaders and nwriters module parameters
 * are as follows:
 *
 * 1.	Specify only the nr_cpus kernel boot parameter.  This will
 *	set both nreaders and nwriters to the value specified by
 *	nr_cpus for a mixed reader/writer test.
 *
 * 2.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuscale.nreaders to zero.  This will set nwriters to the
 *	value specified by nr_cpus for an update-only test.
 *
 * 3.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuscale.nwriters to zero.  This will set nreaders to the
 *	value specified by nr_cpus for a read-only test.
 *
 * Various other use cases may of course be specified.  (An illustrative
 * boot-parameter sketch for these use cases appears just below.)
 *
 * Note that this test's readers are intended only as a test load for
 * the writers.  The reader scalability statistics will be overly
 * pessimistic due to the per-critical-section interrupt disabling,
 * test-end checks, and the pair of calls through pointers.
 */
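
/*
 * Illustrative kernel-boot-parameter sketches for the three use cases
 * above.  The specific values are examples only, not recommendations:
 *
 *   1. Mixed readers/writers:	nr_cpus=8
 *   2. Update-only:		nr_cpus=8 rcuscale.nreaders=0
 *   3. Read-only:		nr_cpus=8 rcuscale.nwriters=0
 *
 * When built as a module, the same parameters may instead be passed at
 * modprobe time, for example "modprobe rcuscale nreaders=0 nwriters=4".
 */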

#ifdef MODULE
# define RCUSCALE_SHUTDOWN 0
#else
# define RCUSCALE_SHUTDOWN 1
#endif

torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
torture_param(int, gp_async_max, 1000, "Max # outstanding waits per writer");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
torture_param(int, minruntime, 0, "Minimum run time (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, RCUSCALE_SHUTDOWN,
	      "Shutdown at end of scalability tests.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
torture_param(int, writer_holdoff_jiffies, 0, "Holdoff (jiffies) between GPs, zero to disable");
torture_param(int, kfree_rcu_test, 0, "Do we run a kfree_rcu() scale test?");
torture_param(int, kfree_mult, 1, "Multiple of kfree_obj size to allocate.");
torture_param(int, kfree_by_call_rcu, 0, "Use call_rcu() to emulate kfree_rcu()?");

static char *scale_type = "rcu";
module_param(scale_type, charp, 0444);
MODULE_PARM_DESC(scale_type, "Type of RCU to scalability-test (rcu, srcu, ...)");
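
/*
 * For example (values purely illustrative), booting with
 * "rcuscale.scale_type=srcu rcuscale.nwriters=4" exercises the SRCU
 * flavor with four updater kthreads; the default scale_type of "rcu"
 * tests vanilla RCU.
 */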

static int nrealreaders;
static int nrealwriters;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *shutdown_task;

static u64 **writer_durations;
static int *writer_n_durations;
static atomic_t n_rcu_scale_reader_started;
static atomic_t n_rcu_scale_writer_started;
static atomic_t n_rcu_scale_writer_finished;
static wait_queue_head_t shutdown_wq;
static u64 t_rcu_scale_writer_started;
static u64 t_rcu_scale_writer_finished;
static unsigned long b_rcu_gp_test_started;
static unsigned long b_rcu_gp_test_finished;
static DEFINE_PER_CPU(atomic_t, n_async_inflight);

#define MAX_MEAS 10000
#define MIN_MEAS 100

/*
 * Operations vector for selecting different types of tests.
 */
struct rcu_scale_ops {
	int ptype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	unsigned long (*exp_completed)(void);
	void (*async)(struct rcu_head *head, rcu_callback_t func);
	void (*gp_barrier)(void);
	void (*sync)(void);
	void (*exp_sync)(void);
	struct task_struct *(*rso_gp_kthread)(void);
	const char *name;
};

static struct rcu_scale_ops *cur_ops;

/*
 * Definitions for rcu scalability testing.
 */

static int rcu_scale_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_scale_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static unsigned long __maybe_unused rcu_no_completed(void)
{
	return 0;
}

static void rcu_sync_scale_init(void)
{
}

static struct rcu_scale_ops rcu_ops = {
	.ptype = RCU_FLAVOR,
	.init = rcu_sync_scale_init,
	.readlock = rcu_scale_read_lock,
	.readunlock = rcu_scale_read_unlock,
	.get_gp_seq = rcu_get_gp_seq,
	.gp_diff = rcu_seq_diff,
	.exp_completed = rcu_exp_batches_completed,
	.async = call_rcu_hurry,
	.gp_barrier = rcu_barrier,
	.sync = synchronize_rcu,
	.exp_sync = synchronize_rcu_expedited,
	.name = "rcu"
};

/*
 * Definitions for srcu scalability testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl_scale);
static struct srcu_struct *srcu_ctlp = &srcu_ctl_scale;

static int srcu_scale_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void srcu_scale_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_scale_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_rcu_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_scale_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_scale_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_scale_ops srcu_ops = {
	.ptype = SRCU_FLAVOR,
	.init = rcu_sync_scale_init,
	.readlock = srcu_scale_read_lock,
	.readunlock = srcu_scale_read_unlock,
	.get_gp_seq = srcu_scale_completed,
	.gp_diff = rcu_seq_diff,
	.exp_completed = srcu_scale_completed,
	.async = srcu_call_rcu,
	.gp_barrier = srcu_rcu_barrier,
	.sync = srcu_scale_synchronize,
	.exp_sync = srcu_scale_synchronize_expedited,
	.name = "srcu"
};

static struct srcu_struct srcud;

static void srcu_sync_scale_init(void)
{
	srcu_ctlp = &srcud;
	init_srcu_struct(srcu_ctlp);
}

static void srcu_sync_scale_cleanup(void)
{
	cleanup_srcu_struct(srcu_ctlp);
}

static struct rcu_scale_ops srcud_ops = {
	.ptype = SRCU_FLAVOR,
	.init = srcu_sync_scale_init,
	.cleanup = srcu_sync_scale_cleanup,
	.readlock = srcu_scale_read_lock,
	.readunlock = srcu_scale_read_unlock,
	.get_gp_seq = srcu_scale_completed,
	.gp_diff = rcu_seq_diff,
	.exp_completed = srcu_scale_completed,
	.async = srcu_call_rcu,
	.gp_barrier = srcu_rcu_barrier,
	.sync = srcu_scale_synchronize,
	.exp_sync = srcu_scale_synchronize_expedited,
	.name = "srcud"
};

#ifdef CONFIG_TASKS_RCU

/*
 * Definitions for RCU-tasks scalability testing.
 */

static int tasks_scale_read_lock(void)
{
	return 0;
}

static void tasks_scale_read_unlock(int idx)
{
}

static struct rcu_scale_ops tasks_ops = {
	.ptype = RCU_TASKS_FLAVOR,
	.init = rcu_sync_scale_init,
	.readlock = tasks_scale_read_lock,
	.readunlock = tasks_scale_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.gp_diff = rcu_seq_diff,
	.async = call_rcu_tasks,
	.gp_barrier = rcu_barrier_tasks,
	.sync = synchronize_rcu_tasks,
	.exp_sync = synchronize_rcu_tasks,
	.rso_gp_kthread = get_rcu_tasks_gp_kthread,
	.name = "tasks"
};

#define TASKS_OPS &tasks_ops,

#else // #ifdef CONFIG_TASKS_RCU

#define TASKS_OPS

#endif // #else // #ifdef CONFIG_TASKS_RCU
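
/*
 * Note that TASKS_OPS (like TASKS_RUDE_OPS and TASKS_TRACING_OPS below)
 * deliberately expands either to "&tasks_ops," with a trailing comma or
 * to nothing at all.  This lets the macro be dropped directly into the
 * scale_ops[] array initializer in rcu_scale_init() regardless of which
 * RCU Tasks flavors are configured.
 */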

#ifdef CONFIG_TASKS_RUDE_RCU

/*
 * Definitions for RCU-tasks-rude scalability testing.
 */

static int tasks_rude_scale_read_lock(void)
{
	return 0;
}

static void tasks_rude_scale_read_unlock(int idx)
{
}

static struct rcu_scale_ops tasks_rude_ops = {
	.ptype = RCU_TASKS_RUDE_FLAVOR,
	.init = rcu_sync_scale_init,
	.readlock = tasks_rude_scale_read_lock,
	.readunlock = tasks_rude_scale_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.gp_diff = rcu_seq_diff,
	.async = call_rcu_tasks_rude,
	.gp_barrier = rcu_barrier_tasks_rude,
	.sync = synchronize_rcu_tasks_rude,
	.exp_sync = synchronize_rcu_tasks_rude,
	.rso_gp_kthread = get_rcu_tasks_rude_gp_kthread,
	.name = "tasks-rude"
};

#define TASKS_RUDE_OPS &tasks_rude_ops,

#else // #ifdef CONFIG_TASKS_RUDE_RCU

#define TASKS_RUDE_OPS

#endif // #else // #ifdef CONFIG_TASKS_RUDE_RCU

#ifdef CONFIG_TASKS_TRACE_RCU

/*
 * Definitions for RCU-tasks-trace scalability testing.
 */

static int tasks_trace_scale_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_trace_scale_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static struct rcu_scale_ops tasks_tracing_ops = {
	.ptype = RCU_TASKS_FLAVOR,
	.init = rcu_sync_scale_init,
	.readlock = tasks_trace_scale_read_lock,
	.readunlock = tasks_trace_scale_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.gp_diff = rcu_seq_diff,
	.async = call_rcu_tasks_trace,
	.gp_barrier = rcu_barrier_tasks_trace,
	.sync = synchronize_rcu_tasks_trace,
	.exp_sync = synchronize_rcu_tasks_trace,
	.rso_gp_kthread = get_rcu_tasks_trace_gp_kthread,
	.name = "tasks-tracing"
};

#define TASKS_TRACING_OPS &tasks_tracing_ops,

#else // #ifdef CONFIG_TASKS_TRACE_RCU

#define TASKS_TRACING_OPS

#endif // #else // #ifdef CONFIG_TASKS_TRACE_RCU

static unsigned long rcuscale_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

/*
 * If scalability tests complete, wait for shutdown to commence.
 */
static void rcu_scale_wait_shutdown(void)
{
	cond_resched_tasks_rcu_qs();
	if (atomic_read(&n_rcu_scale_writer_finished) < nrealwriters)
		return;
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);
}

/*
 * RCU scalability reader kthread.  Repeatedly does empty RCU read-side
 * critical section, minimizing update-side interference.  However, the
 * point of this test is not to evaluate reader scalability, but instead
 * to serve as a test load for update-side scalability testing.
 */
static int
rcu_scale_reader(void *arg)
{
	unsigned long flags;
	int idx;
	long me = (long)arg;

	VERBOSE_SCALEOUT_STRING("rcu_scale_reader task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_rcu_scale_reader_started);

	do {
		local_irq_save(flags);
		idx = cur_ops->readlock();
		cur_ops->readunlock(idx);
		local_irq_restore(flags);
		rcu_scale_wait_shutdown();
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_scale_reader");
	return 0;
}

/*
 * Callback function for asynchronous grace periods from rcu_scale_writer().
 */
static void rcu_scale_async_cb(struct rcu_head *rhp)
{
	atomic_dec(this_cpu_ptr(&n_async_inflight));
	kfree(rhp);
}

/*
 * RCU scale writer kthread.  Repeatedly does a grace period.
 */
static int
rcu_scale_writer(void *arg)
{
	int i = 0;
	int i_max;
	unsigned long jdone;
	long me = (long)arg;
	struct rcu_head *rhp = NULL;
	bool started = false, done = false, alldone = false;
	u64 t;
	DEFINE_TORTURE_RANDOM(tr);
	u64 *wdp;
	u64 *wdpp = writer_durations[me];

	VERBOSE_SCALEOUT_STRING("rcu_scale_writer task started");
	WARN_ON(!wdpp);
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	current->flags |= PF_NO_SETAFFINITY;
	sched_set_fifo_low(current);

	if (holdoff)
		schedule_timeout_idle(holdoff * HZ);

	/*
	 * Wait until rcu_end_inkernel_boot() is called for normal GP tests
	 * so that RCU is not always expedited for normal GP tests.
	 * The system_state test is approximate, but works well in practice.
	 */
	while (!gp_exp && system_state != SYSTEM_RUNNING)
		schedule_timeout_uninterruptible(1);

	t = ktime_get_mono_fast_ns();
	if (atomic_inc_return(&n_rcu_scale_writer_started) >= nrealwriters) {
		t_rcu_scale_writer_started = t;
		if (gp_exp) {
			b_rcu_gp_test_started =
				cur_ops->exp_completed() / 2;
		} else {
			b_rcu_gp_test_started = cur_ops->get_gp_seq();
		}
	}

	jdone = jiffies + minruntime * HZ;
	do {
		if (writer_holdoff)
			udelay(writer_holdoff);
		if (writer_holdoff_jiffies)
			schedule_timeout_idle(torture_random(&tr) % writer_holdoff_jiffies + 1);
		wdp = &wdpp[i];
		*wdp = ktime_get_mono_fast_ns();
		if (gp_async) {
retry:
			if (!rhp)
				rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
			if (rhp && atomic_read(this_cpu_ptr(&n_async_inflight)) < gp_async_max) {
				atomic_inc(this_cpu_ptr(&n_async_inflight));
				cur_ops->async(rhp, rcu_scale_async_cb);
				rhp = NULL;
			} else if (!kthread_should_stop()) {
				cur_ops->gp_barrier();
				goto retry;
			} else {
				kfree(rhp); /* Because we are stopping. */
			}
		} else if (gp_exp) {
			cur_ops->exp_sync();
		} else {
			cur_ops->sync();
		}
		t = ktime_get_mono_fast_ns();
		*wdp = t - *wdp;
		i_max = i;
		if (!started &&
		    atomic_read(&n_rcu_scale_writer_started) >= nrealwriters)
			started = true;
		if (!done && i >= MIN_MEAS && time_after(jiffies, jdone)) {
			done = true;
			sched_set_normal(current, 0);
			pr_alert("%s%s rcu_scale_writer %ld has %d measurements\n",
				 scale_type, SCALE_FLAG, me, MIN_MEAS);
			if (atomic_inc_return(&n_rcu_scale_writer_finished) >=
			    nrealwriters) {
				schedule_timeout_interruptible(10);
				rcu_ftrace_dump(DUMP_ALL);
				SCALEOUT_STRING("Test complete");
				t_rcu_scale_writer_finished = t;
				if (gp_exp) {
					b_rcu_gp_test_finished =
						cur_ops->exp_completed() / 2;
				} else {
					b_rcu_gp_test_finished =
						cur_ops->get_gp_seq();
				}
				if (shutdown) {
					smp_mb(); /* Assign before wake. */
					wake_up(&shutdown_wq);
				}
			}
		}
		if (done && !alldone &&
		    atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters)
			alldone = true;
		if (started && !alldone && i < MAX_MEAS - 1)
			i++;
		rcu_scale_wait_shutdown();
	} while (!torture_must_stop());
	if (gp_async) {
		cur_ops->gp_barrier();
	}
	writer_n_durations[me] = i_max + 1;
	torture_kthread_stopping("rcu_scale_writer");
	return 0;
}

static void
rcu_scale_print_module_parms(struct rcu_scale_ops *cur_ops, const char *tag)
{
	pr_alert("%s" SCALE_FLAG
		 "--- %s: gp_async=%d gp_async_max=%d gp_exp=%d holdoff=%d minruntime=%d nreaders=%d nwriters=%d writer_holdoff=%d writer_holdoff_jiffies=%d verbose=%d shutdown=%d\n",
		 scale_type, tag, gp_async, gp_async_max, gp_exp, holdoff, minruntime, nrealreaders, nrealwriters, writer_holdoff, writer_holdoff_jiffies, verbose, shutdown);
}

/*
 * Return the number if non-negative.  If -1, the number of CPUs.
 * If less than -1, that much less than the number of CPUs, but
 * at least one.
 */
static int compute_real(int n)
{
	int nr;

	if (n >= 0) {
		nr = n;
	} else {
		nr = num_online_cpus() + 1 + n;
		if (nr <= 0)
			nr = 1;
	}
	return nr;
}
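
/*
 * For example (assuming an 8-CPU system, purely for illustration):
 * compute_real(4) returns 4, compute_real(-1) returns 8, and
 * compute_real(-3) returns 8 + 1 - 3 = 6.  Any result that would be
 * zero or negative is clamped to 1.
 */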

/*
 * kfree_rcu() scalability tests: Start a kfree_rcu() loop on all CPUs for number
 * of iterations and measure total time and number of GP for all iterations to complete.
 */

torture_param(int, kfree_nthreads, -1, "Number of threads running loops of kfree_rcu().");
torture_param(int, kfree_alloc_num, 8000, "Number of allocations and frees done in an iteration.");
torture_param(int, kfree_loops, 10, "Number of loops doing kfree_alloc_num allocations and frees.");
torture_param(bool, kfree_rcu_test_double, false, "Do we run a kfree_rcu() double-argument scale test?");
torture_param(bool, kfree_rcu_test_single, false, "Do we run a kfree_rcu() single-argument scale test?");
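
/*
 * As a purely illustrative example, booting with
 * "rcuscale.kfree_rcu_test=1 rcuscale.kfree_nthreads=16" makes each of
 * the 16 kthreads run kfree_loops loops of kfree_alloc_num allocations,
 * each freed via kfree_rcu() (or via call_rcu() when kfree_by_call_rcu=1).
 */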

static struct task_struct **kfree_reader_tasks;
static int kfree_nrealthreads;
static atomic_t n_kfree_scale_thread_started;
static atomic_t n_kfree_scale_thread_ended;
static struct task_struct *kthread_tp;
static u64 kthread_stime;

struct kfree_obj {
	char kfree_obj[8];
	struct rcu_head rh;
};
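
/*
 * Note that kfree_scale_thread() allocates kfree_mult * sizeof(struct kfree_obj)
 * bytes per object.  As a rough illustration, on a typical 64-bit kernel
 * this structure is 24 bytes (an 8-byte array plus a two-pointer
 * rcu_head), so kfree_mult=2 requests about 48 bytes per allocation;
 * exact sizes depend on the architecture and structure padding.
 */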

/* Used if doing RCU-kfree'ing via call_rcu(). */
static void kfree_call_rcu(struct rcu_head *rh)
{
	struct kfree_obj *obj = container_of(rh, struct kfree_obj, rh);

	kfree(obj);
}

static int
kfree_scale_thread(void *arg)
{
	int i, loop = 0;
	long me = (long)arg;
	struct kfree_obj *alloc_ptr;
	u64 start_time, end_time;
	long long mem_begin, mem_during = 0;
	bool kfree_rcu_test_both;
	DEFINE_TORTURE_RANDOM(tr);

	VERBOSE_SCALEOUT_STRING("kfree_scale_thread task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	kfree_rcu_test_both = (kfree_rcu_test_single == kfree_rcu_test_double);

	start_time = ktime_get_mono_fast_ns();

	if (atomic_inc_return(&n_kfree_scale_thread_started) >= kfree_nrealthreads) {
		if (gp_exp)
			b_rcu_gp_test_started = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_started = cur_ops->get_gp_seq();
	}

	do {
		if (!mem_during) {
			mem_during = mem_begin = si_mem_available();
		} else if (loop % (kfree_loops / 4) == 0) {
			mem_during = (mem_during + si_mem_available()) / 2;
		}

		for (i = 0; i < kfree_alloc_num; i++) {
			alloc_ptr = kmalloc(kfree_mult * sizeof(struct kfree_obj), GFP_KERNEL);
			if (!alloc_ptr)
				return -ENOMEM;

			if (kfree_by_call_rcu) {
				call_rcu(&(alloc_ptr->rh), kfree_call_rcu);
				continue;
			}

			// By default kfree_rcu_test_single and kfree_rcu_test_double are
			// initialized to false. If both have the same value (false or true)
			// both are randomly tested, otherwise only the one with value true
			// is tested.
			if ((kfree_rcu_test_single && !kfree_rcu_test_double) ||
					(kfree_rcu_test_both && torture_random(&tr) & 0x800))
				kfree_rcu_mightsleep(alloc_ptr);
			else
				kfree_rcu(alloc_ptr, rh);
		}

		cond_resched();
	} while (!torture_must_stop() && ++loop < kfree_loops);

	if (atomic_inc_return(&n_kfree_scale_thread_ended) >= kfree_nrealthreads) {
		end_time = ktime_get_mono_fast_ns();

		if (gp_exp)
			b_rcu_gp_test_finished = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_finished = cur_ops->get_gp_seq();

		pr_alert("Total time taken by all kfree'ers: %llu ns, loops: %d, batches: %ld, memory footprint: %lldMB\n",
			 (unsigned long long)(end_time - start_time), kfree_loops,
			 rcuscale_seq_diff(b_rcu_gp_test_finished, b_rcu_gp_test_started),
			 (mem_begin - mem_during) >> (20 - PAGE_SHIFT));

		if (shutdown) {
			smp_mb(); /* Assign before wake. */
			wake_up(&shutdown_wq);
		}
	}

	torture_kthread_stopping("kfree_scale_thread");
	return 0;
}

static void
kfree_scale_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	if (kfree_reader_tasks) {
		for (i = 0; i < kfree_nrealthreads; i++)
			torture_stop_kthread(kfree_scale_thread,
					     kfree_reader_tasks[i]);
		kfree(kfree_reader_tasks);
	}

	torture_cleanup_end();
}

/*
 * shutdown kthread.  Just waits to be awakened, then shuts down system.
 */
static int
kfree_scale_shutdown(void *arg)
{
	wait_event_idle(shutdown_wq,
			atomic_read(&n_kfree_scale_thread_ended) >= kfree_nrealthreads);

	smp_mb(); /* Wake before output. */

	kfree_scale_cleanup();
	kernel_power_off();
	return -EINVAL;
}

// Used if doing RCU-kfree'ing via call_rcu().
static unsigned long jiffies_at_lazy_cb;
static struct rcu_head lazy_test1_rh;
static int rcu_lazy_test1_cb_called;
static void call_rcu_lazy_test1(struct rcu_head *rh)
{
	jiffies_at_lazy_cb = jiffies;
	WRITE_ONCE(rcu_lazy_test1_cb_called, 1);
}

static int __init
kfree_scale_init(void)
{
	int firsterr = 0;
	long i;
	unsigned long jif_start;
	unsigned long orig_jif;

	pr_alert("%s" SCALE_FLAG
		 "--- kfree_rcu_test: kfree_mult=%d kfree_by_call_rcu=%d kfree_nthreads=%d kfree_alloc_num=%d kfree_loops=%d kfree_rcu_test_double=%d kfree_rcu_test_single=%d\n",
		 scale_type, kfree_mult, kfree_by_call_rcu, kfree_nthreads, kfree_alloc_num, kfree_loops, kfree_rcu_test_double, kfree_rcu_test_single);

	// Also, do a quick self-test to ensure laziness is as much as
	// expected.
	if (kfree_by_call_rcu && !IS_ENABLED(CONFIG_RCU_LAZY)) {
		pr_alert("CONFIG_RCU_LAZY is disabled, falling back to kfree_rcu() for delayed RCU kfree'ing\n");
		kfree_by_call_rcu = 0;
	}

	if (kfree_by_call_rcu) {
		/* do a test to check the timeout. */
		orig_jif = rcu_lazy_get_jiffies_till_flush();

		rcu_lazy_set_jiffies_till_flush(2 * HZ);
		rcu_barrier();

		jif_start = jiffies;
		jiffies_at_lazy_cb = 0;
		call_rcu(&lazy_test1_rh, call_rcu_lazy_test1);

		smp_cond_load_relaxed(&rcu_lazy_test1_cb_called, VAL == 1);

		rcu_lazy_set_jiffies_till_flush(orig_jif);

		if (WARN_ON_ONCE(jiffies_at_lazy_cb - jif_start < 2 * HZ)) {
			pr_alert("ERROR: call_rcu() CBs are not being lazy as expected!\n");
			WARN_ON_ONCE(1);
			return -1;
		}

		if (WARN_ON_ONCE(jiffies_at_lazy_cb - jif_start > 3 * HZ)) {
			pr_alert("ERROR: call_rcu() CBs are being too lazy!\n");
			WARN_ON_ONCE(1);
			return -1;
		}
	}

	kfree_nrealthreads = compute_real(kfree_nthreads);
	/* Start up the kthreads. */
	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(kfree_scale_shutdown, NULL,
						  shutdown_task);
		if (torture_init_error(firsterr))
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}

	pr_alert("kfree object size=%zu, kfree_by_call_rcu=%d\n",
		 kfree_mult * sizeof(struct kfree_obj),
		 kfree_by_call_rcu);

	kfree_reader_tasks = kcalloc(kfree_nrealthreads, sizeof(kfree_reader_tasks[0]),
				     GFP_KERNEL);
	if (kfree_reader_tasks == NULL) {
		firsterr = -ENOMEM;
		goto unwind;
	}

	for (i = 0; i < kfree_nrealthreads; i++) {
		firsterr = torture_create_kthread(kfree_scale_thread, (void *)i,
						  kfree_reader_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	while (atomic_read(&n_kfree_scale_thread_started) < kfree_nrealthreads)
		schedule_timeout_uninterruptible(1);

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	kfree_scale_cleanup();
	return firsterr;
}

static void
rcu_scale_cleanup(void)
{
	int i;
	int j;
	int ngps = 0;
	u64 *wdp;
	u64 *wdpp;

	/*
	 * Would like warning at start, but everything is expedited
	 * during the mid-boot phase, so have to wait till the end.
	 */
	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
		SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
	if (rcu_gp_is_normal() && gp_exp)
		SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
	if (gp_exp && gp_async)
		SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");

	// If built-in, just report all of the GP kthread's CPU time.
	if (IS_BUILTIN(CONFIG_RCU_SCALE_TEST) && !kthread_tp && cur_ops->rso_gp_kthread)
		kthread_tp = cur_ops->rso_gp_kthread();
	if (kthread_tp) {
		u32 ns;
		u64 us;

		kthread_stime = kthread_tp->stime - kthread_stime;
		us = div_u64_rem(kthread_stime, 1000, &ns);
		pr_info("rcu_scale: Grace-period kthread CPU time: %llu.%03u us\n", us, ns);
		show_rcu_gp_kthreads();
	}
	if (kfree_rcu_test) {
		kfree_scale_cleanup();
		return;
	}

	if (torture_cleanup_begin())
		return;
	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_scale_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}

	if (writer_tasks) {
		for (i = 0; i < nrealwriters; i++) {
			torture_stop_kthread(rcu_scale_writer,
					     writer_tasks[i]);
			if (!writer_n_durations)
				continue;
			j = writer_n_durations[i];
			pr_alert("%s%s writer %d gps: %d\n",
				 scale_type, SCALE_FLAG, i, j);
			ngps += j;
		}
		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
			 scale_type, SCALE_FLAG,
			 t_rcu_scale_writer_started, t_rcu_scale_writer_finished,
			 t_rcu_scale_writer_finished -
			 t_rcu_scale_writer_started,
			 ngps,
			 rcuscale_seq_diff(b_rcu_gp_test_finished,
					   b_rcu_gp_test_started));
		for (i = 0; i < nrealwriters; i++) {
			if (!writer_durations)
				break;
			if (!writer_n_durations)
				continue;
			wdpp = writer_durations[i];
			if (!wdpp)
				continue;
			for (j = 0; j < writer_n_durations[i]; j++) {
				wdp = &wdpp[j];
				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
					 scale_type, SCALE_FLAG,
					 i, j, *wdp);
				if (j % 100 == 0)
					schedule_timeout_uninterruptible(1);
			}
			kfree(writer_durations[i]);
		}
		kfree(writer_tasks);
		kfree(writer_durations);
		kfree(writer_n_durations);
	}

	/* Do torture-type-specific cleanup operations. */
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}

/*
 * RCU scalability shutdown kthread.  Just waits to be awakened, then shuts
 * down system.
 */
static int
rcu_scale_shutdown(void *arg)
{
	wait_event_idle(shutdown_wq, atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters);
	smp_mb(); /* Wake before output. */
	rcu_scale_cleanup();
	kernel_power_off();
	return -EINVAL;
}

static int __init
rcu_scale_init(void)
{
	long i;
	int firsterr = 0;
	static struct rcu_scale_ops *scale_ops[] = {
		&rcu_ops, &srcu_ops, &srcud_ops, TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
	};

	if (!torture_init_begin(scale_type, verbose))
		return -EBUSY;

	/* Process args and announce that the scalability'er is on the job. */
	for (i = 0; i < ARRAY_SIZE(scale_ops); i++) {
		cur_ops = scale_ops[i];
		if (strcmp(scale_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(scale_ops)) {
		pr_alert("rcu-scale: invalid scale type: \"%s\"\n", scale_type);
		pr_alert("rcu-scale types: ");
		for (i = 0; i < ARRAY_SIZE(scale_ops); i++)
			pr_cont(" %s", scale_ops[i]->name);
		pr_cont("\n");
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (cur_ops->rso_gp_kthread) {
		kthread_tp = cur_ops->rso_gp_kthread();
		if (kthread_tp)
			kthread_stime = kthread_tp->stime;
	}

	if (kfree_rcu_test)
		return kfree_scale_init();

	nrealwriters = compute_real(nwriters);
	nrealreaders = compute_real(nreaders);
	atomic_set(&n_rcu_scale_reader_started, 0);
	atomic_set(&n_rcu_scale_writer_started, 0);
	atomic_set(&n_rcu_scale_writer_finished, 0);
	rcu_scale_print_module_parms(cur_ops, "Start of test");

	/* Start up the kthreads. */
	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(rcu_scale_shutdown, NULL,
						  shutdown_task);
		if (torture_init_error(firsterr))
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		SCALEOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_scale_reader, (void *)i,
						  reader_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	while (atomic_read(&n_rcu_scale_reader_started) < nrealreaders)
		schedule_timeout_uninterruptible(1);
	writer_tasks = kcalloc(nrealwriters, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations),
				   GFP_KERNEL);
	writer_n_durations =
		kcalloc(nrealwriters, sizeof(*writer_n_durations),
			GFP_KERNEL);
	if (!writer_tasks || !writer_durations || !writer_n_durations) {
		SCALEOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealwriters; i++) {
		writer_durations[i] =
			kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
				GFP_KERNEL);
		if (!writer_durations[i]) {
			firsterr = -ENOMEM;
			goto unwind;
		}
		firsterr = torture_create_kthread(rcu_scale_writer, (void *)i,
						  writer_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_scale_cleanup();
	if (shutdown) {
		WARN_ON(!IS_MODULE(CONFIG_RCU_SCALE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(rcu_scale_init);
module_exit(rcu_scale_cleanup);