// SPDX-License-Identifier: GPL-2.0+
/*
 * Module-based torture test facility for locking
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *          Davidlohr Bueso <dave@stgolabs.net>
 *	Based on kernel/rcu/torture.c.
 */
#define pr_fmt(fmt) fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/rtmutex.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/reboot.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

torture_param(int, acq_writer_lim, 0, "Write_acquisition time limit (jiffies).");
torture_param(int, call_rcu_chains, 0, "Self-propagate call_rcu() chains during test (0=disable).");
torture_param(int, long_hold, 100, "Do occasional long hold of lock (ms), 0=disable");
torture_param(int, nested_locks, 0, "Number of nested locks (max = 8)");
torture_param(int, nreaders_stress, -1, "Number of read-locking stress-test threads");
torture_param(int, nwriters_stress, -1, "Number of write-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (s), 0=disable");
torture_param(int, rt_boost, 2,
	      "Do periodic rt-boost. 0=Disable, 1=Only for rt_mutex, 2=For all lock types.");
torture_param(int, rt_boost_factor, 50, "A factor determining how often rt-boost happens.");
torture_param(int, shuffle_interval, 3, "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_fifo, 0, "Run writers at sched_set_fifo() priority");

/* Going much higher trips "BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!" errors */
#define MAX_NESTED_LOCKS 8
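
/*
 * Example invocation (hypothetical parameter values; any of the
 * torture_param() knobs above can be set the same way on the
 * module command line):
 *
 *	modprobe locktorture torture_type=mutex_lock nwriters_stress=4 \
 *		 stat_interval=30 long_hold=50
 */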

static char *torture_type = IS_ENABLED(CONFIG_PREEMPT_RT) ? "raw_spin_lock" : "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");

static cpumask_var_t bind_readers; // Bind the readers to the specified set of CPUs.
static cpumask_var_t bind_writers; // Bind the writers to the specified set of CPUs.

// Parse a cpumask kernel parameter.  If there are more users later on,
// this might need to go to a more central location.
static int param_set_cpumask(const char *val, const struct kernel_param *kp)
{
	cpumask_var_t *cm_bind = kp->arg;
	int ret;
	char *s;

	if (!alloc_cpumask_var(cm_bind, GFP_KERNEL)) {
		s = "Out of memory";
		ret = -ENOMEM;
		goto out_err;
	}
	ret = cpulist_parse(val, *cm_bind);
	if (!ret)
		return ret;
	s = "Bad CPU range";
out_err:
	pr_warn("%s: %s, all CPUs set\n", kp->name, s);
	cpumask_setall(*cm_bind);
	return ret;
}

// Output a cpumask kernel parameter.
static int param_get_cpumask(char *buffer, const struct kernel_param *kp)
{
	cpumask_var_t *cm_bind = kp->arg;

	return sprintf(buffer, "%*pbl", cpumask_pr_args(*cm_bind));
}

static bool cpumask_nonempty(cpumask_var_t mask)
{
	return cpumask_available(mask) && !cpumask_empty(mask);
}

static const struct kernel_param_ops lt_bind_ops = {
	.set = param_set_cpumask,
	.get = param_get_cpumask,
};

module_param_cb(bind_readers, &lt_bind_ops, &bind_readers, 0644);
module_param_cb(bind_writers, &lt_bind_ops, &bind_writers, 0644);
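
// Example (hypothetical CPU numbers): bind_writers=0-3,8 binds the writer
// kthreads to CPUs 0 through 3 plus CPU 8, using the standard cpulist
// syntax accepted by cpulist_parse().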

long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);

static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static atomic_t lock_is_read_held;
static unsigned long last_lock_release;

struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};

struct call_rcu_chain {
	struct rcu_head crc_rh;
	bool crc_stop;
};

static struct call_rcu_chain *call_rcu_chain_list;

/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	void (*exit)(void);
	int (*nested_lock)(int tid, u32 lockset);
	int (*writelock)(int tid);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(int tid);
	void (*nested_unlock)(int tid, u32 lockset);
	int (*readlock)(int tid);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(int tid);

	unsigned long flags; /* for irq spinlocks */
	const char *name;
};

struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	bool init_called;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};

static struct lock_torture_cxt cxt = { 0, 0, false, false,
				       ATOMIC_INIT(0),
				       NULL, NULL };

/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(int tid __maybe_unused)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	/* We want a long delay occasionally to force massive contention.  */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_lock_busted_write_unlock(int tid __maybe_unused)
{
	/* BUGGY, do not use in real life!!! */
}

static void __torture_rt_boost(struct torture_random_state *trsp)
{
	const unsigned int factor = rt_boost_factor;

	if (!rt_task(current)) {
		/*
		 * Boost priority once every rt_boost_factor operations. When
		 * the task tries to take the lock, the rtmutex will account
		 * for the new priority, and do any corresponding pi-dance.
		 */
		if (trsp && !(torture_random(trsp) %
			      (cxt.nrealwriters_stress * factor))) {
			sched_set_fifo(current);
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for another 10 * rt_boost_factor
		 * operations, then restored back to its original prio, and so
		 * forth.
		 *
		 * When @trsp is nil, we want to force-reset the task for
		 * stopping the kthread.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			sched_set_normal(current, 0);
		} else /* common case, do nothing */
			return;
	}
}

static void torture_rt_boost(struct torture_random_state *trsp)
{
	if (rt_boost != 2)
		return;

	__torture_rt_boost(trsp);
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock	= torture_lock_busted_write_lock,
	.write_delay	= torture_lock_busted_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_lock_busted_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "lock_busted"
};

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	unsigned long j;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold))) {
		j = jiffies;
		mdelay(long_hold);
		pr_alert("%s: delay = %lu jiffies.\n", __func__, jiffies - j);
	}
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 200 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock	= torture_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_spin_lock_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock"
};

static int torture_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(int tid __maybe_unused)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock	= torture_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_lock_spin_write_unlock_irq,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock_irq"
};

static DEFINE_RAW_SPINLOCK(torture_raw_spinlock);

static int torture_raw_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_raw_spinlock)
{
	raw_spin_lock(&torture_raw_spinlock);
	return 0;
}

static void torture_raw_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_raw_spinlock)
{
	raw_spin_unlock(&torture_raw_spinlock);
}

static struct lock_torture_ops raw_spin_lock_ops = {
	.writelock	= torture_raw_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_raw_spin_lock_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "raw_spin_lock"
};

static int torture_raw_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_raw_spinlock)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&torture_raw_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_raw_spin_lock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_raw_spinlock)
{
	raw_spin_unlock_irqrestore(&torture_raw_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops raw_spin_lock_irq_ops = {
	.writelock	= torture_raw_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_raw_spin_lock_write_unlock_irq,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "raw_spin_lock_irq"
};

static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 10;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealreaders_stress * 2000 * long_hold)))
		mdelay(long_hold);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}

static struct lock_torture_ops rw_lock_ops = {
	.writelock	= torture_rwlock_write_lock,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_rwlock_write_unlock,
	.readlock	= torture_rwlock_read_lock,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock,
	.name		= "rw_lock"
};

static int torture_rwlock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock	= torture_rwlock_write_lock_irq,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_rwlock_write_unlock_irq,
	.readlock	= torture_rwlock_read_lock_irq,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock_irq,
	.name		= "rw_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);
static struct mutex torture_nested_mutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_mutex_keys[MAX_NESTED_LOCKS];

static void torture_mutex_init(void)
{
	int i;

	for (i = 0; i < MAX_NESTED_LOCKS; i++)
		__mutex_init(&torture_nested_mutexes[i], __func__,
			     &nested_mutex_keys[i]);
}
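
/*
 * Each bit of @lockset selects one of the nested mutexes; they are
 * acquired in ascending index order and released in descending order
 * by torture_mutex_nested_unlock() below, keeping the lock ordering
 * consistent across writers.
 */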
static int torture_mutex_nested_lock(int tid __maybe_unused,
				     u32 lockset)
{
	int i;

	for (i = 0; i < nested_locks; i++)
		if (lockset & (1 << i))
			mutex_lock(&torture_nested_mutexes[i]);
	return 0;
}

static int torture_mutex_lock(int tid __maybe_unused)
__acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	/* We want a long delay occasionally to force massive contention.  */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold * 5);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_mutex_unlock(int tid __maybe_unused)
__releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static void torture_mutex_nested_unlock(int tid __maybe_unused,
					u32 lockset)
{
	int i;

	for (i = nested_locks - 1; i >= 0; i--)
		if (lockset & (1 << i))
			mutex_unlock(&torture_nested_mutexes[i]);
}

static struct lock_torture_ops mutex_lock_ops = {
	.init		= torture_mutex_init,
	.nested_lock	= torture_mutex_nested_lock,
	.writelock	= torture_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_mutex_unlock,
	.nested_unlock	= torture_mutex_nested_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "mutex_lock"
};

#include <linux/ww_mutex.h>

/*
 * The torture ww_mutexes should belong to the same lock class as
 * torture_ww_class to avoid lockdep problems. The ww_mutex_init()
 * function is called for initialization to ensure that.
 */
static DEFINE_WD_CLASS(torture_ww_class);
static struct ww_mutex torture_ww_mutex_0, torture_ww_mutex_1, torture_ww_mutex_2;
static struct ww_acquire_ctx *ww_acquire_ctxs;

static void torture_ww_mutex_init(void)
{
	ww_mutex_init(&torture_ww_mutex_0, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_1, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_2, &torture_ww_class);

	ww_acquire_ctxs = kmalloc_array(cxt.nrealwriters_stress,
					sizeof(*ww_acquire_ctxs),
					GFP_KERNEL);
	if (!ww_acquire_ctxs)
		VERBOSE_TOROUT_STRING("ww_acquire_ctx: Out of memory");
}

static void torture_ww_mutex_exit(void)
{
	kfree(ww_acquire_ctxs);
}

static int torture_ww_mutex_lock(int tid)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(ctx, &torture_ww_class);
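
	/*
	 * Take each lock in list order.  On -EDEADLK, drop every lock
	 * acquired so far, sleep-acquire the contended lock with
	 * ww_mutex_lock_slow(), move it to the front of the list, and
	 * retry the rest: the usual wait/wound backoff sequence.
	 */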
	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, ctx);
		if (!err)
			continue;

		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (err != -EDEADLK)
			return err;

		ww_mutex_lock_slow(ll->lock, ctx);
		list_move(&ll->link, &list);
	}

	return 0;
}

static void torture_ww_mutex_unlock(int tid)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
	ww_acquire_fini(ctx);
}

static struct lock_torture_ops ww_mutex_lock_ops = {
	.init		= torture_ww_mutex_init,
	.exit		= torture_ww_mutex_exit,
	.writelock	= torture_ww_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_ww_mutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "ww_mutex_lock"
};

#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);
static struct rt_mutex torture_nested_rtmutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_rtmutex_keys[MAX_NESTED_LOCKS];

static void torture_rtmutex_init(void)
{
	int i;

	for (i = 0; i < MAX_NESTED_LOCKS; i++)
		__rt_mutex_init(&torture_nested_rtmutexes[i], __func__,
				&nested_rtmutex_keys[i]);
}

static int torture_rtmutex_nested_lock(int tid __maybe_unused,
				       u32 lockset)
{
	int i;

	for (i = 0; i < nested_locks; i++)
		if (lockset & (1 << i))
			rt_mutex_lock(&torture_nested_rtmutexes[i]);
	return 0;
}

static int torture_rtmutex_lock(int tid __maybe_unused)
__acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 200 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rtmutex_unlock(int tid __maybe_unused)
__releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

static void torture_rt_boost_rtmutex(struct torture_random_state *trsp)
{
	if (!rt_boost)
		return;

	__torture_rt_boost(trsp);
}

static void torture_rtmutex_nested_unlock(int tid __maybe_unused,
					  u32 lockset)
{
	int i;

	for (i = nested_locks - 1; i >= 0; i--)
		if (lockset & (1 << i))
			rt_mutex_unlock(&torture_nested_rtmutexes[i]);
}

static struct lock_torture_ops rtmutex_lock_ops = {
	.init		= torture_rtmutex_init,
	.nested_lock	= torture_rtmutex_nested_lock,
	.writelock	= torture_rtmutex_lock,
	.write_delay	= torture_rtmutex_delay,
	.task_boost	= torture_rt_boost_rtmutex,
	.writeunlock	= torture_rtmutex_unlock,
	.nested_unlock	= torture_rtmutex_nested_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "rtmutex_lock"
};
#endif

static DECLARE_RWSEM(torture_rwsem);

static int torture_rwsem_down_write(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	/* We want a long delay occasionally to force massive contention.  */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealwriters_stress * 2000 * long_hold)))
		mdelay(long_hold * 10);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rwsem_up_write(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	/* We want a long delay occasionally to force massive contention.  */
	if (long_hold && !(torture_random(trsp) % (cxt.nrealreaders_stress * 2000 * long_hold)))
		mdelay(long_hold * 2);
	else
		mdelay(long_hold / 2);
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rwsem_up_read(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock	= torture_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_rwsem_up_write,
	.readlock	= torture_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_rwsem_up_read,
	.name		= "rwsem_lock"
};

#include <linux/percpu-rwsem.h>
static struct percpu_rw_semaphore pcpu_rwsem;

static void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}

static void torture_percpu_rwsem_exit(void)
{
	percpu_free_rwsem(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_write(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_write(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_read(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_read(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}

static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init		= torture_percpu_rwsem_init,
	.exit		= torture_percpu_rwsem_exit,
	.writelock	= torture_percpu_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_rt_boost,
	.writeunlock	= torture_percpu_rwsem_up_write,
	.readlock	= torture_percpu_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_percpu_rwsem_up_read,
	.name		= "percpu_rwsem_lock"
};

/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	unsigned long j;
	unsigned long j1;
	u32 lockset_mask;
	struct lock_stress_stats *lwsp = arg;
	DEFINE_TORTURE_RANDOM(rand);
	bool skip_main_lock;
	int tid = lwsp - cxt.lwsa;

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	if (!rt_task(current))
		set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		lockset_mask = torture_random(&rand);
		/*
		 * When using nested_locks, we want to occasionally
		 * skip the main lock so we can avoid always serializing
		 * the lock chains on that central lock. By skipping the
		 * main lock occasionally, we can create different
		 * contention patterns (allowing for multiple disjoint
		 * blocked trees).
		 */
		skip_main_lock = (nested_locks &&
				 !(torture_random(&rand) % 100));

		cxt.cur_ops->task_boost(&rand);
		if (cxt.cur_ops->nested_lock)
			cxt.cur_ops->nested_lock(tid, lockset_mask);

		if (!skip_main_lock) {
			if (acq_writer_lim > 0)
				j = jiffies;
			cxt.cur_ops->writelock(tid);
			if (WARN_ON_ONCE(lock_is_write_held))
				lwsp->n_lock_fail++;
			lock_is_write_held = true;
			if (WARN_ON_ONCE(atomic_read(&lock_is_read_held)))
				lwsp->n_lock_fail++; /* rare, but... */
			if (acq_writer_lim > 0) {
				j1 = jiffies;
				WARN_ONCE(time_after(j1, j + acq_writer_lim),
					  "%s: Lock acquisition took %lu jiffies.\n",
					  __func__, j1 - j);
			}
			lwsp->n_lock_acquired++;

			cxt.cur_ops->write_delay(&rand);

			lock_is_write_held = false;
			WRITE_ONCE(last_lock_release, jiffies);
			cxt.cur_ops->writeunlock(tid);
		}
		if (cxt.cur_ops->nested_unlock)
			cxt.cur_ops->nested_unlock(tid, lockset_mask);

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}

/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the reader lock.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	int tid = lrsp - cxt.lrsa;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock(tid);
		atomic_inc(&lock_is_read_held);
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		atomic_dec(&lock_is_read_held);
		cxt.cur_ops->readunlock(tid);

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	long cur;
	bool fail = false;
	int i, n_stress;
	long max = 0, min = statp ? data_race(statp[0].n_lock_acquired) : 0;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (data_race(statp[i].n_lock_fail))
			fail = true;
		cur = data_race(statp[i].n_lock_acquired);
		sum += cur;
		if (max < cur)
			max = cur;
		if (min > cur)
			min = cur;
	}
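
	/*
	 * "???" flags a suspicious acquisition imbalance (max more than
	 * twice min) when CPU hotplug is not perturbing the test.
	 */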
	page += sprintf(page,
			"%s:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min,
			!onoff_interval && max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}

/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}

static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	static cpumask_t cpumask_all;
	cpumask_t *rcmp = cpumask_nonempty(bind_readers) ? bind_readers : &cpumask_all;
	cpumask_t *wcmp = cpumask_nonempty(bind_writers) ? bind_writers : &cpumask_all;

	cpumask_setall(&cpumask_all);
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: acq_writer_lim=%d bind_readers=%*pbl bind_writers=%*pbl call_rcu_chains=%d long_hold=%d nested_locks=%d nreaders_stress=%d nwriters_stress=%d onoff_holdoff=%d onoff_interval=%d rt_boost=%d rt_boost_factor=%d shuffle_interval=%d shutdown_secs=%d stat_interval=%d stutter=%d verbose=%d writer_fifo=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]" : "",
		 acq_writer_lim, cpumask_pr_args(rcmp), cpumask_pr_args(wcmp),
		 call_rcu_chains, long_hold, nested_locks, cxt.nrealreaders_stress,
		 cxt.nrealwriters_stress, onoff_holdoff, onoff_interval, rt_boost,
		 rt_boost_factor, shuffle_interval, shutdown_secs, stat_interval, stutter,
		 verbose, writer_fifo);
}

// If requested, maintain call_rcu() chains to keep a grace period always
// in flight.  These increase the probability of getting an RCU CPU stall
// warning and associated diagnostics when a locking primitive stalls.

static void call_rcu_chain_cb(struct rcu_head *rhp)
{
	struct call_rcu_chain *crcp = container_of(rhp, struct call_rcu_chain, crc_rh);

	if (!smp_load_acquire(&crcp->crc_stop)) {
		(void)start_poll_synchronize_rcu(); // Start one grace period...
		call_rcu(&crcp->crc_rh, call_rcu_chain_cb); // ... and later start another.
	}
}

// Start the requested number of call_rcu() chains.
static int call_rcu_chain_init(void)
{
	int i;

	if (call_rcu_chains <= 0)
		return 0;
	call_rcu_chain_list = kcalloc(call_rcu_chains, sizeof(*call_rcu_chain_list), GFP_KERNEL);
	if (!call_rcu_chain_list)
		return -ENOMEM;
	for (i = 0; i < call_rcu_chains; i++) {
		call_rcu_chain_list[i].crc_stop = false;
		call_rcu(&call_rcu_chain_list[i].crc_rh, call_rcu_chain_cb);
	}
	return 0;
}

// Stop all of the call_rcu() chains.
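// (The smp_store_release() of ->crc_stop below pairs with the
// smp_load_acquire() in call_rcu_chain_cb(): once a callback observes the
// flag, it stops requeueing itself, and the subsequent rcu_barrier() waits
// out any callback that was already queued.)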
static void call_rcu_chain_cleanup(void)
{
	int i;

	if (!call_rcu_chain_list)
		return;
	for (i = 0; i < call_rcu_chains; i++)
		smp_store_release(&call_rcu_chain_list[i].crc_stop, true);
	rcu_barrier();
	kfree(call_rcu_chain_list);
	call_rcu_chain_list = NULL;
}

static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module.
	 * However, cxt->cur_ops.init() may have been invoked, so besides
	 * performing the underlying torture-specific cleanups, cur_ops.exit()
	 * will be invoked if needed.
	 */
	if (!cxt.lwsa && !cxt.lrsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer, writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	cxt.lwsa = NULL;
	kfree(cxt.lrsa);
	cxt.lrsa = NULL;

	call_rcu_chain_cleanup();

end:
	if (cxt.init_called) {
		if (cxt.cur_ops->exit)
			cxt.cur_ops->exit();
		cxt.init_called = false;
	}
	torture_cleanup_end();
}

static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&raw_spin_lock_ops, &raw_spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
		&percpu_rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress == 0 &&
	    (!cxt.cur_ops->readlock || nreaders_stress == 0)) {
		pr_alert("lock-torture: must run at least one locking thread\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

	if (cxt.cur_ops->init) {
		cxt.cur_ops->init();
		cxt.init_called = true;
	}

#ifdef CONFIG_DEBUG_MUTEXES
	if (str_has_prefix(torture_type, "mutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (str_has_prefix(torture_type, "rtmutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((str_has_prefix(torture_type, "spin")) ||
	    (str_has_prefix(torture_type, "rw_lock")))
		cxt.debug_lock = true;
#endif

	/* Initialize the statistics so that each run gets its own numbers. */
	if (nwriters_stress) {
		lock_is_write_held = false;
		cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
					 sizeof(*cxt.lwsa),
					 GFP_KERNEL);
		if (cxt.lwsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealwriters_stress; i++) {
			cxt.lwsa[i].n_lock_fail = 0;
			cxt.lwsa[i].n_lock_acquired = 0;
		}
	}

2014-09-12 08:42:25 +04:00
if ( cxt . cur_ops - > readlock ) {
2014-09-12 08:40:41 +04:00
if ( nreaders_stress > = 0 )
2014-09-12 08:42:25 +04:00
cxt . nrealreaders_stress = nreaders_stress ;
2014-09-12 08:40:41 +04:00
else {
/*
* By default distribute evenly the number of
* readers and writers . We still run the same number
* of threads as the writer - only locks default .
*/
if ( nwriters_stress < 0 ) /* user doesn't care */
2014-09-12 08:42:25 +04:00
cxt . nrealwriters_stress = num_online_cpus ( ) ;
cxt . nrealreaders_stress = cxt . nrealwriters_stress ;
2014-09-12 08:40:41 +04:00
}
2017-05-15 12:07:23 +03:00
if ( nreaders_stress ) {
2018-06-12 23:55:00 +03:00
cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress, sizeof(*cxt.lrsa), GFP_KERNEL);
2017-05-15 12:07:23 +03:00
if (cxt.lrsa == NULL) {
VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
firsterr = -ENOMEM;
kfree(cxt.lwsa);
cxt.lwsa = NULL;
goto unwind;
}
for (i = 0; i < cxt.nrealreaders_stress; i++) {
cxt.lrsa[i].n_lock_fail = 0;
cxt.lrsa[i].n_lock_acquired = 0;
}
2014-09-12 08:40:41 +04:00
}
}
2016-04-12 18:47:18 +03:00
2023-08-22 05:36:10 +03:00
firsterr = call_rcu_chain_init();
if (torture_init_error(firsterr))
goto unwind;
2014-09-12 08:42:25 +04:00
lock_torture_print_module_parms(cxt.cur_ops, "Start of test");
2014-09-12 08:40:41 +04:00
/* Prepare torture context. */
2014-02-05 03:51:41 +04:00
if (onoff_interval > 0) {
firsterr = torture_onoff_init(onoff_holdoff * HZ,
2018-12-10 20:44:52 +03:00
onoff_interval * HZ, NULL);
2021-08-06 01:53:10 +03:00
if (torture_init_error(firsterr))
2014-02-05 03:51:41 +04:00
goto unwind;
}
if (shuffle_interval > 0) {
firsterr = torture_shuffle_init(shuffle_interval);
2021-08-06 01:53:10 +03:00
if (torture_init_error(firsterr))
2014-02-05 03:51:41 +04:00
goto unwind;
}
if (shutdown_secs > 0) {
firsterr = torture_shutdown_init(shutdown_secs, lock_torture_cleanup);
2021-08-06 01:53:10 +03:00
if (torture_init_error(firsterr))
2014-02-05 03:51:41 +04:00
goto unwind;
}
if (stutter > 0) {
2019-04-10 00:44:49 +03:00
firsterr = torture_stutter_init(stutter, stutter);
2021-08-06 01:53:10 +03:00
if (torture_init_error(firsterr))
2014-02-05 03:51:41 +04:00
goto unwind;
}
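/*
 * A hypothetical invocation exercising the facilities set up above
 * (parameter values chosen purely for illustration):
 *
 *   modprobe locktorture torture_type=mutex_lock stat_interval=15 \
 *            onoff_holdoff=30 onoff_interval=3 shuffle_interval=3 \
 *            stutter=5 shutdown_secs=180
 */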
2017-05-15 12:07:23 +03:00
if (nwriters_stress) {
2018-06-13 00:03:40 +03:00
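/* kcalloc() zero-initializes the array, so unused task-pointer slots start out NULL and the cleanup path can safely skip kthreads that were never created. */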
writer_tasks = kcalloc(cxt.nrealwriters_stress, sizeof(writer_tasks[0]),
2017-05-15 12:07:23 +03:00
GFP_KERNEL);
if (writer_tasks == NULL) {
2021-11-03 11:30:28 +03:00
TOROUT_ERRSTRING("writer_tasks: Out of memory");
2017-05-15 12:07:23 +03:00
firsterr = -ENOMEM;
goto unwind;
}
2014-02-05 03:51:41 +04:00
}
2014-09-12 08:40:41 +04:00
2023-02-21 22:02:35 +03:00
/* cap nested_locks to MAX_NESTED_LOCKS */
if (nested_locks > MAX_NESTED_LOCKS)
nested_locks = MAX_NESTED_LOCKS;
2014-09-12 08:42:25 +04:00
if (cxt.cur_ops->readlock) {
2018-06-13 00:03:40 +03:00
reader_tasks = kcalloc(cxt.nrealreaders_stress, sizeof(reader_tasks[0]),
2014-09-12 08:40:41 +04:00
GFP_KERNEL);
if (reader_tasks == NULL) {
2021-11-03 11:30:28 +03:00
TOROUT_ERRSTRING("reader_tasks: Out of memory");
2016-11-11 00:06:39 +03:00
kfree(writer_tasks);
writer_tasks = NULL;
2014-09-12 08:40:41 +04:00
firsterr = -ENOMEM;
goto unwind;
}
}
/*
 * Create the kthreads and start torturing (oh, those poor little locks).
 *
 * TODO: Note that we interleave writers with readers, giving writers a
 * slight advantage by creating their kthreads first. This could be made
 * configurable, or even left to a user-chosen policy, should the need
 * ever arise.
 */
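/* Creation-order example: with 3 writers and 2 readers the loop below spawns W0, R0, W1, R1, W2. */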
2014-09-12 08:42:25 +04:00
for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
j < cxt.nrealreaders_stress; i++, j++) {
if (i >= cxt.nrealwriters_stress)
2014-09-12 08:40:41 +04:00
goto create_reader;
/* Create writer. */
2023-06-03 01:02:10 +03:00
firsterr = torture_create_kthread_cb(lock_torture_writer, &cxt.lwsa[i],
writer_tasks[i],
writer_fifo ? sched_set_fifo : NULL);
2021-08-06 01:53:10 +03:00
if (torture_init_error(firsterr))
2014-02-05 03:51:41 +04:00
goto unwind;
2023-08-22 18:48:16 +03:00
if (cpumask_nonempty(bind_writers))
torture_sched_setaffinity(writer_tasks[i]->pid, bind_writers);
2014-09-12 08:40:41 +04:00
create_reader:
2014-09-12 08:42:25 +04:00
if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
2014-09-12 08:40:41 +04:00
continue;
/* Create reader. */
2014-09-12 08:42:25 +04:00
firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
2014-09-12 08:40:41 +04:00
reader_tasks[j]);
2021-08-06 01:53:10 +03:00
if (torture_init_error(firsterr))
2014-09-12 08:40:41 +04:00
goto unwind;
2023-08-22 18:48:16 +03:00
if (cpumask_nonempty(bind_readers))
torture_sched_setaffinity(reader_tasks[j]->pid, bind_readers);
2014-02-05 03:51:41 +04:00
}
if (stat_interval > 0) {
firsterr = torture_create_kthread(lock_torture_stats, NULL, stats_task);
2021-08-06 01:53:10 +03:00
if (torture_init_error(firsterr))
2014-02-05 03:51:41 +04:00
goto unwind;
}
torture_init_end();
return 0;
unwind:
torture_init_end();
lock_torture_cleanup();
2020-09-18 21:18:06 +03:00
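/* On init failure with a shutdown time specified, power off so that a scripted test run terminates instead of hanging; presumably only a modular build is expected to reach this point, hence the WARN_ON(). */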
if (shutdown_secs) {
WARN_ON(!IS_MODULE(CONFIG_LOCK_TORTURE_TEST));
kernel_power_off();
}
2014-02-05 03:51:41 +04:00
return firsterr;
}
module_init(lock_torture_init);
module_exit(lock_torture_cleanup);