/*
 * Module-based torture test facility for locking
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *          Davidlohr Bueso <dave@stgolabs.net>
 *	Based on kernel/rcu/torture.c.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/rwlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/rtmutex.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/percpu-rwsem.h>
#include <linux/torture.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");

torture_param(int, nwriters_stress, -1,
	     "Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1,
	     "Number of read-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	     "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
	     "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60,
	     "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(bool, verbose, true,
	     "Enable verbose debugging printk()s");

static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");
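
/*
 * For example, a hypothetical invocation exercising an rwsem with four
 * writer and eight reader kthreads, printing statistics every 30 seconds,
 * might look like:
 *
 *	modprobe locktorture torture_type=rwsem_lock \
 *		nwriters_stress=4 nreaders_stress=8 stat_interval=30
 */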

static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static bool lock_is_read_held;

struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};

/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	int (*writelock)(void);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(void);
	int (*readlock)(void);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(void);

	unsigned long flags; /* for irq spinlocks */
	const char *name;
};
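
/*
 * Note that a NULL ->readlock marks an exclusive-only primitive: the
 * setup code below spawns reader kthreads and allocates reader statistics
 * only when ->readlock is non-NULL.
 */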

struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};

static struct lock_torture_cxt cxt = { 0, 0, false,
				       ATOMIC_INIT(0),
				       NULL, NULL };

/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(void)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_lock_busted_write_unlock(void)
{
	/* BUGGY, do not use in real life!!! */
}

static void torture_boost_dummy(struct torture_random_state *trsp)
{
	/* Only rtmutexes care about priority */
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock	= torture_lock_busted_write_lock,
	.write_delay	= torture_lock_busted_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_lock_busted_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "lock_busted"
};
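
/*
 * Because lock_busted_ops provides no mutual exclusion whatsoever, a run
 * with torture_type=lock_busted should report lock failures; it serves as
 * a sanity check that the torture harness can detect a broken lock.
 */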

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_spin_lock_write_unlock(void) __releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock	= torture_spin_lock_write_lock,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_spin_lock_write_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock"
};
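
/*
 * A note on the delay arithmetic above: !(torture_random(trsp) % n) is
 * true roughly once per n calls, so scaling n by cxt.nrealwriters_stress
 * keeps the system-wide rate of long delays (and of voluntary preemption
 * points) roughly constant as the number of stress threads varies.
 */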

static int torture_spin_lock_write_lock_irq(void)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(void)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock	= torture_spin_lock_write_lock_irq,
	.write_delay	= torture_spin_lock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_lock_spin_write_unlock_irq,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "spin_lock_irq"
};

static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(void) __acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(void) __releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(void) __acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 10;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(void) __releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}

static struct lock_torture_ops rw_lock_ops = {
	.writelock	= torture_rwlock_write_lock,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock,
	.readlock	= torture_rwlock_read_lock,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock,
	.name		= "rw_lock"
};

static int torture_rwlock_write_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(void)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(void)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock	= torture_rwlock_write_lock_irq,
	.write_delay	= torture_rwlock_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_rwlock_write_unlock_irq,
	.readlock	= torture_rwlock_read_lock_irq,
	.read_delay	= torture_rwlock_read_delay,
	.readunlock	= torture_rwlock_read_unlock_irq,
	.name		= "rw_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);

static int torture_mutex_lock(void) __acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 5);
	else
		mdelay(longdelay_ms / 5);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_mutex_unlock(void) __releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static struct lock_torture_ops mutex_lock_ops = {
	.writelock	= torture_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_mutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "mutex_lock"
};

#include <linux/ww_mutex.h>
static DEFINE_WW_CLASS(torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_0, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_1, &torture_ww_class);
static DEFINE_WW_MUTEX(torture_ww_mutex_2, &torture_ww_class);

static int torture_ww_mutex_lock(void)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx ctx;

	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(&ctx, &torture_ww_class);

	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, &ctx);
		if (!err)
			continue;

		/*
		 * On failure, drop the locks acquired so far in reverse
		 * order before deciding how to proceed.
		 */
		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (err != -EDEADLK)
			return err;

		/*
		 * -EDEADLK means this context lost the wound/wait tiebreak:
		 * wait for the contended lock via the slow path, then move
		 * it to the front of the list and retry the remaining locks.
		 */
		ww_mutex_lock_slow(ll->lock, &ctx);
		list_move(&ll->link, &list);
	}

	ww_acquire_fini(&ctx);
	return 0;
}

static void torture_ww_mutex_unlock(void)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
}

static struct lock_torture_ops ww_mutex_lock_ops = {
	.writelock	= torture_ww_mutex_lock,
	.write_delay	= torture_mutex_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_ww_mutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "ww_mutex_lock"
};
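
/*
 * Unlock order is irrelevant for ww_mutexes, so torture_ww_mutex_unlock
 * can release the three locks in a fixed order regardless of the order
 * in which torture_ww_mutex_lock ended up acquiring them.
 */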

#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);

static int torture_rtmutex_lock(void) __acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}

static void torture_rtmutex_boost(struct torture_random_state *trsp)
{
	int policy;
	struct sched_param param;
	const unsigned int factor = 50000; /* yes, quite arbitrary */

	if (!rt_task(current)) {
		/*
		 * Boost priority once every ~50k operations. When the
		 * task tries to take the lock, the rtmutex will account
		 * for the new priority, and do any corresponding pi-dance.
		 */
		if (trsp && !(torture_random(trsp) %
			      (cxt.nrealwriters_stress * factor))) {
			policy = SCHED_FIFO;
			param.sched_priority = MAX_RT_PRIO - 1;
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for another ~500k operations,
		 * then restored back to its original prio, and so forth.
		 *
		 * When @trsp is nil, we want to force-reset the task for
		 * stopping the kthread.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			policy = SCHED_NORMAL;
			param.sched_priority = 0;
		} else /* common case, do nothing */
			return;
	}

	sched_setscheduler_nocheck(current, policy, &param);
}

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rtmutex_unlock(void) __releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

static struct lock_torture_ops rtmutex_lock_ops = {
	.writelock	= torture_rtmutex_lock,
	.write_delay	= torture_rtmutex_delay,
	.task_boost	= torture_rtmutex_boost,
	.writeunlock	= torture_rtmutex_unlock,
	.readlock	= NULL,
	.read_delay	= NULL,
	.readunlock	= NULL,
	.name		= "rtmutex_lock"
};
#endif
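
/*
 * The writer kthread invokes ->task_boost() immediately before
 * ->writelock(), so the priority changes made in torture_rtmutex_boost()
 * are exercised through the rtmutex priority-inheritance machinery on the
 * very next acquisition attempt.
 */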

static DECLARE_RWSEM(torture_rwsem);

static int torture_rwsem_down_write(void) __acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 10);
	else
		mdelay(longdelay_ms / 10);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rwsem_up_write(void) __releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(void) __acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 2);
	else
		mdelay(longdelay_ms / 2);
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rwsem_up_read(void) __releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock	= torture_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_rwsem_up_write,
	.readlock	= torture_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_rwsem_up_read,
	.name		= "rwsem_lock"
};

static struct percpu_rw_semaphore pcpu_rwsem;

static void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}

static int torture_percpu_rwsem_down_write(void) __acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_write(void) __releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_read(void) __acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_read(void) __releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}

static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init		= torture_percpu_rwsem_init,
	.writelock	= torture_percpu_rwsem_down_write,
	.write_delay	= torture_rwsem_write_delay,
	.task_boost	= torture_boost_dummy,
	.writeunlock	= torture_percpu_rwsem_up_write,
	.readlock	= torture_percpu_rwsem_down_read,
	.read_delay	= torture_rwsem_read_delay,
	.readunlock	= torture_percpu_rwsem_up_read,
	.name		= "percpu_rwsem_lock"
};
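
/*
 * Unlike the statically-initialized locks above, a percpu_rw_semaphore
 * needs run-time setup to allocate its per-CPU state, which is why this
 * is the only ops vector that supplies an ->init hook.
 */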

/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	struct lock_stress_stats *lwsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->task_boost(&rand);
		cxt.cur_ops->writelock();
		if (WARN_ON_ONCE(lock_is_write_held))
			lwsp->n_lock_fail++;
		lock_is_write_held = 1;
		if (WARN_ON_ONCE(lock_is_read_held))
			lwsp->n_lock_fail++; /* rare, but... */

		lwsp->n_lock_acquired++;
		cxt.cur_ops->write_delay(&rand);
		lock_is_write_held = 0;
		cxt.cur_ops->writeunlock();

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}
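
/*
 * The lock_is_write_held flag is how mutual-exclusion failures show up:
 * it is set for the duration of each write-side critical section, so if
 * the lock under test ever admits two writers at once (as lock_busted
 * deliberately does), the second writer observes the flag already set,
 * the WARN_ON_ONCE() fires, and n_lock_fail is incremented.
 */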

/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the reader lock.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	static DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock();
		lock_is_read_held = 1;
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		lock_is_read_held = 0;
		cxt.cur_ops->readunlock();

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	bool fail = 0;
	int i, n_stress;
	long max = 0, min = statp ? statp[0].n_lock_acquired : 0;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (statp[i].n_lock_fail)
			fail = true;
		sum += statp[i].n_lock_acquired;
		if (max < statp[i].n_lock_acquired)
			max = statp[i].n_lock_acquired;
		if (min > statp[i].n_lock_acquired)
			min = statp[i].n_lock_acquired;
	}
	page += sprintf(page,
			"%s:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min, max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}
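
/*
 * In the line printed above, "???" flags a suspicious imbalance in
 * per-thread acquisition counts (the busiest thread got more than twice
 * the quietest one, i.e. max / 2 > min), and "!!!" flags outright lock
 * failures.
 */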

/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}

static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]" : "",
		 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
		 verbose, shuffle_interval, stutter, shutdown_secs,
		 onoff_interval, onoff_holdoff);
}

static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * Indicates early cleanup, meaning that the test has not run,
	 * such as when passing bogus args when loading the module. As
	 * such, only perform the underlying torture-specific cleanups,
	 * and avoid anything related to locktorture.
	 */
	if (!cxt.lwsa && !cxt.lrsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer,
					     writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	cxt.lwsa = NULL;
	kfree(cxt.lrsa);
	cxt.lrsa = NULL;

end:
	torture_cleanup_end();
}
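
/*
 * As the cleanup code above shows, a completed run reports its verdict
 * on the console: "End of test: SUCCESS" when no errors were detected,
 * "End of test: FAILURE" when lock failures were observed, and
 * "End of test: LOCK_HOTPLUG" when only CPU-hotplug operations failed.
 */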

static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
		&percpu_rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress == 0 && nreaders_stress == 0) {
		pr_alert("lock-torture: must run at least one locking thread\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (cxt.cur_ops->init)
		cxt.cur_ops->init();

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

#ifdef CONFIG_DEBUG_MUTEXES
	if (strncmp(torture_type, "mutex", 5) == 0)
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (strncmp(torture_type, "rtmutex", 7) == 0)
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((strncmp(torture_type, "spin", 4) == 0) ||
	    (strncmp(torture_type, "rw_lock", 7) == 0))
		cxt.debug_lock = true;
#endif

	/* Initialize the statistics so that each run gets its own numbers. */
	if (nwriters_stress) {
		lock_is_write_held = 0;
		cxt.lwsa = kmalloc(sizeof(*cxt.lwsa) * cxt.nrealwriters_stress,
				   GFP_KERNEL);
		if (cxt.lwsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealwriters_stress; i++) {
			cxt.lwsa[i].n_lock_fail = 0;
			cxt.lwsa[i].n_lock_acquired = 0;
		}
	}

	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default, distribute the threads evenly between
			 * readers and writers, keeping the same total number
			 * of threads as the writer-only locks default to.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}

		if (nreaders_stress) {
			lock_is_read_held = 0;
			cxt.lrsa = kmalloc(sizeof(*cxt.lrsa) * cxt.nrealreaders_stress,
					   GFP_KERNEL);
			if (cxt.lrsa == NULL) {
				VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
				firsterr = -ENOMEM;
				kfree(cxt.lwsa);
				cxt.lwsa = NULL;
				goto unwind;
			}

			for (i = 0; i < cxt.nrealreaders_stress; i++) {
				cxt.lrsa[i].n_lock_fail = 0;
				cxt.lrsa[i].n_lock_acquired = 0;
			}
		}
	}

	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ);
		if (firsterr)
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (firsterr)
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (firsterr)
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter);
		if (firsterr)
			goto unwind;
	}

	if (nwriters_stress) {
		writer_tasks = kzalloc(cxt.nrealwriters_stress *
				       sizeof(writer_tasks[0]),
				       GFP_KERNEL);
		if (writer_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("writer_tasks: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	if (cxt.cur_ops->readlock) {
		reader_tasks = kzalloc(cxt.nrealreaders_stress *
				       sizeof(reader_tasks[0]),
				       GFP_KERNEL);
		if (reader_tasks == NULL) {
			VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
			kfree(writer_tasks);
			writer_tasks = NULL;
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers
	 * a slight advantage, by creating their kthreads first. This can be
	 * modified for very specific needs, or even let the user choose the
	 * policy, if ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (firsterr)
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);