// SPDX-License-Identifier: GPL-2.0+
/*
 * Common functions for in-kernel torture tests.
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *	Based on kernel/rcu/torture.c.
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <linux/ktime.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include "rcu/rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

static bool disable_onoff_at_boot;
module_param(disable_onoff_at_boot, bool, 0444);

static bool ftrace_dump_at_shutdown;
module_param(ftrace_dump_at_shutdown, bool, 0444);

static char *torture_type;
static int verbose;

/* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */
#define FULLSTOP_DONTSTOP 0	/* Normal operation. */
#define FULLSTOP_SHUTDOWN 1	/* System shutdown with torture running. */
#define FULLSTOP_RMMOD    2	/* Normal rmmod of torture. */
static int fullstop = FULLSTOP_RMMOD;
static DEFINE_MUTEX(fullstop_mutex);

#ifdef CONFIG_HOTPLUG_CPU

/*
 * Variables for online-offline handling.  Only present if CPU hotplug
 * is enabled, otherwise does nothing.
 */
static struct task_struct *onoff_task;
static long onoff_holdoff;
static long onoff_interval;
static torture_ofl_func *onoff_f;
static long n_offline_attempts;
static long n_offline_successes;
static unsigned long sum_offline;
static int min_offline = -1;
static int max_offline;
static long n_online_attempts;
static long n_online_successes;
static unsigned long sum_online;
static int min_online = -1;
static int max_online;

/*
 * Attempt to take a CPU offline.  Return false if the CPU is already
 * offline or if it is not subject to CPU-hotplug operations.  The
 * caller can detect other failures by looking at the statistics.
 */
bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
		     unsigned long *sum_offl, int *min_offl, int *max_offl)
{
	unsigned long delta;
	int ret;
	char *s;
	unsigned long starttime;

	if (!cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
		return false;
	if (num_online_cpus() <= 1)
		return false;  /* Can't offline the last CPU. */

	if (verbose > 1)
		pr_alert("%s" TORTURE_FLAG
			 "torture_onoff task: offlining %d\n",
			 torture_type, cpu);
	starttime = jiffies;
	(*n_offl_attempts)++;
	ret = remove_cpu(cpu);
	if (ret) {
		s = "";
		if (!rcu_inkernel_boot_has_ended() && ret == -EBUSY) {
			// PCI probe frequently disables hotplug during boot.
			(*n_offl_attempts)--;
			s = " (-EBUSY forgiven during boot)";
		}
		if (verbose)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: offline %d failed%s: errno %d\n",
				 torture_type, cpu, s, ret);
	} else {
		if (verbose > 1)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: offlined %d\n",
				 torture_type, cpu);
		if (onoff_f)
			onoff_f();
		(*n_offl_successes)++;
		delta = jiffies - starttime;
		*sum_offl += delta;
		if (*min_offl < 0) {
			*min_offl = delta;
			*max_offl = delta;
		}
		if (*min_offl > delta)
			*min_offl = delta;
		if (*max_offl < delta)
			*max_offl = delta;
	}

	return true;
}
EXPORT_SYMBOL_GPL(torture_offline);

/*
 * Attempt to bring a CPU online.  Return false if the CPU is already
 * online or if it is not subject to CPU-hotplug operations.  The
 * caller can detect other failures by looking at the statistics.
 */
bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
		    unsigned long *sum_onl, int *min_onl, int *max_onl)
{
	unsigned long delta;
	int ret;
	char *s;
	unsigned long starttime;

	if (cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
		return false;

	if (verbose > 1)
		pr_alert("%s" TORTURE_FLAG
			 "torture_onoff task: onlining %d\n",
			 torture_type, cpu);
	starttime = jiffies;
	(*n_onl_attempts)++;
	ret = add_cpu(cpu);
	if (ret) {
		s = "";
		if (!rcu_inkernel_boot_has_ended() && ret == -EBUSY) {
			// PCI probe frequently disables hotplug during boot.
			(*n_onl_attempts)--;
			s = " (-EBUSY forgiven during boot)";
		}
		if (verbose)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: online %d failed%s: errno %d\n",
				 torture_type, cpu, s, ret);
	} else {
		if (verbose > 1)
			pr_alert("%s" TORTURE_FLAG
				 "torture_onoff task: onlined %d\n",
				 torture_type, cpu);
		(*n_onl_successes)++;
		delta = jiffies - starttime;
		*sum_onl += delta;
		if (*min_onl < 0) {
			*min_onl = delta;
			*max_onl = delta;
		}
		if (*min_onl > delta)
			*min_onl = delta;
		if (*max_onl < delta)
			*max_onl = delta;
	}

	return true;
}
EXPORT_SYMBOL_GPL(torture_online);

/*
 * Execute random CPU-hotplug operations at the interval specified
 * by the onoff_interval.
 */
static int
torture_onoff(void *arg)
{
	int cpu;
	int maxcpu = -1;
	DEFINE_TORTURE_RANDOM(rand);
	int ret;

	VERBOSE_TOROUT_STRING("torture_onoff task started");
	for_each_online_cpu(cpu)
		maxcpu = cpu;
	WARN_ON(maxcpu < 0);
	if (!IS_MODULE(CONFIG_TORTURE_TEST)) {
		for_each_possible_cpu(cpu) {
			if (cpu_online(cpu))
				continue;
			ret = add_cpu(cpu);
			if (ret && verbose) {
				pr_alert("%s" TORTURE_FLAG
					 "%s: Initial online %d: errno %d\n",
					 __func__, torture_type, cpu, ret);
			}
		}
	}

	if (maxcpu == 0) {
		VERBOSE_TOROUT_STRING("Only one CPU, so CPU-hotplug testing is disabled");
		goto stop;
	}

	if (onoff_holdoff > 0) {
		VERBOSE_TOROUT_STRING("torture_onoff begin holdoff");
		schedule_timeout_interruptible(onoff_holdoff);
		VERBOSE_TOROUT_STRING("torture_onoff end holdoff");
	}
	while (!torture_must_stop()) {
		if (disable_onoff_at_boot && !rcu_inkernel_boot_has_ended()) {
			schedule_timeout_interruptible(HZ / 10);
			continue;
		}
		cpu = (torture_random(&rand) >> 4) % (maxcpu + 1);
		if (!torture_offline(cpu,
				     &n_offline_attempts, &n_offline_successes,
				     &sum_offline, &min_offline, &max_offline))
			torture_online(cpu,
				       &n_online_attempts, &n_online_successes,
				       &sum_online, &min_online, &max_online);
		schedule_timeout_interruptible(onoff_interval);
	}

stop:
	torture_kthread_stopping("torture_onoff");
	return 0;
}

#endif /* #ifdef CONFIG_HOTPLUG_CPU */

/*
 * Initiate online-offline handling.
 */
int torture_onoff_init(long ooholdoff, long oointerval, torture_ofl_func *f)
{
#ifdef CONFIG_HOTPLUG_CPU
	onoff_holdoff = ooholdoff;
	onoff_interval = oointerval;
	onoff_f = f;
	if (onoff_interval <= 0)
		return 0;
	return torture_create_kthread(torture_onoff, NULL, onoff_task);
#else /* #ifdef CONFIG_HOTPLUG_CPU */
	return 0;
#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_init);
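
/*
 * For illustration, a client torture module would typically call this
 * from its init path, after torture_init_begin().  A minimal sketch,
 * in which "onoff_holdoff" (seconds) and "onoff_interval" (jiffies)
 * are hypothetical client-side module parameters:
 *
 *	firsterr = torture_onoff_init(onoff_holdoff * HZ,
 *				      onoff_interval, NULL);
 *	if (firsterr)
 *		goto unwind;
 *
 * Passing NULL as the torture_ofl_func pointer simply skips the
 * post-offline callback.
 */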

/*
 * Clean up after online/offline testing.
 */
static void torture_onoff_cleanup(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (onoff_task == NULL)
		return;
	VERBOSE_TOROUT_STRING("Stopping torture_onoff task");
	kthread_stop(onoff_task);
	onoff_task = NULL;
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
}

/*
 * Print online/offline testing statistics.
 */
void torture_onoff_stats(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	pr_cont("onoff: %ld/%ld:%ld/%ld %d,%d:%d,%d %lu:%lu (HZ=%d) ",
		n_online_successes, n_online_attempts,
		n_offline_successes, n_offline_attempts,
		min_online, max_online,
		min_offline, max_offline,
		sum_online, sum_offline, HZ);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_stats);

/*
 * Were all the online/offline operations successful?
 */
bool torture_onoff_failures(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	return n_online_successes != n_online_attempts ||
	       n_offline_successes != n_offline_attempts;
#else /* #ifdef CONFIG_HOTPLUG_CPU */
	return false;
#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
}
EXPORT_SYMBOL_GPL(torture_onoff_failures);

#define TORTURE_RANDOM_MULT	39916801  /* prime */
#define TORTURE_RANDOM_ADD	479001701 /* prime */
#define TORTURE_RANDOM_REFRESH	10000

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from cpu_clock().
 */
unsigned long
torture_random(struct torture_random_state *trsp)
{
	if (--trsp->trs_count < 0) {
		trsp->trs_state += (unsigned long)local_clock();
		trsp->trs_count = TORTURE_RANDOM_REFRESH;
	}
	trsp->trs_state = trsp->trs_state * TORTURE_RANDOM_MULT +
		TORTURE_RANDOM_ADD;
	return swahw32(trsp->trs_state);
}
EXPORT_SYMBOL_GPL(torture_random);
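
/*
 * Example usage, given a state defined by the DEFINE_TORTURE_RANDOM()
 * helper from linux/torture.h.  The low-order bits of a linear
 * congruential generator are its weakest, so callers in this file
 * shift the returned value right before reducing it modulo the
 * desired range (see torture_onoff() above); "nelems" here is a
 * hypothetical range:
 *
 *	DEFINE_TORTURE_RANDOM(rand);
 *	int idx = (torture_random(&rand) >> 4) % nelems;
 *
 * Note that this generator is meant only for spreading out test load;
 * it makes no statistical-quality guarantees.
 */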

/*
 * Variables for shuffling.  The idea is to ensure that each CPU stays
 * idle for an extended period to test interactions with dyntick idle,
 * as well as interactions with any per-CPU variables.
 */
struct shuffle_task {
	struct list_head st_l;
	struct task_struct *st_t;
};
static long shuffle_interval;	/* In jiffies. */
static struct task_struct *shuffler_task;
static cpumask_var_t shuffle_tmp_mask;
static int shuffle_idle_cpu;	/* Force all torture tasks off this CPU */
static struct list_head shuffle_task_list = LIST_HEAD_INIT(shuffle_task_list);
static DEFINE_MUTEX(shuffle_task_mutex);

/*
 * Register a task to be shuffled.  If there is no memory, just splat
 * and don't bother registering.
 */
void torture_shuffle_task_register(struct task_struct *tp)
{
	struct shuffle_task *stp;

	if (WARN_ON_ONCE(tp == NULL))
		return;
	stp = kmalloc(sizeof(*stp), GFP_KERNEL);
	if (WARN_ON_ONCE(stp == NULL))
		return;
	stp->st_t = tp;
	mutex_lock(&shuffle_task_mutex);
	list_add(&stp->st_l, &shuffle_task_list);
	mutex_unlock(&shuffle_task_mutex);
}
EXPORT_SYMBOL_GPL(torture_shuffle_task_register);

/*
 * Unregister all tasks, for example, at the end of the torture run.
 */
static void torture_shuffle_task_unregister_all(void)
{
	struct shuffle_task *stp;
	struct shuffle_task *p;

	mutex_lock(&shuffle_task_mutex);
	list_for_each_entry_safe(stp, p, &shuffle_task_list, st_l) {
		list_del(&stp->st_l);
		kfree(stp);
	}
	mutex_unlock(&shuffle_task_mutex);
}

/* Shuffle tasks such that we allow shuffle_idle_cpu to become idle.
 * A special case is when shuffle_idle_cpu = -1, in which case we allow
 * the tasks to run on all CPUs.
 */
static void torture_shuffle_tasks(void)
{
	struct shuffle_task *stp;

	cpumask_setall(shuffle_tmp_mask);
	get_online_cpus();

	/* No point in shuffling if there is only one online CPU (ex: UP) */
	if (num_online_cpus() == 1) {
		put_online_cpus();
		return;
	}

	/* Advance to the next CPU.  Upon overflow, don't idle any CPUs. */
	shuffle_idle_cpu = cpumask_next(shuffle_idle_cpu, shuffle_tmp_mask);
	if (shuffle_idle_cpu >= nr_cpu_ids)
		shuffle_idle_cpu = -1;
	else
		cpumask_clear_cpu(shuffle_idle_cpu, shuffle_tmp_mask);

	mutex_lock(&shuffle_task_mutex);
	list_for_each_entry(stp, &shuffle_task_list, st_l)
		set_cpus_allowed_ptr(stp->st_t, shuffle_tmp_mask);
	mutex_unlock(&shuffle_task_mutex);

	put_online_cpus();
}

/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle at a time and cut off its timer ticks.  This is meant
 * to test the support for such tickless idle CPU in RCU.
 */
static int torture_shuffle(void *arg)
{
	VERBOSE_TOROUT_STRING("torture_shuffle task started");
	do {
		schedule_timeout_interruptible(shuffle_interval);
		torture_shuffle_tasks();
		torture_shutdown_absorb("torture_shuffle");
	} while (!torture_must_stop());
	torture_kthread_stopping("torture_shuffle");
	return 0;
}

/*
 * Start the shuffler, with shuffint in jiffies.
 */
int torture_shuffle_init(long shuffint)
{
	shuffle_interval = shuffint;
	shuffle_idle_cpu = -1;

	if (!alloc_cpumask_var(&shuffle_tmp_mask, GFP_KERNEL)) {
		VERBOSE_TOROUT_ERRSTRING("Failed to alloc mask");
		return -ENOMEM;
	}

	/* Create the shuffler thread */
	return torture_create_kthread(torture_shuffle, NULL, shuffler_task);
}
EXPORT_SYMBOL_GPL(torture_shuffle_init);
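
/*
 * Illustrative call from a client module, where "shuffle_interval" is
 * a hypothetical client-side module parameter given in seconds:
 *
 *	firsterr = torture_shuffle_init(shuffle_interval * HZ);
 *	if (firsterr)
 *		goto unwind;
 *
 * Tasks created via torture_create_kthread() are registered for
 * shuffling automatically; any others must call
 * torture_shuffle_task_register() themselves.
 */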
/*
* Stop the shuffling .
*/
2014-01-31 02:21:11 +04:00
static void torture_shuffle_cleanup ( void )
2014-01-29 03:29:21 +04:00
{
torture_shuffle_task_unregister_all ( ) ;
if ( shuffler_task ) {
VERBOSE_TOROUT_STRING ( " Stopping torture_shuffle task " ) ;
kthread_stop ( shuffler_task ) ;
free_cpumask_var ( shuffle_tmp_mask ) ;
}
shuffler_task = NULL ;
}

/*
 * Variables for auto-shutdown.  This allows "lights out" torture runs
 * to be fully scripted.
 */
static struct task_struct *shutdown_task;
static ktime_t shutdown_time;		/* time to system shutdown. */
static void (*torture_shutdown_hook)(void);

/*
 * Absorb kthreads into a kernel function that won't return, so that
 * they won't ever access module text or data again.
 */
void torture_shutdown_absorb(const char *title)
{
	while (READ_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		pr_notice("torture thread %s parking due to system shutdown\n",
			  title);
		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
	}
}
EXPORT_SYMBOL_GPL(torture_shutdown_absorb);

/*
 * Cause the torture test to shutdown the system after the test has
 * run for the time specified by the shutdown_secs parameter.
 */
static int torture_shutdown(void *arg)
{
	ktime_t ktime_snap;

	VERBOSE_TOROUT_STRING("torture_shutdown task started");
	ktime_snap = ktime_get();
	while (ktime_before(ktime_snap, shutdown_time) &&
	       !torture_must_stop()) {
		if (verbose)
			pr_alert("%s" TORTURE_FLAG
				 "torture_shutdown task: %llu ms remaining\n",
				 torture_type,
				 ktime_ms_delta(shutdown_time, ktime_snap));
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_hrtimeout(&shutdown_time, HRTIMER_MODE_ABS);
		ktime_snap = ktime_get();
	}
	if (torture_must_stop()) {
		torture_kthread_stopping("torture_shutdown");
		return 0;
	}

	/* OK, shut down the system. */
	VERBOSE_TOROUT_STRING("torture_shutdown task shutting down system");
	shutdown_task = NULL;	/* Avoid self-kill deadlock. */
	if (torture_shutdown_hook)
		torture_shutdown_hook();
	else
		VERBOSE_TOROUT_STRING("No torture_shutdown_hook(), skipping.");
	if (ftrace_dump_at_shutdown)
		rcu_ftrace_dump(DUMP_ALL);
	kernel_power_off();	/* Shut down the system. */
	return 0;
}

/*
 * Start up the shutdown task.
 */
int torture_shutdown_init(int ssecs, void (*cleanup)(void))
{
	torture_shutdown_hook = cleanup;
	if (ssecs > 0) {
		shutdown_time = ktime_add(ktime_get(), ktime_set(ssecs, 0));
		return torture_create_kthread(torture_shutdown, NULL,
					      shutdown_task);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(torture_shutdown_init);
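
/*
 * Illustrative call from a client module's init function, where
 * "shutdown_secs" is a hypothetical module parameter and
 * "client_cleanup" a hypothetical hook that tears down client state
 * before the system powers off:
 *
 *	firsterr = torture_shutdown_init(shutdown_secs, client_cleanup);
 *	if (firsterr)
 *		goto unwind;
 *
 * Passing ssecs <= 0 (the "no scripted shutdown" case) skips creating
 * the shutdown kthread altogether.
 */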

/*
 * Detect and respond to a system shutdown.
 */
static int torture_shutdown_notify(struct notifier_block *unused1,
				   unsigned long unused2, void *unused3)
{
	mutex_lock(&fullstop_mutex);
	if (READ_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
		VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
		WRITE_ONCE(fullstop, FULLSTOP_SHUTDOWN);
	} else {
		pr_warn("Concurrent rmmod and shutdown illegal!\n");
	}
	mutex_unlock(&fullstop_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block torture_shutdown_nb = {
	.notifier_call = torture_shutdown_notify,
};

/*
 * Shut down the shutdown task.  Say what???  Heh!  This can happen if
 * the torture module gets an rmmod before the shutdown time arrives.  ;-)
 */
static void torture_shutdown_cleanup(void)
{
	unregister_reboot_notifier(&torture_shutdown_nb);
	if (shutdown_task != NULL) {
		VERBOSE_TOROUT_STRING("Stopping torture_shutdown task");
		kthread_stop(shutdown_task);
	}
	shutdown_task = NULL;
}

/*
 * Variables for stuttering, which means to periodically pause and
 * restart testing in order to catch bugs that appear when load is
 * suddenly applied to or removed from the system.
 */
static struct task_struct *stutter_task;
static int stutter_pause_test;
static int stutter;
static int stutter_gap;

/*
 * Block until the stutter interval ends.  This must be called periodically
 * by all running kthreads that need to be subject to stuttering.
 */
bool stutter_wait(const char *title)
{
	ktime_t delay;
	unsigned int i = 0;
	bool ret = false;
	int spt;

	cond_resched_tasks_rcu_qs();
	spt = READ_ONCE(stutter_pause_test);
	for (; spt; spt = READ_ONCE(stutter_pause_test)) {
		if (!ret) {
			sched_set_normal(current, MAX_NICE);
			ret = true;
		}
		if (spt == 1) {
			schedule_timeout_interruptible(1);
		} else if (spt == 2) {
			while (READ_ONCE(stutter_pause_test)) {
				if (!(i++ & 0xffff)) {
					set_current_state(TASK_INTERRUPTIBLE);
					delay = 10 * NSEC_PER_USEC;
					schedule_hrtimeout(&delay, HRTIMER_MODE_REL);
				}
				cond_resched();
			}
		} else {
			schedule_timeout_interruptible(round_jiffies_relative(HZ));
		}
		torture_shutdown_absorb(title);
	}
	return ret;
}
EXPORT_SYMBOL_GPL(stutter_wait);
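
/*
 * Sketch of the expected calling convention: each load-generating
 * kthread polls stutter_wait() once per pass through its main loop,
 * where "do_one_torture_op()" is a hypothetical stand-in for the
 * client's real work:
 *
 *	do {
 *		do_one_torture_op();
 *		stutter_wait("my_torture_kthread");
 *	} while (!torture_must_stop());
 *
 * The return value reports whether stutter_wait() dropped the caller
 * to MAX_NICE, in case the caller wants to restore its priority.
 */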

/*
 * Cause the torture test to "stutter", starting and stopping all
 * threads periodically.
 */
static int torture_stutter(void *arg)
{
	ktime_t delay;
	DEFINE_TORTURE_RANDOM(rand);
	int wtime;

	VERBOSE_TOROUT_STRING("torture_stutter task started");
	do {
		if (!torture_must_stop() && stutter > 1) {
			wtime = stutter;
			if (stutter > 2) {
				WRITE_ONCE(stutter_pause_test, 1);
				wtime = stutter - 3;
				delay = ktime_divns(NSEC_PER_SEC * wtime, HZ);
				delay += (torture_random(&rand) >> 3) % NSEC_PER_MSEC;
				set_current_state(TASK_INTERRUPTIBLE);
				schedule_hrtimeout(&delay, HRTIMER_MODE_REL);
				wtime = 2;
			}
			WRITE_ONCE(stutter_pause_test, 2);
			delay = ktime_divns(NSEC_PER_SEC * wtime, HZ);
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_hrtimeout(&delay, HRTIMER_MODE_REL);
		}
		WRITE_ONCE(stutter_pause_test, 0);
		if (!torture_must_stop())
			schedule_timeout_interruptible(stutter_gap);
		torture_shutdown_absorb("torture_stutter");
	} while (!torture_must_stop());
	torture_kthread_stopping("torture_stutter");
	return 0;
}

/*
 * Initialize and kick off the torture_stutter kthread.
 */
int torture_stutter_init(const int s, const int sgap)
{
	stutter = s;
	stutter_gap = sgap;
	return torture_create_kthread(torture_stutter, NULL, stutter_task);
}
EXPORT_SYMBOL_GPL(torture_stutter_init);

/*
 * Cleanup after the torture_stutter kthread.
 */
static void torture_stutter_cleanup(void)
{
	if (!stutter_task)
		return;
	VERBOSE_TOROUT_STRING("Stopping torture_stutter task");
	kthread_stop(stutter_task);
	stutter_task = NULL;
}

/*
 * Initialize torture module.  Please note that this is -not- invoked via
 * the usual module_init() mechanism, but rather by an explicit call from
 * the client torture module.  This call must be paired with a later
 * torture_init_end().
 */
bool torture_init_begin(char *ttype, int v)
{
	mutex_lock(&fullstop_mutex);
	if (torture_type != NULL) {
		pr_alert("torture_init_begin: Refusing %s init: %s running.\n",
			 ttype, torture_type);
		pr_alert("torture_init_begin: One torture test at a time!\n");
		mutex_unlock(&fullstop_mutex);
		return false;
	}
	torture_type = ttype;
	verbose = v;
	fullstop = FULLSTOP_DONTSTOP;
	return true;
}
EXPORT_SYMBOL_GPL(torture_init_begin);

/*
 * Tell the torture module that initialization is complete.
 */
void torture_init_end(void)
{
	mutex_unlock(&fullstop_mutex);
	register_reboot_notifier(&torture_shutdown_nb);
}
EXPORT_SYMBOL_GPL(torture_init_end);
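
/*
 * Putting the pieces together, a client module's init function follows
 * this general shape (a sketch; the "my_torture" names are
 * hypothetical):
 *
 *	static int __init my_torture_init(void)
 *	{
 *		if (!torture_init_begin("my_torture", verbose))
 *			return -EBUSY;
 *		// ... create kthreads, start onoff/shuffle/stutter ...
 *		torture_init_end();
 *		return 0;
 *	unwind:
 *		torture_init_end();
 *		my_torture_cleanup();
 *		return firsterr;
 *	}
 */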

/*
 * Clean up torture module.  Please note that this is -not- invoked via
 * the usual module_exit() mechanism, but rather by an explicit call from
 * the client torture module.  Returns true if a race with system shutdown
 * is detected, otherwise, all kthreads started by functions in this file
 * will be shut down.
 *
 * This must be called before the caller starts shutting down its own
 * kthreads.
 *
 * Both torture_cleanup_begin() and torture_cleanup_end() must be paired,
 * in order to correctly perform the cleanup.  They are separated because
 * the client's kthreads can still need to reference torture_type, so it
 * is set to NULL only after all other cleanup has completed.
 */
bool torture_cleanup_begin(void)
{
	mutex_lock(&fullstop_mutex);
	if (READ_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
		pr_warn("Concurrent rmmod and shutdown illegal!\n");
		mutex_unlock(&fullstop_mutex);
		schedule_timeout_uninterruptible(10);
		return true;
	}
	WRITE_ONCE(fullstop, FULLSTOP_RMMOD);
	mutex_unlock(&fullstop_mutex);
	torture_shutdown_cleanup();
	torture_shuffle_cleanup();
	torture_stutter_cleanup();
	torture_onoff_cleanup();
	return false;
}
EXPORT_SYMBOL_GPL(torture_cleanup_begin);

void torture_cleanup_end(void)
{
	mutex_lock(&fullstop_mutex);
	torture_type = NULL;
	mutex_unlock(&fullstop_mutex);
}
EXPORT_SYMBOL_GPL(torture_cleanup_end);
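
/*
 * The matching cleanup path, again as a sketch with hypothetical
 * "my_torture" names:
 *
 *	static void my_torture_cleanup(void)
 *	{
 *		if (torture_cleanup_begin())
 *			return;	// Racing with system shutdown.
 *		// ... stop the client's own kthreads, free state ...
 *		torture_cleanup_end();
 *	}
 */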

/*
 * Is it time for the current torture test to stop?
 */
bool torture_must_stop(void)
{
	return torture_must_stop_irq() || kthread_should_stop();
}
EXPORT_SYMBOL_GPL(torture_must_stop);

/*
 * Is it time for the current torture test to stop?  This is the irq-safe
 * version, hence no check for kthread_should_stop().
 */
bool torture_must_stop_irq(void)
{
	return READ_ONCE(fullstop) != FULLSTOP_DONTSTOP;
}
EXPORT_SYMBOL_GPL(torture_must_stop_irq);

/*
 * Each kthread must wait for kthread_should_stop() before returning from
 * its top-level function, otherwise segfaults ensue.  This function
 * prints a "stopping" message and waits for kthread_should_stop(), and
 * should be called from all torture kthreads immediately prior to
 * returning.
 */
void torture_kthread_stopping(char *title)
{
	char buf[128];

	snprintf(buf, sizeof(buf), "Stopping %s", title);
	VERBOSE_TOROUT_STRING(buf);
	while (!kthread_should_stop()) {
		torture_shutdown_absorb(title);
		schedule_timeout_uninterruptible(1);
	}
}
EXPORT_SYMBOL_GPL(torture_kthread_stopping);

/*
 * Create a generic torture kthread that is immediately runnable.  If you
 * need the kthread to be stopped so that you can do something to it before
 * it starts, you will need to open-code your own.
 */
int _torture_create_kthread(int (*fn)(void *arg), void *arg, char *s, char *m,
			    char *f, struct task_struct **tp)
{
	int ret = 0;

	VERBOSE_TOROUT_STRING(m);
	*tp = kthread_run(fn, arg, "%s", s);
	if (IS_ERR(*tp)) {
		ret = PTR_ERR(*tp);
		VERBOSE_TOROUT_ERRSTRING(f);
		*tp = NULL;
	}
	torture_shuffle_task_register(*tp);
	return ret;
}
EXPORT_SYMBOL_GPL(_torture_create_kthread);
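
/*
 * Clients normally invoke this through the torture_create_kthread()
 * wrapper macro in linux/torture.h, which derives the kthread name and
 * the log strings from the function name.  For example, with a
 * hypothetical thread function "my_loop" and task pointer "my_task":
 *
 *	static struct task_struct *my_task;
 *
 *	firsterr = torture_create_kthread(my_loop, NULL, my_task);
 *	if (firsterr)
 *		goto unwind;
 */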

/*
 * Stop a generic kthread, emitting a message.
 */
void _torture_stop_kthread(char *m, struct task_struct **tp)
{
	if (*tp == NULL)
		return;
	VERBOSE_TOROUT_STRING(m);
	kthread_stop(*tp);
	*tp = NULL;
}
EXPORT_SYMBOL_GPL(_torture_stop_kthread);