locktorture updates for v6.4

This update adds tests for nested locking and adds support for testing raw spinlocks in PREEMPT_RT kernels.

Merge tag 'locktorture.2023.04.04a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu

Pull locktorture updates from Paul McKenney:
 "This adds tests for nested locking and also adds support for testing
  raw spinlocks in PREEMPT_RT kernels"

* tag 'locktorture.2023.04.04a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu:
  locktorture: Add raw_spinlock* torture tests for PREEMPT_RT kernels
  locktorture: With nested locks, occasionally skip main lock
  locktorture: Add nested locking to rtmutex torture tests
  locktorture: Add nested locking to mutex torture tests
  locktorture: Add nested_[un]lock() hooks and nlocks parameter
commit 4a4075ada6
Linus Torvalds, 2023-04-24 12:05:08 -07:00
6 changed files with 188 additions and 16 deletions
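
As a quick way to exercise the new coverage, the locktorture module can be loaded with the parameters this series introduces. A minimal sketch, assuming a kernel built with CONFIG_LOCK_TORTURE_TEST=m; the torture_type values and the nested_locks parameter come from the diff below, while the value 4 is purely illustrative:

modprobe locktorture torture_type=raw_spin_lock
modprobe locktorture torture_type=mutex_lock nested_locks=4

Note that on PREEMPT_RT kernels the diff also switches the default torture_type to "raw_spin_lock", so the first invocation matches what an unparameterized load would now do there.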


@@ -51,8 +51,11 @@ torture_param(int, rt_boost, 2,
torture_param(int, rt_boost_factor, 50, "A factor determining how often rt-boost happens.");
torture_param(int, verbose, 1,
"Enable verbose debugging printk()s");
torture_param(int, nested_locks, 0, "Number of nested locks (max = 8)");
/* Going much higher trips "BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!" errors */
#define MAX_NESTED_LOCKS 8
static char *torture_type = "spin_lock";
static char *torture_type = IS_ENABLED(CONFIG_PREEMPT_RT) ? "raw_spin_lock" : "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
"Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");
@@ -79,10 +82,12 @@ static void lock_torture_cleanup(void);
struct lock_torture_ops {
void (*init)(void);
void (*exit)(void);
int (*nested_lock)(int tid, u32 lockset);
int (*writelock)(int tid);
void (*write_delay)(struct torture_random_state *trsp);
void (*task_boost)(struct torture_random_state *trsp);
void (*writeunlock)(int tid);
void (*nested_unlock)(int tid, u32 lockset);
int (*readlock)(int tid);
void (*read_delay)(struct torture_random_state *trsp);
void (*readunlock)(int tid);
@@ -252,6 +257,59 @@ static struct lock_torture_ops spin_lock_irq_ops = {
.name = "spin_lock_irq"
};
static DEFINE_RAW_SPINLOCK(torture_raw_spinlock);
static int torture_raw_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_raw_spinlock)
{
raw_spin_lock(&torture_raw_spinlock);
return 0;
}
static void torture_raw_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_raw_spinlock)
{
raw_spin_unlock(&torture_raw_spinlock);
}
static struct lock_torture_ops raw_spin_lock_ops = {
.writelock = torture_raw_spin_lock_write_lock,
.write_delay = torture_spin_lock_write_delay,
.task_boost = torture_rt_boost,
.writeunlock = torture_raw_spin_lock_write_unlock,
.readlock = NULL,
.read_delay = NULL,
.readunlock = NULL,
.name = "raw_spin_lock"
};
static int torture_raw_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_raw_spinlock)
{
unsigned long flags;
raw_spin_lock_irqsave(&torture_raw_spinlock, flags);
cxt.cur_ops->flags = flags;
return 0;
}
static void torture_raw_spin_lock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_raw_spinlock)
{
raw_spin_unlock_irqrestore(&torture_raw_spinlock, cxt.cur_ops->flags);
}
static struct lock_torture_ops raw_spin_lock_irq_ops = {
.writelock = torture_raw_spin_lock_write_lock_irq,
.write_delay = torture_spin_lock_write_delay,
.task_boost = torture_rt_boost,
.writeunlock = torture_raw_spin_lock_write_unlock_irq,
.readlock = NULL,
.read_delay = NULL,
.readunlock = NULL,
.name = "raw_spin_lock_irq"
};
static DEFINE_RWLOCK(torture_rwlock);
static int torture_rwlock_write_lock(int tid __maybe_unused)
@@ -365,6 +423,28 @@ static struct lock_torture_ops rw_lock_irq_ops = {
};
static DEFINE_MUTEX(torture_mutex);
static struct mutex torture_nested_mutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_mutex_keys[MAX_NESTED_LOCKS];
static void torture_mutex_init(void)
{
int i;
for (i = 0; i < MAX_NESTED_LOCKS; i++)
__mutex_init(&torture_nested_mutexes[i], __func__,
&nested_mutex_keys[i]);
}
static int torture_mutex_nested_lock(int tid __maybe_unused,
u32 lockset)
{
int i;
for (i = 0; i < nested_locks; i++)
if (lockset & (1 << i))
mutex_lock(&torture_nested_mutexes[i]);
return 0;
}
static int torture_mutex_lock(int tid __maybe_unused)
__acquires(torture_mutex)
@@ -393,11 +473,24 @@ __releases(torture_mutex)
mutex_unlock(&torture_mutex);
}
static void torture_mutex_nested_unlock(int tid __maybe_unused,
u32 lockset)
{
int i;
for (i = nested_locks - 1; i >= 0; i--)
if (lockset & (1 << i))
mutex_unlock(&torture_nested_mutexes[i]);
}
static struct lock_torture_ops mutex_lock_ops = {
.init = torture_mutex_init,
.nested_lock = torture_mutex_nested_lock,
.writelock = torture_mutex_lock,
.write_delay = torture_mutex_delay,
.task_boost = torture_rt_boost,
.writeunlock = torture_mutex_unlock,
.nested_unlock = torture_mutex_nested_unlock,
.readlock = NULL,
.read_delay = NULL,
.readunlock = NULL,
@@ -504,6 +597,28 @@ static struct lock_torture_ops ww_mutex_lock_ops = {
#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);
static struct rt_mutex torture_nested_rtmutexes[MAX_NESTED_LOCKS];
static struct lock_class_key nested_rtmutex_keys[MAX_NESTED_LOCKS];
static void torture_rtmutex_init(void)
{
int i;
for (i = 0; i < MAX_NESTED_LOCKS; i++)
__rt_mutex_init(&torture_nested_rtmutexes[i], __func__,
&nested_rtmutex_keys[i]);
}
static int torture_rtmutex_nested_lock(int tid __maybe_unused,
u32 lockset)
{
int i;
for (i = 0; i < nested_locks; i++)
if (lockset & (1 << i))
rt_mutex_lock(&torture_nested_rtmutexes[i]);
return 0;
}
static int torture_rtmutex_lock(int tid __maybe_unused)
__acquires(torture_rtmutex)
@@ -545,11 +660,24 @@ static void torture_rt_boost_rtmutex(struct torture_random_state *trsp)
__torture_rt_boost(trsp);
}
static void torture_rtmutex_nested_unlock(int tid __maybe_unused,
u32 lockset)
{
int i;
for (i = nested_locks - 1; i >= 0; i--)
if (lockset & (1 << i))
rt_mutex_unlock(&torture_nested_rtmutexes[i]);
}
static struct lock_torture_ops rtmutex_lock_ops = {
.init = torture_rtmutex_init,
.nested_lock = torture_rtmutex_nested_lock,
.writelock = torture_rtmutex_lock,
.write_delay = torture_rtmutex_delay,
.task_boost = torture_rt_boost_rtmutex,
.writeunlock = torture_rtmutex_unlock,
.nested_unlock = torture_rtmutex_nested_unlock,
.readlock = NULL,
.read_delay = NULL,
.readunlock = NULL,
@@ -684,6 +812,8 @@ static int lock_torture_writer(void *arg)
struct lock_stress_stats *lwsp = arg;
int tid = lwsp - cxt.lwsa;
DEFINE_TORTURE_RANDOM(rand);
u32 lockset_mask;
bool skip_main_lock;
VERBOSE_TOROUT_STRING("lock_torture_writer task started");
set_user_nice(current, MAX_NICE);
@@ -692,19 +822,40 @@ static int lock_torture_writer(void *arg)
if ((torture_random(&rand) & 0xfffff) == 0)
schedule_timeout_uninterruptible(1);
cxt.cur_ops->task_boost(&rand);
cxt.cur_ops->writelock(tid);
if (WARN_ON_ONCE(lock_is_write_held))
lwsp->n_lock_fail++;
lock_is_write_held = true;
if (WARN_ON_ONCE(atomic_read(&lock_is_read_held)))
lwsp->n_lock_fail++; /* rare, but... */
lockset_mask = torture_random(&rand);
/*
* When using nested_locks, we want to occasionally
* skip the main lock so we can avoid always serializing
* the lock chains on that central lock. By skipping the
* main lock occasionally, we can create different
* contention patterns (allowing for multiple disjoint
* blocked trees)
*/
skip_main_lock = (nested_locks &&
!(torture_random(&rand) % 100));
lwsp->n_lock_acquired++;
cxt.cur_ops->task_boost(&rand);
if (cxt.cur_ops->nested_lock)
cxt.cur_ops->nested_lock(tid, lockset_mask);
if (!skip_main_lock) {
cxt.cur_ops->writelock(tid);
if (WARN_ON_ONCE(lock_is_write_held))
lwsp->n_lock_fail++;
lock_is_write_held = true;
if (WARN_ON_ONCE(atomic_read(&lock_is_read_held)))
lwsp->n_lock_fail++; /* rare, but... */
lwsp->n_lock_acquired++;
}
cxt.cur_ops->write_delay(&rand);
lock_is_write_held = false;
WRITE_ONCE(last_lock_release, jiffies);
cxt.cur_ops->writeunlock(tid);
if (!skip_main_lock) {
lock_is_write_held = false;
WRITE_ONCE(last_lock_release, jiffies);
cxt.cur_ops->writeunlock(tid);
}
if (cxt.cur_ops->nested_unlock)
cxt.cur_ops->nested_unlock(tid, lockset_mask);
stutter_wait("lock_torture_writer");
} while (!torture_must_stop());
@@ -845,11 +996,11 @@ lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
const char *tag)
{
pr_alert("%s" TORTURE_FLAG
"--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
"--- %s%s: nwriters_stress=%d nreaders_stress=%d nested_locks=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
torture_type, tag, cxt.debug_lock ? " [debug]": "",
cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
verbose, shuffle_interval, stutter, shutdown_secs,
onoff_interval, onoff_holdoff);
cxt.nrealwriters_stress, cxt.nrealreaders_stress,
nested_locks, stat_interval, verbose, shuffle_interval,
stutter, shutdown_secs, onoff_interval, onoff_holdoff);
}
static void lock_torture_cleanup(void)
@@ -919,6 +1070,7 @@ static int __init lock_torture_init(void)
static struct lock_torture_ops *torture_ops[] = {
&lock_busted_ops,
&spin_lock_ops, &spin_lock_irq_ops,
&raw_spin_lock_ops, &raw_spin_lock_irq_ops,
&rw_lock_ops, &rw_lock_irq_ops,
&mutex_lock_ops,
&ww_mutex_lock_ops,
@@ -1068,6 +1220,10 @@ static int __init lock_torture_init(void)
}
}
/* cap nested_locks to MAX_NESTED_LOCKS */
if (nested_locks > MAX_NESTED_LOCKS)
nested_locks = MAX_NESTED_LOCKS;
if (cxt.cur_ops->readlock) {
reader_tasks = kcalloc(cxt.nrealreaders_stress,
sizeof(reader_tasks[0]),
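
The nested_lock()/nested_unlock() hooks above always acquire the selected locks in ascending index order and release them in descending order, so every writer sees the same lock hierarchy and the nested locks alone cannot deadlock; independently, the main lock is skipped roughly 1% of the time (!(torture_random(&rand) % 100)) to vary the contention pattern. Below is a minimal userspace sketch of the same bitmask-driven discipline, using pthreads purely for illustration; none of this code is from the patch. Build with cc -pthread:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_NESTED_LOCKS 8

static pthread_mutex_t nested[MAX_NESTED_LOCKS];

/* Acquire every lock whose bit is set in lockset, lowest index first.
 * Because all threads use the same ascending order, the locks form a
 * consistent hierarchy and cannot deadlock against each other. */
static void nested_lock(uint32_t lockset, int nlocks)
{
        int i;

        for (i = 0; i < nlocks; i++)
                if (lockset & (1U << i))
                        pthread_mutex_lock(&nested[i]);
}

/* Release in descending order, mirroring the acquisition order. */
static void nested_unlock(uint32_t lockset, int nlocks)
{
        int i;

        for (i = nlocks - 1; i >= 0; i--)
                if (lockset & (1U << i))
                        pthread_mutex_unlock(&nested[i]);
}

int main(void)
{
        uint32_t mask = 0x2d; /* bits 0, 2, 3, 5: an arbitrary subset */
        int i;

        for (i = 0; i < MAX_NESTED_LOCKS; i++)
                pthread_mutex_init(&nested[i], NULL);
        nested_lock(mask, MAX_NESTED_LOCKS);
        /* critical section would run here */
        nested_unlock(mask, MAX_NESTED_LOCKS);
        printf("mask 0x%x locked and unlocked in hierarchy order\n",
               (unsigned)mask);
        return 0;
}

Any subset mask works: it is the fixed ascending/descending ordering, not the particular bits chosen, that prevents deadlock among the nested locks.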


@@ -5,3 +5,5 @@ LOCK04
LOCK05
LOCK06
LOCK07
LOCK08
LOCK09


@@ -0,0 +1,6 @@
CONFIG_SMP=y
CONFIG_NR_CPUS=4
CONFIG_HOTPLUG_CPU=y
CONFIG_PREEMPT_NONE=n
CONFIG_PREEMPT_VOLUNTARY=n
CONFIG_PREEMPT=y


@@ -0,0 +1 @@
locktorture.torture_type=mutex_lock locktorture.nested_locks=8


@@ -0,0 +1,6 @@
CONFIG_SMP=y
CONFIG_NR_CPUS=4
CONFIG_HOTPLUG_CPU=y
CONFIG_PREEMPT_NONE=n
CONFIG_PREEMPT_VOLUNTARY=n
CONFIG_PREEMPT=y


@@ -0,0 +1 @@
locktorture.torture_type=rtmutex_lock locktorture.nested_locks=8
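
The two new scenarios above (the mutex_lock and rtmutex_lock nested-lock configurations, presumably the LOCK08 and LOCK09 entries added to CFLIST; the file names themselves are not shown in this view) should be runnable through the usual rcutorture scripting. A sketch, assuming the standard kvm.sh wrapper in tools/testing/selftests/rcutorture/bin/ and a ten-minute run:

tools/testing/selftests/rcutorture/bin/kvm.sh --torture lock --configs "LOCK08 LOCK09" --duration 10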