rcutorture: Add test of holding scheduler locks across rcu_read_unlock()
Now that it should be safe to hold scheduler locks across rcu_read_unlock(), even in cases where the corresponding RCU read-side critical section might have been preempted and boosted, this commit adds a test of this capability to rcutorture. This has been tested on current mainline (which can deadlock in this situation), and lockdep duly reported the expected deadlock. On -rcu, lockdep is silent, thus far, anyway. Cc: Ingo Molnar <mingo@redhat.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Juri Lelli <juri.lelli@redhat.com> Cc: Vincent Guittot <vincent.guittot@linaro.org> Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
This commit is contained in:
parent
5f5fa7ea89
commit
52b1fc3f79
@ -1147,6 +1147,7 @@ static void rcutorture_one_extend(int *readstate, int newstate,
|
||||
struct torture_random_state *trsp,
|
||||
struct rt_read_seg *rtrsp)
|
||||
{
|
||||
unsigned long flags;
|
||||
int idxnew = -1;
|
||||
int idxold = *readstate;
|
||||
int statesnew = ~*readstate & newstate;
|
||||
@ -1181,8 +1182,15 @@ static void rcutorture_one_extend(int *readstate, int newstate,
|
||||
rcu_read_unlock_bh();
|
||||
if (statesold & RCUTORTURE_RDR_SCHED)
|
||||
rcu_read_unlock_sched();
|
||||
if (statesold & RCUTORTURE_RDR_RCU)
|
||||
if (statesold & RCUTORTURE_RDR_RCU) {
|
||||
bool lockit = !statesnew && !(torture_random(trsp) & 0xffff);
|
||||
|
||||
if (lockit)
|
||||
raw_spin_lock_irqsave(&current->pi_lock, flags);
|
||||
cur_ops->readunlock(idxold >> RCUTORTURE_RDR_SHIFT);
|
||||
if (lockit)
|
||||
raw_spin_unlock_irqrestore(&current->pi_lock, flags);
|
||||
}
|
||||
|
||||
/* Delay if neither beginning nor end and there was a change. */
|
||||
if ((statesnew || statesold) && *readstate && newstate)
|
||||
|
Loading…
x
Reference in New Issue
Block a user