powerpc/qspinlock: theft prevention to control latency

Give the queue head the ability to stop stealers. After a number of
spins without successfully acquiring the lock, the queue head sets
the must-queue bit, which halts stealing and ensures it will be the
next owner.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20221126095932.1234527-6-npiggin@gmail.com
Author:    Nicholas Piggin <npiggin@gmail.com>
Date:      2022-11-26 19:59:20 +10:00
Committer: Michael Ellerman
Commit:    0944534ef4 (parent 6aa42f883c)

2 changed files, 60 insertions(+), 1 deletion(-)

arch/powerpc/include/asm/qspinlock_types.h

@@ -29,7 +29,8 @@ typedef struct qspinlock {
  * Bitfields in the lock word:
  *
  *     0: locked bit
- * 1-16: unused bits
+ * 1-15: unused bits
+ *    16: must queue bit
  * 17-31: tail cpu (+1)
  */
 #define _Q_SET_MASK(type)	(((1U << _Q_ ## type ## _BITS) - 1)\
@@ -39,6 +40,11 @@ typedef struct qspinlock {
 #define _Q_LOCKED_BITS		1
 #define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
 
+/* 0x00010000 */
+#define _Q_MUST_Q_OFFSET	16
+#define _Q_MUST_Q_BITS		1
+#define _Q_MUST_Q_VAL		(1U << _Q_MUST_Q_OFFSET)
+
 /* 0xfffe0000 */
 #define _Q_TAIL_CPU_OFFSET	17
 #define _Q_TAIL_CPU_BITS	15
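
Taken together, the lock word now decomposes into the locked bit
(0x00000001), the must-queue bit (0x00010000), and the tail CPU
(0xfffe0000), with nothing overlapping. A small standalone userspace
sketch, with the relevant macros copied out by hand, prints the
resulting masks and round-trips a tail encoding:

    #include <stdint.h>
    #include <stdio.h>

    #define _Q_LOCKED_VAL      (1U << 0)
    #define _Q_MUST_Q_VAL      (1U << 16)
    #define _Q_TAIL_CPU_OFFSET 17
    #define _Q_TAIL_CPU_BITS   15
    #define _Q_TAIL_CPU_MASK   (((1U << _Q_TAIL_CPU_BITS) - 1) << _Q_TAIL_CPU_OFFSET)

    int main(void)
    {
        /* Matches the hex comments in the header. */
        printf("locked 0x%08x mustq 0x%08x tail 0x%08x\n",
               _Q_LOCKED_VAL, _Q_MUST_Q_VAL, _Q_TAIL_CPU_MASK);

        /* encode_tail_cpu() stores cpu+1 so a zero tail means "no waiter". */
        uint32_t tail = (uint32_t)(5 + 1) << _Q_TAIL_CPU_OFFSET;
        int cpu = (int)(tail >> _Q_TAIL_CPU_OFFSET) - 1; /* decode_tail_cpu() */
        printf("cpu 5 -> tail 0x%08x -> cpu %d\n", tail, cpu);
        return 0;
    }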

arch/powerpc/lib/qspinlock.c

@@ -22,6 +22,7 @@ struct qnodes {
 /* Tuning parameters */
 static int steal_spins __read_mostly = (1 << 5);
 static bool maybe_stealers __read_mostly = true;
+static int head_spins __read_mostly = (1 << 8);
 
 static DEFINE_PER_CPU_ALIGNED(struct qnodes, qnodes);
@@ -30,6 +31,11 @@ static __always_inline int get_steal_spins(void)
 	return steal_spins;
 }
 
+static __always_inline int get_head_spins(void)
+{
+	return head_spins;
+}
+
 static inline u32 encode_tail_cpu(int cpu)
 {
 	return (cpu + 1) << _Q_TAIL_CPU_OFFSET;
@@ -104,6 +110,22 @@ static __always_inline u32 publish_tail_cpu(struct qspinlock *lock, u32 tail)
 	return prev;
 }
 
+static __always_inline u32 set_mustq(struct qspinlock *lock)
+{
+	u32 prev;
+
+	asm volatile(
+"1:	lwarx	%0,0,%1		# set_mustq			\n"
+"	or	%0,%0,%2					\n"
+"	stwcx.	%0,0,%1						\n"
+"	bne-	1b						\n"
+	: "=&r" (prev)
+	: "r" (&lock->val), "r" (_Q_MUST_Q_VAL)
+	: "cr0", "memory");
+
+	return prev;
+}
+
 static struct qnode *get_tail_qnode(struct qspinlock *lock, u32 val)
 {
 	int cpu = decode_tail_cpu(val);
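
set_mustq() is an atomic fetch-OR built from a lwarx/stwcx. loop: it
reloads the lock word until the store-conditional succeeds. Note that
the OR is applied in-register before the store, so the returned "prev"
actually holds the updated word, not the pre-OR value. There is also no
barrier instruction in the sequence; the "memory" clobber acts only as
a compiler barrier, since no ordering is needed merely to publish the
bit. A rough C11-atomics equivalent, offered as a sketch only (the
kernel uses the inline asm above):

    #include <stdatomic.h>
    #include <stdint.h>

    #define _Q_MUST_Q_VAL (1U << 16)

    /* Sketch: atomically set the must-queue bit, return the updated word. */
    static inline uint32_t set_mustq_sketch(_Atomic uint32_t *val)
    {
        /* Relaxed: publishing the bit requires no memory ordering here. */
        uint32_t old = atomic_fetch_or_explicit(val, _Q_MUST_Q_VAL,
                                                memory_order_relaxed);
        return old | _Q_MUST_Q_VAL;
    }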
@@ -139,6 +161,9 @@ static inline bool try_to_steal_lock(struct qspinlock *lock)
 	do {
 		u32 val = READ_ONCE(lock->val);
 
+		if (val & _Q_MUST_Q_VAL)
+			break;
+
 		if (unlikely(!(val & _Q_LOCKED_VAL))) {
 			if (__queued_spin_trylock_steal(lock))
 				return true;
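
The effect is that a would-be stealer now polls the must-queue bit on
every iteration and abandons stealing the moment it appears, falling
back to joining the queue. Condensed, the stealing loop has roughly
this shape (a simplification reusing the helpers shown in this diff,
not the verbatim kernel function):

    /* Simplified: spin trying to steal, but respect the must-queue bit. */
    static bool try_to_steal_lock_sketch(struct qspinlock *lock)
    {
        int iters = 0;

        do {
            u32 val = READ_ONCE(lock->val);

            if (val & _Q_MUST_Q_VAL)
                break;                  /* queue head demands fairness */

            if (!(val & _Q_LOCKED_VAL)) {
                if (__queued_spin_trylock_steal(lock))
                    return true;        /* stole the lock */
            } else {
                cpu_relax();
            }
            iters++;
        } while (iters < get_steal_spins());

        return false;                   /* give up and queue instead */
    }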
@@ -157,7 +182,9 @@ static inline void queued_spin_lock_mcs_queue(struct qspinlock *lock)
 	struct qnodes *qnodesp;
 	struct qnode *next, *node;
 	u32 val, old, tail;
+	bool mustq = false;
 	int idx;
+	int iters = 0;
 
 	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
@@ -209,6 +236,15 @@ again:
 			break;
 
 		cpu_relax();
+		if (!maybe_stealers)
+			continue;
+		iters++;
+
+		if (!mustq && iters >= get_head_spins()) {
+			mustq = true;
+			set_mustq(lock);
+			val |= _Q_MUST_Q_VAL;
+		}
 	}
 
 	/* If we're the last queued, must clean up the tail. */
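
The bookkeeping here is deliberately cheap. Iterations are only counted
when stealing is possible at all (!maybe_stealers skips straight back
to spinning), and the local mustq flag ensures set_mustq() runs at most
once per acquisition, so the contended lock cacheline is not hammered
with redundant atomic ORs; the head also folds the bit into its local
val so later snapshots agree with what it just published. With the
defaults above, a stealer gives up after 32 (1 << 5) spins, while the
queue head tolerates 256 (1 << 8) spins before shutting stealing down,
bounding how long the head can be starved.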
@@ -293,9 +329,26 @@ static int steal_spins_get(void *data, u64 *val)
 
 DEFINE_SIMPLE_ATTRIBUTE(fops_steal_spins, steal_spins_get, steal_spins_set, "%llu\n");
 
+static int head_spins_set(void *data, u64 val)
+{
+	head_spins = val;
+
+	return 0;
+}
+
+static int head_spins_get(void *data, u64 *val)
+{
+	*val = head_spins;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(fops_head_spins, head_spins_get, head_spins_set, "%llu\n");
+
 static __init int spinlock_debugfs_init(void)
 {
 	debugfs_create_file("qspl_steal_spins", 0600, arch_debugfs_dir, NULL, &fops_steal_spins);
+	debugfs_create_file("qspl_head_spins", 0600, arch_debugfs_dir, NULL, &fops_head_spins);
 
 	return 0;
 }
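
Both knobs are writable at runtime through debugfs. On powerpc,
arch_debugfs_dir is the arch-specific powerpc/ directory, so assuming
debugfs is mounted at the usual /sys/kernel/debug, the files appear as
/sys/kernel/debug/powerpc/qspl_steal_spins and
/sys/kernel/debug/powerpc/qspl_head_spins; echoing a decimal value into
either retunes the thresholds live, which makes it straightforward to
experiment with the fairness/throughput trade-off under a real workload.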