locking/qspinlock: Use smp_store_release() in queued_spin_unlock()
A qspinlock can be unlocked simply by writing zero to the locked byte.
This can be implemented in the generic code, so do that and remove the
arch-specific override for x86 in the !PV case.

Signed-off-by: Will Deacon <will.deacon@arm.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Waiman Long <longman@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: boqun.feng@gmail.com
Cc: linux-arm-kernel@lists.infradead.org
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1524738868-31318-11-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 626e5fbc14
parent c131a198c4
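For background, the qspinlock word packs the lock-holder byte, the pending
bit, and the MCS tail into one 32-bit value, with the locked byte as the
least-significant byte on little-endian. A minimal userspace C11 sketch of
the old RMW unlock versus the new release store (illustrative only, not
kernel code; sketch_qspinlock, unlock_sub and unlock_store are hypothetical
names, with the field layout mirroring the kernel's struct qspinlock):

    #include <stdatomic.h>
    #include <stdint.h>

    struct sketch_qspinlock {
            union {
                    _Atomic uint32_t val;           /* whole lock word */
                    struct {
                            _Atomic uint8_t locked; /* LSB on little-endian */
                            uint8_t pending;
                            uint16_t tail;
                    };
            };
    };

    #define Q_LOCKED_VAL 1U

    /* Old generic unlock: atomic RMW on the whole word, release order. */
    static void unlock_sub(struct sketch_qspinlock *l)
    {
            atomic_fetch_sub_explicit(&l->val, Q_LOCKED_VAL,
                                      memory_order_release);
    }

    /* New generic unlock: plain release store of zero to the locked
     * byte; pending/tail bits are never touched, so no RMW is needed. */
    static void unlock_store(struct sketch_qspinlock *l)
    {
            atomic_store_explicit(&l->locked, 0, memory_order_release);
    }

    int main(void)
    {
            struct sketch_qspinlock l = { .val = Q_LOCKED_VAL };

            unlock_store(&l);   /* lock word is now fully zero */
            return (int)atomic_load(&l.val);
    }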
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
--- a/arch/x86/include/asm/qspinlock.h
+++ b/arch/x86/include/asm/qspinlock.h
@@ -9,6 +9,12 @@
 
 #define _Q_PENDING_LOOPS	(1 << 9)
 
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_init_lock_hash(void);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
+
 #define queued_spin_unlock queued_spin_unlock
 /**
  * queued_spin_unlock - release a queued spinlock
@@ -21,12 +27,6 @@ static inline void native_queued_spin_unlock(struct qspinlock *lock)
 	smp_store_release(&lock->locked, 0);
 }
 
-#ifdef CONFIG_PARAVIRT_SPINLOCKS
-extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
-extern void __pv_init_lock_hash(void);
-extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
-extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
-
 static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 {
 	pv_queued_spin_lock_slowpath(lock, val);
@@ -42,11 +42,6 @@ static inline bool vcpu_is_preempted(long cpu)
 {
 	return pv_vcpu_is_preempted(cpu);
 }
-#else
-static inline void queued_spin_unlock(struct qspinlock *lock)
-{
-	native_queued_spin_unlock(lock);
-}
 #endif
 
 #ifdef CONFIG_PARAVIRT
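Net effect on the x86 header (a condensed sketch pieced together from the
hunks above; pv_queued_spin_unlock() itself lies outside the diff context
shown, so treat this as an assumption about the surrounding file): the
unlock override now exists only in the PV build, while the !PV build falls
through to the generic version.

    #ifdef CONFIG_PARAVIRT_SPINLOCKS
    /* PV build: keep an override so unlock can reach the PV slowpath. */
    #define queued_spin_unlock queued_spin_unlock
    static inline void queued_spin_unlock(struct qspinlock *lock)
    {
            pv_queued_spin_unlock(lock);
    }
    #endif
    /*
     * !PV build: no override is defined, so include/asm-generic/qspinlock.h
     * now supplies queued_spin_unlock() as smp_store_release(&lock->locked, 0).
     */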
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -100,7 +100,7 @@ static __always_inline void queued_spin_unlock(struct qspinlock *lock)
 	/*
 	 * unlock() needs release semantics:
 	 */
-	(void)atomic_sub_return_release(_Q_LOCKED_VAL, &lock->val);
+	smp_store_release(&lock->locked, 0);
 }
 #endif
 
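This is the single functional change: the generic unlock drops the atomic
RMW in favour of a plain release store. That is safe because the locked
byte always holds _Q_LOCKED_VAL (== 1) at unlock time, so subtracting
_Q_LOCKED_VAL from the whole word and zeroing the low byte yield the same
value; a quick standalone check of that arithmetic (the val pattern below
is a hypothetical example, not kernel code):

    #include <assert.h>
    #include <stdint.h>

    #define _Q_LOCKED_VAL 1U

    int main(void)
    {
            /* Arbitrary pending/tail bits; the locked (low) byte holds
             * _Q_LOCKED_VAL, which is the invariant at unlock time. */
            uint32_t val = 0xabcd0100U | _Q_LOCKED_VAL;

            uint32_t by_sub   = val - _Q_LOCKED_VAL; /* old: atomic_sub_return_release() */
            uint32_t by_store = val & ~0xffU;        /* new: store zero to the low byte */

            assert(by_sub == by_store);
            return 0;
    }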