locking: Remove smp_read_barrier_depends() from queued_spin_lock_slowpath()
Queued spinlocks are not used by DEC Alpha, and furthermore operations such as READ_ONCE() and release/relaxed RMW atomics are being changed to imply smp_read_barrier_depends(). This commit therefore removes the now-redundant smp_read_barrier_depends() from queued_spin_lock_slowpath(), and adjusts the comments accordingly.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@redhat.com>
This commit is contained in:
@@ -170,7 +170,7 @@ static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
  * @tail : The new queue tail code word
  * Return: The previous queue tail code word
  *
- * xchg(lock, tail)
+ * xchg(lock, tail), which heads an address dependency
  *
  * p,*,* -> n,*,* ; prev = xchg(lock, node)
  */
@@ -409,13 +409,11 @@ queue:
 	if (old & _Q_TAIL_MASK) {
 		prev = decode_tail(old);

 		/*
-		 * The above xchg_tail() is also a load of @lock which generates,
-		 * through decode_tail(), a pointer.
-		 *
-		 * The address dependency matches the RELEASE of xchg_tail()
-		 * such that the access to @prev must happen after.
+		 * The above xchg_tail() is also a load of @lock which
+		 * generates, through decode_tail(), a pointer. The address
+		 * dependency matches the RELEASE of xchg_tail() such that
+		 * the subsequent access to @prev happens after.
 		 */
-		smp_read_barrier_depends();

 		WRITE_ONCE(prev->next, node);
|
Reference in New Issue
Block a user