arch,sparc: Convert smp_mb__*()
sparc32: fully relies on asm-generic/barrier.h and thus can use its
implementation.

sparc64: is strongly ordered and its atomic ops imply a full barrier;
implement the new primitives using barrier().

Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Acked-by: David S. Miller <davem@davemloft.net>
Link: http://lkml.kernel.org/n/tip-2cla9ubpd8chrntnm7e4zdt4@git.kernel.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: linux-kernel@vger.kernel.org
Cc: sparclinux@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 56d3648948
parent 603228bcb8
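For context, a minimal sketch of how a caller uses the converted primitives; the function and variable names are illustrative, not taken from this patch. The old per-operation barriers (smp_mb__before_atomic_dec(), smp_mb__before_clear_bit(), ...) are folded into the single smp_mb__before_atomic()/smp_mb__after_atomic() pair, which this patch defines as barrier() on sparc64:

	#include <linux/atomic.h>
	#include <linux/bitops.h>

	/* Illustrative caller, not part of this patch; all names are made up. */
	static void example_release(atomic_t *refcnt, unsigned long *pending)
	{
		smp_mb__before_atomic();	/* order prior accesses before the RMW */
		clear_bit(0, pending);		/* atomic RMW with no implied barrier */

		atomic_dec(refcnt);
		smp_mb__after_atomic();		/* order the RMW before later accesses */
	}

Since sparc64's atomic ops already imply a full memory barrier, both macros emit no instruction; barrier() only keeps the compiler from reordering accesses across them.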
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 
 #include <asm/cmpxchg.h>
+#include <asm/barrier.h>
 #include <asm-generic/atomic64.h>
 
 
@@ -52,10 +53,4 @@ extern void atomic_set(atomic_t *, int);
 #define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
 
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
 #endif /* !(__ARCH_SPARC_ATOMIC__) */
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -9,6 +9,7 @@
 
 #include <linux/types.h>
 #include <asm/cmpxchg.h>
+#include <asm/barrier.h>
 
 #define ATOMIC_INIT(i) { (i) }
 #define ATOMIC64_INIT(i) { (i) }
@@ -108,10 +109,4 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
 
 extern long atomic64_dec_if_positive(atomic64_t *v);
 
-/* Atomic operations are already serializing */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
 #endif /* !(__ARCH_SPARC64_ATOMIC__) */
--- a/arch/sparc/include/asm/barrier_64.h
+++ b/arch/sparc/include/asm/barrier_64.h
@@ -68,4 +68,7 @@ do { \
 	___p1; \
 })
 
+#define smp_mb__before_atomic() barrier()
+#define smp_mb__after_atomic() barrier()
+
 #endif /* !(__SPARC64_BARRIER_H) */
--- a/arch/sparc/include/asm/bitops_32.h
+++ b/arch/sparc/include/asm/bitops_32.h
@@ -90,9 +90,6 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 
 #include <asm-generic/bitops/non-atomic.h>
 
-#define smp_mb__before_clear_bit() do { } while(0)
-#define smp_mb__after_clear_bit() do { } while(0)
-
 #include <asm-generic/bitops/ffz.h>
 #include <asm-generic/bitops/__ffs.h>
 #include <asm-generic/bitops/sched.h>
--- a/arch/sparc/include/asm/bitops_64.h
+++ b/arch/sparc/include/asm/bitops_64.h
@@ -13,6 +13,7 @@
 
 #include <linux/compiler.h>
 #include <asm/byteorder.h>
+#include <asm/barrier.h>
 
 extern int test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
 extern int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
@@ -23,9 +24,6 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr);
 
 #include <asm-generic/bitops/non-atomic.h>
 
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
-
 #include <asm-generic/bitops/fls.h>
 #include <asm-generic/bitops/__fls.h>
 #include <asm-generic/bitops/fls64.h>