/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_BARRIER_H
#define _ASM_POWERPC_BARRIER_H
/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 * across this point (nop on PPC).
 *
 * *mb() variants without smp_ prefix must order all types of memory
 * operations with one another.  sync is the only instruction sufficient
 * to do this.
 *
 * For the smp_ barriers, ordering is for cacheable memory operations
 * only.  We have to use the sync instruction for smp_mb(), since lwsync
 * doesn't order loads with respect to previous stores.  Lwsync can be
 * used for smp_rmb() and smp_wmb().
 *
 * However, on CPUs that don't support lwsync, lwsync actually maps to a
 * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
 */
#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
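
/*
 * Illustrative sketch, not part of this header: because mb() and wmb()
 * expand to sync, they also order stores to non-cacheable (I/O) space,
 * e.g. when a driver fills a buffer and then pokes a hypothetical
 * memory-mapped doorbell register (buf, regs and doorbell are made-up
 * names for the example):
 *
 *	buf->len = len;			(cacheable store)
 *	wmb();				(order it before the MMIO store)
 *	regs->doorbell = 1;		(non-cacheable store to the device)
 */
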
#ifdef __SUBARCH_HAS_LWSYNC
#    define SMPWMB      LWSYNC
#else
#    define SMPWMB      eieio
#endif

#define __lwsync()	__asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
#define dma_rmb()	__lwsync()
#define dma_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
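
/*
 * Illustrative sketch, not part of this header: dma_wmb() orders stores
 * to coherent DMA memory, e.g. filling a hypothetical descriptor before
 * flipping its ownership bit over to the device (desc, dma_addr and
 * DESC_OWN are made-up names for the example):
 *
 *	desc->addr  = cpu_to_le64(dma_addr);
 *	desc->len   = cpu_to_le32(len);
 *	dma_wmb();
 *	desc->flags = cpu_to_le32(DESC_OWN);
 *
 * dma_rmb() is the read-side counterpart when the CPU polls such a
 * descriptor for completion.
 */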

#define __smp_lwsync()	__lwsync()

#define __smp_mb()	mb()
#define __smp_rmb()	__lwsync()
#define __smp_wmb()	__asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
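
/*
 * Illustrative sketch, not part of this header: the smp_ barriers pair
 * up between CPUs for ordinary cacheable memory, e.g. a hypothetical
 * flag/data handoff (data and flag are made-up shared variables):
 *
 *	CPU 0				CPU 1
 *	data = 42;			if (READ_ONCE(flag)) {
 *	smp_wmb();				smp_rmb();
 *	WRITE_ONCE(flag, 1);			BUG_ON(data != 42);
 *					}
 */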

/*
 * This is a barrier which prevents following instructions from being
 * started until the value of the argument x is known.  For example, if
 * x is a variable loaded from memory, this prevents following
 * instructions from being executed until the load has been performed.
 */
#define data_barrier(x)	\
	asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory");
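
/*
 * Illustrative sketch, not part of this header: data_barrier() makes
 * later accesses wait on an earlier load, e.g. for a hypothetical index
 * read from a ring whose entries are updated by another agent (ring,
 * head and slot are made-up names for the example):
 *
 *	idx = ring->head;		(load from memory)
 *	data_barrier(idx);		(nothing below starts until idx is known)
 *	entry = ring->slot[idx];	(cannot be started before the load completes)
 */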

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	__smp_lwsync();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	__smp_lwsync();							\
	___p1;								\
})
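
/*
 * Illustrative sketch, not part of this header: the generic
 * smp_store_release()/smp_load_acquire() wrappers built on the __smp_
 * variants above pair a release store with an acquire load, e.g. when
 * publishing a hypothetical object through a pointer (obj, published
 * and ready are made-up names for the example):
 *
 *	CPU 0					CPU 1
 *	obj->ready = 1;				p = smp_load_acquire(&published);
 *	smp_store_release(&published, obj);	if (p)
 *							BUG_ON(!p->ready);
 */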

#define smp_mb__before_spinlock()	smp_mb()

#include <asm-generic/barrier.h>

#endif /* _ASM_POWERPC_BARRIER_H */