#ifndef __SPARC64_BARRIER_H
#define __SPARC64_BARRIER_H

/* These are here in an effort to more fully work around Spitfire Errata
 * #51. Essentially, if a memory barrier occurs soon after a mispredicted
 * branch, the chip can stop executing instructions until a trap occurs.
 * Therefore, if interrupts are disabled, the chip can hang forever.
 *
 * It used to be believed that the memory barrier had to be right in the
 * delay slot, but a case has been traced recently wherein the memory barrier
 * was one instruction after the branch delay slot and the chip still hung.
 * The offending sequence was the following in sym_wakeup_done() of the
 * sym53c8xx_2 driver:
 *
 *	call	sym_ccb_from_dsa, 0
 *	 movge	%icc, 0, %l0
 *	brz,pn	%o0, .LL1303
 *	 mov	%o0, %l2
 *	membar	#LoadLoad
 *
 * The branch has to be mispredicted for the bug to occur. Therefore, we put
 * the memory barrier explicitly into a "branch always, predicted taken"
 * delay slot to avoid the problem case.
 */
#define membar_safe(type) \
do {	__asm__ __volatile__("ba,pt	%%xcc, 1f\n\t" \
			     " membar	" type "\n" \
			     "1:\n" \
			     : : : "memory"); \
} while (0)

/* The kernel always executes in TSO memory model these days,
 * and furthermore most sparc64 chips implement more stringent
 * memory ordering than required by the specifications.
 */
#define mb()	membar_safe("#StoreLoad")
#define rmb()	__asm__ __volatile__("":::"memory")
#define wmb()	__asm__ __volatile__("":::"memory")
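
/* A minimal illustration (not part of this header, names hypothetical) of
 * why mb() needs a real membar while rmb()/wmb() are compiler-only
 * barriers: TSO still permits a store to be reordered with a *later* load
 * of a different location, so Dekker-style mutual exclusion requires
 * #StoreLoad:
 *
 *	WRITE_ONCE(flag[self], 1);
 *	mb();				// order the store before the load
 *	if (!READ_ONCE(flag[other]))
 *		...enter critical section...
 *
 * Loads are not reordered against earlier loads, and stores are not
 * reordered against earlier stores under TSO, hence the empty asm bodies
 * above suffice for rmb() and wmb().
 */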

#define __smp_store_release(p, v)					\
do {									\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	WRITE_ONCE(*p, v);						\
} while (0)

#define __smp_load_acquire(p)						\
({									\
	typeof(*p) ___p1 = READ_ONCE(*p);				\
	compiletime_assert_atomic_type(*p);				\
	barrier();							\
	___p1;								\
})
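
/* Usage sketch (hypothetical producer/consumer, not code from this file):
 * a store-release publishes data such that a paired load-acquire on
 * another CPU observes everything written before the release:
 *
 *	// CPU 0 (producer)		// CPU 1 (consumer)
 *	data = 42;			while (!smp_load_acquire(&ready))
 *	smp_store_release(&ready, 1);		cpu_relax();
 *					use(data);	// sees 42
 *
 * On sparc64's TSO model the hardware ordering comes for free, so only
 * barrier() plus READ_ONCE()/WRITE_ONCE() is needed here.
 */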

#define __smp_mb__before_atomic()	barrier()
#define __smp_mb__after_atomic()	barrier()
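
/* Sketch of the intended pairing (illustrative only): atomic RMW ops that
 * do not return a value, such as atomic_inc(), are unordered by default,
 * so callers needing ordering bracket them:
 *
 *	smp_mb__before_atomic();
 *	atomic_inc(&v);
 *	smp_mb__after_atomic();
 *
 * Since the kernel runs sparc64 in TSO mode, a compiler barrier is
 * sufficient on this architecture.
 */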

#include <asm-generic/barrier.h>

#endif /* !(__SPARC64_BARRIER_H) */