d646285885
Rather than relying on the core code to use smp_read_barrier_depends() as part of the READ_ONCE() definition, instead override __READ_ONCE() in the Alpha code so that it generates the required mb(), and then implement smp_load_acquire() using the new macro to avoid redundant back-to-back barriers from the generic implementation.

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Will Deacon <will@kernel.org>
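The __READ_ONCE() override itself lives outside the header shown below. As a rough, illustrative sketch of the approach the commit message describes (not the exact code the commit adds, and assuming the kernel's __unqual_scalar_typeof() helper), the arch-specific macro performs the volatile load and then issues mb(), so dependency ordering is already provided by the time smp_load_acquire() wraps it:

/*
 * Illustrative sketch only -- not part of barrier.h below. An Alpha-specific
 * __READ_ONCE() can follow the dependent load with mb(), ordering address
 * dependencies without smp_read_barrier_depends() in core code.
 */
#define __READ_ONCE(x)						\
({								\
	__unqual_scalar_typeof(x) __x =				\
		(*(volatile typeof(__x) *)&(x));		\
	mb();							\
	(typeof(x))__x;						\
})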
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __BARRIER_H
#define __BARRIER_H

#define mb()	__asm__ __volatile__("mb": : :"memory")
#define rmb()	__asm__ __volatile__("mb": : :"memory")
#define wmb()	__asm__ __volatile__("wmb": : :"memory")

/*
 * Alpha's __READ_ONCE() already issues the mb() needed for dependency
 * ordering, so acquire semantics come for free and defining
 * __smp_load_acquire() here avoids the redundant back-to-back barriers
 * the generic implementation would otherwise emit.
 */
#define __smp_load_acquire(p)						\
({									\
	compiletime_assert_atomic_type(*p);				\
	__READ_ONCE(*p);						\
})

#ifdef CONFIG_SMP
#define __ASM_SMP_MB	"\tmb\n"
#else
#define __ASM_SMP_MB
#endif

#include <asm-generic/barrier.h>

#endif /* __BARRIER_H */
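As a usage note (illustrative only, not part of the header): smp_load_acquire() pairs with smp_store_release() so that everything written before the release is visible to a reader that observes the released value. The names shared_data, ready, compute() and use() below are hypothetical placeholders:

/* Hypothetical acquire/release pairing, not taken from the commit. */

/* Writer */
shared_data = compute();	/* plain store */
smp_store_release(&ready, 1);	/* publish: orders the store above */

/* Reader */
if (smp_load_acquire(&ready))	/* on Alpha: __READ_ONCE() already has mb() */
	use(shared_data);	/* guaranteed to see the writer's data */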