dda5f312bb
The sync_*() ops on arch/arm are defined in terms of the regular bitops
with no special handling. This is not correct, as UP kernels elide
barriers for the fully-ordered operations, and so the required ordering
is lost when such UP kernels are run under a hypervisor on an SMP
system.
Fix this by defining sync ops with the required barriers.
Note: On 32-bit arm, the sync_*() ops are currently only used by Xen,
which requires ARMv7, but the semantics can be implemented for ARMv6+.
Fixes: e54d2f6152 ("xen/arm: sync_bitops")
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-2-mark.rutland@arm.com
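For illustration only, here is a minimal C-level sketch of the "barriers around a relaxed read-modify-write" shape that the fully-ordered sync ops need, using DMBs that are emitted even on !CONFIG_SMP builds. This is not the patch itself: the kernel implements _sync_test_and_set_bit() and friends in ARM assembly, and the function name, the GCC __atomic_fetch_or() builtin, and the hard-coded ARMv7 "dmb ish" below are assumptions made for the example.

/*
 * Illustrative sketch, not the kernel's implementation: a fully-ordered
 * test-and-set built from a relaxed atomic RMW bracketed by barriers that
 * are present even on UP kernels. Assumes 32-bit ARM (BITS_PER_LONG == 32)
 * and ARMv7 ("dmb ish").
 */
static inline int sketch_sync_test_and_set_bit(int nr, volatile unsigned long *p)
{
	unsigned long mask = 1UL << (nr & 31);
	unsigned long old;

	p += nr >> 5;

	__asm__ __volatile__("dmb ish" ::: "memory");	/* order earlier accesses before the RMW */
	old = __atomic_fetch_or((unsigned long *)p, mask, __ATOMIC_RELAXED);
	__asm__ __volatile__("dmb ish" ::: "memory");	/* order the RMW before later accesses */

	return (old & mask) != 0;
}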
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SYNC_BITOPS_H__
#define __ASM_SYNC_BITOPS_H__

#include <asm/bitops.h>

/* sync_bitops functions are equivalent to the SMP implementation of the
 * original functions, independently from CONFIG_SMP being defined.
 *
 * We need them because _set_bit etc are not SMP safe if !CONFIG_SMP. But
 * under Xen you might be communicating with a completely external entity
 * who might be on another CPU (e.g. two uniprocessor guests communicating
 * via event channels and grant tables). So we need a variant of the bit
 * ops which are SMP safe even on a UP kernel.
 */

/*
 * Unordered
 */

#define sync_set_bit(nr, p)		_set_bit(nr, p)
#define sync_clear_bit(nr, p)		_clear_bit(nr, p)
#define sync_change_bit(nr, p)		_change_bit(nr, p)
#define sync_test_bit(nr, addr)		test_bit(nr, addr)

/*
 * Fully ordered
 */

int _sync_test_and_set_bit(int nr, volatile unsigned long * p);
#define sync_test_and_set_bit(nr, p)	_sync_test_and_set_bit(nr, p)

int _sync_test_and_clear_bit(int nr, volatile unsigned long * p);
#define sync_test_and_clear_bit(nr, p)	_sync_test_and_clear_bit(nr, p)

int _sync_test_and_change_bit(int nr, volatile unsigned long * p);
#define sync_test_and_change_bit(nr, p)	_sync_test_and_change_bit(nr, p)

#define arch_sync_cmpxchg(ptr, old, new)				\
({									\
	__typeof__(*(ptr)) __ret;					\
	__smp_mb__before_atomic();					\
	__ret = arch_cmpxchg_relaxed((ptr), (old), (new));		\
	__smp_mb__after_atomic();					\
	__ret;								\
})

#endif
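As a usage illustration of the interface above: a guest producing work on a page shared with another domain uses the sync ops rather than the plain bitops, because the peer may be running on another physical CPU even if both guests are UP. The shared_pending bitmap and the example_* functions below are hypothetical stand-ins, not Xen's actual event-channel code.

#include <asm/sync_bitops.h>

/* Hypothetical bitmap living in a page shared with another guest. */
extern unsigned long shared_pending[4];

/* Producer side: publish event 'port' to the peer domain. */
static void example_notify(int port)
{
	/* Fully ordered: data written before this call is ordered before
	 * the bit becomes visible to the peer. */
	sync_test_and_set_bit(port, shared_pending);
}

/* Consumer side: claim one pending event, if any. */
static int example_consume(int port)
{
	/* Also fully ordered: work done after a successful claim cannot
	 * be reordered before the clear. */
	return sync_test_and_clear_bit(port, shared_pending);
}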