/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
*/
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/build_bug.h>
#include <linux/compiler.h>

#include <asm/barrier.h>
#include <asm/lse.h>

/*
 * We need separate acquire parameters for ll/sc and lse, since the full
 * barrier case is generated as release+dmb for the former and
 * acquire+release for the latter.
 */
#define __XCHG_CASE(w, sfx, name, sz, mb, nop_lse, acq, acq_lse, rel, cl)	\
static inline u##sz __xchg_case_##name##sz(u##sz x, volatile void *ptr)	\
{										\
	u##sz ret;								\
	unsigned long tmp;							\
										\
	asm volatile(ARM64_LSE_ATOMIC_INSN(					\
	/* LL/SC */								\
	"	prfm	pstl1strm, %2\n"					\
	"1:	ld" #acq "xr" #sfx "\t%" #w "0, %2\n"				\
	"	st" #rel "xr" #sfx "\t%w1, %" #w "3, %2\n"			\
	"	cbnz	%w1, 1b\n"						\
	"	" #mb,								\
	/* LSE atomics */							\
	"	swp" #acq_lse #rel #sfx "\t%" #w "3, %" #w "0, %2\n"		\
		__nops(3)							\
	"	" #nop_lse)							\
	: "=&r" (ret), "=&r" (tmp), "+Q" (*(u##sz *)ptr)			\
	: "r" (x)								\
	: cl);									\
										\
	return ret;								\
}

__XCHG_CASE(w, b,     ,  8,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, h,     , 16,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w,  ,     , 32,        ,    ,  ,  ,  ,         )
__XCHG_CASE( ,  ,     , 64,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, b, acq_,  8,        ,    , a, a,  , "memory")
__XCHG_CASE(w, h, acq_, 16,        ,    , a, a,  , "memory")
__XCHG_CASE(w,  , acq_, 32,        ,    , a, a,  , "memory")
__XCHG_CASE( ,  , acq_, 64,        ,    , a, a,  , "memory")
__XCHG_CASE(w, b, rel_,  8,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, h, rel_, 16,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w,  , rel_, 32,        ,    ,  ,  , l, "memory")
__XCHG_CASE( ,  , rel_, 64,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, b,  mb_,  8, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w, h,  mb_, 16, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w,  ,  mb_, 32, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE( ,  ,  mb_, 64, dmb ish, nop,  , a, l, "memory")

#undef __XCHG_CASE
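
/*
 * Illustration only (hand expansion, not compiled code): the fully
 * ordered 32-bit case, __xchg_case_mb_32(), comes out roughly as
 *
 *	LL/SC:					LSE:
 *		prfm	pstl1strm, [ptr]	swpal	w<x>, w<ret>, [ptr]
 *	1:	ldxr	w<ret>, [ptr]		nop
 *		stlxr	w<tmp>, w<x>, [ptr]	nop
 *		cbnz	w<tmp>, 1b		nop
 *		dmb	ish			nop
 *
 * with one of the two sequences patched in depending on whether the CPU
 * supports the LSE atomics.
 */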

#define __XCHG_GEN(sfx)							\
static __always_inline unsigned long __xchg##sfx(unsigned long x,	\
					volatile void *ptr,		\
					int size)			\
{									\
	switch (size) {							\
	case 1:								\
		return __xchg_case##sfx##_8(x, ptr);			\
	case 2:								\
		return __xchg_case##sfx##_16(x, ptr);			\
	case 4:								\
		return __xchg_case##sfx##_32(x, ptr);			\
	case 8:								\
		return __xchg_case##sfx##_64(x, ptr);			\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__XCHG_GEN()
__XCHG_GEN(_acq)
__XCHG_GEN(_rel)
__XCHG_GEN(_mb)

#undef __XCHG_GEN
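
/*
 * The four instantiations above generate __xchg(), __xchg_acq(),
 * __xchg_rel() and __xchg_mb(), each dispatching on operand size at
 * compile time; BUILD_BUG() rejects any size other than 1, 2, 4 or 8.
 */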

/*
 * Assigning the result to a dummy __ret keeps the compiler quiet
 * (-Wunused-value) when callers ignore xchg()'s return value.
 */
#define __xchg_wrapper(sfx, ptr, x)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
	__ret;								\
})

/* xchg */
#define arch_xchg_relaxed(...)	__xchg_wrapper(    , __VA_ARGS__)
#define arch_xchg_acquire(...)	__xchg_wrapper(_acq, __VA_ARGS__)
#define arch_xchg_release(...)	__xchg_wrapper(_rel, __VA_ARGS__)
#define arch_xchg(...)		__xchg_wrapper( _mb, __VA_ARGS__)
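
/*
 * Usage sketch (illustrative; 'flag' is a hypothetical u32 variable):
 *
 *	u32 old = arch_xchg(&flag, 1);			// fully ordered swap
 *	u32 prev = arch_xchg_relaxed(&flag, 0);		// no ordering implied
 *
 * Generic code normally reaches these through the xchg()/xchg_relaxed()
 * wrappers layered on top by the common atomics headers.
 */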

#define __CMPXCHG_CASE(name, sz)					\
static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr,	\
					      u##sz old,		\
					      u##sz new)		\
{									\
	return __lse_ll_sc_body(_cmpxchg_case_##name##sz,		\
				ptr, old, new);				\
}

__CMPXCHG_CASE(    ,  8)
__CMPXCHG_CASE(    , 16)
__CMPXCHG_CASE(    , 32)
__CMPXCHG_CASE(    , 64)
__CMPXCHG_CASE(acq_,  8)
__CMPXCHG_CASE(acq_, 16)
__CMPXCHG_CASE(acq_, 32)
__CMPXCHG_CASE(acq_, 64)
__CMPXCHG_CASE(rel_,  8)
__CMPXCHG_CASE(rel_, 16)
__CMPXCHG_CASE(rel_, 32)
__CMPXCHG_CASE(rel_, 64)
__CMPXCHG_CASE(mb_,  8)
__CMPXCHG_CASE(mb_, 16)
__CMPXCHG_CASE(mb_, 32)
__CMPXCHG_CASE(mb_, 64)

#undef __CMPXCHG_CASE
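
/*
 * Each case above resolves through __lse_ll_sc_body() to either the LSE
 * CAS-based implementation or the LL/SC exclusive-load/store fallback,
 * selected according to whether the CPU implements the LSE atomics.
 */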

#define __CMPXCHG_DBL(name)						\
static inline long __cmpxchg_double##name(unsigned long old1,		\
					  unsigned long old2,		\
					  unsigned long new1,		\
					  unsigned long new2,		\
					  volatile void *ptr)		\
{									\
	return __lse_ll_sc_body(_cmpxchg_double##name,			\
				old1, old2, new1, new2, ptr);		\
}

__CMPXCHG_DBL(   )
__CMPXCHG_DBL(_mb)

#undef __CMPXCHG_DBL

#define __CMPXCHG_GEN(sfx)						\
static __always_inline unsigned long __cmpxchg##sfx(volatile void *ptr, \
					   unsigned long old,		\
					   unsigned long new,		\
					   int size)			\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpxchg_case##sfx##_8(ptr, old, new);		\
	case 2:								\
		return __cmpxchg_case##sfx##_16(ptr, old, new);		\
	case 4:								\
		return __cmpxchg_case##sfx##_32(ptr, old, new);		\
	case 8:								\
		return __cmpxchg_case##sfx##_64(ptr, old, new);		\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__CMPXCHG_GEN()
__CMPXCHG_GEN(_acq)
__CMPXCHG_GEN(_rel)
__CMPXCHG_GEN(_mb)

#undef __CMPXCHG_GEN

#define __cmpxchg_wrapper(sfx, ptr, o, n)				\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__cmpxchg##sfx((ptr), (unsigned long)(o),		\
				(unsigned long)(n), sizeof(*(ptr)));	\
	__ret;								\
})

/* cmpxchg */
#define arch_cmpxchg_relaxed(...)	__cmpxchg_wrapper(    , __VA_ARGS__)
#define arch_cmpxchg_acquire(...)	__cmpxchg_wrapper(_acq, __VA_ARGS__)
#define arch_cmpxchg_release(...)	__cmpxchg_wrapper(_rel, __VA_ARGS__)
#define arch_cmpxchg(...)		__cmpxchg_wrapper( _mb, __VA_ARGS__)
#define arch_cmpxchg_local		arch_cmpxchg_relaxed
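
/*
 * Usage sketch (illustrative; 'counter' is a hypothetical u64): the
 * classic compare-and-swap retry loop for a lock-free increment.
 *
 *	u64 old, new;
 *	do {
 *		old = READ_ONCE(counter);
 *		new = old + 1;
 *	} while (arch_cmpxchg(&counter, old, new) != old);
 */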

/* cmpxchg64 */
#define arch_cmpxchg64_relaxed		arch_cmpxchg_relaxed
#define arch_cmpxchg64_acquire		arch_cmpxchg_acquire
#define arch_cmpxchg64_release		arch_cmpxchg_release
#define arch_cmpxchg64			arch_cmpxchg
#define arch_cmpxchg64_local		arch_cmpxchg_local

/* cmpxchg_double */
#define system_has_cmpxchg_double()	1

#define __cmpxchg_double_check(ptr1, ptr2)					\
({										\
	if (sizeof(*(ptr1)) != 8)						\
		BUILD_BUG();							\
	VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1);	\
})

#define arch_cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2)				\
({										\
	int __ret;								\
	__cmpxchg_double_check(ptr1, ptr2);					\
	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2),	\
				     (unsigned long)(n1), (unsigned long)(n2),	\
				     ptr1);					\
	__ret;									\
})

#define arch_cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2)			\
({										\
	int __ret;								\
	__cmpxchg_double_check(ptr1, ptr2);					\
	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2),	\
				  (unsigned long)(n1), (unsigned long)(n2),	\
				  ptr1);					\
	__ret;									\
})
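
/*
 * Illustrative use (sketch): 'pair' is a hypothetical pair of adjacent,
 * naturally aligned u64 fields, as enforced by __cmpxchg_double_check().
 *
 *	struct { u64 lo; u64 hi; } pair;
 *
 *	if (arch_cmpxchg_double(&pair.lo, &pair.hi,
 *				old_lo, old_hi, new_lo, new_hi))
 *		...both words were updated atomically...
 *
 * Note the boolean result (1 on success), unlike cmpxchg(), which
 * returns the old value.
 */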

#define __CMPWAIT_CASE(w, sfx, sz)					\
static inline void __cmpwait_case_##sz(volatile void *ptr,		\
				       unsigned long val)		\
{									\
	unsigned long tmp;						\
									\
	asm volatile(							\
	"	sevl\n"							\
	"	wfe\n"							\
	"	ldxr" #sfx "\t%" #w "[tmp], %[v]\n"			\
	"	eor	%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n"	\
	"	cbnz	%" #w "[tmp], 1f\n"				\
	"	wfe\n"							\
	"1:"								\
	: [tmp] "=&r" (tmp), [v] "+Q" (*(u##sz *)ptr) /* sized cast matches the access width */ \
	: [val] "r" (val));						\
}

__CMPWAIT_CASE(w, b, 8);
__CMPWAIT_CASE(w, h, 16);
__CMPWAIT_CASE(w,  , 32);
__CMPWAIT_CASE( ,  , 64);

#undef __CMPWAIT_CASE

#define __CMPWAIT_GEN(sfx)						\
static __always_inline void __cmpwait##sfx(volatile void *ptr,		\
				  unsigned long val,			\
				  int size)				\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpwait_case##sfx##_8(ptr, (u8)val);		\
	case 2:								\
		return __cmpwait_case##sfx##_16(ptr, (u16)val);		\
	case 4:								\
		return __cmpwait_case##sfx##_32(ptr, val);		\
	case 8:								\
		return __cmpwait_case##sfx##_64(ptr, val);		\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__CMPWAIT_GEN()

#undef __CMPWAIT_GEN

#define __cmpwait_relaxed(ptr, val) \
	__cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr)))
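
/*
 * __cmpwait_relaxed() is the wait primitive behind smp_cond_load_relaxed()
 * on arm64: if *ptr still holds 'val', the CPU parks in WFE until the
 * exclusive monitor set up by the LDXR is cleared (e.g. by a store to the
 * location), instead of busy-spinning. Illustrative polling loop (sketch):
 *
 *	while (READ_ONCE(*ptr) == old)
 *		__cmpwait_relaxed(ptr, old);
 */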

#endif	/* __ASM_CMPXCHG_H */