/*
 * Based on arch/arm/include/asm/cmpxchg.h
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H

#include <linux/build_bug.h>
#include <linux/compiler.h>

#include <asm/atomic.h>
#include <asm/barrier.h>
#include <asm/lse.h>

/*
 * We need separate acquire parameters for ll/sc and lse, since the full
 * barrier case is generated as release+dmb for the former and
 * acquire+release for the latter.
 */
#define __XCHG_CASE(w, sfx, name, sz, mb, nop_lse, acq, acq_lse, rel, cl)	\
static inline u##sz __xchg_case_##name##sz(u##sz x, volatile void *ptr)	\
{										\
	u##sz ret;								\
	unsigned long tmp;							\
										\
	asm volatile(ARM64_LSE_ATOMIC_INSN(					\
	/* LL/SC */								\
	"	prfm	pstl1strm, %2\n"					\
	"1:	ld" #acq "xr" #sfx "\t%" #w "0, %2\n"				\
	"	st" #rel "xr" #sfx "\t%w1, %" #w "3, %2\n"			\
	"	cbnz	%w1, 1b\n"						\
	"	" #mb,								\
	/* LSE atomics */							\
	"	swp" #acq_lse #rel #sfx "\t%" #w "3, %" #w "0, %2\n"		\
		__nops(3)							\
	"	" #nop_lse)							\
	: "=&r" (ret), "=&r" (tmp), "+Q" (*(u##sz *)ptr)			\
	: "r" (x)								\
	: cl);									\
										\
	return ret;								\
}
__XCHG_CASE(w, b,     ,  8,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, h,     , 16,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w,  ,     , 32,        ,    ,  ,  ,  ,         )
__XCHG_CASE( ,  ,     , 64,        ,    ,  ,  ,  ,         )
__XCHG_CASE(w, b, acq_,  8,        ,    , a, a,  , "memory")
__XCHG_CASE(w, h, acq_, 16,        ,    , a, a,  , "memory")
__XCHG_CASE(w,  , acq_, 32,        ,    , a, a,  , "memory")
__XCHG_CASE( ,  , acq_, 64,        ,    , a, a,  , "memory")
__XCHG_CASE(w, b, rel_,  8,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, h, rel_, 16,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w,  , rel_, 32,        ,    ,  ,  , l, "memory")
__XCHG_CASE( ,  , rel_, 64,        ,    ,  ,  , l, "memory")
__XCHG_CASE(w, b,  mb_,  8, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w, h,  mb_, 16, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE(w,  ,  mb_, 32, dmb ish, nop,  , a, l, "memory")
__XCHG_CASE( ,  ,  mb_, 64, dmb ish, nop,  , a, l, "memory")
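
/*
 * Illustrative note (not part of the original header): for the full-barrier
 * 32-bit case above, the LL/SC alternative stringifies to roughly
 * "prfm; 1: ldxr; stlxr; cbnz 1b; dmb ish" (release + dmb), while the LSE
 * alternative becomes a single "swpal" (acquire + release) padded with nops.
 * This asymmetry is why the acquire suffix is passed separately for the
 * LL/SC and LSE variants.
 */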
#undef __XCHG_CASE

#define __XCHG_GEN(sfx)							\
static inline unsigned long __xchg##sfx(unsigned long x,		\
					volatile void *ptr,		\
					int size)			\
{									\
	switch (size) {							\
	case 1:								\
		return __xchg_case##sfx##_8(x, ptr);			\
	case 2:								\
		return __xchg_case##sfx##_16(x, ptr);			\
	case 4:								\
		return __xchg_case##sfx##_32(x, ptr);			\
	case 8:								\
		return __xchg_case##sfx##_64(x, ptr);			\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}

__XCHG_GEN()
__XCHG_GEN(_acq)
__XCHG_GEN(_rel)
__XCHG_GEN(_mb)

#undef __XCHG_GEN

/*
 * Assign the result to a local __ret first so that callers which ignore
 * the return value of xchg() don't trigger -Wunused-value warnings (the
 * same trick as the cmpxchg wrapper below).
 */
#define __xchg_wrapper(sfx, ptr, x)					\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
	__ret;								\
})
/* xchg */
#define xchg_relaxed(...)	__xchg_wrapper(    , __VA_ARGS__)
#define xchg_acquire(...)	__xchg_wrapper(_acq, __VA_ARGS__)
#define xchg_release(...)	__xchg_wrapper(_rel, __VA_ARGS__)
#define xchg(...)		__xchg_wrapper( _mb, __VA_ARGS__)
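
/*
 * Illustrative sketch only (not part of the original header): a caller that
 * atomically sets a word and acts on the previous value. The function and
 * variable names are hypothetical.
 */
static inline int example_test_and_set_word(unsigned long *word)
{
	/* xchg() is fully ordered; the previous value is returned. */
	return xchg(word, 1UL) != 0;
}
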
#define __CMPXCHG_GEN(sfx)						\
static inline unsigned long __cmpxchg##sfx(volatile void *ptr,		\
					   unsigned long old,		\
					   unsigned long new,		\
					   int size)			\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpxchg_case##sfx##_8(ptr, (u8)old, new);	\
	case 2:								\
		return __cmpxchg_case##sfx##_16(ptr, (u16)old, new);	\
	case 4:								\
		return __cmpxchg_case##sfx##_32(ptr, old, new);		\
	case 8:								\
		return __cmpxchg_case##sfx##_64(ptr, old, new);		\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}
__CMPXCHG_GEN()
__CMPXCHG_GEN(_acq)
__CMPXCHG_GEN(_rel)
__CMPXCHG_GEN(_mb)

#undef __CMPXCHG_GEN

#define __cmpxchg_wrapper(sfx, ptr, o, n)				\
({									\
	__typeof__(*(ptr)) __ret;					\
	__ret = (__typeof__(*(ptr)))					\
		__cmpxchg##sfx((ptr), (unsigned long)(o),		\
				(unsigned long)(n), sizeof(*(ptr)));	\
	__ret;								\
})
/* cmpxchg */
#define cmpxchg_relaxed(...)	__cmpxchg_wrapper(    , __VA_ARGS__)
#define cmpxchg_acquire(...)	__cmpxchg_wrapper(_acq, __VA_ARGS__)
#define cmpxchg_release(...)	__cmpxchg_wrapper(_rel, __VA_ARGS__)
#define cmpxchg(...)		__cmpxchg_wrapper( _mb, __VA_ARGS__)
#define cmpxchg_local		cmpxchg_relaxed

/* cmpxchg64 */
#define cmpxchg64_relaxed	cmpxchg_relaxed
#define cmpxchg64_acquire	cmpxchg_acquire
#define cmpxchg64_release	cmpxchg_release
#define cmpxchg64		cmpxchg
#define cmpxchg64_local		cmpxchg_local
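
/*
 * Illustrative sketch only (not part of the original header): a typical
 * cmpxchg() retry loop. The function name is hypothetical.
 */
static inline unsigned long example_add_return(unsigned long *counter,
					       unsigned long inc)
{
	unsigned long old, new;

	do {
		old = READ_ONCE(*counter);
		new = old + inc;
		/* cmpxchg() returns the value *counter held beforehand. */
	} while (cmpxchg(counter, old, new) != old);

	return new;
}
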
/* cmpxchg_double */
#define system_has_cmpxchg_double()	1

#define __cmpxchg_double_check(ptr1, ptr2)					\
({										\
	if (sizeof(*(ptr1)) != 8)						\
		BUILD_BUG();							\
	VM_BUG_ON((unsigned long *)(ptr2) - (unsigned long *)(ptr1) != 1);	\
})
#define cmpxchg_double(ptr1, ptr2, o1, o2, n1, n2)				\
({										\
	int __ret;								\
	__cmpxchg_double_check(ptr1, ptr2);					\
	__ret = !__cmpxchg_double_mb((unsigned long)(o1), (unsigned long)(o2),	\
				     (unsigned long)(n1), (unsigned long)(n2),	\
				     ptr1);					\
	__ret;									\
})
#define cmpxchg_double_local(ptr1, ptr2, o1, o2, n1, n2)			\
({										\
	int __ret;								\
	__cmpxchg_double_check(ptr1, ptr2);					\
	__ret = !__cmpxchg_double((unsigned long)(o1), (unsigned long)(o2),	\
				  (unsigned long)(n1), (unsigned long)(n2),	\
				  ptr1);					\
	__ret;									\
})
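
/*
 * Illustrative sketch only (not part of the original header): updating two
 * adjacent 64-bit words (e.g. a pointer plus a tag) in one atomic step.
 * The structure and function names are hypothetical; the two members must
 * be contiguous 64-bit words, as __cmpxchg_double_check() enforces, and the
 * pair should be 16-byte aligned.
 */
struct example_tagged_ptr {
	unsigned long ptr;
	unsigned long tag;
} __aligned(16);

static inline int example_update_tagged_ptr(struct example_tagged_ptr *tp,
					    unsigned long old_ptr,
					    unsigned long old_tag,
					    unsigned long new_ptr,
					    unsigned long new_tag)
{
	/* Returns 1 if both words matched and were replaced, 0 otherwise. */
	return cmpxchg_double(&tp->ptr, &tp->tag,
			      old_ptr, old_tag, new_ptr, new_tag);
}
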
#define __CMPWAIT_CASE(w, sfx, sz)					\
static inline void __cmpwait_case_##sz(volatile void *ptr,		\
				       unsigned long val)		\
{									\
	unsigned long tmp;						\
									\
	asm volatile(							\
	"	sevl\n"							\
	"	wfe\n"							\
	"	ldxr" #sfx "\t%" #w "[tmp], %[v]\n"			\
	"	eor	%" #w "[tmp], %" #w "[tmp], %" #w "[val]\n"	\
	"	cbnz	%" #w "[tmp], 1f\n"				\
	"	wfe\n"							\
	"1:"								\
	: [tmp] "=&r" (tmp), [v] "+Q" (*(unsigned long *)ptr)		\
	: [val] "r" (val));						\
}
__CMPWAIT_CASE(w, b, 8);
__CMPWAIT_CASE(w, h, 16);
__CMPWAIT_CASE(w,  , 32);
__CMPWAIT_CASE( ,  , 64);
#undef __CMPWAIT_CASE

#define __CMPWAIT_GEN(sfx)						\
static inline void __cmpwait##sfx(volatile void *ptr,			\
				  unsigned long val,			\
				  int size)				\
{									\
	switch (size) {							\
	case 1:								\
		return __cmpwait_case##sfx##_8(ptr, (u8)val);		\
	case 2:								\
		return __cmpwait_case##sfx##_16(ptr, (u16)val);		\
	case 4:								\
		return __cmpwait_case##sfx##_32(ptr, val);		\
	case 8:								\
		return __cmpwait_case##sfx##_64(ptr, val);		\
	default:							\
		BUILD_BUG();						\
	}								\
									\
	unreachable();							\
}
__CMPWAIT_GEN()

#undef __CMPWAIT_GEN

#define __cmpwait_relaxed(ptr, val) \
	__cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr)))
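
/*
 * Illustrative sketch only (not part of the original header): spinning until
 * a variable changes from an observed value, using __cmpwait_relaxed() to
 * wait in a low-power state (WFE) rather than busy-polling. The function
 * name is hypothetical; in-tree callers typically reach this through the
 * smp_cond_load_*() helpers. __cmpwait_relaxed() may return spuriously, so
 * the caller re-checks the value in a loop.
 */
static inline unsigned long example_wait_for_change(unsigned long *var,
						    unsigned long seen)
{
	unsigned long cur;

	while ((cur = READ_ONCE(*var)) == seen)
		__cmpwait_relaxed(var, seen);

	return cur;
}
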
#endif	/* __ASM_CMPXCHG_H */