#ifndef ASM_X86_CMPXCHG_H
#define ASM_X86_CMPXCHG_H

#include <linux/compiler.h>
#include <asm/alternative.h> /* Provides LOCK_PREFIX */

#define __HAVE_ARCH_CMPXCHG 1

/*
 * Non-existent functions to indicate usage errors at link time
 * (or compile-time if the compiler implements __compiletime_error()).
 */
extern void __xchg_wrong_size(void)
	__compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");
extern void __xadd_wrong_size(void)
	__compiletime_error("Bad argument size for xadd");
extern void __add_wrong_size(void)
	__compiletime_error("Bad argument size for add");

/*
 * Constants for operation sizes. On 32-bit, the 64-bit size is set to
 * -1 because sizeof will never return -1, thereby making those switch
 * case statements guaranteed dead code which the compiler will
 * eliminate, and allowing the "missing symbol in the default case" to
 * indicate a usage error.
 */
#define __X86_CASE_B	1
#define __X86_CASE_W	2
#define __X86_CASE_L	4
#ifdef CONFIG_64BIT
#define __X86_CASE_Q	8
#else
#define __X86_CASE_Q	-1		/* sizeof will never return -1 */
#endif
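
/*
 * For example (an illustrative sketch, not code in this header): on a
 * CONFIG_X86_32 build,
 *
 *	u64 *p = ...;
 *	cmpxchg(p, 0ULL, 1ULL);
 *
 * has sizeof(*p) == 8, which matches none of the cases (__X86_CASE_Q is
 * -1 there), so the switch falls through to the default case and the
 * reference to __cmpxchg_wrong_size() turns the mistake into a
 * build-time or link-time error.
 */
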
/*
 * An exchange-type operation, which takes a value and a pointer, and
 * returns the old value.
 */
#define __xchg_op(ptr, arg, op, lock)					\
	({								\
		__typeof__ (*(ptr)) __ret = (arg);			\
		switch (sizeof(*(ptr))) {				\
		case __X86_CASE_B:					\
			asm volatile (lock #op "b %b0, %1\n"		\
				      : "+q" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_W:					\
			asm volatile (lock #op "w %w0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_L:					\
			asm volatile (lock #op "l %0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_Q:					\
			asm volatile (lock #op "q %q0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		default:						\
			__ ## op ## _wrong_size();			\
		}							\
		__ret;							\
	})

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Since this is generally used to protect other memory information, we
 * use "asm volatile" and "memory" clobbers to prevent gcc from moving
 * information around.
 */
#define xchg(ptr, v)	__xchg_op((ptr), (v), xchg, "")
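
/*
 * Illustrative use of xchg() (a sketch, not part of this header; "seen"
 * and handle_first_call() are made-up names):
 *
 *	static unsigned long seen;
 *	unsigned long old;
 *
 *	old = xchg(&seen, 1UL);
 *	if (!old)
 *		handle_first_call();
 *
 * The return value is whatever was in *ptr before the atomic store, so
 * the 0 -> 1 transition is observed by exactly one CPU.
 */
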
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)			\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case __X86_CASE_B:						\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
		asm volatile(lock "cmpxchgb %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "q" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_W:						\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
		asm volatile(lock "cmpxchgw %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_L:						\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
		asm volatile(lock "cmpxchgl %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_Q:						\
	{								\
		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
		asm volatile(lock "cmpxchgq %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__ret;								\
})

#define __cmpxchg(ptr, old, new, size)					\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "")

#ifdef CONFIG_X86_32
# include <asm/cmpxchg_32.h>
#else
# include <asm/cmpxchg_64.h>
#endif

#define cmpxchg(ptr, old, new)						\
	__cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define sync_cmpxchg(ptr, old, new)					\
	__sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define cmpxchg_local(ptr, old, new)					\
	__cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
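
/*
 * Illustrative cmpxchg() retry loop (a sketch, not part of this header;
 * "counter" is a made-up variable):
 *
 *	static unsigned long counter;
 *	unsigned long old, new;
 *
 *	do {
 *		old = ACCESS_ONCE(counter);
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 *
 * As documented above, success is detected by comparing the returned
 * value with the expected old value.
 */
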
/*
 * xadd() adds "inc" to "*ptr" and atomically returns the previous
 * value of "*ptr".
 *
 * xadd() is locked when multiple CPUs are online
 * xadd_sync() is always locked
 * xadd_local() is never locked
 */
#define __xadd(ptr, inc, lock)	__xchg_op((ptr), (inc), xadd, lock)
#define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
#define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
#define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
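
/*
 * Illustrative use of xadd() (a sketch, not part of this header;
 * "ticket" is a made-up variable):
 *
 *	static unsigned int ticket;
 *	unsigned int mine;
 *
 *	mine = xadd(&ticket, 1);
 *
 * Each caller atomically takes the current value and bumps it; this
 * fetch-and-add pattern is what ticket-style locks are built on.
 */
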
#define __add(ptr, inc, lock)						\
	({								\
		__typeof__ (*(ptr)) __ret = (inc);			\
		switch (sizeof(*(ptr))) {				\
		case __X86_CASE_B:					\
			asm volatile (lock "addb %b1, %0\n"		\
				      : "+m" (*(ptr)) : "qi" (inc)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_W:					\
			asm volatile (lock "addw %w1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_L:					\
			asm volatile (lock "addl %1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_Q:					\
			asm volatile (lock "addq %1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		default:						\
			__add_wrong_size();				\
		}							\
		__ret;							\
	})

/*
 * add_*() adds "inc" to "*ptr"
 *
 * __add() takes a lock prefix
 * add_smp() is locked when multiple CPUs are online
 * add_sync() is always locked
 */
#define add_smp(ptr, inc)	__add((ptr), (inc), LOCK_PREFIX)
#define add_sync(ptr, inc)	__add((ptr), (inc), "lock; ")
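
/*
 * Illustrative use of add_smp() (a sketch, not part of this header;
 * "hits" is a made-up variable):
 *
 *	static unsigned long hits;
 *
 *	add_smp(&hits, 1);
 *
 * Unlike xadd(), __add()/add_smp() do not return the previous value of
 * *ptr; use them when only the updated memory contents matter.
 */
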
#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2)			\
({									\
	bool __ret;							\
	__typeof__(*(p1)) __old1 = (o1), __new1 = (n1);			\
	__typeof__(*(p2)) __old2 = (o2), __new2 = (n2);			\
	BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long));			\
	BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long));			\
	VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long)));		\
	VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2));	\
	asm volatile(pfx "cmpxchg%c4b %2; sete %0"			\
		     : "=a" (__ret), "+d" (__old2),			\
		       "+m" (*(p1)), "+m" (*(p2))			\
		     : "i" (2 * sizeof(long)), "a" (__old1),		\
		       "b" (__new1), "c" (__new2));			\
	__ret;								\
})

#define cmpxchg_double(p1, p2, o1, o2, n1, n2)				\
	__cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)

#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2)			\
	__cmpxchg_double(, p1, p2, o1, o2, n1, n2)
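
/*
 * Illustrative use of cmpxchg_double() (a sketch, not part of this
 * header; the "pair" structure is a made-up example):
 *
 *	struct { unsigned long lo, hi; } __aligned(2 * sizeof(long)) pair;
 *	unsigned long old_lo, old_hi;
 *
 *	if (cmpxchg_double(&pair.lo, &pair.hi,
 *			   old_lo, old_hi,
 *			   old_lo + 1, old_hi + 1))
 *		... both words were replaced atomically ...
 *
 * The two pointers must name the two halves of one naturally aligned
 * double-word; the BUILD_BUG_ON()/VM_BUG_ON() checks above enforce this,
 * and the return value is true on success (set by "sete").
 */
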
#endif	/* ASM_X86_CMPXCHG_H */