#ifndef ASM_X86_CMPXCHG_H
#define ASM_X86_CMPXCHG_H
#include <linux/compiler.h>
#include <asm/alternative.h> /* Provides LOCK_PREFIX */
/*
 * Non-existent functions to indicate usage errors at link time
 * (or compile-time if the compiler implements __compiletime_error()).
 */
extern void __xchg_wrong_size(void)
	__compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");
extern void __xadd_wrong_size(void)
	__compiletime_error("Bad argument size for xadd");
/*
 * Constants for operation sizes. On 32-bit, the 64-bit size is set to
 * -1 because sizeof will never return -1, thereby making those switch
 * case statements guaranteed dead code which the compiler will
 * eliminate, and allowing the "missing symbol in the default case" to
 * indicate a usage error.
 */
#define __X86_CASE_B	1
#define __X86_CASE_W	2
#define __X86_CASE_L	4
#ifdef CONFIG_64BIT
#define __X86_CASE_Q	8
#else
#define __X86_CASE_Q	-1	/* sizeof will never return -1 */
#endif
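/*
 * Illustrative sketch of the trick above (the variable below is
 * hypothetical, not part of this header): on a 32-bit build an 8-byte
 * operand matches none of the case labels, so the switch in the macros
 * below reduces to the default branch and the build fails with an
 * unresolved reference to one of the __*_wrong_size() helpers:
 *
 *	u64 counter = 0;
 *	xchg(&counter, 1ULL);
 *
 * Here sizeof(*ptr) is 8 while __X86_CASE_Q is -1, so after dead-code
 * elimination only "default: __xchg_wrong_size()" remains.
 */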
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Since this is generally used to protect other memory information, we
 * use "asm volatile" and "memory" clobbers to prevent gcc from moving
 * information around.
 */
#define __xchg(x, ptr, size)	\
({	\
	__typeof(*(ptr)) __x = (x);	\
	switch (size) {	\
	case __X86_CASE_B:	\
	{	\
		volatile u8 *__ptr = (volatile u8 *)(ptr);	\
		asm volatile("xchgb %0,%1"	\
			     : "=q" (__x), "+m" (*__ptr)	\
			     : "0" (__x)	\
			     : "memory");	\
		break;	\
	}	\
	case __X86_CASE_W:	\
	{	\
		volatile u16 *__ptr = (volatile u16 *)(ptr);	\
		asm volatile("xchgw %0,%1"	\
			     : "=r" (__x), "+m" (*__ptr)	\
			     : "0" (__x)	\
			     : "memory");	\
		break;	\
	}	\
	case __X86_CASE_L:	\
	{	\
		volatile u32 *__ptr = (volatile u32 *)(ptr);	\
		asm volatile("xchgl %0,%1"	\
			     : "=r" (__x), "+m" (*__ptr)	\
			     : "0" (__x)	\
			     : "memory");	\
		break;	\
	}	\
	case __X86_CASE_Q:	\
	{	\
		volatile u64 *__ptr = (volatile u64 *)(ptr);	\
		asm volatile("xchgq %0,%1"	\
			     : "=r" (__x), "+m" (*__ptr)	\
			     : "0" (__x)	\
			     : "memory");	\
		break;	\
	}	\
	default:	\
		__xchg_wrong_size();	\
	}	\
	__x;	\
})

#define xchg(ptr, v)	__xchg((v), (ptr), sizeof(*ptr))
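/*
 * Usage sketch (the variable below is hypothetical, not part of this
 * header): xchg() stores the new value and returns the prior memory
 * contents in one atomic step, so a test-and-set style acquisition can
 * be written as:
 *
 *	static unsigned int lock_word;
 *	...
 *	if (xchg(&lock_word, 1) == 0)
 *		the previous value was 0, so this caller now owns the word
 */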
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)	\
({	\
	__typeof__(*(ptr)) __ret;	\
	__typeof__(*(ptr)) __old = (old);	\
	__typeof__(*(ptr)) __new = (new);	\
	switch (size) {	\
	case __X86_CASE_B:	\
	{	\
		volatile u8 *__ptr = (volatile u8 *)(ptr);	\
		asm volatile(lock "cmpxchgb %2,%1"	\
			     : "=a" (__ret), "+m" (*__ptr)	\
			     : "q" (__new), "0" (__old)	\
			     : "memory");	\
		break;	\
	}	\
	case __X86_CASE_W:	\
	{	\
		volatile u16 *__ptr = (volatile u16 *)(ptr);	\
		asm volatile(lock "cmpxchgw %2,%1"	\
			     : "=a" (__ret), "+m" (*__ptr)	\
			     : "r" (__new), "0" (__old)	\
			     : "memory");	\
		break;	\
	}	\
	case __X86_CASE_L:	\
	{	\
		volatile u32 *__ptr = (volatile u32 *)(ptr);	\
		asm volatile(lock "cmpxchgl %2,%1"	\
			     : "=a" (__ret), "+m" (*__ptr)	\
			     : "r" (__new), "0" (__old)	\
			     : "memory");	\
		break;	\
	}	\
	case __X86_CASE_Q:	\
	{	\
		volatile u64 *__ptr = (volatile u64 *)(ptr);	\
		asm volatile(lock "cmpxchgq %2,%1"	\
			     : "=a" (__ret), "+m" (*__ptr)	\
			     : "r" (__new), "0" (__old)	\
			     : "memory");	\
		break;	\
	}	\
	default:	\
		__cmpxchg_wrong_size();	\
	}	\
	__ret;	\
})

#define __cmpxchg(ptr, old, new, size)	\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size)	\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size)	\
	__raw_cmpxchg((ptr), (old), (new), (size), "")
#ifdef CONFIG_X86_32
# include "cmpxchg_32.h"
#else
# include "cmpxchg_64.h"
#endif
#ifdef __HAVE_ARCH_CMPXCHG
#define cmpxchg(ptr, old, new)	\
	__cmpxchg((ptr), (old), (new), sizeof(*ptr))

#define sync_cmpxchg(ptr, old, new)	\
	__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))

#define cmpxchg_local(ptr, old, new)	\
	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
#endif
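/*
 * Usage sketch (the counter below is hypothetical, not part of this
 * header): the return value is the prior memory contents, so success
 * is detected by comparing it with the expected old value, typically
 * in a retry loop:
 *
 *	static unsigned int refcnt;
 *	unsigned int old, new;
 *
 *	do {
 *		old = ACCESS_ONCE(refcnt);
 *		new = old + 1;
 *	} while (cmpxchg(&refcnt, old, new) != old);
 */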
#define __xadd(ptr, inc, lock)	\
({	\
	__typeof__(*(ptr)) __ret = (inc);	\
	switch (sizeof(*(ptr))) {	\
	case __X86_CASE_B:	\
		asm volatile (lock "xaddb %b0, %1\n"	\
			      : "+r" (__ret), "+m" (*(ptr))	\
			      : : "memory", "cc");	\
		break;	\
	case __X86_CASE_W:	\
		asm volatile (lock "xaddw %w0, %1\n"	\
			      : "+r" (__ret), "+m" (*(ptr))	\
			      : : "memory", "cc");	\
		break;	\
	case __X86_CASE_L:	\
		asm volatile (lock "xaddl %0, %1\n"	\
			      : "+r" (__ret), "+m" (*(ptr))	\
			      : : "memory", "cc");	\
		break;	\
	case __X86_CASE_Q:	\
		asm volatile (lock "xaddq %q0, %1\n"	\
			      : "+r" (__ret), "+m" (*(ptr))	\
			      : : "memory", "cc");	\
		break;	\
	default:	\
		__xadd_wrong_size();	\
	}	\
	__ret;	\
})
/*
 * xadd() adds "inc" to "*ptr" and atomically returns the previous
 * value of "*ptr".
 *
 * xadd() is locked when multiple CPUs are online
 * xadd_sync() is always locked
 * xadd_local() is never locked
 */
#define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
#define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
#define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
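/*
 * Usage sketch (the ticket structure below is hypothetical, not part
 * of this header): xadd() is the usual building block for
 * fetch-and-add style counters, e.g. handing out the next ticket while
 * remembering the one this caller drew:
 *
 *	static struct { u16 next; u16 owner; } tickets;
 *	u16 my_ticket = xadd(&tickets.next, 1);
 *
 * my_ticket holds the value tickets.next had before the increment.
 */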
#endif	/* ASM_X86_CMPXCHG_H */