2009-05-14 02:56:35 +04:00
/*
2011-07-27 03:09:08 +04:00
* Generic C implementation of atomic counter operations . Usable on
* UP systems only . Do not include in machine independent code .
*
2009-05-14 02:56:35 +04:00
* Originally implemented for MN10300 .
*
* Copyright ( C ) 2007 Red Hat , Inc . All Rights Reserved .
* Written by David Howells ( dhowells @ redhat . com )
*
* This program is free software ; you can redistribute it and / or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation ; either version
* 2 of the Licence , or ( at your option ) any later version .
*/
# ifndef __ASM_GENERIC_ATOMIC_H
# define __ASM_GENERIC_ATOMIC_H
2012-03-28 21:30:03 +04:00
# include <asm/cmpxchg.h>
2009-05-14 02:56:35 +04:00
#ifdef CONFIG_SMP
/* Force people to define core atomics */
#if !defined(atomic_add_return) || !defined(atomic_sub_return) || \
    !defined(atomic_clear_mask) || !defined(atomic_set_mask)
#error "SMP requires a little arch-specific magic"
#endif
#endif
/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

/* Static initializer for an atomic_t, e.g. atomic_t x = ATOMIC_INIT(0); */
#define ATOMIC_INIT(i)	{ (i) }
# ifdef __KERNEL__
/**
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 *
 * The volatile cast forces a real load from memory on every use,
 * preventing the compiler from caching the counter in a register.
 * Architectures may provide their own definition before including
 * this header; we only supply the generic fallback.
 */
#ifndef atomic_read
#define atomic_read(v)	(*(volatile int *)&(v)->counter)
#endif
2009-05-14 02:56:35 +04:00
/**
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v, i) (((v)->counter) = (i))
2010-10-07 17:08:55 +04:00
# include <linux/irqflags.h>
2009-05-14 02:56:35 +04:00
/**
* atomic_add_return - add integer to atomic variable
* @ i : integer value to add
* @ v : pointer of type atomic_t
*
* Atomically adds @ i to @ v and returns the result
*/
2011-07-27 03:09:11 +04:00
# ifndef atomic_add_return
2009-05-14 02:56:35 +04:00
static inline int atomic_add_return ( int i , atomic_t * v )
{
unsigned long flags ;
int temp ;
2010-10-07 17:08:55 +04:00
raw_local_irq_save ( flags ) ; /* Don't trace it in an irqsoff handler */
2009-05-14 02:56:35 +04:00
temp = v - > counter ;
temp + = i ;
v - > counter = temp ;
2010-08-10 04:18:24 +04:00
raw_local_irq_restore ( flags ) ;
2009-05-14 02:56:35 +04:00
return temp ;
}
2011-07-27 03:09:11 +04:00
# endif
2009-05-14 02:56:35 +04:00
/**
* atomic_sub_return - subtract integer from atomic variable
* @ i : integer value to subtract
* @ v : pointer of type atomic_t
*
* Atomically subtracts @ i from @ v and returns the result
*/
2011-07-27 03:09:11 +04:00
# ifndef atomic_sub_return
2009-05-14 02:56:35 +04:00
static inline int atomic_sub_return ( int i , atomic_t * v )
{
unsigned long flags ;
int temp ;
2010-10-07 17:08:55 +04:00
raw_local_irq_save ( flags ) ; /* Don't trace it in an irqsoff handler */
2009-05-14 02:56:35 +04:00
temp = v - > counter ;
temp - = i ;
v - > counter = temp ;
2010-08-10 04:18:24 +04:00
raw_local_irq_restore ( flags ) ;
2009-05-14 02:56:35 +04:00
return temp ;
}
2011-07-27 03:09:11 +04:00
# endif
2009-05-14 02:56:35 +04:00
/* atomic_add_negative - add @i to @v and report whether the result is < 0 */
static inline int atomic_add_negative(int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}
/* atomic_add - add @i to @v, discarding the result */
static inline void atomic_add(int i, atomic_t *v)
{
	atomic_add_return(i, v);
}
/* atomic_sub - subtract @i from @v, discarding the result */
static inline void atomic_sub(int i, atomic_t *v)
{
	atomic_sub_return(i, v);
}
/* atomic_inc - increment @v by one, discarding the result */
static inline void atomic_inc(atomic_t *v)
{
	atomic_add_return(1, v);
}
/* atomic_dec - decrement @v by one, discarding the result */
static inline void atomic_dec(atomic_t *v)
{
	atomic_sub_return(1, v);
}
/* Derived operations, all built on the core add/sub primitives. */
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_inc_return(v)		atomic_add_return(1, (v))

/* The *_and_test() forms return true when the new value is zero. */
#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
#define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)
#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)
2009-05-14 02:56:35 +04:00
2010-06-27 14:26:06 +04:00
/* Exchange/compare-exchange, delegating to the arch cmpxchg helpers. */
#define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))

#define cmpxchg_local(ptr, o, n)					       \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr),		       \
			(unsigned long)(o), (unsigned long)(n),		       \
			sizeof(*(ptr))))

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2011-07-27 03:09:07 +04:00
/**
 * __atomic_add_unless - add to atomic variable unless it has a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to @v
 * @u: the value @v must not hold for the add to happen
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns the old value of @v.
 *
 * Implemented as a cmpxchg loop: retry with the freshly observed value
 * whenever another path changed the counter between the read and the
 * compare-exchange.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;

	return c;
}
2009-05-14 02:56:35 +04:00
2011-07-27 03:09:10 +04:00
/**
* atomic_clear_mask - Atomically clear bits in atomic variable
* @ mask : Mask of the bits to be cleared
* @ v : pointer of type atomic_t
*
* Atomically clears the bits set in @ mask from @ v
*/
2011-07-27 03:09:11 +04:00
# ifndef atomic_clear_mask
2011-07-27 03:09:10 +04:00
static inline void atomic_clear_mask ( unsigned long mask , atomic_t * v )
2009-05-14 02:56:35 +04:00
{
unsigned long flags ;
mask = ~ mask ;
2010-08-10 04:18:24 +04:00
raw_local_irq_save ( flags ) ; /* Don't trace it in a irqsoff handler */
2011-07-27 03:09:10 +04:00
v - > counter & = mask ;
2010-08-10 04:18:24 +04:00
raw_local_irq_restore ( flags ) ;
2009-05-14 02:56:35 +04:00
}
2011-07-27 03:09:11 +04:00
# endif
2009-05-14 02:56:35 +04:00
2011-07-27 03:09:10 +04:00
/**
* atomic_set_mask - Atomically set bits in atomic variable
* @ mask : Mask of the bits to be set
* @ v : pointer of type atomic_t
*
* Atomically sets the bits set in @ mask in @ v
*/
2011-07-27 03:09:11 +04:00
# ifndef atomic_set_mask
2011-07-27 03:09:10 +04:00
static inline void atomic_set_mask ( unsigned int mask , atomic_t * v )
{
unsigned long flags ;
raw_local_irq_save ( flags ) ; /* Don't trace it in a irqsoff handler */
v - > counter | = mask ;
raw_local_irq_restore ( flags ) ;
}
2011-07-27 03:09:11 +04:00
# endif
2011-07-27 03:09:10 +04:00
2009-05-14 02:56:35 +04:00
/*
 * Assume that atomic operations are already serializing on this
 * (UP-only) configuration, so a compiler barrier is sufficient.
 */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
# endif /* __KERNEL__ */
# endif /* __ASM_GENERIC_ATOMIC_H */