locking/atomic, arch/x86: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()
Implement FETCH-OP atomic primitives; these are very similar to the existing OP-RETURN primitives we already have, except they return the value of the atomic variable _before_ modification.

This is especially useful for irreversible operations such as bitops, where the post-modification value cannot be used to reconstruct the state prior to modification.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit a8bcccaba1
parent 1af5de9af1
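To illustrate why the pre-modification value matters, here is a minimal userspace sketch of the fetch-op contract. It is not part of the patch: the GCC/Clang __atomic builtins stand in for the kernel primitives, and test_and_set_flag() is a hypothetical helper invented for this example.

/*
 * Illustration only, not kernel code: the fetch variant returns the
 * old value, so an irreversible OR can still tell which caller
 * actually set the bit.
 */
#include <stdio.h>

static unsigned long flags;

/* Returns 1 only for the caller that actually set the bit. */
static int test_and_set_flag(unsigned int bit)
{
	unsigned long mask = 1UL << bit;
	unsigned long old = __atomic_fetch_or(&flags, mask, __ATOMIC_SEQ_CST);

	/*
	 * An OP-RETURN primitive would hand back (old | mask), in which
	 * the bit is always set; only the pre-modification value makes
	 * this test possible.
	 */
	return !(old & mask);
}

int main(void)
{
	printf("first caller:  %d\n", test_and_set_flag(3));	/* 1 */
	printf("second caller: %d\n", test_and_set_flag(3));	/* 0 */
	return 0;
}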
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -171,6 +171,16 @@ static __always_inline int atomic_sub_return(int i, atomic_t *v)
 #define atomic_inc_return(v)  (atomic_add_return(1, v))
 #define atomic_dec_return(v)  (atomic_sub_return(1, v))
 
+static __always_inline int atomic_fetch_add(int i, atomic_t *v)
+{
+	return xadd(&v->counter, i);
+}
+
+static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
+{
+	return xadd(&v->counter, -i);
+}
+
 static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	return cmpxchg(&v->counter, old, new);
@@ -190,10 +200,31 @@ static inline void atomic_##op(int i, atomic_t *v)		\
 			: "memory");					\
 }
 
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+#define ATOMIC_FETCH_OP(op, c_op)					\
+static inline int atomic_fetch_##op(int i, atomic_t *v)		\
+{									\
+	int old, val = atomic_read(v);					\
+	for (;;) {							\
+		old = atomic_cmpxchg(v, val, val c_op i);		\
+		if (old == val)						\
+			break;						\
+		val = old;						\
+	}								\
+	return old;							\
+}
+
+#define ATOMIC_OPS(op, c_op)						\
+	ATOMIC_OP(op)							\
+	ATOMIC_FETCH_OP(op, c_op)
+
+#define atomic_fetch_or atomic_fetch_or
+
+ATOMIC_OPS(and, &)
+ATOMIC_OPS(or , |)
+ATOMIC_OPS(xor, ^)
 
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP
 
 /**
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -320,10 +320,29 @@ static inline void atomic64_##op(long long i, atomic64_t *v)	\
 		c = old;					\
 }
 
-ATOMIC64_OP(and, &)
-ATOMIC64_OP(or, |)
-ATOMIC64_OP(xor, ^)
+#define ATOMIC64_FETCH_OP(op, c_op)				\
+static inline long long atomic64_fetch_##op(long long i, atomic64_t *v) \
+{								\
+	long long old, c = 0;					\
+	while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c)	\
+		c = old;					\
+	return old;						\
+}
+
+ATOMIC64_FETCH_OP(add, +)
+
+#define atomic64_fetch_sub(i, v)	atomic64_fetch_add(-(i), (v))
+
+#define ATOMIC64_OPS(op, c_op)					\
+	ATOMIC64_OP(op, c_op)					\
+	ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(and, &)
+ATOMIC64_OPS(or, |)
+ATOMIC64_OPS(xor, ^)
 
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP
 
 #endif /* _ASM_X86_ATOMIC64_32_H */
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -158,6 +158,16 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
 	return atomic64_add_return(-i, v);
 }
 
+static inline long atomic64_fetch_add(long i, atomic64_t *v)
+{
+	return xadd(&v->counter, i);
+}
+
+static inline long atomic64_fetch_sub(long i, atomic64_t *v)
+{
+	return xadd(&v->counter, -i);
+}
+
 #define atomic64_inc_return(v)  (atomic64_add_return(1, (v)))
 #define atomic64_dec_return(v)  (atomic64_sub_return(1, (v)))
 
@@ -229,10 +239,29 @@ static inline void atomic64_##op(long i, atomic64_t *v)		\
 			: "memory");					\
 }
 
-ATOMIC64_OP(and)
-ATOMIC64_OP(or)
-ATOMIC64_OP(xor)
+#define ATOMIC64_FETCH_OP(op, c_op)					\
+static inline long atomic64_fetch_##op(long i, atomic64_t *v)		\
+{									\
+	long old, val = atomic64_read(v);				\
+	for (;;) {							\
+		old = atomic64_cmpxchg(v, val, val c_op i);		\
+		if (old == val)						\
+			break;						\
+		val = old;						\
+	}								\
+	return old;							\
+}
+
+#define ATOMIC64_OPS(op, c_op)						\
+	ATOMIC64_OP(op)							\
+	ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(and, &)
+ATOMIC64_OPS(or, |)
+ATOMIC64_OPS(xor, ^)
 
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP
 
 #endif /* _ASM_X86_ATOMIC64_64_H */
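Where XADD cannot be used, the patch falls back to a compare-and-exchange loop (the ATOMIC_FETCH_OP / ATOMIC64_FETCH_OP macros above). A standalone userspace sketch of that loop, again with the GCC/Clang __atomic builtins assumed as stand-ins for atomic_read()/atomic_cmpxchg(); fetch_and() is a hypothetical helper for this example only.

/*
 * Illustration only: read the current value, try to install
 * (val & i), and retry with the observed value on failure. The
 * value returned is the one the successful exchange started from,
 * i.e. the variable's contents _before_ modification.
 */
#include <stdio.h>

static int fetch_and(int *v, int i)
{
	int val = __atomic_load_n(v, __ATOMIC_RELAXED);

	/*
	 * On failure the builtin updates 'val' with the value currently
	 * in *v, so each retry recomputes 'val & i' from fresh contents,
	 * mirroring the 'val = old' step in the kernel macro.
	 */
	while (!__atomic_compare_exchange_n(v, &val, val & i, 0,
					    __ATOMIC_SEQ_CST,
					    __ATOMIC_SEQ_CST))
		;

	return val;	/* the pre-modification value */
}

int main(void)
{
	int v = 0xff;
	int old = fetch_and(&v, 0x0f);

	printf("old=%#x new=%#x\n", old, v);	/* old=0xff new=0xf */
	return 0;
}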