Merge tag 'locking-core-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
- Core locking & atomics:

    - Convert all architectures to ARCH_ATOMIC: move every architecture
      to ARCH_ATOMIC, then get rid of ARCH_ATOMIC and all the
      transitory facilities and #ifdefs. (A sketch of the resulting
      layering follows this list.)

      Much reduction in complexity from that series:

          63 files changed, 756 insertions(+), 4094 deletions(-)

    - Self-test enhancements

- Futexes:

    - Add the new FUTEX_LOCK_PI2 ABI, which is a variant that doesn't
      set FLAGS_CLOCKRT (i.e. it uses CLOCK_MONOTONIC); a usage sketch
      follows the commit list below.

      [ The temptation to repurpose FUTEX_LOCK_PI's implicit setting of
        FLAGS_CLOCKRT & invert the flag's meaning to avoid having to
        introduce a new variant was resisted successfully. ]

    - Enhance futex self-tests

- Lockdep:

    - Fix dependency path printouts
    - Optimize trace saving
    - Broaden & fix wait-context checks (a small example of what these
      checks catch also follows this list)

- Misc cleanups and fixes.
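For reference, the end state of the ARCH_ATOMIC conversion is a two-layer
scheme: each architecture implements arch_atomic_*() primitives, and a
generated generic layer (include/asm-generic/atomic-instrumented.h, produced
by scripts/atomic/gen-atomic-instrumented.sh) supplies the un-prefixed
atomic_*() API with sanitizer instrumentation on top. A simplified,
hand-written sketch of what one such generated wrapper looks like
(illustrative only, not the generated code verbatim):

/*
 * Sketch of the instrumented wrappers that sit on top of the
 * arch_atomic_*() ops; the real versions are script-generated.
 */
static __always_inline void
atomic_add(int i, atomic_t *v)
{
	instrument_atomic_read_write(v, sizeof(*v));	/* KASAN/KCSAN hook */
	arch_atomic_add(i, v);				/* arch-provided op */
}

static __always_inline int
atomic_add_return(int i, atomic_t *v)
{
	instrument_atomic_read_write(v, sizeof(*v));
	return arch_atomic_add_return(i, v);
}

With every architecture providing arch_atomic_*() directly, the transitional
ARCH_ATOMIC #ifdef machinery could then be removed, hence the large net line
removal cited above.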
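The wait-context checks mentioned above catch lock nestings that only break
once spinlock_t becomes a sleeping lock on PREEMPT_RT. A hypothetical
kernel-style fragment of the kind of nesting they report (illustrative only):

static DEFINE_RAW_SPINLOCK(outer);	/* raw: never sleeps, even on RT */
static DEFINE_SPINLOCK(inner);		/* becomes a sleeping lock on RT */

static void bad_nesting(void)
{
	raw_spin_lock(&outer);
	spin_lock(&inner);	/* invalid wait context: a potentially
				 * sleeping lock acquired inside a raw
				 * spinlock; lockdep's wait-context checks
				 * flag this with
				 * CONFIG_PROVE_RAW_LOCK_NESTING=y
				 */
	spin_unlock(&inner);
	raw_spin_unlock(&outer);
}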
* tag 'locking-core-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (52 commits)
locking/lockdep: Correct the description error for check_redundant()
futex: Provide FUTEX_LOCK_PI2 to support clock selection
futex: Prepare futex_lock_pi() for runtime clock selection
lockdep/selftest: Remove wait-type RCU_CALLBACK tests
lockdep/selftests: Fix selftests vs PROVE_RAW_LOCK_NESTING
lockdep: Fix wait-type for empty stack
locking/selftests: Add a selftest for check_irq_usage()
lockding/lockdep: Avoid to find wrong lock dep path in check_irq_usage()
locking/lockdep: Remove the unnecessary trace saving
locking/lockdep: Fix the dep path printing for backwards BFS
selftests: futex: Add futex compare requeue test
selftests: futex: Add futex wait test
seqlock: Remove trailing semicolon in macros
locking/lockdep: Reduce LOCKDEP dependency list
locking/lockdep,doc: Improve readability of the block matrix
locking/atomics: atomic-instrumented: simplify ifdeffery
locking/atomic: delete !ARCH_ATOMIC remnants
locking/atomic: xtensa: move to ARCH_ATOMIC
locking/atomic: sparc: move to ARCH_ATOMIC
locking/atomic: sh: move to ARCH_ATOMIC
...
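A minimal userspace sketch of the new futex operation follows; it is
illustrative only. FUTEX_LOCK_PI2 is defined locally in case the installed
uapi headers predate it (13 is the value in include/uapi/linux/futex.h), the
timeout is an absolute CLOCK_MONOTONIC deadline (or'ing FUTEX_CLOCK_REALTIME
into the op selects CLOCK_REALTIME instead), and the call fails with ENOSYS
on kernels without this series:

#include <linux/futex.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

#ifndef FUTEX_LOCK_PI2
#define FUTEX_LOCK_PI2	13	/* value from include/uapi/linux/futex.h */
#endif

static uint32_t futex_word;	/* 0 = unlocked; kernel stores the owner TID */

int main(void)
{
	struct timespec ts;

	/*
	 * Absolute deadline 1s from now on CLOCK_MONOTONIC -- the clock
	 * FUTEX_LOCK_PI2 uses by default, whereas FUTEX_LOCK_PI always
	 * interprets the timeout against CLOCK_REALTIME.
	 *
	 * (A real PI mutex would try a userspace cmpxchg of its TID into
	 * futex_word first and only call into the kernel on contention.)
	 */
	clock_gettime(CLOCK_MONOTONIC, &ts);
	ts.tv_sec += 1;

	if (syscall(SYS_futex, &futex_word, FUTEX_LOCK_PI2, 0, &ts, NULL, 0) == -1) {
		perror("FUTEX_LOCK_PI2");
		return 1;
	}

	/* ... critical section; futex_word now holds this thread's TID ... */

	syscall(SYS_futex, &futex_word, FUTEX_UNLOCK_PI, 0, NULL, NULL, 0);
	return 0;
}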
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -18,30 +18,30 @@
 #include <asm/barrier.h>
 #include <asm-generic/atomic64.h>

-int atomic_add_return(int, atomic_t *);
-int atomic_fetch_add(int, atomic_t *);
-int atomic_fetch_and(int, atomic_t *);
-int atomic_fetch_or(int, atomic_t *);
-int atomic_fetch_xor(int, atomic_t *);
-int atomic_cmpxchg(atomic_t *, int, int);
-int atomic_xchg(atomic_t *, int);
-int atomic_fetch_add_unless(atomic_t *, int, int);
-void atomic_set(atomic_t *, int);
+int arch_atomic_add_return(int, atomic_t *);
+int arch_atomic_fetch_add(int, atomic_t *);
+int arch_atomic_fetch_and(int, atomic_t *);
+int arch_atomic_fetch_or(int, atomic_t *);
+int arch_atomic_fetch_xor(int, atomic_t *);
+int arch_atomic_cmpxchg(atomic_t *, int, int);
+int arch_atomic_xchg(atomic_t *, int);
+int arch_atomic_fetch_add_unless(atomic_t *, int, int);
+void arch_atomic_set(atomic_t *, int);

-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless

-#define atomic_set_release(v, i) atomic_set((v), (i))
+#define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))

-#define atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)

-#define atomic_add(i, v) ((void)atomic_add_return( (int)(i), (v)))
-#define atomic_sub(i, v) ((void)atomic_add_return(-(int)(i), (v)))
+#define arch_atomic_add(i, v) ((void)arch_atomic_add_return( (int)(i), (v)))
+#define arch_atomic_sub(i, v) ((void)arch_atomic_add_return(-(int)(i), (v)))

-#define atomic_and(i, v) ((void)atomic_fetch_and((i), (v)))
-#define atomic_or(i, v) ((void)atomic_fetch_or((i), (v)))
-#define atomic_xor(i, v) ((void)atomic_fetch_xor((i), (v)))
+#define arch_atomic_and(i, v) ((void)arch_atomic_fetch_and((i), (v)))
+#define arch_atomic_or(i, v) ((void)arch_atomic_fetch_or((i), (v)))
+#define arch_atomic_xor(i, v) ((void)arch_atomic_fetch_xor((i), (v)))

-#define atomic_sub_return(i, v) (atomic_add_return(-(int)(i), (v)))
-#define atomic_fetch_sub(i, v) (atomic_fetch_add (-(int)(i), (v)))
+#define arch_atomic_sub_return(i, v) (arch_atomic_add_return(-(int)(i), (v)))
+#define arch_atomic_fetch_sub(i, v) (arch_atomic_fetch_add (-(int)(i), (v)))

 #endif /* !(__ARCH_SPARC_ATOMIC__) */
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -14,23 +14,23 @@

 #define ATOMIC64_INIT(i) { (i) }

-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic64_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic64_read(v) READ_ONCE((v)->counter)

-#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
-#define atomic64_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define arch_atomic64_set(v, i) WRITE_ONCE(((v)->counter), (i))

 #define ATOMIC_OP(op) \
-void atomic_##op(int, atomic_t *); \
-void atomic64_##op(s64, atomic64_t *);
+void arch_atomic_##op(int, atomic_t *); \
+void arch_atomic64_##op(s64, atomic64_t *);

 #define ATOMIC_OP_RETURN(op) \
-int atomic_##op##_return(int, atomic_t *); \
-s64 atomic64_##op##_return(s64, atomic64_t *);
+int arch_atomic_##op##_return(int, atomic_t *); \
+s64 arch_atomic64_##op##_return(s64, atomic64_t *);

 #define ATOMIC_FETCH_OP(op) \
-int atomic_fetch_##op(int, atomic_t *); \
-s64 atomic64_fetch_##op(s64, atomic64_t *);
+int arch_atomic_fetch_##op(int, atomic_t *); \
+s64 arch_atomic64_fetch_##op(s64, atomic64_t *);

 #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)

@@ -49,18 +49,18 @@ ATOMIC_OPS(xor)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP

-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))

-static inline int atomic_xchg(atomic_t *v, int new)
+static inline int arch_atomic_xchg(atomic_t *v, int new)
 {
-return xchg(&v->counter, new);
+return arch_xchg(&v->counter, new);
 }

-#define atomic64_cmpxchg(v, o, n) \
-((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_cmpxchg(v, o, n) \
+((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))

-s64 atomic64_dec_if_positive(atomic64_t *v);
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+s64 arch_atomic64_dec_if_positive(atomic64_t *v);
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

 #endif /* !(__ARCH_SPARC64_ATOMIC__) */
--- a/arch/sparc/include/asm/cmpxchg_32.h
+++ b/arch/sparc/include/asm/cmpxchg_32.h
@@ -25,7 +25,7 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int
 return x;
 }

-#define xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
+#define arch_xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})

 /* Emulate cmpxchg() the same way we emulate atomics,
 * by hashing the object address and indexing into an array
@@ -55,7 +55,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
 return old;
 }

-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
 ({ \
 __typeof__(*(ptr)) _o_ = (o); \
 __typeof__(*(ptr)) _n_ = (n); \
@@ -64,7 +64,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
 })

 u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new);
-#define cmpxchg64(ptr, old, new) __cmpxchg_u64(ptr, old, new)
+#define arch_cmpxchg64(ptr, old, new) __cmpxchg_u64(ptr, old, new)

 #include <asm-generic/cmpxchg-local.h>

@@ -72,9 +72,9 @@ u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new);
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
-#define cmpxchg_local(ptr, o, n) \
-((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+#define arch_cmpxchg_local(ptr, o, n) \
+((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o),\
 (unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))

 #endif /* __ARCH_SPARC_CMPXCHG__ */
--- a/arch/sparc/include/asm/cmpxchg_64.h
+++ b/arch/sparc/include/asm/cmpxchg_64.h
@@ -52,7 +52,7 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
 return val;
 }

-#define xchg(ptr,x) \
+#define arch_xchg(ptr,x) \
 ({ __typeof__(*(ptr)) __ret; \
 __ret = (__typeof__(*(ptr))) \
 __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
@@ -168,7 +168,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
 return old;
 }

-#define cmpxchg(ptr,o,n) \
+#define arch_cmpxchg(ptr,o,n) \
 ({ \
 __typeof__(*(ptr)) _o_ = (o); \
 __typeof__(*(ptr)) _n_ = (n); \
@@ -189,20 +189,20 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 case 4:
 case 8: return __cmpxchg(ptr, old, new, size);
 default:
-return __cmpxchg_local_generic(ptr, old, new, size);
+return __generic_cmpxchg_local(ptr, old, new, size);
 }

 return old;
 }

-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
 ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
 (unsigned long)(n), sizeof(*(ptr))))
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
 ({ \
 BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
 cmpxchg_local((ptr), (o), (n)); \
 })
-#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))

 #endif /* __ARCH_SPARC64_CMPXCHG__ */
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -29,7 +29,7 @@ static DEFINE_SPINLOCK(dummy);
 #endif /* SMP */

 #define ATOMIC_FETCH_OP(op, c_op) \
-int atomic_fetch_##op(int i, atomic_t *v) \
+int arch_atomic_fetch_##op(int i, atomic_t *v) \
 { \
 int ret; \
 unsigned long flags; \
@@ -41,10 +41,10 @@ int atomic_fetch_##op(int i, atomic_t *v) \
 spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
 return ret; \
 } \
-EXPORT_SYMBOL(atomic_fetch_##op);
+EXPORT_SYMBOL(arch_atomic_fetch_##op);

 #define ATOMIC_OP_RETURN(op, c_op) \
-int atomic_##op##_return(int i, atomic_t *v) \
+int arch_atomic_##op##_return(int i, atomic_t *v) \
 { \
 int ret; \
 unsigned long flags; \
@@ -55,7 +55,7 @@ int atomic_##op##_return(int i, atomic_t *v) \
 spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
 return ret; \
 } \
-EXPORT_SYMBOL(atomic_##op##_return);
+EXPORT_SYMBOL(arch_atomic_##op##_return);

 ATOMIC_OP_RETURN(add, +=)

@@ -67,7 +67,7 @@ ATOMIC_FETCH_OP(xor, ^=)
 #undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP_RETURN

-int atomic_xchg(atomic_t *v, int new)
+int arch_atomic_xchg(atomic_t *v, int new)
 {
 int ret;
 unsigned long flags;
@@ -78,9 +78,9 @@ int atomic_xchg(atomic_t *v, int new)
 spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 return ret;
 }
-EXPORT_SYMBOL(atomic_xchg);
+EXPORT_SYMBOL(arch_atomic_xchg);

-int atomic_cmpxchg(atomic_t *v, int old, int new)
+int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 int ret;
 unsigned long flags;
@@ -93,9 +93,9 @@ int atomic_cmpxchg(atomic_t *v, int old, int new)
 spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 return ret;
 }
-EXPORT_SYMBOL(atomic_cmpxchg);
+EXPORT_SYMBOL(arch_atomic_cmpxchg);

-int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 int ret;
 unsigned long flags;
@@ -107,10 +107,10 @@ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 return ret;
 }
-EXPORT_SYMBOL(atomic_fetch_add_unless);
+EXPORT_SYMBOL(arch_atomic_fetch_add_unless);

 /* Atomic operations are already serializing */
-void atomic_set(atomic_t *v, int i)
+void arch_atomic_set(atomic_t *v, int i)
 {
 unsigned long flags;

@@ -118,7 +118,7 @@ void atomic_set(atomic_t *v, int i)
 v->counter = i;
 spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
 }
-EXPORT_SYMBOL(atomic_set);
+EXPORT_SYMBOL(arch_atomic_set);

 unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
 {
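The sparc32 library above (together with the "Emulate cmpxchg() the same way
we emulate atomics" comment in cmpxchg_32.h) implements atomics as plain
read-modify-write sequences guarded by a spinlock picked from a small hash
table indexed by the object's address, which is what ATOMIC_HASH(v) does. A
stand-alone userspace sketch of that technique, with pthread mutexes standing
in for the kernel's IRQ-safe spinlocks (all names below are illustrative, not
kernel APIs):

#include <pthread.h>
#include <stdint.h>

/*
 * Hash of locks: independent objects usually map to different locks, so
 * unrelated "atomics" rarely serialize against each other.  The range
 * initializer is a GNU C extension (the kernel uses the same idiom).
 */
#define EMU_HASH_SIZE 16
static pthread_mutex_t emu_locks[EMU_HASH_SIZE] = {
	[0 ... EMU_HASH_SIZE - 1] = PTHREAD_MUTEX_INITIALIZER
};

static pthread_mutex_t *emu_hash(const void *addr)
{
	/* Drop the low (alignment) bits, then index into the lock array. */
	return &emu_locks[((uintptr_t)addr >> 4) % EMU_HASH_SIZE];
}

/* Emulated fetch_add: lock the hashed mutex, do a plain RMW, unlock. */
int emu_fetch_add(int *v, int i)
{
	pthread_mutex_t *lock = emu_hash(v);
	int old;

	pthread_mutex_lock(lock);
	old = *v;
	*v += i;
	pthread_mutex_unlock(lock);
	return old;
}

/* Emulated cmpxchg follows exactly the same pattern. */
int emu_cmpxchg(int *v, int old, int new_)
{
	pthread_mutex_t *lock = emu_hash(v);
	int prev;

	pthread_mutex_lock(lock);
	prev = *v;
	if (prev == old)
		*v = new_;
	pthread_mutex_unlock(lock);
	return prev;
}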
--- a/arch/sparc/lib/atomic_64.S
+++ b/arch/sparc/lib/atomic_64.S
@@ -19,7 +19,7 @@
 */

 #define ATOMIC_OP(op) \
-ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
 BACKOFF_SETUP(%o2); \
 1: lduw [%o1], %g1; \
 op %g1, %o0, %g7; \
@@ -30,11 +30,11 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
 retl; \
 nop; \
 2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic_##op); \
-EXPORT_SYMBOL(atomic_##op);
+ENDPROC(arch_atomic_##op); \
+EXPORT_SYMBOL(arch_atomic_##op);

 #define ATOMIC_OP_RETURN(op) \
-ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */\
 BACKOFF_SETUP(%o2); \
 1: lduw [%o1], %g1; \
 op %g1, %o0, %g7; \
@@ -45,11 +45,11 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
 retl; \
 sra %g1, 0, %o0; \
 2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic_##op##_return); \
-EXPORT_SYMBOL(atomic_##op##_return);
+ENDPROC(arch_atomic_##op##_return); \
+EXPORT_SYMBOL(arch_atomic_##op##_return);

 #define ATOMIC_FETCH_OP(op) \
-ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
 BACKOFF_SETUP(%o2); \
 1: lduw [%o1], %g1; \
 op %g1, %o0, %g7; \
@@ -60,8 +60,8 @@ ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
 retl; \
 sra %g1, 0, %o0; \
 2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic_fetch_##op); \
-EXPORT_SYMBOL(atomic_fetch_##op);
+ENDPROC(arch_atomic_fetch_##op); \
+EXPORT_SYMBOL(arch_atomic_fetch_##op);

 ATOMIC_OP(add)
 ATOMIC_OP_RETURN(add)
@@ -85,7 +85,7 @@ ATOMIC_FETCH_OP(xor)
 #undef ATOMIC_OP

 #define ATOMIC64_OP(op) \
-ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
 BACKOFF_SETUP(%o2); \
 1: ldx [%o1], %g1; \
 op %g1, %o0, %g7; \
@@ -96,11 +96,11 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
 retl; \
 nop; \
 2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic64_##op); \
-EXPORT_SYMBOL(atomic64_##op);
+ENDPROC(arch_atomic64_##op); \
+EXPORT_SYMBOL(arch_atomic64_##op);

 #define ATOMIC64_OP_RETURN(op) \
-ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
 BACKOFF_SETUP(%o2); \
 1: ldx [%o1], %g1; \
 op %g1, %o0, %g7; \
@@ -111,11 +111,11 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
 retl; \
 op %g1, %o0, %o0; \
 2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic64_##op##_return); \
-EXPORT_SYMBOL(atomic64_##op##_return);
+ENDPROC(arch_atomic64_##op##_return); \
+EXPORT_SYMBOL(arch_atomic64_##op##_return);

 #define ATOMIC64_FETCH_OP(op) \
-ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
+ENTRY(arch_atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
 BACKOFF_SETUP(%o2); \
 1: ldx [%o1], %g1; \
 op %g1, %o0, %g7; \
@@ -126,8 +126,8 @@ ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
 retl; \
 mov %g1, %o0; \
 2: BACKOFF_SPIN(%o2, %o3, 1b); \
-ENDPROC(atomic64_fetch_##op); \
-EXPORT_SYMBOL(atomic64_fetch_##op);
+ENDPROC(arch_atomic64_fetch_##op); \
+EXPORT_SYMBOL(arch_atomic64_fetch_##op);

 ATOMIC64_OP(add)
 ATOMIC64_OP_RETURN(add)
@@ -150,7 +150,7 @@ ATOMIC64_FETCH_OP(xor)
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP

-ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
+ENTRY(arch_atomic64_dec_if_positive) /* %o0 = atomic_ptr */
 BACKOFF_SETUP(%o2)
 1: ldx [%o0], %g1
 brlez,pn %g1, 3f
@@ -162,5 +162,5 @@ ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
 3: retl
 sub %g1, 1, %o0
 2: BACKOFF_SPIN(%o2, %o3, 1b)
-ENDPROC(atomic64_dec_if_positive)
-EXPORT_SYMBOL(atomic64_dec_if_positive)
+ENDPROC(arch_atomic64_dec_if_positive)
+EXPORT_SYMBOL(arch_atomic64_dec_if_positive)
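The sparc64 assembly above implements every operation as a load / modify /
compare-and-swap retry loop, with BACKOFF_SPIN inserting a growing delay
between failed attempts. A rough C11 rendering of that shape (illustrative
only; the kernel's implementation is the assembly shown above):

#include <stdatomic.h>

/* Retry loop with exponential backoff, mirroring the lduw/op/cas/
 * BACKOFF_SPIN sequence in atomic_64.S.
 */
int emu_atomic_add_return(_Atomic int *v, int i)
{
	unsigned int spins = 1;
	int old, new_;

	for (;;) {
		old = atomic_load_explicit(v, memory_order_relaxed);
		new_ = old + i;
		if (atomic_compare_exchange_weak_explicit(v, &old, new_,
							  memory_order_seq_cst,
							  memory_order_relaxed))
			return new_;

		/* Contention: back off before retrying, roughly doubling
		 * the delay up to a cap, as BACKOFF_SPIN does.
		 */
		for (volatile unsigned int n = 0; n < spins; n++)
			;
		if (spins < 4096)
			spins <<= 1;
	}
}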