Merge tag 'locking-core-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:

 - Core locking & atomics:

     - Convert all architectures to ARCH_ATOMIC: move every architecture
       to ARCH_ATOMIC, then get rid of ARCH_ATOMIC and all the
       transitory facilities and #ifdefs (an illustrative sketch of the
       resulting arch_atomic_*() layering follows this list).

       The series brings a substantial reduction in complexity:

           63 files changed, 756 insertions(+), 4094 deletions(-)

     - Self-test enhancements

 - Futexes:

     - Add the new FUTEX_LOCK_PI2 ABI, a variant that doesn't set
       FLAGS_CLOCKRT (i.e. it uses CLOCK_MONOTONIC); see the usage
       sketch after the commit list below.

       [ The temptation to repurpose FUTEX_LOCK_PI's implicit setting of
         FLAGS_CLOCKRT & invert the flag's meaning to avoid having to
         introduce a new variant was resisted successfully. ]

     - Enhance futex self-tests

 - Lockdep:

     - Fix dependency path printouts

     - Optimize trace saving

     - Broaden & fix wait-context checks

 - Misc cleanups and fixes.
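
To make the end state of the ARCH_ATOMIC conversion concrete: every
architecture now supplies arch_atomic_*() (plus arch_atomic64_*(),
arch_xchg(), arch_cmpxchg(), ...) primitives, and the generic,
script-generated instrumented wrappers build the plain atomic_*() API
on top of them, adding KASAN/KCSAN instrumentation before calling
down. A hand-written, simplified sketch of that layering (not the
verbatim generated code):

    /*
     * Simplified sketch of the instrumented wrappers layered over an
     * architecture's arch_atomic_*() primitives. The real wrappers are
     * script-generated; this hand-written excerpt is illustrative only.
     */
    #include <linux/instrumented.h>
    #include <asm/atomic.h>         /* the architecture's arch_atomic_*() */

    static __always_inline int atomic_read(const atomic_t *v)
    {
            instrument_atomic_read(v, sizeof(*v));
            return arch_atomic_read(v);
    }

    static __always_inline void atomic_add(int i, atomic_t *v)
    {
            instrument_atomic_read_write(v, sizeof(*v));
            arch_atomic_add(i, v);
    }

This layering is why the per-architecture diffs below are almost
entirely mechanical atomic_*() -> arch_atomic_*() renames.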

* tag 'locking-core-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (52 commits)
  locking/lockdep: Correct the description error for check_redundant()
  futex: Provide FUTEX_LOCK_PI2 to support clock selection
  futex: Prepare futex_lock_pi() for runtime clock selection
  lockdep/selftest: Remove wait-type RCU_CALLBACK tests
  lockdep/selftests: Fix selftests vs PROVE_RAW_LOCK_NESTING
  lockdep: Fix wait-type for empty stack
  locking/selftests: Add a selftest for check_irq_usage()
  lockding/lockdep: Avoid to find wrong lock dep path in check_irq_usage()
  locking/lockdep: Remove the unnecessary trace saving
  locking/lockdep: Fix the dep path printing for backwards BFS
  selftests: futex: Add futex compare requeue test
  selftests: futex: Add futex wait test
  seqlock: Remove trailing semicolon in macros
  locking/lockdep: Reduce LOCKDEP dependency list
  locking/lockdep,doc: Improve readability of the block matrix
  locking/atomics: atomic-instrumented: simplify ifdeffery
  locking/atomic: delete !ARCH_ATOMIC remnants
  locking/atomic: xtensa: move to ARCH_ATOMIC
  locking/atomic: sparc: move to ARCH_ATOMIC
  locking/atomic: sh: move to ARCH_ATOMIC
  ...
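
To put the new futex ABI in userspace terms: FUTEX_LOCK_PI keeps
interpreting its absolute timeout against CLOCK_REALTIME, while
FUTEX_LOCK_PI2 defaults to CLOCK_MONOTONIC (CLOCK_REALTIME can still
be requested by OR-ing FUTEX_CLOCK_REALTIME into the operation). A
minimal sketch using the raw syscall (glibc provides no futex wrapper);
the helper names are illustrative, and the fallback #define carries
the value the series adds to the uapi header:

    /* Sketch: acquire a PI futex with an absolute CLOCK_MONOTONIC deadline. */
    #include <linux/futex.h>
    #include <sys/syscall.h>
    #include <errno.h>
    #include <stdint.h>
    #include <time.h>
    #include <unistd.h>

    #ifndef FUTEX_LOCK_PI2
    #define FUTEX_LOCK_PI2  13      /* value from the updated uapi futex.h */
    #endif

    static long futex_lock_pi2(uint32_t *uaddr, const struct timespec *abs_monotonic)
    {
            /* val and uaddr2 are unused by FUTEX_LOCK_PI2; the timeout is absolute. */
            return syscall(SYS_futex, uaddr, FUTEX_LOCK_PI2, 0, abs_monotonic, NULL, 0);
    }

    int lock_with_deadline(uint32_t *futex_word)
    {
            struct timespec deadline;

            clock_gettime(CLOCK_MONOTONIC, &deadline);
            deadline.tv_sec += 1;   /* give up after roughly one second */

            if (futex_lock_pi2(futex_word, &deadline) == 0)
                    return 0;       /* acquired: the futex word now holds our TID */
            return -errno;          /* e.g. ETIMEDOUT, or ENOSYS where FUTEX_LOCK_PI2 is unknown */
    }
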
Linus Torvalds 2021-06-28 11:45:29 -07:00
commit a15286c63d
77 changed files with 1394 additions and 4156 deletions


@@ -453,9 +453,9 @@ There are simply four block conditions:
 Block condition matrix, Y means the row blocks the column, and N means otherwise.
 +---+---+---+---+
-|   | E | r | R |
+|   | W | r | R |
 +---+---+---+---+
-| E | Y | Y | Y |
+| W | Y | Y | Y |
 +---+---+---+---+
 | r | Y | Y | N |
 +---+---+---+---+


@ -26,11 +26,11 @@
#define ATOMIC64_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) }
#define atomic_read(v) READ_ONCE((v)->counter) #define arch_atomic_read(v) READ_ONCE((v)->counter)
#define atomic64_read(v) READ_ONCE((v)->counter) #define arch_atomic64_read(v) READ_ONCE((v)->counter)
#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i)) #define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
#define atomic64_set(v,i) WRITE_ONCE((v)->counter, (i)) #define arch_atomic64_set(v,i) WRITE_ONCE((v)->counter, (i))
/* /*
* To get proper branch prediction for the main line, we must branch * To get proper branch prediction for the main line, we must branch
@ -39,7 +39,7 @@
*/ */
#define ATOMIC_OP(op, asm_op) \ #define ATOMIC_OP(op, asm_op) \
static __inline__ void atomic_##op(int i, atomic_t * v) \ static __inline__ void arch_atomic_##op(int i, atomic_t * v) \
{ \ { \
unsigned long temp; \ unsigned long temp; \
__asm__ __volatile__( \ __asm__ __volatile__( \
@ -55,7 +55,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
} \ } \
#define ATOMIC_OP_RETURN(op, asm_op) \ #define ATOMIC_OP_RETURN(op, asm_op) \
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \ static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \ { \
long temp, result; \ long temp, result; \
__asm__ __volatile__( \ __asm__ __volatile__( \
@ -74,7 +74,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
} }
#define ATOMIC_FETCH_OP(op, asm_op) \ #define ATOMIC_FETCH_OP(op, asm_op) \
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \ { \
long temp, result; \ long temp, result; \
__asm__ __volatile__( \ __asm__ __volatile__( \
@ -92,7 +92,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
} }
#define ATOMIC64_OP(op, asm_op) \ #define ATOMIC64_OP(op, asm_op) \
static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \ static __inline__ void arch_atomic64_##op(s64 i, atomic64_t * v) \
{ \ { \
s64 temp; \ s64 temp; \
__asm__ __volatile__( \ __asm__ __volatile__( \
@ -108,7 +108,8 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
} \ } \
#define ATOMIC64_OP_RETURN(op, asm_op) \ #define ATOMIC64_OP_RETURN(op, asm_op) \
static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \ static __inline__ s64 \
arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
{ \ { \
s64 temp, result; \ s64 temp, result; \
__asm__ __volatile__( \ __asm__ __volatile__( \
@ -127,7 +128,8 @@ static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
} }
#define ATOMIC64_FETCH_OP(op, asm_op) \ #define ATOMIC64_FETCH_OP(op, asm_op) \
static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \ static __inline__ s64 \
arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
{ \ { \
s64 temp, result; \ s64 temp, result; \
__asm__ __volatile__( \ __asm__ __volatile__( \
@ -155,18 +157,18 @@ static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
ATOMIC_OPS(add) ATOMIC_OPS(add)
ATOMIC_OPS(sub) ATOMIC_OPS(sub)
#define atomic_add_return_relaxed atomic_add_return_relaxed #define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed #define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed #define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed #define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
#define atomic64_add_return_relaxed atomic64_add_return_relaxed #define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed #define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed #define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed #define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#define atomic_andnot atomic_andnot #define arch_atomic_andnot arch_atomic_andnot
#define atomic64_andnot atomic64_andnot #define arch_atomic64_andnot arch_atomic64_andnot
#undef ATOMIC_OPS #undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm) \ #define ATOMIC_OPS(op, asm) \
@ -180,15 +182,15 @@ ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis) ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor) ATOMIC_OPS(xor, xor)
#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed #define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed #define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed #define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed #define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed #define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed #define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed #define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed #define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#undef ATOMIC_OPS #undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP #undef ATOMIC64_FETCH_OP
@ -198,14 +200,18 @@ ATOMIC_OPS(xor, xor)
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP #undef ATOMIC_OP
#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) #define arch_atomic64_cmpxchg(v, old, new) \
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) (arch_cmpxchg(&((v)->counter), old, new))
#define arch_atomic64_xchg(v, new) \
(arch_xchg(&((v)->counter), new))
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) #define arch_atomic_cmpxchg(v, old, new) \
#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) (arch_cmpxchg(&((v)->counter), old, new))
#define arch_atomic_xchg(v, new) \
(arch_xchg(&((v)->counter), new))
/** /**
* atomic_fetch_add_unless - add unless the number is a given value * arch_atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t * @v: pointer of type atomic_t
* @a: the amount to add to v... * @a: the amount to add to v...
* @u: ...unless v is equal to u. * @u: ...unless v is equal to u.
@ -213,7 +219,7 @@ ATOMIC_OPS(xor, xor)
* Atomically adds @a to @v, so long as it was not @u. * Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v. * Returns the old value of @v.
*/ */
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u) static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{ {
int c, new, old; int c, new, old;
smp_mb(); smp_mb();
@ -234,10 +240,10 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
smp_mb(); smp_mb();
return old; return old;
} }
#define atomic_fetch_add_unless atomic_fetch_add_unless #define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
/** /**
* atomic64_fetch_add_unless - add unless the number is a given value * arch_atomic64_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t * @v: pointer of type atomic64_t
* @a: the amount to add to v... * @a: the amount to add to v...
* @u: ...unless v is equal to u. * @u: ...unless v is equal to u.
@ -245,7 +251,7 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
* Atomically adds @a to @v, so long as it was not @u. * Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v. * Returns the old value of @v.
*/ */
static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{ {
s64 c, new, old; s64 c, new, old;
smp_mb(); smp_mb();
@ -266,16 +272,16 @@ static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
smp_mb(); smp_mb();
return old; return old;
} }
#define atomic64_fetch_add_unless atomic64_fetch_add_unless #define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
/* /*
* atomic64_dec_if_positive - decrement by 1 if old value positive * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic_t * @v: pointer of type atomic_t
* *
* The function returns the old value of *v minus 1, even if * The function returns the old value of *v minus 1, even if
* the atomic variable, v, was not decremented. * the atomic variable, v, was not decremented.
*/ */
static inline s64 atomic64_dec_if_positive(atomic64_t *v) static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{ {
s64 old, tmp; s64 old, tmp;
smp_mb(); smp_mb();
@ -295,6 +301,6 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
smp_mb(); smp_mb();
return old - 1; return old - 1;
} }
#define atomic64_dec_if_positive atomic64_dec_if_positive #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
#endif /* _ALPHA_ATOMIC_H */ #endif /* _ALPHA_ATOMIC_H */


@ -17,7 +17,7 @@
sizeof(*(ptr))); \ sizeof(*(ptr))); \
}) })
#define cmpxchg_local(ptr, o, n) \ #define arch_cmpxchg_local(ptr, o, n) \
({ \ ({ \
__typeof__(*(ptr)) _o_ = (o); \ __typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \ __typeof__(*(ptr)) _n_ = (n); \
@ -26,7 +26,7 @@
sizeof(*(ptr))); \ sizeof(*(ptr))); \
}) })
#define cmpxchg64_local(ptr, o, n) \ #define arch_cmpxchg64_local(ptr, o, n) \
({ \ ({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \ cmpxchg_local((ptr), (o), (n)); \
@ -42,7 +42,7 @@
* The leading and the trailing memory barriers guarantee that these * The leading and the trailing memory barriers guarantee that these
* operations are fully ordered. * operations are fully ordered.
*/ */
#define xchg(ptr, x) \ #define arch_xchg(ptr, x) \
({ \ ({ \
__typeof__(*(ptr)) __ret; \ __typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _x_ = (x); \ __typeof__(*(ptr)) _x_ = (x); \
@ -53,7 +53,7 @@
__ret; \ __ret; \
}) })
#define cmpxchg(ptr, o, n) \ #define arch_cmpxchg(ptr, o, n) \
({ \ ({ \
__typeof__(*(ptr)) __ret; \ __typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _o_ = (o); \ __typeof__(*(ptr)) _o_ = (o); \
@ -65,10 +65,10 @@
__ret; \ __ret; \
}) })
#define cmpxchg64(ptr, o, n) \ #define arch_cmpxchg64(ptr, o, n) \
({ \ ({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg((ptr), (o), (n)); \ arch_cmpxchg((ptr), (o), (n)); \
}) })
#undef ____cmpxchg #undef ____cmpxchg


@ -14,14 +14,14 @@
#include <asm/barrier.h> #include <asm/barrier.h>
#include <asm/smp.h> #include <asm/smp.h>
#define atomic_read(v) READ_ONCE((v)->counter) #define arch_atomic_read(v) READ_ONCE((v)->counter)
#ifdef CONFIG_ARC_HAS_LLSC #ifdef CONFIG_ARC_HAS_LLSC
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) #define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP(op, c_op, asm_op) \ #define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \ static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \ { \
unsigned int val; \ unsigned int val; \
\ \
@ -37,7 +37,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \ } \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \ #define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \ static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \ { \
unsigned int val; \ unsigned int val; \
\ \
@ -63,7 +63,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
} }
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \ #define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \ static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \ { \
unsigned int val, orig; \ unsigned int val, orig; \
\ \
@ -94,11 +94,11 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
#ifndef CONFIG_SMP #ifndef CONFIG_SMP
/* violating atomic_xxx API locking protocol in UP for optimization sake */ /* violating atomic_xxx API locking protocol in UP for optimization sake */
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) #define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#else #else
static inline void atomic_set(atomic_t *v, int i) static inline void arch_atomic_set(atomic_t *v, int i)
{ {
/* /*
* Independent of hardware support, all of the atomic_xxx() APIs need * Independent of hardware support, all of the atomic_xxx() APIs need
@ -116,7 +116,7 @@ static inline void atomic_set(atomic_t *v, int i)
atomic_ops_unlock(flags); atomic_ops_unlock(flags);
} }
#define atomic_set_release(v, i) atomic_set((v), (i)) #define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
#endif #endif
@ -126,7 +126,7 @@ static inline void atomic_set(atomic_t *v, int i)
*/ */
#define ATOMIC_OP(op, c_op, asm_op) \ #define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \ static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \ { \
unsigned long flags; \ unsigned long flags; \
\ \
@ -136,7 +136,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} }
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \ #define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \ static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \ { \
unsigned long flags; \ unsigned long flags; \
unsigned long temp; \ unsigned long temp; \
@ -154,7 +154,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
} }
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \ #define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \ static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \ { \
unsigned long flags; \ unsigned long flags; \
unsigned long orig; \ unsigned long orig; \
@ -180,9 +180,6 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
ATOMIC_OPS(add, +=, add) ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub) ATOMIC_OPS(sub, -=, sub)
#define atomic_andnot atomic_andnot
#define atomic_fetch_andnot atomic_fetch_andnot
#undef ATOMIC_OPS #undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \ #define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \ ATOMIC_OP(op, c_op, asm_op) \
@ -193,6 +190,9 @@ ATOMIC_OPS(andnot, &= ~, bic)
ATOMIC_OPS(or, |=, or) ATOMIC_OPS(or, |=, or)
ATOMIC_OPS(xor, ^=, xor) ATOMIC_OPS(xor, ^=, xor)
#define arch_atomic_andnot arch_atomic_andnot
#define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
#undef ATOMIC_OPS #undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP #undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
@ -220,7 +220,7 @@ typedef struct {
#define ATOMIC64_INIT(a) { (a) } #define ATOMIC64_INIT(a) { (a) }
static inline s64 atomic64_read(const atomic64_t *v) static inline s64 arch_atomic64_read(const atomic64_t *v)
{ {
s64 val; s64 val;
@ -232,7 +232,7 @@ static inline s64 atomic64_read(const atomic64_t *v)
return val; return val;
} }
static inline void atomic64_set(atomic64_t *v, s64 a) static inline void arch_atomic64_set(atomic64_t *v, s64 a)
{ {
/* /*
* This could have been a simple assignment in "C" but would need * This could have been a simple assignment in "C" but would need
@ -253,7 +253,7 @@ static inline void atomic64_set(atomic64_t *v, s64 a)
} }
#define ATOMIC64_OP(op, op1, op2) \ #define ATOMIC64_OP(op, op1, op2) \
static inline void atomic64_##op(s64 a, atomic64_t *v) \ static inline void arch_atomic64_##op(s64 a, atomic64_t *v) \
{ \ { \
s64 val; \ s64 val; \
\ \
@ -270,7 +270,7 @@ static inline void atomic64_##op(s64 a, atomic64_t *v) \
} \ } \
#define ATOMIC64_OP_RETURN(op, op1, op2) \ #define ATOMIC64_OP_RETURN(op, op1, op2) \
static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v) \ static inline s64 arch_atomic64_##op##_return(s64 a, atomic64_t *v) \
{ \ { \
s64 val; \ s64 val; \
\ \
@ -293,7 +293,7 @@ static inline s64 atomic64_##op##_return(s64 a, atomic64_t *v) \
} }
#define ATOMIC64_FETCH_OP(op, op1, op2) \ #define ATOMIC64_FETCH_OP(op, op1, op2) \
static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \ static inline s64 arch_atomic64_fetch_##op(s64 a, atomic64_t *v) \
{ \ { \
s64 val, orig; \ s64 val, orig; \
\ \
@ -320,9 +320,6 @@ static inline s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \
ATOMIC64_OP_RETURN(op, op1, op2) \ ATOMIC64_OP_RETURN(op, op1, op2) \
ATOMIC64_FETCH_OP(op, op1, op2) ATOMIC64_FETCH_OP(op, op1, op2)
#define atomic64_andnot atomic64_andnot
#define atomic64_fetch_andnot atomic64_fetch_andnot
ATOMIC64_OPS(add, add.f, adc) ATOMIC64_OPS(add, add.f, adc)
ATOMIC64_OPS(sub, sub.f, sbc) ATOMIC64_OPS(sub, sub.f, sbc)
ATOMIC64_OPS(and, and, and) ATOMIC64_OPS(and, and, and)
@ -330,13 +327,16 @@ ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, or, or) ATOMIC64_OPS(or, or, or)
ATOMIC64_OPS(xor, xor, xor) ATOMIC64_OPS(xor, xor, xor)
#define arch_atomic64_andnot arch_atomic64_andnot
#define arch_atomic64_fetch_andnot arch_atomic64_fetch_andnot
#undef ATOMIC64_OPS #undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP #undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP #undef ATOMIC64_OP
static inline s64 static inline s64
atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new) arch_atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
{ {
s64 prev; s64 prev;
@ -358,7 +358,7 @@ atomic64_cmpxchg(atomic64_t *ptr, s64 expected, s64 new)
return prev; return prev;
} }
static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new) static inline s64 arch_atomic64_xchg(atomic64_t *ptr, s64 new)
{ {
s64 prev; s64 prev;
@ -379,14 +379,14 @@ static inline s64 atomic64_xchg(atomic64_t *ptr, s64 new)
} }
/** /**
* atomic64_dec_if_positive - decrement by 1 if old value positive * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic64_t * @v: pointer of type atomic64_t
* *
* The function returns the old value of *v minus 1, even if * The function returns the old value of *v minus 1, even if
* the atomic variable, v, was not decremented. * the atomic variable, v, was not decremented.
*/ */
static inline s64 atomic64_dec_if_positive(atomic64_t *v) static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{ {
s64 val; s64 val;
@ -408,10 +408,10 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
return val; return val;
} }
#define atomic64_dec_if_positive atomic64_dec_if_positive #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
/** /**
* atomic64_fetch_add_unless - add unless the number is a given value * arch_atomic64_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t * @v: pointer of type atomic64_t
* @a: the amount to add to v... * @a: the amount to add to v...
* @u: ...unless v is equal to u. * @u: ...unless v is equal to u.
@ -419,7 +419,7 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
* Atomically adds @a to @v, if it was not @u. * Atomically adds @a to @v, if it was not @u.
* Returns the old value of @v * Returns the old value of @v
*/ */
static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{ {
s64 old, temp; s64 old, temp;
@ -443,7 +443,7 @@ static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
return old; return old;
} }
#define atomic64_fetch_add_unless atomic64_fetch_add_unless #define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif /* !CONFIG_GENERIC_ATOMIC64 */ #endif /* !CONFIG_GENERIC_ATOMIC64 */


@ -63,7 +63,7 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
#endif #endif
#define cmpxchg(ptr, o, n) ({ \ #define arch_cmpxchg(ptr, o, n) ({ \
(typeof(*(ptr)))__cmpxchg((ptr), \ (typeof(*(ptr)))__cmpxchg((ptr), \
(unsigned long)(o), \ (unsigned long)(o), \
(unsigned long)(n)); \ (unsigned long)(n)); \
@ -75,7 +75,7 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
* !LLSC: cmpxchg() has to use an external lock atomic_ops_lock to guarantee * !LLSC: cmpxchg() has to use an external lock atomic_ops_lock to guarantee
* semantics, and this lock also happens to be used by atomic_*() * semantics, and this lock also happens to be used by atomic_*()
*/ */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) #define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))
/* /*
@ -123,7 +123,7 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP) #if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)
#define xchg(ptr, with) \ #define arch_xchg(ptr, with) \
({ \ ({ \
unsigned long flags; \ unsigned long flags; \
typeof(*(ptr)) old_val; \ typeof(*(ptr)) old_val; \
@ -136,7 +136,7 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
#else #else
#define xchg(ptr, with) _xchg(ptr, with) #define arch_xchg(ptr, with) _xchg(ptr, with)
#endif #endif
@ -153,6 +153,6 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
* can't be clobbered by others. Thus no serialization required when * can't be clobbered by others. Thus no serialization required when
* atomic_xchg is involved. * atomic_xchg is involved.
*/ */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) #define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#endif #endif


@ -22,8 +22,8 @@
* strex/ldrex monitor on some implementations. The reason we can use it for * strex/ldrex monitor on some implementations. The reason we can use it for
* atomic_set() is the clrex or dummy strex done on every exception return. * atomic_set() is the clrex or dummy strex done on every exception return.
*/ */
#define atomic_read(v) READ_ONCE((v)->counter) #define arch_atomic_read(v) READ_ONCE((v)->counter)
#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i)) #define arch_atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
#if __LINUX_ARM_ARCH__ >= 6 #if __LINUX_ARM_ARCH__ >= 6
@ -34,7 +34,7 @@
*/ */
#define ATOMIC_OP(op, c_op, asm_op) \ #define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \ static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \ { \
unsigned long tmp; \ unsigned long tmp; \
int result; \ int result; \
@ -52,7 +52,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \ } \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \ #define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \ static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \ { \
unsigned long tmp; \ unsigned long tmp; \
int result; \ int result; \
@ -73,7 +73,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
} }
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \ #define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \ static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \ { \
unsigned long tmp; \ unsigned long tmp; \
int result, val; \ int result, val; \
@ -93,17 +93,17 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
return result; \ return result; \
} }
#define atomic_add_return_relaxed atomic_add_return_relaxed #define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed #define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed #define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed #define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed #define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed #define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed #define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed #define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new) static inline int arch_atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
{ {
int oldval; int oldval;
unsigned long res; unsigned long res;
@ -123,9 +123,9 @@ static inline int atomic_cmpxchg_relaxed(atomic_t *ptr, int old, int new)
return oldval; return oldval;
} }
#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed #define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{ {
int oldval, newval; int oldval, newval;
unsigned long tmp; unsigned long tmp;
@ -151,7 +151,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
return oldval; return oldval;
} }
#define atomic_fetch_add_unless atomic_fetch_add_unless #define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
#else /* ARM_ARCH_6 */ #else /* ARM_ARCH_6 */
@ -160,7 +160,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
#endif #endif
#define ATOMIC_OP(op, c_op, asm_op) \ #define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \ static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \ { \
unsigned long flags; \ unsigned long flags; \
\ \
@ -170,7 +170,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \ } \
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \ #define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \ static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \ { \
unsigned long flags; \ unsigned long flags; \
int val; \ int val; \
@ -184,7 +184,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
} }
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \ #define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \ static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \ { \
unsigned long flags; \ unsigned long flags; \
int val; \ int val; \
@ -197,7 +197,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
return val; \ return val; \
} }
static inline int atomic_cmpxchg(atomic_t *v, int old, int new) static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{ {
int ret; int ret;
unsigned long flags; unsigned long flags;
@ -211,7 +211,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
return ret; return ret;
} }
#define atomic_fetch_andnot atomic_fetch_andnot #define arch_atomic_fetch_andnot arch_atomic_fetch_andnot
#endif /* __LINUX_ARM_ARCH__ */ #endif /* __LINUX_ARM_ARCH__ */
@ -223,7 +223,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
ATOMIC_OPS(add, +=, add) ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub) ATOMIC_OPS(sub, -=, sub)
#define atomic_andnot atomic_andnot #define arch_atomic_andnot arch_atomic_andnot
#undef ATOMIC_OPS #undef ATOMIC_OPS
#define ATOMIC_OPS(op, c_op, asm_op) \ #define ATOMIC_OPS(op, c_op, asm_op) \
@ -240,7 +240,7 @@ ATOMIC_OPS(xor, ^=, eor)
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP #undef ATOMIC_OP
#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) #define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#ifndef CONFIG_GENERIC_ATOMIC64 #ifndef CONFIG_GENERIC_ATOMIC64
typedef struct { typedef struct {
@ -250,7 +250,7 @@ typedef struct {
#define ATOMIC64_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) }
#ifdef CONFIG_ARM_LPAE #ifdef CONFIG_ARM_LPAE
static inline s64 atomic64_read(const atomic64_t *v) static inline s64 arch_atomic64_read(const atomic64_t *v)
{ {
s64 result; s64 result;
@ -263,7 +263,7 @@ static inline s64 atomic64_read(const atomic64_t *v)
return result; return result;
} }
static inline void atomic64_set(atomic64_t *v, s64 i) static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{ {
__asm__ __volatile__("@ atomic64_set\n" __asm__ __volatile__("@ atomic64_set\n"
" strd %2, %H2, [%1]" " strd %2, %H2, [%1]"
@ -272,7 +272,7 @@ static inline void atomic64_set(atomic64_t *v, s64 i)
); );
} }
#else #else
static inline s64 atomic64_read(const atomic64_t *v) static inline s64 arch_atomic64_read(const atomic64_t *v)
{ {
s64 result; s64 result;
@ -285,7 +285,7 @@ static inline s64 atomic64_read(const atomic64_t *v)
return result; return result;
} }
static inline void atomic64_set(atomic64_t *v, s64 i) static inline void arch_atomic64_set(atomic64_t *v, s64 i)
{ {
s64 tmp; s64 tmp;
@ -302,7 +302,7 @@ static inline void atomic64_set(atomic64_t *v, s64 i)
#endif #endif
#define ATOMIC64_OP(op, op1, op2) \ #define ATOMIC64_OP(op, op1, op2) \
static inline void atomic64_##op(s64 i, atomic64_t *v) \ static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \
{ \ { \
s64 result; \ s64 result; \
unsigned long tmp; \ unsigned long tmp; \
@ -322,7 +322,7 @@ static inline void atomic64_##op(s64 i, atomic64_t *v) \
#define ATOMIC64_OP_RETURN(op, op1, op2) \ #define ATOMIC64_OP_RETURN(op, op1, op2) \
static inline s64 \ static inline s64 \
atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \ arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
{ \ { \
s64 result; \ s64 result; \
unsigned long tmp; \ unsigned long tmp; \
@ -345,7 +345,7 @@ atomic64_##op##_return_relaxed(s64 i, atomic64_t *v) \
#define ATOMIC64_FETCH_OP(op, op1, op2) \ #define ATOMIC64_FETCH_OP(op, op1, op2) \
static inline s64 \ static inline s64 \
atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \ arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
{ \ { \
s64 result, val; \ s64 result, val; \
unsigned long tmp; \ unsigned long tmp; \
@ -374,34 +374,34 @@ atomic64_fetch_##op##_relaxed(s64 i, atomic64_t *v) \
ATOMIC64_OPS(add, adds, adc) ATOMIC64_OPS(add, adds, adc)
ATOMIC64_OPS(sub, subs, sbc) ATOMIC64_OPS(sub, subs, sbc)
#define atomic64_add_return_relaxed atomic64_add_return_relaxed #define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed #define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed #define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed #define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#undef ATOMIC64_OPS #undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, op1, op2) \ #define ATOMIC64_OPS(op, op1, op2) \
ATOMIC64_OP(op, op1, op2) \ ATOMIC64_OP(op, op1, op2) \
ATOMIC64_FETCH_OP(op, op1, op2) ATOMIC64_FETCH_OP(op, op1, op2)
#define atomic64_andnot atomic64_andnot #define arch_atomic64_andnot arch_atomic64_andnot
ATOMIC64_OPS(and, and, and) ATOMIC64_OPS(and, and, and)
ATOMIC64_OPS(andnot, bic, bic) ATOMIC64_OPS(andnot, bic, bic)
ATOMIC64_OPS(or, orr, orr) ATOMIC64_OPS(or, orr, orr)
ATOMIC64_OPS(xor, eor, eor) ATOMIC64_OPS(xor, eor, eor)
#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed #define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed #define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed #define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed #define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#undef ATOMIC64_OPS #undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP #undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP #undef ATOMIC64_OP
static inline s64 atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new) static inline s64 arch_atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
{ {
s64 oldval; s64 oldval;
unsigned long res; unsigned long res;
@ -422,9 +422,9 @@ static inline s64 atomic64_cmpxchg_relaxed(atomic64_t *ptr, s64 old, s64 new)
return oldval; return oldval;
} }
#define atomic64_cmpxchg_relaxed atomic64_cmpxchg_relaxed #define arch_atomic64_cmpxchg_relaxed arch_atomic64_cmpxchg_relaxed
static inline s64 atomic64_xchg_relaxed(atomic64_t *ptr, s64 new) static inline s64 arch_atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
{ {
s64 result; s64 result;
unsigned long tmp; unsigned long tmp;
@ -442,9 +442,9 @@ static inline s64 atomic64_xchg_relaxed(atomic64_t *ptr, s64 new)
return result; return result;
} }
#define atomic64_xchg_relaxed atomic64_xchg_relaxed #define arch_atomic64_xchg_relaxed arch_atomic64_xchg_relaxed
static inline s64 atomic64_dec_if_positive(atomic64_t *v) static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{ {
s64 result; s64 result;
unsigned long tmp; unsigned long tmp;
@ -470,9 +470,9 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
return result; return result;
} }
#define atomic64_dec_if_positive atomic64_dec_if_positive #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) static inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{ {
s64 oldval, newval; s64 oldval, newval;
unsigned long tmp; unsigned long tmp;
@ -500,7 +500,7 @@ static inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
return oldval; return oldval;
} }
#define atomic64_fetch_add_unless atomic64_fetch_add_unless #define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif /* !CONFIG_GENERIC_ATOMIC64 */ #endif /* !CONFIG_GENERIC_ATOMIC64 */
#endif #endif


@ -114,7 +114,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
return ret; return ret;
} }
#define xchg_relaxed(ptr, x) ({ \ #define arch_xchg_relaxed(ptr, x) ({ \
(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \ (__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
sizeof(*(ptr))); \ sizeof(*(ptr))); \
}) })
@ -128,20 +128,20 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
#error "SMP is not supported on this platform" #error "SMP is not supported on this platform"
#endif #endif
#define xchg xchg_relaxed #define arch_xchg arch_xchg_relaxed
/* /*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available. * them available.
*/ */
#define cmpxchg_local(ptr, o, n) ({ \ #define arch_cmpxchg_local(ptr, o, n) ({ \
(__typeof(*ptr))__cmpxchg_local_generic((ptr), \ (__typeof(*ptr))__generic_cmpxchg_local((ptr), \
(unsigned long)(o), \ (unsigned long)(o), \
(unsigned long)(n), \ (unsigned long)(n), \
sizeof(*(ptr))); \ sizeof(*(ptr))); \
}) })
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) #define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#include <asm-generic/cmpxchg.h> #include <asm-generic/cmpxchg.h>
@ -207,7 +207,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
return oldval; return oldval;
} }
#define cmpxchg_relaxed(ptr,o,n) ({ \ #define arch_cmpxchg_relaxed(ptr,o,n) ({ \
(__typeof__(*(ptr)))__cmpxchg((ptr), \ (__typeof__(*(ptr)))__cmpxchg((ptr), \
(unsigned long)(o), \ (unsigned long)(o), \
(unsigned long)(n), \ (unsigned long)(n), \
@ -224,7 +224,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
#ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */ #ifdef CONFIG_CPU_V6 /* min ARCH == ARMv6 */
case 1: case 1:
case 2: case 2:
ret = __cmpxchg_local_generic(ptr, old, new, size); ret = __generic_cmpxchg_local(ptr, old, new, size);
break; break;
#endif #endif
default: default:
@ -234,7 +234,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
return ret; return ret;
} }
#define cmpxchg_local(ptr, o, n) ({ \ #define arch_cmpxchg_local(ptr, o, n) ({ \
(__typeof(*ptr))__cmpxchg_local((ptr), \ (__typeof(*ptr))__cmpxchg_local((ptr), \
(unsigned long)(o), \ (unsigned long)(o), \
(unsigned long)(n), \ (unsigned long)(n), \
@ -266,13 +266,13 @@ static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
return oldval; return oldval;
} }
#define cmpxchg64_relaxed(ptr, o, n) ({ \ #define arch_cmpxchg64_relaxed(ptr, o, n) ({ \
(__typeof__(*(ptr)))__cmpxchg64((ptr), \ (__typeof__(*(ptr)))__cmpxchg64((ptr), \
(unsigned long long)(o), \ (unsigned long long)(o), \
(unsigned long long)(n)); \ (unsigned long long)(n)); \
}) })
#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n)) #define arch_cmpxchg64_local(ptr, o, n) arch_cmpxchg64_relaxed((ptr), (o), (n))
#endif /* __LINUX_ARM_ARCH__ >= 6 */ #endif /* __LINUX_ARM_ARCH__ >= 6 */


@ -21,7 +21,7 @@
#define sync_test_and_clear_bit(nr, p) _test_and_clear_bit(nr, p) #define sync_test_and_clear_bit(nr, p) _test_and_clear_bit(nr, p)
#define sync_test_and_change_bit(nr, p) _test_and_change_bit(nr, p) #define sync_test_and_change_bit(nr, p) _test_and_change_bit(nr, p)
#define sync_test_bit(nr, addr) test_bit(nr, addr) #define sync_test_bit(nr, addr) test_bit(nr, addr)
#define sync_cmpxchg cmpxchg #define arch_sync_cmpxchg arch_cmpxchg
#endif #endif


@ -223,6 +223,4 @@ static __always_inline long arch_atomic64_dec_if_positive(atomic64_t *v)
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
#define ARCH_ATOMIC
#endif /* __ASM_ATOMIC_H */ #endif /* __ASM_ATOMIC_H */


@ -31,7 +31,7 @@ extern void __bad_xchg(void);
__ret; \ __ret; \
}) })
#define xchg_relaxed(ptr, x) \ #define arch_xchg_relaxed(ptr, x) \
(__xchg_relaxed((x), (ptr), sizeof(*(ptr)))) (__xchg_relaxed((x), (ptr), sizeof(*(ptr))))
#define __cmpxchg_relaxed(ptr, old, new, size) \ #define __cmpxchg_relaxed(ptr, old, new, size) \
@ -61,14 +61,14 @@ extern void __bad_xchg(void);
__ret; \ __ret; \
}) })
#define cmpxchg_relaxed(ptr, o, n) \ #define arch_cmpxchg_relaxed(ptr, o, n) \
(__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr)))) (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
#define cmpxchg(ptr, o, n) \ #define arch_cmpxchg(ptr, o, n) \
({ \ ({ \
__typeof__(*(ptr)) __ret; \ __typeof__(*(ptr)) __ret; \
__smp_release_fence(); \ __smp_release_fence(); \
__ret = cmpxchg_relaxed(ptr, o, n); \ __ret = arch_cmpxchg_relaxed(ptr, o, n); \
__smp_acquire_fence(); \ __smp_acquire_fence(); \
__ret; \ __ret; \
}) })


@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
generic-y += asm-offsets.h generic-y += asm-offsets.h
generic-y += cmpxchg.h
generic-y += extable.h generic-y += extable.h
generic-y += kvm_para.h generic-y += kvm_para.h
generic-y += mcs_spinlock.h generic-y += mcs_spinlock.h


@ -1,97 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARCH_H8300_ATOMIC__
#define __ARCH_H8300_ATOMIC__
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/cmpxchg.h>
#include <asm/irqflags.h>
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*/
#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP_RETURN(op, c_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
h8300flags flags; \
int ret; \
\
flags = arch_local_irq_save(); \
ret = v->counter c_op i; \
arch_local_irq_restore(flags); \
return ret; \
}
#define ATOMIC_FETCH_OP(op, c_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
h8300flags flags; \
int ret; \
\
flags = arch_local_irq_save(); \
ret = v->counter; \
v->counter c_op i; \
arch_local_irq_restore(flags); \
return ret; \
}
#define ATOMIC_OP(op, c_op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
h8300flags flags; \
\
flags = arch_local_irq_save(); \
v->counter c_op i; \
arch_local_irq_restore(flags); \
}
ATOMIC_OP_RETURN(add, +=)
ATOMIC_OP_RETURN(sub, -=)
#define ATOMIC_OPS(op, c_op) \
ATOMIC_OP(op, c_op) \
ATOMIC_FETCH_OP(op, c_op)
ATOMIC_OPS(and, &=)
ATOMIC_OPS(or, |=)
ATOMIC_OPS(xor, ^=)
ATOMIC_OPS(add, +=)
ATOMIC_OPS(sub, -=)
#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
int ret;
h8300flags flags;
flags = arch_local_irq_save();
ret = v->counter;
if (likely(ret == old))
v->counter = new;
arch_local_irq_restore(flags);
return ret;
}
static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int ret;
h8300flags flags;
flags = arch_local_irq_save();
ret = v->counter;
if (ret != u)
v->counter += a;
arch_local_irq_restore(flags);
return ret;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
#endif /* __ARCH_H8300_ATOMIC __ */


@ -1,66 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARCH_H8300_CMPXCHG__
#define __ARCH_H8300_CMPXCHG__
#include <linux/irqflags.h>
#define xchg(ptr, x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), \
sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
static inline unsigned long __xchg(unsigned long x,
volatile void *ptr, int size)
{
unsigned long tmp, flags;
local_irq_save(flags);
switch (size) {
case 1:
__asm__ __volatile__
("mov.b %2,%0\n\t"
"mov.b %1,%2"
: "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)));
break;
case 2:
__asm__ __volatile__
("mov.w %2,%0\n\t"
"mov.w %1,%2"
: "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)));
break;
case 4:
__asm__ __volatile__
("mov.l %2,%0\n\t"
"mov.l %1,%2"
: "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)));
break;
default:
tmp = 0;
}
local_irq_restore(flags);
return tmp;
}
#include <asm-generic/cmpxchg-local.h>
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
#define cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#endif /* __ARCH_H8300_CMPXCHG__ */


@ -14,7 +14,7 @@
/* Normal writes in our arch don't clear lock reservations */ /* Normal writes in our arch don't clear lock reservations */
static inline void atomic_set(atomic_t *v, int new) static inline void arch_atomic_set(atomic_t *v, int new)
{ {
asm volatile( asm volatile(
"1: r6 = memw_locked(%0);\n" "1: r6 = memw_locked(%0);\n"
@ -26,26 +26,26 @@ static inline void atomic_set(atomic_t *v, int new)
); );
} }
#define atomic_set_release(v, i) atomic_set((v), (i)) #define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
/** /**
* atomic_read - reads a word, atomically * arch_atomic_read - reads a word, atomically
* @v: pointer to atomic value * @v: pointer to atomic value
* *
* Assumes all word reads on our architecture are atomic. * Assumes all word reads on our architecture are atomic.
*/ */
#define atomic_read(v) READ_ONCE((v)->counter) #define arch_atomic_read(v) READ_ONCE((v)->counter)
/** /**
* atomic_xchg - atomic * arch_atomic_xchg - atomic
* @v: pointer to memory to change * @v: pointer to memory to change
* @new: new value (technically passed in a register -- see xchg) * @new: new value (technically passed in a register -- see xchg)
*/ */
#define atomic_xchg(v, new) (xchg(&((v)->counter), (new))) #define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), (new)))
/** /**
* atomic_cmpxchg - atomic compare-and-exchange values * arch_atomic_cmpxchg - atomic compare-and-exchange values
* @v: pointer to value to change * @v: pointer to value to change
* @old: desired old value to match * @old: desired old value to match
* @new: new value to put in * @new: new value to put in
@ -61,7 +61,7 @@ static inline void atomic_set(atomic_t *v, int new)
* *
* "old" is "expected" old val, __oldval is actual old value * "old" is "expected" old val, __oldval is actual old value
*/ */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new) static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{ {
int __oldval; int __oldval;
@ -81,7 +81,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
} }
#define ATOMIC_OP(op) \ #define ATOMIC_OP(op) \
static inline void atomic_##op(int i, atomic_t *v) \ static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \ { \
int output; \ int output; \
\ \
@ -97,7 +97,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \ } \
#define ATOMIC_OP_RETURN(op) \ #define ATOMIC_OP_RETURN(op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \ static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \ { \
int output; \ int output; \
\ \
@ -114,7 +114,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
} }
#define ATOMIC_FETCH_OP(op) \ #define ATOMIC_FETCH_OP(op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \ static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \ { \
int output, val; \ int output, val; \
\ \
@ -148,7 +148,7 @@ ATOMIC_OPS(xor)
#undef ATOMIC_OP #undef ATOMIC_OP
/** /**
* atomic_fetch_add_unless - add unless the number is a given value * arch_atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer to value * @v: pointer to value
* @a: amount to add * @a: amount to add
* @u: unless value is equal to u * @u: unless value is equal to u
@ -157,7 +157,7 @@ ATOMIC_OPS(xor)
* *
*/ */
static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{ {
int __oldval; int __oldval;
register int tmp; register int tmp;
@ -180,6 +180,6 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
); );
return __oldval; return __oldval;
} }
#define atomic_fetch_add_unless atomic_fetch_add_unless #define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
#endif #endif


@ -42,7 +42,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
* Atomically swap the contents of a register with memory. Should be atomic * Atomically swap the contents of a register with memory. Should be atomic
* between multiple CPU's and within interrupts on the same CPU. * between multiple CPU's and within interrupts on the same CPU.
*/ */
#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \ #define arch_xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \
sizeof(*(ptr)))) sizeof(*(ptr))))
/* /*
@ -51,7 +51,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
* variable casting. * variable casting.
*/ */
#define cmpxchg(ptr, old, new) \ #define arch_cmpxchg(ptr, old, new) \
({ \ ({ \
__typeof__(ptr) __ptr = (ptr); \ __typeof__(ptr) __ptr = (ptr); \
__typeof__(*(ptr)) __old = (old); \ __typeof__(*(ptr)) __old = (old); \


@ -21,11 +21,11 @@
#define ATOMIC64_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) }
#define atomic_read(v) READ_ONCE((v)->counter) #define arch_atomic_read(v) READ_ONCE((v)->counter)
#define atomic64_read(v) READ_ONCE((v)->counter) #define arch_atomic64_read(v) READ_ONCE((v)->counter)
#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i)) #define arch_atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
#define atomic64_set(v,i) WRITE_ONCE(((v)->counter), (i)) #define arch_atomic64_set(v,i) WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP(op, c_op) \ #define ATOMIC_OP(op, c_op) \
static __inline__ int \ static __inline__ int \
@ -36,7 +36,7 @@ ia64_atomic_##op (int i, atomic_t *v) \
\ \
do { \ do { \
CMPXCHG_BUGCHECK(v); \ CMPXCHG_BUGCHECK(v); \
old = atomic_read(v); \ old = arch_atomic_read(v); \
new = old c_op i; \ new = old c_op i; \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \ } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
return new; \ return new; \
@ -51,7 +51,7 @@ ia64_atomic_fetch_##op (int i, atomic_t *v) \
\ \
do { \ do { \
CMPXCHG_BUGCHECK(v); \ CMPXCHG_BUGCHECK(v); \
old = atomic_read(v); \ old = arch_atomic_read(v); \
new = old c_op i; \ new = old c_op i; \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \ } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic_t)) != old); \
return old; \ return old; \
@ -74,7 +74,7 @@ ATOMIC_OPS(sub, -)
#define __ia64_atomic_const(i) 0 #define __ia64_atomic_const(i) 0
#endif #endif
#define atomic_add_return(i,v) \ #define arch_atomic_add_return(i,v) \
({ \ ({ \
int __ia64_aar_i = (i); \ int __ia64_aar_i = (i); \
__ia64_atomic_const(i) \ __ia64_atomic_const(i) \
@ -82,7 +82,7 @@ ATOMIC_OPS(sub, -)
: ia64_atomic_add(__ia64_aar_i, v); \ : ia64_atomic_add(__ia64_aar_i, v); \
}) })
#define atomic_sub_return(i,v) \ #define arch_atomic_sub_return(i,v) \
({ \ ({ \
int __ia64_asr_i = (i); \ int __ia64_asr_i = (i); \
__ia64_atomic_const(i) \ __ia64_atomic_const(i) \
@ -90,7 +90,7 @@ ATOMIC_OPS(sub, -)
: ia64_atomic_sub(__ia64_asr_i, v); \ : ia64_atomic_sub(__ia64_asr_i, v); \
}) })
#define atomic_fetch_add(i,v) \ #define arch_atomic_fetch_add(i,v) \
({ \ ({ \
int __ia64_aar_i = (i); \ int __ia64_aar_i = (i); \
__ia64_atomic_const(i) \ __ia64_atomic_const(i) \
@ -98,7 +98,7 @@ ATOMIC_OPS(sub, -)
: ia64_atomic_fetch_add(__ia64_aar_i, v); \ : ia64_atomic_fetch_add(__ia64_aar_i, v); \
}) })
#define atomic_fetch_sub(i,v) \ #define arch_atomic_fetch_sub(i,v) \
({ \ ({ \
int __ia64_asr_i = (i); \ int __ia64_asr_i = (i); \
__ia64_atomic_const(i) \ __ia64_atomic_const(i) \
@ -110,13 +110,13 @@ ATOMIC_FETCH_OP(and, &)
ATOMIC_FETCH_OP(or, |) ATOMIC_FETCH_OP(or, |)
ATOMIC_FETCH_OP(xor, ^) ATOMIC_FETCH_OP(xor, ^)
#define atomic_and(i,v) (void)ia64_atomic_fetch_and(i,v) #define arch_atomic_and(i,v) (void)ia64_atomic_fetch_and(i,v)
#define atomic_or(i,v) (void)ia64_atomic_fetch_or(i,v) #define arch_atomic_or(i,v) (void)ia64_atomic_fetch_or(i,v)
#define atomic_xor(i,v) (void)ia64_atomic_fetch_xor(i,v) #define arch_atomic_xor(i,v) (void)ia64_atomic_fetch_xor(i,v)
#define atomic_fetch_and(i,v) ia64_atomic_fetch_and(i,v) #define arch_atomic_fetch_and(i,v) ia64_atomic_fetch_and(i,v)
#define atomic_fetch_or(i,v) ia64_atomic_fetch_or(i,v) #define arch_atomic_fetch_or(i,v) ia64_atomic_fetch_or(i,v)
#define atomic_fetch_xor(i,v) ia64_atomic_fetch_xor(i,v) #define arch_atomic_fetch_xor(i,v) ia64_atomic_fetch_xor(i,v)
#undef ATOMIC_OPS #undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP #undef ATOMIC_FETCH_OP
@ -131,7 +131,7 @@ ia64_atomic64_##op (s64 i, atomic64_t *v) \
\ \
do { \ do { \
CMPXCHG_BUGCHECK(v); \ CMPXCHG_BUGCHECK(v); \
old = atomic64_read(v); \ old = arch_atomic64_read(v); \
new = old c_op i; \ new = old c_op i; \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \ } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
return new; \ return new; \
@ -146,7 +146,7 @@ ia64_atomic64_fetch_##op (s64 i, atomic64_t *v) \
\ \
do { \ do { \
CMPXCHG_BUGCHECK(v); \ CMPXCHG_BUGCHECK(v); \
old = atomic64_read(v); \ old = arch_atomic64_read(v); \
new = old c_op i; \ new = old c_op i; \
} while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \ } while (ia64_cmpxchg(acq, v, old, new, sizeof(atomic64_t)) != old); \
return old; \ return old; \
@ -159,7 +159,7 @@ ia64_atomic64_fetch_##op (s64 i, atomic64_t *v) \
ATOMIC64_OPS(add, +) ATOMIC64_OPS(add, +)
ATOMIC64_OPS(sub, -) ATOMIC64_OPS(sub, -)
#define atomic64_add_return(i,v) \ #define arch_atomic64_add_return(i,v) \
({ \ ({ \
s64 __ia64_aar_i = (i); \ s64 __ia64_aar_i = (i); \
__ia64_atomic_const(i) \ __ia64_atomic_const(i) \
@@ -167,7 +167,7 @@ ATOMIC64_OPS(sub, -)
: ia64_atomic64_add(__ia64_aar_i, v); \ : ia64_atomic64_add(__ia64_aar_i, v); \
}) })
#define atomic64_sub_return(i,v) \ #define arch_atomic64_sub_return(i,v) \
({ \ ({ \
s64 __ia64_asr_i = (i); \ s64 __ia64_asr_i = (i); \
__ia64_atomic_const(i) \ __ia64_atomic_const(i) \
@@ -175,7 +175,7 @@ ATOMIC64_OPS(sub, -)
: ia64_atomic64_sub(__ia64_asr_i, v); \ : ia64_atomic64_sub(__ia64_asr_i, v); \
}) })
#define atomic64_fetch_add(i,v) \ #define arch_atomic64_fetch_add(i,v) \
({ \ ({ \
s64 __ia64_aar_i = (i); \ s64 __ia64_aar_i = (i); \
__ia64_atomic_const(i) \ __ia64_atomic_const(i) \
@@ -183,7 +183,7 @@ ATOMIC64_OPS(sub, -)
: ia64_atomic64_fetch_add(__ia64_aar_i, v); \ : ia64_atomic64_fetch_add(__ia64_aar_i, v); \
}) })
#define atomic64_fetch_sub(i,v) \ #define arch_atomic64_fetch_sub(i,v) \
({ \ ({ \
s64 __ia64_asr_i = (i); \ s64 __ia64_asr_i = (i); \
__ia64_atomic_const(i) \ __ia64_atomic_const(i) \
@@ -195,29 +195,29 @@ ATOMIC64_FETCH_OP(and, &)
ATOMIC64_FETCH_OP(or, |) ATOMIC64_FETCH_OP(or, |)
ATOMIC64_FETCH_OP(xor, ^) ATOMIC64_FETCH_OP(xor, ^)
#define atomic64_and(i,v) (void)ia64_atomic64_fetch_and(i,v) #define arch_atomic64_and(i,v) (void)ia64_atomic64_fetch_and(i,v)
#define atomic64_or(i,v) (void)ia64_atomic64_fetch_or(i,v) #define arch_atomic64_or(i,v) (void)ia64_atomic64_fetch_or(i,v)
#define atomic64_xor(i,v) (void)ia64_atomic64_fetch_xor(i,v) #define arch_atomic64_xor(i,v) (void)ia64_atomic64_fetch_xor(i,v)
#define atomic64_fetch_and(i,v) ia64_atomic64_fetch_and(i,v) #define arch_atomic64_fetch_and(i,v) ia64_atomic64_fetch_and(i,v)
#define atomic64_fetch_or(i,v) ia64_atomic64_fetch_or(i,v) #define arch_atomic64_fetch_or(i,v) ia64_atomic64_fetch_or(i,v)
#define atomic64_fetch_xor(i,v) ia64_atomic64_fetch_xor(i,v) #define arch_atomic64_fetch_xor(i,v) ia64_atomic64_fetch_xor(i,v)
#undef ATOMIC64_OPS #undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP #undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP #undef ATOMIC64_OP
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new)) #define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) #define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#define atomic64_cmpxchg(v, old, new) \ #define arch_atomic64_cmpxchg(v, old, new) \
(cmpxchg(&((v)->counter), old, new)) (arch_cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) #define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
#define atomic_add(i,v) (void)atomic_add_return((i), (v)) #define arch_atomic_add(i,v) (void)arch_atomic_add_return((i), (v))
#define atomic_sub(i,v) (void)atomic_sub_return((i), (v)) #define arch_atomic_sub(i,v) (void)arch_atomic_sub_return((i), (v))
#define atomic64_add(i,v) (void)atomic64_add_return((i), (v)) #define arch_atomic64_add(i,v) (void)arch_atomic64_add_return((i), (v))
#define atomic64_sub(i,v) (void)atomic64_sub_return((i), (v)) #define arch_atomic64_sub(i,v) (void)arch_atomic64_sub_return((i), (v))
#endif /* _ASM_IA64_ATOMIC_H */ #endif /* _ASM_IA64_ATOMIC_H */

@@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_CMPXCHG_H
#define _ASM_IA64_CMPXCHG_H
#include <uapi/asm/cmpxchg.h>
#define arch_xchg(ptr, x) \
({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
#define arch_cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
#define arch_cmpxchg64(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
#define arch_cmpxchg_local arch_cmpxchg
#define arch_cmpxchg64_local arch_cmpxchg64
#endif /* _ASM_IA64_CMPXCHG_H */

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_IA64_CMPXCHG_H #ifndef _UAPI_ASM_IA64_CMPXCHG_H
#define _ASM_IA64_CMPXCHG_H #define _UAPI_ASM_IA64_CMPXCHG_H
/* /*
* Compare/Exchange, forked from asm/intrinsics.h * Compare/Exchange, forked from asm/intrinsics.h
@@ -53,8 +53,10 @@ extern void ia64_xchg_called_with_bad_pointer(void);
__xchg_result; \ __xchg_result; \
}) })
#ifndef __KERNEL__
#define xchg(ptr, x) \ #define xchg(ptr, x) \
({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));}) ({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
#endif
/* /*
* Atomic compare and exchange. Compare OLD with MEM, if identical, * Atomic compare and exchange. Compare OLD with MEM, if identical,
@@ -126,12 +128,14 @@ extern long ia64_cmpxchg_called_with_bad_pointer(void);
* we had to back-pedal and keep the "legacy" behavior of a full fence :-( * we had to back-pedal and keep the "legacy" behavior of a full fence :-(
*/ */
#ifndef __KERNEL__
/* for compatibility with other platforms: */ /* for compatibility with other platforms: */
#define cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n)) #define cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
#define cmpxchg64(ptr, o, n) cmpxchg_acq((ptr), (o), (n)) #define cmpxchg64(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
#define cmpxchg_local cmpxchg #define cmpxchg_local cmpxchg
#define cmpxchg64_local cmpxchg64 #define cmpxchg64_local cmpxchg64
#endif
#ifdef CONFIG_IA64_DEBUG_CMPXCHG #ifdef CONFIG_IA64_DEBUG_CMPXCHG
# define CMPXCHG_BUGCHECK_DECL int _cmpxchg_bugcheck_count = 128; # define CMPXCHG_BUGCHECK_DECL int _cmpxchg_bugcheck_count = 128;
@@ -152,4 +156,4 @@ do { \
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
#endif /* _ASM_IA64_CMPXCHG_H */ #endif /* _UAPI_ASM_IA64_CMPXCHG_H */

@@ -16,8 +16,8 @@
* We do not have SMP m68k systems, so we don't have to deal with that. * We do not have SMP m68k systems, so we don't have to deal with that.
*/ */
#define atomic_read(v) READ_ONCE((v)->counter) #define arch_atomic_read(v) READ_ONCE((v)->counter)
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) #define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
/* /*
* The ColdFire parts cannot do some immediate to memory operations, * The ColdFire parts cannot do some immediate to memory operations,
@@ -30,7 +30,7 @@
#endif #endif
#define ATOMIC_OP(op, c_op, asm_op) \ #define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \ static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \ { \
__asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\ __asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
} \ } \
@@ -38,7 +38,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
#ifdef CONFIG_RMW_INSNS #ifdef CONFIG_RMW_INSNS
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \ #define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \ static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \ { \
int t, tmp; \ int t, tmp; \
\ \
@@ -48,12 +48,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
" casl %2,%1,%0\n" \ " casl %2,%1,%0\n" \
" jne 1b" \ " jne 1b" \
: "+m" (*v), "=&d" (t), "=&d" (tmp) \ : "+m" (*v), "=&d" (t), "=&d" (tmp) \
: "g" (i), "2" (atomic_read(v))); \ : "g" (i), "2" (arch_atomic_read(v))); \
return t; \ return t; \
} }
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \ #define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \ static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \ { \
int t, tmp; \ int t, tmp; \
\ \
@@ -63,14 +63,14 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
" casl %2,%1,%0\n" \ " casl %2,%1,%0\n" \
" jne 1b" \ " jne 1b" \
: "+m" (*v), "=&d" (t), "=&d" (tmp) \ : "+m" (*v), "=&d" (t), "=&d" (tmp) \
: "g" (i), "2" (atomic_read(v))); \ : "g" (i), "2" (arch_atomic_read(v))); \
return tmp; \ return tmp; \
} }
#else #else
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \ #define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t * v) \ static inline int arch_atomic_##op##_return(int i, atomic_t * v) \
{ \ { \
unsigned long flags; \ unsigned long flags; \
int t; \ int t; \
@@ -83,7 +83,7 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
} }
#define ATOMIC_FETCH_OP(op, c_op, asm_op) \ #define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op(int i, atomic_t * v) \ static inline int arch_atomic_fetch_##op(int i, atomic_t * v) \
{ \ { \
unsigned long flags; \ unsigned long flags; \
int t; \ int t; \
@@ -120,27 +120,27 @@ ATOMIC_OPS(xor, ^=, eor)
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP #undef ATOMIC_OP
static inline void atomic_inc(atomic_t *v) static inline void arch_atomic_inc(atomic_t *v)
{ {
__asm__ __volatile__("addql #1,%0" : "+m" (*v)); __asm__ __volatile__("addql #1,%0" : "+m" (*v));
} }
#define atomic_inc atomic_inc #define arch_atomic_inc arch_atomic_inc
static inline void atomic_dec(atomic_t *v) static inline void arch_atomic_dec(atomic_t *v)
{ {
__asm__ __volatile__("subql #1,%0" : "+m" (*v)); __asm__ __volatile__("subql #1,%0" : "+m" (*v));
} }
#define atomic_dec atomic_dec #define arch_atomic_dec arch_atomic_dec
static inline int atomic_dec_and_test(atomic_t *v) static inline int arch_atomic_dec_and_test(atomic_t *v)
{ {
char c; char c;
__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v)); __asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
return c != 0; return c != 0;
} }
#define atomic_dec_and_test atomic_dec_and_test #define arch_atomic_dec_and_test arch_atomic_dec_and_test
static inline int atomic_dec_and_test_lt(atomic_t *v) static inline int arch_atomic_dec_and_test_lt(atomic_t *v)
{ {
char c; char c;
__asm__ __volatile__( __asm__ __volatile__(
@@ -150,49 +150,49 @@ static inline int atomic_dec_and_test_lt(atomic_t *v)
return c != 0; return c != 0;
} }
static inline int atomic_inc_and_test(atomic_t *v) static inline int arch_atomic_inc_and_test(atomic_t *v)
{ {
char c; char c;
__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v)); __asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
return c != 0; return c != 0;
} }
#define atomic_inc_and_test atomic_inc_and_test #define arch_atomic_inc_and_test arch_atomic_inc_and_test
#ifdef CONFIG_RMW_INSNS #ifdef CONFIG_RMW_INSNS
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) #define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) #define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#else /* !CONFIG_RMW_INSNS */ #else /* !CONFIG_RMW_INSNS */
static inline int atomic_cmpxchg(atomic_t *v, int old, int new) static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{ {
unsigned long flags; unsigned long flags;
int prev; int prev;
local_irq_save(flags); local_irq_save(flags);
prev = atomic_read(v); prev = arch_atomic_read(v);
if (prev == old) if (prev == old)
atomic_set(v, new); arch_atomic_set(v, new);
local_irq_restore(flags); local_irq_restore(flags);
return prev; return prev;
} }
static inline int atomic_xchg(atomic_t *v, int new) static inline int arch_atomic_xchg(atomic_t *v, int new)
{ {
unsigned long flags; unsigned long flags;
int prev; int prev;
local_irq_save(flags); local_irq_save(flags);
prev = atomic_read(v); prev = arch_atomic_read(v);
atomic_set(v, new); arch_atomic_set(v, new);
local_irq_restore(flags); local_irq_restore(flags);
return prev; return prev;
} }
#endif /* !CONFIG_RMW_INSNS */ #endif /* !CONFIG_RMW_INSNS */
static inline int atomic_sub_and_test(int i, atomic_t *v) static inline int arch_atomic_sub_and_test(int i, atomic_t *v)
{ {
char c; char c;
__asm__ __volatile__("subl %2,%1; seq %0" __asm__ __volatile__("subl %2,%1; seq %0"
@@ -200,9 +200,9 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
: ASM_DI (i)); : ASM_DI (i));
return c != 0; return c != 0;
} }
#define atomic_sub_and_test atomic_sub_and_test #define arch_atomic_sub_and_test arch_atomic_sub_and_test
static inline int atomic_add_negative(int i, atomic_t *v) static inline int arch_atomic_add_negative(int i, atomic_t *v)
{ {
char c; char c;
__asm__ __volatile__("addl %2,%1; smi %0" __asm__ __volatile__("addl %2,%1; smi %0"
@@ -210,6 +210,6 @@ static inline int atomic_add_negative(int i, atomic_t *v)
: ASM_DI (i)); : ASM_DI (i));
return c != 0; return c != 0;
} }
#define atomic_add_negative atomic_add_negative #define arch_atomic_add_negative arch_atomic_add_negative
#endif /* __ARCH_M68K_ATOMIC __ */ #endif /* __ARCH_M68K_ATOMIC __ */

@@ -76,11 +76,11 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
} }
#endif #endif
#define xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));}) #define arch_xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
#include <asm-generic/cmpxchg-local.h> #include <asm-generic/cmpxchg-local.h>
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) #define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
extern unsigned long __invalid_cmpxchg_size(volatile void *, extern unsigned long __invalid_cmpxchg_size(volatile void *,
unsigned long, unsigned long, int); unsigned long, unsigned long, int);
@@ -118,14 +118,14 @@ static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
return old; return old;
} }
#define cmpxchg(ptr, o, n) \ #define arch_cmpxchg(ptr, o, n) \
({(__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ ({(__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr)));}) (unsigned long)(n), sizeof(*(ptr)));})
#define cmpxchg_local(ptr, o, n) \ #define arch_cmpxchg_local(ptr, o, n) \
({(__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ ({(__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr)));}) (unsigned long)(n), sizeof(*(ptr)));})
#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) #define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
#else #else

@@ -31,7 +31,7 @@ static inline void get_mmu_context(struct mm_struct *mm)
if (mm->context != NO_CONTEXT) if (mm->context != NO_CONTEXT)
return; return;
while (atomic_dec_and_test_lt(&nr_free_contexts)) { while (arch_atomic_dec_and_test_lt(&nr_free_contexts)) {
atomic_inc(&nr_free_contexts); atomic_inc(&nr_free_contexts);
steal_context(); steal_context();
} }

@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
generated-y += syscall_table.h generated-y += syscall_table.h
generic-y += cmpxchg.h
generic-y += extable.h generic-y += extable.h
generic-y += kvm_para.h generic-y += kvm_para.h
generic-y += mcs_spinlock.h generic-y += mcs_spinlock.h

@@ -1,28 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_MICROBLAZE_ATOMIC_H
#define _ASM_MICROBLAZE_ATOMIC_H
#include <asm/cmpxchg.h>
#include <asm-generic/atomic.h>
#include <asm-generic/atomic64.h>
/*
* Atomically test *v and decrement if it is greater than 0.
* The function returns the old value of *v minus 1.
*/
static inline int atomic_dec_if_positive(atomic_t *v)
{
unsigned long flags;
int res;
local_irq_save(flags);
res = v->counter - 1;
if (res >= 0)
v->counter = res;
local_irq_restore(flags);
return res;
}
#define atomic_dec_if_positive atomic_dec_if_positive
#endif /* _ASM_MICROBLAZE_ATOMIC_H */

@@ -1,9 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_MICROBLAZE_CMPXCHG_H
#define _ASM_MICROBLAZE_CMPXCHG_H
#ifndef CONFIG_SMP
# include <asm-generic/cmpxchg.h>
#endif
#endif /* _ASM_MICROBLAZE_CMPXCHG_H */

@@ -25,24 +25,25 @@
#include <asm/war.h> #include <asm/war.h>
#define ATOMIC_OPS(pfx, type) \ #define ATOMIC_OPS(pfx, type) \
static __always_inline type pfx##_read(const pfx##_t *v) \ static __always_inline type arch_##pfx##_read(const pfx##_t *v) \
{ \ { \
return READ_ONCE(v->counter); \ return READ_ONCE(v->counter); \
} \ } \
\ \
static __always_inline void pfx##_set(pfx##_t *v, type i) \ static __always_inline void arch_##pfx##_set(pfx##_t *v, type i) \
{ \ { \
WRITE_ONCE(v->counter, i); \ WRITE_ONCE(v->counter, i); \
} \ } \
\ \
static __always_inline type pfx##_cmpxchg(pfx##_t *v, type o, type n) \ static __always_inline type \
arch_##pfx##_cmpxchg(pfx##_t *v, type o, type n) \
{ \ { \
return cmpxchg(&v->counter, o, n); \ return arch_cmpxchg(&v->counter, o, n); \
} \ } \
\ \
static __always_inline type pfx##_xchg(pfx##_t *v, type n) \ static __always_inline type arch_##pfx##_xchg(pfx##_t *v, type n) \
{ \ { \
return xchg(&v->counter, n); \ return arch_xchg(&v->counter, n); \
} }
ATOMIC_OPS(atomic, int) ATOMIC_OPS(atomic, int)
@@ -53,7 +54,7 @@ ATOMIC_OPS(atomic64, s64)
#endif #endif
#define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \ #define ATOMIC_OP(pfx, op, type, c_op, asm_op, ll, sc) \
static __inline__ void pfx##_##op(type i, pfx##_t * v) \ static __inline__ void arch_##pfx##_##op(type i, pfx##_t * v) \
{ \ { \
type temp; \ type temp; \
\ \
@@ -80,7 +81,8 @@ static __inline__ void pfx##_##op(type i, pfx##_t * v) \
} }
#define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc) \ #define ATOMIC_OP_RETURN(pfx, op, type, c_op, asm_op, ll, sc) \
static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v) \ static __inline__ type \
arch_##pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
{ \ { \
type temp, result; \ type temp, result; \
\ \
@@ -113,7 +115,8 @@ static __inline__ type pfx##_##op##_return_relaxed(type i, pfx##_t * v) \
} }
#define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc) \ #define ATOMIC_FETCH_OP(pfx, op, type, c_op, asm_op, ll, sc) \
static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \ static __inline__ type \
arch_##pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
{ \ { \
int temp, result; \ int temp, result; \
\ \
@@ -153,18 +156,18 @@ static __inline__ type pfx##_fetch_##op##_relaxed(type i, pfx##_t * v) \
ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc) ATOMIC_OPS(atomic, add, int, +=, addu, ll, sc)
ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc) ATOMIC_OPS(atomic, sub, int, -=, subu, ll, sc)
#define atomic_add_return_relaxed atomic_add_return_relaxed #define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed #define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed #define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed #define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd) ATOMIC_OPS(atomic64, add, s64, +=, daddu, lld, scd)
ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd) ATOMIC_OPS(atomic64, sub, s64, -=, dsubu, lld, scd)
# define atomic64_add_return_relaxed atomic64_add_return_relaxed # define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
# define atomic64_sub_return_relaxed atomic64_sub_return_relaxed # define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
# define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed # define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
# define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed # define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#endif /* CONFIG_64BIT */ #endif /* CONFIG_64BIT */
#undef ATOMIC_OPS #undef ATOMIC_OPS
@@ -176,17 +179,17 @@ ATOMIC_OPS(atomic, and, int, &=, and, ll, sc)
ATOMIC_OPS(atomic, or, int, |=, or, ll, sc) ATOMIC_OPS(atomic, or, int, |=, or, ll, sc)
ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc) ATOMIC_OPS(atomic, xor, int, ^=, xor, ll, sc)
#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed #define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed #define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed #define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd) ATOMIC_OPS(atomic64, and, s64, &=, and, lld, scd)
ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd) ATOMIC_OPS(atomic64, or, s64, |=, or, lld, scd)
ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd) ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
# define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed # define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
# define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed # define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
# define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed # define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#endif #endif
#undef ATOMIC_OPS #undef ATOMIC_OPS
@@ -203,7 +206,7 @@ ATOMIC_OPS(atomic64, xor, s64, ^=, xor, lld, scd)
* The function returns the old value of @v minus @i. * The function returns the old value of @v minus @i.
*/ */
#define ATOMIC_SIP_OP(pfx, type, op, ll, sc) \ #define ATOMIC_SIP_OP(pfx, type, op, ll, sc) \
static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v) \ static __inline__ int arch_##pfx##_sub_if_positive(type i, pfx##_t * v) \
{ \ { \
type temp, result; \ type temp, result; \
\ \
@@ -255,11 +258,11 @@ static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v) \
} }
ATOMIC_SIP_OP(atomic, int, subu, ll, sc) ATOMIC_SIP_OP(atomic, int, subu, ll, sc)
#define atomic_dec_if_positive(v) atomic_sub_if_positive(1, v) #define arch_atomic_dec_if_positive(v) arch_atomic_sub_if_positive(1, v)
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd) ATOMIC_SIP_OP(atomic64, s64, dsubu, lld, scd)
#define atomic64_dec_if_positive(v) atomic64_sub_if_positive(1, v) #define arch_atomic64_dec_if_positive(v) arch_atomic64_sub_if_positive(1, v)
#endif #endif
#undef ATOMIC_SIP_OP #undef ATOMIC_SIP_OP

@@ -90,7 +90,7 @@ unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
} }
} }
#define xchg(ptr, x) \ #define arch_xchg(ptr, x) \
({ \ ({ \
__typeof__(*(ptr)) __res; \ __typeof__(*(ptr)) __res; \
\ \
@@ -175,14 +175,14 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
} }
} }
#define cmpxchg_local(ptr, old, new) \ #define arch_cmpxchg_local(ptr, old, new) \
((__typeof__(*(ptr))) \ ((__typeof__(*(ptr))) \
__cmpxchg((ptr), \ __cmpxchg((ptr), \
(unsigned long)(__typeof__(*(ptr)))(old), \ (unsigned long)(__typeof__(*(ptr)))(old), \
(unsigned long)(__typeof__(*(ptr)))(new), \ (unsigned long)(__typeof__(*(ptr)))(new), \
sizeof(*(ptr)))) sizeof(*(ptr))))
#define cmpxchg(ptr, old, new) \ #define arch_cmpxchg(ptr, old, new) \
({ \ ({ \
__typeof__(*(ptr)) __res; \ __typeof__(*(ptr)) __res; \
\ \
@@ -194,7 +194,7 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
if (__SYNC_loongson3_war == 0) \ if (__SYNC_loongson3_war == 0) \
smp_mb__before_llsc(); \ smp_mb__before_llsc(); \
\ \
__res = cmpxchg_local((ptr), (old), (new)); \ __res = arch_cmpxchg_local((ptr), (old), (new)); \
\ \
/* \ /* \
* In the Loongson3 workaround case __cmpxchg_asm() already \ * In the Loongson3 workaround case __cmpxchg_asm() already \
@@ -208,21 +208,21 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
}) })
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
#define cmpxchg64_local(ptr, o, n) \ #define arch_cmpxchg64_local(ptr, o, n) \
({ \ ({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \ arch_cmpxchg_local((ptr), (o), (n)); \
}) })
#define cmpxchg64(ptr, o, n) \ #define arch_cmpxchg64(ptr, o, n) \
({ \ ({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg((ptr), (o), (n)); \ arch_cmpxchg((ptr), (o), (n)); \
}) })
#else #else
# include <asm-generic/cmpxchg-local.h> # include <asm-generic/cmpxchg-local.h>
# define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) # define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
# ifdef CONFIG_SMP # ifdef CONFIG_SMP
@@ -294,7 +294,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
return ret; return ret;
} }
# define cmpxchg64(ptr, o, n) ({ \ # define arch_cmpxchg64(ptr, o, n) ({ \
unsigned long long __old = (__typeof__(*(ptr)))(o); \ unsigned long long __old = (__typeof__(*(ptr)))(o); \
unsigned long long __new = (__typeof__(*(ptr)))(n); \ unsigned long long __new = (__typeof__(*(ptr)))(n); \
__typeof__(*(ptr)) __res; \ __typeof__(*(ptr)) __res; \
@@ -317,7 +317,7 @@ static inline unsigned long __cmpxchg64(volatile void *ptr,
}) })
# else /* !CONFIG_SMP */ # else /* !CONFIG_SMP */
# define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) # define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
# endif /* !CONFIG_SMP */ # endif /* !CONFIG_SMP */
#endif /* !CONFIG_64BIT */ #endif /* !CONFIG_64BIT */

@@ -41,7 +41,7 @@ unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int s
do { do {
old32 = load32; old32 = load32;
new32 = (load32 & ~mask) | (val << shift); new32 = (load32 & ~mask) | (val << shift);
load32 = cmpxchg(ptr32, old32, new32); load32 = arch_cmpxchg(ptr32, old32, new32);
} while (load32 != old32); } while (load32 != old32);
return (load32 & mask) >> shift; return (load32 & mask) >> shift;
@@ -97,7 +97,7 @@ unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
*/ */
old32 = (load32 & ~mask) | (old << shift); old32 = (load32 & ~mask) | (old << shift);
new32 = (load32 & ~mask) | (new << shift); new32 = (load32 & ~mask) | (new << shift);
load32 = cmpxchg(ptr32, old32, new32); load32 = arch_cmpxchg(ptr32, old32, new32);
if (load32 == old32) if (load32 == old32)
return old; return old;
} }

@@ -13,7 +13,7 @@
/* Atomically perform op with v->counter and i */ /* Atomically perform op with v->counter and i */
#define ATOMIC_OP(op) \ #define ATOMIC_OP(op) \
static inline void atomic_##op(int i, atomic_t *v) \ static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \ { \
int tmp; \ int tmp; \
\ \
@@ -30,7 +30,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
/* Atomically perform op with v->counter and i, return the result */ /* Atomically perform op with v->counter and i, return the result */
#define ATOMIC_OP_RETURN(op) \ #define ATOMIC_OP_RETURN(op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \ static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \ { \
int tmp; \ int tmp; \
\ \
@@ -49,7 +49,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
/* Atomically perform op with v->counter and i, return orig v->counter */ /* Atomically perform op with v->counter and i, return orig v->counter */
#define ATOMIC_FETCH_OP(op) \ #define ATOMIC_FETCH_OP(op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \ static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \ { \
int tmp, old; \ int tmp, old; \
\ \
@@ -75,6 +75,8 @@ ATOMIC_FETCH_OP(and)
ATOMIC_FETCH_OP(or) ATOMIC_FETCH_OP(or)
ATOMIC_FETCH_OP(xor) ATOMIC_FETCH_OP(xor)
ATOMIC_OP(add)
ATOMIC_OP(sub)
ATOMIC_OP(and) ATOMIC_OP(and)
ATOMIC_OP(or) ATOMIC_OP(or)
ATOMIC_OP(xor) ATOMIC_OP(xor)
@@ -83,16 +85,18 @@ ATOMIC_OP(xor)
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP #undef ATOMIC_OP
#define atomic_add_return atomic_add_return #define arch_atomic_add_return arch_atomic_add_return
#define atomic_sub_return atomic_sub_return #define arch_atomic_sub_return arch_atomic_sub_return
#define atomic_fetch_add atomic_fetch_add #define arch_atomic_fetch_add arch_atomic_fetch_add
#define atomic_fetch_sub atomic_fetch_sub #define arch_atomic_fetch_sub arch_atomic_fetch_sub
#define atomic_fetch_and atomic_fetch_and #define arch_atomic_fetch_and arch_atomic_fetch_and
#define atomic_fetch_or atomic_fetch_or #define arch_atomic_fetch_or arch_atomic_fetch_or
#define atomic_fetch_xor atomic_fetch_xor #define arch_atomic_fetch_xor arch_atomic_fetch_xor
#define atomic_and atomic_and #define arch_atomic_add arch_atomic_add
#define atomic_or atomic_or #define arch_atomic_sub arch_atomic_sub
#define atomic_xor atomic_xor #define arch_atomic_and arch_atomic_and
#define arch_atomic_or arch_atomic_or
#define arch_atomic_xor arch_atomic_xor
/* /*
* Atomically add a to v->counter as long as v is not already u. * Atomically add a to v->counter as long as v is not already u.
@@ -100,7 +104,7 @@ ATOMIC_OP(xor)
* *
* This is often used through atomic_inc_not_zero() * This is often used through atomic_inc_not_zero()
*/ */
static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{ {
int old, tmp; int old, tmp;
@@ -119,8 +123,14 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
return old; return old;
} }
#define atomic_fetch_add_unless atomic_fetch_add_unless #define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
#include <asm-generic/atomic.h> #define arch_atomic_read(v) READ_ONCE((v)->counter)
#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
#include <asm/cmpxchg.h>
#define arch_atomic_xchg(ptr, v) (arch_xchg(&(ptr)->counter, (v)))
#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), (old), (new)))
#endif /* __ASM_OPENRISC_ATOMIC_H */ #endif /* __ASM_OPENRISC_ATOMIC_H */

@@ -132,7 +132,7 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
} }
} }
#define cmpxchg(ptr, o, n) \ #define arch_cmpxchg(ptr, o, n) \
({ \ ({ \
(__typeof__(*(ptr))) __cmpxchg((ptr), \ (__typeof__(*(ptr))) __cmpxchg((ptr), \
(unsigned long)(o), \ (unsigned long)(o), \
@@ -161,7 +161,7 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long with,
} }
} }
#define xchg(ptr, with) \ #define arch_xchg(ptr, with) \
({ \ ({ \
(__typeof__(*(ptr))) __xchg((ptr), \ (__typeof__(*(ptr))) __xchg((ptr), \
(unsigned long)(with), \ (unsigned long)(with), \

@@ -56,7 +56,7 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
* are atomic, so a reader never sees inconsistent values. * are atomic, so a reader never sees inconsistent values.
*/ */
static __inline__ void atomic_set(atomic_t *v, int i) static __inline__ void arch_atomic_set(atomic_t *v, int i)
{ {
unsigned long flags; unsigned long flags;
_atomic_spin_lock_irqsave(v, flags); _atomic_spin_lock_irqsave(v, flags);
@@ -66,19 +66,19 @@ static __inline__ void atomic_set(atomic_t *v, int i)
_atomic_spin_unlock_irqrestore(v, flags); _atomic_spin_unlock_irqrestore(v, flags);
} }
#define atomic_set_release(v, i) atomic_set((v), (i)) #define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
static __inline__ int atomic_read(const atomic_t *v) static __inline__ int arch_atomic_read(const atomic_t *v)
{ {
return READ_ONCE((v)->counter); return READ_ONCE((v)->counter);
} }
/* exported interface */ /* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) #define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) #define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#define ATOMIC_OP(op, c_op) \ #define ATOMIC_OP(op, c_op) \
static __inline__ void atomic_##op(int i, atomic_t *v) \ static __inline__ void arch_atomic_##op(int i, atomic_t *v) \
{ \ { \
unsigned long flags; \ unsigned long flags; \
\ \
@@ -88,7 +88,7 @@ static __inline__ void atomic_##op(int i, atomic_t *v) \
} }
#define ATOMIC_OP_RETURN(op, c_op) \ #define ATOMIC_OP_RETURN(op, c_op) \
static __inline__ int atomic_##op##_return(int i, atomic_t *v) \ static __inline__ int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \ { \
unsigned long flags; \ unsigned long flags; \
int ret; \ int ret; \
@@ -101,7 +101,7 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
} }
#define ATOMIC_FETCH_OP(op, c_op) \ #define ATOMIC_FETCH_OP(op, c_op) \
static __inline__ int atomic_fetch_##op(int i, atomic_t *v) \ static __inline__ int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \ { \
unsigned long flags; \ unsigned long flags; \
int ret; \ int ret; \
@@ -141,7 +141,7 @@ ATOMIC_OPS(xor, ^=)
#define ATOMIC64_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) }
#define ATOMIC64_OP(op, c_op) \ #define ATOMIC64_OP(op, c_op) \
static __inline__ void atomic64_##op(s64 i, atomic64_t *v) \ static __inline__ void arch_atomic64_##op(s64 i, atomic64_t *v) \
{ \ { \
unsigned long flags; \ unsigned long flags; \
\ \
@@ -151,7 +151,7 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t *v) \
} }
#define ATOMIC64_OP_RETURN(op, c_op) \ #define ATOMIC64_OP_RETURN(op, c_op) \
static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \ static __inline__ s64 arch_atomic64_##op##_return(s64 i, atomic64_t *v) \
{ \ { \
unsigned long flags; \ unsigned long flags; \
s64 ret; \ s64 ret; \
@@ -164,7 +164,7 @@ static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
} }
#define ATOMIC64_FETCH_OP(op, c_op) \ #define ATOMIC64_FETCH_OP(op, c_op) \
static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v) \ static __inline__ s64 arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
{ \ { \
unsigned long flags; \ unsigned long flags; \
s64 ret; \ s64 ret; \
@@ -200,7 +200,7 @@ ATOMIC64_OPS(xor, ^=)
#undef ATOMIC64_OP #undef ATOMIC64_OP
static __inline__ void static __inline__ void
atomic64_set(atomic64_t *v, s64 i) arch_atomic64_set(atomic64_t *v, s64 i)
{ {
unsigned long flags; unsigned long flags;
_atomic_spin_lock_irqsave(v, flags); _atomic_spin_lock_irqsave(v, flags);
@@ -210,18 +210,18 @@ atomic64_set(atomic64_t *v, s64 i)
_atomic_spin_unlock_irqrestore(v, flags); _atomic_spin_unlock_irqrestore(v, flags);
} }
#define atomic64_set_release(v, i) atomic64_set((v), (i)) #define arch_atomic64_set_release(v, i) arch_atomic64_set((v), (i))
static __inline__ s64 static __inline__ s64
atomic64_read(const atomic64_t *v) arch_atomic64_read(const atomic64_t *v)
{ {
return READ_ONCE((v)->counter); return READ_ONCE((v)->counter);
} }
/* exported interface */ /* exported interface */
#define atomic64_cmpxchg(v, o, n) \ #define arch_atomic64_cmpxchg(v, o, n) \
((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) ((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) #define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
#endif /* !CONFIG_64BIT */ #endif /* !CONFIG_64BIT */

@@ -44,7 +44,7 @@ __xchg(unsigned long x, volatile void *ptr, int size)
** if (((unsigned long)p & 0xf) == 0) ** if (((unsigned long)p & 0xf) == 0)
** return __ldcw(p); ** return __ldcw(p);
*/ */
#define xchg(ptr, x) \ #define arch_xchg(ptr, x) \
({ \ ({ \
__typeof__(*(ptr)) __ret; \ __typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _x_ = (x); \ __typeof__(*(ptr)) _x_ = (x); \
@@ -78,7 +78,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
return old; return old;
} }
#define cmpxchg(ptr, o, n) \ #define arch_cmpxchg(ptr, o, n) \
({ \ ({ \
__typeof__(*(ptr)) _o_ = (o); \ __typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \ __typeof__(*(ptr)) _n_ = (n); \
@@ -98,7 +98,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
#endif #endif
case 4: return __cmpxchg_u32(ptr, old, new_); case 4: return __cmpxchg_u32(ptr, old, new_);
default: default:
return __cmpxchg_local_generic(ptr, old, new_, size); return __generic_cmpxchg_local(ptr, old, new_, size);
} }
} }
@@ -106,19 +106,19 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available. * them available.
*/ */
#define cmpxchg_local(ptr, o, n) \ #define arch_cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr)))) (unsigned long)(n), sizeof(*(ptr))))
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
#define cmpxchg64_local(ptr, o, n) \ #define arch_cmpxchg64_local(ptr, o, n) \
({ \ ({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \ cmpxchg_local((ptr), (o), (n)); \
}) })
#else #else
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) #define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#endif #endif
#define cmpxchg64(ptr, o, n) __cmpxchg_u64(ptr, o, n) #define arch_cmpxchg64(ptr, o, n) __cmpxchg_u64(ptr, o, n)
#endif /* _ASM_PARISC_CMPXCHG_H_ */ #endif /* _ASM_PARISC_CMPXCHG_H_ */

@@ -23,7 +23,7 @@
#define __atomic_release_fence() \ #define __atomic_release_fence() \
__asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory") __asm__ __volatile__(PPC_RELEASE_BARRIER "" : : : "memory")
static __inline__ int atomic_read(const atomic_t *v) static __inline__ int arch_atomic_read(const atomic_t *v)
{ {
int t; int t;
@@ -32,13 +32,13 @@ static __inline__ int atomic_read(const atomic_t *v)
return t; return t;
} }
static __inline__ void atomic_set(atomic_t *v, int i) static __inline__ void arch_atomic_set(atomic_t *v, int i)
{ {
__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i)); __asm__ __volatile__("stw%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i));
} }
#define ATOMIC_OP(op, asm_op) \ #define ATOMIC_OP(op, asm_op) \
static __inline__ void atomic_##op(int a, atomic_t *v) \ static __inline__ void arch_atomic_##op(int a, atomic_t *v) \
{ \ { \
int t; \ int t; \
\ \
@@ -53,7 +53,7 @@ static __inline__ void atomic_##op(int a, atomic_t *v) \
} \ } \
#define ATOMIC_OP_RETURN_RELAXED(op, asm_op) \ #define ATOMIC_OP_RETURN_RELAXED(op, asm_op) \
static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \ static inline int arch_atomic_##op##_return_relaxed(int a, atomic_t *v) \
{ \ { \
int t; \ int t; \
\ \
@@ -70,7 +70,7 @@ static inline int atomic_##op##_return_relaxed(int a, atomic_t *v) \
} }
#define ATOMIC_FETCH_OP_RELAXED(op, asm_op) \ #define ATOMIC_FETCH_OP_RELAXED(op, asm_op) \
static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v) \ static inline int arch_atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
{ \ { \
int res, t; \ int res, t; \
\ \
@@ -94,11 +94,11 @@ static inline int atomic_fetch_##op##_relaxed(int a, atomic_t *v) \
ATOMIC_OPS(add, add) ATOMIC_OPS(add, add)
ATOMIC_OPS(sub, subf) ATOMIC_OPS(sub, subf)
#define atomic_add_return_relaxed atomic_add_return_relaxed #define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed #define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed #define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed #define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
#undef ATOMIC_OPS #undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm_op) \ #define ATOMIC_OPS(op, asm_op) \
@@ -109,16 +109,16 @@ ATOMIC_OPS(and, and)
ATOMIC_OPS(or, or) ATOMIC_OPS(or, or)
ATOMIC_OPS(xor, xor) ATOMIC_OPS(xor, xor)
#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed #define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed #define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed #define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
#undef ATOMIC_OPS #undef ATOMIC_OPS
#undef ATOMIC_FETCH_OP_RELAXED #undef ATOMIC_FETCH_OP_RELAXED
#undef ATOMIC_OP_RETURN_RELAXED #undef ATOMIC_OP_RETURN_RELAXED
#undef ATOMIC_OP #undef ATOMIC_OP
static __inline__ void atomic_inc(atomic_t *v) static __inline__ void arch_atomic_inc(atomic_t *v)
{ {
int t; int t;
@@ -131,9 +131,9 @@ static __inline__ void atomic_inc(atomic_t *v)
: "r" (&v->counter) : "r" (&v->counter)
: "cc", "xer"); : "cc", "xer");
} }
#define atomic_inc atomic_inc #define arch_atomic_inc arch_atomic_inc
static __inline__ int atomic_inc_return_relaxed(atomic_t *v) static __inline__ int arch_atomic_inc_return_relaxed(atomic_t *v)
{ {
int t; int t;
@@ -149,7 +149,7 @@ static __inline__ int atomic_inc_return_relaxed(atomic_t *v)
return t; return t;
} }
static __inline__ void atomic_dec(atomic_t *v) static __inline__ void arch_atomic_dec(atomic_t *v)
{ {
int t; int t;
@@ -162,9 +162,9 @@ static __inline__ void atomic_dec(atomic_t *v)
: "r" (&v->counter) : "r" (&v->counter)
: "cc", "xer"); : "cc", "xer");
} }
#define atomic_dec atomic_dec #define arch_atomic_dec arch_atomic_dec
static __inline__ int atomic_dec_return_relaxed(atomic_t *v) static __inline__ int arch_atomic_dec_return_relaxed(atomic_t *v)
{ {
int t; int t;
@@ -180,17 +180,20 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
return t; return t;
} }
#define atomic_inc_return_relaxed atomic_inc_return_relaxed #define arch_atomic_inc_return_relaxed arch_atomic_inc_return_relaxed
#define atomic_dec_return_relaxed atomic_dec_return_relaxed #define arch_atomic_dec_return_relaxed arch_atomic_dec_return_relaxed
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) #define arch_atomic_cmpxchg(v, o, n) \
#define atomic_cmpxchg_relaxed(v, o, n) \ (arch_cmpxchg(&((v)->counter), (o), (n)))
cmpxchg_relaxed(&((v)->counter), (o), (n)) #define arch_atomic_cmpxchg_relaxed(v, o, n) \
#define atomic_cmpxchg_acquire(v, o, n) \ arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
cmpxchg_acquire(&((v)->counter), (o), (n)) #define arch_atomic_cmpxchg_acquire(v, o, n) \
arch_cmpxchg_acquire(&((v)->counter), (o), (n))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) #define arch_atomic_xchg(v, new) \
#define atomic_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new)) (arch_xchg(&((v)->counter), new))
#define arch_atomic_xchg_relaxed(v, new) \
arch_xchg_relaxed(&((v)->counter), (new))
/* /*
* Don't want to override the generic atomic_try_cmpxchg_acquire, because * Don't want to override the generic atomic_try_cmpxchg_acquire, because
@@ -199,7 +202,7 @@ static __inline__ int atomic_dec_return_relaxed(atomic_t *v)
* would be a surprise). * would be a surprise).
*/ */
static __always_inline bool static __always_inline bool
atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new) arch_atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
{ {
int r, o = *old; int r, o = *old;
@@ -229,7 +232,7 @@ atomic_try_cmpxchg_lock(atomic_t *v, int *old, int new)
* Atomically adds @a to @v, so long as it was not @u. * Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v. * Returns the old value of @v.
*/ */
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u) static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{ {
int t; int t;
@@ -250,7 +253,7 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
return t; return t;
} }
#define atomic_fetch_add_unless atomic_fetch_add_unless #define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
/** /**
* atomic_inc_not_zero - increment unless the number is zero * atomic_inc_not_zero - increment unless the number is zero
@@ -259,7 +262,7 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
* Atomically increments @v by 1, so long as @v is non-zero. * Atomically increments @v by 1, so long as @v is non-zero.
* Returns non-zero if @v was non-zero, and zero otherwise. * Returns non-zero if @v was non-zero, and zero otherwise.
*/ */
static __inline__ int atomic_inc_not_zero(atomic_t *v) static __inline__ int arch_atomic_inc_not_zero(atomic_t *v)
{ {
int t1, t2; int t1, t2;
@@ -280,14 +283,14 @@ static __inline__ int atomic_inc_not_zero(atomic_t *v)
return t1; return t1;
} }
#define atomic_inc_not_zero(v) atomic_inc_not_zero((v)) #define arch_atomic_inc_not_zero(v) arch_atomic_inc_not_zero((v))
/* /*
* Atomically test *v and decrement if it is greater than 0. * Atomically test *v and decrement if it is greater than 0.
* The function returns the old value of *v minus 1, even if * The function returns the old value of *v minus 1, even if
* the atomic variable, v, was not decremented. * the atomic variable, v, was not decremented.
*/ */
static __inline__ int atomic_dec_if_positive(atomic_t *v) static __inline__ int arch_atomic_dec_if_positive(atomic_t *v)
{ {
int t; int t;
@@ -307,13 +310,13 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
return t; return t;
} }
#define atomic_dec_if_positive atomic_dec_if_positive #define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
#ifdef __powerpc64__ #ifdef __powerpc64__
#define ATOMIC64_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) }
static __inline__ s64 atomic64_read(const atomic64_t *v) static __inline__ s64 arch_atomic64_read(const atomic64_t *v)
{ {
s64 t; s64 t;
@@ -322,13 +325,13 @@ static __inline__ s64 atomic64_read(const atomic64_t *v)
return t; return t;
} }
static __inline__ void atomic64_set(atomic64_t *v, s64 i) static __inline__ void arch_atomic64_set(atomic64_t *v, s64 i)
{ {
__asm__ __volatile__("std%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i)); __asm__ __volatile__("std%U0%X0 %1,%0" : "=m"UPD_CONSTR(v->counter) : "r"(i));
} }
#define ATOMIC64_OP(op, asm_op) \ #define ATOMIC64_OP(op, asm_op) \
static __inline__ void atomic64_##op(s64 a, atomic64_t *v) \ static __inline__ void arch_atomic64_##op(s64 a, atomic64_t *v) \
{ \ { \
s64 t; \ s64 t; \
\ \
@@ -344,7 +347,7 @@ static __inline__ void atomic64_##op(s64 a, atomic64_t *v) \
#define ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \ #define ATOMIC64_OP_RETURN_RELAXED(op, asm_op) \
static inline s64 \ static inline s64 \
atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \ arch_atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
{ \ { \
s64 t; \ s64 t; \
\ \
@@ -362,7 +365,7 @@ atomic64_##op##_return_relaxed(s64 a, atomic64_t *v) \
#define ATOMIC64_FETCH_OP_RELAXED(op, asm_op) \ #define ATOMIC64_FETCH_OP_RELAXED(op, asm_op) \
static inline s64 \ static inline s64 \
atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \ arch_atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
{ \ { \
s64 res, t; \ s64 res, t; \
\ \
@@ -386,11 +389,11 @@ atomic64_fetch_##op##_relaxed(s64 a, atomic64_t *v) \
ATOMIC64_OPS(add, add) ATOMIC64_OPS(add, add)
ATOMIC64_OPS(sub, subf) ATOMIC64_OPS(sub, subf)
#define atomic64_add_return_relaxed atomic64_add_return_relaxed #define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed #define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed #define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed #define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#undef ATOMIC64_OPS #undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, asm_op) \ #define ATOMIC64_OPS(op, asm_op) \
@@ -401,16 +404,16 @@ ATOMIC64_OPS(and, and)
ATOMIC64_OPS(or, or) ATOMIC64_OPS(or, or)
ATOMIC64_OPS(xor, xor) ATOMIC64_OPS(xor, xor)
#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed #define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed #define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed #define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#undef ATOPIC64_OPS #undef ATOPIC64_OPS
#undef ATOMIC64_FETCH_OP_RELAXED #undef ATOMIC64_FETCH_OP_RELAXED
#undef ATOMIC64_OP_RETURN_RELAXED #undef ATOMIC64_OP_RETURN_RELAXED
#undef ATOMIC64_OP #undef ATOMIC64_OP
static __inline__ void atomic64_inc(atomic64_t *v) static __inline__ void arch_atomic64_inc(atomic64_t *v)
{ {
s64 t; s64 t;
@@ -423,9 +426,9 @@ static __inline__ void atomic64_inc(atomic64_t *v)
: "r" (&v->counter) : "r" (&v->counter)
: "cc", "xer"); : "cc", "xer");
} }
#define atomic64_inc atomic64_inc #define arch_atomic64_inc arch_atomic64_inc
static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v) static __inline__ s64 arch_atomic64_inc_return_relaxed(atomic64_t *v)
{ {
s64 t; s64 t;
@@ -441,7 +444,7 @@ static __inline__ s64 atomic64_inc_return_relaxed(atomic64_t *v)
return t; return t;
} }
static __inline__ void atomic64_dec(atomic64_t *v) static __inline__ void arch_atomic64_dec(atomic64_t *v)
{ {
s64 t; s64 t;
@@ -454,9 +457,9 @@ static __inline__ void atomic64_dec(atomic64_t *v)
: "r" (&v->counter) : "r" (&v->counter)
: "cc", "xer"); : "cc", "xer");
} }
#define atomic64_dec atomic64_dec #define arch_atomic64_dec arch_atomic64_dec
static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v) static __inline__ s64 arch_atomic64_dec_return_relaxed(atomic64_t *v)
{ {
s64 t; s64 t;
@@ -472,14 +475,14 @@ static __inline__ s64 atomic64_dec_return_relaxed(atomic64_t *v)
return t; return t;
} }
#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed #define arch_atomic64_inc_return_relaxed arch_atomic64_inc_return_relaxed
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed #define arch_atomic64_dec_return_relaxed arch_atomic64_dec_return_relaxed
/* /*
* Atomically test *v and decrement if it is greater than 0. * Atomically test *v and decrement if it is greater than 0.
* The function returns the old value of *v minus 1. * The function returns the old value of *v minus 1.
*/ */
static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v) static __inline__ s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{ {
s64 t; s64 t;
@@ -498,16 +501,19 @@ static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
return t; return t;
} }
#define atomic64_dec_if_positive atomic64_dec_if_positive #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
#define atomic64_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) #define arch_atomic64_cmpxchg(v, o, n) \
#define atomic64_cmpxchg_relaxed(v, o, n) \ (arch_cmpxchg(&((v)->counter), (o), (n)))
cmpxchg_relaxed(&((v)->counter), (o), (n)) #define arch_atomic64_cmpxchg_relaxed(v, o, n) \
#define atomic64_cmpxchg_acquire(v, o, n) \ arch_cmpxchg_relaxed(&((v)->counter), (o), (n))
cmpxchg_acquire(&((v)->counter), (o), (n)) #define arch_atomic64_cmpxchg_acquire(v, o, n) \
arch_cmpxchg_acquire(&((v)->counter), (o), (n))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) #define arch_atomic64_xchg(v, new) \
#define atomic64_xchg_relaxed(v, new) xchg_relaxed(&((v)->counter), (new)) (arch_xchg(&((v)->counter), new))
#define arch_atomic64_xchg_relaxed(v, new) \
arch_xchg_relaxed(&((v)->counter), (new))
/** /**
* atomic64_fetch_add_unless - add unless the number is a given value * atomic64_fetch_add_unless - add unless the number is a given value
@@ -518,7 +524,7 @@ static __inline__ s64 atomic64_dec_if_positive(atomic64_t *v)
* Atomically adds @a to @v, so long as it was not @u. * Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v. * Returns the old value of @v.
*/ */
static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{ {
s64 t; s64 t;
@@ -539,7 +545,7 @@ static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
return t; return t;
} }
#define atomic64_fetch_add_unless atomic64_fetch_add_unless #define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
/** /**
* atomic_inc64_not_zero - increment unless the number is zero * atomic_inc64_not_zero - increment unless the number is zero
@@ -548,7 +554,7 @@ static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
* Atomically increments @v by 1, so long as @v is non-zero. * Atomically increments @v by 1, so long as @v is non-zero.
* Returns non-zero if @v was non-zero, and zero otherwise. * Returns non-zero if @v was non-zero, and zero otherwise.
*/ */
static __inline__ int atomic64_inc_not_zero(atomic64_t *v) static __inline__ int arch_atomic64_inc_not_zero(atomic64_t *v)
{ {
s64 t1, t2; s64 t1, t2;
@@ -569,7 +575,7 @@ static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
return t1 != 0; return t1 != 0;
} }
#define atomic64_inc_not_zero(v) atomic64_inc_not_zero((v)) #define arch_atomic64_inc_not_zero(v) arch_atomic64_inc_not_zero((v))
#endif /* __powerpc64__ */ #endif /* __powerpc64__ */

@@ -185,14 +185,14 @@ __xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_local"); BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_local");
return x; return x;
} }
#define xchg_local(ptr,x) \ #define arch_xchg_local(ptr,x) \
({ \ ({ \
__typeof__(*(ptr)) _x_ = (x); \ __typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_local((ptr), \ (__typeof__(*(ptr))) __xchg_local((ptr), \
(unsigned long)_x_, sizeof(*(ptr))); \ (unsigned long)_x_, sizeof(*(ptr))); \
}) })
#define xchg_relaxed(ptr, x) \ #define arch_xchg_relaxed(ptr, x) \
({ \ ({ \
__typeof__(*(ptr)) _x_ = (x); \ __typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_relaxed((ptr), \ (__typeof__(*(ptr))) __xchg_relaxed((ptr), \
@@ -467,7 +467,7 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire"); BUILD_BUG_ON_MSG(1, "Unsupported size for __cmpxchg_acquire");
return old; return old;
} }
#define cmpxchg(ptr, o, n) \ #define arch_cmpxchg(ptr, o, n) \
({ \ ({ \
__typeof__(*(ptr)) _o_ = (o); \ __typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \ __typeof__(*(ptr)) _n_ = (n); \
@@ -476,7 +476,7 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
}) })
#define cmpxchg_local(ptr, o, n) \ #define arch_cmpxchg_local(ptr, o, n) \
({ \ ({ \
__typeof__(*(ptr)) _o_ = (o); \ __typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \ __typeof__(*(ptr)) _n_ = (n); \
@@ -484,7 +484,7 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
(unsigned long)_n_, sizeof(*(ptr))); \ (unsigned long)_n_, sizeof(*(ptr))); \
}) })
#define cmpxchg_relaxed(ptr, o, n) \ #define arch_cmpxchg_relaxed(ptr, o, n) \
({ \ ({ \
__typeof__(*(ptr)) _o_ = (o); \ __typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \ __typeof__(*(ptr)) _n_ = (n); \
@@ -493,7 +493,7 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
sizeof(*(ptr))); \ sizeof(*(ptr))); \
}) })
#define cmpxchg_acquire(ptr, o, n) \ #define arch_cmpxchg_acquire(ptr, o, n) \
({ \ ({ \
__typeof__(*(ptr)) _o_ = (o); \ __typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \ __typeof__(*(ptr)) _n_ = (n); \
@@ -502,29 +502,29 @@ __cmpxchg_acquire(void *ptr, unsigned long old, unsigned long new,
sizeof(*(ptr))); \ sizeof(*(ptr))); \
}) })
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC64
#define cmpxchg64(ptr, o, n) \ #define arch_cmpxchg64(ptr, o, n) \
({ \ ({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg((ptr), (o), (n)); \ arch_cmpxchg((ptr), (o), (n)); \
}) })
#define cmpxchg64_local(ptr, o, n) \ #define arch_cmpxchg64_local(ptr, o, n) \
({ \ ({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \ arch_cmpxchg_local((ptr), (o), (n)); \
}) })
#define cmpxchg64_relaxed(ptr, o, n) \ #define arch_cmpxchg64_relaxed(ptr, o, n) \
({ \ ({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_relaxed((ptr), (o), (n)); \ arch_cmpxchg_relaxed((ptr), (o), (n)); \
}) })
#define cmpxchg64_acquire(ptr, o, n) \ #define arch_cmpxchg64_acquire(ptr, o, n) \
({ \ ({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_acquire((ptr), (o), (n)); \ arch_cmpxchg_acquire((ptr), (o), (n)); \
}) })
#else #else
#include <asm-generic/cmpxchg-local.h> #include <asm-generic/cmpxchg-local.h>
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) #define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#endif #endif
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */

@@ -37,7 +37,7 @@ static __always_inline void queued_spin_lock(struct qspinlock *lock)
{ {
u32 val = 0; u32 val = 0;
if (likely(atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL))) if (likely(arch_atomic_try_cmpxchg_lock(&lock->val, &val, _Q_LOCKED_VAL)))
return; return;
queued_spin_lock_slowpath(lock, val); queued_spin_lock_slowpath(lock, val);


@ -25,22 +25,22 @@
#define __atomic_release_fence() \ #define __atomic_release_fence() \
__asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory"); __asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory");
static __always_inline int atomic_read(const atomic_t *v) static __always_inline int arch_atomic_read(const atomic_t *v)
{ {
return READ_ONCE(v->counter); return READ_ONCE(v->counter);
} }
static __always_inline void atomic_set(atomic_t *v, int i) static __always_inline void arch_atomic_set(atomic_t *v, int i)
{ {
WRITE_ONCE(v->counter, i); WRITE_ONCE(v->counter, i);
} }
#ifndef CONFIG_GENERIC_ATOMIC64 #ifndef CONFIG_GENERIC_ATOMIC64
#define ATOMIC64_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) }
static __always_inline s64 atomic64_read(const atomic64_t *v) static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
{ {
return READ_ONCE(v->counter); return READ_ONCE(v->counter);
} }
static __always_inline void atomic64_set(atomic64_t *v, s64 i) static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
{ {
WRITE_ONCE(v->counter, i); WRITE_ONCE(v->counter, i);
} }
@ -53,7 +53,7 @@ static __always_inline void atomic64_set(atomic64_t *v, s64 i)
*/ */
#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix) \ #define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix) \
static __always_inline \ static __always_inline \
void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \ void arch_atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
{ \ { \
__asm__ __volatile__ ( \ __asm__ __volatile__ ( \
" amo" #asm_op "." #asm_type " zero, %1, %0" \ " amo" #asm_op "." #asm_type " zero, %1, %0" \
@ -87,7 +87,7 @@ ATOMIC_OPS(xor, xor, i)
*/ */
#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix) \ #define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix) \
static __always_inline \ static __always_inline \
c_type atomic##prefix##_fetch_##op##_relaxed(c_type i, \ c_type arch_atomic##prefix##_fetch_##op##_relaxed(c_type i, \
atomic##prefix##_t *v) \ atomic##prefix##_t *v) \
{ \ { \
register c_type ret; \ register c_type ret; \
@ -99,7 +99,7 @@ c_type atomic##prefix##_fetch_##op##_relaxed(c_type i, \
return ret; \ return ret; \
} \ } \
static __always_inline \ static __always_inline \
c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \ c_type arch_atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
{ \ { \
register c_type ret; \ register c_type ret; \
__asm__ __volatile__ ( \ __asm__ __volatile__ ( \
@ -112,15 +112,15 @@ c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix) \ #define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix) \
static __always_inline \ static __always_inline \
c_type atomic##prefix##_##op##_return_relaxed(c_type i, \ c_type arch_atomic##prefix##_##op##_return_relaxed(c_type i, \
atomic##prefix##_t *v) \ atomic##prefix##_t *v) \
{ \ { \
return atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I; \ return arch_atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I; \
} \ } \
static __always_inline \ static __always_inline \
c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \ c_type arch_atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
{ \ { \
return atomic##prefix##_fetch_##op(i, v) c_op I; \ return arch_atomic##prefix##_fetch_##op(i, v) c_op I; \
} }
#ifdef CONFIG_GENERIC_ATOMIC64 #ifdef CONFIG_GENERIC_ATOMIC64
@ -138,26 +138,26 @@ c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
ATOMIC_OPS(add, add, +, i) ATOMIC_OPS(add, add, +, i)
ATOMIC_OPS(sub, add, +, -i) ATOMIC_OPS(sub, add, +, -i)
#define atomic_add_return_relaxed atomic_add_return_relaxed #define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed #define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
#define atomic_add_return atomic_add_return #define arch_atomic_add_return arch_atomic_add_return
#define atomic_sub_return atomic_sub_return #define arch_atomic_sub_return arch_atomic_sub_return
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed #define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed #define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
#define atomic_fetch_add atomic_fetch_add #define arch_atomic_fetch_add arch_atomic_fetch_add
#define atomic_fetch_sub atomic_fetch_sub #define arch_atomic_fetch_sub arch_atomic_fetch_sub
#ifndef CONFIG_GENERIC_ATOMIC64 #ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_add_return_relaxed atomic64_add_return_relaxed #define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed #define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
#define atomic64_add_return atomic64_add_return #define arch_atomic64_add_return arch_atomic64_add_return
#define atomic64_sub_return atomic64_sub_return #define arch_atomic64_sub_return arch_atomic64_sub_return
#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed #define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed #define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
#define atomic64_fetch_add atomic64_fetch_add #define arch_atomic64_fetch_add arch_atomic64_fetch_add
#define atomic64_fetch_sub atomic64_fetch_sub #define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
#endif #endif
#undef ATOMIC_OPS #undef ATOMIC_OPS
@ -175,20 +175,20 @@ ATOMIC_OPS(and, and, i)
ATOMIC_OPS( or, or, i) ATOMIC_OPS( or, or, i)
ATOMIC_OPS(xor, xor, i) ATOMIC_OPS(xor, xor, i)
#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed #define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed #define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed #define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
#define atomic_fetch_and atomic_fetch_and #define arch_atomic_fetch_and arch_atomic_fetch_and
#define atomic_fetch_or atomic_fetch_or #define arch_atomic_fetch_or arch_atomic_fetch_or
#define atomic_fetch_xor atomic_fetch_xor #define arch_atomic_fetch_xor arch_atomic_fetch_xor
#ifndef CONFIG_GENERIC_ATOMIC64 #ifndef CONFIG_GENERIC_ATOMIC64
#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed #define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed #define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed #define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
#define atomic64_fetch_and atomic64_fetch_and #define arch_atomic64_fetch_and arch_atomic64_fetch_and
#define atomic64_fetch_or atomic64_fetch_or #define arch_atomic64_fetch_or arch_atomic64_fetch_or
#define atomic64_fetch_xor atomic64_fetch_xor #define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
#endif #endif
#undef ATOMIC_OPS #undef ATOMIC_OPS
@ -197,7 +197,7 @@ ATOMIC_OPS(xor, xor, i)
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
/* This is required to provide a full barrier on success. */ /* This is required to provide a full barrier on success. */
static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{ {
int prev, rc; int prev, rc;
@ -214,10 +214,10 @@ static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
: "memory"); : "memory");
return prev; return prev;
} }
#define atomic_fetch_add_unless atomic_fetch_add_unless #define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
#ifndef CONFIG_GENERIC_ATOMIC64 #ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{ {
s64 prev; s64 prev;
long rc; long rc;
@ -235,7 +235,7 @@ static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u
: "memory"); : "memory");
return prev; return prev;
} }
#define atomic64_fetch_add_unless atomic64_fetch_add_unless #define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
#endif #endif
/* /*
@ -244,45 +244,45 @@ static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u
*/ */
#define ATOMIC_OP(c_t, prefix, size) \ #define ATOMIC_OP(c_t, prefix, size) \
static __always_inline \ static __always_inline \
c_t atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n) \ c_t arch_atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n) \
{ \ { \
return __xchg_relaxed(&(v->counter), n, size); \ return __xchg_relaxed(&(v->counter), n, size); \
} \ } \
static __always_inline \ static __always_inline \
c_t atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n) \ c_t arch_atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n) \
{ \ { \
return __xchg_acquire(&(v->counter), n, size); \ return __xchg_acquire(&(v->counter), n, size); \
} \ } \
static __always_inline \ static __always_inline \
c_t atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n) \ c_t arch_atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n) \
{ \ { \
return __xchg_release(&(v->counter), n, size); \ return __xchg_release(&(v->counter), n, size); \
} \ } \
static __always_inline \ static __always_inline \
c_t atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n) \ c_t arch_atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n) \
{ \ { \
return __xchg(&(v->counter), n, size); \ return __xchg(&(v->counter), n, size); \
} \ } \
static __always_inline \ static __always_inline \
c_t atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v, \ c_t arch_atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v, \
c_t o, c_t n) \ c_t o, c_t n) \
{ \ { \
return __cmpxchg_relaxed(&(v->counter), o, n, size); \ return __cmpxchg_relaxed(&(v->counter), o, n, size); \
} \ } \
static __always_inline \ static __always_inline \
c_t atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v, \ c_t arch_atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v, \
c_t o, c_t n) \ c_t o, c_t n) \
{ \ { \
return __cmpxchg_acquire(&(v->counter), o, n, size); \ return __cmpxchg_acquire(&(v->counter), o, n, size); \
} \ } \
static __always_inline \ static __always_inline \
c_t atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v, \ c_t arch_atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v, \
c_t o, c_t n) \ c_t o, c_t n) \
{ \ { \
return __cmpxchg_release(&(v->counter), o, n, size); \ return __cmpxchg_release(&(v->counter), o, n, size); \
} \ } \
static __always_inline \ static __always_inline \
c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \ c_t arch_atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
{ \ { \
return __cmpxchg(&(v->counter), o, n, size); \ return __cmpxchg(&(v->counter), o, n, size); \
} }
@ -298,19 +298,19 @@ c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
ATOMIC_OPS() ATOMIC_OPS()
#define atomic_xchg_relaxed atomic_xchg_relaxed #define arch_atomic_xchg_relaxed arch_atomic_xchg_relaxed
#define atomic_xchg_acquire atomic_xchg_acquire #define arch_atomic_xchg_acquire arch_atomic_xchg_acquire
#define atomic_xchg_release atomic_xchg_release #define arch_atomic_xchg_release arch_atomic_xchg_release
#define atomic_xchg atomic_xchg #define arch_atomic_xchg arch_atomic_xchg
#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed #define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed
#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire #define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire
#define atomic_cmpxchg_release atomic_cmpxchg_release #define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release
#define atomic_cmpxchg atomic_cmpxchg #define arch_atomic_cmpxchg arch_atomic_cmpxchg
#undef ATOMIC_OPS #undef ATOMIC_OPS
#undef ATOMIC_OP #undef ATOMIC_OP
static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset) static __always_inline int arch_atomic_sub_if_positive(atomic_t *v, int offset)
{ {
int prev, rc; int prev, rc;
@ -328,10 +328,10 @@ static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
return prev - offset; return prev - offset;
} }
#define atomic_dec_if_positive(v) atomic_sub_if_positive(v, 1) #define arch_atomic_dec_if_positive(v) arch_atomic_sub_if_positive(v, 1)
#ifndef CONFIG_GENERIC_ATOMIC64 #ifndef CONFIG_GENERIC_ATOMIC64
static __always_inline s64 atomic64_sub_if_positive(atomic64_t *v, s64 offset) static __always_inline s64 arch_atomic64_sub_if_positive(atomic64_t *v, s64 offset)
{ {
s64 prev; s64 prev;
long rc; long rc;
@ -350,7 +350,7 @@ static __always_inline s64 atomic64_sub_if_positive(atomic64_t *v, s64 offset)
return prev - offset; return prev - offset;
} }
#define atomic64_dec_if_positive(v) atomic64_sub_if_positive(v, 1) #define arch_atomic64_dec_if_positive(v) arch_atomic64_sub_if_positive(v, 1)
#endif #endif
#endif /* _ASM_RISCV_ATOMIC_H */ #endif /* _ASM_RISCV_ATOMIC_H */
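[ Note: these arch_atomic_*() renames only take effect together with the
  generated wrappers in include/asm-generic/atomic-instrumented.h, which
  rebuild the plain atomic_*() API on top of the arch_ ops and add
  KASAN/KCSAN instrumentation. A hand-written sketch of what one such
  wrapper looks like -- illustrative only, not copied from the generated
  header: ]

    static __always_inline int
    atomic_add_return(int i, atomic_t *v)
    {
            /* report the access to KASAN/KCSAN, then call the arch op */
            instrument_atomic_read_write(v, sizeof(*v));
            return arch_atomic_add_return(i, v);
    }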


@ -37,7 +37,7 @@
__ret; \ __ret; \
}) })
#define xchg_relaxed(ptr, x) \ #define arch_xchg_relaxed(ptr, x) \
({ \ ({ \
__typeof__(*(ptr)) _x_ = (x); \ __typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_relaxed((ptr), \ (__typeof__(*(ptr))) __xchg_relaxed((ptr), \
@ -72,7 +72,7 @@
__ret; \ __ret; \
}) })
#define xchg_acquire(ptr, x) \ #define arch_xchg_acquire(ptr, x) \
({ \ ({ \
__typeof__(*(ptr)) _x_ = (x); \ __typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_acquire((ptr), \ (__typeof__(*(ptr))) __xchg_acquire((ptr), \
@ -107,7 +107,7 @@
__ret; \ __ret; \
}) })
#define xchg_release(ptr, x) \ #define arch_xchg_release(ptr, x) \
({ \ ({ \
__typeof__(*(ptr)) _x_ = (x); \ __typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg_release((ptr), \ (__typeof__(*(ptr))) __xchg_release((ptr), \
@ -140,7 +140,7 @@
__ret; \ __ret; \
}) })
#define xchg(ptr, x) \ #define arch_xchg(ptr, x) \
({ \ ({ \
__typeof__(*(ptr)) _x_ = (x); \ __typeof__(*(ptr)) _x_ = (x); \
(__typeof__(*(ptr))) __xchg((ptr), _x_, sizeof(*(ptr))); \ (__typeof__(*(ptr))) __xchg((ptr), _x_, sizeof(*(ptr))); \
@ -149,13 +149,13 @@
#define xchg32(ptr, x) \ #define xchg32(ptr, x) \
({ \ ({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 4); \ BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
xchg((ptr), (x)); \ arch_xchg((ptr), (x)); \
}) })
#define xchg64(ptr, x) \ #define xchg64(ptr, x) \
({ \ ({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
xchg((ptr), (x)); \ arch_xchg((ptr), (x)); \
}) })
/* /*
@ -199,7 +199,7 @@
__ret; \ __ret; \
}) })
#define cmpxchg_relaxed(ptr, o, n) \ #define arch_cmpxchg_relaxed(ptr, o, n) \
({ \ ({ \
__typeof__(*(ptr)) _o_ = (o); \ __typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \ __typeof__(*(ptr)) _n_ = (n); \
@ -245,7 +245,7 @@
__ret; \ __ret; \
}) })
#define cmpxchg_acquire(ptr, o, n) \ #define arch_cmpxchg_acquire(ptr, o, n) \
({ \ ({ \
__typeof__(*(ptr)) _o_ = (o); \ __typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \ __typeof__(*(ptr)) _n_ = (n); \
@ -291,7 +291,7 @@
__ret; \ __ret; \
}) })
#define cmpxchg_release(ptr, o, n) \ #define arch_cmpxchg_release(ptr, o, n) \
({ \ ({ \
__typeof__(*(ptr)) _o_ = (o); \ __typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \ __typeof__(*(ptr)) _n_ = (n); \
@ -337,7 +337,7 @@
__ret; \ __ret; \
}) })
#define cmpxchg(ptr, o, n) \ #define arch_cmpxchg(ptr, o, n) \
({ \ ({ \
__typeof__(*(ptr)) _o_ = (o); \ __typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \ __typeof__(*(ptr)) _n_ = (n); \
@ -345,31 +345,31 @@
_o_, _n_, sizeof(*(ptr))); \ _o_, _n_, sizeof(*(ptr))); \
}) })
#define cmpxchg_local(ptr, o, n) \ #define arch_cmpxchg_local(ptr, o, n) \
(__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr)))) (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
#define cmpxchg32(ptr, o, n) \ #define cmpxchg32(ptr, o, n) \
({ \ ({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 4); \ BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
cmpxchg((ptr), (o), (n)); \ arch_cmpxchg((ptr), (o), (n)); \
}) })
#define cmpxchg32_local(ptr, o, n) \ #define cmpxchg32_local(ptr, o, n) \
({ \ ({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 4); \ BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
cmpxchg_relaxed((ptr), (o), (n)) \ arch_cmpxchg_relaxed((ptr), (o), (n)) \
}) })
#define cmpxchg64(ptr, o, n) \ #define arch_cmpxchg64(ptr, o, n) \
({ \ ({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg((ptr), (o), (n)); \ arch_cmpxchg((ptr), (o), (n)); \
}) })
#define cmpxchg64_local(ptr, o, n) \ #define arch_cmpxchg64_local(ptr, o, n) \
({ \ ({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_relaxed((ptr), (o), (n)); \ arch_cmpxchg_relaxed((ptr), (o), (n)); \
}) })
#endif /* _ASM_RISCV_CMPXCHG_H */ #endif /* _ASM_RISCV_CMPXCHG_H */


@@ -147,6 +147,4 @@ ATOMIC64_OPS(xor)
 #define arch_atomic64_fetch_sub(_i, _v) arch_atomic64_fetch_add(-(s64)(_i), _v)
 #define arch_atomic64_sub(_i, _v) arch_atomic64_add(-(s64)(_i), _v)
 
-#define ARCH_ATOMIC
-
 #endif /* __ARCH_S390_ATOMIC__ */


@ -3,7 +3,7 @@
#define __ASM_SH_ATOMIC_GRB_H #define __ASM_SH_ATOMIC_GRB_H
#define ATOMIC_OP(op) \ #define ATOMIC_OP(op) \
static inline void atomic_##op(int i, atomic_t *v) \ static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \ { \
int tmp; \ int tmp; \
\ \
@ -23,7 +23,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \ } \
#define ATOMIC_OP_RETURN(op) \ #define ATOMIC_OP_RETURN(op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \ static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \ { \
int tmp; \ int tmp; \
\ \
@ -45,7 +45,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
} }
#define ATOMIC_FETCH_OP(op) \ #define ATOMIC_FETCH_OP(op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \ static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \ { \
int res, tmp; \ int res, tmp; \
\ \


@ -11,7 +11,7 @@
*/ */
#define ATOMIC_OP(op, c_op) \ #define ATOMIC_OP(op, c_op) \
static inline void atomic_##op(int i, atomic_t *v) \ static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \ { \
unsigned long flags; \ unsigned long flags; \
\ \
@ -21,7 +21,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} }
#define ATOMIC_OP_RETURN(op, c_op) \ #define ATOMIC_OP_RETURN(op, c_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \ static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \ { \
unsigned long temp, flags; \ unsigned long temp, flags; \
\ \
@ -35,7 +35,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
} }
#define ATOMIC_FETCH_OP(op, c_op) \ #define ATOMIC_FETCH_OP(op, c_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \ static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \ { \
unsigned long temp, flags; \ unsigned long temp, flags; \
\ \


@ -17,7 +17,7 @@
*/ */
#define ATOMIC_OP(op) \ #define ATOMIC_OP(op) \
static inline void atomic_##op(int i, atomic_t *v) \ static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \ { \
unsigned long tmp; \ unsigned long tmp; \
\ \
@ -32,7 +32,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} }
#define ATOMIC_OP_RETURN(op) \ #define ATOMIC_OP_RETURN(op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \ static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \ { \
unsigned long temp; \ unsigned long temp; \
\ \
@ -50,7 +50,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
} }
#define ATOMIC_FETCH_OP(op) \ #define ATOMIC_FETCH_OP(op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \ static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \ { \
unsigned long res, temp; \ unsigned long res, temp; \
\ \


@@ -19,8 +19,8 @@
 #include <asm/cmpxchg.h>
 #include <asm/barrier.h>
 
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
 
 #if defined(CONFIG_GUSA_RB)
 #include <asm/atomic-grb.h>
@@ -30,8 +30,8 @@
 #include <asm/atomic-irq.h>
 #endif
 
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
 
 #endif /* CONFIG_CPU_J2 */


@ -45,7 +45,7 @@ extern void __xchg_called_with_bad_pointer(void);
__xchg__res; \ __xchg__res; \
}) })
#define xchg(ptr,x) \ #define arch_xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr)))) ((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
/* This function doesn't exist, so you'll get a linker error /* This function doesn't exist, so you'll get a linker error
@ -63,7 +63,7 @@ static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
return old; return old;
} }
#define cmpxchg(ptr,o,n) \ #define arch_cmpxchg(ptr,o,n) \
({ \ ({ \
__typeof__(*(ptr)) _o_ = (o); \ __typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \ __typeof__(*(ptr)) _n_ = (n); \


@ -18,30 +18,30 @@
#include <asm/barrier.h> #include <asm/barrier.h>
#include <asm-generic/atomic64.h> #include <asm-generic/atomic64.h>
int atomic_add_return(int, atomic_t *); int arch_atomic_add_return(int, atomic_t *);
int atomic_fetch_add(int, atomic_t *); int arch_atomic_fetch_add(int, atomic_t *);
int atomic_fetch_and(int, atomic_t *); int arch_atomic_fetch_and(int, atomic_t *);
int atomic_fetch_or(int, atomic_t *); int arch_atomic_fetch_or(int, atomic_t *);
int atomic_fetch_xor(int, atomic_t *); int arch_atomic_fetch_xor(int, atomic_t *);
int atomic_cmpxchg(atomic_t *, int, int); int arch_atomic_cmpxchg(atomic_t *, int, int);
int atomic_xchg(atomic_t *, int); int arch_atomic_xchg(atomic_t *, int);
int atomic_fetch_add_unless(atomic_t *, int, int); int arch_atomic_fetch_add_unless(atomic_t *, int, int);
void atomic_set(atomic_t *, int); void arch_atomic_set(atomic_t *, int);
#define atomic_fetch_add_unless atomic_fetch_add_unless #define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
#define atomic_set_release(v, i) atomic_set((v), (i)) #define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
#define atomic_read(v) READ_ONCE((v)->counter) #define arch_atomic_read(v) READ_ONCE((v)->counter)
#define atomic_add(i, v) ((void)atomic_add_return( (int)(i), (v))) #define arch_atomic_add(i, v) ((void)arch_atomic_add_return( (int)(i), (v)))
#define atomic_sub(i, v) ((void)atomic_add_return(-(int)(i), (v))) #define arch_atomic_sub(i, v) ((void)arch_atomic_add_return(-(int)(i), (v)))
#define atomic_and(i, v) ((void)atomic_fetch_and((i), (v))) #define arch_atomic_and(i, v) ((void)arch_atomic_fetch_and((i), (v)))
#define atomic_or(i, v) ((void)atomic_fetch_or((i), (v))) #define arch_atomic_or(i, v) ((void)arch_atomic_fetch_or((i), (v)))
#define atomic_xor(i, v) ((void)atomic_fetch_xor((i), (v))) #define arch_atomic_xor(i, v) ((void)arch_atomic_fetch_xor((i), (v)))
#define atomic_sub_return(i, v) (atomic_add_return(-(int)(i), (v))) #define arch_atomic_sub_return(i, v) (arch_atomic_add_return(-(int)(i), (v)))
#define atomic_fetch_sub(i, v) (atomic_fetch_add (-(int)(i), (v))) #define arch_atomic_fetch_sub(i, v) (arch_atomic_fetch_add (-(int)(i), (v)))
#endif /* !(__ARCH_SPARC_ATOMIC__) */ #endif /* !(__ARCH_SPARC_ATOMIC__) */


@ -14,23 +14,23 @@
#define ATOMIC64_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) }
#define atomic_read(v) READ_ONCE((v)->counter) #define arch_atomic_read(v) READ_ONCE((v)->counter)
#define atomic64_read(v) READ_ONCE((v)->counter) #define arch_atomic64_read(v) READ_ONCE((v)->counter)
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i)) #define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#define atomic64_set(v, i) WRITE_ONCE(((v)->counter), (i)) #define arch_atomic64_set(v, i) WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP(op) \ #define ATOMIC_OP(op) \
void atomic_##op(int, atomic_t *); \ void arch_atomic_##op(int, atomic_t *); \
void atomic64_##op(s64, atomic64_t *); void arch_atomic64_##op(s64, atomic64_t *);
#define ATOMIC_OP_RETURN(op) \ #define ATOMIC_OP_RETURN(op) \
int atomic_##op##_return(int, atomic_t *); \ int arch_atomic_##op##_return(int, atomic_t *); \
s64 atomic64_##op##_return(s64, atomic64_t *); s64 arch_atomic64_##op##_return(s64, atomic64_t *);
#define ATOMIC_FETCH_OP(op) \ #define ATOMIC_FETCH_OP(op) \
int atomic_fetch_##op(int, atomic_t *); \ int arch_atomic_fetch_##op(int, atomic_t *); \
s64 atomic64_fetch_##op(s64, atomic64_t *); s64 arch_atomic64_fetch_##op(s64, atomic64_t *);
#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op) #define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op) ATOMIC_FETCH_OP(op)
@ -49,18 +49,18 @@ ATOMIC_OPS(xor)
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP #undef ATOMIC_OP
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) #define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
static inline int atomic_xchg(atomic_t *v, int new) static inline int arch_atomic_xchg(atomic_t *v, int new)
{ {
return xchg(&v->counter, new); return arch_xchg(&v->counter, new);
} }
#define atomic64_cmpxchg(v, o, n) \ #define arch_atomic64_cmpxchg(v, o, n) \
((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) ((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) #define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
s64 atomic64_dec_if_positive(atomic64_t *v); s64 arch_atomic64_dec_if_positive(atomic64_t *v);
#define atomic64_dec_if_positive atomic64_dec_if_positive #define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
#endif /* !(__ARCH_SPARC64_ATOMIC__) */ #endif /* !(__ARCH_SPARC64_ATOMIC__) */


@ -25,7 +25,7 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int
return x; return x;
} }
#define xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));}) #define arch_xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
/* Emulate cmpxchg() the same way we emulate atomics, /* Emulate cmpxchg() the same way we emulate atomics,
* by hashing the object address and indexing into an array * by hashing the object address and indexing into an array
@ -55,7 +55,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
return old; return old;
} }
#define cmpxchg(ptr, o, n) \ #define arch_cmpxchg(ptr, o, n) \
({ \ ({ \
__typeof__(*(ptr)) _o_ = (o); \ __typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \ __typeof__(*(ptr)) _n_ = (n); \
@ -64,7 +64,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
}) })
u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new); u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new);
#define cmpxchg64(ptr, old, new) __cmpxchg_u64(ptr, old, new) #define arch_cmpxchg64(ptr, old, new) __cmpxchg_u64(ptr, old, new)
#include <asm-generic/cmpxchg-local.h> #include <asm-generic/cmpxchg-local.h>
@ -72,9 +72,9 @@ u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new);
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available. * them available.
*/ */
#define cmpxchg_local(ptr, o, n) \ #define arch_cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ ((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o),\
(unsigned long)(n), sizeof(*(ptr)))) (unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) #define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#endif /* __ARCH_SPARC_CMPXCHG__ */ #endif /* __ARCH_SPARC_CMPXCHG__ */


@ -52,7 +52,7 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
return val; return val;
} }
#define xchg(ptr,x) \ #define arch_xchg(ptr,x) \
({ __typeof__(*(ptr)) __ret; \ ({ __typeof__(*(ptr)) __ret; \
__ret = (__typeof__(*(ptr))) \ __ret = (__typeof__(*(ptr))) \
__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \ __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
@ -168,7 +168,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
return old; return old;
} }
#define cmpxchg(ptr,o,n) \ #define arch_cmpxchg(ptr,o,n) \
({ \ ({ \
__typeof__(*(ptr)) _o_ = (o); \ __typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \ __typeof__(*(ptr)) _n_ = (n); \
@ -189,20 +189,20 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
case 4: case 4:
case 8: return __cmpxchg(ptr, old, new, size); case 8: return __cmpxchg(ptr, old, new, size);
default: default:
return __cmpxchg_local_generic(ptr, old, new, size); return __generic_cmpxchg_local(ptr, old, new, size);
} }
return old; return old;
} }
#define cmpxchg_local(ptr, o, n) \ #define arch_cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr)))) (unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) \ #define arch_cmpxchg64_local(ptr, o, n) \
({ \ ({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \ cmpxchg_local((ptr), (o), (n)); \
}) })
#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) #define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
#endif /* __ARCH_SPARC64_CMPXCHG__ */ #endif /* __ARCH_SPARC64_CMPXCHG__ */


@ -29,7 +29,7 @@ static DEFINE_SPINLOCK(dummy);
#endif /* SMP */ #endif /* SMP */
#define ATOMIC_FETCH_OP(op, c_op) \ #define ATOMIC_FETCH_OP(op, c_op) \
int atomic_fetch_##op(int i, atomic_t *v) \ int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \ { \
int ret; \ int ret; \
unsigned long flags; \ unsigned long flags; \
@ -41,10 +41,10 @@ int atomic_fetch_##op(int i, atomic_t *v) \
spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \ spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
return ret; \ return ret; \
} \ } \
EXPORT_SYMBOL(atomic_fetch_##op); EXPORT_SYMBOL(arch_atomic_fetch_##op);
#define ATOMIC_OP_RETURN(op, c_op) \ #define ATOMIC_OP_RETURN(op, c_op) \
int atomic_##op##_return(int i, atomic_t *v) \ int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \ { \
int ret; \ int ret; \
unsigned long flags; \ unsigned long flags; \
@ -55,7 +55,7 @@ int atomic_##op##_return(int i, atomic_t *v) \
spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \ spin_unlock_irqrestore(ATOMIC_HASH(v), flags); \
return ret; \ return ret; \
} \ } \
EXPORT_SYMBOL(atomic_##op##_return); EXPORT_SYMBOL(arch_atomic_##op##_return);
ATOMIC_OP_RETURN(add, +=) ATOMIC_OP_RETURN(add, +=)
@ -67,7 +67,7 @@ ATOMIC_FETCH_OP(xor, ^=)
#undef ATOMIC_FETCH_OP #undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
int atomic_xchg(atomic_t *v, int new) int arch_atomic_xchg(atomic_t *v, int new)
{ {
int ret; int ret;
unsigned long flags; unsigned long flags;
@ -78,9 +78,9 @@ int atomic_xchg(atomic_t *v, int new)
spin_unlock_irqrestore(ATOMIC_HASH(v), flags); spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
return ret; return ret;
} }
EXPORT_SYMBOL(atomic_xchg); EXPORT_SYMBOL(arch_atomic_xchg);
int atomic_cmpxchg(atomic_t *v, int old, int new) int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{ {
int ret; int ret;
unsigned long flags; unsigned long flags;
@ -93,9 +93,9 @@ int atomic_cmpxchg(atomic_t *v, int old, int new)
spin_unlock_irqrestore(ATOMIC_HASH(v), flags); spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
return ret; return ret;
} }
EXPORT_SYMBOL(atomic_cmpxchg); EXPORT_SYMBOL(arch_atomic_cmpxchg);
int atomic_fetch_add_unless(atomic_t *v, int a, int u) int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{ {
int ret; int ret;
unsigned long flags; unsigned long flags;
@ -107,10 +107,10 @@ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
spin_unlock_irqrestore(ATOMIC_HASH(v), flags); spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
return ret; return ret;
} }
EXPORT_SYMBOL(atomic_fetch_add_unless); EXPORT_SYMBOL(arch_atomic_fetch_add_unless);
/* Atomic operations are already serializing */ /* Atomic operations are already serializing */
void atomic_set(atomic_t *v, int i) void arch_atomic_set(atomic_t *v, int i)
{ {
unsigned long flags; unsigned long flags;
@ -118,7 +118,7 @@ void atomic_set(atomic_t *v, int i)
v->counter = i; v->counter = i;
spin_unlock_irqrestore(ATOMIC_HASH(v), flags); spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
} }
EXPORT_SYMBOL(atomic_set); EXPORT_SYMBOL(arch_atomic_set);
unsigned long ___set_bit(unsigned long *addr, unsigned long mask) unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
{ {


@ -19,7 +19,7 @@
*/ */
#define ATOMIC_OP(op) \ #define ATOMIC_OP(op) \
ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ ENTRY(arch_atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \ BACKOFF_SETUP(%o2); \
1: lduw [%o1], %g1; \ 1: lduw [%o1], %g1; \
op %g1, %o0, %g7; \ op %g1, %o0, %g7; \
@ -30,11 +30,11 @@ ENTRY(atomic_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \ retl; \
nop; \ nop; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \
ENDPROC(atomic_##op); \ ENDPROC(arch_atomic_##op); \
EXPORT_SYMBOL(atomic_##op); EXPORT_SYMBOL(arch_atomic_##op);
#define ATOMIC_OP_RETURN(op) \ #define ATOMIC_OP_RETURN(op) \
ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \ ENTRY(arch_atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */\
BACKOFF_SETUP(%o2); \ BACKOFF_SETUP(%o2); \
1: lduw [%o1], %g1; \ 1: lduw [%o1], %g1; \
op %g1, %o0, %g7; \ op %g1, %o0, %g7; \
@ -45,11 +45,11 @@ ENTRY(atomic_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \ retl; \
sra %g1, 0, %o0; \ sra %g1, 0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \
ENDPROC(atomic_##op##_return); \ ENDPROC(arch_atomic_##op##_return); \
EXPORT_SYMBOL(atomic_##op##_return); EXPORT_SYMBOL(arch_atomic_##op##_return);
#define ATOMIC_FETCH_OP(op) \ #define ATOMIC_FETCH_OP(op) \
ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ ENTRY(arch_atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \ BACKOFF_SETUP(%o2); \
1: lduw [%o1], %g1; \ 1: lduw [%o1], %g1; \
op %g1, %o0, %g7; \ op %g1, %o0, %g7; \
@ -60,8 +60,8 @@ ENTRY(atomic_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \ retl; \
sra %g1, 0, %o0; \ sra %g1, 0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \
ENDPROC(atomic_fetch_##op); \ ENDPROC(arch_atomic_fetch_##op); \
EXPORT_SYMBOL(atomic_fetch_##op); EXPORT_SYMBOL(arch_atomic_fetch_##op);
ATOMIC_OP(add) ATOMIC_OP(add)
ATOMIC_OP_RETURN(add) ATOMIC_OP_RETURN(add)
@ -85,7 +85,7 @@ ATOMIC_FETCH_OP(xor)
#undef ATOMIC_OP #undef ATOMIC_OP
#define ATOMIC64_OP(op) \ #define ATOMIC64_OP(op) \
ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ ENTRY(arch_atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \ BACKOFF_SETUP(%o2); \
1: ldx [%o1], %g1; \ 1: ldx [%o1], %g1; \
op %g1, %o0, %g7; \ op %g1, %o0, %g7; \
@ -96,11 +96,11 @@ ENTRY(atomic64_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \ retl; \
nop; \ nop; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \
ENDPROC(atomic64_##op); \ ENDPROC(arch_atomic64_##op); \
EXPORT_SYMBOL(atomic64_##op); EXPORT_SYMBOL(arch_atomic64_##op);
#define ATOMIC64_OP_RETURN(op) \ #define ATOMIC64_OP_RETURN(op) \
ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \ ENTRY(arch_atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \ BACKOFF_SETUP(%o2); \
1: ldx [%o1], %g1; \ 1: ldx [%o1], %g1; \
op %g1, %o0, %g7; \ op %g1, %o0, %g7; \
@ -111,11 +111,11 @@ ENTRY(atomic64_##op##_return) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \ retl; \
op %g1, %o0, %o0; \ op %g1, %o0, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \
ENDPROC(atomic64_##op##_return); \ ENDPROC(arch_atomic64_##op##_return); \
EXPORT_SYMBOL(atomic64_##op##_return); EXPORT_SYMBOL(arch_atomic64_##op##_return);
#define ATOMIC64_FETCH_OP(op) \ #define ATOMIC64_FETCH_OP(op) \
ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \ ENTRY(arch_atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
BACKOFF_SETUP(%o2); \ BACKOFF_SETUP(%o2); \
1: ldx [%o1], %g1; \ 1: ldx [%o1], %g1; \
op %g1, %o0, %g7; \ op %g1, %o0, %g7; \
@ -126,8 +126,8 @@ ENTRY(atomic64_fetch_##op) /* %o0 = increment, %o1 = atomic_ptr */ \
retl; \ retl; \
mov %g1, %o0; \ mov %g1, %o0; \
2: BACKOFF_SPIN(%o2, %o3, 1b); \ 2: BACKOFF_SPIN(%o2, %o3, 1b); \
ENDPROC(atomic64_fetch_##op); \ ENDPROC(arch_atomic64_fetch_##op); \
EXPORT_SYMBOL(atomic64_fetch_##op); EXPORT_SYMBOL(arch_atomic64_fetch_##op);
ATOMIC64_OP(add) ATOMIC64_OP(add)
ATOMIC64_OP_RETURN(add) ATOMIC64_OP_RETURN(add)
@ -150,7 +150,7 @@ ATOMIC64_FETCH_OP(xor)
#undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP #undef ATOMIC64_OP
ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */ ENTRY(arch_atomic64_dec_if_positive) /* %o0 = atomic_ptr */
BACKOFF_SETUP(%o2) BACKOFF_SETUP(%o2)
1: ldx [%o0], %g1 1: ldx [%o0], %g1
brlez,pn %g1, 3f brlez,pn %g1, 3f
@ -162,5 +162,5 @@ ENTRY(atomic64_dec_if_positive) /* %o0 = atomic_ptr */
3: retl 3: retl
sub %g1, 1, %o0 sub %g1, 1, %o0
2: BACKOFF_SPIN(%o2, %o3, 1b) 2: BACKOFF_SPIN(%o2, %o3, 1b)
ENDPROC(atomic64_dec_if_positive) ENDPROC(arch_atomic64_dec_if_positive)
EXPORT_SYMBOL(atomic64_dec_if_positive) EXPORT_SYMBOL(arch_atomic64_dec_if_positive)


@@ -269,6 +269,4 @@ static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
 # include <asm/atomic64_64.h>
 #endif
 
-#define ARCH_ATOMIC
-
 #endif /* _ASM_X86_ATOMIC_H */


@ -43,7 +43,7 @@
* *
* Atomically reads the value of @v. * Atomically reads the value of @v.
*/ */
#define atomic_read(v) READ_ONCE((v)->counter) #define arch_atomic_read(v) READ_ONCE((v)->counter)
/** /**
* atomic_set - set atomic variable * atomic_set - set atomic variable
@ -52,11 +52,11 @@
* *
* Atomically sets the value of @v to @i. * Atomically sets the value of @v to @i.
*/ */
#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i)) #define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
#if XCHAL_HAVE_EXCLUSIVE #if XCHAL_HAVE_EXCLUSIVE
#define ATOMIC_OP(op) \ #define ATOMIC_OP(op) \
static inline void atomic_##op(int i, atomic_t *v) \ static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \ { \
unsigned long tmp; \ unsigned long tmp; \
int result; \ int result; \
@ -74,7 +74,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \ } \
#define ATOMIC_OP_RETURN(op) \ #define ATOMIC_OP_RETURN(op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \ static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \ { \
unsigned long tmp; \ unsigned long tmp; \
int result; \ int result; \
@ -95,7 +95,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
} }
#define ATOMIC_FETCH_OP(op) \ #define ATOMIC_FETCH_OP(op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \ static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \ { \
unsigned long tmp; \ unsigned long tmp; \
int result; \ int result; \
@ -116,7 +116,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
#elif XCHAL_HAVE_S32C1I #elif XCHAL_HAVE_S32C1I
#define ATOMIC_OP(op) \ #define ATOMIC_OP(op) \
static inline void atomic_##op(int i, atomic_t * v) \ static inline void arch_atomic_##op(int i, atomic_t * v) \
{ \ { \
unsigned long tmp; \ unsigned long tmp; \
int result; \ int result; \
@ -135,7 +135,7 @@ static inline void atomic_##op(int i, atomic_t * v) \
} \ } \
#define ATOMIC_OP_RETURN(op) \ #define ATOMIC_OP_RETURN(op) \
static inline int atomic_##op##_return(int i, atomic_t * v) \ static inline int arch_atomic_##op##_return(int i, atomic_t * v) \
{ \ { \
unsigned long tmp; \ unsigned long tmp; \
int result; \ int result; \
@ -157,7 +157,7 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
} }
#define ATOMIC_FETCH_OP(op) \ #define ATOMIC_FETCH_OP(op) \
static inline int atomic_fetch_##op(int i, atomic_t * v) \ static inline int arch_atomic_fetch_##op(int i, atomic_t * v) \
{ \ { \
unsigned long tmp; \ unsigned long tmp; \
int result; \ int result; \
@ -180,7 +180,7 @@ static inline int atomic_fetch_##op(int i, atomic_t * v) \
#else /* XCHAL_HAVE_S32C1I */ #else /* XCHAL_HAVE_S32C1I */
#define ATOMIC_OP(op) \ #define ATOMIC_OP(op) \
static inline void atomic_##op(int i, atomic_t * v) \ static inline void arch_atomic_##op(int i, atomic_t * v) \
{ \ { \
unsigned int vval; \ unsigned int vval; \
\ \
@ -198,7 +198,7 @@ static inline void atomic_##op(int i, atomic_t * v) \
} \ } \
#define ATOMIC_OP_RETURN(op) \ #define ATOMIC_OP_RETURN(op) \
static inline int atomic_##op##_return(int i, atomic_t * v) \ static inline int arch_atomic_##op##_return(int i, atomic_t * v) \
{ \ { \
unsigned int vval; \ unsigned int vval; \
\ \
@ -218,7 +218,7 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
} }
#define ATOMIC_FETCH_OP(op) \ #define ATOMIC_FETCH_OP(op) \
static inline int atomic_fetch_##op(int i, atomic_t * v) \ static inline int arch_atomic_fetch_##op(int i, atomic_t * v) \
{ \ { \
unsigned int tmp, vval; \ unsigned int tmp, vval; \
\ \
@ -257,7 +257,7 @@ ATOMIC_OPS(xor)
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP #undef ATOMIC_OP
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n))) #define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) #define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#endif /* _XTENSA_ATOMIC_H */ #endif /* _XTENSA_ATOMIC_H */


@ -80,7 +80,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
} }
} }
#define cmpxchg(ptr,o,n) \ #define arch_cmpxchg(ptr,o,n) \
({ __typeof__(*(ptr)) _o_ = (o); \ ({ __typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \ __typeof__(*(ptr)) _n_ = (n); \
(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
@ -97,7 +97,7 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
case 4: case 4:
return __cmpxchg_u32(ptr, old, new); return __cmpxchg_u32(ptr, old, new);
default: default:
return __cmpxchg_local_generic(ptr, old, new, size); return __generic_cmpxchg_local(ptr, old, new, size);
} }
return old; return old;
@ -107,11 +107,11 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available. * them available.
*/ */
#define cmpxchg_local(ptr, o, n) \ #define arch_cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\ ((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o),\
(unsigned long)(n), sizeof(*(ptr)))) (unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n)) #define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n)) #define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))
/* /*
* xchg_u32 * xchg_u32
@ -169,7 +169,7 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
#endif #endif
} }
#define xchg(ptr,x) \ #define arch_xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
static inline u32 xchg_small(volatile void *ptr, u32 x, int size) static inline u32 xchg_small(volatile void *ptr, u32 x, int size)



@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* SPDX-License-Identifier: GPL-2.0-or-later */
/* /*
* Generic C implementation of atomic counter operations. Usable on * Generic C implementation of atomic counter operations. Do not include in
* UP systems only. Do not include in machine independent code. * machine independent code.
* *
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com) * Written by David Howells (dhowells@redhat.com)
@ -12,56 +12,39 @@
#include <asm/cmpxchg.h> #include <asm/cmpxchg.h>
#include <asm/barrier.h> #include <asm/barrier.h>
/*
* atomic_$op() - $op integer to atomic variable
* @i: integer value to $op
* @v: pointer to the atomic variable
*
* Atomically $ops @i to @v. Does not strictly guarantee a memory-barrier, use
* smp_mb__{before,after}_atomic().
*/
/*
* atomic_$op_return() - $op interer to atomic variable and returns the result
* @i: integer value to $op
* @v: pointer to the atomic variable
*
* Atomically $ops @i to @v. Does imply a full memory barrier.
*/
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
/* we can build all atomic primitives from cmpxchg */ /* we can build all atomic primitives from cmpxchg */
#define ATOMIC_OP(op, c_op) \ #define ATOMIC_OP(op, c_op) \
static inline void atomic_##op(int i, atomic_t *v) \ static inline void generic_atomic_##op(int i, atomic_t *v) \
{ \ { \
int c, old; \ int c, old; \
\ \
c = v->counter; \ c = v->counter; \
while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \
c = old; \ c = old; \
} }
#define ATOMIC_OP_RETURN(op, c_op) \ #define ATOMIC_OP_RETURN(op, c_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \ static inline int generic_atomic_##op##_return(int i, atomic_t *v) \
{ \ { \
int c, old; \ int c, old; \
\ \
c = v->counter; \ c = v->counter; \
while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \
c = old; \ c = old; \
\ \
return c c_op i; \ return c c_op i; \
} }
#define ATOMIC_FETCH_OP(op, c_op) \ #define ATOMIC_FETCH_OP(op, c_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \ static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \
{ \ { \
int c, old; \ int c, old; \
\ \
c = v->counter; \ c = v->counter; \
while ((old = cmpxchg(&v->counter, c, c c_op i)) != c) \ while ((old = arch_cmpxchg(&v->counter, c, c c_op i)) != c) \
c = old; \ c = old; \
\ \
return c; \ return c; \
@ -72,7 +55,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
#include <linux/irqflags.h> #include <linux/irqflags.h>
#define ATOMIC_OP(op, c_op) \ #define ATOMIC_OP(op, c_op) \
static inline void atomic_##op(int i, atomic_t *v) \ static inline void generic_atomic_##op(int i, atomic_t *v) \
{ \ { \
unsigned long flags; \ unsigned long flags; \
\ \
@ -82,7 +65,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} }
#define ATOMIC_OP_RETURN(op, c_op) \ #define ATOMIC_OP_RETURN(op, c_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \ static inline int generic_atomic_##op##_return(int i, atomic_t *v) \
{ \ { \
unsigned long flags; \ unsigned long flags; \
int ret; \ int ret; \
@ -95,7 +78,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
} }
#define ATOMIC_FETCH_OP(op, c_op) \ #define ATOMIC_FETCH_OP(op, c_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \ static inline int generic_atomic_fetch_##op(int i, atomic_t *v) \
{ \ { \
unsigned long flags; \ unsigned long flags; \
int ret; \ int ret; \
@ -110,87 +93,44 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
#ifndef atomic_add_return
ATOMIC_OP_RETURN(add, +) ATOMIC_OP_RETURN(add, +)
#endif
#ifndef atomic_sub_return
ATOMIC_OP_RETURN(sub, -) ATOMIC_OP_RETURN(sub, -)
#endif
#ifndef atomic_fetch_add
ATOMIC_FETCH_OP(add, +) ATOMIC_FETCH_OP(add, +)
#endif
#ifndef atomic_fetch_sub
ATOMIC_FETCH_OP(sub, -) ATOMIC_FETCH_OP(sub, -)
#endif
#ifndef atomic_fetch_and
ATOMIC_FETCH_OP(and, &) ATOMIC_FETCH_OP(and, &)
#endif
#ifndef atomic_fetch_or
ATOMIC_FETCH_OP(or, |) ATOMIC_FETCH_OP(or, |)
#endif
#ifndef atomic_fetch_xor
ATOMIC_FETCH_OP(xor, ^) ATOMIC_FETCH_OP(xor, ^)
#endif
#ifndef atomic_and ATOMIC_OP(add, +)
ATOMIC_OP(sub, -)
ATOMIC_OP(and, &) ATOMIC_OP(and, &)
#endif
#ifndef atomic_or
ATOMIC_OP(or, |) ATOMIC_OP(or, |)
#endif
#ifndef atomic_xor
ATOMIC_OP(xor, ^) ATOMIC_OP(xor, ^)
#endif
#undef ATOMIC_FETCH_OP #undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN #undef ATOMIC_OP_RETURN
#undef ATOMIC_OP #undef ATOMIC_OP
/* #define arch_atomic_add_return generic_atomic_add_return
* Atomic operations that C can't guarantee us. Useful for #define arch_atomic_sub_return generic_atomic_sub_return
* resource counting etc..
*/
/** #define arch_atomic_fetch_add generic_atomic_fetch_add
* atomic_read - read atomic variable #define arch_atomic_fetch_sub generic_atomic_fetch_sub
* @v: pointer of type atomic_t #define arch_atomic_fetch_and generic_atomic_fetch_and
* #define arch_atomic_fetch_or generic_atomic_fetch_or
* Atomically reads the value of @v. #define arch_atomic_fetch_xor generic_atomic_fetch_xor
*/
#ifndef atomic_read
#define atomic_read(v) READ_ONCE((v)->counter)
#endif
/** #define arch_atomic_add generic_atomic_add
* atomic_set - set atomic variable #define arch_atomic_sub generic_atomic_sub
* @v: pointer of type atomic_t #define arch_atomic_and generic_atomic_and
* @i: required value #define arch_atomic_or generic_atomic_or
* #define arch_atomic_xor generic_atomic_xor
* Atomically sets the value of @v to @i.
*/
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#include <linux/irqflags.h> #define arch_atomic_read(v) READ_ONCE((v)->counter)
#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
static inline void atomic_add(int i, atomic_t *v) #define arch_atomic_xchg(ptr, v) (arch_xchg(&(ptr)->counter, (v)))
{ #define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), (old), (new)))
atomic_add_return(i, v);
}
static inline void atomic_sub(int i, atomic_t *v)
{
atomic_sub_return(i, v);
}
#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
#endif /* __ASM_GENERIC_ATOMIC_H */ #endif /* __ASM_GENERIC_ATOMIC_H */
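[ For reference: in the CONFIG_SMP branch above, the ATOMIC_OP() template
  is just a cmpxchg loop. Expanded by hand, ATOMIC_OP(or, |) becomes
  roughly: ]

    static inline void generic_atomic_or(int i, atomic_t *v)
    {
            int c, old;

            /* retry until the cmpxchg sees the value 'c | i' was based on */
            c = v->counter;
            while ((old = arch_cmpxchg(&v->counter, c, c | i)) != c)
                    c = old;
    }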


@ -15,19 +15,17 @@ typedef struct {
#define ATOMIC64_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) }
extern s64 atomic64_read(const atomic64_t *v); extern s64 generic_atomic64_read(const atomic64_t *v);
extern void atomic64_set(atomic64_t *v, s64 i); extern void generic_atomic64_set(atomic64_t *v, s64 i);
#define atomic64_set_release(v, i) atomic64_set((v), (i))
#define ATOMIC64_OP(op) \ #define ATOMIC64_OP(op) \
extern void atomic64_##op(s64 a, atomic64_t *v); extern void generic_atomic64_##op(s64 a, atomic64_t *v);
#define ATOMIC64_OP_RETURN(op) \ #define ATOMIC64_OP_RETURN(op) \
extern s64 atomic64_##op##_return(s64 a, atomic64_t *v); extern s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v);
#define ATOMIC64_FETCH_OP(op) \ #define ATOMIC64_FETCH_OP(op) \
extern s64 atomic64_fetch_##op(s64 a, atomic64_t *v); extern s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v);
#define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op) #define ATOMIC64_OPS(op) ATOMIC64_OP(op) ATOMIC64_OP_RETURN(op) ATOMIC64_FETCH_OP(op)
@ -46,11 +44,32 @@ ATOMIC64_OPS(xor)
#undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP #undef ATOMIC64_OP
extern s64 atomic64_dec_if_positive(atomic64_t *v); extern s64 generic_atomic64_dec_if_positive(atomic64_t *v);
#define atomic64_dec_if_positive atomic64_dec_if_positive extern s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n);
extern s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n); extern s64 generic_atomic64_xchg(atomic64_t *v, s64 new);
extern s64 atomic64_xchg(atomic64_t *v, s64 new); extern s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u);
extern s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u);
#define atomic64_fetch_add_unless atomic64_fetch_add_unless #define arch_atomic64_read generic_atomic64_read
#define arch_atomic64_set generic_atomic64_set
#define arch_atomic64_set_release generic_atomic64_set
#define arch_atomic64_add generic_atomic64_add
#define arch_atomic64_add_return generic_atomic64_add_return
#define arch_atomic64_fetch_add generic_atomic64_fetch_add
#define arch_atomic64_sub generic_atomic64_sub
#define arch_atomic64_sub_return generic_atomic64_sub_return
#define arch_atomic64_fetch_sub generic_atomic64_fetch_sub
#define arch_atomic64_and generic_atomic64_and
#define arch_atomic64_fetch_and generic_atomic64_fetch_and
#define arch_atomic64_or generic_atomic64_or
#define arch_atomic64_fetch_or generic_atomic64_fetch_or
#define arch_atomic64_xor generic_atomic64_xor
#define arch_atomic64_fetch_xor generic_atomic64_fetch_xor
#define arch_atomic64_dec_if_positive generic_atomic64_dec_if_positive
#define arch_atomic64_cmpxchg generic_atomic64_cmpxchg
#define arch_atomic64_xchg generic_atomic64_xchg
#define arch_atomic64_fetch_add_unless generic_atomic64_fetch_add_unless
#endif /* _ASM_GENERIC_ATOMIC64_H */ #endif /* _ASM_GENERIC_ATOMIC64_H */
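[ These generic_atomic64_*() routines are implemented out of line in
  lib/atomic64.c, which serializes each counter through a small hashed
  array of raw spinlocks. A simplified sketch of that scheme -- the helper
  name, array size and hash below are illustrative, not the kernel's
  exact code: ]

    #define NR_ATOMIC64_LOCKS 16		/* illustrative size */

    static raw_spinlock_t atomic64_locks[NR_ATOMIC64_LOCKS] = {
            [0 ... NR_ATOMIC64_LOCKS - 1] = __RAW_SPIN_LOCK_UNLOCKED(atomic64_locks),
    };

    /* hash the counter's address onto one of the locks */
    static raw_spinlock_t *atomic64_lock_for(const atomic64_t *v)
    {
            unsigned long addr = (unsigned long)v;

            return &atomic64_locks[(addr >> L1_CACHE_SHIFT) % NR_ATOMIC64_LOCKS];
    }

    s64 generic_atomic64_read(const atomic64_t *v)
    {
            raw_spinlock_t *lock = atomic64_lock_for(v);
            unsigned long flags;
            s64 val;

            raw_spin_lock_irqsave(lock, flags);
            val = v->counter;
            raw_spin_unlock_irqrestore(lock, flags);
            return val;
    }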


@@ -12,7 +12,7 @@ extern unsigned long wrong_size_cmpxchg(volatile void *ptr)
  * Generic version of __cmpxchg_local (disables interrupts). Takes an unsigned
  * long parameter, supporting various types of architectures.
  */
-static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
+static inline unsigned long __generic_cmpxchg_local(volatile void *ptr,
 		unsigned long old, unsigned long new, int size)
 {
 	unsigned long flags, prev;
@@ -51,7 +51,7 @@ static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
 /*
  * Generic version of __cmpxchg64_local. Takes an u64 parameter.
  */
-static inline u64 __cmpxchg64_local_generic(volatile void *ptr,
+static inline u64 __generic_cmpxchg64_local(volatile void *ptr,
 		u64 old, u64 new)
 {
 	u64 prev;


@@ -14,16 +14,14 @@
 #include <linux/types.h>
 #include <linux/irqflags.h>
 
-#ifndef xchg
-
 /*
  * This function doesn't exist, so you'll get a linker error if
  * something tries to do an invalidly-sized xchg().
  */
-extern void __xchg_called_with_bad_pointer(void);
+extern void __generic_xchg_called_with_bad_pointer(void);
 
 static inline
-unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+unsigned long __generic_xchg(unsigned long x, volatile void *ptr, int size)
 {
 	unsigned long ret, flags;
@@ -75,35 +73,43 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
 #endif /* CONFIG_64BIT */
 	default:
-		__xchg_called_with_bad_pointer();
+		__generic_xchg_called_with_bad_pointer();
 		return x;
 	}
 }
 
-#define xchg(ptr, x) ({ \
+#define generic_xchg(ptr, x) ({ \
 	((__typeof__(*(ptr))) \
-		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
+		__generic_xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
 })
 
-#endif /* xchg */
-
 /*
  * Atomic compare and exchange.
  */
 #include <asm-generic/cmpxchg-local.h>
 
-#ifndef cmpxchg_local
-#define cmpxchg_local(ptr, o, n) ({ \
-	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+#define generic_cmpxchg_local(ptr, o, n) ({ \
+	((__typeof__(*(ptr)))__generic_cmpxchg_local((ptr), (unsigned long)(o), \
 	 (unsigned long)(n), sizeof(*(ptr)))); \
 })
+
+#define generic_cmpxchg64_local(ptr, o, n) \
+	__generic_cmpxchg64_local((ptr), (o), (n))
+
+#ifndef arch_xchg
+#define arch_xchg generic_xchg
 #endif
 
-#ifndef cmpxchg64_local
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#ifndef arch_cmpxchg_local
+#define arch_cmpxchg_local generic_cmpxchg_local
 #endif
 
-#define cmpxchg(ptr, o, n)	cmpxchg_local((ptr), (o), (n))
-#define cmpxchg64(ptr, o, n)	cmpxchg64_local((ptr), (o), (n))
+#ifndef arch_cmpxchg64_local
+#define arch_cmpxchg64_local generic_cmpxchg64_local
+#endif
+
+#define arch_cmpxchg		arch_cmpxchg_local
+#define arch_cmpxchg64		arch_cmpxchg64_local
 
 #endif /* __ASM_GENERIC_CMPXCHG_H */

File diff suppressed because it is too large.


@@ -77,12 +77,8 @@
 	__ret; \
 })
 
-#ifdef ARCH_ATOMIC
 #include <linux/atomic-arch-fallback.h>
 #include <asm-generic/atomic-instrumented.h>
-#else
-#include <linux/atomic-fallback.h>
-#endif
 
 #include <asm-generic/atomic-long.h>


@@ -52,7 +52,7 @@ enum lockdep_lock_type {
  * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
  *                                cached in the instance of lockdep_map
  *
- * Currently main class (subclass == 0) and signle depth subclass
+ * Currently main class (subclass == 0) and single depth subclass
  * are cached in lockdep_map. This optimization is mainly targeting
  * on rq->lock. double_rq_lock() acquires this highly competitive with
  * single depth.


@@ -182,9 +182,9 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
 #define seqcount_raw_spinlock_init(s, lock)	seqcount_LOCKNAME_init(s, lock, raw_spinlock)
 #define seqcount_spinlock_init(s, lock)		seqcount_LOCKNAME_init(s, lock, spinlock)
-#define seqcount_rwlock_init(s, lock)		seqcount_LOCKNAME_init(s, lock, rwlock);
-#define seqcount_mutex_init(s, lock)		seqcount_LOCKNAME_init(s, lock, mutex);
-#define seqcount_ww_mutex_init(s, lock)		seqcount_LOCKNAME_init(s, lock, ww_mutex);
+#define seqcount_rwlock_init(s, lock)		seqcount_LOCKNAME_init(s, lock, rwlock)
+#define seqcount_mutex_init(s, lock)		seqcount_LOCKNAME_init(s, lock, mutex)
+#define seqcount_ww_mutex_init(s, lock)		seqcount_LOCKNAME_init(s, lock, ww_mutex)
 
 /*
  * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
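
The dropped semicolons are more than cosmetic: a trailing ';' baked into the macro leaves an extra empty statement after the do { ... } while (0) expansion, which breaks callers that treat the initializer as a single statement. A minimal illustration with a hypothetical caller (need_stats, stats_seq and stats_mutex are made-up names):

/* Hypothetical caller, for illustration only. */
if (need_stats)
	seqcount_mutex_init(&stats_seq, &stats_mutex);	/* fine with the cleaned-up macro */
else
	pr_debug("stats disabled\n");
/*
 * With the old trailing ';' inside the macro, the 'if' branch expanded to
 * "do { ... } while (0);;" -- the second, empty statement terminated the
 * 'if', leaving the 'else' with nothing to attach to, and the build failed.
 */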


@@ -21,6 +21,7 @@
 #define FUTEX_WAKE_BITSET	10
 #define FUTEX_WAIT_REQUEUE_PI	11
 #define FUTEX_CMP_REQUEUE_PI	12
+#define FUTEX_LOCK_PI2		13
 
 #define FUTEX_PRIVATE_FLAG	128
 #define FUTEX_CLOCK_REALTIME	256
@@ -32,6 +33,7 @@
 #define FUTEX_CMP_REQUEUE_PRIVATE (FUTEX_CMP_REQUEUE | FUTEX_PRIVATE_FLAG)
 #define FUTEX_WAKE_OP_PRIVATE	(FUTEX_WAKE_OP | FUTEX_PRIVATE_FLAG)
 #define FUTEX_LOCK_PI_PRIVATE	(FUTEX_LOCK_PI | FUTEX_PRIVATE_FLAG)
+#define FUTEX_LOCK_PI2_PRIVATE	(FUTEX_LOCK_PI2 | FUTEX_PRIVATE_FLAG)
 #define FUTEX_UNLOCK_PI_PRIVATE	(FUTEX_UNLOCK_PI | FUTEX_PRIVATE_FLAG)
 #define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG)
 #define FUTEX_WAIT_BITSET_PRIVATE (FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG)


@@ -1727,12 +1727,9 @@ retry_private:
 			return ret;
 		}
 
-		if (!(flags & FLAGS_SHARED)) {
-			cond_resched();
-			goto retry_private;
-		}
-
-		cond_resched();
+		cond_resched();
+
+		if (!(flags & FLAGS_SHARED))
+			goto retry_private;
 		goto retry;
 	}
@@ -1873,7 +1870,7 @@ futex_proxy_trylock_atomic(u32 __user *pifutex, struct futex_hash_bucket *hb1,
 	 * If the caller intends to requeue more than 1 waiter to pifutex,
 	 * force futex_lock_pi_atomic() to set the FUTEX_WAITERS bit now,
 	 * as we have means to handle the possible fault. If not, don't set
-	 * the bit unecessarily as it will force the subsequent unlock to enter
+	 * the bit unnecessarily as it will force the subsequent unlock to enter
 	 * the kernel.
 	 */
 	top_waiter = futex_top_waiter(hb1, key1);
@@ -2102,7 +2099,7 @@ retry_private:
 			continue;
 
 		/*
-		 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
+		 * FUTEX_WAIT_REQUEUE_PI and FUTEX_CMP_REQUEUE_PI should always
 		 * be paired with each other and no other futex ops.
 		 *
 		 * We should never be requeueing a futex_q with a pi_state,
@@ -2317,7 +2314,7 @@ retry:
 }
 
 /*
- * PI futexes can not be requeued and must remove themself from the
+ * PI futexes can not be requeued and must remove themselves from the
  * hash bucket. The hash bucket lock (i.e. lock_ptr) is held.
  */
 static void unqueue_me_pi(struct futex_q *q)
@@ -2785,7 +2782,7 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
 	if (refill_pi_state_cache())
 		return -ENOMEM;
 
-	to = futex_setup_timer(time, &timeout, FLAGS_CLOCKRT, 0);
+	to = futex_setup_timer(time, &timeout, flags, 0);
 
 retry:
 	ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q.key, FUTEX_WRITE);
@@ -2902,7 +2899,7 @@ no_block:
 	 */
 	res = fixup_owner(uaddr, &q, !ret);
 	/*
-	 * If fixup_owner() returned an error, proprogate that. If it acquired
+	 * If fixup_owner() returned an error, propagate that. If it acquired
 	 * the lock, clear our -ETIMEDOUT or -EINTR.
 	 */
 	if (res)
@@ -3279,7 +3276,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
 	 */
 	res = fixup_owner(uaddr2, &q, !ret);
 	/*
-	 * If fixup_owner() returned an error, proprogate that. If it
+	 * If fixup_owner() returned an error, propagate that. If it
 	 * acquired the lock, clear -ETIMEDOUT or -EINTR.
 	 */
 	if (res)
@@ -3677,7 +3674,7 @@ void futex_exec_release(struct task_struct *tsk)
 {
 	/*
 	 * The state handling is done for consistency, but in the case of
-	 * exec() there is no way to prevent futher damage as the PID stays
+	 * exec() there is no way to prevent further damage as the PID stays
 	 * the same. But for the unlikely and arguably buggy case that a
 	 * futex is held on exec(), this provides at least as much state
 	 * consistency protection which is possible.
@@ -3709,12 +3706,14 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
 	if (op & FUTEX_CLOCK_REALTIME) {
 		flags |= FLAGS_CLOCKRT;
-		if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI)
+		if (cmd != FUTEX_WAIT_BITSET && cmd != FUTEX_WAIT_REQUEUE_PI &&
+		    cmd != FUTEX_LOCK_PI2)
 			return -ENOSYS;
 	}
 
 	switch (cmd) {
 	case FUTEX_LOCK_PI:
+	case FUTEX_LOCK_PI2:
 	case FUTEX_UNLOCK_PI:
 	case FUTEX_TRYLOCK_PI:
 	case FUTEX_WAIT_REQUEUE_PI:
@@ -3741,6 +3740,9 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
 	case FUTEX_WAKE_OP:
 		return futex_wake_op(uaddr, flags, uaddr2, val, val2, val3);
 	case FUTEX_LOCK_PI:
+		flags |= FLAGS_CLOCKRT;
+		fallthrough;
+	case FUTEX_LOCK_PI2:
 		return futex_lock_pi(uaddr, flags, timeout, 0);
 	case FUTEX_UNLOCK_PI:
 		return futex_unlock_pi(uaddr, flags);
@@ -3761,6 +3763,7 @@ static __always_inline bool futex_cmd_has_timeout(u32 cmd)
 	switch (cmd) {
 	case FUTEX_WAIT:
 	case FUTEX_LOCK_PI:
+	case FUTEX_LOCK_PI2:
 	case FUTEX_WAIT_BITSET:
 	case FUTEX_WAIT_REQUEUE_PI:
 		return true;
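
To make the new ABI concrete, here is a user-space sketch of how FUTEX_LOCK_PI2 is meant to be driven: the timeout is absolute and, unlike FUTEX_LOCK_PI, measured against CLOCK_MONOTONIC by default (FUTEX_CLOCK_REALTIME may still be OR'ed into the opcode to select CLOCK_REALTIME). This is illustrative and not part of the patch; lock_pi2_monotonic() and deadline_ms are made-up names, and the local #define only covers uapi headers that predate the new opcode.

#define _GNU_SOURCE
#include <errno.h>
#include <linux/futex.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

#ifndef FUTEX_LOCK_PI2
#define FUTEX_LOCK_PI2 13	/* matches the uapi value added above */
#endif

/* Try to take a PI futex, giving up after deadline_ms of CLOCK_MONOTONIC time. */
static int lock_pi2_monotonic(uint32_t *uaddr, long deadline_ms)
{
	struct timespec abs_to;

	/* FUTEX_LOCK_PI2 takes an *absolute* CLOCK_MONOTONIC timeout. */
	clock_gettime(CLOCK_MONOTONIC, &abs_to);
	abs_to.tv_sec  += deadline_ms / 1000;
	abs_to.tv_nsec += (deadline_ms % 1000) * 1000000L;
	if (abs_to.tv_nsec >= 1000000000L) {
		abs_to.tv_sec++;
		abs_to.tv_nsec -= 1000000000L;
	}

	/* val, uaddr2 and val3 are ignored for the PI lock operations. */
	if (syscall(SYS_futex, uaddr, FUTEX_LOCK_PI2, 0, &abs_to, NULL, 0) == 0)
		return 0;

	return -errno;	/* -ETIMEDOUT on expiry, -ENOSYS on kernels without the op */
}

The calling convention otherwise matches FUTEX_LOCK_PI, which is exactly why a second opcode (rather than a repurposed flag) was chosen: existing FUTEX_LOCK_PI users keep their CLOCK_REALTIME semantics untouched.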


@@ -2306,7 +2306,56 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 }
 
 /*
- * printk the shortest lock dependencies from @start to @end in reverse order:
+ * Dependency path printing:
+ *
+ * After BFS we get a lock dependency path (linked via ->parent of lock_list),
+ * printing out each lock in the dependency path will help on understanding how
+ * the deadlock could happen. Here are some details about dependency path
+ * printing:
+ *
+ *	1) A lock_list can be either forwards or backwards for a lock dependency,
+ *	   for a lock dependency A -> B, there are two lock_lists:
+ *
+ *	   a)	lock_list in the ->locks_after list of A, whose ->class is B and
+ *		->links_to is A. In this case, we can say the lock_list is
+ *		"A -> B" (forwards case).
+ *
+ *	   b)	lock_list in the ->locks_before list of B, whose ->class is A
+ *		and ->links_to is B. In this case, we can say the lock_list is
+ *		"B <- A" (backwards case).
+ *
+ *	   The ->trace of both a) and b) point to the call trace where B was
+ *	   acquired with A held.
+ *
+ *	2) A "helper" lock_list is introduced during BFS, this lock_list doesn't
+ *	   represent a certain lock dependency, it only provides an initial entry
+ *	   for BFS. For example, BFS may introduce a "helper" lock_list whose
+ *	   ->class is A, as a result BFS will search all dependencies starting with
+ *	   A, e.g. A -> B or A -> C.
+ *
+ *	   The notation of a forwards helper lock_list is like "-> A", which means
+ *	   we should search the forwards dependencies starting with "A", e.g. A -> B
+ *	   or A -> C.
+ *
+ *	   The notation of a backwards helper lock_list is like "<- B", which means
+ *	   we should search the backwards dependencies ending with "B", e.g.
+ *	   B <- A or B <- C.
+ */
+
+/*
+ * printk the shortest lock dependencies from @root to @leaf in reverse order.
+ *
+ * We have a lock dependency path as follows:
+ *
+ *    @root                                                               @leaf
+ *      |                                                                   |
+ *      V                                                                   V
+ *                 ->parent                                  ->parent
+ * | lock_list | <--------- | lock_list | ... | lock_list  | <--------- | lock_list |
+ * |    -> L1  |            | L1 -> L2  | ... |Ln-2 -> Ln-1|            | Ln-1 -> Ln|
+ *
+ * , so it's natural that we start from @leaf and print every ->class and
+ * ->trace until we reach the @root.
  */
 static void __used
 print_shortest_lock_dependencies(struct lock_list *leaf,
@@ -2334,6 +2383,61 @@ print_shortest_lock_dependencies(struct lock_list *leaf,
 	} while (entry && (depth >= 0));
 }
 
+/*
+ * printk the shortest lock dependencies from @leaf to @root.
+ *
+ * We have a lock dependency path (from a backwards search) as follows:
+ *
+ *    @leaf                                                               @root
+ *      |                                                                   |
+ *      V                                                                   V
+ *                 ->parent                                  ->parent
+ * | lock_list | ---------> | lock_list | ... | lock_list  | ---------> | lock_list |
+ * | L2 <- L1  |            | L3 <- L2  | ... | Ln <- Ln-1 |            |    <- Ln  |
+ *
+ * , so when we iterate from @leaf to @root, we actually print the lock
+ * dependency path L1 -> L2 -> .. -> Ln in the non-reverse order.
+ *
+ * Another thing to notice here is that ->class of L2 <- L1 is L1, while the
+ * ->trace of L2 <- L1 is the call trace of L2, in fact we don't have the call
+ * trace of L1 in the dependency path, which is alright, because most of the
+ * time we can figure out where L1 is held from the call trace of L2.
+ */
+static void __used
+print_shortest_lock_dependencies_backwards(struct lock_list *leaf,
+					   struct lock_list *root)
+{
+	struct lock_list *entry = leaf;
+	const struct lock_trace *trace = NULL;
+	int depth;
+
+	/* compute depth from generated tree by BFS */
+	depth = get_lock_depth(leaf);
+
+	do {
+		print_lock_class_header(entry->class, depth);
+
+		if (trace) {
+			printk("%*s ... acquired at:\n", depth, "");
+			print_lock_trace(trace, 2);
+			printk("\n");
+		}
+
+		/*
+		 * Record the pointer to the trace for the next lock_list
+		 * entry, see the comments for the function.
+		 */
+		trace = entry->trace;
+
+		if (depth == 0 && (entry != root)) {
+			printk("lockdep:%s bad path found in chain graph\n", __func__);
+			break;
+		}
+
+		entry = get_lock_parent(entry);
+		depth--;
+	} while (entry && (depth >= 0));
+}
+
 static void
 print_irq_lock_scenario(struct lock_list *safe_entry,
 			struct lock_list *unsafe_entry,
@@ -2448,10 +2552,7 @@ print_bad_irq_dependency(struct task_struct *curr,
 	lockdep_print_held_locks(curr);
 
 	pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
-	prev_root->trace = save_trace();
-	if (!prev_root->trace)
-		return;
-	print_shortest_lock_dependencies(backwards_entry, prev_root);
+	print_shortest_lock_dependencies_backwards(backwards_entry, prev_root);
 
 	pr_warn("\nthe dependencies between the lock to be acquired");
 	pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
@@ -2669,8 +2770,18 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
 	 * Step 3: we found a bad match! Now retrieve a lock from the backward
 	 * list whose usage mask matches the exclusive usage mask from the
 	 * lock found on the forward list.
+	 *
+	 * Note, we should only keep the LOCKF_ENABLED_IRQ_ALL bits, considering
+	 * the following case:
+	 *
+	 * When trying to add A -> B to the graph, we find that there is a
+	 * hardirq-safe L, that L -> ... -> A, and another hardirq-unsafe M,
+	 * that B -> ... -> M. However M is **softirq-safe**, if we use exact
+	 * invert bits of M's usage_mask, we will find another lock N that is
+	 * **softirq-unsafe** and N -> ... -> A, however N -> .. -> M will not
+	 * cause an inversion deadlock.
 	 */
-	backward_mask = original_mask(target_entry1->class->usage_mask);
+	backward_mask = original_mask(target_entry1->class->usage_mask & LOCKF_ENABLED_IRQ_ALL);
 
 	ret = find_usage_backwards(&this, backward_mask, &target_entry);
 	if (bfs_error(ret)) {
@@ -2720,7 +2831,7 @@ static inline bool usage_skip(struct lock_list *entry, void *mask)
  * <target> or not. If it can, <src> -> <target> dependency is already
  * in the graph.
  *
- * Return BFS_RMATCH if it does, or BFS_RMATCH if it does not, return BFS_E* if
+ * Return BFS_RMATCH if it does, or BFS_RNOMATCH if it does not, return BFS_E* if
  * any error appears in the bfs search.
  */
 static noinline enum bfs_result
@@ -4579,7 +4690,7 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
 	u8 curr_inner;
 	int depth;
 
-	if (!curr->lockdep_depth || !next_inner || next->trylock)
+	if (!next_inner || next->trylock)
 		return 0;
 
 	if (!next_outer)


@@ -1372,7 +1372,6 @@ config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
 	select STACKTRACE
-	depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
 	select KALLSYMS
 	select KALLSYMS_ALL


@@ -42,7 +42,7 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
 	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
 }
 
-s64 atomic64_read(const atomic64_t *v)
+s64 generic_atomic64_read(const atomic64_t *v)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
@@ -53,9 +53,9 @@ s64 atomic64_read(const atomic64_t *v)
 	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
-EXPORT_SYMBOL(atomic64_read);
+EXPORT_SYMBOL(generic_atomic64_read);
 
-void atomic64_set(atomic64_t *v, s64 i)
+void generic_atomic64_set(atomic64_t *v, s64 i)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
@@ -64,10 +64,10 @@ void atomic64_set(atomic64_t *v, s64 i)
 	v->counter = i;
 	raw_spin_unlock_irqrestore(lock, flags);
 }
-EXPORT_SYMBOL(atomic64_set);
+EXPORT_SYMBOL(generic_atomic64_set);
 
 #define ATOMIC64_OP(op, c_op) \
-void atomic64_##op(s64 a, atomic64_t *v) \
+void generic_atomic64_##op(s64 a, atomic64_t *v) \
 { \
 	unsigned long flags; \
 	raw_spinlock_t *lock = lock_addr(v); \
@@ -76,10 +76,10 @@ void atomic64_##op(s64 a, atomic64_t *v) \
 	v->counter c_op a; \
 	raw_spin_unlock_irqrestore(lock, flags); \
 } \
-EXPORT_SYMBOL(atomic64_##op);
+EXPORT_SYMBOL(generic_atomic64_##op);
 
 #define ATOMIC64_OP_RETURN(op, c_op) \
-s64 atomic64_##op##_return(s64 a, atomic64_t *v) \
+s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v) \
 { \
 	unsigned long flags; \
 	raw_spinlock_t *lock = lock_addr(v); \
@@ -90,10 +90,10 @@ s64 atomic64_##op##_return(s64 a, atomic64_t *v) \
 	raw_spin_unlock_irqrestore(lock, flags); \
 	return val; \
 } \
-EXPORT_SYMBOL(atomic64_##op##_return);
+EXPORT_SYMBOL(generic_atomic64_##op##_return);
 
 #define ATOMIC64_FETCH_OP(op, c_op) \
-s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \
+s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v) \
 { \
 	unsigned long flags; \
 	raw_spinlock_t *lock = lock_addr(v); \
@@ -105,7 +105,7 @@ s64 atomic64_fetch_##op(s64 a, atomic64_t *v) \
 	raw_spin_unlock_irqrestore(lock, flags); \
 	return val; \
 } \
-EXPORT_SYMBOL(atomic64_fetch_##op);
+EXPORT_SYMBOL(generic_atomic64_fetch_##op);
 
 #define ATOMIC64_OPS(op, c_op) \
 	ATOMIC64_OP(op, c_op) \
@@ -130,7 +130,7 @@ ATOMIC64_OPS(xor, ^=)
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-s64 atomic64_dec_if_positive(atomic64_t *v)
+s64 generic_atomic64_dec_if_positive(atomic64_t *v)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
@@ -143,9 +143,9 @@ s64 atomic64_dec_if_positive(atomic64_t *v)
 	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
-EXPORT_SYMBOL(atomic64_dec_if_positive);
+EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
 
-s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
+s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
@@ -158,9 +158,9 @@ s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
 	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
-EXPORT_SYMBOL(atomic64_cmpxchg);
+EXPORT_SYMBOL(generic_atomic64_cmpxchg);
 
-s64 atomic64_xchg(atomic64_t *v, s64 new)
+s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
@@ -172,9 +172,9 @@ s64 atomic64_xchg(atomic64_t *v, s64 new)
 	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
-EXPORT_SYMBOL(atomic64_xchg);
+EXPORT_SYMBOL(generic_atomic64_xchg);
 
-s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
@@ -188,4 +188,4 @@ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 	return val;
 }
-EXPORT_SYMBOL(atomic64_fetch_add_unless);
+EXPORT_SYMBOL(generic_atomic64_fetch_add_unless);
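
The rename is invisible to callers: kernel code keeps using the atomic64_*() API, and on 32-bit architectures without native 64-bit atomics those calls now resolve (via arch_atomic64_* and the instrumented wrappers) to the spinlock-protected generic_atomic64_*() helpers above, where each atomic64_t hashes to one of NR_LOCKS spinlocks via lock_addr(). A minimal, hypothetical usage sketch -- total_bytes, account_bytes and read_total are made-up names:

/* Hypothetical kernel-code user of the unchanged atomic64_t API. */
static atomic64_t total_bytes = ATOMIC64_INIT(0);

static void account_bytes(s64 len)
{
	/* On lib/atomic64.c builds this takes a hashed spinlock internally. */
	atomic64_add(len, &total_bytes);
}

static s64 read_total(void)
{
	return atomic64_read(&total_bytes);
}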


@@ -53,6 +53,7 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose);
 #define LOCKTYPE_WW	0x10
 #define LOCKTYPE_RTMUTEX 0x20
 #define LOCKTYPE_LL	0x40
+#define LOCKTYPE_SPECIAL 0x80
 
 static struct ww_acquire_ctx t, t2;
 static struct ww_mutex o, o2, o3;
@@ -194,6 +195,7 @@ static void init_shared_classes(void)
 #define HARDIRQ_ENTER()		\
 	local_irq_disable();	\
 	__irq_enter();		\
+	lockdep_hardirq_threaded(); \
 	WARN_ON(!in_irq());
 
 #define HARDIRQ_EXIT()		\
@@ -2492,16 +2494,6 @@ static void rcu_sched_exit(int *_)
 	int rcu_sched_guard_##name __guard(rcu_sched_exit);	\
 	rcu_read_lock_sched();
 
-static void rcu_callback_exit(int *_)
-{
-	rcu_lock_release(&rcu_callback_map);
-}
-
-#define RCU_CALLBACK_CONTEXT(name, ...)					\
-	int rcu_callback_guard_##name __guard(rcu_callback_exit);	\
-	rcu_lock_acquire(&rcu_callback_map);
-
 static void raw_spinlock_exit(raw_spinlock_t **lock)
 {
 	raw_spin_unlock(*lock);
@@ -2558,8 +2550,6 @@ static void __maybe_unused inner##_in_##outer(void) \
  * ---------------+-------+----------+------+-------
  * RCU_BH         |  o    |  o       |  o   |  x
  * ---------------+-------+----------+------+-------
- * RCU_CALLBACK   |  o    |  o       |  o   |  x
- * ---------------+-------+----------+------+-------
  * RCU_SCHED      |  o    |  o       |  x   |  x
  * ---------------+-------+----------+------+-------
  * RAW_SPIN       |  o    |  o       |  x   |  x
@@ -2576,7 +2566,6 @@ GENERATE_2_CONTEXT_TESTCASE(NOTTHREADED_HARDIRQ, , inner, inner_lock) \
 GENERATE_2_CONTEXT_TESTCASE(SOFTIRQ, , inner, inner_lock) \
 GENERATE_2_CONTEXT_TESTCASE(RCU, , inner, inner_lock) \
 GENERATE_2_CONTEXT_TESTCASE(RCU_BH, , inner, inner_lock) \
-GENERATE_2_CONTEXT_TESTCASE(RCU_CALLBACK, , inner, inner_lock) \
 GENERATE_2_CONTEXT_TESTCASE(RCU_SCHED, , inner, inner_lock) \
 GENERATE_2_CONTEXT_TESTCASE(RAW_SPINLOCK, raw_lock_A, inner, inner_lock) \
 GENERATE_2_CONTEXT_TESTCASE(SPINLOCK, lock_A, inner, inner_lock) \
@@ -2638,10 +2627,6 @@ static void wait_context_tests(void)
 	DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU_BH);
 	pr_cont("\n");
 
-	print_testname("in RCU callback context");
-	DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU_CALLBACK);
-	pr_cont("\n");
-
 	print_testname("in RCU-sched context");
 	DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RCU_SCHED);
 	pr_cont("\n");
@@ -2744,6 +2729,66 @@ static void local_lock_tests(void)
 	pr_cont("\n");
 }
 
+static void hardirq_deadlock_softirq_not_deadlock(void)
+{
+	/* mutex_A is hardirq-unsafe and softirq-unsafe */
+	/* mutex_A -> lock_C */
+	mutex_lock(&mutex_A);
+	HARDIRQ_DISABLE();
+	spin_lock(&lock_C);
+	spin_unlock(&lock_C);
+	HARDIRQ_ENABLE();
+	mutex_unlock(&mutex_A);
+
+	/* lock_A is hardirq-safe */
+	HARDIRQ_ENTER();
+	spin_lock(&lock_A);
+	spin_unlock(&lock_A);
+	HARDIRQ_EXIT();
+
+	/* lock_A -> lock_B */
+	HARDIRQ_DISABLE();
+	spin_lock(&lock_A);
+	spin_lock(&lock_B);
+	spin_unlock(&lock_B);
+	spin_unlock(&lock_A);
+	HARDIRQ_ENABLE();
+
+	/* lock_B -> lock_C */
+	HARDIRQ_DISABLE();
+	spin_lock(&lock_B);
+	spin_lock(&lock_C);
+	spin_unlock(&lock_C);
+	spin_unlock(&lock_B);
+	HARDIRQ_ENABLE();
+
+	/* lock_D is softirq-safe */
+	SOFTIRQ_ENTER();
+	spin_lock(&lock_D);
+	spin_unlock(&lock_D);
+	SOFTIRQ_EXIT();
+
+	/* And lock_D is hardirq-unsafe */
+	SOFTIRQ_DISABLE();
+	spin_lock(&lock_D);
+	spin_unlock(&lock_D);
+	SOFTIRQ_ENABLE();
+
+	/*
+	 * mutex_A -> lock_C -> lock_D is softirq-unsafe -> softirq-safe, not
+	 * deadlock.
+	 *
+	 * lock_A -> lock_B -> lock_C -> lock_D is hardirq-safe ->
+	 * hardirq-unsafe, deadlock.
+	 */
+	HARDIRQ_DISABLE();
+	spin_lock(&lock_C);
+	spin_lock(&lock_D);
+	spin_unlock(&lock_D);
+	spin_unlock(&lock_C);
+	HARDIRQ_ENABLE();
+}
+
 void locking_selftest(void)
 {
 	/*
@@ -2872,6 +2917,10 @@ void locking_selftest(void)
 
 	local_lock_tests();
 
+	print_testname("hardirq_unsafe_softirq_safe");
+	dotest(hardirq_deadlock_softirq_not_deadlock, FAILURE, LOCKTYPE_SPECIAL);
+	pr_cont("\n");
+
 	if (unexpected_testcase_failures) {
 		printk("-----------------------------------------------------------------\n");
 		debug_locks = 0;


@@ -17,6 +17,7 @@
  *	Kris Katterjohn - Added many additional checks in bpf_check_classic()
  */
 
+#include <linux/atomic.h>
 #include <linux/module.h>
 #include <linux/types.h>
 #include <linux/mm.h>
@@ -41,7 +42,6 @@
 #include <linux/timer.h>
 #include <linux/uaccess.h>
 #include <asm/unaligned.h>
-#include <asm/cmpxchg.h>
 #include <linux/filter.h>
 #include <linux/ratelimit.h>
 #include <linux/seccomp.h>


@@ -7,13 +7,13 @@
  * Trond Myklebust <trond.myklebust@primarydata.com>
  *
  */
+#include <linux/atomic.h>
 #include <linux/types.h>
 #include <linux/kref.h>
 #include <linux/list.h>
 #include <linux/rcupdate.h>
 #include <linux/rculist.h>
 #include <linux/slab.h>
-#include <asm/cmpxchg.h>
 #include <linux/spinlock.h>
 #include <linux/sunrpc/xprt.h>
 #include <linux/sunrpc/addr.h>


@@ -17,7 +17,6 @@ cat <<EOF |
 asm-generic/atomic-instrumented.h
 asm-generic/atomic-long.h
 linux/atomic-arch-fallback.h
-linux/atomic-fallback.h
 EOF
 while read header; do
 	OLDSUM="$(tail -n 1 ${LINUXDIR}/include/${header})"


@@ -41,34 +41,6 @@ gen_params_checks()
 	done
 }
 
-# gen_guard(meta, atomic, pfx, name, sfx, order)
-gen_guard()
-{
-	local meta="$1"; shift
-	local atomic="$1"; shift
-	local pfx="$1"; shift
-	local name="$1"; shift
-	local sfx="$1"; shift
-	local order="$1"; shift
-
-	local atomicname="arch_${atomic}_${pfx}${name}${sfx}${order}"
-
-	local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"
-
-	# We definitely need a preprocessor symbol for this atomic if it is an
-	# ordering variant, or if there's a generic fallback.
-	if [ ! -z "${order}" ] || [ ! -z "${template}" ]; then
-		printf "defined(${atomicname})"
-		return
-	fi
-
-	# If this is a base variant, but a relaxed variant *may* exist, then we
-	# only have a preprocessor symbol if the relaxed variant isn't defined
-	if meta_has_relaxed "${meta}"; then
-		printf "!defined(${atomicname}_relaxed) || defined(${atomicname})"
-	fi
-}
-
 #gen_proto_order_variant(meta, pfx, name, sfx, order, atomic, int, arg...)
 gen_proto_order_variant()
 {
@@ -82,16 +54,12 @@ gen_proto_order_variant()
 	local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
 
-	local guard="$(gen_guard "${meta}" "${atomic}" "${pfx}" "${name}" "${sfx}" "${order}")"
-
 	local ret="$(gen_ret_type "${meta}" "${int}")"
 	local params="$(gen_params "${int}" "${atomic}" "$@")"
 	local checks="$(gen_params_checks "${meta}" "$@")"
 	local args="$(gen_args "$@")"
 	local retstmt="$(gen_ret_stmt "${meta}")"
 
-	[ ! -z "${guard}" ] && printf "#if ${guard}\n"
-
 cat <<EOF
 static __always_inline ${ret}
 ${atomicname}(${params})
@@ -99,11 +67,8 @@ ${atomicname}(${params})
 	${checks}
 	${retstmt}arch_${atomicname}(${args});
 }
-#define ${atomicname} ${atomicname}
 EOF
 
-	[ ! -z "${guard}" ] && printf "#endif\n"
-
 	printf "\n"
 }
 
@@ -139,19 +104,6 @@ EOF
 	fi
 }
 
-gen_optional_xchg()
-{
-	local name="$1"; shift
-	local sfx="$1"; shift
-
-	local guard="defined(arch_${name}${sfx})"
-
-	[ -z "${sfx}" ] && guard="!defined(arch_${name}_relaxed) || defined(arch_${name})"
-
-	printf "#if ${guard}\n"
-	gen_xchg "${name}${sfx}" ""
-	printf "#endif\n\n"
-}
-
 cat << EOF
 // SPDX-License-Identifier: GPL-2.0
 
@@ -188,7 +140,8 @@ done
 
 for xchg in "xchg" "cmpxchg" "cmpxchg64" "try_cmpxchg"; do
 	for order in "" "_acquire" "_release" "_relaxed"; do
-		gen_optional_xchg "${xchg}" "${order}"
+		gen_xchg "${xchg}${order}" ""
+		printf "\n"
 	done
 done
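
With the guards gone, the here-document above emits one unconditional wrapper per operation. The result in asm-generic/atomic-instrumented.h has roughly this shape -- an approximation only, since the exact instrumentation calls are substituted in from gen_params_checks():

/* Approximate shape of one generated wrapper, not the literal script output. */
static __always_inline int
atomic_add_return(int i, atomic_t *v)
{
	instrument_atomic_read_write(v, sizeof(*v));	/* ${checks} */
	return arch_atomic_add_return(i, v);		/* ${retstmt}arch_${atomicname}(${args}); */
}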


@@ -11,7 +11,6 @@ cat <<EOF |
 gen-atomic-instrumented.sh  asm-generic/atomic-instrumented.h
 gen-atomic-long.sh          asm-generic/atomic-long.h
 gen-atomic-fallback.sh      linux/atomic-arch-fallback.h  arch_
-gen-atomic-fallback.sh      linux/atomic-fallback.h
 EOF
 while read script header args; do
 	/bin/sh ${ATOMICDIR}/${script} ${ATOMICTBL} ${args} > ${LINUXDIR}/include/${header}


@@ -6,3 +6,5 @@ futex_wait_private_mapped_file
 futex_wait_timeout
 futex_wait_uninitialized_heap
 futex_wait_wouldblock
+futex_wait
+futex_requeue


@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
-INCLUDES := -I../include -I../../
+INCLUDES := -I../include -I../../ -I../../../../../usr/include/ \
+	-I$(KBUILD_OUTPUT)/kselftest/usr/include
 CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE -pthread $(INCLUDES)
 LDLIBS := -lpthread -lrt
 
@@ -14,7 +15,9 @@ TEST_GEN_FILES := \
 	futex_requeue_pi_signal_restart \
 	futex_requeue_pi_mismatched_ops \
 	futex_wait_uninitialized_heap \
-	futex_wait_private_mapped_file
+	futex_wait_private_mapped_file \
+	futex_wait \
+	futex_requeue
 
 TEST_PROGS := run.sh


@@ -0,0 +1,136 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright Collabora Ltd., 2021
*
* futex cmp requeue test by André Almeida <andrealmeid@collabora.com>
*/
#include <pthread.h>
#include <limits.h>
#include "logging.h"
#include "futextest.h"
#define TEST_NAME "futex-requeue"
#define timeout_ns 30000000
#define WAKE_WAIT_US 10000
volatile futex_t *f1;
void usage(char *prog)
{
printf("Usage: %s\n", prog);
printf(" -c Use color\n");
printf(" -h Display this help message\n");
printf(" -v L Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
VQUIET, VCRITICAL, VINFO);
}
void *waiterfn(void *arg)
{
struct timespec to;
to.tv_sec = 0;
to.tv_nsec = timeout_ns;
if (futex_wait(f1, *f1, &to, 0))
printf("waiter failed errno %d\n", errno);
return NULL;
}
int main(int argc, char *argv[])
{
pthread_t waiter[10];
int res, ret = RET_PASS;
int c, i;
volatile futex_t _f1 = 0;
volatile futex_t f2 = 0;
f1 = &_f1;
while ((c = getopt(argc, argv, "cht:v:")) != -1) {
switch (c) {
case 'c':
log_color(1);
break;
case 'h':
usage(basename(argv[0]));
exit(0);
case 'v':
log_verbosity(atoi(optarg));
break;
default:
usage(basename(argv[0]));
exit(1);
}
}
ksft_print_header();
ksft_set_plan(2);
ksft_print_msg("%s: Test futex_requeue\n",
basename(argv[0]));
/*
* Requeue a waiter from f1 to f2, and wake f2.
*/
if (pthread_create(&waiter[0], NULL, waiterfn, NULL))
error("pthread_create failed\n", errno);
usleep(WAKE_WAIT_US);
info("Requeuing 1 futex from f1 to f2\n");
res = futex_cmp_requeue(f1, 0, &f2, 0, 1, 0);
if (res != 1) {
ksft_test_result_fail("futex_requeue simple returned: %d %s\n",
res ? errno : res,
res ? strerror(errno) : "");
ret = RET_FAIL;
}
info("Waking 1 futex at f2\n");
res = futex_wake(&f2, 1, 0);
if (res != 1) {
ksft_test_result_fail("futex_requeue simple returned: %d %s\n",
res ? errno : res,
res ? strerror(errno) : "");
ret = RET_FAIL;
} else {
ksft_test_result_pass("futex_requeue simple succeeds\n");
}
/*
* Create 10 waiters at f1. At futex_requeue, wake 3 and requeue 7.
* At futex_wake, wake INT_MAX (should be exactly 7).
*/
for (i = 0; i < 10; i++) {
if (pthread_create(&waiter[i], NULL, waiterfn, NULL))
error("pthread_create failed\n", errno);
}
usleep(WAKE_WAIT_US);
info("Waking 3 futexes at f1 and requeuing 7 futexes from f1 to f2\n");
res = futex_cmp_requeue(f1, 0, &f2, 3, 7, 0);
if (res != 10) {
ksft_test_result_fail("futex_requeue many returned: %d %s\n",
res ? errno : res,
res ? strerror(errno) : "");
ret = RET_FAIL;
}
info("Waking INT_MAX futexes at f2\n");
res = futex_wake(&f2, INT_MAX, 0);
if (res != 7) {
ksft_test_result_fail("futex_requeue many returned: %d %s\n",
res ? errno : res,
res ? strerror(errno) : "");
ret = RET_FAIL;
} else {
ksft_test_result_pass("futex_requeue many succeeds\n");
}
ksft_print_cnts();
return ret;
}


@@ -0,0 +1,171 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright Collabora Ltd., 2021
*
* futex cmp requeue test by André Almeida <andrealmeid@collabora.com>
*/
#include <pthread.h>
#include <sys/shm.h>
#include <sys/mman.h>
#include <fcntl.h>
#include "logging.h"
#include "futextest.h"
#define TEST_NAME "futex-wait"
#define timeout_ns 30000000
#define WAKE_WAIT_US 10000
#define SHM_PATH "futex_shm_file"
void *futex;
void usage(char *prog)
{
printf("Usage: %s\n", prog);
printf(" -c Use color\n");
printf(" -h Display this help message\n");
printf(" -v L Verbosity level: %d=QUIET %d=CRITICAL %d=INFO\n",
VQUIET, VCRITICAL, VINFO);
}
static void *waiterfn(void *arg)
{
struct timespec to;
unsigned int flags = 0;
if (arg)
flags = *((unsigned int *) arg);
to.tv_sec = 0;
to.tv_nsec = timeout_ns;
if (futex_wait(futex, 0, &to, flags))
printf("waiter failed errno %d\n", errno);
return NULL;
}
int main(int argc, char *argv[])
{
int res, ret = RET_PASS, fd, c, shm_id;
u_int32_t f_private = 0, *shared_data;
unsigned int flags = FUTEX_PRIVATE_FLAG;
pthread_t waiter;
void *shm;
futex = &f_private;
while ((c = getopt(argc, argv, "cht:v:")) != -1) {
switch (c) {
case 'c':
log_color(1);
break;
case 'h':
usage(basename(argv[0]));
exit(0);
case 'v':
log_verbosity(atoi(optarg));
break;
default:
usage(basename(argv[0]));
exit(1);
}
}
ksft_print_header();
ksft_set_plan(3);
ksft_print_msg("%s: Test futex_wait\n", basename(argv[0]));
/* Testing a private futex */
info("Calling private futex_wait on futex: %p\n", futex);
if (pthread_create(&waiter, NULL, waiterfn, (void *) &flags))
error("pthread_create failed\n", errno);
usleep(WAKE_WAIT_US);
info("Calling private futex_wake on futex: %p\n", futex);
res = futex_wake(futex, 1, FUTEX_PRIVATE_FLAG);
if (res != 1) {
ksft_test_result_fail("futex_wake private returned: %d %s\n",
errno, strerror(errno));
ret = RET_FAIL;
} else {
ksft_test_result_pass("futex_wake private succeeds\n");
}
/* Testing an anon page shared memory */
shm_id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0666);
if (shm_id < 0) {
perror("shmget");
exit(1);
}
shared_data = shmat(shm_id, NULL, 0);
*shared_data = 0;
futex = shared_data;
info("Calling shared (page anon) futex_wait on futex: %p\n", futex);
if (pthread_create(&waiter, NULL, waiterfn, NULL))
error("pthread_create failed\n", errno);
usleep(WAKE_WAIT_US);
info("Calling shared (page anon) futex_wake on futex: %p\n", futex);
res = futex_wake(futex, 1, 0);
if (res != 1) {
ksft_test_result_fail("futex_wake shared (page anon) returned: %d %s\n",
errno, strerror(errno));
ret = RET_FAIL;
} else {
ksft_test_result_pass("futex_wake shared (page anon) succeeds\n");
}
/* Testing a file backed shared memory */
fd = open(SHM_PATH, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
if (fd < 0) {
perror("open");
exit(1);
}
if (ftruncate(fd, sizeof(f_private))) {
perror("ftruncate");
exit(1);
}
shm = mmap(NULL, sizeof(f_private), PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (shm == MAP_FAILED) {
perror("mmap");
exit(1);
}
memcpy(shm, &f_private, sizeof(f_private));
futex = shm;
info("Calling shared (file backed) futex_wait on futex: %p\n", futex);
if (pthread_create(&waiter, NULL, waiterfn, NULL))
error("pthread_create failed\n", errno);
usleep(WAKE_WAIT_US);
info("Calling shared (file backed) futex_wake on futex: %p\n", futex);
res = futex_wake(shm, 1, 0);
if (res != 1) {
ksft_test_result_fail("futex_wake shared (file backed) returned: %d %s\n",
errno, strerror(errno));
ret = RET_FAIL;
} else {
ksft_test_result_pass("futex_wake shared (file backed) succeeds\n");
}
/* Freeing resources */
shmdt(shared_data);
munmap(shm, sizeof(f_private));
remove(SHM_PATH);
close(fd);
ksft_print_cnts();
return ret;
}


@@ -11,21 +11,18 @@
  *
  * HISTORY
  *      2009-Nov-6: Initial version by Darren Hart <dvhart@linux.intel.com>
+ *      2021-Apr-26: More test cases by André Almeida <andrealmeid@collabora.com>
  *
  *****************************************************************************/
 
-#include <errno.h>
-#include <getopt.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <time.h>
+#include <pthread.h>
 #include "futextest.h"
 #include "logging.h"
 
 #define TEST_NAME "futex-wait-timeout"
 
 static long timeout_ns = 100000;	/* 100us default timeout */
+static futex_t futex_pi;
 
 void usage(char *prog)
 {
@@ -37,11 +34,67 @@ void usage(char *prog)
 	       VQUIET, VCRITICAL, VINFO);
 }
 
+/*
+ * Get a PI lock and hold it forever, so the main thread lock_pi will block
+ * and we can test the timeout
+ */
+void *get_pi_lock(void *arg)
+{
+	int ret;
+	volatile futex_t lock = 0;
+
+	ret = futex_lock_pi(&futex_pi, NULL, 0, 0);
+	if (ret != 0)
+		error("futex_lock_pi failed\n", ret);
+
+	/* Blocks forever */
+	ret = futex_wait(&lock, 0, NULL, 0);
+	error("futex_wait failed\n", ret);
+
+	return NULL;
+}
+
+/*
+ * Check if the function returned the expected error
+ */
+static void test_timeout(int res, int *ret, char *test_name, int err)
+{
+	if (!res || errno != err) {
+		ksft_test_result_fail("%s returned %d\n", test_name,
+				      res < 0 ? errno : res);
+		*ret = RET_FAIL;
+	} else {
+		ksft_test_result_pass("%s succeeds\n", test_name);
+	}
+}
+
+/*
+ * Calculate absolute timeout and correct overflow
+ */
+static int futex_get_abs_timeout(clockid_t clockid, struct timespec *to,
+				 long timeout_ns)
+{
+	if (clock_gettime(clockid, to)) {
+		error("clock_gettime failed\n", errno);
+		return errno;
+	}
+
+	to->tv_nsec += timeout_ns;
+
+	if (to->tv_nsec >= 1000000000) {
+		to->tv_sec++;
+		to->tv_nsec -= 1000000000;
+	}
+
+	return 0;
+}
+
 int main(int argc, char *argv[])
 {
 	futex_t f1 = FUTEX_INITIALIZER;
-	struct timespec to;
 	int res, ret = RET_PASS;
+	struct timespec to;
+	pthread_t thread;
 	int c;
 
 	while ((c = getopt(argc, argv, "cht:v:")) != -1) {
@@ -65,22 +118,63 @@ int main(int argc, char *argv[])
 	}
 
 	ksft_print_header();
-	ksft_set_plan(1);
+	ksft_set_plan(7);
 	ksft_print_msg("%s: Block on a futex and wait for timeout\n",
 	       basename(argv[0]));
 	ksft_print_msg("\tArguments: timeout=%ldns\n", timeout_ns);
 
-	/* initialize timeout */
+	pthread_create(&thread, NULL, get_pi_lock, NULL);
+
+	/* initialize relative timeout */
 	to.tv_sec = 0;
 	to.tv_nsec = timeout_ns;
 
-	info("Calling futex_wait on f1: %u @ %p\n", f1, &f1);
-	res = futex_wait(&f1, f1, &to, FUTEX_PRIVATE_FLAG);
-	if (!res || errno != ETIMEDOUT) {
-		fail("futex_wait returned %d\n", ret < 0 ? errno : ret);
-		ret = RET_FAIL;
-	}
-
-	print_result(TEST_NAME, ret);
+	res = futex_wait(&f1, f1, &to, 0);
+	test_timeout(res, &ret, "futex_wait relative", ETIMEDOUT);
+
+	/* FUTEX_WAIT_BITSET with CLOCK_REALTIME */
+	if (futex_get_abs_timeout(CLOCK_REALTIME, &to, timeout_ns))
+		return RET_FAIL;
+	res = futex_wait_bitset(&f1, f1, &to, 1, FUTEX_CLOCK_REALTIME);
+	test_timeout(res, &ret, "futex_wait_bitset realtime", ETIMEDOUT);
+
+	/* FUTEX_WAIT_BITSET with CLOCK_MONOTONIC */
+	if (futex_get_abs_timeout(CLOCK_MONOTONIC, &to, timeout_ns))
+		return RET_FAIL;
+	res = futex_wait_bitset(&f1, f1, &to, 1, 0);
+	test_timeout(res, &ret, "futex_wait_bitset monotonic", ETIMEDOUT);
+
+	/* FUTEX_WAIT_REQUEUE_PI with CLOCK_REALTIME */
+	if (futex_get_abs_timeout(CLOCK_REALTIME, &to, timeout_ns))
+		return RET_FAIL;
+	res = futex_wait_requeue_pi(&f1, f1, &futex_pi, &to, FUTEX_CLOCK_REALTIME);
+	test_timeout(res, &ret, "futex_wait_requeue_pi realtime", ETIMEDOUT);
+
+	/* FUTEX_WAIT_REQUEUE_PI with CLOCK_MONOTONIC */
+	if (futex_get_abs_timeout(CLOCK_MONOTONIC, &to, timeout_ns))
+		return RET_FAIL;
+	res = futex_wait_requeue_pi(&f1, f1, &futex_pi, &to, 0);
+	test_timeout(res, &ret, "futex_wait_requeue_pi monotonic", ETIMEDOUT);
+
+	/*
+	 * FUTEX_LOCK_PI with CLOCK_REALTIME
+	 * Due to historical reasons, FUTEX_LOCK_PI supports only realtime
+	 * clock, but requires the caller to not set CLOCK_REALTIME flag.
+	 *
+	 * If you call FUTEX_LOCK_PI with a monotonic clock, it'll be
+	 * interpreted as a realtime clock, and (unless you mess your machine's
+	 * time or your time machine) the monotonic clock value is always
+	 * smaller than realtime and the syscall will timeout immediately.
+	 */
+	if (futex_get_abs_timeout(CLOCK_REALTIME, &to, timeout_ns))
+		return RET_FAIL;
+	res = futex_lock_pi(&futex_pi, &to, 0, 0);
+	test_timeout(res, &ret, "futex_lock_pi realtime", ETIMEDOUT);
+
+	/* Test operations that don't support FUTEX_CLOCK_REALTIME */
+	res = futex_lock_pi(&futex_pi, NULL, 0, FUTEX_CLOCK_REALTIME);
+	test_timeout(res, &ret, "futex_lock_pi invalid timeout flag", ENOSYS);
+
+	ksft_print_cnts();
 	return ret;
 }


@@ -73,3 +73,9 @@ echo
 echo
 ./futex_wait_uninitialized_heap $COLOR
 ./futex_wait_private_mapped_file $COLOR
+
+echo
+./futex_wait $COLOR
+
+echo
+./futex_requeue $COLOR