Merge tag 'locking-core-2022-05-23' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:

 - rwsem cleanups & optimizations/fixes:
    - Conditionally wake waiters in reader/writer slowpaths
    - Always try to wake waiters in out_nolock path

 - Add try_cmpxchg64() implementation, with arch optimizations - and use
   it to micro-optimize sched_clock_{local,remote}()

 - Various force-inlining fixes to address objtool instrumentation-check
   warnings

 - Add lock contention tracepoints:

    lock:contention_begin
    lock:contention_end

 - Misc smaller fixes & cleanups

* tag 'locking-core-2022-05-23' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/clock: Use try_cmpxchg64 in sched_clock_{local,remote}
  locking/atomic/x86: Introduce arch_try_cmpxchg64
  locking/atomic: Add generic try_cmpxchg64 support
  futex: Remove a PREEMPT_RT_FULL reference.
  locking/qrwlock: Change "queue rwlock" to "queued rwlock"
  lockdep: Delete local_irq_enable_in_hardirq()
  locking/mutex: Make contention tracepoints more consistent wrt adaptive spinning
  locking: Apply contention tracepoints in the slow path
  locking: Add lock contention tracepoints
  locking/rwsem: Always try to wake waiters in out_nolock path
  locking/rwsem: Conditionally wake waiters in reader/writer slowpaths
  locking/rwsem: No need to check for handoff bit if wait queue empty
  lockdep: Fix -Wunused-parameter for _THIS_IP_
  x86/mm: Force-inline __phys_addr_nodebug()
  x86/kvm/svm: Force-inline GHCB accessors
  task_stack, x86/cea: Force-inline stack helpers
This commit is contained in:
Linus Torvalds
2022-05-24 10:18:23 -07:00
31 changed files with 412 additions and 148 deletions

View File

@@ -164,41 +164,44 @@ gen_xchg_fallbacks()
#gen_try_cmpxchg_fallback(cmpxchg, order)
# Emit the generic arch_try_${cmpxchg}${order}() fallback macro.
# The fallback implements try_cmpxchg in terms of the plain cmpxchg:
# it performs the exchange, writes the observed value back through
# *_oldp on failure, and evaluates to the boolean success result.
# $1 - base primitive name ("cmpxchg" or "cmpxchg64")
# $2 - ordering suffix ("", "_acquire", "_release", "_relaxed")
gen_try_cmpxchg_fallback()
{
	local cmpxchg="$1"; shift;
	local order="$1"; shift;

cat <<EOF
#ifndef arch_try_${cmpxchg}${order}
#define arch_try_${cmpxchg}${order}(_ptr, _oldp, _new) \\
({ \\
	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \\
	___r = arch_${cmpxchg}${order}((_ptr), ___o, (_new)); \\
	if (unlikely(___r != ___o)) \\
		*___op = ___r; \\
	likely(___r == ___o); \\
})
#endif /* arch_try_${cmpxchg}${order} */

EOF
}
#gen_try_cmpxchg_fallbacks(cmpxchg)
# Emit the full set of arch_try_${cmpxchg}{,_acquire,_release,_relaxed}()
# fallbacks, guarded so that:
#  - if the arch defines no _relaxed variant, any arch-provided
#    arch_try_${cmpxchg} is propagated to the other orderings and the
#    generic cmpxchg-based fallback fills the remaining gaps;
#  - otherwise, the ordering variants are derived from _relaxed.
# $1 - base primitive name ("cmpxchg" or "cmpxchg64")
gen_try_cmpxchg_fallbacks()
{
	local cmpxchg="$1"; shift;

	printf "#ifndef arch_try_${cmpxchg}_relaxed\n"
	printf "#ifdef arch_try_${cmpxchg}\n"

	gen_basic_fallbacks "arch_try_${cmpxchg}"

	printf "#endif /* arch_try_${cmpxchg} */\n\n"

	# One generic cmpxchg-based fallback per ordering variant.
	for order in "" "_acquire" "_release" "_relaxed"; do
		gen_try_cmpxchg_fallback "${cmpxchg}" "${order}"
	done

	printf "#else /* arch_try_${cmpxchg}_relaxed */\n"

	gen_order_fallbacks "arch_try_${cmpxchg}"

	printf "#endif /* arch_try_${cmpxchg}_relaxed */\n\n"
}
cat << EOF
@@ -218,7 +221,9 @@ for xchg in "arch_xchg" "arch_cmpxchg" "arch_cmpxchg64"; do
gen_xchg_fallbacks "${xchg}"
done
gen_try_cmpxchg_fallbacks
for cmpxchg in "cmpxchg" "cmpxchg64"; do
gen_try_cmpxchg_fallbacks "${cmpxchg}"
done
grep '^[a-z]' "$1" | while read name meta args; do
gen_proto "${meta}" "${name}" "atomic" "int" ${args}

View File

@@ -166,7 +166,7 @@ grep '^[a-z]' "$1" | while read name meta args; do
done
for xchg in "xchg" "cmpxchg" "cmpxchg64" "try_cmpxchg"; do
for xchg in "xchg" "cmpxchg" "cmpxchg64" "try_cmpxchg" "try_cmpxchg64"; do
for order in "" "_acquire" "_release" "_relaxed"; do
gen_xchg "${xchg}" "${order}" ""
printf "\n"