Locking fixes:
- Get static calls & modules right. Hopefully.
- WW mutex fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>

Merge tag 'locking-urgent-2021-03-21' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Ingo Molnar:

 - Get static calls & modules right. Hopefully.

 - WW mutex fixes

* tag 'locking-urgent-2021-03-21' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  static_call: Fix static_call_update() sanity check
  static_call: Align static_call_is_init() patching condition
  static_call: Fix static_call_set_init()
  locking/ww_mutex: Fix acquire/release imbalance in ww_acquire_init()/ww_acquire_fini()
  locking/ww_mutex: Simplify use_ww_ctx & ww_ctx handling
commit 5ba33b488a
@@ -173,9 +173,10 @@ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
  */
 static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
 {
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
 	mutex_release(&ctx->dep_map, _THIS_IP_);
-
+#endif
+#ifdef CONFIG_DEBUG_MUTEXES
 	DEBUG_LOCKS_WARN_ON(ctx->acquired);
 	if (!IS_ENABLED(CONFIG_PROVE_LOCKING))
 		/*
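The hunk above is the acquire/release imbalance fix: ww_acquire_init() registers the context's dep_map with lockdep under CONFIG_DEBUG_LOCK_ALLOC, so ww_acquire_fini() must drop it under the same guard rather than under CONFIG_DEBUG_MUTEXES, otherwise a DEBUG_MUTEXES=n / DEBUG_LOCK_ALLOC=y build acquires without ever releasing. A minimal user-space sketch of that failure mode (the config macros and the 'held' counter are stand-ins, not kernel code):

#include <assert.h>
#include <stdio.h>

/* Stand-ins for the two independent debug options. */
#define DEBUG_LOCK_ALLOC 1   /* lockdep-style tracking enabled */
#define DEBUG_MUTEXES    0   /* mutex debug checks disabled    */

static int held;             /* how many contexts the tracker thinks are held */

static void ctx_init(void)
{
#if DEBUG_LOCK_ALLOC
	held++;               /* mirrors mutex_acquire() in ww_acquire_init() */
#endif
}

static void ctx_fini_buggy(void)
{
#if DEBUG_MUTEXES             /* wrong guard: release keyed to a different option */
	held--;
#endif
}

static void ctx_fini_fixed(void)
{
#if DEBUG_LOCK_ALLOC          /* same guard as the acquire side */
	held--;               /* mirrors mutex_release() in ww_acquire_fini() */
#endif
}

int main(void)
{
	ctx_init();
	ctx_fini_buggy();
	printf("buggy pairing leaves %d context(s) held\n", held);  /* prints 1 */

	held = 0;
	ctx_init();
	ctx_fini_fixed();
	assert(held == 0);    /* balanced again, as after the fix */
	return 0;
}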
@@ -407,6 +407,14 @@ static bool jump_label_can_update(struct jump_entry *entry, bool init)
 		return false;

 	if (!kernel_text_address(jump_entry_code(entry))) {
+		/*
+		 * This skips patching built-in __exit, which
+		 * is part of init_section_contains() but is
+		 * not part of kernel_text_address().
+		 *
+		 * Skipping built-in __exit is fine since it
+		 * will never be executed.
+		 */
 		WARN_ONCE(!jump_entry_is_init(entry),
 			  "can't patch jump_label at %pS",
 			  (void *)jump_entry_code(entry));
@@ -626,7 +626,7 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
  */
 static __always_inline bool
 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
-		      const bool use_ww_ctx, struct mutex_waiter *waiter)
+		      struct mutex_waiter *waiter)
 {
 	if (!waiter) {
 		/*
@@ -702,7 +702,7 @@ fail:
 #else
 static __always_inline bool
 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
-		      const bool use_ww_ctx, struct mutex_waiter *waiter)
+		      struct mutex_waiter *waiter)
 {
 	return false;
 }
@@ -922,6 +922,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	struct ww_mutex *ww;
 	int ret;

+	if (!use_ww_ctx)
+		ww_ctx = NULL;
+
 	might_sleep();

 #ifdef CONFIG_DEBUG_MUTEXES
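The remaining mutex.c hunks are the use_ww_ctx & ww_ctx simplification: ww_ctx is NULLed once at the top of __mutex_lock_common() when use_ww_ctx is false, so every later test only has to check ww_ctx instead of the pair, and because the function is always inlined with a constant use_ww_ctx the compiler can still drop the wait/wound paths for plain mutex callers. A small stand-alone sketch of the same pattern (names are illustrative, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

struct ww_ctx { int stamp; };

/*
 * 'use_ctx' is a compile-time constant at both call sites below, so after
 * inlining, the 'ctx = NULL' normalisation turns every 'if (ctx)' branch
 * into dead code for the non-ww variant.
 */
static inline int lock_common(bool use_ctx, struct ww_ctx *ctx)
{
	if (!use_ctx)
		ctx = NULL;	/* normalise once, test only 'ctx' below */

	if (ctx)
		printf("ww path, stamp %d\n", ctx->stamp);
	else
		printf("plain mutex path\n");
	return 0;
}

static int lock_plain(void)            { return lock_common(false, NULL); }
static int lock_ww(struct ww_ctx *ctx) { return lock_common(true, ctx); }

int main(void)
{
	struct ww_ctx ctx = { .stamp = 42 };

	lock_plain();   /* "plain mutex path"   */
	lock_ww(&ctx);  /* "ww path, stamp 42"  */
	return 0;
}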
@@ -929,7 +932,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 #endif

 	ww = container_of(lock, struct ww_mutex, base);
-	if (use_ww_ctx && ww_ctx) {
+	if (ww_ctx) {
 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
 			return -EALREADY;

@@ -946,10 +949,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

 	if (__mutex_trylock(lock) ||
-	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, NULL)) {
+	    mutex_optimistic_spin(lock, ww_ctx, NULL)) {
 		/* got the lock, yay! */
 		lock_acquired(&lock->dep_map, ip);
-		if (use_ww_ctx && ww_ctx)
+		if (ww_ctx)
 			ww_mutex_set_context_fastpath(ww, ww_ctx);
 		preempt_enable();
 		return 0;
@@ -960,7 +963,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	 * After waiting to acquire the wait_lock, try again.
 	 */
 	if (__mutex_trylock(lock)) {
-		if (use_ww_ctx && ww_ctx)
+		if (ww_ctx)
 			__ww_mutex_check_waiters(lock, ww_ctx);

 		goto skip_wait;
@@ -1013,7 +1016,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			goto err;
 		}

-		if (use_ww_ctx && ww_ctx) {
+		if (ww_ctx) {
 			ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx);
 			if (ret)
 				goto err;
@@ -1026,7 +1029,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * ww_mutex needs to always recheck its position since its waiter
 		 * list is not FIFO ordered.
 		 */
-		if ((use_ww_ctx && ww_ctx) || !first) {
+		if (ww_ctx || !first) {
 			first = __mutex_waiter_is_first(lock, &waiter);
 			if (first)
 				__mutex_set_flag(lock, MUTEX_FLAG_HANDOFF);
@@ -1039,7 +1042,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * or we must see its unlock and acquire.
 		 */
 		if (__mutex_trylock(lock) ||
-		    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
+		    (first && mutex_optimistic_spin(lock, ww_ctx, &waiter)))
 			break;

 		spin_lock(&lock->wait_lock);
@@ -1048,7 +1051,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 acquired:
 	__set_current_state(TASK_RUNNING);

-	if (use_ww_ctx && ww_ctx) {
+	if (ww_ctx) {
 		/*
 		 * Wound-Wait; we stole the lock (!first_waiter), check the
 		 * waiters as anyone might want to wound us.
@@ -1068,7 +1071,7 @@ skip_wait:
 	/* got the lock - cleanup and rejoice! */
 	lock_acquired(&lock->dep_map, ip);

-	if (use_ww_ctx && ww_ctx)
+	if (ww_ctx)
 		ww_mutex_lock_acquired(ww, ww_ctx);

 	spin_unlock(&lock->wait_lock);
@@ -35,27 +35,30 @@ static inline void *static_call_addr(struct static_call_site *site)
 	return (void *)((long)site->addr + (long)&site->addr);
 }

+static inline unsigned long __static_call_key(const struct static_call_site *site)
+{
+	return (long)site->key + (long)&site->key;
+}
+
 static inline struct static_call_key *static_call_key(const struct static_call_site *site)
 {
-	return (struct static_call_key *)
-		(((long)site->key + (long)&site->key) & ~STATIC_CALL_SITE_FLAGS);
+	return (void *)(__static_call_key(site) & ~STATIC_CALL_SITE_FLAGS);
 }

 /* These assume the key is word-aligned. */
 static inline bool static_call_is_init(struct static_call_site *site)
 {
-	return ((long)site->key + (long)&site->key) & STATIC_CALL_SITE_INIT;
+	return __static_call_key(site) & STATIC_CALL_SITE_INIT;
 }

 static inline bool static_call_is_tail(struct static_call_site *site)
 {
-	return ((long)site->key + (long)&site->key) & STATIC_CALL_SITE_TAIL;
+	return __static_call_key(site) & STATIC_CALL_SITE_TAIL;
 }

 static inline void static_call_set_init(struct static_call_site *site)
 {
-	site->key = ((long)static_call_key(site) | STATIC_CALL_SITE_INIT) -
+	site->key = (__static_call_key(site) | STATIC_CALL_SITE_INIT) -
 		    (long)&site->key;
 }

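The new __static_call_key() helper factors out the relative-offset arithmetic that was open-coded in several places: each static_call_site stores its key as a self-relative offset, with the INIT and TAIL flags packed into the low bits of the decoded address. The static_call_set_init() fix follows from that: the old code rebuilt the value from static_call_key(), which strips all flag bits, so setting INIT could silently drop TAIL; going through __static_call_key() preserves the existing flags. A small stand-alone sketch of the encoding (struct and helper names here are illustrative, only the flag scheme mirrors the kernel's):

#include <assert.h>
#include <stdio.h>

#define SITE_INIT  1UL
#define SITE_TAIL  2UL
#define SITE_FLAGS (SITE_INIT | SITE_TAIL)

struct site {
	long key;	/* self-relative offset to the key, flags in the low bits */
};

/* Stored offset + address of the field = absolute value (key pointer | flags). */
static unsigned long raw_key(const struct site *s)
{
	return (long)s->key + (long)&s->key;
}

static void *key_ptr(const struct site *s)
{
	return (void *)(raw_key(s) & ~SITE_FLAGS);	/* strip flag bits */
}

static int is_init(const struct site *s)
{
	return (raw_key(s) & SITE_INIT) != 0;
}

/* Set a flag while keeping the stored value self-relative and the other flags intact. */
static void set_init(struct site *s)
{
	s->key = (raw_key(s) | SITE_INIT) - (long)&s->key;
}

static long dummy_key __attribute__((aligned(8)));	/* word-aligned: low bits free for flags */

int main(void)
{
	struct site s;

	s.key = (long)&dummy_key - (long)&s.key;	/* store as a relative offset */
	assert(key_ptr(&s) == (void *)&dummy_key && !is_init(&s));

	set_init(&s);
	assert(key_ptr(&s) == (void *)&dummy_key && is_init(&s));

	printf("key %p, init=%d\n", key_ptr(&s), is_init(&s));
	return 0;
}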
@@ -146,6 +149,7 @@ void __static_call_update(struct static_call_key *key, void *tramp, void *func)
 	};

 	for (site_mod = &first; site_mod; site_mod = site_mod->next) {
+		bool init = system_state < SYSTEM_RUNNING;
 		struct module *mod = site_mod->mod;

 		if (!site_mod->sites) {
@@ -165,6 +169,7 @@ void __static_call_update(struct static_call_key *key, void *tramp, void *func)
 		if (mod) {
 			stop = mod->static_call_sites +
 			       mod->num_static_call_sites;
+			init = mod->state == MODULE_STATE_COMING;
 		}
 #endif

@@ -172,25 +177,26 @@ void __static_call_update(struct static_call_key *key, void *tramp, void *func)
 		     site < stop && static_call_key(site) == key; site++) {
 			void *site_addr = static_call_addr(site);

-			if (static_call_is_init(site)) {
-				/*
-				 * Don't write to call sites which were in
-				 * initmem and have since been freed.
-				 */
-				if (!mod && system_state >= SYSTEM_RUNNING)
-					continue;
-				if (mod && !within_module_init((unsigned long)site_addr, mod))
-					continue;
-			}
+			if (!init && static_call_is_init(site))
+				continue;

 			if (!kernel_text_address((unsigned long)site_addr)) {
-				WARN_ONCE(1, "can't patch static call site at %pS",
+				/*
+				 * This skips patching built-in __exit, which
+				 * is part of init_section_contains() but is
+				 * not part of kernel_text_address().
+				 *
+				 * Skipping built-in __exit is fine since it
+				 * will never be executed.
+				 */
+				WARN_ONCE(!static_call_is_init(site),
+					  "can't patch static call site at %pS",
 					  site_addr);
 				continue;
 			}

 			arch_static_call_transform(site_addr, NULL, func,
 						   static_call_is_tail(site));
 		}
 	}

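The __static_call_update() change replaces the per-site initmem heuristics with a single init flag: for built-in sites it is true only until the system reaches SYSTEM_RUNNING (init text not yet freed), and for a module only while that module is still COMING (its __init sections still exist). Any init site seen with the flag false is skipped rather than patched, so a remaining kernel_text_address() failure can only be built-in __exit text, which the WARN_ONCE now tolerates. A compact sketch of that decision table (the enums and helper are illustrative only, not kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the state the patching code inspects. */
enum sys_state { SYS_BOOTING, SYS_RUNNING };
enum mod_state { MOD_NONE, MOD_COMING, MOD_LIVE };

/* May we patch a call site that lives in an __init section? */
static bool init_site_patchable(enum sys_state sys, enum mod_state mod)
{
	if (mod != MOD_NONE)			/* site belongs to a module        */
		return mod == MOD_COMING;	/* its init text still exists      */
	return sys < SYS_RUNNING;		/* built-in init text not yet freed */
}

int main(void)
{
	printf("built-in, booting : %d\n", init_site_patchable(SYS_BOOTING, MOD_NONE));   /* 1 */
	printf("built-in, running : %d\n", init_site_patchable(SYS_RUNNING, MOD_NONE));   /* 0 */
	printf("module,   coming  : %d\n", init_site_patchable(SYS_RUNNING, MOD_COMING)); /* 1 */
	printf("module,   live    : %d\n", init_site_patchable(SYS_RUNNING, MOD_LIVE));   /* 0 */
	return 0;
}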
@@ -349,7 +355,7 @@ static int static_call_add_module(struct module *mod)
 	struct static_call_site *site;

 	for (site = start; site != stop; site++) {
-		unsigned long s_key = (long)site->key + (long)&site->key;
+		unsigned long s_key = __static_call_key(site);
 		unsigned long addr = s_key & ~STATIC_CALL_SITE_FLAGS;
 		unsigned long key;
