Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking fixes from Thomas Gleixner:
 "A set of locking fixes:

   - Address the fallout of the rwsem rework. Missing ACQUIREs and a
     sanity check to prevent a use-after-free

   - Add missing checks for uninitialized mutexes when mutex debugging
     is enabled.

   - Remove the bogus code in the generic SMP variant of
     arch_futex_atomic_op_inuser()

   - Fixup the #ifdeffery in lockdep to prevent compile warnings"

* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/mutex: Test for initialized mutex
  locking/lockdep: Clean up #ifdef checks
  locking/lockdep: Hide unused 'class' variable
  locking/rwsem: Add ACQUIRE comments
  tty/ldsem, locking/rwsem: Add missing ACQUIRE to read_failed sleep loop
  lcoking/rwsem: Add missing ACQUIRE to read_slowpath sleep loop
  locking/rwsem: Add missing ACQUIRE to read_slowpath exit when queue is empty
  locking/rwsem: Don't call owner_on_cpu() on read-owner
  futex: Cleanup generic SMP variant of arch_futex_atomic_op_inuser()
commit 431f288ed7
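The common thread in the ACQUIRE fixes below is a waiter handoff: the waking side publishes the grant by clearing waiter->task with a RELEASE store (smp_store_release()), and the sleeping side must re-read that field with an ACQUIRE load (smp_load_acquire()) so that everything the waker wrote before the grant is visible once the wait loop exits. A minimal user-space sketch of that pairing, using C11 atomics in place of the kernel primitives and entirely hypothetical names:

/*
 * Sketch only: user-space analogue of the waiter handoff fixed in this pull.
 * C11 atomics stand in for the kernel's smp_store_release()/smp_load_acquire();
 * all names here (struct waiter, grant_lock, wait_for_grant) are hypothetical.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct waiter {
	_Atomic(void *) task;	/* non-NULL while still waiting */
};

static int protected_data;	/* what the "lock" protects */
static struct waiter w = { .task = (void *)&w };

/* Waker side: do the work, then publish ownership with a RELEASE store. */
static void *grant_lock(void *arg)
{
	protected_data = 42;				/* plain write ...        */
	atomic_store_explicit(&w.task, NULL,		/* ... made visible by    */
			      memory_order_release);	/* the RELEASE store      */
	return NULL;
}

/* Sleeper side: the ACQUIRE load pairs with the RELEASE store above. */
static void wait_for_grant(void)
{
	while (atomic_load_explicit(&w.task, memory_order_acquire))
		;	/* the kernel sleeps here instead of spinning */
	/* Everything written before the RELEASE store is now visible. */
	printf("protected_data = %d\n", protected_data);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, grant_lock, NULL);
	wait_for_grant();
	pthread_join(t, NULL);
	return 0;
}

Without the acquire half, the CPU or compiler may hoist reads of the protected data above the waiter->task check, which is the class of bug the ldsem and rwsem hunks below close.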
@@ -93,8 +93,7 @@ static void __ldsem_wake_readers(struct ld_semaphore *sem)
 	list_for_each_entry_safe(waiter, next, &sem->read_wait, list) {
 		tsk = waiter->task;
-		smp_mb();
-		waiter->task = NULL;
+		smp_store_release(&waiter->task, NULL);
 		wake_up_process(tsk);
 		put_task_struct(tsk);
 	}
@@ -194,7 +193,7 @@ down_read_failed(struct ld_semaphore *sem, long count, long timeout)
 	for (;;) {
 		set_current_state(TASK_UNINTERRUPTIBLE);
 
-		if (!waiter.task)
+		if (!smp_load_acquire(&waiter.task))
 			break;
 		if (!timeout)
 			break;
@@ -118,26 +118,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 static inline int
 arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
 {
-	int oldval = 0, ret;
-
-	pagefault_disable();
-
-	switch (op) {
-	case FUTEX_OP_SET:
-	case FUTEX_OP_ADD:
-	case FUTEX_OP_OR:
-	case FUTEX_OP_ANDN:
-	case FUTEX_OP_XOR:
-	default:
-		ret = -ENOSYS;
-	}
-
-	pagefault_enable();
-
-	if (!ret)
-		*oval = oldval;
-
-	return ret;
+	return -ENOSYS;
 }
 
 static inline int
@@ -448,7 +448,7 @@ static void print_lockdep_off(const char *bug_msg)
 
 unsigned long nr_stack_trace_entries;
 
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * Stack-trace: tightly packed array of stack backtrace
  * addresses. Protected by the graph_lock.
@@ -491,7 +491,7 @@ unsigned int max_lockdep_depth;
 DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
 #endif
 
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * Locking printouts:
  */
@@ -2969,7 +2969,7 @@ static void check_chain_key(struct task_struct *curr)
 #endif
 }
 
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
+#ifdef CONFIG_PROVE_LOCKING
 static int mark_lock(struct task_struct *curr, struct held_lock *this,
 		     enum lock_usage_bit new_bit);
 
@@ -3608,7 +3608,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	return ret;
 }
 
-#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
+#else /* CONFIG_PROVE_LOCKING */
 
 static inline int
 mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
@@ -3627,7 +3627,7 @@ static inline int separate_irq_context(struct task_struct *curr,
 	return 0;
 }
 
-#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
+#endif /* CONFIG_PROVE_LOCKING */
 
 /*
  * Initialize a lock instance's lock-class mapping info:
@@ -4321,8 +4321,7 @@ static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
  */
 static void check_flags(unsigned long flags)
 {
-#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
-    defined(CONFIG_TRACE_IRQFLAGS)
+#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
 	if (!debug_locks)
 		return;
 
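The #ifdef collapse above is, as far as I can tell, justified by Kconfig: PROVE_LOCKING selects TRACE_IRQFLAGS, so a configuration with the former but not the latter should not exist and the compound guard adds nothing. A small standalone sketch of that equivalence, under that stated assumption:

/*
 * Illustrative only. Assumes CONFIG_PROVE_LOCKING can never be set without
 * CONFIG_TRACE_IRQFLAGS (Kconfig 'select'); under that assumption the two
 * guards below always agree.
 */

/* Old form: both symbols spelled out. */
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
static inline int guarded_old(void) { return 1; }
#else
static inline int guarded_old(void) { return 0; }
#endif

/* New form: the single implied symbol is enough. */
#ifdef CONFIG_PROVE_LOCKING
static inline int guarded_new(void) { return 1; }
#else
static inline int guarded_new(void) { return 0; }
#endif

int main(void) { return guarded_old() == guarded_new() ? 0 : 1; }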
@@ -200,7 +200,6 @@ static void lockdep_stats_debug_show(struct seq_file *m)
 
 static int lockdep_stats_show(struct seq_file *m, void *v)
 {
-	struct lock_class *class;
 	unsigned long nr_unused = 0, nr_uncategorized = 0,
 		      nr_irq_safe = 0, nr_irq_unsafe = 0,
 		      nr_softirq_safe = 0, nr_softirq_unsafe = 0,
@@ -211,6 +210,8 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
 		      sum_forward_deps = 0;
 
 #ifdef CONFIG_PROVE_LOCKING
+	struct lock_class *class;
+
 	list_for_each_entry(class, &all_lock_classes, lock_entry) {
 
 		if (class->usage_mask == 0)
@@ -908,6 +908,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 
 	might_sleep();
 
+#ifdef CONFIG_DEBUG_MUTEXES
+	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
+#endif
+
 	ww = container_of(lock, struct ww_mutex, base);
 	if (use_ww_ctx && ww_ctx) {
 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
@@ -1379,8 +1383,13 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
  */
 int __sched mutex_trylock(struct mutex *lock)
 {
-	bool locked = __mutex_trylock(lock);
+	bool locked;
 
+#ifdef CONFIG_DEBUG_MUTEXES
+	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
+#endif
+
+	locked = __mutex_trylock(lock);
 	if (locked)
 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 
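The new DEBUG_LOCKS_WARN_ON(lock->magic != lock) lines catch mutexes that were never initialized (or whose memory was scribbled over): with mutex debugging enabled, initialization points the magic field back at the mutex itself, so any other value means the lock was not set up. A small user-space sketch of the same self-pointer trick, with hypothetical names rather than the kernel's implementation:

/*
 * Sketch of the self-pointer "magic" check behind the mutex debug fixes.
 * Names (struct dbg_mutex, dbg_mutex_init, dbg_mutex_lock) are hypothetical;
 * the kernel does this via lock->magic and DEBUG_LOCKS_WARN_ON().
 */
#include <assert.h>
#include <string.h>

struct dbg_mutex {
	int locked;
	struct dbg_mutex *magic;	/* points at the mutex itself once initialized */
};

static void dbg_mutex_init(struct dbg_mutex *m)
{
	m->locked = 0;
	m->magic = m;			/* the self-pointer the check looks for */
}

static void dbg_mutex_lock(struct dbg_mutex *m)
{
	/* An uninitialized or corrupted mutex fails this check immediately. */
	assert(m->magic == m && "lock on uninitialized mutex");
	m->locked = 1;
}

int main(void)
{
	struct dbg_mutex good, bad;

	dbg_mutex_init(&good);
	dbg_mutex_lock(&good);		/* fine */

	memset(&bad, 0, sizeof(bad));	/* never initialized ... */
	dbg_mutex_lock(&bad);		/* ... trips the assert */
	return 0;
}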
@@ -666,7 +666,11 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
 	preempt_disable();
 	rcu_read_lock();
 	owner = rwsem_owner_flags(sem, &flags);
-	if ((flags & nonspinnable) || (owner && !owner_on_cpu(owner)))
+	/*
+	 * Don't check the read-owner as the entry may be stale.
+	 */
+	if ((flags & nonspinnable) ||
+	    (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
 		ret = false;
 	rcu_read_unlock();
 	preempt_enable();
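This rwsem_can_spin_on_owner() change addresses the use-after-free mentioned in the pull message: for a reader-owned rwsem the recorded owner is only a hint left by whichever reader set it last, and that task may already have unlocked and exited, so the pointer must not be dereferenced (as owner_on_cpu() would) unless a stale read-owner has been ruled out. A rough illustration of the decision, with hypothetical names standing in for the kernel helpers:

/*
 * Sketch of the spin/no-spin decision. OWNER_NONSPINNABLE, OWNER_READER and
 * task_seems_running() are stand-ins for the kernel's flag bits and
 * owner_on_cpu(), not the real API.
 */
#include <stdbool.h>
#include <stdio.h>

#define OWNER_NONSPINNABLE	0x1
#define OWNER_READER		0x2

struct task { bool on_cpu; };

/* In the kernel this dereferences owner->on_cpu: only safe for a write-owner. */
static bool task_seems_running(const struct task *owner)
{
	return owner->on_cpu;
}

static bool can_spin_on_owner(const struct task *owner, unsigned long flags)
{
	if (flags & OWNER_NONSPINNABLE)
		return false;
	/*
	 * A reader-owner entry may be stale: the task could have exited,
	 * so skip the dereference entirely and stay optimistic.
	 */
	if (flags & OWNER_READER)
		return true;
	/* A write-owner cannot exit while holding the lock: safe to inspect. */
	return !owner || task_seems_running(owner);
}

int main(void)
{
	struct task writer = { .on_cpu = true };

	printf("writer on cpu: %d\n", can_spin_on_owner(&writer, 0));
	printf("stale reader : %d\n", can_spin_on_owner(NULL, OWNER_READER));
	return 0;
}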
@@ -1000,6 +1004,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
 		atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
 		adjustment = 0;
 		if (rwsem_optimistic_spin(sem, false)) {
+			/* rwsem_optimistic_spin() implies ACQUIRE on success */
 			/*
 			 * Wake up other readers in the wait list if the front
 			 * waiter is a reader.
@@ -1014,6 +1019,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
 			}
 			return sem;
 		} else if (rwsem_reader_phase_trylock(sem, waiter.last_rowner)) {
+			/* rwsem_reader_phase_trylock() implies ACQUIRE on success */
 			return sem;
 		}
 
@@ -1032,6 +1038,8 @@ queue:
 	 */
 	if (adjustment && !(atomic_long_read(&sem->count) &
 	     (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
+		/* Provide lock ACQUIRE */
+		smp_acquire__after_ctrl_dep();
 		raw_spin_unlock_irq(&sem->wait_lock);
 		rwsem_set_reader_owned(sem);
 		lockevent_inc(rwsem_rlock_fast);
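smp_acquire__after_ctrl_dep() upgrades the ordering already implied by the preceding control dependency (the branch on atomic_long_read()) into a full ACQUIRE, so this early exit from the read slowpath gives the same guarantee a normal lock acquisition would. In barrier terms it is roughly what an acquire load at the branch would have provided; a hedged C11 analogue of the pattern, with hypothetical names:

/*
 * Sketch of "control dependency + acquire barrier" vs. a plain acquire load.
 * atomic_thread_fence(memory_order_acquire) stands in for the kernel's
 * smp_acquire__after_ctrl_dep(); all names here are hypothetical.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_long lock_word;
static int payload;

/* Variant 1: relaxed load, branch, then promote the ordering to ACQUIRE. */
static bool try_fast_path_ctrl_dep(void)
{
	if (atomic_load_explicit(&lock_word, memory_order_relaxed) != 0)
		return false;			/* contended: take the slow path */
	atomic_thread_fence(memory_order_acquire);
	return payload == 42;			/* reads ordered after the check */
}

/* Variant 2: the same guarantee obtained with an acquire load up front. */
static bool try_fast_path_acquire(void)
{
	if (atomic_load_explicit(&lock_word, memory_order_acquire) != 0)
		return false;
	return payload == 42;
}

int main(void)
{
	return try_fast_path_ctrl_dep() == try_fast_path_acquire() ? 0 : 1;
}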
@@ -1065,15 +1073,18 @@ queue:
 	wake_up_q(&wake_q);
 
 	/* wait to be given the lock */
-	while (true) {
+	for (;;) {
 		set_current_state(state);
-		if (!waiter.task)
+		if (!smp_load_acquire(&waiter.task)) {
+			/* Matches rwsem_mark_wake()'s smp_store_release(). */
 			break;
+		}
 		if (signal_pending_state(state, current)) {
 			raw_spin_lock_irq(&sem->wait_lock);
 			if (waiter.task)
 				goto out_nolock;
 			raw_spin_unlock_irq(&sem->wait_lock);
+			/* Ordered by sem->wait_lock against rwsem_mark_wake(). */
 			break;
 		}
 		schedule();
@@ -1083,6 +1094,7 @@ queue:
 	__set_current_state(TASK_RUNNING);
 	lockevent_inc(rwsem_rlock);
 	return sem;
 
 out_nolock:
 	list_del(&waiter.list);
 	if (list_empty(&sem->wait_list)) {
@@ -1123,8 +1135,10 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
 
 	/* do optimistic spinning and steal lock if possible */
 	if (rwsem_can_spin_on_owner(sem, RWSEM_WR_NONSPINNABLE) &&
-	    rwsem_optimistic_spin(sem, true))
+	    rwsem_optimistic_spin(sem, true)) {
+		/* rwsem_optimistic_spin() implies ACQUIRE on success */
 		return sem;
+	}
 
 	/*
 	 * Disable reader optimistic spinning for this rwsem after
@@ -1184,9 +1198,11 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
 wait:
 	/* wait until we successfully acquire the lock */
 	set_current_state(state);
-	while (true) {
-		if (rwsem_try_write_lock(sem, wstate))
+	for (;;) {
+		if (rwsem_try_write_lock(sem, wstate)) {
+			/* rwsem_try_write_lock() implies ACQUIRE on success */
 			break;
+		}
 
 		raw_spin_unlock_irq(&sem->wait_lock);
 
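The "... implies ACQUIRE on success" comments record an invariant the slowpaths rely on: any helper that can report "lock taken" must have obtained the lock with an acquiring atomic, so the caller may touch protected data immediately after a successful return, with no extra barrier. A minimal sketch of such a trylock, using C11 atomics and hypothetical names rather than the kernel's rwsem internals:

/*
 * Sketch: a trylock whose success implies ACQUIRE, the property the new
 * comments document for rwsem_optimistic_spin()/rwsem_try_write_lock().
 * Hypothetical names; C11 atomics instead of the kernel's atomic_long_*().
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int lock_word;	/* 0 = free, 1 = write-locked */
static int protected_data;

static bool try_write_lock(void)
{
	int expected = 0;

	/*
	 * memory_order_acquire on success: everything the previous owner
	 * published before its releasing unlock is visible to us now.
	 */
	return atomic_compare_exchange_strong_explicit(&lock_word, &expected, 1,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void write_unlock(void)
{
	protected_data++;					/* publish ...            */
	atomic_store_explicit(&lock_word, 0, memory_order_release);	/* ... then release */
}

int main(void)
{
	if (try_write_lock())		/* success: no extra barrier needed */
		write_unlock();
	printf("%d\n", protected_data);
	return 0;
}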