commit 6e649d0856
Merge tag 'locking-core-2023-02-20' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar. Updates for this cycle were:

 - rwsem micro-optimizations
 - spinlock micro-optimizations
 - cleanups, simplifications

Signed-off-by: Ingo Molnar <mingo@kernel.org>

* tag 'locking-core-2023-02-20' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  vduse: Remove include of rwlock.h
  locking/lockdep: Remove lockdep_init_map_crosslock.
  x86/ACPI/boot: Use try_cmpxchg() in __acpi_{acquire,release}_global_lock()
  x86/PAT: Use try_cmpxchg() in set_page_memtype()
  locking/rwsem: Disable preemption in all down_write*() and up_write() code paths
  locking/rwsem: Disable preemption in all down_read*() and up_read() code paths
  locking/rwsem: Prevent non-first waiter from spinning in down_write() slowpath
  locking/qspinlock: Micro-optimize pending state waiting for unlock
x86/ACPI/boot: Use try_cmpxchg() in __acpi_{acquire,release}_global_lock():

@@ -1840,23 +1840,23 @@ early_param("acpi_sci", setup_acpi_sci);
 
 int __acpi_acquire_global_lock(unsigned int *lock)
 {
-	unsigned int old, new, val;
+	unsigned int old, new;
+
+	old = READ_ONCE(*lock);
 	do {
-		old = *lock;
 		new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
-		val = cmpxchg(lock, old, new);
-	} while (unlikely (val != old));
+	} while (!try_cmpxchg(lock, &old, new));
 	return ((new & 0x3) < 3) ? -1 : 0;
 }
 
 int __acpi_release_global_lock(unsigned int *lock)
 {
-	unsigned int old, new, val;
+	unsigned int old, new;
+
+	old = READ_ONCE(*lock);
 	do {
-		old = *lock;
 		new = old & ~0x3;
-		val = cmpxchg(lock, old, new);
-	} while (unlikely (val != old));
+	} while (!try_cmpxchg(lock, &old, new));
 	return old & 0x1;
 }
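This hunk shows the conversion pattern that recurs throughout the pull: a cmpxchg() retry loop that re-reads the lock word on every iteration becomes a try_cmpxchg() loop, because try_cmpxchg() writes the current value back into its "old" argument when the exchange fails, so the explicit re-read and the extra "val" variable disappear. Below is a minimal userspace sketch of the same pattern, using the GCC/Clang __atomic builtins as stand-ins for the kernel primitives; the bit math is copied from the hunk above, while the function and variable names and the return-value comments are illustrative assumptions, not kernel code:

	#include <stdio.h>

	/*
	 * Stand-in for the FACS global lock word: bit 0 = pending, bit 1 = owned.
	 * __atomic_compare_exchange_n(), like the kernel's try_cmpxchg(), stores
	 * the current value back into "old" when the exchange fails, so the loop
	 * never needs an explicit re-read.
	 */
	static unsigned int global_lock;

	static int acquire_global_lock(unsigned int *lock)
	{
		unsigned int old, new;

		old = __atomic_load_n(lock, __ATOMIC_RELAXED);
		do {
			/* Same bit math as the hunk above. */
			new = (((old & ~0x3) + 2) + ((old >> 1) & 0x1));
		} while (!__atomic_compare_exchange_n(lock, &old, new, 0,
						      __ATOMIC_SEQ_CST, __ATOMIC_RELAXED));

		/* -1: lock taken; 0: already owned, pending bit now set. */
		return ((new & 0x3) < 3) ? -1 : 0;
	}

	int main(void)
	{
		printf("first acquire:  %d\n", acquire_global_lock(&global_lock));
		printf("second acquire: %d\n", acquire_global_lock(&global_lock));
		return 0;
	}

The usual rationale for this family of changes is that, on x86, CMPXCHG reports success in ZF, so try_cmpxchg() lets the compiler branch on the flag directly instead of comparing the returned old value.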
x86/PAT: Use try_cmpxchg() in set_page_memtype():

@@ -159,10 +159,10 @@ static inline void set_page_memtype(struct page *pg,
 		break;
 	}
 
+	old_flags = READ_ONCE(pg->flags);
 	do {
-		old_flags = pg->flags;
 		new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
-	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
+	} while (!try_cmpxchg(&pg->flags, &old_flags, new_flags));
 }
 #else
 static inline enum page_cache_mode get_page_memtype(struct page *pg)
vduse: Remove include of rwlock.h:

@@ -14,7 +14,6 @@
 #include <linux/iova.h>
 #include <linux/dma-mapping.h>
 #include <linux/vhost_iotlb.h>
-#include <linux/rwlock.h>
 
 #define IOVA_START_PFN 1
 
locking/lockdep: Remove lockdep_init_map_crosslock:

@@ -435,7 +435,6 @@ enum xhlock_context_t {
 	XHLOCK_CTX_NR,
 };
 
-#define lockdep_init_map_crosslock(m, n, k, s) do {} while (0)
 /*
  * To initialize a lockdep_map statically use this macro.
  * Note that _name must not be NULL.
locking/qspinlock: Micro-optimize pending state waiting for unlock:

@@ -371,7 +371,7 @@ void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	/*
 	 * We're pending, wait for the owner to go away.
 	 *
-	 * 0,1,1 -> 0,1,0
+	 * 0,1,1 -> *,1,0
 	 *
 	 * this wait loop must be a load-acquire such that we match the
 	 * store-release that clears the locked bit and create lock
@@ -380,7 +380,7 @@ void __lockfunc queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	 * barriers.
 	 */
 	if (val & _Q_LOCKED_MASK)
-		atomic_cond_read_acquire(&lock->val, !(VAL & _Q_LOCKED_MASK));
+		smp_cond_load_acquire(&lock->locked, !VAL);
 
 	/*
 	 * take ownership and clear the pending bit.
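The point of this change: a pending waiter only needs the locked byte to become zero, and in lock-word layouts where pending and locked each occupy their own byte it can spin on that byte alone instead of acquire-loading the whole word, so the loop's exit condition no longer depends on tail bits that other CPUs may be rewriting concurrently. That is what the "0,1,1 -> *,1,0" comment change records: the tail may be anything by the time the owner goes away. A rough userspace model of waiting on just the locked byte with an acquire load follows; the union layout assumes a little-endian build, and every name here is an illustrative stand-in, not the kernel's:

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>
	#include <unistd.h>

	/* Toy lock word: locked byte, pending byte, 16-bit tail (little-endian). */
	union toy_qspinlock {
		_Atomic unsigned int val;
		struct {
			_Atomic unsigned char locked;
			_Atomic unsigned char pending;
			_Atomic unsigned short tail;
		} b;
	};

	static union toy_qspinlock lock = { .b = { .locked = 1, .pending = 1 } };

	static void *owner_thread(void *arg)
	{
		(void)arg;
		usleep(10000);
		/* Unlock: release-store to the locked byte, matching the
		 * acquire-load in the waiter below. */
		atomic_store_explicit(&lock.b.locked, 0, memory_order_release);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, owner_thread, NULL);

		/* Pending waiter: spin on the locked byte alone; concurrent
		 * tail updates elsewhere in the word cannot perturb this loop. */
		while (atomic_load_explicit(&lock.b.locked, memory_order_acquire))
			;

		puts("locked byte cleared; pending waiter can take the lock");
		pthread_join(t, NULL);
		return 0;
	}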
locking/rwsem (covering the three rwsem commits above: disabling preemption in the down_write*()/up_write() and down_read*()/up_read() paths, and preventing non-first waiters from spinning in the down_write() slowpath):

@@ -256,16 +256,13 @@ static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp)
 static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
 {
 	long tmp = RWSEM_UNLOCKED_VALUE;
-	bool ret = false;
 
-	preempt_disable();
 	if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {
 		rwsem_set_owner(sem);
-		ret = true;
+		return true;
 	}
 
-	preempt_enable();
-	return ret;
+	return false;
 }
 
 /*
@@ -624,18 +621,16 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
 			 */
 			if (first->handoff_set && (waiter != first))
 				return false;
-
-			/*
-			 * First waiter can inherit a previously set handoff
-			 * bit and spin on rwsem if lock acquisition fails.
-			 */
-			if (waiter == first)
-				waiter->handoff_set = true;
 		}
 
 		new = count;
 
 		if (count & RWSEM_LOCK_MASK) {
+			/*
+			 * A waiter (first or not) can set the handoff bit
+			 * if it is an RT task or wait in the wait queue
+			 * for too long.
+			 */
 			if (has_handoff || (!rt_task(waiter->task) &&
 					    !time_after(jiffies, waiter->timeout)))
 				return false;
@@ -651,11 +646,12 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem,
 	} while (!atomic_long_try_cmpxchg_acquire(&sem->count, &count, new));
 
 	/*
-	 * We have either acquired the lock with handoff bit cleared or
-	 * set the handoff bit.
+	 * We have either acquired the lock with handoff bit cleared or set
+	 * the handoff bit. Only the first waiter can have its handoff_set
+	 * set here to enable optimistic spinning in slowpath loop.
 	 */
 	if (new & RWSEM_FLAG_HANDOFF) {
-		waiter->handoff_set = true;
+		first->handoff_set = true;
 		lockevent_inc(rwsem_wlock_handoff);
 		return false;
 	}
@@ -717,7 +713,6 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 		return false;
 	}
 
-	preempt_disable();
 	/*
 	 * Disable preemption is equal to the RCU read-side crital section,
 	 * thus the task_strcut structure won't go away.
@@ -729,7 +724,6 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 	if ((flags & RWSEM_NONSPINNABLE) ||
 	    (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
 		ret = false;
-	preempt_enable();
 
 	lockevent_cond_inc(rwsem_opt_fail, !ret);
 	return ret;
@@ -829,8 +823,6 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 	int loop = 0;
 	u64 rspin_threshold = 0;
 
-	preempt_disable();
-
 	/* sem->wait_lock should not be held when doing optimistic spinning */
 	if (!osq_lock(&sem->osq))
 		goto done;
@@ -938,7 +930,6 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 	}
 	osq_unlock(&sem->osq);
 done:
-	preempt_enable();
 	lockevent_cond_inc(rwsem_opt_fail, !taken);
 	return taken;
 }
@@ -1092,7 +1083,7 @@ queue:
 			/* Ordered by sem->wait_lock against rwsem_mark_wake(). */
 			break;
 		}
-		schedule();
+		schedule_preempt_disabled();
 		lockevent_inc(rwsem_sleep_reader);
 	}
 
@@ -1179,15 +1170,12 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
 		if (waiter.handoff_set) {
 			enum owner_state owner_state;
 
-			preempt_disable();
 			owner_state = rwsem_spin_on_owner(sem);
-			preempt_enable();
-
 			if (owner_state == OWNER_NULL)
 				goto trylock_again;
 		}
 
-		schedule();
+		schedule_preempt_disabled();
 		lockevent_inc(rwsem_sleep_writer);
 		set_current_state(state);
 trylock_again:
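With preemption now held off across these slowpaths, the bare schedule() calls above become schedule_preempt_disabled(), which re-enables preemption for the sleep itself and disables it again on wakeup. For reference, the scheduler-side helper in kernel/sched/core.c is essentially:

	void __sched schedule_preempt_disabled(void)
	{
		/* Enter with preempt_count == 1; sleep; leave the same way. */
		sched_preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}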
@@ -1254,14 +1242,20 @@ static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
  */
 static inline int __down_read_common(struct rw_semaphore *sem, int state)
 {
+	int ret = 0;
 	long count;
 
+	preempt_disable();
 	if (!rwsem_read_trylock(sem, &count)) {
-		if (IS_ERR(rwsem_down_read_slowpath(sem, count, state)))
-			return -EINTR;
+		if (IS_ERR(rwsem_down_read_slowpath(sem, count, state))) {
+			ret = -EINTR;
+			goto out;
+		}
 		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
 	}
-	return 0;
+out:
+	preempt_enable();
+	return ret;
 }
 
 static inline void __down_read(struct rw_semaphore *sem)
@@ -1281,19 +1275,23 @@ static inline int __down_read_killable(struct rw_semaphore *sem)
 
 static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
+	int ret = 0;
 	long tmp;
 
 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
 
+	preempt_disable();
 	tmp = atomic_long_read(&sem->count);
 	while (!(tmp & RWSEM_READ_FAILED_MASK)) {
 		if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
 						    tmp + RWSEM_READER_BIAS)) {
 			rwsem_set_reader_owned(sem);
-			return 1;
+			ret = 1;
+			break;
 		}
 	}
-	return 0;
+	preempt_enable();
+	return ret;
 }
 
 /*
@@ -1301,12 +1299,15 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
  */
 static inline int __down_write_common(struct rw_semaphore *sem, int state)
 {
+	int ret = 0;
+
+	preempt_disable();
 	if (unlikely(!rwsem_write_trylock(sem))) {
 		if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
-			return -EINTR;
+			ret = -EINTR;
 	}
-
-	return 0;
+	preempt_enable();
+	return ret;
 }
 
 static inline void __down_write(struct rw_semaphore *sem)
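All of the converted entry points share one shape: disable preemption, try the fastpath, fall into the slowpath if needed, and re-enable preemption at a single exit. That is why the early returns become a ret variable (or a goto out): every path between preempt_disable() and preempt_enable() must pair up. A compilable userspace sketch of that shape, with no-op stubs standing in for the kernel's preempt_disable()/preempt_enable() and hypothetical fastpath/slowpath helpers (none of these names are the kernel's):

	#include <stdio.h>

	static void preempt_disable(void) { /* kernel: bumps preempt_count */ }
	static void preempt_enable(void)  { /* kernel: drops it, may resched */ }

	static int trylock_fastpath(void) { return 0; }   /* pretend: contended */
	static int slowpath(void)         { return -4; }  /* pretend: -EINTR */

	static int down_write_common_shape(void)
	{
		int ret = 0;

		preempt_disable();
		if (!trylock_fastpath())
			ret = slowpath();
		preempt_enable();	/* single exit: always re-enabled */
		return ret;
	}

	int main(void)
	{
		printf("down_write_common_shape() = %d\n", down_write_common_shape());
		return 0;
	}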
@@ -1321,8 +1322,14 @@ static inline int __down_write_killable(struct rw_semaphore *sem)
 
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
+	int ret;
+
+	preempt_disable();
 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
-	return rwsem_write_trylock(sem);
+	ret = rwsem_write_trylock(sem);
+	preempt_enable();
+
+	return ret;
 }
 
 /*
@@ -1335,6 +1342,7 @@ static inline void __up_read(struct rw_semaphore *sem)
 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
 	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
 
+	preempt_disable();
 	rwsem_clear_reader_owned(sem);
 	tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
 	DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
@@ -1343,6 +1351,7 @@ static inline void __up_read(struct rw_semaphore *sem)
 		clear_nonspinnable(sem);
 		rwsem_wake(sem);
 	}
+	preempt_enable();
 }
 
 /*
@@ -1363,9 +1372,9 @@ static inline void __up_write(struct rw_semaphore *sem)
 	preempt_disable();
 	rwsem_clear_owner(sem);
 	tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
-	preempt_enable();
 	if (unlikely(tmp & RWSEM_FLAG_WAITERS))
 		rwsem_wake(sem);
+	preempt_enable();
 }
 
 /*
@@ -1383,11 +1392,13 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 	 * write side. As such, rely on RELEASE semantics.
 	 */
 	DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
+	preempt_disable();
 	tmp = atomic_long_fetch_add_release(
 		-RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
 	rwsem_set_reader_owned(sem);
 	if (tmp & RWSEM_FLAG_WAITERS)
 		rwsem_downgrade_wake(sem);
+	preempt_enable();
 }
 
 #else /* !CONFIG_PREEMPT_RT */
@@ -1662,6 +1673,12 @@ void down_read_non_owner(struct rw_semaphore *sem)
 {
 	might_sleep();
 	__down_read(sem);
+	/*
+	 * The owner value for a reader-owned lock is mostly for debugging
+	 * purpose only and is not critical to the correct functioning of
+	 * rwsem. So it is perfectly fine to set it in a preempt-enabled
+	 * context here.
+	 */
 	__rwsem_set_reader_owned(sem, NULL);
 }
 EXPORT_SYMBOL(down_read_non_owner);