// SPDX-License-Identifier: GPL-2.0

#include <linux/export.h>
#include <linux/log2.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>
#include <linux/slab.h>

#include "six.h"

#ifdef DEBUG
#define EBUG_ON(cond)			BUG_ON(cond)
#else
#define EBUG_ON(cond)			do {} while (0)
#endif

#define six_acquire(l, t, r, ip)	lock_acquire(l, 0, t, r, 1, NULL, ip)
#define six_release(l, ip)		lock_release(l, ip)

static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type);
/*
 * bits 0-26		reader count
 * bits 26-27		write_locking (a thread is trying to get a write lock,
 *			but does not have one yet)
 * bits 27-28		held for intent
 * bits 28-29		nospin - optimistic spinning has timed out
 * bits 29-30		has read waiters
 * bits 30-31		has intent waiters
 * bits 31-32		has write waiters
 * bits 32-64		sequence number: incremented on every write lock or
 *			unlock, thus bit 32 (sequence number odd) indicates
 *			lock is currently held for write
 */
#define SIX_STATE_READ_OFFSET		0
#define SIX_STATE_READ_BITS		26

#define SIX_STATE_READ_LOCK		~(~0ULL << 26)
#define SIX_STATE_WRITE_LOCKING		(1ULL << 26)
#define SIX_STATE_INTENT_HELD		(1ULL << 27)
#define SIX_STATE_NOSPIN		(1ULL << 28)
#define SIX_STATE_WAITING_READ		(1ULL << (29 + SIX_LOCK_read))
#define SIX_STATE_WAITING_INTENT	(1ULL << (29 + SIX_LOCK_intent))
#define SIX_STATE_WAITING_WRITE		(1ULL << (29 + SIX_LOCK_write))

#define SIX_STATE_SEQ_OFFSET		32
#define SIX_STATE_SEQ_BITS		32
#define SIX_STATE_SEQ			(~0ULL << 32)

#define SIX_LOCK_HELD_read		SIX_STATE_READ_LOCK
#define SIX_LOCK_HELD_intent		SIX_STATE_INTENT_HELD
#define SIX_LOCK_HELD_write		(1ULL << SIX_STATE_SEQ_OFFSET)
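
/*
 * For example, a state of ((u64) 4 << SIX_STATE_SEQ_OFFSET)|SIX_STATE_INTENT_HELD|2
 * decodes as: sequence number 4 (even, so not currently held for write),
 * held for intent, and a reader count of 2.
 */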
struct six_lock_vals {
	/* Value we add to the lock in order to take the lock: */
	u64			lock_val;

	/* If the lock has this value (used as a mask), taking the lock fails: */
	u64			lock_fail;

	/* Value we add to the lock in order to release the lock: */
	u64			unlock_val;

	/* Mask that indicates lock is held for this type: */
	u64			held_mask;

	/* Waitlist we wakeup when releasing the lock: */
	enum six_lock_type	unlock_wakeup;
};

#define LOCK_VALS {							\
	[SIX_LOCK_read] = {						\
		.lock_val	= 1ULL << SIX_STATE_READ_OFFSET,	\
		.lock_fail	= SIX_LOCK_HELD_write|SIX_STATE_WRITE_LOCKING,\
		.unlock_val	= -(1ULL << SIX_STATE_READ_OFFSET),	\
		.held_mask	= SIX_LOCK_HELD_read,			\
		.unlock_wakeup	= SIX_LOCK_write,			\
	},								\
	[SIX_LOCK_intent] = {						\
		.lock_val	= SIX_STATE_INTENT_HELD,		\
		.lock_fail	= SIX_LOCK_HELD_intent,			\
		.unlock_val	= -SIX_STATE_INTENT_HELD,		\
		.held_mask	= SIX_LOCK_HELD_intent,			\
		.unlock_wakeup	= SIX_LOCK_intent,			\
	},								\
	[SIX_LOCK_write] = {						\
		.lock_val	= SIX_LOCK_HELD_write,			\
		.lock_fail	= SIX_LOCK_HELD_read,			\
		.unlock_val	= SIX_LOCK_HELD_write,			\
		.held_mask	= SIX_LOCK_HELD_write,			\
		.unlock_wakeup	= SIX_LOCK_read,			\
	},								\
}
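
/*
 * Note that for SIX_LOCK_write, lock_val and unlock_val are both
 * SIX_LOCK_HELD_write (1ULL << 32, the lowest sequence number bit): taking
 * the lock makes the sequence number odd, and adding the same value again on
 * unlock carries into the next bit, leaving the sequence number even and
 * incremented - so the sequence changes on both write lock and write unlock.
 */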
static inline u32 six_state_seq(u64 state)
{
	return state >> SIX_STATE_SEQ_OFFSET;
}
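
/*
 * Helpers to set/clear flag bits in lock->state: with CONFIG_GENERIC_ATOMIC64,
 * atomic64 ops are emulated with spinlocks, so they can't safely be mixed with
 * plain bitops on the same word and a cmpxchg loop is used instead; otherwise
 * set_bit()/clear_bit() on the low 32 bits of the state is sufficient.
 */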
#ifdef CONFIG_GENERIC_ATOMIC64

static inline void six_set_bitmask(struct six_lock *lock, u64 mask)
{
	u64 old, new, v = atomic64_read(&lock->state);

	do {
		old = new = v;
		if ((old & mask) == mask)
			break;

		new |= mask;
	} while ((v = atomic64_cmpxchg(&lock->state, old, new)) != old);
}

static inline void six_clear_bitmask(struct six_lock *lock, u64 mask)
{
	u64 old, new, v = atomic64_read(&lock->state);

	do {
		old = new = v;
		if (!(old & mask))
			break;

		new &= ~mask;
	} while ((v = atomic64_cmpxchg(&lock->state, old, new)) != old);
}

#else

/*
 * Returns the index of the first set bit, treating @mask as an array of ulongs:
 * that is, a bit index that can be passed to test_bit()/set_bit().
 *
 * Assumes the set bit we want is in the low 4 bytes:
 */
static inline unsigned u64_mask_to_ulong_bitnr(u64 mask)
{
#if BITS_PER_LONG == 64
	return ilog2(mask);
#else
#if defined(__LITTLE_ENDIAN)
	return ilog2((u32) mask);
#elif defined(__BIG_ENDIAN)
	return ilog2((u32) mask) + 32;
#else
#error Unknown byteorder
#endif
#endif
}

static inline void six_set_bitmask(struct six_lock *lock, u64 mask)
{
	unsigned bitnr = u64_mask_to_ulong_bitnr(mask);

	if (!test_bit(bitnr, (unsigned long *) &lock->state))
		set_bit(bitnr, (unsigned long *) &lock->state);
}

static inline void six_clear_bitmask(struct six_lock *lock, u64 mask)
{
	unsigned bitnr = u64_mask_to_ulong_bitnr(mask);

	if (test_bit(bitnr, (unsigned long *) &lock->state))
		clear_bit(bitnr, (unsigned long *) &lock->state);
}

#endif

static inline void six_set_owner(struct six_lock *lock, enum six_lock_type type,
				 u64 old, struct task_struct *owner)
{
	if (type != SIX_LOCK_intent)
		return;

	if (!(old & SIX_LOCK_HELD_intent)) {
		EBUG_ON(lock->owner);
		lock->owner = owner;
	} else {
		EBUG_ON(lock->owner != current);
	}
}

static inline unsigned pcpu_read_count(struct six_lock *lock)
{
	unsigned read_count = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		read_count += *per_cpu_ptr(lock->readers, cpu);
	return read_count;
}
static int __do_six_trylock_type(struct six_lock *lock,
				 enum six_lock_type type,
				 struct task_struct *task,
				 bool try)
{
	const struct six_lock_vals l[] = LOCK_VALS;
	int ret;
	u64 old, new, v;

	EBUG_ON(type == SIX_LOCK_write && lock->owner != task);
	EBUG_ON(type == SIX_LOCK_write &&
		(atomic64_read(&lock->state) & SIX_LOCK_HELD_write));
	EBUG_ON(type == SIX_LOCK_write &&
		(try != !(atomic64_read(&lock->state) & SIX_STATE_WRITE_LOCKING)));

	/*
	 * Percpu reader mode:
	 *
	 * The basic idea behind this algorithm is that you can implement a lock
	 * between two threads without any atomics, just memory barriers:
	 *
	 * For two threads you'll need two variables, one variable for "thread a
	 * has the lock" and another for "thread b has the lock".
	 *
	 * To take the lock, a thread sets its variable indicating that it holds
	 * the lock, then issues a full memory barrier, then reads from the
	 * other thread's variable to check if the other thread thinks it has
	 * the lock.  If we raced, we backoff and retry/sleep.
	 */
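	/*
	 * Here the two "variables" are this cpu's entry in lock->readers (set
	 * by the read path below) and SIX_STATE_WRITE_LOCKING in lock->state
	 * (set by the write path): each side sets its flag, issues a full
	 * barrier, then checks the other side's flag.
	 */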
	if (type == SIX_LOCK_read && lock->readers) {
		preempt_disable();
		this_cpu_inc(*lock->readers); /* signal that we own lock */

		smp_mb();

		old = atomic64_read(&lock->state);
		ret = !(old & l[type].lock_fail);

		this_cpu_sub(*lock->readers, !ret);
		preempt_enable();

		/*
		 * If we failed because a writer was trying to take the
		 * lock, issue a wakeup because we might have caused a
		 * spurious trylock failure:
		 */
		if (old & SIX_STATE_WRITE_LOCKING)
			ret = -1 - SIX_LOCK_write;
	} else if (type == SIX_LOCK_write && lock->readers) {
		if (try) {
			atomic64_add(SIX_STATE_WRITE_LOCKING, &lock->state);
			smp_mb__after_atomic();
		}

		ret = !pcpu_read_count(lock);

		/*
		 * On success, we increment lock->seq; also we clear
		 * write_locking unless we failed from the lock path:
		 */
		v = 0;
		if (ret)
			v += SIX_LOCK_HELD_write;
		if (ret || try)
			v -= SIX_STATE_WRITE_LOCKING;

		if (try && !ret) {
			old = atomic64_add_return(v, &lock->state);
			if (old & SIX_STATE_WAITING_READ)
				ret = -1 - SIX_LOCK_read;
		} else {
			atomic64_add(v, &lock->state);
		}
	} else {
		v = atomic64_read(&lock->state);
		do {
			new = old = v;

			if (!(old & l[type].lock_fail)) {
				new += l[type].lock_val;

				if (type == SIX_LOCK_write)
					new &= ~SIX_STATE_WRITE_LOCKING;
			} else {
				break;
			}
		} while ((v = atomic64_cmpxchg_acquire(&lock->state, old, new)) != old);

		ret = !(old & l[type].lock_fail);

		EBUG_ON(ret && !(atomic64_read(&lock->state) & l[type].held_mask));
	}

	if (ret > 0)
		six_set_owner(lock, type, old, task);

	EBUG_ON(type == SIX_LOCK_write && (try || ret > 0) &&
		(atomic64_read(&lock->state) & SIX_STATE_WRITE_LOCKING));

	return ret;
}
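
/*
 * __six_lock_wakeup() walks the wait list and hands the lock to as many
 * waiters as it can: every waiting reader, or a single intent/write waiter.
 * If granting a lock may itself have caused a spurious trylock failure for
 * another lock type (negative return from __do_six_trylock_type()), loop and
 * process that type's waiters as well.
 */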
static void __six_lock_wakeup(struct six_lock *lock, enum six_lock_type lock_type)
{
	struct six_lock_waiter *w, *next;
	struct task_struct *task;
	bool saw_one;
	int ret;
again:
	ret = 0;
	saw_one = false;
	raw_spin_lock(&lock->wait_lock);

	list_for_each_entry_safe(w, next, &lock->wait_list, list) {
		if (w->lock_want != lock_type)
			continue;

		if (saw_one && lock_type != SIX_LOCK_read)
			goto unlock;
		saw_one = true;

		ret = __do_six_trylock_type(lock, lock_type, w->task, false);
		if (ret <= 0)
			goto unlock;

		__list_del(w->list.prev, w->list.next);
		task = w->task;
		/*
		 * Do no writes to @w besides setting lock_acquired - otherwise
		 * we would need a memory barrier:
		 */
		barrier();
		w->lock_acquired = true;
		wake_up_process(task);
	}

	six_clear_bitmask(lock, SIX_STATE_WAITING_READ << lock_type);
unlock:
	raw_spin_unlock(&lock->wait_lock);

	if (ret < 0) {
		lock_type = -ret - 1;
		goto again;
	}
}

__always_inline
static void six_lock_wakeup(struct six_lock *lock, u64 state,
			    enum six_lock_type lock_type)
{
	if (lock_type == SIX_LOCK_write && (state & SIX_LOCK_HELD_read))
		return;

	if (!(state & (SIX_STATE_WAITING_READ << lock_type)))
		return;

	__six_lock_wakeup(lock, lock_type);
}

__always_inline
static bool do_six_trylock_type(struct six_lock *lock,
				enum six_lock_type type,
				bool try)
{
	int ret;

	ret = __do_six_trylock_type(lock, type, current, try);
	if (ret < 0)
		__six_lock_wakeup(lock, -ret - 1);

	return ret > 0;
}
bool six_trylock_ip_type(struct six_lock *lock, enum six_lock_type type,
			 unsigned long ip)
{
	if (!do_six_trylock_type(lock, type, true))
		return false;

	if (type != SIX_LOCK_write)
		six_acquire(&lock->dep_map, 1, type == SIX_LOCK_read, ip);
	return true;
}
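
/*
 * Re-take @lock in mode @type, but only if its sequence number still matches
 * @seq - i.e. only if the lock has not been write locked or write unlocked
 * since @seq was read.  Returns true on success.
 */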
bool six_relock_ip_type(struct six_lock *lock, enum six_lock_type type,
			unsigned seq, unsigned long ip)
{
	const struct six_lock_vals l[] = LOCK_VALS;
	u64 old, v;

	EBUG_ON(type == SIX_LOCK_write);

	if (type == SIX_LOCK_read &&
	    lock->readers) {
		bool ret;

		preempt_disable();
		this_cpu_inc(*lock->readers);

		smp_mb();

		old = atomic64_read(&lock->state);
		ret = !(old & l[type].lock_fail) && six_state_seq(old) == seq;

		this_cpu_sub(*lock->readers, !ret);
		preempt_enable();

		/*
		 * Similar to the lock path, we may have caused a spurious write
		 * lock fail and need to issue a wakeup:
		 */
		if (ret)
			six_acquire(&lock->dep_map, 1, type == SIX_LOCK_read, ip);
		else if (old & SIX_STATE_WRITE_LOCKING)
			six_lock_wakeup(lock, old, SIX_LOCK_write);

		return ret;
	}

	v = atomic64_read(&lock->state);
	do {
		old = v;

		if ((old & l[type].lock_fail) || six_state_seq(old) != seq)
			return false;
	} while ((v = atomic64_cmpxchg_acquire(&lock->state,
				old,
				old + l[type].lock_val)) != old);

	six_set_owner(lock, type, old, current);
	if (type != SIX_LOCK_write)
		six_acquire(&lock->dep_map, 1, type == SIX_LOCK_read, ip);
	return true;
}
EXPORT_SYMBOL_GPL(six_relock_ip_type);
#ifdef CONFIG_SIX_LOCK_SPIN_ON_OWNER

static inline bool six_can_spin_on_owner(struct six_lock *lock)
{
	struct task_struct *owner;
	bool ret;

	if (need_resched())
		return false;

	rcu_read_lock();
	owner = READ_ONCE(lock->owner);
	ret = !owner || owner_on_cpu(owner);
	rcu_read_unlock();

	return ret;
}

static inline bool six_spin_on_owner(struct six_lock *lock,
				     struct task_struct *owner,
				     u64 end_time)
{
	bool ret = true;
	unsigned loop = 0;

	rcu_read_lock();
	while (lock->owner == owner) {
		/*
		 * Ensure we emit the owner->on_cpu, dereference _after_
		 * checking lock->owner still matches owner. If that fails,
		 * owner might point to freed memory. If it still matches,
		 * the rcu_read_lock() ensures the memory stays valid.
		 */
		barrier();

		if (!owner_on_cpu(owner) || need_resched()) {
			ret = false;
			break;
		}

		if (!(++loop & 0xf) && (time_after64(sched_clock(), end_time))) {
			six_set_bitmask(lock, SIX_STATE_NOSPIN);
			ret = false;
			break;
		}

		cpu_relax();
	}
	rcu_read_unlock();

	return ret;
}

static inline bool six_optimistic_spin(struct six_lock *lock, enum six_lock_type type)
{
	struct task_struct *task = current;
	u64 end_time;

	if (type == SIX_LOCK_write)
		return false;

	preempt_disable();
	if (!six_can_spin_on_owner(lock))
		goto fail;

	if (!osq_lock(&lock->osq))
		goto fail;

	end_time = sched_clock() + 10 * NSEC_PER_USEC;

	while (1) {
		struct task_struct *owner;

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = READ_ONCE(lock->owner);
		if (owner && !six_spin_on_owner(lock, owner, end_time))
			break;

		if (do_six_trylock_type(lock, type, false)) {
			osq_unlock(&lock->osq);
			preempt_enable();
			return true;
		}

		/*
		 * When there's no owner, we might have preempted between the
		 * owner acquiring the lock and setting the owner field. If
		 * we're an RT task that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		cpu_relax();
	}

	osq_unlock(&lock->osq);
fail:
	preempt_enable();

	/*
	 * If we fell out of the spin path because of need_resched(),
	 * reschedule now, before we try-lock again. This avoids getting
	 * scheduled out right after we obtained the lock.
	 */
	if (need_resched())
		schedule();

	return false;
}

#else /* CONFIG_SIX_LOCK_SPIN_ON_OWNER */

static inline bool six_optimistic_spin(struct six_lock *lock, enum six_lock_type type)
{
	return false;
}

#endif
noinline
static int __six_lock_type_slowpath(struct six_lock *lock, enum six_lock_type type,
				    struct six_lock_waiter *wait,
				    six_lock_should_sleep_fn should_sleep_fn, void *p,
				    unsigned long ip)
{
	u64 old;
	int ret = 0;

	if (type == SIX_LOCK_write) {
		EBUG_ON(atomic64_read(&lock->state) & SIX_STATE_WRITE_LOCKING);
		atomic64_add(SIX_STATE_WRITE_LOCKING, &lock->state);
		smp_mb__after_atomic();
	}

	if (six_optimistic_spin(lock, type))
		goto out;

	lock_contended(&lock->dep_map, ip);

	wait->task		= current;
	wait->lock_want		= type;
	wait->lock_acquired	= false;

	raw_spin_lock(&lock->wait_lock);
	six_set_bitmask(lock, SIX_STATE_WAITING_READ << type);
	/*
	 * Retry taking the lock after taking waitlist lock, have raced with an
	 * unlock:
	 */
	ret = __do_six_trylock_type(lock, type, current, false);
	if (ret <= 0) {
		wait->start_time = local_clock();

		if (!list_empty(&lock->wait_list)) {
			struct six_lock_waiter *last =
				list_last_entry(&lock->wait_list,
					struct six_lock_waiter, list);

			if (time_before_eq64(wait->start_time, last->start_time))
				wait->start_time = last->start_time + 1;
		}

		list_add_tail(&wait->list, &lock->wait_list);
	}
	raw_spin_unlock(&lock->wait_lock);

	if (unlikely(ret > 0)) {
		ret = 0;
		goto out;
	}

	if (unlikely(ret < 0)) {
		__six_lock_wakeup(lock, -ret - 1);
		ret = 0;
	}

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (wait->lock_acquired)
			break;

		ret = should_sleep_fn ? should_sleep_fn(lock, p) : 0;
		if (unlikely(ret)) {
			raw_spin_lock(&lock->wait_lock);
			if (!wait->lock_acquired)
				list_del(&wait->list);
			raw_spin_unlock(&lock->wait_lock);

			if (wait->lock_acquired)
				do_six_unlock_type(lock, type);
			break;
		}

		schedule();
	}

	__set_current_state(TASK_RUNNING);
out:
	if (ret && type == SIX_LOCK_write) {
		six_clear_bitmask(lock, SIX_STATE_WRITE_LOCKING);
		old = atomic64_read(&lock->state);
		six_lock_wakeup(lock, old, SIX_LOCK_read);
	}

	return ret;
}
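
/*
 * Lock @lock in mode @type, using the caller-supplied @wait struct for the
 * waitlist entry.  @should_sleep_fn, if non-NULL, is called before each
 * sleep; a nonzero return aborts the lock attempt and is returned to the
 * caller.  Returns 0 once the lock is acquired.
 */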
int six_lock_type_ip_waiter(struct six_lock *lock, enum six_lock_type type,
			    struct six_lock_waiter *wait,
			    six_lock_should_sleep_fn should_sleep_fn, void *p,
			    unsigned long ip)
{
	int ret;

	wait->start_time = 0;

	if (type != SIX_LOCK_write)
		six_acquire(&lock->dep_map, 0, type == SIX_LOCK_read, ip);

	ret = do_six_trylock_type(lock, type, true) ? 0
		: __six_lock_type_slowpath(lock, type, wait, should_sleep_fn, p, ip);

	if (ret && type != SIX_LOCK_write)
		six_release(&lock->dep_map, ip);
	if (!ret)
		lock_acquired(&lock->dep_map, ip);

	return ret;
}
EXPORT_SYMBOL_GPL(six_lock_type_ip_waiter);
__always_inline
static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type)
{
	const struct six_lock_vals l[] = LOCK_VALS;
	u64 state;

	if (type == SIX_LOCK_intent)
		lock->owner = NULL;

	if (type == SIX_LOCK_read &&
	    lock->readers) {
		smp_mb(); /* unlock barrier */
		this_cpu_dec(*lock->readers);
		smp_mb(); /* between unlocking and checking for waiters */
		state = atomic64_read(&lock->state);
	} else {
		u64 v = l[type].unlock_val;

		if (type != SIX_LOCK_read)
			v -= atomic64_read(&lock->state) & SIX_STATE_NOSPIN;

		EBUG_ON(!(atomic64_read(&lock->state) & l[type].held_mask));
		state = atomic64_add_return_release(v, &lock->state);
	}

	six_lock_wakeup(lock, state, l[type].unlock_wakeup);
}
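
/*
 * Unlock @lock in mode @type.  Intent locks may be taken recursively; an
 * unlock of a recursively held intent lock just drops the recursion count.
 */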
void six_unlock_ip_type(struct six_lock *lock, enum six_lock_type type, unsigned long ip)
{
	EBUG_ON(type == SIX_LOCK_write &&
		!(atomic64_read(&lock->state) & SIX_LOCK_HELD_intent));
	EBUG_ON((type == SIX_LOCK_write ||
		 type == SIX_LOCK_intent) &&
		lock->owner != current);

	if (type != SIX_LOCK_write)
		six_release(&lock->dep_map, ip);

	if (type == SIX_LOCK_intent &&
	    lock->intent_lock_recurse) {
		--lock->intent_lock_recurse;
		return;
	}

	do_six_unlock_type(lock, type);
}
EXPORT_SYMBOL_GPL(six_unlock_ip_type);
/* Convert from intent to read: */
void six_lock_downgrade(struct six_lock *lock)
{
	six_lock_increment(lock, SIX_LOCK_read);
	six_unlock_intent(lock);
}
EXPORT_SYMBOL_GPL(six_lock_downgrade);
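
/* Try to convert a held read lock to an intent lock; returns true on success: */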
bool six_lock_tryupgrade(struct six_lock *lock)
{
	const struct six_lock_vals l[] = LOCK_VALS;
	u64 old, new, v = atomic64_read(&lock->state);

	do {
		new = old = v;

		if (new & SIX_LOCK_HELD_intent)
			return false;

		if (!lock->readers) {
			EBUG_ON(!(new & SIX_LOCK_HELD_read));
			new += l[SIX_LOCK_read].unlock_val;
		}

		new |= SIX_LOCK_HELD_intent;
	} while ((v = atomic64_cmpxchg_acquire(&lock->state, old, new)) != old);

	if (lock->readers)
		this_cpu_dec(*lock->readers);

	six_set_owner(lock, SIX_LOCK_intent, old, current);

	return true;
}
EXPORT_SYMBOL_GPL(six_lock_tryupgrade);

bool six_trylock_convert(struct six_lock *lock,
			 enum six_lock_type from,
			 enum six_lock_type to)
{
	EBUG_ON(to == SIX_LOCK_write || from == SIX_LOCK_write);

	if (to == from)
		return true;

	if (to == SIX_LOCK_read) {
		six_lock_downgrade(lock);
		return true;
	} else {
		return six_lock_tryupgrade(lock);
	}
}
EXPORT_SYMBOL_GPL(six_trylock_convert);
/*
 * Increment read/intent lock count, assuming we already have it read or intent
 * locked:
 */
void six_lock_increment(struct six_lock *lock, enum six_lock_type type)
{
	const struct six_lock_vals l[] = LOCK_VALS;

	six_acquire(&lock->dep_map, 0, type == SIX_LOCK_read, _RET_IP_);

	/* XXX: assert already locked, and that we don't overflow: */

	switch (type) {
	case SIX_LOCK_read:
		if (lock->readers) {
			this_cpu_inc(*lock->readers);
		} else {
			EBUG_ON(!(atomic64_read(&lock->state) &
				  (SIX_LOCK_HELD_read|
				   SIX_LOCK_HELD_intent)));
			atomic64_add(l[type].lock_val, &lock->state);
		}
		break;
	case SIX_LOCK_intent:
		EBUG_ON(!(atomic64_read(&lock->state) & SIX_LOCK_HELD_intent));
		lock->intent_lock_recurse++;
		break;
	case SIX_LOCK_write:
		BUG();
		break;
	}
}
EXPORT_SYMBOL_GPL(six_lock_increment);

void six_lock_wakeup_all(struct six_lock *lock)
{
	u64 state = atomic64_read(&lock->state);
	struct six_lock_waiter *w;

	six_lock_wakeup(lock, state, SIX_LOCK_read);
	six_lock_wakeup(lock, state, SIX_LOCK_intent);
	six_lock_wakeup(lock, state, SIX_LOCK_write);

	raw_spin_lock(&lock->wait_lock);
	list_for_each_entry(w, &lock->wait_list, list)
		wake_up_process(w->task);
	raw_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL_GPL(six_lock_wakeup_all);

/*
 * Returns lock held counts, for both read and intent
 */
struct six_lock_count six_lock_counts(struct six_lock *lock)
{
	struct six_lock_count ret;

	ret.n[SIX_LOCK_read]   = !lock->readers
		? atomic64_read(&lock->state) & SIX_STATE_READ_LOCK
		: pcpu_read_count(lock);
	ret.n[SIX_LOCK_intent] = !!(atomic64_read(&lock->state) & SIX_LOCK_HELD_intent) +
		lock->intent_lock_recurse;
	ret.n[SIX_LOCK_write]  = !!(atomic64_read(&lock->state) & SIX_LOCK_HELD_write);

	return ret;
}
EXPORT_SYMBOL_GPL(six_lock_counts);
void six_lock_readers_add(struct six_lock *lock, int nr)
{
	if (lock->readers)
		this_cpu_add(*lock->readers, nr);
	else /* reader count starts at bit 0 */
		atomic64_add(nr, &lock->state);
}
EXPORT_SYMBOL_GPL(six_lock_readers_add);

void six_lock_exit(struct six_lock *lock)
{
	WARN_ON(lock->readers && pcpu_read_count(lock));
	WARN_ON(atomic64_read(&lock->state) & SIX_LOCK_HELD_read);

	free_percpu(lock->readers);
	lock->readers = NULL;
}
EXPORT_SYMBOL_GPL(six_lock_exit);

void __six_lock_init(struct six_lock *lock, const char *name,
		     struct lock_class_key *key, enum six_lock_init_flags flags)
{
	atomic64_set(&lock->state, 0);
	raw_spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	debug_check_no_locks_freed((void *) lock, sizeof(*lock));
	lockdep_init_map(&lock->dep_map, name, key, 0);
#endif

	if (flags & SIX_LOCK_INIT_PCPU) {
		/*
		 * We don't return an error here on memory allocation failure
		 * since percpu is an optimization, and locks will work with the
		 * same semantics in non-percpu mode: callers can check for
		 * failure if they wish by checking lock->readers, but generally
		 * will not want to treat it as an error.
		 */
		lock->readers = alloc_percpu(unsigned);
	}
}
EXPORT_SYMBOL_GPL(__six_lock_init);