2017-03-17 09:18:50 +03:00
/* SPDX-License-Identifier: GPL-2.0 */
# ifndef _BCACHEFS_BTREE_LOCKING_H
# define _BCACHEFS_BTREE_LOCKING_H
/*
* Only for internal btree use :
*
* The btree iterator tracks what locks it wants to take , and what locks it
* currently has - here we have wrappers for locking / unlocking btree nodes and
* updating the iterator state
*/
# include "btree_iter.h"
# include "six.h"
2022-08-19 22:35:34 +03:00
extern struct lock_class_key bch2_btree_node_lock_key ;
2022-08-10 19:42:55 +03:00
static inline bool is_btree_node ( struct btree_path * path , unsigned l )
{
return l < BTREE_MAX_DEPTH & & ! IS_ERR_OR_NULL ( path - > l [ l ] . b ) ;
}
2022-08-20 02:50:18 +03:00
static inline struct btree_transaction_stats * btree_trans_stats ( struct btree_trans * trans )
{
return trans - > fn_idx < ARRAY_SIZE ( trans - > c - > btree_transaction_stats )
? & trans - > c - > btree_transaction_stats [ trans - > fn_idx ]
: NULL ;
}
2017-03-17 09:18:50 +03:00
/* matches six lock types */
enum btree_node_locked_type {
	BTREE_NODE_UNLOCKED	= -1,			/* no lock held at this level */
	BTREE_NODE_READ_LOCKED	= SIX_LOCK_read,
	BTREE_NODE_INTENT_LOCKED = SIX_LOCK_intent,
	BTREE_NODE_WRITE_LOCKED	= SIX_LOCK_write,
};
2021-08-30 22:18:31 +03:00
static inline int btree_node_locked_type ( struct btree_path * path ,
2017-03-17 09:18:50 +03:00
unsigned level )
{
2022-08-22 00:20:42 +03:00
return BTREE_NODE_UNLOCKED + ( ( path - > nodes_locked > > ( level < < 1 ) ) & 3 ) ;
2017-03-17 09:18:50 +03:00
}
2022-08-23 04:05:31 +03:00
/* does this path hold a write lock at level @l? */
static inline bool btree_node_write_locked(struct btree_path *path, unsigned l)
{
	int held = btree_node_locked_type(path, l);

	return held == BTREE_NODE_WRITE_LOCKED;
}
static inline bool btree_node_intent_locked ( struct btree_path * path , unsigned l )
2017-03-17 09:18:50 +03:00
{
2022-08-23 04:05:31 +03:00
return btree_node_locked_type ( path , l ) = = BTREE_NODE_INTENT_LOCKED ;
2017-03-17 09:18:50 +03:00
}
2022-08-23 04:05:31 +03:00
static inline bool btree_node_read_locked ( struct btree_path * path , unsigned l )
2017-03-17 09:18:50 +03:00
{
2022-08-23 04:05:31 +03:00
return btree_node_locked_type ( path , l ) = = BTREE_NODE_READ_LOCKED ;
2017-03-17 09:18:50 +03:00
}
2021-08-30 22:18:31 +03:00
static inline bool btree_node_locked ( struct btree_path * path , unsigned level )
2017-03-17 09:18:50 +03:00
{
2022-08-22 01:17:51 +03:00
return btree_node_locked_type ( path , level ) ! = BTREE_NODE_UNLOCKED ;
2017-03-17 09:18:50 +03:00
}
2022-08-22 01:17:51 +03:00
static inline void mark_btree_node_locked_noreset ( struct btree_path * path ,
2022-08-22 00:20:42 +03:00
unsigned level ,
enum btree_node_locked_type type )
2017-03-17 09:18:50 +03:00
{
/* relying on this to avoid a branch */
BUILD_BUG_ON ( SIX_LOCK_read ! = 0 ) ;
BUILD_BUG_ON ( SIX_LOCK_intent ! = 1 ) ;
2022-08-22 00:20:42 +03:00
path - > nodes_locked & = ~ ( 3U < < ( level < < 1 ) ) ;
path - > nodes_locked | = ( type + 1 ) < < ( level < < 1 ) ;
2022-08-22 01:17:51 +03:00
}
static inline void mark_btree_node_unlocked ( struct btree_path * path ,
unsigned level )
{
2022-08-23 04:05:31 +03:00
EBUG_ON ( btree_node_write_locked ( path , level ) ) ;
2022-08-22 01:17:51 +03:00
mark_btree_node_locked_noreset ( path , level , BTREE_NODE_UNLOCKED ) ;
2017-03-17 09:18:50 +03:00
}
2022-07-14 11:33:09 +03:00
/*
 * Record that @level is now held with @type, and start the hold-time clock
 * when lock time stats are compiled in.
 */
static inline void mark_btree_node_locked(struct btree_trans *trans,
					  struct btree_path *path,
					  unsigned level,
					  enum six_lock_type type)
{
	mark_btree_node_locked_noreset(path, level, type);
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
	path->l[level].lock_taken_time = ktime_get_ns();
#endif
}
2021-08-30 22:18:31 +03:00
static inline enum six_lock_type __btree_lock_want ( struct btree_path * path , int level )
2017-03-17 09:18:50 +03:00
{
2021-08-30 22:18:31 +03:00
return level < path - > locks_want
2017-03-17 09:18:50 +03:00
? SIX_LOCK_intent
: SIX_LOCK_read ;
}
/*
 * What lock, if any, this path should be holding at @level, given its
 * current level and how many levels it wants intent locks on.
 */
static inline enum btree_node_locked_type
btree_lock_want(struct btree_path *path, int level)
{
	/* below the level the path is at: nothing held */
	if (level < path->level)
		return BTREE_NODE_UNLOCKED;
	if (level < path->locks_want)
		return BTREE_NODE_INTENT_LOCKED;
	if (level == path->level)
		return BTREE_NODE_READ_LOCKED;
	/* above both level and locks_want: nothing held */
	return BTREE_NODE_UNLOCKED;
}
2022-08-12 02:36:24 +03:00
/*
 * Fold the time the lock at @level was held into this transaction's
 * lock_hold_times stats; no-op unless CONFIG_BCACHEFS_LOCK_TIME_STATS.
 *
 * Must be static *inline*: a plain static function defined in a header
 * triggers -Wunused-function in every translation unit that includes us
 * without calling it.
 */
static inline void btree_trans_lock_hold_time_update(struct btree_trans *trans,
						     struct btree_path *path,
						     unsigned level)
{
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
	struct btree_transaction_stats *s = btree_trans_stats(trans);

	if (s)
		__bch2_time_stats_update(&s->lock_hold_times,
					 path->l[level].lock_taken_time,
					 ktime_get_ns());
#endif
}
2022-08-20 02:50:18 +03:00
/* unlock: */
2022-07-14 09:58:23 +03:00
static inline void btree_node_unlock ( struct btree_trans * trans ,
struct btree_path * path , unsigned level )
2017-03-17 09:18:50 +03:00
{
2021-08-30 22:18:31 +03:00
int lock_type = btree_node_locked_type ( path , level ) ;
2017-03-17 09:18:50 +03:00
EBUG_ON ( level > = BTREE_MAX_DEPTH ) ;
2022-07-14 11:33:09 +03:00
if ( lock_type ! = BTREE_NODE_UNLOCKED ) {
2021-08-30 22:18:31 +03:00
six_unlock_type ( & path - > l [ level ] . b - > c . lock , lock_type ) ;
2022-08-12 02:36:24 +03:00
btree_trans_lock_hold_time_update ( trans , path , level ) ;
2022-07-14 11:33:09 +03:00
}
2021-08-30 22:18:31 +03:00
mark_btree_node_unlocked ( path , level ) ;
2017-03-17 09:18:50 +03:00
}
2022-08-22 01:17:51 +03:00
static inline int btree_path_lowest_level_locked ( struct btree_path * path )
{
2022-08-22 00:20:42 +03:00
return __ffs ( path - > nodes_locked ) > > 1 ;
2022-08-22 01:17:51 +03:00
}
static inline int btree_path_highest_level_locked ( struct btree_path * path )
{
2022-08-22 00:20:42 +03:00
return __fls ( path - > nodes_locked ) > > 1 ;
2022-08-22 01:17:51 +03:00
}
2022-07-14 09:58:23 +03:00
static inline void __bch2_btree_path_unlock ( struct btree_trans * trans ,
struct btree_path * path )
2017-03-17 09:18:50 +03:00
{
2021-08-30 22:18:31 +03:00
btree_path_set_dirty ( path , BTREE_ITER_NEED_RELOCK ) ;
2017-03-17 09:18:50 +03:00
2021-08-30 22:18:31 +03:00
while ( path - > nodes_locked )
2022-08-22 01:17:51 +03:00
btree_node_unlock ( trans , path , btree_path_lowest_level_locked ( path ) ) ;
2017-03-17 09:18:50 +03:00
}
2022-08-20 02:50:18 +03:00
/*
* Updates the saved lock sequence number , so that bch2_btree_node_relock ( ) will
* succeed :
*/
static inline void
bch2_btree_node_unlock_write_inlined ( struct btree_trans * trans , struct btree_path * path ,
struct btree * b )
2017-03-17 09:18:50 +03:00
{
2022-08-20 02:50:18 +03:00
struct btree_path * linked ;
EBUG_ON ( path - > l [ b - > c . level ] . b ! = b ) ;
EBUG_ON ( path - > l [ b - > c . level ] . lock_seq + 1 ! = b - > c . lock . state . seq ) ;
2022-08-23 04:05:31 +03:00
EBUG_ON ( btree_node_locked_type ( path , b - > c . level ) ! = SIX_LOCK_write ) ;
mark_btree_node_locked_noreset ( path , b - > c . level , SIX_LOCK_intent ) ;
2022-08-20 02:50:18 +03:00
trans_for_each_path_with_node ( trans , b , linked )
linked - > l [ b - > c . level ] . lock_seq + = 2 ;
six_unlock_write ( & b - > c . lock ) ;
2017-03-17 09:18:50 +03:00
}
2022-08-20 02:50:18 +03:00
void bch2_btree_node_unlock_write(struct btree_trans *,
				  struct btree_path *, struct btree *);

/* lock: */
2022-08-21 21:29:43 +03:00
/*
 * Lock a node without any path bookkeeping; always returns 0 (the
 * __must_check return exists so callers match the fallible lock paths).
 */
static inline int __must_check
btree_node_lock_nopath(struct btree_trans *trans,
		       struct btree_bkey_cached_common *b,
		       enum six_lock_type type)
{
	six_lock_type(&b->lock, type, NULL, NULL);
	return 0;
}
/* as btree_node_lock_nopath(), but any failure is a bug */
static inline void btree_node_lock_nopath_nofail(struct btree_trans *trans,
						 struct btree_bkey_cached_common *b,
						 enum six_lock_type type)
{
	int ret = btree_node_lock_nopath(trans, b, type);

	BUG_ON(ret);
}
2022-07-18 06:06:38 +03:00
/*
 * Take a six lock of @type on @b, publishing what we're blocked on in the
 * transaction (trans->locking*) while we wait so debug/cycle-detection code
 * can see it.
 */
static inline int btree_node_lock_type(struct btree_trans *trans,
				       struct btree_path *path,
				       struct btree_bkey_cached_common *b,
				       struct bpos pos, unsigned level,
				       enum six_lock_type type,
				       six_lock_should_sleep_fn should_sleep_fn, void *p)
{
	int ret;

	/* fast path: uncontended */
	if (six_trylock_type(&b->lock, type))
		return 0;

	trans->locking_path_idx	= path->idx;
	trans->locking_pos	= pos;
	trans->locking_btree_id	= path->btree_id;
	trans->locking_level	= level;
	trans->locking_lock_type = type;
	trans->locking		= b;

	ret = six_lock_type(&b->lock, type, should_sleep_fn, p);

	trans->locking = NULL;

	return ret;
}
2018-07-24 23:42:27 +03:00
/*
* Lock a btree node if we already have it locked on one of our linked
* iterators :
*/
2020-06-12 21:58:07 +03:00
static inline bool btree_node_lock_increment ( struct btree_trans * trans ,
2022-08-22 20:21:10 +03:00
struct btree_bkey_cached_common * b ,
unsigned level ,
2018-07-24 23:42:27 +03:00
enum btree_node_locked_type want )
{
2021-08-30 22:18:31 +03:00
struct btree_path * path ;
2018-07-24 23:42:27 +03:00
2021-08-30 22:18:31 +03:00
trans_for_each_path ( trans , path )
2022-08-22 20:21:10 +03:00
if ( & path - > l [ level ] . b - > c = = b & &
2021-08-30 22:18:31 +03:00
btree_node_locked_type ( path , level ) > = want ) {
2022-08-22 20:21:10 +03:00
six_lock_increment ( & b - > lock , want ) ;
2018-07-24 23:42:27 +03:00
return true ;
}
return false ;
}
2022-07-18 06:06:38 +03:00
int __bch2_btree_node_lock(struct btree_trans *, struct btree_path *,
			   struct btree_bkey_cached_common *,
			   struct bpos, unsigned,
			   enum six_lock_type,
			   six_lock_should_sleep_fn, void *,
			   unsigned long);
2017-03-17 09:18:50 +03:00
2022-07-18 06:06:38 +03:00
static inline int btree_node_lock ( struct btree_trans * trans ,
2021-08-30 22:18:31 +03:00
struct btree_path * path ,
2022-08-22 20:21:10 +03:00
struct btree_bkey_cached_common * b ,
struct bpos pos , unsigned level ,
2020-06-13 05:29:48 +03:00
enum six_lock_type type ,
2020-10-28 21:17:46 +03:00
six_lock_should_sleep_fn should_sleep_fn , void * p ,
unsigned long ip )
2017-03-17 09:18:50 +03:00
{
2022-07-18 06:06:38 +03:00
int ret = 0 ;
2017-03-17 09:18:50 +03:00
EBUG_ON ( level > = BTREE_MAX_DEPTH ) ;
2021-08-30 22:18:31 +03:00
EBUG_ON ( ! ( trans - > paths_allocated & ( 1ULL < < path - > idx ) ) ) ;
2020-06-13 05:29:48 +03:00
2022-08-22 20:21:10 +03:00
if ( likely ( six_trylock_type ( & b - > lock , type ) ) | |
2022-07-18 06:06:38 +03:00
btree_node_lock_increment ( trans , b , level , type ) | |
! ( ret = __bch2_btree_node_lock ( trans , path , b , pos , level , type ,
should_sleep_fn , p , ip ) ) ) {
2022-07-14 11:33:09 +03:00
# ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
2022-08-22 20:21:10 +03:00
path - > l [ b - > level ] . lock_taken_time = ktime_get_ns ( ) ;
2022-07-14 11:33:09 +03:00
# endif
}
2022-07-18 06:06:38 +03:00
return ret ;
2017-03-17 09:18:50 +03:00
}
2021-08-30 21:22:43 +03:00
void __bch2_btree_node_lock_write ( struct btree_trans * , struct btree * ) ;
2017-03-17 09:18:50 +03:00
2021-08-25 04:30:06 +03:00
static inline void bch2_btree_node_lock_write ( struct btree_trans * trans ,
2021-08-30 22:18:31 +03:00
struct btree_path * path ,
2021-08-25 04:30:06 +03:00
struct btree * b )
2017-03-17 09:18:50 +03:00
{
2021-08-30 22:18:31 +03:00
EBUG_ON ( path - > l [ b - > c . level ] . b ! = b ) ;
EBUG_ON ( path - > l [ b - > c . level ] . lock_seq ! = b - > c . lock . state . seq ) ;
EBUG_ON ( ! btree_node_intent_locked ( path , b - > c . level ) ) ;
2017-03-17 09:18:50 +03:00
2022-08-26 21:55:00 +03:00
/*
* six locks are unfair , and read locks block while a thread wants a
* write lock : thus , we need to tell the cycle detector we have a write
* lock _before_ taking the lock :
*/
mark_btree_node_locked_noreset ( path , b - > c . level , SIX_LOCK_write ) ;
2019-09-21 22:29:34 +03:00
if ( unlikely ( ! six_trylock_write ( & b - > c . lock ) ) )
2021-08-30 21:22:43 +03:00
__bch2_btree_node_lock_write ( trans , b ) ;
2017-03-17 09:18:50 +03:00
}
2022-08-20 02:50:18 +03:00
/* relock: */
bool bch2_btree_path_relock_norestart ( struct btree_trans * ,
struct btree_path * , unsigned long ) ;
int __bch2_btree_path_relock ( struct btree_trans * ,
struct btree_path * , unsigned long ) ;
static inline int bch2_btree_path_relock ( struct btree_trans * trans ,
struct btree_path * path , unsigned long trace_ip )
{
return btree_node_locked ( path , path - > level )
? 0
: __bch2_btree_path_relock ( trans , path , trace_ip ) ;
}
bool __bch2_btree_node_relock ( struct btree_trans * , struct btree_path * , unsigned ) ;
static inline bool bch2_btree_node_relock ( struct btree_trans * trans ,
struct btree_path * path , unsigned level )
{
EBUG_ON ( btree_node_locked ( path , level ) & &
2022-08-23 04:05:31 +03:00
! btree_node_write_locked ( path , level ) & &
btree_node_locked_type ( path , level ) ! = __btree_lock_want ( path , level ) ) ;
2022-08-20 02:50:18 +03:00
return likely ( btree_node_locked ( path , level ) ) | |
2022-08-27 19:28:09 +03:00
( ! IS_ERR_OR_NULL ( path - > l [ level ] . b ) & &
__bch2_btree_node_relock ( trans , path , level ) ) ;
2022-08-20 02:50:18 +03:00
}
/* upgrade */
2022-08-19 22:35:34 +03:00
bool bch2_btree_path_upgrade_noupgrade_sibs ( struct btree_trans * ,
struct btree_path * , unsigned ) ;
bool __bch2_btree_path_upgrade ( struct btree_trans * ,
struct btree_path * , unsigned ) ;
static inline bool bch2_btree_path_upgrade ( struct btree_trans * trans ,
struct btree_path * path ,
unsigned new_locks_want )
{
new_locks_want = min ( new_locks_want , BTREE_MAX_DEPTH ) ;
return path - > locks_want < new_locks_want
? __bch2_btree_path_upgrade ( trans , path , new_locks_want )
: path - > uptodate = = BTREE_ITER_UPTODATE ;
}
2022-08-20 02:50:18 +03:00
/* misc: */
2022-08-11 01:55:53 +03:00
static inline void btree_path_set_should_be_locked ( struct btree_path * path )
{
EBUG_ON ( ! btree_node_locked ( path , path - > level ) ) ;
EBUG_ON ( path - > uptodate ) ;
path - > should_be_locked = true ;
}
2022-08-11 02:08:30 +03:00
static inline void __btree_path_set_level_up ( struct btree_trans * trans ,
struct btree_path * path ,
unsigned l )
{
btree_node_unlock ( trans , path , l ) ;
path - > l [ l ] . b = ERR_PTR ( - BCH_ERR_no_btree_node_up ) ;
}
static inline void btree_path_set_level_up ( struct btree_trans * trans ,
struct btree_path * path )
{
__btree_path_set_level_up ( trans , path , path - > level + + ) ;
btree_path_set_dirty ( path , BTREE_ITER_NEED_TRAVERSE ) ;
}
2022-08-20 02:50:18 +03:00
/* debug */

struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *,
						  struct btree_path *,
						  struct btree_bkey_cached_common *b,
						  unsigned);

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_btree_path_verify_locks(struct btree_path *);
void bch2_trans_verify_locks(struct btree_trans *);
#else
static inline void bch2_btree_path_verify_locks(struct btree_path *path) {}
static inline void bch2_trans_verify_locks(struct btree_trans *trans) {}
#endif
2017-03-17 09:18:50 +03:00
# endif /* _BCACHEFS_BTREE_LOCKING_H */