/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_BTREE_LOCKING_H
#define _BCACHEFS_BTREE_LOCKING_H

/*
 * Only for internal btree use:
 *
 * The btree iterator tracks what locks it wants to take, and what locks it
 * currently has - here we have wrappers for locking/unlocking btree nodes and
 * updating the iterator state
 */

#include "btree_iter.h"
#include "six.h"

/* matches six lock types */
enum btree_node_locked_type {
	BTREE_NODE_UNLOCKED		= -1,
	BTREE_NODE_READ_LOCKED		= SIX_LOCK_read,
	BTREE_NODE_INTENT_LOCKED	= SIX_LOCK_intent,
};

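/*
 * Lock state is tracked per path as two per-level bitmasks: path->nodes_locked
 * has a bit set for every level holding any lock, and path->nodes_intent_locked
 * additionally has the bit set when that lock is an intent lock. For example,
 * a read lock at level 2 yields -1 + 1 + 0 == BTREE_NODE_READ_LOCKED below,
 * and an intent lock yields -1 + 1 + 1 == BTREE_NODE_INTENT_LOCKED.
 */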
static inline int btree_node_locked_type(struct btree_path *path,
					 unsigned level)
{
	/*
	 * We're relying on the fact that if nodes_intent_locked is set
	 * nodes_locked must be set as well, so that we can compute without
	 * branches:
	 */
	return BTREE_NODE_UNLOCKED +
		((path->nodes_locked >> level) & 1) +
		((path->nodes_intent_locked >> level) & 1);
}

static inline bool btree_node_intent_locked(struct btree_path *path,
					    unsigned level)
{
	return btree_node_locked_type(path, level) == BTREE_NODE_INTENT_LOCKED;
}

static inline bool btree_node_read_locked(struct btree_path *path,
					  unsigned level)
{
	return btree_node_locked_type(path, level) == BTREE_NODE_READ_LOCKED;
}

static inline bool btree_node_locked(struct btree_path *path, unsigned level)
{
	return path->nodes_locked & (1 << level);
}

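/*
 * The mark_btree_node_*() helpers below only update the path's lock
 * bookkeeping bitmasks - they don't touch the six locks themselves; callers
 * pair them with the actual lock/unlock operations.
 */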
static inline void mark_btree_node_unlocked(struct btree_path *path,
					    unsigned level)
{
	path->nodes_locked &= ~(1 << level);
	path->nodes_intent_locked &= ~(1 << level);
}

static inline void mark_btree_node_locked_noreset(struct btree_trans *trans,
						  struct btree_path *path,
						  unsigned level,
						  enum six_lock_type type)
{
	/* relying on this to avoid a branch */
	BUILD_BUG_ON(SIX_LOCK_read   != 0);
	BUILD_BUG_ON(SIX_LOCK_intent != 1);

	path->nodes_locked |= 1 << level;
	path->nodes_intent_locked |= type << level;
}

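/*
 * mark_btree_node_locked() additionally records when the lock was taken, so
 * that btree_node_unlock() can account how long it was held when
 * CONFIG_BCACHEFS_LOCK_TIME_STATS is enabled.
 */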
static inline void mark_btree_node_locked(struct btree_trans *trans,
					  struct btree_path *path,
					  unsigned level,
					  enum six_lock_type type)
{
	mark_btree_node_locked_noreset(trans, path, level, type);
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
	path->l[level].lock_taken_time = ktime_get_ns();
#endif
}

static inline void mark_btree_node_intent_locked(struct btree_trans *trans,
						 struct btree_path *path,
						 unsigned level)
{
	mark_btree_node_locked_noreset(trans, path, level, SIX_LOCK_intent);
}

static inline enum six_lock_type __btree_lock_want(struct btree_path *path, int level)
{
	return level < path->locks_want
		? SIX_LOCK_intent
		: SIX_LOCK_read;
}

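/*
 * The lock type wanted at a given level: levels below the path's current
 * level aren't locked at all; from there up to (but not including) locks_want
 * we want intent locks; the path's own level gets at least a read lock;
 * everything above that is left unlocked.
 */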
static inline enum btree_node_locked_type
btree_lock_want(struct btree_path *path, int level)
{
	if (level < path->level)
		return BTREE_NODE_UNLOCKED;
	if (level < path->locks_want)
		return BTREE_NODE_INTENT_LOCKED;
	if (level == path->level)
		return BTREE_NODE_READ_LOCKED;
	return BTREE_NODE_UNLOCKED;
}

static inline void btree_node_unlock(struct btree_trans *trans,
				     struct btree_path *path, unsigned level)
{
	int lock_type = btree_node_locked_type(path, level);

	EBUG_ON(level >= BTREE_MAX_DEPTH);

	if (lock_type != BTREE_NODE_UNLOCKED) {
		six_unlock_type(&path->l[level].b->c.lock, lock_type);
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
		if (trans->lock_name_idx < BCH_LOCK_TIME_NR) {
			struct bch_fs *c = trans->c;

			__bch2_time_stats_update(&c->lock_held_stats.times[trans->lock_name_idx],
						 path->l[level].lock_taken_time,
						 ktime_get_ns());
		}
#endif
	}
	mark_btree_node_unlocked(path, level);
}

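/*
 * Drop every lock the path currently holds (walking the set bits of
 * nodes_locked) and mark the path as needing to be relocked before it can be
 * used again.
 */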
static inline void __bch2_btree_path_unlock(struct btree_trans *trans,
					    struct btree_path *path)
{
	btree_path_set_dirty(path, BTREE_ITER_NEED_RELOCK);

	while (path->nodes_locked)
		btree_node_unlock(trans, path, __ffs(path->nodes_locked));
}

static inline enum bch_time_stats lock_to_time_stat(enum six_lock_type type)
{
	switch (type) {
	case SIX_LOCK_read:
		return BCH_TIME_btree_lock_contended_read;
	case SIX_LOCK_intent:
		return BCH_TIME_btree_lock_contended_intent;
	case SIX_LOCK_write:
		return BCH_TIME_btree_lock_contended_write;
	default:
		BUG();
	}
}

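/*
 * Take a lock on a single node: try the uncontended fast path first; if that
 * fails, record what we're about to block on in the transaction (so it's
 * visible to other code while we sleep), take the lock, and account the time
 * spent waiting to the per-lock-type contention stats.
 */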
static inline int btree_node_lock_type(struct btree_trans *trans,
				       struct btree_path *path,
				       struct btree *b,
				       struct bpos pos, unsigned level,
				       enum six_lock_type type,
				       six_lock_should_sleep_fn should_sleep_fn, void *p)
{
	struct bch_fs *c = trans->c;
	u64 start_time;
	int ret;

	if (six_trylock_type(&b->c.lock, type))
		return 0;

	start_time = local_clock();

	trans->locking_path_idx		= path->idx;
	trans->locking_pos		= pos;
	trans->locking_btree_id		= path->btree_id;
	trans->locking_level		= level;
	trans->locking_lock_type	= type;
	trans->locking			= &b->c;
	ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p);
	trans->locking = NULL;

	if (ret)
		return ret;

	bch2_time_stats_update(&c->times[lock_to_time_stat(type)], start_time);
	return 0;
}

/*
 * Lock a btree node if we already have it locked on one of our linked
 * iterators:
 */
static inline bool btree_node_lock_increment(struct btree_trans *trans,
					     struct btree *b, unsigned level,
					     enum btree_node_locked_type want)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		if (path->l[level].b == b &&
		    btree_node_locked_type(path, level) >= want) {
			six_lock_increment(&b->c.lock, want);
			return true;
		}

	return false;
}

int __bch2_btree_node_lock(struct btree_trans *, struct btree_path *,
			   struct btree *, struct bpos, unsigned,
			   enum six_lock_type,
			   six_lock_should_sleep_fn, void *,
			   unsigned long);

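/*
 * btree_node_lock() tries three things in order: an uncontended trylock, then
 * bumping the lock count if another path in this transaction already holds
 * the node at the type we want (btree_node_lock_increment()), and finally the
 * full slow path in __bch2_btree_node_lock(). On success the acquisition time
 * is recorded when CONFIG_BCACHEFS_LOCK_TIME_STATS is enabled.
 */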
static inline int btree_node_lock(struct btree_trans *trans,
				  struct btree_path *path,
				  struct btree *b, struct bpos pos, unsigned level,
				  enum six_lock_type type,
				  six_lock_should_sleep_fn should_sleep_fn, void *p,
				  unsigned long ip)
{
	int ret = 0;

	EBUG_ON(level >= BTREE_MAX_DEPTH);
	EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));

	if (likely(six_trylock_type(&b->c.lock, type)) ||
	    btree_node_lock_increment(trans, b, level, type) ||
	    !(ret = __bch2_btree_node_lock(trans, path, b, pos, level, type,
					   should_sleep_fn, p, ip))) {
#ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
		path->l[b->c.level].lock_taken_time = ktime_get_ns();
#endif
	}

	return ret;
}

bool __bch2_btree_node_relock(struct btree_trans *, struct btree_path *, unsigned);

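/*
 * Relock a node at the lock type this path wants at this level: a no-op if
 * it's already locked, otherwise fall back to __bch2_btree_node_relock(),
 * which only succeeds if the node's lock sequence number still matches the
 * one the path saved.
 */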
static inline bool bch2_btree_node_relock(struct btree_trans *trans,
					  struct btree_path *path, unsigned level)
{
	EBUG_ON(btree_node_locked(path, level) &&
		btree_node_locked_type(path, level) !=
		__btree_lock_want(path, level));

	return likely(btree_node_locked(path, level)) ||
		__bch2_btree_node_relock(trans, path, level);
}

/*
 * Updates the saved lock sequence number, so that bch2_btree_node_relock()
 * will succeed: while the write lock is held the node's sequence number is
 * one greater than the saved one, and dropping the write lock bumps it again,
 * so adding 2 here keeps the saved value in sync.
 */
static inline void
bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_path *path,
				     struct btree *b)
{
	struct btree_path *linked;

	EBUG_ON(path->l[b->c.level].b != b);
	EBUG_ON(path->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);

	trans_for_each_path_with_node(trans, b, linked)
		linked->l[b->c.level].lock_seq += 2;

	six_unlock_write(&b->c.lock);
}

void bch2_btree_node_unlock_write(struct btree_trans *,
				  struct btree_path *, struct btree *);

void __bch2_btree_node_lock_write(struct btree_trans *, struct btree *);

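/*
 * Take the write lock on a node this path already has intent locked (the
 * assertions below check exactly that); falls back to
 * __bch2_btree_node_lock_write() if the uncontended trylock fails.
 */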
static inline void bch2_btree_node_lock_write(struct btree_trans *trans,
					      struct btree_path *path,
					      struct btree *b)
{
	EBUG_ON(path->l[b->c.level].b != b);
	EBUG_ON(path->l[b->c.level].lock_seq != b->c.lock.state.seq);
	EBUG_ON(!btree_node_intent_locked(path, b->c.level));

	if (unlikely(!six_trylock_write(&b->c.lock)))
		__bch2_btree_node_lock_write(trans, b);
}

#endif /* _BCACHEFS_BTREE_LOCKING_H */