// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update.h"
#include "debug.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "replicas.h"
#include "trace.h"

#include <linux/prefetch.h>

static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,
				       struct btree_path *);

static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);

static inline int __btree_path_cmp(const struct btree_path *l,
				   enum btree_id	r_btree_id,
				   bool			r_cached,
				   struct bpos		r_pos,
				   unsigned		r_level)
{
	return   cmp_int(l->btree_id,	r_btree_id) ?:
		 cmp_int(l->cached,	r_cached) ?:
		 bpos_cmp(l->pos,	r_pos) ?:
		-cmp_int(l->level,	r_level);
}

static inline int btree_path_cmp(const struct btree_path *l,
				 const struct btree_path *r)
{
	return __btree_path_cmp(l, r->btree_id, r->cached, r->pos, r->level);
}

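/*
 * When BTREE_ITER_ALL_SNAPSHOTS is set, the snapshot field is treated as part
 * of the key and the successor/predecessor of a position steps through it;
 * otherwise only the higher bits of the key change and the iterator's own
 * snapshot is reinstated:
 */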
static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
{
	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
		p = bpos_successor(p);
	} else {
		p = bpos_nosnap_successor(p);
		p.snapshot = iter->snapshot;
	}

	return p;
}

static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
{
	/* Are we iterating over keys in all snapshots? */
	if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
		p = bpos_predecessor(p);
	} else {
		p = bpos_nosnap_predecessor(p);
		p.snapshot = iter->snapshot;
	}

	return p;
}

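/*
 * path->l[].b holds either a pointer to a locked btree node or one of the
 * small BTREE_ITER_NO_NODE_* sentinel values; real node pointers are
 * distinguished by being >= 128:
 */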
static inline bool is_btree_node(struct btree_path *path, unsigned l)
{
	return l < BTREE_MAX_DEPTH &&
		(unsigned long) path->l[l].b >= 128;
}

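/*
 * Extents are indexed by their end position, so to find the extent covering
 * iter->pos we search from the successor of pos (except at POS_MAX):
 */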
static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
{
	struct bpos pos = iter->pos;

	if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
	    bkey_cmp(pos, POS_MAX))
		pos = bkey_successor(iter, pos);
	return pos;
}

static inline bool btree_path_pos_before_node(struct btree_path *path,
					      struct btree *b)
{
	return bpos_cmp(path->pos, b->data->min_key) < 0;
}

static inline bool btree_path_pos_after_node(struct btree_path *path,
					     struct btree *b)
{
	return bpos_cmp(b->key.k.p, path->pos) < 0;
}

static inline bool btree_path_pos_in_node(struct btree_path *path,
					  struct btree *b)
{
	return path->btree_id == b->c.btree_id &&
		!btree_path_pos_before_node(path, b) &&
		!btree_path_pos_after_node(path, b);
}

/* Btree node locking: */

void bch2_btree_node_unlock_write(struct btree_trans *trans,
			struct btree_path *path, struct btree *b)
{
	bch2_btree_node_unlock_write_inlined(trans, path, b);
}

void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
{
	struct btree_path *linked;
	unsigned readers = 0;

	trans_for_each_path(trans, linked)
		if (linked->l[b->c.level].b == b &&
		    btree_node_read_locked(linked, b->c.level))
			readers++;

	/*
	 * Must drop our read locks before calling six_lock_write() -
	 * six_unlock() won't do wakeups until the reader count
	 * goes to 0, and it's safe because we have the node intent
	 * locked:
	 */
	if (!b->c.lock.readers)
		atomic64_sub(__SIX_VAL(read_lock, readers),
			     &b->c.lock.state.counter);
	else
		this_cpu_sub(*b->c.lock.readers, readers);

	btree_node_lock_type(trans->c, b, SIX_LOCK_write);

	if (!b->c.lock.readers)
		atomic64_add(__SIX_VAL(read_lock, readers),
			     &b->c.lock.state.counter);
	else
		this_cpu_add(*b->c.lock.readers, readers);
}

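/*
 * Relock a node this path previously had locked: either by retaking the six
 * lock at the sequence number we remember, or by piggybacking on a lock
 * another path in this transaction already holds:
 */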
bool __bch2_btree_node_relock(struct btree_trans *trans,
			      struct btree_path *path, unsigned level)
{
	struct btree *b = btree_path_node(path, level);
	int want = __btree_lock_want(path, level);

	if (!is_btree_node(path, level))
		return false;

	if (race_fault())
		return false;

	if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
	    (btree_node_lock_seq_matches(path, b, level) &&
	     btree_node_lock_increment(trans, b, level, want))) {
		mark_btree_node_locked(trans, path, level, want);
		return true;
	} else {
		return false;
	}
}

static bool bch2_btree_node_upgrade(struct btree_trans *trans,
				    struct btree_path *path, unsigned level)
{
	struct btree *b = path->l[level].b;

	EBUG_ON(btree_lock_want(path, level) != BTREE_NODE_INTENT_LOCKED);

	if (!is_btree_node(path, level))
		return false;

	if (btree_node_intent_locked(path, level))
		return true;

	if (race_fault())
		return false;

	if (btree_node_locked(path, level)
	    ? six_lock_tryupgrade(&b->c.lock)
	    : six_relock_type(&b->c.lock, SIX_LOCK_intent, path->l[level].lock_seq))
		goto success;

	if (btree_node_lock_seq_matches(path, b, level) &&
	    btree_node_lock_increment(trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
		btree_node_unlock(path, level);
		goto success;
	}

	return false;
success:
	mark_btree_node_intent_locked(trans, path, level);
	return true;
}

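/*
 * Walk up from path->level taking (or upgrading to) the locks this path wants;
 * on failure, the offending level and everything below it is marked as needing
 * a full traverse:
 */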
static inline bool btree_path_get_locks(struct btree_trans *trans,
					struct btree_path *path,
					bool upgrade, unsigned long trace_ip)
{
	unsigned l = path->level;
	int fail_idx = -1;

	do {
		if (!btree_path_node(path, l))
			break;

		if (!(upgrade
		      ? bch2_btree_node_upgrade(trans, path, l)
		      : bch2_btree_node_relock(trans, path, l))) {
			(upgrade
			 ? trace_node_upgrade_fail
			 : trace_node_relock_fail)(trans->ip, trace_ip,
					path->cached,
					path->btree_id, &path->pos,
					l, path->l[l].lock_seq,
					is_btree_node(path, l)
					? 0
					: (unsigned long) path->l[l].b,
					is_btree_node(path, l)
					? path->l[l].b->c.lock.state.seq
					: 0);
			fail_idx = l;
		}

		l++;
	} while (l < path->locks_want);

	/*
	 * When we fail to get a lock, we have to ensure that any child nodes
	 * can't be relocked so bch2_btree_path_traverse has to walk back up to
	 * the node that we failed to relock:
	 */
	if (fail_idx >= 0) {
		__bch2_btree_path_unlock(path);
		btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);

		do {
			path->l[fail_idx].b = BTREE_ITER_NO_NODE_GET_LOCKS;
			--fail_idx;
		} while (fail_idx >= 0);
	}

	if (path->uptodate == BTREE_ITER_NEED_RELOCK)
		path->uptodate = BTREE_ITER_UPTODATE;

	bch2_trans_verify_locks(trans);

	return path->uptodate < BTREE_ITER_NEED_RELOCK;
}

static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
				  bool cached)
{
	return !cached
		? container_of(_b, struct btree, c)->key.k.p
		: container_of(_b, struct bkey_cached, c)->key.pos;
}

/* Slowpath: */

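/*
 * To avoid deadlocking against other transactions, nodes must be locked in a
 * consistent order: by btree id, then (within a btree) cached paths before
 * uncached paths, then interior nodes before their descendants, then in
 * ascending key order within a level. If taking this lock would violate that
 * order with respect to locks we already hold, restart the transaction instead
 * of blocking:
 */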
bool __bch2_btree_node_lock(struct btree_trans *trans,
			    struct btree_path *path,
			    struct btree *b,
			    struct bpos pos, unsigned level,
			    enum six_lock_type type,
			    six_lock_should_sleep_fn should_sleep_fn, void *p,
			    unsigned long ip)
{
	struct btree_path *linked, *deadlock_path = NULL;
	u64 start_time = local_clock();
	unsigned reason = 9;
	bool ret;

	/* Check if it's safe to block: */
	trans_for_each_path(trans, linked) {
		if (!linked->nodes_locked)
			continue;

		/*
		 * Can't block taking an intent lock if we have _any_ nodes read
		 * locked:
		 *
		 * - Our read lock blocks another thread with an intent lock on
		 *   the same node from getting a write lock, and thus from
		 *   dropping its intent lock
		 *
		 * - And the other thread may have multiple nodes intent locked:
		 *   both the node we want to intent lock, and the node we
		 *   already have read locked - deadlock:
		 */
		if (type == SIX_LOCK_intent &&
		    linked->nodes_locked != linked->nodes_intent_locked) {
			deadlock_path = linked;
			reason = 1;
		}

		if (linked->btree_id != path->btree_id) {
			if (linked->btree_id > path->btree_id) {
				deadlock_path = linked;
				reason = 3;
			}
			continue;
		}

		/*
		 * Within the same btree, cached paths come before non-cached
		 * paths:
		 */
		if (linked->cached != path->cached) {
			if (path->cached) {
				deadlock_path = linked;
				reason = 4;
			}
			continue;
		}

		/*
		 * Interior nodes must be locked before their descendants: if
		 * another path might have descendants of the node we're about
		 * to lock locked, it must have the ancestors locked too:
		 */
		if (level > __fls(linked->nodes_locked)) {
			deadlock_path = linked;
			reason = 5;
		}

		/* Must lock btree nodes in key order: */
		if (btree_node_locked(linked, level) &&
		    bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b,
						 linked->cached)) <= 0) {
			deadlock_path = linked;
			reason = 7;
		}
	}

	if (unlikely(deadlock_path)) {
		trace_trans_restart_would_deadlock(trans->ip, ip,
				trans->in_traverse_all, reason,
				deadlock_path->btree_id,
				deadlock_path->cached,
				&deadlock_path->pos,
				path->btree_id,
				path->cached,
				&pos);
		btree_trans_restart(trans);
		return false;
	}

	if (six_trylock_type(&b->c.lock, type))
		return true;

#ifdef CONFIG_BCACHEFS_DEBUG
	trans->locking_path_idx	= path->idx;
	trans->locking_pos	= pos;
	trans->locking_btree_id	= path->btree_id;
	trans->locking_level	= level;
	trans->locking		= b;
#endif

	ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p) == 0;

#ifdef CONFIG_BCACHEFS_DEBUG
	trans->locking = NULL;
#endif
	if (ret)
		bch2_time_stats_update(&trans->c->times[lock_to_time_stat(type)],
				       start_time);
	return ret;
}

/* Btree iterator locking: */

#ifdef CONFIG_BCACHEFS_DEBUG

static void bch2_btree_path_verify_locks(struct btree_path *path)
{
	unsigned l;

	if (!path->nodes_locked) {
		BUG_ON(path->uptodate == BTREE_ITER_UPTODATE);
		return;
	}

	for (l = 0; btree_path_node(path, l); l++)
		BUG_ON(btree_lock_want(path, l) !=
		       btree_node_locked_type(path, l));
}

void bch2_trans_verify_locks(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		bch2_btree_path_verify_locks(path);
}
#else
static inline void bch2_btree_path_verify_locks(struct btree_path *path) {}
#endif

/* Btree path locking: */

/*
 * Only for btree_cache.c - only relocks intent locks
 */
bool bch2_btree_path_relock_intent(struct btree_trans *trans,
				   struct btree_path *path)
{
	unsigned l;

	for (l = path->level;
	     l < path->locks_want && btree_path_node(path, l);
	     l++) {
		if (!bch2_btree_node_relock(trans, path, l)) {
			trace_node_relock_fail(trans->ip, _RET_IP_,
					path->cached,
					path->btree_id, &path->pos,
					l, path->l[l].lock_seq,
					is_btree_node(path, l)
					? 0
					: (unsigned long) path->l[l].b,
					is_btree_node(path, l)
					? path->l[l].b->c.lock.state.seq
					: 0);
			__bch2_btree_path_unlock(path);
			btree_path_set_dirty(path, BTREE_ITER_NEED_TRAVERSE);
			btree_trans_restart(trans);
			return false;
		}
	}

	return true;
}

__flatten
static bool bch2_btree_path_relock(struct btree_trans *trans,
			struct btree_path *path, unsigned long trace_ip)
{
	bool ret = btree_path_get_locks(trans, path, false, trace_ip);

	if (!ret)
		btree_trans_restart(trans);
	return ret;
}

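/*
 * Raise a path's locks_want and try to take intent locks at the new levels;
 * returns false if we couldn't get all the locks we now want:
 */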
bool __bch2_btree_path_upgrade(struct btree_trans *trans,
			       struct btree_path *path,
			       unsigned new_locks_want)
{
	struct btree_path *linked;

	EBUG_ON(path->locks_want >= new_locks_want);

	path->locks_want = new_locks_want;

	if (btree_path_get_locks(trans, path, true, _THIS_IP_))
		return true;

	/*
	 * XXX: this is ugly - we'd prefer to not be mucking with other
	 * iterators in the btree_trans here.
	 *
	 * On failure to upgrade the iterator, setting iter->locks_want and
	 * calling get_locks() is sufficient to make bch2_btree_path_traverse()
	 * get the locks we want on transaction restart.
	 *
	 * But if this iterator was a clone, on transaction restart what we did
	 * to this iterator isn't going to be preserved.
	 *
	 * Possibly we could add an iterator field for the parent iterator when
	 * an iterator is a copy - for now, we'll just upgrade any other
	 * iterators with the same btree id.
	 *
	 * The code below used to be needed to ensure ancestor nodes get locked
	 * before interior nodes - now that's handled by
	 * bch2_btree_path_traverse_all().
	 */
	trans_for_each_path(trans, linked)
		if (linked != path &&
		    linked->cached == path->cached &&
		    linked->btree_id == path->btree_id &&
		    linked->locks_want < new_locks_want) {
			linked->locks_want = new_locks_want;
			btree_path_get_locks(trans, linked, true, _THIS_IP_);
		}

	return false;
}

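/*
 * Lower a path's locks_want: unlock any level >= the new locks_want that's
 * above path->level, and downgrade path->level itself from intent to read if
 * it no longer needs an intent lock:
 */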
void __bch2_btree_path_downgrade(struct btree_path *path,
				 unsigned new_locks_want)
{
	unsigned l;

	EBUG_ON(path->locks_want < new_locks_want);

	path->locks_want = new_locks_want;

	while (path->nodes_locked &&
	       (l = __fls(path->nodes_locked)) >= path->locks_want) {
		if (l > path->level) {
			btree_node_unlock(path, l);
		} else {
			if (btree_node_intent_locked(path, l)) {
				six_lock_downgrade(&path->l[l].b->c.lock);
				path->nodes_intent_locked ^= 1 << l;
			}
			break;
		}
	}

	bch2_btree_path_verify_locks(path);
}

void bch2_trans_downgrade(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		bch2_btree_path_downgrade(path);
}

/* Btree transaction locking: */

bool bch2_trans_relock(struct btree_trans *trans)
{
	struct btree_path *path;

	if (unlikely(trans->restarted))
		return false;

	trans_for_each_path(trans, path)
		if (path->should_be_locked &&
		    !bch2_btree_path_relock(trans, path, _RET_IP_)) {
			trace_trans_restart_relock(trans->ip, _RET_IP_,
					path->btree_id, &path->pos);
			BUG_ON(!trans->restarted);
			return false;
		}
	return true;
}

void bch2_trans_unlock(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		__bch2_btree_path_unlock(path);
}

/* Btree iterator: */

#ifdef CONFIG_BCACHEFS_DEBUG

static void bch2_btree_path_verify_cached(struct btree_trans *trans,
					  struct btree_path *path)
{
	struct bkey_cached *ck;
	bool locked = btree_node_locked(path, 0);

	if (!bch2_btree_node_relock(trans, path, 0))
		return;

	ck = (void *) path->l[0].b;
	BUG_ON(ck->key.btree_id != path->btree_id ||
	       bkey_cmp(ck->key.pos, path->pos));

	if (!locked)
		btree_node_unlock(path, 0);
}

static void bch2_btree_path_verify_level(struct btree_trans *trans,
				struct btree_path *path, unsigned level)
{
	struct btree_path_level *l;
	struct btree_node_iter tmp;
	bool locked;
	struct bkey_packed *p, *k;
	char buf1[100], buf2[100], buf3[100];
	const char *msg;

	if (!bch2_debug_check_iterators)
		return;

	l	= &path->l[level];
	tmp	= l->iter;
	locked	= btree_node_locked(path, level);

	if (path->cached) {
		if (!level)
			bch2_btree_path_verify_cached(trans, path);
		return;
	}

	if (!btree_path_node(path, level))
		return;

	if (!bch2_btree_node_relock(trans, path, level))
		return;

	BUG_ON(!btree_path_pos_in_node(path, l->b));

	bch2_btree_node_iter_verify(&l->iter, l->b);

	/*
	 * For interior nodes, the iterator will have skipped past deleted keys:
	 */
	p = level
		? bch2_btree_node_iter_prev(&tmp, l->b)
		: bch2_btree_node_iter_prev_all(&tmp, l->b);
	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);

	if (p && bkey_iter_pos_cmp(l->b, p, &path->pos) >= 0) {
		msg = "before";
		goto err;
	}

	if (k && bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
		msg = "after";
		goto err;
	}

	if (!locked)
		btree_node_unlock(path, level);
	return;
err:
	strcpy(buf2, "(none)");
	strcpy(buf3, "(none)");

	bch2_bpos_to_text(&PBUF(buf1), path->pos);

	if (p) {
		struct bkey uk = bkey_unpack_key(l->b, p);
		bch2_bkey_to_text(&PBUF(buf2), &uk);
	}

	if (k) {
		struct bkey uk = bkey_unpack_key(l->b, k);
		bch2_bkey_to_text(&PBUF(buf3), &uk);
	}

	panic("path should be %s key at level %u:\n"
	      "path pos %s\n"
	      "prev key %s\n"
	      "cur key %s\n",
	      msg, level, buf1, buf2, buf3);
}

static void bch2_btree_path_verify(struct btree_trans *trans,
				   struct btree_path *path)
{
	struct bch_fs *c = trans->c;
	unsigned i;

	EBUG_ON(path->btree_id >= BTREE_ID_NR);

	for (i = 0; i < (!path->cached ? BTREE_MAX_DEPTH : 1); i++) {
		if (!path->l[i].b) {
			BUG_ON(c->btree_roots[path->btree_id].b->c.level > i);
			break;
		}

		bch2_btree_path_verify_level(trans, path, i);
	}

	bch2_btree_path_verify_locks(path);
}

void bch2_trans_verify_paths(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		bch2_btree_path_verify(trans, path);
}

static void bch2_btree_iter_verify(struct btree_iter *iter)
{
	struct btree_trans *trans = iter->trans;

	BUG_ON(iter->btree_id >= BTREE_ID_NR);

	BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != iter->path->cached);

	BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	       iter->pos.snapshot != iter->snapshot);

	BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
	       (iter->flags & BTREE_ITER_ALL_SNAPSHOTS));

	BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
	       (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	       !btree_type_has_snapshots(iter->btree_id));

	bch2_btree_path_verify(trans, iter->path);
}

static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
{
	BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
	       iter->pos.snapshot != iter->snapshot);

	BUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
	       bkey_cmp(iter->pos, iter->k.p) > 0);
}

#else

static inline void bch2_btree_path_verify_level(struct btree_trans *trans,
						struct btree_path *path, unsigned l) {}
static inline void bch2_btree_path_verify(struct btree_trans *trans,
					  struct btree_path *path) {}
static inline void bch2_btree_iter_verify(struct btree_iter *iter) {}
static inline void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter) {}

#endif

/* Btree path: fixups after btree updates */

static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
					struct btree *b,
					struct bset_tree *t,
					struct bkey_packed *k)
{
	struct btree_node_iter_set *set;

	btree_node_iter_for_each(iter, set)
		if (set->end == t->end_offset) {
			set->k = __btree_node_key_to_offset(b, k);
			bch2_btree_node_iter_sort(iter, b);
			return;
		}

	bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
}

static void __bch2_btree_path_fix_key_modified(struct btree_path *path,
					       struct btree *b,
					       struct bkey_packed *where)
{
	struct btree_path_level *l = &path->l[b->c.level];

	if (where != bch2_btree_node_iter_peek_all(&l->iter, l->b))
		return;

	if (bkey_iter_pos_cmp(l->b, where, &path->pos) < 0)
		bch2_btree_node_iter_advance(&l->iter, l->b);
}

void bch2_btree_path_fix_key_modified(struct btree_trans *trans,
				      struct btree *b,
				      struct bkey_packed *where)
{
	struct btree_path *path;

	trans_for_each_path_with_node(trans, b, path) {
		__bch2_btree_path_fix_key_modified(path, b, where);
		bch2_btree_path_verify_level(trans, path, b->c.level);
	}
}

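/*
 * A key in @b was inserted, overwritten or deleted: fix up this node iterator
 * so it still points where the path expects, accounting for the bset having
 * grown or shrunk by (new_u64s - clobber_u64s) u64s at @where:
 */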
static void __bch2_btree_node_iter_fix(struct btree_path *path,
				       struct btree *b,
				       struct btree_node_iter *node_iter,
				       struct bset_tree *t,
				       struct bkey_packed *where,
				       unsigned clobber_u64s,
				       unsigned new_u64s)
{
	const struct bkey_packed *end = btree_bkey_last(b, t);
	struct btree_node_iter_set *set;
	unsigned offset = __btree_node_key_to_offset(b, where);
	int shift = new_u64s - clobber_u64s;
	unsigned old_end = t->end_offset - shift;
	unsigned orig_iter_pos = node_iter->data[0].k;
	bool iter_current_key_modified =
		orig_iter_pos >= offset &&
		orig_iter_pos <= offset + clobber_u64s;

	btree_node_iter_for_each(node_iter, set)
		if (set->end == old_end)
			goto found;

	/* didn't find the bset in the iterator - might have to readd it: */
	if (new_u64s &&
	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
		bch2_btree_node_iter_push(node_iter, b, where, end);
		goto fixup_done;
	} else {
		/* Iterator is after key that changed */
		return;
	}
found:
	set->end = t->end_offset;

	/* Iterator hasn't gotten to the key that changed yet: */
	if (set->k < offset)
		return;

	if (new_u64s &&
	    bkey_iter_pos_cmp(b, where, &path->pos) >= 0) {
		set->k = offset;
	} else if (set->k < offset + clobber_u64s) {
		set->k = offset + new_u64s;
		if (set->k == set->end)
			bch2_btree_node_iter_set_drop(node_iter, set);
	} else {
		/* Iterator is after key that changed */
		set->k = (int) set->k + shift;
		return;
	}

	bch2_btree_node_iter_sort(node_iter, b);
fixup_done:
	if (node_iter->data[0].k != orig_iter_pos)
		iter_current_key_modified = true;

	/*
	 * When a new key is added, and the node iterator now points to that
	 * key, the iterator might have skipped past deleted keys that should
	 * come after the key the iterator now points to. We have to rewind to
	 * before those deleted keys - otherwise
	 * bch2_btree_node_iter_prev_all() breaks:
	 */
	if (!bch2_btree_node_iter_end(node_iter) &&
	    iter_current_key_modified &&
	    b->c.level) {
		struct bset_tree *t;
		struct bkey_packed *k, *k2, *p;

		k = bch2_btree_node_iter_peek_all(node_iter, b);

		for_each_bset(b, t) {
			bool set_pos = false;

			if (node_iter->data[0].end == t->end_offset)
				continue;

			k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);

			while ((p = bch2_bkey_prev_all(b, t, k2)) &&
			       bkey_iter_cmp(b, k, p) < 0) {
				k2 = p;
				set_pos = true;
			}

			if (set_pos)
				btree_node_iter_set_set_pos(node_iter,
							    b, t, k2);
		}
	}
}

void bch2_btree_node_iter_fix(struct btree_trans *trans,
			      struct btree_path *path,
			      struct btree *b,
			      struct btree_node_iter *node_iter,
			      struct bkey_packed *where,
			      unsigned clobber_u64s,
			      unsigned new_u64s)
{
	struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where);
	struct btree_path *linked;

	if (node_iter != &path->l[b->c.level].iter) {
		__bch2_btree_node_iter_fix(path, b, node_iter, t,
					   where, clobber_u64s, new_u64s);

		if (bch2_debug_check_iterators)
			bch2_btree_node_iter_verify(node_iter, b);
	}

	trans_for_each_path_with_node(trans, b, linked) {
		__bch2_btree_node_iter_fix(linked, b,
					   &linked->l[b->c.level].iter, t,
					   where, clobber_u64s, new_u64s);
		bch2_btree_path_verify_level(trans, linked, b->c.level);
	}
}

/* Btree path level: pointer to a particular btree node and node iter */

static inline struct bkey_s_c __btree_iter_unpack(struct bch_fs *c,
						  struct btree_path_level *l,
						  struct bkey *u,
						  struct bkey_packed *k)
{
	struct bkey_s_c ret;

	if (unlikely(!k)) {
		/*
		 * signal to bch2_btree_iter_peek_slot() that we're currently at
		 * a hole
		 */
		u->type = KEY_TYPE_deleted;
		return bkey_s_c_null;
	}

	ret = bkey_disassemble(l->b, k, u);

	/*
	 * XXX: bch2_btree_bset_insert_key() generates invalid keys when we
	 * overwrite extents - it sets k->type = KEY_TYPE_deleted on the key
	 * being overwritten but doesn't change k->size. But this is ok, because
	 * those keys are never written out, we just have to avoid a spurious
	 * assertion here:
	 */
	if (bch2_debug_check_bkeys && !bkey_deleted(ret.k))
		bch2_bkey_debugcheck(c, l->b, ret);

	return ret;
}

static inline struct bkey_s_c btree_path_level_peek_all(struct bch_fs *c,
							struct btree_path_level *l,
							struct bkey *u)
{
	return __btree_iter_unpack(c, l, u,
			bch2_btree_node_iter_peek_all(&l->iter, l->b));
}

static inline struct bkey_s_c btree_path_level_peek(struct btree_trans *trans,
						    struct btree_path *path,
						    struct btree_path_level *l,
						    struct bkey *u)
{
	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
			bch2_btree_node_iter_peek(&l->iter, l->b));

	path->pos = k.k ? k.k->p : l->b->key.k.p;
	trans->paths_sorted = false;
	return k;
}

static inline struct bkey_s_c btree_path_level_prev(struct btree_trans *trans,
						    struct btree_path *path,
						    struct btree_path_level *l,
						    struct bkey *u)
{
	struct bkey_s_c k = __btree_iter_unpack(trans->c, l, u,
			bch2_btree_node_iter_prev(&l->iter, l->b));

	path->pos = k.k ? k.k->p : l->b->data->min_key;
	trans->paths_sorted = false;
	return k;
}

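/*
 * Advance the node iterator past keys that compare strictly before path->pos;
 * gives up and returns false after max_advance steps (a non-positive
 * max_advance means no limit):
 */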
static inline bool btree_path_advance_to_pos(struct btree_path *path,
					     struct btree_path_level *l,
					     int max_advance)
{
	struct bkey_packed *k;
	int nr_advanced = 0;

	while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
	       bkey_iter_pos_cmp(l->b, k, &path->pos) < 0) {
		if (max_advance > 0 && nr_advanced >= max_advance)
			return false;

		bch2_btree_node_iter_advance(&l->iter, l->b);
		nr_advanced++;
	}

	return true;
}

/*
 * Verify that iterator for parent node points to child node:
 */
static void btree_path_verify_new_node(struct btree_trans *trans,
				       struct btree_path *path, struct btree *b)
{
	struct btree_path_level *l;
	unsigned plevel;
	bool parent_locked;
	struct bkey_packed *k;

	if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
		return;

	plevel = b->c.level + 1;
	if (!btree_path_node(path, plevel))
		return;

	parent_locked = btree_node_locked(path, plevel);

	if (!bch2_btree_node_relock(trans, path, plevel))
		return;

	l = &path->l[plevel];
	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
	if (!k ||
	    bkey_deleted(k) ||
	    bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
		char buf1[100];
		char buf2[100];
		char buf3[100];
		char buf4[100];
		struct bkey uk = bkey_unpack_key(b, k);

		bch2_dump_btree_node(trans->c, l->b);
		bch2_bpos_to_text(&PBUF(buf1), path->pos);
		bch2_bkey_to_text(&PBUF(buf2), &uk);
		bch2_bpos_to_text(&PBUF(buf3), b->data->min_key);
		bch2_bpos_to_text(&PBUF(buf4), b->data->max_key);
		panic("parent iter doesn't point to new node:\n"
		      "iter pos %s %s\n"
		      "iter key %s\n"
		      "new node %s-%s\n",
		      bch2_btree_ids[path->btree_id], buf1,
		      buf2, buf3, buf4);
	}

	if (!parent_locked)
		btree_node_unlock(path, plevel);
}

static inline void __btree_path_level_init(struct btree_path *path,
					   unsigned level)
{
	struct btree_path_level *l = &path->l[level];

	bch2_btree_node_iter_init(&l->iter, l->b, &path->pos);

	/*
	 * Iterators to interior nodes should always be pointed at the first non
	 * whiteout:
	 */
	if (level)
		bch2_btree_node_iter_peek(&l->iter, l->b);
}

static inline void btree_path_level_init(struct btree_trans *trans,
					 struct btree_path *path,
					 struct btree *b)
{
	BUG_ON(path->cached);

	btree_path_verify_new_node(trans, path, b);

	EBUG_ON(!btree_path_pos_in_node(path, b));
	EBUG_ON(b->c.lock.state.seq & 1);

	path->l[b->c.level].lock_seq = b->c.lock.state.seq;
	path->l[b->c.level].b = b;
	__btree_path_level_init(path, b->c.level);
}

/* Btree path: fixups after btree node updates: */

/*
 * A btree node is being replaced - update the iterator to point to the new
 * node:
 */
void bch2_trans_node_add(struct btree_trans *trans, struct btree *b)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		if (!path->cached &&
		    btree_path_pos_in_node(path, b)) {
			enum btree_node_locked_type t =
				btree_lock_want(path, b->c.level);

			if (path->nodes_locked &&
			    t != BTREE_NODE_UNLOCKED) {
				btree_node_unlock(path, b->c.level);
				six_lock_increment(&b->c.lock, (enum six_lock_type) t);
				mark_btree_node_locked(trans, path, b->c.level, (enum six_lock_type) t);
			}

			btree_path_level_init(trans, path, b);
		}
}

/*
 * A btree node has been modified in such a way as to invalidate iterators - fix
 * them:
 */
void bch2_trans_node_reinit_iter(struct btree_trans *trans, struct btree *b)
{
	struct btree_path *path;

	trans_for_each_path_with_node(trans, b, path)
		__btree_path_level_init(path, b->c.level);
}

/* Btree path: traverse, set_pos: */

static int lock_root_check_fn(struct six_lock *lock, void *p)
{
	struct btree *b = container_of(lock, struct btree, c.lock);
	struct btree **rootp = p;

	return b == *rootp ? 0 : -1;
}

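/*
 * Lock the root of path's btree: the root can be freed and replaced while we
 * wait for the lock, so loop until the node we locked is still the current
 * root (lock_root_check_fn aborts the wait if it no longer is):
 */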
static inline int btree_path_lock_root(struct btree_trans *trans,
				       struct btree_path *path,
				       unsigned depth_want,
				       unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree *b, **rootp = &c->btree_roots[path->btree_id].b;
	enum six_lock_type lock_type;
	unsigned i;

	EBUG_ON(path->nodes_locked);

	while (1) {
		b = READ_ONCE(*rootp);
		path->level = READ_ONCE(b->c.level);

		if (unlikely(path->level < depth_want)) {
			/*
			 * the root is at a lower depth than the depth we want:
			 * got to the end of the btree, or we're walking nodes
			 * greater than some depth and there are no nodes >=
			 * that depth
			 */
			path->level = depth_want;
			for (i = path->level; i < BTREE_MAX_DEPTH; i++)
				path->l[i].b = NULL;
			return 1;
		}

		lock_type = __btree_lock_want(path, path->level);
		if (unlikely(!btree_node_lock(trans, path, b, SPOS_MAX,
					      path->level, lock_type,
					      lock_root_check_fn, rootp,
					      trace_ip))) {
			if (trans->restarted)
				return -EINTR;
			continue;
		}

		if (likely(b == READ_ONCE(*rootp) &&
			   b->c.level == path->level &&
			   !race_fault())) {
			for (i = 0; i < path->level; i++)
				path->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT;
			path->l[path->level].b = b;
			for (i = path->level + 1; i < BTREE_MAX_DEPTH; i++)
				path->l[i].b = NULL;

			mark_btree_node_locked(trans, path, path->level, lock_type);
			btree_path_level_init(trans, path, b);
			return 0;
		}

		six_unlock_type(&b->c.lock, lock_type);
	}
}

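/*
 * Start reads for the next few children of the node at path->level so they're
 * in the btree node cache by the time the iterator descends to them:
 */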
noinline
static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *path)
{
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree_node_iter node_iter = l->iter;
	struct bkey_packed *k;
	struct bkey_buf tmp;
	unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
		? (path->level > 1 ? 0 :  2)
		: (path->level > 1 ? 1 : 16);
	bool was_locked = btree_node_locked(path, path->level);
	int ret = 0;

	bch2_bkey_buf_init(&tmp);

	while (nr && !ret) {
		if (!bch2_btree_node_relock(trans, path, path->level))
			break;

		bch2_btree_node_iter_advance(&node_iter, l->b);
		k = bch2_btree_node_iter_peek(&node_iter, l->b);
		if (!k)
			break;

		bch2_bkey_buf_unpack(&tmp, c, l->b, k);
		ret = bch2_btree_node_prefetch(c, trans, path, tmp.k, path->btree_id,
					       path->level - 1);
	}

	if (!was_locked)
		btree_node_unlock(path, path->level);

	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}

static noinline void btree_node_mem_ptr_set(struct btree_trans *trans,
					    struct btree_path *path,
					    unsigned plevel, struct btree *b)
{
	struct btree_path_level *l = &path->l[plevel];
	bool locked = btree_node_locked(path, plevel);
	struct bkey_packed *k;
	struct bch_btree_ptr_v2 *bp;

	if (!bch2_btree_node_relock(trans, path, plevel))
		return;

	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
	BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);

	bp = (void *) bkeyp_val(&l->b->format, k);
	bp->mem_ptr = (unsigned long)b;

	if (!locked)
		btree_node_unlock(path, plevel);
}

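/*
 * Descend one level: unpack the btree pointer the parent's node iterator
 * points at, get and lock that child node, and make it the path's new bottom
 * level:
 */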
static __always_inline int btree_path_down(struct btree_trans *trans,
					   struct btree_path *path,
					   unsigned flags,
					   unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree_path_level *l = path_l(path);
	struct btree *b;
	unsigned level = path->level - 1;
	enum six_lock_type lock_type = __btree_lock_want(path, level);
	struct bkey_buf tmp;
	int ret;

	EBUG_ON(!btree_node_locked(path, path->level));

	bch2_bkey_buf_init(&tmp);
	bch2_bkey_buf_unpack(&tmp, c, l->b,
			 bch2_btree_node_iter_peek(&l->iter, l->b));

	b = bch2_btree_node_get(trans, path, tmp.k, level, lock_type, trace_ip);
	ret = PTR_ERR_OR_ZERO(b);
	if (unlikely(ret))
		goto err;

	mark_btree_node_locked(trans, path, level, lock_type);
	btree_path_level_init(trans, path, b);

	if (tmp.k->k.type == KEY_TYPE_btree_ptr_v2 &&
	    unlikely(b != btree_node_mem_ptr(tmp.k)))
		btree_node_mem_ptr_set(trans, path, level + 1, b);

	if (flags & BTREE_ITER_PREFETCH)
		ret = btree_path_prefetch(trans, path);

	if (btree_node_read_locked(path, level + 1))
		btree_node_unlock(path, level + 1);
	path->level = level;

	bch2_btree_path_verify_locks(path);
err:
	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}

static int btree_path_traverse_one(struct btree_trans *, struct btree_path *,
				   unsigned, unsigned long);

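/*
 * Slowpath for when traversing an individual path fails: drop all locks, then
 * retraverse every path in the transaction in sorted order so that locks are
 * reacquired in an order that can't deadlock:
 */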
static int __btree_path_traverse_all(struct btree_trans *trans, int ret,
				     unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree_path *path, *prev = NULL;
	int i;

	if (trans->in_traverse_all)
		return -EINTR;

	trans->in_traverse_all = true;
retry_all:
	trans->restarted = false;

	trans_for_each_path(trans, path)
		path->should_be_locked = false;

	btree_trans_sort_paths(trans);

	trans_for_each_path_inorder_reverse(trans, path, i) {
		if (prev) {
			if (path->btree_id == prev->btree_id &&
			    path->locks_want < prev->locks_want)
				__bch2_btree_path_upgrade(trans, path, prev->locks_want);
			else if (!path->locks_want && prev->locks_want)
				__bch2_btree_path_upgrade(trans, path, 1);
		}

		prev = path;
	}

	bch2_trans_unlock(trans);
	cond_resched();

	if (unlikely(ret == -ENOMEM)) {
		struct closure cl;

		closure_init_stack(&cl);

		do {
			ret = bch2_btree_cache_cannibalize_lock(c, &cl);
			closure_sync(&cl);
		} while (ret);
	}

	if (unlikely(ret == -EIO)) {
		trans->error = true;
		goto out;
	}

	BUG_ON(ret && ret != -EINTR);

	/* Now, redo traversals in correct order: */
	i = 0;
	while (i < trans->nr_sorted) {
		path = trans->paths + trans->sorted[i];

		EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
#ifdef CONFIG_BCACHEFS_DEBUG
		trans->traverse_all_idx = path->idx;
#endif

		ret = btree_path_traverse_one(trans, path, 0, _THIS_IP_);
		if (ret)
			goto retry_all;

		EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));

		if (path->nodes_locked)
			i++;
	}

	/*
	 * BTREE_ITER_NEED_RELOCK is ok here - if we called bch2_trans_unlock()
	 * and relock(), relock() won't relock since path->should_be_locked
	 * isn't set yet, which is all fine
	 */
	trans_for_each_path(trans, path)
		BUG_ON(path->uptodate >= BTREE_ITER_NEED_TRAVERSE);
out:
	bch2_btree_cache_cannibalize_unlock(c);

#ifdef CONFIG_BCACHEFS_DEBUG
	trans->traverse_all_idx = U8_MAX;
#endif

	trans->in_traverse_all = false;

	trace_trans_traverse_all(trans->ip, trace_ip);
	return ret;
}
2021-08-30 15:18:31 -04:00
static int bch2_btree_path_traverse_all ( struct btree_trans * trans )
2019-03-28 00:07:24 -04:00
{
2021-08-30 15:18:31 -04:00
return __btree_path_traverse_all ( trans , 0 , _RET_IP_ ) ;
2017-03-16 22:18:50 -08:00
}
2021-08-30 15:18:31 -04:00
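/*
 * Returns true if the node at level @l is still usable for this path: we must
 * be able to relock it, and if @check_pos is nonzero path->pos must not have
 * moved outside the node in that direction:
 */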
static inline bool btree_path_good_node ( struct btree_trans * trans ,
struct btree_path * path ,
2019-09-07 19:19:57 -04:00
unsigned l , int check_pos )
{
2021-08-30 15:18:31 -04:00
if ( ! is_btree_node ( path , l ) | |
! bch2_btree_node_relock ( trans , path , l ) )
2019-09-07 19:19:57 -04:00
return false ;
2021-08-30 15:18:31 -04:00
if ( check_pos < 0 & & btree_path_pos_before_node ( path , path - > l [ l ] . b ) )
2019-09-07 19:19:57 -04:00
return false ;
2021-08-30 15:18:31 -04:00
if ( check_pos > 0 & & btree_path_pos_after_node ( path , path - > l [ l ] . b ) )
2019-09-07 19:19:57 -04:00
return false ;
return true ;
}
2021-08-30 15:18:31 -04:00
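/*
 * Walk up the path, unlocking levels whose nodes are no longer usable for
 * path->pos, and return the lowest level that is still good:
 */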
static inline unsigned btree_path_up_until_good_node ( struct btree_trans * trans ,
struct btree_path * path ,
2019-09-07 19:19:57 -04:00
int check_pos )
2017-03-16 22:18:50 -08:00
{
2021-08-30 15:18:31 -04:00
unsigned l = path - > level ;
2017-03-16 22:18:50 -08:00
2021-09-04 21:23:11 -04:00
if ( ! path - > nodes_locked )
btree_path_get_locks ( trans , path , false , _THIS_IP_ ) ;
2021-08-30 15:18:31 -04:00
while ( btree_path_node ( path , l ) & &
! btree_path_good_node ( trans , path , l , check_pos ) ) {
btree_node_unlock ( path , l ) ;
path - > l [ l ] . b = BTREE_ITER_NO_NODE_UP ;
2017-03-16 22:18:50 -08:00
l + + ;
}
return l ;
}
/*
* This is the main state machine for walking down the btree - walks down to a
* specified depth
*
* Returns 0 on success , - EIO on error ( error reading in a btree node ) .
*
* On error , caller ( peek_node ( ) / peek_key ( ) ) must return NULL ; the error is
2019-05-10 16:09:17 -04:00
* stashed in the iterator and returned from bch2_trans_exit ( ) .
2017-03-16 22:18:50 -08:00
*/
2021-08-30 15:18:31 -04:00
static int btree_path_traverse_one ( struct btree_trans * trans ,
struct btree_path * path ,
unsigned flags ,
2020-10-28 14:17:46 -04:00
unsigned long trace_ip )
2017-03-16 22:18:50 -08:00
{
2021-08-30 15:18:31 -04:00
unsigned l , depth_want = path - > level ;
2021-06-04 15:18:10 -04:00
int ret = 0 ;
2017-03-16 22:18:50 -08:00
2021-07-22 12:39:11 -04:00
/*
2021-08-30 15:18:31 -04:00
* Ensure we obey path - > should_be_locked : if it ' s set , we can ' t unlock
* and re - traverse the path without a transaction restart :
2021-07-22 12:39:11 -04:00
*/
2021-08-30 15:18:31 -04:00
if ( path - > should_be_locked ) {
ret = bch2_btree_path_relock ( trans , path , trace_ip ) ? 0 : - EINTR ;
2021-07-22 12:39:11 -04:00
goto out ;
}
2021-08-30 15:18:31 -04:00
if ( path - > cached ) {
ret = bch2_btree_path_traverse_cached ( trans , path , flags ) ;
2021-06-04 15:18:10 -04:00
goto out ;
}
2019-03-07 19:46:10 -05:00
2021-08-30 15:18:31 -04:00
if ( unlikely ( path - > level > = BTREE_MAX_DEPTH ) )
2021-06-04 15:18:10 -04:00
goto out ;
2019-03-07 19:46:10 -05:00
2021-08-30 15:18:31 -04:00
path - > level = btree_path_up_until_good_node ( trans , path , 0 ) ;
2017-03-16 22:18:50 -08:00
2021-07-14 15:13:27 -04:00
/* If we need intent locks, take them too: */
2021-08-30 15:18:31 -04:00
for ( l = path - > level + 1 ;
l < path - > locks_want & & btree_path_node ( path , l ) ;
2021-07-14 15:13:27 -04:00
l + + )
2021-08-30 15:18:31 -04:00
if ( ! bch2_btree_node_relock ( trans , path , l ) )
while ( path - > level < = l ) {
btree_node_unlock ( path , path - > level ) ;
path - > l [ path - > level ] . b = BTREE_ITER_NO_NODE_UP ;
path - > level + + ;
2021-07-14 15:13:27 -04:00
}
2017-03-16 22:18:50 -08:00
/*
2021-08-30 15:18:31 -04:00
* Note : path - > nodes [ path - > level ] may be temporarily NULL here - that
2017-03-16 22:18:50 -08:00
* would indicate to other code that we got to the end of the btree ,
* here it indicates that relocking the root failed - it ' s critical that
2021-08-30 15:18:31 -04:00
* btree_path_lock_root ( ) comes next and that it can ' t fail
2017-03-16 22:18:50 -08:00
*/
2021-08-30 15:18:31 -04:00
while ( path - > level > depth_want ) {
ret = btree_path_node ( path , path - > level )
? btree_path_down ( trans , path , flags , trace_ip )
: btree_path_lock_root ( trans , path , depth_want , trace_ip ) ;
2017-03-16 22:18:50 -08:00
if ( unlikely ( ret ) ) {
2021-06-04 15:18:10 -04:00
if ( ret = = 1 ) {
/*
2021-08-30 15:54:41 -04:00
* No nodes at this level - got to the end of
* the btree :
2021-06-04 15:18:10 -04:00
*/
ret = 0 ;
goto out ;
}
2018-11-21 02:59:07 -05:00
2021-08-30 15:18:31 -04:00
__bch2_btree_path_unlock ( path ) ;
path - > level = depth_want ;
2019-03-07 19:46:10 -05:00
2021-08-30 15:18:31 -04:00
if ( ret = = - EIO )
path - > l [ path - > level ] . b =
2019-03-07 19:46:10 -05:00
BTREE_ITER_NO_NODE_ERROR ;
2021-08-30 15:18:31 -04:00
else
path - > l [ path - > level ] . b =
2019-03-07 19:46:10 -05:00
BTREE_ITER_NO_NODE_DOWN ;
2021-06-04 15:18:10 -04:00
goto out ;
2017-03-16 22:18:50 -08:00
}
}
2021-08-30 15:18:31 -04:00
path - > uptodate = BTREE_ITER_UPTODATE ;
2021-06-04 15:18:10 -04:00
out :
2021-07-25 17:19:52 -04:00
BUG_ON ( ( ret = = - EINTR ) ! = ! ! trans - > restarted ) ;
2021-07-24 17:31:25 -04:00
trace_iter_traverse ( trans - > ip , trace_ip ,
2021-08-30 15:18:31 -04:00
path - > cached ,
path - > btree_id , & path - > pos , ret ) ;
bch2_btree_path_verify ( trans , path ) ;
2021-06-04 15:18:10 -04:00
return ret ;
2017-03-16 22:18:50 -08:00
}
2021-08-30 15:18:31 -04:00
static int __btree_path_traverse_all ( struct btree_trans * , int , unsigned long ) ;
int __must_check bch2_btree_path_traverse ( struct btree_trans * trans ,
struct btree_path * path , unsigned flags )
2017-03-16 22:18:50 -08:00
{
int ret ;
2021-08-30 15:18:31 -04:00
if ( path - > uptodate < BTREE_ITER_NEED_RELOCK )
return 0 ;
2019-03-07 19:46:10 -05:00
ret = bch2_trans_cond_resched ( trans ) ? :
2021-08-30 15:18:31 -04:00
btree_path_traverse_one ( trans , path , flags , _RET_IP_ ) ;
if ( unlikely ( ret ) & & hweight64 ( trans - > paths_allocated ) = = 1 ) {
ret = __btree_path_traverse_all ( trans , ret , _RET_IP_ ) ;
2021-07-27 18:01:52 -04:00
BUG_ON ( ret = = - EINTR ) ;
}
2017-03-16 22:18:50 -08:00
return ret ;
}
2021-08-30 15:18:31 -04:00
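/*
 * Copy everything in @src from the pos field onwards, and take additional
 * references on any node locks @src holds so that both paths can drop them
 * independently:
 */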
static void btree_path_copy ( struct btree_trans * trans , struct btree_path * dst ,
struct btree_path * src )
{
unsigned i , offset = offsetof ( struct btree_path , pos ) ;
memcpy ( ( void * ) dst + offset ,
( void * ) src + offset ,
sizeof ( struct btree_path ) - offset ) ;
for ( i = 0 ; i < BTREE_MAX_DEPTH ; i + + )
if ( btree_node_locked ( dst , i ) )
six_lock_increment ( & dst - > l [ i ] . b - > c . lock ,
__btree_lock_want ( dst , i ) ) ;
trans - > paths_sorted = false ;
}
struct btree_path * __must_check
__bch2_btree_path_make_mut ( struct btree_trans * trans ,
struct btree_path * path , bool intent )
{
struct btree_path * new = btree_path_alloc ( trans , path ) ;
btree_path_copy ( trans , new , path ) ;
__btree_path_get ( new , intent ) ;
__btree_path_put ( path , intent ) ;
path = new ;
path - > preserve = false ;
# ifdef CONFIG_BCACHEFS_DEBUG
path - > ip_allocated = _RET_IP_ ;
# endif
return path ;
}
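/*
 * Reposition a path: walk up until we're at a node that covers the new
 * position and fix up the node iterator - or, if we had to give up levels (or
 * the path is cached), mark the path as needing traversal:
 */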
static struct btree_path * __must_check
__bch2_btree_path_set_pos ( struct btree_trans * trans ,
struct btree_path * path , struct bpos new_pos ,
bool intent , int cmp )
{
# ifdef CONFIG_BCACHEFS_DEBUG
struct bpos old_pos = path - > pos ;
# endif
unsigned l = path - > level ;
EBUG_ON ( trans - > restarted ) ;
EBUG_ON ( ! path - > ref ) ;
path = bch2_btree_path_make_mut ( trans , path , intent ) ;
path - > pos = new_pos ;
path - > should_be_locked = false ;
trans - > paths_sorted = false ;
if ( unlikely ( path - > cached ) ) {
btree_node_unlock ( path , 0 ) ;
path - > l [ 0 ] . b = BTREE_ITER_NO_NODE_CACHED ;
btree_path_set_dirty ( path , BTREE_ITER_NEED_TRAVERSE ) ;
goto out ;
}
l = btree_path_up_until_good_node ( trans , path , cmp ) ;
if ( btree_path_node ( path , l ) ) {
/*
* We might have to skip over many keys , or just a few : try
* advancing the node iterator , and if we have to skip over too
* many keys just reinit it ( or if we ' re rewinding , since that
* is expensive ) .
*/
if ( cmp < 0 | |
! btree_path_advance_to_pos ( path , & path - > l [ l ] , 8 ) )
__btree_path_level_init ( path , l ) ;
}
2021-09-04 21:23:11 -04:00
if ( l ! = path - > level ) {
2021-08-30 15:18:31 -04:00
btree_path_set_dirty ( path , BTREE_ITER_NEED_TRAVERSE ) ;
2021-09-04 21:23:11 -04:00
__bch2_btree_path_unlock ( path ) ;
}
2021-08-30 15:18:31 -04:00
out :
bch2_btree_path_verify ( trans , path ) ;
# ifdef CONFIG_BCACHEFS_DEBUG
trace_path_set_pos ( trans - > ip , _RET_IP_ , path - > btree_id ,
& old_pos , & new_pos , l ) ;
# endif
return path ;
}
static inline struct btree_path * __must_check
btree_path_set_pos ( struct btree_trans * trans ,
struct btree_path * path , struct bpos new_pos ,
bool intent )
{
int cmp = bpos_cmp ( new_pos , path - > pos ) ;
return cmp
? __bch2_btree_path_set_pos ( trans , path , new_pos , intent , cmp )
: path ;
}
/* Btree path: main interface: */
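/*
 * Duplicate detection used by bch2_path_put(): check whether a neighbouring
 * path in sorted order is at the same position, or points to the same leaf
 * node:
 */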
static struct btree_path * have_path_at_pos ( struct btree_trans * trans , struct btree_path * path )
{
struct btree_path * next ;
next = prev_btree_path ( trans , path ) ;
if ( next & & ! btree_path_cmp ( next , path ) )
return next ;
next = next_btree_path ( trans , path ) ;
if ( next & & ! btree_path_cmp ( next , path ) )
return next ;
return NULL ;
}
static bool have_node_at_pos ( struct btree_trans * trans , struct btree_path * path )
{
struct btree_path * next ;
next = prev_btree_path ( trans , path ) ;
if ( next & & path_l ( next ) - > b = = path_l ( path ) - > b )
return true ;
next = next_btree_path ( trans , path ) ;
if ( next & & path_l ( next ) - > b = = path_l ( path ) - > b )
return true ;
return false ;
}
static inline void __bch2_path_free ( struct btree_trans * trans , struct btree_path * path )
2021-03-23 21:22:50 -04:00
{
2021-08-30 15:18:31 -04:00
__bch2_btree_path_unlock ( path ) ;
btree_path_list_remove ( trans , path ) ;
trans - > paths_allocated & = ~ ( 1ULL < < path - > idx ) ;
2021-03-23 21:22:50 -04:00
}
2021-08-30 15:18:31 -04:00
void bch2_path_put ( struct btree_trans * trans , struct btree_path * path , bool intent )
{
struct btree_path * dup ;
EBUG_ON ( trans - > paths + path - > idx ! = path ) ;
EBUG_ON ( ! path - > ref ) ;
if ( ! __btree_path_put ( path , intent ) )
return ;
/*
* Perhaps instead we should check for duplicate paths in traverse_all :
*/
if ( path - > preserve & &
( dup = have_path_at_pos ( trans , path ) ) ) {
dup - > preserve = true ;
path - > preserve = false ;
}
if ( ! path - > preserve & &
have_node_at_pos ( trans , path ) )
__bch2_path_free ( trans , path ) ;
}
noinline __cold
void bch2_dump_trans_paths_updates ( struct btree_trans * trans )
{
struct btree_path * path ;
struct btree_insert_entry * i ;
unsigned idx ;
char buf [ 300 ] ;
btree_trans_sort_paths ( trans ) ;
trans_for_each_path_inorder ( trans , path , idx )
printk ( KERN_ERR " path: idx %u ref %u:%u%s btree %s pos %s %pS \n " ,
path - > idx , path - > ref , path - > intent_ref ,
path - > preserve ? " preserve " : " " ,
bch2_btree_ids [ path - > btree_id ] ,
( bch2_bpos_to_text ( & PBUF ( buf ) , path - > pos ) , buf ) ,
# ifdef CONFIG_BCACHEFS_DEBUG
( void * ) path - > ip_allocated
# else
NULL
# endif
) ;
trans_for_each_update ( trans , i )
printk ( KERN_ERR " update: btree %s %s %pS \n " ,
bch2_btree_ids [ i - > btree_id ] ,
( bch2_bkey_val_to_text ( & PBUF ( buf ) , trans - > c , bkey_i_to_s_c ( i - > k ) ) , buf ) ,
( void * ) i - > ip_allocated ) ;
}
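/*
 * Allocate a path slot from the transaction's paths_allocated bitmap and
 * insert it into the sorted list after @pos; panics if all BTREE_ITER_MAX
 * slots are in use:
 */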
static struct btree_path * btree_path_alloc ( struct btree_trans * trans ,
struct btree_path * pos )
{
struct btree_path * path ;
unsigned idx ;
if ( unlikely ( trans - > paths_allocated = =
~ ( ( ~ 0ULL < < 1 ) < < ( BTREE_ITER_MAX - 1 ) ) ) ) {
bch2_dump_trans_paths_updates ( trans ) ;
panic ( " trans path oveflow \n " ) ;
}
idx = __ffs64 ( ~ trans - > paths_allocated ) ;
trans - > paths_allocated | = 1ULL < < idx ;
path = & trans - > paths [ idx ] ;
path - > idx = idx ;
path - > ref = 0 ;
path - > intent_ref = 0 ;
path - > nodes_locked = 0 ;
path - > nodes_intent_locked = 0 ;
btree_path_list_add ( trans , pos , path ) ;
return path ;
}
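/*
 * Get a path for the given btree/position: reuse an existing path with
 * matching btree id, level and cached flag if we have one, otherwise allocate
 * a new one:
 */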
struct btree_path * bch2_path_get ( struct btree_trans * trans , bool cached ,
enum btree_id btree_id , struct bpos pos ,
unsigned locks_want , unsigned level ,
bool intent )
{
2021-08-30 15:18:31 -04:00
struct btree_path * path , * path_pos = NULL ;
2021-08-30 15:18:31 -04:00
struct bpos pos_min = POS_MIN ;
int i ;
BUG_ON ( trans - > restarted ) ;
2021-08-30 15:18:31 -04:00
btree_trans_sort_paths ( trans ) ;
2021-08-30 15:18:31 -04:00
2021-08-30 15:18:31 -04:00
trans_for_each_path_inorder ( trans , path , i ) {
if ( __btree_path_cmp ( path ,
btree_id ,
cached ,
pos ,
level ) > 0 )
break ;
2021-08-30 15:18:31 -04:00
2021-08-30 15:18:31 -04:00
path_pos = path ;
2021-08-30 15:18:31 -04:00
}
2021-08-30 15:18:31 -04:00
if ( path_pos & &
path_pos - > cached = = cached & &
path_pos - > btree_id = = btree_id & &
path_pos - > level = = level ) {
__btree_path_get ( path_pos , intent ) ;
path = btree_path_set_pos ( trans , path_pos , pos , intent ) ;
2021-08-30 15:18:31 -04:00
path - > preserve = true ;
} else {
2021-08-30 15:18:31 -04:00
path = btree_path_alloc ( trans , path_pos ) ;
path_pos = NULL ;
2021-08-30 15:18:31 -04:00
__btree_path_get ( path , intent ) ;
path - > pos = pos ;
path - > btree_id = btree_id ;
path - > cached = cached ;
path - > preserve = true ;
path - > uptodate = BTREE_ITER_NEED_TRAVERSE ;
path - > should_be_locked = false ;
path - > level = level ;
path - > locks_want = locks_want ;
path - > nodes_locked = 0 ;
path - > nodes_intent_locked = 0 ;
for ( i = 0 ; i < ARRAY_SIZE ( path - > l ) ; i + + )
path - > l [ i ] . b = BTREE_ITER_NO_NODE_INIT ;
# ifdef CONFIG_BCACHEFS_DEBUG
path - > ip_allocated = _RET_IP_ ;
# endif
trans - > paths_sorted = false ;
}
if ( path - > intent_ref )
locks_want = max ( locks_want , level + 1 ) ;
/*
* If the path has locks_want greater than requested , we don ' t downgrade
* it here - on transaction restart because btree node split needs to
* upgrade locks , we might be putting / getting the iterator again .
* Downgrading iterators only happens via bch2_trans_downgrade ( ) , after
* a successful transaction commit .
*/
locks_want = min ( locks_want , BTREE_MAX_DEPTH ) ;
if ( locks_want > path - > locks_want ) {
path - > locks_want = locks_want ;
btree_path_get_locks ( trans , path , true , _THIS_IP_ ) ;
}
trace_trans_get_path ( _RET_IP_ , trans - > ip , btree_id ,
& pos , locks_want , path - > uptodate ,
2021-08-30 15:18:31 -04:00
path_pos ? & path_pos - > pos : & pos_min ,
path_pos ? path_pos - > locks_want : U8_MAX ,
path_pos ? path_pos - > uptodate : U8_MAX ) ;
2021-08-30 15:18:31 -04:00
return path ;
}
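/*
 * Return the key at the path's current position; if nothing is there,
 * synthesize a deleted key at path->pos so callers always get something back:
 */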
inline struct bkey_s_c bch2_btree_path_peek_slot ( struct btree_path * path , struct bkey * u )
{
struct bkey_s_c k ;
BUG_ON ( path - > uptodate ! = BTREE_ITER_UPTODATE ) ;
if ( ! path - > cached ) {
struct btree_path_level * l = path_l ( path ) ;
struct bkey_packed * _k =
bch2_btree_node_iter_peek_all ( & l - > iter , l - > b ) ;
k = _k ? bkey_disassemble ( l - > b , _k , u ) : bkey_s_c_null ;
EBUG_ON ( k . k & & bkey_deleted ( k . k ) & & bpos_cmp ( k . k - > p , path - > pos ) = = 0 ) ;
if ( ! k . k | | bpos_cmp ( path - > pos , k . k - > p ) )
goto hole ;
} else {
struct bkey_cached * ck = ( void * ) path - > l [ 0 ] . b ;
EBUG_ON ( path - > btree_id ! = ck - > key . btree_id | |
bkey_cmp ( path - > pos , ck - > key . pos ) ) ;
/* BTREE_ITER_CACHED_NOFILL? */
if ( unlikely ( ! ck - > valid ) )
goto hole ;
k = bkey_i_to_s_c ( ck - > k ) ;
}
return k ;
hole :
bkey_init ( u ) ;
u - > p = path - > pos ;
return ( struct bkey_s_c ) { u , NULL } ;
}
/* Btree iterators: */
2021-03-23 21:22:50 -04:00
int __must_check
bch2_btree_iter_traverse ( struct btree_iter * iter )
{
2021-06-04 17:17:45 -04:00
int ret ;
2021-08-30 15:18:31 -04:00
iter - > path = btree_path_set_pos ( iter - > trans , iter - > path ,
btree_iter_search_key ( iter ) ,
iter - > flags & BTREE_ITER_INTENT ) ;
2021-03-23 21:22:50 -04:00
2021-08-30 15:18:31 -04:00
ret = bch2_btree_path_traverse ( iter - > trans , iter - > path , iter - > flags ) ;
2021-06-04 17:17:45 -04:00
if ( ret )
return ret ;
2021-08-30 15:18:31 -04:00
iter - > path - > should_be_locked = true ;
2021-06-04 17:17:45 -04:00
return 0 ;
2021-03-23 21:22:50 -04:00
}
2017-03-16 22:18:50 -08:00
/* Iterate across nodes (leaf and interior nodes) */
struct btree * bch2_btree_iter_peek_node ( struct btree_iter * iter )
{
2021-08-30 15:54:41 -04:00
struct btree * b = NULL ;
2017-03-16 22:18:50 -08:00
int ret ;
2021-08-30 15:18:31 -04:00
EBUG_ON ( iter - > path - > cached ) ;
2021-02-11 21:57:32 -05:00
bch2_btree_iter_verify ( iter ) ;
2017-03-16 22:18:50 -08:00
2021-08-30 15:18:31 -04:00
ret = bch2_btree_path_traverse ( iter - > trans , iter - > path , iter - > flags ) ;
2017-03-16 22:18:50 -08:00
if ( ret )
2021-08-30 15:54:41 -04:00
goto out ;
2017-03-16 22:18:50 -08:00
2021-08-30 15:18:31 -04:00
b = btree_path_node ( iter - > path , iter - > path - > level ) ;
2017-03-16 22:18:50 -08:00
if ( ! b )
2021-08-30 15:54:41 -04:00
goto out ;
2017-03-16 22:18:50 -08:00
2021-03-04 16:20:16 -05:00
BUG_ON ( bpos_cmp ( b - > key . k . p , iter - > pos ) < 0 ) ;
2017-03-16 22:18:50 -08:00
2021-08-30 15:54:41 -04:00
bkey_init ( & iter - > k ) ;
2021-08-30 15:18:31 -04:00
iter - > k . p = iter - > pos = b - > key . k . p ;
iter - > path - > should_be_locked = true ;
2021-08-30 15:54:41 -04:00
out :
bch2_btree_iter_verify_entry_exit ( iter ) ;
bch2_btree_iter_verify ( iter ) ;
2020-02-18 16:17:55 -05:00
2017-03-16 22:18:50 -08:00
return b ;
}
2020-02-18 16:17:55 -05:00
struct btree * bch2_btree_iter_next_node ( struct btree_iter * iter )
2017-03-16 22:18:50 -08:00
{
2021-08-30 15:18:31 -04:00
struct btree_trans * trans = iter - > trans ;
struct btree_path * path = iter - > path ;
2021-08-30 15:54:41 -04:00
struct btree * b = NULL ;
2017-03-16 22:18:50 -08:00
int ret ;
2021-08-30 15:18:31 -04:00
EBUG_ON ( iter - > path - > cached ) ;
2021-02-11 21:57:32 -05:00
bch2_btree_iter_verify ( iter ) ;
2017-03-16 22:18:50 -08:00
/* already got to end? */
2021-08-30 15:18:31 -04:00
if ( ! btree_path_node ( path , path - > level ) )
2021-08-30 15:54:41 -04:00
goto out ;
2017-03-16 22:18:50 -08:00
2021-08-30 15:18:31 -04:00
bch2_trans_cond_resched ( trans ) ;
2019-04-04 21:53:12 -04:00
2021-08-30 15:18:31 -04:00
btree_node_unlock ( path , path - > level ) ;
path - > l [ path - > level ] . b = BTREE_ITER_NO_NODE_UP ;
path - > level + + ;
2017-03-16 22:18:50 -08:00
2021-08-30 15:18:31 -04:00
btree_path_set_dirty ( path , BTREE_ITER_NEED_TRAVERSE ) ;
ret = bch2_btree_path_traverse ( trans , path , iter - > flags ) ;
2017-03-16 22:18:50 -08:00
if ( ret )
2021-08-30 15:54:41 -04:00
goto out ;
2017-03-16 22:18:50 -08:00
/* got to end? */
2021-08-30 15:18:31 -04:00
b = btree_path_node ( path , path - > level ) ;
2017-03-16 22:18:50 -08:00
if ( ! b )
2021-08-30 15:54:41 -04:00
goto out ;
2017-03-16 22:18:50 -08:00
2021-03-04 16:20:16 -05:00
if ( bpos_cmp ( iter - > pos , b - > key . k . p ) < 0 ) {
2017-03-16 22:18:50 -08:00
/*
* Haven ' t gotten to the end of the parent node : go back down to
* the next child node
*/
2021-08-30 15:18:31 -04:00
path = iter - > path =
btree_path_set_pos ( trans , path , bpos_successor ( iter - > pos ) ,
iter - > flags & BTREE_ITER_INTENT ) ;
2017-03-16 22:18:50 -08:00
2021-03-21 18:09:02 -04:00
/* Unlock to avoid screwing up our lock invariants: */
2021-08-30 15:18:31 -04:00
btree_node_unlock ( path , path - > level ) ;
2017-03-16 22:18:50 -08:00
2021-08-30 15:18:31 -04:00
path - > level = iter - > min_depth ;
btree_path_set_dirty ( path , BTREE_ITER_NEED_TRAVERSE ) ;
2021-03-21 18:09:02 -04:00
bch2_btree_iter_verify ( iter ) ;
2021-08-30 15:18:31 -04:00
ret = bch2_btree_path_traverse ( trans , path , iter - > flags ) ;
2021-08-30 15:54:41 -04:00
if ( ret ) {
b = NULL ;
goto out ;
}
2017-03-16 22:18:50 -08:00
2021-08-30 15:18:31 -04:00
b = path - > l [ path - > level ] . b ;
2017-03-16 22:18:50 -08:00
}
2021-08-30 15:54:41 -04:00
bkey_init ( & iter - > k ) ;
2021-08-30 15:18:31 -04:00
iter - > k . p = iter - > pos = b - > key . k . p ;
iter - > path - > should_be_locked = true ;
2021-08-30 15:54:41 -04:00
out :
bch2_btree_iter_verify_entry_exit ( iter ) ;
bch2_btree_iter_verify ( iter ) ;
2020-02-18 16:17:55 -05:00
2017-03-16 22:18:50 -08:00
return b ;
}
/* Iterate across keys (in leaf nodes only) */
2021-03-21 16:55:25 -04:00
inline bool bch2_btree_iter_advance ( struct btree_iter * iter )
2021-02-07 21:28:58 -05:00
{
2021-02-07 21:11:49 -05:00
struct bpos pos = iter - > k . p ;
2021-07-05 22:02:07 -04:00
bool ret = bpos_cmp ( pos , SPOS_MAX ) ! = 0 ;
2021-02-07 21:11:49 -05:00
2021-02-11 21:57:32 -05:00
if ( ret & & ! ( iter - > flags & BTREE_ITER_IS_EXTENTS ) )
2021-03-24 18:02:16 -04:00
pos = bkey_successor ( iter , pos ) ;
2021-02-07 21:11:49 -05:00
bch2_btree_iter_set_pos ( iter , pos ) ;
2021-02-11 21:57:32 -05:00
return ret ;
2021-02-07 21:11:49 -05:00
}
2021-03-21 16:55:25 -04:00
inline bool bch2_btree_iter_rewind ( struct btree_iter * iter )
2021-02-07 21:11:49 -05:00
{
struct bpos pos = bkey_start_pos ( & iter - > k ) ;
2021-07-14 23:35:11 -04:00
bool ret = ( iter - > flags & BTREE_ITER_ALL_SNAPSHOTS
? bpos_cmp ( pos , POS_MIN )
: bkey_cmp ( pos , POS_MIN ) ) ! = 0 ;
2021-02-07 21:11:49 -05:00
2021-02-11 21:57:32 -05:00
if ( ret & & ! ( iter - > flags & BTREE_ITER_IS_EXTENTS ) )
2021-03-24 18:02:16 -04:00
pos = bkey_predecessor ( iter , pos ) ;
2021-02-07 21:11:49 -05:00
bch2_btree_iter_set_pos ( iter , pos ) ;
2021-02-11 21:57:32 -05:00
return ret ;
2021-02-07 21:28:58 -05:00
}
2021-08-30 15:18:31 -04:00
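/*
 * Scan this transaction's pending updates for the first one in the same btree
 * at or after the iterator's position - used to overlay not-yet-committed
 * updates while iterating (BTREE_ITER_WITH_UPDATES):
 */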
struct bkey_i * __bch2_btree_trans_peek_updates ( struct btree_iter * iter )
2021-03-21 19:43:31 -04:00
{
struct btree_insert_entry * i ;
2021-06-04 00:29:49 -04:00
struct bkey_i * ret = NULL ;
2021-03-21 19:43:31 -04:00
2021-06-02 00:18:34 -04:00
trans_for_each_update ( iter - > trans , i ) {
2021-06-04 00:29:49 -04:00
if ( i - > btree_id < iter - > btree_id )
continue ;
if ( i - > btree_id > iter - > btree_id )
2021-03-21 19:43:31 -04:00
break ;
2021-08-30 15:18:31 -04:00
if ( bpos_cmp ( i - > k - > k . p , iter - > path - > pos ) < 0 )
2021-06-04 00:29:49 -04:00
continue ;
if ( ! ret | | bpos_cmp ( i - > k - > k . p , ret - > k . p ) < 0 )
ret = i - > k ;
}
2021-03-21 19:43:31 -04:00
2021-06-04 00:29:49 -04:00
return ret ;
}
/**
* bch2_btree_iter_peek : returns first key greater than or equal to iterator ' s
* current position
*/
struct bkey_s_c bch2_btree_iter_peek ( struct btree_iter * iter )
2017-03-16 22:18:50 -08:00
{
2021-08-30 15:18:31 -04:00
struct btree_trans * trans = iter - > trans ;
2021-03-21 19:43:31 -04:00
struct bpos search_key = btree_iter_search_key ( iter ) ;
2021-04-29 21:44:05 -04:00
struct bkey_i * next_update ;
2017-03-16 22:18:50 -08:00
struct bkey_s_c k ;
2021-08-30 15:18:31 -04:00
int ret , cmp ;
2017-03-16 22:18:50 -08:00
2021-08-30 15:18:31 -04:00
EBUG_ON ( iter - > path - > cached | | iter - > path - > level ) ;
2021-02-11 21:57:32 -05:00
bch2_btree_iter_verify ( iter ) ;
bch2_btree_iter_verify_entry_exit ( iter ) ;
2017-03-16 22:18:50 -08:00
while ( 1 ) {
2021-08-30 15:18:31 -04:00
iter - > path = btree_path_set_pos ( trans , iter - > path , search_key ,
iter - > flags & BTREE_ITER_INTENT ) ;
2021-08-07 18:19:33 -04:00
2021-08-30 15:18:31 -04:00
ret = bch2_btree_path_traverse ( trans , iter - > path , iter - > flags ) ;
2021-08-24 16:54:36 -04:00
if ( unlikely ( ret ) ) {
/* ensure that iter->k is consistent with iter->pos: */
bch2_btree_iter_set_pos ( iter , iter - > pos ) ;
k = bkey_s_c_err ( ret ) ;
goto out ;
}
2017-03-16 22:18:50 -08:00
2021-08-07 18:21:35 -04:00
next_update = btree_trans_peek_updates ( iter ) ;
2021-08-30 15:18:31 -04:00
k = btree_path_level_peek_all ( trans - > c , & iter - > path - > l [ 0 ] , & iter - > k ) ;
2021-08-24 16:54:36 -04:00
/* In the btree, deleted keys sort before non deleted: */
if ( k . k & & bkey_deleted ( k . k ) & &
( ! next_update | |
bpos_cmp ( k . k - > p , next_update - > k . p ) < = 0 ) ) {
search_key = k . k - > p ;
continue ;
}
2021-03-21 19:43:31 -04:00
if ( next_update & &
2021-08-24 16:54:36 -04:00
bpos_cmp ( next_update - > k . p ,
2021-08-30 15:18:31 -04:00
k . k ? k . k - > p : iter - > path - > l [ 0 ] . b - > key . k . p ) < = 0 ) {
2021-06-04 00:29:49 -04:00
iter - > k = next_update - > k ;
2021-03-21 19:43:31 -04:00
k = bkey_i_to_s_c ( next_update ) ;
2021-06-04 00:29:49 -04:00
}
2021-03-21 19:43:31 -04:00
if ( likely ( k . k ) ) {
2021-08-24 16:54:36 -04:00
if ( likely ( ! bkey_deleted ( k . k ) ) )
break ;
/* Advance to next key: */
search_key = bkey_successor ( iter , k . k - > p ) ;
2021-08-30 15:18:31 -04:00
} else if ( likely ( bpos_cmp ( iter - > path - > l [ 0 ] . b - > key . k . p , SPOS_MAX ) ) ) {
2021-08-24 16:54:36 -04:00
/* Advance to next leaf node: */
2021-08-30 15:18:31 -04:00
search_key = bpos_successor ( iter - > path - > l [ 0 ] . b - > key . k . p ) ;
2021-08-24 16:54:36 -04:00
} else {
/* End of btree: */
2021-08-07 18:19:33 -04:00
bch2_btree_iter_set_pos ( iter , SPOS_MAX ) ;
k = bkey_s_c_null ;
goto out ;
}
2017-03-16 22:18:50 -08:00
}
/*
2021-03-21 19:43:31 -04:00
* iter - > pos should be monotonically increasing , and always be equal to
* the key we just returned - except extents can straddle iter - > pos :
2017-03-16 22:18:50 -08:00
*/
2021-04-09 16:52:30 -04:00
if ( ! ( iter - > flags & BTREE_ITER_IS_EXTENTS ) )
iter - > pos = k . k - > p ;
else if ( bkey_cmp ( bkey_start_pos ( k . k ) , iter - > pos ) > 0 )
2017-03-16 22:18:50 -08:00
iter - > pos = bkey_start_pos ( k . k ) ;
2021-08-30 15:18:31 -04:00
cmp = bpos_cmp ( k . k - > p , iter - > path - > pos ) ;
if ( cmp ) {
iter - > path - > pos = k . k - > p ;
trans - > paths_sorted = false ;
}
2021-08-07 18:19:33 -04:00
out :
2021-08-30 15:18:31 -04:00
iter - > path - > should_be_locked = true ;
2021-02-11 21:57:32 -05:00
bch2_btree_iter_verify_entry_exit ( iter ) ;
bch2_btree_iter_verify ( iter ) ;
2017-03-16 22:18:50 -08:00
return k ;
}
2019-09-07 19:19:57 -04:00
/**
* bch2_btree_iter_next : returns first key greater than iterator ' s current
* position
*/
2017-03-16 22:18:50 -08:00
struct bkey_s_c bch2_btree_iter_next ( struct btree_iter * iter )
{
2021-03-21 16:55:25 -04:00
if ( ! bch2_btree_iter_advance ( iter ) )
2020-02-18 16:17:55 -05:00
return bkey_s_c_null ;
2019-09-07 19:19:57 -04:00
2020-02-18 16:17:55 -05:00
return bch2_btree_iter_peek ( iter ) ;
2017-03-16 22:18:50 -08:00
}
2019-09-07 17:17:21 -04:00
/**
* bch2_btree_iter_peek_prev : returns first key less than or equal to
* iterator ' s current position
*/
struct bkey_s_c bch2_btree_iter_peek_prev ( struct btree_iter * iter )
2017-03-16 22:18:50 -08:00
{
2021-08-30 15:18:31 -04:00
struct btree_trans * trans = iter - > trans ;
2021-08-07 18:19:33 -04:00
struct bpos search_key = iter - > pos ;
2017-03-16 22:18:50 -08:00
struct bkey_s_c k ;
int ret ;
2021-08-30 15:18:31 -04:00
EBUG_ON ( iter - > path - > cached | | iter - > path - > level ) ;
2021-06-04 00:29:49 -04:00
EBUG_ON ( iter - > flags & BTREE_ITER_WITH_UPDATES ) ;
2021-02-11 21:57:32 -05:00
bch2_btree_iter_verify ( iter ) ;
bch2_btree_iter_verify_entry_exit ( iter ) ;
2017-03-16 22:18:50 -08:00
while ( 1 ) {
2021-08-30 15:18:31 -04:00
iter - > path = btree_path_set_pos ( trans , iter - > path , search_key ,
iter - > flags & BTREE_ITER_INTENT ) ;
2021-08-07 18:19:33 -04:00
2021-08-30 15:18:31 -04:00
ret = bch2_btree_path_traverse ( trans , iter - > path , iter - > flags ) ;
2021-02-11 21:57:32 -05:00
if ( unlikely ( ret ) ) {
2021-08-24 16:54:36 -04:00
/* ensure that iter->k is consistent with iter->pos: */
bch2_btree_iter_set_pos ( iter , iter - > pos ) ;
2021-02-11 21:57:32 -05:00
k = bkey_s_c_err ( ret ) ;
2021-08-24 16:54:36 -04:00
goto out ;
2021-02-11 21:57:32 -05:00
}
2017-03-16 22:18:50 -08:00
2021-08-30 15:18:31 -04:00
k = btree_path_level_peek ( trans , iter - > path ,
& iter - > path - > l [ 0 ] , & iter - > k ) ;
2021-02-07 21:11:49 -05:00
if ( ! k . k | |
( ( iter - > flags & BTREE_ITER_IS_EXTENTS )
2021-03-04 22:40:41 -05:00
? bkey_cmp ( bkey_start_pos ( k . k ) , iter - > pos ) > = 0
2021-07-03 23:57:09 -04:00
: bkey_cmp ( k . k - > p , iter - > pos ) > 0 ) )
2021-08-30 15:18:31 -04:00
k = btree_path_level_prev ( trans , iter - > path ,
& iter - > path - > l [ 0 ] , & iter - > k ) ;
2019-09-07 17:17:21 -04:00
2021-08-24 16:54:36 -04:00
if ( likely ( k . k ) ) {
2017-03-16 22:18:50 -08:00
break ;
2021-08-30 15:18:31 -04:00
} else if ( likely ( bpos_cmp ( iter - > path - > l [ 0 ] . b - > data - > min_key , POS_MIN ) ) ) {
2021-08-24 16:54:36 -04:00
/* Advance to previous leaf node: */
2021-08-30 15:18:31 -04:00
search_key = bpos_predecessor ( iter - > path - > l [ 0 ] . b - > data - > min_key ) ;
2021-08-24 16:54:36 -04:00
} else {
/* Start of btree: */
2021-08-07 18:19:33 -04:00
bch2_btree_iter_set_pos ( iter , POS_MIN ) ;
2021-02-11 21:57:32 -05:00
k = bkey_s_c_null ;
2021-08-24 16:54:36 -04:00
goto out ;
2021-02-11 21:57:32 -05:00
}
2019-09-07 17:17:21 -04:00
}
2017-03-16 22:18:50 -08:00
2021-03-04 22:40:41 -05:00
EBUG_ON ( bkey_cmp ( bkey_start_pos ( k . k ) , iter - > pos ) > 0 ) ;
2021-02-07 21:11:49 -05:00
/* Extents can straddle iter->pos: */
2021-03-04 22:40:41 -05:00
if ( bkey_cmp ( k . k - > p , iter - > pos ) < 0 )
2021-02-07 21:11:49 -05:00
iter - > pos = k . k - > p ;
2021-02-11 21:57:32 -05:00
out :
2021-08-30 15:18:31 -04:00
iter - > path - > should_be_locked = true ;
2021-02-11 21:57:32 -05:00
bch2_btree_iter_verify_entry_exit ( iter ) ;
bch2_btree_iter_verify ( iter ) ;
2021-08-30 15:18:31 -04:00
2017-03-16 22:18:50 -08:00
return k ;
}
2019-09-07 17:17:21 -04:00
/**
* bch2_btree_iter_prev : returns first key less than iterator ' s current
* position
*/
struct bkey_s_c bch2_btree_iter_prev ( struct btree_iter * iter )
{
2021-03-21 16:55:25 -04:00
if ( ! bch2_btree_iter_rewind ( iter ) )
2020-02-18 16:17:55 -05:00
return bkey_s_c_null ;
2019-09-07 17:17:21 -04:00
2020-02-18 16:17:55 -05:00
return bch2_btree_iter_peek_prev ( iter ) ;
2019-09-07 17:17:21 -04:00
}
2020-03-13 21:41:22 -04:00
struct bkey_s_c bch2_btree_iter_peek_slot ( struct btree_iter * iter )
2016-07-21 19:05:06 -08:00
{
2021-08-24 21:30:06 -04:00
struct btree_trans * trans = iter - > trans ;
2021-06-10 20:15:50 -04:00
struct bpos search_key ;
2016-07-21 19:05:06 -08:00
struct bkey_s_c k ;
2020-03-13 21:41:22 -04:00
int ret ;
2021-08-30 15:18:31 -04:00
EBUG_ON ( iter - > path - > level ) ;
2021-02-11 21:57:32 -05:00
bch2_btree_iter_verify ( iter ) ;
bch2_btree_iter_verify_entry_exit ( iter ) ;
2022-12-30 19:15:53 -05:00
/* extents can't span inode numbers: */
if ( ( iter - > flags & BTREE_ITER_IS_EXTENTS ) & &
2021-06-10 20:15:50 -04:00
unlikely ( iter - > pos . offset = = KEY_OFFSET_MAX ) ) {
2022-12-30 19:15:53 -05:00
if ( iter - > pos . inode = = KEY_INODE_MAX )
return bkey_s_c_null ;
2020-03-13 21:41:22 -04:00
2022-12-30 19:15:53 -05:00
bch2_btree_iter_set_pos ( iter , bpos_nosnap_successor ( iter - > pos ) ) ;
}
2021-02-10 16:13:57 -05:00
2021-06-10 20:15:50 -04:00
search_key = btree_iter_search_key ( iter ) ;
2021-08-30 15:18:31 -04:00
iter - > path = btree_path_set_pos ( trans , iter - > path , search_key ,
iter - > flags & BTREE_ITER_INTENT ) ;
2021-06-10 20:15:50 -04:00
2021-08-30 15:18:31 -04:00
ret = bch2_btree_path_traverse ( trans , iter - > path , iter - > flags ) ;
2020-03-13 21:41:22 -04:00
if ( unlikely ( ret ) )
return bkey_s_c_err ( ret ) ;
2017-03-16 22:18:50 -08:00
2021-08-30 15:54:41 -04:00
if ( ! ( iter - > flags & BTREE_ITER_IS_EXTENTS ) ) {
2021-06-10 20:15:50 -04:00
struct bkey_i * next_update ;
2017-03-16 22:18:50 -08:00
2021-08-07 18:21:35 -04:00
next_update = btree_trans_peek_updates ( iter ) ;
2022-12-30 19:15:53 -05:00
if ( next_update & &
2021-08-30 15:18:31 -04:00
! bpos_cmp ( next_update - > k . p , iter - > pos ) ) {
2022-12-30 19:15:53 -05:00
iter - > k = next_update - > k ;
k = bkey_i_to_s_c ( next_update ) ;
2021-08-30 15:18:31 -04:00
} else {
k = bch2_btree_path_peek_slot ( iter - > path , & iter - > k ) ;
2022-12-30 19:15:53 -05:00
}
if ( ! k . k | |
( ( iter - > flags & BTREE_ITER_ALL_SNAPSHOTS )
? bpos_cmp ( iter - > pos , k . k - > p )
: bkey_cmp ( iter - > pos , k . k - > p ) ) ) {
bkey_init ( & iter - > k ) ;
iter - > k . p = iter - > pos ;
k = ( struct bkey_s_c ) { & iter - > k , NULL } ;
}
} else {
struct bpos next ;
2022-12-30 19:15:53 -05:00
if ( iter - > flags & BTREE_ITER_INTENT ) {
2021-08-30 15:18:31 -04:00
struct btree_iter iter2 ;
2022-12-30 19:15:53 -05:00
2021-08-30 15:18:31 -04:00
bch2_trans_copy_iter ( & iter2 , iter ) ;
k = bch2_btree_iter_peek ( & iter2 ) ;
2022-12-30 19:15:53 -05:00
2021-08-30 15:18:31 -04:00
if ( k . k & & ! bkey_err ( k ) ) {
iter - > k = iter2 . k ;
k . k = & iter - > k ;
}
bch2_trans_iter_exit ( trans , & iter2 ) ;
2022-12-30 19:15:53 -05:00
} else {
struct bpos pos = iter - > pos ;
k = bch2_btree_iter_peek ( iter ) ;
iter - > pos = pos ;
}
2022-12-30 19:15:53 -05:00
if ( unlikely ( bkey_err ( k ) ) )
return k ;
next = k . k ? bkey_start_pos ( k . k ) : POS_MAX ;
if ( bkey_cmp ( iter - > pos , next ) < 0 ) {
bkey_init ( & iter - > k ) ;
iter - > k . p = iter - > pos ;
bch2_key_resize ( & iter - > k ,
min_t ( u64 , KEY_SIZE_MAX ,
( next . inode = = iter - > pos . inode
? next . offset
: KEY_OFFSET_MAX ) -
iter - > pos . offset ) ) ;
k = ( struct bkey_s_c ) { & iter - > k , NULL } ;
EBUG_ON ( ! k . k - > size ) ;
}
2016-07-21 19:05:06 -08:00
}
2019-08-17 15:17:09 -04:00
2021-08-30 15:18:31 -04:00
iter - > path - > should_be_locked = true ;
2021-02-11 21:57:32 -05:00
bch2_btree_iter_verify_entry_exit ( iter ) ;
bch2_btree_iter_verify ( iter ) ;
2021-06-04 17:17:45 -04:00
2019-08-17 15:17:09 -04:00
return k ;
2017-03-16 22:18:50 -08:00
}
struct bkey_s_c bch2_btree_iter_next_slot ( struct btree_iter * iter )
{
2021-03-21 16:55:25 -04:00
if ( ! bch2_btree_iter_advance ( iter ) )
2020-02-18 16:17:55 -05:00
return bkey_s_c_null ;
2017-03-16 22:18:50 -08:00
2020-02-18 16:17:55 -05:00
return bch2_btree_iter_peek_slot ( iter ) ;
2017-03-16 22:18:50 -08:00
}
2021-03-02 22:45:28 -05:00
struct bkey_s_c bch2_btree_iter_prev_slot ( struct btree_iter * iter )
{
2021-03-21 16:55:25 -04:00
if ( ! bch2_btree_iter_rewind ( iter ) )
2021-03-02 22:45:28 -05:00
return bkey_s_c_null ;
return bch2_btree_iter_peek_slot ( iter ) ;
}
2017-03-16 22:18:50 -08:00
/* new transactional stuff: */
2021-06-12 15:45:45 -04:00
# ifdef CONFIG_BCACHEFS_DEBUG
static void btree_trans_verify_sorted_refs ( struct btree_trans * trans )
{
2021-08-30 15:18:31 -04:00
struct btree_path * path ;
2021-06-12 15:45:45 -04:00
unsigned i ;
2021-08-30 15:18:31 -04:00
BUG_ON ( trans - > nr_sorted ! = hweight64 ( trans - > paths_allocated ) ) ;
2021-06-12 15:45:45 -04:00
2021-08-30 15:18:31 -04:00
trans_for_each_path ( trans , path ) {
BUG_ON ( path - > sorted_idx > = trans - > nr_sorted ) ;
BUG_ON ( trans - > sorted [ path - > sorted_idx ] ! = path - > idx ) ;
2021-06-12 15:45:45 -04:00
}
for ( i = 0 ; i < trans - > nr_sorted ; i + + ) {
unsigned idx = trans - > sorted [ i ] ;
2021-08-30 15:18:31 -04:00
EBUG_ON ( ! ( trans - > paths_allocated & ( 1ULL < < idx ) ) ) ;
BUG_ON ( trans - > paths [ idx ] . sorted_idx ! = i ) ;
2021-06-12 15:45:45 -04:00
}
}
static void btree_trans_verify_sorted ( struct btree_trans * trans )
{
2021-08-30 15:18:31 -04:00
struct btree_path * path , * prev = NULL ;
2021-06-12 15:45:45 -04:00
unsigned i ;
2021-08-30 15:18:31 -04:00
trans_for_each_path_inorder ( trans , path , i ) {
BUG_ON ( prev & & btree_path_cmp ( prev , path ) > 0 ) ;
prev = path ;
2021-06-12 15:45:45 -04:00
}
}
2021-09-03 17:18:57 -04:00
# else
static inline void btree_trans_verify_sorted_refs ( struct btree_trans * trans ) { }
static inline void btree_trans_verify_sorted ( struct btree_trans * trans ) { }
# endif
2021-06-12 15:45:45 -04:00
2021-09-03 17:18:57 -04:00
void __bch2_btree_trans_sort_paths ( struct btree_trans * trans )
2021-06-12 15:45:45 -04:00
{
int i , l = 0 , r = trans - > nr_sorted , inc = 1 ;
bool swapped ;
2021-09-03 17:18:57 -04:00
btree_trans_verify_sorted_refs ( trans ) ;
if ( trans - > paths_sorted )
goto out ;
2021-06-12 15:45:45 -04:00
/*
* Cocktail shaker sort : this is efficient because iterators will be
* mostly sorted .
*/
do {
swapped = false ;
for ( i = inc > 0 ? l : r - 2 ;
i + 1 < r & & i > = l ;
i + = inc ) {
2021-08-30 15:18:31 -04:00
if ( btree_path_cmp ( trans - > paths + trans - > sorted [ i ] ,
trans - > paths + trans - > sorted [ i + 1 ] ) > 0 ) {
2021-06-12 15:45:45 -04:00
swap ( trans - > sorted [ i ] , trans - > sorted [ i + 1 ] ) ;
2021-08-30 15:18:31 -04:00
trans - > paths [ trans - > sorted [ i ] ] . sorted_idx = i ;
trans - > paths [ trans - > sorted [ i + 1 ] ] . sorted_idx = i + 1 ;
2021-06-12 15:45:45 -04:00
swapped = true ;
}
}
if ( inc > 0 )
- - r ;
else
l + + ;
inc = - inc ;
} while ( swapped ) ;
2021-08-30 15:18:31 -04:00
trans - > paths_sorted = true ;
2021-09-03 17:18:57 -04:00
out :
2021-06-12 15:45:45 -04:00
btree_trans_verify_sorted ( trans ) ;
}
2021-08-30 15:18:31 -04:00
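/*
 * Keep trans->sorted - the array of path indices in sorted order - in sync as
 * paths are freed and allocated:
 */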
static inline void btree_path_list_remove ( struct btree_trans * trans ,
struct btree_path * path )
2021-06-12 15:45:45 -04:00
{
unsigned i ;
2021-08-30 15:18:31 -04:00
EBUG_ON ( path - > sorted_idx > = trans - > nr_sorted ) ;
2021-06-12 15:45:45 -04:00
# ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
trans - > nr_sorted - - ;
2021-08-30 15:18:31 -04:00
memmove_u64s_down_small ( trans - > sorted + path - > sorted_idx ,
trans - > sorted + path - > sorted_idx + 1 ,
DIV_ROUND_UP ( trans - > nr_sorted - path - > sorted_idx , 8 ) ) ;
2021-06-12 15:45:45 -04:00
# else
2021-08-30 15:18:31 -04:00
array_remove_item ( trans - > sorted , trans - > nr_sorted , path - > sorted_idx ) ;
2021-06-12 15:45:45 -04:00
# endif
2021-08-30 15:18:31 -04:00
for ( i = path - > sorted_idx ; i < trans - > nr_sorted ; i + + )
trans - > paths [ trans - > sorted [ i ] ] . sorted_idx = i ;
2021-06-12 15:45:45 -04:00
2021-08-30 15:18:31 -04:00
path - > sorted_idx = U8_MAX ;
2021-06-12 15:45:45 -04:00
}
2021-08-30 15:18:31 -04:00
static inline void btree_path_list_add ( struct btree_trans * trans ,
struct btree_path * pos ,
struct btree_path * path )
2021-06-12 15:45:45 -04:00
{
unsigned i ;
2021-09-03 17:18:57 -04:00
path - > sorted_idx = pos ? pos - > sorted_idx + 1 : trans - > nr_sorted ;
2021-06-12 15:45:45 -04:00
# ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
2021-08-30 15:18:31 -04:00
memmove_u64s_up_small ( trans - > sorted + path - > sorted_idx + 1 ,
trans - > sorted + path - > sorted_idx ,
DIV_ROUND_UP ( trans - > nr_sorted - path - > sorted_idx , 8 ) ) ;
2021-06-12 15:45:45 -04:00
trans - > nr_sorted + + ;
2021-08-30 15:18:31 -04:00
trans - > sorted [ path - > sorted_idx ] = path - > idx ;
2021-06-12 15:45:45 -04:00
# else
2021-08-30 15:18:31 -04:00
array_insert_item ( trans - > sorted , trans - > nr_sorted , path - > sorted_idx , path - > idx ) ;
2021-06-12 15:45:45 -04:00
# endif
2021-08-30 15:18:31 -04:00
for ( i = path - > sorted_idx ; i < trans - > nr_sorted ; i + + )
trans - > paths [ trans - > sorted [ i ] ] . sorted_idx = i ;
2019-09-26 22:21:39 -04:00
2021-06-12 15:45:45 -04:00
btree_trans_verify_sorted_refs ( trans ) ;
2019-03-25 22:43:26 -04:00
}
2021-08-30 15:18:31 -04:00
void bch2_trans_iter_exit ( struct btree_trans * trans , struct btree_iter * iter )
2019-09-26 22:21:39 -04:00
{
2021-08-30 15:18:31 -04:00
if ( iter - > path )
bch2_path_put ( trans , iter - > path ,
iter - > flags & BTREE_ITER_INTENT ) ;
iter - > path = NULL ;
2019-09-26 22:21:39 -04:00
}
2021-08-30 15:18:31 -04:00
static void __bch2_trans_iter_init ( struct btree_trans * trans ,
struct btree_iter * iter ,
enum btree_id btree_id , struct bpos pos ,
unsigned locks_want ,
unsigned depth ,
unsigned flags )
2017-03-16 22:18:50 -08:00
{
2021-07-25 17:19:52 -04:00
EBUG_ON ( trans - > restarted ) ;
2021-08-30 15:54:41 -04:00
if ( ! ( flags & ( BTREE_ITER_ALL_SNAPSHOTS | BTREE_ITER_NOT_EXTENTS ) ) & &
btree_node_type_is_extents ( btree_id ) )
2021-04-29 16:56:17 -04:00
flags | = BTREE_ITER_IS_EXTENTS ;
2017-03-16 22:18:50 -08:00
2021-08-30 15:54:41 -04:00
if ( ! btree_type_has_snapshots ( btree_id ) & &
! ( flags & __BTREE_ITER_ALL_SNAPSHOTS ) )
2021-03-24 18:02:16 -04:00
flags & = ~ BTREE_ITER_ALL_SNAPSHOTS ;
if ( ! ( flags & BTREE_ITER_ALL_SNAPSHOTS ) )
pos . snapshot = btree_type_has_snapshots ( btree_id )
? U32_MAX : 0 ;
2021-08-30 15:18:31 -04:00
iter - > trans = trans ;
iter - > path = NULL ;
iter - > btree_id = btree_id ;
iter - > min_depth = depth ;
2021-08-30 15:54:41 -04:00
iter - > flags = flags ;
iter - > snapshot = pos . snapshot ;
2021-08-30 15:18:31 -04:00
iter - > pos = pos ;
iter - > k . type = KEY_TYPE_deleted ;
iter - > k . p = pos ;
iter - > k . size = 0 ;
2021-03-24 18:02:16 -04:00
2021-08-30 15:18:31 -04:00
iter - > path = bch2_path_get ( trans ,
flags & BTREE_ITER_CACHED ,
btree_id ,
btree_iter_search_key ( iter ) ,
locks_want ,
depth ,
flags & BTREE_ITER_INTENT ) ;
2017-03-16 22:18:50 -08:00
}
2021-08-30 15:18:31 -04:00
void bch2_trans_iter_init ( struct btree_trans * trans ,
struct btree_iter * iter ,
unsigned btree_id , struct bpos pos ,
unsigned flags )
2019-03-25 15:10:15 -04:00
{
2021-08-30 15:18:31 -04:00
__bch2_trans_iter_init ( trans , iter , btree_id , pos ,
0 , 0 , flags ) ;
2019-03-25 15:10:15 -04:00
}
2021-08-30 15:18:31 -04:00
void bch2_trans_node_iter_init ( struct btree_trans * trans ,
struct btree_iter * iter ,
enum btree_id btree_id ,
struct bpos pos ,
unsigned locks_want ,
unsigned depth ,
unsigned flags )
2017-03-16 22:18:50 -08:00
{
2021-08-30 15:18:31 -04:00
__bch2_trans_iter_init ( trans , iter , btree_id , pos , locks_want , depth ,
BTREE_ITER_NOT_EXTENTS |
__BTREE_ITER_ALL_SNAPSHOTS |
BTREE_ITER_ALL_SNAPSHOTS |
flags ) ;
BUG_ON ( iter - > path - > locks_want < min ( locks_want , BTREE_MAX_DEPTH ) ) ;
BUG_ON ( iter - > path - > level ! = depth ) ;
BUG_ON ( iter - > min_depth ! = depth ) ;
}
2019-03-25 22:43:26 -04:00
2021-08-30 15:18:31 -04:00
void bch2_trans_copy_iter ( struct btree_iter * dst , struct btree_iter * src )
{
* dst = * src ;
if ( src - > path )
__btree_path_get ( src - > path , src - > flags & BTREE_ITER_INTENT ) ;
2017-03-16 22:18:50 -08:00
}
2021-04-15 12:50:09 -04:00
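/*
 * Bump allocator for memory that lives until the transaction is reset. If we
 * have to grow the buffer and allocations were already handed out from the
 * old one, the transaction is restarted and ERR_PTR(-EINTR) is returned:
 */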
void * bch2_trans_kmalloc ( struct btree_trans * trans , size_t size )
2017-03-16 22:18:50 -08:00
{
2021-04-15 12:50:09 -04:00
size_t new_top = trans - > mem_top + size ;
void * p ;
if ( new_top > trans - > mem_bytes ) {
2017-03-16 22:18:50 -08:00
size_t old_bytes = trans - > mem_bytes ;
2021-04-15 12:50:09 -04:00
size_t new_bytes = roundup_pow_of_two ( new_top ) ;
2021-04-24 00:09:06 -04:00
void * new_mem ;
WARN_ON_ONCE ( new_bytes > BTREE_TRANS_MEM_MAX ) ;
new_mem = krealloc ( trans - > mem , new_bytes , GFP_NOFS ) ;
if ( ! new_mem & & new_bytes < = BTREE_TRANS_MEM_MAX ) {
new_mem = mempool_alloc ( & trans - > c - > btree_trans_mem_pool , GFP_KERNEL ) ;
new_bytes = BTREE_TRANS_MEM_MAX ;
kfree ( trans - > mem ) ;
}
2017-03-16 22:18:50 -08:00
if ( ! new_mem )
2021-04-15 12:50:09 -04:00
return ERR_PTR ( - ENOMEM ) ;
2017-03-16 22:18:50 -08:00
trans - > mem = new_mem ;
trans - > mem_bytes = new_bytes ;
2018-07-12 23:30:45 -04:00
if ( old_bytes ) {
2021-04-15 12:50:09 -04:00
trace_trans_restart_mem_realloced ( trans - > ip , _RET_IP_ , new_bytes ) ;
2021-07-25 17:19:52 -04:00
btree_trans_restart ( trans ) ;
2021-04-15 12:50:09 -04:00
return ERR_PTR ( - EINTR ) ;
2018-07-12 23:30:45 -04:00
}
2017-03-16 22:18:50 -08:00
}
2019-05-15 10:54:43 -04:00
p = trans - > mem + trans - > mem_top ;
2017-03-16 22:18:50 -08:00
trans - > mem_top + = size ;
2021-06-07 16:50:30 -04:00
memset ( p , 0 , size ) ;
2019-05-15 10:54:43 -04:00
return p ;
2017-03-16 22:18:50 -08:00
}
2021-07-07 22:31:36 -04:00
/**
2021-07-24 23:57:28 -04:00
* bch2_trans_begin ( ) - reset a transaction after an interrupted attempt
2021-07-07 22:31:36 -04:00
* @ trans : transaction to reset
*
* While iterating over nodes or updating nodes , an attempt to lock a btree
* node may return EINTR when the trylock fails . When this occurs
2021-07-24 23:57:28 -04:00
* bch2_trans_begin ( ) should be called and the transaction retried .
2021-07-07 22:31:36 -04:00
*/
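/*
 * An illustrative retry loop - a sketch, not taken from any particular
 * caller; do_stuff() is a placeholder for the caller's btree work:
 *
 *	bch2_trans_init(&trans, c, 0, 0);
 *	do {
 *		bch2_trans_begin(&trans);
 *
 *		ret = do_stuff(&trans) ?:
 *		      bch2_trans_commit(&trans, NULL, NULL, 0);
 *	} while (ret == -EINTR);
 *	bch2_trans_exit(&trans);
 */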
2021-07-24 23:57:28 -04:00
void bch2_trans_begin ( struct btree_trans * trans )
2017-03-16 22:18:50 -08:00
{
2021-08-30 15:18:31 -04:00
struct btree_insert_entry * i ;
struct btree_path * path ;
2019-03-07 23:13:39 -05:00
2021-08-30 15:18:31 -04:00
trans_for_each_update ( trans , i )
__btree_path_put ( i - > path , true ) ;
2017-03-16 22:18:50 -08:00
2021-06-12 15:45:56 -04:00
trans - > extra_journal_res = 0 ;
2019-03-25 15:34:48 -04:00
trans - > nr_updates = 0 ;
2020-02-26 15:39:46 -05:00
trans - > mem_top = 0 ;
2019-03-28 00:07:24 -04:00
2021-02-03 21:51:56 -05:00
trans - > hooks = NULL ;
2020-05-25 19:29:48 -04:00
trans - > extra_journal_entries = NULL ;
trans - > extra_journal_entry_u64s = 0 ;
2019-12-30 12:43:19 -05:00
if ( trans - > fs_usage_deltas ) {
trans - > fs_usage_deltas - > used = 0 ;
memset ( ( void * ) trans - > fs_usage_deltas +
offsetof ( struct replicas_delta_list , memset_start ) , 0 ,
( void * ) & trans - > fs_usage_deltas - > memset_end -
( void * ) & trans - > fs_usage_deltas - > memset_start ) ;
}
2021-08-30 15:18:31 -04:00
trans_for_each_path ( trans , path ) {
/*
* XXX : we probably shouldn ' t be doing this if the transaction
* was restarted , but currently we still overflow transaction
* iterators if we do that
*/
if ( ! path - > ref & & ! path - > preserve )
__bch2_path_free ( trans , path ) ;
else
path - > preserve = path - > should_be_locked = false ;
}
2021-07-24 23:57:28 -04:00
bch2_trans_cond_resched ( trans ) ;
2021-03-19 20:29:11 -04:00
2021-07-24 23:57:28 -04:00
if ( trans - > restarted )
2021-08-30 15:18:31 -04:00
bch2_btree_path_traverse_all ( trans ) ;
2021-07-25 17:19:52 -04:00
trans - > restarted = false ;
2017-03-16 22:18:50 -08:00
}
2021-08-30 15:18:31 -04:00
static void bch2_trans_alloc_paths ( struct btree_trans * trans , struct bch_fs * c )
2020-11-05 20:02:01 -05:00
{
2021-08-30 15:18:31 -04:00
size_t paths_bytes = sizeof ( struct btree_path ) * BTREE_ITER_MAX ;
2020-12-01 23:11:53 -05:00
size_t updates_bytes = sizeof ( struct btree_insert_entry ) * BTREE_ITER_MAX ;
2020-11-16 18:20:50 -05:00
void * p = NULL ;
2020-11-05 20:02:01 -05:00
BUG_ON ( trans - > used_mempool ) ;
2020-11-16 18:20:50 -05:00
# ifdef __KERNEL__
2021-08-30 15:18:31 -04:00
p = this_cpu_xchg ( c - > btree_paths_bufs - > path , NULL ) ;
2020-11-16 18:20:50 -05:00
# endif
if ( ! p )
2021-08-30 15:18:31 -04:00
p = mempool_alloc ( & trans - > c - > btree_paths_pool , GFP_NOFS ) ;
2020-11-05 20:02:01 -05:00
2021-08-30 15:18:31 -04:00
trans - > paths = p ; p + = paths_bytes ;
2020-11-05 20:02:01 -05:00
trans - > updates = p ; p + = updates_bytes ;
}
2019-05-15 10:54:43 -04:00
void bch2_trans_init ( struct btree_trans * trans , struct bch_fs * c ,
unsigned expected_nr_iters ,
size_t expected_mem_bytes )
2021-05-23 17:04:13 -04:00
__acquires ( & c - > btree_trans_barrier )
2017-03-16 22:18:50 -08:00
{
2020-11-02 18:54:33 -05:00
memset ( trans , 0 , sizeof ( * trans ) ) ;
2017-03-16 22:18:50 -08:00
trans - > c = c ;
2019-04-23 00:10:08 -04:00
trans - > ip = _RET_IP_ ;
2019-05-15 10:54:43 -04:00
2021-08-30 15:18:31 -04:00
bch2_trans_alloc_paths ( trans , c ) ;
2019-05-15 10:54:43 -04:00
2020-11-15 20:52:55 -05:00
if ( expected_mem_bytes ) {
expected_mem_bytes = roundup_pow_of_two ( expected_mem_bytes ) ;
trans - > mem = kmalloc ( expected_mem_bytes , GFP_KERNEL ) ;
2021-04-24 00:09:06 -04:00
if ( unlikely ( ! trans - > mem ) ) {
trans - > mem = mempool_alloc ( & c - > btree_trans_mem_pool , GFP_KERNEL ) ;
trans - > mem_bytes = BTREE_TRANS_MEM_MAX ;
} else {
2020-11-15 20:52:55 -05:00
trans - > mem_bytes = expected_mem_bytes ;
2021-04-24 00:09:06 -04:00
}
2020-11-15 20:52:55 -05:00
}
2020-06-02 16:36:11 -04:00
2020-11-15 16:30:22 -05:00
trans - > srcu_idx = srcu_read_lock ( & c - > btree_trans_barrier ) ;
2020-06-02 16:36:11 -04:00
# ifdef CONFIG_BCACHEFS_DEBUG
2020-06-02 19:41:47 -04:00
trans - > pid = current - > pid ;
2020-06-02 16:36:11 -04:00
mutex_lock ( & c - > btree_trans_lock ) ;
list_add ( & trans - > list , & c - > btree_trans_list ) ;
mutex_unlock ( & c - > btree_trans_lock ) ;
# endif
2017-03-16 22:18:50 -08:00
}

static void check_btree_paths_leaked(struct btree_trans *trans)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct bch_fs *c = trans->c;
	struct btree_path *path;

	trans_for_each_path(trans, path)
		if (path->ref)
			goto leaked;
	return;
leaked:
	bch_err(c, "btree paths leaked from %pS!", (void *) trans->ip);
	trans_for_each_path(trans, path)
		if (path->ref)
			printk(KERN_ERR "  btree %s %pS\n",
			       bch2_btree_ids[path->btree_id],
			       (void *) path->ip_allocated);
	/* Be noisy about this: */
	bch2_fatal_error(c);
#endif
}

int bch2_trans_exit(struct btree_trans *trans)
	__releases(&c->btree_trans_barrier)
{
	struct btree_insert_entry *i;
	struct bch_fs *c = trans->c;

	bch2_trans_unlock(trans);

	trans_for_each_update(trans, i)
		__btree_path_put(i->path, true);
	trans->nr_updates = 0;

	check_btree_paths_leaked(trans);

#ifdef CONFIG_BCACHEFS_DEBUG
	mutex_lock(&c->btree_trans_lock);
	list_del(&trans->list);
	mutex_unlock(&c->btree_trans_lock);
#endif

	srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);

	bch2_journal_preres_put(&c->journal, &trans->journal_preres);

	if (trans->fs_usage_deltas) {
		if (trans->fs_usage_deltas->size + sizeof(trans->fs_usage_deltas) ==
		    REPLICAS_DELTA_LIST_MAX)
			mempool_free(trans->fs_usage_deltas,
				     &c->replicas_delta_pool);
		else
			kfree(trans->fs_usage_deltas);
	}

	if (trans->mem_bytes == BTREE_TRANS_MEM_MAX)
		mempool_free(trans->mem, &c->btree_trans_mem_pool);
	else
		kfree(trans->mem);

#ifdef __KERNEL__
	/*
	 * Userspace doesn't have a real percpu implementation:
	 */
	trans->paths = this_cpu_xchg(c->btree_paths_bufs->path, trans->paths);
#endif

	if (trans->paths)
		mempool_free(trans->paths, &c->btree_paths_pool);

	trans->mem	= (void *) 0x1;
	trans->paths	= (void *) 0x1;

	return trans->error ? -EIO : 0;
}
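
/*
 * Illustrative sketch, not part of the original file: the typical lifecycle
 * of a btree_trans as defined by the functions above -- bch2_trans_init()
 * pairs with bch2_trans_exit(), which returns -EIO if the transaction hit an
 * error.  The function name here is hypothetical, and the btree iterator and
 * update work that would normally happen in between is omitted.
 */
static int __maybe_unused example_trans_lifecycle(struct bch_fs *c)
{
	struct btree_trans trans;

	bch2_trans_init(&trans, c, 0, 0);

	/* ... btree iterator and update work would go here ... */

	return bch2_trans_exit(&trans);
}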

static void __maybe_unused
bch2_btree_path_node_to_text(struct printbuf *out,
			     struct btree_bkey_cached_common *_b,
			     bool cached)
{
	pr_buf(out, "    l=%u %s:",
	       _b->level, bch2_btree_ids[_b->btree_id]);
	bch2_bpos_to_text(out, btree_node_pos(_b, cached));
}

#ifdef CONFIG_BCACHEFS_DEBUG
static bool trans_has_locks(struct btree_trans *trans)
{
	struct btree_path *path;

	trans_for_each_path(trans, path)
		if (path->nodes_locked)
			return true;
	return false;
}
#endif

void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct btree_trans *trans;
	struct btree_path *path;
	struct btree *b;
	unsigned l;

	mutex_lock(&c->btree_trans_lock);
	list_for_each_entry(trans, &c->btree_trans_list, list) {
		if (!trans_has_locks(trans))
			continue;

		pr_buf(out, "%i %ps\n", trans->pid, (void *) trans->ip);

		trans_for_each_path(trans, path) {
			if (!path->nodes_locked)
				continue;

			pr_buf(out, "  path %u %c l=%u %s:",
			       path->idx,
			       path->cached ? 'c' : 'b',
			       path->level,
			       bch2_btree_ids[path->btree_id]);
			bch2_bpos_to_text(out, path->pos);
			pr_buf(out, "\n");

			for (l = 0; l < BTREE_MAX_DEPTH; l++) {
				if (btree_node_locked(path, l)) {
					pr_buf(out, "    %s l=%u ",
					       btree_node_intent_locked(path, l) ? "i" : "r", l);
					bch2_btree_path_node_to_text(out,
							(void *) path->l[l].b,
							path->cached);
					pr_buf(out, "\n");
				}
			}
		}

		b = READ_ONCE(trans->locking);
		if (b) {
			path = &trans->paths[trans->locking_path_idx];
			pr_buf(out, "  locking path %u %c l=%u %s:",
			       trans->locking_path_idx,
			       path->cached ? 'c' : 'b',
			       trans->locking_level,
			       bch2_btree_ids[trans->locking_btree_id]);
			bch2_bpos_to_text(out, trans->locking_pos);

			pr_buf(out, " node ");
			bch2_btree_path_node_to_text(out,
					(void *) b, path->cached);
			pr_buf(out, "\n");
		}
	}
	mutex_unlock(&c->btree_trans_lock);
#endif
}

void bch2_fs_btree_iter_exit(struct bch_fs *c)
{
	mempool_exit(&c->btree_trans_mem_pool);
	mempool_exit(&c->btree_paths_pool);
	cleanup_srcu_struct(&c->btree_trans_barrier);
}

int bch2_fs_btree_iter_init(struct bch_fs *c)
{
	unsigned nr = BTREE_ITER_MAX;

	INIT_LIST_HEAD(&c->btree_trans_list);
	mutex_init(&c->btree_trans_lock);

	return  init_srcu_struct(&c->btree_trans_barrier) ?:
		mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
					  sizeof(struct btree_path) * nr +
					  sizeof(struct btree_insert_entry) * nr) ?:
		mempool_init_kmalloc_pool(&c->btree_trans_mem_pool, 1,
					  BTREE_TRANS_MEM_MAX);
}