// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_methods.h"
#include "btree_cache.h"
#include "btree_iter.h"
#include "btree_locking.h"
#include "debug.h"
#include "extents.h"
#include "trace.h"

#include <linux/prefetch.h>

static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *,
						    struct btree_iter_level *,
						    struct bkey *);

#define BTREE_ITER_NO_NODE_GET_LOCKS	((struct btree *) 1)
#define BTREE_ITER_NO_NODE_DROP		((struct btree *) 2)
#define BTREE_ITER_NO_NODE_LOCK_ROOT	((struct btree *) 3)
#define BTREE_ITER_NO_NODE_UP		((struct btree *) 4)
#define BTREE_ITER_NO_NODE_DOWN		((struct btree *) 5)
#define BTREE_ITER_NO_NODE_INIT		((struct btree *) 6)
#define BTREE_ITER_NO_NODE_ERROR	((struct btree *) 7)

static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
{
	return l < BTREE_MAX_DEPTH &&
		(unsigned long) iter->l[l].b >= 128;
}
/* Returns < 0 if @k is before iter pos, > 0 if @k is after */
static inline int __btree_iter_pos_cmp(struct btree_iter *iter,
				       const struct btree *b,
				       const struct bkey_packed *k,
				       bool interior_node)
{
	int cmp = bkey_cmp_left_packed(b, k, &iter->pos);

	if (cmp)
		return cmp;
	if (bkey_deleted(k))
		return -1;

	/*
	 * Normally, for extents we want the first key strictly greater than
	 * the iterator position - with the exception that for interior nodes,
	 * we don't want to advance past the last key if the iterator position
	 * is POS_MAX:
	 */
	if (iter->flags & BTREE_ITER_IS_EXTENTS &&
	    (!interior_node ||
	     bkey_cmp_left_packed_byval(b, k, POS_MAX)))
		return -1;
	return 1;
}

static inline int btree_iter_pos_cmp(struct btree_iter *iter,
				     const struct btree *b,
				     const struct bkey_packed *k)
{
	return __btree_iter_pos_cmp(iter, b, k, b->c.level != 0);
}
/* Btree node locking: */

/*
 * Updates the saved lock sequence number, so that bch2_btree_node_relock() will
 * succeed:
 */
void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
{
	struct btree_iter *linked;

	EBUG_ON(iter->l[b->c.level].b != b);
	EBUG_ON(iter->l[b->c.level].lock_seq + 1 != b->c.lock.state.seq);

	trans_for_each_iter_with_node(iter->trans, b, linked)
		linked->l[b->c.level].lock_seq += 2;

	six_unlock_write(&b->c.lock);
}
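
/*
 * Lock @b for write: read locks held on @b by this transaction's other
 * iterators are temporarily subtracted from the lock's reader count (safe,
 * since we already hold an intent lock), then added back once the write lock
 * is taken:
 */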
void __bch2_btree_node_lock_write(struct btree *b, struct btree_iter *iter)
{
	struct btree_iter *linked;
	unsigned readers = 0;

	EBUG_ON(!btree_node_intent_locked(iter, b->c.level));

	trans_for_each_iter(iter->trans, linked)
		if (linked->l[b->c.level].b == b &&
		    btree_node_read_locked(linked, b->c.level))
			readers++;

	/*
	 * Must drop our read locks before calling six_lock_write() -
	 * six_unlock() won't do wakeups until the reader count
	 * goes to 0, and it's safe because we have the node intent
	 * locked:
	 */
	atomic64_sub(__SIX_VAL(read_lock, readers),
		     &b->c.lock.state.counter);
	btree_node_lock_type(iter->trans->c, b, SIX_LOCK_write);
	atomic64_add(__SIX_VAL(read_lock, readers),
		     &b->c.lock.state.counter);
}
bool __bch2_btree_node_relock(struct btree_iter *iter, unsigned level)
{
	struct btree *b = btree_iter_node(iter, level);
	int want = __btree_lock_want(iter, level);

	if (!is_btree_node(iter, level))
		return false;

	if (race_fault())
		return false;

	if (six_relock_type(&b->c.lock, want, iter->l[level].lock_seq) ||
	    (btree_node_lock_seq_matches(iter, b, level) &&
	     btree_node_lock_increment(iter, b, level, want))) {
		mark_btree_node_locked(iter, level, want);
		return true;
	} else {
		return false;
	}
}
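
/*
 * Try to upgrade the lock on @level to an intent lock - either by upgrading a
 * read lock we already hold, or by relocking against the saved lock sequence
 * number:
 */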
static bool bch2_btree_node_upgrade(struct btree_iter *iter, unsigned level)
{
	struct btree *b = iter->l[level].b;

	EBUG_ON(btree_lock_want(iter, level) != BTREE_NODE_INTENT_LOCKED);

	if (!is_btree_node(iter, level))
		return false;

	if (btree_node_intent_locked(iter, level))
		return true;

	if (race_fault())
		return false;

	if (btree_node_locked(iter, level)
	    ? six_lock_tryupgrade(&b->c.lock)
	    : six_relock_type(&b->c.lock, SIX_LOCK_intent, iter->l[level].lock_seq))
		goto success;

	if (btree_node_lock_seq_matches(iter, b, level) &&
	    btree_node_lock_increment(iter, b, level, BTREE_NODE_INTENT_LOCKED)) {
		btree_node_unlock(iter, level);
		goto success;
	}

	return false;
success:
	mark_btree_node_intent_locked(iter, level);
	return true;
}
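
/*
 * Relock (or, if @upgrade is set, upgrade to intent locks) each level from
 * iter->level up to iter->locks_want. On failure, locks below the level that
 * failed are dropped so the iterator has to be re-traversed from that level:
 */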
static inline bool btree_iter_get_locks(struct btree_iter *iter,
					bool upgrade, bool trace)
{
	unsigned l = iter->level;
	int fail_idx = -1;

	do {
		if (!btree_iter_node(iter, l))
			break;

		if (!(upgrade
		      ? bch2_btree_node_upgrade(iter, l)
		      : bch2_btree_node_relock(iter, l))) {
			if (trace)
				(upgrade
				 ? trace_node_upgrade_fail
				 : trace_node_relock_fail)(l, iter->l[l].lock_seq,
						is_btree_node(iter, l)
						? 0
						: (unsigned long) iter->l[l].b,
						is_btree_node(iter, l)
						? iter->l[l].b->c.lock.state.seq
						: 0);

			fail_idx = l;
			btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
		}

		l++;
	} while (l < iter->locks_want);

	/*
	 * When we fail to get a lock, we have to ensure that any child nodes
	 * can't be relocked, so that bch2_btree_iter_traverse() has to walk
	 * back up to the node that we failed to relock:
	 */
	while (fail_idx >= 0) {
		btree_node_unlock(iter, fail_idx);
		iter->l[fail_idx].b = BTREE_ITER_NO_NODE_GET_LOCKS;
		--fail_idx;
	}

	if (iter->uptodate == BTREE_ITER_NEED_RELOCK)
		iter->uptodate = BTREE_ITER_NEED_PEEK;

	bch2_btree_trans_verify_locks(iter->trans);

	return iter->uptodate < BTREE_ITER_NEED_RELOCK;
}
/* Slowpath: */
bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
			    unsigned level,
			    struct btree_iter *iter,
			    enum six_lock_type type)
{
	struct btree_iter *linked;
	bool ret = true;

	/* Check if it's safe to block: */
	trans_for_each_iter(iter->trans, linked) {
		if (!linked->nodes_locked)
			continue;

		/* Must lock btree nodes in key order: */
		if (__btree_iter_cmp(iter->btree_id, pos, linked) < 0)
			ret = false;

		/*
		 * Can't block taking an intent lock if we have _any_ nodes read
		 * locked:
		 *
		 * - Our read lock blocks another thread with an intent lock on
		 *   the same node from getting a write lock, and thus from
		 *   dropping its intent lock
		 *
		 * - And the other thread may have multiple nodes intent locked:
		 *   both the node we want to intent lock, and the node we
		 *   already have read locked - deadlock:
		 */
		if (type == SIX_LOCK_intent &&
		    linked->nodes_locked != linked->nodes_intent_locked) {
			if (!(iter->trans->nounlock)) {
				linked->locks_want = max_t(unsigned,
						linked->locks_want,
						__fls(linked->nodes_locked) + 1);
				btree_iter_get_locks(linked, true, false);
			}
			ret = false;
		}

		/*
		 * Interior nodes must be locked before their descendants: if
		 * another iterator has possible descendants locked of the node
		 * we're about to lock, it must have the ancestors locked too:
		 */
		if (linked->btree_id == iter->btree_id &&
		    level > __fls(linked->nodes_locked)) {
			if (!(iter->trans->nounlock)) {
				linked->locks_want =
					max(level + 1, max_t(unsigned,
					    linked->locks_want,
					    iter->locks_want));
				btree_iter_get_locks(linked, true, false);
			}
			ret = false;
		}
	}

	if (unlikely(!ret)) {
		trace_trans_restart_would_deadlock(iter->trans->ip);
		return false;
	}

	__btree_node_lock_type(iter->trans->c, b, type);
	return true;
}
/* Btree iterator locking: */

#ifdef CONFIG_BCACHEFS_DEBUG
void bch2_btree_iter_verify_locks(struct btree_iter *iter)
{
	unsigned l;

	for (l = 0; btree_iter_node(iter, l); l++) {
		if (iter->uptodate >= BTREE_ITER_NEED_RELOCK &&
		    !btree_node_locked(iter, l))
			continue;

		BUG_ON(btree_lock_want(iter, l) !=
		       btree_node_locked_type(iter, l));
	}
}

void bch2_btree_trans_verify_locks(struct btree_trans *trans)
{
	struct btree_iter *iter;

	trans_for_each_iter(trans, iter)
		bch2_btree_iter_verify_locks(iter);
}
#endif
__flatten
static bool bch2_btree_iter_relock(struct btree_iter *iter, bool trace)
{
	return iter->uptodate >= BTREE_ITER_NEED_RELOCK
		? btree_iter_get_locks(iter, false, trace)
		: true;
}

bool __bch2_btree_iter_upgrade(struct btree_iter *iter,
			       unsigned new_locks_want)
{
	struct btree_iter *linked;

	EBUG_ON(iter->locks_want >= new_locks_want);

	iter->locks_want = new_locks_want;

	if (btree_iter_get_locks(iter, true, true))
		return true;

	/*
	 * Ancestor nodes must be locked before child nodes, so set locks_want
	 * on iterators that might lock ancestors before us to avoid getting
	 * -EINTR later:
	 */
	trans_for_each_iter(iter->trans, linked)
		if (linked != iter &&
		    linked->btree_id == iter->btree_id &&
		    linked->locks_want < new_locks_want) {
			linked->locks_want = new_locks_want;
			btree_iter_get_locks(linked, true, false);
		}

	return false;
}
bool __bch2_btree_iter_upgrade_nounlock(struct btree_iter *iter,
					unsigned new_locks_want)
{
	unsigned l = iter->level;

	EBUG_ON(iter->locks_want >= new_locks_want);

	iter->locks_want = new_locks_want;

	do {
		if (!btree_iter_node(iter, l))
			break;

		if (!bch2_btree_node_upgrade(iter, l)) {
			iter->locks_want = l;
			return false;
		}

		l++;
	} while (l < iter->locks_want);

	return true;
}
void __bch2_btree_iter_downgrade(struct btree_iter *iter,
				 unsigned downgrade_to)
{
	struct btree_iter *linked;
	unsigned l;

	/*
	 * We downgrade linked iterators as well because btree_iter_upgrade
	 * might have had to modify locks_want on linked iterators due to lock
	 * ordering:
	 */
	trans_for_each_iter(iter->trans, linked) {
		unsigned new_locks_want = downgrade_to ?:
			(linked->flags & BTREE_ITER_INTENT ? 1 : 0);

		if (linked->locks_want <= new_locks_want)
			continue;

		linked->locks_want = new_locks_want;

		while (linked->nodes_locked &&
		       (l = __fls(linked->nodes_locked)) >= linked->locks_want) {
			if (l > linked->level) {
				btree_node_unlock(linked, l);
			} else {
				if (btree_node_intent_locked(linked, l)) {
					six_lock_downgrade(&linked->l[l].b->c.lock);
					linked->nodes_intent_locked ^= 1 << l;
				}
				break;
			}
		}
	}

	bch2_btree_trans_verify_locks(iter->trans);
}
/* Btree transaction locking: */

bool bch2_trans_relock(struct btree_trans *trans)
{
	struct btree_iter *iter;
	bool ret = true;

	trans_for_each_iter(trans, iter)
		if (iter->uptodate == BTREE_ITER_NEED_RELOCK)
			ret &= bch2_btree_iter_relock(iter, true);

	return ret;
}

void bch2_trans_unlock(struct btree_trans *trans)
{
	struct btree_iter *iter;

	trans_for_each_iter(trans, iter)
		__bch2_btree_iter_unlock(iter);
}
/* Btree iterator: */

#ifdef CONFIG_BCACHEFS_DEBUG

static void __bch2_btree_iter_verify(struct btree_iter *iter,
				     struct btree *b)
{
	struct btree_iter_level *l = &iter->l[b->c.level];
	struct btree_node_iter tmp = l->iter;
	struct bkey_packed *k;

	if (!debug_check_iterators(iter->trans->c))
		return;

	if (iter->uptodate > BTREE_ITER_NEED_PEEK)
		return;

	bch2_btree_node_iter_verify(&l->iter, b);

	/*
	 * For interior nodes, the iterator will have skipped past
	 * deleted keys:
	 *
	 * For extents, the iterator may have skipped past deleted keys (but not
	 * whiteouts)
	 */
	k = b->c.level || iter->flags & BTREE_ITER_IS_EXTENTS
		? bch2_btree_node_iter_prev_filter(&tmp, b, KEY_TYPE_discard)
		: bch2_btree_node_iter_prev_all(&tmp, b);
	if (k && btree_iter_pos_cmp(iter, b, k) > 0) {
		char buf[100];
		struct bkey uk = bkey_unpack_key(b, k);

		bch2_bkey_to_text(&PBUF(buf), &uk);
		panic("prev key should be before iter pos:\n%s\n%llu:%llu\n",
		      buf, iter->pos.inode, iter->pos.offset);
	}

	k = bch2_btree_node_iter_peek_all(&l->iter, b);
	if (k && btree_iter_pos_cmp(iter, b, k) < 0) {
		char buf[100];
		struct bkey uk = bkey_unpack_key(b, k);

		bch2_bkey_to_text(&PBUF(buf), &uk);
		panic("iter should be after current key:\n"
		      "iter pos %llu:%llu\n"
		      "cur key  %s\n",
		      iter->pos.inode, iter->pos.offset, buf);
	}

	BUG_ON(iter->uptodate == BTREE_ITER_UPTODATE &&
	       (iter->flags & BTREE_ITER_TYPE) == BTREE_ITER_KEYS &&
	       !bkey_whiteout(&iter->k) &&
	       bch2_btree_node_iter_end(&l->iter));
}

void bch2_btree_iter_verify(struct btree_iter *iter, struct btree *b)
{
	struct btree_iter *linked;

	if (!debug_check_iterators(iter->trans->c))
		return;

	trans_for_each_iter_with_node(iter->trans, b, linked)
		__bch2_btree_iter_verify(linked, b);
}

#else

static inline void __bch2_btree_iter_verify(struct btree_iter *iter,
					    struct btree *b) {}

#endif
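
/*
 * Repoint the node iterator's entry for bset @t at key @k, adding a new entry
 * for @t if the iterator doesn't have one yet:
 */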
static void btree_node_iter_set_set_pos(struct btree_node_iter *iter,
					struct btree *b,
					struct bset_tree *t,
					struct bkey_packed *k)
{
	struct btree_node_iter_set *set;

	btree_node_iter_for_each(iter, set)
		if (set->end == t->end_offset) {
			set->k = __btree_node_key_to_offset(b, k);
			bch2_btree_node_iter_sort(iter, b);
			return;
		}

	bch2_btree_node_iter_push(iter, b, k, btree_bkey_last(b, t));
}
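
/*
 * Fix up a single node iterator after keys in bset @t were modified at @where
 * (@clobber_u64s u64s replaced by @new_u64s): adjust stored offsets and
 * re-sort or rewind the iterator as needed:
 */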
static void __bch2_btree_node_iter_fix(struct btree_iter *iter,
				       struct btree *b,
				       struct btree_node_iter *node_iter,
				       struct bset_tree *t,
				       struct bkey_packed *where,
				       unsigned clobber_u64s,
				       unsigned new_u64s)
{
	const struct bkey_packed *end = btree_bkey_last(b, t);
	struct btree_node_iter_set *set;
	unsigned offset = __btree_node_key_to_offset(b, where);
	int shift = new_u64s - clobber_u64s;
	unsigned old_end = t->end_offset - shift;

	btree_node_iter_for_each(node_iter, set)
		if (set->end == old_end)
			goto found;

	/* didn't find the bset in the iterator - might have to readd it: */
	if (new_u64s &&
	    btree_iter_pos_cmp(iter, b, where) > 0) {
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);

		bch2_btree_node_iter_push(node_iter, b, where, end);

		if (!b->c.level &&
		    node_iter == &iter->l[0].iter)
			bkey_disassemble(b,
				bch2_btree_node_iter_peek_all(node_iter, b),
				&iter->k);
	}
	goto iter_current_key_not_modified;
found:
	set->end = t->end_offset;

	/* Iterator hasn't gotten to the key that changed yet: */
	if (set->k < offset)
		return;

	if (new_u64s &&
	    btree_iter_pos_cmp(iter, b, where) > 0) {
		set->k = offset;
	} else if (set->k < offset + clobber_u64s) {
		set->k = offset + new_u64s;
		if (set->k == set->end)
			bch2_btree_node_iter_set_drop(node_iter, set);
	} else {
		set->k = (int) set->k + shift;
		goto iter_current_key_not_modified;
	}

	btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);

	bch2_btree_node_iter_sort(node_iter, b);
	if (!b->c.level && node_iter == &iter->l[0].iter) {
		/*
		 * not legal to call bkey_debugcheck() here, because we're
		 * called midway through the update path after update has been
		 * marked but before deletes have actually happened:
		 */
#if 0
		__btree_iter_peek_all(iter, &iter->l[0], &iter->k);
#endif
		struct btree_iter_level *l = &iter->l[0];
		struct bkey_packed *k =
			bch2_btree_node_iter_peek_all(&l->iter, l->b);

		if (unlikely(!k))
			iter->k.type = KEY_TYPE_deleted;
		else
			bkey_disassemble(l->b, k, &iter->k);
	}
iter_current_key_not_modified:

	/*
	 * When a new key is added, and the node iterator now points to that
	 * key, the iterator might have skipped past deleted keys that should
	 * come after the key the iterator now points to. We have to rewind to
	 * before those deleted keys - otherwise bch2_btree_node_iter_prev_all()
	 * breaks:
	 */
	if (!bch2_btree_node_iter_end(node_iter) &&
	    (b->c.level ||
	     (iter->flags & BTREE_ITER_IS_EXTENTS))) {
		struct bset_tree *t;
		struct bkey_packed *k, *k2, *p;

		k = bch2_btree_node_iter_peek_all(node_iter, b);

		for_each_bset(b, t) {
			bool set_pos = false;

			if (node_iter->data[0].end == t->end_offset)
				continue;

			k2 = bch2_btree_node_iter_bset_pos(node_iter, b, t);

			while ((p = bch2_bkey_prev_all(b, t, k2)) &&
			       bkey_iter_cmp(b, k, p) < 0) {
				k2 = p;
				set_pos = true;
			}

			if (set_pos)
				btree_node_iter_set_set_pos(node_iter,
							    b, t, k2);
		}
	}

	bch2_btree_node_iter_verify(node_iter, b);
}
void bch2_btree_node_iter_fix(struct btree_iter *iter,
			      struct btree *b,
			      struct btree_node_iter *node_iter,
			      struct bkey_packed *where,
			      unsigned clobber_u64s,
			      unsigned new_u64s)
{
	struct bset_tree *t = bch2_bkey_to_bset_inlined(b, where);
	struct btree_iter *linked;

	if (node_iter != &iter->l[b->c.level].iter)
		__bch2_btree_node_iter_fix(iter, b, node_iter, t,
					   where, clobber_u64s, new_u64s);

	trans_for_each_iter_with_node(iter->trans, b, linked)
		__bch2_btree_node_iter_fix(linked, b,
					   &linked->l[b->c.level].iter, t,
					   where, clobber_u64s, new_u64s);
}
static inline struct bkey_s_c __btree_iter_unpack(struct btree_iter *iter,
						  struct btree_iter_level *l,
						  struct bkey *u,
						  struct bkey_packed *k)
{
	struct bkey_s_c ret;

	if (unlikely(!k)) {
		/*
		 * signal to bch2_btree_iter_peek_slot() that we're currently at
		 * a hole
		 */
		u->type = KEY_TYPE_deleted;
		return bkey_s_c_null;
	}

	ret = bkey_disassemble(l->b, k, u);

	if (debug_check_bkeys(iter->trans->c))
		bch2_bkey_debugcheck(iter->trans->c, l->b, ret);

	return ret;
}

/* peek_all() doesn't skip deleted keys */
static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter,
						    struct btree_iter_level *l,
						    struct bkey *u)
{
	return __btree_iter_unpack(iter, l, u,
			bch2_btree_node_iter_peek_all(&l->iter, l->b));
}

static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter,
						struct btree_iter_level *l)
{
	return __btree_iter_unpack(iter, l, &iter->k,
			bch2_btree_node_iter_peek(&l->iter, l->b));
}
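
/*
 * Advance the node iterator to the first key at or after the iterator
 * position; returns false if more than @max_advance keys would have to be
 * skipped (pass a negative @max_advance for no limit):
 */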
static inline bool btree_iter_advance_to_pos(struct btree_iter *iter,
					     struct btree_iter_level *l,
					     int max_advance)
{
	struct bkey_packed *k;
	int nr_advanced = 0;

	while ((k = bch2_btree_node_iter_peek_all(&l->iter, l->b)) &&
	       btree_iter_pos_cmp(iter, l->b, k) < 0) {
		if (max_advance > 0 && nr_advanced >= max_advance)
			return false;

		bch2_btree_node_iter_advance(&l->iter, l->b);
		nr_advanced++;
	}

	return true;
}
/*
 * Verify that iterator for parent node points to child node:
 */
static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
{
	struct btree_iter_level *l;
	unsigned plevel;
	bool parent_locked;
	struct bkey_packed *k;

	if (!IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
		return;

	plevel = b->c.level + 1;
	if (!btree_iter_node(iter, plevel))
		return;

	parent_locked = btree_node_locked(iter, plevel);

	if (!bch2_btree_node_relock(iter, plevel))
		return;

	l = &iter->l[plevel];
	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
	if (!k ||
	    bkey_deleted(k) ||
	    bkey_cmp_left_packed(l->b, k, &b->key.k.p)) {
		char buf[100];
		struct bkey uk = bkey_unpack_key(b, k);

		bch2_bkey_to_text(&PBUF(buf), &uk);
		panic("parent iter doesn't point to new node:\n%s\n%llu:%llu\n",
		      buf, b->key.k.p.inode, b->key.k.p.offset);
	}

	if (!parent_locked)
		btree_node_unlock(iter, b->c.level + 1);
}

static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
					     struct btree *b)
{
	return __btree_iter_pos_cmp(iter, NULL,
				    bkey_to_packed(&b->key), true) < 0;
}

static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
					  struct btree *b)
{
	return iter->btree_id == b->c.btree_id &&
		bkey_cmp(iter->pos, b->data->min_key) >= 0 &&
		!btree_iter_pos_after_node(iter, b);
}
static inline void __btree_iter_init(struct btree_iter *iter,
				     unsigned level)
{
	struct btree_iter_level *l = &iter->l[level];

	bch2_btree_node_iter_init(&l->iter, l->b, &iter->pos);

	if (iter->flags & BTREE_ITER_IS_EXTENTS)
		btree_iter_advance_to_pos(iter, l, -1);

	/* Skip to first non whiteout: */
	if (level)
		bch2_btree_node_iter_peek(&l->iter, l->b);

	btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}

static inline void btree_iter_node_set(struct btree_iter *iter,
				       struct btree *b)
{
	btree_iter_verify_new_node(iter, b);

	EBUG_ON(!btree_iter_pos_in_node(iter, b));
	EBUG_ON(b->c.lock.state.seq & 1);

	iter->l[b->c.level].lock_seq = b->c.lock.state.seq;
	iter->l[b->c.level].b = b;
	__btree_iter_init(iter, b->c.level);
}
/*
 * A btree node is being replaced - update the iterator to point to the new
 * node:
 */
void bch2_btree_iter_node_replace(struct btree_iter *iter, struct btree *b)
{
	enum btree_node_locked_type t;
	struct btree_iter *linked;

	trans_for_each_iter(iter->trans, linked)
		if (btree_iter_pos_in_node(linked, b)) {
			/*
			 * bch2_btree_iter_node_drop() has already been called -
			 * the old node we're replacing has already been
			 * unlocked and the pointer invalidated
			 */
			BUG_ON(btree_node_locked(linked, b->c.level));

			t = btree_lock_want(linked, b->c.level);
			if (t != BTREE_NODE_UNLOCKED) {
				six_lock_increment(&b->c.lock, (enum six_lock_type) t);
				mark_btree_node_locked(linked, b->c.level, (enum six_lock_type) t);
			}

			btree_iter_node_set(linked, b);
		}

	six_unlock_intent(&b->c.lock);
}

void bch2_btree_iter_node_drop(struct btree_iter *iter, struct btree *b)
{
	struct btree_iter *linked;
	unsigned level = b->c.level;

	trans_for_each_iter(iter->trans, linked)
		if (linked->l[level].b == b) {
			__btree_node_unlock(linked, level);
			linked->l[level].b = BTREE_ITER_NO_NODE_DROP;
		}
}

/*
 * A btree node has been modified in such a way as to invalidate iterators - fix
 * them:
 */
void bch2_btree_iter_reinit_node(struct btree_iter *iter, struct btree *b)
{
	struct btree_iter *linked;

	trans_for_each_iter_with_node(iter->trans, b, linked)
		__btree_iter_init(linked, b->c.level);
}
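
/*
 * Lock the btree root, retrying if it's been replaced under us. Returns 0 on
 * success, 1 if the root is shallower than the depth we wanted, -EINTR if
 * locking failed:
 */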
static inline int btree_iter_lock_root(struct btree_iter *iter,
				       unsigned depth_want)
{
	struct bch_fs *c = iter->trans->c;
	struct btree *b;
	enum six_lock_type lock_type;
	unsigned i;

	EBUG_ON(iter->nodes_locked);

	while (1) {
		b = READ_ONCE(c->btree_roots[iter->btree_id].b);
		iter->level = READ_ONCE(b->c.level);

		if (unlikely(iter->level < depth_want)) {
			/*
			 * the root is at a lower depth than the depth we want:
			 * got to the end of the btree, or we're walking nodes
			 * greater than some depth and there are no nodes >=
			 * that depth
			 */
			iter->level = depth_want;
			for (i = iter->level; i < BTREE_MAX_DEPTH; i++)
				iter->l[i].b = NULL;
			return 1;
		}

		lock_type = __btree_lock_want(iter, iter->level);
		if (unlikely(!btree_node_lock(b, POS_MAX, iter->level,
					      iter, lock_type)))
			return -EINTR;

		if (likely(b == c->btree_roots[iter->btree_id].b &&
			   b->c.level == iter->level &&
			   !race_fault())) {
			for (i = 0; i < iter->level; i++)
				iter->l[i].b = BTREE_ITER_NO_NODE_LOCK_ROOT;
			iter->l[iter->level].b = b;
			for (i = iter->level + 1; i < BTREE_MAX_DEPTH; i++)
				iter->l[i].b = NULL;

			mark_btree_node_locked(iter, iter->level, lock_type);
			btree_iter_node_set(iter, b);
			return 0;
		}

		six_unlock_type(&b->c.lock, lock_type);
	}
}
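
/* Prefetch the next few child nodes pointed to by the current level's node: */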
noinline
static void btree_iter_prefetch(struct btree_iter *iter)
{
	struct bch_fs *c = iter->trans->c;
	struct btree_iter_level *l = &iter->l[iter->level];
	struct btree_node_iter node_iter = l->iter;
	struct bkey_packed *k;
	BKEY_PADDED(k) tmp;
	unsigned nr = test_bit(BCH_FS_STARTED, &c->flags)
		? (iter->level > 1 ? 0 :  2)
		: (iter->level > 1 ? 1 : 16);
	bool was_locked = btree_node_locked(iter, iter->level);

	while (nr--) {
		if (!bch2_btree_node_relock(iter, iter->level))
			return;

		bch2_btree_node_iter_advance(&node_iter, l->b);
		k = bch2_btree_node_iter_peek(&node_iter, l->b);
		if (!k)
			break;

		bch2_bkey_unpack(l->b, &tmp.k, k);
		bch2_btree_node_prefetch(c, iter, &tmp.k, iter->level - 1);
	}

	if (!was_locked)
		btree_node_unlock(iter, iter->level);
}
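
/*
 * Descend one level: look up the child node the current level's node iterator
 * points to, lock it, and make it the iterator's new current level:
 */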
static inline int btree_iter_down(struct btree_iter *iter)
{
	struct bch_fs *c = iter->trans->c;
	struct btree_iter_level *l = &iter->l[iter->level];
	struct btree *b;
	unsigned level = iter->level - 1;
	enum six_lock_type lock_type = __btree_lock_want(iter, level);
	BKEY_PADDED(k) tmp;

	BUG_ON(!btree_node_locked(iter, iter->level));

	bch2_bkey_unpack(l->b, &tmp.k,
			 bch2_btree_node_iter_peek(&l->iter, l->b));

	b = bch2_btree_node_get(c, iter, &tmp.k, level, lock_type);
	if (unlikely(IS_ERR(b)))
		return PTR_ERR(b);

	mark_btree_node_locked(iter, level, lock_type);
	btree_iter_node_set(iter, b);

	if (iter->flags & BTREE_ITER_PREFETCH)
		btree_iter_prefetch(iter);

	iter->level = level;
	return 0;
}

static void btree_iter_up(struct btree_iter *iter)
{
	btree_node_unlock(iter, iter->level++);
}

int __must_check __bch2_btree_iter_traverse(struct btree_iter *);
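
/*
 * Drop all locks, then re-traverse every iterator in the transaction in btree
 * order - this is how we recover when lock ordering forced us to give up
 * (-EINTR), or after -ENOMEM from the btree node cache (in which case the
 * cannibalize lock is taken first):
 */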
static int __btree_iter_traverse_all(struct btree_trans *trans,
				     struct btree_iter *orig_iter, int ret)
{
	struct bch_fs *c = trans->c;
	struct btree_iter *iter;
	u8 sorted[BTREE_ITER_MAX];
	unsigned i, nr_sorted = 0;

	trans_for_each_iter(trans, iter)
		sorted[nr_sorted++] = iter - trans->iters;

#define btree_iter_cmp_by_idx(_l, _r)				\
		btree_iter_cmp(&trans->iters[_l], &trans->iters[_r])

	bubble_sort(sorted, nr_sorted, btree_iter_cmp_by_idx);
#undef btree_iter_cmp_by_idx

retry_all:
	bch2_trans_unlock(trans);

	if (unlikely(ret == -ENOMEM)) {
		struct closure cl;

		closure_init_stack(&cl);

		do {
			ret = bch2_btree_cache_cannibalize_lock(c, &cl);
			closure_sync(&cl);
		} while (ret);
	}

	if (unlikely(ret == -EIO)) {
		trans->error = true;
		orig_iter->flags |= BTREE_ITER_ERROR;
		orig_iter->l[orig_iter->level].b = BTREE_ITER_NO_NODE_ERROR;
		goto out;
	}

	BUG_ON(ret && ret != -EINTR);

	/* Now, redo traversals in correct order: */
	for (i = 0; i < nr_sorted; i++) {
		iter = &trans->iters[sorted[i]];

		do {
			ret = __bch2_btree_iter_traverse(iter);
		} while (ret == -EINTR);

		if (ret)
			goto retry_all;
	}

	ret = hweight64(trans->iters_live) > 1 ? -EINTR : 0;
out:
	bch2_btree_cache_cannibalize_unlock(c);
	return ret;
}

int bch2_btree_iter_traverse_all(struct btree_trans *trans)
{
	return __btree_iter_traverse_all(trans, NULL, 0);
}
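
/*
 * Walk up from the iterator's current level, unlocking levels that can't be
 * relocked (or, if @check_pos is set, that no longer contain the iterator
 * position); returns the new level:
 */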
static unsigned btree_iter_up_until_locked(struct btree_iter *iter,
					   bool check_pos)
{
	unsigned l = iter->level;

	while (btree_iter_node(iter, l) &&
	       (!is_btree_node(iter, l) ||
		!bch2_btree_node_relock(iter, l) ||
		(check_pos &&
		 !btree_iter_pos_in_node(iter, iter->l[l].b)))) {
		btree_node_unlock(iter, l);
		iter->l[l].b = BTREE_ITER_NO_NODE_UP;
		l++;
	}

	return l;
}
/*
 * This is the main state machine for walking down the btree - walks down to a
 * specified depth
 *
 * Returns 0 on success, -EIO on error (error reading in a btree node).
 *
 * On error, caller (peek_node()/peek_key()) must return NULL; the error is
 * stashed in the iterator and returned from bch2_trans_exit().
 */
int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
{
	unsigned depth_want = iter->level;

	if (unlikely(iter->level >= BTREE_MAX_DEPTH))
		return 0;

	if (bch2_btree_iter_relock(iter, false))
		return 0;

	/*
	 * XXX: correctly using BTREE_ITER_UPTODATE should make using check_pos
	 * here unnecessary
	 */
	iter->level = btree_iter_up_until_locked(iter, true);

	/*
	 * If we've got a btree node locked (i.e. we aren't about to relock the
	 * root) - advance its node iterator if necessary:
	 *
	 * XXX correctly using BTREE_ITER_UPTODATE should make this unnecessary
	 */
	if (btree_iter_node(iter, iter->level))
		btree_iter_advance_to_pos(iter, &iter->l[iter->level], -1);

	/*
	 * Note: iter->nodes[iter->level] may be temporarily NULL here - that
	 * would indicate to other code that we got to the end of the btree,
	 * here it indicates that relocking the root failed - it's critical that
	 * btree_iter_lock_root() comes next and that it can't fail
	 */
	while (iter->level > depth_want) {
		int ret = btree_iter_node(iter, iter->level)
			? btree_iter_down(iter)
			: btree_iter_lock_root(iter, depth_want);
		if (unlikely(ret)) {
			if (ret == 1)
				return 0;

			iter->level = depth_want;
			iter->l[iter->level].b = BTREE_ITER_NO_NODE_DOWN;
			return ret;
		}
	}

	iter->uptodate = BTREE_ITER_NEED_PEEK;

	bch2_btree_trans_verify_locks(iter->trans);
	__bch2_btree_iter_verify(iter, iter->l[iter->level].b);
	return 0;
}

int __must_check bch2_btree_iter_traverse(struct btree_iter *iter)
{
	int ret;

	ret =   bch2_trans_cond_resched(iter->trans) ?:
		__bch2_btree_iter_traverse(iter);
	if (unlikely(ret))
		ret = __btree_iter_traverse_all(iter->trans, iter, ret);

	return ret;
}
static inline void bch2_btree_iter_checks(struct btree_iter *iter,
					  enum btree_iter_type type)
{
	EBUG_ON(iter->btree_id >= BTREE_ID_NR);
	EBUG_ON(!!(iter->flags & BTREE_ITER_IS_EXTENTS) !=
		(btree_node_type_is_extents(iter->btree_id) &&
		 type != BTREE_ITER_NODES));

	bch2_btree_trans_verify_locks(iter->trans);
}
/* Iterate across nodes (leaf and interior nodes) */

struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
{
	struct btree *b;
	int ret;

	bch2_btree_iter_checks(iter, BTREE_ITER_NODES);

	if (iter->uptodate == BTREE_ITER_UPTODATE)
		return iter->l[iter->level].b;

	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		return NULL;

	b = btree_iter_node(iter, iter->level);
	if (!b)
		return NULL;

	BUG_ON(bkey_cmp(b->key.k.p, iter->pos) < 0);

	iter->pos = b->key.k.p;
	iter->uptodate = BTREE_ITER_UPTODATE;

	return b;
}

struct btree *bch2_btree_iter_next_node(struct btree_iter *iter, unsigned depth)
{
	struct btree *b;
	int ret;

	bch2_btree_iter_checks(iter, BTREE_ITER_NODES);

	/* already got to end? */
	if (!btree_iter_node(iter, iter->level))
		return NULL;

	bch2_trans_cond_resched(iter->trans);

	btree_iter_up(iter);

	if (!bch2_btree_node_relock(iter, iter->level))
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK);

	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		return NULL;

	/* got to end? */
	b = btree_iter_node(iter, iter->level);
	if (!b)
		return NULL;

	if (bkey_cmp(iter->pos, b->key.k.p) < 0) {
		/*
		 * Haven't gotten to the end of the parent node: go back down to
		 * the next child node
		 */

		/*
		 * We don't really want to be unlocking here except we can't
		 * directly tell btree_iter_traverse() "traverse to this level"
		 * except by setting iter->level, so we have to unlock so we
		 * don't screw up our lock invariants:
		 */
		if (btree_node_read_locked(iter, iter->level))
			btree_node_unlock(iter, iter->level);

		/* ick: */
		iter->pos	= iter->btree_id == BTREE_ID_INODES
			? btree_type_successor(iter->btree_id, iter->pos)
			: bkey_successor(iter->pos);
		iter->level	= depth;

		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);

		ret = bch2_btree_iter_traverse(iter);
		if (ret)
			return NULL;

		b = iter->l[iter->level].b;
	}

	iter->pos = b->key.k.p;
	iter->uptodate = BTREE_ITER_UPTODATE;

	return b;
}
/* Iterate across keys (in leaf nodes only) */

void bch2_btree_iter_set_pos_same_leaf(struct btree_iter *iter, struct bpos new_pos)
{
	struct btree_iter_level *l = &iter->l[0];

	EBUG_ON(iter->level != 0);
	EBUG_ON(bkey_cmp(new_pos, iter->pos) < 0);
	EBUG_ON(!btree_node_locked(iter, 0));
	EBUG_ON(bkey_cmp(new_pos, l->b->key.k.p) > 0);

	iter->pos = new_pos;
	btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);

	btree_iter_advance_to_pos(iter, l, -1);

	if (bch2_btree_node_iter_end(&l->iter) &&
	    btree_iter_pos_after_node(iter, l->b))
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
}

void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
{
	int cmp = bkey_cmp(new_pos, iter->pos);
	unsigned level;

	if (!cmp)
		return;

	iter->pos = new_pos;

	level = btree_iter_up_until_locked(iter, true);

	if (btree_iter_node(iter, level)) {
		/*
		 * We might have to skip over many keys, or just a few: try
		 * advancing the node iterator, and if we have to skip over too
		 * many keys just reinit it (or if we're rewinding, since that
		 * is expensive).
		 */
		if (cmp < 0 ||
		    !btree_iter_advance_to_pos(iter, &iter->l[level], 8))
			__btree_iter_init(iter, level);

		/* Don't leave it locked if we're not supposed to: */
		if (btree_lock_want(iter, level) == BTREE_NODE_UNLOCKED)
			btree_node_unlock(iter, level);
	}

	if (level != iter->level)
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
	else
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);
}
static inline struct bkey_s_c btree_iter_peek_uptodate(struct btree_iter *iter)
{
	struct btree_iter_level *l = &iter->l[0];
	struct bkey_s_c ret = { .k = &iter->k };

	if (!bkey_deleted(&iter->k)) {
		EBUG_ON(bch2_btree_node_iter_end(&l->iter));
		ret.v = bkeyp_val(&l->b->format,
			__bch2_btree_node_iter_peek_all(&l->iter, l->b));
	}

	if (debug_check_bkeys(iter->trans->c) &&
	    !bkey_deleted(ret.k))
		bch2_bkey_debugcheck(iter->trans->c, l->b, ret);

	return ret;
}
struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
{
	struct btree_iter_level *l = &iter->l[0];
	struct bkey_s_c k;
	int ret;

	bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);

	if (iter->uptodate == BTREE_ITER_UPTODATE)
		return btree_iter_peek_uptodate(iter);

	while (1) {
		if (iter->uptodate >= BTREE_ITER_NEED_RELOCK) {
			ret = bch2_btree_iter_traverse(iter);
			if (unlikely(ret))
				return bkey_s_c_err(ret);
		}

		k = __btree_iter_peek(iter, l);
		if (likely(k.k))
			break;

		/* got to the end of the leaf, iterator needs to be traversed: */
		iter->pos	= l->b->key.k.p;
		iter->uptodate	= BTREE_ITER_NEED_TRAVERSE;

		if (!bkey_cmp(iter->pos, POS_MAX))
			return bkey_s_c_null;

		iter->pos = btree_type_successor(iter->btree_id, iter->pos);
	}

	/*
	 * iter->pos should always be equal to the key we just
	 * returned - except extents can straddle iter->pos:
	 */
	if (!(iter->flags & BTREE_ITER_IS_EXTENTS) ||
	    bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
		iter->pos = bkey_start_pos(k.k);

	iter->uptodate = BTREE_ITER_UPTODATE;
	return k;
}

static noinline
struct bkey_s_c bch2_btree_iter_peek_next_leaf(struct btree_iter *iter)
{
	struct btree_iter_level *l = &iter->l[0];

	iter->pos	= l->b->key.k.p;
	iter->uptodate	= BTREE_ITER_NEED_TRAVERSE;

	if (!bkey_cmp(iter->pos, POS_MAX))
		return bkey_s_c_null;

	iter->pos = btree_type_successor(iter->btree_id, iter->pos);

	return bch2_btree_iter_peek(iter);
}
struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
{
	struct btree_iter_level *l = &iter->l[0];
	struct bkey_packed *p;
	struct bkey_s_c k;

	bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);

	iter->pos = btree_type_successor(iter->btree_id, iter->k.p);

	if (unlikely(iter->uptodate != BTREE_ITER_UPTODATE)) {
		/*
		 * XXX: when we just need to relock we should be able to avoid
		 * calling traverse, but we need to kill BTREE_ITER_NEED_PEEK
		 * for that to work
		 */
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);

		return bch2_btree_iter_peek(iter);
	}

	do {
		bch2_btree_node_iter_advance(&l->iter, l->b);
		p = bch2_btree_node_iter_peek_all(&l->iter, l->b);
		if (unlikely(!p))
			return bch2_btree_iter_peek_next_leaf(iter);
	} while (bkey_whiteout(p));

	k = __btree_iter_unpack(iter, l, &iter->k, p);

	EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) < 0);
	iter->pos = bkey_start_pos(k.k);
	return k;
}

struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
{
	struct btree_iter_level *l = &iter->l[0];
	struct bkey_packed *p;
	struct bkey_s_c k;
	int ret;

	bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);

	if (unlikely(iter->uptodate != BTREE_ITER_UPTODATE)) {
		k = bch2_btree_iter_peek(iter);
		if (IS_ERR(k.k))
			return k;
	}

	while (1) {
		p = bch2_btree_node_iter_prev(&l->iter, l->b);
		if (likely(p))
			break;

		iter->pos = l->b->data->min_key;
		if (!bkey_cmp(iter->pos, POS_MIN))
			return bkey_s_c_null;

		bch2_btree_iter_set_pos(iter,
			btree_type_predecessor(iter->btree_id, iter->pos));

		ret = bch2_btree_iter_traverse(iter);
		if (unlikely(ret))
			return bkey_s_c_err(ret);

		p = bch2_btree_node_iter_peek(&l->iter, l->b);
		if (p)
			break;
	}

	k = __btree_iter_unpack(iter, l, &iter->k, p);

	EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);

	iter->pos = bkey_start_pos(k.k);
	iter->uptodate = BTREE_ITER_UPTODATE;
	return k;
}
static inline struct bkey_s_c
__bch2_btree_iter_peek_slot_extents(struct btree_iter *iter)
{
	struct btree_iter_level *l = &iter->l[0];
	struct btree_node_iter node_iter;
	struct bkey_s_c k;
	struct bkey n;
	int ret;

recheck:
	while ((k = __btree_iter_peek_all(iter, l, &iter->k)).k &&
	       bkey_cmp(k.k->p, iter->pos) <= 0)
		bch2_btree_node_iter_advance(&l->iter, l->b);

	/*
	 * iterator is now at the correct position for inserting at iter->pos,
	 * but we need to keep iterating until we find the first non whiteout so
	 * we know how big a hole we have, if any:
	 */

	node_iter = l->iter;
	if (k.k && bkey_whiteout(k.k))
		k = __btree_iter_unpack(iter, l, &iter->k,
			bch2_btree_node_iter_peek(&node_iter, l->b));

	/*
	 * If we got to the end of the node, check if we need to traverse to the
	 * next node:
	 */
	if (unlikely(!k.k && btree_iter_pos_after_node(iter, l->b))) {
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
		ret = bch2_btree_iter_traverse(iter);
		if (unlikely(ret))
			return bkey_s_c_err(ret);

		goto recheck;
	}

	if (k.k &&
	    !bkey_whiteout(k.k) &&
	    bkey_cmp(bkey_start_pos(k.k), iter->pos) <= 0) {
		/*
		 * if we skipped forward to find the first non whiteout and
		 * there _wasn't_ actually a hole, we want the iterator to be
		 * pointed at the key we found:
		 */
		l->iter = node_iter;

		EBUG_ON(bkey_cmp(k.k->p, iter->pos) < 0);
		EBUG_ON(bkey_deleted(k.k));
		iter->uptodate = BTREE_ITER_UPTODATE;

		__bch2_btree_iter_verify(iter, l->b);
		return k;
	}

	/* hole */

	/* holes can't span inode numbers: */
	if (iter->pos.offset == KEY_OFFSET_MAX) {
		if (iter->pos.inode == KEY_INODE_MAX)
			return bkey_s_c_null;

		iter->pos = bkey_successor(iter->pos);
		goto recheck;
	}

	if (!k.k)
		k.k = &l->b->key.k;

	bkey_init(&n);
	n.p = iter->pos;
	bch2_key_resize(&n,
			min_t(u64, KEY_SIZE_MAX,
			      (k.k->p.inode == n.p.inode
			       ? bkey_start_offset(k.k)
			       : KEY_OFFSET_MAX) -
			      n.p.offset));

	EBUG_ON(!n.size);

	iter->k	= n;
	iter->uptodate = BTREE_ITER_UPTODATE;

	__bch2_btree_iter_verify(iter, l->b);
	return (struct bkey_s_c) { &iter->k, NULL };
}
static inline struct bkey_s_c
__bch2_btree_iter_peek_slot(struct btree_iter *iter)
{
	struct btree_iter_level *l = &iter->l[0];
	struct bkey_s_c k;
	int ret;

	if (iter->flags & BTREE_ITER_IS_EXTENTS)
		return __bch2_btree_iter_peek_slot_extents(iter);

recheck:
	while ((k = __btree_iter_peek_all(iter, l, &iter->k)).k &&
	       bkey_deleted(k.k) &&
	       bkey_cmp(k.k->p, iter->pos) == 0)
		bch2_btree_node_iter_advance(&l->iter, l->b);

	/*
	 * If we got to the end of the node, check if we need to traverse to the
	 * next node:
	 */
	if (unlikely(!k.k && btree_iter_pos_after_node(iter, l->b))) {
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
		ret = bch2_btree_iter_traverse(iter);
		if (unlikely(ret))
			return bkey_s_c_err(ret);

		goto recheck;
	}

	if (!k.k ||
	    bkey_deleted(k.k) ||
	    bkey_cmp(iter->pos, k.k->p)) {
		/* hole */
		bkey_init(&iter->k);
		iter->k.p = iter->pos;
		k = (struct bkey_s_c) { &iter->k, NULL };
	}

	iter->uptodate = BTREE_ITER_UPTODATE;
	__bch2_btree_iter_verify(iter, l->b);
	return k;
}
struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
{
	int ret;

	bch2_btree_iter_checks(iter, BTREE_ITER_SLOTS);

	if (iter->uptodate == BTREE_ITER_UPTODATE)
		return btree_iter_peek_uptodate(iter);

	if (iter->uptodate >= BTREE_ITER_NEED_RELOCK) {
		ret = bch2_btree_iter_traverse(iter);
		if (unlikely(ret))
			return bkey_s_c_err(ret);
	}

	return __bch2_btree_iter_peek_slot(iter);
}

struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
{
	bch2_btree_iter_checks(iter, BTREE_ITER_SLOTS);

	iter->pos = btree_type_successor(iter->btree_id, iter->k.p);

	if (unlikely(iter->uptodate != BTREE_ITER_UPTODATE)) {
		/*
		 * XXX: when we just need to relock we should be able to avoid
		 * calling traverse, but we need to kill BTREE_ITER_NEED_PEEK
		 * for that to work
		 */
		btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);

		return bch2_btree_iter_peek_slot(iter);
	}

	if (!bkey_deleted(&iter->k))
		bch2_btree_node_iter_advance(&iter->l[0].iter, iter->l[0].b);

	btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);

	return __bch2_btree_iter_peek_slot(iter);
}
static inline void bch2_btree_iter_init(struct btree_trans *trans,
			struct btree_iter *iter, enum btree_id btree_id,
			struct bpos pos, unsigned flags)
{
	struct bch_fs *c = trans->c;
	unsigned i;

	if (btree_node_type_is_extents(btree_id) &&
	    !(flags & BTREE_ITER_NODES))
		flags |= BTREE_ITER_IS_EXTENTS;

	iter->trans			= trans;
	iter->pos			= pos;
	bkey_init(&iter->k);
	iter->k.p			= pos;
	iter->flags			= flags;
	iter->uptodate			= BTREE_ITER_NEED_TRAVERSE;
	iter->btree_id			= btree_id;
	iter->level			= 0;
	iter->locks_want		= flags & BTREE_ITER_INTENT ? 1 : 0;
	iter->nodes_locked		= 0;
	iter->nodes_intent_locked	= 0;
	for (i = 0; i < ARRAY_SIZE(iter->l); i++)
		iter->l[i].b		= NULL;
	iter->l[iter->level].b		= BTREE_ITER_NO_NODE_INIT;

	prefetch(c->btree_roots[btree_id].b);
}
/* new transactional stuff: */

int bch2_trans_iter_put(struct btree_trans *trans,
			struct btree_iter *iter)
{
	int ret = btree_iter_err(iter);

	trans->iters_live &= ~(1ULL << iter->idx);
	return ret;
}

static inline void __bch2_trans_iter_free(struct btree_trans *trans,
					  unsigned idx)
{
	__bch2_btree_iter_unlock(&trans->iters[idx]);
	trans->iters_linked		&= ~(1ULL << idx);
	trans->iters_live		&= ~(1ULL << idx);
	trans->iters_touched		&= ~(1ULL << idx);
	trans->iters_unlink_on_restart	&= ~(1ULL << idx);
	trans->iters_unlink_on_commit	&= ~(1ULL << idx);
}

int bch2_trans_iter_free(struct btree_trans *trans,
			 struct btree_iter *iter)
{
	int ret = btree_iter_err(iter);

	__bch2_trans_iter_free(trans, iter->idx);
	return ret;
}

int bch2_trans_iter_free_on_commit(struct btree_trans *trans,
				   struct btree_iter *iter)
{
	int ret = btree_iter_err(iter);

	trans->iters_unlink_on_commit |= 1ULL << iter->idx;
	return ret;
}
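
/*
 * Grow the transaction's iterator and update arrays to hold at least
 * @new_size iterators, falling back to the iterator mempool if the allocation
 * fails. Returns -EINTR if the transaction has live iterators, since pointers
 * to them have just been invalidated by the move:
 */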
static int bch2_trans_realloc_iters(struct btree_trans *trans,
				    unsigned new_size)
{
	void *new_iters, *new_updates;

	new_size = roundup_pow_of_two(new_size);

	BUG_ON(new_size > BTREE_ITER_MAX);

	if (new_size <= trans->size)
		return 0;

	BUG_ON(trans->used_mempool);

	bch2_trans_unlock(trans);

	new_iters = kmalloc(sizeof(struct btree_iter) * new_size +
			    sizeof(struct btree_insert_entry) * (new_size + 4),
			    GFP_NOFS);
	if (new_iters)
		goto success;

	new_iters = mempool_alloc(&trans->c->btree_iters_pool, GFP_NOFS);
	new_size = BTREE_ITER_MAX;

	trans->used_mempool = true;
success:
	new_updates = new_iters + sizeof(struct btree_iter) * new_size;

	memcpy(new_iters, trans->iters,
	       sizeof(struct btree_iter) * trans->nr_iters);
	memcpy(new_updates, trans->updates,
	       sizeof(struct btree_insert_entry) * trans->nr_updates);

	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
		memset(trans->iters, POISON_FREE,
		       sizeof(struct btree_iter) * trans->nr_iters +
		       sizeof(struct btree_insert_entry) * trans->nr_iters);

	if (trans->iters != trans->iters_onstack)
		kfree(trans->iters);

	trans->iters	= new_iters;
	trans->updates	= new_updates;
	trans->size	= new_size;

	if (trans->iters_live) {
		/*
		 * The iterator array moved while iterators were live, so any
		 * pointers callers hold into the old array are stale: force a
		 * transaction restart.
		 */
		trace_trans_restart_iters_realloced(trans->ip, trans->size);
		return -EINTR;
	}

	return 0;
}
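/*
 * Illustrative sketch (an assumption, not original code): any helper that may
 * allocate an additional iterator can trigger the reallocation above and get
 * -EINTR back; callers are expected to treat that as a transaction restart
 * rather than a hard error. Positions a and b are placeholders and must
 * differ, since getting the same live position twice would trip the
 * assertions in __btree_trans_get_iter().
 */
static int __maybe_unused example_get_two_iters(struct btree_trans *trans,
						enum btree_id id,
						struct bpos a, struct bpos b)
{
	struct btree_iter *i1, *i2;

	i1 = __bch2_trans_get_iter(trans, id, a, 0, 0);
	if (IS_ERR(i1))
		return PTR_ERR(i1);

	i2 = __bch2_trans_get_iter(trans, id, b, 0, 0);
	if (IS_ERR(i2))
		return PTR_ERR(i2);	/*
					 * -EINTR here covers the case where the
					 * iterator array had to grow while i1
					 * was live: i1 would be stale, so the
					 * whole transaction restarts instead.
					 */
	return 0;
}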
static int btree_trans_iter_alloc(struct btree_trans *trans)
{
	unsigned idx = __ffs64(~trans->iters_linked);

	if (idx < trans->nr_iters)
		goto got_slot;

	if (trans->nr_iters == trans->size) {
		int ret = bch2_trans_realloc_iters(trans, trans->size * 2);
		if (ret)
			return ret;
	}

	idx = trans->nr_iters++;
	BUG_ON(trans->nr_iters > trans->size);

	trans->iters[idx].idx = idx;
got_slot:
	BUG_ON(trans->iters_linked & (1ULL << idx));
	trans->iters_linked |= 1ULL << idx;
	return idx;
}
/*
 * Look for an existing iterator to reuse - matched by id if one was given,
 * otherwise by btree id and position - and only allocate a fresh slot if
 * nothing matches:
 */
static struct btree_iter *__btree_trans_get_iter(struct btree_trans *trans,
						 unsigned btree_id, struct bpos pos,
						 unsigned flags, u64 iter_id)
{
	struct btree_iter *iter;
	int idx;

	BUG_ON(trans->nr_iters > BTREE_ITER_MAX);

	for (idx = 0; idx < trans->nr_iters; idx++) {
		if (!(trans->iters_linked & (1ULL << idx)))
			continue;

		iter = &trans->iters[idx];
		if (iter_id
		    ? iter->id == iter_id
		    : (iter->btree_id == btree_id &&
		       !bkey_cmp(iter->pos, pos)))
			goto found;
	}
	idx = -1;
found:
	if (idx < 0) {
		idx = btree_trans_iter_alloc(trans);
		if (idx < 0)
			return ERR_PTR(idx);

		iter = &trans->iters[idx];
		iter->id = iter_id;

		bch2_btree_iter_init(trans, iter, btree_id, pos, flags);
	} else {
		iter = &trans->iters[idx];

		iter->flags &= ~(BTREE_ITER_INTENT|BTREE_ITER_PREFETCH);
		iter->flags |= flags & (BTREE_ITER_INTENT|BTREE_ITER_PREFETCH);

		if ((iter->flags & BTREE_ITER_INTENT) &&
		    !bch2_btree_iter_upgrade(iter, 1)) {
			trace_trans_restart_upgrade(trans->ip);
			return ERR_PTR(-EINTR);
		}
	}

	BUG_ON(iter->btree_id != btree_id);
	BUG_ON(trans->iters_live & (1ULL << idx));

	trans->iters_live	|= 1ULL << idx;
	trans->iters_touched	|= 1ULL << idx;

	BUG_ON(iter->btree_id != btree_id);
	BUG_ON((iter->flags ^ flags) & BTREE_ITER_TYPE);

	return iter;
}
struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
					 enum btree_id btree_id,
					 struct bpos pos, unsigned flags,
					 u64 iter_id)
{
	struct btree_iter *iter =
		__btree_trans_get_iter(trans, btree_id, pos, flags, iter_id);

	if (!IS_ERR(iter))
		bch2_btree_iter_set_pos(iter, pos);
	return iter;
}
struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *trans,
					    enum btree_id btree_id,
					    struct bpos pos,
					    unsigned locks_want,
					    unsigned depth,
					    unsigned flags)
{
	struct btree_iter *iter =
		__btree_trans_get_iter(trans, btree_id, pos,
				       flags|BTREE_ITER_NODES, 0);
	unsigned i;

	BUG_ON(IS_ERR(iter));
	BUG_ON(bkey_cmp(iter->pos, pos));

	iter->locks_want = locks_want;
	iter->level	 = depth;

	for (i = 0; i < ARRAY_SIZE(iter->l); i++)
		iter->l[i].b	= NULL;
	iter->l[iter->level].b	= BTREE_ITER_NO_NODE_INIT;

	return iter;
}
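/*
 * Illustrative sketch: requesting a node iterator that starts at depth 1 (the
 * first level of interior nodes) with intent locks on one level.
 * BTREE_ITER_NODES is added internally by bch2_trans_get_node_iter(); the
 * btree id and position are placeholders supplied by the caller.
 */
static struct btree_iter * __maybe_unused
example_interior_node_iter(struct btree_trans *trans, enum btree_id id,
			   struct bpos pos)
{
	return bch2_trans_get_node_iter(trans, id, pos,
					1,	/* locks_want */
					1,	/* depth: one level above the leaves */
					BTREE_ITER_INTENT);
}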
struct btree_iter *bch2_trans_copy_iter(struct btree_trans *trans,
					struct btree_iter *src)
{
	struct btree_iter *iter;
	unsigned offset = offsetof(struct btree_iter, trans);
	int i, idx;

	idx = btree_trans_iter_alloc(trans);
	if (idx < 0)
		return ERR_PTR(idx);

	trans->iters_live		|= 1ULL << idx;
	trans->iters_touched		|= 1ULL << idx;
	trans->iters_unlink_on_restart	|= 1ULL << idx;

	iter = &trans->iters[idx];

	memcpy((void *) iter + offset,
	       (void *) src + offset,
	       sizeof(*iter) - offset);

	/*
	 * The copy inherits src's btree node pointers, so take an extra
	 * reference on each lock the source holds:
	 */
	for (i = 0; i < BTREE_MAX_DEPTH; i++)
		if (btree_node_locked(iter, i))
			six_lock_increment(&iter->l[i].b->c.lock,
					   __btree_lock_want(iter, i));

	return &trans->iters[idx];
}
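/*
 * Illustrative sketch: bch2_trans_copy_iter() is useful for remembering a
 * position while the original iterator keeps advancing; the copy holds its
 * own lock references, so the two can be released independently.
 */
static int __maybe_unused example_save_position(struct btree_trans *trans,
						struct btree_iter *iter,
						struct btree_iter **saved)
{
	struct btree_iter *copy = bch2_trans_copy_iter(trans, iter);

	if (IS_ERR(copy))
		return PTR_ERR(copy);

	*saved = copy;
	return 0;
}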
static int bch2_trans_preload_mem(struct btree_trans *trans, size_t size)
{
	if (size > trans->mem_bytes) {
		size_t old_bytes = trans->mem_bytes;
		size_t new_bytes = roundup_pow_of_two(size);
		void *new_mem = krealloc(trans->mem, new_bytes, GFP_NOFS);

		if (!new_mem)
			return -ENOMEM;

		trans->mem = new_mem;
		trans->mem_bytes = new_bytes;

		if (old_bytes) {
			/*
			 * We already handed out memory from the old buffer,
			 * which krealloc() may have moved - those pointers are
			 * stale, so restart the transaction:
			 */
			trace_trans_restart_mem_realloced(trans->ip, new_bytes);
			return -EINTR;
		}
	}

	return 0;
}
void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
{
	void *p;
	int ret;

	ret = bch2_trans_preload_mem(trans, trans->mem_top + size);
	if (ret)
		return ERR_PTR(ret);

	p = trans->mem + trans->mem_top;
	trans->mem_top += size;
	return p;
}
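/*
 * Illustrative sketch: per-transaction scratch allocation. Memory handed out
 * by bch2_trans_kmalloc() lives until the transaction restarts (mem_top is
 * reset in bch2_trans_begin()) or is torn down in bch2_trans_exit(); there is
 * no per-allocation free. The struct below is purely hypothetical.
 */
struct example_scratch {	/* hypothetical type, for illustration only */
	u64	inum;
	u64	sectors;
};

static struct example_scratch * __maybe_unused
example_alloc_scratch(struct btree_trans *trans)
{
	struct example_scratch *s =
		bch2_trans_kmalloc(trans, sizeof(*s));

	if (IS_ERR(s))
		return s;

	s->inum		= 0;
	s->sectors	= 0;
	return s;
}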
inline void bch2_trans_unlink_iters(struct btree_trans *trans, u64 iters)
{
	iters &= trans->iters_linked;
	iters &= ~trans->iters_live;

	while (iters) {
		unsigned idx = __ffs64(iters);

		iters &= ~(1ULL << idx);
		__bch2_trans_iter_free(trans, idx);
	}
}
void bch2_trans_begin(struct btree_trans *trans)
{
	u64 iters_to_unlink;

	/*
	 * On transaction restart, the transaction isn't required to allocate
	 * all the same iterators it allocated on the last iteration:
	 *
	 * Unlink any iterators it didn't use this iteration, assuming it got
	 * further (allocated an iter with a higher idx) than where the iter
	 * was originally allocated:
	 */
	iters_to_unlink = ~trans->iters_live &
		((1ULL << fls64(trans->iters_live)) - 1);

	iters_to_unlink |= trans->iters_unlink_on_restart;
	iters_to_unlink |= trans->iters_unlink_on_commit;

	trans->iters_live = 0;

	bch2_trans_unlink_iters(trans, iters_to_unlink);

	trans->iters_touched		= 0;
	trans->iters_unlink_on_restart	= 0;
	trans->iters_unlink_on_commit	= 0;
	trans->nr_updates		= 0;
	trans->mem_top			= 0;

	bch2_btree_iter_traverse_all(trans);
}
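/*
 * Illustrative sketch of the restart pattern bch2_trans_begin() is designed
 * for: the loop body re-acquires its iterators every time through, and any
 * -EINTR (iterators or mem reallocated, lock restart, etc.) sends it back to
 * the top. The btree id, position and the "operate on k" body are
 * placeholders.
 */
static int __maybe_unused example_restart_loop(struct btree_trans *trans,
					       enum btree_id id, struct bpos pos)
{
	struct btree_iter *iter;
	struct bkey_s_c k;
	int ret;
retry:
	bch2_trans_begin(trans);

	iter = __bch2_trans_get_iter(trans, id, pos, 0, 0);
	if (IS_ERR(iter)) {
		ret = PTR_ERR(iter);
		goto err;
	}

	k = bch2_btree_iter_peek(iter);
	ret = btree_iter_err(iter);
	if (!ret && k.k) {
		/* ... operate on k here ... */
	}
err:
	if (ret == -EINTR)
		goto retry;
	return ret;
}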
void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
		     unsigned expected_nr_iters,
		     size_t expected_mem_bytes)
{
	memset(trans, 0, offsetof(struct btree_trans, iters_onstack));

	trans->c		= c;
	trans->ip		= _RET_IP_;
	trans->size		= ARRAY_SIZE(trans->iters_onstack);
	trans->iters		= trans->iters_onstack;
	trans->updates		= trans->updates_onstack;
	trans->fs_usage_deltas	= NULL;

	if (expected_nr_iters > trans->size)
		bch2_trans_realloc_iters(trans, expected_nr_iters);

	if (expected_mem_bytes)
		bch2_trans_preload_mem(trans, expected_mem_bytes);
}
int bch2_trans_exit(struct btree_trans *trans)
{
	bch2_trans_unlock(trans);

	kfree(trans->fs_usage_deltas);
	kfree(trans->mem);
	if (trans->used_mempool)
		mempool_free(trans->iters, &trans->c->btree_iters_pool);
	else if (trans->iters != trans->iters_onstack)
		kfree(trans->iters);

	/* Poison the pointers so use-after-exit blows up immediately: */
	trans->mem	= (void *) 0x1;
	trans->iters	= (void *) 0x1;

	return trans->error ? -EIO : 0;
}
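/*
 * Illustrative sketch of the full lifecycle: a stack-allocated btree_trans,
 * sizing hints passed to bch2_trans_init() so neither the iterator array nor
 * the scratch buffer has to grow (and restart the transaction) mid-flight,
 * and bch2_trans_exit() to drop locks and free everything. The hints (2
 * iterators, 256 bytes) are arbitrary placeholders.
 */
static int __maybe_unused example_trans_lifecycle(struct bch_fs *c)
{
	struct btree_trans trans;

	bch2_trans_init(&trans, c, 2, 256);

	/* ... transactional work, typically a retry loop as sketched above ... */

	return bch2_trans_exit(&trans);
}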