// SPDX-License-Identifier: GPL-2.0
#ifdef CONFIG_BCACHEFS_TESTS

#include "bcachefs.h"
#include "btree_update.h"
#include "journal_reclaim.h"
#include "snapshot.h"
#include "tests.h"

#include "linux/kthread.h"
#include "linux/random.h"
static void delete_test_keys ( struct bch_fs * c )
{
int ret ;
2021-02-20 19:27:37 -05:00
ret = bch2_btree_delete_range ( c , BTREE_ID_extents ,
2022-10-11 04:32:41 -04:00
SPOS ( 0 , 0 , U32_MAX ) ,
POS ( 0 , U64_MAX ) ,
0 , NULL ) ;
2017-03-16 22:18:50 -08:00
BUG_ON ( ret ) ;
2021-02-20 19:27:37 -05:00
ret = bch2_btree_delete_range ( c , BTREE_ID_xattrs ,
2022-10-11 04:32:41 -04:00
SPOS ( 0 , 0 , U32_MAX ) ,
POS ( 0 , U64_MAX ) ,
2022-03-05 18:23:47 -05:00
0 , NULL ) ;
2017-03-16 22:18:50 -08:00
BUG_ON ( ret ) ;
}
/* unit tests */
2020-12-01 12:23:55 -05:00
/*
 * Insert a single key, then delete it twice through the same iterator:
 * the second delete exercises deleting a key that's already gone.
 * @nr is unused (all tests share the perf_test_fn signature).
 */
static int test_delete(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_i_cookie k;
	int ret;

	bkey_cookie_init(&k.k_i);
	k.k.p.snapshot = U32_MAX;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p,
			     BTREE_ITER_INTENT);

	ret = commit_do(trans, NULL, NULL, 0,
			bch2_btree_iter_traverse(&iter) ?:
			bch2_trans_update(trans, &iter, &k.k_i, 0));
	bch_err_msg(c, ret, "update error");
	if (ret)
		goto err;

	pr_info("deleting once");
	ret = commit_do(trans, NULL, NULL, 0,
			bch2_btree_iter_traverse(&iter) ?:
			bch2_btree_delete_at(trans, &iter, 0));
	bch_err_msg(c, ret, "delete error (first)");
	if (ret)
		goto err;

	pr_info("deleting twice");
	ret = commit_do(trans, NULL, NULL, 0,
			bch2_btree_iter_traverse(&iter) ?:
			bch2_btree_delete_at(trans, &iter, 0));
	bch_err_msg(c, ret, "delete error (second)");
	if (ret)
		goto err;
err:
	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}
2020-12-01 12:23:55 -05:00
/*
 * Like test_delete(), but flush the journal between insert and delete so the
 * key has been written to the btree before we delete it.
 */
static int test_delete_written(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_i_cookie k;
	int ret;

	bkey_cookie_init(&k.k_i);
	k.k.p.snapshot = U32_MAX;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p,
			     BTREE_ITER_INTENT);

	ret = commit_do(trans, NULL, NULL, 0,
			bch2_btree_iter_traverse(&iter) ?:
			bch2_trans_update(trans, &iter, &k.k_i, 0));
	bch_err_msg(c, ret, "update error");
	if (ret)
		goto err;

	/* drop btree locks before blocking on journal reclaim: */
	bch2_trans_unlock(trans);
	bch2_journal_flush_all_pins(&c->journal);

	ret = commit_do(trans, NULL, NULL, 0,
			bch2_btree_iter_traverse(&iter) ?:
			bch2_btree_delete_at(trans, &iter, 0));
	bch_err_msg(c, ret, "delete error");
	if (ret)
		goto err;
err:
	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}
2020-12-01 12:23:55 -05:00
/*
 * Insert @nr keys at consecutive offsets, then verify that forward and
 * reverse iteration each visit exactly those keys, in order.
 */
static int test_iterate(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter = { NULL };
	struct bkey_s_c k;
	u64 i;
	int ret = 0;

	delete_test_keys(c);

	pr_info("inserting test keys");

	for (i = 0; i < nr; i++) {
		struct bkey_i_cookie ck;

		bkey_cookie_init(&ck.k_i);
		ck.k.p.offset	= i;
		ck.k.p.snapshot	= U32_MAX;

		ret = bch2_btree_insert(c, BTREE_ID_xattrs, &ck.k_i, NULL, 0);
		bch_err_msg(c, ret, "insert error");
		if (ret)
			goto err;
	}

	pr_info("iterating forwards");
	i = 0;

	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_xattrs,
				       SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
				       0, k, ({
		BUG_ON(k.k->p.offset != i++);
		0;
	}));
	bch_err_msg(c, ret, "error iterating forwards");
	if (ret)
		goto err;

	BUG_ON(i != nr);

	pr_info("iterating backwards");

	ret = for_each_btree_key_reverse(trans, iter, BTREE_ID_xattrs,
					 SPOS(0, U64_MAX, U32_MAX), 0, k, ({
		BUG_ON(k.k->p.offset != --i);
		0;
	}));
	bch_err_msg(c, ret, "error iterating backwards");
	if (ret)
		goto err;

	BUG_ON(i);
err:
	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}
2020-12-01 12:23:55 -05:00
/*
 * Insert back-to-back 8-sector extents covering [0, nr), then verify forward
 * and reverse iteration see a contiguous, gap-free run of extents.
 */
static int test_iterate_extents(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter = { NULL };
	struct bkey_s_c k;
	u64 i;
	int ret = 0;

	delete_test_keys(c);

	pr_info("inserting test extents");

	for (i = 0; i < nr; i += 8) {
		struct bkey_i_cookie ck;

		bkey_cookie_init(&ck.k_i);
		ck.k.p.offset	= i + 8;	/* extents are keyed by end pos */
		ck.k.p.snapshot	= U32_MAX;
		ck.k.size	= 8;

		ret = bch2_btree_insert(c, BTREE_ID_extents, &ck.k_i, NULL, 0);
		bch_err_msg(c, ret, "insert error");
		if (ret)
			goto err;
	}

	pr_info("iterating forwards");
	i = 0;

	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_extents,
				       SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
				       0, k, ({
		BUG_ON(bkey_start_offset(k.k) != i);
		i = k.k->p.offset;
		0;
	}));
	bch_err_msg(c, ret, "error iterating forwards");
	if (ret)
		goto err;

	BUG_ON(i != nr);

	pr_info("iterating backwards");

	ret = for_each_btree_key_reverse(trans, iter, BTREE_ID_extents,
					 SPOS(0, U64_MAX, U32_MAX), 0, k, ({
		BUG_ON(k.k->p.offset != i);
		i = bkey_start_offset(k.k);
		0;
	}));
	bch_err_msg(c, ret, "error iterating backwards");
	if (ret)
		goto err;

	BUG_ON(i);
err:
	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}
2020-12-01 12:23:55 -05:00
/*
 * Insert keys at every even offset, then check that normal iteration skips
 * the holes while BTREE_ITER_SLOTS iteration returns a whiteout for each
 * empty (odd) slot.
 */
static int test_iterate_slots(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter = { NULL };
	struct bkey_s_c k;
	u64 i;
	int ret = 0;

	delete_test_keys(c);

	pr_info("inserting test keys");

	for (i = 0; i < nr; i++) {
		struct bkey_i_cookie ck;

		bkey_cookie_init(&ck.k_i);
		ck.k.p.offset	= i * 2;
		ck.k.p.snapshot	= U32_MAX;

		ret = bch2_btree_insert(c, BTREE_ID_xattrs, &ck.k_i, NULL, 0);
		bch_err_msg(c, ret, "insert error");
		if (ret)
			goto err;
	}

	pr_info("iterating forwards");
	i = 0;

	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_xattrs,
				       SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
				       0, k, ({
		BUG_ON(k.k->p.offset != i);
		i += 2;
		0;
	}));
	bch_err_msg(c, ret, "error iterating forwards");
	if (ret)
		goto err;

	BUG_ON(i != nr * 2);

	pr_info("iterating forwards by slots");
	i = 0;

	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_xattrs,
				       SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
				       BTREE_ITER_SLOTS, k, ({
		if (i >= nr * 2)
			break;

		BUG_ON(k.k->p.offset != i);
		/* odd slots are empty and must show up as deleted: */
		BUG_ON(bkey_deleted(k.k) != (i & 1));
		i++;
		0;
	}));
	if (ret < 0) {
		bch_err_msg(c, ret, "error iterating forwards by slots");
		goto err;
	}
	ret = 0;
err:
	bch2_trans_put(trans);
	return ret;
}
2020-12-01 12:23:55 -05:00
/*
 * Insert an 8-sector extent in every 16-sector window, then check that normal
 * iteration sees only the live extents and BTREE_ITER_SLOTS iteration also
 * returns whiteouts for the 8-sector holes between them.
 *
 * Fix: the function ended with "return 0", discarding any error stored in
 * ret (insert or iteration failures were silently swallowed).  Return ret.
 */
static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter = { NULL };
	struct bkey_s_c k;
	u64 i;
	int ret = 0;

	delete_test_keys(c);

	pr_info("inserting test keys");

	for (i = 0; i < nr; i += 16) {
		struct bkey_i_cookie ck;

		bkey_cookie_init(&ck.k_i);
		ck.k.p.offset	= i + 16;	/* extents are keyed by end pos */
		ck.k.p.snapshot	= U32_MAX;
		ck.k.size	= 8;

		ret = bch2_btree_insert(c, BTREE_ID_extents, &ck.k_i, NULL, 0);
		bch_err_msg(c, ret, "insert error");
		if (ret)
			goto err;
	}

	pr_info("iterating forwards");
	i = 0;

	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_extents,
				       SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
				       0, k, ({
		BUG_ON(bkey_start_offset(k.k) != i + 8);
		BUG_ON(k.k->size != 8);
		i += 16;
		0;
	}));
	bch_err_msg(c, ret, "error iterating forwards");
	if (ret)
		goto err;

	BUG_ON(i != nr);

	pr_info("iterating forwards by slots");
	i = 0;

	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_extents,
				       SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
				       BTREE_ITER_SLOTS, k, ({
		if (i == nr)
			break;
		/* hole at the start of each 16-sector window: */
		BUG_ON(bkey_deleted(k.k) != !(i % 16));

		BUG_ON(bkey_start_offset(k.k) != i);
		BUG_ON(k.k->size != 8);
		i = k.k->p.offset;
		0;
	}));
	bch_err_msg(c, ret, "error iterating forwards by slots");
	if (ret)
		goto err;
	ret = 0;
err:
	bch2_trans_put(trans);
	return ret;	/* was "return 0", which hid errors */
}
2018-08-21 16:30:14 -04:00
/*
 * XXX: we really want to make sure we've got a btree with depth > 0 for these
 * tests
 */
2020-12-01 12:23:55 -05:00
/*
 * Peek past the end of an empty xattrs btree twice and verify both peeks
 * return no key (iterating at EOF must be idempotent).
 */
static int test_peek_end(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
			     SPOS(0, 0, U32_MAX), 0);

	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
	BUG_ON(k.k);

	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
	BUG_ON(k.k);

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return 0;
}
2020-12-01 12:23:55 -05:00
/* Same as test_peek_end(), but against the extents btree. */
static int test_peek_end_extents(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     SPOS(0, 0, U32_MAX), 0);

	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
	BUG_ON(k.k);

	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
	BUG_ON(k.k);

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return 0;
}
2018-08-01 23:03:41 -04:00
/* extent unit tests */

/* monotonically increasing version stamp for inserted test extents: */
static u64 test_version;
2018-08-01 23:03:41 -04:00
2020-12-01 12:23:55 -05:00
/*
 * Insert a cookie extent spanning [start, end) with a fresh, monotonically
 * increasing version number.
 */
static int insert_test_extent(struct bch_fs *c,
			      u64 start, u64 end)
{
	struct bkey_i_cookie k;
	int ret;

	bkey_cookie_init(&k.k_i);
	k.k_i.k.p.offset	= end;		/* extents are keyed by end pos */
	k.k_i.k.p.snapshot	= U32_MAX;
	k.k_i.k.size		= end - start;
	k.k_i.k.version.lo	= test_version++;

	ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i, NULL, 0);
	bch_err_fn(c, ret);
	return ret;
}
2020-12-01 12:23:55 -05:00
/*
 * Insert extent e1, then overwrite (part of) it with e2, exercising the
 * extent overwrite path; clean up the test keys afterwards.
 */
static int __test_extent_overwrite(struct bch_fs *c,
				   u64 e1_start, u64 e1_end,
				   u64 e2_start, u64 e2_end)
{
	int ret;

	ret = insert_test_extent(c, e1_start, e1_end) ?:
	      insert_test_extent(c, e2_start, e2_end);

	delete_test_keys(c);
	return ret;
}
2020-12-01 12:23:55 -05:00
/* Overwrite the front of an existing extent. */
static int test_extent_overwrite_front(struct bch_fs *c, u64 nr)
{
	return  __test_extent_overwrite(c, 0, 64, 0, 32) ?:
		__test_extent_overwrite(c, 8, 64, 0, 32);
}
2020-12-01 12:23:55 -05:00
/* Overwrite the back of an existing extent. */
static int test_extent_overwrite_back(struct bch_fs *c, u64 nr)
{
	return  __test_extent_overwrite(c, 0, 64, 32, 64) ?:
		__test_extent_overwrite(c, 0, 64, 32, 72);
}
2020-12-01 12:23:55 -05:00
/* Overwrite the middle of an existing extent, splitting it in two. */
static int test_extent_overwrite_middle(struct bch_fs *c, u64 nr)
{
	return __test_extent_overwrite(c, 0, 64, 32, 40);
}
2020-12-01 12:23:55 -05:00
/* Completely overwrite an existing extent, in several alignments. */
static int test_extent_overwrite_all(struct bch_fs *c, u64 nr)
{
	return  __test_extent_overwrite(c, 32, 64,  0,  64) ?:
		__test_extent_overwrite(c, 32, 64,  0, 128) ?:
		__test_extent_overwrite(c, 32, 64, 32,  64) ?:
		__test_extent_overwrite(c, 32, 64, 32, 128);
}
2023-07-20 22:42:26 -04:00
/*
 * Insert a cookie extent of @len sectors at (@inum, @start) in snapshot
 * @snapid via the non-extent insert path, so overlapping extents are
 * deliberately allowed in.
 */
static int insert_test_overlapping_extent(struct bch_fs *c, u64 inum, u64 start, u32 len, u32 snapid)
{
	struct bkey_i_cookie k;
	int ret;

	bkey_cookie_init(&k.k_i);
	k.k_i.k.p.inode		= inum;
	k.k_i.k.p.offset	= start + len;
	k.k_i.k.p.snapshot	= snapid;
	k.k_i.k.size		= len;

	ret = bch2_trans_do(c, NULL, NULL, 0,
		bch2_btree_insert_nonextent(trans, BTREE_ID_extents, &k.k_i,
					    BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE));
	bch_err_fn(c, ret);
	return ret;
}
/* Create overlapping extents across snapshots; repair runs at next fsck. */
static int test_extent_create_overlapping(struct bch_fs *c, u64 inum)
{
	return  insert_test_overlapping_extent(c, inum,  0, 16, U32_MAX - 2) ?: /* overwrite entire */
		insert_test_overlapping_extent(c, inum,  2,  8, U32_MAX - 2) ?:
		insert_test_overlapping_extent(c, inum,  4,  4, U32_MAX) ?:
		insert_test_overlapping_extent(c, inum, 32,  8, U32_MAX - 2) ?: /* overwrite front/back */
		insert_test_overlapping_extent(c, inum, 36,  8, U32_MAX) ?:
		insert_test_overlapping_extent(c, inum, 60,  8, U32_MAX - 2) ?:
		insert_test_overlapping_extent(c, inum, 64,  8, U32_MAX);
}
2021-12-29 13:50:50 -05:00
/* snapshot unit tests */
/* Test skipping over keys in unrelated snapshots: */
/*
 * Test skipping over keys in unrelated snapshots: insert a key in
 * @snapid_hi, then iterate from @snapid_lo - the key we find must be the
 * ancestor key at U32_MAX (inserted by test_snapshots()), not the one in
 * the unrelated sibling snapshot.
 *
 * Fix: check k.k for NULL before dereferencing it - if the peek found
 * nothing, the old BUG_ON(k.k->p.snapshot != U32_MAX) was a NULL pointer
 * dereference (undefined behavior) rather than a clean assertion failure.
 */
static int test_snapshot_filter(struct bch_fs *c, u32 snapid_lo, u32 snapid_hi)
{
	struct btree_trans *trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_i_cookie cookie;
	int ret;

	bkey_cookie_init(&cookie.k_i);
	cookie.k.p.snapshot = snapid_hi;
	ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i, NULL, 0);
	if (ret)
		return ret;

	trans = bch2_trans_get(c);
	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
			     SPOS(0, 0, snapid_lo), 0);
	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));

	BUG_ON(!k.k || k.k->p.snapshot != U32_MAX);

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}
/*
 * Create a key in snapshot U32_MAX, fork two child snapshots off it, and
 * run test_snapshot_filter() with the lower/higher child ids.
 */
static int test_snapshots(struct bch_fs *c, u64 nr)
{
	struct bkey_i_cookie cookie;
	u32 snapids[2];
	u32 snapid_subvols[2] = { 1, 1 };
	int ret;

	bkey_cookie_init(&cookie.k_i);
	cookie.k.p.snapshot = U32_MAX;
	ret = bch2_btree_insert(c, BTREE_ID_xattrs, &cookie.k_i, NULL, 0);
	if (ret)
		return ret;

	ret = bch2_trans_do(c, NULL, NULL, 0,
		bch2_snapshot_node_create(trans, U32_MAX,
					  snapids,
					  snapid_subvols,
					  2));
	if (ret)
		return ret;

	if (snapids[0] > snapids[1])
		swap(snapids[0], snapids[1]);

	ret = test_snapshot_filter(c, snapids[0], snapids[1]);
	bch_err_msg(c, ret, "from test_snapshot_filter");
	return ret;
}
2017-03-16 22:18:50 -08:00
/* perf tests */
/* Uniformly random 64 bit value, used for perf test key positions. */
static u64 test_rand(void)
{
	u64 v;

	get_random_bytes(&v, sizeof(v));
	return v;
}
2020-12-01 12:23:55 -05:00
/* Insert @nr cookies at random positions, one transaction commit each. */
static int rand_insert(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bkey_i_cookie k;
	int ret = 0;
	u64 i;

	for (i = 0; i < nr; i++) {
		bkey_cookie_init(&k.k_i);
		k.k.p.offset	= test_rand();
		k.k.p.snapshot	= U32_MAX;

		ret = commit_do(trans, NULL, NULL, 0,
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k.k_i, 0));
		if (ret)
			break;
	}

	bch2_trans_put(trans);
	return ret;
}
2021-04-14 12:10:17 -04:00
/* Like rand_insert(), but batch eight random inserts into each commit. */
static int rand_insert_multi(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bkey_i_cookie k[8];
	int ret = 0;
	unsigned j;
	u64 i;

	for (i = 0; i < nr; i += ARRAY_SIZE(k)) {
		for (j = 0; j < ARRAY_SIZE(k); j++) {
			bkey_cookie_init(&k[j].k_i);
			k[j].k.p.offset		= test_rand();
			k[j].k.p.snapshot	= U32_MAX;
		}

		ret = commit_do(trans, NULL, NULL, 0,
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[0].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[1].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[2].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[3].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[4].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[5].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[6].k_i, 0) ?:
			bch2_btree_insert_trans(trans, BTREE_ID_xattrs, &k[7].k_i, 0));
		if (ret)
			break;
	}

	bch2_trans_put(trans);
	return ret;
}
2020-12-01 12:23:55 -05:00
/* Perform @nr point lookups at random positions through one iterator. */
static int rand_lookup(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;
	u64 i;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
			     SPOS(0, 0, U32_MAX), 0);

	for (i = 0; i < nr; i++) {
		bch2_btree_iter_set_pos(&iter, SPOS(0, test_rand(), U32_MAX));

		lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
		ret = bkey_err(k);
		if (ret)
			break;
	}

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}
2021-08-30 15:18:31 -04:00
/*
 * One iteration of the mixed workload: look up a random position, and on
 * every fourth iteration (when a key was found) overwrite it.
 */
static int rand_mixed_trans(struct btree_trans *trans,
			    struct btree_iter *iter,
			    struct bkey_i_cookie *cookie,
			    u64 i, u64 pos)
{
	struct bkey_s_c k;
	int ret;

	bch2_btree_iter_set_pos(iter, SPOS(0, pos, U32_MAX));

	k = bch2_btree_iter_peek(iter);
	ret = bkey_err(k);
	bch_err_msg(trans->c, ret, "lookup error");
	if (ret)
		return ret;

	if (!(i & 3) && k.k) {
		bkey_cookie_init(&cookie->k_i);
		cookie->k.p = iter->pos;
		ret = bch2_trans_update(trans, iter, &cookie->k_i, 0);
	}

	return ret;
}
2020-12-01 12:23:55 -05:00
/* Run @nr iterations of the mixed lookup/update workload. */
static int rand_mixed(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_i_cookie cookie;
	int ret = 0;
	u64 i, rand;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs,
			     SPOS(0, 0, U32_MAX), 0);

	for (i = 0; i < nr; i++) {
		rand = test_rand();
		ret = commit_do(trans, NULL, NULL, 0,
			rand_mixed_trans(trans, &iter, &cookie, i, rand));
		if (ret)
			break;
	}

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
}
2020-02-26 15:39:46 -05:00
static int __do_delete ( struct btree_trans * trans , struct bpos pos )
{
2021-08-30 15:18:31 -04:00
struct btree_iter iter ;
2020-02-26 15:39:46 -05:00
struct bkey_s_c k ;
int ret = 0 ;
2021-08-30 15:18:31 -04:00
bch2_trans_iter_init ( trans , & iter , BTREE_ID_xattrs , pos ,
BTREE_ITER_INTENT ) ;
2023-06-05 01:16:00 -04:00
k = bch2_btree_iter_peek ( & iter ) ;
2020-02-26 15:39:46 -05:00
ret = bkey_err ( k ) ;
if ( ret )
goto err ;
2020-12-07 11:44:12 -05:00
if ( ! k . k )
goto err ;
2021-12-15 20:38:56 -05:00
ret = bch2_btree_delete_at ( trans , & iter , 0 ) ;
2020-02-26 15:39:46 -05:00
err :
2021-08-30 15:18:31 -04:00
bch2_trans_iter_exit ( trans , & iter ) ;
2020-02-26 15:39:46 -05:00
return ret ;
}
2020-12-01 12:23:55 -05:00
/* Delete @nr keys at random positions, one per transaction commit. */
static int rand_delete(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	int ret = 0;
	u64 i;

	for (i = 0; i < nr; i++) {
		struct bpos pos = SPOS(0, test_rand(), U32_MAX);

		ret = commit_do(trans, NULL, NULL, 0,
			__do_delete(trans, pos));
		if (ret)
			break;
	}

	bch2_trans_put(trans);
	return ret;
}
2020-12-01 12:23:55 -05:00
/* Sequentially insert cookies at offsets [0, nr) by iterating over slots. */
static int seq_insert(struct bch_fs *c, u64 nr)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_i_cookie insert;

	bkey_cookie_init(&insert.k_i);

	return bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
					SPOS(0, 0, U32_MAX),
					BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k,
					NULL, NULL, 0, ({
			if (iter.pos.offset >= nr)
				break;
			insert.k.p = iter.pos;
			bch2_trans_update(trans, &iter, &insert.k_i, 0);
		})));
}
2020-12-01 12:23:55 -05:00
/* Walk every key in the xattrs btree once (pure sequential-read perf). */
static int seq_lookup(struct bch_fs *c, u64 nr)
{
	struct btree_iter iter;
	struct bkey_s_c k;

	return bch2_trans_run(c,
		for_each_btree_key2_upto(trans, iter, BTREE_ID_xattrs,
					 SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
					 0, k,
			0));
}
2020-12-01 12:23:55 -05:00
/* Rewrite every existing key in place (sequential overwrite perf). */
static int seq_overwrite(struct bch_fs *c, u64 nr)
{
	struct btree_iter iter;
	struct bkey_s_c k;

	return bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
					SPOS(0, 0, U32_MAX),
					BTREE_ITER_INTENT, k,
					NULL, NULL, 0, ({
			struct bkey_i_cookie u;

			bkey_reassemble(&u.k_i, k);
			bch2_trans_update(trans, &iter, &u.k_i, 0);
		})));
}
2020-12-01 12:23:55 -05:00
/* Delete the whole test range with one ranged delete. */
static int seq_delete(struct bch_fs *c, u64 nr)
{
	return bch2_btree_delete_range(c, BTREE_ID_xattrs,
				       SPOS(0, 0, U32_MAX),
				       POS(0, U64_MAX),
				       0, NULL);
}
2020-12-01 12:23:55 -05:00
typedef int ( * perf_test_fn ) ( struct bch_fs * , u64 ) ;
2017-03-16 22:18:50 -08:00
struct test_job {
struct bch_fs * c ;
u64 nr ;
unsigned nr_threads ;
perf_test_fn fn ;
atomic_t ready ;
wait_queue_head_t ready_wait ;
atomic_t done ;
struct completion done_completion ;
u64 start ;
u64 finish ;
2020-12-01 12:23:55 -05:00
int ret ;
2017-03-16 22:18:50 -08:00
} ;
static int btree_perf_test_thread ( void * data )
{
struct test_job * j = data ;
2020-12-01 12:23:55 -05:00
int ret ;
2017-03-16 22:18:50 -08:00
if ( atomic_dec_and_test ( & j - > ready ) ) {
wake_up ( & j - > ready_wait ) ;
j - > start = sched_clock ( ) ;
} else {
wait_event ( j - > ready_wait , ! atomic_read ( & j - > ready ) ) ;
}
2021-08-17 17:14:26 -06:00
ret = j - > fn ( j - > c , div64_u64 ( j - > nr , j - > nr_threads ) ) ;
2021-12-29 13:50:50 -05:00
if ( ret ) {
2022-07-18 19:42:58 -04:00
bch_err ( j - > c , " %ps: error %s " , j - > fn , bch2_err_str ( ret ) ) ;
2020-12-01 12:23:55 -05:00
j - > ret = ret ;
2021-12-29 13:50:50 -05:00
}
2017-03-16 22:18:50 -08:00
if ( atomic_dec_and_test ( & j - > done ) ) {
j - > finish = sched_clock ( ) ;
complete ( & j - > done_completion ) ;
}
return 0 ;
}
2020-12-01 12:23:55 -05:00
/*
 * Run the test named @testname over @nr keys on @nr_threads threads, and
 * print a throughput summary.  Returns -EINVAL for an unknown test name,
 * otherwise the first error any worker reported.
 */
int bch2_btree_perf_test(struct bch_fs *c, const char *testname,
			 u64 nr, unsigned nr_threads)
{
	struct test_job j = { .c = c, .nr = nr, .nr_threads = nr_threads };
	char name_buf[20];
	struct printbuf nr_buf = PRINTBUF;
	struct printbuf per_sec_buf = PRINTBUF;
	unsigned i;
	u64 time;

	atomic_set(&j.ready, nr_threads);
	init_waitqueue_head(&j.ready_wait);

	atomic_set(&j.done, nr_threads);
	init_completion(&j.done_completion);

#define perf_test(_test)				\
	if (!strcmp(testname, #_test)) j.fn = _test

	perf_test(rand_insert);
	perf_test(rand_insert_multi);
	perf_test(rand_lookup);
	perf_test(rand_mixed);
	perf_test(rand_delete);

	perf_test(seq_insert);
	perf_test(seq_lookup);
	perf_test(seq_overwrite);
	perf_test(seq_delete);

	/* a unit test, not a perf test: */
	perf_test(test_delete);
	perf_test(test_delete_written);
	perf_test(test_iterate);
	perf_test(test_iterate_extents);
	perf_test(test_iterate_slots);
	perf_test(test_iterate_slots_extents);
	perf_test(test_peek_end);
	perf_test(test_peek_end_extents);

	perf_test(test_extent_overwrite_front);
	perf_test(test_extent_overwrite_back);
	perf_test(test_extent_overwrite_middle);
	perf_test(test_extent_overwrite_all);
	perf_test(test_extent_create_overlapping);

	perf_test(test_snapshots);

	if (!j.fn) {
		pr_err("unknown test %s", testname);
		return -EINVAL;
	}

	/*
	 * NOTE(review): kthread_run() failure isn't checked - if a thread
	 * fails to start, j.done never reaches zero and we wait forever
	 * below.  Confirm whether that's acceptable for this debug-only
	 * harness before hardening.
	 */
	if (nr_threads == 1)
		btree_perf_test_thread(&j);
	else
		for (i = 0; i < nr_threads; i++)
			kthread_run(btree_perf_test_thread, &j,
				    "bcachefs perf test[%u]", i);

	while (wait_for_completion_interruptible(&j.done_completion))
		;

	time = j.finish - j.start;

	scnprintf(name_buf, sizeof(name_buf), "%s:", testname);
	prt_human_readable_u64(&nr_buf, nr);
	prt_human_readable_u64(&per_sec_buf, div64_u64(nr * NSEC_PER_SEC, time));
	printk(KERN_INFO "%-12s %s with %u threads in %5llu sec, %5llu nsec per iter, %5s per sec\n",
		name_buf, nr_buf.buf, nr_threads,
		div_u64(time, NSEC_PER_SEC),
		div_u64(time * nr_threads, nr),
		per_sec_buf.buf);
	printbuf_exit(&per_sec_buf);
	printbuf_exit(&nr_buf);
	return j.ret;
}
# endif /* CONFIG_BCACHEFS_TESTS */