// SPDX-License-Identifier: GPL-2.0
# include "bcachefs.h"
2020-12-17 15:08:58 -05:00
# include "bkey_buf.h"
2018-11-27 18:30:56 -05:00
# include "bkey_sort.h"
# include "bset.h"
# include "extents.h"
2019-12-14 16:20:33 -05:00
typedef int ( * sort_cmp_fn ) ( struct btree * ,
struct bkey_packed * ,
struct bkey_packed * ) ;
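
/*
 * The sort_iter helpers below do an n-way merge over several sorted sets of
 * keys (iter->data[]), keeping the array ordered by each set's current key so
 * that data[0] always holds the next key to emit.
 */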

static inline bool sort_iter_end(struct sort_iter *iter)
{
        return !iter->used;
}
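
/*
 * Restore sort order after data[from]'s current key has changed: sink the
 * entry towards the back of the array while it compares greater than its
 * successor, so data[0] remains the smallest key according to cmp.
 */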
static inline void sort_iter_sift(struct sort_iter *iter, unsigned from,
                                  sort_cmp_fn cmp)
{
        unsigned i;

        for (i = from;
             i + 1 < iter->used &&
             cmp(iter->b, iter->data[i].k, iter->data[i + 1].k) > 0;
             i++)
                swap(iter->data[i], iter->data[i + 1]);
}

static inline void sort_iter_sort(struct sort_iter *iter, sort_cmp_fn cmp)
{
        unsigned i = iter->used;

        while (i--)
                sort_iter_sift(iter, i, cmp);
}

static inline struct bkey_packed *sort_iter_peek(struct sort_iter *iter)
{
        return !sort_iter_end(iter) ? iter->data->k : NULL;
}
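
/*
 * Advance the set at the head of the iterator to its next key; drop the set
 * once it's exhausted, otherwise re-sift it to restore sort order.
 */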
static inline void sort_iter_advance(struct sort_iter *iter, sort_cmp_fn cmp)
{
        struct sort_iter_set *i = iter->data;

        BUG_ON(!iter->used);

        i->k = bkey_next_skip_noops(i->k, i->end);

        BUG_ON(i->k > i->end);

        if (i->k == i->end)
                array_remove_item(iter->data, iter->used, 0);
        else
                sort_iter_sift(iter, 0, cmp);
}

static inline struct bkey_packed *sort_iter_next(struct sort_iter *iter,
                                                 sort_cmp_fn cmp)
{
        struct bkey_packed *ret = sort_iter_peek(iter);

        if (ret)
                sort_iter_advance(iter, cmp);

        return ret;
}

/*
 * If keys compare equal, compare by pointer order:
 */
static inline int key_sort_fix_overlapping_cmp(struct btree *b,
                                               struct bkey_packed *l,
                                               struct bkey_packed *r)
{
        return bch2_bkey_cmp_packed(b, l, r) ?:
                cmp_int((unsigned long) l, (unsigned long) r);
}

static inline bool should_drop_next_key(struct sort_iter *iter)
{
        /*
         * key_sort_fix_overlapping_cmp() ensures that when keys compare equal
         * the older key comes first; so if l->k compares equal to r->k then
         * l->k is older and should be dropped.
         */
        return iter->used >= 2 &&
                !bch2_bkey_cmp_packed(iter->b,
                                      iter->data[0].k,
                                      iter->data[1].k);
}
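
/*
 * Merge the keys from all the sets in @iter into @dst in sorted order,
 * dropping deleted keys and, when several keys compare equal, keeping only
 * the newest one.
 */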
struct btree_nr_keys
bch2_key_sort_fix_overlapping(struct bch_fs *c, struct bset *dst,
                              struct sort_iter *iter)
{
        struct bkey_packed *out = dst->start;
        struct bkey_packed *k;
        struct btree_nr_keys nr;

        memset(&nr, 0, sizeof(nr));

        sort_iter_sort(iter, key_sort_fix_overlapping_cmp);

        while ((k = sort_iter_peek(iter))) {
                if (!bkey_deleted(k) &&
                    !should_drop_next_key(iter)) {
                        bkey_copy(out, k);
                        btree_keys_account_key_add(&nr, 0, out);
                        out = bkey_next(out);
                }

                sort_iter_advance(iter, key_sort_fix_overlapping_cmp);
        }

        dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
        return nr;
}
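
/*
 * Append @k at *out in format @f, falling back to the unpacked key format if
 * it can't be packed, then advance *out; deleted keys are skipped entirely.
 */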
static void extent_sort_append(struct bch_fs *c,
                               struct bkey_format *f,
                               struct btree_nr_keys *nr,
                               struct bkey_packed **out,
                               struct bkey_s k)
{
        if (!bkey_deleted(k.k)) {
                if (!bch2_bkey_pack_key(*out, k.k, f))
                        memcpy_u64s_small(*out, k.k, BKEY_U64s);

                memcpy_u64s_small(bkeyp_val(f, *out), k.v, bkey_val_u64s(k.k));

                btree_keys_account_key_add(nr, 0, *out);
                *out = bkey_next(*out);
        }
}

/* Sort + repack in a new format: */
struct btree_nr_keys
bch2_sort_repack(struct bset *dst, struct btree *src,
                 struct btree_node_iter *src_iter,
                 struct bkey_format *out_f,
                 bool filter_whiteouts)
{
        struct bkey_format *in_f = &src->format;
        struct bkey_packed *in, *out = vstruct_last(dst);
        struct btree_nr_keys nr;

        memset(&nr, 0, sizeof(nr));

        while ((in = bch2_btree_node_iter_next_all(src_iter, src))) {
                if (filter_whiteouts && bkey_deleted(in))
                        continue;

                if (bch2_bkey_transform(out_f, out, bkey_packed(in)
                                        ? in_f : &bch2_bkey_format_current, in))
                        out->format = KEY_FORMAT_LOCAL_BTREE;
                else
                        bch2_bkey_unpack(src, (void *) out, in);

                btree_keys_account_key_add(&nr, 0, out);
                out = bkey_next(out);
        }

        dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
        return nr;
}

/* Sort, repack, and call bch2_bkey_normalize() to drop stale pointers: */
struct btree_nr_keys
bch2_sort_repack_merge(struct bch_fs *c,
                       struct bset *dst, struct btree *src,
                       struct btree_node_iter *iter,
                       struct bkey_format *out_f,
                       bool filter_whiteouts)
{
        struct bkey_packed *out = vstruct_last(dst), *k_packed;
        struct bkey_buf k;
        struct btree_nr_keys nr;

        memset(&nr, 0, sizeof(nr));

        bch2_bkey_buf_init(&k);

        while ((k_packed = bch2_btree_node_iter_next_all(iter, src))) {
                if (filter_whiteouts && bkey_deleted(k_packed))
                        continue;

                /*
                 * NOTE:
                 * bch2_bkey_normalize may modify the key we pass it (dropping
                 * stale pointers) and we don't have a write lock on the src
                 * node; we have to make a copy of the entire key before
                 * calling normalize
                 */
                bch2_bkey_buf_realloc(&k, c, k_packed->u64s + BKEY_U64s);
                bch2_bkey_unpack(src, k.k, k_packed);

                if (filter_whiteouts &&
                    bch2_bkey_normalize(c, bkey_i_to_s(k.k)))
                        continue;

                extent_sort_append(c, out_f, &nr, &out, bkey_i_to_s(k.k));
        }

        dst->u64s = cpu_to_le16((u64 *) out - dst->_data);

        bch2_bkey_buf_exit(&k, c);
        return nr;
}
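
/*
 * Sort order for bch2_sort_keys(): by position, then whiteouts (deleted keys)
 * before live keys at the same position, then keys that don't need a whiteout
 * before keys that do.
 */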
static inline int sort_keys_cmp(struct btree *b,
                                struct bkey_packed *l,
                                struct bkey_packed *r)
{
        return bch2_bkey_cmp_packed(b, l, r) ?:
                (int) bkey_deleted(r) - (int) bkey_deleted(l) ?:
                (int) l->needs_whiteout - (int) r->needs_whiteout;
}
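
/*
 * Merge the sets in @iter into @dst: whiteouts are dropped when they're no
 * longer needed (or when @filter_whiteouts is set), and when several keys
 * compare equal only the last one in sort order is kept, inheriting the
 * needs_whiteout flag from the keys it supersedes.
 */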
unsigned bch2_sort_keys(struct bkey_packed *dst,
                        struct sort_iter *iter,
                        bool filter_whiteouts)
{
        const struct bkey_format *f = &iter->b->format;
        struct bkey_packed *in, *next, *out = dst;

        sort_iter_sort(iter, sort_keys_cmp);

        while ((in = sort_iter_next(iter, sort_keys_cmp))) {
                bool needs_whiteout = false;

                if (bkey_deleted(in) &&
                    (filter_whiteouts || !in->needs_whiteout))
                        continue;

                while ((next = sort_iter_peek(iter)) &&
                       !bch2_bkey_cmp_packed(iter->b, in, next)) {
                        BUG_ON(in->needs_whiteout &&
                               next->needs_whiteout);
                        needs_whiteout |= in->needs_whiteout;
                        in = sort_iter_next(iter, sort_keys_cmp);
                }

                if (bkey_deleted(in)) {
                        memcpy_u64s(out, in, bkeyp_key_u64s(f, in));
                        set_bkeyp_val_u64s(f, out, 0);
                } else {
                        bkey_copy(out, in);
                }

                out->needs_whiteout |= needs_whiteout;
                out = bkey_next(out);
        }

        return (u64 *) out - (u64 *) dst;
}