// SPDX-License-Identifier: GPL-2.0
/*
 * Code for working with individual keys, and sorted sets of keys within a
 * btree node
 *
 * Copyright 2012 Google, Inc.
 */

#include "bcachefs.h"
#include "btree_cache.h"
#include "bset.h"
#include "eytzinger.h"
#include "trace.h"
#include "util.h"

#include <asm/unaligned.h>
#include <linux/console.h>
#include <linux/random.h>
#include <linux/prefetch.h>
static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *,
						  struct btree *);

static inline unsigned __btree_node_iter_used(struct btree_node_iter *iter)
{
	unsigned n = ARRAY_SIZE(iter->data);

	while (n && __btree_node_iter_set_end(iter, n - 1))
		--n;

	return n;
}
struct bset_tree *bch2_bkey_to_bset(struct btree *b, struct bkey_packed *k)
{
	return bch2_bkey_to_bset_inlined(b, k);
}
/*
 * There are never duplicate live keys in the btree - but including keys that
 * have been flagged as deleted (and will be cleaned up later) we _will_ see
 * duplicates.
 *
 * Thus the sort order is: usual key comparison first, but for keys that compare
 * equal the deleted key(s) come first, and the (at most one) live version comes
 * last.
 *
 * The main reason for this is insertion: to handle overwrites, we first iterate
 * over keys that compare equal to our insert key, and then insert immediately
 * prior to the first key greater than the key we're inserting - our insert
 * position will be after all keys that compare equal to our insert key, which
 * by the time we actually do the insert will all be deleted.
 */
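
/*
 * Hypothetical example of the above ordering: if a position X was overwritten
 * twice, a bset may briefly contain
 *
 *	(X, deleted) (X, deleted) (X, live)
 *
 * in that order, so an insert at X always lands after every key comparing
 * equal to X, and the deleted duplicates get dropped the next time the bset
 * is resorted/compacted.
 */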
void bch2_dump_bset(struct bch_fs *c, struct btree *b,
		    struct bset *i, unsigned set)
{
	struct bkey_packed *_k, *_n;
	struct bkey uk, n;
	struct bkey_s_c k;
	char buf[200];

	if (!i->u64s)
		return;

	for (_k = i->start;
	     _k < vstruct_last(i);
	     _k = _n) {
		_n = bkey_next(_k);

		k = bkey_disassemble(b, _k, &uk);
		if (c)
			bch2_bkey_val_to_text(&PBUF(buf), c, k);
		else
			bch2_bkey_to_text(&PBUF(buf), k.k);

		printk(KERN_ERR "block %u key %5zu: %s\n", set,
		       _k->_data - i->_data, buf);

		if (_n == vstruct_last(i))
			continue;

		n = bkey_unpack_key(b, _n);

		if (bpos_cmp(n.p, k.k->p) < 0) {
			printk(KERN_ERR "Key skipped backwards\n");
			continue;
		}

		if (!bkey_deleted(k.k) &&
		    !bpos_cmp(n.p, k.k->p))
			printk(KERN_ERR "Duplicate keys\n");
	}
}
void bch2_dump_btree_node(struct bch_fs *c, struct btree *b)
{
	struct bset_tree *t;

	console_lock();
	for_each_bset(b, t)
		bch2_dump_bset(c, b, bset(b, t), t - b->set);
	console_unlock();
}

void bch2_dump_btree_node_iter(struct btree *b,
			       struct btree_node_iter *iter)
{
	struct btree_node_iter_set *set;

	printk(KERN_ERR "btree node iter with %u/%u sets:\n",
	       __btree_node_iter_used(iter), b->nsets);

	btree_node_iter_for_each(iter, set) {
		struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
		struct bset_tree *t = bch2_bkey_to_bset(b, k);
		struct bkey uk = bkey_unpack_key(b, k);
		char buf[100];

		bch2_bkey_to_text(&PBUF(buf), &uk);
		printk(KERN_ERR "set %zu key %u: %s\n",
		       t - b->set, set->k, buf);
	}
}
#ifdef CONFIG_BCACHEFS_DEBUG

void __bch2_verify_btree_nr_keys(struct btree *b)
{
	struct bset_tree *t;
	struct bkey_packed *k;
	struct btree_nr_keys nr = { 0 };

	for_each_bset(b, t)
		bset_tree_for_each_key(b, t, k)
			if (!bkey_deleted(k))
				btree_keys_account_key_add(&nr, t - b->set, k);

	BUG_ON(memcmp(&nr, &b->nr, sizeof(nr)));
}
static void bch2_btree_node_iter_next_check(struct btree_node_iter *_iter,
					    struct btree *b)
{
	struct btree_node_iter iter = *_iter;
	const struct bkey_packed *k, *n;

	k = bch2_btree_node_iter_peek_all(&iter, b);
	__bch2_btree_node_iter_advance(&iter, b);
	n = bch2_btree_node_iter_peek_all(&iter, b);

	bkey_unpack_key(b, k);

	if (n &&
	    bkey_iter_cmp(b, k, n) > 0) {
		struct btree_node_iter_set *set;
		struct bkey ku = bkey_unpack_key(b, k);
		struct bkey nu = bkey_unpack_key(b, n);
		char buf1[80], buf2[80];

		bch2_dump_btree_node(NULL, b);
		bch2_bkey_to_text(&PBUF(buf1), &ku);
		bch2_bkey_to_text(&PBUF(buf2), &nu);
		printk(KERN_ERR "out of order/overlapping:\n%s\n%s\n",
		       buf1, buf2);
		printk(KERN_ERR "iter was:");

		btree_node_iter_for_each(_iter, set) {
			struct bkey_packed *k = __btree_node_offset_to_key(b, set->k);
			struct bset_tree *t = bch2_bkey_to_bset(b, k);
			printk(" [%zi %zi]", t - b->set,
			       k->_data - bset(b, t)->_data);
		}
		panic("\n");
	}
}
void bch2_btree_node_iter_verify(struct btree_node_iter *iter,
				 struct btree *b)
{
	struct btree_node_iter_set *set, *s2;
	struct bkey_packed *k, *p;
	struct bset_tree *t;

	if (bch2_btree_node_iter_end(iter))
		return;

	/* Verify no duplicates: */
	btree_node_iter_for_each(iter, set)
		btree_node_iter_for_each(iter, s2)
			BUG_ON(set != s2 && set->end == s2->end);

	/* Verify that set->end is correct: */
	btree_node_iter_for_each(iter, set) {
		for_each_bset(b, t)
			if (set->end == t->end_offset)
				goto found;
		BUG();
found:
		BUG_ON(set->k < btree_bkey_first_offset(t) ||
		       set->k >= t->end_offset);
	}

	/* Verify iterator is sorted: */
	btree_node_iter_for_each(iter, set)
		BUG_ON(set != iter->data &&
		       btree_node_iter_cmp(b, set[-1], set[0]) > 0);

	k = bch2_btree_node_iter_peek_all(iter, b);

	for_each_bset(b, t) {
		if (iter->data[0].end == t->end_offset)
			continue;

		p = bch2_bkey_prev_all(b, t,
			bch2_btree_node_iter_bset_pos(iter, b, t));

		BUG_ON(p && bkey_iter_cmp(b, k, p) < 0);
	}
}
void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
			    struct bkey_packed *insert, unsigned clobber_u64s)
{
	struct bset_tree *t = bch2_bkey_to_bset(b, where);
	struct bkey_packed *prev = bch2_bkey_prev_all(b, t, where);
	struct bkey_packed *next = (void *) (where->_data + clobber_u64s);
#if 0
	BUG_ON(prev &&
	       bkey_iter_cmp(b, prev, insert) > 0);
#else
	if (prev &&
	    bkey_iter_cmp(b, prev, insert) > 0) {
		struct bkey k1 = bkey_unpack_key(b, prev);
		struct bkey k2 = bkey_unpack_key(b, insert);
		char buf1[100];
		char buf2[100];

		bch2_dump_btree_node(NULL, b);
		bch2_bkey_to_text(&PBUF(buf1), &k1);
		bch2_bkey_to_text(&PBUF(buf2), &k2);

		panic("prev > insert:\n"
		      "prev   key %s\n"
		      "insert key %s\n",
		      buf1, buf2);
	}
#endif
#if 0
	BUG_ON(next != btree_bkey_last(b, t) &&
	       bkey_iter_cmp(b, insert, next) > 0);
#else
	if (next != btree_bkey_last(b, t) &&
	    bkey_iter_cmp(b, insert, next) > 0) {
		struct bkey k1 = bkey_unpack_key(b, insert);
		struct bkey k2 = bkey_unpack_key(b, next);
		char buf1[100];
		char buf2[100];

		bch2_dump_btree_node(NULL, b);
		bch2_bkey_to_text(&PBUF(buf1), &k1);
		bch2_bkey_to_text(&PBUF(buf2), &k2);

		panic("insert > next:\n"
		      "insert key %s\n"
		      "next   key %s\n",
		      buf1, buf2);
	}
#endif
}

#else

static inline void bch2_btree_node_iter_next_check(struct btree_node_iter *iter,
						   struct btree *b) {}

#endif
/* Auxiliary search trees */

#define BFLOAT_FAILED_UNPACKED	U8_MAX
#define BFLOAT_FAILED		U8_MAX

struct bkey_float {
	u8		exponent;
	u8		key_offset;
	u16		mantissa;
};
#define BKEY_MANTISSA_BITS	16

static unsigned bkey_float_byte_offset(unsigned idx)
{
	return idx * sizeof(struct bkey_float);
}

struct ro_aux_tree {
	struct bkey_float	f[0];
};

struct rw_aux_tree {
	u16		offset;
	struct bpos	k;
};
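
/*
 * Rough memory layout, as implied by the helpers below: for a RO aux tree,
 * the per-bset aux data area holds an array of t->size struct bkey_float
 * entries followed by an array of t->size u8 "previous key size" entries
 * (see ro_aux_tree_prev() and bset_ro_tree_capacity()); for a RW aux tree it
 * holds an array of struct rw_aux_tree entries instead.
 */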
static unsigned bset_aux_tree_buf_end(const struct bset_tree *t)
{
	BUG_ON(t->aux_data_offset == U16_MAX);

	switch (bset_aux_tree_type(t)) {
	case BSET_NO_AUX_TREE:
		return t->aux_data_offset;
	case BSET_RO_AUX_TREE:
		return t->aux_data_offset +
			DIV_ROUND_UP(t->size * sizeof(struct bkey_float) +
				     t->size * sizeof(u8), 8);
	case BSET_RW_AUX_TREE:
		return t->aux_data_offset +
			DIV_ROUND_UP(sizeof(struct rw_aux_tree) * t->size, 8);
	default:
		BUG();
	}
}

static unsigned bset_aux_tree_buf_start(const struct btree *b,
					const struct bset_tree *t)
{
	return t == b->set
		? DIV_ROUND_UP(b->unpack_fn_len, 8)
		: bset_aux_tree_buf_end(t - 1);
}

static void *__aux_tree_base(const struct btree *b,
			     const struct bset_tree *t)
{
	return b->aux_data + t->aux_data_offset * 8;
}

static struct ro_aux_tree *ro_aux_tree_base(const struct btree *b,
					    const struct bset_tree *t)
{
	EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);

	return __aux_tree_base(b, t);
}

static u8 *ro_aux_tree_prev(const struct btree *b,
			    const struct bset_tree *t)
{
	EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);

	return __aux_tree_base(b, t) + bkey_float_byte_offset(t->size);
}

static struct bkey_float *bkey_float(const struct btree *b,
				     const struct bset_tree *t,
				     unsigned idx)
{
	return ro_aux_tree_base(b, t)->f + idx;
}
static void bset_aux_tree_verify(const struct btree *b)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	const struct bset_tree *t;

	for_each_bset(b, t) {
		if (t->aux_data_offset == U16_MAX)
			continue;

		BUG_ON(t != b->set &&
		       t[-1].aux_data_offset == U16_MAX);

		BUG_ON(t->aux_data_offset < bset_aux_tree_buf_start(b, t));
		BUG_ON(t->aux_data_offset > btree_aux_data_u64s(b));
		BUG_ON(bset_aux_tree_buf_end(t) > btree_aux_data_u64s(b));
	}
#endif
}

void bch2_btree_keys_init(struct btree *b)
{
	unsigned i;

	b->nsets = 0;
	memset(&b->nr, 0, sizeof(b->nr));

	for (i = 0; i < MAX_BSETS; i++)
		b->set[i].data_offset = U16_MAX;

	bch2_bset_set_no_aux_tree(b, b->set);
}
/* Binary tree stuff for auxiliary search trees */

/*
 * Cacheline/offset <-> bkey pointer arithmetic:
 *
 * t->tree is a binary search tree in an array; each node corresponds to a key
 * in one cacheline in t->set (BSET_CACHELINE bytes).
 *
 * This means we don't have to store the full index of the key that a node in
 * the binary tree points to; eytzinger1_to_inorder() gives us the cacheline,
 * and then bkey_float->m gives us the offset within that cacheline, in units
 * of 8 bytes.
 *
 * cacheline_to_bkey() and friends abstract out all the pointer arithmetic to
 * make this work.
 *
 * To construct the bfloat for an arbitrary key we need to know what the key
 * immediately preceding it is: we have to check if the two keys differ in the
 * bits we're going to store in bkey_float->mantissa. t->prev[j] stores the size
 * of the previous key so we can walk backwards to it from t->tree[j]'s key.
 */
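
/*
 * Hypothetical worked example: a tree node whose eytzinger index maps to
 * inorder cacheline 3, with key_offset 5, refers to the key that starts
 * 3 * BSET_CACHELINE + 5 * 8 bytes past the cacheline-aligned start of the
 * bset's keys - exactly what cacheline_to_bkey() below computes.
 */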
static inline void *bset_cacheline(const struct btree *b,
				   const struct bset_tree *t,
				   unsigned cacheline)
{
	return (void *) round_down((unsigned long) btree_bkey_first(b, t),
				   L1_CACHE_BYTES) +
		cacheline * BSET_CACHELINE;
}

static struct bkey_packed *cacheline_to_bkey(const struct btree *b,
					     const struct bset_tree *t,
					     unsigned cacheline,
					     unsigned offset)
{
	return bset_cacheline(b, t, cacheline) + offset * 8;
}

static unsigned bkey_to_cacheline(const struct btree *b,
				  const struct bset_tree *t,
				  const struct bkey_packed *k)
{
	return ((void *) k - bset_cacheline(b, t, 0)) / BSET_CACHELINE;
}

static ssize_t __bkey_to_cacheline_offset(const struct btree *b,
					  const struct bset_tree *t,
					  unsigned cacheline,
					  const struct bkey_packed *k)
{
	return (u64 *) k - (u64 *) bset_cacheline(b, t, cacheline);
}

static unsigned bkey_to_cacheline_offset(const struct btree *b,
					 const struct bset_tree *t,
					 unsigned cacheline,
					 const struct bkey_packed *k)
{
	size_t m = __bkey_to_cacheline_offset(b, t, cacheline, k);

	EBUG_ON(m > U8_MAX);
	return m;
}

static inline struct bkey_packed *tree_to_bkey(const struct btree *b,
					       const struct bset_tree *t,
					       unsigned j)
{
	return cacheline_to_bkey(b, t,
			__eytzinger1_to_inorder(j, t->size, t->extra),
			bkey_float(b, t, j)->key_offset);
}

static struct bkey_packed *tree_to_prev_bkey(const struct btree *b,
					     const struct bset_tree *t,
					     unsigned j)
{
	unsigned prev_u64s = ro_aux_tree_prev(b, t)[j];

	return (void *) (tree_to_bkey(b, t, j)->_data - prev_u64s);
}

static struct rw_aux_tree *rw_aux_tree(const struct btree *b,
				       const struct bset_tree *t)
{
	EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);

	return __aux_tree_base(b, t);
}
/*
 * For the write set - the one we're currently inserting keys into - we don't
 * maintain a full search tree, we just keep a simple lookup table in t->prev.
 */
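
/*
 * Roughly speaking (see __build_rw_aux_tree() below), each rw_aux_tree entry
 * records the node offset of a key plus its unpacked position, with about one
 * entry per L1 cacheline of keys, so a lookup in the write set is a small
 * binary search over these entries followed by a short linear scan.
 */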
static struct bkey_packed *rw_aux_to_bkey(const struct btree *b,
					  struct bset_tree *t,
					  unsigned j)
{
	return __btree_node_offset_to_key(b, rw_aux_tree(b, t)[j].offset);
}

static void rw_aux_tree_set(const struct btree *b, struct bset_tree *t,
			    unsigned j, struct bkey_packed *k)
{
	EBUG_ON(k >= btree_bkey_last(b, t));

	rw_aux_tree(b, t)[j] = (struct rw_aux_tree) {
		.offset	= __btree_node_key_to_offset(b, k),
		.k	= bkey_unpack_pos(b, k),
	};
}

static void bch2_bset_verify_rw_aux_tree(struct btree *b,
					 struct bset_tree *t)
{
	struct bkey_packed *k = btree_bkey_first(b, t);
	unsigned j = 0;

	if (!bch2_expensive_debug_checks)
		return;

	BUG_ON(bset_has_ro_aux_tree(t));

	if (!bset_has_rw_aux_tree(t))
		return;

	BUG_ON(t->size < 1);
	BUG_ON(rw_aux_to_bkey(b, t, j) != k);

	goto start;
	while (1) {
		if (rw_aux_to_bkey(b, t, j) == k) {
			BUG_ON(bpos_cmp(rw_aux_tree(b, t)[j].k,
					bkey_unpack_pos(b, k)));
start:
			if (++j == t->size)
				break;

			BUG_ON(rw_aux_tree(b, t)[j].offset <=
			       rw_aux_tree(b, t)[j - 1].offset);
		}

		k = bkey_next(k);
		BUG_ON(k >= btree_bkey_last(b, t));
	}
}
/* returns idx of first entry >= offset: */
static unsigned rw_aux_tree_bsearch(struct btree *b,
				    struct bset_tree *t,
				    unsigned offset)
{
	unsigned bset_offs = offset - btree_bkey_first_offset(t);
	unsigned bset_u64s = t->end_offset - btree_bkey_first_offset(t);
	unsigned idx = bset_u64s ? bset_offs * t->size / bset_u64s : 0;

	EBUG_ON(bset_aux_tree_type(t) != BSET_RW_AUX_TREE);
	EBUG_ON(!t->size);
	EBUG_ON(idx > t->size);

	while (idx < t->size &&
	       rw_aux_tree(b, t)[idx].offset < offset)
		idx++;

	while (idx &&
	       rw_aux_tree(b, t)[idx - 1].offset >= offset)
		idx--;

	EBUG_ON(idx < t->size &&
		rw_aux_tree(b, t)[idx].offset < offset);
	EBUG_ON(idx && rw_aux_tree(b, t)[idx - 1].offset >= offset);
	EBUG_ON(idx + 1 < t->size &&
		rw_aux_tree(b, t)[idx].offset ==
		rw_aux_tree(b, t)[idx + 1].offset);

	return idx;
}
static inline unsigned bkey_mantissa(const struct bkey_packed *k,
				     const struct bkey_float *f,
				     unsigned idx)
{
	u64 v;

	EBUG_ON(!bkey_packed(k));

	v = get_unaligned((u64 *) (((u8 *) k->_data) + (f->exponent >> 3)));

	/*
	 * In little endian, we're shifting off low bits (and then the bits we
	 * want are at the low end), in big endian we're shifting off high bits
	 * (and then the bits we want are at the high end, so we shift them
	 * back down):
	 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	v >>= f->exponent & 7;
#else
	v >>= 64 - (f->exponent & 7) - BKEY_MANTISSA_BITS;
#endif
	return (u16) v;
}
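
/*
 * Hypothetical example of the above: with BKEY_MANTISSA_BITS == 16 and
 * f->exponent == 21, we load the unaligned u64 starting at byte 21 >> 3 == 2
 * of the packed key, shift right by 21 & 7 == 5 on little endian, and keep
 * the low 16 bits - i.e. bits 21..36 of the packed key become the mantissa.
 */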
__always_inline
static inline void __make_bfloat(struct btree *b, struct bset_tree *t,
				 unsigned j,
				 struct bkey_packed *min_key,
				 struct bkey_packed *max_key)
{
	struct bkey_float *f = bkey_float(b, t, j);
	struct bkey_packed *m = tree_to_bkey(b, t, j);
	struct bkey_packed *l = is_power_of_2(j)
		? min_key
		: tree_to_prev_bkey(b, t, j >> ffs(j));
	struct bkey_packed *r = is_power_of_2(j + 1)
		? max_key
		: tree_to_bkey(b, t, j >> (ffz(j) + 1));
	unsigned mantissa;
	int shift, exponent, high_bit;

	/*
	 * for failed bfloats, the lookup code falls back to comparing against
	 * the original key.
	 */
	if (!bkey_packed(l) || !bkey_packed(r) || !bkey_packed(m) ||
	    !b->nr_key_bits) {
		f->exponent = BFLOAT_FAILED_UNPACKED;
		return;
	}

	/*
	 * The greatest differing bit of l and r is the first bit we must
	 * include in the bfloat mantissa we're creating in order to do
	 * comparisons - that bit always becomes the high bit of
	 * bfloat->mantissa, and thus the exponent we're calculating here is
	 * the position of what will become the low bit in bfloat->mantissa:
	 *
	 * Note that this may be negative - we may be running off the low end
	 * of the key: we handle this later:
	 */
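	/*
	 * Hypothetical example: if the greatest differing bit of l and r is
	 * bit 40 and BKEY_MANTISSA_BITS is 16, then high_bit == 40 and
	 * exponent == 40 - 15 == 25, so the mantissa will hold key bits
	 * 25..40, with bit 40 as its most significant bit.
	 */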
	high_bit = max(bch2_bkey_greatest_differing_bit(b, l, r),
		       min_t(unsigned, BKEY_MANTISSA_BITS, b->nr_key_bits) - 1);
	exponent = high_bit - (BKEY_MANTISSA_BITS - 1);

	/*
	 * Then we calculate the actual shift value, from the start of the key
	 * (k->_data), to get the key bits starting at exponent:
	 */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	shift = (int) (b->format.key_u64s * 64 - b->nr_key_bits) + exponent;

	EBUG_ON(shift + BKEY_MANTISSA_BITS > b->format.key_u64s * 64);
#else
	shift = high_bit_offset +
		b->nr_key_bits -
		exponent -
		BKEY_MANTISSA_BITS;

	EBUG_ON(shift < KEY_PACKED_BITS_START);
#endif
	EBUG_ON(shift < 0 || shift >= BFLOAT_FAILED);

	f->exponent = shift;
	mantissa = bkey_mantissa(m, f, j);

	/*
	 * If we've got garbage bits, set them to all 1s - it's legal for the
	 * bfloat to compare larger than the original key, but not smaller:
	 */
	if (exponent < 0)
		mantissa |= ~(~0U << -exponent);

	f->mantissa = mantissa;
}

static void make_bfloat(struct btree *b, struct bset_tree *t,
			unsigned j,
			struct bkey_packed *min_key,
			struct bkey_packed *max_key)
{
	struct bkey_i *k;

	if (is_power_of_2(j) &&
	    !min_key->u64s) {
		if (!bkey_pack_pos(min_key, b->data->min_key, b)) {
			k = (void *) min_key;
			bkey_init(&k->k);
			k->k.p = b->data->min_key;
		}
	}

	if (is_power_of_2(j + 1) &&
	    !max_key->u64s) {
		if (!bkey_pack_pos(max_key, b->data->max_key, b)) {
			k = (void *) max_key;
			bkey_init(&k->k);
			k->k.p = b->data->max_key;
		}
	}

	__make_bfloat(b, t, j, min_key, max_key);
}
/* bytes remaining - only valid for last bset: */
static unsigned __bset_tree_capacity(const struct btree *b, const struct bset_tree *t)
{
	bset_aux_tree_verify(b);

	return btree_aux_data_bytes(b) - t->aux_data_offset * sizeof(u64);
}

static unsigned bset_ro_tree_capacity(const struct btree *b, const struct bset_tree *t)
{
	return __bset_tree_capacity(b, t) /
		(sizeof(struct bkey_float) + sizeof(u8));
}

static unsigned bset_rw_tree_capacity(const struct btree *b, const struct bset_tree *t)
{
	return __bset_tree_capacity(b, t) / sizeof(struct rw_aux_tree);
}
static noinline void __build_rw_aux_tree(struct btree *b, struct bset_tree *t)
{
	struct bkey_packed *k;

	t->size = 1;
	t->extra = BSET_RW_AUX_TREE_VAL;
	rw_aux_tree(b, t)[0].offset =
		__btree_node_key_to_offset(b, btree_bkey_first(b, t));

	bset_tree_for_each_key(b, t, k) {
		if (t->size == bset_rw_tree_capacity(b, t))
			break;

		if ((void *) k - (void *) rw_aux_to_bkey(b, t, t->size - 1) >
		    L1_CACHE_BYTES)
			rw_aux_tree_set(b, t, t->size++, k);
	}
}
static noinline void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
{
	struct bkey_packed *prev = NULL, *k = btree_bkey_first(b, t);
	struct bkey_i min_key, max_key;
	unsigned j, cacheline = 1;

	t->size = min(bkey_to_cacheline(b, t, btree_bkey_last(b, t)),
		      bset_ro_tree_capacity(b, t));
retry:
	if (t->size < 2) {
		t->size = 0;
		t->extra = BSET_NO_AUX_TREE_VAL;
		return;
	}

	t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;

	/* First we figure out where the first key in each cacheline is */
	eytzinger1_for_each(j, t->size) {
		while (bkey_to_cacheline(b, t, k) < cacheline)
			prev = k, k = bkey_next(k);

		if (k >= btree_bkey_last(b, t)) {
			/* XXX: this path sucks */
			t->size--;
			goto retry;
		}

		ro_aux_tree_prev(b, t)[j] = prev->u64s;
		bkey_float(b, t, j)->key_offset =
			bkey_to_cacheline_offset(b, t, cacheline++, k);

		EBUG_ON(tree_to_prev_bkey(b, t, j) != prev);
		EBUG_ON(tree_to_bkey(b, t, j) != k);
	}

	while (k != btree_bkey_last(b, t))
		prev = k, k = bkey_next(k);

	if (!bkey_pack_pos(bkey_to_packed(&min_key), b->data->min_key, b)) {
		bkey_init(&min_key.k);
		min_key.k.p = b->data->min_key;
	}

	if (!bkey_pack_pos(bkey_to_packed(&max_key), b->data->max_key, b)) {
		bkey_init(&max_key.k);
		max_key.k.p = b->data->max_key;
	}

	/* Then we build the tree */
	eytzinger1_for_each(j, t->size)
		__make_bfloat(b, t, j,
			      bkey_to_packed(&min_key),
			      bkey_to_packed(&max_key));
}
static void bset_alloc_tree(struct btree *b, struct bset_tree *t)
{
	struct bset_tree *i;

	for (i = b->set; i != t; i++)
		BUG_ON(bset_has_rw_aux_tree(i));

	bch2_bset_set_no_aux_tree(b, t);

	/* round up to next cacheline: */
	t->aux_data_offset = round_up(bset_aux_tree_buf_start(b, t),
				      SMP_CACHE_BYTES / sizeof(u64));

	bset_aux_tree_verify(b);
}

void bch2_bset_build_aux_tree(struct btree *b, struct bset_tree *t,
			      bool writeable)
{
	if (writeable
	    ? bset_has_rw_aux_tree(t)
	    : bset_has_ro_aux_tree(t))
		return;

	bset_alloc_tree(b, t);

	if (!__bset_tree_capacity(b, t))
		return;

	if (writeable)
		__build_rw_aux_tree(b, t);
	else
		__build_ro_aux_tree(b, t);

	bset_aux_tree_verify(b);
}

void bch2_bset_init_first(struct btree *b, struct bset *i)
{
	struct bset_tree *t;

	BUG_ON(b->nsets);

	memset(i, 0, sizeof(*i));
	get_random_bytes(&i->seq, sizeof(i->seq));
	SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);

	t = &b->set[b->nsets++];
	set_btree_bset(b, t, i);
}

void bch2_bset_init_next(struct bch_fs *c, struct btree *b,
			 struct btree_node_entry *bne)
{
	struct bset *i = &bne->keys;
	struct bset_tree *t;

	BUG_ON(bset_byte_offset(b, bne) >= btree_bytes(c));
	BUG_ON((void *) bne < (void *) btree_bkey_last(b, bset_tree_last(b)));
	BUG_ON(b->nsets >= MAX_BSETS);

	memset(i, 0, sizeof(*i));
	i->seq = btree_bset_first(b)->seq;
	SET_BSET_BIG_ENDIAN(i, CPU_BIG_ENDIAN);

	t = &b->set[b->nsets++];
	set_btree_bset(b, t, i);
}
/*
 * find _some_ key in the same bset as @k that precedes @k - not necessarily the
 * immediate predecessor:
 */
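/*
 * For instance (a sketch of the behaviour below, not a guarantee): with a RO
 * aux tree the candidate returned is the first key of an earlier cacheline;
 * bch2_bkey_prev_filter() then walks forward from that candidate to find the
 * actual predecessor it wants.
 */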
static struct bkey_packed *__bkey_prev(struct btree *b, struct bset_tree *t,
				       struct bkey_packed *k)
{
	struct bkey_packed *p;
	unsigned offset;
	int j;

	EBUG_ON(k < btree_bkey_first(b, t) ||
		k > btree_bkey_last(b, t));

	if (k == btree_bkey_first(b, t))
		return NULL;

	switch (bset_aux_tree_type(t)) {
	case BSET_NO_AUX_TREE:
		p = btree_bkey_first(b, t);
		break;
	case BSET_RO_AUX_TREE:
		j = min_t(unsigned, t->size - 1, bkey_to_cacheline(b, t, k));

		do {
			p = j ? tree_to_bkey(b, t,
					__inorder_to_eytzinger1(j--,
							t->size, t->extra))
			      : btree_bkey_first(b, t);
		} while (p >= k);
		break;
	case BSET_RW_AUX_TREE:
		offset = __btree_node_key_to_offset(b, k);
		j = rw_aux_tree_bsearch(b, t, offset);
		p = j ? rw_aux_to_bkey(b, t, j - 1)
		      : btree_bkey_first(b, t);
		break;
	}

	return p;
}

struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
					  struct bset_tree *t,
					  struct bkey_packed *k,
					  unsigned min_key_type)
{
	struct bkey_packed *p, *i, *ret = NULL, *orig_k = k;

	while ((p = __bkey_prev(b, t, k)) && !ret) {
		for (i = p; i != k; i = bkey_next(i))
			if (i->type >= min_key_type)
				ret = i;

		k = p;
	}

	if (bch2_expensive_debug_checks) {
		BUG_ON(ret >= orig_k);

		for (i = ret
			? bkey_next(ret)
			: btree_bkey_first(b, t);
		     i != orig_k;
		     i = bkey_next(i))
			BUG_ON(i->type >= min_key_type);
	}

	return ret;
}
/* Insert */

static void rw_aux_tree_fix_invalidated_key(struct btree *b,
					    struct bset_tree *t,
					    struct bkey_packed *k)
{
	unsigned offset = __btree_node_key_to_offset(b, k);
	unsigned j = rw_aux_tree_bsearch(b, t, offset);

	if (j < t->size &&
	    rw_aux_tree(b, t)[j].offset == offset)
		rw_aux_tree_set(b, t, j, k);

	bch2_bset_verify_rw_aux_tree(b, t);
}

static void ro_aux_tree_fix_invalidated_key(struct btree *b,
					    struct bset_tree *t,
					    struct bkey_packed *k)
{
	struct bkey_packed min_key, max_key;
	unsigned inorder, j;

	EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);

	/* signal to make_bfloat() that they're uninitialized: */
	min_key.u64s = max_key.u64s = 0;

	if (bkey_next(k) == btree_bkey_last(b, t)) {
		for (j = 1; j < t->size; j = j * 2 + 1)
			make_bfloat(b, t, j, &min_key, &max_key);
	}

	inorder = bkey_to_cacheline(b, t, k);

	if (inorder &&
	    inorder < t->size) {
		j = __inorder_to_eytzinger1(inorder, t->size, t->extra);

		if (k == tree_to_bkey(b, t, j)) {
			/* Fix the node this key corresponds to */
			make_bfloat(b, t, j, &min_key, &max_key);

			/* Children for which this key is the right boundary */
			for (j = eytzinger1_left_child(j);
			     j < t->size;
			     j = eytzinger1_right_child(j))
				make_bfloat(b, t, j, &min_key, &max_key);
		}
	}

	if (inorder + 1 < t->size) {
		j = __inorder_to_eytzinger1(inorder + 1, t->size, t->extra);

		if (k == tree_to_prev_bkey(b, t, j)) {
			make_bfloat(b, t, j, &min_key, &max_key);

			/* Children for which this key is the left boundary */
			for (j = eytzinger1_right_child(j);
			     j < t->size;
			     j = eytzinger1_left_child(j))
				make_bfloat(b, t, j, &min_key, &max_key);
		}
	}
}
/**
 * bch2_bset_fix_invalidated_key() - given an existing key @k that has been
 * modified, fix any auxiliary search tree by remaking all the nodes in the
 * auxiliary search tree that @k corresponds to
 */
void bch2_bset_fix_invalidated_key(struct btree *b, struct bkey_packed *k)
{
	struct bset_tree *t = bch2_bkey_to_bset_inlined(b, k);

	switch (bset_aux_tree_type(t)) {
	case BSET_NO_AUX_TREE:
		break;
	case BSET_RO_AUX_TREE:
		ro_aux_tree_fix_invalidated_key(b, t, k);
		break;
	case BSET_RW_AUX_TREE:
		rw_aux_tree_fix_invalidated_key(b, t, k);
		break;
	}
}
static void bch2_bset_fix_lookup_table(struct btree *b,
				       struct bset_tree *t,
				       struct bkey_packed *_where,
				       unsigned clobber_u64s,
				       unsigned new_u64s)
{
	int shift = new_u64s - clobber_u64s;
	unsigned l, j, where = __btree_node_key_to_offset(b, _where);

	EBUG_ON(bset_has_ro_aux_tree(t));

	if (!bset_has_rw_aux_tree(t))
		return;

	/* returns first entry >= where */
	l = rw_aux_tree_bsearch(b, t, where);

	if (!l) /* never delete first entry */
		l++;
	else if (l < t->size &&
		 where < t->end_offset &&
		 rw_aux_tree(b, t)[l].offset == where)
		rw_aux_tree_set(b, t, l++, _where);

	/* l now > where */

	for (j = l;
	     j < t->size &&
	     rw_aux_tree(b, t)[j].offset < where + clobber_u64s;
	     j++)
		;

	if (j < t->size &&
	    rw_aux_tree(b, t)[j].offset + shift ==
	    rw_aux_tree(b, t)[l - 1].offset)
		j++;

	memmove(&rw_aux_tree(b, t)[l],
		&rw_aux_tree(b, t)[j],
		(void *) &rw_aux_tree(b, t)[t->size] -
		(void *) &rw_aux_tree(b, t)[j]);
	t->size -= j - l;

	for (j = l; j < t->size; j++)
		rw_aux_tree(b, t)[j].offset += shift;

	EBUG_ON(l < t->size &&
		rw_aux_tree(b, t)[l].offset ==
		rw_aux_tree(b, t)[l - 1].offset);

	if (t->size < bset_rw_tree_capacity(b, t) &&
	    (l < t->size
	     ? rw_aux_tree(b, t)[l].offset
	     : t->end_offset) -
	    rw_aux_tree(b, t)[l - 1].offset >
	    L1_CACHE_BYTES / sizeof(u64)) {
		struct bkey_packed *start = rw_aux_to_bkey(b, t, l - 1);
		struct bkey_packed *end = l < t->size
			? rw_aux_to_bkey(b, t, l)
			: btree_bkey_last(b, t);
		struct bkey_packed *k = start;

		while (1) {
			k = bkey_next(k);
			if (k == end)
				break;

			if ((void *) k - (void *) start >= L1_CACHE_BYTES) {
				memmove(&rw_aux_tree(b, t)[l + 1],
					&rw_aux_tree(b, t)[l],
					(void *) &rw_aux_tree(b, t)[t->size] -
					(void *) &rw_aux_tree(b, t)[l]);
				t->size++;
				rw_aux_tree_set(b, t, l, k);
				break;
			}
		}
	}

	bch2_bset_verify_rw_aux_tree(b, t);
	bset_aux_tree_verify(b);
}
void bch2_bset_insert(struct btree *b,
		      struct btree_node_iter *iter,
		      struct bkey_packed *where,
		      struct bkey_i *insert,
		      unsigned clobber_u64s)
{
	struct bkey_format *f = &b->format;
	struct bset_tree *t = bset_tree_last(b);
	struct bkey_packed packed, *src = bkey_to_packed(insert);

	bch2_bset_verify_rw_aux_tree(b, t);
	bch2_verify_insert_pos(b, where, bkey_to_packed(insert), clobber_u64s);

	if (bch2_bkey_pack_key(&packed, &insert->k, f))
		src = &packed;

	if (!bkey_deleted(&insert->k))
		btree_keys_account_key_add(&b->nr, t - b->set, src);

	if (src->u64s != clobber_u64s) {
		u64 *src_p = where->_data + clobber_u64s;
		u64 *dst_p = where->_data + src->u64s;

		EBUG_ON((int) le16_to_cpu(bset(b, t)->u64s) <
			(int) clobber_u64s - src->u64s);

		memmove_u64s(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
		le16_add_cpu(&bset(b, t)->u64s, src->u64s - clobber_u64s);
		set_btree_bset_end(b, t);
	}

	memcpy_u64s(where, src,
		    bkeyp_key_u64s(f, src));
	memcpy_u64s(bkeyp_val(f, where), &insert->v,
		    bkeyp_val_u64s(f, src));

	if (src->u64s != clobber_u64s)
		bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, src->u64s);

	bch2_verify_btree_nr_keys(b);
}

void bch2_bset_delete(struct btree *b,
		      struct bkey_packed *where,
		      unsigned clobber_u64s)
{
	struct bset_tree *t = bset_tree_last(b);
	u64 *src_p = where->_data + clobber_u64s;
	u64 *dst_p = where->_data;

	bch2_bset_verify_rw_aux_tree(b, t);

	EBUG_ON(le16_to_cpu(bset(b, t)->u64s) < clobber_u64s);

	memmove_u64s_down(dst_p, src_p, btree_bkey_last(b, t)->_data - src_p);
	le16_add_cpu(&bset(b, t)->u64s, -clobber_u64s);
	set_btree_bset_end(b, t);

	bch2_bset_fix_lookup_table(b, t, where, clobber_u64s, 0);
}
/* Lookup */

__flatten
static struct bkey_packed *bset_search_write_set(const struct btree *b,
				struct bset_tree *t,
				struct bpos *search)
{
	unsigned l = 0, r = t->size;

	while (l + 1 != r) {
		unsigned m = (l + r) >> 1;

		if (bpos_cmp(rw_aux_tree(b, t)[m].k, *search) < 0)
			l = m;
		else
			r = m;
	}

	return rw_aux_to_bkey(b, t, l);
}

static inline void prefetch_four_cachelines(void *p)
{
#ifdef CONFIG_X86_64
	asm("prefetcht0 (-127 + 64 * 0)(%0);"
	    "prefetcht0 (-127 + 64 * 1)(%0);"
	    "prefetcht0 (-127 + 64 * 2)(%0);"
	    "prefetcht0 (-127 + 64 * 3)(%0);"
	    :
	    : "r" (p + 127));
#else
	prefetch(p + L1_CACHE_BYTES * 0);
	prefetch(p + L1_CACHE_BYTES * 1);
	prefetch(p + L1_CACHE_BYTES * 2);
	prefetch(p + L1_CACHE_BYTES * 3);
#endif
}
static inline bool bkey_mantissa_bits_dropped(const struct btree *b,
					      const struct bkey_float *f,
					      unsigned idx)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	unsigned key_bits_start = b->format.key_u64s * 64 - b->nr_key_bits;

	return f->exponent > key_bits_start;
#else
	unsigned key_bits_end = high_bit_offset + b->nr_key_bits;

	return f->exponent + BKEY_MANTISSA_BITS < key_bits_end;
#endif
}
__flatten
static struct bkey_packed *bset_search_tree(const struct btree *b,
				const struct bset_tree *t,
				const struct bpos *search,
				const struct bkey_packed *packed_search)
{
	struct ro_aux_tree *base = ro_aux_tree_base(b, t);
	struct bkey_float *f;
	struct bkey_packed *k;
	unsigned inorder, n = 1, l, r;
	int cmp;

	do {
		if (likely(n << 4 < t->size))
			prefetch(&base->f[n << 4]);

		f = &base->f[n];

		if (unlikely(f->exponent >= BFLOAT_FAILED))
			goto slowpath;

		l = f->mantissa;
		r = bkey_mantissa(packed_search, f, n);

		if (unlikely(l == r) && bkey_mantissa_bits_dropped(b, f, n))
			goto slowpath;

		n = n * 2 + (l < r);
		continue;
slowpath:
		k = tree_to_bkey(b, t, n);
		cmp = bkey_cmp_p_or_unp(b, k, packed_search, search);
		if (!cmp)
			return k;

		n = n * 2 + (cmp < 0);
	} while (n < t->size);

	inorder = __eytzinger1_to_inorder(n >> 1, t->size, t->extra);

	/*
	 * n would have been the node we recursed to - the low bit tells us if
	 * we recursed left or recursed right.
	 */
	if (likely(!(n & 1))) {
		--inorder;
		if (unlikely(!inorder))
			return btree_bkey_first(b, t);

		f = &base->f[eytzinger1_prev(n >> 1, t->size)];
	}

	return cacheline_to_bkey(b, t, inorder, f->key_offset);
}
static __always_inline __flatten
struct bkey_packed *__bch2_bset_search(struct btree *b,
				struct bset_tree *t,
				struct bpos *search,
				const struct bkey_packed *lossy_packed_search)
{
	/*
	 * First, we search for a cacheline, then lastly we do a linear search
	 * within that cacheline.
	 *
	 * To search for the cacheline, there's three different possibilities:
	 *  * The set is too small to have a search tree, so we just do a linear
	 *    search over the whole set.
	 *  * The set is the one we're currently inserting into; keeping a full
	 *    auxiliary search tree up to date would be too expensive, so we
	 *    use a much simpler lookup table to do a binary search -
	 *    bset_search_write_set().
	 *  * Or we use the auxiliary search tree we constructed earlier -
	 *    bset_search_tree()
	 */
	switch (bset_aux_tree_type(t)) {
	case BSET_NO_AUX_TREE:
		return btree_bkey_first(b, t);
	case BSET_RW_AUX_TREE:
		return bset_search_write_set(b, t, search);
	case BSET_RO_AUX_TREE:
		return bset_search_tree(b, t, search, lossy_packed_search);
	default:
		unreachable();
	}
}
static __always_inline __flatten
struct bkey_packed *bch2_bset_search_linear(struct btree *b,
				struct bset_tree *t,
				struct bpos *search,
				struct bkey_packed *packed_search,
				const struct bkey_packed *lossy_packed_search,
				struct bkey_packed *m)
{
	if (lossy_packed_search)
		while (m != btree_bkey_last(b, t) &&
		       bkey_iter_cmp_p_or_unp(b, m,
					lossy_packed_search, search) < 0)
			m = bkey_next(m);

	if (!packed_search)
		while (m != btree_bkey_last(b, t) &&
		       bkey_iter_pos_cmp(b, m, search) < 0)
			m = bkey_next(m);

	if (bch2_expensive_debug_checks) {
		struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);

		BUG_ON(prev &&
		       bkey_iter_cmp_p_or_unp(b, prev,
					packed_search, search) >= 0);
	}

	return m;
}
/* Btree node iterator */

static inline void __bch2_btree_node_iter_push(struct btree_node_iter *iter,
			      struct btree *b,
			      const struct bkey_packed *k,
			      const struct bkey_packed *end)
{
	if (k != end) {
		struct btree_node_iter_set *pos;

		btree_node_iter_for_each(iter, pos)
			;

		BUG_ON(pos >= iter->data + ARRAY_SIZE(iter->data));
		*pos = (struct btree_node_iter_set) {
			__btree_node_key_to_offset(b, k),
			__btree_node_key_to_offset(b, end)
		};
	}
}

void bch2_btree_node_iter_push(struct btree_node_iter *iter,
			       struct btree *b,
			       const struct bkey_packed *k,
			       const struct bkey_packed *end)
{
	__bch2_btree_node_iter_push(iter, b, k, end);
	bch2_btree_node_iter_sort(iter, b);
}
noinline __flatten __attribute__((cold))
static void btree_node_iter_init_pack_failed(struct btree_node_iter *iter,
			      struct btree *b, struct bpos *search)
{
	struct bkey_packed *k;

	trace_bkey_pack_pos_fail(search);

	bch2_btree_node_iter_init_from_start(iter, b);

	while ((k = bch2_btree_node_iter_peek(iter, b)) &&
	       bkey_iter_pos_cmp(b, k, search) < 0)
		bch2_btree_node_iter_advance(iter, b);
}
/**
 * bch2_btree_node_iter_init - initialize a btree node iterator, starting from a
 * given position
 *
 * Main entry point to the lookup code for individual btree nodes:
 *
 * NOTE:
 *
 * When you don't filter out deleted keys, btree nodes _do_ contain duplicate
 * keys. This doesn't matter for most code, but it does matter for lookups.
 *
 * Some adjacent keys with a string of equal keys:
 *	i j k k k k l m
 *
 * If you search for k, the lookup code isn't guaranteed to return you any
 * specific k. The lookup code is conceptually doing a binary search and
 * iterating backwards is very expensive so if the pivot happens to land at the
 * last k that's what you'll get.
 *
 * This works out ok, but it's something to be aware of:
 *
 *  - For non extents, we guarantee that the live key comes last - see
 *    btree_node_iter_cmp(), keys_out_of_order(). So the duplicates you don't
 *    see will only be deleted keys you don't care about.
 *
 *  - For extents, deleted keys sort last (see the comment at the top of this
 *    file). But when you're searching for extents, you actually want the first
 *    key strictly greater than your search key - an extent that compares equal
 *    to the search key is going to have 0 sectors after the search key.
 *
 *    But this does mean that we can't just search for
 *    bpos_successor(start_of_range) to get the first extent that overlaps with
 *    the range we want - if we're unlucky and there's an extent that ends
 *    exactly where we searched, then there could be a deleted key at the same
 *    position and we'd get that when we search instead of the preceding extent
 *    we needed.
 *
 *    So we've got to search for start_of_range, then after the lookup iterate
 *    past any extents that compare equal to the position we searched for.
 */
__flatten
void bch2_btree_node_iter_init(struct btree_node_iter *iter,
			       struct btree *b, struct bpos *search)
{
	struct bkey_packed p, *packed_search = NULL;
	struct btree_node_iter_set *pos = iter->data;
	struct bkey_packed *k[MAX_BSETS];
	unsigned i;

	EBUG_ON(bpos_cmp(*search, b->data->min_key) < 0);
	EBUG_ON(bpos_cmp(*search, b->data->max_key) > 0);
	bset_aux_tree_verify(b);

	memset(iter, 0, sizeof(*iter));

	switch (bch2_bkey_pack_pos_lossy(&p, *search, b)) {
	case BKEY_PACK_POS_EXACT:
		packed_search = &p;
		break;
	case BKEY_PACK_POS_SMALLER:
		packed_search = NULL;
		break;
	case BKEY_PACK_POS_FAIL:
		btree_node_iter_init_pack_failed(iter, b, search);
		return;
	}

	for (i = 0; i < b->nsets; i++) {
		k[i] = __bch2_bset_search(b, b->set + i, search, &p);
		prefetch_four_cachelines(k[i]);
	}

	for (i = 0; i < b->nsets; i++) {
		struct bset_tree *t = b->set + i;
		struct bkey_packed *end = btree_bkey_last(b, t);

		k[i] = bch2_bset_search_linear(b, t, search,
					       packed_search, &p, k[i]);
		if (k[i] != end)
			*pos++ = (struct btree_node_iter_set) {
				__btree_node_key_to_offset(b, k[i]),
				__btree_node_key_to_offset(b, end)
			};
	}

	bch2_btree_node_iter_sort(iter, b);
}
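
/*
 * A sketch of typical usage (hypothetical caller with the node already
 * locked; search_pos is whatever position the caller wants to start at):
 *
 *	struct btree_node_iter iter;
 *	struct bkey_packed *k;
 *
 *	bch2_btree_node_iter_init(&iter, b, &search_pos);
 *	k = bch2_btree_node_iter_peek(&iter, b);
 */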
void bch2_btree_node_iter_init_from_start(struct btree_node_iter *iter,
					  struct btree *b)
{
	struct bset_tree *t;

	memset(iter, 0, sizeof(*iter));

	for_each_bset(b, t)
		__bch2_btree_node_iter_push(iter, b,
					    btree_bkey_first(b, t),
					    btree_bkey_last(b, t));
	bch2_btree_node_iter_sort(iter, b);
}

struct bkey_packed *bch2_btree_node_iter_bset_pos(struct btree_node_iter *iter,
						  struct btree *b,
						  struct bset_tree *t)
{
	struct btree_node_iter_set *set;

	btree_node_iter_for_each(iter, set)
		if (set->end == t->end_offset)
			return __btree_node_offset_to_key(b, set->k);

	return btree_bkey_last(b, t);
}
static inline bool btree_node_iter_sort_two(struct btree_node_iter *iter,
					    struct btree *b,
					    unsigned first)
{
	bool ret;

	if ((ret = (btree_node_iter_cmp(b,
					iter->data[first],
					iter->data[first + 1]) > 0)))
		swap(iter->data[first], iter->data[first + 1]);
	return ret;
}

void bch2_btree_node_iter_sort(struct btree_node_iter *iter,
			       struct btree *b)
{
	/* unrolled bubble sort: */

	if (!__btree_node_iter_set_end(iter, 2)) {
		btree_node_iter_sort_two(iter, b, 0);
		btree_node_iter_sort_two(iter, b, 1);
	}

	if (!__btree_node_iter_set_end(iter, 1))
		btree_node_iter_sort_two(iter, b, 0);
}

void bch2_btree_node_iter_set_drop(struct btree_node_iter *iter,
				   struct btree_node_iter_set *set)
{
	struct btree_node_iter_set *last =
		iter->data + ARRAY_SIZE(iter->data) - 1;

	memmove(&set[0], &set[1], (void *) last - (void *) set);
	*last = (struct btree_node_iter_set) { 0, 0 };
}

static inline void __bch2_btree_node_iter_advance(struct btree_node_iter *iter,
						  struct btree *b)
{
	iter->data->k += __bch2_btree_node_iter_peek_all(iter, b)->u64s;

	EBUG_ON(iter->data->k > iter->data->end);

	if (unlikely(__btree_node_iter_set_end(iter, 0))) {
		bch2_btree_node_iter_set_drop(iter, iter->data);
		return;
	}

	if (__btree_node_iter_set_end(iter, 1))
		return;

	if (!btree_node_iter_sort_two(iter, b, 0))
		return;

	if (__btree_node_iter_set_end(iter, 2))
		return;

	btree_node_iter_sort_two(iter, b, 1);
}

void bch2_btree_node_iter_advance(struct btree_node_iter *iter,
				  struct btree *b)
{
	if (bch2_expensive_debug_checks) {
		bch2_btree_node_iter_verify(iter, b);
		bch2_btree_node_iter_next_check(iter, b);
	}

	__bch2_btree_node_iter_advance(iter, b);
}
/*
 * Expensive:
 */
struct bkey_packed *bch2_btree_node_iter_prev_all(struct btree_node_iter *iter,
						  struct btree *b)
{
	struct bkey_packed *k, *prev = NULL;
	struct btree_node_iter_set *set;
	struct bset_tree *t;
	unsigned end = 0;

	if (bch2_expensive_debug_checks)
		bch2_btree_node_iter_verify(iter, b);

	for_each_bset(b, t) {
		k = bch2_bkey_prev_all(b, t,
			bch2_btree_node_iter_bset_pos(iter, b, t));
		if (k &&
		    (!prev || bkey_iter_cmp(b, k, prev) > 0)) {
			prev = k;
			end = t->end_offset;
		}
	}

	if (!prev)
		return NULL;

	/*
	 * We're manually memmoving instead of just calling sort() to ensure the
	 * prev we picked ends up in slot 0 - sort won't necessarily put it
	 * there because of duplicate deleted keys:
	 */
	btree_node_iter_for_each(iter, set)
		if (set->end == end)
			goto found;

	BUG_ON(set != &iter->data[__btree_node_iter_used(iter)]);
found:
	BUG_ON(set >= iter->data + ARRAY_SIZE(iter->data));

	memmove(&iter->data[1],
		&iter->data[0],
		(void *) set - (void *) &iter->data[0]);

	iter->data[0].k = __btree_node_key_to_offset(b, prev);
	iter->data[0].end = end;

	if (bch2_expensive_debug_checks)
		bch2_btree_node_iter_verify(iter, b);
	return prev;
}

struct bkey_packed *bch2_btree_node_iter_prev(struct btree_node_iter *iter,
					      struct btree *b)
{
	struct bkey_packed *prev;

	do {
		prev = bch2_btree_node_iter_prev_all(iter, b);
	} while (prev && bkey_deleted(prev));

	return prev;
}
struct bkey_s_c bch2_btree_node_iter_peek_unpack(struct btree_node_iter *iter,
						 struct btree *b,
						 struct bkey *u)
{
	struct bkey_packed *k = bch2_btree_node_iter_peek(iter, b);

	return k ? bkey_disassemble(b, k, u) : bkey_s_c_null;
}

/* Mergesort */

void bch2_btree_keys_stats(struct btree *b, struct bset_stats *stats)
{
	struct bset_tree *t;

	for_each_bset(b, t) {
		enum bset_aux_tree_type type = bset_aux_tree_type(t);
		size_t j;

		stats->sets[type].nr++;
		stats->sets[type].bytes += le16_to_cpu(bset(b, t)->u64s) *
			sizeof(u64);

		if (bset_has_ro_aux_tree(t)) {
			stats->floats += t->size - 1;

			for (j = 1; j < t->size; j++)
				stats->failed +=
					bkey_float(b, t, j)->exponent ==
					BFLOAT_FAILED;
		}
	}
}
void bch2_bfloat_to_text(struct printbuf *out, struct btree *b,
			 struct bkey_packed *k)
{
	struct bset_tree *t = bch2_bkey_to_bset(b, k);
	struct bkey uk;
	unsigned j, inorder;

	if (out->pos != out->end)
		*out->pos = '\0';

	if (!bset_has_ro_aux_tree(t))
		return;

	inorder = bkey_to_cacheline(b, t, k);
	if (!inorder || inorder >= t->size)
		return;

	j = __inorder_to_eytzinger1(inorder, t->size, t->extra);
	if (k != tree_to_bkey(b, t, j))
		return;

	switch (bkey_float(b, t, j)->exponent) {
	case BFLOAT_FAILED:
		uk = bkey_unpack_key(b, k);
		pr_buf(out,
		       "failed unpacked at depth %u\n"
		       "\t",
		       ilog2(j));
		bch2_bpos_to_text(out, uk.p);
		pr_buf(out, "\n");
		break;
	}
}