// SPDX-License-Identifier: GPL-2.0
# include "bcachefs.h"
# include "btree_update.h"
# include "btree_update_interior.h"
# include "buckets.h"
# include "debug.h"
# include "extents.h"
# include "extent_update.h"
/*
 * This counts the number of iterators to the alloc & ec btrees we'll need
 * inserting/removing this extent:
 */
/*
 * Walk @k's extent entries and count how many of them (data pointers and
 * stripe pointers) will require an alloc/ec btree iterator when this
 * extent is inserted or removed.
 */
static unsigned bch2_bkey_nr_alloc_ptrs(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	const union bch_extent_entry *entry;
	unsigned nr = 0;

	bkey_extent_entry_for_each(ptrs, entry) {
		unsigned type = __extent_entry_type(entry);

		/* Only ptr and stripe_ptr entries need alloc/ec iterators: */
		if (type == BCH_EXTENT_ENTRY_ptr ||
		    type == BCH_EXTENT_ENTRY_stripe_ptr)
			nr++;
	}

	return nr;
}
/*
 * Accumulate into *nr_iters the number of btree iterators needed to
 * insert/overwrite @k; when the running total reaches @max_iters, clamp
 * *end (the atomic-update boundary) back to @k's position.
 *
 * Returns 1 when *end was clamped (caller should stop scanning), 0 to
 * keep going, or a negative error from the reflink btree walk.
 */
static int count_iters_for_insert(struct btree_trans *trans,
				  struct bkey_s_c k,
				  unsigned offset,
				  struct bpos *end,
				  unsigned *nr_iters,
				  unsigned max_iters)
{
	/* ret: clamped-*end flag; ret2: error from the btree iteration below */
	int ret = 0, ret2 = 0;

	if (*nr_iters >= max_iters) {
		*end = bpos_min(*end, k.k->p);
		ret = 1;
	}

	switch (k.k->type) {
	case KEY_TYPE_extent:
	case KEY_TYPE_reflink_v:
		/* One iterator per alloc/ec pointer in the extent: */
		*nr_iters += bch2_bkey_nr_alloc_ptrs(k);

		if (*nr_iters >= max_iters) {
			*end = bpos_min(*end, k.k->p);
			ret = 1;
		}

		break;
	case KEY_TYPE_reflink_p: {
		/*
		 * An indirect extent: walk the keys it points to in the
		 * reflink btree, since updating it means updating them too.
		 */
		struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
		u64 idx = le64_to_cpu(p.v->idx);
		/* Sectors of p that lie before the current *end boundary: */
		unsigned sectors = bpos_min(*end, p.k->p).offset -
			bkey_start_offset(p.k);
		struct btree_iter *iter;
		struct bkey_s_c r_k;

		for_each_btree_key(trans, iter,
				   BTREE_ID_reflink, POS(0, idx + offset),
				   BTREE_ITER_SLOTS, r_k, ret2) {
			/* Stop once past the range p references: */
			if (bkey_cmp(bkey_start_pos(r_k.k),
				     POS(0, idx + sectors)) >= 0)
				break;

			/* extent_update_to_keys(), for the reflink_v update */
			*nr_iters += 1;

			*nr_iters += 1 + bch2_bkey_nr_alloc_ptrs(r_k);

			if (*nr_iters >= max_iters) {
				/*
				 * Clamp *end to the portion of @k covered by
				 * the indirect extents seen so far:
				 */
				struct bpos pos = bkey_start_pos(k.k);
				pos.offset += min_t(u64, k.k->size,
						    r_k.k->p.offset - idx);

				*end = bpos_min(*end, pos);
				ret = 1;
				break;
			}
		}
		bch2_trans_iter_put(trans, iter);

		break;
	}
	}

	/* An iteration error takes precedence over the clamped flag: */
	return ret2 ?: ret;
}
# define EXTENT_ITERS_MAX (BTREE_ITER_MAX / 3)
/*
 * Compute *end, the furthest position up to which @insert can be applied
 * as a single atomic btree update without exceeding the transaction's
 * iterator budget (EXTENT_ITERS_MAX).
 *
 * Counts iterators for @insert itself, then scans the existing extents it
 * overlaps (via a copy of @iter, so @iter's position is preserved) and
 * lets count_iters_for_insert() pull *end back as the budget fills.
 *
 * Returns 0 on success with *end set, or a negative error.
 */
int bch2_extent_atomic_end(struct btree_trans *trans,
			   struct btree_iter *iter,
			   struct bkey_i *insert,
			   struct bpos *end)
{
	struct btree_iter *copy;
	struct bkey_s_c k;
	unsigned nr_iters = 0;
	int ret;

	ret = bch2_btree_iter_traverse(iter);
	if (ret)
		return ret;

	/* Start from the full insert; only ever shrunk from here: */
	*end = insert->k.p;

	/* extent_update_to_keys(): */
	nr_iters += 1;

	/* Budget half for the insert itself, leaving room for overwrites: */
	ret = count_iters_for_insert(trans, bkey_i_to_s_c(insert), 0, end,
				     &nr_iters, EXTENT_ITERS_MAX / 2);
	if (ret < 0)
		return ret;

	copy = bch2_trans_copy_iter(trans, iter);

	for_each_btree_key_continue(copy, 0, k, ret) {
		unsigned offset = 0;

		/* Past the current atomic boundary — done: */
		if (bkey_cmp(bkey_start_pos(k.k), *end) >= 0)
			break;

		/* Offset of the insert's start within the overlapped key: */
		if (bkey_cmp(bkey_start_pos(&insert->k),
			     bkey_start_pos(k.k)) > 0)
			offset = bkey_start_offset(&insert->k) -
				bkey_start_offset(k.k);

		/* extent_handle_overwrites(): */
		switch (bch2_extent_overlap(&insert->k, k.k)) {
		case BCH_EXTENT_OVERLAP_ALL:
		case BCH_EXTENT_OVERLAP_FRONT:
			nr_iters += 1;
			break;
		case BCH_EXTENT_OVERLAP_BACK:
		case BCH_EXTENT_OVERLAP_MIDDLE:
			/* Back/middle overlaps split the existing key: */
			nr_iters += 2;
			break;
		}

		ret = count_iters_for_insert(trans, k, offset, end,
					     &nr_iters, EXTENT_ITERS_MAX);
		if (ret)
			break;
	}

	bch2_trans_iter_put(trans, copy);

	/* ret == 1 just means *end was clamped — not an error: */
	return ret < 0 ? ret : 0;
}
2021-08-24 21:30:06 -04:00
int bch2_extent_trim_atomic ( struct btree_trans * trans ,
struct btree_iter * iter ,
struct bkey_i * k )
2019-11-15 15:52:28 -05:00
{
struct bpos end ;
int ret ;
2021-08-24 21:30:06 -04:00
ret = bch2_extent_atomic_end ( trans , iter , k , & end ) ;
2019-11-15 15:52:28 -05:00
if ( ret )
return ret ;
bch2_cut_back ( end , k ) ;
return 0 ;
}