// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_gc.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_journal_iter.h"
#include "btree_key_cache.h"
#include "btree_update_interior.h"
# include "btree_write_buffer.h"
2018-11-05 02:31:48 -05:00
# include "buckets.h"
2023-11-09 14:22:46 -05:00
# include "disk_accounting.h"
2022-07-17 23:06:38 -04:00
# include "errcode.h"
2018-07-17 13:50:15 -04:00
# include "error.h"
2017-03-16 22:18:50 -08:00
# include "journal.h"
2023-12-21 00:16:32 -05:00
# include "journal_io.h"
2017-03-16 22:18:50 -08:00
# include "journal_reclaim.h"
2018-11-07 17:48:32 -05:00
# include "replicas.h"
2023-08-16 16:54:33 -04:00
# include "snapshot.h"
2017-03-16 22:18:50 -08:00
2019-10-28 19:35:13 -04:00
# include <linux/prefetch.h>
2022-12-20 11:13:19 -05:00
2024-04-13 17:49:23 -04:00

static const char * const trans_commit_flags_strs[] = {
#define x(n, ...)	#n,
	BCH_TRANS_COMMIT_FLAGS()
#undef x
	NULL
};

void bch2_trans_commit_flags_to_text(struct printbuf *out, enum bch_trans_commit_flags flags)
{
	enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;

	prt_printf(out, "watermark=%s", bch2_watermarks[watermark]);

	flags >>= BCH_WATERMARK_BITS;
	if (flags) {
		prt_char(out, ' ');
		bch2_prt_bitflags(out, trans_commit_flags_strs, flags);
	}
}

static void verify_update_old_key(struct btree_trans *trans, struct btree_insert_entry *i)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	struct bch_fs *c = trans->c;
	struct bkey u;
	struct bkey_s_c k = bch2_btree_path_peek_slot_exact(trans->paths + i->path, &u);

	if (unlikely(trans->journal_replay_not_finished)) {
		struct bkey_i *j_k =
			bch2_journal_keys_peek_slot(c, i->btree_id, i->level, i->k->k.p);

		if (j_k)
			k = bkey_i_to_s_c(j_k);
	}

	u = *k.k;
	u.needs_whiteout = i->old_k.needs_whiteout;

	BUG_ON(memcmp(&i->old_k, &u, sizeof(struct bkey)));
	BUG_ON(i->old_v != k.v);
#endif
}

static inline struct btree_path_level *insert_l(struct btree_trans *trans, struct btree_insert_entry *i)
{
	return (trans->paths + i->path)->l + i->level;
}

static inline bool same_leaf_as_prev(struct btree_trans *trans,
				     struct btree_insert_entry *i)
{
	return i != trans->updates &&
		insert_l(trans, &i[0])->b == insert_l(trans, &i[-1])->b;
}

static inline bool same_leaf_as_next(struct btree_trans *trans,
				     struct btree_insert_entry *i)
{
	return i + 1 < trans->updates + trans->nr_updates &&
		insert_l(trans, &i[0])->b == insert_l(trans, &i[1])->b;
}

inline void bch2_btree_node_prep_for_write(struct btree_trans *trans,
					   struct btree_path *path,
					   struct btree *b)
{
	struct bch_fs *c = trans->c;

	if (unlikely(btree_node_just_written(b)) &&
	    bch2_btree_post_write_cleanup(c, b))
		bch2_trans_node_reinit_iter(trans, b);

	/*
	 * If the last bset has been written, or if it's gotten too big - start
	 * a new bset to insert into:
	 */
	if (want_new_bset(c, b))
		bch2_btree_init_next(trans, b);
}

static noinline int trans_lock_write_fail(struct btree_trans *trans, struct btree_insert_entry *i)
{
	while (--i >= trans->updates) {
		if (same_leaf_as_prev(trans, i))
			continue;

		bch2_btree_node_unlock_write(trans, trans->paths + i->path, insert_l(trans, i)->b);
	}

	trace_and_count(trans->c, trans_restart_would_deadlock_write, trans);
	return btree_trans_restart(trans, BCH_ERR_transaction_restart_would_deadlock_write);
}

static inline int bch2_trans_lock_write(struct btree_trans *trans)
{
	EBUG_ON(trans->write_locked);

	trans_for_each_update(trans, i) {
		if (same_leaf_as_prev(trans, i))
			continue;

		if (bch2_btree_node_lock_write(trans, trans->paths + i->path, &insert_l(trans, i)->b->c))
			return trans_lock_write_fail(trans, i);

		if (!i->cached)
			bch2_btree_node_prep_for_write(trans, trans->paths + i->path, insert_l(trans, i)->b);
	}

	trans->write_locked = true;
	return 0;
}

static inline void bch2_trans_unlock_write(struct btree_trans *trans)
{
	if (likely(trans->write_locked)) {
		trans_for_each_update(trans, i)
			if (btree_node_locked_type(trans->paths + i->path, i->level) ==
			    BTREE_NODE_WRITE_LOCKED)
				bch2_btree_node_unlock_write_inlined(trans,
						trans->paths + i->path, insert_l(trans, i)->b);
		trans->write_locked = false;
	}
}

/* Inserting into a given leaf node (last stage of insert): */

/* Handle overwrites and do insert, for non extents: */
bool bch2_btree_bset_insert_key(struct btree_trans *trans,
				struct btree_path *path,
				struct btree *b,
				struct btree_node_iter *node_iter,
				struct bkey_i *insert)
{
	struct bkey_packed *k;
	unsigned clobber_u64s = 0, new_u64s = 0;

	EBUG_ON(btree_node_just_written(b));
	EBUG_ON(bset_written(b, btree_bset_last(b)));
	EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
	EBUG_ON(bpos_lt(insert->k.p, b->data->min_key));
	EBUG_ON(bpos_gt(insert->k.p, b->data->max_key));
	EBUG_ON(insert->k.u64s > bch2_btree_keys_u64s_remaining(b));
	EBUG_ON(!b->c.level && !bpos_eq(insert->k.p, path->pos));

	k = bch2_btree_node_iter_peek_all(node_iter, b);
	if (k && bkey_cmp_left_packed(b, k, &insert->k.p))
		k = NULL;

	/* @k is the key being overwritten/deleted, if any: */
	EBUG_ON(k && bkey_deleted(k));

	/* Deleting, but not found? nothing to do: */
	if (bkey_deleted(&insert->k) && !k)
		return false;

	if (bkey_deleted(&insert->k)) {
		/* Deleting: */
		btree_account_key_drop(b, k);
		k->type = KEY_TYPE_deleted;

		if (k->needs_whiteout)
			push_whiteout(b, insert->k.p);
		k->needs_whiteout = false;

		if (k >= btree_bset_last(b)->start) {
			clobber_u64s = k->u64s;
			bch2_bset_delete(b, k, clobber_u64s);
			goto fix_iter;
		} else {
			bch2_btree_path_fix_key_modified(trans, b, k);
		}

		return true;
	}

	if (k) {
		/* Overwriting: */
		btree_account_key_drop(b, k);
		k->type = KEY_TYPE_deleted;

		insert->k.needs_whiteout = k->needs_whiteout;
		k->needs_whiteout = false;

		if (k >= btree_bset_last(b)->start) {
			clobber_u64s = k->u64s;
			goto overwrite;
		} else {
			bch2_btree_path_fix_key_modified(trans, b, k);
		}
	}

	k = bch2_btree_node_iter_bset_pos(node_iter, b, bset_tree_last(b));
overwrite:
	bch2_bset_insert(b, node_iter, k, insert, clobber_u64s);
	new_u64s = k->u64s;
fix_iter:
	if (clobber_u64s != new_u64s)
		bch2_btree_node_iter_fix(trans, path, b, node_iter, k,
					 clobber_u64s, new_u64s);
	return true;
}
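
/*
 * Journal pin flush callbacks: journal reclaim invokes these when it needs the
 * btree node write that a given pin corresponds to. The node's flags word is
 * updated with a lockless try_cmpxchg() loop, so the node is only marked
 * need_write if it is still dirty for the same write index and journal
 * sequence number the pin was taken for.
 */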
static int __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
			      unsigned i, u64 seq)
{
	struct bch_fs *c = container_of(j, struct bch_fs, journal);
	struct btree_write *w = container_of(pin, struct btree_write, journal);
	struct btree *b = container_of(w, struct btree, writes[i]);
	struct btree_trans *trans = bch2_trans_get(c);
	unsigned long old, new;
	unsigned idx = w - b->writes;

	btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);

	old = READ_ONCE(b->flags);
	do {
		new = old;

		if (!(old & (1 << BTREE_NODE_dirty)) ||
		    !!(old & (1 << BTREE_NODE_write_idx)) != idx ||
		    w->journal.seq != seq)
			break;

		new &= ~BTREE_WRITE_TYPE_MASK;
		new |= BTREE_WRITE_journal_reclaim;
		new |= 1 << BTREE_NODE_need_write;
	} while (!try_cmpxchg(&b->flags, &old, new));

	btree_node_write_if_need(c, b, SIX_LOCK_read);
	six_unlock_read(&b->c.lock);

	bch2_trans_put(trans);
	return 0;
}

int bch2_btree_node_flush0(struct journal *j, struct journal_entry_pin *pin, u64 seq)
{
	return __btree_node_flush(j, pin, 0, seq);
}

int bch2_btree_node_flush1(struct journal *j, struct journal_entry_pin *pin, u64 seq)
{
	return __btree_node_flush(j, pin, 1, seq);
}

inline void bch2_btree_add_journal_pin(struct bch_fs *c,
				       struct btree *b, u64 seq)
{
	struct btree_write *w = btree_current_write(b);

	bch2_journal_pin_add(&c->journal, seq, &w->journal,
			     btree_node_write_idx(b) == 0
			     ? bch2_btree_node_flush0
			     : bch2_btree_node_flush1);
}

/**
 * bch2_btree_insert_key_leaf() - insert a key into a leaf node
 * @trans:		btree transaction object
 * @path:		path pointing to @insert's pos
 * @insert:		key to insert
 * @journal_seq:	sequence number of journal reservation
 */
inline void bch2_btree_insert_key_leaf(struct btree_trans *trans,
				       struct btree_path *path,
				       struct bkey_i *insert,
				       u64 journal_seq)
{
	struct bch_fs *c = trans->c;
	struct btree *b = path_l(path)->b;
	struct bset_tree *t = bset_tree_last(b);
	struct bset *i = bset(b, t);
	int old_u64s = bset_u64s(t);
	int old_live_u64s = b->nr.live_u64s;
	int live_u64s_added, u64s_added;

	if (unlikely(!bch2_btree_bset_insert_key(trans, path, b,
					&path_l(path)->iter, insert)))
		return;

	i->journal_seq = cpu_to_le64(max(journal_seq, le64_to_cpu(i->journal_seq)));

	bch2_btree_add_journal_pin(c, b, journal_seq);

	if (unlikely(!btree_node_dirty(b))) {
		EBUG_ON(test_bit(BCH_FS_clean_shutdown, &c->flags));
		set_btree_node_dirty_acct(c, b);
	}

	live_u64s_added = (int) b->nr.live_u64s - old_live_u64s;
	u64s_added = (int) bset_u64s(t) - old_u64s;

	if (b->sib_u64s[0] != U16_MAX && live_u64s_added < 0)
		b->sib_u64s[0] = max(0, (int) b->sib_u64s[0] + live_u64s_added);
	if (b->sib_u64s[1] != U16_MAX && live_u64s_added < 0)
		b->sib_u64s[1] = max(0, (int) b->sib_u64s[1] + live_u64s_added);

	if (u64s_added > live_u64s_added &&
	    bch2_maybe_compact_whiteouts(c, b))
		bch2_trans_node_reinit_iter(trans, b);
}

/* Cached btree updates: */

/* Normal update interface: */

static inline void btree_insert_entry_checks(struct btree_trans *trans,
					     struct btree_insert_entry *i)
{
	struct btree_path *path = trans->paths + i->path;

	BUG_ON(!bpos_eq(i->k->k.p, path->pos));
	BUG_ON(i->cached	!= path->cached);
	BUG_ON(i->level		!= path->level);
	BUG_ON(i->btree_id	!= path->btree_id);
	EBUG_ON(!i->level &&
		btree_type_has_snapshots(i->btree_id) &&
		!(i->flags & BTREE_UPDATE_internal_snapshot_node) &&
		test_bit(JOURNAL_replay_done, &trans->c->journal.flags) &&
		i->k->k.p.snapshot &&
		bch2_snapshot_is_internal_node(trans->c, i->k->k.p.snapshot) > 0);
}

static __always_inline int bch2_trans_journal_res_get(struct btree_trans *trans,
						      unsigned flags)
{
	return bch2_journal_res_get(&trans->c->journal, &trans->journal_res,
				    trans->journal_u64s, flags);
}
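
/*
 * When trans->journal_transaction_names is set, each commit also writes
 * trans->fn into the journal as a BCH_JSET_ENTRY_log entry, truncated to
 * JSET_ENTRY_LOG_U64s * sizeof(u64) bytes, so a journal dump shows which
 * code path produced each transaction.
 */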
#define JSET_ENTRY_LOG_U64s		4

static noinline void journal_transaction_name(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;
	struct journal *j = &c->journal;
	struct jset_entry *entry =
		bch2_journal_add_entry(j, &trans->journal_res,
				       BCH_JSET_ENTRY_log, 0, 0,
				       JSET_ENTRY_LOG_U64s);
	struct jset_entry_log *l =
		container_of(entry, struct jset_entry_log, entry);

	strncpy(l->d, trans->fn, JSET_ENTRY_LOG_U64s * sizeof(u64));
}

static inline int btree_key_can_insert(struct btree_trans *trans,
				       struct btree *b, unsigned u64s)
{
	if (!bch2_btree_node_insert_fits(b, u64s))
		return -BCH_ERR_btree_insert_btree_node_full;

	return 0;
}
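
/*
 * Key cache keys live in heap-allocated buffers sized in u64s: if an update
 * needs more space than the current buffer, we first try a GFP_NOWAIT
 * krealloc() while still holding the write locks, and only fall back to this
 * slowpath - which drops and retakes the locks around a GFP_KERNEL
 * allocation - when that fails.
 */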
noinline static int
btree_key_can_insert_cached_slowpath(struct btree_trans *trans, unsigned flags,
				     struct btree_path *path, unsigned new_u64s)
{
	struct bkey_cached *ck = (void *) path->l[0].b;
	struct bkey_i *new_k;
	int ret;

	bch2_trans_unlock_write(trans);
	bch2_trans_unlock(trans);

	new_k = kmalloc(new_u64s * sizeof(u64), GFP_KERNEL);
	if (!new_k) {
		bch_err(trans->c, "error allocating memory for key cache key, btree %s u64s %u",
			bch2_btree_id_str(path->btree_id), new_u64s);
		return -BCH_ERR_ENOMEM_btree_key_cache_insert;
	}

	ret =   bch2_trans_relock(trans) ?:
		bch2_trans_lock_write(trans);
	if (unlikely(ret)) {
		kfree(new_k);
		return ret;
	}

	memcpy(new_k, ck->k, ck->u64s * sizeof(u64));

	trans_for_each_update(trans, i)
		if (i->old_v == &ck->k->v)
			i->old_v = &new_k->v;

	kfree(ck->k);
	ck->u64s	= new_u64s;
	ck->k		= new_k;
	return 0;
}

static int btree_key_can_insert_cached(struct btree_trans *trans, unsigned flags,
				       struct btree_path *path, unsigned u64s)
{
	struct bch_fs *c = trans->c;
	struct bkey_cached *ck = (void *) path->l[0].b;
	unsigned new_u64s;
	struct bkey_i *new_k;
	unsigned watermark = flags & BCH_WATERMARK_MASK;

	EBUG_ON(path->level);

	if (watermark < BCH_WATERMARK_reclaim &&
	    !test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
	    bch2_btree_key_cache_must_wait(c))
		return -BCH_ERR_btree_insert_need_journal_reclaim;

	/*
	 * bch2_varint_decode can read past the end of the buffer by at most 7
	 * bytes (it won't be used):
	 */
	u64s += 1;

	if (u64s <= ck->u64s)
		return 0;

	new_u64s	= roundup_pow_of_two(u64s);
	new_k		= krealloc(ck->k, new_u64s * sizeof(u64), GFP_NOWAIT|__GFP_NOWARN);
	if (unlikely(!new_k))
		return btree_key_can_insert_cached_slowpath(trans, flags, path, new_u64s);

	trans_for_each_update(trans, i)
		if (i->old_v == &ck->k->v)
			i->old_v = &new_k->v;

	ck->u64s	= new_u64s;
	ck->k		= new_k;
	return 0;
}

/* Triggers: */

static int run_one_mem_trigger(struct btree_trans *trans,
			       struct btree_insert_entry *i,
			       unsigned flags)
{
	verify_update_old_key(trans, i);

	if (unlikely(flags & BTREE_TRIGGER_norun))
		return 0;

	struct bkey_s_c old = { &i->old_k, i->old_v };
	struct bkey_i *new = i->k;
	const struct bkey_ops *old_ops = bch2_bkey_type_ops(old.k->type);
	const struct bkey_ops *new_ops = bch2_bkey_type_ops(i->k->k.type);

	if (old_ops->trigger == new_ops->trigger)
		return bch2_key_trigger(trans, i->btree_id, i->level,
					old, bkey_i_to_s(new),
					BTREE_TRIGGER_insert|BTREE_TRIGGER_overwrite|flags);
	else
		return  bch2_key_trigger_new(trans, i->btree_id, i->level,
					     bkey_i_to_s(new), flags) ?:
			bch2_key_trigger_old(trans, i->btree_id, i->level,
					     old, flags);
}
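
/*
 * Returns 1 if a trigger was run, 0 if there was nothing to do, negative on
 * error: the caller loops until no more triggers run, since transactional
 * triggers may queue additional updates that themselves need triggers.
 */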
static int run_one_trans_trigger(struct btree_trans *trans, struct btree_insert_entry *i,
				 bool overwrite)
{
	verify_update_old_key(trans, i);

	if ((i->flags & BTREE_TRIGGER_norun) ||
	    !btree_node_type_has_trans_triggers(i->bkey_type))
		return 0;

	/*
	 * Transactional triggers create new btree_insert_entries, so we can't
	 * pass them a pointer to a btree_insert_entry, that memory is going to
	 * move:
	 */
	struct bkey old_k = i->old_k;
	struct bkey_s_c old = { &old_k, i->old_v };
	const struct bkey_ops *old_ops = bch2_bkey_type_ops(old.k->type);
	const struct bkey_ops *new_ops = bch2_bkey_type_ops(i->k->k.type);
	unsigned flags = i->flags|BTREE_TRIGGER_transactional;

	if (!i->insert_trigger_run &&
	    !i->overwrite_trigger_run &&
	    old_ops->trigger == new_ops->trigger) {
		i->overwrite_trigger_run = true;
		i->insert_trigger_run = true;
		return bch2_key_trigger(trans, i->btree_id, i->level, old, bkey_i_to_s(i->k),
					BTREE_TRIGGER_insert|
					BTREE_TRIGGER_overwrite|flags) ?: 1;
	} else if (overwrite && !i->overwrite_trigger_run) {
		i->overwrite_trigger_run = true;
		return bch2_key_trigger_old(trans, i->btree_id, i->level, old, flags) ?: 1;
	} else if (!overwrite && !i->insert_trigger_run) {
		i->insert_trigger_run = true;
		return bch2_key_trigger_new(trans, i->btree_id, i->level, bkey_i_to_s(i->k), flags) ?: 1;
	} else {
		return 0;
	}
}

static int run_btree_triggers(struct btree_trans *trans, enum btree_id btree_id,
			      unsigned btree_id_start)
{
	for (int overwrite = 1; overwrite >= 0; --overwrite) {
		bool trans_trigger_run;

		/*
		 * Running triggers will append more updates to the list of updates as
		 * we're walking it:
		 */
		do {
			trans_trigger_run = false;

			for (unsigned i = btree_id_start;
			     i < trans->nr_updates && trans->updates[i].btree_id <= btree_id;
			     i++) {
				if (trans->updates[i].btree_id != btree_id)
					continue;

				int ret = run_one_trans_trigger(trans, trans->updates + i, overwrite);
				if (ret < 0)
					return ret;
				if (ret)
					trans_trigger_run = true;
			}
		} while (trans_trigger_run);
	}

	return 0;
}

static int bch2_trans_commit_run_triggers(struct btree_trans *trans)
{
	unsigned btree_id = 0, btree_id_start = 0;
	int ret = 0;

	/*
	 * For a given btree, this algorithm runs insert triggers before
	 * overwrite triggers: this is so that when extents are being moved
	 * (e.g. by FALLOCATE_FL_INSERT_RANGE), we don't drop references before
	 * they are re-added.
	 */
	for (btree_id = 0; btree_id < BTREE_ID_NR; btree_id++) {
		if (btree_id == BTREE_ID_alloc)
			continue;

		while (btree_id_start < trans->nr_updates &&
		       trans->updates[btree_id_start].btree_id < btree_id)
			btree_id_start++;

		ret = run_btree_triggers(trans, btree_id, btree_id_start);
		if (ret)
			return ret;
	}

	for (unsigned idx = 0; idx < trans->nr_updates; idx++) {
		struct btree_insert_entry *i = trans->updates + idx;

		if (i->btree_id > BTREE_ID_alloc)
			break;
		if (i->btree_id == BTREE_ID_alloc) {
			ret = run_btree_triggers(trans, BTREE_ID_alloc, idx);
			if (ret)
				return ret;
			break;
		}
	}

#ifdef CONFIG_BCACHEFS_DEBUG
	trans_for_each_update(trans, i)
		BUG_ON(!(i->flags & BTREE_TRIGGER_norun) &&
		       btree_node_type_has_trans_triggers(i->bkey_type) &&
		       (!i->insert_trigger_run || !i->overwrite_trigger_run));
#endif
	return 0;
}

static noinline int bch2_trans_commit_run_gc_triggers(struct btree_trans *trans)
{
	trans_for_each_update(trans, i)
		if (btree_node_type_has_triggers(i->bkey_type) &&
		    gc_visited(trans->c, gc_pos_btree(i->btree_id, i->level, i->k->k.p))) {
			int ret = run_one_mem_trigger(trans, i, i->flags|BTREE_TRIGGER_gc);
			if (ret)
				return ret;
		}

	return 0;
}

static struct bversion journal_pos_to_bversion(struct journal_res *res, unsigned offset)
{
	return (struct bversion) {
		.hi = res->seq >> 32,
		.lo = (res->seq << 32) | (res->offset + offset),
	};
}
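
/*
 * The core of the commit path, called with all relevant btree nodes write
 * locked: check that each update still fits, take a nonblocking journal
 * reservation, run commit hooks, apply accounting and atomic triggers, emit
 * the journal entries, and finally insert into the leaf nodes and/or key
 * cache. Atomic trigger failures at this stage are fatal errors.
 */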
static inline int
bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
			       struct btree_insert_entry **stopped_at,
			       unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	struct btree_trans_commit_hook *h;
	unsigned u64s = 0;
	int ret = 0;

	bch2_trans_verify_not_unlocked(trans);
	bch2_trans_verify_not_in_restart(trans);

	if (race_fault()) {
		trace_and_count(c, trans_restart_fault_inject, trans, trace_ip);
		return btree_trans_restart_nounlock(trans, BCH_ERR_transaction_restart_fault_inject);
	}

	/*
	 * Check if the insert will fit in the leaf node with the write lock
	 * held, otherwise another thread could write the node changing the
	 * amount of space available:
	 */

	prefetch(&trans->c->journal.flags);

	trans_for_each_update(trans, i) {
		/* Multiple inserts might go to same leaf: */
		if (!same_leaf_as_prev(trans, i))
			u64s = 0;

		u64s += i->k->k.u64s;
		ret = !i->cached
			? btree_key_can_insert(trans, insert_l(trans, i)->b, u64s)
			: btree_key_can_insert_cached(trans, flags, trans->paths + i->path, u64s);
		if (ret) {
			*stopped_at = i;
			return ret;
		}

		i->k->k.needs_whiteout = false;
	}

	/*
	 * Don't get journal reservation until after we know insert will
	 * succeed:
	 */
	if (likely(!(flags & BCH_TRANS_COMMIT_no_journal_res))) {
		ret = bch2_trans_journal_res_get(trans,
				(flags & BCH_WATERMARK_MASK)|
				JOURNAL_RES_GET_NONBLOCK);
		if (ret)
			return ret;

		if (unlikely(trans->journal_transaction_names))
			journal_transaction_name(trans);
	}

	/*
	 * Not allowed to fail after we've gotten our journal reservation - we
	 * have to use it:
	 */

	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
	    !(flags & BCH_TRANS_COMMIT_no_journal_res)) {
		if (bch2_journal_seq_verify)
			trans_for_each_update(trans, i)
				i->k->k.version.lo = trans->journal_res.seq;
		else if (bch2_inject_invalid_keys)
			trans_for_each_update(trans, i)
				i->k->k.version = MAX_VERSION;
	}

	h = trans->hooks;
	while (h) {
		ret = h->fn(trans, h);
		if (ret)
			return ret;
		h = h->next;
	}

	struct jset_entry *entry = trans->journal_entries;

	if (likely(!(flags & BCH_TRANS_COMMIT_skip_accounting_apply))) {
		percpu_down_read(&c->mark_lock);

		for (entry = trans->journal_entries;
		     entry != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s);
		     entry = vstruct_next(entry))
			if (jset_entry_is_key(entry) && entry->start->k.type == KEY_TYPE_accounting) {
				struct bkey_i_accounting *a = bkey_i_to_accounting(entry->start);

				a->k.version = journal_pos_to_bversion(&trans->journal_res,
								(u64 *) entry - (u64 *) trans->journal_entries);
				BUG_ON(bversion_zero(a->k.version));
				ret = bch2_accounting_mem_mod_locked(trans, accounting_i_to_s_c(a), false);
				if (ret)
					goto revert_fs_usage;
			}
		percpu_up_read(&c->mark_lock);

		/* XXX: we only want to run this if deltas are nonzero */
		bch2_trans_account_disk_usage_change(trans);
	}

	trans_for_each_update(trans, i)
		if (btree_node_type_has_atomic_triggers(i->bkey_type)) {
			ret = run_one_mem_trigger(trans, i, BTREE_TRIGGER_atomic|i->flags);
			if (ret)
				goto fatal_err;
		}

	if (unlikely(c->gc_pos.phase)) {
		ret = bch2_trans_commit_run_gc_triggers(trans);
		if (ret)
			goto fatal_err;
	}

	if (likely(!(flags & BCH_TRANS_COMMIT_no_journal_res))) {
		struct journal *j = &c->journal;
		struct jset_entry *entry;

		trans_for_each_update(trans, i) {
			if (i->key_cache_already_flushed)
				continue;

			if (i->flags & BTREE_UPDATE_nojournal)
				continue;

			verify_update_old_key(trans, i);

			if (trans->journal_transaction_names) {
				entry = bch2_journal_add_entry(j, &trans->journal_res,
						       BCH_JSET_ENTRY_overwrite,
						       i->btree_id, i->level,
						       i->old_k.u64s);
				bkey_reassemble((struct bkey_i *) entry->start,
						(struct bkey_s_c) { &i->old_k, i->old_v });
			}

			entry = bch2_journal_add_entry(j, &trans->journal_res,
					       BCH_JSET_ENTRY_btree_keys,
					       i->btree_id, i->level,
					       i->k->k.u64s);
			bkey_copy((struct bkey_i *) entry->start, i->k);
		}

		memcpy_u64s_small(journal_res_entry(&c->journal, &trans->journal_res),
				  trans->journal_entries,
				  trans->journal_entries_u64s);

		trans->journal_res.offset	+= trans->journal_entries_u64s;
		trans->journal_res.u64s		-= trans->journal_entries_u64s;

		if (trans->journal_seq)
			*trans->journal_seq = trans->journal_res.seq;
	}

	trans_for_each_update(trans, i) {
		struct btree_path *path = trans->paths + i->path;

		if (!i->cached)
			bch2_btree_insert_key_leaf(trans, path, i->k, trans->journal_res.seq);
		else if (!i->key_cache_already_flushed)
			bch2_btree_insert_key_cached(trans, flags, i);
		else
			bch2_btree_key_cache_drop(trans, path);
	}

	return 0;
fatal_err:
	bch2_fs_fatal_error(c, "fatal error in transaction commit: %s", bch2_err_str(ret));
	percpu_down_read(&c->mark_lock);
revert_fs_usage:
	for (struct jset_entry *entry2 = trans->journal_entries;
	     entry2 != entry;
	     entry2 = vstruct_next(entry2))
		if (jset_entry_is_key(entry2) && entry2->start->k.type == KEY_TYPE_accounting) {
			struct bkey_s_accounting a = bkey_i_to_s_accounting(entry2->start);

			bch2_accounting_neg(a);
			bch2_accounting_mem_mod_locked(trans, a.c, false);
			bch2_accounting_neg(a);
		}
	percpu_up_read(&c->mark_lock);
	return ret;
}

static noinline void bch2_drop_overwrites_from_journal(struct btree_trans *trans)
{
	/*
	 * Accounting keys aren't deduped in the journal: we have to compare
	 * each individual update against what's in the btree to see if it has
	 * been applied yet, and accounting updates also don't overwrite,
	 * they're deltas that accumulate.
	 */
	trans_for_each_update(trans, i)
		if (i->k->k.type != KEY_TYPE_accounting)
			bch2_journal_key_overwritten(trans->c, i->btree_id, i->level, i->k->k.p);
}

static noinline int bch2_trans_commit_bkey_invalid(struct btree_trans *trans,
						   enum bch_validate_flags flags,
						   struct btree_insert_entry *i,
						   struct printbuf *err)
{
	struct bch_fs *c = trans->c;

	printbuf_reset(err);
	prt_printf(err, "invalid bkey on insert from %s -> %ps\n",
		   trans->fn, (void *) i->ip_allocated);
	printbuf_indent_add(err, 2);

	bch2_bkey_val_to_text(err, c, bkey_i_to_s_c(i->k));
	prt_newline(err);

	bch2_bkey_invalid(c, bkey_i_to_s_c(i->k), i->bkey_type, flags, err);
	bch2_print_string_as_lines(KERN_ERR, err->buf);

	bch2_inconsistent_error(c);
	bch2_dump_trans_updates(trans);

	return -EINVAL;
}

static noinline int bch2_trans_commit_journal_entry_invalid(struct btree_trans *trans,
							    struct jset_entry *i)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;

	prt_printf(&buf, "invalid bkey on insert from %s\n", trans->fn);
	printbuf_indent_add(&buf, 2);

	bch2_journal_entry_to_text(&buf, c, i);
	prt_newline(&buf);

	bch2_print_string_as_lines(KERN_ERR, buf.buf);

	bch2_inconsistent_error(c);
	bch2_dump_trans_updates(trans);

	return -EINVAL;
}

static int bch2_trans_commit_journal_pin_flush(struct journal *j,
				struct journal_entry_pin *_pin, u64 seq)
{
	return 0;
}

/*
 * Get journal reservation, take write locks, and attempt to do btree update(s).
 *
 * Before locking for write, any leaf node that the queued updates shrink
 * (u64s_delta <= 0) is checked for merging with a neighbouring node.
 */
static inline int do_bch2_trans_commit(struct btree_trans *trans, unsigned flags,
				       struct btree_insert_entry **stopped_at,
				       unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	int ret = 0, u64s_delta = 0;

	for (unsigned idx = 0; idx < trans->nr_updates; idx++) {
		struct btree_insert_entry *i = trans->updates + idx;

		if (i->cached)
			continue;

		u64s_delta += !bkey_deleted(&i->k->k) ? i->k->k.u64s : 0;
		u64s_delta -= i->old_btree_u64s;

		if (!same_leaf_as_next(trans, i)) {
			if (u64s_delta <= 0) {
				ret = bch2_foreground_maybe_merge(trans, i->path,
								  i->level, flags);
				if (unlikely(ret))
					return ret;
			}

			u64s_delta = 0;
		}
	}

	ret = bch2_trans_lock_write(trans);
	if (unlikely(ret))
		return ret;

	ret = bch2_trans_commit_write_locked(trans, flags, stopped_at, trace_ip);

	if (!ret && unlikely(trans->journal_replay_not_finished))
		bch2_drop_overwrites_from_journal(trans);

	bch2_trans_unlock_write(trans);

	if (!ret && trans->journal_pin)
		bch2_journal_pin_add(&c->journal, trans->journal_res.seq,
				     trans->journal_pin,
				     bch2_trans_commit_journal_pin_flush);

	/*
	 * Drop journal reservation after dropping write locks, since dropping
	 * the journal reservation may kick off a journal write:
	 */
	if (likely(!(flags & BCH_TRANS_COMMIT_no_journal_res)))
		bch2_journal_res_put(&c->journal, &trans->journal_res);

	return ret;
}
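
/*
 * Wait condition for the need_journal_reclaim error path below: returns a
 * journal error, or nonzero once the btree key cache no longer needs us to
 * wait; otherwise kick journal reclaim and keep waiting.
 */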
static int journal_reclaim_wait_done(struct bch_fs *c)
{
	int ret = bch2_journal_error(&c->journal) ?:
		!bch2_btree_key_cache_must_wait(c);

	if (!ret)
		journal_reclaim_kick(&c->journal);
	return ret;
}
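
/*
 * Attempt to recover from an error returned by the locked part of the commit
 * path: split a full leaf node, apply new accounting/replicas entries to the
 * superblock, reacquire a journal reservation, or wait on journal reclaim.
 * If recovery succeeds the caller retries the commit; transaction restarts
 * are propagated to the caller.
 */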
static noinline
int bch2_trans_commit_error(struct btree_trans *trans, unsigned flags,
			    struct btree_insert_entry *i,
			    int ret, unsigned long trace_ip)
{
	struct bch_fs *c = trans->c;
	enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;

	switch (ret) {
	case -BCH_ERR_btree_insert_btree_node_full:
		ret = bch2_btree_split_leaf(trans, i->path, flags);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			trace_and_count(c, trans_restart_btree_node_split, trans,
					trace_ip, trans->paths + i->path);
		break;
	case -BCH_ERR_btree_insert_need_mark_replicas:
		ret = drop_locks_do(trans,
			bch2_accounting_update_sb(trans));
		break;
	case -BCH_ERR_journal_res_get_blocked:
		/*
		 * XXX: this should probably be a separate BTREE_INSERT_NONBLOCK
		 * flag
		 */
		if ((flags & BCH_TRANS_COMMIT_journal_reclaim) &&
		    watermark < BCH_WATERMARK_reclaim) {
			ret = -BCH_ERR_journal_reclaim_would_deadlock;
			break;
		}

		ret = drop_locks_do(trans,
			bch2_trans_journal_res_get(trans,
					(flags & BCH_WATERMARK_MASK)|
					JOURNAL_RES_GET_CHECK));
		break;
	case -BCH_ERR_btree_insert_need_journal_reclaim:
		bch2_trans_unlock(trans);

		trace_and_count(c, trans_blocked_journal_reclaim, trans, trace_ip);

		wait_event_freezable(c->journal.reclaim_wait,
				     (ret = journal_reclaim_wait_done(c)));
		if (ret < 0)
			break;

		ret = bch2_trans_relock(trans);
		break;
	default:
		BUG_ON(ret >= 0);
		break;
	}

	BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted);

	bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOSPC) &&
				(flags & BCH_TRANS_COMMIT_no_enospc), c,
		"%s: incorrectly got %s\n", __func__, bch2_err_str(ret));

	return ret;
}
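
/*
 * Cold path for commits issued before the filesystem has gone read-write:
 * only permitted with BCH_TRANS_COMMIT_lazy_rw before BCH_FS_started is set,
 * in which case we go read-write early and take a write ref.
 */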
static noinline int
bch2_trans_commit_get_rw_cold(struct btree_trans *trans, unsigned flags)
{
	struct bch_fs *c = trans->c;
	int ret;

	if (likely(!(flags & BCH_TRANS_COMMIT_lazy_rw)) ||
	    test_bit(BCH_FS_started, &c->flags))
		return -BCH_ERR_erofs_trans_commit;

	ret = drop_locks_do(trans, bch2_fs_read_write_early(c));
	if (ret)
		return ret;

	bch2_write_ref_get(c, BCH_WRITE_REF_trans);
	return 0;
}

/*
 * This is for updates done in the early part of fsck - btree_gc - before we've
 * gone RW; we only add the new keys to the list of keys for journal replay to
 * do.
 */
static noinline int
do_bch2_trans_commit_to_journal_replay(struct btree_trans *trans)
{
	struct bch_fs *c = trans->c;

	trans_for_each_update(trans, i) {
		int ret = bch2_journal_key_insert(c, i->btree_id, i->level, i->k);
		if (ret)
			return ret;
	}

	for (struct jset_entry *i = trans->journal_entries;
	     i != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s);
	     i = vstruct_next(i))
		if (i->type == BCH_JSET_ENTRY_btree_keys ||
		    i->type == BCH_JSET_ENTRY_write_buffer_keys) {
			int ret = bch2_journal_key_insert(c, i->btree_id, i->level, i->start);
			if (ret)
				return ret;
		}

	return 0;
}
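
/*
 * Main transaction commit path.  Callers normally don't invoke this directly;
 * they go through wrappers such as bch2_trans_commit()/commit_do() (names
 * assumed from btree_update.h, not defined in this file), which set
 * trans->disk_res/journal_seq and handle transaction restarts.  A rough,
 * illustrative sketch only:
 *
 *	ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
 *			update_fn(trans));	// update_fn(): caller's queued updates
 */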
int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
{
	struct btree_insert_entry *errored_at = NULL;
	struct bch_fs *c = trans->c;
	int ret = 0;

	bch2_trans_verify_not_unlocked(trans);
	bch2_trans_verify_not_in_restart(trans);

	if (!trans->nr_updates &&
	    !trans->journal_entries_u64s)
		goto out_reset;

	ret = bch2_trans_commit_run_triggers(trans);
	if (ret)
		goto out_reset;

	trans_for_each_update(trans, i) {
		struct printbuf buf = PRINTBUF;
		enum bch_validate_flags invalid_flags = 0;

		if (!(flags & BCH_TRANS_COMMIT_no_journal_res))
			invalid_flags |= BCH_VALIDATE_write|BCH_VALIDATE_commit;

		if (unlikely(bch2_bkey_invalid(c, bkey_i_to_s_c(i->k),
					       i->bkey_type, invalid_flags, &buf)))
			ret = bch2_trans_commit_bkey_invalid(trans, invalid_flags, i, &buf);
		btree_insert_entry_checks(trans, i);
		printbuf_exit(&buf);

		if (ret)
			return ret;
	}
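
	/*
	 * Raw journal entries queued on the transaction (e.g. btree write
	 * buffer keys) get the same pre-commit validation as the btree
	 * updates above:
	 */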
	for (struct jset_entry *i = trans->journal_entries;
	     i != (void *) ((u64 *) trans->journal_entries + trans->journal_entries_u64s);
	     i = vstruct_next(i)) {
		enum bch_validate_flags invalid_flags = 0;

		if (!(flags & BCH_TRANS_COMMIT_no_journal_res))
			invalid_flags |= BCH_VALIDATE_write|BCH_VALIDATE_commit;

		if (unlikely(bch2_journal_entry_validate(c, NULL, i,
						bcachefs_metadata_version_current,
						CPU_BIG_ENDIAN, invalid_flags)))
			ret = bch2_trans_commit_journal_entry_invalid(trans, i);

		if (ret)
			return ret;
	}

	if (unlikely(!test_bit(BCH_FS_may_go_rw, &c->flags))) {
		ret = do_bch2_trans_commit_to_journal_replay(trans);
		goto out_reset;
	}

	if (!(flags & BCH_TRANS_COMMIT_no_check_rw) &&
	    unlikely(!bch2_write_ref_tryget(c, BCH_WRITE_REF_trans))) {
		ret = bch2_trans_commit_get_rw_cold(trans, flags);
		if (ret)
			goto out_reset;
	}

	EBUG_ON(test_bit(BCH_FS_clean_shutdown, &c->flags));

	trans->journal_u64s = trans->journal_entries_u64s;
	trans->journal_transaction_names = READ_ONCE(c->opts.journal_transaction_names);
	if (trans->journal_transaction_names)
		trans->journal_u64s += jset_u64s(JSET_ENTRY_LOG_U64s);

	trans_for_each_update(trans, i) {
		struct btree_path *path = trans->paths + i->path;

		EBUG_ON(!path->should_be_locked);

		ret = bch2_btree_path_upgrade(trans, path, i->level + 1);
		if (unlikely(ret))
			goto out;

		EBUG_ON(!btree_node_intent_locked(path, i->level));

		if (i->key_cache_already_flushed)
			continue;

		if (i->flags & BTREE_UPDATE_nojournal)
			continue;

		/* we're going to journal the key being updated: */
		trans->journal_u64s += jset_u64s(i->k->k.u64s);

		/* and we're also going to log the overwrite: */
		if (trans->journal_transaction_names)
			trans->journal_u64s += jset_u64s(i->old_k.u64s);
	}

	if (trans->extra_disk_res) {
		ret = bch2_disk_reservation_add(c, trans->disk_res,
						trans->extra_disk_res,
						(flags & BCH_TRANS_COMMIT_no_enospc)
						? BCH_DISK_RESERVATION_NOFAIL : 0);
		if (ret)
			goto err;
	}
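
	/*
	 * Everything from here on may run more than once: if the commit fails
	 * with a recoverable error we jump to err, let
	 * bch2_trans_commit_error() fix things up, then come back here with a
	 * fresh journal_res and fs_usage_delta.
	 */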
retry:
	errored_at = NULL;
	bch2_trans_verify_not_unlocked(trans);
	bch2_trans_verify_not_in_restart(trans);
	if (likely(!(flags & BCH_TRANS_COMMIT_no_journal_res)))
		memset(&trans->journal_res, 0, sizeof(trans->journal_res));
	memset(&trans->fs_usage_delta, 0, sizeof(trans->fs_usage_delta));

	ret = do_bch2_trans_commit(trans, flags, &errored_at, _RET_IP_);

	/* make sure we didn't drop or screw up locks: */
	bch2_trans_verify_locks(trans);

	if (ret)
		goto err;

	trace_and_count(c, transaction_commit, trans, _RET_IP_);
out:
	if (likely(!(flags & BCH_TRANS_COMMIT_no_check_rw)))
		bch2_write_ref_put(c, BCH_WRITE_REF_trans);
out_reset:
	if (!ret)
		bch2_trans_downgrade(trans);

	bch2_trans_reset_updates(trans);

	return ret;
err:
	ret = bch2_trans_commit_error(trans, flags, errored_at, ret, _RET_IP_);
	if (ret)
		goto out;

	/*
	 * We might have done another transaction commit in the error path -
	 * i.e. btree write buffer flush - which will have made use of
	 * trans->journal_res, but with BCH_TRANS_COMMIT_no_journal_res that is
	 * how the journal sequence number to pin is passed in - so we must
	 * restart:
	 */
	if (flags & BCH_TRANS_COMMIT_no_journal_res) {
		ret = -BCH_ERR_transaction_restart_nested;
		goto out;
	}

	goto retry;
}