/* 2017-03-16 22:18:50 -08:00 */
/* SPDX-License-Identifier: GPL-2.0 */
# ifndef _BCACHEFS_JOURNAL_IO_H
# define _BCACHEFS_JOURNAL_IO_H
/* 2024-01-27 00:05:03 -05:00 */
# include "darray.h"
/*
 * Save/restore the journal position recorded in per-member (per-device)
 * info — NOTE(review): inferred from the names; confirm exact semantics
 * against the definitions in journal_io.c.
 */
void bch2_journal_pos_from_member_info_set(struct bch_fs *);
void bch2_journal_pos_from_member_info_resume(struct bch_fs *);
/*
 * Location of one on-disk copy of a journal entry, plus whether that
 * copy's checksum verified OK.
 */
struct journal_ptr {
	bool		csum_good;	/* this copy's checksum validated */
	u8		dev;		/* device index the copy lives on */
	u32		bucket;		/* journal bucket on that device */
	u32		bucket_offset;	/* offset within the bucket — presumably in sectors; confirm */
	u64		sector;		/* starting sector on the device */
};
/*
 * Only used for holding the journal entries we read in btree_journal_read()
 * during cache_registration
 */
struct journal_replay {
	/* every on-disk location this entry was found at (preallocates 8) */
	DARRAY_PREALLOCATED(struct journal_ptr, 8) ptrs;

	bool csum_good;
	/* both checked by journal_replay_ignore() below: */
	bool ignore_blacklisted;
	bool ignore_not_dirty;

	/* must be last: flexible on-disk entry follows the header fields */
	struct jset j;
};
static inline bool journal_replay_ignore ( struct journal_replay * i )
{
return ! i | | i - > ignore_blacklisted | | i - > ignore_not_dirty ;
}
2017-03-16 22:18:50 -08:00
static inline struct jset_entry * __jset_entry_type_next ( struct jset * jset ,
struct jset_entry * entry , unsigned type )
{
while ( entry < vstruct_last ( jset ) ) {
if ( entry - > type = = type )
return entry ;
entry = vstruct_next ( entry ) ;
}
return NULL ;
}
/*
 * Iterate over every entry of the given @type in @jset; the loop variable
 * @entry is declared by the macro itself (C99 for-scope).
 */
#define for_each_jset_entry_type(entry, jset, type)			\
	for (struct jset_entry *entry = (jset)->start;			\
	     (entry = __jset_entry_type_next(jset, entry, type));	\
	     entry = vstruct_next(entry))
/* Iterate over each bkey packed into a single jset_entry @_e */
#define jset_entry_for_each_key(_e, _k)					\
	for (struct bkey_i *_k = (_e)->start;				\
	     _k < vstruct_last(_e);					\
	     _k = bkey_next(_k))

/* Iterate over every btree key in every btree_keys entry of @jset */
#define for_each_jset_key(k, entry, jset)				\
	for_each_jset_entry_type(entry, jset, BCH_JSET_ENTRY_btree_keys)\
		jset_entry_for_each_key(entry, k)
/* Validate a single journal entry read from disk */
int bch2_journal_entry_validate(struct bch_fs *, struct jset *,
				struct jset_entry *, unsigned, int,
				enum bch_validate_flags);
void bch2_journal_entry_to_text(struct printbuf *, struct bch_fs *,
				struct jset_entry *);

/* Print the on-disk locations (struct journal_ptr) of a replay entry */
void bch2_journal_ptrs_to_text(struct printbuf *, struct bch_fs *,
			       struct journal_replay *);

int bch2_journal_read(struct bch_fs *, u64 *, u64 *, u64 *);

/* Journal write path entry point, invoked as a closure callback */
CLOSURE_CALLBACK(bch2_journal_write);
static inline struct jset_entry * jset_entry_init ( struct jset_entry * * end , size_t size )
{
struct jset_entry * entry = * end ;
unsigned u64s = DIV_ROUND_UP ( size , sizeof ( u64 ) ) ;
memset ( entry , 0 , u64s * sizeof ( u64 ) ) ;
/*
* The u64s field counts from the start of data , ignoring the shared
* fields .
*/
entry - > u64s = cpu_to_le16 ( u64s - 1 ) ;
* end = vstruct_next ( * end ) ;
return entry ;
}
2017-03-16 22:18:50 -08:00
# endif /* _BCACHEFS_JOURNAL_IO_H */