2018-11-01 22:13:19 +03:00
/* SPDX-License-Identifier: GPL-2.0 */
# ifndef _BCACHEFS_EC_H
# define _BCACHEFS_EC_H
# include "ec_types.h"
2020-07-10 01:31:51 +03:00
# include "buckets_types.h"
2022-06-14 02:07:19 +03:00
# include "extents_types.h"
2018-11-01 22:13:19 +03:00
2023-07-07 04:16:10 +03:00
enum bkey_invalid_flags ;
2022-04-04 00:50:01 +03:00
int bch2_stripe_invalid ( const struct bch_fs * , struct bkey_s_c ,
2023-07-07 04:16:10 +03:00
enum bkey_invalid_flags , struct printbuf * ) ;
2018-11-01 22:10:01 +03:00
void bch2_stripe_to_text ( struct printbuf * , struct bch_fs * ,
2018-11-01 22:13:19 +03:00
struct bkey_s_c ) ;
2022-10-22 22:59:53 +03:00
/*
 * bkey method table for KEY_TYPE_stripe: validation, text formatting,
 * endian swabbing, and the transactional/atomic trigger hooks.
 * min_val_size = 8: smallest value size accepted as a valid stripe.
 */
#define bch2_bkey_ops_stripe ((struct bkey_ops) {	\
	.key_invalid	= bch2_stripe_invalid,		\
	.val_to_text	= bch2_stripe_to_text,		\
	.swab		= bch2_ptr_swab,		\
	.trans_trigger	= bch2_trans_mark_stripe,	\
	.atomic_trigger	= bch2_mark_stripe,		\
	.min_val_size	= 8,				\
})
2018-11-01 22:13:19 +03:00
2018-11-26 04:53:51 +03:00
static inline unsigned stripe_csums_per_device ( const struct bch_stripe * s )
{
return DIV_ROUND_UP ( le16_to_cpu ( s - > sectors ) ,
1 < < s - > csum_granularity_bits ) ;
}
static inline unsigned stripe_csum_offset ( const struct bch_stripe * s ,
unsigned dev , unsigned csum_idx )
{
unsigned csum_bytes = bch_crc_bytes [ s - > csum_type ] ;
return sizeof ( struct bch_stripe ) +
sizeof ( struct bch_extent_ptr ) * s - > nr_blocks +
( dev * stripe_csums_per_device ( s ) + csum_idx ) * csum_bytes ;
}
static inline unsigned stripe_blockcount_offset ( const struct bch_stripe * s ,
unsigned idx )
{
return stripe_csum_offset ( s , s - > nr_blocks , 0 ) +
sizeof ( u16 ) * idx ;
}
/* Read the idx'th per-block sector count (stored little-endian). */
static inline unsigned stripe_blockcount_get(const struct bch_stripe *s,
					     unsigned idx)
{
	const void *p = (const void *) s + stripe_blockcount_offset(s, idx);

	return le16_to_cpup(p);
}
/* Store @v as the idx'th per-block sector count (little-endian). */
static inline void stripe_blockcount_set(struct bch_stripe *s,
					 unsigned idx, unsigned v)
{
	__le16 *count = (void *) s + stripe_blockcount_offset(s, idx);

	*count = cpu_to_le16(v);
}
static inline unsigned stripe_val_u64s ( const struct bch_stripe * s )
{
return DIV_ROUND_UP ( stripe_blockcount_offset ( s , s - > nr_blocks ) ,
sizeof ( u64 ) ) ;
}
static inline void * stripe_csum ( struct bch_stripe * s ,
2021-01-11 21:51:23 +03:00
unsigned block , unsigned csum_idx )
2018-11-26 04:53:51 +03:00
{
2021-01-11 21:51:23 +03:00
EBUG_ON ( block > = s - > nr_blocks ) ;
EBUG_ON ( csum_idx > = stripe_csums_per_device ( s ) ) ;
return ( void * ) s + stripe_csum_offset ( s , block , csum_idx ) ;
}
static inline struct bch_csum stripe_csum_get ( struct bch_stripe * s ,
unsigned block , unsigned csum_idx )
{
struct bch_csum csum = { 0 } ;
memcpy ( & csum , stripe_csum ( s , block , csum_idx ) , bch_crc_bytes [ s - > csum_type ] ) ;
return csum ;
}
static inline void stripe_csum_set ( struct bch_stripe * s ,
unsigned block , unsigned csum_idx ,
struct bch_csum csum )
{
memcpy ( stripe_csum ( s , block , csum_idx ) , & csum , bch_crc_bytes [ s - > csum_type ] ) ;
}
2021-03-13 00:55:28 +03:00
static inline bool __bch2_ptr_matches_stripe ( const struct bch_extent_ptr * stripe_ptr ,
const struct bch_extent_ptr * data_ptr ,
unsigned sectors )
{
return data_ptr - > dev = = stripe_ptr - > dev & &
data_ptr - > gen = = stripe_ptr - > gen & &
data_ptr - > offset > = stripe_ptr - > offset & &
data_ptr - > offset < stripe_ptr - > offset + sectors ;
}
static inline bool bch2_ptr_matches_stripe ( const struct bch_stripe * s ,
struct extent_ptr_decoded p )
2021-01-11 21:51:23 +03:00
{
unsigned nr_data = s - > nr_blocks - s - > nr_redundant ;
2021-03-13 00:55:28 +03:00
BUG_ON ( ! p . has_ec ) ;
if ( p . ec . block > = nr_data )
2021-01-11 21:51:23 +03:00
return false ;
2021-03-13 00:55:28 +03:00
return __bch2_ptr_matches_stripe ( & s - > ptrs [ p . ec . block ] , & p . ptr ,
le16_to_cpu ( s - > sectors ) ) ;
2021-01-11 21:51:23 +03:00
}
2021-12-05 07:07:33 +03:00
static inline bool bch2_ptr_matches_stripe_m ( const struct gc_stripe * m ,
2021-03-13 00:55:28 +03:00
struct extent_ptr_decoded p )
2021-01-11 21:51:23 +03:00
{
2021-03-13 00:55:28 +03:00
unsigned nr_data = m - > nr_blocks - m - > nr_redundant ;
2021-01-11 21:51:23 +03:00
BUG_ON ( ! p . has_ec ) ;
2021-03-13 00:55:28 +03:00
if ( p . ec . block > = nr_data )
return false ;
return __bch2_ptr_matches_stripe ( & m - > ptrs [ p . ec . block ] , & p . ptr ,
m - > sectors ) ;
2018-11-26 04:53:51 +03:00
}
2018-11-01 22:13:19 +03:00
struct bch_read_bio ;
/*
 * In-memory buffer for reading/writing (part of) a stripe's data and
 * parity blocks.
 */
struct ec_stripe_buf {
	/* might not be buffering the entire stripe: */
	unsigned		offset;		/* start of buffered range, in sectors presumably — verify against users */
	unsigned		size;		/* length of buffered range */

	/* bitmap: which blocks in data[] hold valid data */
	unsigned long		valid[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];

	/* one buffer per stripe block (data + parity) */
	void			*data[BCH_BKEY_PTRS_MAX];

	/* the stripe key itself, padded to max size */
	__BKEY_PADDED(key, 255);
};
struct ec_stripe_head ;
2023-03-09 18:18:09 +03:00
enum ec_stripe_ref {
STRIPE_REF_io ,
STRIPE_REF_stripe ,
STRIPE_REF_NR
} ;
2018-11-01 22:13:19 +03:00
/* A stripe being assembled: buckets are allocated, data written, then the
 * stripe key is created. */
struct ec_stripe_new {
	struct bch_fs		*c;
	struct ec_stripe_head	*h;	/* head this stripe was allocated from */
	struct mutex		lock;
	struct list_head	list;

	struct hlist_node	hash;	/* NOTE(review): presumably keyed by idx — confirm against hash table users */
	u64			idx;

	struct closure		iodone;

	/* indexed by enum ec_stripe_ref */
	atomic_t		ref[STRIPE_REF_NR];

	int			err;

	u8			nr_data;
	u8			nr_parity;
	bool			allocated;
	bool			pending;
	bool			have_existing_stripe;

	/* bitmaps over the stripe's blocks */
	unsigned long		blocks_gotten[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];
	unsigned long		blocks_allocated[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];
	open_bucket_idx_t	blocks[BCH_BKEY_PTRS_MAX];
	struct disk_reservation	res;

	struct ec_stripe_buf	new_stripe;
	/* used when reusing/extending an existing stripe */
	struct ec_stripe_buf	existing_stripe;
};
/*
 * Per-(target, algo, redundancy, watermark) state for allocating new
 * stripes; see bch2_ec_stripe_head_get().
 */
struct ec_stripe_head {
	struct list_head	list;
	struct mutex		lock;

	unsigned		target;
	unsigned		algo;
	unsigned		redundancy;	/* number of parity blocks */
	enum bch_watermark	watermark;

	struct bch_devs_mask	devs;		/* candidate devices */
	unsigned		nr_active_devs;

	unsigned		blocksize;

	/* round-robin allocation state for data and parity blocks */
	struct dev_stripe_state	block_stripe;
	struct dev_stripe_state	parity_stripe;

	/* stripe currently being built, if any */
	struct ec_stripe_new	*s;
};
int bch2_ec_read_extent ( struct bch_fs * , struct bch_read_bio * ) ;
void * bch2_writepoint_ec_buf ( struct bch_fs * , struct write_point * ) ;
void bch2_ec_bucket_cancel ( struct bch_fs * , struct open_bucket * ) ;
int bch2_ec_stripe_new_alloc ( struct bch_fs * , struct ec_stripe_head * ) ;
2020-07-07 03:59:46 +03:00
void bch2_ec_stripe_head_put ( struct bch_fs * , struct ec_stripe_head * ) ;
2023-02-18 04:50:55 +03:00
struct ec_stripe_head * bch2_ec_stripe_head_get ( struct btree_trans * ,
2023-03-03 10:43:39 +03:00
unsigned , unsigned , unsigned ,
2023-06-25 02:30:10 +03:00
enum bch_watermark , struct closure * ) ;
2018-11-01 22:13:19 +03:00
2018-11-25 01:09:44 +03:00
void bch2_stripes_heap_update ( struct bch_fs * , struct stripe * , size_t ) ;
void bch2_stripes_heap_del ( struct bch_fs * , struct stripe * , size_t ) ;
void bch2_stripes_heap_insert ( struct bch_fs * , struct stripe * , size_t ) ;
2018-11-01 22:13:19 +03:00
2023-02-09 20:22:58 +03:00
void bch2_do_stripe_deletes ( struct bch_fs * ) ;
2023-03-06 07:52:49 +03:00
void bch2_ec_do_stripe_creates ( struct bch_fs * ) ;
2023-03-09 18:18:09 +03:00
void bch2_ec_stripe_new_free ( struct bch_fs * , struct ec_stripe_new * ) ;
2023-03-06 07:52:49 +03:00
2023-03-09 18:18:09 +03:00
/* Take a reference of the given kind; caller must already hold one. */
static inline void ec_stripe_new_get(struct ec_stripe_new *s,
				     enum ec_stripe_ref ref)
{
	atomic_inc(&s->ref[ref]);
}
2023-03-09 18:18:09 +03:00
static inline void ec_stripe_new_put ( struct bch_fs * c , struct ec_stripe_new * s ,
enum ec_stripe_ref ref )
2023-03-06 07:52:49 +03:00
{
2023-03-09 18:18:09 +03:00
BUG_ON ( atomic_read ( & s - > ref [ ref ] ) < = 0 ) ;
if ( atomic_dec_and_test ( & s - > ref [ ref ] ) )
switch ( ref ) {
case STRIPE_REF_stripe :
bch2_ec_stripe_new_free ( c , s ) ;
break ;
case STRIPE_REF_io :
bch2_ec_do_stripe_creates ( c ) ;
break ;
default :
unreachable ( ) ;
}
2023-03-06 07:52:49 +03:00
}
2023-02-09 20:22:58 +03:00
2018-11-01 22:13:19 +03:00
void bch2_ec_stop_dev ( struct bch_fs * , struct bch_dev * ) ;
2023-03-14 05:01:47 +03:00
void bch2_fs_ec_stop ( struct bch_fs * ) ;
void bch2_fs_ec_flush ( struct bch_fs * ) ;
2018-11-01 22:13:19 +03:00
2021-04-29 22:37:47 +03:00
int bch2_stripes_read ( struct bch_fs * ) ;
2018-11-26 04:53:51 +03:00
2020-07-07 03:18:13 +03:00
void bch2_stripes_heap_to_text ( struct printbuf * , struct bch_fs * ) ;
2020-07-26 00:06:11 +03:00
void bch2_new_stripes_to_text ( struct printbuf * , struct bch_fs * ) ;
2020-07-07 03:18:13 +03:00
2018-11-01 22:13:19 +03:00
void bch2_fs_ec_exit ( struct bch_fs * ) ;
2022-04-09 08:23:50 +03:00
void bch2_fs_ec_init_early ( struct bch_fs * ) ;
2018-11-01 22:13:19 +03:00
int bch2_fs_ec_init ( struct bch_fs * ) ;
# endif /* _BCACHEFS_EC_H */