// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */
#ifndef NO_BCACHEFS_SYSFS

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "sysfs.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_iter.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "disk_groups.h"
#include "ec.h"
#include "inode.h"
#include "journal.h"
#include "keylist.h"
#include "move.h"
#include "opts.h"
#include "rebalance.h"
#include "replicas.h"
#include "super-io.h"
#include "tests.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

#include "util.h"
/* Define the sysfs_ops instance wiring a kobject type to its show/store. */
#define SYSFS_OPS(type)							\
struct sysfs_ops type ## _sysfs_ops = {					\
	.show	= type ## _show,					\
	.store	= type ## _store					\
}
/*
 * SHOW() declares a sysfs show hook for @fn: the generated _show() wrapper
 * renders into a printbuf via fn##_to_text(), then copies at most
 * PAGE_SIZE - 1 bytes into the sysfs buffer. The macro invocation is
 * followed by the body of fn##_to_text().
 */
#define SHOW(fn)							\
static ssize_t fn ## _to_text(struct printbuf *,			\
			      struct kobject *, struct attribute *);	\
									\
static ssize_t fn ## _show(struct kobject *kobj, struct attribute *attr,\
			   char *buf)					\
{									\
	struct printbuf out = PRINTBUF;					\
	ssize_t ret = fn ## _to_text(&out, kobj, attr);			\
									\
	if (!ret && out.allocation_failure)				\
		ret = -ENOMEM;						\
									\
	if (!ret) {							\
		/* sysfs buffers are one page; leave room for NUL */	\
		ret = min_t(size_t, out.pos, PAGE_SIZE - 1);		\
		memcpy(buf, out.buf, ret);				\
	}								\
	printbuf_exit(&out);						\
	return ret;							\
}									\
									\
static ssize_t fn ## _to_text(struct printbuf *out, struct kobject *kobj,\
			      struct attribute *attr)
/* STORE() opens the definition of the sysfs store hook for @fn. */
#define STORE(fn)							\
static ssize_t fn ## _store(struct kobject *kobj, struct attribute *attr,\
			    const char *buf, size_t size)

/* Declare a static struct attribute named sysfs_<_name> with mode @_mode. */
#define __sysfs_attribute(_name, _mode)					\
	static struct attribute sysfs_##_name =				\
		{ .name = #_name, .mode = _mode }

#define write_attribute(n)	__sysfs_attribute(n, S_IWUSR)
#define read_attribute(n)	__sysfs_attribute(n, S_IRUGO)
#define rw_attribute(n)		__sysfs_attribute(n, S_IRUGO|S_IWUSR)
/*
 * Output helpers for use inside a SHOW() body: each emits only when the
 * attribute being read matches sysfs_<file>. 'attr' and 'out' come from the
 * enclosing fn##_to_text() scope.
 */
#define sysfs_printf(file, fmt, ...)					\
do {									\
	if (attr == &sysfs_##file)					\
		pr_buf(out, fmt "\n", __VA_ARGS__);			\
} while (0)

#define sysfs_print(file, var)						\
do {									\
	if (attr == &sysfs_##file)					\
		snprint(out, var);					\
} while (0)

/* Human-readable (units-scaled) print of @val. */
#define sysfs_hprint(file, val)						\
do {									\
	if (attr == &sysfs_##file)					\
		bch2_hprint(out, val);					\
} while (0)

/* Variants that read the field via the local var() accessor macro. */
#define var_printf(_var, fmt)	sysfs_printf(_var, fmt, var(_var))
#define var_print(_var)		sysfs_print(_var, var(_var))
#define var_hprint(_var)	sysfs_hprint(_var, var(_var))
/*
 * Input-parsing helpers for use inside a STORE() body. The sysfs_* variants
 * return from the enclosing _store() when the attribute matches: either the
 * parse error, or @size on success. The *_or_return statement expressions
 * yield the parsed value, or return the error from the enclosing function.
 */
#define sysfs_strtoul(file, var)					\
do {									\
	if (attr == &sysfs_##file)					\
		return strtoul_safe(buf, var) ?: (ssize_t) size;	\
} while (0)

#define sysfs_strtoul_clamp(file, var, min, max)			\
do {									\
	if (attr == &sysfs_##file)					\
		return strtoul_safe_clamp(buf, var, min, max)		\
			?: (ssize_t) size;				\
} while (0)

#define strtoul_or_return(cp)						\
({									\
	unsigned long _v;						\
	int _r = kstrtoul(cp, 10, &_v);					\
	if (_r)								\
		return _r;						\
	_v;								\
})

#define strtoul_restrict_or_return(cp, min, max)			\
({									\
	unsigned long __v = 0;						\
	int _r = strtoul_safe_restrict(cp, __v, min, max);		\
	if (_r)								\
		return _r;						\
	__v;								\
})

/* Parse with human-readable suffixes (k, M, G, ...). */
#define strtoi_h_or_return(cp)						\
({									\
	u64 _v;								\
	int _r = strtoi_h(cp, &_v);					\
	if (_r)								\
		return _r;						\
	_v;								\
})

#define sysfs_hatoi(file, var)						\
do {									\
	if (attr == &sysfs_##file)					\
		return strtoi_h(buf, &var) ?: (ssize_t) size;		\
} while (0)
write_attribute ( trigger_gc ) ;
write_attribute ( prune_cache ) ;
rw_attribute ( btree_gc_periodic ) ;
2021-04-13 22:00:40 +03:00
rw_attribute ( gc_gens_pos ) ;
2017-03-17 09:18:50 +03:00
read_attribute ( uuid ) ;
read_attribute ( minor ) ;
read_attribute ( bucket_size ) ;
read_attribute ( first_bucket ) ;
read_attribute ( nbuckets ) ;
read_attribute ( durability ) ;
read_attribute ( iodone ) ;
read_attribute ( io_latency_read ) ;
read_attribute ( io_latency_write ) ;
read_attribute ( io_latency_stats_read ) ;
read_attribute ( io_latency_stats_write ) ;
read_attribute ( congested ) ;
2021-04-01 04:07:37 +03:00
read_attribute ( btree_avg_write_size ) ;
2017-03-17 09:18:50 +03:00
read_attribute ( btree_cache_size ) ;
read_attribute ( compression_stats ) ;
read_attribute ( journal_debug ) ;
read_attribute ( btree_updates ) ;
2020-11-20 04:13:30 +03:00
read_attribute ( btree_cache ) ;
2020-06-16 02:53:46 +03:00
read_attribute ( btree_key_cache ) ;
2020-06-02 23:36:11 +03:00
read_attribute ( btree_transactions ) ;
2020-07-07 03:18:13 +03:00
read_attribute ( stripes_heap ) ;
2021-07-13 06:52:49 +03:00
read_attribute ( open_buckets ) ;
2022-10-31 23:13:05 +03:00
read_attribute ( write_points ) ;
2017-03-17 09:18:50 +03:00
read_attribute ( internal_uuid ) ;
read_attribute ( has_data ) ;
read_attribute ( alloc_debug ) ;
read_attribute ( read_realloc_races ) ;
read_attribute ( extent_migrate_done ) ;
read_attribute ( extent_migrate_raced ) ;
2022-01-10 04:48:31 +03:00
read_attribute ( bucket_alloc_fail ) ;
2017-03-17 09:18:50 +03:00
rw_attribute ( discard ) ;
rw_attribute ( label ) ;
rw_attribute ( copy_gc_enabled ) ;
2021-04-13 21:45:55 +03:00
read_attribute ( copy_gc_wait ) ;
2017-03-17 09:18:50 +03:00
rw_attribute ( rebalance_enabled ) ;
sysfs_pd_controller_attribute ( rebalance ) ;
read_attribute ( rebalance_work ) ;
rw_attribute ( promote_whole_extents ) ;
2018-11-01 22:13:19 +03:00
read_attribute ( new_stripes ) ;
2019-12-19 23:07:51 +03:00
read_attribute ( io_timers_read ) ;
read_attribute ( io_timers_write ) ;
2022-01-07 05:38:08 +03:00
read_attribute ( data_jobs ) ;
2021-07-23 22:57:19 +03:00
2017-03-17 09:18:50 +03:00
# ifdef CONFIG_BCACHEFS_TESTS
write_attribute ( perf_test ) ;
# endif /* CONFIG_BCACHEFS_TESTS */
# define x(_name) \
static struct attribute sysfs_time_stat_ # # _name = \
{ . name = # _name , . mode = S_IRUGO } ;
BCH_TIME_STATS ( )
# undef x
static struct attribute sysfs_state_rw = {
. name = " state " ,
. mode = S_IRUGO
} ;
static size_t bch2_btree_cache_size ( struct bch_fs * c )
{
size_t ret = 0 ;
struct btree * b ;
mutex_lock ( & c - > btree_cache . lock ) ;
list_for_each_entry ( b , & c - > btree_cache . live , list )
ret + = btree_bytes ( c ) ;
mutex_unlock ( & c - > btree_cache . lock ) ;
return ret ;
}
2021-04-01 04:07:37 +03:00
static size_t bch2_btree_avg_write_size ( struct bch_fs * c )
{
u64 nr = atomic64_read ( & c - > btree_writes_nr ) ;
u64 sectors = atomic64_read ( & c - > btree_writes_sectors ) ;
return nr ? div64_u64 ( sectors , nr ) : 0 ;
}
2021-07-23 22:57:19 +03:00
static long data_progress_to_text ( struct printbuf * out , struct bch_fs * c )
{
long ret = 0 ;
2022-01-07 05:38:08 +03:00
struct bch_move_stats * stats ;
2021-07-23 22:57:19 +03:00
mutex_lock ( & c - > data_progress_lock ) ;
2022-01-07 05:38:08 +03:00
list_for_each_entry ( stats , & c - > data_progress_list , list ) {
pr_buf ( out , " %s: data type %s btree_id %s position: " ,
stats - > name ,
bch2_data_types [ stats - > data_type ] ,
bch2_btree_ids [ stats - > btree_id ] ) ;
bch2_bpos_to_text ( out , stats - > pos ) ;
pr_buf ( out , " %s " , " \n " ) ;
}
2021-07-23 22:57:19 +03:00
mutex_unlock ( & c - > data_progress_lock ) ;
return ret ;
}
2020-07-26 00:06:11 +03:00
static int bch2_compression_stats_to_text ( struct printbuf * out , struct bch_fs * c )
2017-03-17 09:18:50 +03:00
{
2019-03-25 22:10:15 +03:00
struct btree_trans trans ;
2021-08-30 22:18:31 +03:00
struct btree_iter iter ;
2017-03-17 09:18:50 +03:00
struct bkey_s_c k ;
2021-12-28 03:58:12 +03:00
enum btree_id id ;
u64 nr_uncompressed_extents = 0 ,
2017-03-17 09:18:50 +03:00
nr_compressed_extents = 0 ,
2021-12-28 03:58:12 +03:00
nr_incompressible_extents = 0 ,
uncompressed_sectors = 0 ,
incompressible_sectors = 0 ,
2017-03-17 09:18:50 +03:00
compressed_sectors_compressed = 0 ,
compressed_sectors_uncompressed = 0 ;
2019-04-17 22:49:28 +03:00
int ret ;
2017-03-17 09:18:50 +03:00
2019-03-22 05:19:57 +03:00
if ( ! test_bit ( BCH_FS_STARTED , & c - > flags ) )
2017-03-17 09:18:50 +03:00
return - EPERM ;
2019-05-15 17:54:43 +03:00
bch2_trans_init ( & trans , c , 0 , 0 ) ;
2019-03-25 22:10:15 +03:00
2021-12-28 03:58:12 +03:00
for ( id = 0 ; id < BTREE_ID_NR ; id + + ) {
if ( ! ( ( 1U < < id ) & BTREE_ID_HAS_PTRS ) )
continue ;
for_each_btree_key ( & trans , iter , id , POS_MIN ,
BTREE_ITER_ALL_SNAPSHOTS , k , ret ) {
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c ( k ) ;
2018-09-28 04:08:39 +03:00
const union bch_extent_entry * entry ;
struct extent_ptr_decoded p ;
2021-12-28 03:58:12 +03:00
bool compressed = false , uncompressed = false , incompressible = false ;
bkey_for_each_ptr_decode ( k . k , ptrs , p , entry ) {
switch ( p . crc . compression_type ) {
case BCH_COMPRESSION_TYPE_none :
uncompressed = true ;
uncompressed_sectors + = k . k - > size ;
break ;
case BCH_COMPRESSION_TYPE_incompressible :
incompressible = true ;
incompressible_sectors + = k . k - > size ;
break ;
default :
2017-03-17 09:18:50 +03:00
compressed_sectors_compressed + =
2018-09-28 04:08:39 +03:00
p . crc . compressed_size ;
2017-03-17 09:18:50 +03:00
compressed_sectors_uncompressed + =
2018-09-28 04:08:39 +03:00
p . crc . uncompressed_size ;
2021-12-28 03:58:12 +03:00
compressed = true ;
break ;
2017-03-17 09:18:50 +03:00
}
}
2021-12-28 03:58:12 +03:00
if ( incompressible )
nr_incompressible_extents + + ;
else if ( uncompressed )
nr_uncompressed_extents + + ;
else if ( compressed )
nr_compressed_extents + + ;
2017-03-17 09:18:50 +03:00
}
2021-12-28 03:58:12 +03:00
bch2_trans_iter_exit ( & trans , & iter ) ;
}
2019-04-17 22:49:28 +03:00
2021-10-19 22:08:00 +03:00
bch2_trans_exit ( & trans ) ;
2021-12-28 03:58:12 +03:00
2019-04-17 22:49:28 +03:00
if ( ret )
return ret ;
2017-03-17 09:18:50 +03:00
2021-12-28 03:58:12 +03:00
pr_buf ( out , " uncompressed: \n " ) ;
pr_buf ( out , " nr extents: %llu \n " , nr_uncompressed_extents ) ;
pr_buf ( out , " size: " ) ;
bch2_hprint ( out , uncompressed_sectors < < 9 ) ;
pr_buf ( out , " \n " ) ;
pr_buf ( out , " compressed: \n " ) ;
pr_buf ( out , " nr extents: %llu \n " , nr_compressed_extents ) ;
pr_buf ( out , " compressed size: " ) ;
bch2_hprint ( out , compressed_sectors_compressed < < 9 ) ;
pr_buf ( out , " \n " ) ;
pr_buf ( out , " uncompressed size: " ) ;
bch2_hprint ( out , compressed_sectors_uncompressed < < 9 ) ;
pr_buf ( out , " \n " ) ;
pr_buf ( out , " incompressible: \n " ) ;
pr_buf ( out , " nr extents: %llu \n " , nr_incompressible_extents ) ;
pr_buf ( out , " size: " ) ;
bch2_hprint ( out , incompressible_sectors < < 9 ) ;
pr_buf ( out , " \n " ) ;
2020-07-26 00:06:11 +03:00
return 0 ;
2018-11-01 22:13:19 +03:00
}
2021-05-24 00:04:13 +03:00
static void bch2_gc_gens_pos_to_text ( struct printbuf * out , struct bch_fs * c )
2021-04-13 22:00:40 +03:00
{
pr_buf ( out , " %s: " , bch2_btree_ids [ c - > gc_gens_btree ] ) ;
bch2_bpos_to_text ( out , c - > gc_gens_pos ) ;
pr_buf ( out , " \n " ) ;
}
2017-03-17 09:18:50 +03:00
SHOW ( bch2_fs )
{
struct bch_fs * c = container_of ( kobj , struct bch_fs , kobj ) ;
sysfs_print ( minor , c - > minor ) ;
sysfs_printf ( internal_uuid , " %pU " , c - > sb . uuid . b ) ;
sysfs_hprint ( btree_cache_size , bch2_btree_cache_size ( c ) ) ;
2021-04-01 04:07:37 +03:00
sysfs_hprint ( btree_avg_write_size , bch2_btree_avg_write_size ( c ) ) ;
2017-03-17 09:18:50 +03:00
sysfs_print ( read_realloc_races ,
atomic_long_read ( & c - > read_realloc_races ) ) ;
sysfs_print ( extent_migrate_done ,
atomic_long_read ( & c - > extent_migrate_done ) ) ;
sysfs_print ( extent_migrate_raced ,
atomic_long_read ( & c - > extent_migrate_raced ) ) ;
2022-01-10 04:48:31 +03:00
sysfs_print ( bucket_alloc_fail ,
atomic_long_read ( & c - > bucket_alloc_fail ) ) ;
2017-03-17 09:18:50 +03:00
sysfs_printf ( btree_gc_periodic , " %u " , ( int ) c - > btree_gc_periodic ) ;
2022-02-25 21:18:19 +03:00
if ( attr = = & sysfs_gc_gens_pos )
bch2_gc_gens_pos_to_text ( out , c ) ;
2021-04-13 22:00:40 +03:00
2017-03-17 09:18:50 +03:00
sysfs_printf ( copy_gc_enabled , " %i " , c - > copy_gc_enabled ) ;
sysfs_printf ( rebalance_enabled , " %i " , c - > rebalance . enabled ) ;
sysfs_pd_controller_show ( rebalance , & c - > rebalance . pd ) ; /* XXX */
2021-04-13 21:45:55 +03:00
sysfs_hprint ( copy_gc_wait ,
max ( 0LL , c - > copygc_wait -
atomic64_read ( & c - > io_clock [ WRITE ] . now ) ) < < 9 ) ;
2017-03-17 09:18:50 +03:00
2022-02-25 21:18:19 +03:00
if ( attr = = & sysfs_rebalance_work )
bch2_rebalance_work_to_text ( out , c ) ;
2017-03-17 09:18:50 +03:00
sysfs_print ( promote_whole_extents , c - > promote_whole_extents ) ;
/* Debugging: */
2022-02-25 21:18:19 +03:00
if ( attr = = & sysfs_journal_debug )
bch2_journal_debug_to_text ( out , & c - > journal ) ;
2017-03-17 09:18:50 +03:00
2022-02-25 21:18:19 +03:00
if ( attr = = & sysfs_btree_updates )
bch2_btree_updates_to_text ( out , c ) ;
2017-03-17 09:18:50 +03:00
2022-02-25 21:18:19 +03:00
if ( attr = = & sysfs_btree_cache )
bch2_btree_cache_to_text ( out , c ) ;
2020-11-20 04:13:30 +03:00
2022-02-25 21:18:19 +03:00
if ( attr = = & sysfs_btree_key_cache )
bch2_btree_key_cache_to_text ( out , & c - > btree_key_cache ) ;
2020-06-16 02:53:46 +03:00
2022-02-25 21:18:19 +03:00
if ( attr = = & sysfs_btree_transactions )
bch2_btree_trans_to_text ( out , c ) ;
2017-03-17 09:18:50 +03:00
2022-02-25 21:18:19 +03:00
if ( attr = = & sysfs_stripes_heap )
bch2_stripes_heap_to_text ( out , c ) ;
2020-07-07 03:18:13 +03:00
2022-02-25 21:18:19 +03:00
if ( attr = = & sysfs_open_buckets )
bch2_open_buckets_to_text ( out , c ) ;
2021-07-13 06:52:49 +03:00
2022-10-31 23:13:05 +03:00
if ( attr = = & sysfs_write_points )
bch2_write_points_to_text ( out , c ) ;
2022-02-25 21:18:19 +03:00
if ( attr = = & sysfs_compression_stats )
bch2_compression_stats_to_text ( out , c ) ;
2017-03-17 09:18:50 +03:00
2022-02-25 21:18:19 +03:00
if ( attr = = & sysfs_new_stripes )
bch2_new_stripes_to_text ( out , c ) ;
2018-11-01 22:13:19 +03:00
2022-02-25 21:18:19 +03:00
if ( attr = = & sysfs_io_timers_read )
bch2_io_timers_to_text ( out , & c - > io_clock [ READ ] ) ;
2019-12-19 23:07:51 +03:00
2022-02-25 21:18:19 +03:00
if ( attr = = & sysfs_io_timers_write )
bch2_io_timers_to_text ( out , & c - > io_clock [ WRITE ] ) ;
if ( attr = = & sysfs_data_jobs )
data_progress_to_text ( out , c ) ;
2021-07-23 22:57:19 +03:00
2017-03-17 09:18:50 +03:00
return 0 ;
}
2020-06-15 21:58:47 +03:00
STORE ( bch2_fs )
2017-03-17 09:18:50 +03:00
{
struct bch_fs * c = container_of ( kobj , struct bch_fs , kobj ) ;
if ( attr = = & sysfs_btree_gc_periodic ) {
ssize_t ret = strtoul_safe ( buf , c - > btree_gc_periodic )
? : ( ssize_t ) size ;
wake_up_process ( c - > gc_thread ) ;
return ret ;
}
if ( attr = = & sysfs_copy_gc_enabled ) {
ssize_t ret = strtoul_safe ( buf , c - > copy_gc_enabled )
? : ( ssize_t ) size ;
2020-07-11 23:28:54 +03:00
if ( c - > copygc_thread )
wake_up_process ( c - > copygc_thread ) ;
2017-03-17 09:18:50 +03:00
return ret ;
}
if ( attr = = & sysfs_rebalance_enabled ) {
ssize_t ret = strtoul_safe ( buf , c - > rebalance . enabled )
? : ( ssize_t ) size ;
rebalance_wakeup ( c ) ;
return ret ;
}
sysfs_pd_controller_store ( rebalance , & c - > rebalance . pd ) ;
sysfs_strtoul ( promote_whole_extents , c - > promote_whole_extents ) ;
/* Debugging: */
2019-03-22 05:19:57 +03:00
if ( ! test_bit ( BCH_FS_STARTED , & c - > flags ) )
2017-03-17 09:18:50 +03:00
return - EPERM ;
/* Debugging: */
2021-12-24 12:51:10 +03:00
if ( ! test_bit ( BCH_FS_RW , & c - > flags ) )
return - EROFS ;
if ( attr = = & sysfs_prune_cache ) {
struct shrink_control sc ;
sc . gfp_mask = GFP_KERNEL ;
sc . nr_to_scan = strtoul_or_return ( buf ) ;
c - > btree_cache . shrink . scan_objects ( & c - > btree_cache . shrink , & sc ) ;
}
2020-06-15 21:58:47 +03:00
if ( attr = = & sysfs_trigger_gc ) {
2020-06-15 22:10:54 +03:00
/*
* Full gc is currently incompatible with btree key cache :
*/
#if 0
2020-06-15 21:58:47 +03:00
down_read ( & c - > state_lock ) ;
2021-01-27 04:15:46 +03:00
bch2_gc ( c , false , false ) ;
2020-06-15 21:58:47 +03:00
up_read ( & c - > state_lock ) ;
2020-06-15 22:10:54 +03:00
# else
bch2_gc_gens ( c ) ;
# endif
2020-06-15 21:58:47 +03:00
}
2017-03-17 09:18:50 +03:00
# ifdef CONFIG_BCACHEFS_TESTS
if ( attr = = & sysfs_perf_test ) {
char * tmp = kstrdup ( buf , GFP_KERNEL ) , * p = tmp ;
char * test = strsep ( & p , " \t \n " ) ;
char * nr_str = strsep ( & p , " \t \n " ) ;
char * threads_str = strsep ( & p , " \t \n " ) ;
unsigned threads ;
u64 nr ;
int ret = - EINVAL ;
if ( threads_str & &
! ( ret = kstrtouint ( threads_str , 10 , & threads ) ) & &
! ( ret = bch2_strtoull_h ( nr_str , & nr ) ) )
2020-12-01 20:23:55 +03:00
ret = bch2_btree_perf_test ( c , test , nr , threads ) ;
2017-03-17 09:18:50 +03:00
kfree ( tmp ) ;
2020-12-01 20:23:55 +03:00
if ( ret )
size = ret ;
2017-03-17 09:18:50 +03:00
}
# endif
return size ;
}
SYSFS_OPS ( bch2_fs ) ;
struct attribute * bch2_fs_files [ ] = {
& sysfs_minor ,
& sysfs_btree_cache_size ,
2021-04-01 04:07:37 +03:00
& sysfs_btree_avg_write_size ,
2017-03-17 09:18:50 +03:00
& sysfs_promote_whole_extents ,
& sysfs_compression_stats ,
# ifdef CONFIG_BCACHEFS_TESTS
& sysfs_perf_test ,
# endif
NULL
} ;
/* internal dir - just a wrapper */
SHOW ( bch2_fs_internal )
{
struct bch_fs * c = container_of ( kobj , struct bch_fs , internal ) ;
2022-02-25 21:18:19 +03:00
return bch2_fs_to_text ( out , & c - > kobj , attr ) ;
2017-03-17 09:18:50 +03:00
}
STORE ( bch2_fs_internal )
{
struct bch_fs * c = container_of ( kobj , struct bch_fs , internal ) ;
return bch2_fs_store ( & c - > kobj , attr , buf , size ) ;
}
SYSFS_OPS ( bch2_fs_internal ) ;
struct attribute * bch2_fs_internal_files [ ] = {
& sysfs_journal_debug ,
& sysfs_btree_updates ,
2020-11-20 04:13:30 +03:00
& sysfs_btree_cache ,
2020-06-16 02:53:46 +03:00
& sysfs_btree_key_cache ,
2020-06-02 23:36:11 +03:00
& sysfs_btree_transactions ,
2021-12-14 22:24:04 +03:00
& sysfs_new_stripes ,
2020-07-07 03:18:13 +03:00
& sysfs_stripes_heap ,
2021-07-13 06:52:49 +03:00
& sysfs_open_buckets ,
2022-10-31 23:13:05 +03:00
& sysfs_write_points ,
2021-12-14 22:24:04 +03:00
& sysfs_io_timers_read ,
& sysfs_io_timers_write ,
& sysfs_trigger_gc ,
& sysfs_prune_cache ,
2017-03-17 09:18:50 +03:00
& sysfs_read_realloc_races ,
& sysfs_extent_migrate_done ,
& sysfs_extent_migrate_raced ,
2022-01-10 04:48:31 +03:00
& sysfs_bucket_alloc_fail ,
2017-03-17 09:18:50 +03:00
2021-04-13 22:00:40 +03:00
& sysfs_gc_gens_pos ,
2017-03-17 09:18:50 +03:00
& sysfs_copy_gc_enabled ,
2021-04-13 21:45:55 +03:00
& sysfs_copy_gc_wait ,
2017-03-17 09:18:50 +03:00
& sysfs_rebalance_enabled ,
& sysfs_rebalance_work ,
sysfs_pd_controller_files ( rebalance ) ,
2022-01-07 05:38:08 +03:00
& sysfs_data_jobs ,
2021-07-23 22:57:19 +03:00
2017-03-17 09:18:50 +03:00
& sysfs_internal_uuid ,
NULL
} ;
/* options */
SHOW ( bch2_fs_opts_dir )
{
struct bch_fs * c = container_of ( kobj , struct bch_fs , opts_dir ) ;
const struct bch_option * opt = container_of ( attr , struct bch_option , attr ) ;
int id = opt - bch2_opt_table ;
u64 v = bch2_opt_get_by_id ( & c - > opts , id ) ;
2022-03-05 20:01:16 +03:00
bch2_opt_to_text ( out , c , c - > disk_sb . sb , opt , v , OPT_SHOW_FULL_LIST ) ;
2022-02-25 21:18:19 +03:00
pr_char ( out , ' \n ' ) ;
2017-03-17 09:18:50 +03:00
2022-02-25 21:18:19 +03:00
return 0 ;
2017-03-17 09:18:50 +03:00
}
STORE ( bch2_fs_opts_dir )
{
struct bch_fs * c = container_of ( kobj , struct bch_fs , opts_dir ) ;
const struct bch_option * opt = container_of ( attr , struct bch_option , attr ) ;
int ret , id = opt - bch2_opt_table ;
char * tmp ;
u64 v ;
2022-03-06 23:15:41 +03:00
/*
* We don ' t need to take c - > writes for correctness , but it eliminates an
* unsightly error message in the dmesg log when we ' re RO :
*/
if ( unlikely ( ! percpu_ref_tryget ( & c - > writes ) ) )
return - EROFS ;
2017-03-17 09:18:50 +03:00
tmp = kstrdup ( buf , GFP_KERNEL ) ;
2022-03-06 23:15:41 +03:00
if ( ! tmp ) {
ret = - ENOMEM ;
goto err ;
}
2017-03-17 09:18:50 +03:00
2022-03-21 07:15:38 +03:00
ret = bch2_opt_parse ( c , opt , strim ( tmp ) , & v , NULL ) ;
2017-03-17 09:18:50 +03:00
kfree ( tmp ) ;
if ( ret < 0 )
2022-03-06 23:15:41 +03:00
goto err ;
2017-03-17 09:18:50 +03:00
2018-11-13 02:30:55 +03:00
ret = bch2_opt_check_may_set ( c , id , v ) ;
if ( ret < 0 )
2022-03-06 23:15:41 +03:00
goto err ;
2017-03-17 09:18:50 +03:00
2021-12-14 22:24:41 +03:00
bch2_opt_set_sb ( c , opt , v ) ;
2017-03-17 09:18:50 +03:00
bch2_opt_set_by_id ( & c - > opts , id , v ) ;
if ( ( id = = Opt_background_target | |
id = = Opt_background_compression ) & & v ) {
bch2_rebalance_add_work ( c , S64_MAX ) ;
rebalance_wakeup ( c ) ;
}
2022-03-06 23:15:41 +03:00
ret = size ;
err :
percpu_ref_put ( & c - > writes ) ;
return ret ;
2017-03-17 09:18:50 +03:00
}
SYSFS_OPS(bch2_fs_opts_dir);

/* Populated dynamically by bch2_opts_create_sysfs_files(). */
struct attribute *bch2_fs_opts_dir_files[] = { NULL };
int bch2_opts_create_sysfs_files ( struct kobject * kobj )
{
const struct bch_option * i ;
int ret ;
for ( i = bch2_opt_table ;
i < bch2_opt_table + bch2_opts_nr ;
i + + ) {
2021-12-14 22:24:41 +03:00
if ( ! ( i - > flags & OPT_FS ) )
2017-03-17 09:18:50 +03:00
continue ;
ret = sysfs_create_file ( kobj , & i - > attr ) ;
if ( ret )
return ret ;
}
return 0 ;
}
/* time stats */
SHOW ( bch2_fs_time_stats )
{
struct bch_fs * c = container_of ( kobj , struct bch_fs , time_stats ) ;
2020-07-26 00:06:11 +03:00
# define x(name) \
2022-02-25 21:18:19 +03:00
if ( attr = = & sysfs_time_stat_ # # name ) \
bch2_time_stats_to_text ( out , & c - > times [ BCH_TIME_ # # name ] ) ;
2017-03-17 09:18:50 +03:00
BCH_TIME_STATS ( )
# undef x
return 0 ;
}
STORE ( bch2_fs_time_stats )
{
return size ;
}
SYSFS_OPS ( bch2_fs_time_stats ) ;
struct attribute * bch2_fs_time_stats_files [ ] = {
# define x(name) \
& sysfs_time_stat_ # # name ,
BCH_TIME_STATS ( )
# undef x
NULL
} ;
2020-07-26 00:06:11 +03:00
static void dev_alloc_debug_to_text ( struct printbuf * out , struct bch_dev * ca )
2017-03-17 09:18:50 +03:00
{
struct bch_fs * c = ca - > fs ;
2020-07-22 20:27:00 +03:00
struct bch_dev_usage stats = bch2_dev_usage_read ( ca ) ;
2019-01-19 21:13:29 +03:00
unsigned i , nr [ BCH_DATA_NR ] ;
memset ( nr , 0 , sizeof ( nr ) ) ;
for ( i = 0 ; i < ARRAY_SIZE ( c - > open_buckets ) ; i + + )
2021-12-26 05:21:46 +03:00
nr [ c - > open_buckets [ i ] . data_type ] + + ;
2017-03-17 09:18:50 +03:00
2020-07-26 00:06:11 +03:00
pr_buf ( out ,
2021-01-22 04:51:51 +03:00
" \t \t buckets \t sectors fragmented \n "
" capacity%16llu \n " ,
ca - > mi . nbuckets - ca - > mi . first_bucket ) ;
for ( i = 1 ; i < BCH_DATA_NR ; i + + )
pr_buf ( out , " %-8s%16llu%16llu%16llu \n " ,
bch2_data_types [ i ] , stats . d [ i ] . buckets ,
stats . d [ i ] . sectors , stats . d [ i ] . fragmented ) ;
pr_buf ( out ,
" ec \t %16llu \n "
" available%15llu \n "
" \n "
" freelist_wait \t \t %s \n "
2021-04-13 16:49:23 +03:00
" open buckets allocated \t %u \n "
" open buckets this dev \t %u \n "
" open buckets total \t %u \n "
2021-01-22 04:51:51 +03:00
" open_buckets_wait \t %s \n "
" open_buckets_btree \t %u \n "
" open_buckets_user \t %u \n "
2022-01-10 04:48:31 +03:00
" btree reserve cache \t %u \n " ,
2021-01-22 04:51:51 +03:00
stats . buckets_ec ,
2022-01-10 04:48:31 +03:00
__dev_buckets_available ( ca , stats , RESERVE_none ) ,
2021-01-22 04:51:51 +03:00
c - > freelist_wait . list . first ? " waiting " : " empty " ,
2021-04-13 16:49:23 +03:00
OPEN_BUCKETS_COUNT - c - > open_buckets_nr_free ,
ca - > nr_open_buckets ,
OPEN_BUCKETS_COUNT ,
2021-01-22 04:51:51 +03:00
c - > open_buckets_wait . list . first ? " waiting " : " empty " ,
nr [ BCH_DATA_btree ] ,
nr [ BCH_DATA_user ] ,
2022-01-10 04:48:31 +03:00
c - > btree_reserve_cache_nr ) ;
2017-03-17 09:18:50 +03:00
}
/* Labels for the two I/O directions, indexed by READ/WRITE. */
static const char * const bch2_rw[] = {
	"read",
	"write",
	NULL
};
2020-07-26 00:06:11 +03:00
static void dev_iodone_to_text ( struct printbuf * out , struct bch_dev * ca )
2017-03-17 09:18:50 +03:00
{
2019-02-06 19:42:13 +03:00
int rw , i ;
2017-03-17 09:18:50 +03:00
for ( rw = 0 ; rw < 2 ; rw + + ) {
2020-07-26 00:06:11 +03:00
pr_buf ( out , " %s: \n " , bch2_rw [ rw ] ) ;
2017-03-17 09:18:50 +03:00
2019-02-06 19:42:13 +03:00
for ( i = 1 ; i < BCH_DATA_NR ; i + + )
2020-07-26 00:06:11 +03:00
pr_buf ( out , " %-12s:%12llu \n " ,
2019-02-06 19:42:13 +03:00
bch2_data_types [ i ] ,
percpu_u64_get ( & ca - > io_done - > sectors [ rw ] [ i ] ) < < 9 ) ;
2017-03-17 09:18:50 +03:00
}
}
SHOW ( bch2_dev )
{
struct bch_dev * ca = container_of ( kobj , struct bch_dev , kobj ) ;
struct bch_fs * c = ca - > fs ;
sysfs_printf ( uuid , " %pU \n " , ca - > uuid . b ) ;
sysfs_print ( bucket_size , bucket_bytes ( ca ) ) ;
sysfs_print ( first_bucket , ca - > mi . first_bucket ) ;
sysfs_print ( nbuckets , ca - > mi . nbuckets ) ;
sysfs_print ( durability , ca - > mi . durability ) ;
sysfs_print ( discard , ca - > mi . discard ) ;
if ( attr = = & sysfs_label ) {
if ( ca - > mi . group ) {
mutex_lock ( & c - > sb_lock ) ;
2022-02-25 21:18:19 +03:00
bch2_disk_path_to_text ( out , c - > disk_sb . sb ,
2018-11-09 09:24:07 +03:00
ca - > mi . group - 1 ) ;
2017-03-17 09:18:50 +03:00
mutex_unlock ( & c - > sb_lock ) ;
}
2022-02-25 21:18:19 +03:00
pr_char ( out , ' \n ' ) ;
2017-03-17 09:18:50 +03:00
}
if ( attr = = & sysfs_has_data ) {
2022-02-25 21:18:19 +03:00
bch2_flags_to_text ( out , bch2_data_types ,
2018-11-09 09:24:07 +03:00
bch2_dev_has_data ( c , ca ) ) ;
2022-02-25 21:18:19 +03:00
pr_char ( out , ' \n ' ) ;
2017-03-17 09:18:50 +03:00
}
if ( attr = = & sysfs_state_rw ) {
2022-02-25 21:18:19 +03:00
bch2_string_opt_to_text ( out , bch2_member_states ,
2018-11-09 09:24:07 +03:00
ca - > mi . state ) ;
2022-02-25 21:18:19 +03:00
pr_char ( out , ' \n ' ) ;
2017-03-17 09:18:50 +03:00
}
2022-02-25 21:18:19 +03:00
if ( attr = = & sysfs_iodone )
dev_iodone_to_text ( out , ca ) ;
2017-03-17 09:18:50 +03:00
sysfs_print ( io_latency_read , atomic64_read ( & ca - > cur_latency [ READ ] ) ) ;
sysfs_print ( io_latency_write , atomic64_read ( & ca - > cur_latency [ WRITE ] ) ) ;
2022-02-25 21:18:19 +03:00
if ( attr = = & sysfs_io_latency_stats_read )
bch2_time_stats_to_text ( out , & ca - > io_latency [ READ ] ) ;
if ( attr = = & sysfs_io_latency_stats_write )
bch2_time_stats_to_text ( out , & ca - > io_latency [ WRITE ] ) ;
2017-03-17 09:18:50 +03:00
sysfs_printf ( congested , " %u%% " ,
clamp ( atomic_read ( & ca - > congested ) , 0 , CONGESTED_MAX )
* 100 / CONGESTED_MAX ) ;
2022-02-25 21:18:19 +03:00
if ( attr = = & sysfs_alloc_debug )
dev_alloc_debug_to_text ( out , ca ) ;
2017-03-17 09:18:50 +03:00
return 0 ;
}
STORE ( bch2_dev )
{
struct bch_dev * ca = container_of ( kobj , struct bch_dev , kobj ) ;
struct bch_fs * c = ca - > fs ;
struct bch_member * mi ;
if ( attr = = & sysfs_discard ) {
bool v = strtoul_or_return ( buf ) ;
mutex_lock ( & c - > sb_lock ) ;
mi = & bch2_sb_get_members ( c - > disk_sb . sb ) - > members [ ca - > dev_idx ] ;
if ( v ! = BCH_MEMBER_DISCARD ( mi ) ) {
SET_BCH_MEMBER_DISCARD ( mi , v ) ;
bch2_write_super ( c ) ;
}
mutex_unlock ( & c - > sb_lock ) ;
}
if ( attr = = & sysfs_label ) {
char * tmp ;
int ret ;
tmp = kstrdup ( buf , GFP_KERNEL ) ;
if ( ! tmp )
return - ENOMEM ;
ret = bch2_dev_group_set ( c , ca , strim ( tmp ) ) ;
kfree ( tmp ) ;
if ( ret )
return ret ;
}
return size ;
}
SYSFS_OPS(bch2_dev);

/* Attributes exposed per member device. */
struct attribute *bch2_dev_files[] = {
	&sysfs_uuid,
	&sysfs_bucket_size,
	&sysfs_first_bucket,
	&sysfs_nbuckets,
	&sysfs_durability,

	/* settings: */
	&sysfs_discard,
	&sysfs_state_rw,
	&sysfs_label,

	&sysfs_has_data,
	&sysfs_iodone,

	&sysfs_io_latency_read,
	&sysfs_io_latency_write,
	&sysfs_io_latency_stats_read,
	&sysfs_io_latency_stats_write,
	&sysfs_congested,

	/* debug: */
	&sysfs_alloc_debug,
	NULL
};
#endif /* NO_BCACHEFS_SYSFS */