// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/bug.h>
#include <crypto/hash.h>

#include "ctree.h"
#include "discard.h"
#include "disk-io.h"
#include "send.h"
#include "transaction.h"
#include "sysfs.h"
#include "volumes.h"
#include "space-info.h"
#include "block-group.h"
#include "qgroup.h"
#include "misc.h"

/*
 * Structure name                        Path
 * --------------------------------------------------------------------------
 * btrfs_supported_static_feature_attrs  /sys/fs/btrfs/features
 * btrfs_supported_feature_attrs         /sys/fs/btrfs/features and
 *                                        /sys/fs/btrfs/<uuid>/features
 * btrfs_attrs                           /sys/fs/btrfs/<uuid>
 * devid_attrs                           /sys/fs/btrfs/<uuid>/devinfo/<devid>
 * allocation_attrs                      /sys/fs/btrfs/<uuid>/allocation
 * qgroup_attrs                          /sys/fs/btrfs/<uuid>/qgroups/<level>_<qgroupid>
 * space_info_attrs                      /sys/fs/btrfs/<uuid>/allocation/<bg-type>
 * raid_attrs                            /sys/fs/btrfs/<uuid>/allocation/<bg-type>/<bg-profile>
 * discard_attrs                         /sys/fs/btrfs/<uuid>/discard
 *
 * When built with BTRFS_CONFIG_DEBUG:
 *
 * btrfs_debug_feature_attrs             /sys/fs/btrfs/debug
 * btrfs_debug_mount_attrs               /sys/fs/btrfs/<uuid>/debug
 */

struct btrfs_feature_attr {
	struct kobj_attribute kobj_attr;
	enum btrfs_feature_set feature_set;
	u64 feature_bit;
};

/* For raid type sysfs entries */
struct raid_kobject {
	u64 flags;
	struct kobject kobj;
};

#define __INIT_KOBJ_ATTR(_name, _mode, _show, _store)	\
{							\
	.attr = { .name = __stringify(_name), .mode = _mode },	\
	.show = _show,					\
	.store = _store,				\
}

#define BTRFS_ATTR_W(_prefix, _name, _store)				\
	static struct kobj_attribute btrfs_attr_##_prefix##_##_name =	\
			__INIT_KOBJ_ATTR(_name, 0200, NULL, _store)

#define BTRFS_ATTR_RW(_prefix, _name, _show, _store)			\
	static struct kobj_attribute btrfs_attr_##_prefix##_##_name =	\
			__INIT_KOBJ_ATTR(_name, 0644, _show, _store)

#define BTRFS_ATTR(_prefix, _name, _show)				\
	static struct kobj_attribute btrfs_attr_##_prefix##_##_name =	\
			__INIT_KOBJ_ATTR(_name, 0444, _show, NULL)

#define BTRFS_ATTR_PTR(_prefix, _name)					\
	(&btrfs_attr_##_prefix##_##_name.attr)

#define BTRFS_FEAT_ATTR(_name, _feature_set, _feature_prefix, _feature_bit)  \
static struct btrfs_feature_attr btrfs_attr_features_##_name = {	     \
	.kobj_attr = __INIT_KOBJ_ATTR(_name, S_IRUGO,			     \
				      btrfs_feature_attr_show,		     \
				      btrfs_feature_attr_store),	     \
	.feature_set	= _feature_set,					     \
	.feature_bit	= _feature_prefix ##_## _feature_bit,		     \
}

#define BTRFS_FEAT_ATTR_PTR(_name)					     \
	(&btrfs_attr_features_##_name.kobj_attr.attr)

#define BTRFS_FEAT_ATTR_COMPAT(name, feature) \
	BTRFS_FEAT_ATTR(name, FEAT_COMPAT, BTRFS_FEATURE_COMPAT, feature)
#define BTRFS_FEAT_ATTR_COMPAT_RO(name, feature) \
	BTRFS_FEAT_ATTR(name, FEAT_COMPAT_RO, BTRFS_FEATURE_COMPAT_RO, feature)
#define BTRFS_FEAT_ATTR_INCOMPAT(name, feature) \
	BTRFS_FEAT_ATTR(name, FEAT_INCOMPAT, BTRFS_FEATURE_INCOMPAT, feature)
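
/*
 * Example (matching the definitions further below):
 * BTRFS_FEAT_ATTR_INCOMPAT(raid56, RAID56) defines btrfs_attr_features_raid56
 * with feature_set = FEAT_INCOMPAT and feature_bit =
 * BTRFS_FEATURE_INCOMPAT_RAID56, wired to the common
 * btrfs_feature_attr_show/store handlers. BTRFS_FEAT_ATTR_PTR(raid56) is then
 * used to place the attribute into an attribute array.
 */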

static inline struct btrfs_fs_info *to_fs_info(struct kobject *kobj);
static inline struct btrfs_fs_devices *to_fs_devs(struct kobject *kobj);
static struct kobject *get_btrfs_kobj(struct kobject *kobj);

static struct btrfs_feature_attr *to_btrfs_feature_attr(struct kobj_attribute *a)
{
	return container_of(a, struct btrfs_feature_attr, kobj_attr);
}

static struct kobj_attribute *attr_to_btrfs_attr(struct attribute *attr)
{
	return container_of(attr, struct kobj_attribute, attr);
}

static struct btrfs_feature_attr *attr_to_btrfs_feature_attr(
		struct attribute *attr)
{
	return to_btrfs_feature_attr(attr_to_btrfs_attr(attr));
}

static u64 get_features(struct btrfs_fs_info *fs_info,
			enum btrfs_feature_set set)
{
	struct btrfs_super_block *disk_super = fs_info->super_copy;

	if (set == FEAT_COMPAT)
		return btrfs_super_compat_flags(disk_super);
	else if (set == FEAT_COMPAT_RO)
		return btrfs_super_compat_ro_flags(disk_super);
	else
		return btrfs_super_incompat_flags(disk_super);
}

static void set_features(struct btrfs_fs_info *fs_info,
			 enum btrfs_feature_set set, u64 features)
{
	struct btrfs_super_block *disk_super = fs_info->super_copy;

	if (set == FEAT_COMPAT)
		btrfs_set_super_compat_flags(disk_super, features);
	else if (set == FEAT_COMPAT_RO)
		btrfs_set_super_compat_ro_flags(disk_super, features);
	else
		btrfs_set_super_incompat_flags(disk_super, features);
}
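
/*
 * Return a bitmask describing whether this feature can be toggled on a
 * mounted filesystem: bit 0 means the bit may be set online, bit 1 means it
 * may be cleared online, 0 means it cannot be changed at all.
 */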
static int can_modify_feature(struct btrfs_feature_attr *fa)
{
	int val = 0;
	u64 set, clear;

	switch (fa->feature_set) {
	case FEAT_COMPAT:
		set = BTRFS_FEATURE_COMPAT_SAFE_SET;
		clear = BTRFS_FEATURE_COMPAT_SAFE_CLEAR;
		break;
	case FEAT_COMPAT_RO:
		set = BTRFS_FEATURE_COMPAT_RO_SAFE_SET;
		clear = BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR;
		break;
	case FEAT_INCOMPAT:
		set = BTRFS_FEATURE_INCOMPAT_SAFE_SET;
		clear = BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR;
		break;
	default:
		pr_warn("btrfs: sysfs: unknown feature set %d\n",
			fa->feature_set);
		return 0;
	}

	if (set & fa->feature_bit)
		val |= 1;
	if (clear & fa->feature_bit)
		val |= 2;

	return val;
}

static ssize_t btrfs_feature_attr_show(struct kobject *kobj,
				       struct kobj_attribute *a, char *buf)
{
	int val = 0;
	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
	struct btrfs_feature_attr *fa = to_btrfs_feature_attr(a);

	if (fs_info) {
		u64 features = get_features(fs_info, fa->feature_set);

		if (features & fa->feature_bit)
			val = 1;
	} else
		val = can_modify_feature(fa);

	return sysfs_emit(buf, "%d\n", val);
}

static ssize_t btrfs_feature_attr_store(struct kobject *kobj,
					struct kobj_attribute *a,
					const char *buf, size_t count)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_feature_attr *fa = to_btrfs_feature_attr(a);
	u64 features, set, clear;
	unsigned long val;
	int ret;

	fs_info = to_fs_info(kobj);
	if (!fs_info)
		return -EPERM;

	if (sb_rdonly(fs_info->sb))
		return -EROFS;

	ret = kstrtoul(skip_spaces(buf), 0, &val);
	if (ret)
		return ret;

	if (fa->feature_set == FEAT_COMPAT) {
		set = BTRFS_FEATURE_COMPAT_SAFE_SET;
		clear = BTRFS_FEATURE_COMPAT_SAFE_CLEAR;
	} else if (fa->feature_set == FEAT_COMPAT_RO) {
		set = BTRFS_FEATURE_COMPAT_RO_SAFE_SET;
		clear = BTRFS_FEATURE_COMPAT_RO_SAFE_CLEAR;
	} else {
		set = BTRFS_FEATURE_INCOMPAT_SAFE_SET;
		clear = BTRFS_FEATURE_INCOMPAT_SAFE_CLEAR;
	}

	features = get_features(fs_info, fa->feature_set);

	/* Nothing to do */
	if ((val && (features & fa->feature_bit)) ||
	    (!val && !(features & fa->feature_bit)))
		return count;

	if ((val && !(set & fa->feature_bit)) ||
	    (!val && !(clear & fa->feature_bit))) {
		btrfs_info(fs_info,
			"%sabling feature %s on mounted fs is not supported.",
			val ? "En" : "Dis", fa->kobj_attr.attr.name);
		return -EPERM;
	}

	btrfs_info(fs_info, "%s %s feature flag",
		   val ? "Setting" : "Clearing", fa->kobj_attr.attr.name);

	spin_lock(&fs_info->super_lock);
	features = get_features(fs_info, fa->feature_set);
	if (val)
		features |= fa->feature_bit;
	else
		features &= ~fa->feature_bit;
	set_features(fs_info, fa->feature_set, features);
	spin_unlock(&fs_info->super_lock);

	/*
	 * We don't want to do full transaction commit from inside sysfs
	 */
	btrfs_set_pending(fs_info, COMMIT);
	wake_up_process(fs_info->transaction_kthread);

	return count;
}
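
/*
 * For example, writing "1" to /sys/fs/btrfs/<uuid>/features/<feature> sets
 * the corresponding bit in the superblock copy and wakes the transaction
 * kthread to commit it; writing "0" clears it. Only bits in the
 * *_SAFE_SET/*_SAFE_CLEAR masks can be changed this way on a mounted
 * filesystem.
 */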

static umode_t btrfs_feature_visible(struct kobject *kobj,
				     struct attribute *attr, int unused)
{
	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
	umode_t mode = attr->mode;

	if (fs_info) {
		struct btrfs_feature_attr *fa;
		u64 features;

		fa = attr_to_btrfs_feature_attr(attr);
		features = get_features(fs_info, fa->feature_set);

		if (can_modify_feature(fa))
			mode |= S_IWUSR;
		else if (!(features & fa->feature_bit))
			mode = 0;
	}

	return mode;
}

BTRFS_FEAT_ATTR_INCOMPAT(default_subvol, DEFAULT_SUBVOL);
BTRFS_FEAT_ATTR_INCOMPAT(mixed_groups, MIXED_GROUPS);
BTRFS_FEAT_ATTR_INCOMPAT(compress_lzo, COMPRESS_LZO);
BTRFS_FEAT_ATTR_INCOMPAT(compress_zstd, COMPRESS_ZSTD);
BTRFS_FEAT_ATTR_INCOMPAT(extended_iref, EXTENDED_IREF);
BTRFS_FEAT_ATTR_INCOMPAT(raid56, RAID56);
BTRFS_FEAT_ATTR_INCOMPAT(skinny_metadata, SKINNY_METADATA);
BTRFS_FEAT_ATTR_INCOMPAT(no_holes, NO_HOLES);
BTRFS_FEAT_ATTR_INCOMPAT(metadata_uuid, METADATA_UUID);
BTRFS_FEAT_ATTR_COMPAT_RO(free_space_tree, FREE_SPACE_TREE);
BTRFS_FEAT_ATTR_COMPAT_RO(block_group_tree, BLOCK_GROUP_TREE);
BTRFS_FEAT_ATTR_INCOMPAT(raid1c34, RAID1C34);
#ifdef CONFIG_BLK_DEV_ZONED
BTRFS_FEAT_ATTR_INCOMPAT(zoned, ZONED);
#endif
#ifdef CONFIG_BTRFS_DEBUG
/* Remove once support for extent tree v2 is feature complete */
BTRFS_FEAT_ATTR_INCOMPAT(extent_tree_v2, EXTENT_TREE_V2);
#endif
#ifdef CONFIG_FS_VERITY
BTRFS_FEAT_ATTR_COMPAT_RO(verity, VERITY);
#endif

/*
 * Features which depend on feature bits and may differ between each fs.
 *
 * /sys/fs/btrfs/features      - all available features implemented by this version
 * /sys/fs/btrfs/UUID/features - features of the fs which are enabled or
 *                               can be changed on a mounted filesystem.
 */
static struct attribute *btrfs_supported_feature_attrs[] = {
	BTRFS_FEAT_ATTR_PTR(default_subvol),
	BTRFS_FEAT_ATTR_PTR(mixed_groups),
	BTRFS_FEAT_ATTR_PTR(compress_lzo),
	BTRFS_FEAT_ATTR_PTR(compress_zstd),
	BTRFS_FEAT_ATTR_PTR(extended_iref),
	BTRFS_FEAT_ATTR_PTR(raid56),
	BTRFS_FEAT_ATTR_PTR(skinny_metadata),
	BTRFS_FEAT_ATTR_PTR(no_holes),
	BTRFS_FEAT_ATTR_PTR(metadata_uuid),
	BTRFS_FEAT_ATTR_PTR(free_space_tree),
	BTRFS_FEAT_ATTR_PTR(raid1c34),
	BTRFS_FEAT_ATTR_PTR(block_group_tree),
#ifdef CONFIG_BLK_DEV_ZONED
	BTRFS_FEAT_ATTR_PTR(zoned),
#endif
#ifdef CONFIG_BTRFS_DEBUG
	BTRFS_FEAT_ATTR_PTR(extent_tree_v2),
#endif
#ifdef CONFIG_FS_VERITY
	BTRFS_FEAT_ATTR_PTR(verity),
#endif
	NULL
};

static const struct attribute_group btrfs_feature_attr_group = {
	.name = "features",
	.is_visible = btrfs_feature_visible,
	.attrs = btrfs_supported_feature_attrs,
};

static ssize_t rmdir_subvol_show(struct kobject *kobj,
				 struct kobj_attribute *ka, char *buf)
{
	return sysfs_emit(buf, "0\n");
}
BTRFS_ATTR(static_feature, rmdir_subvol, rmdir_subvol_show);

static ssize_t supported_checksums_show(struct kobject *kobj,
					struct kobj_attribute *a, char *buf)
{
	ssize_t ret = 0;
	int i;

	for (i = 0; i < btrfs_get_num_csums(); i++) {
		/*
		 * This "trick" only works as long as 'enum btrfs_csum_type'
		 * has no holes in it
		 */
		ret += sysfs_emit_at(buf, ret, "%s%s", (i == 0 ? "" : " "),
				     btrfs_super_csum_name(i));
	}

	ret += sysfs_emit_at(buf, ret, "\n");
	return ret;
}
BTRFS_ATTR(static_feature, supported_checksums, supported_checksums_show);

static ssize_t send_stream_version_show(struct kobject *kobj,
					struct kobj_attribute *ka, char *buf)
{
	return sysfs_emit(buf, "%d\n", BTRFS_SEND_STREAM_VERSION);
}
BTRFS_ATTR(static_feature, send_stream_version, send_stream_version_show);

static const char *rescue_opts[] = {
	"usebackuproot",
	"nologreplay",
	"ignorebadroots",
	"ignoredatacsums",
	"all",
};

static ssize_t supported_rescue_options_show(struct kobject *kobj,
					     struct kobj_attribute *a,
					     char *buf)
{
	ssize_t ret = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(rescue_opts); i++)
		ret += sysfs_emit_at(buf, ret, "%s%s", (i ? " " : ""), rescue_opts[i]);
	ret += sysfs_emit_at(buf, ret, "\n");
	return ret;
}
BTRFS_ATTR(static_feature, supported_rescue_options,
	   supported_rescue_options_show);

static ssize_t supported_sectorsizes_show(struct kobject *kobj,
					  struct kobj_attribute *a,
					  char *buf)
{
	ssize_t ret = 0;

	/* An artificial limit to only support 4K and PAGE_SIZE */
	if (PAGE_SIZE > SZ_4K)
		ret += sysfs_emit_at(buf, ret, "%u ", SZ_4K);
	ret += sysfs_emit_at(buf, ret, "%lu\n", PAGE_SIZE);

	return ret;
}
BTRFS_ATTR(static_feature, supported_sectorsizes,
	   supported_sectorsizes_show);

/*
 * Features which only depend on kernel version.
 *
 * These are listed in /sys/fs/btrfs/features along with
 * btrfs_supported_feature_attrs.
 */
static struct attribute *btrfs_supported_static_feature_attrs[] = {
	BTRFS_ATTR_PTR(static_feature, rmdir_subvol),
	BTRFS_ATTR_PTR(static_feature, supported_checksums),
	BTRFS_ATTR_PTR(static_feature, send_stream_version),
	BTRFS_ATTR_PTR(static_feature, supported_rescue_options),
	BTRFS_ATTR_PTR(static_feature, supported_sectorsizes),
	NULL
};

static const struct attribute_group btrfs_static_feature_attr_group = {
	.name = "features",
	.attrs = btrfs_supported_static_feature_attrs,
};

/*
 * Discard statistics and tunables
 */
#define discard_to_fs_info(_kobj)	to_fs_info(get_btrfs_kobj(_kobj))
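
/*
 * The discard directory is a child of the per-filesystem kobject, so walk up
 * with get_btrfs_kobj() to the fsid kobject and resolve the fs_info from it.
 */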

static ssize_t btrfs_discardable_bytes_show(struct kobject *kobj,
					    struct kobj_attribute *a,
					    char *buf)
{
	struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj);

	return sysfs_emit(buf, "%lld\n",
			atomic64_read(&fs_info->discard_ctl.discardable_bytes));
}
BTRFS_ATTR(discard, discardable_bytes, btrfs_discardable_bytes_show);

static ssize_t btrfs_discardable_extents_show(struct kobject *kobj,
					      struct kobj_attribute *a,
					      char *buf)
{
	struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj);

	return sysfs_emit(buf, "%d\n",
			atomic_read(&fs_info->discard_ctl.discardable_extents));
}
BTRFS_ATTR(discard, discardable_extents, btrfs_discardable_extents_show);

static ssize_t btrfs_discard_bitmap_bytes_show(struct kobject *kobj,
					       struct kobj_attribute *a,
					       char *buf)
{
	struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj);

	return sysfs_emit(buf, "%llu\n",
			fs_info->discard_ctl.discard_bitmap_bytes);
}
BTRFS_ATTR(discard, discard_bitmap_bytes, btrfs_discard_bitmap_bytes_show);

static ssize_t btrfs_discard_bytes_saved_show(struct kobject *kobj,
					      struct kobj_attribute *a,
					      char *buf)
{
	struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj);

	return sysfs_emit(buf, "%lld\n",
			atomic64_read(&fs_info->discard_ctl.discard_bytes_saved));
}
BTRFS_ATTR(discard, discard_bytes_saved, btrfs_discard_bytes_saved_show);

static ssize_t btrfs_discard_extent_bytes_show(struct kobject *kobj,
					       struct kobj_attribute *a,
					       char *buf)
{
	struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj);

	return sysfs_emit(buf, "%llu\n",
			fs_info->discard_ctl.discard_extent_bytes);
}
BTRFS_ATTR(discard, discard_extent_bytes, btrfs_discard_extent_bytes_show);

static ssize_t btrfs_discard_iops_limit_show(struct kobject *kobj,
					     struct kobj_attribute *a,
					     char *buf)
{
	struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj);

	return sysfs_emit(buf, "%u\n",
			READ_ONCE(fs_info->discard_ctl.iops_limit));
}

static ssize_t btrfs_discard_iops_limit_store(struct kobject *kobj,
					      struct kobj_attribute *a,
					      const char *buf, size_t len)
{
	struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj);
	struct btrfs_discard_ctl *discard_ctl = &fs_info->discard_ctl;
	u32 iops_limit;
	int ret;

	ret = kstrtou32(buf, 10, &iops_limit);
	if (ret)
		return -EINVAL;

	WRITE_ONCE(discard_ctl->iops_limit, iops_limit);
	btrfs_discard_calc_delay(discard_ctl);
	btrfs_discard_schedule_work(discard_ctl, true);
	return len;
}
BTRFS_ATTR_RW(discard, iops_limit, btrfs_discard_iops_limit_show,
	      btrfs_discard_iops_limit_store);

static ssize_t btrfs_discard_kbps_limit_show(struct kobject *kobj,
					     struct kobj_attribute *a,
					     char *buf)
{
	struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj);

	return sysfs_emit(buf, "%u\n",
			READ_ONCE(fs_info->discard_ctl.kbps_limit));
}

static ssize_t btrfs_discard_kbps_limit_store(struct kobject *kobj,
					      struct kobj_attribute *a,
					      const char *buf, size_t len)
{
	struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj);
	struct btrfs_discard_ctl *discard_ctl = &fs_info->discard_ctl;
	u32 kbps_limit;
	int ret;

	ret = kstrtou32(buf, 10, &kbps_limit);
	if (ret)
		return -EINVAL;

	WRITE_ONCE(discard_ctl->kbps_limit, kbps_limit);
	btrfs_discard_schedule_work(discard_ctl, true);
	return len;
}
BTRFS_ATTR_RW(discard, kbps_limit, btrfs_discard_kbps_limit_show,
	      btrfs_discard_kbps_limit_store);

static ssize_t btrfs_discard_max_discard_size_show(struct kobject *kobj,
						   struct kobj_attribute *a,
						   char *buf)
{
	struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj);

	return sysfs_emit(buf, "%llu\n",
			READ_ONCE(fs_info->discard_ctl.max_discard_size));
}

static ssize_t btrfs_discard_max_discard_size_store(struct kobject *kobj,
						    struct kobj_attribute *a,
						    const char *buf, size_t len)
{
	struct btrfs_fs_info *fs_info = discard_to_fs_info(kobj);
	struct btrfs_discard_ctl *discard_ctl = &fs_info->discard_ctl;
	u64 max_discard_size;
	int ret;

	ret = kstrtou64(buf, 10, &max_discard_size);
	if (ret)
		return -EINVAL;

	WRITE_ONCE(discard_ctl->max_discard_size, max_discard_size);
	return len;
}
BTRFS_ATTR_RW(discard, max_discard_size, btrfs_discard_max_discard_size_show,
	      btrfs_discard_max_discard_size_store);

/*
 * Per-filesystem stats for discard (when mounted with discard=async).
 *
 * Path: /sys/fs/btrfs/<uuid>/discard/
 */
static const struct attribute *discard_attrs[] = {
	BTRFS_ATTR_PTR(discard, discardable_bytes),
	BTRFS_ATTR_PTR(discard, discardable_extents),
	BTRFS_ATTR_PTR(discard, discard_bitmap_bytes),
	BTRFS_ATTR_PTR(discard, discard_bytes_saved),
	BTRFS_ATTR_PTR(discard, discard_extent_bytes),
	BTRFS_ATTR_PTR(discard, iops_limit),
	BTRFS_ATTR_PTR(discard, kbps_limit),
	BTRFS_ATTR_PTR(discard, max_discard_size),
	NULL,
};

#ifdef CONFIG_BTRFS_DEBUG

/*
 * Per-filesystem runtime debugging exported via sysfs.
 *
 * Path: /sys/fs/btrfs/UUID/debug/
 */
static const struct attribute *btrfs_debug_mount_attrs[] = {
	NULL,
};

/*
 * Runtime debugging exported via sysfs, applies to all mounted filesystems.
 *
 * Path: /sys/fs/btrfs/debug
 */
static struct attribute *btrfs_debug_feature_attrs[] = {
	NULL
};

static const struct attribute_group btrfs_debug_feature_attr_group = {
	.name = "debug",
	.attrs = btrfs_debug_feature_attrs,
};
#endif

static ssize_t btrfs_show_u64(u64 *value_ptr, spinlock_t *lock, char *buf)
{
	u64 val;

	if (lock)
		spin_lock(lock);
	val = *value_ptr;
	if (lock)
		spin_unlock(lock);
	return sysfs_emit(buf, "%llu\n", val);
}

static ssize_t global_rsv_size_show(struct kobject *kobj,
				    struct kobj_attribute *ka, char *buf)
{
	struct btrfs_fs_info *fs_info = to_fs_info(kobj->parent);
	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;

	return btrfs_show_u64(&block_rsv->size, &block_rsv->lock, buf);
}
BTRFS_ATTR(allocation, global_rsv_size, global_rsv_size_show);

static ssize_t global_rsv_reserved_show(struct kobject *kobj,
					struct kobj_attribute *a, char *buf)
{
	struct btrfs_fs_info *fs_info = to_fs_info(kobj->parent);
	struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;

	return btrfs_show_u64(&block_rsv->reserved, &block_rsv->lock, buf);
}
BTRFS_ATTR(allocation, global_rsv_reserved, global_rsv_reserved_show);

#define to_space_info(_kobj) container_of(_kobj, struct btrfs_space_info, kobj)
#define to_raid_kobj(_kobj) container_of(_kobj, struct raid_kobject, kobj)

static ssize_t raid_bytes_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf);
BTRFS_ATTR(raid, total_bytes, raid_bytes_show);
BTRFS_ATTR(raid, used_bytes, raid_bytes_show);

static ssize_t raid_bytes_show(struct kobject *kobj,
			       struct kobj_attribute *attr, char *buf)
{
	struct btrfs_space_info *sinfo = to_space_info(kobj->parent);
	struct btrfs_block_group *block_group;
	int index = btrfs_bg_flags_to_raid_index(to_raid_kobj(kobj)->flags);
	u64 val = 0;

	down_read(&sinfo->groups_sem);
	list_for_each_entry(block_group, &sinfo->block_groups[index], list) {
		if (&attr->attr == BTRFS_ATTR_PTR(raid, total_bytes))
			val += block_group->length;
		else
			val += block_group->used;
	}
	up_read(&sinfo->groups_sem);
	return sysfs_emit(buf, "%llu\n", val);
}

/*
 * Allocation information about block group profiles.
 *
 * Path: /sys/fs/btrfs/<uuid>/allocation/<bg-type>/<bg-profile>/
 */
static struct attribute *raid_attrs[] = {
	BTRFS_ATTR_PTR(raid, total_bytes),
	BTRFS_ATTR_PTR(raid, used_bytes),
	NULL
};
ATTRIBUTE_GROUPS(raid);

static void release_raid_kobj(struct kobject *kobj)
{
	kfree(to_raid_kobj(kobj));
}

static struct kobj_type btrfs_raid_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.release = release_raid_kobj,
	.default_groups = raid_groups,
};

#define SPACE_INFO_ATTR(field)						\
static ssize_t btrfs_space_info_show_##field(struct kobject *kobj,	\
					     struct kobj_attribute *a,	\
					     char *buf)			\
{									\
	struct btrfs_space_info *sinfo = to_space_info(kobj);		\
	return btrfs_show_u64(&sinfo->field, &sinfo->lock, buf);	\
}									\
BTRFS_ATTR(space_info, field, btrfs_space_info_show_##field)
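
/*
 * For example, SPACE_INFO_ATTR(total_bytes) below defines
 * btrfs_space_info_show_total_bytes() and a read-only "total_bytes" attribute
 * that prints sinfo->total_bytes under sinfo->lock.
 */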

static ssize_t btrfs_chunk_size_show(struct kobject *kobj,
				     struct kobj_attribute *a, char *buf)
{
	struct btrfs_space_info *sinfo = to_space_info(kobj);

	return sysfs_emit(buf, "%llu\n", READ_ONCE(sinfo->chunk_size));
}

/*
 * Store new chunk size in space info. Can be called on a read-only filesystem.
 *
 * If the new chunk size value is larger than 10% of free space it is reduced
 * to match that limit. Alignment must be to 256M and the system chunk size
 * cannot be set.
 */
static ssize_t btrfs_chunk_size_store(struct kobject *kobj,
				      struct kobj_attribute *a,
				      const char *buf, size_t len)
{
	struct btrfs_space_info *space_info = to_space_info(kobj);
	struct btrfs_fs_info *fs_info = to_fs_info(get_btrfs_kobj(kobj));
	char *retptr;
	u64 val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!fs_info->fs_devices)
		return -EINVAL;

	if (btrfs_is_zoned(fs_info))
		return -EINVAL;

	/* System block type must not be changed. */
	if (space_info->flags & BTRFS_BLOCK_GROUP_SYSTEM)
		return -EPERM;

	val = memparse(buf, &retptr);
	/* There could be trailing '\n', also catch any typos after the value */
	retptr = skip_spaces(retptr);
	if (*retptr != 0 || val == 0)
		return -EINVAL;

	val = min(val, BTRFS_MAX_DATA_CHUNK_SIZE);

	/* Limit stripe size to 10% of available space. */
	val = min(div_factor(fs_info->fs_devices->total_rw_bytes, 1), val);

	/* Must be multiple of 256M. */
	val &= ~((u64)SZ_256M - 1);

	/* Must be at least 256M. */
	if (val < SZ_256M)
		return -EINVAL;

	btrfs_update_space_info_chunk_size(space_info, val);

	return len;
}
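
/*
 * Example: "echo 1G > /sys/fs/btrfs/<uuid>/allocation/data/chunk_size" asks
 * for 1 GiB data chunks; the value is parsed with memparse() (so K/M/G
 * suffixes work), capped to BTRFS_MAX_DATA_CHUNK_SIZE and 10% of the
 * writeable device space, and rounded down to a 256M multiple.
 */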

#ifdef CONFIG_BTRFS_DEBUG
/*
 * Request chunk allocation with current chunk size.
 */
static ssize_t btrfs_force_chunk_alloc_store(struct kobject *kobj,
					     struct kobj_attribute *a,
					     const char *buf, size_t len)
{
	struct btrfs_space_info *space_info = to_space_info(kobj);
	struct btrfs_fs_info *fs_info = to_fs_info(get_btrfs_kobj(kobj));
	struct btrfs_trans_handle *trans;
	bool val;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (sb_rdonly(fs_info->sb))
		return -EROFS;

	ret = kstrtobool(buf, &val);
	if (ret)
		return ret;

	if (!val)
		return -EINVAL;

	/*
	 * This is unsafe to be called from sysfs context and may cause
	 * unexpected problems.
	 */
	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans))
		return PTR_ERR(trans);
	ret = btrfs_force_chunk_alloc(trans, space_info->flags);
	btrfs_end_transaction(trans);

	if (ret == 1)
		return len;

	return -ENOSPC;
}
BTRFS_ATTR_W(space_info, force_chunk_alloc, btrfs_force_chunk_alloc_store);
#endif

SPACE_INFO_ATTR(flags);
SPACE_INFO_ATTR(total_bytes);
SPACE_INFO_ATTR(bytes_used);
SPACE_INFO_ATTR(bytes_pinned);
SPACE_INFO_ATTR(bytes_reserved);
SPACE_INFO_ATTR(bytes_may_use);
SPACE_INFO_ATTR(bytes_readonly);
SPACE_INFO_ATTR(bytes_zone_unusable);
SPACE_INFO_ATTR(disk_used);
SPACE_INFO_ATTR(disk_total);
BTRFS_ATTR_RW(space_info, chunk_size, btrfs_chunk_size_show, btrfs_chunk_size_store);

static ssize_t btrfs_sinfo_bg_reclaim_threshold_show(struct kobject *kobj,
						     struct kobj_attribute *a,
						     char *buf)
{
	struct btrfs_space_info *space_info = to_space_info(kobj);
	ssize_t ret;

	ret = sysfs_emit(buf, "%d\n", READ_ONCE(space_info->bg_reclaim_threshold));

	return ret;
}

static ssize_t btrfs_sinfo_bg_reclaim_threshold_store(struct kobject *kobj,
						      struct kobj_attribute *a,
						      const char *buf, size_t len)
{
	struct btrfs_space_info *space_info = to_space_info(kobj);
	int thresh;
	int ret;

	ret = kstrtoint(buf, 10, &thresh);
	if (ret)
		return ret;

	if (thresh < 0 || thresh > 100)
		return -EINVAL;

	WRITE_ONCE(space_info->bg_reclaim_threshold, thresh);

	return len;
}
BTRFS_ATTR_RW(space_info, bg_reclaim_threshold,
	      btrfs_sinfo_bg_reclaim_threshold_show,
	      btrfs_sinfo_bg_reclaim_threshold_store);

/*
 * Allocation information about block group types.
 *
 * Path: /sys/fs/btrfs/<uuid>/allocation/<bg-type>/
 */
static struct attribute *space_info_attrs[] = {
	BTRFS_ATTR_PTR(space_info, flags),
	BTRFS_ATTR_PTR(space_info, total_bytes),
	BTRFS_ATTR_PTR(space_info, bytes_used),
	BTRFS_ATTR_PTR(space_info, bytes_pinned),
	BTRFS_ATTR_PTR(space_info, bytes_reserved),
	BTRFS_ATTR_PTR(space_info, bytes_may_use),
	BTRFS_ATTR_PTR(space_info, bytes_readonly),
	BTRFS_ATTR_PTR(space_info, bytes_zone_unusable),
	BTRFS_ATTR_PTR(space_info, disk_used),
	BTRFS_ATTR_PTR(space_info, disk_total),
	BTRFS_ATTR_PTR(space_info, bg_reclaim_threshold),
	BTRFS_ATTR_PTR(space_info, chunk_size),
#ifdef CONFIG_BTRFS_DEBUG
	BTRFS_ATTR_PTR(space_info, force_chunk_alloc),
#endif
	NULL,
};
ATTRIBUTE_GROUPS(space_info);

static void space_info_release(struct kobject *kobj)
{
	struct btrfs_space_info *sinfo = to_space_info(kobj);

	kfree(sinfo);
}

static struct kobj_type space_info_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.release = space_info_release,
	.default_groups = space_info_groups,
};

/*
 * Allocation information about block groups.
 *
 * Path: /sys/fs/btrfs/<uuid>/allocation/
 */
static const struct attribute *allocation_attrs[] = {
	BTRFS_ATTR_PTR(allocation, global_rsv_reserved),
	BTRFS_ATTR_PTR(allocation, global_rsv_size),
	NULL,
};

static ssize_t btrfs_label_show(struct kobject *kobj,
				struct kobj_attribute *a, char *buf)
{
	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
	char *label = fs_info->super_copy->label;
	ssize_t ret;

	spin_lock(&fs_info->super_lock);
	ret = sysfs_emit(buf, label[0] ? "%s\n" : "%s", label);
	spin_unlock(&fs_info->super_lock);

	return ret;
}

static ssize_t btrfs_label_store(struct kobject *kobj,
				 struct kobj_attribute *a,
				 const char *buf, size_t len)
{
	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
	size_t p_len;

	if (!fs_info)
		return -EPERM;

	if (sb_rdonly(fs_info->sb))
		return -EROFS;

	/*
	 * p_len is the len until the first occurrence of either
	 * '\n' or '\0'
	 */
	p_len = strcspn(buf, "\n");

	if (p_len >= BTRFS_LABEL_SIZE)
		return -EINVAL;

	spin_lock(&fs_info->super_lock);
	memset(fs_info->super_copy->label, 0, BTRFS_LABEL_SIZE);
	memcpy(fs_info->super_copy->label, buf, p_len);
	spin_unlock(&fs_info->super_lock);

	/*
	 * We don't want to do full transaction commit from inside sysfs
	 */
	btrfs_set_pending(fs_info, COMMIT);
	wake_up_process(fs_info->transaction_kthread);

	return len;
}
BTRFS_ATTR_RW(, label, btrfs_label_show, btrfs_label_store);

static ssize_t btrfs_nodesize_show(struct kobject *kobj,
				   struct kobj_attribute *a, char *buf)
{
	struct btrfs_fs_info *fs_info = to_fs_info(kobj);

	return sysfs_emit(buf, "%u\n", fs_info->super_copy->nodesize);
}
BTRFS_ATTR(, nodesize, btrfs_nodesize_show);

static ssize_t btrfs_sectorsize_show(struct kobject *kobj,
				     struct kobj_attribute *a, char *buf)
{
	struct btrfs_fs_info *fs_info = to_fs_info(kobj);

	return sysfs_emit(buf, "%u\n", fs_info->super_copy->sectorsize);
}
BTRFS_ATTR(, sectorsize, btrfs_sectorsize_show);

static ssize_t btrfs_commit_stats_show(struct kobject *kobj,
				       struct kobj_attribute *a, char *buf)
{
	struct btrfs_fs_info *fs_info = to_fs_info(kobj);

	return sysfs_emit(buf,
		"commits %llu\n"
		"last_commit_ms %llu\n"
		"max_commit_ms %llu\n"
		"total_commit_ms %llu\n",
		fs_info->commit_stats.commit_count,
		div_u64(fs_info->commit_stats.last_commit_dur, NSEC_PER_MSEC),
		div_u64(fs_info->commit_stats.max_commit_dur, NSEC_PER_MSEC),
		div_u64(fs_info->commit_stats.total_commit_dur, NSEC_PER_MSEC));
}

static ssize_t btrfs_commit_stats_store(struct kobject *kobj,
					struct kobj_attribute *a,
					const char *buf, size_t len)
{
	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
	unsigned long val;
	int ret;

	if (!fs_info)
		return -EPERM;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	ret = kstrtoul(buf, 10, &val);
	if (ret)
		return ret;
	if (val)
		return -EINVAL;

	WRITE_ONCE(fs_info->commit_stats.max_commit_dur, 0);

	return len;
}
BTRFS_ATTR_RW(, commit_stats, btrfs_commit_stats_show, btrfs_commit_stats_store);
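
/*
 * Reading /sys/fs/btrfs/<uuid>/commit_stats reports the commit count and the
 * last/max/total commit durations in milliseconds; writing "0" (with
 * CAP_SYS_RESOURCE) resets max_commit_ms, any other value is rejected.
 */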

static ssize_t btrfs_clone_alignment_show(struct kobject *kobj,
					  struct kobj_attribute *a, char *buf)
{
	struct btrfs_fs_info *fs_info = to_fs_info(kobj);

	return sysfs_emit(buf, "%u\n", fs_info->super_copy->sectorsize);
}
BTRFS_ATTR(, clone_alignment, btrfs_clone_alignment_show);

static ssize_t quota_override_show(struct kobject *kobj,
				   struct kobj_attribute *a, char *buf)
{
	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
	int quota_override;

	quota_override = test_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags);
	return sysfs_emit(buf, "%d\n", quota_override);
}

static ssize_t quota_override_store(struct kobject *kobj,
				    struct kobj_attribute *a,
				    const char *buf, size_t len)
{
	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
	unsigned long knob;
	int err;

	if (!fs_info)
		return -EPERM;

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	err = kstrtoul(buf, 10, &knob);
	if (err)
		return err;
	if (knob > 1)
		return -EINVAL;

	if (knob)
		set_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags);
	else
		clear_bit(BTRFS_FS_QUOTA_OVERRIDE, &fs_info->flags);

	return len;
}
BTRFS_ATTR_RW(, quota_override, quota_override_show, quota_override_store);

static ssize_t btrfs_metadata_uuid_show(struct kobject *kobj,
					struct kobj_attribute *a, char *buf)
{
	struct btrfs_fs_info *fs_info = to_fs_info(kobj);

	return sysfs_emit(buf, "%pU\n", fs_info->fs_devices->metadata_uuid);
}
BTRFS_ATTR(, metadata_uuid, btrfs_metadata_uuid_show);

static ssize_t btrfs_checksum_show(struct kobject *kobj,
				   struct kobj_attribute *a, char *buf)
{
	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
	u16 csum_type = btrfs_super_csum_type(fs_info->super_copy);

	return sysfs_emit(buf, "%s (%s)\n",
			btrfs_super_csum_name(csum_type),
			crypto_shash_driver_name(fs_info->csum_shash));
}
BTRFS_ATTR(, checksum, btrfs_checksum_show);

static ssize_t btrfs_exclusive_operation_show(struct kobject *kobj,
					      struct kobj_attribute *a, char *buf)
{
	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
	const char *str;

	switch (READ_ONCE(fs_info->exclusive_operation)) {
	case BTRFS_EXCLOP_NONE:
		str = "none\n";
		break;
	case BTRFS_EXCLOP_BALANCE:
		str = "balance\n";
		break;
	case BTRFS_EXCLOP_BALANCE_PAUSED:
		str = "balance paused\n";
		break;
	case BTRFS_EXCLOP_DEV_ADD:
		str = "device add\n";
		break;
	case BTRFS_EXCLOP_DEV_REMOVE:
		str = "device remove\n";
		break;
	case BTRFS_EXCLOP_DEV_REPLACE:
		str = "device replace\n";
		break;
	case BTRFS_EXCLOP_RESIZE:
		str = "resize\n";
		break;
	case BTRFS_EXCLOP_SWAP_ACTIVATE:
		str = "swap activate\n";
		break;
	default:
		str = "UNKNOWN\n";
		break;
	}
	return sysfs_emit(buf, "%s", str);
}
BTRFS_ATTR(, exclusive_operation, btrfs_exclusive_operation_show);

static ssize_t btrfs_generation_show(struct kobject *kobj,
				     struct kobj_attribute *a, char *buf)
{
	struct btrfs_fs_info *fs_info = to_fs_info(kobj);

	return sysfs_emit(buf, "%llu\n", fs_info->generation);
}
BTRFS_ATTR(, generation, btrfs_generation_show);

static const char * const btrfs_read_policy_name[] = { "pid" };

static ssize_t btrfs_read_policy_show(struct kobject *kobj,
				      struct kobj_attribute *a, char *buf)
{
	struct btrfs_fs_devices *fs_devices = to_fs_devs(kobj);
	ssize_t ret = 0;
	int i;

	for (i = 0; i < BTRFS_NR_READ_POLICY; i++) {
		if (fs_devices->read_policy == i)
			ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s[%s]",
					 (ret == 0 ? "" : " "),
					 btrfs_read_policy_name[i]);
		else
			ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
					 (ret == 0 ? "" : " "),
					 btrfs_read_policy_name[i]);
	}

	ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");

	return ret;
}

static ssize_t btrfs_read_policy_store(struct kobject *kobj,
				       struct kobj_attribute *a,
				       const char *buf, size_t len)
{
	struct btrfs_fs_devices *fs_devices = to_fs_devs(kobj);
	int i;

	for (i = 0; i < BTRFS_NR_READ_POLICY; i++) {
		if (sysfs_streq(buf, btrfs_read_policy_name[i])) {
			if (i != fs_devices->read_policy) {
				fs_devices->read_policy = i;
				btrfs_info(fs_devices->fs_info,
					   "read policy set to '%s'",
					   btrfs_read_policy_name[i]);
			}
			return len;
		}
	}

	return -EINVAL;
}
BTRFS_ATTR_RW(, read_policy, btrfs_read_policy_show, btrfs_read_policy_store);
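
/*
 * Example: reading /sys/fs/btrfs/<uuid>/read_policy lists the available
 * policies with the active one in brackets (e.g. "[pid]"); writing a policy
 * name such as "pid" selects it.
 */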

static ssize_t btrfs_bg_reclaim_threshold_show(struct kobject *kobj,
					       struct kobj_attribute *a,
					       char *buf)
{
	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
	ssize_t ret;

	ret = sysfs_emit(buf, "%d\n", READ_ONCE(fs_info->bg_reclaim_threshold));

	return ret;
}

static ssize_t btrfs_bg_reclaim_threshold_store(struct kobject *kobj,
						struct kobj_attribute *a,
						const char *buf, size_t len)
{
	struct btrfs_fs_info *fs_info = to_fs_info(kobj);
	int thresh;
	int ret;

	ret = kstrtoint(buf, 10, &thresh);
	if (ret)
		return ret;

	if (thresh != 0 && (thresh <= 50 || thresh > 100))
		return -EINVAL;

	WRITE_ONCE(fs_info->bg_reclaim_threshold, thresh);

	return len;
}
BTRFS_ATTR_RW(, bg_reclaim_threshold, btrfs_bg_reclaim_threshold_show,
	      btrfs_bg_reclaim_threshold_store);

/*
 * Per-filesystem information and stats.
 *
 * Path: /sys/fs/btrfs/<uuid>/
 */
static const struct attribute *btrfs_attrs[] = {
	BTRFS_ATTR_PTR(, label),
	BTRFS_ATTR_PTR(, nodesize),
	BTRFS_ATTR_PTR(, sectorsize),
	BTRFS_ATTR_PTR(, clone_alignment),
	BTRFS_ATTR_PTR(, quota_override),
	BTRFS_ATTR_PTR(, metadata_uuid),
	BTRFS_ATTR_PTR(, checksum),
	BTRFS_ATTR_PTR(, exclusive_operation),
	BTRFS_ATTR_PTR(, generation),
	BTRFS_ATTR_PTR(, read_policy),
	BTRFS_ATTR_PTR(, bg_reclaim_threshold),
	BTRFS_ATTR_PTR(, commit_stats),
	NULL,
};

static void btrfs_release_fsid_kobj(struct kobject *kobj)
{
	struct btrfs_fs_devices *fs_devs = to_fs_devs(kobj);

	memset(&fs_devs->fsid_kobj, 0, sizeof(struct kobject));
	complete(&fs_devs->kobj_unregister);
}

static struct kobj_type btrfs_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.release = btrfs_release_fsid_kobj,
};

static inline struct btrfs_fs_devices *to_fs_devs(struct kobject *kobj)
{
	if (kobj->ktype != &btrfs_ktype)
		return NULL;
	return container_of(kobj, struct btrfs_fs_devices, fsid_kobj);
}

static inline struct btrfs_fs_info *to_fs_info(struct kobject *kobj)
{
	if (kobj->ktype != &btrfs_ktype)
		return NULL;
	return to_fs_devs(kobj)->fs_info;
}

static struct kobject *get_btrfs_kobj(struct kobject *kobj)
{
	while (kobj) {
		if (kobj->ktype == &btrfs_ktype)
			return kobj;
		kobj = kobj->parent;
	}
	return NULL;
}

#define NUM_FEATURE_BITS 64
#define BTRFS_FEATURE_NAME_MAX 13
static char btrfs_unknown_feature_names[FEAT_MAX][NUM_FEATURE_BITS][BTRFS_FEATURE_NAME_MAX];
static struct btrfs_feature_attr btrfs_feature_attrs[FEAT_MAX][NUM_FEATURE_BITS];

static_assert(ARRAY_SIZE(btrfs_unknown_feature_names) ==
	      ARRAY_SIZE(btrfs_feature_attrs));
static_assert(ARRAY_SIZE(btrfs_unknown_feature_names[0]) ==
	      ARRAY_SIZE(btrfs_feature_attrs[0]));

static const u64 supported_feature_masks[FEAT_MAX] = {
	[FEAT_COMPAT]    = BTRFS_FEATURE_COMPAT_SUPP,
	[FEAT_COMPAT_RO] = BTRFS_FEATURE_COMPAT_RO_SUPP,
	[FEAT_INCOMPAT]  = BTRFS_FEATURE_INCOMPAT_SUPP,
};
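
/*
 * Expose or hide sysfs entries for feature bits that are set in the
 * superblock but unknown to this kernel (i.e. not covered by the supported
 * masks above). Such bits show up under features/ with generated names like
 * "compat:10", built in init_feature_attrs() below.
 */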
static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
{
	int set;

	for (set = 0; set < FEAT_MAX; set++) {
		int i;
		struct attribute *attrs[2];
		struct attribute_group agroup = {
			.name = "features",
			.attrs = attrs,
		};
		u64 features = get_features(fs_info, set);

		features &= ~supported_feature_masks[set];
		if (!features)
			continue;

		attrs[1] = NULL;
		for (i = 0; i < NUM_FEATURE_BITS; i++) {
			struct btrfs_feature_attr *fa;

			if (!(features & (1ULL << i)))
				continue;

			fa = &btrfs_feature_attrs[set][i];
			attrs[0] = &fa->kobj_attr.attr;
			if (add) {
				int ret;

				ret = sysfs_merge_group(&fs_info->fs_devices->fsid_kobj,
							&agroup);
				if (ret)
					return ret;
			} else
				sysfs_unmerge_group(&fs_info->fs_devices->fsid_kobj,
						    &agroup);
		}
	}
	return 0;
}

static void __btrfs_sysfs_remove_fsid(struct btrfs_fs_devices *fs_devs)
{
	if (fs_devs->devinfo_kobj) {
		kobject_del(fs_devs->devinfo_kobj);
		kobject_put(fs_devs->devinfo_kobj);
		fs_devs->devinfo_kobj = NULL;
	}

	if (fs_devs->devices_kobj) {
		kobject_del(fs_devs->devices_kobj);
		kobject_put(fs_devs->devices_kobj);
		fs_devs->devices_kobj = NULL;
	}

	if (fs_devs->fsid_kobj.state_initialized) {
		kobject_del(&fs_devs->fsid_kobj);
		kobject_put(&fs_devs->fsid_kobj);
		wait_for_completion(&fs_devs->kobj_unregister);
	}
}
2015-03-10 01:38:32 +03:00
/* when fs_devs is NULL it will remove all fsid kobject */
2015-03-10 01:38:37 +03:00
void btrfs_sysfs_remove_fsid ( struct btrfs_fs_devices * fs_devs )
2015-03-10 01:38:32 +03:00
{
struct list_head * fs_uuids = btrfs_get_fs_uuids ( ) ;
if ( fs_devs ) {
__btrfs_sysfs_remove_fsid ( fs_devs ) ;
return ;
}
2018-04-12 05:29:25 +03:00
list_for_each_entry ( fs_devs , fs_uuids , fs_list ) {
2015-03-10 01:38:32 +03:00
__btrfs_sysfs_remove_fsid ( fs_devs ) ;
}
}
2020-09-04 20:34:27 +03:00
static void btrfs_sysfs_remove_fs_devices ( struct btrfs_fs_devices * fs_devices )
{
struct btrfs_device * device ;
2020-09-04 20:34:28 +03:00
struct btrfs_fs_devices * seed ;
2020-09-04 20:34:27 +03:00
list_for_each_entry ( device , & fs_devices - > devices , dev_list )
btrfs_sysfs_remove_device ( device ) ;
2020-09-04 20:34:28 +03:00
list_for_each_entry ( seed , & fs_devices - > seed_list , seed_list ) {
list_for_each_entry ( device , & seed - > devices , dev_list )
btrfs_sysfs_remove_device ( device ) ;
}
2020-09-04 20:34:27 +03:00
}
2015-08-14 13:32:47 +03:00
void btrfs_sysfs_remove_mounted ( struct btrfs_fs_info * fs_info )
2013-11-21 19:37:16 +04:00
{
2020-07-03 11:13:15 +03:00
struct kobject * fsid_kobj = & fs_info - > fs_devices - > fsid_kobj ;
sysfs_remove_link ( fsid_kobj , " bdi " ) ;
2013-11-21 19:37:16 +04:00
if ( fs_info - > space_info_kobj ) {
sysfs_remove_files ( fs_info - > space_info_kobj , allocation_attrs ) ;
kobject_del ( fs_info - > space_info_kobj ) ;
kobject_put ( fs_info - > space_info_kobj ) ;
}
2022-07-25 22:15:15 +03:00
if ( fs_info - > discard_kobj ) {
sysfs_remove_files ( fs_info - > discard_kobj , discard_attrs ) ;
kobject_del ( fs_info - > discard_kobj ) ;
kobject_put ( fs_info - > discard_kobj ) ;
2019-12-14 03:22:19 +03:00
}
2022-07-25 22:15:15 +03:00
# ifdef CONFIG_BTRFS_DEBUG
2019-12-14 03:22:18 +03:00
if ( fs_info - > debug_kobj ) {
sysfs_remove_files ( fs_info - > debug_kobj , btrfs_debug_mount_attrs ) ;
kobject_del ( fs_info - > debug_kobj ) ;
kobject_put ( fs_info - > debug_kobj ) ;
}
2019-12-14 03:22:17 +03:00
# endif
2013-11-21 19:37:16 +04:00
addrm_unknown_feature_attrs ( fs_info , false ) ;
2020-07-03 11:13:15 +03:00
sysfs_remove_group ( fsid_kobj , & btrfs_feature_attr_group ) ;
sysfs_remove_files ( fsid_kobj , btrfs_attrs ) ;
btrfs_sysfs_remove_fs_devices ( fs_info - > fs_devices ) ;
}
static const char * const btrfs_feature_set_names [ FEAT_MAX ] = {
[ FEAT_COMPAT ] = " compat " ,
[ FEAT_COMPAT_RO ] = " compat_ro " ,
[ FEAT_INCOMPAT ] = " incompat " ,
} ;
const char * btrfs_feature_set_name ( enum btrfs_feature_set set )
{
return btrfs_feature_set_names [ set ] ;
}
char * btrfs_printable_features ( enum btrfs_feature_set set , u64 flags )
{
size_t bufsize = 4096 ; /* safe max, 64 names * 64 bytes */
int len = 0 ;
int i ;
char * str ;
str = kmalloc ( bufsize , GFP_KERNEL ) ;
if ( ! str )
return str ;
for ( i = 0 ; i < ARRAY_SIZE ( btrfs_feature_attrs [ set ] ) ; i + + ) {
const char * name ;
if ( ! ( flags & ( 1ULL < < i ) ) )
continue ;
name = btrfs_feature_attrs [ set ] [ i ] . kobj_attr . attr . name ;
len + = scnprintf ( str + len , bufsize - len , " %s%s " ,
len ? " , " : " " , name ) ;
}
return str ;
}
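/*
 * A minimal usage sketch (hypothetical caller, not taken from this file):
 * the returned buffer is kmalloc()ed, may be NULL on allocation failure,
 * and must be freed by the caller.
 *
 *	char *names = btrfs_printable_features(FEAT_INCOMPAT, flags);
 *
 *	if (names) {
 *		pr_info("unknown incompat features: %s\n", names);
 *		kfree(names);
 *	}
 */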
static void init_feature_attrs ( void )
{
struct btrfs_feature_attr * fa ;
int set , i ;
memset ( btrfs_feature_attrs , 0 , sizeof ( btrfs_feature_attrs ) ) ;
memset ( btrfs_unknown_feature_names , 0 ,
sizeof ( btrfs_unknown_feature_names ) ) ;
for ( i = 0 ; btrfs_supported_feature_attrs [ i ] ; i + + ) {
struct btrfs_feature_attr * sfa ;
struct attribute * a = btrfs_supported_feature_attrs [ i ] ;
int bit ;
sfa = attr_to_btrfs_feature_attr ( a ) ;
bit = ilog2 ( sfa - > feature_bit ) ;
fa = & btrfs_feature_attrs [ sfa - > feature_set ] [ bit ] ;
fa - > kobj_attr . attr . name = sfa - > kobj_attr . attr . name ;
}
for ( set = 0 ; set < FEAT_MAX ; set + + ) {
for ( i = 0 ; i < ARRAY_SIZE ( btrfs_feature_attrs [ set ] ) ; i + + ) {
char * name = btrfs_unknown_feature_names [ set ] [ i ] ;
fa = & btrfs_feature_attrs [ set ] [ i ] ;
if ( fa - > kobj_attr . attr . name )
continue ;
snprintf ( name , BTRFS_FEATURE_NAME_MAX , " %s:%u " ,
btrfs_feature_set_names [ set ] , i ) ;
fa - > kobj_attr . attr . name = name ;
fa - > kobj_attr . attr . mode = S_IRUGO ;
fa - > feature_set = set ;
fa - > feature_bit = 1ULL < < i ;
}
}
}
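/*
 * Illustration of the generated names (derived from the snprintf format
 * above): a feature bit without a named attribute, e.g. bit 12 of the
 * compat_ro set, is exposed under the placeholder name "compat_ro:12".
 */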
/*
* Create a sysfs entry for a given block group type at path
* / sys / fs / btrfs / UUID / allocation / data / TYPE
*/
void btrfs_sysfs_add_block_group_type ( struct btrfs_block_group * cache )
{
struct btrfs_fs_info * fs_info = cache - > fs_info ;
struct btrfs_space_info * space_info = cache - > space_info ;
struct raid_kobject * rkobj ;
const int index = btrfs_bg_flags_to_raid_index ( cache - > flags ) ;
unsigned int nofs_flag ;
int ret ;
/*
* Setup a NOFS context because kobject_add ( ) , deep in its call chain ,
* does GFP_KERNEL allocations , and we are often called in a context
* where if reclaim is triggered we can deadlock ( we are either holding
* a transaction handle or some lock required for a transaction
* commit ) .
*/
nofs_flag = memalloc_nofs_save ( ) ;
rkobj = kzalloc ( sizeof ( * rkobj ) , GFP_NOFS ) ;
if ( ! rkobj ) {
memalloc_nofs_restore ( nofs_flag ) ;
btrfs_warn ( cache - > fs_info ,
" couldn't alloc memory for raid level kobject " ) ;
return ;
}
rkobj - > flags = cache - > flags ;
kobject_init ( & rkobj - > kobj , & btrfs_raid_ktype ) ;
/*
* We call this either on mount , or if we ' ve created a block group for a
* new index type while running ( i . e . when restriping ) . The running
* case is tricky because we could race with other threads , so we need
* to have this check to make sure we didn ' t already init the kobject .
*
* We don ' t have to protect on the free side because it only happens on
* unmount .
*/
spin_lock ( & space_info - > lock ) ;
if ( space_info - > block_group_kobjs [ index ] ) {
spin_unlock ( & space_info - > lock ) ;
kobject_put ( & rkobj - > kobj ) ;
return ;
} else {
space_info - > block_group_kobjs [ index ] = & rkobj - > kobj ;
}
spin_unlock ( & space_info - > lock ) ;
ret = kobject_add ( & rkobj - > kobj , & space_info - > kobj , " %s " ,
btrfs_bg_type_to_raid_name ( rkobj - > flags ) ) ;
memalloc_nofs_restore ( nofs_flag ) ;
if ( ret ) {
spin_lock ( & space_info - > lock ) ;
space_info - > block_group_kobjs [ index ] = NULL ;
spin_unlock ( & space_info - > lock ) ;
kobject_put ( & rkobj - > kobj ) ;
btrfs_warn ( fs_info ,
" failed to add kobject for block cache, ignoring " ) ;
return ;
}
}
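/*
 * For illustration: a data block group using the raid1 profile ends up as
 * /sys/fs/btrfs/<uuid>/allocation/data/raid1, where the last component is
 * the name returned by btrfs_bg_type_to_raid_name().
 */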
/*
* Remove sysfs directories for all block group types of a given space info and
* the space info as well
*/
void btrfs_sysfs_remove_space_info ( struct btrfs_space_info * space_info )
{
int i ;
for ( i = 0 ; i < BTRFS_NR_RAID_TYPES ; i + + ) {
struct kobject * kobj ;
kobj = space_info - > block_group_kobjs [ i ] ;
space_info - > block_group_kobjs [ i ] = NULL ;
if ( kobj ) {
kobject_del ( kobj ) ;
kobject_put ( kobj ) ;
}
}
kobject_del ( & space_info - > kobj ) ;
kobject_put ( & space_info - > kobj ) ;
}
static const char * alloc_name ( u64 flags )
{
switch ( flags ) {
case BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA :
return " mixed " ;
case BTRFS_BLOCK_GROUP_METADATA :
return " metadata " ;
case BTRFS_BLOCK_GROUP_DATA :
return " data " ;
case BTRFS_BLOCK_GROUP_SYSTEM :
return " system " ;
default :
WARN_ON ( 1 ) ;
return " invalid-combination " ;
}
}
/*
* Create a sysfs entry for a space info type at path
* / sys / fs / btrfs / UUID / allocation / TYPE
*/
int btrfs_sysfs_add_space_info_type ( struct btrfs_fs_info * fs_info ,
struct btrfs_space_info * space_info )
{
int ret ;
ret = kobject_init_and_add ( & space_info - > kobj , & space_info_ktype ,
fs_info - > space_info_kobj , " %s " ,
alloc_name ( space_info - > flags ) ) ;
if ( ret ) {
kobject_put ( & space_info - > kobj ) ;
return ret ;
}
return 0 ;
}
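/*
 * For illustration: depending on space_info->flags this creates one of
 * /sys/fs/btrfs/<uuid>/allocation/{data,metadata,system,mixed}, matching
 * the names returned by alloc_name() above.
 */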
void btrfs_sysfs_remove_device ( struct btrfs_device * device )
{
struct kobject * devices_kobj ;
/*
* Seed fs_devices devices_kobj aren ' t used , fetch kobject from the
* fs_info : : fs_devices .
*/
devices_kobj = device - > fs_info - > fs_devices - > devices_kobj ;
ASSERT ( devices_kobj ) ;
if ( device - > bdev )
sysfs_remove_link ( devices_kobj , bdev_kobj ( device - > bdev ) - > name ) ;
if ( device - > devid_kobj . state_initialized ) {
kobject_del ( & device - > devid_kobj ) ;
kobject_put ( & device - > devid_kobj ) ;
wait_for_completion ( & device - > kobj_unregister ) ;
}
}
static ssize_t btrfs_devinfo_in_fs_metadata_show ( struct kobject * kobj ,
struct kobj_attribute * a ,
char * buf )
{
int val ;
struct btrfs_device * device = container_of ( kobj , struct btrfs_device ,
devid_kobj ) ;
val = ! ! test_bit ( BTRFS_DEV_STATE_IN_FS_METADATA , & device - > dev_state ) ;
return sysfs_emit ( buf , " %d \n " , val ) ;
}
BTRFS_ATTR ( devid , in_fs_metadata , btrfs_devinfo_in_fs_metadata_show ) ;
static ssize_t btrfs_devinfo_missing_show ( struct kobject * kobj ,
struct kobj_attribute * a , char * buf )
{
int val ;
struct btrfs_device * device = container_of ( kobj , struct btrfs_device ,
devid_kobj ) ;
val = ! ! test_bit ( BTRFS_DEV_STATE_MISSING , & device - > dev_state ) ;
return sysfs_emit ( buf , " %d \n " , val ) ;
}
BTRFS_ATTR ( devid , missing , btrfs_devinfo_missing_show ) ;
static ssize_t btrfs_devinfo_replace_target_show ( struct kobject * kobj ,
struct kobj_attribute * a ,
char * buf )
{
int val ;
struct btrfs_device * device = container_of ( kobj , struct btrfs_device ,
devid_kobj ) ;
val = ! ! test_bit ( BTRFS_DEV_STATE_REPLACE_TGT , & device - > dev_state ) ;
return sysfs_emit ( buf , " %d \n " , val ) ;
}
BTRFS_ATTR ( devid , replace_target , btrfs_devinfo_replace_target_show ) ;
static ssize_t btrfs_devinfo_scrub_speed_max_show ( struct kobject * kobj ,
struct kobj_attribute * a ,
char * buf )
{
struct btrfs_device * device = container_of ( kobj , struct btrfs_device ,
devid_kobj ) ;
return sysfs_emit ( buf , " %llu \n " , READ_ONCE ( device - > scrub_speed_max ) ) ;
}
static ssize_t btrfs_devinfo_scrub_speed_max_store ( struct kobject * kobj ,
struct kobj_attribute * a ,
const char * buf , size_t len )
{
struct btrfs_device * device = container_of ( kobj , struct btrfs_device ,
devid_kobj ) ;
char * endptr ;
unsigned long long limit ;
limit = memparse ( buf , & endptr ) ;
WRITE_ONCE ( device - > scrub_speed_max , limit ) ;
return len ;
}
BTRFS_ATTR_RW ( devid , scrub_speed_max , btrfs_devinfo_scrub_speed_max_show ,
btrfs_devinfo_scrub_speed_max_store ) ;
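/*
 * Usage sketch (shell example, path shown for illustration): the value is
 * parsed with memparse(), so size suffixes are accepted, e.g.
 *
 *	echo 100m > /sys/fs/btrfs/<uuid>/devinfo/<devid>/scrub_speed_max
 *
 * which limits scrub on that device to about 100 MiB/s; writing 0 is
 * assumed to remove the limit.
 */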
static ssize_t btrfs_devinfo_writeable_show ( struct kobject * kobj ,
struct kobj_attribute * a , char * buf )
{
int val ;
struct btrfs_device * device = container_of ( kobj , struct btrfs_device ,
devid_kobj ) ;
val = ! ! test_bit ( BTRFS_DEV_STATE_WRITEABLE , & device - > dev_state ) ;
return sysfs_emit ( buf , " %d \n " , val ) ;
}
BTRFS_ATTR ( devid , writeable , btrfs_devinfo_writeable_show ) ;
static ssize_t btrfs_devinfo_fsid_show ( struct kobject * kobj ,
struct kobj_attribute * a , char * buf )
{
struct btrfs_device * device = container_of ( kobj , struct btrfs_device ,
devid_kobj ) ;
return sysfs_emit ( buf , " %pU \n " , device - > fs_devices - > fsid ) ;
}
BTRFS_ATTR ( devid , fsid , btrfs_devinfo_fsid_show ) ;
static ssize_t btrfs_devinfo_error_stats_show ( struct kobject * kobj ,
struct kobj_attribute * a , char * buf )
{
struct btrfs_device * device = container_of ( kobj , struct btrfs_device ,
devid_kobj ) ;
if ( ! device - > dev_stats_valid )
return sysfs_emit ( buf , " invalid \n " ) ;
/*
* Print all at once so we get a snapshot of all values from the same
* time . Keep them in sync and in order of definition of
* btrfs_dev_stat_values .
*/
return sysfs_emit ( buf ,
" write_errs %d \n "
" read_errs %d \n "
" flush_errs %d \n "
" corruption_errs %d \n "
" generation_errs %d \n " ,
btrfs_dev_stat_read ( device , BTRFS_DEV_STAT_WRITE_ERRS ) ,
btrfs_dev_stat_read ( device , BTRFS_DEV_STAT_READ_ERRS ) ,
btrfs_dev_stat_read ( device , BTRFS_DEV_STAT_FLUSH_ERRS ) ,
btrfs_dev_stat_read ( device , BTRFS_DEV_STAT_CORRUPTION_ERRS ) ,
btrfs_dev_stat_read ( device , BTRFS_DEV_STAT_GENERATION_ERRS ) ) ;
}
BTRFS_ATTR ( devid , error_stats , btrfs_devinfo_error_stats_show ) ;
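/*
 * Sample output of the error_stats file (illustrative values only):
 *
 *	write_errs 0
 *	read_errs 0
 *	flush_errs 0
 *	corruption_errs 0
 *	generation_errs 0
 */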
/*
* Information about one device .
*
* Path : / sys / fs / btrfs / < uuid > / devinfo / < devid > /
*/
static struct attribute * devid_attrs [ ] = {
BTRFS_ATTR_PTR ( devid , error_stats ) ,
BTRFS_ATTR_PTR ( devid , fsid ) ,
BTRFS_ATTR_PTR ( devid , in_fs_metadata ) ,
BTRFS_ATTR_PTR ( devid , missing ) ,
BTRFS_ATTR_PTR ( devid , replace_target ) ,
BTRFS_ATTR_PTR ( devid , scrub_speed_max ) ,
BTRFS_ATTR_PTR ( devid , writeable ) ,
NULL
} ;
ATTRIBUTE_GROUPS ( devid ) ;
static void btrfs_release_devid_kobj ( struct kobject * kobj )
{
struct btrfs_device * device = container_of ( kobj , struct btrfs_device ,
devid_kobj ) ;
memset ( & device - > devid_kobj , 0 , sizeof ( struct kobject ) ) ;
complete ( & device - > kobj_unregister ) ;
}
static struct kobj_type devid_ktype = {
. sysfs_ops = & kobj_sysfs_ops ,
. default_groups = devid_groups ,
. release = btrfs_release_devid_kobj ,
} ;
int btrfs_sysfs_add_device ( struct btrfs_device * device )
{
int ret ;
unsigned int nofs_flag ;
struct kobject * devices_kobj ;
struct kobject * devinfo_kobj ;
/*
* Make sure we use the fs_info : : fs_devices to fetch the kobjects even
* for the seed fs_devices
*/
devices_kobj = device - > fs_info - > fs_devices - > devices_kobj ;
devinfo_kobj = device - > fs_info - > fs_devices - > devinfo_kobj ;
ASSERT ( devices_kobj ) ;
ASSERT ( devinfo_kobj ) ;
nofs_flag = memalloc_nofs_save ( ) ;
if ( device - > bdev ) {
struct kobject * disk_kobj = bdev_kobj ( device - > bdev ) ;
ret = sysfs_create_link ( devices_kobj , disk_kobj , disk_kobj - > name ) ;
if ( ret ) {
btrfs_warn ( device - > fs_info ,
" creating sysfs device link for devid %llu failed: %d " ,
device - > devid , ret ) ;
goto out ;
}
}
init_completion ( & device - > kobj_unregister ) ;
ret = kobject_init_and_add ( & device - > devid_kobj , & devid_ktype ,
devinfo_kobj , " %llu " , device - > devid ) ;
if ( ret ) {
kobject_put ( & device - > devid_kobj ) ;
btrfs_warn ( device - > fs_info ,
" devinfo init for devid %llu failed: %d " ,
device - > devid , ret ) ;
}
out :
memalloc_nofs_restore ( nofs_flag ) ;
return ret ;
}
static int btrfs_sysfs_add_fs_devices ( struct btrfs_fs_devices * fs_devices )
{
int ret ;
struct btrfs_device * device ;
struct btrfs_fs_devices * seed ;
list_for_each_entry ( device , & fs_devices - > devices , dev_list ) {
ret = btrfs_sysfs_add_device ( device ) ;
if ( ret )
goto fail ;
}
list_for_each_entry ( seed , & fs_devices - > seed_list , seed_list ) {
list_for_each_entry ( device , & seed - > devices , dev_list ) {
ret = btrfs_sysfs_add_device ( device ) ;
if ( ret )
goto fail ;
}
}
return 0 ;
fail :
btrfs_sysfs_remove_fs_devices ( fs_devices ) ;
return ret ;
}
void btrfs_kobject_uevent ( struct block_device * bdev , enum kobject_action action )
{
int ret ;
ret = kobject_uevent ( & disk_to_dev ( bdev - > bd_disk ) - > kobj , action ) ;
if ( ret )
pr_warn ( " BTRFS: Sending event '%d' to kobject: '%s' (%p): failed \n " ,
action , kobject_name ( & disk_to_dev ( bdev - > bd_disk ) - > kobj ) ,
& disk_to_dev ( bdev - > bd_disk ) - > kobj ) ;
}
void btrfs_sysfs_update_sprout_fsid ( struct btrfs_fs_devices * fs_devices )
{
char fsid_buf [ BTRFS_UUID_UNPARSED_SIZE ] ;
/*
* Sprouting changes fsid of the mounted filesystem , rename the fsid
* directory
*/
snprintf ( fsid_buf , BTRFS_UUID_UNPARSED_SIZE , " %pU " , fs_devices - > fsid ) ;
if ( kobject_rename ( & fs_devices - > fsid_kobj , fsid_buf ) )
btrfs_warn ( fs_devices - > fs_info ,
" sysfs: failed to create fsid for sprout " ) ;
}
void btrfs_sysfs_update_devid ( struct btrfs_device * device )
{
char tmp [ 24 ] ;
snprintf ( tmp , sizeof ( tmp ) , " %llu " , device - > devid ) ;
if ( kobject_rename ( & device - > devid_kobj , tmp ) )
btrfs_warn ( device - > fs_devices - > fs_info ,
" sysfs: failed to update devid for %llu " ,
device - > devid ) ;
}
/* /sys/fs/btrfs/ entry */
static struct kset * btrfs_kset ;
/*
* Creates :
* / sys / fs / btrfs / UUID
*
* Can be called by the device discovery thread .
*/
int btrfs_sysfs_add_fsid ( struct btrfs_fs_devices * fs_devs )
{
int error ;
init_completion ( & fs_devs - > kobj_unregister ) ;
fs_devs - > fsid_kobj . kset = btrfs_kset ;
error = kobject_init_and_add ( & fs_devs - > fsid_kobj , & btrfs_ktype , NULL ,
" %pU " , fs_devs - > fsid ) ;
if ( error ) {
kobject_put ( & fs_devs - > fsid_kobj ) ;
return error ;
}
fs_devs - > devices_kobj = kobject_create_and_add ( " devices " ,
& fs_devs - > fsid_kobj ) ;
if ( ! fs_devs - > devices_kobj ) {
btrfs_err ( fs_devs - > fs_info ,
" failed to init sysfs device interface " ) ;
btrfs_sysfs_remove_fsid ( fs_devs ) ;
return - ENOMEM ;
}
fs_devs - > devinfo_kobj = kobject_create_and_add ( " devinfo " ,
& fs_devs - > fsid_kobj ) ;
if ( ! fs_devs - > devinfo_kobj ) {
btrfs_err ( fs_devs - > fs_info ,
" failed to init sysfs devinfo kobject " ) ;
btrfs_sysfs_remove_fsid ( fs_devs ) ;
return - ENOMEM ;
}
return 0 ;
}
int btrfs_sysfs_add_mounted ( struct btrfs_fs_info * fs_info )
{
int error ;
struct btrfs_fs_devices * fs_devs = fs_info - > fs_devices ;
struct kobject * fsid_kobj = & fs_devs - > fsid_kobj ;
error = btrfs_sysfs_add_fs_devices ( fs_devs ) ;
if ( error )
return error ;
error = sysfs_create_files ( fsid_kobj , btrfs_attrs ) ;
if ( error ) {
btrfs_sysfs_remove_fs_devices ( fs_devs ) ;
return error ;
}
error = sysfs_create_group ( fsid_kobj ,
& btrfs_feature_attr_group ) ;
if ( error )
goto failure ;
# ifdef CONFIG_BTRFS_DEBUG
fs_info - > debug_kobj = kobject_create_and_add ( " debug " , fsid_kobj ) ;
if ( ! fs_info - > debug_kobj ) {
error = - ENOMEM ;
goto failure ;
}
error = sysfs_create_files ( fs_info - > debug_kobj , btrfs_debug_mount_attrs ) ;
if ( error )
goto failure ;
# endif
/* Discard directory */
fs_info - > discard_kobj = kobject_create_and_add ( " discard " , fsid_kobj ) ;
if ( ! fs_info - > discard_kobj ) {
error = - ENOMEM ;
goto failure ;
}
error = sysfs_create_files ( fs_info - > discard_kobj , discard_attrs ) ;
if ( error )
goto failure ;
error = addrm_unknown_feature_attrs ( fs_info , true ) ;
if ( error )
goto failure ;
error = sysfs_create_link ( fsid_kobj , & fs_info - > sb - > s_bdi - > dev - > kobj , " bdi " ) ;
if ( error )
goto failure ;
fs_info - > space_info_kobj = kobject_create_and_add ( " allocation " ,
fsid_kobj ) ;
if ( ! fs_info - > space_info_kobj ) {
error = - ENOMEM ;
goto failure ;
}
error = sysfs_create_files ( fs_info - > space_info_kobj , allocation_attrs ) ;
if ( error )
goto failure ;
return 0 ;
failure :
btrfs_sysfs_remove_mounted ( fs_info ) ;
return error ;
}
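/*
 * Summary (for illustration): after a successful btrfs_sysfs_add_mounted()
 * the per-filesystem directory /sys/fs/btrfs/<uuid>/ contains the btrfs_attrs
 * files, the features group, debug/ (CONFIG_BTRFS_DEBUG only), discard/,
 * a "bdi" symlink and allocation/ with the allocation_attrs files.
 */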
static inline struct btrfs_fs_info * qgroup_kobj_to_fs_info ( struct kobject * kobj )
{
return to_fs_info ( kobj - > parent - > parent ) ;
}
# define QGROUP_ATTR(_member, _show_name) \
static ssize_t btrfs_qgroup_show_ # # _member ( struct kobject * qgroup_kobj , \
struct kobj_attribute * a , \
char * buf ) \
{ \
struct btrfs_fs_info * fs_info = qgroup_kobj_to_fs_info ( qgroup_kobj ) ; \
struct btrfs_qgroup * qgroup = container_of ( qgroup_kobj , \
struct btrfs_qgroup , kobj ) ; \
return btrfs_show_u64 ( & qgroup - > _member , & fs_info - > qgroup_lock , buf ) ; \
} \
BTRFS_ATTR ( qgroup , _show_name , btrfs_qgroup_show_ # # _member )
# define QGROUP_RSV_ATTR(_name, _type) \
static ssize_t btrfs_qgroup_rsv_show_ # # _name ( struct kobject * qgroup_kobj , \
struct kobj_attribute * a , \
char * buf ) \
{ \
struct btrfs_fs_info * fs_info = qgroup_kobj_to_fs_info ( qgroup_kobj ) ; \
struct btrfs_qgroup * qgroup = container_of ( qgroup_kobj , \
struct btrfs_qgroup , kobj ) ; \
return btrfs_show_u64 ( & qgroup - > rsv . values [ _type ] , \
& fs_info - > qgroup_lock , buf ) ; \
} \
BTRFS_ATTR ( qgroup , rsv_ # # _name , btrfs_qgroup_rsv_show_ # # _name )
QGROUP_ATTR ( rfer , referenced ) ;
QGROUP_ATTR ( excl , exclusive ) ;
QGROUP_ATTR ( max_rfer , max_referenced ) ;
QGROUP_ATTR ( max_excl , max_exclusive ) ;
QGROUP_ATTR ( lim_flags , limit_flags ) ;
QGROUP_RSV_ATTR ( data , BTRFS_QGROUP_RSV_DATA ) ;
QGROUP_RSV_ATTR ( meta_pertrans , BTRFS_QGROUP_RSV_META_PERTRANS ) ;
QGROUP_RSV_ATTR ( meta_prealloc , BTRFS_QGROUP_RSV_META_PREALLOC ) ;
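/*
 * For reference, QGROUP_ATTR(rfer, referenced) above expands to roughly:
 *
 *	static ssize_t btrfs_qgroup_show_rfer(struct kobject *qgroup_kobj,
 *					      struct kobj_attribute *a,
 *					      char *buf)
 *	{
 *		struct btrfs_fs_info *fs_info = qgroup_kobj_to_fs_info(qgroup_kobj);
 *		struct btrfs_qgroup *qgroup = container_of(qgroup_kobj,
 *						struct btrfs_qgroup, kobj);
 *		return btrfs_show_u64(&qgroup->rfer, &fs_info->qgroup_lock, buf);
 *	}
 *	BTRFS_ATTR(qgroup, referenced, btrfs_qgroup_show_rfer);
 */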
/*
* Qgroup information .
*
* Path : / sys / fs / btrfs / < uuid > / qgroups / < level > _ < qgroupid > /
*/
static struct attribute * qgroup_attrs [ ] = {
BTRFS_ATTR_PTR ( qgroup , referenced ) ,
BTRFS_ATTR_PTR ( qgroup , exclusive ) ,
BTRFS_ATTR_PTR ( qgroup , max_referenced ) ,
BTRFS_ATTR_PTR ( qgroup , max_exclusive ) ,
BTRFS_ATTR_PTR ( qgroup , limit_flags ) ,
BTRFS_ATTR_PTR ( qgroup , rsv_data ) ,
BTRFS_ATTR_PTR ( qgroup , rsv_meta_pertrans ) ,
BTRFS_ATTR_PTR ( qgroup , rsv_meta_prealloc ) ,
NULL
} ;
ATTRIBUTE_GROUPS ( qgroup ) ;
static void qgroup_release ( struct kobject * kobj )
{
struct btrfs_qgroup * qgroup = container_of ( kobj , struct btrfs_qgroup , kobj ) ;
memset ( & qgroup - > kobj , 0 , sizeof ( * kobj ) ) ;
}
static struct kobj_type qgroup_ktype = {
. sysfs_ops = & kobj_sysfs_ops ,
. release = qgroup_release ,
. default_groups = qgroup_groups ,
} ;
int btrfs_sysfs_add_one_qgroup ( struct btrfs_fs_info * fs_info ,
struct btrfs_qgroup * qgroup )
{
struct kobject * qgroups_kobj = fs_info - > qgroups_kobj ;
int ret ;
if ( test_bit ( BTRFS_FS_STATE_DUMMY_FS_INFO , & fs_info - > fs_state ) )
return 0 ;
if ( qgroup - > kobj . state_initialized )
return 0 ;
if ( ! qgroups_kobj )
return - EINVAL ;
ret = kobject_init_and_add ( & qgroup - > kobj , & qgroup_ktype , qgroups_kobj ,
" %hu_%llu " , btrfs_qgroup_level ( qgroup - > qgroupid ) ,
btrfs_qgroup_subvolid ( qgroup - > qgroupid ) ) ;
if ( ret < 0 )
kobject_put ( & qgroup - > kobj ) ;
return ret ;
}
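/*
 * For illustration: qgroup 0/257 gets the sysfs directory
 * /sys/fs/btrfs/<uuid>/qgroups/0_257, following the "%hu_%llu" format of
 * qgroup level and subvolume id used above.
 */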
void btrfs_sysfs_del_qgroups ( struct btrfs_fs_info * fs_info )
{
struct btrfs_qgroup * qgroup ;
struct btrfs_qgroup * next ;
if ( test_bit ( BTRFS_FS_STATE_DUMMY_FS_INFO , & fs_info - > fs_state ) )
return ;
rbtree_postorder_for_each_entry_safe ( qgroup , next ,
& fs_info - > qgroup_tree , node )
btrfs_sysfs_del_one_qgroup ( fs_info , qgroup ) ;
if ( fs_info - > qgroups_kobj ) {
kobject_del ( fs_info - > qgroups_kobj ) ;
kobject_put ( fs_info - > qgroups_kobj ) ;
fs_info - > qgroups_kobj = NULL ;
}
}
/* Called when qgroups get initialized, thus there is no need for locking */
int btrfs_sysfs_add_qgroups ( struct btrfs_fs_info * fs_info )
{
struct kobject * fsid_kobj = & fs_info - > fs_devices - > fsid_kobj ;
struct btrfs_qgroup * qgroup ;
struct btrfs_qgroup * next ;
int ret = 0 ;
if ( test_bit ( BTRFS_FS_STATE_DUMMY_FS_INFO , & fs_info - > fs_state ) )
return 0 ;
ASSERT ( fsid_kobj ) ;
if ( fs_info - > qgroups_kobj )
return 0 ;
fs_info - > qgroups_kobj = kobject_create_and_add ( " qgroups " , fsid_kobj ) ;
if ( ! fs_info - > qgroups_kobj ) {
ret = - ENOMEM ;
goto out ;
}
rbtree_postorder_for_each_entry_safe ( qgroup , next ,
& fs_info - > qgroup_tree , node ) {
ret = btrfs_sysfs_add_one_qgroup ( fs_info , qgroup ) ;
if ( ret < 0 )
goto out ;
}
out :
if ( ret < 0 )
btrfs_sysfs_del_qgroups ( fs_info ) ;
return ret ;
}
void btrfs_sysfs_del_one_qgroup ( struct btrfs_fs_info * fs_info ,
struct btrfs_qgroup * qgroup )
{
if ( test_bit ( BTRFS_FS_STATE_DUMMY_FS_INFO , & fs_info - > fs_state ) )
return ;
if ( qgroup - > kobj . state_initialized ) {
kobject_del ( & qgroup - > kobj ) ;
kobject_put ( & qgroup - > kobj ) ;
}
}
/*
* Change per - fs features in / sys / fs / btrfs / UUID / features to match current
* values in superblock . Call after any changes to incompat / compat_ro flags
*/
void btrfs_sysfs_feature_update ( struct btrfs_fs_info * fs_info ,
u64 bit , enum btrfs_feature_set set )
{
struct btrfs_fs_devices * fs_devs ;
struct kobject * fsid_kobj ;
u64 __maybe_unused features ;
int __maybe_unused ret ;
if ( ! fs_info )
return ;
/*
* See 14e46e04958df74 and e410e34fad913dd , feature bit updates are not
* safe when called from some contexts ( eg . balance )
*/
features = get_features ( fs_info , set ) ;
ASSERT ( bit & supported_feature_masks [ set ] ) ;
fs_devs = fs_info - > fs_devices ;
fsid_kobj = & fs_devs - > fsid_kobj ;
if ( ! fsid_kobj - > state_initialized )
return ;
/*
* FIXME : this is too heavy to update just one value , ideally we ' d like
* to use sysfs_update_group but some refactoring is needed first .
*/
sysfs_remove_group ( fsid_kobj , & btrfs_feature_attr_group ) ;
ret = sysfs_create_group ( fsid_kobj , & btrfs_feature_attr_group ) ;
}
int __init btrfs_init_sysfs ( void )
{
int ret ;
btrfs_kset = kset_create_and_add ( " btrfs " , NULL , fs_kobj ) ;
if ( ! btrfs_kset )
return - ENOMEM ;
init_feature_attrs ( ) ;
ret = sysfs_create_group ( & btrfs_kset - > kobj , & btrfs_feature_attr_group ) ;
if ( ret )
goto out2 ;
ret = sysfs_merge_group ( & btrfs_kset - > kobj ,
& btrfs_static_feature_attr_group ) ;
if ( ret )
goto out_remove_group ;
# ifdef CONFIG_BTRFS_DEBUG
ret = sysfs_create_group ( & btrfs_kset - > kobj , & btrfs_debug_feature_attr_group ) ;
if ( ret )
goto out2 ;
# endif
return 0 ;
out_remove_group :
sysfs_remove_group ( & btrfs_kset - > kobj , & btrfs_feature_attr_group ) ;
out2 :
kset_unregister ( btrfs_kset ) ;
return ret ;
}
void __cold btrfs_exit_sysfs ( void )
{
sysfs_unmerge_group ( & btrfs_kset - > kobj ,
& btrfs_static_feature_attr_group ) ;
sysfs_remove_group ( & btrfs_kset - > kobj , & btrfs_feature_attr_group ) ;
# ifdef CONFIG_BTRFS_DEBUG
sysfs_remove_group ( & btrfs_kset - > kobj , & btrfs_debug_feature_attr_group ) ;
# endif
kset_unregister ( btrfs_kset ) ;
}