// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs sysfs interface
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 * Copyright (c) 2017 Chao Yu <chao@kernel.org>
 */
#include <linux/compiler.h>
#include <linux/proc_fs.h>
#include <linux/f2fs_fs.h>
#include <linux/seq_file.h>

#include "f2fs.h"
#include "segment.h"
#include "gc.h"
static struct proc_dir_entry *f2fs_proc_root;

/* Sysfs support for f2fs */
/* Selector telling __struct_ptr() which embedded structure an attribute
 * offset refers to.
 */
enum {
	GC_THREAD,	/* struct f2fs_gc_thread */
	SM_INFO,	/* struct f2fs_sm_info */
	DCC_INFO,	/* struct discard_cmd_control */
	NM_INFO,	/* struct f2fs_nm_info */
	F2FS_SBI,	/* struct f2fs_sb_info */
#ifdef CONFIG_F2FS_FAULT_INJECTION
	FAULT_INFO_RATE,	/* struct f2fs_fault_info */
	FAULT_INFO_TYPE,	/* struct f2fs_fault_info */
#endif
	RESERVED_BLOCKS,	/* struct f2fs_sb_info */
};

/* One sysfs attribute: show/store callbacks plus the (struct_type, offset)
 * pair locating the backing unsigned int, or a feature id for feature files.
 */
struct f2fs_attr {
	struct attribute attr;
	ssize_t (*show)(struct f2fs_attr *, struct f2fs_sb_info *, char *);
	ssize_t (*store)(struct f2fs_attr *, struct f2fs_sb_info *,
			 const char *, size_t);
	int struct_type;
	int offset;
	int id;
};
static unsigned char * __struct_ptr ( struct f2fs_sb_info * sbi , int struct_type )
{
if ( struct_type = = GC_THREAD )
return ( unsigned char * ) sbi - > gc_thread ;
else if ( struct_type = = SM_INFO )
return ( unsigned char * ) SM_I ( sbi ) ;
else if ( struct_type = = DCC_INFO )
return ( unsigned char * ) SM_I ( sbi ) - > dcc_info ;
else if ( struct_type = = NM_INFO )
return ( unsigned char * ) NM_I ( sbi ) ;
2017-06-26 11:24:41 +03:00
else if ( struct_type = = F2FS_SBI | | struct_type = = RESERVED_BLOCKS )
2017-06-14 12:39:47 +03:00
return ( unsigned char * ) sbi ;
# ifdef CONFIG_F2FS_FAULT_INJECTION
else if ( struct_type = = FAULT_INFO_RATE | |
struct_type = = FAULT_INFO_TYPE )
2018-03-08 09:22:56 +03:00
return ( unsigned char * ) & F2FS_OPTION ( sbi ) . fault_info ;
2017-06-14 12:39:47 +03:00
# endif
return NULL ;
}
2017-10-24 10:46:54 +03:00
static ssize_t dirty_segments_show ( struct f2fs_attr * a ,
struct f2fs_sb_info * sbi , char * buf )
{
return snprintf ( buf , PAGE_SIZE , " %llu \n " ,
( unsigned long long ) ( dirty_segments ( sbi ) ) ) ;
}
2017-06-14 12:39:47 +03:00
static ssize_t lifetime_write_kbytes_show ( struct f2fs_attr * a ,
struct f2fs_sb_info * sbi , char * buf )
{
struct super_block * sb = sbi - > sb ;
if ( ! sb - > s_bdev - > bd_part )
return snprintf ( buf , PAGE_SIZE , " 0 \n " ) ;
return snprintf ( buf , PAGE_SIZE , " %llu \n " ,
( unsigned long long ) ( sbi - > kbytes_written +
BD_PART_WRITTEN ( sbi ) ) ) ;
}
2017-07-22 03:14:09 +03:00
static ssize_t features_show ( struct f2fs_attr * a ,
struct f2fs_sb_info * sbi , char * buf )
{
struct super_block * sb = sbi - > sb ;
int len = 0 ;
if ( ! sb - > s_bdev - > bd_part )
return snprintf ( buf , PAGE_SIZE , " 0 \n " ) ;
2018-10-24 13:34:26 +03:00
if ( f2fs_sb_has_encrypt ( sbi ) )
2017-07-22 03:14:09 +03:00
len + = snprintf ( buf , PAGE_SIZE - len , " %s " ,
" encryption " ) ;
2018-10-24 13:34:26 +03:00
if ( f2fs_sb_has_blkzoned ( sbi ) )
2017-07-22 03:14:09 +03:00
len + = snprintf ( buf + len , PAGE_SIZE - len , " %s%s " ,
len ? " , " : " " , " blkzoned " ) ;
2018-10-24 13:34:26 +03:00
if ( f2fs_sb_has_extra_attr ( sbi ) )
2017-07-22 03:14:09 +03:00
len + = snprintf ( buf + len , PAGE_SIZE - len , " %s%s " ,
len ? " , " : " " , " extra_attr " ) ;
2018-10-24 13:34:26 +03:00
if ( f2fs_sb_has_project_quota ( sbi ) )
2017-07-22 03:14:09 +03:00
len + = snprintf ( buf + len , PAGE_SIZE - len , " %s%s " ,
len ? " , " : " " , " projquota " ) ;
2018-10-24 13:34:26 +03:00
if ( f2fs_sb_has_inode_chksum ( sbi ) )
2017-07-22 03:14:09 +03:00
len + = snprintf ( buf + len , PAGE_SIZE - len , " %s%s " ,
len ? " , " : " " , " inode_checksum " ) ;
2018-10-24 13:34:26 +03:00
if ( f2fs_sb_has_flexible_inline_xattr ( sbi ) )
f2fs: support flexible inline xattr size
Now, in product, more and more features based on file encryption were
introduced, their demand of xattr space is increasing, however, inline
xattr has fixed-size of 200 bytes, once inline xattr space is full, new
increased xattr data would occupy additional xattr block which may bring
us more space usage and performance regression during persisting.
In order to resolve above issue, it's better to expand inline xattr size
flexibly according to user's requirement.
So this patch introduces new filesystem feature 'flexible inline xattr',
and new mount option 'inline_xattr_size=%u', once mkfs enables the
feature, we can use the option to make f2fs supporting flexible inline
xattr size.
To support this feature, we add extra attribute i_inline_xattr_size in
inode layout, indicating that how many space inline xattr borrows from
block address mapping space in inode layout, by this, we can easily
locate and store flexible-sized inline xattr data in inode.
Inode disk layout:
+----------------------+
| .i_mode |
| ... |
| .i_ext |
+----------------------+
| .i_extra_isize |
| .i_inline_xattr_size |-----------+
| ... | |
+----------------------+ |
| .i_addr | |
| - block address or | |
| - inline data | |
+----------------------+<---+ v
| inline xattr | +---inline xattr range
+----------------------+<---+
| .i_nid |
+----------------------+
| node_footer |
| (nid, ino, offset) |
+----------------------+
Note that, we have to cnosider backward compatibility which reserved
inline_data space, 200 bytes, all the time, reported by Sheng Yong.
Previous inline data or directory always reserved 200 bytes in inode layout,
even if inline_xattr is disabled. In order to keep inline_dentry's structure
for backward compatibility, we get the space back only from inline_data.
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Reported-by: Sheng Yong <shengyong1@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2017-09-06 16:59:50 +03:00
len + = snprintf ( buf + len , PAGE_SIZE - len , " %s%s " ,
len ? " , " : " " , " flexible_inline_xattr " ) ;
2018-10-24 13:34:26 +03:00
if ( f2fs_sb_has_quota_ino ( sbi ) )
2017-10-06 07:03:06 +03:00
len + = snprintf ( buf + len , PAGE_SIZE - len , " %s%s " ,
len ? " , " : " " , " quota_ino " ) ;
2018-10-24 13:34:26 +03:00
if ( f2fs_sb_has_inode_crtime ( sbi ) )
2018-01-25 09:54:42 +03:00
len + = snprintf ( buf + len , PAGE_SIZE - len , " %s%s " ,
len ? " , " : " " , " inode_crtime " ) ;
2018-10-24 13:34:26 +03:00
if ( f2fs_sb_has_lost_found ( sbi ) )
2018-03-15 13:51:41 +03:00
len + = snprintf ( buf + len , PAGE_SIZE - len , " %s%s " ,
len ? " , " : " " , " lost_found " ) ;
2018-10-24 13:34:26 +03:00
if ( f2fs_sb_has_sb_chksum ( sbi ) )
2018-09-28 15:25:56 +03:00
len + = snprintf ( buf + len , PAGE_SIZE - len , " %s%s " ,
len ? " , " : " " , " sb_checksum " ) ;
2017-07-22 03:14:09 +03:00
len + = snprintf ( buf + len , PAGE_SIZE - len , " \n " ) ;
return len ;
}
2017-10-27 15:45:05 +03:00
static ssize_t current_reserved_blocks_show ( struct f2fs_attr * a ,
struct f2fs_sb_info * sbi , char * buf )
{
return snprintf ( buf , PAGE_SIZE , " %u \n " , sbi - > current_reserved_blocks ) ;
}
2017-06-14 12:39:47 +03:00
static ssize_t f2fs_sbi_show ( struct f2fs_attr * a ,
struct f2fs_sb_info * sbi , char * buf )
{
unsigned char * ptr = NULL ;
unsigned int * ui ;
ptr = __struct_ptr ( sbi , a - > struct_type ) ;
if ( ! ptr )
return - EINVAL ;
2018-02-26 17:04:13 +03:00
if ( ! strcmp ( a - > attr . name , " extension_list " ) ) {
__u8 ( * extlist ) [ F2FS_EXTENSION_LEN ] =
sbi - > raw_super - > extension_list ;
2018-02-28 12:07:27 +03:00
int cold_count = le32_to_cpu ( sbi - > raw_super - > extension_count ) ;
int hot_count = sbi - > raw_super - > hot_ext_count ;
2018-02-26 17:04:13 +03:00
int len = 0 , i ;
2018-02-28 12:07:27 +03:00
len + = snprintf ( buf + len , PAGE_SIZE - len ,
2018-04-30 18:27:44 +03:00
" cold file extension: \n " ) ;
2018-02-28 12:07:27 +03:00
for ( i = 0 ; i < cold_count ; i + + )
len + = snprintf ( buf + len , PAGE_SIZE - len , " %s \n " ,
extlist [ i ] ) ;
len + = snprintf ( buf + len , PAGE_SIZE - len ,
2018-04-30 18:27:44 +03:00
" hot file extension: \n " ) ;
2018-02-28 12:07:27 +03:00
for ( i = cold_count ; i < cold_count + hot_count ; i + + )
2018-02-26 17:04:13 +03:00
len + = snprintf ( buf + len , PAGE_SIZE - len , " %s \n " ,
extlist [ i ] ) ;
return len ;
}
2017-06-14 12:39:47 +03:00
ui = ( unsigned int * ) ( ptr + a - > offset ) ;
return snprintf ( buf , PAGE_SIZE , " %u \n " , * ui ) ;
}
f2fs: clean up symbol namespace
As Ted reported:
"Hi, I was looking at f2fs's sources recently, and I noticed that there
is a very large number of non-static symbols which don't have a f2fs
prefix. There's well over a hundred (see attached below).
As one example, in fs/f2fs/dir.c there is:
unsigned char get_de_type(struct f2fs_dir_entry *de)
This function is clearly only useful for f2fs, but it has a generic
name. This means that if any other file system tries to have the same
symbol name, there will be a symbol conflict and the kernel would not
successfully build. It also means that when someone is looking f2fs
sources, it's not at all obvious whether a function such as
read_data_page(), invalidate_blocks(), is a generic kernel function
found in the fs, mm, or block layers, or a f2fs specific function.
You might want to fix this at some point. Hopefully Kent's bcachefs
isn't similarly using genericly named functions, since that might
cause conflicts with f2fs's functions --- but just as this would be a
problem that we would rightly insist that Kent fix, this is something
that we should have rightly insisted that f2fs should have fixed
before it was integrated into the mainline kernel.
acquire_orphan_inode
add_ino_entry
add_orphan_inode
allocate_data_block
allocate_new_segments
alloc_nid
alloc_nid_done
alloc_nid_failed
available_free_memory
...."
This patch adds "f2fs_" prefix for all non-static symbols in order to:
a) avoid conflict with other kernel generic symbols;
b) to indicate the function is f2fs specific one instead of generic
one;
Reported-by: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2018-05-29 19:20:41 +03:00
static ssize_t __sbi_store ( struct f2fs_attr * a ,
2017-06-14 12:39:47 +03:00
struct f2fs_sb_info * sbi ,
const char * buf , size_t count )
{
unsigned char * ptr ;
unsigned long t ;
unsigned int * ui ;
ssize_t ret ;
ptr = __struct_ptr ( sbi , a - > struct_type ) ;
if ( ! ptr )
return - EINVAL ;
2018-02-26 17:04:13 +03:00
if ( ! strcmp ( a - > attr . name , " extension_list " ) ) {
const char * name = strim ( ( char * ) buf ) ;
2018-02-28 12:07:27 +03:00
bool set = true , hot ;
if ( ! strncmp ( name , " [h] " , 3 ) )
hot = true ;
else if ( ! strncmp ( name , " [c] " , 3 ) )
hot = false ;
else
return - EINVAL ;
name + = 3 ;
2018-02-26 17:04:13 +03:00
2018-02-28 12:07:27 +03:00
if ( * name = = ' ! ' ) {
2018-02-26 17:04:13 +03:00
name + + ;
set = false ;
}
if ( strlen ( name ) > = F2FS_EXTENSION_LEN )
return - EINVAL ;
down_write ( & sbi - > sb_lock ) ;
f2fs: clean up symbol namespace
As Ted reported:
"Hi, I was looking at f2fs's sources recently, and I noticed that there
is a very large number of non-static symbols which don't have a f2fs
prefix. There's well over a hundred (see attached below).
As one example, in fs/f2fs/dir.c there is:
unsigned char get_de_type(struct f2fs_dir_entry *de)
This function is clearly only useful for f2fs, but it has a generic
name. This means that if any other file system tries to have the same
symbol name, there will be a symbol conflict and the kernel would not
successfully build. It also means that when someone is looking f2fs
sources, it's not at all obvious whether a function such as
read_data_page(), invalidate_blocks(), is a generic kernel function
found in the fs, mm, or block layers, or a f2fs specific function.
You might want to fix this at some point. Hopefully Kent's bcachefs
isn't similarly using genericly named functions, since that might
cause conflicts with f2fs's functions --- but just as this would be a
problem that we would rightly insist that Kent fix, this is something
that we should have rightly insisted that f2fs should have fixed
before it was integrated into the mainline kernel.
acquire_orphan_inode
add_ino_entry
add_orphan_inode
allocate_data_block
allocate_new_segments
alloc_nid
alloc_nid_done
alloc_nid_failed
available_free_memory
...."
This patch adds "f2fs_" prefix for all non-static symbols in order to:
a) avoid conflict with other kernel generic symbols;
b) to indicate the function is f2fs specific one instead of generic
one;
Reported-by: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2018-05-29 19:20:41 +03:00
ret = f2fs_update_extension_list ( sbi , name , hot , set ) ;
2018-02-26 17:04:13 +03:00
if ( ret )
goto out ;
ret = f2fs_commit_super ( sbi , false ) ;
if ( ret )
f2fs: clean up symbol namespace
As Ted reported:
"Hi, I was looking at f2fs's sources recently, and I noticed that there
is a very large number of non-static symbols which don't have a f2fs
prefix. There's well over a hundred (see attached below).
As one example, in fs/f2fs/dir.c there is:
unsigned char get_de_type(struct f2fs_dir_entry *de)
This function is clearly only useful for f2fs, but it has a generic
name. This means that if any other file system tries to have the same
symbol name, there will be a symbol conflict and the kernel would not
successfully build. It also means that when someone is looking f2fs
sources, it's not at all obvious whether a function such as
read_data_page(), invalidate_blocks(), is a generic kernel function
found in the fs, mm, or block layers, or a f2fs specific function.
You might want to fix this at some point. Hopefully Kent's bcachefs
isn't similarly using genericly named functions, since that might
cause conflicts with f2fs's functions --- but just as this would be a
problem that we would rightly insist that Kent fix, this is something
that we should have rightly insisted that f2fs should have fixed
before it was integrated into the mainline kernel.
acquire_orphan_inode
add_ino_entry
add_orphan_inode
allocate_data_block
allocate_new_segments
alloc_nid
alloc_nid_done
alloc_nid_failed
available_free_memory
...."
This patch adds "f2fs_" prefix for all non-static symbols in order to:
a) avoid conflict with other kernel generic symbols;
b) to indicate the function is f2fs specific one instead of generic
one;
Reported-by: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2018-05-29 19:20:41 +03:00
f2fs_update_extension_list ( sbi , name , hot , ! set ) ;
2018-02-26 17:04:13 +03:00
out :
up_write ( & sbi - > sb_lock ) ;
return ret ? ret : count ;
}
2017-06-14 12:39:47 +03:00
ui = ( unsigned int * ) ( ptr + a - > offset ) ;
ret = kstrtoul ( skip_spaces ( buf ) , 0 , & t ) ;
if ( ret < 0 )
return ret ;
# ifdef CONFIG_F2FS_FAULT_INJECTION
if ( a - > struct_type = = FAULT_INFO_TYPE & & t > = ( 1 < < FAULT_MAX ) )
return - EINVAL ;
2019-01-04 12:39:53 +03:00
if ( a - > struct_type = = FAULT_INFO_RATE & & t > = UINT_MAX )
return - EINVAL ;
2017-06-14 12:39:47 +03:00
# endif
2017-06-26 11:24:41 +03:00
if ( a - > struct_type = = RESERVED_BLOCKS ) {
spin_lock ( & sbi - > stat_lock ) ;
2017-12-28 02:05:52 +03:00
if ( t > ( unsigned long ) ( sbi - > user_block_count -
2018-03-08 09:22:56 +03:00
F2FS_OPTION ( sbi ) . root_reserved_blocks ) ) {
2017-06-26 11:24:41 +03:00
spin_unlock ( & sbi - > stat_lock ) ;
return - EINVAL ;
}
* ui = t ;
2017-10-27 15:45:05 +03:00
sbi - > current_reserved_blocks = min ( sbi - > reserved_blocks ,
sbi - > user_block_count - valid_user_blocks ( sbi ) ) ;
2017-06-26 11:24:41 +03:00
spin_unlock ( & sbi - > stat_lock ) ;
return count ;
}
f2fs: introduce discard_granularity sysfs entry
Commit d618ebaf0aa8 ("f2fs: enable small discard by default") enables
f2fs to issue 4K size discard in real-time discard mode. However, issuing
smaller discard may cost more lifetime but releasing less free space in
flash device. Since f2fs has ability of separating hot/cold data and
garbage collection, we can expect that small-sized invalid region would
expand soon with OPU, deletion or garbage collection on valid datas, so
it's better to delay or skip issuing smaller size discards, it could help
to reduce overmuch consumption of IO bandwidth and lifetime of flash
storage.
This patch makes f2fs selectng 64K size as its default minimal
granularity, and issue discard with the size which is not smaller than
minimal granularity. Also it exposes discard granularity as sysfs entry
for configuration in different scenario.
Jaegeuk Kim:
We must issue all the accumulated discard commands when fstrim is called.
So, I've added pend_list_tag[] to indicate whether we should issue the
commands or not. If tag sets P_ACTIVE or P_TRIM, we have to issue them.
P_TRIM is set once at a time, given fstrim trigger.
In addition, issue_discard_thread is calling too much due to the number of
discard commands remaining in the pending list. I added a timer to control
it likewise gc_thread.
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2017-08-07 18:09:56 +03:00
if ( ! strcmp ( a - > attr . name , " discard_granularity " ) ) {
if ( t = = 0 | | t > MAX_PLIST_NUM )
return - EINVAL ;
if ( t = = * ui )
return count ;
2017-09-12 09:25:35 +03:00
* ui = t ;
f2fs: introduce discard_granularity sysfs entry
Commit d618ebaf0aa8 ("f2fs: enable small discard by default") enables
f2fs to issue 4K size discard in real-time discard mode. However, issuing
smaller discard may cost more lifetime but releasing less free space in
flash device. Since f2fs has ability of separating hot/cold data and
garbage collection, we can expect that small-sized invalid region would
expand soon with OPU, deletion or garbage collection on valid datas, so
it's better to delay or skip issuing smaller size discards, it could help
to reduce overmuch consumption of IO bandwidth and lifetime of flash
storage.
This patch makes f2fs selectng 64K size as its default minimal
granularity, and issue discard with the size which is not smaller than
minimal granularity. Also it exposes discard granularity as sysfs entry
for configuration in different scenario.
Jaegeuk Kim:
We must issue all the accumulated discard commands when fstrim is called.
So, I've added pend_list_tag[] to indicate whether we should issue the
commands or not. If tag sets P_ACTIVE or P_TRIM, we have to issue them.
P_TRIM is set once at a time, given fstrim trigger.
In addition, issue_discard_thread is calling too much due to the number of
discard commands remaining in the pending list. I added a timer to control
it likewise gc_thread.
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2017-08-07 18:09:56 +03:00
return count ;
}
2018-10-25 11:19:28 +03:00
if ( ! strcmp ( a - > attr . name , " migration_granularity " ) ) {
if ( t = = 0 | | t > sbi - > segs_per_sec )
return - EINVAL ;
}
2018-04-09 05:25:23 +03:00
if ( ! strcmp ( a - > attr . name , " trim_sections " ) )
return - EINVAL ;
2018-05-08 00:22:40 +03:00
if ( ! strcmp ( a - > attr . name , " gc_urgent " ) ) {
if ( t > = 1 ) {
sbi - > gc_mode = GC_URGENT ;
if ( sbi - > gc_thread ) {
2018-08-05 07:45:35 +03:00
sbi - > gc_thread - > gc_wake = 1 ;
2018-05-08 00:22:40 +03:00
wake_up_interruptible_all (
& sbi - > gc_thread - > gc_wait_queue_head ) ;
wake_up_discard_thread ( sbi , true ) ;
}
} else {
sbi - > gc_mode = GC_NORMAL ;
}
return count ;
}
if ( ! strcmp ( a - > attr . name , " gc_idle " ) ) {
if ( t = = GC_IDLE_CB )
sbi - > gc_mode = GC_IDLE_CB ;
else if ( t = = GC_IDLE_GREEDY )
sbi - > gc_mode = GC_IDLE_GREEDY ;
else
sbi - > gc_mode = GC_NORMAL ;
return count ;
}
2017-08-02 18:21:48 +03:00
2019-01-15 23:02:15 +03:00
if ( ! strcmp ( a - > attr . name , " iostat_enable " ) ) {
sbi - > iostat_enable = ! ! t ;
if ( ! sbi - > iostat_enable )
f2fs_reset_iostat ( sbi ) ;
return count ;
}
* ui = ( unsigned int ) t ;
2017-06-14 12:39:47 +03:00
return count ;
}
2018-05-28 11:57:32 +03:00
static ssize_t f2fs_sbi_store ( struct f2fs_attr * a ,
struct f2fs_sb_info * sbi ,
const char * buf , size_t count )
{
ssize_t ret ;
bool gc_entry = ( ! strcmp ( a - > attr . name , " gc_urgent " ) | |
a - > struct_type = = GC_THREAD ) ;
2018-07-15 03:58:08 +03:00
if ( gc_entry ) {
if ( ! down_read_trylock ( & sbi - > sb - > s_umount ) )
return - EAGAIN ;
}
f2fs: clean up symbol namespace
As Ted reported:
"Hi, I was looking at f2fs's sources recently, and I noticed that there
is a very large number of non-static symbols which don't have a f2fs
prefix. There's well over a hundred (see attached below).
As one example, in fs/f2fs/dir.c there is:
unsigned char get_de_type(struct f2fs_dir_entry *de)
This function is clearly only useful for f2fs, but it has a generic
name. This means that if any other file system tries to have the same
symbol name, there will be a symbol conflict and the kernel would not
successfully build. It also means that when someone is looking f2fs
sources, it's not at all obvious whether a function such as
read_data_page(), invalidate_blocks(), is a generic kernel function
found in the fs, mm, or block layers, or a f2fs specific function.
You might want to fix this at some point. Hopefully Kent's bcachefs
isn't similarly using genericly named functions, since that might
cause conflicts with f2fs's functions --- but just as this would be a
problem that we would rightly insist that Kent fix, this is something
that we should have rightly insisted that f2fs should have fixed
before it was integrated into the mainline kernel.
acquire_orphan_inode
add_ino_entry
add_orphan_inode
allocate_data_block
allocate_new_segments
alloc_nid
alloc_nid_done
alloc_nid_failed
available_free_memory
...."
This patch adds "f2fs_" prefix for all non-static symbols in order to:
a) avoid conflict with other kernel generic symbols;
b) to indicate the function is f2fs specific one instead of generic
one;
Reported-by: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2018-05-29 19:20:41 +03:00
ret = __sbi_store ( a , sbi , buf , count ) ;
2018-05-28 11:57:32 +03:00
if ( gc_entry )
up_read ( & sbi - > sb - > s_umount ) ;
return ret ;
}
2017-06-14 12:39:47 +03:00
static ssize_t f2fs_attr_show ( struct kobject * kobj ,
struct attribute * attr , char * buf )
{
struct f2fs_sb_info * sbi = container_of ( kobj , struct f2fs_sb_info ,
s_kobj ) ;
struct f2fs_attr * a = container_of ( attr , struct f2fs_attr , attr ) ;
return a - > show ? a - > show ( a , sbi , buf ) : 0 ;
}
/* sysfs ->store dispatch: recover sbi from the kobject and call a->store. */
static ssize_t f2fs_attr_store(struct kobject *kobj, struct attribute *attr,
						const char *buf, size_t len)
{
	struct f2fs_sb_info *sbi = container_of(kobj, struct f2fs_sb_info,
									s_kobj);
	struct f2fs_attr *a = container_of(attr, struct f2fs_attr, attr);

	return a->store ? a->store(a, sbi, buf, len) : 0;
}
static void f2fs_sb_release ( struct kobject * kobj )
{
struct f2fs_sb_info * sbi = container_of ( kobj , struct f2fs_sb_info ,
s_kobj ) ;
complete ( & sbi - > s_kobj_unregister ) ;
}
/* Ids for the read-only feature files under /sys/fs/f2fs/features. */
enum feat_id {
	FEAT_CRYPTO = 0,
	FEAT_BLKZONED,
	FEAT_ATOMIC_WRITE,
	FEAT_EXTRA_ATTR,
	FEAT_PROJECT_QUOTA,
	FEAT_INODE_CHECKSUM,
	FEAT_FLEXIBLE_INLINE_XATTR,
	FEAT_QUOTA_INO,
	FEAT_INODE_CRTIME,
	FEAT_LOST_FOUND,
	FEAT_SB_CHECKSUM,
};
static ssize_t f2fs_feature_show ( struct f2fs_attr * a ,
struct f2fs_sb_info * sbi , char * buf )
{
switch ( a - > id ) {
case FEAT_CRYPTO :
case FEAT_BLKZONED :
case FEAT_ATOMIC_WRITE :
case FEAT_EXTRA_ATTR :
case FEAT_PROJECT_QUOTA :
case FEAT_INODE_CHECKSUM :
f2fs: support flexible inline xattr size
Now, in product, more and more features based on file encryption were
introduced, their demand of xattr space is increasing, however, inline
xattr has fixed-size of 200 bytes, once inline xattr space is full, new
increased xattr data would occupy additional xattr block which may bring
us more space usage and performance regression during persisting.
In order to resolve above issue, it's better to expand inline xattr size
flexibly according to user's requirement.
So this patch introduces new filesystem feature 'flexible inline xattr',
and new mount option 'inline_xattr_size=%u', once mkfs enables the
feature, we can use the option to make f2fs supporting flexible inline
xattr size.
To support this feature, we add extra attribute i_inline_xattr_size in
inode layout, indicating that how many space inline xattr borrows from
block address mapping space in inode layout, by this, we can easily
locate and store flexible-sized inline xattr data in inode.
Inode disk layout:
+----------------------+
| .i_mode |
| ... |
| .i_ext |
+----------------------+
| .i_extra_isize |
| .i_inline_xattr_size |-----------+
| ... | |
+----------------------+ |
| .i_addr | |
| - block address or | |
| - inline data | |
+----------------------+<---+ v
| inline xattr | +---inline xattr range
+----------------------+<---+
| .i_nid |
+----------------------+
| node_footer |
| (nid, ino, offset) |
+----------------------+
Note that, we have to cnosider backward compatibility which reserved
inline_data space, 200 bytes, all the time, reported by Sheng Yong.
Previous inline data or directory always reserved 200 bytes in inode layout,
even if inline_xattr is disabled. In order to keep inline_dentry's structure
for backward compatibility, we get the space back only from inline_data.
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Reported-by: Sheng Yong <shengyong1@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2017-09-06 16:59:50 +03:00
case FEAT_FLEXIBLE_INLINE_XATTR :
2017-10-06 07:03:06 +03:00
case FEAT_QUOTA_INO :
2018-01-25 09:54:42 +03:00
case FEAT_INODE_CRTIME :
2018-03-15 13:51:41 +03:00
case FEAT_LOST_FOUND :
2018-09-28 15:25:56 +03:00
case FEAT_SB_CHECKSUM :
2017-07-22 03:14:09 +03:00
return snprintf ( buf , PAGE_SIZE , " supported \n " ) ;
}
return 0 ;
}
/* Declare a struct f2fs_attr with explicit show/store and field offset. */
#define F2FS_ATTR_OFFSET(_struct_type, _name, _mode, _show, _store, _offset) \
static struct f2fs_attr f2fs_attr_##_name = {			\
	.attr = {.name = __stringify(_name), .mode = _mode },	\
	.show	= _show,					\
	.store	= _store,					\
	.struct_type = _struct_type,				\
	.offset = _offset					\
}

/* Read-write attribute backed by an unsigned int member of struct_name. */
#define F2FS_RW_ATTR(struct_type, struct_name, name, elname)	\
	F2FS_ATTR_OFFSET(struct_type, name, 0644,		\
		f2fs_sbi_show, f2fs_sbi_store,			\
		offsetof(struct struct_name, elname))

/* Read-only attribute with a dedicated <name>_show() function. */
#define F2FS_GENERAL_RO_ATTR(name) \
static struct f2fs_attr f2fs_attr_##name = __ATTR(name, 0444, name##_show, NULL)

/* Read-only feature file dispatched through f2fs_feature_show() by id. */
#define F2FS_FEATURE_RO_ATTR(_name, _id)			\
static struct f2fs_attr f2fs_attr_##_name = {			\
	.attr = {.name = __stringify(_name), .mode = 0444 },	\
	.show	= f2fs_feature_show,				\
	.id	= _id,						\
}
2017-08-07 08:09:00 +03:00
F2FS_RW_ATTR ( GC_THREAD , f2fs_gc_kthread , gc_urgent_sleep_time ,
urgent_sleep_time ) ;
2017-06-14 12:39:47 +03:00
F2FS_RW_ATTR ( GC_THREAD , f2fs_gc_kthread , gc_min_sleep_time , min_sleep_time ) ;
F2FS_RW_ATTR ( GC_THREAD , f2fs_gc_kthread , gc_max_sleep_time , max_sleep_time ) ;
F2FS_RW_ATTR ( GC_THREAD , f2fs_gc_kthread , gc_no_gc_sleep_time , no_gc_sleep_time ) ;
2018-05-08 00:22:40 +03:00
F2FS_RW_ATTR ( F2FS_SBI , f2fs_sb_info , gc_idle , gc_mode ) ;
F2FS_RW_ATTR ( F2FS_SBI , f2fs_sb_info , gc_urgent , gc_mode ) ;
2017-06-14 12:39:47 +03:00
F2FS_RW_ATTR ( SM_INFO , f2fs_sm_info , reclaim_segments , rec_prefree_segments ) ;
F2FS_RW_ATTR ( DCC_INFO , discard_cmd_control , max_small_discards , max_discards ) ;
f2fs: introduce discard_granularity sysfs entry
Commit d618ebaf0aa8 ("f2fs: enable small discard by default") enables
f2fs to issue 4K size discard in real-time discard mode. However, issuing
smaller discard may cost more lifetime but releasing less free space in
flash device. Since f2fs has ability of separating hot/cold data and
garbage collection, we can expect that small-sized invalid region would
expand soon with OPU, deletion or garbage collection on valid data, so
it's better to delay or skip issuing smaller size discards, it could help
to reduce overmuch consumption of IO bandwidth and lifetime of flash
storage.
This patch makes f2fs selecting 64K size as its default minimal
granularity, and issue discard with the size which is not smaller than
minimal granularity. Also it exposes discard granularity as sysfs entry
for configuration in different scenario.
Jaegeuk Kim:
We must issue all the accumulated discard commands when fstrim is called.
So, I've added pend_list_tag[] to indicate whether we should issue the
commands or not. If tag sets P_ACTIVE or P_TRIM, we have to issue them.
P_TRIM is set once at a time, given fstrim trigger.
In addition, issue_discard_thread is calling too much due to the number of
discard commands remaining in the pending list. I added a timer to control
it likewise gc_thread.
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
/* Discard / reservation / segment-manager tunables. */
F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, discard_granularity, discard_granularity);
F2FS_RW_ATTR(RESERVED_BLOCKS, f2fs_sb_info, reserved_blocks, reserved_blocks);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, batched_trim_sections, trim_sections);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, ipu_policy, ipu_policy);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ipu_util, min_ipu_util);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_fsync_blocks, min_fsync_blocks);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_seq_blocks, min_seq_blocks);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_hot_blocks, min_hot_blocks);
F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, min_ssr_sections, min_ssr_sections);
/* Node-manager cache tunables. */
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ram_thresh, ram_thresh);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, ra_nid_pages, ra_nid_pages);
F2FS_RW_ATTR(NM_INFO, f2fs_nm_info, dirty_nats_ratio, dirty_nats_ratio);
/* Superblock-info tunables. */
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, migration_granularity, migration_granularity);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, discard_idle_interval,
					interval_time[DISCARD_TIME]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_idle_interval, interval_time[GC_TIME]);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, iostat_enable, iostat_enable);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, readdir_ra, readdir_ra);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_pin_file_thresh, gc_pin_file_threshold);
F2FS_RW_ATTR(F2FS_SBI, f2fs_super_block, extension_list, extension_list);
#ifdef CONFIG_F2FS_FAULT_INJECTION
F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate);
F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type);
#endif
/* Read-only statistics. */
F2FS_GENERAL_RO_ATTR(dirty_segments);
F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
F2FS_GENERAL_RO_ATTR(features);
F2FS_GENERAL_RO_ATTR(current_reserved_blocks);
/* On-disk feature flags exposed under /sys/fs/f2fs/features. */
#ifdef CONFIG_F2FS_FS_ENCRYPTION
F2FS_FEATURE_RO_ATTR(encryption, FEAT_CRYPTO);
#endif
#ifdef CONFIG_BLK_DEV_ZONED
F2FS_FEATURE_RO_ATTR(block_zoned, FEAT_BLKZONED);
#endif
F2FS_FEATURE_RO_ATTR(atomic_write, FEAT_ATOMIC_WRITE);
F2FS_FEATURE_RO_ATTR(extra_attr, FEAT_EXTRA_ATTR);
F2FS_FEATURE_RO_ATTR(project_quota, FEAT_PROJECT_QUOTA);
F2FS_FEATURE_RO_ATTR(inode_checksum, FEAT_INODE_CHECKSUM);
f2fs: support flexible inline xattr size
Now, in product, more and more features based on file encryption were
introduced, their demand of xattr space is increasing, however, inline
xattr has fixed-size of 200 bytes, once inline xattr space is full, new
increased xattr data would occupy additional xattr block which may bring
us more space usage and performance regression during persisting.
In order to resolve above issue, it's better to expand inline xattr size
flexibly according to user's requirement.
So this patch introduces new filesystem feature 'flexible inline xattr',
and new mount option 'inline_xattr_size=%u', once mkfs enables the
feature, we can use the option to make f2fs supporting flexible inline
xattr size.
To support this feature, we add extra attribute i_inline_xattr_size in
inode layout, indicating that how many space inline xattr borrows from
block address mapping space in inode layout, by this, we can easily
locate and store flexible-sized inline xattr data in inode.
Inode disk layout:
+----------------------+
| .i_mode |
| ... |
| .i_ext |
+----------------------+
| .i_extra_isize |
| .i_inline_xattr_size |-----------+
| ... | |
+----------------------+ |
| .i_addr | |
| - block address or | |
| - inline data | |
+----------------------+<---+ v
| inline xattr | +---inline xattr range
+----------------------+<---+
| .i_nid |
+----------------------+
| node_footer |
| (nid, ino, offset) |
+----------------------+
Note that, we have to consider backward compatibility which reserved
inline_data space, 200 bytes, all the time, reported by Sheng Yong.
Previous inline data or directory always reserved 200 bytes in inode layout,
even if inline_xattr is disabled. In order to keep inline_dentry's structure
for backward compatibility, we get the space back only from inline_data.
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Reported-by: Sheng Yong <shengyong1@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2017-09-06 16:59:50 +03:00
F2FS_FEATURE_RO_ATTR(flexible_inline_xattr, FEAT_FLEXIBLE_INLINE_XATTR);
F2FS_FEATURE_RO_ATTR(quota_ino, FEAT_QUOTA_INO);
F2FS_FEATURE_RO_ATTR(inode_crtime, FEAT_INODE_CRTIME);
F2FS_FEATURE_RO_ATTR(lost_found, FEAT_LOST_FOUND);
F2FS_FEATURE_RO_ATTR(sb_checksum, FEAT_SB_CHECKSUM);
2017-06-14 12:39:47 +03:00
# define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute * f2fs_attrs [ ] = {
2017-08-07 08:09:00 +03:00
ATTR_LIST ( gc_urgent_sleep_time ) ,
2017-06-14 12:39:47 +03:00
ATTR_LIST ( gc_min_sleep_time ) ,
ATTR_LIST ( gc_max_sleep_time ) ,
ATTR_LIST ( gc_no_gc_sleep_time ) ,
ATTR_LIST ( gc_idle ) ,
2017-08-07 08:09:00 +03:00
ATTR_LIST ( gc_urgent ) ,
2017-06-14 12:39:47 +03:00
ATTR_LIST ( reclaim_segments ) ,
ATTR_LIST ( max_small_discards ) ,
f2fs: introduce discard_granularity sysfs entry
Commit d618ebaf0aa8 ("f2fs: enable small discard by default") enables
f2fs to issue 4K size discard in real-time discard mode. However, issuing
smaller discard may cost more lifetime but releasing less free space in
flash device. Since f2fs has ability of separating hot/cold data and
garbage collection, we can expect that small-sized invalid region would
expand soon with OPU, deletion or garbage collection on valid datas, so
it's better to delay or skip issuing smaller size discards, it could help
to reduce overmuch consumption of IO bandwidth and lifetime of flash
storage.
This patch makes f2fs selectng 64K size as its default minimal
granularity, and issue discard with the size which is not smaller than
minimal granularity. Also it exposes discard granularity as sysfs entry
for configuration in different scenario.
Jaegeuk Kim:
We must issue all the accumulated discard commands when fstrim is called.
So, I've added pend_list_tag[] to indicate whether we should issue the
commands or not. If tag sets P_ACTIVE or P_TRIM, we have to issue them.
P_TRIM is set once at a time, given fstrim trigger.
In addition, issue_discard_thread is calling too much due to the number of
discard commands remaining in the pending list. I added a timer to control
it likewise gc_thread.
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2017-08-07 18:09:56 +03:00
ATTR_LIST ( discard_granularity ) ,
2017-06-14 12:39:47 +03:00
ATTR_LIST ( batched_trim_sections ) ,
ATTR_LIST ( ipu_policy ) ,
ATTR_LIST ( min_ipu_util ) ,
ATTR_LIST ( min_fsync_blocks ) ,
2018-08-10 03:53:34 +03:00
ATTR_LIST ( min_seq_blocks ) ,
2017-06-14 12:39:47 +03:00
ATTR_LIST ( min_hot_blocks ) ,
2017-10-28 11:52:33 +03:00
ATTR_LIST ( min_ssr_sections ) ,
2017-06-14 12:39:47 +03:00
ATTR_LIST ( max_victim_search ) ,
2018-10-25 11:19:28 +03:00
ATTR_LIST ( migration_granularity ) ,
2017-06-14 12:39:47 +03:00
ATTR_LIST ( dir_level ) ,
ATTR_LIST ( ram_thresh ) ,
ATTR_LIST ( ra_nid_pages ) ,
ATTR_LIST ( dirty_nats_ratio ) ,
ATTR_LIST ( cp_interval ) ,
ATTR_LIST ( idle_interval ) ,
2018-09-19 11:48:47 +03:00
ATTR_LIST ( discard_idle_interval ) ,
ATTR_LIST ( gc_idle_interval ) ,
2017-08-02 18:21:48 +03:00
ATTR_LIST ( iostat_enable ) ,
2017-11-22 13:23:38 +03:00
ATTR_LIST ( readdir_ra ) ,
2017-12-08 03:25:39 +03:00
ATTR_LIST ( gc_pin_file_thresh ) ,
2018-02-26 17:04:13 +03:00
ATTR_LIST ( extension_list ) ,
2017-06-14 12:39:47 +03:00
# ifdef CONFIG_F2FS_FAULT_INJECTION
ATTR_LIST ( inject_rate ) ,
ATTR_LIST ( inject_type ) ,
# endif
2017-10-24 10:46:54 +03:00
ATTR_LIST ( dirty_segments ) ,
2017-06-14 12:39:47 +03:00
ATTR_LIST ( lifetime_write_kbytes ) ,
2017-07-22 03:14:09 +03:00
ATTR_LIST ( features ) ,
2017-06-26 11:24:41 +03:00
ATTR_LIST ( reserved_blocks ) ,
2017-10-27 15:45:05 +03:00
ATTR_LIST ( current_reserved_blocks ) ,
2017-06-14 12:39:47 +03:00
NULL ,
} ;
2017-07-22 03:14:09 +03:00
static struct attribute * f2fs_feat_attrs [ ] = {
# ifdef CONFIG_F2FS_FS_ENCRYPTION
ATTR_LIST ( encryption ) ,
# endif
# ifdef CONFIG_BLK_DEV_ZONED
ATTR_LIST ( block_zoned ) ,
# endif
ATTR_LIST ( atomic_write ) ,
ATTR_LIST ( extra_attr ) ,
ATTR_LIST ( project_quota ) ,
ATTR_LIST ( inode_checksum ) ,
f2fs: support flexible inline xattr size
Now, in product, more and more features based on file encryption were
introduced, their demand of xattr space is increasing, however, inline
xattr has fixed-size of 200 bytes, once inline xattr space is full, new
increased xattr data would occupy additional xattr block which may bring
us more space usage and performance regression during persisting.
In order to resolve above issue, it's better to expand inline xattr size
flexibly according to user's requirement.
So this patch introduces new filesystem feature 'flexible inline xattr',
and new mount option 'inline_xattr_size=%u', once mkfs enables the
feature, we can use the option to make f2fs supporting flexible inline
xattr size.
To support this feature, we add extra attribute i_inline_xattr_size in
inode layout, indicating that how many space inline xattr borrows from
block address mapping space in inode layout, by this, we can easily
locate and store flexible-sized inline xattr data in inode.
Inode disk layout:
+----------------------+
| .i_mode |
| ... |
| .i_ext |
+----------------------+
| .i_extra_isize |
| .i_inline_xattr_size |-----------+
| ... | |
+----------------------+ |
| .i_addr | |
| - block address or | |
| - inline data | |
+----------------------+<---+ v
| inline xattr | +---inline xattr range
+----------------------+<---+
| .i_nid |
+----------------------+
| node_footer |
| (nid, ino, offset) |
+----------------------+
Note that, we have to cnosider backward compatibility which reserved
inline_data space, 200 bytes, all the time, reported by Sheng Yong.
Previous inline data or directory always reserved 200 bytes in inode layout,
even if inline_xattr is disabled. In order to keep inline_dentry's structure
for backward compatibility, we get the space back only from inline_data.
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Reported-by: Sheng Yong <shengyong1@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2017-09-06 16:59:50 +03:00
ATTR_LIST ( flexible_inline_xattr ) ,
2017-10-06 07:03:06 +03:00
ATTR_LIST ( quota_ino ) ,
2018-01-25 09:54:42 +03:00
ATTR_LIST ( inode_crtime ) ,
2018-03-15 13:51:41 +03:00
ATTR_LIST ( lost_found ) ,
2018-09-28 15:25:56 +03:00
ATTR_LIST ( sb_checksum ) ,
2017-07-22 03:14:09 +03:00
NULL ,
} ;
2017-06-14 12:39:47 +03:00
static const struct sysfs_ops f2fs_attr_ops = {
. show = f2fs_attr_show ,
. store = f2fs_attr_store ,
} ;
2017-07-22 03:14:09 +03:00
static struct kobj_type f2fs_sb_ktype = {
2017-06-14 12:39:47 +03:00
. default_attrs = f2fs_attrs ,
. sysfs_ops = & f2fs_attr_ops ,
. release = f2fs_sb_release ,
} ;
2017-07-22 03:14:09 +03:00
static struct kobj_type f2fs_ktype = {
. sysfs_ops = & f2fs_attr_ops ,
} ;
static struct kset f2fs_kset = {
. kobj = { . ktype = & f2fs_ktype } ,
} ;
static struct kobj_type f2fs_feat_ktype = {
. default_attrs = f2fs_feat_attrs ,
. sysfs_ops = & f2fs_attr_ops ,
} ;
static struct kobject f2fs_feat = {
. kset = & f2fs_kset ,
} ;
2018-07-07 06:50:57 +03:00
/*
 * /proc/fs/f2fs/<disk>/segment_info: dump the type and valid-block
 * count of every segment in the main area, ten segments per row.
 */
static int __maybe_unused segment_info_seq_show(struct seq_file *seq,
						void *offset)
{
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	unsigned int total_segs =
			le32_to_cpu(sbi->raw_super->segment_count_main);
	int i;

	seq_puts(seq, "format: segment_type|valid_blocks\n"
		"segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");

	for (i = 0; i < total_segs; i++) {
		struct seg_entry *se = get_seg_entry(sbi, i);

		/* Start a new row (prefixed with the segment number) every 10 entries. */
		if ((i % 10) == 0)
			seq_printf(seq, "%-10d", i);
		seq_printf(seq, "%d|%-3u", se->type,
					get_valid_blocks(sbi, i, false));
		if ((i % 10) == 9 || i == (total_segs - 1))
			seq_putc(seq, '\n');
		else
			seq_putc(seq, ' ');
	}

	return 0;
}
2018-07-07 06:50:57 +03:00
/*
 * /proc/fs/f2fs/<disk>/segment_bits: like segment_info, but also dumps
 * each segment's current valid-block bitmap in hex, one segment per row.
 */
static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
						void *offset)
{
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	unsigned int total_segs =
			le32_to_cpu(sbi->raw_super->segment_count_main);
	int i, j;

	seq_puts(seq, "format: segment_type|valid_blocks|bitmaps\n"
		"segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n");

	for (i = 0; i < total_segs; i++) {
		struct seg_entry *se = get_seg_entry(sbi, i);

		seq_printf(seq, "%-10d", i);
		seq_printf(seq, "%d|%-3u|", se->type,
					get_valid_blocks(sbi, i, false));
		for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++)
			seq_printf(seq, " %.2x", se->cur_valid_map[j]);
		seq_putc(seq, '\n');
	}

	return 0;
}
2018-07-07 06:50:57 +03:00
/*
 * /proc/fs/f2fs/<disk>/iostat_info: dump the accumulated write IO
 * counters, split into application-issued and filesystem-issued IO.
 * Prints nothing unless the iostat_enable sysfs knob is set.
 */
static int __maybe_unused iostat_info_seq_show(struct seq_file *seq,
					       void *offset)
{
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	time64_t now = ktime_get_real_seconds();

	if (!sbi->iostat_enable)
		return 0;

	seq_printf(seq, "time:		%-16llu\n", now);

	/* print app IOs */
	seq_printf(seq, "app buffered:	%-16llu\n",
				sbi->write_iostat[APP_BUFFERED_IO]);
	seq_printf(seq, "app direct:	%-16llu\n",
				sbi->write_iostat[APP_DIRECT_IO]);
	seq_printf(seq, "app mapped:	%-16llu\n",
				sbi->write_iostat[APP_MAPPED_IO]);

	/* print fs IOs */
	seq_printf(seq, "fs data:	%-16llu\n",
				sbi->write_iostat[FS_DATA_IO]);
	seq_printf(seq, "fs node:	%-16llu\n",
				sbi->write_iostat[FS_NODE_IO]);
	seq_printf(seq, "fs meta:	%-16llu\n",
				sbi->write_iostat[FS_META_IO]);
	seq_printf(seq, "fs gc data:	%-16llu\n",
				sbi->write_iostat[FS_GC_DATA_IO]);
	seq_printf(seq, "fs gc node:	%-16llu\n",
				sbi->write_iostat[FS_GC_NODE_IO]);
	seq_printf(seq, "fs cp data:	%-16llu\n",
				sbi->write_iostat[FS_CP_DATA_IO]);
	seq_printf(seq, "fs cp node:	%-16llu\n",
				sbi->write_iostat[FS_CP_NODE_IO]);
	seq_printf(seq, "fs cp meta:	%-16llu\n",
				sbi->write_iostat[FS_CP_META_IO]);
	seq_printf(seq, "fs discard:	%-16llu\n",
				sbi->write_iostat[FS_DISCARD]);

	return 0;
}
2018-07-23 17:10:22 +03:00
/*
 * /proc/fs/f2fs/<disk>/victim_bits: dump the victim_secmap bitmap (one
 * bit per main-area section), ten bits per row.
 */
static int __maybe_unused victim_bits_seq_show(struct seq_file *seq,
					       void *offset)
{
	struct super_block *sb = seq->private;
	struct f2fs_sb_info *sbi = F2FS_SB(sb);
	struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
	int i;

	seq_puts(seq, "format: victim_secmap bitmaps\n");

	for (i = 0; i < MAIN_SECS(sbi); i++) {
		if ((i % 10) == 0)
			seq_printf(seq, "%-10d", i);
		seq_printf(seq, "%d", test_bit(i, dirty_i->victim_secmap) ? 1 : 0);
		if ((i % 10) == 9 || i == (MAIN_SECS(sbi) - 1))
			seq_putc(seq, '\n');
		else
			seq_putc(seq, ' ');
	}

	return 0;
}
2017-07-26 21:24:13 +03:00
/*
 * Module init: register the /sys/fs/f2fs kset and the "features"
 * kobject, then create the /proc/fs/f2fs directory. On failure of the
 * features kobject the kset is unregistered before returning the error.
 */
int __init f2fs_init_sysfs(void)
{
	int ret;

	kobject_set_name(&f2fs_kset.kobj, "f2fs");
	f2fs_kset.kobj.parent = fs_kobj;
	ret = kset_register(&f2fs_kset);
	if (ret)
		return ret;

	ret = kobject_init_and_add(&f2fs_feat, &f2fs_feat_ktype,
				   NULL, "features");
	if (ret)
		kset_unregister(&f2fs_kset);
	else
		f2fs_proc_root = proc_mkdir("fs/f2fs", NULL);
	return ret;
}
2017-07-26 21:24:13 +03:00
void f2fs_exit_sysfs ( void )
2017-06-14 12:39:47 +03:00
{
2017-07-22 03:14:09 +03:00
kobject_put ( & f2fs_feat ) ;
kset_unregister ( & f2fs_kset ) ;
2017-06-14 12:39:47 +03:00
remove_proc_entry ( " fs/f2fs " , NULL ) ;
2017-07-22 03:14:09 +03:00
f2fs_proc_root = NULL ;
2017-06-14 12:39:47 +03:00
}
2017-07-26 21:24:13 +03:00
int f2fs_register_sysfs ( struct f2fs_sb_info * sbi )
2017-06-14 12:39:47 +03:00
{
struct super_block * sb = sbi - > sb ;
int err ;
2017-07-22 03:14:09 +03:00
sbi - > s_kobj . kset = & f2fs_kset ;
init_completion ( & sbi - > s_kobj_unregister ) ;
err = kobject_init_and_add ( & sbi - > s_kobj , & f2fs_sb_ktype , NULL ,
" %s " , sb - > s_id ) ;
if ( err )
return err ;
2017-06-14 12:39:47 +03:00
if ( f2fs_proc_root )
sbi - > s_proc = proc_mkdir ( sb - > s_id , f2fs_proc_root ) ;
if ( sbi - > s_proc ) {
2018-05-15 16:57:23 +03:00
proc_create_single_data ( " segment_info " , S_IRUGO , sbi - > s_proc ,
segment_info_seq_show , sb ) ;
proc_create_single_data ( " segment_bits " , S_IRUGO , sbi - > s_proc ,
segment_bits_seq_show , sb ) ;
proc_create_single_data ( " iostat_info " , S_IRUGO , sbi - > s_proc ,
iostat_info_seq_show , sb ) ;
2018-07-23 17:10:22 +03:00
proc_create_single_data ( " victim_bits " , S_IRUGO , sbi - > s_proc ,
victim_bits_seq_show , sb ) ;
2017-06-14 12:39:47 +03:00
}
return 0 ;
}
2017-07-26 21:24:13 +03:00
void f2fs_unregister_sysfs ( struct f2fs_sb_info * sbi )
2017-06-14 12:39:47 +03:00
{
if ( sbi - > s_proc ) {
2017-08-02 18:21:48 +03:00
remove_proc_entry ( " iostat_info " , sbi - > s_proc ) ;
2017-06-14 12:39:47 +03:00
remove_proc_entry ( " segment_info " , sbi - > s_proc ) ;
remove_proc_entry ( " segment_bits " , sbi - > s_proc ) ;
2018-07-23 17:10:22 +03:00
remove_proc_entry ( " victim_bits " , sbi - > s_proc ) ;
2017-06-14 12:39:47 +03:00
remove_proc_entry ( sbi - > sb - > s_id , f2fs_proc_root ) ;
}
2017-07-22 03:14:09 +03:00
kobject_del ( & sbi - > s_kobj ) ;
2017-06-14 12:39:47 +03:00
}