/*
 * fs/f2fs/checkpoint.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/f2fs_fs.h>
#include <linux/pagevec.h>
#include <linux/swap.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "trace.h"
#include <trace/events/f2fs.h>

/* slab cache backing ino_entry objects; private to this file */
static struct kmem_cache *ino_entry_slab;
/* slab cache for inode entries; non-static, presumably shared with other f2fs files — confirm against f2fs.h */
struct kmem_cache *inode_entry_slab;
2016-05-19 00:07:56 +03:00
void f2fs_stop_checkpoint ( struct f2fs_sb_info * sbi , bool end_io )
{
2016-09-20 06:04:18 +03:00
set_ckpt_flags ( sbi , CP_ERROR_FLAG ) ;
2016-05-19 00:07:56 +03:00
if ( ! end_io )
2017-05-10 21:28:38 +03:00
f2fs_flush_merged_writes ( sbi ) ;
2016-05-19 00:07:56 +03:00
}
2012-11-29 08:28:09 +04:00
/*
2012-11-02 12:08:18 +04:00
* We guarantee no failure on the returned page .
*/
struct page * grab_meta_page ( struct f2fs_sb_info * sbi , pgoff_t index )
{
2014-01-20 14:37:04 +04:00
struct address_space * mapping = META_MAPPING ( sbi ) ;
2012-11-02 12:08:18 +04:00
struct page * page = NULL ;
repeat :
2016-04-30 02:11:53 +03:00
page = f2fs_grab_cache_page ( mapping , index , false ) ;
2012-11-02 12:08:18 +04:00
if ( ! page ) {
cond_resched ( ) ;
goto repeat ;
}
2016-01-20 18:43:51 +03:00
f2fs_wait_on_page_writeback ( page , META , true ) ;
2016-07-01 04:49:15 +03:00
if ( ! PageUptodate ( page ) )
SetPageUptodate ( page ) ;
2012-11-02 12:08:18 +04:00
return page ;
}
2012-11-29 08:28:09 +04:00
/*
2012-11-02 12:08:18 +04:00
* We guarantee no failure on the returned page .
*/
2015-10-12 12:04:21 +03:00
static struct page * __get_meta_page ( struct f2fs_sb_info * sbi , pgoff_t index ,
bool is_meta )
2012-11-02 12:08:18 +04:00
{
2014-01-20 14:37:04 +04:00
struct address_space * mapping = META_MAPPING ( sbi ) ;
2012-11-02 12:08:18 +04:00
struct page * page ;
2014-12-18 06:33:13 +03:00
struct f2fs_io_info fio = {
2015-04-24 00:38:15 +03:00
. sbi = sbi ,
2014-12-18 06:33:13 +03:00
. type = META ,
2016-06-05 22:31:55 +03:00
. op = REQ_OP_READ ,
2016-11-01 16:40:10 +03:00
. op_flags = REQ_META | REQ_PRIO ,
f2fs: trace old block address for CoWed page
This patch enables to trace old block address of CoWed page for better
debugging.
f2fs_submit_page_mbio: dev = (1,0), ino = 1, page_index = 0x1d4f0, oldaddr = 0xfe8ab, newaddr = 0xfee90 rw = WRITE_SYNC, type = NODE
f2fs_submit_page_mbio: dev = (1,0), ino = 1, page_index = 0x1d4f8, oldaddr = 0xfe8b0, newaddr = 0xfee91 rw = WRITE_SYNC, type = NODE
f2fs_submit_page_mbio: dev = (1,0), ino = 1, page_index = 0x1d4fa, oldaddr = 0xfe8ae, newaddr = 0xfee92 rw = WRITE_SYNC, type = NODE
f2fs_submit_page_mbio: dev = (1,0), ino = 134824, page_index = 0x96, oldaddr = 0xf049b, newaddr = 0x2bbe rw = WRITE, type = DATA
f2fs_submit_page_mbio: dev = (1,0), ino = 134824, page_index = 0x97, oldaddr = 0xf049c, newaddr = 0x2bbf rw = WRITE, type = DATA
f2fs_submit_page_mbio: dev = (1,0), ino = 134824, page_index = 0x98, oldaddr = 0xf049d, newaddr = 0x2bc0 rw = WRITE, type = DATA
f2fs_submit_page_mbio: dev = (1,0), ino = 135260, page_index = 0x47, oldaddr = 0xffffffff, newaddr = 0xf2631 rw = WRITE, type = DATA
f2fs_submit_page_mbio: dev = (1,0), ino = 135260, page_index = 0x48, oldaddr = 0xffffffff, newaddr = 0xf2632 rw = WRITE, type = DATA
f2fs_submit_page_mbio: dev = (1,0), ino = 135260, page_index = 0x49, oldaddr = 0xffffffff, newaddr = 0xf2633 rw = WRITE, type = DATA
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2016-02-22 13:36:38 +03:00
. old_blkaddr = index ,
. new_blkaddr = index ,
2015-04-23 22:04:33 +03:00
. encrypted_page = NULL ,
2014-12-18 06:33:13 +03:00
} ;
2015-10-12 12:04:21 +03:00
if ( unlikely ( ! is_meta ) )
2016-06-05 22:31:55 +03:00
fio . op_flags & = ~ REQ_META ;
2012-11-02 12:08:18 +04:00
repeat :
2016-04-30 02:11:53 +03:00
page = f2fs_grab_cache_page ( mapping , index , false ) ;
2012-11-02 12:08:18 +04:00
if ( ! page ) {
cond_resched ( ) ;
goto repeat ;
}
2013-03-08 16:29:23 +04:00
if ( PageUptodate ( page ) )
goto out ;
2015-04-24 00:38:15 +03:00
fio . page = page ;
2015-07-15 23:08:21 +03:00
if ( f2fs_submit_page_bio ( & fio ) ) {
f2fs_put_page ( page , 1 ) ;
2012-11-02 12:08:18 +04:00
goto repeat ;
2015-07-15 23:08:21 +03:00
}
2012-11-02 12:08:18 +04:00
2013-03-08 16:29:23 +04:00
lock_page ( page ) ;
2013-12-06 10:00:58 +04:00
if ( unlikely ( page - > mapping ! = mapping ) ) {
2013-04-26 06:55:17 +04:00
f2fs_put_page ( page , 1 ) ;
goto repeat ;
}
2015-07-29 12:33:13 +03:00
/*
* if there is any IO error when accessing device , make our filesystem
* readonly and make sure do not write checkpoint with non - uptodate
* meta page .
*/
if ( unlikely ( ! PageUptodate ( page ) ) )
2016-05-19 00:07:56 +03:00
f2fs_stop_checkpoint ( sbi , false ) ;
2013-03-08 16:29:23 +04:00
out :
2012-11-02 12:08:18 +04:00
return page ;
}
/* read a meta-area page with the REQ_META hint set (see __get_meta_page) */
struct page *get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	return __get_meta_page(sbi, index, true);
}
/* for POR only: same as get_meta_page() but without the REQ_META hint */
struct page *get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
	return __get_meta_page(sbi, index, false);
}
2015-04-18 13:05:36 +03:00
bool is_valid_blkaddr ( struct f2fs_sb_info * sbi , block_t blkaddr , int type )
2014-02-07 12:11:53 +04:00
{
switch ( type ) {
case META_NAT :
2014-12-08 09:59:17 +03:00
break ;
2014-02-07 12:11:53 +04:00
case META_SIT :
2014-12-08 09:59:17 +03:00
if ( unlikely ( blkaddr > = SIT_BLK_CNT ( sbi ) ) )
return false ;
break ;
2014-02-27 15:12:24 +04:00
case META_SSA :
2014-12-08 09:59:17 +03:00
if ( unlikely ( blkaddr > = MAIN_BLKADDR ( sbi ) | |
blkaddr < SM_I ( sbi ) - > ssa_blkaddr ) )
return false ;
break ;
2014-02-07 12:11:53 +04:00
case META_CP :
2014-12-08 09:59:17 +03:00
if ( unlikely ( blkaddr > = SIT_I ( sbi ) - > sit_base_addr | |
blkaddr < __start_cp_addr ( sbi ) ) )
return false ;
break ;
2014-09-12 00:49:55 +04:00
case META_POR :
2014-12-08 09:59:17 +03:00
if ( unlikely ( blkaddr > = MAX_BLKADDR ( sbi ) | |
blkaddr < MAIN_BLKADDR ( sbi ) ) )
return false ;
break ;
2014-02-07 12:11:53 +04:00
default :
BUG ( ) ;
}
2014-12-08 09:59:17 +03:00
return true ;
2014-02-07 12:11:53 +04:00
}
/*
 * Readahead CP/NAT/SIT/SSA pages
 *
 * Issues read bios for up to @nrpages meta blocks starting at @start,
 * translating logical indexes to disk addresses per @type.  @sync selects
 * high-priority meta reads versus plain readahead.  Returns the number of
 * blocks walked (including ones skipped because they were already cached).
 */
int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
							int type, bool sync)
{
	struct page *page;
	block_t blkno = start;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = META,
		.op = REQ_OP_READ,
		.op_flags = sync ? (REQ_META | REQ_PRIO) : REQ_RAHEAD,
		.encrypted_page = NULL,
		.in_list = false,
	};
	struct blk_plug plug;

	/* POR blocks are ordinary main-area blocks, not meta proper */
	if (unlikely(type == META_POR))
		fio.op_flags &= ~REQ_META;

	/* plug so consecutive reads can be merged into larger bios */
	blk_start_plug(&plug);
	for (; nrpages-- > 0; blkno++) {
		if (!is_valid_blkaddr(sbi, blkno, type))
			goto out;

		switch (type) {
		case META_NAT:
			/* NAT indexes wrap back to zero past max_nid */
			if (unlikely(blkno >=
					NAT_BLOCK_OFFSET(NM_I(sbi)->max_nid)))
				blkno = 0;
			/* get nat block addr */
			fio.new_blkaddr = current_nat_addr(sbi,
					blkno * NAT_ENTRY_PER_BLOCK);
			break;
		case META_SIT:
			/* get sit block addr */
			fio.new_blkaddr = current_sit_addr(sbi,
					blkno * SIT_ENTRY_PER_BLOCK);
			break;
		case META_SSA:
		case META_CP:
		case META_POR:
			/* these areas are addressed directly by block number */
			fio.new_blkaddr = blkno;
			break;
		default:
			BUG();
		}

		page = f2fs_grab_cache_page(META_MAPPING(sbi),
						fio.new_blkaddr, false);
		if (!page)
			continue;
		if (PageUptodate(page)) {
			f2fs_put_page(page, 1);
			continue;
		}

		fio.page = page;
		f2fs_submit_page_bio(&fio);
		/* drop our reference only; bio completion unlocks the page */
		f2fs_put_page(page, 0);
	}
out:
	blk_finish_plug(&plug);
	return blkno - start;
}
2014-12-08 10:02:52 +03:00
void ra_meta_pages_cond ( struct f2fs_sb_info * sbi , pgoff_t index )
{
struct page * page ;
bool readahead = false ;
page = find_get_page ( META_MAPPING ( sbi ) , index ) ;
2016-04-06 21:27:03 +03:00
if ( ! page | | ! PageUptodate ( page ) )
2014-12-08 10:02:52 +03:00
readahead = true ;
f2fs_put_page ( page , 0 ) ;
if ( readahead )
2016-10-18 21:07:45 +03:00
ra_meta_pages ( sbi , index , BIO_MAX_PAGES , META_POR , true ) ;
2014-12-08 10:02:52 +03:00
}
/*
 * Write back one dirty meta page.
 *
 * Redirties the page (returning AOP_WRITEPAGE_ACTIVATE) when recovery is
 * in progress, when a reclaim-driven write targets a block below the
 * summary area, or when a checkpoint error has already been raised —
 * after a CP error no meta page must reach disk.
 */
static int __f2fs_write_meta_page(struct page *page,
				struct writeback_control *wbc,
				enum iostat_type io_type)
{
	struct f2fs_sb_info *sbi = F2FS_P_SB(page);

	trace_f2fs_writepage(page, META);

	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
		goto redirty_out;
	if (unlikely(f2fs_cp_error(sbi)))
		goto redirty_out;

	write_meta_page(sbi, page, io_type);
	dec_page_count(sbi, F2FS_DIRTY_META);

	/* under reclaim, push only the merged writes covering this page */
	if (wbc->for_reclaim)
		f2fs_submit_merged_write_cond(sbi, page->mapping->host,
						0, page->index, META);

	unlock_page(page);

	/* a CP error raised during the write: flush everything merged so far */
	if (unlikely(f2fs_cp_error(sbi)))
		f2fs_submit_merged_write(sbi, META);

	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}
/* ->writepage entry point: account the write as FS_META_IO */
static int f2fs_write_meta_page(struct page *page,
				struct writeback_control *wbc)
{
	return __f2fs_write_meta_page(page, wbc, FS_META_IO);
}
2012-11-02 12:08:18 +04:00
static int f2fs_write_meta_pages ( struct address_space * mapping ,
struct writeback_control * wbc )
{
2014-09-03 02:31:18 +04:00
struct f2fs_sb_info * sbi = F2FS_M_SB ( mapping ) ;
2014-03-18 08:47:11 +04:00
long diff , written ;
2012-11-02 12:08:18 +04:00
2017-06-29 18:20:45 +03:00
if ( unlikely ( is_sbi_flag_set ( sbi , SBI_POR_DOING ) ) )
goto skip_write ;
2013-12-17 12:28:41 +04:00
/* collect a number of dirty meta pages and write together */
2014-03-18 08:47:11 +04:00
if ( wbc - > for_kupdate | |
get_pages ( sbi , F2FS_DIRTY_META ) < nr_pages_to_skip ( sbi , META ) )
2014-03-18 08:43:05 +04:00
goto skip_write ;
2012-11-02 12:08:18 +04:00
2017-03-01 13:07:10 +03:00
/* if locked failed, cp will flush dirty pages instead */
if ( ! mutex_trylock ( & sbi - > cp_mutex ) )
goto skip_write ;
2016-02-04 11:14:00 +03:00
2017-03-01 13:07:10 +03:00
trace_f2fs_writepages ( mapping - > host , wbc , META ) ;
2014-03-18 08:47:11 +04:00
diff = nr_pages_to_write ( sbi , META , wbc ) ;
2017-08-02 18:21:48 +03:00
written = sync_meta_pages ( sbi , META , wbc - > nr_to_write , FS_META_IO ) ;
2012-11-02 12:08:18 +04:00
mutex_unlock ( & sbi - > cp_mutex ) ;
2014-03-18 08:47:11 +04:00
wbc - > nr_to_write = max ( ( long ) 0 , wbc - > nr_to_write - written - diff ) ;
2012-11-02 12:08:18 +04:00
return 0 ;
2014-03-18 08:43:05 +04:00
skip_write :
wbc - > pages_skipped + = get_pages ( sbi , F2FS_DIRTY_META ) ;
2016-02-04 11:14:00 +03:00
trace_f2fs_writepages ( mapping - > host , wbc , META ) ;
2014-03-18 08:43:05 +04:00
return 0 ;
2012-11-02 12:08:18 +04:00
}
/*
 * Write back dirty meta pages, in page-index order, merging IOs as much
 * as possible.  Returns the number of pages submitted.
 *
 * @sbi:         f2fs superblock info
 * @type:        page type used for submitting the merged write at the end
 * @nr_to_write: cap on pages to write; when not LONG_MAX (i.e. background
 *               writeback), writeback stops at the first non-contiguous
 *               page index so the resulting bio stays physically merged
 * @io_type:     iostat accounting class passed down to the writepage path
 */
long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
				long nr_to_write, enum iostat_type io_type)
{
	struct address_space *mapping = META_MAPPING(sbi);
	/* prev == ULONG_MAX means "no page written yet" */
	pgoff_t index = 0, prev = ULONG_MAX;
	struct pagevec pvec;
	long nwritten = 0;
	int nr_pages;
	struct writeback_control wbc = {
		.for_reclaim = 0,
	};
	struct blk_plug plug;

	pagevec_init(&pvec);

	/* plug so consecutive meta writes are merged into large bios */
	blk_start_plug(&plug);

	while ((nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
				PAGECACHE_TAG_DIRTY))) {
		int i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (prev == ULONG_MAX)
				prev = page->index - 1;
			/*
			 * For bounded (background) writeback, stop at the
			 * first gap in the index sequence to keep the
			 * submitted IO contiguous.
			 */
			if (nr_to_write != LONG_MAX && page->index != prev + 1) {
				pagevec_release(&pvec);
				goto stop;
			}

			lock_page(page);

			/* page was truncated/migrated while unlocked */
			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}
			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			f2fs_wait_on_page_writeback(page, META, true);

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			/* on write failure, stop this batch entirely */
			if (__f2fs_write_meta_page(page, &wbc, io_type)) {
				unlock_page(page);
				break;
			}
			nwritten++;
			prev = page->index;
			if (unlikely(nwritten >= nr_to_write))
				break;
		}
		pagevec_release(&pvec);
		cond_resched();
	}
stop:
	if (nwritten)
		f2fs_submit_merged_write(sbi, type);

	blk_finish_plug(&plug);

	return nwritten;
}
static int f2fs_set_meta_page_dirty ( struct page * page )
{
2013-10-24 12:53:29 +04:00
trace_f2fs_set_page_dirty ( page , META ) ;
2016-07-01 04:49:15 +03:00
if ( ! PageUptodate ( page ) )
SetPageUptodate ( page ) ;
2012-11-02 12:08:18 +04:00
if ( ! PageDirty ( page ) ) {
2016-07-01 04:40:10 +03:00
f2fs_set_page_dirty_nobuffers ( page ) ;
2014-09-03 02:31:18 +04:00
inc_page_count ( F2FS_P_SB ( page ) , F2FS_DIRTY_META ) ;
2015-01-19 15:24:37 +03:00
SetPagePrivate ( page ) ;
2014-12-18 06:58:58 +03:00
f2fs_trace_pid ( page ) ;
2012-11-02 12:08:18 +04:00
return 1 ;
}
return 0 ;
}
/* address_space operations backing META_MAPPING(sbi) */
const struct address_space_operations f2fs_meta_aops = {
	.writepage	= f2fs_write_meta_page,
	.writepages	= f2fs_write_meta_pages,
	.set_page_dirty	= f2fs_set_meta_page_dirty,
	.invalidatepage	= f2fs_invalidate_page,
	.releasepage	= f2fs_release_page,
#ifdef CONFIG_MIGRATION
	.migratepage	= f2fs_migrate_page,
#endif
};
2017-09-29 08:59:38 +03:00
/*
 * Insert @ino into the per-type ino management tree/list, creating an
 * entry if none exists.  For FLUSH_INO entries, additionally record
 * @devidx in the entry's dirty-device bitmap.
 *
 * The slab object and the radix-tree node are both pre-allocated before
 * taking im->ino_lock so no allocation happens under the spinlock; with
 * __GFP_NOFAIL the preload cannot fail, making insertion infallible.
 */
static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e, *tmp;

	tmp = f2fs_kmem_cache_alloc(ino_entry_slab, GFP_NOFS);

	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	if (!e) {
		e = tmp;
		/* cannot fail: node was preloaded and ino is absent */
		if (unlikely(radix_tree_insert(&im->ino_root, ino, e)))
			f2fs_bug_on(sbi, 1);

		memset(e, 0, sizeof(struct ino_entry));
		e->ino = ino;

		list_add_tail(&e->list, &im->ino_list);
		/* orphan count is managed by acquire/release_orphan_inode */
		if (type != ORPHAN_INO)
			im->ino_num++;
	}

	if (type == FLUSH_INO)
		f2fs_set_bit(devidx, (char *)&e->dirty_device);

	spin_unlock(&im->ino_lock);
	radix_tree_preload_end();

	/* entry already existed: drop the speculative allocation */
	if (e != tmp)
		kmem_cache_free(ino_entry_slab, tmp);
}
2014-07-26 02:47:17 +04:00
/*
 * Remove @ino from the per-type ino management tree/list, if present,
 * and free its entry.  The slab free is done outside im->ino_lock.
 */
static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e;

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	if (!e) {
		spin_unlock(&im->ino_lock);
		return;
	}

	list_del(&e->list);
	radix_tree_delete(&im->ino_root, ino);
	im->ino_num--;
	spin_unlock(&im->ino_lock);

	kmem_cache_free(ino_entry_slab, e);
}
2015-12-15 08:29:47 +03:00
/* Public wrapper: add @ino to the @type list (no dirty-device index). */
void add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	/* add new dirty ino entry into list */
	__add_ino_entry(sbi, ino, 0, type);
}
2015-12-15 08:29:47 +03:00
/* Public wrapper: drop @ino from the @type list if it is tracked. */
void remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
{
	/* remove dirty ino entry from list */
	__remove_ino_entry(sbi, ino, type);
}
/* mode should be APPEND_INO or UPDATE_INO */
/*
 * Report whether @ino is tracked in the @mode ino list, i.e. whether
 * written data exists for it.  Lookup is done under im->ino_lock.
 */
bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode)
{
	struct inode_management *im = &sbi->im[mode];
	struct ino_entry *e;

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	spin_unlock(&im->ino_lock);

	return e != NULL;
}
2016-05-03 08:09:56 +03:00
/*
 * Free every tracked ino entry.  When @all is true the sweep starts at
 * ORPHAN_INO (i.e. covers all types up to MAX_INO_ENTRY); otherwise it
 * starts at APPEND_INO, leaving orphan entries in place.
 */
void release_ino_entry(struct f2fs_sb_info *sbi, bool all)
{
	struct ino_entry *e, *tmp;
	int i;

	for (i = all ? ORPHAN_INO : APPEND_INO; i < MAX_INO_ENTRY; i++) {
		struct inode_management *im = &sbi->im[i];

		spin_lock(&im->ino_lock);
		/* _safe variant: entries are freed while walking */
		list_for_each_entry_safe(e, tmp, &im->ino_list, list) {
			list_del(&e->list);
			radix_tree_delete(&im->ino_root, e->ino);
			kmem_cache_free(ino_entry_slab, e);
			im->ino_num--;
		}
		spin_unlock(&im->ino_lock);
	}
}
2017-09-29 08:59:38 +03:00
/* Record that device @devidx holds dirty data for @ino in the @type list. */
void set_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type)
{
	__add_ino_entry(sbi, ino, devidx, type);
}
/*
 * Check whether @ino has dirty data recorded against device @devidx in
 * the @type list.  Returns false if the ino is not tracked at all.
 */
bool is_dirty_device(struct f2fs_sb_info *sbi, nid_t ino,
					unsigned int devidx, int type)
{
	struct inode_management *im = &sbi->im[type];
	struct ino_entry *e;
	bool dirty;

	spin_lock(&im->ino_lock);
	e = radix_tree_lookup(&im->ino_root, ino);
	dirty = e && f2fs_test_bit(devidx, (char *)&e->dirty_device);
	spin_unlock(&im->ino_lock);

	return dirty;
}
2013-07-30 06:36:53 +04:00
/*
 * Reserve one orphan-inode slot.  Returns 0 on success or -ENOSPC when
 * the number of orphans would exceed sbi->max_orphans (or when fault
 * injection forces a failure).  Paired with release_orphan_inode() or
 * consumed by a subsequent __add_ino_entry(..., ORPHAN_INO).
 */
int acquire_orphan_inode(struct f2fs_sb_info *sbi)
{
	struct inode_management *im = &sbi->im[ORPHAN_INO];
	int err = 0;

	spin_lock(&im->ino_lock);

#ifdef CONFIG_F2FS_FAULT_INJECTION
	if (time_to_inject(sbi, FAULT_ORPHAN)) {
		spin_unlock(&im->ino_lock);
		f2fs_show_injection_info(FAULT_ORPHAN);
		return -ENOSPC;
	}
#endif
	if (unlikely(im->ino_num >= sbi->max_orphans))
		err = -ENOSPC;
	else
		im->ino_num++;
	spin_unlock(&im->ino_lock);

	return err;
}
2013-07-30 06:36:53 +04:00
/*
 * Give back an orphan slot previously reserved by acquire_orphan_inode().
 * Bugs out if the counter would underflow.
 */
void release_orphan_inode(struct f2fs_sb_info *sbi)
{
	struct inode_management *im = &sbi->im[ORPHAN_INO];

	spin_lock(&im->ino_lock);
	f2fs_bug_on(sbi, im->ino_num == 0);
	im->ino_num--;
	spin_unlock(&im->ino_lock);
}
2016-06-14 04:27:02 +03:00
/*
 * Register @inode as an orphan and sync its on-disk inode page so the
 * orphan state survives a checkpoint.
 */
void add_orphan_inode(struct inode *inode)
{
	/* add new orphan ino entry into list */
	__add_ino_entry(F2FS_I_SB(inode), inode->i_ino, 0, ORPHAN_INO);
	update_inode_page(inode);
}
/* Unregister @ino from the orphan list. */
void remove_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	/* remove orphan entry from orphan list */
	__remove_ino_entry(sbi, ino, ORPHAN_INO);
}
2015-08-07 12:58:43 +03:00
/*
 * Recover a single orphan inode recorded in the checkpoint: iget it,
 * drop its link count, and let iput() truncate and evict it.  Afterwards
 * verify via the NAT that the inode block was really released.
 *
 * Returns 0 on success; on failure sets SBI_NEED_FSCK and returns a
 * negative errno (the orphan ino entry is intentionally left in place
 * on the -EIO path so state stays visible for debugging/fsck).
 */
static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
{
	struct inode *inode;
	struct node_info ni;
	int err = acquire_orphan_inode(sbi);

	if (err) {
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
				"%s: orphan failed (ino=%x), run fsck to fix.",
				__func__, ino);
		return err;
	}

	__add_ino_entry(sbi, ino, 0, ORPHAN_INO);

	inode = f2fs_iget_retry(sbi->sb, ino);
	if (IS_ERR(inode)) {
		/*
		 * there should be a bug that we can't find the entry
		 * to orphan inode.
		 */
		f2fs_bug_on(sbi, PTR_ERR(inode) == -ENOENT);
		return PTR_ERR(inode);
	}

	clear_nlink(inode);

	/* truncate all the data during iput */
	iput(inode);

	get_node_info(sbi, ino, &ni);

	/* ENOMEM was fully retried in f2fs_evict_inode. */
	if (ni.blk_addr != NULL_ADDR) {
		/* eviction did not free the node block: ask for a remount */
		set_sbi_flag(sbi, SBI_NEED_FSCK);
		f2fs_msg(sbi->sb, KERN_WARNING,
			"%s: orphan failed (ino=%x) by kernel, retry mount.",
				__func__, ino);
		return -EIO;
	}
	__remove_ino_entry(sbi, ino, ORPHAN_INO);
	return 0;
}
2015-08-07 12:58:43 +03:00
/*
 * At mount time, walk the orphan blocks stored in the valid checkpoint
 * pack and dispose of every recorded orphan inode.
 *
 * A read-only mount is temporarily flipped writable (and, with quota
 * enabled, SB_ACTIVE is set and quota files are turned on) so that
 * iput()-driven truncation works; the original s_flags are restored on
 * exit.  Returns 0 on success or the first recovery error.
 */
int recover_orphan_inodes(struct f2fs_sb_info *sbi)
{
	block_t start_blk, orphan_blocks, i, j;
	unsigned int s_flags = sbi->sb->s_flags;
	int err = 0;
#ifdef CONFIG_QUOTA
	int quota_enabled;
#endif

	/* nothing to do unless the checkpoint recorded orphans */
	if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
		return 0;

	if (s_flags & SB_RDONLY) {
		f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
		sbi->sb->s_flags &= ~SB_RDONLY;
	}

#ifdef CONFIG_QUOTA
	/* Needed for iput() to work correctly and not trash data */
	sbi->sb->s_flags |= SB_ACTIVE;

	/* Turn on quotas so that they are updated correctly */
	quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif

	/* orphan blocks live right after the checkpoint header + payload */
	start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
	orphan_blocks = __start_sum_addr(sbi) - 1 - __cp_payload(sbi);

	ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);

	for (i = 0; i < orphan_blocks; i++) {
		struct page *page = get_meta_page(sbi, start_blk + i);
		struct f2fs_orphan_block *orphan_blk;

		orphan_blk = (struct f2fs_orphan_block *)page_address(page);
		for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
			nid_t ino = le32_to_cpu(orphan_blk->ino[j]);

			err = recover_orphan_inode(sbi, ino);
			if (err) {
				f2fs_put_page(page, 1);
				goto out;
			}
		}
		f2fs_put_page(page, 1);
	}
	/* clear Orphan Flag */
	clear_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG);
out:
#ifdef CONFIG_QUOTA
	/* Turn quotas off */
	if (quota_enabled)
		f2fs_quota_off_umount(sbi->sb);
#endif
	sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */

	return err;
}
/*
 * Serialize the current orphan ino list into on-disk orphan blocks
 * starting at @start_blk, F2FS_ORPHANS_PER_BLOCK entries per block.
 * Each block records its 1-based index and the total block count so the
 * set can be validated at recovery time.
 */
static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk)
{
	struct list_head *head;
	struct f2fs_orphan_block *orphan_blk = NULL;
	unsigned int nentries = 0;
	unsigned short index = 1;
	unsigned short orphan_blocks;
	struct page *page = NULL;
	struct ino_entry *orphan = NULL;
	struct inode_management *im = &sbi->im[ORPHAN_INO];

	orphan_blocks = GET_ORPHAN_BLOCKS(im->ino_num);

	/*
	 * we don't need to do spin_lock(&im->ino_lock) here, since all the
	 * orphan inode operations are covered under f2fs_lock_op().
	 * And, spin_lock should be avoided due to page operations below.
	 */
	head = &im->ino_list;

	/* loop for each orphan inode entry and write them in journal block */
	list_for_each_entry(orphan, head, list) {
		if (!page) {
			page = grab_meta_page(sbi, start_blk++);
			orphan_blk =
				(struct f2fs_orphan_block *)page_address(page);
			memset(orphan_blk, 0, sizeof(*orphan_blk));
		}

		orphan_blk->ino[nentries++] = cpu_to_le32(orphan->ino);

		if (nentries == F2FS_ORPHANS_PER_BLOCK) {
			/*
			 * an orphan block is full of 1020 entries,
			 * then we need to flush current orphan blocks
			 * and bring another one in memory
			 */
			orphan_blk->blk_addr = cpu_to_le16(index);
			orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
			orphan_blk->entry_count = cpu_to_le32(nentries);
			set_page_dirty(page);
			f2fs_put_page(page, 1);
			index++;
			nentries = 0;
			page = NULL;
		}
	}

	/* flush the last, partially-filled block */
	if (page) {
		orphan_blk->blk_addr = cpu_to_le16(index);
		orphan_blk->blk_count = cpu_to_le16(orphan_blocks);
		orphan_blk->entry_count = cpu_to_le32(nentries);
		set_page_dirty(page);
		f2fs_put_page(page, 1);
	}
}
2016-09-30 03:24:53 +03:00
/*
 * Read the checkpoint block at @cp_addr, validate its CRC, and return
 * its version through @version.  On both success and failure *cp_page
 * holds a referenced meta page; the CALLER is responsible for putting
 * it (validate_checkpoint() does so via its invalid_cp labels).
 *
 * Returns 0 on success, -EINVAL on a bad checksum offset or CRC.
 */
static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
		struct f2fs_checkpoint **cp_block, struct page **cp_page,
		unsigned long long *version)
{
	unsigned long blk_size = sbi->blocksize;
	size_t crc_offset = 0;
	__u32 crc = 0;

	*cp_page = get_meta_page(sbi, cp_addr);
	*cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);

	crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
	/* the 32-bit CRC itself must fit inside the block */
	if (crc_offset > (blk_size - sizeof(__le32))) {
		f2fs_msg(sbi->sb, KERN_WARNING,
			"invalid crc_offset: %zu", crc_offset);
		return -EINVAL;
	}

	crc = cur_cp_crc(*cp_block);
	if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) {
		f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
		return -EINVAL;
	}

	*version = cur_cp_version(*cp_block);
	return 0;
}
2012-11-02 12:08:18 +04:00
2016-09-30 03:24:53 +03:00
/*
 * Validate one checkpoint pack: a pack is valid only when the header
 * block at @cp_addr and the footer block (header + total_block_count - 1)
 * both pass their CRC check AND carry the same version number.
 *
 * On success returns the referenced header page and stores the version
 * in *version; on any failure returns NULL with all pages released.
 */
static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
				block_t cp_addr, unsigned long long *version)
{
	struct page *cp_page_1 = NULL, *cp_page_2 = NULL;
	struct f2fs_checkpoint *cp_block = NULL;
	unsigned long long cur_version = 0, pre_version = 0;
	int err;

	err = get_checkpoint_version(sbi, cp_addr, &cp_block,
					&cp_page_1, version);
	if (err)
		goto invalid_cp1;
	pre_version = *version;

	/* second copy of the version lives in the pack's last block */
	cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
	err = get_checkpoint_version(sbi, cp_addr, &cp_block,
					&cp_page_2, version);
	if (err)
		goto invalid_cp2;
	cur_version = *version;

	if (cur_version == pre_version) {
		*version = cur_version;
		f2fs_put_page(cp_page_2, 1);
		return cp_page_1;
	}
invalid_cp2:
	f2fs_put_page(cp_page_2, 1);
invalid_cp1:
	f2fs_put_page(cp_page_1, 1);
	return NULL;
}
/*
 * Load the newer of the two checkpoint packs into sbi->ckpt (the in-core
 * checkpoint, cp_blks blocks long including the payload), record which
 * pack was chosen in sbi->cur_cp_pack, and sanity-check the result.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL when no
 * valid checkpoint pack exists or the sanity check fails.
 */
int get_valid_checkpoint(struct f2fs_sb_info *sbi)
{
	struct f2fs_checkpoint *cp_block;
	struct f2fs_super_block *fsb = sbi->raw_super;
	struct page *cp1, *cp2, *cur_page;
	unsigned long blk_size = sbi->blocksize;
	unsigned long long cp1_version = 0, cp2_version = 0;
	unsigned long long cp_start_blk_no;
	unsigned int cp_blks = 1 + __cp_payload(sbi);
	block_t cp_blk_no;
	int i;

	sbi->ckpt = f2fs_kzalloc(sbi, cp_blks * blk_size, GFP_KERNEL);
	if (!sbi->ckpt)
		return -ENOMEM;
	/*
	 * Finding out valid cp block involves read both
	 * sets (cp pack 1 and cp pack 2)
	 */
	cp_start_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	cp1 = validate_checkpoint(sbi, cp_start_blk_no, &cp1_version);

	/* The second checkpoint pack should start at the next segment */
	cp_start_blk_no += ((unsigned long long)1) <<
				le32_to_cpu(fsb->log_blocks_per_seg);
	cp2 = validate_checkpoint(sbi, cp_start_blk_no, &cp2_version);

	/* prefer the pack with the newer version; fail if neither is valid */
	if (cp1 && cp2) {
		if (ver_after(cp2_version, cp1_version))
			cur_page = cp2;
		else
			cur_page = cp1;
	} else if (cp1) {
		cur_page = cp1;
	} else if (cp2) {
		cur_page = cp2;
	} else {
		goto fail_no_cp;
	}

	cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
	memcpy(sbi->ckpt, cp_block, blk_size);

	/* Sanity checking of checkpoint */
	if (sanity_check_ckpt(sbi))
		goto free_fail_no_cp;

	if (cur_page == cp1)
		sbi->cur_cp_pack = 1;
	else
		sbi->cur_cp_pack = 2;

	if (cp_blks <= 1)
		goto done;

	/* copy the checkpoint payload blocks that follow the header */
	cp_blk_no = le32_to_cpu(fsb->cp_blkaddr);
	if (cur_page == cp2)
		cp_blk_no += 1 << le32_to_cpu(fsb->log_blocks_per_seg);

	for (i = 1; i < cp_blks; i++) {
		void *sit_bitmap_ptr;
		unsigned char *ckpt = (unsigned char *)sbi->ckpt;

		cur_page = get_meta_page(sbi, cp_blk_no + i);
		sit_bitmap_ptr = page_address(cur_page);
		memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
		f2fs_put_page(cur_page, 1);
	}
done:
	f2fs_put_page(cp1, 1);
	f2fs_put_page(cp2, 1);
	return 0;

free_fail_no_cp:
	f2fs_put_page(cp1, 1);
	f2fs_put_page(cp2, 1);
fail_no_cp:
	kfree(sbi->ckpt);
	return -EINVAL;
}
2015-12-16 08:09:20 +03:00
static void __add_dirty_inode ( struct inode * inode , enum inode_type type )
2012-11-02 12:08:18 +04:00
{
2014-09-03 02:31:18 +04:00
struct f2fs_sb_info * sbi = F2FS_I_SB ( inode ) ;
2015-12-16 08:09:20 +03:00
int flag = ( type = = DIR_INODE ) ? FI_DIRTY_DIR : FI_DIRTY_FILE ;
2012-11-02 12:08:18 +04:00
2016-05-20 20:13:22 +03:00
if ( is_inode_flag_set ( inode , flag ) )
2015-12-15 08:30:45 +03:00
return ;
2014-03-29 07:33:17 +04:00
2016-05-20 20:13:22 +03:00
set_inode_flag ( inode , flag ) ;
2017-03-22 12:23:46 +03:00
if ( ! f2fs_is_volatile_file ( inode ) )
list_add_tail ( & F2FS_I ( inode ) - > dirty_list ,
& sbi - > inode_list [ type ] ) ;
2015-12-17 12:14:44 +03:00
stat_inc_dirty_inode ( sbi , type ) ;
2013-06-05 12:42:45 +04:00
}
2015-12-16 08:09:20 +03:00
static void __remove_dirty_inode ( struct inode * inode , enum inode_type type )
2015-12-15 08:31:40 +03:00
{
2015-12-16 08:09:20 +03:00
int flag = ( type = = DIR_INODE ) ? FI_DIRTY_DIR : FI_DIRTY_FILE ;
2015-12-15 08:31:40 +03:00
2016-05-20 20:13:22 +03:00
if ( get_dirty_pages ( inode ) | | ! is_inode_flag_set ( inode , flag ) )
2015-12-15 08:31:40 +03:00
return ;
2016-05-20 20:13:22 +03:00
list_del_init ( & F2FS_I ( inode ) - > dirty_list ) ;
clear_inode_flag ( inode , flag ) ;
2015-12-17 12:14:44 +03:00
stat_dec_dirty_inode ( F2FS_I_SB ( inode ) , type ) ;
2015-12-15 08:31:40 +03:00
}
2014-09-13 02:53:45 +04:00
void update_dirty_page ( struct inode * inode , struct page * page )
2013-06-05 12:42:45 +04:00
{
2014-09-03 02:31:18 +04:00
struct f2fs_sb_info * sbi = F2FS_I_SB ( inode ) ;
2015-12-16 08:09:20 +03:00
enum inode_type type = S_ISDIR ( inode - > i_mode ) ? DIR_INODE : FILE_INODE ;
2013-06-05 12:42:45 +04:00
2015-06-29 13:14:10 +03:00
if ( ! S_ISDIR ( inode - > i_mode ) & & ! S_ISREG ( inode - > i_mode ) & &
! S_ISLNK ( inode - > i_mode ) )
2012-11-02 12:08:18 +04:00
return ;
2013-10-22 10:52:26 +04:00
2016-06-02 06:55:51 +03:00
spin_lock ( & sbi - > inode_lock [ type ] ) ;
if ( type ! = FILE_INODE | | test_opt ( sbi , DATA_FLUSH ) )
2016-05-16 20:33:40 +03:00
__add_dirty_inode ( inode , type ) ;
2016-05-13 09:57:43 +03:00
inode_inc_dirty_pages ( inode ) ;
2016-06-02 06:55:51 +03:00
spin_unlock ( & sbi - > inode_lock [ type ] ) ;
2014-09-13 02:53:45 +04:00
SetPagePrivate ( page ) ;
2014-12-18 06:58:58 +03:00
f2fs_trace_pid ( page ) ;
2013-06-05 12:42:45 +04:00
}
2015-12-16 08:09:20 +03:00
void remove_dirty_inode ( struct inode * inode )
2012-11-02 12:08:18 +04:00
{
2014-09-03 02:31:18 +04:00
struct f2fs_sb_info * sbi = F2FS_I_SB ( inode ) ;
2015-12-16 08:09:20 +03:00
enum inode_type type = S_ISDIR ( inode - > i_mode ) ? DIR_INODE : FILE_INODE ;
2012-11-02 12:08:18 +04:00
2015-12-16 08:09:20 +03:00
if ( ! S_ISDIR ( inode - > i_mode ) & & ! S_ISREG ( inode - > i_mode ) & &
! S_ISLNK ( inode - > i_mode ) )
2012-11-02 12:08:18 +04:00
return ;
2016-05-16 20:33:40 +03:00
if ( type = = FILE_INODE & & ! test_opt ( sbi , DATA_FLUSH ) )
return ;
2015-12-16 08:09:20 +03:00
spin_lock ( & sbi - > inode_lock [ type ] ) ;
__remove_dirty_inode ( inode , type ) ;
spin_unlock ( & sbi - > inode_lock [ type ] ) ;
2013-05-15 11:40:02 +04:00
}
2015-12-24 13:04:56 +03:00
/*
 * Write back every inode queued on sbi->inode_list[type] until the list
 * is drained.  Returns 0 on success, or -EIO once a checkpoint error has
 * been raised on the filesystem.
 */
int sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type)
{
	struct list_head *head;
	struct inode *inode;
	struct f2fs_inode_info *fi;
	bool is_dir = (type == DIR_INODE);
	/* last inode written; detects re-dirtied inodes that keep recurring */
	unsigned long ino = 0;

	trace_f2fs_sync_dirty_inodes_enter(sbi->sb, is_dir,
				get_pages(sbi, is_dir ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
retry:
	/* bail out if the fs already hit a checkpoint error */
	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;

	spin_lock(&sbi->inode_lock[type]);

	head = &sbi->inode_list[type];
	if (list_empty(head)) {
		/* list fully drained: done */
		spin_unlock(&sbi->inode_lock[type]);
		trace_f2fs_sync_dirty_inodes_exit(sbi->sb, is_dir,
				get_pages(sbi, is_dir ?
				F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA));
		return 0;
	}
	fi = list_first_entry(head, struct f2fs_inode_info, dirty_list);
	/* pin the inode so eviction cannot free it under us; may fail */
	inode = igrab(&fi->vfs_inode);
	/* drop the lock before doing writeback I/O */
	spin_unlock(&sbi->inode_lock[type]);
	if (inode) {
		unsigned long cur_ino = inode->i_ino;

		/* tag dir writeback as issued by this checkpoint task */
		if (is_dir)
			F2FS_I(inode)->cp_task = current;

		filemap_fdatawrite(inode->i_mapping);

		if (is_dir)
			F2FS_I(inode)->cp_task = NULL;

		iput(inode);
		/* We need to give cpu to another writers. */
		if (ino == cur_ino) {
			/* same inode twice in a row: back off briefly */
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			cond_resched();
		} else {
			ino = cur_ino;
		}
	} else {
		/*
		 * We should submit bio, since it exists several
		 * writebacking dentry pages in the freeing inode.
		 */
		f2fs_submit_merged_write(sbi, DATA);
		cond_resched();
	}
	goto retry;
}
2016-05-20 21:10:10 +03:00
int f2fs_sync_inode_meta ( struct f2fs_sb_info * sbi )
{
struct list_head * head = & sbi - > inode_list [ DIRTY_META ] ;
struct inode * inode ;
struct f2fs_inode_info * fi ;
s64 total = get_pages ( sbi , F2FS_DIRTY_IMETA ) ;
while ( total - - ) {
if ( unlikely ( f2fs_cp_error ( sbi ) ) )
return - EIO ;
spin_lock ( & sbi - > inode_lock [ DIRTY_META ] ) ;
if ( list_empty ( head ) ) {
spin_unlock ( & sbi - > inode_lock [ DIRTY_META ] ) ;
return 0 ;
}
2017-01-07 13:49:42 +03:00
fi = list_first_entry ( head , struct f2fs_inode_info ,
2016-05-20 21:10:10 +03:00
gdirty_list ) ;
inode = igrab ( & fi - > vfs_inode ) ;
spin_unlock ( & sbi - > inode_lock [ DIRTY_META ] ) ;
if ( inode ) {
2016-10-20 04:27:56 +03:00
sync_inode_metadata ( inode , 0 ) ;
/* it's on eviction */
if ( is_inode_flag_set ( inode , FI_DIRTY_INODE ) )
update_inode_page ( inode ) ;
2016-05-20 21:10:10 +03:00
iput ( inode ) ;
}
2017-11-02 15:41:01 +03:00
}
2016-05-20 21:10:10 +03:00
return 0 ;
}
2017-03-13 15:22:18 +03:00
static void __prepare_cp_block ( struct f2fs_sb_info * sbi )
{
struct f2fs_checkpoint * ckpt = F2FS_CKPT ( sbi ) ;
struct f2fs_nm_info * nm_i = NM_I ( sbi ) ;
nid_t last_nid = nm_i - > next_scan_nid ;
next_free_nid ( sbi , & last_nid ) ;
ckpt - > valid_block_count = cpu_to_le64 ( valid_user_blocks ( sbi ) ) ;
ckpt - > valid_node_count = cpu_to_le32 ( valid_node_count ( sbi ) ) ;
ckpt - > valid_inode_count = cpu_to_le32 ( valid_inode_count ( sbi ) ) ;
ckpt - > next_free_nid = cpu_to_le32 ( last_nid ) ;
}
2012-11-29 08:28:09 +04:00
/*
 * Freeze all the FS-operations for checkpoint.
 *
 * Takes f2fs_lock_all() plus sbi->node_write, after repeatedly flushing
 * dirty dentries, dirty inode metadata and dirty node pages with the
 * locks temporarily dropped, so that the checkpoint sees a quiescent
 * state.  On success the locks remain held; unblock_operations() is the
 * counterpart that releases them.  Returns 0 or a negative errno from
 * one of the flush paths (locks are released on failure).
 */
static int block_operations(struct f2fs_sb_info *sbi)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.for_reclaim = 0,
	};
	struct blk_plug plug;
	int err = 0;

	/* plug block IO so the flushes below can be merged by the scheduler */
	blk_start_plug(&plug);

retry_flush_dents:
	f2fs_lock_all(sbi);
	/* write all the dirty dentry pages */
	if (get_pages(sbi, F2FS_DIRTY_DENTS)) {
		/* drop the lock while flushing, then retry from scratch */
		f2fs_unlock_all(sbi);
		err = sync_dirty_inodes(sbi, DIR_INODE);
		if (err)
			goto out;
		cond_resched();
		goto retry_flush_dents;
	}

	/*
	 * POR: we should ensure that there are no dirty node pages
	 * until finishing nat/sit flush. inode->i_blocks can be updated.
	 */
	down_write(&sbi->node_change);

	if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
		/* inode meta still dirty: flush it unlocked and retry */
		up_write(&sbi->node_change);
		f2fs_unlock_all(sbi);
		err = f2fs_sync_inode_meta(sbi);
		if (err)
			goto out;
		cond_resched();
		goto retry_flush_dents;
	}

retry_flush_nodes:
	down_write(&sbi->node_write);

	if (get_pages(sbi, F2FS_DIRTY_NODES)) {
		/* flush node pages with node_write released, then retry */
		up_write(&sbi->node_write);
		err = sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO);
		if (err) {
			up_write(&sbi->node_change);
			f2fs_unlock_all(sbi);
			goto out;
		}
		cond_resched();
		goto retry_flush_nodes;
	}

	/*
	 * sbi->node_change is used only for AIO write_begin path which produces
	 * dirty node blocks and some checkpoint values by block allocation.
	 */
	__prepare_cp_block(sbi);
	up_write(&sbi->node_change);
out:
	blk_finish_plug(&plug);
	return err;
}
/*
 * Counterpart of block_operations(): release sbi->node_write and the
 * all-operations lock once the checkpoint no longer needs the fs frozen.
 */
static void unblock_operations(struct f2fs_sb_info *sbi)
{
	up_write(&sbi->node_write);
	f2fs_unlock_all(sbi);
}
2013-11-07 07:48:25 +04:00
/*
 * Sleep until all in-flight checkpoint-tagged data writeback pages
 * (F2FS_WB_CP_DATA) have completed.  Woken via sbi->cp_wait; re-checks
 * every 5 seconds regardless, in case a wakeup is missed.
 */
static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
{
	DEFINE_WAIT(wait);

	for (;;) {
		/* register on cp_wait before testing, to avoid lost wakeups */
		prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE);

		if (!get_pages(sbi, F2FS_WB_CP_DATA))
			break;

		io_schedule_timeout(5 * HZ);
	}
	finish_wait(&sbi->cp_wait, &wait);
}
2016-10-01 03:37:43 +03:00
static void update_ckpt_flags ( struct f2fs_sb_info * sbi , struct cp_control * cpc )
{
unsigned long orphan_num = sbi - > im [ ORPHAN_INO ] . ino_num ;
struct f2fs_checkpoint * ckpt = F2FS_CKPT ( sbi ) ;
f2fs: use spin_{,un}lock_irq{save,restore}
generic/361 reports below warning, this is because: once, there is
someone entering into critical region of sbi.cp_lock, if write_end_io.
f2fs_stop_checkpoint is invoked from an triggered IRQ, we will encounter
deadlock.
So this patch changes to use spin_{,un}lock_irq{save,restore} to create
critical region without IRQ enabled to avoid potential deadlock.
irq event stamp: 83391573
loop: Write error at byte offset 438729728, length 1024.
hardirqs last enabled at (83391573): [<c1809752>] restore_all+0xf/0x65
hardirqs last disabled at (83391572): [<c1809eac>] reschedule_interrupt+0x30/0x3c
loop: Write error at byte offset 438860288, length 1536.
softirqs last enabled at (83389244): [<c180cc4e>] __do_softirq+0x1ae/0x476
softirqs last disabled at (83389237): [<c101ca7c>] do_softirq_own_stack+0x2c/0x40
loop: Write error at byte offset 438990848, length 2048.
================================
WARNING: inconsistent lock state
4.12.0-rc2+ #30 Tainted: G O
--------------------------------
inconsistent {HARDIRQ-ON-W} -> {IN-HARDIRQ-W} usage.
xfs_io/7959 [HC1[1]:SC0[0]:HE0:SE1] takes:
(&(&sbi->cp_lock)->rlock){?.+...}, at: [<f96f96cc>] f2fs_stop_checkpoint+0x1c/0x50 [f2fs]
{HARDIRQ-ON-W} state was registered at:
__lock_acquire+0x527/0x7b0
lock_acquire+0xae/0x220
_raw_spin_lock+0x42/0x50
do_checkpoint+0x165/0x9e0 [f2fs]
write_checkpoint+0x33f/0x740 [f2fs]
__f2fs_sync_fs+0x92/0x1f0 [f2fs]
f2fs_sync_fs+0x12/0x20 [f2fs]
sync_filesystem+0x67/0x80
generic_shutdown_super+0x27/0x100
kill_block_super+0x22/0x50
kill_f2fs_super+0x3a/0x40 [f2fs]
deactivate_locked_super+0x3d/0x70
deactivate_super+0x40/0x60
cleanup_mnt+0x39/0x70
__cleanup_mnt+0x10/0x20
task_work_run+0x69/0x80
exit_to_usermode_loop+0x57/0x85
do_fast_syscall_32+0x18c/0x1b0
entry_SYSENTER_32+0x4c/0x7b
irq event stamp: 1957420
hardirqs last enabled at (1957419): [<c1808f37>] _raw_spin_unlock_irq+0x27/0x50
hardirqs last disabled at (1957420): [<c1809f9c>] call_function_single_interrupt+0x30/0x3c
softirqs last enabled at (1953784): [<c180cc4e>] __do_softirq+0x1ae/0x476
softirqs last disabled at (1953773): [<c101ca7c>] do_softirq_own_stack+0x2c/0x40
other info that might help us debug this:
Possible unsafe locking scenario:
CPU0
----
lock(&(&sbi->cp_lock)->rlock);
<Interrupt>
lock(&(&sbi->cp_lock)->rlock);
*** DEADLOCK ***
2 locks held by xfs_io/7959:
#0: (sb_writers#13){.+.+.+}, at: [<c11fd7ca>] vfs_write+0x16a/0x190
#1: (&sb->s_type->i_mutex_key#16){+.+.+.}, at: [<f96e33f5>] f2fs_file_write_iter+0x25/0x140 [f2fs]
stack backtrace:
CPU: 2 PID: 7959 Comm: xfs_io Tainted: G O 4.12.0-rc2+ #30
Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006
Call Trace:
dump_stack+0x5f/0x92
print_usage_bug+0x1d3/0x1dd
? check_usage_backwards+0xe0/0xe0
mark_lock+0x23d/0x280
__lock_acquire+0x699/0x7b0
? __this_cpu_preempt_check+0xf/0x20
? trace_hardirqs_off_caller+0x91/0xe0
lock_acquire+0xae/0x220
? f2fs_stop_checkpoint+0x1c/0x50 [f2fs]
_raw_spin_lock+0x42/0x50
? f2fs_stop_checkpoint+0x1c/0x50 [f2fs]
f2fs_stop_checkpoint+0x1c/0x50 [f2fs]
f2fs_write_end_io+0x147/0x150 [f2fs]
bio_endio+0x7a/0x1e0
blk_update_request+0xad/0x410
blk_mq_end_request+0x16/0x60
lo_complete_rq+0x3c/0x70
__blk_mq_complete_request_remote+0x11/0x20
flush_smp_call_function_queue+0x6d/0x120
? debug_smp_processor_id+0x12/0x20
generic_smp_call_function_single_interrupt+0x12/0x30
smp_call_function_single_interrupt+0x25/0x40
call_function_single_interrupt+0x37/0x3c
EIP: _raw_spin_unlock_irq+0x2d/0x50
EFLAGS: 00000296 CPU: 2
EAX: 00000001 EBX: d2ccc51c ECX: 00000001 EDX: c1aacebd
ESI: 00000000 EDI: 00000000 EBP: c96c9d1c ESP: c96c9d18
DS: 007b ES: 007b FS: 00d8 GS: 0033 SS: 0068
? inherit_task_group.isra.98.part.99+0x6b/0xb0
__add_to_page_cache_locked+0x1d4/0x290
add_to_page_cache_lru+0x38/0xb0
pagecache_get_page+0x8e/0x200
f2fs_write_begin+0x96/0xf00 [f2fs]
? trace_hardirqs_on_caller+0xdd/0x1c0
? current_time+0x17/0x50
? trace_hardirqs_on+0xb/0x10
generic_perform_write+0xa9/0x170
__generic_file_write_iter+0x1a2/0x1f0
? f2fs_preallocate_blocks+0x137/0x160 [f2fs]
f2fs_file_write_iter+0x6e/0x140 [f2fs]
? __lock_acquire+0x429/0x7b0
__vfs_write+0xc1/0x140
vfs_write+0x9b/0x190
SyS_pwrite64+0x63/0xa0
do_fast_syscall_32+0xa1/0x1b0
entry_SYSENTER_32+0x4c/0x7b
EIP: 0xb7786c61
EFLAGS: 00000293 CPU: 2
EAX: ffffffda EBX: 00000003 ECX: 08416000 EDX: 00001000
ESI: 18b24000 EDI: 00000000 EBP: 00000003 ESP: bf9b36b0
DS: 007b ES: 007b FS: 0000 GS: 0033 SS: 007b
Fixes: aaec2b1d1879 ("f2fs: introduce cp_lock to protect updating of ckpt_flags")
Cc: stable@vger.kernel.org
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2017-07-07 09:10:15 +03:00
unsigned long flags ;
2016-10-01 03:37:43 +03:00
f2fs: use spin_{,un}lock_irq{save,restore}
generic/361 reports the warning below. This is because once someone has
entered the critical region of sbi.cp_lock, if write_end_io->
f2fs_stop_checkpoint is invoked from a triggered IRQ, we will encounter
a deadlock.
So this patch changes to use spin_{,un}lock_irq{save,restore} to create
critical region without IRQ enabled to avoid potential deadlock.
irq event stamp: 83391573
loop: Write error at byte offset 438729728, length 1024.
hardirqs last enabled at (83391573): [<c1809752>] restore_all+0xf/0x65
hardirqs last disabled at (83391572): [<c1809eac>] reschedule_interrupt+0x30/0x3c
loop: Write error at byte offset 438860288, length 1536.
softirqs last enabled at (83389244): [<c180cc4e>] __do_softirq+0x1ae/0x476
softirqs last disabled at (83389237): [<c101ca7c>] do_softirq_own_stack+0x2c/0x40
loop: Write error at byte offset 438990848, length 2048.
================================
WARNING: inconsistent lock state
4.12.0-rc2+ #30 Tainted: G O
--------------------------------
inconsistent {HARDIRQ-ON-W} -> {IN-HARDIRQ-W} usage.
xfs_io/7959 [HC1[1]:SC0[0]:HE0:SE1] takes:
(&(&sbi->cp_lock)->rlock){?.+...}, at: [<f96f96cc>] f2fs_stop_checkpoint+0x1c/0x50 [f2fs]
{HARDIRQ-ON-W} state was registered at:
__lock_acquire+0x527/0x7b0
lock_acquire+0xae/0x220
_raw_spin_lock+0x42/0x50
do_checkpoint+0x165/0x9e0 [f2fs]
write_checkpoint+0x33f/0x740 [f2fs]
__f2fs_sync_fs+0x92/0x1f0 [f2fs]
f2fs_sync_fs+0x12/0x20 [f2fs]
sync_filesystem+0x67/0x80
generic_shutdown_super+0x27/0x100
kill_block_super+0x22/0x50
kill_f2fs_super+0x3a/0x40 [f2fs]
deactivate_locked_super+0x3d/0x70
deactivate_super+0x40/0x60
cleanup_mnt+0x39/0x70
__cleanup_mnt+0x10/0x20
task_work_run+0x69/0x80
exit_to_usermode_loop+0x57/0x85
do_fast_syscall_32+0x18c/0x1b0
entry_SYSENTER_32+0x4c/0x7b
irq event stamp: 1957420
hardirqs last enabled at (1957419): [<c1808f37>] _raw_spin_unlock_irq+0x27/0x50
hardirqs last disabled at (1957420): [<c1809f9c>] call_function_single_interrupt+0x30/0x3c
softirqs last enabled at (1953784): [<c180cc4e>] __do_softirq+0x1ae/0x476
softirqs last disabled at (1953773): [<c101ca7c>] do_softirq_own_stack+0x2c/0x40
other info that might help us debug this:
Possible unsafe locking scenario:
CPU0
----
lock(&(&sbi->cp_lock)->rlock);
<Interrupt>
lock(&(&sbi->cp_lock)->rlock);
*** DEADLOCK ***
2 locks held by xfs_io/7959:
#0: (sb_writers#13){.+.+.+}, at: [<c11fd7ca>] vfs_write+0x16a/0x190
#1: (&sb->s_type->i_mutex_key#16){+.+.+.}, at: [<f96e33f5>] f2fs_file_write_iter+0x25/0x140 [f2fs]
stack backtrace:
CPU: 2 PID: 7959 Comm: xfs_io Tainted: G O 4.12.0-rc2+ #30
Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006
Call Trace:
dump_stack+0x5f/0x92
print_usage_bug+0x1d3/0x1dd
? check_usage_backwards+0xe0/0xe0
mark_lock+0x23d/0x280
__lock_acquire+0x699/0x7b0
? __this_cpu_preempt_check+0xf/0x20
? trace_hardirqs_off_caller+0x91/0xe0
lock_acquire+0xae/0x220
? f2fs_stop_checkpoint+0x1c/0x50 [f2fs]
_raw_spin_lock+0x42/0x50
? f2fs_stop_checkpoint+0x1c/0x50 [f2fs]
f2fs_stop_checkpoint+0x1c/0x50 [f2fs]
f2fs_write_end_io+0x147/0x150 [f2fs]
bio_endio+0x7a/0x1e0
blk_update_request+0xad/0x410
blk_mq_end_request+0x16/0x60
lo_complete_rq+0x3c/0x70
__blk_mq_complete_request_remote+0x11/0x20
flush_smp_call_function_queue+0x6d/0x120
? debug_smp_processor_id+0x12/0x20
generic_smp_call_function_single_interrupt+0x12/0x30
smp_call_function_single_interrupt+0x25/0x40
call_function_single_interrupt+0x37/0x3c
EIP: _raw_spin_unlock_irq+0x2d/0x50
EFLAGS: 00000296 CPU: 2
EAX: 00000001 EBX: d2ccc51c ECX: 00000001 EDX: c1aacebd
ESI: 00000000 EDI: 00000000 EBP: c96c9d1c ESP: c96c9d18
DS: 007b ES: 007b FS: 00d8 GS: 0033 SS: 0068
? inherit_task_group.isra.98.part.99+0x6b/0xb0
__add_to_page_cache_locked+0x1d4/0x290
add_to_page_cache_lru+0x38/0xb0
pagecache_get_page+0x8e/0x200
f2fs_write_begin+0x96/0xf00 [f2fs]
? trace_hardirqs_on_caller+0xdd/0x1c0
? current_time+0x17/0x50
? trace_hardirqs_on+0xb/0x10
generic_perform_write+0xa9/0x170
__generic_file_write_iter+0x1a2/0x1f0
? f2fs_preallocate_blocks+0x137/0x160 [f2fs]
f2fs_file_write_iter+0x6e/0x140 [f2fs]
? __lock_acquire+0x429/0x7b0
__vfs_write+0xc1/0x140
vfs_write+0x9b/0x190
SyS_pwrite64+0x63/0xa0
do_fast_syscall_32+0xa1/0x1b0
entry_SYSENTER_32+0x4c/0x7b
EIP: 0xb7786c61
EFLAGS: 00000293 CPU: 2
EAX: ffffffda EBX: 00000003 ECX: 08416000 EDX: 00001000
ESI: 18b24000 EDI: 00000000 EBP: 00000003 ESP: bf9b36b0
DS: 007b ES: 007b FS: 0000 GS: 0033 SS: 007b
Fixes: aaec2b1d1879 ("f2fs: introduce cp_lock to protect updating of ckpt_flags")
Cc: stable@vger.kernel.org
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2017-07-07 09:10:15 +03:00
spin_lock_irqsave ( & sbi - > cp_lock , flags ) ;
2016-10-01 03:37:43 +03:00
2017-04-27 15:40:39 +03:00
if ( ( cpc - > reason & CP_UMOUNT ) & &
2017-03-11 16:18:01 +03:00
le32_to_cpu ( ckpt - > cp_pack_total_block_count ) >
2017-02-09 21:38:09 +03:00
sbi - > blocks_per_seg - NM_I ( sbi ) - > nat_bits_blocks )
disable_nat_bits ( sbi , false ) ;
2017-04-28 08:56:08 +03:00
if ( cpc - > reason & CP_TRIMMED )
__set_ckpt_flags ( ckpt , CP_TRIMMED_FLAG ) ;
2017-04-27 15:40:39 +03:00
if ( cpc - > reason & CP_UMOUNT )
2016-10-01 03:37:43 +03:00
__set_ckpt_flags ( ckpt , CP_UMOUNT_FLAG ) ;
else
__clear_ckpt_flags ( ckpt , CP_UMOUNT_FLAG ) ;
2017-04-27 15:40:39 +03:00
if ( cpc - > reason & CP_FASTBOOT )
2016-10-01 03:37:43 +03:00
__set_ckpt_flags ( ckpt , CP_FASTBOOT_FLAG ) ;
else
__clear_ckpt_flags ( ckpt , CP_FASTBOOT_FLAG ) ;
if ( orphan_num )
__set_ckpt_flags ( ckpt , CP_ORPHAN_PRESENT_FLAG ) ;
else
__clear_ckpt_flags ( ckpt , CP_ORPHAN_PRESENT_FLAG ) ;
if ( is_sbi_flag_set ( sbi , SBI_NEED_FSCK ) )
__set_ckpt_flags ( ckpt , CP_FSCK_FLAG ) ;
/* set this flag to activate crc|cp_ver for recovery */
__set_ckpt_flags ( ckpt , CP_CRC_RECOVERY_FLAG ) ;
f2fs: use spin_{,un}lock_irq{save,restore}
generic/361 reports the warning below. This is because once someone has
entered the critical region of sbi.cp_lock, if write_end_io->
f2fs_stop_checkpoint is invoked from a triggered IRQ, we will encounter
a deadlock.
So this patch changes to use spin_{,un}lock_irq{save,restore} to create
critical region without IRQ enabled to avoid potential deadlock.
irq event stamp: 83391573
loop: Write error at byte offset 438729728, length 1024.
hardirqs last enabled at (83391573): [<c1809752>] restore_all+0xf/0x65
hardirqs last disabled at (83391572): [<c1809eac>] reschedule_interrupt+0x30/0x3c
loop: Write error at byte offset 438860288, length 1536.
softirqs last enabled at (83389244): [<c180cc4e>] __do_softirq+0x1ae/0x476
softirqs last disabled at (83389237): [<c101ca7c>] do_softirq_own_stack+0x2c/0x40
loop: Write error at byte offset 438990848, length 2048.
================================
WARNING: inconsistent lock state
4.12.0-rc2+ #30 Tainted: G O
--------------------------------
inconsistent {HARDIRQ-ON-W} -> {IN-HARDIRQ-W} usage.
xfs_io/7959 [HC1[1]:SC0[0]:HE0:SE1] takes:
(&(&sbi->cp_lock)->rlock){?.+...}, at: [<f96f96cc>] f2fs_stop_checkpoint+0x1c/0x50 [f2fs]
{HARDIRQ-ON-W} state was registered at:
__lock_acquire+0x527/0x7b0
lock_acquire+0xae/0x220
_raw_spin_lock+0x42/0x50
do_checkpoint+0x165/0x9e0 [f2fs]
write_checkpoint+0x33f/0x740 [f2fs]
__f2fs_sync_fs+0x92/0x1f0 [f2fs]
f2fs_sync_fs+0x12/0x20 [f2fs]
sync_filesystem+0x67/0x80
generic_shutdown_super+0x27/0x100
kill_block_super+0x22/0x50
kill_f2fs_super+0x3a/0x40 [f2fs]
deactivate_locked_super+0x3d/0x70
deactivate_super+0x40/0x60
cleanup_mnt+0x39/0x70
__cleanup_mnt+0x10/0x20
task_work_run+0x69/0x80
exit_to_usermode_loop+0x57/0x85
do_fast_syscall_32+0x18c/0x1b0
entry_SYSENTER_32+0x4c/0x7b
irq event stamp: 1957420
hardirqs last enabled at (1957419): [<c1808f37>] _raw_spin_unlock_irq+0x27/0x50
hardirqs last disabled at (1957420): [<c1809f9c>] call_function_single_interrupt+0x30/0x3c
softirqs last enabled at (1953784): [<c180cc4e>] __do_softirq+0x1ae/0x476
softirqs last disabled at (1953773): [<c101ca7c>] do_softirq_own_stack+0x2c/0x40
other info that might help us debug this:
Possible unsafe locking scenario:
CPU0
----
lock(&(&sbi->cp_lock)->rlock);
<Interrupt>
lock(&(&sbi->cp_lock)->rlock);
*** DEADLOCK ***
2 locks held by xfs_io/7959:
#0: (sb_writers#13){.+.+.+}, at: [<c11fd7ca>] vfs_write+0x16a/0x190
#1: (&sb->s_type->i_mutex_key#16){+.+.+.}, at: [<f96e33f5>] f2fs_file_write_iter+0x25/0x140 [f2fs]
stack backtrace:
CPU: 2 PID: 7959 Comm: xfs_io Tainted: G O 4.12.0-rc2+ #30
Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006
Call Trace:
dump_stack+0x5f/0x92
print_usage_bug+0x1d3/0x1dd
? check_usage_backwards+0xe0/0xe0
mark_lock+0x23d/0x280
__lock_acquire+0x699/0x7b0
? __this_cpu_preempt_check+0xf/0x20
? trace_hardirqs_off_caller+0x91/0xe0
lock_acquire+0xae/0x220
? f2fs_stop_checkpoint+0x1c/0x50 [f2fs]
_raw_spin_lock+0x42/0x50
? f2fs_stop_checkpoint+0x1c/0x50 [f2fs]
f2fs_stop_checkpoint+0x1c/0x50 [f2fs]
f2fs_write_end_io+0x147/0x150 [f2fs]
bio_endio+0x7a/0x1e0
blk_update_request+0xad/0x410
blk_mq_end_request+0x16/0x60
lo_complete_rq+0x3c/0x70
__blk_mq_complete_request_remote+0x11/0x20
flush_smp_call_function_queue+0x6d/0x120
? debug_smp_processor_id+0x12/0x20
generic_smp_call_function_single_interrupt+0x12/0x30
smp_call_function_single_interrupt+0x25/0x40
call_function_single_interrupt+0x37/0x3c
EIP: _raw_spin_unlock_irq+0x2d/0x50
EFLAGS: 00000296 CPU: 2
EAX: 00000001 EBX: d2ccc51c ECX: 00000001 EDX: c1aacebd
ESI: 00000000 EDI: 00000000 EBP: c96c9d1c ESP: c96c9d18
DS: 007b ES: 007b FS: 00d8 GS: 0033 SS: 0068
? inherit_task_group.isra.98.part.99+0x6b/0xb0
__add_to_page_cache_locked+0x1d4/0x290
add_to_page_cache_lru+0x38/0xb0
pagecache_get_page+0x8e/0x200
f2fs_write_begin+0x96/0xf00 [f2fs]
? trace_hardirqs_on_caller+0xdd/0x1c0
? current_time+0x17/0x50
? trace_hardirqs_on+0xb/0x10
generic_perform_write+0xa9/0x170
__generic_file_write_iter+0x1a2/0x1f0
? f2fs_preallocate_blocks+0x137/0x160 [f2fs]
f2fs_file_write_iter+0x6e/0x140 [f2fs]
? __lock_acquire+0x429/0x7b0
__vfs_write+0xc1/0x140
vfs_write+0x9b/0x190
SyS_pwrite64+0x63/0xa0
do_fast_syscall_32+0xa1/0x1b0
entry_SYSENTER_32+0x4c/0x7b
EIP: 0xb7786c61
EFLAGS: 00000293 CPU: 2
EAX: ffffffda EBX: 00000003 ECX: 08416000 EDX: 00001000
ESI: 18b24000 EDI: 00000000 EBP: 00000003 ESP: bf9b36b0
DS: 007b ES: 007b FS: 0000 GS: 0033 SS: 007b
Fixes: aaec2b1d1879 ("f2fs: introduce cp_lock to protect updating of ckpt_flags")
Cc: stable@vger.kernel.org
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2017-07-07 09:10:15 +03:00
spin_unlock_irqrestore ( & sbi - > cp_lock , flags ) ;
2016-10-01 03:37:43 +03:00
}
2015-12-23 12:50:30 +03:00
static int do_checkpoint ( struct f2fs_sb_info * sbi , struct cp_control * cpc )
2012-11-02 12:08:18 +04:00
{
struct f2fs_checkpoint * ckpt = F2FS_CKPT ( sbi ) ;
2014-09-12 16:19:48 +04:00
struct f2fs_nm_info * nm_i = NM_I ( sbi ) ;
f2fs: use spin_{,un}lock_irq{save,restore}
generic/361 reports below warning, this is because: once, there is
someone entering into critical region of sbi.cp_lock, if write_end_io.
f2fs_stop_checkpoint is invoked from an triggered IRQ, we will encounter
deadlock.
So this patch changes to use spin_{,un}lock_irq{save,restore} to create
critical region without IRQ enabled to avoid potential deadlock.
irq event stamp: 83391573
loop: Write error at byte offset 438729728, length 1024.
hardirqs last enabled at (83391573): [<c1809752>] restore_all+0xf/0x65
hardirqs last disabled at (83391572): [<c1809eac>] reschedule_interrupt+0x30/0x3c
loop: Write error at byte offset 438860288, length 1536.
softirqs last enabled at (83389244): [<c180cc4e>] __do_softirq+0x1ae/0x476
softirqs last disabled at (83389237): [<c101ca7c>] do_softirq_own_stack+0x2c/0x40
loop: Write error at byte offset 438990848, length 2048.
================================
WARNING: inconsistent lock state
4.12.0-rc2+ #30 Tainted: G O
--------------------------------
inconsistent {HARDIRQ-ON-W} -> {IN-HARDIRQ-W} usage.
xfs_io/7959 [HC1[1]:SC0[0]:HE0:SE1] takes:
(&(&sbi->cp_lock)->rlock){?.+...}, at: [<f96f96cc>] f2fs_stop_checkpoint+0x1c/0x50 [f2fs]
{HARDIRQ-ON-W} state was registered at:
__lock_acquire+0x527/0x7b0
lock_acquire+0xae/0x220
_raw_spin_lock+0x42/0x50
do_checkpoint+0x165/0x9e0 [f2fs]
write_checkpoint+0x33f/0x740 [f2fs]
__f2fs_sync_fs+0x92/0x1f0 [f2fs]
f2fs_sync_fs+0x12/0x20 [f2fs]
sync_filesystem+0x67/0x80
generic_shutdown_super+0x27/0x100
kill_block_super+0x22/0x50
kill_f2fs_super+0x3a/0x40 [f2fs]
deactivate_locked_super+0x3d/0x70
deactivate_super+0x40/0x60
cleanup_mnt+0x39/0x70
__cleanup_mnt+0x10/0x20
task_work_run+0x69/0x80
exit_to_usermode_loop+0x57/0x85
do_fast_syscall_32+0x18c/0x1b0
entry_SYSENTER_32+0x4c/0x7b
irq event stamp: 1957420
hardirqs last enabled at (1957419): [<c1808f37>] _raw_spin_unlock_irq+0x27/0x50
hardirqs last disabled at (1957420): [<c1809f9c>] call_function_single_interrupt+0x30/0x3c
softirqs last enabled at (1953784): [<c180cc4e>] __do_softirq+0x1ae/0x476
softirqs last disabled at (1953773): [<c101ca7c>] do_softirq_own_stack+0x2c/0x40
other info that might help us debug this:
Possible unsafe locking scenario:
CPU0
----
lock(&(&sbi->cp_lock)->rlock);
<Interrupt>
lock(&(&sbi->cp_lock)->rlock);
*** DEADLOCK ***
2 locks held by xfs_io/7959:
#0: (sb_writers#13){.+.+.+}, at: [<c11fd7ca>] vfs_write+0x16a/0x190
#1: (&sb->s_type->i_mutex_key#16){+.+.+.}, at: [<f96e33f5>] f2fs_file_write_iter+0x25/0x140 [f2fs]
stack backtrace:
CPU: 2 PID: 7959 Comm: xfs_io Tainted: G O 4.12.0-rc2+ #30
Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006
Call Trace:
dump_stack+0x5f/0x92
print_usage_bug+0x1d3/0x1dd
? check_usage_backwards+0xe0/0xe0
mark_lock+0x23d/0x280
__lock_acquire+0x699/0x7b0
? __this_cpu_preempt_check+0xf/0x20
? trace_hardirqs_off_caller+0x91/0xe0
lock_acquire+0xae/0x220
? f2fs_stop_checkpoint+0x1c/0x50 [f2fs]
_raw_spin_lock+0x42/0x50
? f2fs_stop_checkpoint+0x1c/0x50 [f2fs]
f2fs_stop_checkpoint+0x1c/0x50 [f2fs]
f2fs_write_end_io+0x147/0x150 [f2fs]
bio_endio+0x7a/0x1e0
blk_update_request+0xad/0x410
blk_mq_end_request+0x16/0x60
lo_complete_rq+0x3c/0x70
__blk_mq_complete_request_remote+0x11/0x20
flush_smp_call_function_queue+0x6d/0x120
? debug_smp_processor_id+0x12/0x20
generic_smp_call_function_single_interrupt+0x12/0x30
smp_call_function_single_interrupt+0x25/0x40
call_function_single_interrupt+0x37/0x3c
EIP: _raw_spin_unlock_irq+0x2d/0x50
EFLAGS: 00000296 CPU: 2
EAX: 00000001 EBX: d2ccc51c ECX: 00000001 EDX: c1aacebd
ESI: 00000000 EDI: 00000000 EBP: c96c9d1c ESP: c96c9d18
DS: 007b ES: 007b FS: 00d8 GS: 0033 SS: 0068
? inherit_task_group.isra.98.part.99+0x6b/0xb0
__add_to_page_cache_locked+0x1d4/0x290
add_to_page_cache_lru+0x38/0xb0
pagecache_get_page+0x8e/0x200
f2fs_write_begin+0x96/0xf00 [f2fs]
? trace_hardirqs_on_caller+0xdd/0x1c0
? current_time+0x17/0x50
? trace_hardirqs_on+0xb/0x10
generic_perform_write+0xa9/0x170
__generic_file_write_iter+0x1a2/0x1f0
? f2fs_preallocate_blocks+0x137/0x160 [f2fs]
f2fs_file_write_iter+0x6e/0x140 [f2fs]
? __lock_acquire+0x429/0x7b0
__vfs_write+0xc1/0x140
vfs_write+0x9b/0x190
SyS_pwrite64+0x63/0xa0
do_fast_syscall_32+0xa1/0x1b0
entry_SYSENTER_32+0x4c/0x7b
EIP: 0xb7786c61
EFLAGS: 00000293 CPU: 2
EAX: ffffffda EBX: 00000003 ECX: 08416000 EDX: 00001000
ESI: 18b24000 EDI: 00000000 EBP: 00000003 ESP: bf9b36b0
DS: 007b ES: 007b FS: 0000 GS: 0033 SS: 007b
Fixes: aaec2b1d1879 ("f2fs: introduce cp_lock to protect updating of ckpt_flags")
Cc: stable@vger.kernel.org
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2017-07-07 09:10:15 +03:00
unsigned long orphan_num = sbi - > im [ ORPHAN_INO ] . ino_num , flags ;
2012-11-02 12:08:18 +04:00
block_t start_blk ;
unsigned int data_sum_blocks , orphan_blocks ;
2013-06-19 15:47:19 +04:00
__u32 crc32 = 0 ;
2012-11-02 12:08:18 +04:00
int i ;
2015-02-26 02:57:20 +03:00
int cp_payload_blks = __cp_payload ( sbi ) ;
2016-01-27 04:57:30 +03:00
struct super_block * sb = sbi - > sb ;
struct curseg_info * seg_i = CURSEG_I ( sbi , CURSEG_HOT_NODE ) ;
u64 kbytes_written ;
2017-09-29 08:59:39 +03:00
int err ;
2012-11-02 12:08:18 +04:00
/* Flush all the NAT/SIT pages */
2014-08-12 05:37:46 +04:00
while ( get_pages ( sbi , F2FS_DIRTY_META ) ) {
2017-08-02 18:21:48 +03:00
sync_meta_pages ( sbi , META , LONG_MAX , FS_CP_META_IO ) ;
2014-08-12 05:37:46 +04:00
if ( unlikely ( f2fs_cp_error ( sbi ) ) )
2015-12-23 12:50:30 +03:00
return - EIO ;
2014-08-12 05:37:46 +04:00
}
2012-11-02 12:08:18 +04:00
/*
* modify checkpoint
* version number is already updated
*/
ckpt - > elapsed_time = cpu_to_le64 ( get_mtime ( sbi ) ) ;
ckpt - > free_segment_count = cpu_to_le32 ( free_segments ( sbi ) ) ;
2014-08-22 12:17:38 +04:00
for ( i = 0 ; i < NR_CURSEG_NODE_TYPE ; i + + ) {
2012-11-02 12:08:18 +04:00
ckpt - > cur_node_segno [ i ] =
cpu_to_le32 ( curseg_segno ( sbi , i + CURSEG_HOT_NODE ) ) ;
ckpt - > cur_node_blkoff [ i ] =
cpu_to_le16 ( curseg_blkoff ( sbi , i + CURSEG_HOT_NODE ) ) ;
ckpt - > alloc_type [ i + CURSEG_HOT_NODE ] =
curseg_alloc_type ( sbi , i + CURSEG_HOT_NODE ) ;
}
2014-08-22 12:17:38 +04:00
for ( i = 0 ; i < NR_CURSEG_DATA_TYPE ; i + + ) {
2012-11-02 12:08:18 +04:00
ckpt - > cur_data_segno [ i ] =
cpu_to_le32 ( curseg_segno ( sbi , i + CURSEG_HOT_DATA ) ) ;
ckpt - > cur_data_blkoff [ i ] =
cpu_to_le16 ( curseg_blkoff ( sbi , i + CURSEG_HOT_DATA ) ) ;
ckpt - > alloc_type [ i + CURSEG_HOT_DATA ] =
curseg_alloc_type ( sbi , i + CURSEG_HOT_DATA ) ;
}
/* 2 cp + n data seg summary + orphan inode blocks */
2014-12-09 09:21:46 +03:00
data_sum_blocks = npages_for_summary_flush ( sbi , false ) ;
f2fs: use spin_{,un}lock_irq{save,restore}
generic/361 reports below warning, this is because: once, there is
someone entering into critical region of sbi.cp_lock, if write_end_io.
f2fs_stop_checkpoint is invoked from an triggered IRQ, we will encounter
deadlock.
So this patch changes to use spin_{,un}lock_irq{save,restore} to create
critical region without IRQ enabled to avoid potential deadlock.
irq event stamp: 83391573
loop: Write error at byte offset 438729728, length 1024.
hardirqs last enabled at (83391573): [<c1809752>] restore_all+0xf/0x65
hardirqs last disabled at (83391572): [<c1809eac>] reschedule_interrupt+0x30/0x3c
loop: Write error at byte offset 438860288, length 1536.
softirqs last enabled at (83389244): [<c180cc4e>] __do_softirq+0x1ae/0x476
softirqs last disabled at (83389237): [<c101ca7c>] do_softirq_own_stack+0x2c/0x40
loop: Write error at byte offset 438990848, length 2048.
================================
WARNING: inconsistent lock state
4.12.0-rc2+ #30 Tainted: G O
--------------------------------
inconsistent {HARDIRQ-ON-W} -> {IN-HARDIRQ-W} usage.
xfs_io/7959 [HC1[1]:SC0[0]:HE0:SE1] takes:
(&(&sbi->cp_lock)->rlock){?.+...}, at: [<f96f96cc>] f2fs_stop_checkpoint+0x1c/0x50 [f2fs]
{HARDIRQ-ON-W} state was registered at:
__lock_acquire+0x527/0x7b0
lock_acquire+0xae/0x220
_raw_spin_lock+0x42/0x50
do_checkpoint+0x165/0x9e0 [f2fs]
write_checkpoint+0x33f/0x740 [f2fs]
__f2fs_sync_fs+0x92/0x1f0 [f2fs]
f2fs_sync_fs+0x12/0x20 [f2fs]
sync_filesystem+0x67/0x80
generic_shutdown_super+0x27/0x100
kill_block_super+0x22/0x50
kill_f2fs_super+0x3a/0x40 [f2fs]
deactivate_locked_super+0x3d/0x70
deactivate_super+0x40/0x60
cleanup_mnt+0x39/0x70
__cleanup_mnt+0x10/0x20
task_work_run+0x69/0x80
exit_to_usermode_loop+0x57/0x85
do_fast_syscall_32+0x18c/0x1b0
entry_SYSENTER_32+0x4c/0x7b
irq event stamp: 1957420
hardirqs last enabled at (1957419): [<c1808f37>] _raw_spin_unlock_irq+0x27/0x50
hardirqs last disabled at (1957420): [<c1809f9c>] call_function_single_interrupt+0x30/0x3c
softirqs last enabled at (1953784): [<c180cc4e>] __do_softirq+0x1ae/0x476
softirqs last disabled at (1953773): [<c101ca7c>] do_softirq_own_stack+0x2c/0x40
other info that might help us debug this:
Possible unsafe locking scenario:
CPU0
----
lock(&(&sbi->cp_lock)->rlock);
<Interrupt>
lock(&(&sbi->cp_lock)->rlock);
*** DEADLOCK ***
2 locks held by xfs_io/7959:
#0: (sb_writers#13){.+.+.+}, at: [<c11fd7ca>] vfs_write+0x16a/0x190
#1: (&sb->s_type->i_mutex_key#16){+.+.+.}, at: [<f96e33f5>] f2fs_file_write_iter+0x25/0x140 [f2fs]
stack backtrace:
CPU: 2 PID: 7959 Comm: xfs_io Tainted: G O 4.12.0-rc2+ #30
Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006
Call Trace:
dump_stack+0x5f/0x92
print_usage_bug+0x1d3/0x1dd
? check_usage_backwards+0xe0/0xe0
mark_lock+0x23d/0x280
__lock_acquire+0x699/0x7b0
? __this_cpu_preempt_check+0xf/0x20
? trace_hardirqs_off_caller+0x91/0xe0
lock_acquire+0xae/0x220
? f2fs_stop_checkpoint+0x1c/0x50 [f2fs]
_raw_spin_lock+0x42/0x50
? f2fs_stop_checkpoint+0x1c/0x50 [f2fs]
f2fs_stop_checkpoint+0x1c/0x50 [f2fs]
f2fs_write_end_io+0x147/0x150 [f2fs]
bio_endio+0x7a/0x1e0
blk_update_request+0xad/0x410
blk_mq_end_request+0x16/0x60
lo_complete_rq+0x3c/0x70
__blk_mq_complete_request_remote+0x11/0x20
flush_smp_call_function_queue+0x6d/0x120
? debug_smp_processor_id+0x12/0x20
generic_smp_call_function_single_interrupt+0x12/0x30
smp_call_function_single_interrupt+0x25/0x40
call_function_single_interrupt+0x37/0x3c
EIP: _raw_spin_unlock_irq+0x2d/0x50
EFLAGS: 00000296 CPU: 2
EAX: 00000001 EBX: d2ccc51c ECX: 00000001 EDX: c1aacebd
ESI: 00000000 EDI: 00000000 EBP: c96c9d1c ESP: c96c9d18
DS: 007b ES: 007b FS: 00d8 GS: 0033 SS: 0068
? inherit_task_group.isra.98.part.99+0x6b/0xb0
__add_to_page_cache_locked+0x1d4/0x290
add_to_page_cache_lru+0x38/0xb0
pagecache_get_page+0x8e/0x200
f2fs_write_begin+0x96/0xf00 [f2fs]
? trace_hardirqs_on_caller+0xdd/0x1c0
? current_time+0x17/0x50
? trace_hardirqs_on+0xb/0x10
generic_perform_write+0xa9/0x170
__generic_file_write_iter+0x1a2/0x1f0
? f2fs_preallocate_blocks+0x137/0x160 [f2fs]
f2fs_file_write_iter+0x6e/0x140 [f2fs]
? __lock_acquire+0x429/0x7b0
__vfs_write+0xc1/0x140
vfs_write+0x9b/0x190
SyS_pwrite64+0x63/0xa0
do_fast_syscall_32+0xa1/0x1b0
entry_SYSENTER_32+0x4c/0x7b
EIP: 0xb7786c61
EFLAGS: 00000293 CPU: 2
EAX: ffffffda EBX: 00000003 ECX: 08416000 EDX: 00001000
ESI: 18b24000 EDI: 00000000 EBP: 00000003 ESP: bf9b36b0
DS: 007b ES: 007b FS: 0000 GS: 0033 SS: 007b
Fixes: aaec2b1d1879 ("f2fs: introduce cp_lock to protect updating of ckpt_flags")
Cc: stable@vger.kernel.org
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2017-07-07 09:10:15 +03:00
spin_lock_irqsave ( & sbi - > cp_lock , flags ) ;
2014-08-22 12:17:38 +04:00
if ( data_sum_blocks < NR_CURSEG_DATA_TYPE )
2016-09-20 06:04:18 +03:00
__set_ckpt_flags ( ckpt , CP_COMPACT_SUM_FLAG ) ;
2012-11-02 12:08:18 +04:00
else
2016-09-20 06:04:18 +03:00
__clear_ckpt_flags ( ckpt , CP_COMPACT_SUM_FLAG ) ;
f2fs: use spin_{,un}lock_irq{save,restore}
generic/361 reports below warning, this is because: once, there is
someone entering into critical region of sbi.cp_lock, if write_end_io.
f2fs_stop_checkpoint is invoked from an triggered IRQ, we will encounter
deadlock.
So this patch changes to use spin_{,un}lock_irq{save,restore} to create
critical region without IRQ enabled to avoid potential deadlock.
irq event stamp: 83391573
loop: Write error at byte offset 438729728, length 1024.
hardirqs last enabled at (83391573): [<c1809752>] restore_all+0xf/0x65
hardirqs last disabled at (83391572): [<c1809eac>] reschedule_interrupt+0x30/0x3c
loop: Write error at byte offset 438860288, length 1536.
softirqs last enabled at (83389244): [<c180cc4e>] __do_softirq+0x1ae/0x476
softirqs last disabled at (83389237): [<c101ca7c>] do_softirq_own_stack+0x2c/0x40
loop: Write error at byte offset 438990848, length 2048.
================================
WARNING: inconsistent lock state
4.12.0-rc2+ #30 Tainted: G O
--------------------------------
inconsistent {HARDIRQ-ON-W} -> {IN-HARDIRQ-W} usage.
xfs_io/7959 [HC1[1]:SC0[0]:HE0:SE1] takes:
(&(&sbi->cp_lock)->rlock){?.+...}, at: [<f96f96cc>] f2fs_stop_checkpoint+0x1c/0x50 [f2fs]
{HARDIRQ-ON-W} state was registered at:
__lock_acquire+0x527/0x7b0
lock_acquire+0xae/0x220
_raw_spin_lock+0x42/0x50
do_checkpoint+0x165/0x9e0 [f2fs]
write_checkpoint+0x33f/0x740 [f2fs]
__f2fs_sync_fs+0x92/0x1f0 [f2fs]
f2fs_sync_fs+0x12/0x20 [f2fs]
sync_filesystem+0x67/0x80
generic_shutdown_super+0x27/0x100
kill_block_super+0x22/0x50
kill_f2fs_super+0x3a/0x40 [f2fs]
deactivate_locked_super+0x3d/0x70
deactivate_super+0x40/0x60
cleanup_mnt+0x39/0x70
__cleanup_mnt+0x10/0x20
task_work_run+0x69/0x80
exit_to_usermode_loop+0x57/0x85
do_fast_syscall_32+0x18c/0x1b0
entry_SYSENTER_32+0x4c/0x7b
irq event stamp: 1957420
hardirqs last enabled at (1957419): [<c1808f37>] _raw_spin_unlock_irq+0x27/0x50
hardirqs last disabled at (1957420): [<c1809f9c>] call_function_single_interrupt+0x30/0x3c
softirqs last enabled at (1953784): [<c180cc4e>] __do_softirq+0x1ae/0x476
softirqs last disabled at (1953773): [<c101ca7c>] do_softirq_own_stack+0x2c/0x40
other info that might help us debug this:
Possible unsafe locking scenario:
CPU0
----
lock(&(&sbi->cp_lock)->rlock);
<Interrupt>
lock(&(&sbi->cp_lock)->rlock);
*** DEADLOCK ***
2 locks held by xfs_io/7959:
#0: (sb_writers#13){.+.+.+}, at: [<c11fd7ca>] vfs_write+0x16a/0x190
#1: (&sb->s_type->i_mutex_key#16){+.+.+.}, at: [<f96e33f5>] f2fs_file_write_iter+0x25/0x140 [f2fs]
stack backtrace:
CPU: 2 PID: 7959 Comm: xfs_io Tainted: G O 4.12.0-rc2+ #30
Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006
Call Trace:
dump_stack+0x5f/0x92
print_usage_bug+0x1d3/0x1dd
? check_usage_backwards+0xe0/0xe0
mark_lock+0x23d/0x280
__lock_acquire+0x699/0x7b0
? __this_cpu_preempt_check+0xf/0x20
? trace_hardirqs_off_caller+0x91/0xe0
lock_acquire+0xae/0x220
? f2fs_stop_checkpoint+0x1c/0x50 [f2fs]
_raw_spin_lock+0x42/0x50
? f2fs_stop_checkpoint+0x1c/0x50 [f2fs]
f2fs_stop_checkpoint+0x1c/0x50 [f2fs]
f2fs_write_end_io+0x147/0x150 [f2fs]
bio_endio+0x7a/0x1e0
blk_update_request+0xad/0x410
blk_mq_end_request+0x16/0x60
lo_complete_rq+0x3c/0x70
__blk_mq_complete_request_remote+0x11/0x20
flush_smp_call_function_queue+0x6d/0x120
? debug_smp_processor_id+0x12/0x20
generic_smp_call_function_single_interrupt+0x12/0x30
smp_call_function_single_interrupt+0x25/0x40
call_function_single_interrupt+0x37/0x3c
EIP: _raw_spin_unlock_irq+0x2d/0x50
EFLAGS: 00000296 CPU: 2
EAX: 00000001 EBX: d2ccc51c ECX: 00000001 EDX: c1aacebd
ESI: 00000000 EDI: 00000000 EBP: c96c9d1c ESP: c96c9d18
DS: 007b ES: 007b FS: 00d8 GS: 0033 SS: 0068
? inherit_task_group.isra.98.part.99+0x6b/0xb0
__add_to_page_cache_locked+0x1d4/0x290
add_to_page_cache_lru+0x38/0xb0
pagecache_get_page+0x8e/0x200
f2fs_write_begin+0x96/0xf00 [f2fs]
? trace_hardirqs_on_caller+0xdd/0x1c0
? current_time+0x17/0x50
? trace_hardirqs_on+0xb/0x10
generic_perform_write+0xa9/0x170
__generic_file_write_iter+0x1a2/0x1f0
? f2fs_preallocate_blocks+0x137/0x160 [f2fs]
f2fs_file_write_iter+0x6e/0x140 [f2fs]
? __lock_acquire+0x429/0x7b0
__vfs_write+0xc1/0x140
vfs_write+0x9b/0x190
SyS_pwrite64+0x63/0xa0
do_fast_syscall_32+0xa1/0x1b0
entry_SYSENTER_32+0x4c/0x7b
EIP: 0xb7786c61
EFLAGS: 00000293 CPU: 2
EAX: ffffffda EBX: 00000003 ECX: 08416000 EDX: 00001000
ESI: 18b24000 EDI: 00000000 EBP: 00000003 ESP: bf9b36b0
DS: 007b ES: 007b FS: 0000 GS: 0033 SS: 007b
Fixes: aaec2b1d1879 ("f2fs: introduce cp_lock to protect updating of ckpt_flags")
Cc: stable@vger.kernel.org
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2017-07-07 09:10:15 +03:00
spin_unlock_irqrestore ( & sbi - > cp_lock , flags ) ;
2012-11-02 12:08:18 +04:00
2014-11-18 06:18:36 +03:00
orphan_blocks = GET_ORPHAN_BLOCKS ( orphan_num ) ;
2014-05-12 07:27:43 +04:00
ckpt - > cp_pack_start_sum = cpu_to_le32 ( 1 + cp_payload_blks +
orphan_blocks ) ;
2012-11-02 12:08:18 +04:00
2015-01-29 22:45:33 +03:00
if ( __remain_node_summaries ( cpc - > reason ) )
2014-08-22 12:17:38 +04:00
ckpt - > cp_pack_total_block_count = cpu_to_le32 ( F2FS_CP_PACKS +
2014-05-12 07:27:43 +04:00
cp_payload_blks + data_sum_blocks +
orphan_blocks + NR_CURSEG_NODE_TYPE ) ;
2015-01-29 22:45:33 +03:00
else
2014-08-22 12:17:38 +04:00
ckpt - > cp_pack_total_block_count = cpu_to_le32 ( F2FS_CP_PACKS +
2014-05-12 07:27:43 +04:00
cp_payload_blks + data_sum_blocks +
orphan_blocks ) ;
2015-01-29 22:45:33 +03:00
2016-10-01 03:37:43 +03:00
/* update ckpt flag for checkpoint */
update_ckpt_flags ( sbi , cpc ) ;
2016-09-20 03:55:10 +03:00
2012-11-02 12:08:18 +04:00
/* update SIT/NAT bitmap */
get_sit_bitmap ( sbi , __bitmap_ptr ( sbi , SIT_BITMAP ) ) ;
get_nat_bitmap ( sbi , __bitmap_ptr ( sbi , NAT_BITMAP ) ) ;
2016-03-02 23:04:24 +03:00
crc32 = f2fs_crc32 ( sbi , ckpt , le32_to_cpu ( ckpt - > checksum_offset ) ) ;
2013-06-19 15:47:19 +04:00
* ( ( __le32 * ) ( ( unsigned char * ) ckpt +
le32_to_cpu ( ckpt - > checksum_offset ) ) )
2012-11-02 12:08:18 +04:00
= cpu_to_le32 ( crc32 ) ;
2016-11-24 23:45:15 +03:00
start_blk = __start_cp_next_addr ( sbi ) ;
2012-11-02 12:08:18 +04:00
2017-02-09 21:38:09 +03:00
/* write nat bits */
if ( enabled_nat_bits ( sbi , cpc ) ) {
__u64 cp_ver = cur_cp_version ( ckpt ) ;
block_t blk ;
cp_ver | = ( ( __u64 ) crc32 < < 32 ) ;
* ( __le64 * ) nm_i - > nat_bits = cpu_to_le64 ( cp_ver ) ;
blk = start_blk + sbi - > blocks_per_seg - nm_i - > nat_bits_blocks ;
for ( i = 0 ; i < nm_i - > nat_bits_blocks ; i + + )
update_meta_page ( sbi , nm_i - > nat_bits +
( i < < F2FS_BLKSIZE_BITS ) , blk + i ) ;
/* Flush all the NAT BITS pages */
while ( get_pages ( sbi , F2FS_DIRTY_META ) ) {
2017-08-02 18:21:48 +03:00
sync_meta_pages ( sbi , META , LONG_MAX , FS_CP_META_IO ) ;
2017-02-09 21:38:09 +03:00
if ( unlikely ( f2fs_cp_error ( sbi ) ) )
return - EIO ;
}
}
2015-09-17 00:06:54 +03:00
/* need to wait for end_io results */
wait_on_all_pages_writeback ( sbi ) ;
if ( unlikely ( f2fs_cp_error ( sbi ) ) )
2015-12-23 12:50:30 +03:00
return - EIO ;
2015-09-17 00:06:54 +03:00
2017-09-29 08:59:39 +03:00
/* flush all device cache */
err = f2fs_flush_device_cache ( sbi ) ;
if ( err )
return err ;
2012-11-02 12:08:18 +04:00
/* write out checkpoint buffer at block 0 */
2015-05-19 12:40:04 +03:00
update_meta_page ( sbi , ckpt , start_blk + + ) ;
for ( i = 1 ; i < 1 + cp_payload_blks ; i + + )
update_meta_page ( sbi , ( char * ) ckpt + i * F2FS_BLKSIZE ,
start_blk + + ) ;
2014-05-12 07:27:43 +04:00
2014-11-18 06:18:36 +03:00
if ( orphan_num ) {
2012-11-02 12:08:18 +04:00
write_orphan_inodes ( sbi , start_blk ) ;
start_blk + = orphan_blocks ;
}
write_data_summaries ( sbi , start_blk ) ;
start_blk + = data_sum_blocks ;
2016-01-27 04:57:30 +03:00
/* Record write statistics in the hot node summary */
kbytes_written = sbi - > kbytes_written ;
if ( sb - > s_bdev - > bd_part )
kbytes_written + = BD_PART_WRITTEN ( sbi ) ;
f2fs: split journal cache from curseg cache
In curseg cache, f2fs caches two different parts:
- datas of current summay block, i.e. summary entries, footer info.
- journal info, i.e. sparse nat/sit entries or io stat info.
With this approach, 1) it may cause higher lock contention when we access
or update both of the parts of cache since we use the same mutex lock
curseg_mutex to protect the cache. 2) current summary block with last
journal info will be writebacked into device as a normal summary block
when flushing, however, we treat journal info as valid one only in current
summary, so most normal summary blocks contain junk journal data, it wastes
remaining space of summary block.
So, in order to fix above issues, we split curseg cache into two parts:
a) current summary block, protected by original mutex lock curseg_mutex
b) journal cache, protected by newly introduced r/w semaphore journal_rwsem
When loading curseg cache during ->mount, we store summary info and
journal info into different caches; When doing checkpoint, we combine
datas of two cache into current summary block for persisting.
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2016-02-19 13:08:46 +03:00
seg_i - > journal - > info . kbytes_written = cpu_to_le64 ( kbytes_written ) ;
2016-01-27 04:57:30 +03:00
2015-01-29 22:45:33 +03:00
if ( __remain_node_summaries ( cpc - > reason ) ) {
2012-11-02 12:08:18 +04:00
write_node_summaries ( sbi , start_blk ) ;
start_blk + = NR_CURSEG_NODE_TYPE ;
}
/* writeout checkpoint block */
2015-05-19 12:40:04 +03:00
update_meta_page ( sbi , ckpt , start_blk ) ;
2012-11-02 12:08:18 +04:00
/* wait for previous submitted node/meta pages writeback */
2013-11-07 07:48:25 +04:00
wait_on_all_pages_writeback ( sbi ) ;
2012-11-02 12:08:18 +04:00
2014-08-12 05:37:46 +04:00
if ( unlikely ( f2fs_cp_error ( sbi ) ) )
2015-12-23 12:50:30 +03:00
return - EIO ;
2014-08-12 05:37:46 +04:00
f2fs: fix incorrect upper bound when iterating inode mapping tree
1. Inode mapping tree can index page in range of [0, ULONG_MAX], however,
in some places, f2fs only search or iterate page in ragne of [0, LONG_MAX],
result in miss hitting in page cache.
2. filemap_fdatawait_range accepts range parameters in unit of bytes, so
the max range it covers should be [0, LLONG_MAX], if we use [0, LONG_MAX]
as range for waiting on writeback, big number of pages will not be covered.
This patch corrects above two issues.
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
2016-02-24 12:20:44 +03:00
filemap_fdatawait_range ( NODE_MAPPING ( sbi ) , 0 , LLONG_MAX ) ;
filemap_fdatawait_range ( META_MAPPING ( sbi ) , 0 , LLONG_MAX ) ;
2012-11-02 12:08:18 +04:00
/* update user_block_counts */
sbi - > last_valid_block_count = sbi - > total_valid_block_count ;
2016-05-16 21:06:50 +03:00
percpu_counter_set ( & sbi - > alloc_valid_block_count , 0 ) ;
2012-11-02 12:08:18 +04:00
/* Here, we only have one bio having CP pack */
2017-08-02 18:21:48 +03:00
sync_meta_pages ( sbi , META_FLUSH , LONG_MAX , FS_CP_META_IO ) ;
2012-11-02 12:08:18 +04:00
2014-10-30 00:37:22 +03:00
/* wait for previous submitted meta pages writeback */
wait_on_all_pages_writeback ( sbi ) ;
2016-05-03 08:09:56 +03:00
release_ino_entry ( sbi , false ) ;
2014-08-12 05:37:46 +04:00
if ( unlikely ( f2fs_cp_error ( sbi ) ) )
2015-12-23 12:50:30 +03:00
return - EIO ;
2014-08-12 05:37:46 +04:00
2015-01-28 12:48:42 +03:00
clear_sbi_flag ( sbi , SBI_IS_DIRTY ) ;
2016-08-30 04:23:45 +03:00
clear_sbi_flag ( sbi , SBI_NEED_CP ) ;
2016-11-24 23:45:15 +03:00
__set_cp_next_pack ( sbi ) ;
2015-12-23 12:50:30 +03:00
2016-08-31 05:43:19 +03:00
/*
* redirty superblock if metadata like node page or inode cache is
* updated during writing checkpoint .
*/
if ( get_pages ( sbi , F2FS_DIRTY_NODES ) | |
get_pages ( sbi , F2FS_DIRTY_IMETA ) )
set_sbi_flag ( sbi , SBI_IS_DIRTY ) ;
f2fs_bug_on ( sbi , get_pages ( sbi , F2FS_DIRTY_DENTS ) ) ;
2015-12-23 12:50:30 +03:00
return 0 ;
2012-11-02 12:08:18 +04:00
}
2012-11-29 08:28:09 +04:00
/*
2014-08-06 18:22:50 +04:00
* We guarantee that this checkpoint procedure will not fail .
2012-11-02 12:08:18 +04:00
*/
2015-12-23 12:50:30 +03:00
/*
 * Write a checkpoint pack for @sbi according to the control parameters in
 * @cpc.  Flushes cached NAT/SIT entries, bumps the checkpoint version and
 * hands off to do_checkpoint() under cp_mutex.
 *
 * Returns 0 on success, or a negative errno: -EIO if a checkpoint error was
 * already recorded on the sb, -EROFS for a read-only filesystem, or the
 * error returned by block_operations()/do_checkpoint().
 */
int write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
{
	struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
	unsigned long long ckpt_ver;
	int err = 0;

	/* serialize checkpoints; every exit path goes through "out" to unlock */
	mutex_lock(&sbi->cp_mutex);

	/*
	 * Nothing is dirty and the caller only asked for a periodic/sync/
	 * discard-style checkpoint (with no pending discard blocks) — there
	 * is no work to do, so bail out early with err == 0.
	 */
	if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) &&
		((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) ||
		((cpc->reason & CP_DISCARD) && !sbi->discard_blks)))
		goto out;
	if (unlikely(f2fs_cp_error(sbi))) {
		err = -EIO;
		goto out;
	}
	if (f2fs_readonly(sbi->sb)) {
		err = -EROFS;
		goto out;
	}

	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "start block_ops");

	/* freeze fs operations; on success it must be paired with unblock */
	err = block_operations(sbi);
	if (err)
		goto out;

	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish block_ops");

	f2fs_flush_merged_writes(sbi);

	/* this is the case of multiple fstrims without any changes */
	if (cpc->reason & CP_DISCARD) {
		/* nothing left to trim: undo block_operations() and leave */
		if (!exist_trim_candidates(sbi, cpc)) {
			unblock_operations(sbi);
			goto out;
		}

		/*
		 * No dirty NAT/SIT entries and no prefree segments: flushing
		 * SIT entries and clearing prefree segments is enough — a
		 * full checkpoint pack is not needed.
		 */
		if (NM_I(sbi)->dirty_nat_cnt == 0 &&
				SIT_I(sbi)->dirty_sentries == 0 &&
				prefree_segments(sbi) == 0) {
			flush_sit_entries(sbi, cpc);
			clear_prefree_segments(sbi, cpc);
			unblock_operations(sbi);
			goto out;
		}
	}

	/*
	 * update checkpoint pack index
	 * Increase the version number so that
	 * SIT entries and seg summaries are written at correct place
	 */
	ckpt_ver = cur_cp_version(ckpt);
	ckpt->checkpoint_ver = cpu_to_le64(++ckpt_ver);

	/* write cached NAT/SIT entries to NAT/SIT area */
	flush_nat_entries(sbi, cpc);
	flush_sit_entries(sbi, cpc);

	/* unlock all the fs_lock[] in do_checkpoint() */
	err = do_checkpoint(sbi, cpc);
	if (err)
		/* checkpoint failed: keep prefree info, drop discard addrs */
		release_discard_addrs(sbi);
	else
		/* checkpoint succeeded: prefree segments are now reusable */
		clear_prefree_segments(sbi, cpc);

	unblock_operations(sbi);
	stat_inc_cp_count(sbi->stat_info);

	if (cpc->reason & CP_RECOVERY)
		f2fs_msg(sbi->sb, KERN_NOTICE,
			"checkpoint: version = %llx", ckpt_ver);

	/* do checkpoint periodically */
	f2fs_update_time(sbi, CP_TIME);
	trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint");
out:
	mutex_unlock(&sbi->cp_mutex);
	return err;
}
2014-07-26 02:47:17 +04:00
void init_ino_entry_info ( struct f2fs_sb_info * sbi )
2012-11-02 12:08:18 +04:00
{
2014-07-26 02:47:17 +04:00
int i ;
for ( i = 0 ; i < MAX_INO_ENTRY ; i + + ) {
2014-11-18 06:18:36 +03:00
struct inode_management * im = & sbi - > im [ i ] ;
INIT_RADIX_TREE ( & im - > ino_root , GFP_ATOMIC ) ;
spin_lock_init ( & im - > ino_lock ) ;
INIT_LIST_HEAD ( & im - > ino_list ) ;
im - > ino_num = 0 ;
2014-07-26 02:47:17 +04:00
}
2014-08-22 12:17:38 +04:00
sbi - > max_orphans = ( sbi - > blocks_per_seg - F2FS_CP_PACKS -
2015-02-27 12:38:13 +03:00
NR_CURSEG_TYPE - __cp_payload ( sbi ) ) *
F2FS_ORPHANS_PER_BLOCK ;
2012-11-02 12:08:18 +04:00
}
2013-01-16 19:08:30 +04:00
/*
 * Allocate the slab caches used by checkpoint code (ino entries and inode
 * entries).  Returns 0 on success or -ENOMEM, releasing any cache that was
 * already created.
 */
int __init create_checkpoint_caches(void)
{
	ino_entry_slab = f2fs_kmem_cache_create("f2fs_ino_entry",
			sizeof(struct ino_entry));
	if (!ino_entry_slab)
		goto fail;

	inode_entry_slab = f2fs_kmem_cache_create("f2fs_inode_entry",
			sizeof(struct inode_entry));
	if (!inode_entry_slab)
		goto free_ino_entry;

	return 0;

free_ino_entry:
	kmem_cache_destroy(ino_entry_slab);
fail:
	return -ENOMEM;
}
void destroy_checkpoint_caches ( void )
{
2014-07-26 02:47:17 +04:00
kmem_cache_destroy ( ino_entry_slab ) ;
2012-11-02 12:08:18 +04:00
kmem_cache_destroy ( inode_entry_slab ) ;
}