f2fs: fix to avoid potential race on sbi->unusable_block_count access/update
Use sbi->stat_lock to protect sbi->unusable_block_count access/update, in order to avoid potential race on it.

Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent d764834378
commit c9c8ed50d9
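The race being closed is a lost update on a shared counter: without a common lock, concurrent read-modify-write sequences on sbi->unusable_block_count (the increment in update_sit_entry(), the reset in do_checkpoint()/f2fs_disable_checkpoint(), the read in f2fs_statfs()) can interleave and clobber each other. As a rough illustration of the pattern the patch adopts, below is a minimal userspace sketch in plain C with pthreads; it is an analogy only, not kernel code, and the names stat_lock and unusable_block_count are merely borrowed for readability.

#define _POSIX_C_SOURCE 200112L   /* for pthread_spinlock_t on glibc */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t stat_lock;        /* analogy for sbi->stat_lock        */
static unsigned long unusable_block_count;  /* analogy for the protected counter */

/* Writer side: the increment is a read-modify-write, so it has to be
 * serialized against every other reader/writer of the counter. */
static void *bump(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000000; i++) {
		pthread_spin_lock(&stat_lock);
		unusable_block_count++;
		pthread_spin_unlock(&stat_lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_spin_init(&stat_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_create(&a, NULL, bump, NULL);
	pthread_create(&b, NULL, bump, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	/* With the lock this always prints 2000000; drop the lock/unlock
	 * pair and increments can be lost, which is the kind of race the
	 * patch closes for sbi->unusable_block_count. */
	printf("%lu\n", unusable_block_count);
	return 0;
}

Compile with something like `cc -pthread race.c` (the file name is arbitrary).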
@@ -1536,7 +1536,11 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	clear_sbi_flag(sbi, SBI_IS_DIRTY);
 	clear_sbi_flag(sbi, SBI_NEED_CP);
 	clear_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);
+
+	spin_lock(&sbi->stat_lock);
 	sbi->unusable_block_count = 0;
+	spin_unlock(&sbi->stat_lock);
+
 	__set_cp_next_pack(sbi);
 
 	/*
@@ -2169,8 +2169,11 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
 		 * before, we must track that to know how much space we
 		 * really have.
 		 */
-		if (f2fs_test_bit(offset, se->ckpt_valid_map))
+		if (f2fs_test_bit(offset, se->ckpt_valid_map)) {
+			spin_lock(&sbi->stat_lock);
 			sbi->unusable_block_count++;
+			spin_unlock(&sbi->stat_lock);
+		}
 	}
 
 	if (f2fs_test_and_clear_bit(offset, se->discard_map))
@@ -1226,10 +1226,13 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf)
 	buf->f_blocks = total_count - start_count;
 	buf->f_bfree = user_block_count - valid_user_blocks(sbi) -
 						sbi->current_reserved_blocks;
+
+	spin_lock(&sbi->stat_lock);
 	if (unlikely(buf->f_bfree <= sbi->unusable_block_count))
 		buf->f_bfree = 0;
 	else
 		buf->f_bfree -= sbi->unusable_block_count;
+	spin_unlock(&sbi->stat_lock);
 
 	if (buf->f_bfree > F2FS_OPTION(sbi).root_reserved_blocks)
 		buf->f_bavail = buf->f_bfree -
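The f2fs_statfs() hunk above is the reader side of the same rule: the comparison against unusable_block_count and the subtraction from f_bfree must observe one consistent value, so both sit inside the lock. Below is a hedged continuation of the userspace sketch earlier on this page; it reuses the illustrative stat_lock and unusable_block_count globals, and the helper name subtract_unusable() is invented for the sketch, not part of f2fs.

/* Reader side: snapshot-and-derive under the lock so the "compare, then
 * subtract" pair cannot see two different values of the counter. */
static unsigned long subtract_unusable(unsigned long bfree)
{
	pthread_spin_lock(&stat_lock);
	if (bfree <= unusable_block_count)
		bfree = 0;
	else
		bfree -= unusable_block_count;
	pthread_spin_unlock(&stat_lock);
	return bfree;
}

In the kernel the equivalent logic lives inline in f2fs_statfs(), exactly as shown in the hunk above.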
@@ -1508,7 +1511,10 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
 	if (err)
 		goto out_unlock;
 
+	spin_lock(&sbi->stat_lock);
 	sbi->unusable_block_count = 0;
+	spin_unlock(&sbi->stat_lock);
+
 out_unlock:
 	mutex_unlock(&sbi->gc_mutex);
 restore_flag: