ext4: allow ext4_get_group_info() to fail
[ Upstream commit 5354b2af34064a4579be8bc0e2f15a7b70f14b5f ] Previously, ext4_get_group_info() would treat an invalid group number as BUG(), since in theory it should never happen. However, if a malicious attacker (or fuzzer) modifies the superblock via the block device while the file system is mounted, it is possible for s_first_data_block to get set to a very large number. In that case, calculating the block group of some block number (such as the starting block of a preallocation region) could result in an underflow and a very large block group number. Then the BUG_ON check in ext4_get_group_info() would fire, resulting in a denial of service attack that can be triggered by root or someone with write access to the block device. From a quality of implementation perspective, it's best that even if the system administrator does something that they shouldn't, it will not trigger a BUG. So instead of BUG'ing, ext4_get_group_info() will call ext4_error and return NULL. We also add fallback code in all of the callers of ext4_get_group_info() to handle the case where it might return NULL. Also, since ext4_get_group_info() was already borderline in size for an inline function, un-inline it. This results in a net reduction of the compiled text size of ext4 by roughly 2k. Cc: stable@kernel.org Link: https://lore.kernel.org/r/20230430154311.579720-2-tytso@mit.edu Reported-by: syzbot+e2efa3efc15a1c9e95c3@syzkaller.appspotmail.com Link: https://syzkaller.appspot.com/bug?id=69b28112e098b070f639efb356393af3ffec4220 Signed-off-by: Theodore Ts'o <tytso@mit.edu> Reviewed-by: Jan Kara <jack@suse.cz> Signed-off-by: Sasha Levin <sashal@kernel.org>
This commit is contained in:
parent
f12aa035e8
commit
b4319e457d
@ -319,6 +319,22 @@ static ext4_fsblk_t ext4_valid_block_bitmap_padding(struct super_block *sb,
|
|||||||
return (next_zero_bit < bitmap_size ? next_zero_bit : 0);
|
return (next_zero_bit < bitmap_size ? next_zero_bit : 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
|
||||||
|
ext4_group_t group)
|
||||||
|
{
|
||||||
|
struct ext4_group_info **grp_info;
|
||||||
|
long indexv, indexh;
|
||||||
|
|
||||||
|
if (unlikely(group >= EXT4_SB(sb)->s_groups_count)) {
|
||||||
|
ext4_error(sb, "invalid group %u", group);
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
|
||||||
|
indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
|
||||||
|
grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
|
||||||
|
return grp_info[indexh];
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Return the block number which was discovered to be invalid, or 0 if
|
* Return the block number which was discovered to be invalid, or 0 if
|
||||||
* the block bitmap is valid.
|
* the block bitmap is valid.
|
||||||
@ -393,7 +409,7 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
|
|||||||
|
|
||||||
if (buffer_verified(bh))
|
if (buffer_verified(bh))
|
||||||
return 0;
|
return 0;
|
||||||
if (EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
|
if (!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
|
||||||
return -EFSCORRUPTED;
|
return -EFSCORRUPTED;
|
||||||
|
|
||||||
ext4_lock_group(sb, block_group);
|
ext4_lock_group(sb, block_group);
|
||||||
|
@ -2716,6 +2716,8 @@ extern void ext4_check_blocks_bitmap(struct super_block *);
|
|||||||
extern struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
|
extern struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb,
|
||||||
ext4_group_t block_group,
|
ext4_group_t block_group,
|
||||||
struct buffer_head ** bh);
|
struct buffer_head ** bh);
|
||||||
|
extern struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
|
||||||
|
ext4_group_t group);
|
||||||
extern int ext4_should_retry_alloc(struct super_block *sb, int *retries);
|
extern int ext4_should_retry_alloc(struct super_block *sb, int *retries);
|
||||||
|
|
||||||
extern struct buffer_head *ext4_read_block_bitmap_nowait(struct super_block *sb,
|
extern struct buffer_head *ext4_read_block_bitmap_nowait(struct super_block *sb,
|
||||||
@ -3322,19 +3324,6 @@ static inline void ext4_isize_set(struct ext4_inode *raw_inode, loff_t i_size)
|
|||||||
raw_inode->i_size_high = cpu_to_le32(i_size >> 32);
|
raw_inode->i_size_high = cpu_to_le32(i_size >> 32);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline
|
|
||||||
struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
|
|
||||||
ext4_group_t group)
|
|
||||||
{
|
|
||||||
struct ext4_group_info **grp_info;
|
|
||||||
long indexv, indexh;
|
|
||||||
BUG_ON(group >= EXT4_SB(sb)->s_groups_count);
|
|
||||||
indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
|
|
||||||
indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
|
|
||||||
grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
|
|
||||||
return grp_info[indexh];
|
|
||||||
}
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Reading s_groups_count requires using smp_rmb() afterwards. See
|
* Reading s_groups_count requires using smp_rmb() afterwards. See
|
||||||
* the locking protocol documented in the comments of ext4_group_add()
|
* the locking protocol documented in the comments of ext4_group_add()
|
||||||
|
@ -91,7 +91,7 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
|
|||||||
|
|
||||||
if (buffer_verified(bh))
|
if (buffer_verified(bh))
|
||||||
return 0;
|
return 0;
|
||||||
if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
|
if (!grp || EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
|
||||||
return -EFSCORRUPTED;
|
return -EFSCORRUPTED;
|
||||||
|
|
||||||
ext4_lock_group(sb, block_group);
|
ext4_lock_group(sb, block_group);
|
||||||
@ -293,7 +293,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
|
|||||||
}
|
}
|
||||||
if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
|
if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
|
||||||
grp = ext4_get_group_info(sb, block_group);
|
grp = ext4_get_group_info(sb, block_group);
|
||||||
if (unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp))) {
|
if (!grp || unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp))) {
|
||||||
fatal = -EFSCORRUPTED;
|
fatal = -EFSCORRUPTED;
|
||||||
goto error_return;
|
goto error_return;
|
||||||
}
|
}
|
||||||
@ -1047,7 +1047,7 @@ got_group:
|
|||||||
* Skip groups with already-known suspicious inode
|
* Skip groups with already-known suspicious inode
|
||||||
* tables
|
* tables
|
||||||
*/
|
*/
|
||||||
if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
|
if (!grp || EXT4_MB_GRP_IBITMAP_CORRUPT(grp))
|
||||||
goto next_group;
|
goto next_group;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1185,6 +1185,10 @@ got:
|
|||||||
|
|
||||||
if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
|
if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
|
||||||
grp = ext4_get_group_info(sb, group);
|
grp = ext4_get_group_info(sb, group);
|
||||||
|
if (!grp) {
|
||||||
|
err = -EFSCORRUPTED;
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
down_read(&grp->alloc_sem); /*
|
down_read(&grp->alloc_sem); /*
|
||||||
* protect vs itable
|
* protect vs itable
|
||||||
* lazyinit
|
* lazyinit
|
||||||
@ -1528,7 +1532,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
|
|||||||
}
|
}
|
||||||
|
|
||||||
gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
|
gdp = ext4_get_group_desc(sb, group, &group_desc_bh);
|
||||||
if (!gdp)
|
if (!gdp || !grp)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -745,6 +745,8 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
|
|||||||
MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
|
MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
|
||||||
|
|
||||||
grp = ext4_get_group_info(sb, e4b->bd_group);
|
grp = ext4_get_group_info(sb, e4b->bd_group);
|
||||||
|
if (!grp)
|
||||||
|
return NULL;
|
||||||
list_for_each(cur, &grp->bb_prealloc_list) {
|
list_for_each(cur, &grp->bb_prealloc_list) {
|
||||||
ext4_group_t groupnr;
|
ext4_group_t groupnr;
|
||||||
struct ext4_prealloc_space *pa;
|
struct ext4_prealloc_space *pa;
|
||||||
@ -1060,9 +1062,9 @@ mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
|
|||||||
|
|
||||||
static noinline_for_stack
|
static noinline_for_stack
|
||||||
void ext4_mb_generate_buddy(struct super_block *sb,
|
void ext4_mb_generate_buddy(struct super_block *sb,
|
||||||
void *buddy, void *bitmap, ext4_group_t group)
|
void *buddy, void *bitmap, ext4_group_t group,
|
||||||
|
struct ext4_group_info *grp)
|
||||||
{
|
{
|
||||||
struct ext4_group_info *grp = ext4_get_group_info(sb, group);
|
|
||||||
struct ext4_sb_info *sbi = EXT4_SB(sb);
|
struct ext4_sb_info *sbi = EXT4_SB(sb);
|
||||||
ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
|
ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
|
||||||
ext4_grpblk_t i = 0;
|
ext4_grpblk_t i = 0;
|
||||||
@ -1183,6 +1185,8 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
|
|||||||
break;
|
break;
|
||||||
|
|
||||||
grinfo = ext4_get_group_info(sb, group);
|
grinfo = ext4_get_group_info(sb, group);
|
||||||
|
if (!grinfo)
|
||||||
|
continue;
|
||||||
/*
|
/*
|
||||||
* If page is uptodate then we came here after online resize
|
* If page is uptodate then we came here after online resize
|
||||||
* which added some new uninitialized group info structs, so
|
* which added some new uninitialized group info structs, so
|
||||||
@ -1248,6 +1252,10 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
|
|||||||
group, page->index, i * blocksize);
|
group, page->index, i * blocksize);
|
||||||
trace_ext4_mb_buddy_bitmap_load(sb, group);
|
trace_ext4_mb_buddy_bitmap_load(sb, group);
|
||||||
grinfo = ext4_get_group_info(sb, group);
|
grinfo = ext4_get_group_info(sb, group);
|
||||||
|
if (!grinfo) {
|
||||||
|
err = -EFSCORRUPTED;
|
||||||
|
goto out;
|
||||||
|
}
|
||||||
grinfo->bb_fragments = 0;
|
grinfo->bb_fragments = 0;
|
||||||
memset(grinfo->bb_counters, 0,
|
memset(grinfo->bb_counters, 0,
|
||||||
sizeof(*grinfo->bb_counters) *
|
sizeof(*grinfo->bb_counters) *
|
||||||
@ -1258,7 +1266,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
|
|||||||
ext4_lock_group(sb, group);
|
ext4_lock_group(sb, group);
|
||||||
/* init the buddy */
|
/* init the buddy */
|
||||||
memset(data, 0xff, blocksize);
|
memset(data, 0xff, blocksize);
|
||||||
ext4_mb_generate_buddy(sb, data, incore, group);
|
ext4_mb_generate_buddy(sb, data, incore, group, grinfo);
|
||||||
ext4_unlock_group(sb, group);
|
ext4_unlock_group(sb, group);
|
||||||
incore = NULL;
|
incore = NULL;
|
||||||
} else {
|
} else {
|
||||||
@ -1372,6 +1380,9 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
|
|||||||
might_sleep();
|
might_sleep();
|
||||||
mb_debug(sb, "init group %u\n", group);
|
mb_debug(sb, "init group %u\n", group);
|
||||||
this_grp = ext4_get_group_info(sb, group);
|
this_grp = ext4_get_group_info(sb, group);
|
||||||
|
if (!this_grp)
|
||||||
|
return -EFSCORRUPTED;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* This ensures that we don't reinit the buddy cache
|
* This ensures that we don't reinit the buddy cache
|
||||||
* page which map to the group from which we are already
|
* page which map to the group from which we are already
|
||||||
@ -1446,6 +1457,8 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group,
|
|||||||
|
|
||||||
blocks_per_page = PAGE_SIZE / sb->s_blocksize;
|
blocks_per_page = PAGE_SIZE / sb->s_blocksize;
|
||||||
grp = ext4_get_group_info(sb, group);
|
grp = ext4_get_group_info(sb, group);
|
||||||
|
if (!grp)
|
||||||
|
return -EFSCORRUPTED;
|
||||||
|
|
||||||
e4b->bd_blkbits = sb->s_blocksize_bits;
|
e4b->bd_blkbits = sb->s_blocksize_bits;
|
||||||
e4b->bd_info = grp;
|
e4b->bd_info = grp;
|
||||||
@ -2162,6 +2175,8 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac,
|
|||||||
struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
|
struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group);
|
||||||
struct ext4_free_extent ex;
|
struct ext4_free_extent ex;
|
||||||
|
|
||||||
|
if (!grp)
|
||||||
|
return -EFSCORRUPTED;
|
||||||
if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY)))
|
if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY)))
|
||||||
return 0;
|
return 0;
|
||||||
if (grp->bb_free == 0)
|
if (grp->bb_free == 0)
|
||||||
@ -2386,7 +2401,7 @@ static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
|
|||||||
|
|
||||||
BUG_ON(cr < 0 || cr >= 4);
|
BUG_ON(cr < 0 || cr >= 4);
|
||||||
|
|
||||||
if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
|
if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp) || !grp))
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
free = grp->bb_free;
|
free = grp->bb_free;
|
||||||
@ -2455,6 +2470,8 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac,
|
|||||||
ext4_grpblk_t free;
|
ext4_grpblk_t free;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
|
if (!grp)
|
||||||
|
return -EFSCORRUPTED;
|
||||||
if (sbi->s_mb_stats)
|
if (sbi->s_mb_stats)
|
||||||
atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]);
|
atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]);
|
||||||
if (should_lock) {
|
if (should_lock) {
|
||||||
@ -2535,7 +2552,7 @@ ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group,
|
|||||||
* prefetch once, so we avoid getblk() call, which can
|
* prefetch once, so we avoid getblk() call, which can
|
||||||
* be expensive.
|
* be expensive.
|
||||||
*/
|
*/
|
||||||
if (!EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
|
if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) &&
|
||||||
EXT4_MB_GRP_NEED_INIT(grp) &&
|
EXT4_MB_GRP_NEED_INIT(grp) &&
|
||||||
ext4_free_group_clusters(sb, gdp) > 0 &&
|
ext4_free_group_clusters(sb, gdp) > 0 &&
|
||||||
!(ext4_has_group_desc_csum(sb) &&
|
!(ext4_has_group_desc_csum(sb) &&
|
||||||
@ -2579,7 +2596,7 @@ void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group,
|
|||||||
group--;
|
group--;
|
||||||
grp = ext4_get_group_info(sb, group);
|
grp = ext4_get_group_info(sb, group);
|
||||||
|
|
||||||
if (EXT4_MB_GRP_NEED_INIT(grp) &&
|
if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) &&
|
||||||
ext4_free_group_clusters(sb, gdp) > 0 &&
|
ext4_free_group_clusters(sb, gdp) > 0 &&
|
||||||
!(ext4_has_group_desc_csum(sb) &&
|
!(ext4_has_group_desc_csum(sb) &&
|
||||||
(gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
|
(gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) {
|
||||||
@ -2838,6 +2855,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
|
|||||||
sizeof(struct ext4_group_info);
|
sizeof(struct ext4_group_info);
|
||||||
|
|
||||||
grinfo = ext4_get_group_info(sb, group);
|
grinfo = ext4_get_group_info(sb, group);
|
||||||
|
if (!grinfo)
|
||||||
|
return 0;
|
||||||
/* Load the group info in memory only if not already loaded. */
|
/* Load the group info in memory only if not already loaded. */
|
||||||
if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
|
if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) {
|
||||||
err = ext4_mb_load_buddy(sb, group, &e4b);
|
err = ext4_mb_load_buddy(sb, group, &e4b);
|
||||||
@ -2848,7 +2867,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
|
|||||||
buddy_loaded = 1;
|
buddy_loaded = 1;
|
||||||
}
|
}
|
||||||
|
|
||||||
memcpy(&sg, ext4_get_group_info(sb, group), i);
|
memcpy(&sg, grinfo, i);
|
||||||
|
|
||||||
if (buddy_loaded)
|
if (buddy_loaded)
|
||||||
ext4_mb_unload_buddy(&e4b);
|
ext4_mb_unload_buddy(&e4b);
|
||||||
@ -3210,8 +3229,12 @@ static int ext4_mb_init_backend(struct super_block *sb)
|
|||||||
|
|
||||||
err_freebuddy:
|
err_freebuddy:
|
||||||
cachep = get_groupinfo_cache(sb->s_blocksize_bits);
|
cachep = get_groupinfo_cache(sb->s_blocksize_bits);
|
||||||
while (i-- > 0)
|
while (i-- > 0) {
|
||||||
kmem_cache_free(cachep, ext4_get_group_info(sb, i));
|
struct ext4_group_info *grp = ext4_get_group_info(sb, i);
|
||||||
|
|
||||||
|
if (grp)
|
||||||
|
kmem_cache_free(cachep, grp);
|
||||||
|
}
|
||||||
i = sbi->s_group_info_size;
|
i = sbi->s_group_info_size;
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
group_info = rcu_dereference(sbi->s_group_info);
|
group_info = rcu_dereference(sbi->s_group_info);
|
||||||
@ -3525,6 +3548,8 @@ int ext4_mb_release(struct super_block *sb)
|
|||||||
for (i = 0; i < ngroups; i++) {
|
for (i = 0; i < ngroups; i++) {
|
||||||
cond_resched();
|
cond_resched();
|
||||||
grinfo = ext4_get_group_info(sb, i);
|
grinfo = ext4_get_group_info(sb, i);
|
||||||
|
if (!grinfo)
|
||||||
|
continue;
|
||||||
mb_group_bb_bitmap_free(grinfo);
|
mb_group_bb_bitmap_free(grinfo);
|
||||||
ext4_lock_group(sb, i);
|
ext4_lock_group(sb, i);
|
||||||
count = ext4_mb_cleanup_pa(grinfo);
|
count = ext4_mb_cleanup_pa(grinfo);
|
||||||
@ -4454,6 +4479,8 @@ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
|
|||||||
struct ext4_free_data *entry;
|
struct ext4_free_data *entry;
|
||||||
|
|
||||||
grp = ext4_get_group_info(sb, group);
|
grp = ext4_get_group_info(sb, group);
|
||||||
|
if (!grp)
|
||||||
|
return;
|
||||||
n = rb_first(&(grp->bb_free_root));
|
n = rb_first(&(grp->bb_free_root));
|
||||||
|
|
||||||
while (n) {
|
while (n) {
|
||||||
@ -4481,6 +4508,9 @@ void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
|
|||||||
int preallocated = 0;
|
int preallocated = 0;
|
||||||
int len;
|
int len;
|
||||||
|
|
||||||
|
if (!grp)
|
||||||
|
return;
|
||||||
|
|
||||||
/* all form of preallocation discards first load group,
|
/* all form of preallocation discards first load group,
|
||||||
* so the only competing code is preallocation use.
|
* so the only competing code is preallocation use.
|
||||||
* we don't need any locking here
|
* we don't need any locking here
|
||||||
@ -4672,6 +4702,8 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
|
|||||||
|
|
||||||
ei = EXT4_I(ac->ac_inode);
|
ei = EXT4_I(ac->ac_inode);
|
||||||
grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
|
grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
|
||||||
|
if (!grp)
|
||||||
|
return;
|
||||||
|
|
||||||
pa->pa_obj_lock = &ei->i_prealloc_lock;
|
pa->pa_obj_lock = &ei->i_prealloc_lock;
|
||||||
pa->pa_inode = ac->ac_inode;
|
pa->pa_inode = ac->ac_inode;
|
||||||
@ -4725,6 +4757,8 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
|
|||||||
atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
|
atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
|
||||||
|
|
||||||
grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
|
grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
|
||||||
|
if (!grp)
|
||||||
|
return;
|
||||||
lg = ac->ac_lg;
|
lg = ac->ac_lg;
|
||||||
BUG_ON(lg == NULL);
|
BUG_ON(lg == NULL);
|
||||||
|
|
||||||
@ -4853,6 +4887,8 @@ ext4_mb_discard_group_preallocations(struct super_block *sb,
|
|||||||
int err;
|
int err;
|
||||||
int free = 0;
|
int free = 0;
|
||||||
|
|
||||||
|
if (!grp)
|
||||||
|
return 0;
|
||||||
mb_debug(sb, "discard preallocation for group %u\n", group);
|
mb_debug(sb, "discard preallocation for group %u\n", group);
|
||||||
if (list_empty(&grp->bb_prealloc_list))
|
if (list_empty(&grp->bb_prealloc_list))
|
||||||
goto out_dbg;
|
goto out_dbg;
|
||||||
@ -5090,6 +5126,9 @@ static inline void ext4_mb_show_pa(struct super_block *sb)
|
|||||||
struct ext4_prealloc_space *pa;
|
struct ext4_prealloc_space *pa;
|
||||||
ext4_grpblk_t start;
|
ext4_grpblk_t start;
|
||||||
struct list_head *cur;
|
struct list_head *cur;
|
||||||
|
|
||||||
|
if (!grp)
|
||||||
|
continue;
|
||||||
ext4_lock_group(sb, i);
|
ext4_lock_group(sb, i);
|
||||||
list_for_each(cur, &grp->bb_prealloc_list) {
|
list_for_each(cur, &grp->bb_prealloc_list) {
|
||||||
pa = list_entry(cur, struct ext4_prealloc_space,
|
pa = list_entry(cur, struct ext4_prealloc_space,
|
||||||
@ -5895,6 +5934,7 @@ static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode,
|
|||||||
struct buffer_head *bitmap_bh = NULL;
|
struct buffer_head *bitmap_bh = NULL;
|
||||||
struct super_block *sb = inode->i_sb;
|
struct super_block *sb = inode->i_sb;
|
||||||
struct ext4_group_desc *gdp;
|
struct ext4_group_desc *gdp;
|
||||||
|
struct ext4_group_info *grp;
|
||||||
unsigned int overflow;
|
unsigned int overflow;
|
||||||
ext4_grpblk_t bit;
|
ext4_grpblk_t bit;
|
||||||
struct buffer_head *gd_bh;
|
struct buffer_head *gd_bh;
|
||||||
@ -5920,8 +5960,8 @@ do_more:
|
|||||||
overflow = 0;
|
overflow = 0;
|
||||||
ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
|
ext4_get_group_no_and_offset(sb, block, &block_group, &bit);
|
||||||
|
|
||||||
if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(
|
grp = ext4_get_group_info(sb, block_group);
|
||||||
ext4_get_group_info(sb, block_group))))
|
if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -6523,6 +6563,8 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
|
|||||||
|
|
||||||
for (group = first_group; group <= last_group; group++) {
|
for (group = first_group; group <= last_group; group++) {
|
||||||
grp = ext4_get_group_info(sb, group);
|
grp = ext4_get_group_info(sb, group);
|
||||||
|
if (!grp)
|
||||||
|
continue;
|
||||||
/* We only do this if the grp has never been initialized */
|
/* We only do this if the grp has never been initialized */
|
||||||
if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
|
if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
|
||||||
ret = ext4_mb_init_group(sb, group, GFP_NOFS);
|
ret = ext4_mb_init_group(sb, group, GFP_NOFS);
|
||||||
|
@ -1049,6 +1049,8 @@ void ext4_mark_group_bitmap_corrupted(struct super_block *sb,
|
|||||||
struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
|
struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL);
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
|
if (!grp || !gdp)
|
||||||
|
return;
|
||||||
if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) {
|
if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) {
|
||||||
ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
|
ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
|
||||||
&grp->bb_state);
|
&grp->bb_state);
|
||||||
|
Loading…
x
Reference in New Issue
Block a user