btrfs: scrub: factor out initialization of scrub_block into helper
Although there are only two callers, we are going to add some members for scrub_block in the incoming patches. Factoring out the initialization code will make later expansion easier. One thing to note is, even though scrub_handle_errored_block() doesn't utilize scrub_block::refs, we still use alloc_scrub_block() to initialize sblock::refs, allowing us to use scrub_block_put() to do cleanup. Signed-off-by: Qu Wenruo <wqu@suse.com> Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
parent
1dfa500511
commit
15b88f6d24
@ -202,6 +202,19 @@ struct full_stripe_lock {
|
|||||||
struct mutex mutex;
|
struct mutex mutex;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
static struct scrub_block *alloc_scrub_block(struct scrub_ctx *sctx)
|
||||||
|
{
|
||||||
|
struct scrub_block *sblock;
|
||||||
|
|
||||||
|
sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
|
||||||
|
if (!sblock)
|
||||||
|
return NULL;
|
||||||
|
refcount_set(&sblock->refs, 1);
|
||||||
|
sblock->sctx = sctx;
|
||||||
|
sblock->no_io_error_seen = 1;
|
||||||
|
return sblock;
|
||||||
|
}
|
||||||
|
|
||||||
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
|
static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
|
||||||
struct scrub_block *sblocks_for_recheck[]);
|
struct scrub_block *sblocks_for_recheck[]);
|
||||||
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
|
static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
|
||||||
@ -912,8 +925,14 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
|
|||||||
* the statistics.
|
* the statistics.
|
||||||
*/
|
*/
|
||||||
for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS; mirror_index++) {
|
for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS; mirror_index++) {
|
||||||
sblocks_for_recheck[mirror_index] =
|
/*
|
||||||
kzalloc(sizeof(struct scrub_block), GFP_KERNEL);
|
* Note: the two members refs and outstanding_sectors are not
|
||||||
|
* used in the blocks that are used for the recheck procedure.
|
||||||
|
*
|
||||||
|
* But alloc_scrub_block() will initialize sblock::ref anyway,
|
||||||
|
* so we can use scrub_block_put() to clean them up.
|
||||||
|
*/
|
||||||
|
sblocks_for_recheck[mirror_index] = alloc_scrub_block(sctx);
|
||||||
if (!sblocks_for_recheck[mirror_index]) {
|
if (!sblocks_for_recheck[mirror_index]) {
|
||||||
spin_lock(&sctx->stat_lock);
|
spin_lock(&sctx->stat_lock);
|
||||||
sctx->stat.malloc_errors++;
|
sctx->stat.malloc_errors++;
|
||||||
@ -923,14 +942,6 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
|
|||||||
btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
|
btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
/*
|
|
||||||
* Note: the two members refs and outstanding_sectors are not
|
|
||||||
* used in the blocks that are used for the recheck procedure.
|
|
||||||
* But to make the cleanup easier, we just put one ref for each
|
|
||||||
* sblocks.
|
|
||||||
*/
|
|
||||||
refcount_set(&sblocks_for_recheck[mirror_index]->refs, 1);
|
|
||||||
sblocks_for_recheck[mirror_index]->sctx = sctx;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Setup the context, map the logical blocks and alloc the sectors */
|
/* Setup the context, map the logical blocks and alloc the sectors */
|
||||||
@ -2223,7 +2234,7 @@ static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len,
|
|||||||
const u32 sectorsize = sctx->fs_info->sectorsize;
|
const u32 sectorsize = sctx->fs_info->sectorsize;
|
||||||
int index;
|
int index;
|
||||||
|
|
||||||
sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
|
sblock = alloc_scrub_block(sctx);
|
||||||
if (!sblock) {
|
if (!sblock) {
|
||||||
spin_lock(&sctx->stat_lock);
|
spin_lock(&sctx->stat_lock);
|
||||||
sctx->stat.malloc_errors++;
|
sctx->stat.malloc_errors++;
|
||||||
@ -2231,12 +2242,6 @@ static int scrub_sectors(struct scrub_ctx *sctx, u64 logical, u32 len,
|
|||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* one ref inside this function, plus one for each page added to
|
|
||||||
* a bio later on */
|
|
||||||
refcount_set(&sblock->refs, 1);
|
|
||||||
sblock->sctx = sctx;
|
|
||||||
sblock->no_io_error_seen = 1;
|
|
||||||
|
|
||||||
for (index = 0; len > 0; index++) {
|
for (index = 0; len > 0; index++) {
|
||||||
struct scrub_sector *sector;
|
struct scrub_sector *sector;
|
||||||
/*
|
/*
|
||||||
@ -2576,7 +2581,7 @@ static int scrub_sectors_for_parity(struct scrub_parity *sparity,
|
|||||||
|
|
||||||
ASSERT(IS_ALIGNED(len, sectorsize));
|
ASSERT(IS_ALIGNED(len, sectorsize));
|
||||||
|
|
||||||
sblock = kzalloc(sizeof(*sblock), GFP_KERNEL);
|
sblock = alloc_scrub_block(sctx);
|
||||||
if (!sblock) {
|
if (!sblock) {
|
||||||
spin_lock(&sctx->stat_lock);
|
spin_lock(&sctx->stat_lock);
|
||||||
sctx->stat.malloc_errors++;
|
sctx->stat.malloc_errors++;
|
||||||
@ -2584,11 +2589,6 @@ static int scrub_sectors_for_parity(struct scrub_parity *sparity,
|
|||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* one ref inside this function, plus one for each page added to
|
|
||||||
* a bio later on */
|
|
||||||
refcount_set(&sblock->refs, 1);
|
|
||||||
sblock->sctx = sctx;
|
|
||||||
sblock->no_io_error_seen = 1;
|
|
||||||
sblock->sparity = sparity;
|
sblock->sparity = sparity;
|
||||||
scrub_parity_get(sparity);
|
scrub_parity_get(sparity);
|
||||||
|
|
||||||
|
Loading…
x
Reference in New Issue
Block a user