btrfs: scrub: remove the old scrub recheck code
The old scrub code has a different entry point to verify the content, and since we have removed the writeback path, we can now start removing the re-check part, including:

- scrub_recover structure
- scrub_sector::recover member
- function scrub_setup_recheck_block()
- function scrub_recheck_block()
- function scrub_recheck_block_checksum()
- function scrub_repair_block_from_good_copy()
- function scrub_repair_sector_from_good_copy()
- function scrub_is_page_on_raid56()

- function full_stripe_lock()
- function search_full_stripe_lock()
- function get_full_stripe_logical()
- function insert_full_stripe_lock()
- function lock_full_stripe()
- function unlock_full_stripe()
- btrfs_block_group::full_stripe_locks_root member
- btrfs_full_stripe_locks_tree structure

  This infrastructure was there to ensure RAID56 scrub handles recovery
  and P/Q scrub correctly.  It is no longer needed: before P/Q scrub we
  will wait for all the involved data stripes to be scrubbed first, and
  the RAID56 code has an internal lock to ensure there is no race within
  the same full stripe.  (A simplified sketch of the removed locking
  scheme follows this commit message.)

The following functions are removed as well:

- function scrub_print_warning()
- function scrub_get_recover()
- function scrub_put_recover()
- function scrub_handle_errored_block()
- function scrub_setup_recheck_block()
- function scrub_bio_wait_endio()
- function scrub_submit_raid56_bio_wait()
- function scrub_recheck_block_on_raid56()
- function scrub_recheck_block()
- function scrub_recheck_block_checksum()
- function scrub_repair_block_from_good_copy()
- function scrub_repair_sector_from_good_copy()

And two more functions are exported temporarily for later cleanup:

- alloc_scrub_sector()
- alloc_scrub_block()

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
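For context, below is a minimal, simplified sketch of the full stripe locking scheme this commit removes. It is not the verbatim kernel code: the btrfs_full_stripe_locks_tree layout matches what block-group.h had (see the diff below), but the body of insert_full_stripe_lock() is an approximation meant only to illustrate the rb-tree-plus-refcount design that lock_full_stripe()/unlock_full_stripe() were built on.

/*
 * Simplified sketch of the removed full stripe locking scheme, not the
 * verbatim kernel code.  Each RAID5/6 block group kept an rb-tree of
 * locked full stripes, keyed by the full stripe's logical start, each
 * entry carrying a refcount and its own mutex so that only one scrubber
 * worked on a given full stripe at a time.
 */
#include <linux/types.h>
#include <linux/rbtree.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/err.h>

/* As it was defined in block-group.h before this commit. */
struct btrfs_full_stripe_locks_tree {
	struct rb_root root;
	struct mutex lock;
};

struct full_stripe_lock {
	struct rb_node node;
	u64 logical;		/* logical start of the full stripe */
	u64 refs;		/* number of current holders */
	struct mutex mutex;	/* serializes work on this full stripe */
};

/*
 * Find the lock entry for @fstripe_logical, or insert a new one.
 * Caller must hold tree->lock.  lock_full_stripe() would then take
 * entry->mutex, and unlock_full_stripe() would drop the refcount and
 * free the entry once it reaches zero.
 */
static struct full_stripe_lock *insert_full_stripe_lock(
		struct btrfs_full_stripe_locks_tree *tree, u64 fstripe_logical)
{
	struct rb_node **p = &tree->root.rb_node;
	struct rb_node *parent = NULL;
	struct full_stripe_lock *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct full_stripe_lock, node);
		if (fstripe_logical < entry->logical) {
			p = &(*p)->rb_left;
		} else if (fstripe_logical > entry->logical) {
			p = &(*p)->rb_right;
		} else {
			/* Already locked by someone, just bump the refcount. */
			entry->refs++;
			return entry;
		}
	}

	/* No existing entry, allocate and insert a new one. */
	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);
	entry->logical = fstripe_logical;
	entry->refs = 1;
	mutex_init(&entry->mutex);

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &tree->root);
	return entry;
}

With the new scrub code, P/Q scrub waits for all the involved data stripes to be scrubbed first, and the RAID56 code's own locking already prevents two users from working on the same full stripe, so none of this per-block-group state is needed anymore.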
commit e9255d6c40
parent 16f9399349
fs/btrfs/block-group.c

@@ -160,15 +160,6 @@ void btrfs_put_block_group(struct btrfs_block_group *cache)
 		btrfs_discard_cancel_work(&cache->fs_info->discard_ctl,
 					  cache);
 
-		/*
-		 * If not empty, someone is still holding mutex of
-		 * full_stripe_lock, which can only be released by caller.
-		 * And it will definitely cause use-after-free when caller
-		 * tries to release full stripe lock.
-		 *
-		 * No better way to resolve, but only to warn.
-		 */
-		WARN_ON(!RB_EMPTY_ROOT(&cache->full_stripe_locks_root.root));
 		kfree(cache->free_space_ctl);
 		kfree(cache->physical_map);
 		kfree(cache);
@@ -2124,8 +2115,6 @@ static struct btrfs_block_group *btrfs_create_block_group_cache(
 	btrfs_init_free_space_ctl(cache, cache->free_space_ctl);
 	atomic_set(&cache->frozen, 0);
 	mutex_init(&cache->free_space_lock);
-	cache->full_stripe_locks_root.root = RB_ROOT;
-	mutex_init(&cache->full_stripe_locks_root.lock);
 
 	return cache;
 }
fs/btrfs/block-group.h

@@ -91,14 +91,6 @@ struct btrfs_caching_control {
 /* Once caching_thread() finds this much free space, it will wake up waiters. */
 #define CACHING_CTL_WAKE_UP SZ_2M
 
-/*
- * Tree to record all locked full stripes of a RAID5/6 block group
- */
-struct btrfs_full_stripe_locks_tree {
-	struct rb_root root;
-	struct mutex lock;
-};
-
 struct btrfs_block_group {
 	struct btrfs_fs_info *fs_info;
 	struct inode *inode;
@@ -229,9 +221,6 @@ struct btrfs_block_group {
 	 */
 	int swap_extents;
 
-	/* Record locked full stripes for RAID5/6 block group */
-	struct btrfs_full_stripe_locks_tree full_stripe_locks_root;
-
 	/*
 	 * Allocation offset for the block group to implement sequential
 	 * allocation. This is used only on a zoned filesystem.
fs/btrfs/scrub.c (997 lines changed)

File diff suppressed because it is too large.
fs/btrfs/scrub.h

@@ -16,9 +16,16 @@ int btrfs_scrub_progress(struct btrfs_fs_info *fs_info, u64 devid,
 /* Temporary declaration, would be deleted later. */
 struct scrub_ctx;
 struct scrub_sector;
+struct scrub_block;
 int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u8 *csum);
 int scrub_add_sector_to_rd_bio(struct scrub_ctx *sctx,
 			       struct scrub_sector *sector);
 void scrub_sector_get(struct scrub_sector *sector);
+struct scrub_sector *alloc_scrub_sector(struct scrub_block *sblock, u64 logical);
+struct scrub_block *alloc_scrub_block(struct scrub_ctx *sctx,
+				      struct btrfs_device *dev,
+				      u64 logical, u64 physical,
+				      u64 physical_for_dev_replace,
+				      int mirror_num);
 
 #endif