btrfs: Use btrfs_get_io_geometry appropriately
Presently btrfs_map_block is used not only to do everything necessary to map a bio to the underlying allocation profile, but also to identify how much data could be written based on btrfs' stripe logic without actually submitting anything. This is achieved by passing NULL for the 'bbio_ret' parameter.

This patch refactors all callers that require just the mapping length by switching them to btrfs_get_io_geometry instead of calling btrfs_map_block with a special NULL value for 'bbio_ret'. No functional change.

Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
commit 89b798ad1b
parent 5f1411265e
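The gist of the change, for callers that only want to know how much data fits before the next stripe boundary: instead of asking btrfs_map_block to build (and then throw away) a full btrfs_bio just to learn the mapped length, they now fill a small btrfs_io_geometry struct and read its len field. Below is a minimal, self-contained userspace sketch of that calling pattern; the io_geometry struct, get_io_geometry() and would_cross_stripe() are simplified stand-ins for illustration, not the kernel code itself.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for struct btrfs_io_geometry: only the fields that
 * the length-only callers touched by this patch actually consume. */
struct io_geometry {
	uint64_t len;           /* bytes mappable before crossing a stripe */
	uint64_t stripe_offset; /* offset of 'logical' within its stripe */
};

/* Stand-in for btrfs_get_io_geometry(): pure stripe math, no bio is built. */
static int get_io_geometry(uint64_t logical, uint64_t stripe_len,
			   struct io_geometry *geom)
{
	geom->stripe_offset = logical % stripe_len;
	geom->len = stripe_len - geom->stripe_offset;
	return 0;
}

/* Mirrors the btrfs_bio_fits_in_stripe() pattern after the patch: true when
 * growing a bio of 'bio_len' bytes at 'logical' by 'size' bytes would cross
 * a stripe boundary. */
static bool would_cross_stripe(uint64_t logical, uint64_t bio_len,
			       uint64_t size, uint64_t stripe_len)
{
	struct io_geometry geom;

	if (get_io_geometry(logical, stripe_len, &geom) < 0)
		return true; /* be conservative on error */
	return geom.len < bio_len + size;
}

int main(void)
{
	/* 64K stripes: starting 8K into a stripe leaves 56K, so 60K+8K
	 * does not fit and this prints 1. */
	printf("%d\n", would_cross_stripe(8 << 10, 60 << 10, 8 << 10, 64 << 10));
	return 0;
}
```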
fs/btrfs/inode.c
@@ -1932,17 +1932,19 @@ int btrfs_bio_fits_in_stripe(struct page *page, size_t size, struct bio *bio,
         u64 length = 0;
         u64 map_length;
         int ret;
+        struct btrfs_io_geometry geom;

         if (bio_flags & EXTENT_BIO_COMPRESSED)
                 return 0;

         length = bio->bi_iter.bi_size;
         map_length = length;
-        ret = btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
-                              NULL, 0);
+        ret = btrfs_get_io_geometry(fs_info, btrfs_op(bio), logical, map_length,
+                                    &geom);
         if (ret < 0)
                 return ret;
-        if (map_length < length + size)
+
+        if (geom.len < length + size)
                 return 1;
         return 0;
 }
@@ -8308,22 +8310,21 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip)
         struct bio *orig_bio = dip->orig_bio;
         u64 start_sector = orig_bio->bi_iter.bi_sector;
         u64 file_offset = dip->logical_offset;
-        u64 map_length;
         int async_submit = 0;
         u64 submit_len;
         int clone_offset = 0;
         int clone_len;
         int ret;
         blk_status_t status;
+        struct btrfs_io_geometry geom;

-        map_length = orig_bio->bi_iter.bi_size;
-        submit_len = map_length;
-        ret = btrfs_map_block(fs_info, btrfs_op(orig_bio), start_sector << 9,
-                              &map_length, NULL, 0);
+        submit_len = orig_bio->bi_iter.bi_size;
+        ret = btrfs_get_io_geometry(fs_info, btrfs_op(orig_bio),
+                                    start_sector << 9, submit_len, &geom);
         if (ret)
                 return -EIO;

-        if (map_length >= submit_len) {
+        if (geom.len >= submit_len) {
                 bio = orig_bio;
                 dip->flags |= BTRFS_DIO_ORIG_BIO_SUBMITTED;
                 goto submit;
@@ -8336,10 +8337,10 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip)
                 async_submit = 1;

         /* bio split */
-        ASSERT(map_length <= INT_MAX);
+        ASSERT(geom.len <= INT_MAX);
         atomic_inc(&dip->pending_bios);
         do {
-                clone_len = min_t(int, submit_len, map_length);
+                clone_len = min_t(int, submit_len, geom.len);

                 /*
                  * This will never fail as it's passing GPF_NOFS and
@@ -8376,9 +8377,8 @@ static int btrfs_submit_direct_hook(struct btrfs_dio_private *dip)
                 start_sector += clone_len >> 9;
                 file_offset += clone_len;

-                map_length = submit_len;
-                ret = btrfs_map_block(fs_info, btrfs_op(orig_bio),
-                                      start_sector << 9, &map_length, NULL, 0);
+                ret = btrfs_get_io_geometry(fs_info, btrfs_op(orig_bio),
+                                            start_sector << 9, submit_len, &geom);
                 if (ret)
                         goto out_err;
         } while (submit_len > 0);
fs/btrfs/volumes.c
@@ -5930,7 +5930,7 @@ static bool need_full_stripe(enum btrfs_map_op op)
  * usually shouldn't happen unless @logical is corrupted, 0 otherwise.
  */
 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
-                u64 logical, u64 len, struct btrfs_io_geometry *io_geom)
+                          u64 logical, u64 len, struct btrfs_io_geometry *io_geom)
 {
         struct extent_map *em;
         struct map_lookup *map;
@@ -6037,78 +6037,30 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
         int patch_the_first_stripe_for_dev_replace = 0;
         u64 physical_to_patch_in_first_stripe = 0;
         u64 raid56_full_stripe_start = (u64)-1;
+        struct btrfs_io_geometry geom;
+
+        ASSERT(bbio_ret);

         if (op == BTRFS_MAP_DISCARD)
                 return __btrfs_map_block_for_discard(fs_info, logical,
                                                      *length, bbio_ret);

+        ret = btrfs_get_io_geometry(fs_info, op, logical, *length, &geom);
+        if (ret < 0)
+                return ret;
+
         em = btrfs_get_chunk_map(fs_info, logical, *length);
-        if (IS_ERR(em))
-                return PTR_ERR(em);
-
+        ASSERT(em);
         map = em->map_lookup;
-        offset = logical - em->start;

-        stripe_len = map->stripe_len;
-        stripe_nr = offset;
-        /*
-         * stripe_nr counts the total number of stripes we have to stride
-         * to get to this block
-         */
-        stripe_nr = div64_u64(stripe_nr, stripe_len);
+        *length = geom.len;
+        offset = geom.offset;
+        stripe_len = geom.stripe_len;
+        stripe_nr = geom.stripe_nr;
+        stripe_offset = geom.stripe_offset;
+        raid56_full_stripe_start = geom.raid56_stripe_offset;
         data_stripes = nr_data_stripes(map);

-        stripe_offset = stripe_nr * stripe_len;
-        if (offset < stripe_offset) {
-                btrfs_crit(fs_info,
-"stripe math has gone wrong, stripe_offset=%llu, offset=%llu, start=%llu, logical=%llu, stripe_len=%llu",
-                           stripe_offset, offset, em->start, logical,
-                           stripe_len);
-                free_extent_map(em);
-                return -EINVAL;
-        }
-
-        /* stripe_offset is the offset of this block in its stripe*/
-        stripe_offset = offset - stripe_offset;
-
-        /* if we're here for raid56, we need to know the stripe aligned start */
-        if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
-                unsigned long full_stripe_len = stripe_len * data_stripes;
-                raid56_full_stripe_start = offset;
-
-                /* allow a write of a full stripe, but make sure we don't
-                 * allow straddling of stripes
-                 */
-                raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
-                                full_stripe_len);
-                raid56_full_stripe_start *= full_stripe_len;
-        }
-
-        if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
-                u64 max_len;
-                /* For writes to RAID[56], allow a full stripeset across all disks.
-                   For other RAID types and for RAID[56] reads, just allow a single
-                   stripe (on a single disk). */
-                if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
-                    (op == BTRFS_MAP_WRITE)) {
-                        max_len = stripe_len * data_stripes -
-                                (offset - raid56_full_stripe_start);
-                } else {
-                        /* we limit the length of each bio to what fits in a stripe */
-                        max_len = stripe_len - stripe_offset;
-                }
-                *length = min_t(u64, em->len - offset, max_len);
-        } else {
-                *length = em->len - offset;
-        }
-
-        /*
-         * This is for when we're called from btrfs_bio_fits_in_stripe and all
-         * it cares about is the length
-         */
-        if (!bbio_ret)
-                goto out;
-
         down_read(&dev_replace->rwsem);
         dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
         /*
fs/btrfs/volumes.h
@@ -429,7 +429,7 @@ int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
                      u64 logical, u64 *length,
                      struct btrfs_bio **bbio_ret);
 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
-                u64 logical, u64 len, struct btrfs_io_geometry *io_geom);
+                          u64 logical, u64 len, struct btrfs_io_geometry *io_geom);
 int btrfs_rmap_block(struct btrfs_fs_info *fs_info, u64 chunk_start,
                      u64 physical, u64 **logical, int *naddrs, int *stripe_len);
 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info);
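For reference, the six values that __btrfs_map_block now reads back out of the geometry (geom.len, geom.offset, geom.stripe_len, geom.stripe_nr, geom.stripe_offset and geom.raid56_stripe_offset) correspond to the open-coded stripe math deleted in the hunk above. The standalone sketch below models that math with simplified stand-in types and a compute_geometry() helper of my own naming; it is an illustration of the idea, not the kernel's btrfs_get_io_geometry implementation.

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in mirroring the fields consumed by __btrfs_map_block above. */
struct io_geometry {
	uint64_t offset;               /* logical address relative to chunk start */
	uint64_t len;                  /* bytes mappable before the stripe boundary */
	uint64_t stripe_len;           /* size of one stripe */
	uint64_t stripe_nr;            /* which stripe 'offset' falls into */
	uint64_t stripe_offset;        /* offset within that stripe */
	uint64_t raid56_stripe_offset; /* start of the containing full stripe */
};

/*
 * Simplified version of the stripe math removed from __btrfs_map_block:
 * everything is derived from the chunk start, the stripe size and, for
 * RAID5/6 writes, the number of data stripes.
 */
static void compute_geometry(uint64_t logical, uint64_t chunk_start,
			     uint64_t chunk_len, uint64_t stripe_len,
			     unsigned int nr_data_stripes, int is_raid56_write,
			     struct io_geometry *geom)
{
	uint64_t max_len;

	geom->offset = logical - chunk_start;
	geom->stripe_len = stripe_len;
	geom->stripe_nr = geom->offset / stripe_len;
	geom->stripe_offset = geom->offset - geom->stripe_nr * stripe_len;

	/* Round down to the start of the full stripe (relevant for RAID5/6). */
	geom->raid56_stripe_offset =
		(geom->offset / (stripe_len * nr_data_stripes)) *
		(stripe_len * nr_data_stripes);

	if (is_raid56_write)
		/* a write may span the whole data portion of a full stripe */
		max_len = stripe_len * nr_data_stripes -
			  (geom->offset - geom->raid56_stripe_offset);
	else
		/* otherwise limit the IO to what fits in a single stripe */
		max_len = stripe_len - geom->stripe_offset;

	geom->len = chunk_len - geom->offset < max_len ?
		    chunk_len - geom->offset : max_len;
}

int main(void)
{
	struct io_geometry geom;

	/* 64K stripes, 3 data stripes, plain read starting 10K into the chunk */
	compute_geometry(1048576 + 10240, 1048576, 3 * 65536, 65536, 3, 0, &geom);
	printf("stripe_nr=%llu stripe_offset=%llu len=%llu\n",
	       (unsigned long long)geom.stripe_nr,
	       (unsigned long long)geom.stripe_offset,
	       (unsigned long long)geom.len);
	return 0;
}
```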