bcachefs: Add offset_into_extent param to bch2_read_extent()
With reflink, we'll no longer be able to calculate the offset of the data we want within the extent we're reading from using just the extent pos and the iter pos - it will have to be passed in separately.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
commit 06ed855862
parent a4461c8a7f
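In other words: while every extent lives in the inode's own keyspace, the read path can recompute the offset into the extent at any point from the iterator position and the extent's start. Reflink introduces indirect extents in a separate keyspace, so only the outermost caller still knows the logical position; it has to compute the offset once and hand it down. A minimal sketch of the calling pattern this patch establishes — read_one_extent() is a hypothetical illustration, not a function in this commit:

static void read_one_extent(struct bch_fs *c, struct bch_read_bio *rbio,
			    struct btree_iter *iter, struct bkey_s_c k,
			    unsigned flags)
{
	/*
	 * Computable here, where iter->pos and the extent key are
	 * still in the same keyspace:
	 */
	unsigned offset_into_extent = iter->pos.offset -
		bkey_start_offset(k.k);

	/*
	 * ...but not deeper in the read path once reflink can hand us
	 * an indirect extent from another keyspace, so the offset now
	 * travels as an explicit parameter:
	 */
	bch2_read_extent(c, rbio, k, offset_into_extent, flags);
}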
--- a/fs/bcachefs/fs-io.c
+++ b/fs/bcachefs/fs-io.c
@@ -1005,7 +1005,6 @@ static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter,
 		       struct readpages_iter *readpages_iter)
 {
 	struct bch_fs *c = trans->c;
-	struct bio *bio = &rbio->bio;
 	int flags = BCH_READ_RETRY_IF_STALE|
 		BCH_READ_MAY_PROMOTE;
 
@@ -1015,9 +1014,10 @@ static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter,
 	while (1) {
 		BKEY_PADDED(k) tmp;
 		struct bkey_s_c k;
-		unsigned bytes;
+		unsigned bytes, offset_into_extent;
 
-		bch2_btree_iter_set_pos(iter, POS(inum, bio->bi_iter.bi_sector));
+		bch2_btree_iter_set_pos(iter,
+				POS(inum, rbio->bio.bi_iter.bi_sector));
 
 		k = bch2_btree_iter_peek_slot(iter);
 		BUG_ON(!k.k);
@@ -1025,8 +1025,8 @@ static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter,
 		if (IS_ERR(k.k)) {
 			int ret = btree_iter_err(iter);
 			BUG_ON(!ret);
-			bcache_io_error(c, bio, "btree IO error %i", ret);
-			bio_endio(bio);
+			bcache_io_error(c, &rbio->bio, "btree IO error %i", ret);
+			bio_endio(&rbio->bio);
 			return;
 		}
 
@@ -1034,6 +1034,9 @@ static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter,
 		bch2_trans_unlock(trans);
 		k = bkey_i_to_s_c(&tmp.k);
 
+		offset_into_extent = iter->pos.offset -
+			bkey_start_offset(k.k);
+
 		if (readpages_iter) {
 			bool want_full_extent = false;
 
@@ -1048,27 +1051,27 @@ static void bchfs_read(struct btree_trans *trans, struct btree_iter *iter,
 			}
 
 			readpage_bio_extend(readpages_iter,
-					    bio, k.k->p.offset,
+					    &rbio->bio, k.k->p.offset,
 					    want_full_extent);
 		}
 
-		bytes = (min_t(u64, k.k->p.offset, bio_end_sector(bio)) -
-			 bio->bi_iter.bi_sector) << 9;
-		swap(bio->bi_iter.bi_size, bytes);
+		bytes = min_t(unsigned, bio_sectors(&rbio->bio),
+			      (k.k->size - offset_into_extent)) << 9;
+		swap(rbio->bio.bi_iter.bi_size, bytes);
 
-		if (bytes == bio->bi_iter.bi_size)
+		if (rbio->bio.bi_iter.bi_size == bytes)
 			flags |= BCH_READ_LAST_FRAGMENT;
 
 		if (bkey_extent_is_allocation(k.k))
-			bch2_add_page_sectors(bio, k);
+			bch2_add_page_sectors(&rbio->bio, k);
 
-		bch2_read_extent(c, rbio, k, flags);
+		bch2_read_extent(c, rbio, k, offset_into_extent, flags);
 
 		if (flags & BCH_READ_LAST_FRAGMENT)
 			return;
 
-		swap(bio->bi_iter.bi_size, bytes);
-		bio_advance(bio, bytes);
+		swap(rbio->bio.bi_iter.bi_size, bytes);
+		bio_advance(&rbio->bio, bytes);
 	}
 }
--- a/fs/bcachefs/io.c
+++ b/fs/bcachefs/io.c
@@ -1240,7 +1240,7 @@ retry:
 		goto out;
 	}
 
-	ret = __bch2_read_extent(c, rbio, bvec_iter, k, failed, flags);
+	ret = __bch2_read_extent(c, rbio, bvec_iter, k, 0, failed, flags);
 	if (ret == READ_RETRY)
 		goto retry;
 	if (ret)
@@ -1272,17 +1272,22 @@ retry:
 			   POS(inode, bvec_iter.bi_sector),
 			   BTREE_ITER_SLOTS, k, ret) {
 		BKEY_PADDED(k) tmp;
-		unsigned bytes;
+		unsigned bytes, offset_into_extent;
 
 		bkey_reassemble(&tmp.k, k);
 		k = bkey_i_to_s_c(&tmp.k);
+
 		bch2_trans_unlock(&trans);
 
-		bytes = min_t(unsigned, bvec_iter.bi_size,
-			      (k.k->p.offset - bvec_iter.bi_sector) << 9);
+		offset_into_extent = iter->pos.offset -
+			bkey_start_offset(k.k);
+
+		bytes = min_t(unsigned, bvec_iter_sectors(bvec_iter),
+			      (k.k->size - offset_into_extent)) << 9;
 		swap(bvec_iter.bi_size, bytes);
 
-		ret = __bch2_read_extent(c, rbio, bvec_iter, k, failed, flags);
+		ret = __bch2_read_extent(c, rbio, bvec_iter, k,
+					 offset_into_extent, failed, flags);
 		switch (ret) {
 		case READ_RETRY:
 			goto retry;
@@ -1463,7 +1468,7 @@ static void __bch2_read_endio(struct work_struct *work)
 		goto nodecode;
 
 	/* Adjust crc to point to subset of data we want: */
-	crc.offset += rbio->bvec_iter.bi_sector - rbio->pos.offset;
+	crc.offset += rbio->offset_into_extent;
 	crc.live_size = bvec_iter_sectors(rbio->bvec_iter);
 
 	if (crc.compression_type != BCH_COMPRESSION_NONE) {
@@ -1574,6 +1579,7 @@ static void bch2_read_endio(struct bio *bio)
 
 int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
 		       struct bvec_iter iter, struct bkey_s_c k,
+		       unsigned offset_into_extent,
 		       struct bch_io_failures *failed, unsigned flags)
 {
 	struct extent_ptr_decoded pick;
@@ -1606,7 +1612,6 @@ int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
 		if (pick.crc.compressed_size > orig->bio.bi_vcnt * PAGE_SECTORS)
 			goto hole;
 
-		iter.bi_sector	= pos.offset;
 		iter.bi_size	= pick.crc.compressed_size << 9;
 		goto noclone;
 	}
@@ -1620,8 +1625,7 @@ int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
 	if (narrow_crcs && (flags & BCH_READ_USER_MAPPED))
 		flags |= BCH_READ_MUST_BOUNCE;
 
-	EBUG_ON(bkey_start_offset(k.k) > iter.bi_sector ||
-		k.k->p.offset < bvec_iter_end_sector(iter));
+	BUG_ON(offset_into_extent + bvec_iter_sectors(iter) > k.k->size);
 
 	if (pick.crc.compression_type != BCH_COMPRESSION_NONE ||
 	    (pick.crc.csum_type != BCH_CSUM_NONE &&
@@ -1642,15 +1646,16 @@ int __bch2_read_extent(struct bch_fs *c, struct bch_read_bio *orig,
 	    (bvec_iter_sectors(iter) != pick.crc.uncompressed_size ||
 	     bvec_iter_sectors(iter) != pick.crc.live_size ||
 	     pick.crc.offset ||
-	     iter.bi_sector != pos.offset));
+	     offset_into_extent));
 
+		pos.offset += offset_into_extent;
 		pick.ptr.offset += pick.crc.offset +
-			(iter.bi_sector - pos.offset);
+			offset_into_extent;
 		pick.crc.compressed_size	= bvec_iter_sectors(iter);
 		pick.crc.uncompressed_size	= bvec_iter_sectors(iter);
 		pick.crc.offset			= 0;
 		pick.crc.live_size		= bvec_iter_sectors(iter);
-		pos.offset			= iter.bi_sector;
+		offset_into_extent		= 0;
 	}
 
 	if (rbio) {
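To make the pointer narrowing above concrete, here is a worked example with assumed numbers; it assumes pos was initialized to the extent's start position, which is what the removed pos.offset = iter.bi_sector line implies:

/*
 * Worked example (numbers are assumed, for illustration only):
 * an uncompressed extent covers sectors [96, 128), so k.k->size == 32
 * and crc.offset == 0; the caller wants 8 sectors starting at sector
 * 104, so offset_into_extent == 104 - 96 == 8.
 *
 *	pos.offset	 += offset_into_extent;	// 96 -> 104
 *	pick.ptr.offset	 += pick.crc.offset +
 *		offset_into_extent;		// device ptr advances 8 sectors
 *	pick.crc.compressed_size   = bvec_iter_sectors(iter);	// 8
 *	pick.crc.uncompressed_size = bvec_iter_sectors(iter);	// 8
 *	pick.crc.offset		   = 0;
 *	offset_into_extent	   = 0;	// offset is now baked into the pick
 *
 * Before this patch the same adjustment was derived from
 * iter.bi_sector - pos.offset, which only works while the bvec_iter's
 * sector and the extent's pos live in the same keyspace.
 */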
@@ -1707,6 +1712,7 @@ noclone:
 	else
 		rbio->end_io	= orig->bio.bi_end_io;
 	rbio->bvec_iter		= iter;
+	rbio->offset_into_extent = offset_into_extent;
 	rbio->flags		= flags;
 	rbio->have_ioref	= pick_ret > 0 && bch2_dev_get_ioref(ca, READ);
 	rbio->narrow_crcs	= narrow_crcs;
@@ -1834,7 +1840,7 @@ void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, u64 inode)
 			   POS(inode, rbio->bio.bi_iter.bi_sector),
 			   BTREE_ITER_SLOTS, k, ret) {
 		BKEY_PADDED(k) tmp;
-		unsigned bytes;
+		unsigned bytes, offset_into_extent;
 
 		/*
 		 * Unlock the iterator while the btree node's lock is still in
@@ -1844,14 +1850,17 @@ void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, u64 inode)
 		k = bkey_i_to_s_c(&tmp.k);
 		bch2_trans_unlock(&trans);
 
-		bytes = min_t(unsigned, rbio->bio.bi_iter.bi_size,
-			      (k.k->p.offset - rbio->bio.bi_iter.bi_sector) << 9);
+		offset_into_extent = iter->pos.offset -
+			bkey_start_offset(k.k);
+
+		bytes = min_t(unsigned, bio_sectors(&rbio->bio),
+			      (k.k->size - offset_into_extent)) << 9;
 		swap(rbio->bio.bi_iter.bi_size, bytes);
 
 		if (rbio->bio.bi_iter.bi_size == bytes)
 			flags |= BCH_READ_LAST_FRAGMENT;
 
-		bch2_read_extent(c, rbio, k, flags);
+		bch2_read_extent(c, rbio, k, offset_into_extent, flags);
 
 		if (flags & BCH_READ_LAST_FRAGMENT)
 			return;
--- a/fs/bcachefs/io.h
+++ b/fs/bcachefs/io.h
@@ -99,10 +99,6 @@ struct bch_devs_mask;
 struct cache_promote_op;
 struct extent_ptr_decoded;
 
-int __bch2_read_extent(struct bch_fs *, struct bch_read_bio *, struct bvec_iter,
-		       struct bkey_s_c, struct bch_io_failures *, unsigned);
-void bch2_read(struct bch_fs *, struct bch_read_bio *, u64);
-
 enum bch_read_flags {
 	BCH_READ_RETRY_IF_STALE		= 1 << 0,
 	BCH_READ_MAY_PROMOTE		= 1 << 1,
@@ -116,14 +112,22 @@ enum bch_read_flags {
 	BCH_READ_IN_RETRY		= 1 << 7,
 };
 
+int __bch2_read_extent(struct bch_fs *, struct bch_read_bio *,
+		       struct bvec_iter, struct bkey_s_c, unsigned,
+		       struct bch_io_failures *, unsigned);
+
 static inline void bch2_read_extent(struct bch_fs *c,
 				    struct bch_read_bio *rbio,
 				    struct bkey_s_c k,
+				    unsigned offset_into_extent,
 				    unsigned flags)
 {
-	__bch2_read_extent(c, rbio, rbio->bio.bi_iter, k, NULL, flags);
+	__bch2_read_extent(c, rbio, rbio->bio.bi_iter, k,
+			   offset_into_extent, NULL, flags);
 }
 
+void bch2_read(struct bch_fs *, struct bch_read_bio *, u64);
+
 static inline struct bch_read_bio *rbio_init(struct bio *bio,
 					     struct bch_io_opts opts)
 {
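The wrapper simply forwards the new parameter, so two calling conventions fall out, sketched below for illustration (not lines from this commit). Callers that read an extent from its start — the nodecode retry path and the move path in this patch — pass 0; callers reading a mid-extent subset pass the computed offset:

	/* Illustration only: */
	bch2_read_extent(c, rbio, k, 0, flags);			/* from extent start */
	bch2_read_extent(c, rbio, k, offset_into_extent, flags);	/* mid-extent subset */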
--- a/fs/bcachefs/io_types.h
+++ b/fs/bcachefs/io_types.h
@@ -38,6 +38,8 @@ struct bch_read_bio {
 	 */
 	struct bvec_iter	bvec_iter;
 
+	unsigned		offset_into_extent;
+
 	u16			flags;
 	union {
 	struct {
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -461,7 +461,7 @@ static int bch2_move_extent(struct bch_fs *c,
 	 * ctxt when doing wakeup
 	 */
 	closure_get(&ctxt->cl);
-	bch2_read_extent(c, &io->rbio, e.s_c,
+	bch2_read_extent(c, &io->rbio, e.s_c, 0,
 			 BCH_READ_NODECODE|
 			 BCH_READ_LAST_FRAGMENT);
 	return 0;