block: remove bio_set_op_attrs
This macro is obsolete, so replace the last few uses with open coded bi_opf assignments. Signed-off-by: Christoph Hellwig <hch@lst.de> Acked-by: Coly Li <colyli@suse.de> Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com> Reviewed-by: Bart Van Assche <bvanassche@acm.org> Link: https://lore.kernel.org/r/20221206144057.720846-1-hch@lst.de Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
committed by
Jens Axboe
parent
db1c7d7797
commit
c34b7ac650
@@ -160,7 +160,7 @@ static void read_moving(struct cache_set *c)
|
|||||||
moving_init(io);
|
moving_init(io);
|
||||||
bio = &io->bio.bio;
|
bio = &io->bio.bio;
|
||||||
|
|
||||||
bio_set_op_attrs(bio, REQ_OP_READ, 0);
|
bio->bi_opf = REQ_OP_READ;
|
||||||
bio->bi_end_io = read_moving_endio;
|
bio->bi_end_io = read_moving_endio;
|
||||||
|
|
||||||
if (bch_bio_alloc_pages(bio, GFP_KERNEL))
|
if (bch_bio_alloc_pages(bio, GFP_KERNEL))
|
||||||
|
@@ -244,7 +244,7 @@ static void bch_data_insert_start(struct closure *cl)
|
|||||||
trace_bcache_cache_insert(k);
|
trace_bcache_cache_insert(k);
|
||||||
bch_keylist_push(&op->insert_keys);
|
bch_keylist_push(&op->insert_keys);
|
||||||
|
|
||||||
bio_set_op_attrs(n, REQ_OP_WRITE, 0);
|
n->bi_opf = REQ_OP_WRITE;
|
||||||
bch_submit_bbio(n, op->c, k, 0);
|
bch_submit_bbio(n, op->c, k, 0);
|
||||||
} while (n != bio);
|
} while (n != bio);
|
||||||
|
|
||||||
|
@@ -434,7 +434,7 @@ static void write_dirty(struct closure *cl)
|
|||||||
*/
|
*/
|
||||||
if (KEY_DIRTY(&w->key)) {
|
if (KEY_DIRTY(&w->key)) {
|
||||||
dirty_init(w);
|
dirty_init(w);
|
||||||
bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
|
io->bio.bi_opf = REQ_OP_WRITE;
|
||||||
io->bio.bi_iter.bi_sector = KEY_START(&w->key);
|
io->bio.bi_iter.bi_sector = KEY_START(&w->key);
|
||||||
bio_set_dev(&io->bio, io->dc->bdev);
|
bio_set_dev(&io->bio, io->dc->bdev);
|
||||||
io->bio.bi_end_io = dirty_endio;
|
io->bio.bi_end_io = dirty_endio;
|
||||||
@@ -547,7 +547,7 @@ static void read_dirty(struct cached_dev *dc)
|
|||||||
io->sequence = sequence++;
|
io->sequence = sequence++;
|
||||||
|
|
||||||
dirty_init(w);
|
dirty_init(w);
|
||||||
bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
|
io->bio.bi_opf = REQ_OP_READ;
|
||||||
io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
|
io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
|
||||||
bio_set_dev(&io->bio, dc->disk.c->cache->bdev);
|
bio_set_dev(&io->bio, dc->disk.c->cache->bdev);
|
||||||
io->bio.bi_end_io = read_dirty_endio;
|
io->bio.bi_end_io = read_dirty_endio;
|
||||||
|
@@ -410,7 +410,7 @@ static void end_discard(struct discard_op *op, int r)
|
|||||||
* need to wait for the chain to complete.
|
* need to wait for the chain to complete.
|
||||||
*/
|
*/
|
||||||
bio_chain(op->bio, op->parent_bio);
|
bio_chain(op->bio, op->parent_bio);
|
||||||
bio_set_op_attrs(op->bio, REQ_OP_DISCARD, 0);
|
op->bio->bi_opf = REQ_OP_DISCARD;
|
||||||
submit_bio(op->bio);
|
submit_bio(op->bio);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -1321,7 +1321,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
|
|||||||
read_bio->bi_iter.bi_sector = r1_bio->sector +
|
read_bio->bi_iter.bi_sector = r1_bio->sector +
|
||||||
mirror->rdev->data_offset;
|
mirror->rdev->data_offset;
|
||||||
read_bio->bi_end_io = raid1_end_read_request;
|
read_bio->bi_end_io = raid1_end_read_request;
|
||||||
bio_set_op_attrs(read_bio, op, do_sync);
|
read_bio->bi_opf = op | do_sync;
|
||||||
if (test_bit(FailFast, &mirror->rdev->flags) &&
|
if (test_bit(FailFast, &mirror->rdev->flags) &&
|
||||||
test_bit(R1BIO_FailFast, &r1_bio->state))
|
test_bit(R1BIO_FailFast, &r1_bio->state))
|
||||||
read_bio->bi_opf |= MD_FAILFAST;
|
read_bio->bi_opf |= MD_FAILFAST;
|
||||||
@@ -2254,7 +2254,7 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
|
|||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
|
wbio->bi_opf = REQ_OP_WRITE;
|
||||||
if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
|
if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
|
||||||
wbio->bi_opf |= MD_FAILFAST;
|
wbio->bi_opf |= MD_FAILFAST;
|
||||||
|
|
||||||
@@ -2419,7 +2419,7 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
|
|||||||
GFP_NOIO, &mddev->bio_set);
|
GFP_NOIO, &mddev->bio_set);
|
||||||
}
|
}
|
||||||
|
|
||||||
bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
|
wbio->bi_opf = REQ_OP_WRITE;
|
||||||
wbio->bi_iter.bi_sector = r1_bio->sector;
|
wbio->bi_iter.bi_sector = r1_bio->sector;
|
||||||
wbio->bi_iter.bi_size = r1_bio->sectors << 9;
|
wbio->bi_iter.bi_size = r1_bio->sectors << 9;
|
||||||
|
|
||||||
@@ -2770,7 +2770,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
|
|||||||
if (i < conf->raid_disks)
|
if (i < conf->raid_disks)
|
||||||
still_degraded = 1;
|
still_degraded = 1;
|
||||||
} else if (!test_bit(In_sync, &rdev->flags)) {
|
} else if (!test_bit(In_sync, &rdev->flags)) {
|
||||||
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
|
bio->bi_opf = REQ_OP_WRITE;
|
||||||
bio->bi_end_io = end_sync_write;
|
bio->bi_end_io = end_sync_write;
|
||||||
write_targets ++;
|
write_targets ++;
|
||||||
} else {
|
} else {
|
||||||
@@ -2797,7 +2797,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
|
|||||||
if (disk < 0)
|
if (disk < 0)
|
||||||
disk = i;
|
disk = i;
|
||||||
}
|
}
|
||||||
bio_set_op_attrs(bio, REQ_OP_READ, 0);
|
bio->bi_opf = REQ_OP_READ;
|
||||||
bio->bi_end_io = end_sync_read;
|
bio->bi_end_io = end_sync_read;
|
||||||
read_targets++;
|
read_targets++;
|
||||||
} else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
|
} else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
|
||||||
@@ -2809,7 +2809,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
|
|||||||
* if we are doing resync or repair. Otherwise, leave
|
* if we are doing resync or repair. Otherwise, leave
|
||||||
* this device alone for this sync request.
|
* this device alone for this sync request.
|
||||||
*/
|
*/
|
||||||
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
|
bio->bi_opf = REQ_OP_WRITE;
|
||||||
bio->bi_end_io = end_sync_write;
|
bio->bi_end_io = end_sync_write;
|
||||||
write_targets++;
|
write_targets++;
|
||||||
}
|
}
|
||||||
|
@@ -1254,7 +1254,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
|
|||||||
read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
|
read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
|
||||||
choose_data_offset(r10_bio, rdev);
|
choose_data_offset(r10_bio, rdev);
|
||||||
read_bio->bi_end_io = raid10_end_read_request;
|
read_bio->bi_end_io = raid10_end_read_request;
|
||||||
bio_set_op_attrs(read_bio, op, do_sync);
|
read_bio->bi_opf = op | do_sync;
|
||||||
if (test_bit(FailFast, &rdev->flags) &&
|
if (test_bit(FailFast, &rdev->flags) &&
|
||||||
test_bit(R10BIO_FailFast, &r10_bio->state))
|
test_bit(R10BIO_FailFast, &r10_bio->state))
|
||||||
read_bio->bi_opf |= MD_FAILFAST;
|
read_bio->bi_opf |= MD_FAILFAST;
|
||||||
@@ -1301,7 +1301,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
|
|||||||
mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr +
|
mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr +
|
||||||
choose_data_offset(r10_bio, rdev));
|
choose_data_offset(r10_bio, rdev));
|
||||||
mbio->bi_end_io = raid10_end_write_request;
|
mbio->bi_end_io = raid10_end_write_request;
|
||||||
bio_set_op_attrs(mbio, op, do_sync | do_fua);
|
mbio->bi_opf = op | do_sync | do_fua;
|
||||||
if (!replacement && test_bit(FailFast,
|
if (!replacement && test_bit(FailFast,
|
||||||
&conf->mirrors[devnum].rdev->flags)
|
&conf->mirrors[devnum].rdev->flags)
|
||||||
&& enough(conf, devnum))
|
&& enough(conf, devnum))
|
||||||
@@ -2933,7 +2933,7 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
|
|||||||
wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
|
wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
|
||||||
wbio->bi_iter.bi_sector = wsector +
|
wbio->bi_iter.bi_sector = wsector +
|
||||||
choose_data_offset(r10_bio, rdev);
|
choose_data_offset(r10_bio, rdev);
|
||||||
bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
|
wbio->bi_opf = REQ_OP_WRITE;
|
||||||
|
|
||||||
if (submit_bio_wait(wbio) < 0)
|
if (submit_bio_wait(wbio) < 0)
|
||||||
/* Failure! */
|
/* Failure! */
|
||||||
@@ -3542,7 +3542,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
|
|||||||
bio->bi_next = biolist;
|
bio->bi_next = biolist;
|
||||||
biolist = bio;
|
biolist = bio;
|
||||||
bio->bi_end_io = end_sync_read;
|
bio->bi_end_io = end_sync_read;
|
||||||
bio_set_op_attrs(bio, REQ_OP_READ, 0);
|
bio->bi_opf = REQ_OP_READ;
|
||||||
if (test_bit(FailFast, &rdev->flags))
|
if (test_bit(FailFast, &rdev->flags))
|
||||||
bio->bi_opf |= MD_FAILFAST;
|
bio->bi_opf |= MD_FAILFAST;
|
||||||
from_addr = r10_bio->devs[j].addr;
|
from_addr = r10_bio->devs[j].addr;
|
||||||
@@ -3567,7 +3567,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
|
|||||||
bio->bi_next = biolist;
|
bio->bi_next = biolist;
|
||||||
biolist = bio;
|
biolist = bio;
|
||||||
bio->bi_end_io = end_sync_write;
|
bio->bi_end_io = end_sync_write;
|
||||||
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
|
bio->bi_opf = REQ_OP_WRITE;
|
||||||
bio->bi_iter.bi_sector = to_addr
|
bio->bi_iter.bi_sector = to_addr
|
||||||
+ mrdev->data_offset;
|
+ mrdev->data_offset;
|
||||||
bio_set_dev(bio, mrdev->bdev);
|
bio_set_dev(bio, mrdev->bdev);
|
||||||
@@ -3588,7 +3588,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
|
|||||||
bio->bi_next = biolist;
|
bio->bi_next = biolist;
|
||||||
biolist = bio;
|
biolist = bio;
|
||||||
bio->bi_end_io = end_sync_write;
|
bio->bi_end_io = end_sync_write;
|
||||||
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
|
bio->bi_opf = REQ_OP_WRITE;
|
||||||
bio->bi_iter.bi_sector = to_addr +
|
bio->bi_iter.bi_sector = to_addr +
|
||||||
mreplace->data_offset;
|
mreplace->data_offset;
|
||||||
bio_set_dev(bio, mreplace->bdev);
|
bio_set_dev(bio, mreplace->bdev);
|
||||||
@@ -3742,7 +3742,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
|
|||||||
bio->bi_next = biolist;
|
bio->bi_next = biolist;
|
||||||
biolist = bio;
|
biolist = bio;
|
||||||
bio->bi_end_io = end_sync_read;
|
bio->bi_end_io = end_sync_read;
|
||||||
bio_set_op_attrs(bio, REQ_OP_READ, 0);
|
bio->bi_opf = REQ_OP_READ;
|
||||||
if (test_bit(FailFast, &rdev->flags))
|
if (test_bit(FailFast, &rdev->flags))
|
||||||
bio->bi_opf |= MD_FAILFAST;
|
bio->bi_opf |= MD_FAILFAST;
|
||||||
bio->bi_iter.bi_sector = sector + rdev->data_offset;
|
bio->bi_iter.bi_sector = sector + rdev->data_offset;
|
||||||
@@ -3764,7 +3764,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
|
|||||||
bio->bi_next = biolist;
|
bio->bi_next = biolist;
|
||||||
biolist = bio;
|
biolist = bio;
|
||||||
bio->bi_end_io = end_sync_write;
|
bio->bi_end_io = end_sync_write;
|
||||||
bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
|
bio->bi_opf = REQ_OP_WRITE;
|
||||||
if (test_bit(FailFast, &rdev->flags))
|
if (test_bit(FailFast, &rdev->flags))
|
||||||
bio->bi_opf |= MD_FAILFAST;
|
bio->bi_opf |= MD_FAILFAST;
|
||||||
bio->bi_iter.bi_sector = sector + rdev->data_offset;
|
bio->bi_iter.bi_sector = sector + rdev->data_offset;
|
||||||
@@ -4970,7 +4970,7 @@ read_more:
|
|||||||
b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
|
b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
|
||||||
rdev2->new_data_offset;
|
rdev2->new_data_offset;
|
||||||
b->bi_end_io = end_reshape_write;
|
b->bi_end_io = end_reshape_write;
|
||||||
bio_set_op_attrs(b, REQ_OP_WRITE, 0);
|
b->bi_opf = REQ_OP_WRITE;
|
||||||
b->bi_next = blist;
|
b->bi_next = blist;
|
||||||
blist = b;
|
blist = b;
|
||||||
}
|
}
|
||||||
|
@@ -472,13 +472,6 @@ static inline enum req_op bio_op(const struct bio *bio)
|
|||||||
return bio->bi_opf & REQ_OP_MASK;
|
return bio->bi_opf & REQ_OP_MASK;
|
||||||
}
|
}
|
||||||
|
|
||||||
/* obsolete, don't use in new code */
|
|
||||||
static inline void bio_set_op_attrs(struct bio *bio, enum req_op op,
|
|
||||||
blk_opf_t op_flags)
|
|
||||||
{
|
|
||||||
bio->bi_opf = op | op_flags;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline bool op_is_write(blk_opf_t op)
|
static inline bool op_is_write(blk_opf_t op)
|
||||||
{
|
{
|
||||||
return !!(op & (__force blk_opf_t)1);
|
return !!(op & (__force blk_opf_t)1);
|
||||||
|
Reference in New Issue
Block a user