Merge tag 'for-5.18/block-2022-03-18' of git://git.kernel.dk/linux-block
Pull block updates from Jens Axboe:

 - BFQ cleanups and fixes (Yu, Zhang, Yahu, Paolo)
 - blk-rq-qos completion fix (Tejun)
 - blk-cgroup merge fix (Tejun)
 - Add offline error return value to distinguish it from an IO error on
   the device (Song)
 - IO stats fixes (Zhang, Christoph)
 - blkcg refcount fixes (Ming, Yu)
 - Fix for indefinite dispatch loop softlockup (Shin'ichiro)
 - blk-mq hardware queue management improvements (Ming)
 - sbitmap dead code removal (Ming, John)
 - Plugging merge improvements (me)
 - Show blk-crypto capabilities in sysfs (Eric)
 - Multiple delayed queue run improvement (David)
 - Block throttling fixes (Ming)
 - Start deprecating auto module loading based on dev_t (Christoph)
 - bio allocation improvements (Christoph, Chaitanya)
 - Get rid of bio_devname (Christoph)
 - bio clone improvements (Christoph)
 - Block plugging improvements (Christoph)
 - Get rid of genhd.h header (Christoph)
 - Ensure drivers use appropriate flush helpers (Christoph)
 - Refcounting improvements (Christoph)
 - Queue initialization and teardown improvements (Ming, Christoph)
 - Misc fixes/improvements (Barry, Chaitanya, Colin, Dan, Jiapeng, Lukas,
   Nian, Yang, Eric, Chengming)

* tag 'for-5.18/block-2022-03-18' of git://git.kernel.dk/linux-block: (127 commits)
  block: cancel all throttled bios in del_gendisk()
  block: let blkcg_gq grab request queue's refcnt
  block: avoid use-after-free on throttle data
  block: limit request dispatch loop duration
  block/bfq-iosched: Fix spelling mistake "tenative" -> "tentative"
  sr: simplify the local variable initialization in sr_block_open()
  block: don't merge across cgroup boundaries if blkcg is enabled
  block: fix rq-qos breakage from skipping rq_qos_done_bio()
  block: flush plug based on hardware and software queue order
  block: ensure plug merging checks the correct queue at least once
  block: move rq_qos_exit() into disk_release()
  block: do more work in elevator_exit
  block: move blk_exit_queue into disk_release
  block: move q_usage_counter release into blk_queue_release
  block: don't remove hctx debugfs dir from blk_mq_exit_queue
  block: move blkcg initialization/destroy into disk allocation/release handler
  sr: implement ->free_disk to simplify refcounting
  sd: implement ->free_disk to simplify refcounting
  sd: delay calling free_opal_dev
  sd: call sd_zbc_release_disk before releasing the scsi_device reference
  ...
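Several of the items above (notably the bio allocation and bio clone improvements from Christoph and Chaitanya) come down to bio_alloc() and bio_init() now taking the target block_device and the operation flags up front; the XFS hunks below are exactly that conversion. A minimal sketch of the new bio_alloc() calling convention follows; the helper name is my own and this is not code from the merge itself:

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Sketch only: write one page with the 5.18-style bio_alloc(), where the
 * device, vector count, operation flags and GFP mask are all passed at
 * allocation time instead of being set on the bio afterwards.
 */
static int example_write_page(struct block_device *bdev, struct page *page,
                              sector_t sector)
{
        struct bio *bio;
        int error;

        /* Old style was: bio_alloc(GFP_KERNEL, 1); bio_set_dev(...); bio->bi_opf = ...; */
        bio = bio_alloc(bdev, 1, REQ_OP_WRITE | REQ_SYNC, GFP_KERNEL);
        bio->bi_iter.bi_sector = sector;
        __bio_add_page(bio, page, PAGE_SIZE, 0);

        error = submit_bio_wait(bio);   /* synchronous for simplicity of the sketch */
        bio_put(bio);
        return error;
}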
@@ -36,9 +36,7 @@ xfs_flush_bdev_async(
                 return;
         }

-        bio_init(bio, NULL, 0);
-        bio_set_dev(bio, bdev);
-        bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
+        bio_init(bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);
         bio->bi_private = done;
         bio->bi_end_io = xfs_flush_bdev_async_endio;

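For a caller-owned bio (on the stack or embedded in another structure), the same information now goes into bio_init(), as in the converted xfs_flush_bdev_async() above. A hedged sketch of that form; the helper name and the completion plumbing passed in by the caller are assumptions of mine:

#include <linux/bio.h>

/*
 * Sketch: initialize a caller-owned bio as an empty, synchronous cache
 * flush, mirroring the one-call bio_init() form in the hunk above.
 */
static void example_issue_flush(struct bio *bio, struct block_device *bdev,
                                void *private, bio_end_io_t *end_io)
{
        bio_init(bio, bdev, NULL, 0, REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC);
        bio->bi_private = private;
        bio->bi_end_io = end_io;
        submit_bio(bio);
}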
@@ -61,10 +59,9 @@ xfs_rw_bdev(
         if (is_vmalloc && op == REQ_OP_WRITE)
                 flush_kernel_vmap_range(data, count);

-        bio = bio_alloc(GFP_KERNEL, bio_max_vecs(left));
-        bio_set_dev(bio, bdev);
+        bio = bio_alloc(bdev, bio_max_vecs(left), op | REQ_META | REQ_SYNC,
+                        GFP_KERNEL);
         bio->bi_iter.bi_sector = sector;
-        bio->bi_opf = op | REQ_META | REQ_SYNC;

         do {
                 struct page *page = kmem_to_page(data);
@@ -74,10 +71,9 @@ xfs_rw_bdev(
                 while (bio_add_page(bio, page, len, off) != len) {
                         struct bio *prev = bio;

-                        bio = bio_alloc(GFP_KERNEL, bio_max_vecs(left));
-                        bio_copy_dev(bio, prev);
+                        bio = bio_alloc(prev->bi_bdev, bio_max_vecs(left),
+                                        prev->bi_opf, GFP_KERNEL);
                         bio->bi_iter.bi_sector = bio_end_sector(prev);
-                        bio->bi_opf = prev->bi_opf;
                         bio_chain(prev, bio);

                         submit_bio(prev);
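xfs_rw_bdev() above chains a fresh bio onto the one that just filled up, and with the new bio_alloc() the replacement simply inherits the device and opf from its predecessor. A sketch of that chaining pattern under an assumed helper name:

#include <linux/bio.h>

/*
 * Sketch: when the current bio cannot take more pages, allocate a
 * follow-up bio that inherits the device and opf of its predecessor,
 * chain the two, and submit the full one right away.
 */
static struct bio *example_extend_chain(struct bio *prev, unsigned short nr_vecs)
{
        struct bio *bio;

        bio = bio_alloc(prev->bi_bdev, nr_vecs, prev->bi_opf, GFP_KERNEL);
        bio->bi_iter.bi_sector = bio_end_sector(prev);
        bio_chain(prev, bio);           /* prev's completion folds into the chain */
        submit_bio(prev);
        return bio;
}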
@@ -1440,12 +1440,10 @@ next_chunk:
         atomic_inc(&bp->b_io_remaining);
         nr_pages = bio_max_segs(total_nr_pages);

-        bio = bio_alloc(GFP_NOIO, nr_pages);
-        bio_set_dev(bio, bp->b_target->bt_bdev);
+        bio = bio_alloc(bp->b_target->bt_bdev, nr_pages, op, GFP_NOIO);
         bio->bi_iter.bi_sector = sector;
         bio->bi_end_io = xfs_buf_bio_end_io;
         bio->bi_private = bp;
-        bio->bi_opf = op;

         for (; size && nr_pages; nr_pages--, page_index++) {
                 int rbytes, nbytes = PAGE_SIZE - offset;
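The xfs_buf hunk sits at the head of a loop that feeds buffer pages to bio_add_page(), which reports how many bytes it accepted. A sketch of that loop shape; the helper name and the page-array calling convention are my assumptions, not the XFS code verbatim:

#include <linux/bio.h>

/*
 * Sketch: add up to nr_pages pages to a bio, stopping when bio_add_page()
 * does not accept a full chunk (i.e. the bio is full and the caller
 * should chain or submit it).
 */
static size_t example_fill_bio(struct bio *bio, struct page **pages,
                               int nr_pages, size_t size, unsigned int offset)
{
        size_t added = 0;
        int page_index = 0;

        for (; size && nr_pages; nr_pages--, page_index++) {
                unsigned int nbytes = PAGE_SIZE - offset;

                if (nbytes > size)
                        nbytes = size;
                if (bio_add_page(bio, pages[page_index], nbytes, offset) != nbytes)
                        break;

                added += nbytes;
                size -= nbytes;
                offset = 0;
        }
        return added;
}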
@@ -1883,19 +1883,19 @@ xlog_write_iclog(
                 return;
         }

-        bio_init(&iclog->ic_bio, iclog->ic_bvec, howmany(count, PAGE_SIZE));
-        bio_set_dev(&iclog->ic_bio, log->l_targ->bt_bdev);
-        iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
-        iclog->ic_bio.bi_end_io = xlog_bio_end_io;
-        iclog->ic_bio.bi_private = iclog;
-
         /*
          * We use REQ_SYNC | REQ_IDLE here to tell the block layer there are more
          * IOs coming immediately after this one. This prevents the block layer
          * writeback throttle from throttling log writes behind background
          * metadata writeback and causing priority inversions.
          */
-        iclog->ic_bio.bi_opf = REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE;
+        bio_init(&iclog->ic_bio, log->l_targ->bt_bdev, iclog->ic_bvec,
+                 howmany(count, PAGE_SIZE),
+                 REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE);
+        iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
+        iclog->ic_bio.bi_end_io = xlog_bio_end_io;
+        iclog->ic_bio.bi_private = iclog;

         if (iclog->ic_flags & XLOG_ICL_NEED_FLUSH) {
                 iclog->ic_bio.bi_opf |= REQ_PREFLUSH;
                 /*
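xlog_write_iclog() keeps the REQ_SYNC | REQ_IDLE reasoning from the comment above and only ORs REQ_PREFLUSH into bi_opf when the log needs a cache flush. A sketch of composing the opf that way; the helper name and the REQ_FUA branch are my assumptions about code beyond the truncated hunk:

#include <linux/bio.h>

/*
 * Sketch: compose the opf for a log write. The base flags go into
 * bio_init(); cache-flush semantics are added only when required.
 * REQ_SYNC | REQ_IDLE tells the block layer more IO follows immediately,
 * keeping the writeback throttle from delaying log writes.
 */
static void example_init_log_bio(struct bio *bio, struct block_device *bdev,
                                 struct bio_vec *bvecs, unsigned short nr_vecs,
                                 bool need_flush, bool need_fua)
{
        bio_init(bio, bdev, bvecs, nr_vecs,
                 REQ_OP_WRITE | REQ_META | REQ_SYNC | REQ_IDLE);

        if (need_flush)
                bio->bi_opf |= REQ_PREFLUSH;    /* flush the device cache first */
        if (need_fua)
                bio->bi_opf |= REQ_FUA;         /* assumed: force-unit-access write */
}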