block-5.17-2022-02-04
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAmH9bhIQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgppCCD/0aAm95C45TqdwoDtXNbwX5w1kNlh3wP3X8
Kx070w/wP9eWAQfn7qnp29J1F+gyh21NBIVDh4bGbs4oLtQGu2vQ61cTDz+ZRDku
9WjpCvHbdsHq/xNc7GzJ7wFqW8e52MU770XQqViFcewJ8NAbDlbN5d+WGXfgs+8b
PGNe8xf22rJr919w8F+mTi/dVHeLw9Na4SkgKPuOFOosTou1SIftbKE0zsxQJanK
YZ7K71iY2gva/Ms61iXLm/T14Bc1OsQr/dV5hdj9uuXp4H8/bLwTnUI+JJ/jYLu2
Oc55Yj0VFfCzczxNF5nD3qsPU+oX/4aR5cNZCY7vYM4JtmuPsdU5SpHdLWeQQQo5
HjzutHcOVsdfyu3wdZkXj7gNUWqAr8RkRo+1grpMLzoBfpRB2D0hrOWNcw7xm/3r
MskyYnXEd96jvAuM5iOBcGMLq13/HrYHk5LHVKk/n1z//mFDsksqi+5r328YzrxW
ygC3E0qK4sM0HpIOO/8NFhceo8gC9XIlWrkzSOx1vlp1Wb1ZmvYDSU5kZXHqtNQj
EYjjamnWUHMad0DUWbgJqfIGJvmObvNBE/r48VyC8YM4dMoJ3K5VQAYD2mgTvQ5T
fNOgIT+uMGv6aylA+ebKEuB+RcDxtzXsiCNFbB8vexB9mKUIRyaEKvadiOf8q6LM
ohSmaW59xQ==
=iwuA
-----END PGP SIGNATURE-----

Merge tag 'block-5.17-2022-02-04' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - NVMe pull request
      - fix use-after-free in rdma and tcp controller reset (Sagi Grimberg)
      - fix the state check in nvmf_ctlr_matches_baseopts (Uday Shankar)

 - MD nowait null pointer fix (Song)

 - blk-integrity seed advance fix (Martin)

 - Fix a dio regression in this merge window (Ilya)

* tag 'block-5.17-2022-02-04' of git://git.kernel.dk/linux-block:
  block: bio-integrity: Advance seed correctly for larger interval sizes
  nvme-fabrics: fix state check in nvmf_ctlr_matches_baseopts()
  md: fix NULL pointer deref with nowait but no mddev->queue
  block: fix DIO handling regressions in blkdev_read_iter()
  nvme-rdma: fix possible use-after-free in transport error_recovery work
  nvme-tcp: fix possible use-after-free in transport error_recovery work
  nvme: fix a possible use-after-free in controller reset during load
commit 7c4a94590e
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -373,7 +373,7 @@ void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
 	struct blk_integrity *bi = blk_get_integrity(bio->bi_bdev->bd_disk);
 	unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
 
-	bip->bip_iter.bi_sector += bytes_done >> 9;
+	bip->bip_iter.bi_sector += bio_integrity_intervals(bi, bytes_done >> 9);
 	bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
 }
 
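The change above is pure arithmetic: the integrity seed counts protection intervals, not 512-byte sectors, and the two only coincide when the interval size is 512 bytes. A minimal userspace sketch of how the two advance amounts diverge (illustrative names only, not kernel code; assumes the interval size evenly divides the transferred bytes):

/*
 * Toy model: how many protection-information tuples are consumed by a
 * given number of 512-byte sectors, for a given integrity interval size.
 */
#include <stdio.h>

static unsigned int intervals(unsigned int interval_bytes, unsigned int sectors)
{
	return (sectors << 9) / interval_bytes;	/* tuples covering the data */
}

int main(void)
{
	unsigned int bytes_done = 64 * 1024;		/* 64 KiB transferred */
	unsigned int sectors = bytes_done >> 9;		/* 128 sectors */

	/* 512-byte intervals: old and new code agree (advance by 128). */
	printf("512B intervals: advance seed by %u\n", intervals(512, sectors));
	/* 4096-byte intervals: only 16 tuples were consumed, so advancing the
	 * seed by 128 sectors (the old code) overshoots by a factor of 8. */
	printf("4KiB intervals: advance seed by %u\n", intervals(4096, sectors));
	return 0;
}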
--- a/block/fops.c
+++ b/block/fops.c
@@ -566,34 +566,37 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
 	struct block_device *bdev = iocb->ki_filp->private_data;
 	loff_t size = bdev_nr_bytes(bdev);
-	size_t count = iov_iter_count(to);
 	loff_t pos = iocb->ki_pos;
 	size_t shorted = 0;
 	ssize_t ret = 0;
+	size_t count;
 
-	if (unlikely(pos + count > size)) {
+	if (unlikely(pos + iov_iter_count(to) > size)) {
 		if (pos >= size)
 			return 0;
 		size -= pos;
-		if (count > size) {
-			shorted = count - size;
-			iov_iter_truncate(to, size);
-		}
+		shorted = iov_iter_count(to) - size;
+		iov_iter_truncate(to, size);
 	}
 
+	count = iov_iter_count(to);
+	if (!count)
+		goto reexpand; /* skip atime */
+
 	if (iocb->ki_flags & IOCB_DIRECT) {
 		struct address_space *mapping = iocb->ki_filp->f_mapping;
 
 		if (iocb->ki_flags & IOCB_NOWAIT) {
-			if (filemap_range_needs_writeback(mapping, iocb->ki_pos,
-						iocb->ki_pos + count - 1))
-				return -EAGAIN;
+			if (filemap_range_needs_writeback(mapping, pos,
+							  pos + count - 1)) {
+				ret = -EAGAIN;
+				goto reexpand;
+			}
 		} else {
-			ret = filemap_write_and_wait_range(mapping,
-						iocb->ki_pos,
-						iocb->ki_pos + count - 1);
+			ret = filemap_write_and_wait_range(mapping, pos,
+							   pos + count - 1);
 			if (ret < 0)
-				return ret;
+				goto reexpand;
 		}
 
 		file_accessed(iocb->ki_filp);
@@ -603,12 +606,14 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
 			iocb->ki_pos += ret;
 			count -= ret;
 		}
+		iov_iter_revert(to, count - iov_iter_count(to));
 		if (ret < 0 || !count)
-			return ret;
+			goto reexpand;
 	}
 
 	ret = filemap_read(iocb, to, ret);
 
+reexpand:
 	if (unlikely(shorted))
 		iov_iter_reexpand(to, iov_iter_count(to) + shorted);
 	return ret;
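The net effect of these two hunks is a single exit path: the iterator is truncated when the read would run past the end of the device, count is re-read after truncation, every early return from the direct-I/O branch now goes through the reexpand label, and iov_iter_revert() puts back whatever the direct-I/O path consumed without reporting. A self-contained userspace sketch of the clamp-and-restore part (toy types, not the kernel iov_iter API):

#include <stdio.h>
#include <stddef.h>

struct toy_iter { size_t count; };	/* stand-in for struct iov_iter */

static void iter_truncate(struct toy_iter *it, size_t n)
{
	if (it->count > n)
		it->count = n;
}

static void iter_reexpand(struct toy_iter *it, size_t n)
{
	it->count = n;
}

/* Clamp a read at dev_size, always restoring the iterator on exit. */
static long toy_read(long long pos, long long dev_size, struct toy_iter *to)
{
	size_t shorted = 0, count;
	long ret = 0;

	if (pos + (long long)to->count > dev_size) {
		if (pos >= dev_size)
			return 0;
		shorted = to->count - (size_t)(dev_size - pos);
		iter_truncate(to, (size_t)(dev_size - pos));
	}

	count = to->count;
	if (!count)
		goto reexpand;		/* nothing left after clamping */

	ret = (long)count;		/* pretend the clamped range was read */
	to->count -= count;

reexpand:
	if (shorted)
		iter_reexpand(to, to->count + shorted);
	return ret;
}

int main(void)
{
	struct toy_iter it = { .count = 8192 };

	/* 8 KiB read starting 4 KiB before the end of a 1 MiB "device". */
	long ret = toy_read((1 << 20) - 4096, 1 << 20, &it);

	printf("ret=%ld, iterator count restored to %zu\n", ret, it.count);
	return 0;
}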
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5869,10 +5869,6 @@ int md_run(struct mddev *mddev)
 		nowait = nowait && blk_queue_nowait(bdev_get_queue(rdev->bdev));
 	}
 
-	/* Set the NOWAIT flags if all underlying devices support it */
-	if (nowait)
-		blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
-
 	if (!bioset_initialized(&mddev->bio_set)) {
 		err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS);
 		if (err)
@@ -6010,6 +6006,10 @@ int md_run(struct mddev *mddev)
 		else
 			blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
 		blk_queue_flag_set(QUEUE_FLAG_IO_STAT, mddev->queue);
+
+		/* Set the NOWAIT flags if all underlying devices support it */
+		if (nowait)
+			blk_queue_flag_set(QUEUE_FLAG_NOWAIT, mddev->queue);
 	}
 	if (pers->sync_request) {
 		if (mddev->kobj.sd &&
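The two md hunks move the NOWAIT advertisement inside the block that already dereferences mddev->queue: the flag is still set only when every member device supports nowait, but it is no longer touched when the array has no request queue at all, which is the NULL-pointer case named in the commit title. A small sketch of that guard pattern, with made-up types standing in for mddev and its queue:

#include <stdbool.h>
#include <stdio.h>

struct toy_queue { bool nowait; };

struct toy_array {
	struct toy_queue *queue;	/* may be NULL (no gendisk/queue) */
};

/* Advertise NOWAIT only if every member supports it AND a queue exists. */
static void set_nowait(struct toy_array *md, const bool member_nowait[], int n)
{
	bool nowait = true;
	int i;

	for (i = 0; i < n; i++)
		nowait = nowait && member_nowait[i];

	if (md->queue && nowait)	/* check the queue first: it may be NULL */
		md->queue->nowait = true;
}

int main(void)
{
	const bool members[] = { true, true, true };
	struct toy_array no_queue = { .queue = NULL };

	set_nowait(&no_queue, members, 3);	/* must not dereference NULL */
	printf("no crash with a NULL queue\n");
	return 0;
}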
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4253,7 +4253,14 @@ static void nvme_async_event_work(struct work_struct *work)
 		container_of(work, struct nvme_ctrl, async_event_work);
 
 	nvme_aen_uevent(ctrl);
-	ctrl->ops->submit_async_event(ctrl);
+
+	/*
+	 * The transport drivers must guarantee AER submission here is safe by
+	 * flushing ctrl async_event_work after changing the controller state
+	 * from LIVE and before freeing the admin queue.
+	 */
+	if (ctrl->state == NVME_CTRL_LIVE)
+		ctrl->ops->submit_async_event(ctrl);
 }
 
 static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
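The comment added above states the contract both sides must keep: the transport flushes async_event_work after leaving LIVE and before freeing the admin queue, and the work itself only re-arms the AER while the controller is LIVE (which is exactly what the rdma and tcp hunks below add). A userspace pthread model of that ordering, with every name a stand-in rather than a kernel API:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

enum { STATE_LIVE, STATE_RESETTING };

static _Atomic int ctrl_state = STATE_LIVE;
static int *admin_queue;			/* stands in for the admin queue */

/* Mirrors nvme_async_event_work(): only touch the queue while LIVE. */
static void *async_event_work(void *arg)
{
	(void)arg;
	if (atomic_load(&ctrl_state) == STATE_LIVE)
		printf("AER submitted on admin queue %d\n", *admin_queue);
	else
		printf("controller not LIVE, skipping AER\n");
	return NULL;
}

int main(void)
{
	pthread_t worker;

	admin_queue = malloc(sizeof(*admin_queue));
	*admin_queue = 1;
	pthread_create(&worker, NULL, async_event_work, NULL);

	/* Error recovery: leave LIVE, flush the work, only then free. */
	atomic_store(&ctrl_state, STATE_RESETTING);
	pthread_join(worker, NULL);	/* plays the role of flush_work() */
	free(admin_queue);		/* safe: no AER can run after the join */
	admin_queue = NULL;
	return 0;
}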
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -170,6 +170,7 @@ nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
 			struct nvmf_ctrl_options *opts)
 {
 	if (ctrl->state == NVME_CTRL_DELETING ||
+	    ctrl->state == NVME_CTRL_DELETING_NOIO ||
 	    ctrl->state == NVME_CTRL_DEAD ||
 	    strcmp(opts->subsysnqn, ctrl->opts->subsysnqn) ||
 	    strcmp(opts->host->nqn, ctrl->opts->host->nqn) ||
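The single added line widens the "do not reuse this controller" test to cover DELETING_NOIO as well: a controller in any deleting or dead state must never match a new connection request. A tiny sketch of that predicate (only the relevant state names are reproduced; the helper itself is illustrative, not the kernel function):

#include <stdbool.h>
#include <stdio.h>

enum ctrl_state {
	NVME_CTRL_LIVE,
	NVME_CTRL_DELETING,
	NVME_CTRL_DELETING_NOIO,
	NVME_CTRL_DEAD,
};

/* A controller on its way out must never match new connect options. */
static bool ctrl_is_going_away(enum ctrl_state state)
{
	return state == NVME_CTRL_DELETING ||
	       state == NVME_CTRL_DELETING_NOIO ||	/* the state the fix adds */
	       state == NVME_CTRL_DEAD;
}

int main(void)
{
	printf("DELETING_NOIO excluded from matching: %s\n",
	       ctrl_is_going_away(NVME_CTRL_DELETING_NOIO) ? "yes" : "no");
	return 0;
}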
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1200,6 +1200,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
 			struct nvme_rdma_ctrl, err_work);
 
 	nvme_stop_keep_alive(&ctrl->ctrl);
+	flush_work(&ctrl->ctrl.async_event_work);
 	nvme_rdma_teardown_io_queues(ctrl, false);
 	nvme_start_queues(&ctrl->ctrl);
 	nvme_rdma_teardown_admin_queue(ctrl, false);
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -2096,6 +2096,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
 	struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl;
 
 	nvme_stop_keep_alive(ctrl);
+	flush_work(&ctrl->async_event_work);
 	nvme_tcp_teardown_io_queues(ctrl, false);
 	/* unquiesce to fail fast pending requests */
 	nvme_start_queues(ctrl);