Merge remote-tracking branch 'jens/for-5.10/block' into dm-5.10
DM depends on these block 5.10 commits:

  22ada802ed block: use lcm_not_zero() when stacking chunk_sectors
  07d098e6bb block: allow 'chunk_sectors' to be non-power-of-2
  021a24460d block: add QUEUE_FLAG_NOWAIT
  6abc49468e dm: add support for REQ_NOWAIT and enable it for linear target

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
drivers/md/dm-table.c
@@ -907,7 +907,7 @@ static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
 	struct request_queue *q = bdev_get_queue(bdev);
 
 	/* request-based cannot stack on partitions! */
-	if (bdev != bdev->bd_contains)
+	if (bdev_is_partition(bdev))
 		return false;
 
 	return queue_is_mq(q);
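For context, bdev_is_partition() is the block-layer helper this hunk switches to; it expresses the same "is this a partition?" test as the open-coded bd_contains comparison it replaces. A minimal sketch of the helper as it looks in the 5.10-era headers (the exact header location is an assumption, the body is shown for illustration only):

static inline bool bdev_is_partition(struct block_device *bdev)
{
	/* a partition has a non-zero partition number */
	return bdev->bd_partno;
}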
@@ -1752,6 +1752,33 @@ static bool dm_table_supports_write_zeroes(struct dm_table *t)
 	return true;
 }
 
+static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev,
+				      sector_t start, sector_t len, void *data)
+{
+	struct request_queue *q = bdev_get_queue(dev->bdev);
+
+	return q && !blk_queue_nowait(q);
+}
+
+static bool dm_table_supports_nowait(struct dm_table *t)
+{
+	struct dm_target *ti;
+	unsigned i = 0;
+
+	while (i < dm_table_get_num_targets(t)) {
+		ti = dm_table_get_target(t, i++);
+
+		if (!dm_target_supports_nowait(ti->type))
+			return false;
+
+		if (!ti->type->iterate_devices ||
+		    ti->type->iterate_devices(ti, device_not_nowait_capable, NULL))
+			return false;
+	}
+
+	return true;
+}
+
 static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev,
 				       sector_t start, sector_t len, void *data)
 {
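dm_table_supports_nowait() only reports true when every target type in the table has opted in and every underlying queue advertises nowait support. A target opts in via a feature bit on its struct target_type, which is what dm_target_supports_nowait() tests; commit 6abc49468e does this for the linear target. A minimal sketch of the opt-in, assuming the flag name DM_TARGET_NOWAIT from that commit (the "example_*" names and ops are hypothetical placeholders, not code from this merge):

/* Hypothetical bio-based target advertising REQ_NOWAIT support.
 * dm_target_supports_nowait(example_target.type) would return true. */
static struct target_type example_target = {
	.name     = "example",
	.version  = {1, 0, 0},
	.features = DM_TARGET_NOWAIT,	/* feature bit checked by dm_table_supports_nowait() */
	.module   = THIS_MODULE,
	.ctr      = example_ctr,	/* placeholder constructor */
	.dtr      = example_dtr,	/* placeholder destructor */
	.map      = example_map,	/* placeholder map function */
};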
@@ -1819,7 +1846,7 @@ static int device_requires_stable_pages(struct dm_target *ti,
 {
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 
-	return q && bdi_cap_stable_pages_required(q->backing_dev_info);
+	return q && blk_queue_stable_writes(q);
 }
 
 /*
@@ -1854,6 +1881,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	 */
 	q->limits = *limits;
 
+	if (dm_table_supports_nowait(t))
+		blk_queue_flag_set(QUEUE_FLAG_NOWAIT, q);
+	else
+		blk_queue_flag_clear(QUEUE_FLAG_NOWAIT, q);
+
 	if (!dm_table_supports_discards(t)) {
 		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
 		/* Must also clear discard limits... */
@@ -1904,9 +1936,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	 * because they do their own checksumming.
 	 */
 	if (dm_table_requires_stable_pages(t))
-		q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
 	else
-		q->backing_dev_info->capabilities &= ~BDI_CAP_STABLE_WRITES;
+		blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q);
 
 	/*
 	 * Determine whether or not this queue's I/O timings contribute
@@ -1929,8 +1961,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	}
 #endif
 
-	/* Allow reads to exceed readahead limits */
-	q->backing_dev_info->io_pages = limits->max_sectors >> (PAGE_SHIFT - 9);
+	blk_queue_update_readahead(q);
 }
 
 unsigned int dm_table_get_num_targets(struct dm_table *t)
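The net effect of the QUEUE_FLAG_NOWAIT plumbing above is that submitters (io_uring being the main user) can issue REQ_NOWAIT bios against a DM device that stacks only nowait-capable targets and queues, and get an immediate completion instead of sleeping when forward progress would block. A rough caller-side sketch of that contract, with hypothetical names (my_end_io, submit_nowait_read); this is generic REQ_NOWAIT usage for illustration, not code from this merge:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical completion handler: a nowait bio that could not make
 * progress without blocking completes with BLK_STS_AGAIN. */
static void my_end_io(struct bio *bio)
{
	if (bio->bi_status == BLK_STS_AGAIN) {
		/* retry later from a context that is allowed to sleep */
	}
	bio_put(bio);
}

static void submit_nowait_read(struct block_device *bdev, struct page *page)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);

	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = 0;		/* read from the start of the device */
	bio->bi_opf = REQ_OP_READ | REQ_NOWAIT;	/* ask for non-blocking submission */
	bio->bi_end_io = my_end_io;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	submit_bio(bio);
}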