dm zoned: allocate zone by device index
When allocating a zone, pass in an indicator on which device the zone should be allocated; this increases performance for a multi-device setup because reclaim will now allocate zones on the device for which reclaim is running.

Signed-off-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
commit 22c1ef66c4
parent 4dba12881f
@@ -2050,7 +2050,7 @@ again:
 			goto out;
 
 		/* Allocate a random zone */
-		dzone = dmz_alloc_zone(zmd, alloc_flags);
+		dzone = dmz_alloc_zone(zmd, 0, alloc_flags);
 		if (!dzone) {
 			if (dmz_dev_is_dying(zmd)) {
 				dzone = ERR_PTR(-EIO);
@@ -2156,7 +2156,7 @@ again:
 		goto out;
 
 	/* Allocate a random zone */
-	bzone = dmz_alloc_zone(zmd, alloc_flags);
+	bzone = dmz_alloc_zone(zmd, 0, alloc_flags);
 	if (!bzone) {
 		if (dmz_dev_is_dying(zmd)) {
 			bzone = ERR_PTR(-EIO);
@@ -2187,11 +2187,12 @@ out:
  * Get an unmapped (free) zone.
  * This must be called with the mapping lock held.
  */
-struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned long flags)
+struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned int dev_idx,
+			       unsigned long flags)
 {
 	struct list_head *list;
 	struct dm_zone *zone;
-	unsigned int dev_idx = 0;
+	int i = 0;
 
 again:
 	if (flags & DMZ_ALLOC_CACHE)
@@ -2207,8 +2208,12 @@ again:
 	 */
 	if (!(flags & DMZ_ALLOC_RECLAIM))
 		return NULL;
-	if (dev_idx < zmd->nr_devs) {
-		dev_idx++;
+	/*
+	 * Try to allocate from other devices
+	 */
+	if (i < zmd->nr_devs) {
+		dev_idx = (dev_idx + 1) % zmd->nr_devs;
+		i++;
 		goto again;
 	}
 
@@ -288,7 +288,8 @@ static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
 	/* Get a free random or sequential zone */
 	dmz_lock_map(zmd);
 again:
-	szone = dmz_alloc_zone(zmd, alloc_flags | DMZ_ALLOC_RECLAIM);
+	szone = dmz_alloc_zone(zmd, zrc->dev_idx,
+			       alloc_flags | DMZ_ALLOC_RECLAIM);
 	if (!szone && alloc_flags == DMZ_ALLOC_SEQ && dmz_nr_cache_zones(zmd)) {
 		alloc_flags = DMZ_ALLOC_RND;
 		goto again;
@@ -214,7 +214,8 @@ bool dmz_dev_is_dying(struct dmz_metadata *zmd);
 #define DMZ_ALLOC_SEQ		0x04
 #define DMZ_ALLOC_RECLAIM	0x10
 
-struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned long flags);
+struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd,
+			       unsigned int dev_idx, unsigned long flags);
 void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone);
 
 void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
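Read together, the hunks implement a simple strategy: dmz_alloc_zone() now takes the caller's preferred device index, tries that device first, and only for reclaim allocations walks the remaining devices round-robin before giving up. The following stand-alone sketch illustrates that fallback; it is a hypothetical simplification for illustration only, with made-up names (fake_md, fake_dev, alloc_zone_on_dev, ALLOC_RECLAIM) and a trivial free-zone counter instead of the real dm-zoned structures.

/*
 * Hypothetical sketch of the allocation strategy introduced by this patch:
 * prefer the caller-supplied device index and, for reclaim allocations only,
 * fall back to the remaining devices in round-robin order. The structures
 * and helpers here are simplified stand-ins, not the dm-zoned code.
 */
#include <stdio.h>

#define ALLOC_RECLAIM 0x10	/* mirrors the role of DMZ_ALLOC_RECLAIM */

struct fake_dev {
	unsigned int nr_free;	/* free zones left on this device */
};

struct fake_md {
	struct fake_dev *devs;
	unsigned int nr_devs;
};

/* Returns the index of the device a zone was taken from, or -1 on failure. */
static int alloc_zone_on_dev(struct fake_md *md, unsigned int dev_idx,
			     unsigned long flags)
{
	int i = 0;

again:
	if (md->devs[dev_idx].nr_free) {
		md->devs[dev_idx].nr_free--;
		return (int)dev_idx;
	}

	/* Non-reclaim callers do not fall back to other devices. */
	if (!(flags & ALLOC_RECLAIM))
		return -1;

	/* Try the other devices, wrapping around, at most nr_devs times. */
	if (i < (int)md->nr_devs) {
		dev_idx = (dev_idx + 1) % md->nr_devs;
		i++;
		goto again;
	}
	return -1;
}

int main(void)
{
	struct fake_dev devs[3] = { { 0 }, { 2 }, { 1 } };
	struct fake_md md = { devs, 3 };

	/* Reclaim running on device 0 prefers device 0 but may spill over. */
	printf("reclaim on dev 0 -> zone from dev %d\n",
	       alloc_zone_on_dev(&md, 0, ALLOC_RECLAIM));
	/* A plain allocation on device 0 fails because it never falls back. */
	printf("plain alloc on dev 0 -> %d\n", alloc_zone_on_dev(&md, 0, 0));
	return 0;
}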