md/raid1: factor out choose_slow_rdev() from read_balance()
read_balance() is hard to understand because it has too many states and branches, and it is overlong. Factor out the case of reading from a slow (write-mostly) rdev into a new helper, choose_slow_rdev(). There are no functional changes.

Co-developed-by: Paul Luse <paul.e.luse@linux.intel.com>
Signed-off-by: Paul Luse <paul.e.luse@linux.intel.com>
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Xiao Ni <xni@redhat.com>
Signed-off-by: Song Liu <song@kernel.org>
Link: https://lore.kernel.org/r/20240229095714.926789-9-yukuai1@huaweicloud.com
commit dfa8ecd167
parent 31a7333175
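The commit message above describes the refactor in prose; the sketch below is not part of the patch and uses invented names (struct mirror, pick_disk, choose_slow) to condense the resulting selection pattern into a small standalone C program: try regular candidates first, then fall back to a slow-path helper that prefers a device covering the whole request and otherwise remembers the longest readable prefix, shrinking the request length to match.

#include <stdio.h>

struct mirror {
	int slow;		/* stand-in for the WriteMostly flag */
	int usable_len;		/* length readable before a bad block */
};

/* Hypothetical analogue of choose_slow_rdev(): prefer a slow mirror that
 * covers the whole request, otherwise remember the one with the largest
 * readable prefix and shrink *max_len to it. */
static int choose_slow(const struct mirror *m, int nr, int want, int *max_len)
{
	int best = -1, best_len = 0;

	for (int i = 0; i < nr; i++) {
		if (!m[i].slow)
			continue;
		if (m[i].usable_len >= want)
			return i;
		if (m[i].usable_len > best_len) {
			best = i;
			best_len = m[i].usable_len;
		}
	}
	if (best != -1)
		*max_len = best_len;
	return best;
}

/* Hypothetical analogue of read_balance()'s tail: regular mirrors first,
 * slow mirrors only as a last resort. */
static int pick_disk(const struct mirror *m, int nr, int want, int *max_len)
{
	for (int i = 0; i < nr; i++)
		if (!m[i].slow && m[i].usable_len >= want)
			return i;

	return choose_slow(m, nr, want, max_len);
}

int main(void)
{
	struct mirror m[] = {
		{ .slow = 0, .usable_len = 0 },	/* regular mirror, unreadable */
		{ .slow = 1, .usable_len = 8 },	/* slow mirror, partly readable */
	};
	int max_len = 16;
	int disk = pick_disk(m, 2, 16, &max_len);

	printf("disk %d, max_len %d\n", disk, max_len);	/* prints: disk 1, max_len 8 */
	return 0;
}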
@@ -620,6 +620,53 @@ static int choose_first_rdev(struct r1conf *conf, struct r1bio *r1_bio,
 	return -1;
 }
 
+static int choose_slow_rdev(struct r1conf *conf, struct r1bio *r1_bio,
+			    int *max_sectors)
+{
+	sector_t this_sector = r1_bio->sector;
+	int bb_disk = -1;
+	int bb_read_len = 0;
+	int disk;
+
+	for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
+		struct md_rdev *rdev;
+		int len;
+		int read_len;
+
+		if (r1_bio->bios[disk] == IO_BLOCKED)
+			continue;
+
+		rdev = conf->mirrors[disk].rdev;
+		if (!rdev || test_bit(Faulty, &rdev->flags) ||
+		    !test_bit(WriteMostly, &rdev->flags))
+			continue;
+
+		/* there are no bad blocks, we can use this disk */
+		len = r1_bio->sectors;
+		read_len = raid1_check_read_range(rdev, this_sector, &len);
+		if (read_len == r1_bio->sectors) {
+			update_read_sectors(conf, disk, this_sector, read_len);
+			return disk;
+		}
+
+		/*
+		 * there are partial bad blocks, choose the rdev with largest
+		 * read length.
+		 */
+		if (read_len > bb_read_len) {
+			bb_disk = disk;
+			bb_read_len = read_len;
+		}
+	}
+
+	if (bb_disk != -1) {
+		*max_sectors = bb_read_len;
+		update_read_sectors(conf, bb_disk, this_sector, bb_read_len);
+	}
+
+	return bb_disk;
+}
+
 /*
  * This routine returns the disk from which the requested read should
  * be done. There is a per-array 'next expected sequential IO' sector
@@ -673,23 +720,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 		if (!test_bit(In_sync, &rdev->flags) &&
 		    rdev->recovery_offset < this_sector + sectors)
 			continue;
-		if (test_bit(WriteMostly, &rdev->flags)) {
-			/* Don't balance among write-mostly, just
-			 * use the first as a last resort */
-			if (best_dist_disk < 0) {
-				if (is_badblock(rdev, this_sector, sectors,
-						&first_bad, &bad_sectors)) {
-					if (first_bad <= this_sector)
-						/* Cannot use this */
-						continue;
-					best_good_sectors = first_bad - this_sector;
-				} else
-					best_good_sectors = sectors;
-				best_dist_disk = disk;
-				best_pending_disk = disk;
-			}
+		if (test_bit(WriteMostly, &rdev->flags))
 			continue;
-		}
 		/* This is a reasonable device to use.  It might
 		 * even be best.
 		 */
@@ -808,7 +840,10 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
 	}
 	*max_sectors = sectors;
 
-	return best_disk;
+	if (best_disk >= 0)
+		return best_disk;
+
+	return choose_slow_rdev(conf, r1_bio, max_sectors);
 }
 
 static void wake_up_barrier(struct r1conf *conf)