Merge tag 'md/4.14-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md
Pull MD fixes from Shaohua Li:
 "A few fixes for MD. Mainly a fix for a problem introduced in 4.13,
  where in some situations we retry a bio on some code paths but not
  all."

* tag 'md/4.14-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md:
  md/raid5: cap worker count
  dm-raid: fix a race condition in request handling
  md: fix a race condition for flush request handling
  md: separate request handling
commit 7b5ef82336
drivers/md/dm-raid.c
@@ -3238,7 +3238,7 @@ static int raid_map(struct dm_target *ti, struct bio *bio)
         if (unlikely(bio_end_sector(bio) > mddev->array_sectors))
                 return DM_MAPIO_REQUEUE;
 
-        mddev->pers->make_request(mddev, bio);
+        md_handle_request(mddev, bio);
 
         return DM_MAPIO_SUBMITTED;
 }
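With this change, dm-raid bios take the same entry path as bios submitted through md_make_request(): md_handle_request() (added in the md.c hunk below) performs the suspend check, holds an active_io reference across the personality's ->make_request() call, and retries when that call returns false. The direct ->make_request() call it replaces skipped all of that, which is the "retry bio for some code paths but not all" problem the pull message describes.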
drivers/md/md.c
@@ -266,6 +266,37 @@ static DEFINE_SPINLOCK(all_mddevs_lock);
  * call has finished, the bio has been linked into some internal structure
  * and so is visible to ->quiesce(), so we don't need the refcount any more.
  */
+void md_handle_request(struct mddev *mddev, struct bio *bio)
+{
+check_suspended:
+        rcu_read_lock();
+        if (mddev->suspended) {
+                DEFINE_WAIT(__wait);
+                for (;;) {
+                        prepare_to_wait(&mddev->sb_wait, &__wait,
+                                        TASK_UNINTERRUPTIBLE);
+                        if (!mddev->suspended)
+                                break;
+                        rcu_read_unlock();
+                        schedule();
+                        rcu_read_lock();
+                }
+                finish_wait(&mddev->sb_wait, &__wait);
+        }
+        atomic_inc(&mddev->active_io);
+        rcu_read_unlock();
+
+        if (!mddev->pers->make_request(mddev, bio)) {
+                atomic_dec(&mddev->active_io);
+                wake_up(&mddev->sb_wait);
+                goto check_suspended;
+        }
+
+        if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
+                wake_up(&mddev->sb_wait);
+}
+EXPORT_SYMBOL(md_handle_request);
+
 static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
 {
         const int rw = bio_data_dir(bio);
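To see the shape of the protocol outside the kernel, here is a minimal userspace sketch of the pattern md_handle_request() centralizes. It is an analogy, not kernel code: a pthread mutex and condition variable stand in for sb_wait and the RCU/atomic machinery, and make_request() is a hypothetical stand-in for the personality's ->make_request() hook.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t sb_wait = PTHREAD_COND_INITIALIZER;
static bool suspended;          /* plays the role of mddev->suspended */
static int active_io;           /* plays the role of mddev->active_io */

/* Stand-in for mddev->pers->make_request(); false means "retry later". */
static bool make_request(int bio)
{
        printf("submitted bio %d\n", bio);
        return true;
}

static void handle_request(int bio)
{
        for (;;) {
                pthread_mutex_lock(&lock);
                while (suspended)               /* block new I/O while suspended */
                        pthread_cond_wait(&sb_wait, &lock);
                active_io++;                    /* pin the array against suspend */
                pthread_mutex_unlock(&lock);

                if (make_request(bio))
                        break;                  /* submitted; drop the reference below */

                pthread_mutex_lock(&lock);      /* personality asked for a retry */
                active_io--;
                pthread_cond_broadcast(&sb_wait);
                pthread_mutex_unlock(&lock);
        }

        pthread_mutex_lock(&lock);
        if (--active_io == 0 && suspended)      /* last I/O lets suspend proceed */
                pthread_cond_broadcast(&sb_wait);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        handle_request(1);
        return 0;
}

The invariant is the same as in the kernel version: a bio is only handed to the personality while the active-I/O count is elevated, so a suspend can wait for in-flight I/O to drain, and a failed submission retries from the suspend check rather than looping inside it.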
@@ -285,23 +316,6 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
                 bio_endio(bio);
                 return BLK_QC_T_NONE;
         }
-check_suspended:
-        rcu_read_lock();
-        if (mddev->suspended) {
-                DEFINE_WAIT(__wait);
-                for (;;) {
-                        prepare_to_wait(&mddev->sb_wait, &__wait,
-                                        TASK_UNINTERRUPTIBLE);
-                        if (!mddev->suspended)
-                                break;
-                        rcu_read_unlock();
-                        schedule();
-                        rcu_read_lock();
-                }
-                finish_wait(&mddev->sb_wait, &__wait);
-        }
-        atomic_inc(&mddev->active_io);
-        rcu_read_unlock();
 
         /*
          * save the sectors now since our bio can
@@ -310,20 +324,14 @@ check_suspended:
         sectors = bio_sectors(bio);
         /* bio could be mergeable after passing to underlayer */
         bio->bi_opf &= ~REQ_NOMERGE;
-        if (!mddev->pers->make_request(mddev, bio)) {
-                atomic_dec(&mddev->active_io);
-                wake_up(&mddev->sb_wait);
-                goto check_suspended;
-        }
+
+        md_handle_request(mddev, bio);
 
         cpu = part_stat_lock();
         part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
         part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
         part_stat_unlock();
 
-        if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
-                wake_up(&mddev->sb_wait);
-
         return BLK_QC_T_NONE;
 }
 
@@ -439,16 +447,22 @@ static void md_submit_flush_data(struct work_struct *ws)
         struct mddev *mddev = container_of(ws, struct mddev, flush_work);
         struct bio *bio = mddev->flush_bio;
 
+        /*
+         * must reset flush_bio before calling into md_handle_request to avoid a
+         * deadlock, because other bios passed md_handle_request suspend check
+         * could wait for this and below md_handle_request could wait for those
+         * bios because of suspend check
+         */
+        mddev->flush_bio = NULL;
+        wake_up(&mddev->sb_wait);
+
         if (bio->bi_iter.bi_size == 0)
                 /* an empty barrier - all done */
                 bio_endio(bio);
         else {
                 bio->bi_opf &= ~REQ_PREFLUSH;
-                mddev->pers->make_request(mddev, bio);
+                md_handle_request(mddev, bio);
         }
-
-        mddev->flush_bio = NULL;
-        wake_up(&mddev->sb_wait);
 }
 
 void md_flush_request(struct mddev *mddev, struct bio *bio)
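The reordering here is the substance of the fix, not a cleanup. Once the flush data is routed through md_handle_request(), leaving the mddev->flush_bio reset until after the call can deadlock: bios that have already passed the suspend check may be waiting for this flush to finish (for flush_bio to clear), while this flush in turn waits on those bios at the suspend check, a circular wait. Clearing flush_bio and waking sb_wait before calling md_handle_request() breaks the cycle, as the in-diff comment explains.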
drivers/md/md.h
@@ -692,6 +692,7 @@ extern void md_stop_writes(struct mddev *mddev);
 extern int md_rdev_init(struct md_rdev *rdev);
 extern void md_rdev_clear(struct md_rdev *rdev);
 
+extern void md_handle_request(struct mddev *mddev, struct bio *bio);
 extern void mddev_suspend(struct mddev *mddev);
 extern void mddev_resume(struct mddev *mddev);
 extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
drivers/md/raid5.c
@@ -6575,14 +6575,17 @@ static ssize_t
 raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len)
 {
         struct r5conf *conf;
-        unsigned long new;
+        unsigned int new;
         int err;
         struct r5worker_group *new_groups, *old_groups;
         int group_cnt, worker_cnt_per_group;
 
         if (len >= PAGE_SIZE)
                 return -EINVAL;
-        if (kstrtoul(page, 10, &new))
+        if (kstrtouint(page, 10, &new))
+                return -EINVAL;
+        /* 8192 should be big enough */
+        if (new > 8192)
                 return -EINVAL;
 
         err = mddev_lock(mddev);
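This raid5 hunk is the "cap worker count" fix: parsing into an unsigned long accepted absurdly large values on 64-bit, so a sysfs write could request an unbounded number of worker groups. A minimal userspace sketch of the bounded parse, assuming strtoul() as a stand-in for the kernel's kstrtouint() and a hypothetical parse_group_cnt() helper mirroring the sysfs store path:

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_WORKERS 8192        /* same cap the patch applies */

/* parse_group_cnt() is a hypothetical helper, not a kernel function. */
static int parse_group_cnt(const char *page, unsigned int *out)
{
        char *end;
        unsigned long v;

        errno = 0;
        v = strtoul(page, &end, 10);
        if (end == page || *end != '\0' || errno == ERANGE || v > UINT_MAX)
                return -EINVAL;         /* roughly what kstrtouint() rejects */
        if (v > MAX_WORKERS)
                return -EINVAL;         /* the new explicit cap */
        *out = (unsigned int)v;
        return 0;
}

int main(void)
{
        unsigned int n;

        printf("\"16\"    -> %d\n", parse_group_cnt("16", &n));         /* 0 */
        printf("\"99999\" -> %d\n", parse_group_cnt("99999", &n));      /* -EINVAL */
        return 0;
}

After the patch, an out-of-range write fails with -EINVAL instead of being accepted and spawning an unreasonable number of worker threads.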