Merge tag 'md-6.9-20240216' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md into for-6.9/block
Pull MD changes from Song:

"1. Cleanup redundant checks, by Yu Kuai.
 2. Remove deprecated headers, by Marc Zyngier and Song Liu.
 3. Concurrency fixes, by Li Lingfeng.
 4. Memory leak fix, by Li Nan."

* tag 'md-6.9-20240216' of https://git.kernel.org/pub/scm/linux/kernel/git/song/md:
  md: fix kmemleak of rdev->serial
  md/multipath: Remove md-multipath.h
  md/linear: Get rid of md-linear.h
  md: use RCU lock to protect traversal in md_spares_need_change()
  md: get rdev->mddev with READ_ONCE()
  md: remove redundant md_wakeup_thread()
  md: remove redundant check of 'mddev->sync_thread'
commit d69591caec
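
The concurrency items above pair WRITE_ONCE() on the teardown path (md_kick_rdev_from_array()) with READ_ONCE() on the sysfs path (rdev_attr_store()), so the rdev->mddev pointer is always stored and loaded in a single, non-torn access. Below is a minimal userspace sketch of that pairing, modelling the kernel macros with C11 relaxed atomics; the toy_rdev/toy_mddev structs and helper names are invented for the example and are not the real md data structures.

/*
 * Userspace analogue of the READ_ONCE()/WRITE_ONCE() pairing the diff
 * applies to rdev->mddev.  The kernel macros are modelled here with C11
 * relaxed atomics, which likewise guarantee a single, non-torn access to
 * the pointer.  All types and names below are toy stand-ins.
 */
#include <stdatomic.h>
#include <stdio.h>

struct toy_mddev { const char *name; };

struct toy_rdev {
	_Atomic(struct toy_mddev *) mddev;	/* published/unpublished pointer */
};

/* Writer side: mirrors WRITE_ONCE(rdev->mddev, NULL) in md_kick_rdev_from_array(). */
static void unbind_rdev(struct toy_rdev *rdev)
{
	atomic_store_explicit(&rdev->mddev, NULL, memory_order_relaxed);
}

/* Reader side: mirrors READ_ONCE(rdev->mddev) in rdev_attr_store(). */
static struct toy_mddev *get_mddev(struct toy_rdev *rdev)
{
	return atomic_load_explicit(&rdev->mddev, memory_order_relaxed);
}

int main(void)
{
	struct toy_mddev array = { .name = "md0" };
	struct toy_rdev rdev;

	atomic_init(&rdev.mddev, &array);

	struct toy_mddev *m = get_mddev(&rdev);
	printf("bound to %s\n", m ? m->name : "(none)");

	unbind_rdev(&rdev);
	m = get_mddev(&rdev);
	printf("after unbind: %s\n", m ? m->name : "(none)");
	return 0;
}

Any C11 compiler builds the sketch (e.g. cc -std=c11 once.c); it only illustrates the access pattern, not the surrounding locking in md.
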
drivers/md/md-linear.h
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINEAR_H
-#define _LINEAR_H
-
-struct dev_info {
-	struct md_rdev	*rdev;
-	sector_t	end_sector;
-};
-
-struct linear_conf
-{
-	struct rcu_head		rcu;
-	sector_t		array_sectors;
-	int			raid_disks; /* a copy of mddev->raid_disks */
-	struct dev_info		disks[] __counted_by(raid_disks);
-};
-#endif
drivers/md/md-multipath.h
@@ -1,32 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _MULTIPATH_H
-#define _MULTIPATH_H
-
-struct multipath_info {
-	struct md_rdev	*rdev;
-};
-
-struct mpconf {
-	struct mddev			*mddev;
-	struct multipath_info	*multipaths;
-	int			raid_disks;
-	spinlock_t		device_lock;
-	struct list_head	retry_list;
-
-	mempool_t		pool;
-};
-
-/*
- * this is our 'private' 'collective' MULTIPATH buffer head.
- * it contains information about what kind of IO operations were started
- * for this MULTIPATH operation, and about their status:
- */
-
-struct multipath_bh {
-	struct mddev			*mddev;
-	struct bio			*master_bio;
-	struct bio			bio;
-	int				path;
-	struct list_head	retry_list;
-};
-#endif
drivers/md/md.c
@@ -2562,6 +2562,7 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
  fail:
 	pr_warn("md: failed to register dev-%s for %s\n",
 		b, mdname(mddev));
+	mddev_destroy_serial_pool(mddev, rdev);
 	return err;
 }
 
@@ -2591,7 +2592,7 @@ static void md_kick_rdev_from_array(struct md_rdev *rdev)
 	list_del_rcu(&rdev->same_set);
 	pr_debug("md: unbind<%pg>\n", rdev->bdev);
 	mddev_destroy_serial_pool(rdev->mddev, rdev);
-	rdev->mddev = NULL;
+	WRITE_ONCE(rdev->mddev, NULL);
 	sysfs_remove_link(&rdev->kobj, "block");
 	sysfs_put(rdev->sysfs_state);
 	sysfs_put(rdev->sysfs_unack_badblocks);
@@ -2929,7 +2930,6 @@ static int add_bound_rdev(struct md_rdev *rdev)
 		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	md_new_event();
-	md_wakeup_thread(mddev->thread);
 	return 0;
 }
 
@@ -3044,10 +3044,8 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
 
 		if (err == 0) {
 			md_kick_rdev_from_array(rdev);
-			if (mddev->pers) {
+			if (mddev->pers)
 				set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
-				md_wakeup_thread(mddev->thread);
-			}
 			md_new_event();
 		}
 	}
@@ -3077,7 +3075,6 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
 		clear_bit(BlockedBadBlocks, &rdev->flags);
 		wake_up(&rdev->blocked_wait);
 		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
-		md_wakeup_thread(rdev->mddev->thread);
 
 		err = 0;
 	} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
@@ -3115,7 +3112,6 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
 		    !test_bit(Replacement, &rdev->flags))
 			set_bit(WantReplacement, &rdev->flags);
 		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
-		md_wakeup_thread(rdev->mddev->thread);
 		err = 0;
 	} else if (cmd_match(buf, "-want_replacement")) {
 		/* Clearing 'want_replacement' is always allowed.
@@ -3245,7 +3241,6 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
 		if (rdev->raid_disk >= 0)
 			return -EBUSY;
 		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
-		md_wakeup_thread(rdev->mddev->thread);
 	} else if (rdev->mddev->pers) {
 		/* Activating a spare .. or possibly reactivating
 		 * if we ever get bitmaps working here.
@@ -3339,8 +3334,7 @@ static ssize_t new_offset_store(struct md_rdev *rdev,
 	if (kstrtoull(buf, 10, &new_offset) < 0)
 		return -EINVAL;
 
-	if (mddev->sync_thread ||
-	    test_bit(MD_RECOVERY_RUNNING,&mddev->recovery))
+	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
 		return -EBUSY;
 	if (new_offset == rdev->data_offset)
 		/* reset is always permitted */
@@ -3671,7 +3665,7 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr,
 	struct kernfs_node *kn = NULL;
 	bool suspend = false;
 	ssize_t rv;
-	struct mddev *mddev = rdev->mddev;
+	struct mddev *mddev = READ_ONCE(rdev->mddev);
 
 	if (!entry->store)
 		return -EIO;
@@ -4013,8 +4007,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
 	 */
 
 	rv = -EBUSY;
-	if (mddev->sync_thread ||
-	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
 	    mddev->reshape_position != MaxSector ||
 	    mddev->sysfs_active)
 		goto out_unlock;
@@ -6188,7 +6181,6 @@ int do_md_run(struct mddev *mddev)
 	/* run start up tasks that require md_thread */
 	md_start(mddev);
 
-	md_wakeup_thread(mddev->thread);
 	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
 
 	set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
@@ -6209,7 +6201,6 @@ int md_start(struct mddev *mddev)
 
 	if (mddev->pers->start) {
 		set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
-		md_wakeup_thread(mddev->thread);
 		ret = mddev->pers->start(mddev);
 		clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
 		md_wakeup_thread(mddev->sync_thread);
@@ -6254,7 +6245,6 @@ static int restart_array(struct mddev *mddev)
 	pr_debug("md: %s switched to read-write mode.\n", mdname(mddev));
 	/* Kick recovery or resync if necessary */
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-	md_wakeup_thread(mddev->thread);
 	md_wakeup_thread(mddev->sync_thread);
 	sysfs_notify_dirent_safe(mddev->sysfs_state);
 	return 0;
@@ -6398,7 +6388,6 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
 	if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
 		did_freeze = 1;
 		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-		md_wakeup_thread(mddev->thread);
 	}
 
 	stop_sync_thread(mddev, false, false);
@@ -6408,7 +6397,6 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
 
 	mutex_lock(&mddev->open_mutex);
 	if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
-	    mddev->sync_thread ||
 	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
 		pr_warn("md: %s still in use.\n",mdname(mddev));
 		err = -EBUSY;
@@ -6431,7 +6419,6 @@ out:
 	if ((mddev->pers && !err) || did_freeze) {
 		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-		md_wakeup_thread(mddev->thread);
 		sysfs_notify_dirent_safe(mddev->sysfs_state);
 	}
 
@@ -6453,7 +6440,6 @@ static int do_md_stop(struct mddev *mddev, int mode,
 	if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) {
 		did_freeze = 1;
 		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-		md_wakeup_thread(mddev->thread);
 	}
 
 	stop_sync_thread(mddev, true, false);
@@ -6461,14 +6447,12 @@ static int do_md_stop(struct mddev *mddev, int mode,
 	mutex_lock(&mddev->open_mutex);
 	if ((mddev->pers && atomic_read(&mddev->openers) > !!bdev) ||
 	    mddev->sysfs_active ||
-	    mddev->sync_thread ||
 	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) {
 		pr_warn("md: %s still in use.\n",mdname(mddev));
 		mutex_unlock(&mddev->open_mutex);
 		if (did_freeze) {
 			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-			md_wakeup_thread(mddev->thread);
 		}
 		return -EBUSY;
 	}
@@ -7009,9 +6993,7 @@ kick_rdev:
 
 	md_kick_rdev_from_array(rdev);
 	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
-	if (mddev->thread)
-		md_wakeup_thread(mddev->thread);
-	else
+	if (!mddev->thread)
 		md_update_sb(mddev, 1);
 	md_new_event();
 
@@ -7093,7 +7075,6 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
 	 * array immediately.
 	 */
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-	md_wakeup_thread(mddev->thread);
 	md_new_event();
 	return 0;
 
@@ -7307,8 +7288,7 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
 	 * of each device. If num_sectors is zero, we find the largest size
 	 * that fits.
 	 */
-	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
-	    mddev->sync_thread)
+	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
 		return -EBUSY;
 	if (!md_is_rdwr(mddev))
 		return -EROFS;
@@ -7345,8 +7325,7 @@ static int update_raid_disks(struct mddev *mddev, int raid_disks)
 	if (raid_disks <= 0 ||
 	    (mddev->max_disks && raid_disks >= mddev->max_disks))
 		return -EINVAL;
-	if (mddev->sync_thread ||
-	    test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
+	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
 	    test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) ||
 	    mddev->reshape_position != MaxSector)
 		return -EBUSY;
@@ -9262,9 +9241,14 @@ static bool md_spares_need_change(struct mddev *mddev)
 {
 	struct md_rdev *rdev;
 
-	rdev_for_each(rdev, mddev)
-		if (rdev_removeable(rdev) || rdev_addable(rdev))
+	rcu_read_lock();
+	rdev_for_each_rcu(rdev, mddev) {
+		if (rdev_removeable(rdev) || rdev_addable(rdev)) {
+			rcu_read_unlock();
 			return true;
+		}
+	}
+	rcu_read_unlock();
 	return false;
 }
 
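
The md_spares_need_change() hunk above replaces the plain rdev_for_each() walk with an rcu_read_lock()-protected rdev_for_each_rcu() loop, taking care to drop the read lock on both the early-return and the fall-through paths. A hedged userspace analogue of that "unlock on every exit path" shape follows; it uses a pthread mutex and a hand-rolled list in place of real RCU, and every identifier in it is invented for the sketch.

/*
 * Userspace analogue of the control flow added to md_spares_need_change():
 * take a lock around the traversal and make sure every return path drops
 * it.  A pthread mutex stands in for rcu_read_lock()/rcu_read_unlock();
 * the device list and predicate below are toy stand-ins.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct dev {
	bool needs_change;	/* stand-in for rdev_removeable() || rdev_addable() */
	struct dev *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static bool spares_need_change(struct dev *head)
{
	pthread_mutex_lock(&list_lock);
	for (struct dev *d = head; d; d = d->next) {
		if (d->needs_change) {
			pthread_mutex_unlock(&list_lock);	/* drop the lock on the early return */
			return true;
		}
	}
	pthread_mutex_unlock(&list_lock);			/* and on the fall-through path */
	return false;
}

int main(void)
{
	struct dev c = { .needs_change = true,  .next = NULL };
	struct dev b = { .needs_change = false, .next = &c };
	struct dev a = { .needs_change = false, .next = &b };

	printf("spares need change: %s\n", spares_need_change(&a) ? "yes" : "no");
	return 0;
}

Build with cc -pthread; unlike the mutex here, the kernel's rcu_read_lock() does not serialize writers, but the discipline of pairing every return with an unlock is the same.
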
|
@ -6967,10 +6967,8 @@ raid5_store_stripe_size(struct mddev *mddev, const char *page, size_t len)
|
||||
pr_debug("md/raid: change stripe_size from %lu to %lu\n",
|
||||
conf->stripe_size, new);
|
||||
|
||||
if (mddev->sync_thread ||
|
||||
test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
|
||||
mddev->reshape_position != MaxSector ||
|
||||
mddev->sysfs_active) {
|
||||
if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
|
||||
mddev->reshape_position != MaxSector || mddev->sysfs_active) {
|
||||
err = -EBUSY;
|
||||
goto out_unlock;
|
||||
}
|
||||
|