Merge tag 'for-6.5/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm

Pull device mapper fixes from Mike Snitzer:

 - Fix double free on memory allocation failure in DM integrity target's
   integrity_recalc()

 - Fix locking in DM raid target's raid_ctr() and around call to
   md_stop()

 - Fix DM cache target's cleaner policy to always allow work to be
   queued for writeback, even if the cache isn't idle

* tag 'for-6.5/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm cache policy smq: ensure IO doesn't prevent cleaner policy progress
  dm raid: protect md_stop() with 'reconfig_mutex'
  dm raid: clean up four equivalent goto tags in raid_ctr()
  dm raid: fix missing reconfig_mutex unlock in raid_ctr() error paths
  dm integrity: fix double free on memory allocation failure
commit c75981a1be
Author: Linus Torvalds
Date:   2023-07-28 10:08:49 -07:00

 4 files changed, 30 insertions(+), 21 deletions(-)

diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -857,7 +857,13 @@ struct smq_policy {
 
 	struct background_tracker *bg_work;
 
-	bool migrations_allowed;
+	bool migrations_allowed:1;
+
+	/*
+	 * If this is set the policy will try and clean the whole cache
+	 * even if the device is not idle.
+	 */
+	bool cleaner:1;
 };
 
 /*----------------------------------------------------------------*/
@@ -1138,7 +1144,7 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
 	 * Cache entries may not be populated. So we cannot rely on the
 	 * size of the clean queue.
 	 */
-	if (idle) {
+	if (idle || mq->cleaner) {
 		/*
 		 * We'd like to clean everything.
 		 */
@@ -1722,11 +1728,9 @@ static void calc_hotspot_params(sector_t origin_size,
 		*hotspot_block_size /= 2u;
 }
 
-static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
-					    sector_t origin_size,
-					    sector_t cache_block_size,
-					    bool mimic_mq,
-					    bool migrations_allowed)
+static struct dm_cache_policy *
+__smq_create(dm_cblock_t cache_size, sector_t origin_size, sector_t cache_block_size,
+	     bool mimic_mq, bool migrations_allowed, bool cleaner)
 {
 	unsigned int i;
 	unsigned int nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
@@ -1813,6 +1817,7 @@ static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
 		goto bad_btracker;
 
 	mq->migrations_allowed = migrations_allowed;
+	mq->cleaner = cleaner;
 
 	return &mq->policy;
@@ -1836,21 +1841,24 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
 					  sector_t origin_size,
 					  sector_t cache_block_size)
 {
-	return __smq_create(cache_size, origin_size, cache_block_size, false, true);
+	return __smq_create(cache_size, origin_size, cache_block_size,
+			    false, true, false);
 }
 
 static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
 					 sector_t origin_size,
 					 sector_t cache_block_size)
 {
-	return __smq_create(cache_size, origin_size, cache_block_size, true, true);
+	return __smq_create(cache_size, origin_size, cache_block_size,
+			    true, true, false);
 }
 
 static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size,
 					      sector_t origin_size,
 					      sector_t cache_block_size)
 {
-	return __smq_create(cache_size, origin_size, cache_block_size, false, false);
+	return __smq_create(cache_size, origin_size, cache_block_size,
+			    false, false, true);
 }
 
 /*----------------------------------------------------------------*/
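
The hunks above add a "cleaner" flag so clean_target_met() keeps demanding writeback even when the cache is busy. A minimal userspace sketch of that decision logic (illustrative struct and names, not the kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	struct smq_sim {
		unsigned int nr_dirty;	/* dirty blocks awaiting writeback */
		bool cleaner;		/* true for the "cleaner" policy */
	};

	/* Mirrors the clean_target_met() change: the cleaner policy now
	 * aims to clean the whole cache whether or not the device is idle. */
	static bool clean_target_met(const struct smq_sim *mq, bool idle)
	{
		if (idle || mq->cleaner)
			return mq->nr_dirty == 0;	/* clean everything */

		/* Busy and not the cleaner: report the target as met,
		 * so no further writeback is queued. */
		return true;
	}

	int main(void)
	{
		struct smq_sim mq = { .nr_dirty = 8, .cleaner = true };

		/* Without the fix a busy cache reported 1 (target "met")
		 * and the cleaner made no progress; now dirty blocks keep
		 * this at 0 until they are written back. */
		printf("%d\n", clean_target_met(&mq, false));
		return 0;
	}

The migrations_allowed:1 change just packs the two bools into bitfields; the behavioral fix is the idle || mq->cleaner test.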

diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2676,6 +2676,7 @@ oom:
 	recalc_tags = kvmalloc(recalc_tags_size, GFP_NOIO);
 	if (!recalc_tags) {
 		vfree(recalc_buffer);
+		recalc_buffer = NULL;
 		goto oom;
 	}
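
The one-line fix above closes a double free: integrity_recalc() retries with a smaller buffer when an allocation fails, and the shared exit label frees recalc_buffer a second time unless the error path resets the pointer first (vfree(NULL), like free(NULL), is a no-op). A userspace sketch of the same shape, with illustrative names and sizes:

	#include <stdlib.h>

	static int recalc(size_t n_sectors)
	{
		char *buffer = NULL, *tags = NULL;
		int ret = -1;

	oom:
		buffer = malloc(n_sectors * 512);
		if (!buffer)
			goto free_ret;	/* buffer is NULL, nothing to free */

		tags = malloc(n_sectors * 4);
		if (!tags) {
			free(buffer);
			buffer = NULL;	/* the fix: free_ret frees it again */
			n_sectors /= 2;
			if (n_sectors)
				goto oom;
			goto free_ret;
		}

		/* ... recalculate integrity tags ... */
		ret = 0;

	free_ret:
		free(buffer);	/* double free here without the NULL reset */
		free(tags);
		return ret;
	}

	int main(void)
	{
		return recalc(2048) ? 1 : 0;
	}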

diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -3251,8 +3251,7 @@ size_check:
 	r = md_start(&rs->md);
 	if (r) {
 		ti->error = "Failed to start raid array";
-		mddev_unlock(&rs->md);
-		goto bad_md_start;
+		goto bad_unlock;
 	}
 
 	/* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */
@@ -3260,8 +3259,7 @@ size_check:
 		r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
 		if (r) {
 			ti->error = "Failed to set raid4/5/6 journal mode";
-			mddev_unlock(&rs->md);
-			goto bad_journal_mode_set;
+			goto bad_unlock;
 		}
 	}
@@ -3272,14 +3270,14 @@ size_check:
 	if (rs_is_raid456(rs)) {
 		r = rs_set_raid456_stripe_cache(rs);
 		if (r)
-			goto bad_stripe_cache;
+			goto bad_unlock;
 	}
 
 	/* Now do an early reshape check */
 	if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
 		r = rs_check_reshape(rs);
 		if (r)
-			goto bad_check_reshape;
+			goto bad_unlock;
 
 		/* Restore new, ctr requested layout to perform check */
 		rs_config_restore(rs, &rs_layout);
@@ -3288,7 +3286,7 @@ size_check:
 			r = rs->md.pers->check_reshape(&rs->md);
 			if (r) {
 				ti->error = "Reshape check failed";
-				goto bad_check_reshape;
+				goto bad_unlock;
 			}
 		}
 	}
@@ -3299,11 +3297,9 @@ size_check:
 	mddev_unlock(&rs->md);
 	return 0;
 
-bad_md_start:
-bad_journal_mode_set:
-bad_stripe_cache:
-bad_check_reshape:
+bad_unlock:
 	md_stop(&rs->md);
+	mddev_unlock(&rs->md);
 bad:
 	raid_set_free(rs);
@@ -3314,7 +3310,9 @@ static void raid_dtr(struct dm_target *ti)
 {
 	struct raid_set *rs = ti->private;
 
+	mddev_lock_nointr(&rs->md);
 	md_stop(&rs->md);
+	mddev_unlock(&rs->md);
 
 	raid_set_free(rs);
 }
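
Taken together, these hunks make every failure after the lock is taken in raid_ctr() funnel through one bad_unlock label, which stops the array while the lock is still held and then releases it, and raid_dtr() now takes the mutex around md_stop(). A userspace sketch of the pattern with a pthread mutex standing in for 'reconfig_mutex' (all names illustrative):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t reconfig_mutex = PTHREAD_MUTEX_INITIALIZER;

	static void md_stop_sim(void)
	{
		/* stop the array; must be called with the mutex held */
	}

	static int raid_ctr_sim(int fail_at)
	{
		pthread_mutex_lock(&reconfig_mutex);

		if (fail_at == 1)	/* md_start() failed */
			goto bad_unlock;
		if (fail_at == 2)	/* journal mode, stripe cache or reshape failed */
			goto bad_unlock;

		pthread_mutex_unlock(&reconfig_mutex);
		return 0;

	bad_unlock:			/* one label replaces the four aliases */
		md_stop_sim();		/* stop while the lock is still held */
		pthread_mutex_unlock(&reconfig_mutex);
		return -1;
	}

	static void raid_dtr_sim(void)
	{
		pthread_mutex_lock(&reconfig_mutex);	/* the new lock around md_stop() */
		md_stop_sim();
		pthread_mutex_unlock(&reconfig_mutex);
	}

	int main(void)
	{
		if (raid_ctr_sim(0) == 0)
			raid_dtr_sim();
		printf("error path: %d\n", raid_ctr_sim(2));
		return 0;
	}

The point of the single label is that no error path can return with the mutex held, and no path can stop the array without holding it.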

diff --git a/drivers/md/md.c b/drivers/md/md.c
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -6247,6 +6247,8 @@ static void __md_stop(struct mddev *mddev)
 
 void md_stop(struct mddev *mddev)
 {
+	lockdep_assert_held(&mddev->reconfig_mutex);
+
 	/* stop the array and free an attached data structures.
 	 * This is called from dm-raid
 	 */
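
The added lockdep_assert_held() makes the new rule checkable: on lockdep-enabled kernels, calling md_stop() without 'reconfig_mutex' now warns instead of racing silently. A rough userspace analogue of the idea (assumed illustration, not how lockdep works internally):

	#include <assert.h>
	#include <pthread.h>
	#include <stdbool.h>

	static pthread_mutex_t reconfig_mutex = PTHREAD_MUTEX_INITIALIZER;
	static __thread bool reconfig_held;	/* crude per-thread "lockdep" state */

	static void reconfig_lock(void)
	{
		pthread_mutex_lock(&reconfig_mutex);
		reconfig_held = true;
	}

	static void reconfig_unlock(void)
	{
		reconfig_held = false;
		pthread_mutex_unlock(&reconfig_mutex);
	}

	static void md_stop_sim(void)
	{
		assert(reconfig_held);	/* fires if a caller forgot the lock */
		/* ... stop the array and free its data structures ... */
	}

	int main(void)
	{
		reconfig_lock();
		md_stop_sim();		/* fine: lock held, as dm-raid now does */
		reconfig_unlock();
		return 0;
	}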