mirror of
git://sourceware.org/git/lvm2.git
synced 2025-02-07 05:58:00 +03:00
pool: zero metadata
To avoid pollution of metadata with some 'garbage' content or eventually some leak of stale data in case a user wants to upload metadata somewhere, ensure upon allocation the metadata device is fully zeroed. Behaviour may slow down allocation of thin-pool or cache-pool a bit so the old behaviour can be restored with lvm.conf setting: allocation/zero_metadata=0 TODO: add zeroing for extension of metadata volume.
This commit is contained in:
parent
edbc5a62b2
commit
bc39d5bec6
@ -1,5 +1,6 @@
|
||||
Version 2.03.10 -
|
||||
=================================
|
||||
Zero pool metadata on allocation (disable with allocation/zero_metadata=0).
|
||||
Failure in zeroing or wiping will fail command (bypass with -Zn, -Wn).
|
||||
Fix running out of free buffers for async writing for larger writes.
|
||||
Add integrity with raid capability.
|
||||
|
@ -626,7 +626,7 @@ cfg(allocation_cache_pool_max_chunks_CFG, "cache_pool_max_chunks", allocation_CF
|
||||
"Using cache pool with more chunks may degrade cache performance.\n")
|
||||
|
||||
cfg(allocation_thin_pool_metadata_require_separate_pvs_CFG, "thin_pool_metadata_require_separate_pvs", allocation_CFG_SECTION, 0, CFG_TYPE_BOOL, DEFAULT_THIN_POOL_METADATA_REQUIRE_SEPARATE_PVS, vsn(2, 2, 89), NULL, 0, NULL,
|
||||
"Thin pool metdata and data will always use different PVs.\n")
|
||||
"Thin pool metadata and data will always use different PVs.\n")
|
||||
|
||||
cfg(allocation_thin_pool_zero_CFG, "thin_pool_zero", allocation_CFG_SECTION, CFG_PROFILABLE | CFG_PROFILABLE_METADATA | CFG_DEFAULT_COMMENTED, CFG_TYPE_BOOL, DEFAULT_THIN_POOL_ZERO, vsn(2, 2, 99), NULL, 0, NULL,
|
||||
"Thin pool data chunks are zeroed before they are first used.\n"
|
||||
@ -657,6 +657,9 @@ cfg(allocation_thin_pool_chunk_size_policy_CFG, "thin_pool_chunk_size_policy", a
|
||||
" 512KiB.\n"
|
||||
"#\n")
|
||||
|
||||
cfg(allocation_zero_metadata_CFG, "zero_metadata", allocation_CFG_SECTION, 0, CFG_TYPE_BOOL, DEFAULT_ZERO_METADATA, vsn(2, 3, 10), NULL, 0, NULL,
|
||||
"Zero whole metadata area before use with thin or cache pool.\n")
|
||||
|
||||
cfg_runtime(allocation_thin_pool_chunk_size_CFG, "thin_pool_chunk_size", allocation_CFG_SECTION, CFG_PROFILABLE | CFG_PROFILABLE_METADATA | CFG_DEFAULT_UNDEFINED, CFG_TYPE_INT, vsn(2, 2, 99), 0, NULL,
|
||||
"The minimal chunk size in KiB for thin pool volumes.\n"
|
||||
"Larger chunk sizes may improve performance for plain thin volumes,\n"
|
||||
|
@ -129,6 +129,7 @@
|
||||
#define DEFAULT_THIN_POOL_DISCARDS "passdown"
|
||||
#define DEFAULT_THIN_POOL_ZERO 1
|
||||
#define DEFAULT_POOL_METADATA_SPARE 1 /* thin + cache */
|
||||
#define DEFAULT_ZERO_METADATA 1 /* thin + cache */
|
||||
|
||||
#ifdef CACHE_CHECK_NEEDS_CHECK
|
||||
# define DEFAULT_CACHE_CHECK_OPTION1 "-q"
|
||||
|
@ -7576,8 +7576,10 @@ int wipe_lv(struct logical_volume *lv, struct wipe_params wp)
|
||||
struct device *dev;
|
||||
char name[PATH_MAX];
|
||||
uint64_t zero_sectors;
|
||||
int zero_metadata = wp.is_metadata ?
|
||||
find_config_tree_bool(lv->vg->cmd, allocation_zero_metadata_CFG, NULL) : 0;
|
||||
|
||||
if (!wp.do_zero && !wp.do_wipe_signatures)
|
||||
if (!wp.do_zero && !wp.do_wipe_signatures && !wp.is_metadata)
|
||||
/* nothing to do */
|
||||
return 1;
|
||||
|
||||
@ -7629,17 +7631,29 @@ int wipe_lv(struct logical_volume *lv, struct wipe_params wp)
|
||||
}
|
||||
}
|
||||
|
||||
if (wp.do_zero) {
|
||||
zero_sectors = wp.zero_sectors ? : UINT64_C(4096) >> SECTOR_SHIFT;
|
||||
|
||||
if (zero_sectors > lv->size)
|
||||
if (wp.do_zero || wp.is_metadata) {
|
||||
zero_metadata = !wp.is_metadata ? 0 :
|
||||
find_config_tree_bool(lv->vg->cmd, allocation_zero_metadata_CFG, NULL);
|
||||
if (zero_metadata) {
|
||||
log_debug("Metadata logical volume %s will be fully zeroed.",
|
||||
display_lvname(lv));
|
||||
zero_sectors = lv->size;
|
||||
} else {
|
||||
if (wp.is_metadata) /* Verbosely notify metadata will not be fully zeroed */
|
||||
log_verbose("Metadata logical volume %s not fully zeroed and may contain stale data.",
|
||||
display_lvname(lv));
|
||||
zero_sectors = wp.zero_sectors ? : UINT64_C(4096) >> SECTOR_SHIFT;
|
||||
|
||||
if (zero_sectors > lv->size)
|
||||
zero_sectors = lv->size;
|
||||
}
|
||||
|
||||
log_verbose("Initializing %s of logical volume %s with value %d.",
|
||||
display_size(lv->vg->cmd, zero_sectors),
|
||||
display_lvname(lv), wp.zero_value);
|
||||
|
||||
if ((wp.zero_value && !dev_set_bytes(dev, UINT64_C(0),
|
||||
if ((!wp.is_metadata &&
|
||||
wp.zero_value && !dev_set_bytes(dev, UINT64_C(0),
|
||||
(size_t) zero_sectors << SECTOR_SHIFT,
|
||||
(uint8_t)wp.zero_value)) ||
|
||||
!dev_write_zeros(dev, UINT64_C(0), (size_t) zero_sectors << SECTOR_SHIFT)) {
|
||||
@ -8465,7 +8479,8 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
|
||||
.do_zero = lp->zero,
|
||||
.do_wipe_signatures = lp->wipe_signatures,
|
||||
.yes = lp->yes,
|
||||
.force = lp->force
|
||||
.force = lp->force,
|
||||
.is_metadata = lp->is_metadata,
|
||||
})) {
|
||||
log_error("Aborting. Failed to wipe %s.", lp->snapshot
|
||||
? "snapshot exception store" : "start of new LV");
|
||||
|
@ -803,6 +803,7 @@ struct wipe_params {
|
||||
int do_wipe_signatures; /* should we wipe known signatures found on LV? */
|
||||
int yes; /* answer yes automatically to all questions */
|
||||
force_t force; /* force mode */
|
||||
int is_metadata; /* wipe volume is metadata LV */
|
||||
};
|
||||
|
||||
/* Zero out LV and/or wipe signatures */
|
||||
@ -955,6 +956,7 @@ struct lvcreate_params {
|
||||
unsigned suppress_zero_warn : 1;
|
||||
unsigned needs_lockd_init : 1;
|
||||
unsigned ignore_type : 1;
|
||||
unsigned is_metadata : 1; /* created LV will be used as metadata LV (and can be zeroed) */
|
||||
|
||||
const char *vg_name; /* only-used when VG is not yet opened (in /tools) */
|
||||
const char *lv_name; /* all */
|
||||
|
@ -545,8 +545,8 @@ int create_pool(struct logical_volume *pool_lv,
|
||||
display_lvname(pool_lv));
|
||||
goto bad;
|
||||
}
|
||||
/* Clear 4KB of pool metadata device. */
|
||||
if (!(r = wipe_lv(pool_lv, (struct wipe_params) { .do_zero = 1 }))) {
|
||||
/* Clear pool metadata device. */
|
||||
if (!(r = wipe_lv(pool_lv, (struct wipe_params) { .is_metadata = 1 }))) {
|
||||
log_error("Aborting. Failed to wipe pool metadata %s.",
|
||||
display_lvname(pool_lv));
|
||||
}
|
||||
@ -627,6 +627,7 @@ struct logical_volume *alloc_pool_metadata(struct logical_volume *pool_lv,
|
||||
.tags = DM_LIST_HEAD_INIT(lvc.tags),
|
||||
.temporary = 1,
|
||||
.zero = 1,
|
||||
.is_metadata = 1,
|
||||
};
|
||||
|
||||
if (!(lvc.segtype = get_segtype_from_string(pool_lv->vg->cmd, SEG_TYPE_NAME_STRIPED)))
|
||||
@ -663,6 +664,7 @@ static struct logical_volume *_alloc_pool_metadata_spare(struct volume_group *vg
|
||||
.tags = DM_LIST_HEAD_INIT(lp.tags),
|
||||
.temporary = 1,
|
||||
.zero = 1,
|
||||
.is_metadata = 1,
|
||||
};
|
||||
|
||||
if (!(lp.segtype = get_segtype_from_string(vg->cmd, SEG_TYPE_NAME_STRIPED)))
|
||||
|
Loading…
x
Reference in New Issue
Block a user