mirror of git://sourceware.org/git/lvm2.git

Compare commits: v2_02_167...dev-lvmguy (2 commits)

  c6d7cd1489
  eec8bd228c
lib/metadata/lv_manip.c

@@ -38,9 +38,6 @@ typedef enum {
 	NEXT_AREA
 } area_use_t;
 
-/* FIXME: remove RAID_METADATA_AREA_LEN macro after defining 'raid_log_extents'*/
-#define RAID_METADATA_AREA_LEN	1
-
 /* FIXME These ended up getting used differently from first intended. Refactor. */
 /* Only one of A_CONTIGUOUS_TO_LVSEG, A_CLING_TO_LVSEG, A_CLING_TO_ALLOCED may be set */
 #define A_CONTIGUOUS_TO_LVSEG	0x01	/* Must be contiguous to an existing segment */
@@ -878,22 +875,38 @@ dm_percent_t copy_percent(const struct logical_volume *lv)
 	return denominator ? dm_make_percent(numerator, denominator) : DM_PERCENT_100;
 }
 
-/* Round up extents to next stripe boundary for number of stripes */
-static uint32_t _round_to_stripe_boundary(struct volume_group *vg, uint32_t extents,
-					  uint32_t stripes, int extend)
+/*
+ * Round up @extents to next stripe boundary number of
+ * @stripes (if any) and/or to next RAID io boundary.
+ */
+uint32_t extents_round_to_boundary(struct volume_group *vg,
+				   const struct segment_type *segtype,
+				   uint32_t extents,
+				   uint32_t stripes,
+				   int extend)
 {
+	int ensure_raid_min = segtype_is_raid(segtype);
 	uint32_t size_rest, new_extents = extents;
 
 	if (!stripes)
 		return extents;
 
-	/* Round up extents to stripe divisible amount */
-	if ((size_rest = extents % stripes)) {
-		new_extents += extend ? stripes - size_rest : -size_rest;
-		log_print_unless_silent("Rounding size %s (%d extents) up to stripe boundary size %s (%d extents).",
+	do {
+		/* Round up extents to stripe divisible amount if given @stripes */
+		if (stripes > 1 && (size_rest = new_extents % stripes))
+			new_extents += extend ? stripes - size_rest : -size_rest;
+
+		if (ensure_raid_min) {
+			/* Require multiples of 64 KiB to not fail in kernel RAID page size IO */
+			if ((new_extents * vg->extent_size) % ((stripes ?: 1) * RAID_ALLOC_CHUNK_SECTORS))
+				extend ? new_extents++ : new_extents--;
+			else
+				ensure_raid_min = 0;
+		}
+	} while (ensure_raid_min);
+
+	if (extents != new_extents)
+		log_print_unless_silent("Rounding size %s (%d extents) up to %s i/o boundary size %s (%d extents).",
 					display_size(vg->cmd, (uint64_t) extents * vg->extent_size), extents,
+					segtype_is_raid(segtype) ? (stripes > 1 ? "stripe/RAID" : "RAID") : "stripe",
 					display_size(vg->cmd, (uint64_t) new_extents * vg->extent_size), new_extents);
-	}
 
 	return new_extents;
 }
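As a standalone illustration of the two-stage rounding above: first round the extent count to a stripe multiple, then nudge it until every stripe lands on the 64 KiB RAID allocation chunk. A minimal sketch, assuming hypothetical example values; round_extents is a stand-in for extents_round_to_boundary() and only RAID_ALLOC_CHUNK_SECTORS mirrors the patch:

	#include <stdint.h>
	#include <stdio.h>

	#define RAID_ALLOC_CHUNK_SECTORS (64 * 2)	/* 64 KiB in 512-byte sectors */

	static uint32_t round_extents(uint32_t extents, uint32_t extent_size,
				      uint32_t stripes, int is_raid, int extend)
	{
		uint32_t rest, new_extents = extents;
		int ensure_raid_min = is_raid;

		if (!stripes)
			return extents;

		do {
			/* Step 1: round the extent count to a stripe multiple */
			if (stripes > 1 && (rest = new_extents % stripes))
				new_extents += extend ? stripes - rest : -rest;

			/* Step 2: nudge until each stripe is 64 KiB-aligned */
			if (ensure_raid_min) {
				if (((uint64_t) new_extents * extent_size) % (stripes * RAID_ALLOC_CHUNK_SECTORS))
					extend ? new_extents++ : new_extents--;
				else
					ensure_raid_min = 0;
			}
		} while (ensure_raid_min);

		return new_extents;
	}

	int main(void)
	{
		/* 4 KiB extents (8 sectors), 3 stripes, RAID: 100 extents -> 144 */
		printf("%u\n", round_extents(100, 8, 3, 1, 1));
		return 0;
	}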
@@ -1256,7 +1269,7 @@ static int _lv_segment_reduce(struct lv_segment *seg, uint32_t reduction)
  */
 static int _lv_reduce(struct logical_volume *lv, uint32_t extents, int delete)
 {
-	struct lv_segment *seg;
+	struct lv_segment *seg = first_seg(lv);
 	uint32_t count = extents;
 	uint32_t reduction;
 	struct logical_volume *pool_lv;
@@ -1267,6 +1280,9 @@ static int _lv_reduce(struct logical_volume *lv, uint32_t extents, int delete)
 		clear_snapshot_merge(lv);
 	}
 
+	if (!delete && seg)
+		extents = extents_round_to_boundary(lv->vg, seg->segtype, extents, seg->area_count - seg->segtype->parity_devs, 0);
+
 	dm_list_iterate_back_items(seg, &lv->segments) {
 		if (!count)
 			break;
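The stripe count passed to extents_round_to_boundary() during a reduce is taken from the first segment: total areas minus parity devices, so only data stripes influence the rounding. A tiny sketch with hypothetical raid6 numbers:

	#include <stdio.h>

	int main(void)
	{
		unsigned area_count = 7;	/* hypothetical raid6 with 7 images */
		unsigned parity_devs = 2;	/* raid6 carries 2 parity devices */

		/* Only the 5 data stripes matter for size rounding */
		printf("data stripes: %u\n", area_count - parity_devs);
		return 0;
	}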
@@ -1581,11 +1597,12 @@ static uint32_t _mirror_log_extents(uint32_t region_size, uint32_t pe_size, uint
 
 /* Is there enough total space or should we give up immediately? */
 static int _sufficient_pes_free(struct alloc_handle *ah, struct dm_list *pvms,
-				uint32_t allocated, uint32_t extents_still_needed)
+				uint32_t allocated, uint32_t extents_still_needed,
+				uint32_t extent_size)
 {
 	uint32_t area_extents_needed = (extents_still_needed - allocated) * ah->area_count / ah->area_multiple;
 	uint32_t parity_extents_needed = (extents_still_needed - allocated) * ah->parity_count / ah->area_multiple;
-	uint32_t metadata_extents_needed = ah->alloc_and_split_meta ? 0 : ah->metadata_area_count * RAID_METADATA_AREA_LEN; /* One each */
+	uint32_t metadata_extents_needed = ah->alloc_and_split_meta ? 0 : ah->metadata_area_count * lv_raid_metadata_area_len(extent_size);
 	uint32_t total_extents_needed = area_extents_needed + parity_extents_needed + metadata_extents_needed;
 	uint32_t free_pes = pv_maps_size(pvms);
 
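The free-space test above is plain arithmetic: data and parity extents are scaled by area_count and parity_count over area_multiple, plus one rmeta-length allocation per metadata area. A sketch with hypothetical raid5 numbers:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t needed = 1000, allocated = 0;
		uint32_t area_count = 4, parity_count = 1, area_multiple = 4;
		uint32_t metadata_area_count = 5, rmeta_len = 1;

		uint32_t area = (needed - allocated) * area_count / area_multiple;	/* 1000 */
		uint32_t parity = (needed - allocated) * parity_count / area_multiple;	/* 250 */
		uint32_t meta = metadata_area_count * rmeta_len;			/* 5 */

		printf("total extents needed: %u\n", area + parity + meta);	/* 1255 */
		return 0;
	}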
@@ -3042,7 +3059,7 @@ static int _allocate(struct alloc_handle *ah,
 		old_allocated = alloc_state.allocated;
 		log_debug_alloc("Trying allocation using %s policy.", get_alloc_string(alloc));
 
-		if (!ah->approx_alloc && !_sufficient_pes_free(ah, pvms, alloc_state.allocated, ah->new_extents))
+		if (!ah->approx_alloc && !_sufficient_pes_free(ah, pvms, alloc_state.allocated, ah->new_extents, vg->extent_size))
 			goto_out;
 
 		_init_alloc_parms(ah, &alloc_parms, alloc, prev_lvseg,
@@ -3235,7 +3252,7 @@ static struct alloc_handle *_alloc_init(struct cmd_context *cmd,
 			ah->metadata_area_count = area_count;
 			ah->alloc_and_split_meta = 1;
 
-			ah->log_len = RAID_METADATA_AREA_LEN;
+			ah->log_len = existing_extents ? 0 : lv_raid_metadata_area_len(extent_size);
 
 			/*
 			 * We need 'log_len' extents for each
@@ -3938,6 +3955,35 @@ static int _lv_extend_layered_lv(struct alloc_handle *ah,
 	return 1;
 }
 
+/* Return maximum number of extents for size of MetaLV of RaidLV @lv with bitmap */
+#define RAID_HEADER_SIZE (2 * 4096) /* dm-raid superblock and bitmap superblock */
+static uint32_t _max_raid_extents(struct logical_volume *lv)
+{
+	uint64_t max_image_size;
+	uint64_t mlv_bytes;
+	struct lv_segment *seg = first_seg(lv);
+	struct logical_volume *mlv;
+
+	if (!seg ||
+	    !seg_is_raid(seg) ||
+	    !seg->meta_areas ||
+	    !(mlv = seg_metalv(seg, 0)) ||
+	    !seg->region_size)
+		return ~0U;
+
+	mlv_bytes = (mlv->le_count * lv->vg->extent_size) << SECTOR_SHIFT;
+	if (mlv_bytes < RAID_HEADER_SIZE) {
+		log_error("Metadata LV %s too small to even hold the RAID headers",
+			  display_lvname(mlv));
+		return 0;
+	}
+
+	/* Subtract space for 2 headers (superblock and bitmap) */
+	max_image_size = (mlv_bytes - RAID_HEADER_SIZE) * 8 * seg->region_size;
+
+	return max_image_size / lv->vg->extent_size * (seg_is_raid1(seg) ? 1 : seg->area_count - seg->segtype->parity_devs);
+}
+
 /*
  * Entry point for single-step LV allocation + extension.
  * Extents is the number of logical extents to append to the LV unless
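A worked example of the _max_raid_extents() arithmetic above, with hypothetical sizes: each bit of the MD write-intent bitmap tracks one region, so the usable MetaLV bytes, times 8 bits per byte, times the region size, bound the image size:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	#define SECTOR_SHIFT 9
	#define RAID_HEADER_SIZE (2 * 4096)	/* dm-raid superblock + bitmap superblock */

	int main(void)
	{
		uint32_t extent_size = 4 * 2048;	/* 4 MiB VG extent size, in sectors */
		uint32_t mlv_extents = 1;		/* MetaLV is one extent long */
		uint64_t region_size = 1024;		/* 512 KiB region size, in sectors */

		uint64_t mlv_bytes = ((uint64_t) mlv_extents * extent_size) << SECTOR_SHIFT;
		/* One bitmap bit tracks one region of region_size sectors */
		uint64_t max_image_sectors = (mlv_bytes - RAID_HEADER_SIZE) * 8 * region_size;

		printf("max image size: %" PRIu64 " sectors (~%" PRIu64 " GiB)\n",
		       max_image_sectors, max_image_sectors >> (30 - SECTOR_SHIFT));
		return 0;
	}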
@@ -3982,6 +4028,8 @@ int lv_extend(struct logical_volume *lv,
 	}
 	/* FIXME log_count should be 1 for mirrors */
 
+	extents = extents_round_to_boundary(lv->vg, segtype, extents, stripes, 1);
+
 	if (!(ah = allocate_extents(lv->vg, lv, segtype, stripes, mirrors,
 				    log_count, region_size, extents,
 				    allocatable_pvs, alloc, approx_alloc, NULL)))
@@ -3999,6 +4047,8 @@ int lv_extend(struct logical_volume *lv,
 					 stripe_size, 0u, 0)))
 			stack;
 	} else {
+		uint32_t max_extents;
+
 		/*
 		 * For RAID, all the devices are AREA_LV.
 		 * However, for 'mirror on stripe' using non-RAID targets,
@@ -4023,6 +4073,14 @@ int lv_extend(struct logical_volume *lv,
 					     stripes, stripe_size)))
 			goto_out;
 
+		if ((max_extents = _max_raid_extents(lv)) < lv->le_count) {
+			log_error("Can't extend LV %s larger than %s because of MetaLV size",
+				  display_lvname(lv),
+				  display_size(lv->vg->cmd, max_extents * lv->vg->extent_size));
+			r = 0;
+			goto out;
+		}
+
 		/*
 		 * If we are expanding an existing mirror, we can skip the
 		 * resync of the extension if the LV is currently in-sync
@@ -7084,29 +7142,7 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
 		}
 	}
 
-	if (lp->stripe_size > vg->extent_size) {
-		if (seg_is_raid(lp) && (vg->extent_size < STRIPE_SIZE_MIN)) {
-			/*
-			 * FIXME: RAID will simply fail to load the table if
-			 *        this is the case, but we should probably
-			 *        honor the stripe minimum for regular stripe
-			 *        volumes as well. Avoiding doing that now
-			 *        only to minimize the change.
-			 */
-			log_error("The extent size in volume group %s is too "
-				  "small to support striped RAID volumes.",
-				  vg->name);
-			return NULL;
-		}
-
-		log_print_unless_silent("Reducing requested stripe size %s to maximum, "
-					"physical extent size %s.",
-					display_size(cmd, (uint64_t) lp->stripe_size),
-					display_size(cmd, (uint64_t) vg->extent_size));
-		lp->stripe_size = vg->extent_size;
-	}
-
-	lp->extents = _round_to_stripe_boundary(vg, lp->extents, lp->stripes, 1);
+	lp->extents = extents_round_to_boundary(vg, lp->segtype, lp->extents, lp->stripes, 1);
 
 	if (!lp->extents && !seg_is_thin_volume(lp)) {
 		log_error(INTERNAL_ERROR "Unable to create new logical volume with no extents.");
lib/metadata/metadata-exported.h

@@ -36,6 +36,7 @@
 #define MAX_RESTRICTED_LVS 255	/* Used by FMT_RESTRICTED_LVIDS */
 #define MAX_EXTENT_SIZE ((uint32_t) -1)
 #define MIN_NON_POWER2_EXTENT_SIZE (128U * 2U)	/* 128KB in sectors */
+#define RAID_ALLOC_CHUNK_SECTORS (64 * 2)	/* Allocate RAID in these minimal chunks to ensure page io doesn't fail */
 
 #define HISTORICAL_LV_PREFIX "-"
 
@@ -832,6 +833,12 @@ uint32_t extents_from_percent_size(struct volume_group *vg, const struct dm_list
 				   uint32_t extents, int roundup,
 				   percent_type_t percent, uint64_t size);
 
+/* Round @extents to stripe and/or RAID io boundary */
+uint32_t extents_round_to_boundary(struct volume_group *vg,
+				   const struct segment_type *segtype,
+				   uint32_t extents, uint32_t stripes,
+				   int extend);
+
 struct logical_volume *find_pool_lv(const struct logical_volume *lv);
 int pool_is_active(const struct logical_volume *pool_lv);
 int pool_supports_external_origin(const struct lv_segment *pool_seg, const struct logical_volume *external_lv);
@@ -1209,6 +1216,7 @@ int lv_raid_replace(struct logical_volume *lv, struct dm_list *remove_pvs,
 		    struct dm_list *allocate_pvs);
 int lv_raid_remove_missing(struct logical_volume *lv);
 int partial_raid_lv_supports_degraded_activation(const struct logical_volume *lv);
+uint32_t lv_raid_metadata_area_len(uint32_t extent_size);
 /* -- metadata/raid_manip.c */
 
 /* ++ metadata/cache_manip.c */
lib/metadata/raid_manip.c

@@ -151,6 +151,14 @@ int lv_is_raid_with_tracking(const struct logical_volume *lv)
 	return _lv_is_raid_with_tracking(lv, &tracking);
 }
 
+/* FIXME: remove lv_raid_metadata_area_len() after defining 'lv_raid_rmeta_extents'*/
+uint32_t lv_raid_metadata_area_len(uint32_t extent_size)
+{
+	/* Ensure 4 MiB until we get dynamic rmeta resizing... */
+	// return max((4 * 2048 / extent_size), (uint32_t) 1);
+	return max((2*12 / extent_size), (uint32_t) 1);
+}
+
 uint32_t lv_raid_image_count(const struct logical_volume *lv)
 {
 	struct lv_segment *seg = first_seg(lv);
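lv_raid_metadata_area_len() above currently returns a very small area; the commented-out line is the intended 4 MiB target. A sketch of what that 4 MiB variant would yield for a few extent sizes (the max() macro is redefined locally for the sketch):

	#include <stdint.h>
	#include <stdio.h>

	#define max(a, b) ((a) > (b) ? (a) : (b))

	int main(void)
	{
		uint32_t sizes[] = { 8, 2048, 8192, 65536 };	/* 4 KiB, 1 MiB, 4 MiB, 32 MiB in sectors */
		unsigned i;

		for (i = 0; i < 4; i++)	/* 4 MiB of rmeta, rounded up to >= 1 extent */
			printf("extent_size %u -> rmeta extents %u\n",
			       sizes[i], max(4 * 2048 / sizes[i], (uint32_t) 1));
		return 0;
	}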
@@ -737,7 +745,7 @@ static int _alloc_rmeta_for_lv(struct logical_volume *data_lv,
 
 	if (!(ah = allocate_extents(data_lv->vg, NULL, seg->segtype, 0, 1, 0,
 				    seg->region_size,
-				    1 /*RAID_METADATA_AREA_LEN*/,
+				    lv_raid_metadata_area_len(data_lv->vg->extent_size),
 				    allocate_pvs, data_lv->alloc, 0, NULL)))
 		return_0;
 
@@ -845,7 +853,7 @@ static int _raid_add_images_without_commit(struct logical_volume *lv,
 		lv->status |= RAID;
 		seg = first_seg(lv);
 		seg_lv(seg, 0)->status |= RAID_IMAGE | LVM_READ | LVM_WRITE;
-		seg->region_size = get_default_region_size(lv->vg->cmd);
+		seg->region_size = min((uint64_t) get_default_region_size(lv->vg->cmd), lv->size);
 
 		/* MD's bitmap is limited to tracking 2^21 regions */
 		while (seg->region_size < (lv->size / (1 << 21))) {
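The clamp above exists because MD's write-intent bitmap tracks at most 2^21 regions, so region_size must be at least lv->size / 2^21. A sketch for a 16 TiB LV, assuming the loop body elided by this hunk doubles region_size each pass:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t lv_size = (uint64_t) 16 * 1024 * 1024 * 1024 * 2;	/* 16 TiB in sectors */
		uint64_t region_size = 1024;	/* 512 KiB in sectors (hypothetical default) */

		/* MD's bitmap tracks at most 2^21 regions */
		while (region_size < (lv_size / (1 << 21)))
			region_size *= 2;	/* assumed growth step */

		printf("region_size: %" PRIu64 " sectors, regions: %" PRIu64 "\n",
		       region_size, lv_size / region_size);	/* 16384 sectors, 2097152 regions */
		return 0;
	}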
@@ -954,6 +962,21 @@ static int _raid_add_images(struct logical_volume *lv,
 	struct lv_segment *seg = first_seg(lv);
 	uint32_t s;
 
+	if (seg_is_linear(seg) &&
+	    lv->size % RAID_ALLOC_CHUNK_SECTORS) {
+		uint64_t size = lv->le_count * lv->vg->extent_size;
+		uint32_t extents;
+
+		size += RAID_ALLOC_CHUNK_SECTORS - size % RAID_ALLOC_CHUNK_SECTORS;
+		extents = extents_from_size(lv->vg->cmd, size, lv->vg->extent_size) - lv->le_count;
+		log_print_unless_silent("Resizing LV %s to RAID boundary %s before conversion to raid1",
+					display_lvname(lv), display_size(lv->vg->cmd, size));
+		if (!lv_extend(lv, seg->segtype, 1, 0, 1, 0, extents, pvs, lv->alloc, 0))
+			return 0;
+		if (!lv_update_and_reload_origin(lv))
+			return_0;
+	}
+
 	if (!_raid_add_images_without_commit(lv, new_count, pvs, use_existing_area_len))
 		return_0;
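Before converting a linear LV to raid1, the hunk above pads the LV to the next 64 KiB boundary so the resulting images meet the RAID allocation-chunk rule. The padding arithmetic in isolation, with a hypothetical starting size:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	#define RAID_ALLOC_CHUNK_SECTORS (64 * 2)	/* 64 KiB in sectors */

	int main(void)
	{
		uint64_t size = 10 * 2048 + 100;	/* ~10 MiB + 50 KiB, in sectors */

		if (size % RAID_ALLOC_CHUNK_SECTORS)
			size += RAID_ALLOC_CHUNK_SECTORS - size % RAID_ALLOC_CHUNK_SECTORS;

		printf("padded size: %" PRIu64 " sectors\n", size);	/* 20608 */
		return 0;
	}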
tools/lvcreate.c

@@ -1255,10 +1255,17 @@ static int _check_raid_parameters(struct volume_group *vg,
 				  struct lvcreate_cmdline_params *lcp)
 {
 	unsigned devs = lcp->pv_count ? : dm_list_size(&vg->pvs);
+	uint64_t page_sectors = lvm_getpagesize() >> SECTOR_SHIFT;
 	struct cmd_context *cmd = vg->cmd;
 	int old_stripes = !arg_is_set(cmd, stripes_ARG) &&
 			  find_config_tree_bool(cmd, allocation_raid_stripe_all_devices_CFG, NULL);
 
+	if (vg->extent_size < page_sectors) {
+		log_error("Unable to create RAID LV: requires minimum VG extent size %s",
+			  display_size(vg->cmd, page_sectors));
+		return 0;
+	}
+
 	/*
	 * If we requested the previous behaviour by setting
	 * "allocation/raid_stripe_all_devices = 1" and the