mirror of git://sourceware.org/git/lvm2.git (synced 2025-09-27 05:44:18 +03:00)

Compare commits: v2_02_177...dev-lvmguy (6 commits)

74210f8662
ddb3826523
2aeb71cb37
c3d821b647
2e9cb73706
9a4acf93cd
@@ -1,5 +1,6 @@
 Version 2.02.165 -
 ===================================
+  Avoid PV tags when checking allocation against parallel PVs.
   Disallow mirror conversions of raid10 volumes.
   Fix dmeventd unmonitoring when segment type (and dso) changes.
   Don't allow lvconvert --repair on raid0 devices or attempt to monitor them.
@@ -391,7 +391,7 @@ static int _read_segment(struct logical_volume *lv, const struct dm_config_node
 
    if (!(seg = alloc_lv_segment(segtype, lv, start_extent,
                                 extent_count, 0, 0, NULL, area_count,
-                                extent_count, 0, 0, 0, NULL))) {
+                                segtype->parity_devs ? (extent_count / (area_count - segtype->parity_devs)) : extent_count, 0, 0, 0, NULL))) {
        log_error("Segment allocation failed");
        return 0;
    }
 
@@ -38,9 +38,6 @@ typedef enum {
    NEXT_AREA
 } area_use_t;
 
-/* FIXME: remove RAID_METADATA_AREA_LEN macro after defining 'raid_log_extents'*/
-#define RAID_METADATA_AREA_LEN 1
-
 /* FIXME These ended up getting used differently from first intended. Refactor. */
 /* Only one of A_CONTIGUOUS_TO_LVSEG, A_CLING_TO_LVSEG, A_CLING_TO_ALLOCED may be set */
 #define A_CONTIGUOUS_TO_LVSEG 0x01 /* Must be contiguous to an existing segment */
@@ -878,22 +875,51 @@ dm_percent_t copy_percent(const struct logical_volume *lv)
    return denominator ? dm_make_percent(numerator, denominator) : DM_PERCENT_100;
 }
 
-/* Round up extents to next stripe boundary for number of stripes */
-static uint32_t _round_to_stripe_boundary(struct volume_group *vg, uint32_t extents,
-                                          uint32_t stripes, int extend)
+/* Round any tiny extents to multiples of 4K */
+#define MINIMUM_ALLOCATION_SECTORS 8
+static uint32_t _round_extents(uint32_t extents, uint32_t extent_size, int extend)
+{
+   uint64_t size = (uint64_t) extents * extent_size;
+   uint64_t rest = size % MINIMUM_ALLOCATION_SECTORS;
+
+   if (!rest)
+       return extents;
+
+   if (!size)
+       return 0;
+
+   rest = MINIMUM_ALLOCATION_SECTORS - rest;
+
+   return (size + (extend ? rest : -(MINIMUM_ALLOCATION_SECTORS - rest))) / extent_size;
+}
+
+/* Round up extents to next stripe boundary for number of stripes and ensure minimum sizes */
+static uint32_t _round_extents_to_boundary(struct volume_group *vg, uint32_t extents,
+                                           uint32_t stripes, uint32_t stripe_size, int extend)
 {
    uint32_t size_rest, new_extents = extents;
 
    if (!stripes)
        return extents;
+   if (stripes < 2)
+       return _round_extents(extents, vg->extent_size, extend);
 
+redo:
    /* Round up extents to stripe divisible amount */
-   if ((size_rest = extents % stripes)) {
+   if ((size_rest = new_extents % stripes))
        new_extents += extend ? stripes - size_rest : -size_rest;
-       log_print_unless_silent("Rounding size %s (%d extents) up to stripe boundary size %s (%d extents).",
+
+   if (stripes > 1 && stripe_size > 1) {
+       uint32_t tmp = new_extents;
+
+       if ((new_extents = _round_extents(tmp / stripes, vg->extent_size, extend) * stripes) != tmp)
+           goto redo;
+   }
+
+   log_debug("Adjusted allocation request of %" PRIu32 " to %" PRIu32 " logical extents.", extents, new_extents);
+
+   if (new_extents != extents)
+       log_print_unless_silent("Rounding size %s (%d extents) up to boundary size %s (%d extents).",
                                display_size(vg->cmd, (uint64_t) extents * vg->extent_size), extents,
                                display_size(vg->cmd, (uint64_t) new_extents * vg->extent_size), new_extents);
-   }
 
    return new_extents;
 }
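As a sanity check on the arithmetic above, here is a small standalone C sketch (not part of the patch; the driver values are invented) that mirrors _round_extents for a legacy VG with 1 KiB extents. Extending rounds the byte size up to the next 4 KiB multiple, reducing rounds it down:

/* Standalone sketch of the 4K rounding above; numbers are illustrative only. */
#include <stdint.h>
#include <stdio.h>

#define MINIMUM_ALLOCATION_SECTORS 8 /* 4 KiB in 512-byte sectors */

static uint32_t round_extents(uint32_t extents, uint32_t extent_size, int extend)
{
    uint64_t size = (uint64_t) extents * extent_size; /* total size in sectors */
    uint64_t rest = size % MINIMUM_ALLOCATION_SECTORS;

    if (!rest)
        return extents;
    if (!size)
        return 0;

    rest = MINIMUM_ALLOCATION_SECTORS - rest;

    /* extend rounds up to the next 4 KiB multiple, reduce rounds down */
    return (size + (extend ? rest : -(MINIMUM_ALLOCATION_SECTORS - rest))) / extent_size;
}

int main(void)
{
    /* A legacy VG with 1 KiB extents (2 sectors): 5 extents = 2.5 KiB */
    printf("extend: %u\n", round_extents(5, 2, 1)); /* -> 8 extents (8 KiB) */
    printf("reduce: %u\n", round_extents(5, 2, 0)); /* -> 4 extents (4 KiB) */
    return 0;
}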
@@ -1203,6 +1229,8 @@ static uint32_t _calc_area_multiple(const struct segment_type *segtype,
    }
 
    /*
+    * FIXME:
+    *
     * RAID10 - only has 2-way mirror right now.
     * If we are to move beyond 2-way RAID10, then
     * the 'stripes' argument will always need to
@@ -1230,14 +1258,14 @@ static int _lv_segment_reduce(struct lv_segment *seg, uint32_t reduction)
    uint32_t area_reduction, s;
 
    /* Caller must ensure exact divisibility */
-   if (seg_is_striped(seg)) {
-       if (reduction % seg->area_count) {
+   if (seg_is_striped(seg) || (seg_is_raid(seg) && !seg_is_raid1(seg))) {
+       if (reduction % (seg->area_count - seg->segtype->parity_devs)) {
            log_error("Segment extent reduction %" PRIu32
                      " not divisible by #stripes %" PRIu32,
                      reduction, seg->area_count);
            return 0;
        }
-       area_reduction = (reduction / seg->area_count);
+       area_reduction = reduction / (seg->area_count - seg->segtype->parity_devs);
    } else
        area_reduction = reduction;
 
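The reduce path now divides by the number of data stripes rather than by all images, so parity images shrink by the same per-stripe share. A tiny illustrative program (hypothetical numbers, not from the patch) shows the arithmetic for a 4-image raid5:

/* Illustrative check of the per-image reduction; values are made up. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t area_count = 4, parity_devs = 1;   /* raid5: 3 data + 1 parity images */
    uint32_t reduction = 30;                    /* logical extents to remove */
    uint32_t data_stripes = area_count - parity_devs;

    if (reduction % data_stripes) {
        fprintf(stderr, "reduction not divisible by #stripes\n");
        return 1;
    }
    /* every image, parity ones included, shrinks by the per-stripe share */
    printf("area_reduction = %u\n", reduction / data_stripes); /* -> 10 */
    return 0;
}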
@@ -1581,11 +1609,13 @@ static uint32_t _mirror_log_extents(uint32_t region_size, uint32_t pe_size, uint
 
 /* Is there enough total space or should we give up immediately? */
 static int _sufficient_pes_free(struct alloc_handle *ah, struct dm_list *pvms,
-                               uint32_t allocated, uint32_t extents_still_needed)
+                               uint32_t allocated, uint32_t extents_still_needed,
+                               uint32_t extent_size)
 {
    uint32_t area_extents_needed = (extents_still_needed - allocated) * ah->area_count / ah->area_multiple;
    uint32_t parity_extents_needed = (extents_still_needed - allocated) * ah->parity_count / ah->area_multiple;
-   uint32_t metadata_extents_needed = ah->alloc_and_split_meta ? 0 : ah->metadata_area_count * RAID_METADATA_AREA_LEN; /* One each */
+   uint32_t metadata_extents_needed = ah->alloc_and_split_meta ? 0 :
+       ah->metadata_area_count * lv_raid_metadata_area_len(ah->region_size, extent_size);
    uint32_t total_extents_needed = area_extents_needed + parity_extents_needed + metadata_extents_needed;
    uint32_t free_pes = pv_maps_size(pvms);
 
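The estimate adds data, parity, and per-metadata-area extents before any allocation is attempted. A rough standalone sketch of the same arithmetic, with made-up raid6 numbers (none of these values come from the patch):

/* Rough sketch of the up-front space estimate; all numbers invented. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t needed = 100, allocated = 0;       /* logical extents still to place */
    uint32_t area_count = 4, parity_count = 2;  /* e.g. raid6 with 4 data images */
    uint32_t area_multiple = 4;                 /* logical extents per data-area extent */
    uint32_t metadata_area_count = 6, rmeta_len = 1;

    uint32_t area   = (needed - allocated) * area_count / area_multiple;   /* 100 */
    uint32_t parity = (needed - allocated) * parity_count / area_multiple; /* 50 */
    uint32_t meta   = metadata_area_count * rmeta_len;                     /* 6 */

    printf("total physical extents needed: %u\n", area + parity + meta);   /* 156 */
    return 0;
}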
@@ -1992,7 +2022,7 @@ static int _is_same_pv(struct pv_match *pvmatch __attribute((unused)), struct pv
 static int _match_pv_tags(const struct dm_config_node *cling_tag_list_cn,
                          struct physical_volume *pv1, uint32_t pv1_start_pe, uint32_t area_num,
                          struct physical_volume *pv2, struct dm_list *pv_tags, unsigned validate_only,
-                         struct dm_pool *mem)
+                         struct dm_pool *mem, unsigned parallel_pv)
 {
    const struct dm_config_value *cv;
    const char *str;
@@ -2054,10 +2084,14 @@ static int _match_pv_tags(const struct dm_config_node *cling_tag_list_cn,
            if (!str_list_match_list(&pv1->tags, tags_to_match, &tag_matched))
                continue;
            else {
-               if (!pv_tags)
-                   log_debug_alloc("Matched allocation PV tag %s on existing %s with free space on %s.",
-                                   tag_matched, pv_dev_name(pv1), pv2 ? pv_dev_name(pv2) : "-");
-               else
+               if (!pv_tags) {
+                   if (parallel_pv)
+                       log_debug_alloc("Not using free space on %s: Matched allocation PV tag %s on existing parallel PV %s.",
+                                       pv_dev_name(pv1), tag_matched, pv2 ? pv_dev_name(pv2) : "-");
+                   else
+                       log_debug_alloc("Matched allocation PV tag %s on existing %s with free space on %s.",
+                                       tag_matched, pv_dev_name(pv1), pv2 ? pv_dev_name(pv2) : "-");
+               } else
                    log_debug_alloc("Eliminating allocation area %" PRIu32 " at PV %s start PE %" PRIu32
                                    " from consideration: PV tag %s already used.",
                                    area_num, pv_dev_name(pv1), pv1_start_pe, tag_matched);
@@ -2081,10 +2115,14 @@ static int _match_pv_tags(const struct dm_config_node *cling_tag_list_cn,
            }
            continue;
        }
-       if (!pv_tags)
-           log_debug_alloc("Matched allocation PV tag %s on existing %s with free space on %s.",
-                           str, pv_dev_name(pv1), pv2 ? pv_dev_name(pv2) : "-");
-       else
+       if (!pv_tags) {
+           if (parallel_pv)
+               log_debug_alloc("Not using free space on %s: Matched allocation PV tag %s on existing parallel PV %s.",
+                               pv2 ? pv_dev_name(pv2) : "-", str, pv_dev_name(pv1));
+           else
+               log_debug_alloc("Matched allocation PV tag %s on existing %s with free space on %s.",
+                               str, pv_dev_name(pv1), pv2 ? pv_dev_name(pv2) : "-");
+       } else
            log_debug_alloc("Eliminating allocation area %" PRIu32 " at PV %s start PE %" PRIu32
                            " from consideration: PV tag %s already used.",
                            area_num, pv_dev_name(pv1), pv1_start_pe, str);
@@ -2100,12 +2138,12 @@ static int _match_pv_tags(const struct dm_config_node *cling_tag_list_cn,
 
 static int _validate_tag_list(const struct dm_config_node *cling_tag_list_cn)
 {
-   return _match_pv_tags(cling_tag_list_cn, NULL, 0, 0, NULL, NULL, 1, NULL);
+   return _match_pv_tags(cling_tag_list_cn, NULL, 0, 0, NULL, NULL, 1, NULL, 0);
 }
 
 static int _tags_list_str(struct dm_pool *mem, struct physical_volume *pv1, const struct dm_config_node *cling_tag_list_cn)
 {
-   if (!_match_pv_tags(cling_tag_list_cn, pv1, 0, 0, NULL, NULL, 0, mem)) {
+   if (!_match_pv_tags(cling_tag_list_cn, pv1, 0, 0, NULL, NULL, 0, mem, 0)) {
        dm_pool_abandon_object(mem);
        return_0;
    }
@@ -2121,7 +2159,7 @@ static int _pv_has_matching_tag(const struct dm_config_node *cling_tag_list_cn,
                                struct physical_volume *pv1, uint32_t pv1_start_pe, uint32_t area_num,
                                struct dm_list *pv_tags)
 {
-   return _match_pv_tags(cling_tag_list_cn, pv1, pv1_start_pe, area_num, NULL, pv_tags, 0, NULL);
+   return _match_pv_tags(cling_tag_list_cn, pv1, pv1_start_pe, area_num, NULL, pv_tags, 0, NULL, 0);
 }
 
 /*
@@ -2129,14 +2167,15 @@ static int _pv_has_matching_tag(const struct dm_config_node *cling_tag_list_cn,
  * matches a tag of the PV of the existing segment?
  */
 static int _pvs_have_matching_tag(const struct dm_config_node *cling_tag_list_cn,
-                                 struct physical_volume *pv1, struct physical_volume *pv2)
+                                 struct physical_volume *pv1, struct physical_volume *pv2,
+                                 unsigned parallel_pv)
 {
-   return _match_pv_tags(cling_tag_list_cn, pv1, 0, 0, pv2, NULL, 0, NULL);
+   return _match_pv_tags(cling_tag_list_cn, pv1, 0, 0, pv2, NULL, 0, NULL, parallel_pv);
 }
 
 static int _has_matching_pv_tag(struct pv_match *pvmatch, struct pv_segment *pvseg, struct pv_area *pva)
 {
-   return _pvs_have_matching_tag(pvmatch->cling_tag_list_cn, pvseg->pv, pva->map->pv);
+   return _pvs_have_matching_tag(pvmatch->cling_tag_list_cn, pvseg->pv, pva->map->pv, 0);
 }
 
 static int _log_parallel_areas(struct dm_pool *mem, struct dm_list *parallel_areas,
@@ -2395,7 +2434,7 @@ static int _check_cling_to_alloced(struct alloc_handle *ah, const struct dm_conf
            continue;   /* Area already assigned */
        dm_list_iterate_items(aa, &ah->alloced_areas[s]) {
            if ((!cling_tag_list_cn && (pva->map->pv == aa[0].pv)) ||
-               (cling_tag_list_cn && _pvs_have_matching_tag(cling_tag_list_cn, pva->map->pv, aa[0].pv))) {
+               (cling_tag_list_cn && _pvs_have_matching_tag(cling_tag_list_cn, pva->map->pv, aa[0].pv, 0))) {
                if (positional)
                    _reserve_required_area(ah, alloc_state, pva, pva->count, s, 0);
                return 1;
@@ -2406,13 +2445,20 @@ static int _check_cling_to_alloced(struct alloc_handle *ah, const struct dm_conf
    return 0;
 }
 
-static int _pv_is_parallel(struct physical_volume *pv, struct dm_list *parallel_pvs)
+static int _pv_is_parallel(struct physical_volume *pv, struct dm_list *parallel_pvs, const struct dm_config_node *cling_tag_list_cn)
 {
    struct pv_list *pvl;
 
-   dm_list_iterate_items(pvl, parallel_pvs)
-       if (pv == pvl->pv)
+   dm_list_iterate_items(pvl, parallel_pvs) {
+       if (pv == pvl->pv) {
+           log_debug_alloc("Not using free space on existing parallel PV %s.",
+                           pv_dev_name(pvl->pv));
            return 1;
+       }
+       if (cling_tag_list_cn && _pvs_have_matching_tag(cling_tag_list_cn, pvl->pv, pv, 1))
+           return 1;
+   }
 
    return 0;
 }
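This is the change behind the "Avoid PV tags" changelog entry: a candidate PV is now skipped not only when it is itself one of the parallel PVs, but also when a configured cling tag makes it equivalent to one. A minimal toy model of that rule follows; the struct, names, and the simplified single-tag matching are all invented for illustration and are much cruder than lvm2's real tag lists:

/* Toy model of the parallel-PV rejection rule; not lvm2's data structures. */
#include <stdio.h>
#include <string.h>

struct pv { const char *name; const char *tag; };

/* unusable if it IS a parallel PV or shares a cling tag with one */
static int pv_is_parallel(const struct pv *cand, const struct pv *parallel,
                          int n, int use_tags)
{
    int i;

    for (i = 0; i < n; i++) {
        if (cand == &parallel[i])
            return 1;
        if (use_tags && cand->tag && parallel[i].tag &&
            !strcmp(cand->tag, parallel[i].tag))
            return 1;
    }
    return 0;
}

int main(void)
{
    struct pv pvs[] = { { "/dev/sda", "siteA" }, { "/dev/sdb", "siteA" } };

    /* /dev/sdb is not itself parallel, but its tag matches /dev/sda's */
    printf("%d\n", pv_is_parallel(&pvs[1], &pvs[0], 1, 1)); /* -> 1 */
    return 0;
}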
@@ -2700,7 +2746,7 @@ static int _find_some_parallel_space(struct alloc_handle *ah,
        /* FIXME Split into log and non-log parallel_pvs and only check the log ones if log_iteration? */
        /* (I've temporarily disabled the check.) */
        /* Avoid PVs used by existing parallel areas */
-       if (!log_iteration_count && parallel_pvs && _pv_is_parallel(pvm->pv, parallel_pvs))
+       if (!log_iteration_count && parallel_pvs && _pv_is_parallel(pvm->pv, parallel_pvs, ah->cling_tag_list_cn))
            goto next_pv;
 
        /*
@@ -3059,7 +3105,7 @@ static int _allocate(struct alloc_handle *ah,
        old_allocated = alloc_state.allocated;
        log_debug_alloc("Trying allocation using %s policy.", get_alloc_string(alloc));
 
-       if (!ah->approx_alloc && !_sufficient_pes_free(ah, pvms, alloc_state.allocated, ah->new_extents))
+       if (!ah->approx_alloc && !_sufficient_pes_free(ah, pvms, alloc_state.allocated, ah->new_extents, vg->extent_size))
            goto_out;
 
        _init_alloc_parms(ah, &alloc_parms, alloc, prev_lvseg,
@@ -3252,7 +3298,7 @@ static struct alloc_handle *_alloc_init(struct cmd_context *cmd,
        ah->metadata_area_count = area_count;
        ah->alloc_and_split_meta = 1;
 
-       ah->log_len = RAID_METADATA_AREA_LEN;
+       ah->log_len = lv_raid_metadata_area_len(ah->region_size, extent_size);
 
        /*
         * We need 'log_len' extents for each
@@ -3831,8 +3877,7 @@ static int _lv_extend_layered_lv(struct alloc_handle *ah,
    if (seg_is_raid(seg)) {
        stripes = 1;
        stripe_size = 0;
-       if (seg_is_any_raid0(seg))
-           area_multiple = seg->area_count;
+       area_multiple = _calc_area_multiple(seg->segtype, seg->area_count, seg_is_raid1(seg) ? 1 : seg->area_count - seg->segtype->parity_devs);
    }
 
    for (fa = first_area, s = 0; s < seg->area_count; s++) {
@@ -3955,6 +4000,82 @@ static int _lv_extend_layered_lv(struct alloc_handle *ah,
    return 1;
 }
 
+/* Adjust region and stripe size on very small LVs */
+void lv_adjust_region_and_stripe_size(struct logical_volume *lv)
+{
+   uint32_t size;
+   uint64_t area_size;
+   struct lv_segment *seg = first_seg(lv);
+
+   if (!seg)
+       return;
+
+   area_size = (uint64_t) seg->area_len * lv->vg->extent_size;
+
+   if (seg->region_size > area_size) {
+       size = _round_down_pow2(seg_lv(seg, 0)->size);
+       log_warn("Region size %s too large for LV %s size %s, rounded down to %s",
+                display_size(lv->vg->cmd, seg->region_size),
+                display_lvname(lv),
+                display_size(lv->vg->cmd, lv->size),
+                display_size(lv->vg->cmd, size));
+       seg->region_size = size;
+   }
+
+   if (seg->stripe_size > area_size) {
+       size = _round_down_pow2(seg_lv(seg, 0)->size);
+       log_warn("Stripe size %s too large for LV %s size %s, rounded down to %s",
+                display_size(lv->vg->cmd, seg->stripe_size),
+                display_lvname(lv),
+                display_size(lv->vg->cmd, lv->size),
+                display_size(lv->vg->cmd, size));
+       seg->stripe_size = size;
+   }
+}
+
+/* Check MetaLV size is sufficient for RaidLV @lv size */
+#define RAID_SUPERBLOCKS_SIZE (2 * 4096) /* dm-raid superblock and bitmap superblock */
+static int _raid_rmeta_size_sufficient(struct logical_volume *lv)
+{
+   uint32_t area_multiple;
+   uint64_t max_rimage_size;
+   uint64_t mlv_bytes;
+   struct lv_segment *seg = first_seg(lv);
+   struct logical_volume *mlv;
+
+   if (!seg ||
+       !seg_is_raid(seg) ||
+       !seg->region_size ||
+       !seg->meta_areas ||
+       !(mlv = seg_metalv(seg, 0)))
+       return 1;
+
+   mlv_bytes = mlv->size << SECTOR_SHIFT;
+   if (mlv_bytes < RAID_SUPERBLOCKS_SIZE) {
+       log_error("Metadata LV %s too small to even hold the RAID headers",
+                 display_lvname(mlv));
+       return 0;
+   }
+
+   /*
+    * Subtract space for 2 headers (superblock and bitmap)
+    * and calculate max image size in sectors
+    */
+   max_rimage_size = (mlv_bytes - RAID_SUPERBLOCKS_SIZE) * 8 * seg->region_size;
+
+   /* Calculate the maximum possible LV size */
+   /* FIXME: area_multiple needs to change once we support odd number of stripes in raid10 */
+   area_multiple = _calc_area_multiple(seg->segtype, seg->area_count, 0);
+   if (max_rimage_size * area_multiple < lv->size) {
+       log_error("Can't extend LV %s larger than %s because of MetaLV size",
+                 display_lvname(lv),
+                 display_size(lv->vg->cmd, max_rimage_size * area_multiple));
+       return 0;
+   }
+
+   return 1;
+}
+
 /*
  * Entry point for single-step LV allocation + extension.
  * Extents is the number of logical extents to append to the LV unless
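The bound enforced above comes from the dm-raid write-intent bitmap: after the two 4 KiB headers, each remaining bit of the metadata LV tracks one region of region_size sectors. A back-of-envelope sketch with example sizes (these are illustrative values, not defaults):

/* Back-of-envelope version of the MetaLV size check; sizes are examples only. */
#include <stdint.h>
#include <stdio.h>

#define RAID_SUPERBLOCKS_SIZE (2 * 4096) /* dm-raid superblock + bitmap superblock */

int main(void)
{
    uint64_t mlv_bytes = 1024 * 1024;  /* a 1 MiB rmeta LV */
    uint64_t region_size = 1024;       /* 512 KiB regions, in sectors */
    uint32_t area_multiple = 3;        /* raid5 with 3 data stripes */

    /* one bitmap bit tracks one region; payload is what's left after headers */
    uint64_t max_rimage = (mlv_bytes - RAID_SUPERBLOCKS_SIZE) * 8 * region_size;

    printf("max LV size: %llu sectors\n",
           (unsigned long long) (max_rimage * area_multiple));
    return 0;
}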
@@ -3972,7 +4093,7 @@ int lv_extend(struct logical_volume *lv,
              struct dm_list *allocatable_pvs, alloc_policy_t alloc,
              int approx_alloc)
 {
-   int r = 1;
+   int r = 1, empty = 0;
    int log_count = 0;
    struct alloc_handle *ah;
    uint32_t sub_lv_count;
@@ -3985,6 +4106,8 @@ int lv_extend(struct logical_volume *lv,
        return lv_add_virtual_segment(lv, 0u, extents, segtype);
 
    if (!lv->le_count) {
+       empty = 1;
+
        if (segtype_is_pool(segtype))
            /*
             * Pool allocations treat the metadata device like a mirror log.
@@ -4015,6 +4138,8 @@ int lv_extend(struct logical_volume *lv,
        if (!(r = lv_add_segment(ah, 0, ah->area_count, lv, segtype,
                                 stripe_size, 0u, 0)))
            stack;
+       if (empty)
+           lv_adjust_region_and_stripe_size(lv);
    } else {
        /*
         * For RAID, all the devices are AREA_LV.
@@ -4040,6 +4165,17 @@ int lv_extend(struct logical_volume *lv,
                                        stripes, stripe_size)))
            goto_out;
 
+       if (empty)
+           lv_adjust_region_and_stripe_size(lv);
+
+       if (!(r = _raid_rmeta_size_sufficient(lv))) {
+           if (!old_extents &&
+               (!lv_remove(lv) || !vg_write(lv->vg) || !vg_commit(lv->vg)))
+               return_0;
+
+           goto_out;
+       }
+
        /*
         * If we are expanding an existing mirror, we can skip the
         * resync of the extension if the LV is currently in-sync
@@ -4350,6 +4486,7 @@ static int _validate_stripesize(const struct volume_group *vg,
        return 0;
    }
 
+   /* Limit stripe size to extent size for non-RAID */
    if (lp->stripe_size > vg->extent_size) {
        log_print_unless_silent("Reducing stripe size %s to maximum, "
                                "physical extent size %s.",
@@ -4843,6 +4980,10 @@ static int _lvresize_adjust_extents(struct logical_volume *lv,
        (lp->sign == SIGN_NONE && (lp->extents < existing_extents)))
        reducing = 1;
 
+   lp->extents = _round_extents_to_boundary(lv->vg, lp->extents,
+                                            seg_is_mirrored(seg_last) ? 1 : seg_last->area_count - seg_last->segtype->parity_devs,
+                                            seg_last->stripe_size, !reducing);
+
    /* If extending, find properties of last segment */
    if (!reducing) {
        seg_mirrors = seg_is_mirrored(seg_last) ? lv_mirror_count(lv) : 0;
@@ -7101,6 +7242,17 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
        }
    }
 
+#if 1
+   /* FIXME: minimum 1K extent size??? */
+   if (lp->stripe_size > vg->extent_size &&
+       !seg_is_raid(lp)) {
+       log_print_unless_silent("Reducing requested stripe size %s to maximum, "
+                               "physical extent size %s.",
+                               display_size(cmd, (uint64_t) lp->stripe_size),
+                               display_size(cmd, (uint64_t) vg->extent_size));
+       lp->stripe_size = vg->extent_size;
+   }
+#else
    if (lp->stripe_size > vg->extent_size) {
        if (seg_is_raid(lp) && (vg->extent_size < STRIPE_SIZE_MIN)) {
            /*
@@ -7122,8 +7274,9 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
                                display_size(cmd, (uint64_t) vg->extent_size));
        lp->stripe_size = vg->extent_size;
    }
+#endif
 
-   lp->extents = _round_to_stripe_boundary(vg, lp->extents, lp->stripes, 1);
+   lp->extents = _round_extents_to_boundary(vg, lp->extents, lp->stripes, lp->stripe_size, 1);
 
    if (!lp->extents && !seg_is_thin_volume(lp)) {
        log_error(INTERNAL_ERROR "Unable to create new logical volume with no extents.");
@@ -7275,6 +7428,7 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
            status |= LV_NOTSYNCED;
        }
 
+       if (!seg_is_raid(lp))
        lp->region_size = adjusted_mirror_region_size(vg->extent_size,
                                                      lp->extents,
                                                      lp->region_size, 0,
@@ -413,9 +413,8 @@ int check_lv_segments(struct logical_volume *lv, int complete_vg)
            inc_error_count;
        }
 
-       area_multiplier = segtype_is_striped(seg->segtype) ?
-                               seg->area_count : 1;
-
+       area_multiplier = (segtype_is_striped(seg->segtype) || (seg_is_raid(seg) && !seg_is_raid1(seg))) ?
+                               (seg->area_count - seg->segtype->parity_devs) : 1;
        if (seg->area_len * area_multiplier != seg->len) {
            log_error("LV %s: segment %u has inconsistent "
                      "area_len %u",
@@ -30,10 +30,11 @@
 #define MAX_STRIPES 128U
 #define SECTOR_SHIFT 9L
 #define SECTOR_SIZE ( 1L << SECTOR_SHIFT )
-#define STRIPE_SIZE_MIN ( (unsigned) lvm_getpagesize() >> SECTOR_SHIFT)   /* PAGESIZE in sectors */
+#define STRIPE_SIZE_MIN 8 /* 8 sectors minimum to allow for raid takeover of striped */
 #define STRIPE_SIZE_MAX ( 512L * 1024L >> SECTOR_SHIFT)   /* 512 KB in sectors */
 #define STRIPE_SIZE_LIMIT ((UINT_MAX >> 2) + 1)
 #define MAX_RESTRICTED_LVS 255   /* Used by FMT_RESTRICTED_LVIDS */
+#define MIN_EXTENT_SIZE 8 /* 8 sectors minimum to allow for raid takeover of striped */
 #define MAX_EXTENT_SIZE ((uint32_t) -1)
 #define MIN_NON_POWER2_EXTENT_SIZE (128U * 2U)   /* 128KB in sectors */
@@ -825,6 +826,7 @@ int lv_rename_update(struct cmd_context *cmd, struct logical_volume *lv,
 /* Updates and reloads metadata for given lv */
 int lv_update_and_reload(struct logical_volume *lv);
 int lv_update_and_reload_origin(struct logical_volume *lv);
+void lv_adjust_region_and_stripe_size(struct logical_volume *lv);
 
 uint32_t extents_from_size(struct cmd_context *cmd, uint64_t size,
                           uint32_t extent_size);
@@ -1212,6 +1214,7 @@ int lv_raid_replace(struct logical_volume *lv, struct dm_list *remove_pvs,
                    struct dm_list *allocate_pvs);
 int lv_raid_remove_missing(struct logical_volume *lv);
 int partial_raid_lv_supports_degraded_activation(const struct logical_volume *lv);
+uint32_t lv_raid_metadata_area_len(uint32_t region_size, uint32_t extent_size);
 /* -- metadata/raid_manip.c */
 
 /* ++ metadata/cache_manip.c */
@@ -1008,6 +1008,11 @@ int vgcreate_params_validate(struct cmd_context *cmd,
        return 0;
    }
 
+   if (vp->extent_size < MIN_EXTENT_SIZE) {
+       log_error("Physical extent size < 4 KiB restricts RAID use.");
+       return 0;
+   }
+
    if (!(cmd->fmt->features & FMT_UNLIMITED_VOLS)) {
        if (!vp->max_lv)
            vp->max_lv = 255;
@@ -151,6 +151,26 @@ int lv_is_raid_with_tracking(const struct logical_volume *lv)
    return _lv_is_raid_with_tracking(lv, &tracking);
 }
 
+/* FIXME: remove lv_raid_metadata_area_len() after defining 'lv_raid_rmeta_extents' */
+uint32_t lv_raid_metadata_area_len(uint32_t region_size, uint32_t extent_size)
+{
+   uint32_t r;
+   uint64_t max_lv_size;
+
+   if (!region_size)
+       region_size++;
+
+   /* Ensure sensible minimum metadata device size until we get dynamic rmeta resizing... */
+   max_lv_size = UINT32_MAX / 2;
+   max_lv_size *= extent_size;
+   max_lv_size = min(max_lv_size, (uint64_t) 2048 * 1024 * 1024 * 128);
+   r = (max_lv_size / region_size / (8 * 2048 * extent_size) ?: 1);
+   if (r * extent_size < 2 * 12)
+       r = 2 * 12;
+
+   return r;
+}
+
 uint32_t lv_raid_image_count(const struct logical_volume *lv)
 {
    struct lv_segment *seg = first_seg(lv);
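Plugging example geometry into the formula shows the intent: size rmeta so its bitmap can cover the largest LV the formula allows. Below is a hedged standalone restatement (assuming, as above, the clamp to 2048*1024*1024*128 = 2^38 sectors); the geometry in main() is an example, not a default:

/* Standalone restatement of the rmeta sizing formula; example geometry only. */
#include <stdint.h>
#include <stdio.h>

static uint32_t rmeta_extents(uint32_t region_size, uint32_t extent_size)
{
    uint64_t max_lv_size = (uint64_t) (UINT32_MAX / 2) * extent_size;
    uint32_t r;

    if (!region_size)
        region_size++;

    if (max_lv_size > (uint64_t) 2048 * 1024 * 1024 * 128)
        max_lv_size = (uint64_t) 2048 * 1024 * 1024 * 128; /* 2^38 sectors */

    r = max_lv_size / region_size / (8 * 2048 * (uint64_t) extent_size);
    if (!r)
        r = 1;
    if (r * extent_size < 2 * 12)
        r = 2 * 12;
    return r;
}

int main(void)
{
    /* 4 MiB extents (8192 sectors), 512 KiB regions (1024 sectors) */
    printf("%u extents\n", rmeta_extents(1024, 8192)); /* -> 2 (an 8 MiB rmeta) */
    return 0;
}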
@@ -628,6 +648,9 @@ static int _alloc_image_components(struct logical_volume *lv,
    else
        region_size = seg->region_size;
 
+   if (!region_size)
+       region_size = get_default_region_size(lv->vg->cmd);
+
    if (seg_is_raid(seg))
        segtype = seg->segtype;
    else if (!(segtype = get_segtype_from_string(lv->vg->cmd, SEG_TYPE_NAME_RAID1)))
@@ -709,6 +732,7 @@ static int _alloc_rmeta_for_lv(struct logical_volume *data_lv,
                               struct logical_volume **meta_lv,
                               struct dm_list *allocate_pvs)
 {
+   uint32_t region_size;
    struct dm_list allocatable_pvs;
    struct alloc_handle *ah;
    struct lv_segment *seg = first_seg(data_lv);
@@ -735,9 +759,10 @@ static int _alloc_rmeta_for_lv(struct logical_volume *data_lv,
    if (!(base_name = top_level_lv_name(data_lv->vg, data_lv->name)))
        return_0;
 
+   region_size = seg->region_size ?: get_default_region_size(data_lv->vg->cmd);
    if (!(ah = allocate_extents(data_lv->vg, NULL, seg->segtype, 0, 1, 0,
                                seg->region_size,
-                               1 /*RAID_METADATA_AREA_LEN*/,
+                               lv_raid_metadata_area_len(region_size, data_lv->vg->extent_size),
                                allocate_pvs, data_lv->alloc, 0, NULL)))
        return_0;
||||
@@ -845,7 +870,9 @@ static int _raid_add_images_without_commit(struct logical_volume *lv,
|
||||
lv->status |= RAID;
|
||||
seg = first_seg(lv);
|
||||
seg_lv(seg, 0)->status |= RAID_IMAGE | LVM_READ | LVM_WRITE;
|
||||
seg->region_size = get_default_region_size(lv->vg->cmd);
|
||||
if (!seg->region_size)
|
||||
seg->region_size = get_default_region_size(lv->vg->cmd);
|
||||
seg->stripe_size = 0;
|
||||
|
||||
/* MD's bitmap is limited to tracking 2^21 regions */
|
||||
while (seg->region_size < (lv->size / (1 << 21))) {
|
||||
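The loop body is elided by the hunk; assuming the usual doubling of region_size until at most 2^21 regions remain, the adjustment for a 1 TiB LV looks like this sketch (values illustrative, the doubling is an assumption about the elided body):

/* Sketch of why region_size may be raised: md's bitmap tracks <= 2^21 regions. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t lv_size = (uint64_t) 1 << 31; /* 1 TiB in 512-byte sectors */
    uint64_t region_size = 512;            /* 256 KiB default, in sectors */

    while (region_size < (lv_size / (1 << 21)))
        region_size *= 2; /* assumed loop body */

    printf("region_size: %llu sectors\n",
           (unsigned long long) region_size); /* -> 1024 (512 KiB) */
    return 0;
}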
@@ -2520,7 +2547,7 @@ static struct possible_takeover_reshape_type _possible_takeover_reshape_types[]
      .current_areas = 1,
      .options = ALLOW_STRIPE_SIZE },
    { .current_types  = SEG_STRIPED_TARGET, /* striped, i.e. seg->area_count > 1 */
-     .possible_types = SEG_RAID0|SEG_RAID0_META,
+     .possible_types = SEG_RAID0|SEG_RAID0_META|SEG_RAID4,
      .current_areas = ~0U,
      .options = ALLOW_NONE },
    /* raid0* -> */
@@ -2886,7 +2913,7 @@ static int _raid456_to_raid0_or_striped_wrapper(TAKEOVER_FN_ARGS)
        return_0;
 
    /* FIXME Hard-coded raid4 to raid0 */
-   seg->area_len = seg->extents_copied = seg->area_len / seg->area_count;
+   seg->area_len = seg->extents_copied = seg->len / (seg->area_count - seg->segtype->parity_devs);
 
    if (segtype_is_striped_target(new_segtype)) {
        if (!_convert_raid0_to_striped(lv, 0, &removal_lvs))
@@ -2972,10 +2999,13 @@ static int _striped_or_raid0_to_raid45610_wrapper(TAKEOVER_FN_ARGS)
 
    seg->segtype = new_segtype;
    seg->region_size = new_region_size;
-   /* FIXME Hard-coded raid0 to raid4 */
-   seg->area_len = seg->len;
 
-   _check_and_adjust_region_size(lv);
+   /* FIXME Hard-coded raid0 -> raid4 */
+   if (seg_is_any_raid0(seg))
+       seg->area_len = seg->extents_copied = seg->len / (seg->area_count - seg->segtype->parity_devs);
+
+   lv_adjust_region_and_stripe_size(lv);
 
    log_debug_metadata("Updating VG metadata and reloading %s LV %s",
                       lvseg_name(seg), display_lvname(lv));
@@ -3065,7 +3095,7 @@ static int _takeover_from_raid0_to_raid10(TAKEOVER_FN_ARGS)
 
 static int _takeover_from_raid0_to_raid45(TAKEOVER_FN_ARGS)
 {
-   return _striped_or_raid0_to_raid45610_wrapper(lv, new_segtype, yes, force, first_seg(lv)->area_count + 1, 1 /* data_copies */, 0, 0, new_region_size, allocate_pvs);
+   return _striped_or_raid0_to_raid45610_wrapper(lv, new_segtype, yes, force, first_seg(lv)->area_count + 1, 2 /* data_copies */, 0, 0, new_region_size, allocate_pvs);
 }
 
 static int _takeover_from_raid0_to_raid6(TAKEOVER_FN_ARGS)
@@ -3111,7 +3141,7 @@ static int _takeover_from_raid0_meta_to_raid10(TAKEOVER_FN_ARGS)
 
 static int _takeover_from_raid0_meta_to_raid45(TAKEOVER_FN_ARGS)
 {
-   return _striped_or_raid0_to_raid45610_wrapper(lv, new_segtype, yes, force, first_seg(lv)->area_count + 1, 1 /* data_copies */, 0, 0, new_region_size, allocate_pvs);
+   return _striped_or_raid0_to_raid45610_wrapper(lv, new_segtype, yes, force, first_seg(lv)->area_count + 1, 2 /* data_copies */, 0, 0, new_region_size, allocate_pvs);
 }
 
 static int _takeover_from_raid0_meta_to_raid6(TAKEOVER_FN_ARGS)
@@ -3384,12 +3414,6 @@ static int _set_convenient_raid456_segtype_to(const struct lv_segment *seg_from,
           !segtype_is_raid5_n(*segtype)) {
        log_error("Conversion to raid5_n not yet supported.");
        return 0;
-
-   /* If this is any raid6 conversion request -> enforce raid6_n_6, because we convert from striped */
-   } else if (segtype_is_any_raid6(*segtype) &&
-              !segtype_is_raid6_n_6(*segtype)) {
-       log_error("Conversion to raid6_n_6 not yet supported.");
-       return 0;
    }
 
    /* Got to do check for raid5 -> raid6 ... */
@@ -145,7 +145,7 @@ struct dev_manager;
 #define segtype_is_unknown(segtype) ((segtype)->flags & SEG_UNKNOWN ? 1 : 0)
 
 #define segtype_supports_stripe_size(segtype) \
-   ((segtype_is_striped(segtype) || segtype_is_mirror(segtype) || \
+   ((segtype_is_striped(segtype) || \
      segtype_is_cache(segtype) || segtype_is_cache_pool(segtype) || \
      segtype_is_thin(segtype) || segtype_is_snapshot(segtype) || \
      (segtype_is_raid(segtype) && !segtype_is_raid1(segtype))) ? 1 : 0)
@@ -19,7 +19,10 @@ aux have_raid 1 3 2 || skip
 aux prepare_vg 8
 
 # Delay 1st leg so that rebuilding status characters can be read
-aux delay_dev "$dev1" 0 3
+for d in "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8"
+do
+   aux delay_dev $d 0 3
+done
 
 # rhbz 1064592
 
@@ -514,6 +514,8 @@ static int _read_raid_params(struct cmd_context *cmd,
                      lp->segtype->name);
            return 0;
        }
+   } else if (!lp->stripe_size) {
+       ; // lp->stripe_size = find_config_tree_int(cmd, metadata_stripesize_CFG, NULL) * 2;
    }
 
    if (arg_is_set(cmd, mirrors_ARG) && segtype_is_raid(lp->segtype) &&
@@ -545,7 +547,6 @@ static int _read_raid_params(struct cmd_context *cmd,
 
 static int _read_mirror_and_raid_params(struct cmd_context *cmd,
                                        struct lvcreate_params *lp)
 {
-   int pagesize = lvm_getpagesize();
    unsigned max_images;
 
    if (seg_is_raid(lp)) {
@@ -622,12 +623,20 @@ static int _read_mirror_and_raid_params(struct cmd_context *cmd,
        return 0;
    }
 
+#if 1
+   if (lp->region_size && !is_power_of_2(lp->region_size)) {
+       log_error("Region size (%" PRIu32 ") must be power of 2",
+                 lp->region_size);
+       return 0;
+   }
+#else
    if (lp->region_size % (pagesize >> SECTOR_SHIFT)) {
        log_error("Region size (%" PRIu32 ") must be a multiple of "
                  "machine memory page size (%d)",
                  lp->region_size, pagesize >> SECTOR_SHIFT);
        return 0;
    }
+#endif
 
    if (seg_is_mirror(lp) && !_read_mirror_params(cmd, lp))
        return_0;
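The power-of-2 test replaces the old page-size-multiple rule. is_power_of_2() is the standard bit trick; a self-contained sketch (not lvm2's own definition) for readers unfamiliar with it:

/* The usual bit trick behind a power-of-2 check; sketch only. */
#include <stdint.h>
#include <stdio.h>

static int is_power_of_2(uint32_t n)
{
    return n && !(n & (n - 1)); /* exactly one bit set */
}

int main(void)
{
    printf("%d %d %d\n", is_power_of_2(512), is_power_of_2(1024), is_power_of_2(768));
    /* -> 1 1 0: a 768-sector region size would now be rejected */
    return 0;
}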
@@ -1274,17 +1283,10 @@ static int _check_raid_parameters(struct volume_group *vg,
                                  struct lvcreate_cmdline_params *lcp)
 {
    unsigned devs = lcp->pv_count ? : dm_list_size(&vg->pvs);
-   uint64_t page_sectors = lvm_getpagesize() >> SECTOR_SHIFT;
    struct cmd_context *cmd = vg->cmd;
    int old_stripes = !arg_is_set(cmd, stripes_ARG) &&
                      find_config_tree_bool(cmd, allocation_raid_stripe_all_devices_CFG, NULL);
 
-   if (vg->extent_size < page_sectors) {
-       log_error("Unable to create RAID LV: requires minimum VG extent size %s",
-                 display_size(vg->cmd, page_sectors));
-       return 0;
-   }
-
    /*
     * If we requested the previous behaviour by setting
     * "allocation/raid_stripe_all_devices = 1" and the
@@ -1278,6 +1278,20 @@ static int _validate_stripe_params(struct cmd_context *cmd, const struct segment
 {
    int stripe_size_required = segtype_supports_stripe_size(segtype);
 
+#if 1
+   if (stripe_size_required) {
+       if (*stripes == 1 && segtype_is_mirror(segtype)) {
+           stripe_size_required = 0;
+           if (*stripe_size) {
+               log_print_unless_silent("Ignoring stripesize argument with single stripe.");
+               *stripe_size = 0;
+           }
+       }
+   } else if (*stripe_size) {
+       log_print_unless_silent("Ignoring stripesize argument for %s devices.", segtype->name);
+       *stripe_size = 0;
+   }
+#else
    if (!stripe_size_required && *stripe_size) {
        log_print_unless_silent("Ignoring stripesize argument for %s devices.", segtype->name);
        *stripe_size = 0;
@@ -1288,6 +1302,7 @@ static int _validate_stripe_params(struct cmd_context *cmd, const struct segment
            *stripe_size = 0;
        }
    }
+#endif
 
    if (stripe_size_required) {
        if (!*stripe_size) {