Mirror of git://sourceware.org/git/lvm2.git

Compare commits


2 Commits

Author SHA1 Message Date
Heinz Mauelshagen
c6d7cd1489 metadata: adjust extents_round_to_boundary(); remove pagesize check in lvcreate.c 2016-08-18 00:45:12 +02:00
Heinz Mauelshagen
eec8bd228c metadata: support tiny extent sizes better
RAID requires minimum allocation units for its SubLVs, because
the kernel does page-sized I/O on them. Enforce 64 KiB units to be
safe in case RaidLVs are accessed on PPC, where the page size is 64 KiB.
2016-08-18 00:16:13 +02:00
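To illustrate the 64 KiB constraint, a minimal standalone sketch (hypothetical code, not part of this diff; the macro value matches the RAID_ALLOC_CHUNK_SECTORS definition the diff adds):

#include <stdio.h>
#include <stdint.h>

#define RAID_ALLOC_CHUNK_SECTORS (64 * 2) /* 64 KiB in 512-byte sectors */

/* Round a size in sectors up to the next 64 KiB boundary. */
static uint64_t round_up_to_raid_chunk(uint64_t sectors)
{
	uint64_t rest = sectors % RAID_ALLOC_CHUNK_SECTORS;

	return rest ? sectors + (RAID_ALLOC_CHUNK_SECTORS - rest) : sectors;
}

int main(void)
{
	/* A 12 KiB request (24 sectors) grows to 64 KiB (128 sectors). */
	printf("%llu\n", (unsigned long long) round_up_to_raid_chunk(24));

	return 0;
}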
23 changed files with 261 additions and 702 deletions

View File

@@ -1,11 +1,5 @@
Version 2.02.165 -
===================================
Avoid PV tags when checking allocation against parallel PVs.
Disallow mirror conversions of raid10 volumes.
Fix dmeventd unmonitoring when segment type (and dso) changes.
Don't allow lvconvert --repair on raid0 devices or attempt to monitor them.
No longer adjust incorrect number of raid stripes supplied to lvcreate.
Move lcm and gcd to lib/misc.
Suppress some unnecessary --stripesize parameter warnings.
Fix 'pvmove -n name ...' to prohibit collocation of RAID SubLVs.

View File

@@ -50,7 +50,6 @@
@top_srcdir@/lib/misc/lvm-file.h
@top_srcdir@/lib/misc/lvm-flock.h
@top_srcdir@/lib/misc/lvm-globals.h
@top_srcdir@/lib/misc/lvm-maths.h
@top_srcdir@/lib/misc/lvm-percent.h
@top_srcdir@/lib/misc/lvm-signal.h
@top_srcdir@/lib/misc/lvm-string.h

View File

@@ -112,7 +112,6 @@ SOURCES =\
misc/lvm-file.c \
misc/lvm-flock.c \
misc/lvm-globals.c \
misc/lvm-maths.c \
misc/lvm-signal.c \
misc/lvm-string.c \
misc/lvm-wrappers.c \

View File

@@ -1548,7 +1548,7 @@ static struct dm_event_handler *_create_dm_event_handler(struct cmd_context *cmd
if (dm_event_handler_set_dmeventd_path(dmevh, find_config_tree_str(cmd, dmeventd_executable_CFG, NULL)))
goto_bad;
if (dso && dm_event_handler_set_dso(dmevh, dso))
if (dm_event_handler_set_dso(dmevh, dso))
goto_bad;
if (dm_event_handler_set_uuid(dmevh, dmuuid))
@@ -1592,39 +1592,6 @@ static char *_build_target_uuid(struct cmd_context *cmd, const struct logical_vo
return build_dm_uuid(cmd->mem, lv, layer);
}
static int _device_registered_with_dmeventd(struct cmd_context *cmd, const struct logical_volume *lv, int *pending, const char **dso)
{
char *uuid;
enum dm_event_mask evmask = 0;
struct dm_event_handler *dmevh;
*pending = 0;
if (!(uuid = _build_target_uuid(cmd, lv)))
return_0;
if (!(dmevh = _create_dm_event_handler(cmd, uuid, NULL, 0, DM_EVENT_ALL_ERRORS)))
return_0;
if (dm_event_get_registered_device(dmevh, 0)) {
dm_event_handler_destroy(dmevh);
return 0;
}
evmask = dm_event_handler_get_event_mask(dmevh);
if (evmask & DM_EVENT_REGISTRATION_PENDING) {
*pending = 1;
evmask &= ~DM_EVENT_REGISTRATION_PENDING;
}
if (dso && (*dso = dm_event_handler_get_dso(dmevh)) && !(*dso = dm_pool_strdup(cmd->mem, *dso)))
log_error("Failed to duplicate dso name.");
dm_event_handler_destroy(dmevh);
return evmask;
}
int target_registered_with_dmeventd(struct cmd_context *cmd, const char *dso,
const struct logical_volume *lv, int *pending)
{
@@ -1707,8 +1674,6 @@ int monitor_dev_for_events(struct cmd_context *cmd, const struct logical_volume
uint32_t s;
static const struct lv_activate_opts zlaopts = { 0 };
struct lvinfo info;
const char *dso;
int new_unmonitor;
if (!laopts)
laopts = &zlaopts;
@@ -1816,58 +1781,42 @@ int monitor_dev_for_events(struct cmd_context *cmd, const struct logical_volume
!seg->segtype->ops->target_monitored) /* doesn't support registration */
continue;
if (!monitor)
/* When unmonitoring, obtain existing dso being used. */
monitored = _device_registered_with_dmeventd(cmd, seg->lv, &pending, &dso);
else
monitored = seg->segtype->ops->target_monitored(seg, &pending);
monitored = seg->segtype->ops->target_monitored(seg, &pending);
/* FIXME: We should really try again if pending */
monitored = (pending) ? 0 : monitored;
monitor_fn = NULL;
new_unmonitor = 0;
if (monitor) {
if (monitored)
log_verbose("%s already monitored.", display_lvname(lv));
else if (seg->segtype->ops->target_monitor_events) {
log_verbose("Monitoring %s%s", display_lvname(lv), test_mode() ? " [Test mode: skipping this]" : "");
else if (seg->segtype->ops->target_monitor_events)
monitor_fn = seg->segtype->ops->target_monitor_events;
}
} else {
if (!monitored)
log_verbose("%s already not monitored.", display_lvname(lv));
else if (*dso) {
/*
* Divert unmonitor away from code that depends on the new segment
* type instead of the existing one if it's changing.
*/
log_verbose("Not monitoring %s with %s%s", display_lvname(lv), dso, test_mode() ? " [Test mode: skipping this]" : "");
new_unmonitor = 1;
}
else if (seg->segtype->ops->target_unmonitor_events)
monitor_fn = seg->segtype->ops->target_unmonitor_events;
}
/* Do [un]monitor */
if (!monitor_fn)
continue;
log_verbose("%sonitoring %s%s", monitor ? "M" : "Not m", display_lvname(lv),
test_mode() ? " [Test mode: skipping this]" : "");
/* FIXME Test mode should really continue a bit further. */
if (test_mode())
continue;
if (new_unmonitor) {
if (!target_register_events(cmd, dso, lv, 0, 0, 10)) {
log_error("%s: segment unmonitoring failed.",
display_lvname(lv));
return 0;
}
} else if (monitor_fn) {
/* FIXME specify events */
if (!monitor_fn(seg, 0)) {
log_error("%s: %s segment monitoring function failed.",
display_lvname(lv), seg->segtype->name);
return 0;
}
} else
continue;
/* FIXME specify events */
if (!monitor_fn(seg, 0)) {
log_error("%s: %s segment monitoring function failed.",
display_lvname(lv), seg->segtype->name);
return 0;
}
/* Check [un]monitor results */
/* Try a couple times if pending, but not forever... */
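For orientation, a simplified paraphrase of the unmonitor diversion introduced above (names taken from this hunk; error handling and the surrounding segment loop trimmed):

if (!monitor)
	/* Ask dmeventd which dso registered the LV, so unmonitoring uses
	 * the dso that did the monitoring rather than one derived from
	 * the LV's (possibly changed) segment type. */
	monitored = _device_registered_with_dmeventd(cmd, seg->lv, &pending, &dso);

if (!monitor && monitored && *dso)
	new_unmonitor = 1;

if (new_unmonitor &&
    !target_register_events(cmd, dso, lv, 0 /* 0 = unregister, per the hunk above */, 0, 10))
	return 0;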

View File

@@ -391,7 +391,7 @@ static int _read_segment(struct logical_volume *lv, const struct dm_config_node
if (!(seg = alloc_lv_segment(segtype, lv, start_extent,
extent_count, 0, 0, NULL, area_count,
segtype->parity_devs ? (extent_count / (area_count - segtype->parity_devs)) : extent_count, 0, 0, 0, NULL))) {
extent_count, 0, 0, 0, NULL))) {
log_error("Segment allocation failed");
return 0;
}

View File

@@ -875,50 +875,37 @@ dm_percent_t copy_percent(const struct logical_volume *lv)
return denominator ? dm_make_percent(numerator, denominator) : DM_PERCENT_100;
}
/* Round any tiny extents to multiples of 4K */
#define MINIMUM_ALLOCATION_SECTORS 8
static uint32_t _round_extents(uint32_t extents, uint32_t extent_size, int extend)
{
uint64_t size = (uint64_t) extents * extent_size;
uint64_t rest = size % MINIMUM_ALLOCATION_SECTORS;
if (!rest)
return extents;
if (!size)
return 0;
rest = MINIMUM_ALLOCATION_SECTORS - rest;
return (size + (extend ? rest : -(MINIMUM_ALLOCATION_SECTORS - rest))) / extent_size;
}
/* Round up extents to next stripe boundary for number of stripes and ensure minimum sizes */
static uint32_t _round_extents_to_boundary(struct volume_group *vg, uint32_t extents,
uint32_t stripes, uint32_t stripe_size, int extend)
/*
* Round up @extents to next stripe boundary number of
* @stripes (if any) and/or to next RAID io boundary.
*/
uint32_t extents_round_to_boundary(struct volume_group *vg,
const struct segment_type *segtype,
uint32_t extents,
uint32_t stripes,
int extend)
{
int ensure_raid_min = segtype_is_raid(segtype);
uint32_t size_rest, new_extents = extents;
if (stripes < 2)
return _round_extents(extents, vg->extent_size, extend);
do {
/* Round up extents to stripe divisible amount if given @stripes */
if (stripes > 1 && (size_rest = new_extents % stripes))
new_extents += extend ? stripes - size_rest : -size_rest;
redo:
/* Round up extents to stripe divisible amount */
if ((size_rest = new_extents % stripes))
new_extents += extend ? stripes - size_rest : -size_rest;
if (ensure_raid_min) {
/* Require multiples of 64 KiB to not fail in kernel RAID page size IO */
if ((new_extents * vg->extent_size) % ((stripes ?: 1) * RAID_ALLOC_CHUNK_SECTORS))
extend ? new_extents++ : new_extents--;
else
ensure_raid_min = 0;
}
} while (ensure_raid_min);
if (stripes > 1 && stripe_size > 1) {
uint32_t tmp = new_extents;
if ((new_extents = _round_extents(tmp / stripes, vg->extent_size, extend) * stripes) != tmp)
goto redo;
}
log_debug("Adjusted allocation request of %" PRIu32 " to %" PRIu32 " logical extents.", extents, new_extents);
if (new_extents != extents)
log_print_unless_silent("Rounding size %s (%d extents) up to boundary size %s (%d extents).",
if (extents != new_extents)
log_print_unless_silent("Rounding size %s (%d extents) up to %s i/o boundary size %s (%d extents).",
display_size(vg->cmd, (uint64_t) extents * vg->extent_size), extents,
segtype_is_raid(segtype) ? (stripes > 1 ? "stripe/RAID" : "RAID") : "stripe",
display_size(vg->cmd, (uint64_t) new_extents * vg->extent_size), new_extents);
return new_extents;
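A worked trace of the new rounding loop, under assumed values (the 1 KiB extent size is hypothetical):

/*
 * Assume vg->extent_size = 2 sectors (1 KiB), segtype = raid5,
 * stripes = 2, extents = 5, extend = 1:
 *
 *   5 % 2 = 1              -> new_extents = 6  (stripe boundary)
 *   6 * 2 % (2 * 128) = 12 -> new_extents = 7  (not on 64 KiB chunks)
 *   7 % 2 = 1              -> new_extents = 8
 *   ... the loop keeps nudging upward until both tests pass ...
 *   128 * 2 % 256 = 0      -> done: 128 extents, 64 KiB per stripe
 */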
@@ -1229,8 +1216,6 @@ static uint32_t _calc_area_multiple(const struct segment_type *segtype,
}
/*
* FIXME:
*
* RAID10 - only has 2-way mirror right now.
* If we are to move beyond 2-way RAID10, then
* the 'stripes' argument will always need to
@@ -1258,14 +1243,14 @@ static int _lv_segment_reduce(struct lv_segment *seg, uint32_t reduction)
uint32_t area_reduction, s;
/* Caller must ensure exact divisibility */
if (seg_is_striped(seg) || (seg_is_raid(seg) && !seg_is_raid1(seg))) {
if (reduction % (seg->area_count - seg->segtype->parity_devs)) {
if (seg_is_striped(seg)) {
if (reduction % seg->area_count) {
log_error("Segment extent reduction %" PRIu32
" not divisible by #stripes %" PRIu32,
reduction, seg->area_count);
return 0;
}
area_reduction = reduction / (seg->area_count - seg->segtype->parity_devs);
area_reduction = (reduction / seg->area_count);
} else
area_reduction = reduction;
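For example (assumed layout): under the parity-aware check, a raid5 segment with area_count = 4 and parity_devs = 1 only accepts reductions divisible by 3, its data stripe count, so each data area shrinks by the same number of extents.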
@@ -1284,7 +1269,7 @@ static int _lv_segment_reduce(struct lv_segment *seg, uint32_t reduction)
*/
static int _lv_reduce(struct logical_volume *lv, uint32_t extents, int delete)
{
struct lv_segment *seg;
struct lv_segment *seg = first_seg(lv);
uint32_t count = extents;
uint32_t reduction;
struct logical_volume *pool_lv;
@@ -1295,6 +1280,9 @@ static int _lv_reduce(struct logical_volume *lv, uint32_t extents, int delete)
clear_snapshot_merge(lv);
}
if (!delete && seg)
extents = extents_round_to_boundary(lv->vg, seg->segtype, extents, seg->area_count - seg->segtype->parity_devs, 0);
dm_list_iterate_back_items(seg, &lv->segments) {
if (!count)
break;
@@ -1614,8 +1602,7 @@ static int _sufficient_pes_free(struct alloc_handle *ah, struct dm_list *pvms,
{
uint32_t area_extents_needed = (extents_still_needed - allocated) * ah->area_count / ah->area_multiple;
uint32_t parity_extents_needed = (extents_still_needed - allocated) * ah->parity_count / ah->area_multiple;
uint32_t metadata_extents_needed = ah->alloc_and_split_meta ? 0 :
ah->metadata_area_count * lv_raid_metadata_area_len(ah->region_size, extent_size);
uint32_t metadata_extents_needed = ah->alloc_and_split_meta ? 0 : ah->metadata_area_count * lv_raid_metadata_area_len(extent_size);
uint32_t total_extents_needed = area_extents_needed + parity_extents_needed + metadata_extents_needed;
uint32_t free_pes = pv_maps_size(pvms);
@@ -1704,6 +1691,49 @@ static void _init_alloc_parms(struct alloc_handle *ah,
alloc_parms->flags |= A_CAN_SPLIT;
}
static int _log_parallel_areas(struct dm_pool *mem, struct dm_list *parallel_areas)
{
struct seg_pvs *spvs;
struct pv_list *pvl;
char *pvnames;
if (!parallel_areas)
return 1;
dm_list_iterate_items(spvs, parallel_areas) {
if (!dm_pool_begin_object(mem, 256)) {
log_error("dm_pool_begin_object failed");
return 0;
}
dm_list_iterate_items(pvl, &spvs->pvs) {
if (!dm_pool_grow_object(mem, pv_dev_name(pvl->pv), strlen(pv_dev_name(pvl->pv)))) {
log_error("dm_pool_grow_object failed");
dm_pool_abandon_object(mem);
return 0;
}
if (!dm_pool_grow_object(mem, " ", 1)) {
log_error("dm_pool_grow_object failed");
dm_pool_abandon_object(mem);
return 0;
}
}
if (!dm_pool_grow_object(mem, "\0", 1)) {
log_error("dm_pool_grow_object failed");
dm_pool_abandon_object(mem);
return 0;
}
pvnames = dm_pool_end_object(mem);
log_debug_alloc("Parallel PVs at LE %" PRIu32 " length %" PRIu32 ": %s",
spvs->le, spvs->len, pvnames);
dm_pool_free(mem, pvnames);
}
return 1;
}
/* Handles also stacking */
static int _setup_lv_size(struct logical_volume *lv, uint32_t extents)
{
@@ -2017,20 +2047,25 @@ static int _is_same_pv(struct pv_match *pvmatch __attribute((unused)), struct pv
/*
* Does PV area have a tag listed in allocation/cling_tag_list that
* matches EITHER a tag of the PV of the existing segment OR a tag in pv_tags?
* If mem is set, then instead we append a list of matching tags for printing to the object there.
* If tags_list_str is set, then instead we generate a list of matching tags for printing.
*/
static int _match_pv_tags(const struct dm_config_node *cling_tag_list_cn,
struct physical_volume *pv1, uint32_t pv1_start_pe, uint32_t area_num,
struct physical_volume *pv2, struct dm_list *pv_tags, unsigned validate_only,
struct dm_pool *mem, unsigned parallel_pv)
struct dm_pool *mem, const char **tags_list_str)
{
const struct dm_config_value *cv;
const char *str;
const char *tag_matched;
struct dm_list *tags_to_match = mem ? NULL : pv_tags ? : &pv2->tags;
struct dm_list *tags_to_match = tags_list_str ? NULL : pv_tags ? : &pv2->tags;
struct dm_str_list *sl;
unsigned first_tag = 1;
if (tags_list_str && !dm_pool_begin_object(mem, 256)) {
log_error("PV tags string allocation failed");
return 0;
}
for (cv = cling_tag_list_cn->v; cv; cv = cv->next) {
if (cv->type != DM_CFG_STRING) {
if (validate_only)
@@ -2067,14 +2102,16 @@ static int _match_pv_tags(const struct dm_config_node *cling_tag_list_cn,
/* Wildcard matches any tag against any tag. */
if (!strcmp(str, "*")) {
if (mem) {
if (tags_list_str) {
dm_list_iterate_items(sl, &pv1->tags) {
if (!first_tag && !dm_pool_grow_object(mem, ",", 0)) {
dm_pool_abandon_object(mem);
log_error("PV tags string extension failed.");
return 0;
}
first_tag = 0;
if (!dm_pool_grow_object(mem, sl->str, 0)) {
dm_pool_abandon_object(mem);
log_error("PV tags string extension failed.");
return 0;
}
@@ -2084,14 +2121,10 @@ static int _match_pv_tags(const struct dm_config_node *cling_tag_list_cn,
if (!str_list_match_list(&pv1->tags, tags_to_match, &tag_matched))
continue;
else {
if (!pv_tags) {
if (parallel_pv)
log_debug_alloc("Not using free space on %s: Matched allocation PV tag %s on existing parallel PV %s.",
pv_dev_name(pv1), tag_matched, pv2 ? pv_dev_name(pv2) : "-");
else
log_debug_alloc("Matched allocation PV tag %s on existing %s with free space on %s.",
tag_matched, pv_dev_name(pv1), pv2 ? pv_dev_name(pv2) : "-");
} else
if (!pv_tags)
log_debug_alloc("Matched allocation PV tag %s on existing %s with free space on %s.",
tag_matched, pv_dev_name(pv1), pv2 ? pv_dev_name(pv2) : "-");
else
log_debug_alloc("Eliminating allocation area %" PRIu32 " at PV %s start PE %" PRIu32
" from consideration: PV tag %s already used.",
area_num, pv_dev_name(pv1), pv1_start_pe, tag_matched);
@@ -2103,26 +2136,24 @@ static int _match_pv_tags(const struct dm_config_node *cling_tag_list_cn,
(tags_to_match && !str_list_match_item(tags_to_match, str)))
continue;
else {
if (mem) {
if (tags_list_str) {
if (!first_tag && !dm_pool_grow_object(mem, ",", 0)) {
dm_pool_abandon_object(mem);
log_error("PV tags string extension failed.");
return 0;
}
first_tag = 0;
if (!dm_pool_grow_object(mem, str, 0)) {
dm_pool_abandon_object(mem);
log_error("PV tags string extension failed.");
return 0;
}
continue;
}
if (!pv_tags) {
if (parallel_pv)
log_debug_alloc("Not using free space on %s: Matched allocation PV tag %s on existing parallel PV %s.",
pv2 ? pv_dev_name(pv2) : "-", str, pv_dev_name(pv1));
else
log_debug_alloc("Matched allocation PV tag %s on existing %s with free space on %s.",
str, pv_dev_name(pv1), pv2 ? pv_dev_name(pv2) : "-");
} else
if (!pv_tags)
log_debug_alloc("Matched allocation PV tag %s on existing %s with free space on %s.",
str, pv_dev_name(pv1), pv2 ? pv_dev_name(pv2) : "-");
else
log_debug_alloc("Eliminating allocation area %" PRIu32 " at PV %s start PE %" PRIu32
" from consideration: PV tag %s already used.",
area_num, pv_dev_name(pv1), pv1_start_pe, str);
@@ -2130,25 +2161,32 @@ static int _match_pv_tags(const struct dm_config_node *cling_tag_list_cn,
}
}
if (mem)
if (tags_list_str) {
if (!dm_pool_grow_object(mem, "\0", 1)) {
dm_pool_abandon_object(mem);
log_error("PV tags string extension failed.");
return 0;
}
*tags_list_str = dm_pool_end_object(mem);
return 1;
}
return 0;
}
static int _validate_tag_list(const struct dm_config_node *cling_tag_list_cn)
{
return _match_pv_tags(cling_tag_list_cn, NULL, 0, 0, NULL, NULL, 1, NULL, 0);
return _match_pv_tags(cling_tag_list_cn, NULL, 0, 0, NULL, NULL, 1, NULL, NULL);
}
static int _tags_list_str(struct dm_pool *mem, struct physical_volume *pv1, const struct dm_config_node *cling_tag_list_cn)
static const char *_tags_list_str(struct alloc_handle *ah, struct physical_volume *pv1)
{
if (!_match_pv_tags(cling_tag_list_cn, pv1, 0, 0, NULL, NULL, 0, mem, 0)) {
dm_pool_abandon_object(mem);
return_0;
}
const char *tags_list_str;
return 1;
if (!_match_pv_tags(ah->cling_tag_list_cn, pv1, 0, 0, NULL, NULL, 0, ah->mem, &tags_list_str))
return_NULL;
return tags_list_str;
}
/*
@@ -2159,7 +2197,7 @@ static int _pv_has_matching_tag(const struct dm_config_node *cling_tag_list_cn,
struct physical_volume *pv1, uint32_t pv1_start_pe, uint32_t area_num,
struct dm_list *pv_tags)
{
return _match_pv_tags(cling_tag_list_cn, pv1, pv1_start_pe, area_num, NULL, pv_tags, 0, NULL, 0);
return _match_pv_tags(cling_tag_list_cn, pv1, pv1_start_pe, area_num, NULL, pv_tags, 0, NULL, NULL);
}
/*
@@ -2167,82 +2205,14 @@ static int _pv_has_matching_tag(const struct dm_config_node *cling_tag_list_cn,
* matches a tag of the PV of the existing segment?
*/
static int _pvs_have_matching_tag(const struct dm_config_node *cling_tag_list_cn,
struct physical_volume *pv1, struct physical_volume *pv2,
unsigned parallel_pv)
struct physical_volume *pv1, struct physical_volume *pv2)
{
return _match_pv_tags(cling_tag_list_cn, pv1, 0, 0, pv2, NULL, 0, NULL, parallel_pv);
return _match_pv_tags(cling_tag_list_cn, pv1, 0, 0, pv2, NULL, 0, NULL, NULL);
}
static int _has_matching_pv_tag(struct pv_match *pvmatch, struct pv_segment *pvseg, struct pv_area *pva)
{
return _pvs_have_matching_tag(pvmatch->cling_tag_list_cn, pvseg->pv, pva->map->pv, 0);
}
static int _log_parallel_areas(struct dm_pool *mem, struct dm_list *parallel_areas,
const struct dm_config_node *cling_tag_list_cn)
{
struct seg_pvs *spvs;
struct pv_list *pvl;
char *pvnames;
unsigned first;
if (!parallel_areas)
return 1;
dm_list_iterate_items(spvs, parallel_areas) {
first = 1;
if (!dm_pool_begin_object(mem, 256)) {
log_error("dm_pool_begin_object failed");
return 0;
}
dm_list_iterate_items(pvl, &spvs->pvs) {
if (!first && !dm_pool_grow_object(mem, " ", 1)) {
log_error("dm_pool_grow_object failed");
dm_pool_abandon_object(mem);
return 0;
}
if (!dm_pool_grow_object(mem, pv_dev_name(pvl->pv), strlen(pv_dev_name(pvl->pv)))) {
log_error("dm_pool_grow_object failed");
dm_pool_abandon_object(mem);
return 0;
}
if (cling_tag_list_cn) {
if (!dm_pool_grow_object(mem, "(", 1)) {
log_error("dm_pool_grow_object failed");
dm_pool_abandon_object(mem);
return 0;
}
if (!_tags_list_str(mem, pvl->pv, cling_tag_list_cn)) {
dm_pool_abandon_object(mem);
return_0;
}
if (!dm_pool_grow_object(mem, ")", 1)) {
log_error("dm_pool_grow_object failed");
dm_pool_abandon_object(mem);
return 0;
}
}
first = 0;
}
if (!dm_pool_grow_object(mem, "\0", 1)) {
log_error("dm_pool_grow_object failed");
dm_pool_abandon_object(mem);
return 0;
}
pvnames = dm_pool_end_object(mem);
log_debug_alloc("Parallel PVs at LE %" PRIu32 " length %" PRIu32 ": %s",
spvs->le, spvs->len, pvnames);
dm_pool_free(mem, pvnames);
}
return 1;
return _pvs_have_matching_tag(pvmatch->cling_tag_list_cn, pvseg->pv, pva->map->pv);
}
/*
@@ -2265,17 +2235,8 @@ static void _reserve_area(struct alloc_handle *ah, struct alloc_state *alloc_sta
struct pv_area_used *area_used = &alloc_state->areas[ix_pva];
const char *pv_tag_list = NULL;
if (ah->cling_tag_list_cn) {
if (!dm_pool_begin_object(ah->mem, 256))
log_error("PV tags string allocation failed");
else if (!_tags_list_str(ah->mem, pva->map->pv, ah->cling_tag_list_cn))
dm_pool_abandon_object(ah->mem);
else if (!dm_pool_grow_object(ah->mem, "\0", 1)) {
dm_pool_abandon_object(ah->mem);
log_error("PV tags string extension failed.");
} else
pv_tag_list = dm_pool_end_object(ah->mem);
}
if (ah->cling_tag_list_cn)
pv_tag_list = _tags_list_str(ah, pva->map->pv);
log_debug_alloc("%s allocation area %" PRIu32 " %s %s start PE %" PRIu32
" length %" PRIu32 " leaving %" PRIu32 "%s%s.",
@@ -2434,7 +2395,7 @@ static int _check_cling_to_alloced(struct alloc_handle *ah, const struct dm_conf
continue; /* Area already assigned */
dm_list_iterate_items(aa, &ah->alloced_areas[s]) {
if ((!cling_tag_list_cn && (pva->map->pv == aa[0].pv)) ||
(cling_tag_list_cn && _pvs_have_matching_tag(cling_tag_list_cn, pva->map->pv, aa[0].pv, 0))) {
(cling_tag_list_cn && _pvs_have_matching_tag(cling_tag_list_cn, pva->map->pv, aa[0].pv))) {
if (positional)
_reserve_required_area(ah, alloc_state, pva, pva->count, s, 0);
return 1;
@@ -2445,20 +2406,13 @@ static int _check_cling_to_alloced(struct alloc_handle *ah, const struct dm_conf
return 0;
}
static int _pv_is_parallel(struct physical_volume *pv, struct dm_list *parallel_pvs, const struct dm_config_node *cling_tag_list_cn)
static int _pv_is_parallel(struct physical_volume *pv, struct dm_list *parallel_pvs)
{
struct pv_list *pvl;
dm_list_iterate_items(pvl, parallel_pvs) {
if (pv == pvl->pv) {
log_debug_alloc("Not using free space on existing parallel PV %s.",
pv_dev_name(pvl->pv));
dm_list_iterate_items(pvl, parallel_pvs)
if (pv == pvl->pv)
return 1;
}
if (cling_tag_list_cn && _pvs_have_matching_tag(cling_tag_list_cn, pvl->pv, pv, 1))
return 1;
}
return 0;
}
@@ -2746,7 +2700,7 @@ static int _find_some_parallel_space(struct alloc_handle *ah,
/* FIXME Split into log and non-log parallel_pvs and only check the log ones if log_iteration? */
/* (I've temporarily disabled the check.) */
/* Avoid PVs used by existing parallel areas */
if (!log_iteration_count && parallel_pvs && _pv_is_parallel(pvm->pv, parallel_pvs, ah->cling_tag_list_cn))
if (!log_iteration_count && parallel_pvs && _pv_is_parallel(pvm->pv, parallel_pvs))
goto next_pv;
/*
@@ -3064,7 +3018,7 @@ static int _allocate(struct alloc_handle *ah,
if (!(pvms = create_pv_maps(ah->mem, vg, allocatable_pvs)))
return_0;
if (!_log_parallel_areas(ah->mem, ah->parallel_areas, ah->cling_tag_list_cn))
if (!_log_parallel_areas(ah->mem, ah->parallel_areas))
stack;
alloc_state.areas_size = dm_list_size(pvms);
@@ -3298,7 +3252,7 @@ static struct alloc_handle *_alloc_init(struct cmd_context *cmd,
ah->metadata_area_count = area_count;
ah->alloc_and_split_meta = 1;
ah->log_len = lv_raid_metadata_area_len(ah->region_size, extent_size);
ah->log_len = existing_extents ? 0 : lv_raid_metadata_area_len(extent_size);
/*
* We need 'log_len' extents for each
@@ -3877,7 +3831,8 @@ static int _lv_extend_layered_lv(struct alloc_handle *ah,
if (seg_is_raid(seg)) {
stripes = 1;
stripe_size = 0;
area_multiple = _calc_area_multiple(seg->segtype, seg->area_count, seg_is_raid1(seg) ? 1 : seg->area_count - seg->segtype->parity_devs);
if (seg_is_any_raid0(seg))
area_multiple = seg->area_count;
}
for (fa = first_area, s = 0; s < seg->area_count; s++) {
@@ -4000,80 +3955,33 @@ static int _lv_extend_layered_lv(struct alloc_handle *ah,
return 1;
}
/* Adjust region and stripe size on very small LVs */
void lv_adjust_region_and_stripe_size(struct logical_volume *lv)
/* Return maximum number of extents for size of MetaLV of RaidLV @lv with bitmap */
#define RAID_HEADER_SIZE (2 * 4096) /* dm-raid superblock and bitmap superblock */
static uint32_t _max_raid_extents(struct logical_volume *lv)
{
uint32_t size;
uint64_t area_size;
struct lv_segment *seg = first_seg(lv);
if (!seg)
return;
area_size = (uint64_t) seg->area_len * lv->vg->extent_size;
if (seg->region_size > area_size) {
size = _round_down_pow2(seg_lv(seg, 0)->size);
log_warn("Region size %s too large for LV %s size %s, rounded down to %s",
display_size(lv->vg->cmd, seg->region_size),
display_lvname(lv),
display_size(lv->vg->cmd, lv->size),
display_size(lv->vg->cmd, size));
seg->region_size = size;
}
if (seg->stripe_size > area_size) {
size = _round_down_pow2(seg_lv(seg, 0)->size);
log_warn("Stripe size %s too large for LV %s size %s, rounded down to %s",
display_size(lv->vg->cmd, seg->stripe_size),
display_lvname(lv),
display_size(lv->vg->cmd, lv->size),
display_size(lv->vg->cmd, size));
seg->stripe_size = size;
}
}
/* Check MetaLV size is sufficient for RaidLV @lv size */
#define RAID_SUPERBLOCKS_SIZE (2 * 4096) /* dm-raid superblock and bitmap superblock */
static int _raid_rmeta_size_sufficient(struct logical_volume *lv)
{
uint32_t area_multiple;
uint64_t max_rimage_size;
uint64_t max_image_size;
uint64_t mlv_bytes; /* dm-raid superblock and bitmap superblock */
struct lv_segment *seg = first_seg(lv);
struct logical_volume *mlv;
if (!seg ||
!seg_is_raid(seg) ||
!seg->region_size ||
!seg->meta_areas ||
!(mlv = seg_metalv(seg, 0)))
return 1;
!(mlv = seg_metalv(seg, 0)) ||
!seg->region_size)
return ~0U;
mlv_bytes = mlv->size << SECTOR_SHIFT;
if (mlv_bytes < RAID_SUPERBLOCKS_SIZE) {
mlv_bytes = (mlv->le_count * lv->vg->extent_size) << SECTOR_SHIFT;
if (mlv_bytes < RAID_HEADER_SIZE) {
log_error("Metadata LV %s too small to even hold the RAID headers",
display_lvname(mlv));
return 0;
}
/*
* Subtract space for 2 headers (superblock and bitmap)
* and calculate max image size in sectors
*/
max_rimage_size = (mlv_bytes - RAID_SUPERBLOCKS_SIZE) * 8 * seg->region_size;
/* Subtract space for 2 headers (superblock and bitmap) */
max_image_size = (mlv_bytes - RAID_HEADER_SIZE) * 8 * seg->region_size;
/* Calculate the maximum possible LV size */
/* FIXME: area_multiple needs to change once we support odd number of stripes in raid10 */
area_multiple = _calc_area_multiple(seg->segtype, seg->area_count, 0);
if (max_rimage_size * area_multiple < lv->size) {
log_error("Can't extend LV %s larger than %s because of MetaLV size",
display_lvname(lv),
display_size(lv->vg->cmd, max_rimage_size * area_multiple));
return 0;
}
return 1;
return max_image_size / lv->vg->extent_size * (seg_is_raid1(seg) ? 1 : seg->area_count - seg->segtype->parity_devs);
}
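Plugging assumed numbers into that formula to show the scale:

/*
 * Assume a 4 MiB MetaLV: 4194304 - 8192 bytes remain for the bitmap,
 * i.e. ~33.5 million bits, one per region.  With region_size = 1024
 * sectors (512 KiB), an image may cover ~33.5M * 1024 sectors (~16 TiB);
 * a 4-area raid5 then multiplies that by its 3 data stripes.
 */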
/*
@@ -4093,7 +4001,7 @@ int lv_extend(struct logical_volume *lv,
struct dm_list *allocatable_pvs, alloc_policy_t alloc,
int approx_alloc)
{
int r = 1, empty = 0;
int r = 1;
int log_count = 0;
struct alloc_handle *ah;
uint32_t sub_lv_count;
@@ -4106,8 +4014,6 @@ int lv_extend(struct logical_volume *lv,
return lv_add_virtual_segment(lv, 0u, extents, segtype);
if (!lv->le_count) {
empty = 1;
if (segtype_is_pool(segtype))
/*
* Pool allocations treat the metadata device like a mirror log.
@@ -4122,6 +4028,8 @@ int lv_extend(struct logical_volume *lv,
}
/* FIXME log_count should be 1 for mirrors */
extents = extents_round_to_boundary(lv->vg, segtype, extents, stripes, 1);
if (!(ah = allocate_extents(lv->vg, lv, segtype, stripes, mirrors,
log_count, region_size, extents,
allocatable_pvs, alloc, approx_alloc, NULL)))
@@ -4138,9 +4046,9 @@ int lv_extend(struct logical_volume *lv,
if (!(r = lv_add_segment(ah, 0, ah->area_count, lv, segtype,
stripe_size, 0u, 0)))
stack;
if (empty)
lv_adjust_region_and_stripe_size(lv);
} else {
uint32_t max_extents;
/*
* For RAID, all the devices are AREA_LV.
* However, for 'mirror on stripe' using non-RAID targets,
@@ -4165,15 +4073,12 @@ int lv_extend(struct logical_volume *lv,
stripes, stripe_size)))
goto_out;
if (empty)
lv_adjust_region_and_stripe_size(lv);
if (!(r = _raid_rmeta_size_sufficient(lv))) {
if (!old_extents &&
(!lv_remove(lv) || !vg_write(lv->vg) || !vg_commit(lv->vg)))
return_0;
goto_out;
if ((max_extents = _max_raid_extents(lv)) < lv->le_count) {
log_error("Can't extend LV %s larger than %s because of MetaLV size",
display_lvname(lv),
display_size(lv->vg->cmd, max_extents * lv->vg->extent_size));
r = 0;
goto out;
}
/*
@@ -4486,7 +4391,6 @@ static int _validate_stripesize(const struct volume_group *vg,
return 0;
}
/* Limit stripe size to extent size for non-RAID */
if (lp->stripe_size > vg->extent_size) {
log_print_unless_silent("Reducing stripe size %s to maximum, "
"physical extent size %s.",
@@ -4980,10 +4884,6 @@ static int _lvresize_adjust_extents(struct logical_volume *lv,
(lp->sign == SIGN_NONE && (lp->extents < existing_extents)))
reducing = 1;
lp->extents = _round_extents_to_boundary(lv->vg, lp->extents,
seg_is_mirrored(seg_last) ? 1 : seg_last->area_count - seg_last->segtype->parity_devs,
seg_last->stripe_size, !reducing);
/* If extending, find properties of last segment */
if (!reducing) {
seg_mirrors = seg_is_mirrored(seg_last) ? lv_mirror_count(lv) : 0;
@@ -7242,41 +7142,7 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
}
}
#if 1
/* FIXME: minimum 1K extent size??? */
if (lp->stripe_size > vg->extent_size &&
!seg_is_raid(lp)) {
log_print_unless_silent("Reducing requested stripe size %s to maximum, "
"physical extent size %s.",
display_size(cmd, (uint64_t) lp->stripe_size),
display_size(cmd, (uint64_t) vg->extent_size));
lp->stripe_size = vg->extent_size;
}
#else
if (lp->stripe_size > vg->extent_size) {
if (seg_is_raid(lp) && (vg->extent_size < STRIPE_SIZE_MIN)) {
/*
* FIXME: RAID will simply fail to load the table if
* this is the case, but we should probably
* honor the stripe minimum for regular stripe
* volumes as well. Avoiding doing that now
* only to minimize the change.
*/
log_error("The extent size in volume group %s is too "
"small to support striped RAID volumes.",
vg->name);
return NULL;
}
log_print_unless_silent("Reducing requested stripe size %s to maximum, "
"physical extent size %s.",
display_size(cmd, (uint64_t) lp->stripe_size),
display_size(cmd, (uint64_t) vg->extent_size));
lp->stripe_size = vg->extent_size;
}
#endif
lp->extents = _round_extents_to_boundary(vg, lp->extents, lp->stripes, lp->stripe_size, 1);
lp->extents = extents_round_to_boundary(vg, lp->segtype, lp->extents, lp->stripes, 1);
if (!lp->extents && !seg_is_thin_volume(lp)) {
log_error(INTERNAL_ERROR "Unable to create new logical volume with no extents.");
@@ -7428,7 +7294,6 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
status |= LV_NOTSYNCED;
}
if (!seg_is_raid(lp))
lp->region_size = adjusted_mirror_region_size(vg->extent_size,
lp->extents,
lp->region_size, 0,

View File

@@ -413,8 +413,9 @@ int check_lv_segments(struct logical_volume *lv, int complete_vg)
inc_error_count;
}
area_multiplier = (segtype_is_striped(seg->segtype) || (seg_is_raid(seg) && !seg_is_raid1(seg))) ?
(seg->area_count - seg->segtype->parity_devs) : 1;
area_multiplier = segtype_is_striped(seg->segtype) ?
seg->area_count : 1;
if (seg->area_len * area_multiplier != seg->len) {
log_error("LV %s: segment %u has inconsistent "
"area_len %u",

View File

@@ -30,13 +30,13 @@
#define MAX_STRIPES 128U
#define SECTOR_SHIFT 9L
#define SECTOR_SIZE ( 1L << SECTOR_SHIFT )
#define STRIPE_SIZE_MIN 8 /* 8 sectors minimum to allow for raid takeover of striped */
#define STRIPE_SIZE_MIN ( (unsigned) lvm_getpagesize() >> SECTOR_SHIFT) /* PAGESIZE in sectors */
#define STRIPE_SIZE_MAX ( 512L * 1024L >> SECTOR_SHIFT) /* 512 KB in sectors */
#define STRIPE_SIZE_LIMIT ((UINT_MAX >> 2) + 1)
#define MAX_RESTRICTED_LVS 255 /* Used by FMT_RESTRICTED_LVIDS */
#define MIN_EXTENT_SIZE 8 /* 8 sectors minimum to allow for raid takeover of striped */
#define MAX_EXTENT_SIZE ((uint32_t) -1)
#define MIN_NON_POWER2_EXTENT_SIZE (128U * 2U) /* 128KB in sectors */
#define RAID_ALLOC_CHUNK_SECTORS (64 * 2) /* Allocate RAID in these minimal chunks to ensure page io doesn't fail */
#define HISTORICAL_LV_PREFIX "-"
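For illustration, the new page-size-derived minimum (values depend on the machine):

/* lvm_getpagesize() = 4096 on x86 gives STRIPE_SIZE_MIN = 4096 >> 9 = 8
 * sectors (4 KiB); 64 KiB pages on PPC give 65536 >> 9 = 128 sectors. */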
@@ -826,7 +826,6 @@ int lv_rename_update(struct cmd_context *cmd, struct logical_volume *lv,
/* Updates and reloads metadata for given lv */
int lv_update_and_reload(struct logical_volume *lv);
int lv_update_and_reload_origin(struct logical_volume *lv);
void lv_adjust_region_and_stripe_size(struct logical_volume *lv);
uint32_t extents_from_size(struct cmd_context *cmd, uint64_t size,
uint32_t extent_size);
@@ -834,6 +833,12 @@ uint32_t extents_from_percent_size(struct volume_group *vg, const struct dm_list
uint32_t extents, int roundup,
percent_type_t percent, uint64_t size);
/* Round @extents to stripe and/or RAID io boundary */
uint32_t extents_round_to_boundary(struct volume_group *vg,
const struct segment_type *segtype,
uint32_t extents, uint32_t stripes,
int extend);
struct logical_volume *find_pool_lv(const struct logical_volume *lv);
int pool_is_active(const struct logical_volume *pool_lv);
int pool_supports_external_origin(const struct lv_segment *pool_seg, const struct logical_volume *external_lv);
@@ -947,9 +952,6 @@ struct lvcreate_params {
uint32_t chunk_size; /* snapshot */
uint32_t region_size; /* mirror */
unsigned stripes_supplied; /* striped */
unsigned stripe_size_supplied; /* striped */
uint32_t mirrors; /* mirror */
uint32_t min_recovery_rate; /* RAID */
@@ -1214,7 +1216,7 @@ int lv_raid_replace(struct logical_volume *lv, struct dm_list *remove_pvs,
struct dm_list *allocate_pvs);
int lv_raid_remove_missing(struct logical_volume *lv);
int partial_raid_lv_supports_degraded_activation(const struct logical_volume *lv);
uint32_t lv_raid_metadata_area_len(uint32_t region_size, uint32_t extent_size);
uint32_t lv_raid_metadata_area_len(uint32_t extent_size);
/* -- metadata/raid_manip.c */
/* ++ metadata/cache_manip.c */

View File

@@ -1008,11 +1008,6 @@ int vgcreate_params_validate(struct cmd_context *cmd,
return 0;
}
if (vp->extent_size < MIN_EXTENT_SIZE) {
log_error("Physical extent size < 4 KiB restricts RAID use.");
return 0;
}
if (!(cmd->fmt->features & FMT_UNLIMITED_VOLS)) {
if (!vp->max_lv)
vp->max_lv = 255;

View File

@@ -430,6 +430,28 @@ int validate_pool_chunk_size(struct cmd_context *cmd,
return r;
}
/* Greatest common divisor */
static unsigned long _gcd(unsigned long n1, unsigned long n2)
{
unsigned long remainder;
do {
remainder = n1 % n2;
n1 = n2;
n2 = remainder;
} while (n2);
return n1;
}
/* Least common multiple */
static unsigned long _lcm(unsigned long n1, unsigned long n2)
{
if (!n1 || !n2)
return 0;
return (n1 * n2) / _gcd(n1, n2);
}
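A quick check of the helpers with assumed hint values:

/* _gcd(128, 192) = 64, so _lcm(128, 192) = 128 * 192 / 64 = 384:
 * a 384-sector chunk satisfies both (hypothetical) device I/O hints. */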
int recalculate_pool_chunk_size_with_dev_hints(struct logical_volume *pool_lv,
int passed_args,
int chunk_size_calc_policy)
@@ -475,7 +497,7 @@ int recalculate_pool_chunk_size_with_dev_hints(struct logical_volume *pool_lv,
continue;
if (previous_hint)
hint = lcm(previous_hint, hint);
hint = _lcm(previous_hint, hint);
previous_hint = hint;
break;
case AREA_LV:

View File

@@ -152,23 +152,11 @@ int lv_is_raid_with_tracking(const struct logical_volume *lv)
}
/* FIXME: remove lv_raid_metadata_area_len() after defining 'lv_raid_rmeta_extents'*/
uint32_t lv_raid_metadata_area_len(uint32_t region_size, uint32_t extent_size)
uint32_t lv_raid_metadata_area_len(uint32_t extent_size)
{
uint32_t r;
uint64_t max_lv_size;
if (!region_size)
region_size++;
/* Ensure sensible minimum metadata device size until we get dynamic rmeta resizing... */
max_lv_size = UINT32_MAX / 2;
max_lv_size *= extent_size;
max_lv_size = min(max_lv_size, (uint64_t) 2048 * 1024 * 1024 * 128);
r = (max_lv_size / region_size / (8*2048*extent_size) ?: 1);
if (r * extent_size < 2 * 12)
r = 2 * 12;
return r;
/* Ensure 4 MiB until we get dynamic rmeta resizing... */
// return max((4 * 2048 / extent_size), (uint32_t) 1);
return max((2*12 / extent_size), (uint32_t) 1);
}
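With assumed extent sizes, the new minimum works out as:

/* 4 MiB extents (8192 sectors): 24 / 8192 = 0, clamped to 1 extent;
 * 4 KiB extents (8 sectors): 24 / 8 = 3 extents (12 KiB of rmeta). */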
uint32_t lv_raid_image_count(const struct logical_volume *lv)
@@ -648,9 +636,6 @@ static int _alloc_image_components(struct logical_volume *lv,
else
region_size = seg->region_size;
if (!region_size)
region_size = get_default_region_size(lv->vg->cmd);
if (seg_is_raid(seg))
segtype = seg->segtype;
else if (!(segtype = get_segtype_from_string(lv->vg->cmd, SEG_TYPE_NAME_RAID1)))
@@ -732,7 +717,6 @@ static int _alloc_rmeta_for_lv(struct logical_volume *data_lv,
struct logical_volume **meta_lv,
struct dm_list *allocate_pvs)
{
uint32_t region_size;
struct dm_list allocatable_pvs;
struct alloc_handle *ah;
struct lv_segment *seg = first_seg(data_lv);
@@ -759,10 +743,9 @@ static int _alloc_rmeta_for_lv(struct logical_volume *data_lv,
if (!(base_name = top_level_lv_name(data_lv->vg, data_lv->name)))
return_0;
region_size = seg->region_size ?: get_default_region_size(data_lv->vg->cmd);
if (!(ah = allocate_extents(data_lv->vg, NULL, seg->segtype, 0, 1, 0,
seg->region_size,
lv_raid_metadata_area_len(region_size, data_lv->vg->extent_size),
lv_raid_metadata_area_len(data_lv->vg->extent_size),
allocate_pvs, data_lv->alloc, 0, NULL)))
return_0;
@@ -870,9 +853,7 @@ static int _raid_add_images_without_commit(struct logical_volume *lv,
lv->status |= RAID;
seg = first_seg(lv);
seg_lv(seg, 0)->status |= RAID_IMAGE | LVM_READ | LVM_WRITE;
if (!seg->region_size)
seg->region_size = get_default_region_size(lv->vg->cmd);
seg->stripe_size = 0;
seg->region_size = min((uint64_t) get_default_region_size(lv->vg->cmd), lv->size);
/* MD's bitmap is limited to tracking 2^21 regions */
while (seg->region_size < (lv->size / (1 << 21))) {
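A worked example of that bitmap limit, assuming a 16 TiB LV:

/* 16 TiB = 2^35 sectors; 2^35 / 2^21 = 2^14 sectors, so region_size
 * must double up to at least 8 MiB before the bitmap can cover the LV. */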
@@ -981,6 +962,21 @@ static int _raid_add_images(struct logical_volume *lv,
struct lv_segment *seg = first_seg(lv);
uint32_t s;
if (seg_is_linear(seg) &&
lv->size % RAID_ALLOC_CHUNK_SECTORS) {
uint64_t size = lv->le_count * lv->vg->extent_size;
uint32_t extents;
size += RAID_ALLOC_CHUNK_SECTORS - size % RAID_ALLOC_CHUNK_SECTORS;
extents = extents_from_size(lv->vg->cmd, size, lv->vg->extent_size) - lv->le_count;
log_print_unless_silent("Resizing LV %s to RAID boundary %s before conversion to raid1",
display_lvname(lv), display_size(lv->vg->cmd, size));
if (!lv_extend(lv, seg->segtype, 1, 0, 1, 0, extents, pvs, lv->alloc, 0))
return 0;
if (!lv_update_and_reload_origin(lv))
return_0;
}
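Tracing that adjustment with assumed numbers:

/* A linear LV of 3 extents at 4 KiB (24 sectors): 24 % 128 = 24, so the
 * size grows by 104 sectors to 128 (64 KiB) and the LV is extended by
 * 16 - 3 = 13 extents before the raid1 conversion starts. */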
if (!_raid_add_images_without_commit(lv, new_count, pvs, use_existing_area_len))
return_0;
@@ -2547,7 +2543,7 @@ static struct possible_takeover_reshape_type _possible_takeover_reshape_types[]
.current_areas = 1,
.options = ALLOW_STRIPE_SIZE },
{ .current_types = SEG_STRIPED_TARGET, /* striped, i.e. seg->area_count > 1 */
.possible_types = SEG_RAID0|SEG_RAID0_META|SEG_RAID4,
.possible_types = SEG_RAID0|SEG_RAID0_META,
.current_areas = ~0U,
.options = ALLOW_NONE },
/* raid0* -> */
@@ -2913,7 +2909,7 @@ static int _raid456_to_raid0_or_striped_wrapper(TAKEOVER_FN_ARGS)
return_0;
/* FIXME Hard-coded raid4 to raid0 */
seg->area_len = seg->extents_copied = seg->len / (seg->area_count - seg->segtype->parity_devs);
seg->area_len = seg->extents_copied = seg->area_len / seg->area_count;
if (segtype_is_striped_target(new_segtype)) {
if (!_convert_raid0_to_striped(lv, 0, &removal_lvs))
@@ -2999,13 +2995,10 @@ static int _striped_or_raid0_to_raid45610_wrapper(TAKEOVER_FN_ARGS)
seg->segtype = new_segtype;
seg->region_size = new_region_size;
/* FIXME Hard-coded raid0 -> raid4 */
if (seg_is_any_raid0(seg))
seg->area_len = seg->extents_copied = seg->len / (seg->area_count - seg->segtype->parity_devs);
/* FIXME Hard-coded raid0 to raid4 */
seg->area_len = seg->len;
_check_and_adjust_region_size(lv);
lv_adjust_region_and_stripe_size(lv);
log_debug_metadata("Updating VG metadata and reloading %s LV %s",
lvseg_name(seg), display_lvname(lv));
@@ -3095,7 +3088,7 @@ static int _takeover_from_raid0_to_raid10(TAKEOVER_FN_ARGS)
static int _takeover_from_raid0_to_raid45(TAKEOVER_FN_ARGS)
{
return _striped_or_raid0_to_raid45610_wrapper(lv, new_segtype, yes, force, first_seg(lv)->area_count + 1, 2 /* data_copies */, 0, 0, new_region_size, allocate_pvs);
return _striped_or_raid0_to_raid45610_wrapper(lv, new_segtype, yes, force, first_seg(lv)->area_count + 1, 1 /* data_copies */, 0, 0, new_region_size, allocate_pvs);
}
static int _takeover_from_raid0_to_raid6(TAKEOVER_FN_ARGS)
@@ -3141,7 +3134,7 @@ static int _takeover_from_raid0_meta_to_raid10(TAKEOVER_FN_ARGS)
static int _takeover_from_raid0_meta_to_raid45(TAKEOVER_FN_ARGS)
{
return _striped_or_raid0_to_raid45610_wrapper(lv, new_segtype, yes, force, first_seg(lv)->area_count + 1, 2 /* data_copies */, 0, 0, new_region_size, allocate_pvs);
return _striped_or_raid0_to_raid45610_wrapper(lv, new_segtype, yes, force, first_seg(lv)->area_count + 1, 1 /* data_copies */, 0, 0, new_region_size, allocate_pvs);
}
static int _takeover_from_raid0_meta_to_raid6(TAKEOVER_FN_ARGS)
@@ -3414,6 +3407,12 @@ static int _set_convenient_raid456_segtype_to(const struct lv_segment *seg_from,
!segtype_is_raid5_n(*segtype)) {
log_error("Conversion to raid5_n not yet supported.");
return 0;
/* If this is any raid6 conversion request -> enforce raid6_n_6, because we convert from striped */
} else if (segtype_is_any_raid6(*segtype) &&
!segtype_is_raid6_n_6(*segtype)) {
log_error("Conversion to raid6_n_6 not yet supported.");
return 0;
}
/* Got to do check for raid5 -> raid6 ... */

View File

@@ -145,7 +145,7 @@ struct dev_manager;
#define segtype_is_unknown(segtype) ((segtype)->flags & SEG_UNKNOWN ? 1 : 0)
#define segtype_supports_stripe_size(segtype) \
((segtype_is_striped(segtype) || \
((segtype_is_striped(segtype) || segtype_is_mirror(segtype) || \
segtype_is_cache(segtype) || segtype_is_cache_pool(segtype) || \
segtype_is_thin(segtype) || segtype_is_snapshot(segtype) || \
(segtype_is_raid(segtype) && !segtype_is_raid1(segtype))) ? 1 : 0)

View File

@@ -89,7 +89,6 @@
# include "lvm-logging.h"
# include "lvm-globals.h"
# include "lvm-wrappers.h"
# include "lvm-maths.h"
#endif
#include <unistd.h>

View File

@@ -1,38 +0,0 @@
/*
* Copyright (C) 2016 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "lib.h"
/* Greatest common divisor */
unsigned long gcd(unsigned long n1, unsigned long n2)
{
unsigned long remainder;
do {
remainder = n1 % n2;
n1 = n2;
n2 = remainder;
} while (n2);
return n1;
}
/* Least common multiple */
unsigned long lcm(unsigned long n1, unsigned long n2)
{
if (!n1 || !n2)
return 0;
return (n1 * n2) / gcd(n1, n2);
}

View File

@@ -1,24 +0,0 @@
/*
* Copyright (C) 2016 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU Lesser General Public License v.2.1.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef _LVM_MATH_H
#define _LVM_MATH_H
/* Greatest common divisor */
unsigned long gcd(unsigned long n1, unsigned long n2);
/* Least common multiple */
unsigned long lcm(unsigned long n1, unsigned long n2);
#endif

View File

@@ -499,12 +499,7 @@ static struct segment_type *_init_raid_segtype(struct cmd_context *cmd,
segtype->ops = &_raid_ops;
segtype->name = rt->name;
segtype->flags = SEG_RAID | SEG_ONLY_EXCLUSIVE | rt->extra_flags;
/* Never monitor raid0 or raid0_meta LVs */
if (!segtype_is_any_raid0(segtype))
segtype->flags |= monitored;
segtype->flags = SEG_RAID | SEG_ONLY_EXCLUSIVE | rt->extra_flags | monitored;
segtype->parity_devs = rt->parity;
log_very_verbose("Initialised segtype: %s", segtype->name);

View File

@@ -491,7 +491,7 @@ static int handle_connect(daemon_state s)
client.socket_fd = accept(s.socket_fd, (struct sockaddr *) &sockaddr, &sl);
if (client.socket_fd < 0) {
ERROR(&s, "Failed to accept connection errno %d.", errno);
ERROR(&s, "Failed to accept connection.");
return 0;
}
@@ -513,7 +513,7 @@ static int handle_connect(daemon_state s)
ts->client = client;
if (pthread_create(&ts->client.thread_id, NULL, _client_thread, ts)) {
ERROR(&s, "Failed to create client thread errno %d.", errno);
ERROR(&s, "Failed to create client thread.");
return 0;
}

View File

@@ -216,7 +216,6 @@ int dm_task_set_major_minor(struct dm_task *dmt, int major, int minor, int allow
int dm_task_set_uid(struct dm_task *dmt, uid_t uid);
int dm_task_set_gid(struct dm_task *dmt, gid_t gid);
int dm_task_set_mode(struct dm_task *dmt, mode_t mode);
/* See also description for DM_UDEV_DISABLE_LIBRARY_FALLBACK flag! */
int dm_task_set_cookie(struct dm_task *dmt, uint32_t *cookie, uint16_t flags);
int dm_task_set_event_nr(struct dm_task *dmt, uint32_t event_nr);
int dm_task_set_geometry(struct dm_task *dmt, const char *cylinders, const char *heads, const char *sectors, const char *start);
@@ -3497,18 +3496,7 @@ struct dm_pool *dm_config_memory(struct dm_config_tree *cft);
* DM_UDEV_DISABLE_LIBRARY_FALLBACK is set in case we need to disable
* libdevmapper's node management. We will rely on udev completely
* and there will be no fallback action provided by libdevmapper if
* udev does something improperly. Using the library fallback code has
* a consequence that you need to take into account: any device node
* or symlink created without udev is not recorded in udev database
* which other applications may read to get complete list of devices.
* For this reason, use of DM_UDEV_DISABLE_LIBRARY_FALLBACK is
* recommended on systems where udev is used. Keep library fallback
* enabled just for exceptional cases where you need to debug udev-related
* problems. If you hit such problems, please contact us through upstream
* LVM2 development mailing list (see also README file). This flag is
* currently not set by default in libdevmapper so you need to set it
* explicitly if you're sure that udev is behaving correctly on your
* setups.
* udev does something improperly.
*/
#define DM_UDEV_DISABLE_LIBRARY_FALLBACK 0x0020
/*

View File

@@ -1,128 +0,0 @@
#!/bin/bash
# Copyright (C) 2016 Red Hat, Inc. All rights reserved.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
SKIP_WITH_LVMLOCKD=1
SKIP_WITH_LVMPOLLD=1
. lib/inittest
aux have_raid 1 3 2 || skip
aux prepare_vg 8
# Delay all legs so that rebuilding status characters can be read
for d in "$dev1" "$dev2" "$dev3" "$dev4" "$dev5" "$dev6" "$dev7" "$dev8"
do
aux delay_dev $d 0 3
done
# rhbz 1064592
##############################################
# Create an 8-way striped raid10 with 4 mirror
# groups and rebuild selected PVs.
lvcreate --type raid10 -m 1 -i 4 -l 2 -n $lv1 $vg
aux wait_for_sync $vg $lv1
# Rebuilding the 1st and 2nd devices would rebuild a
# whole mirror group and needs to be rejected.
not lvchange --yes --rebuild "$dev1" --rebuild "$dev2" $vg/$lv1
not aux check_status_chars $vg $lv1 "aAaAAAAA"
aux wait_for_sync $vg $lv1
# Rebuild 1st and 3rd device from different mirror groups is fine.
lvchange --yes --rebuild "$dev1" --rebuild "$dev3" $vg/$lv1
aux check_status_chars $vg $lv1 "aAaAAAAA"
aux wait_for_sync $vg $lv1
# Rebuild devices 1, 3, 6 from different mirror groups is fine.
lvchange --yes --rebuild "$dev1" --rebuild "$dev3" --rebuild "$dev6" $vg/$lv1
aux check_status_chars $vg $lv1 "aAaAAaAA"
aux wait_for_sync $vg $lv1
# Rebuild devices 1, 3, 5 and 6 with 5+6 being
# a whole mirror group needs to be rejected.
not lvchange --yes --rebuild "$dev1" --rebuild "$dev3" --rebuild "$dev6" --rebuild "$dev5" $vg/$lv1
not aux check_status_chars $vg $lv1 "aAaAaaAA"
aux wait_for_sync $vg $lv1
# Rebuild devices 1, 3, 5 and 7 from different mirror groups is fine.
lvchange --yes --rebuild "$dev1" --rebuild "$dev3" --rebuild "$dev5" --rebuild "$dev7" $vg/$lv1
aux check_status_chars $vg $lv1 "aAaAaAaA"
aux wait_for_sync $vg $lv1
# Rebuild devices 2, 4, 6 and 8 from different mirror groups is fine.
lvchange --yes --rebuild "$dev2" --rebuild "$dev4" --rebuild "$dev6" --rebuild "$dev8" $vg/$lv1
aux check_status_chars $vg $lv1 "AaAaAaAa"
aux wait_for_sync $vg $lv1
##############################################
# Create an 8-legged raid1 and rebuild selected PVs
lvremove --yes $vg/$lv1
lvcreate --yes --type raid1 -m 7 -l 2 -n $lv1 $vg
aux wait_for_sync $vg $lv1
# Rebuilding all raid1 legs needs to be rejected.
not lvchange --yes --rebuild "$dev1" --rebuild "$dev2" --rebuild "$dev3" --rebuild "$dev4" \
--rebuild "$dev5" --rebuild "$dev6" --rebuild "$dev7" --rebuild "$dev8" $vg/$lv1
not aux check_status_chars $vg $lv1 "aaaaaaaa"
aux wait_for_sync $vg $lv1
# Rebuilding all but the raid1 master leg is fine.
lvchange --yes --rebuild "$dev2" --rebuild "$dev3" --rebuild "$dev4" \
--rebuild "$dev5" --rebuild "$dev6" --rebuild "$dev7" --rebuild "$dev8" $vg/$lv1
aux check_status_chars $vg $lv1 "Aaaaaaaa"
aux wait_for_sync $vg $lv1
# Rebuilding the raid1 master leg is fine.
lvchange --yes --rebuild "$dev1" $vg/$lv1
aux check_status_chars $vg $lv1 "aAAAAAAA"
aux wait_for_sync $vg $lv1
# Rebuild legs on devices 2, 4, 6 and 8 is fine.
lvchange --yes --rebuild "$dev2" --rebuild "$dev4" --rebuild "$dev6" --rebuild "$dev8" $vg/$lv1
aux check_status_chars $vg $lv1 "AaAaAaAa"
aux wait_for_sync $vg $lv1
##############################################
# Create a 6-legged raid6 and rebuild selected PVs
lvremove --yes $vg/$lv1
lvcreate --yes --type raid6 -i 4 -l 2 -n $lv1 $vg
aux wait_for_sync $vg $lv1
# Rebuilding all raid6 stripes needs to be rejected.
not lvchange --yes --rebuild "$dev1" --rebuild "$dev2" --rebuild "$dev3" \
--rebuild "$dev4" --rebuild "$dev5" --rebuild "$dev6" $vg/$lv1
not aux check_status_chars $vg $lv1 "aaaaaa"
aux wait_for_sync $vg $lv1
# Rebuilding more than 2 raid6 stripes needs to be rejected.
not lvchange --yes --rebuild "$dev2" --rebuild "$dev4" --rebuild "$dev6" $vg/$lv1
not aux check_status_chars $vg $lv1 "AaAaAa"
aux wait_for_sync $vg $lv1
# Rebuilding any 1 raid6 stripe is fine.
lvchange --yes --rebuild "$dev2" $vg/$lv1
aux check_status_chars $vg $lv1 "AaAAAA"
aux wait_for_sync $vg $lv1
lvchange --yes --rebuild "$dev5" $vg/$lv1
aux check_status_chars $vg $lv1 "AAAAaA"
aux wait_for_sync $vg $lv1
# Rebuilding any 2 raid6 stripes is fine.
lvchange --yes --rebuild "$dev2" --rebuild "$dev4" $vg/$lv1
aux check_status_chars $vg $lv1 "AaAaAA"
aux wait_for_sync $vg $lv1
lvchange --yes --rebuild "$dev1" --rebuild "$dev5" $vg/$lv1
aux check_status_chars $vg $lv1 "aAAAaA"
aux wait_for_sync $vg $lv1
vgremove -ff $vg

View File

@@ -86,8 +86,7 @@ struct lvconvert_params {
sign_t mirrors_sign;
uint32_t stripes;
uint32_t stripe_size;
unsigned stripes_supplied;
unsigned stripe_size_supplied;
uint32_t stripe_size_supplied;
uint32_t read_ahead;
cache_mode_t cache_mode; /* cache */
const char *policy_name; /* cache */
@@ -1849,11 +1848,6 @@ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *l
lvseg_name(seg));
return 0;
}
if (seg_is_raid10(seg)) {
log_error("--mirrors/-m cannot be changed with %s.",
lvseg_name(seg));
return 0;
}
}
if (!_lvconvert_validate_thin(lv, lp))
@@ -1968,13 +1962,7 @@ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *l
return 0;
}
if (seg_is_striped(seg)) {
log_error("Cannot repair LV %s of type raid0.",
display_lvname(lv));
return 0;
}
if (!lv_raid_percent(lv, &sync_percent)) {
if (!seg_is_striped(seg) && !lv_raid_percent(lv, &sync_percent)) {
log_error("Unable to determine sync status of %s.",
display_lvname(lv));
return 0;
@@ -3195,7 +3183,7 @@ static int _lvconvert_pool(struct cmd_context *cmd,
return_0;
if (!get_stripe_params(cmd, get_segtype_from_string(cmd, SEG_TYPE_NAME_STRIPED),
&lp->stripes, &lp->stripe_size, &lp->stripes_supplied, &lp->stripe_size_supplied))
&lp->stripes, &lp->stripe_size))
return_0;
if (!archive(vg))
@@ -4485,8 +4473,10 @@ static int _lvconvert(struct cmd_context *cmd, struct logical_volume *lv,
if (_mirror_or_raid_type_requested(cmd, lp->type_str) || _raid0_type_requested(lp->type_str) ||
_striped_type_requested(lp->type_str) || lp->repair || lp->mirrorlog || lp->corelog) {
/* FIXME Handle +/- adjustments too? */
if (!get_stripe_params(cmd, lp->segtype, &lp->stripes, &lp->stripe_size, &lp->stripes_supplied, &lp->stripe_size_supplied))
if (!get_stripe_params(cmd, lp->segtype, &lp->stripes, &lp->stripe_size))
return_0;
/* FIXME Move this into the get function */
lp->stripe_size_supplied = arg_is_set(cmd, stripesize_ARG);
if (_raid0_type_requested(lp->type_str) || _striped_type_requested(lp->type_str))
/* FIXME Shouldn't need to override get_stripe_params which defaults to 1 stripe (i.e. linear)! */

View File

@@ -464,13 +464,8 @@ static int _read_raid_params(struct cmd_context *cmd,
/*
* RAID10 needs at least 4 stripes
*/
if (lp->stripes_supplied) {
log_error("Minimum of 2 stripes required for %s.",
lp->segtype->name);
return 0;
}
log_verbose("Using 2 stripes for %s.", lp->segtype->name);
log_warn("Adjusting stripes to the minimum of 2 for %s.",
lp->segtype->name);
lp->stripes = 2;
}
@@ -485,28 +480,14 @@ static int _read_raid_params(struct cmd_context *cmd,
/*
* RAID1 does not take a stripe arg
*/
log_error("Stripes argument cannot be used with segment type, %s",
log_error("Stripe argument cannot be used with segment type, %s",
lp->segtype->name);
return 0;
}
} else if (seg_is_any_raid6(lp) && lp->stripes < 3) {
if (lp->stripes_supplied) {
log_error("Minimum of 3 stripes required for %s.", lp->segtype->name);
return 0;
}
log_verbose("Using 3 stripes for %s.", lp->segtype->name);
lp->stripes = 3;
} else if (lp->stripes < 2) {
if (lp->stripes_supplied) {
log_error("Minimum of 2 stripes required for %s.", lp->segtype->name);
return 0;
}
log_verbose("Using 2 stripes for %s.", lp->segtype->name);
lp->stripes = 2;
}
} else if (lp->stripes < 2)
/* No stripes argument was given */
lp->stripes = seg_is_any_raid6(lp) ? 3 : 2;
if (seg_is_raid1(lp)) {
if (lp->stripe_size) {
@@ -514,8 +495,6 @@ static int _read_raid_params(struct cmd_context *cmd,
lp->segtype->name);
return 0;
}
} else if (!lp->stripe_size) {
; // lp->stripe_size = find_config_tree_int(cmd, metadata_stripesize_CFG, NULL) * 2;
}
if (arg_is_set(cmd, mirrors_ARG) && segtype_is_raid(lp->segtype) &&
@@ -547,6 +526,7 @@ static int _read_raid_params(struct cmd_context *cmd,
static int _read_mirror_and_raid_params(struct cmd_context *cmd,
struct lvcreate_params *lp)
{
int pagesize = lvm_getpagesize();
unsigned max_images;
if (seg_is_raid(lp)) {
@@ -623,20 +603,12 @@ static int _read_mirror_and_raid_params(struct cmd_context *cmd,
return 0;
}
#if 1
if (lp->region_size && !is_power_of_2(lp->region_size)) {
log_error("Region size (%" PRIu32 ") must be power of 2",
lp->region_size);
return 0;
}
#else
if (lp->region_size % (pagesize >> SECTOR_SHIFT)) {
log_error("Region size (%" PRIu32 ") must be a multiple of "
"machine memory page size (%d)",
lp->region_size, pagesize >> SECTOR_SHIFT);
return 0;
}
#endif
if (seg_is_mirror(lp) && !_read_mirror_params(cmd, lp))
return_0;
@@ -1089,7 +1061,7 @@ static int _lvcreate_params(struct cmd_context *cmd,
if (!_lvcreate_name_params(cmd, &argc, &argv, lp) ||
!_read_size_params(cmd, lp, lcp) ||
!get_stripe_params(cmd, lp->segtype, &lp->stripes, &lp->stripe_size, &lp->stripes_supplied, &lp->stripe_size_supplied) ||
!get_stripe_params(cmd, lp->segtype, &lp->stripes, &lp->stripe_size) ||
(lp->create_pool &&
!get_pool_params(cmd, lp->segtype, &lp->passed_args,
&lp->pool_metadata_size, &lp->pool_metadata_spare,

View File

@@ -1278,31 +1278,16 @@ static int _validate_stripe_params(struct cmd_context *cmd, const struct segment
{
int stripe_size_required = segtype_supports_stripe_size(segtype);
#if 1
if (stripe_size_required) {
if (*stripes == 1 && segtype_is_mirror(segtype)) {
stripe_size_required = 0;
if (*stripe_size) {
log_print_unless_silent("Ignoring stripesize argument with single stripe.");
*stripe_size = 0;
}
}
} else if (*stripe_size) {
log_print_unless_silent("Ignoring stripesize argument for %s devices.", segtype->name);
*stripe_size = 0;
}
#else
if (!stripe_size_required && *stripe_size) {
log_print_unless_silent("Ignoring stripesize argument for %s devices.", segtype->name);
*stripe_size = 0;
} else if (*stripes == 1 && (segtype_is_striped_target(segtype) || segtype_is_mirror(segtype))) {
} else if (*stripes == 1 && (segtype_is_striped(segtype) || segtype_is_mirror(segtype))) {
stripe_size_required = 0;
if (*stripe_size) {
log_print_unless_silent("Ignoring stripesize argument with single stripe.");
*stripe_size = 0;
}
}
#endif
if (stripe_size_required) {
if (!*stripe_size) {
@@ -1333,17 +1318,13 @@ static int _validate_stripe_params(struct cmd_context *cmd, const struct segment
* power of 2, we must divide UINT_MAX by four and add 1 (to round it
* up to the power of 2)
*/
int get_stripe_params(struct cmd_context *cmd, const struct segment_type *segtype,
uint32_t *stripes, uint32_t *stripe_size,
unsigned *stripes_supplied, unsigned *stripe_size_supplied)
int get_stripe_params(struct cmd_context *cmd, const struct segment_type *segtype, uint32_t *stripes, uint32_t *stripe_size)
{
/* stripes_long_ARG takes precedence (for lvconvert) */
/* FIXME Cope with relative +/- changes for lvconvert. */
*stripes = arg_uint_value(cmd, arg_is_set(cmd, stripes_long_ARG) ? stripes_long_ARG : stripes_ARG, 1);
*stripes_supplied = arg_is_set(cmd, stripes_long_ARG) ? : arg_is_set(cmd, stripes_ARG);
*stripe_size = arg_uint_value(cmd, stripesize_ARG, 0);
*stripe_size_supplied = arg_is_set(cmd, stripesize_ARG);
if (*stripe_size) {
if (arg_sign_value(cmd, stripesize_ARG, SIGN_NONE) == SIGN_MINUS) {
log_error("Negative stripesize is invalid.");

View File

@@ -197,8 +197,7 @@ int get_pool_params(struct cmd_context *cmd,
int *zero);
int get_stripe_params(struct cmd_context *cmd, const struct segment_type *segtype,
uint32_t *stripes, uint32_t *stripe_size,
unsigned *stripes_supplied, unsigned *stripe_size_supplied);
uint32_t *stripes, uint32_t *stripe_size);
int get_cache_params(struct cmd_context *cmd,
cache_mode_t *cache_mode,