Mirror of git://sourceware.org/git/lvm2.git

"lvconvert -mN --type mirror/raid1" all work

Heinz Mauelshagen 2014-11-23 19:38:07 +01:00
parent 57af48d734
commit 2f35da89b8
15 changed files with 2162 additions and 715 deletions
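
For context: the conversions named in the commit title are driven from the lvconvert command line. A minimal sketch of the kind of invocations this refers to; the volume group and LV names (vg, lv) are placeholders, not taken from the commit:

    lvcreate --type raid1 -m 1 -L 1G -n lv vg   # create a 2-image raid1 LV
    lvconvert -m 2 vg/lv                        # raise the image count to 3 (-mN)
    lvconvert -m 1 --type mirror vg/lv          # convert to "mirror" with 2 images
    lvconvert --type raid1 vg/lv                # and back to raid1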


@ -2131,6 +2131,7 @@ static int _add_lv_to_dtree(struct dev_manager *dm, struct dm_tree *dtree,
!_add_lv_to_dtree(dm, dtree, seg_lv(seg, s), 0))
return_0;
if (seg_is_raid(seg) &&
seg_metalv(seg, s) &&
!_add_lv_to_dtree(dm, dtree, seg_metalv(seg, s), 0))
return_0;
}
@ -2300,10 +2301,13 @@ int add_areas_line(struct dev_manager *dm, struct lv_segment *seg,
return_0;
continue;
}
if (!(dlid = build_dm_uuid(dm->mem, seg_metalv(seg, s), NULL)))
return_0;
if (!dm_tree_node_add_target_area(node, NULL, dlid, extent_size * seg_metale(seg, s)))
return_0;
if (seg->meta_areas && seg_metalv(seg, s)) {
if (!(dlid = build_dm_uuid(dm->mem, seg_metalv(seg, s), NULL)))
return_0;
if (!dm_tree_node_add_target_area(node, NULL, dlid, extent_size * seg_metale(seg, s)))
return_0;
}
if (!(dlid = build_dm_uuid(dm->mem, seg_lv(seg, s), NULL)))
return_0;
@ -2629,6 +2633,7 @@ static int _add_segment_to_dtree(struct dev_manager *dm,
laopts, NULL))
return_0;
if (seg_is_raid(seg) &&
seg_metalv(seg, s) &&
!_add_new_lv_to_dtree(dm, dtree, seg_metalv(seg, s),
laopts, NULL))
return_0;


@ -56,6 +56,7 @@
#define DEFAULT_MIRROR_LOG_FAULT_POLICY "allocate"
#define DEFAULT_MIRROR_IMAGE_FAULT_POLICY "remove"
#define DEFAULT_MIRROR_MAX_IMAGES 8 /* limited by kernel DM_KCOPYD_MAX_REGIONS */
#define DEFAULT_RAID_MAX_IMAGES 64 /* limited by kernel failed devices bitfield in superblock */
#define DEFAULT_RAID_FAULT_POLICY "warn"
#define DEFAULT_DMEVENTD_RAID_LIB "libdevmapper-event-lvm2raid.so"


@ -567,13 +567,13 @@ int out_areas(struct formatter *f, const struct lv_segment *seg,
/* RAID devices are laid-out in metadata/data pairs */
if (!lv_is_raid_image(seg_lv(seg, s)) ||
!lv_is_raid_metadata(seg_metalv(seg, s))) {
(seg->meta_areas && seg_metalv(seg, s) && !lv_is_raid_metadata(seg_metalv(seg, s)))) {
log_error("RAID segment has non-RAID areas");
return 0;
}
outf(f, "\"%s\", \"%s\"%s",
seg_metalv(seg, s)->name, seg_lv(seg, s)->name,
(seg->meta_areas && seg_metalv(seg, s)) ? seg_metalv(seg, s)->name : "-", seg_lv(seg, s)->name,
(s == seg->area_count - 1) ? "" : ",");
break;
@ -595,6 +595,12 @@ static int _print_lv(struct formatter *f, struct logical_volume *lv)
struct tm *local_tm;
time_t ts;
#if 1
/* FIXME: HM: workaround for empty metadata lvs with raid0 */
if (!dm_list_size(&lv->segments))
return 1;
#endif
outnl(f);
outf(f, "%s {", lv->name);
_inc_indent(f);


@ -534,6 +534,7 @@ int lv_raid_image_in_sync(const struct logical_volume *lv)
if ((seg = first_seg(lv)))
raid_seg = get_only_segment_using_this_lv(seg->lv);
if (!raid_seg) {
log_error("Failed to find RAID segment for %s", lv->name);
return 0;


@ -84,7 +84,7 @@ struct pv_and_int {
int *i;
};
enum {
enum _lv_type_name_enum {
LV_TYPE_UNKNOWN,
LV_TYPE_PUBLIC,
LV_TYPE_PRIVATE,
@ -114,10 +114,18 @@ enum {
LV_TYPE_DATA,
LV_TYPE_SPARE,
LV_TYPE_VIRTUAL,
/*
* WARNING: all LV_TYPE_RAID* have to be in a sequence
* _lv_layout_and_role_raid() depends on the following order!
*/
LV_TYPE_RAID0,
LV_TYPE_RAID1,
LV_TYPE_RAID10,
LV_TYPE_RAID4,
LV_TYPE_RAID4_N,
LV_TYPE_RAID5,
LV_TYPE_RAID5_0,
LV_TYPE_RAID5_N,
LV_TYPE_RAID5_LA,
LV_TYPE_RAID5_RA,
LV_TYPE_RAID5_LS,
@ -126,6 +134,11 @@ enum {
LV_TYPE_RAID6_ZR,
LV_TYPE_RAID6_NR,
LV_TYPE_RAID6_NC,
LV_TYPE_RAID6_LA_6,
LV_TYPE_RAID6_RA_6,
LV_TYPE_RAID6_LS_6,
LV_TYPE_RAID6_RS_6,
LV_TYPE_RAID6_0_6
};
static const char *_lv_type_names[] = {
@ -158,10 +171,14 @@ static const char *_lv_type_names[] = {
[LV_TYPE_DATA] = "data",
[LV_TYPE_SPARE] = "spare",
[LV_TYPE_VIRTUAL] = "virtual",
[LV_TYPE_RAID0] = SEG_TYPE_NAME_RAID0,
[LV_TYPE_RAID1] = SEG_TYPE_NAME_RAID1,
[LV_TYPE_RAID10] = SEG_TYPE_NAME_RAID10,
[LV_TYPE_RAID4] = SEG_TYPE_NAME_RAID4,
[LV_TYPE_RAID4_N] = SEG_TYPE_NAME_RAID4_N,
[LV_TYPE_RAID5] = SEG_TYPE_NAME_RAID5,
[LV_TYPE_RAID5_0] = SEG_TYPE_NAME_RAID5_0,
[LV_TYPE_RAID5_N] = SEG_TYPE_NAME_RAID5_N,
[LV_TYPE_RAID5_LA] = SEG_TYPE_NAME_RAID5_LA,
[LV_TYPE_RAID5_RA] = SEG_TYPE_NAME_RAID5_RA,
[LV_TYPE_RAID5_LS] = SEG_TYPE_NAME_RAID5_LS,
@ -170,6 +187,11 @@ static const char *_lv_type_names[] = {
[LV_TYPE_RAID6_ZR] = SEG_TYPE_NAME_RAID6_ZR,
[LV_TYPE_RAID6_NR] = SEG_TYPE_NAME_RAID6_NR,
[LV_TYPE_RAID6_NC] = SEG_TYPE_NAME_RAID6_NC,
[LV_TYPE_RAID6_LA_6] = SEG_TYPE_NAME_RAID6_LA_6,
[LV_TYPE_RAID6_RA_6] = SEG_TYPE_NAME_RAID6_RA_6,
[LV_TYPE_RAID6_LS_6] = SEG_TYPE_NAME_RAID6_LS_6,
[LV_TYPE_RAID6_RS_6] = SEG_TYPE_NAME_RAID6_RS_6,
[LV_TYPE_RAID6_0_6] = SEG_TYPE_NAME_RAID6_0_6,
};
static int _lv_layout_and_role_mirror(struct dm_pool *mem,
@ -220,8 +242,8 @@ static int _lv_layout_and_role_raid(struct dm_pool *mem,
struct dm_list *role,
int *public_lv)
{
int top_level = 0;
const char *seg_name;
int t, top_level = 0;
const char *segtype_name;
/* non-top-level LVs */
if (lv_is_raid_image(lv)) {
@ -246,48 +268,24 @@ static int _lv_layout_and_role_raid(struct dm_pool *mem,
/* top-level LVs */
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID]))
goto_bad;
goto bad;
if (!strcmp(first_seg(lv)->segtype->name, SEG_TYPE_NAME_RAID1)) {
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID1]))
goto_bad;
} else if (!strcmp(first_seg(lv)->segtype->name, SEG_TYPE_NAME_RAID10)) {
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID10]))
goto_bad;
} else if (!strcmp(first_seg(lv)->segtype->name, SEG_TYPE_NAME_RAID4)) {
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID4]))
goto_bad;
} else if (!strncmp(seg_name = first_seg(lv)->segtype->name, SEG_TYPE_NAME_RAID5, strlen(SEG_TYPE_NAME_RAID5))) {
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID5]))
goto_bad;
/* WARNING: all LV_TYPE_RAID* have to be in a sequence in the _lv_type_name_enum */
segtype_name = first_seg(lv)->segtype->name;
if (!strncmp(segtype_name, _lv_type_names[LV_TYPE_RAID5], strlen(_lv_type_names[LV_TYPE_RAID5])) &&
!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID5]))
return 0;
if (!strcmp(seg_name, SEG_TYPE_NAME_RAID5_LA)) {
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID5_LA]))
goto_bad;
} else if (!strcmp(seg_name, SEG_TYPE_NAME_RAID5_RA)) {
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID5_RA]))
goto_bad;
} else if (!strcmp(seg_name, SEG_TYPE_NAME_RAID5_LS)) {
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID5_LS]))
goto_bad;
} else if (!strcmp(seg_name, SEG_TYPE_NAME_RAID5_RS)) {
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID5_RS]))
goto_bad;
}
} else if (!strncmp(seg_name = first_seg(lv)->segtype->name, SEG_TYPE_NAME_RAID6, strlen(SEG_TYPE_NAME_RAID6))) {
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID6]))
goto_bad;
if (!strncmp(segtype_name, _lv_type_names[LV_TYPE_RAID6], strlen(_lv_type_names[LV_TYPE_RAID6])) &&
!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID6]))
return 0;
if (!strcmp(seg_name, SEG_TYPE_NAME_RAID6_ZR)) {
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID6_ZR]))
goto_bad;
} else if (!strcmp(seg_name, SEG_TYPE_NAME_RAID6_NR)) {
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID6_NR]))
goto_bad;
} else if (!strcmp(seg_name, SEG_TYPE_NAME_RAID6_NC)) {
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[LV_TYPE_RAID6_NC]))
goto_bad;
}
for (t = LV_TYPE_RAID0; t <= LV_TYPE_RAID6_0_6; t++) {
if (!strcmp(segtype_name, _lv_type_names[t]))
if (!str_list_add_no_dup_check(mem, layout, _lv_type_names[t]))
goto bad;
else
break;
}
return 1;
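
The _lv_layout_and_role_raid() changes above fill the string lists behind the lv_layout and lv_role reporting fields. Assuming the standard lvs reporting options, the result can be inspected with something like the following (vg is a placeholder name):

    lvs -o lv_name,segtype,lv_layout,lv_role vg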
@ -928,6 +926,9 @@ dm_percent_t copy_percent(const struct logical_volume *lv)
uint32_t numerator = 0u, denominator = 0u;
struct lv_segment *seg;
if (seg_is_raid0(first_seg(lv)))
return DM_PERCENT_INVALID;
dm_list_iterate_items(seg, &lv->segments) {
denominator += seg->area_len;
@ -1038,6 +1039,7 @@ static int _release_and_discard_lv_segment_area(struct lv_segment *seg, uint32_t
uint32_t area_reduction, int with_discard)
{
struct lv_segment *cache_seg;
struct logical_volume *lv = seg_lv(seg, s);
if (seg_type(seg, s) == AREA_UNASSIGNED)
return 1;
@ -1055,10 +1057,10 @@ static int _release_and_discard_lv_segment_area(struct lv_segment *seg, uint32_t
return 1;
}
if (lv_is_mirror_image(seg_lv(seg, s)) ||
lv_is_thin_pool_data(seg_lv(seg, s)) ||
lv_is_cache_pool_data(seg_lv(seg, s))) {
if (!lv_reduce(seg_lv(seg, s), area_reduction))
if (lv_is_mirror_image(lv) ||
lv_is_thin_pool_data(lv) ||
lv_is_cache_pool_data(lv)) {
if (!lv_reduce(lv, area_reduction))
return_0; /* FIXME: any upper level reporting */
return 1;
}
@ -1072,44 +1074,57 @@ static int _release_and_discard_lv_segment_area(struct lv_segment *seg, uint32_t
return_0;
}
if (lv_is_raid_image(seg_lv(seg, s))) {
/*
* FIXME: Use lv_reduce not lv_remove
* We use lv_remove for now, because I haven't figured out
* why lv_reduce won't remove the LV.
lv_reduce(seg_lv(seg, s), area_reduction);
*/
if (area_reduction != seg->area_len) {
if (lv_is_raid_image(lv)) {
uint32_t len = seg_is_raid0(seg) ? lv->le_count : seg->area_len;
/* FIXME: support shrinking of raid volumes */
if (area_reduction == len) {
if (!lv_reduce(lv, lv->le_count)) {
log_error("Failed to remove RAID image %s", lv->name);
return 0;
}
#if 0
if (!remove_seg_from_segs_using_this_lv(lv, seg))
return_0;
seg_lv(seg, s) = NULL;
seg_le(seg, s) = 0;
seg_type(seg, s) = AREA_UNASSIGNED;
#endif
if (seg->meta_areas) {
struct logical_volume *mlv = seg_metalv(seg, s);
if (mlv) {
if (!lv_reduce(mlv, mlv->le_count)) {
log_error("Failed to remove RAID meta-device %s", mlv->name);
return 0;
}
#if 0
if (!remove_seg_from_segs_using_this_lv(mlv, seg))
return_0;
seg_metalv(seg, s) = NULL;
seg_metale(seg, s) = 0;
seg_metatype(seg, s) = AREA_UNASSIGNED;
#endif
}
}
} else {
log_error("Unable to reduce RAID LV - operation not implemented.");
return_0;
} else {
if (!lv_remove(seg_lv(seg, s))) {
log_error("Failed to remove RAID image %s",
seg_lv(seg, s)->name);
return 0;
}
}
/* Remove metadata area if image has been removed */
if (area_reduction == seg->area_len) {
if (!lv_reduce(seg_metalv(seg, s),
seg_metalv(seg, s)->le_count)) {
log_error("Failed to remove RAID meta-device %s",
seg_metalv(seg, s)->name);
return 0;
}
}
return 1;
}
if (area_reduction == seg->area_len) {
} else if (area_reduction == seg->area_len) {
log_very_verbose("Remove %s:%" PRIu32 "[%" PRIu32 "] from "
"the top of LV %s:%" PRIu32,
seg->lv->name, seg->le, s,
seg_lv(seg, s)->name, seg_le(seg, s));
seg->lv->name, seg->le, s, lv->name, seg_le(seg, s));
if (!remove_seg_from_segs_using_this_lv(seg_lv(seg, s), seg))
if (!remove_seg_from_segs_using_this_lv(lv, seg))
return_0;
seg_lv(seg, s) = NULL;
seg_le(seg, s) = 0;
seg_type(seg, s) = AREA_UNASSIGNED;
@ -1203,18 +1218,23 @@ int set_lv_segment_area_lv(struct lv_segment *seg, uint32_t area_num,
seg->lv->name, seg->le, area_num, lv->name, le);
if (status & RAID_META) {
seg->meta_areas[area_num].type = AREA_LV;
seg_metalv(seg, area_num) = lv;
if (le) {
log_error(INTERNAL_ERROR "Meta le != 0");
return 0;
/* FIXME: should always be allocated for RAID_META? */
if (seg->meta_areas) {
seg->meta_areas[area_num].type = AREA_LV;
seg_metalv(seg, area_num) = lv;
if (le) {
log_error(INTERNAL_ERROR "Meta le != 0");
return 0;
}
seg_metale(seg, area_num) = 0;
}
seg_metale(seg, area_num) = 0;
} else {
seg->areas[area_num].type = AREA_LV;
seg_lv(seg, area_num) = lv;
seg_le(seg, area_num) = le;
}
lv->status |= status;
if (!add_seg_to_segs_using_this_lv(lv, seg))
@ -1252,14 +1272,16 @@ static int _lv_segment_reduce(struct lv_segment *seg, uint32_t reduction)
uint32_t area_reduction, s;
/* Caller must ensure exact divisibility */
if (seg_is_striped(seg)) {
if (seg_is_striped(seg) || seg_is_raid0(seg)) {
if (reduction % seg->area_count) {
log_error("Segment extent reduction %" PRIu32
" not divisible by #stripes %" PRIu32,
reduction, seg->area_count);
return 0;
}
area_reduction = (reduction / seg->area_count);
area_reduction = reduction / seg->area_count;
} else
area_reduction = reduction;
@ -1390,7 +1412,7 @@ int replace_lv_with_error_segment(struct logical_volume *lv)
* that suggest it is anything other than "error".
*/
/* FIXME Check for other flags that need removing */
lv->status &= ~(MIRROR|MIRRORED|PVMOVE|LOCKED);
lv->status &= ~(MIRROR|MIRRORED|RAID|PVMOVE|LOCKED);
/* FIXME Check for any attached LVs that will become orphans e.g. mirror logs */
@ -1507,7 +1529,7 @@ static uint32_t _calc_area_multiple(const struct segment_type *segtype,
* the 'stripes' argument will always need to
* be given.
*/
if (!strcmp(segtype->name, _lv_type_names[LV_TYPE_RAID10])) {
if (segtype_is_raid10(segtype)) {
if (!stripes)
return area_count / 2;
return stripes;
@ -1606,7 +1628,11 @@ static struct alloc_handle *_alloc_init(struct cmd_context *cmd,
* account for the extra parity devices because the array already
* exists and they only want replacement drives.
*/
#if 0
parity_count = (area_count <= segtype->parity_devs) ? 0 : segtype->parity_devs;
#else
parity_count = segtype->parity_devs;
#endif
alloc_count = area_count + parity_count;
if (segtype_is_raid(segtype) && metadata_area_count)
/* RAID has a meta area for each device */
@ -3579,23 +3605,23 @@ static int _lv_extend_layered_lv(struct alloc_handle *ah,
return 0;
}
/* Extend metadata LVs only on initial creation */
/* Extend metadata LVs if any only on initial creation */
if (seg_is_raid(seg) && !lv->le_count) {
if (!seg->meta_areas) {
log_error("No meta_areas for RAID type");
return 0;
if (seg->meta_areas) {
meta_lv = seg_metalv(seg, s);
if (meta_lv) {
if (!lv_add_segment(ah, fa + seg->area_count, 1,
meta_lv, segtype, 0,
meta_lv->status, 0)) {
log_error("Failed to extend %s in %s.",
meta_lv->name, lv->name);
return 0;
}
lv_set_visible(meta_lv);
clear_metadata = 1;
}
}
meta_lv = seg_metalv(seg, s);
if (!lv_add_segment(ah, fa + seg->area_count, 1,
meta_lv, segtype, 0,
meta_lv->status, 0)) {
log_error("Failed to extend %s in %s.",
meta_lv->name, lv->name);
return 0;
}
lv_set_visible(meta_lv);
clear_metadata = 1;
}
fa += stripes;
@ -3655,7 +3681,9 @@ static int _lv_extend_layered_lv(struct alloc_handle *ah,
* The MD bitmap is limited to being able to track 2^21 regions.
* The region_size must be adjusted to meet that criteria.
*/
while (seg_is_raid(seg) && (seg->region_size < (lv->size / (1 << 21)))) {
while (seg_is_raid(seg) &&
!seg_is_raid0(seg) &&
(seg->region_size < (lv->size / (1 << 21)))) {
seg->region_size *= 2;
log_very_verbose("Adjusting RAID region_size from %uS to %uS"
" to support large LV size",
@ -3694,14 +3722,17 @@ int lv_extend(struct logical_volume *lv,
if (segtype_is_virtual(segtype))
return lv_add_virtual_segment(lv, 0u, extents, segtype);
if (!lv->le_count && segtype_is_pool(segtype)) {
/*
* Pool allocations treat the metadata device like a mirror log.
*/
/* FIXME Support striped metadata pool */
log_count = 1;
} else if (segtype_is_raid(segtype) && !lv->le_count)
log_count = mirrors * stripes;
if (!lv->le_count) {
if (segtype_is_pool(segtype)) {
/*
* Thinpool and cache_pool allocations treat the metadata
* device like a mirror log.
*/
/* FIXME Support striped metadata pool */
log_count = 1;
} else if (segtype_is_raid(segtype))
log_count = mirrors * stripes;
}
/* FIXME log_count should be 1 for mirrors */
if (!(ah = allocate_extents(lv->vg, lv, segtype, stripes, mirrors,
@ -3842,7 +3873,7 @@ static int _rename_sub_lv(struct logical_volume *lv,
lv_name_old, lv->name);
return 0;
}
suffix = lv->name + len;
suffix = (char *) lv->name + len;
/*
* Compose a new name for sub lv:
@ -3922,7 +3953,7 @@ int for_each_sub_lv(struct logical_volume *lv,
return_0;
}
if (!seg_is_raid(seg))
if (!seg_is_raid(seg) || !seg->meta_areas)
continue;
/* RAID has meta_areas */
@ -4573,7 +4604,10 @@ static int _lvresize_adjust_extents(struct cmd_context *cmd, struct logical_volu
return 0;
}
if (!strcmp(mirr_seg->segtype->name, _lv_type_names[LV_TYPE_RAID10])) {
if (!strcmp(mirr_seg->segtype->name, _lv_type_names[LV_TYPE_RAID0])) {
lp->stripes = mirr_seg->area_count;
lp->stripe_size = mirr_seg->stripe_size;
} else if (!strcmp(mirr_seg->segtype->name, _lv_type_names[LV_TYPE_RAID10])) {
/* FIXME Warn if command line values are being overridden? */
lp->stripes = mirr_seg->area_count / seg_mirrors;
lp->stripe_size = mirr_seg->stripe_size;
@ -4583,9 +4617,10 @@ static int _lvresize_adjust_extents(struct cmd_context *cmd, struct logical_volu
/* FIXME We will need to support resize for metadata LV as well,
* and data LV could be any type (i.e. mirror)) */
dm_list_iterate_items(seg, seg_mirrors ? &seg_lv(mirr_seg, 0)->segments : &lv->segments) {
/* Allow through "striped" and RAID 4/5/6/10 */
/* Allow through "striped" and RAID 0/10/4/5/6 */
if (!seg_is_striped(seg) &&
(!seg_is_raid(seg) || seg_is_mirrored(seg)) &&
strcmp(seg->segtype->name, _lv_type_names[LV_TYPE_RAID0]) &&
strcmp(seg->segtype->name, _lv_type_names[LV_TYPE_RAID10]))
continue;
@ -5285,12 +5320,19 @@ struct dm_list *build_parallel_areas_from_lv(struct logical_volume *lv,
use_pvmove_parent_lv ? spvs->len * _calc_area_multiple(seg->pvmove_source_seg->segtype, seg->pvmove_source_seg->area_count, 0) : spvs->len,
use_pvmove_parent_lv ? seg->pvmove_source_seg : NULL,
&spvs->len,
0, 0, -1, 0, _add_pvs, (void *) spvs))
0, 0, -1, 0, _add_pvs, (void *) spvs)) {
return_NULL;
}
current_le = spvs->le + spvs->len;
#if 1
raid_multiple = seg_is_mirror(seg) ? 1 : seg->area_count - seg->segtype->parity_devs;
#else
raid_multiple = (seg_is_mirror(seg) || seg_is_raid1(seg)) ? 1 :
seg->area_count - seg->segtype->parity_devs;
raid_multiple = (seg->segtype->parity_devs) ?
seg->area_count - seg->segtype->parity_devs : 1;
#endif
} while ((current_le * raid_multiple) < lv->le_count);
if (create_single_list) {
@ -6817,19 +6859,21 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
if (!(create_segtype = get_segtype_from_string(vg->cmd, "striped")))
return_0;
} else if (seg_is_mirrored(lp) || seg_is_raid(lp)) {
/* FIXME This will not pass cluster lock! */
init_mirror_in_sync(lp->nosync);
if (!seg_is_raid0(lp)) {
/* FIXME: this will not pass cluster lock! */
init_mirror_in_sync(lp->nosync);
if (lp->nosync) {
log_warn("WARNING: New %s won't be synchronised. "
"Don't read what you didn't write!",
lp->segtype->name);
status |= LV_NOTSYNCED;
if (lp->nosync) {
log_warn("WARNING: New %s won't be synchronised. "
"Don't read what you didn't write!",
lp->segtype->name);
status |= LV_NOTSYNCED;
}
lp->region_size = adjusted_mirror_region_size(vg->extent_size,
lp->extents,
lp->region_size, 0);
}
lp->region_size = adjusted_mirror_region_size(vg->extent_size,
lp->extents,
lp->region_size, 0);
} else if (pool_lv && seg_is_thin_volume(lp)) {
if (!lv_is_thin_pool(pool_lv)) {
log_error("Logical volume %s is not a thin pool.",
@ -6990,8 +7034,10 @@ static struct logical_volume *_lv_create_an_lv(struct volume_group *vg,
goto revert_new_lv;
}
} else if (seg_is_raid(lp)) {
first_seg(lv)->min_recovery_rate = lp->min_recovery_rate;
first_seg(lv)->max_recovery_rate = lp->max_recovery_rate;
if (!seg_is_raid0(lp)) {
first_seg(lv)->min_recovery_rate = lp->min_recovery_rate;
first_seg(lv)->max_recovery_rate = lp->max_recovery_rate;
}
} else if (seg_is_thin_pool(lp)) {
first_seg(lv)->chunk_size = lp->chunk_size;
first_seg(lv)->zero_new_blocks = lp->zero ? 1 : 0;


@ -137,10 +137,10 @@ int check_lv_segments(struct logical_volume *lv, int complete_vg)
inc_error_count;
}
area_multiplier = segtype_is_striped(seg->segtype) ?
seg->area_count : 1;
area_multiplier = seg_is_striped(seg) ? seg->area_count : 1;
if (seg->area_len * area_multiplier != seg->len) {
printf("segtype=%s area_len=%d area_len*mp=%u seg->len=%u\n", seg->segtype->name, seg->area_len, seg->area_len * area_multiplier, seg->len);
log_error("LV %s: segment %u has inconsistent "
"area_len %u",
lv->name, seg_count, seg->area_len);
@ -405,7 +405,10 @@ int check_lv_segments(struct logical_volume *lv, int complete_vg)
continue;
if (lv == seg_lv(seg, s))
seg_found++;
if (seg_is_raid(seg) && (lv == seg_metalv(seg, s)))
/* HM FIXME: TESTME */
// if (seg_is_raid(seg) && seg->meta_areas && lv == seg_metalv(seg, s))
if (seg->meta_areas && lv == seg_metalv(seg, s))
seg_found++;
}
if (seg_is_replicator_dev(seg)) {
@ -419,13 +422,14 @@ int check_lv_segments(struct logical_volume *lv, int complete_vg)
seg_found++;
}
if (seg_is_replicator(seg) && lv == seg->rlog_lv)
seg_found++;
seg_found++;
if (seg->log_lv == lv)
seg_found++;
if (seg->metadata_lv == lv || seg->pool_lv == lv)
seg_found++;
if (seg_is_thin_volume(seg) && (seg->origin == lv || seg->external_lv == lv))
seg_found++;
if (!seg_found) {
log_error("LV %s is used by LV %s:%" PRIu32 "-%" PRIu32
", but missing ptr from %s to %s",
@ -444,11 +448,22 @@ int check_lv_segments(struct logical_volume *lv, int complete_vg)
seg_found = 0;
dm_list_iterate_items(seg2, &seg->lv->segments)
if (sl->seg == seg2) {
if (seg == seg2) {
seg_found++;
break;
}
if (!seg_found) {
{
unsigned ss = 0;
printf("seg->lv=%s\n", seg->lv->name);
dm_list_iterate_items(seg2, &seg->lv->segments) {
ss++;
printf("seg_lv(seg2, 0)->segs_using_this_lv");
}
printf("s=%u\n", ss);
}
log_error("LV segment %s:%" PRIu32 "-%" PRIu32
" is incorrectly listed as being used by LV %s",
seg->lv->name, seg->le, seg->le + seg->len - 1,


@ -1082,7 +1082,10 @@ int lv_raid_split_and_track(struct logical_volume *lv,
struct dm_list *splittable_pvs);
int lv_raid_merge(struct logical_volume *lv);
int lv_raid_reshape(struct logical_volume *lv,
const struct segment_type *new_segtype);
const struct segment_type *new_segtype,
const unsigned stripes,
const unsigned new_stripe_size,
struct dm_list *allocate_pvs);
int lv_raid_replace(struct logical_volume *lv, struct dm_list *remove_pvs,
struct dm_list *allocate_pvs);
int lv_raid_remove_missing(struct logical_volume *lv);


@ -113,7 +113,7 @@ uint32_t lv_mirror_count(const struct logical_volume *lv)
seg = first_seg(lv);
/* FIXME: RAID10 only supports 2 copies right now */
if (!strcmp(seg->segtype->name, "raid10"))
if (seg_is_raid10(seg))
return 2;
if (lv_is_pvmove(lv))

File diff suppressed because it is too large


@ -150,23 +150,76 @@ struct segment_type *init_unknown_segtype(struct cmd_context *cmd,
const char *name);
#define RAID_FEATURE_RAID10 (1U << 0) /* version 1.3 */
#define RAID_FEATURE_RAID0 (1U << 1) /* version 1.6 */
#ifdef RAID_INTERNAL
int init_raid_segtypes(struct cmd_context *cmd, struct segtype_library *seglib);
#endif
#define SEG_TYPE_NAME_RAID1 "raid1"
#define SEG_TYPE_NAME_RAID10 "raid10"
#define SEG_TYPE_NAME_RAID4 "raid4"
#define SEG_TYPE_NAME_RAID5 "raid5"
#define SEG_TYPE_NAME_RAID5_LA "raid5_la"
#define SEG_TYPE_NAME_RAID5_LS "raid5_ls"
#define SEG_TYPE_NAME_RAID5_RA "raid5_ra"
#define SEG_TYPE_NAME_RAID5_RS "raid5_rs"
#define SEG_TYPE_NAME_RAID6 "raid6"
#define SEG_TYPE_NAME_RAID6_NC "raid6_nc"
#define SEG_TYPE_NAME_RAID6_NR "raid6_nr"
#define SEG_TYPE_NAME_RAID6_ZR "raid6_zr"
#define SEG_TYPE_NAME_RAID0 "raid0"
#define SEG_TYPE_NAME_RAID1 "raid1"
#define SEG_TYPE_NAME_RAID10 "raid10"
#define SEG_TYPE_NAME_RAID4 "raid4" /* this is SEG_TYPE_NAME_RAID4_0 */
#define SEG_TYPE_NAME_RAID4_N "raid4_n"
#define SEG_TYPE_NAME_RAID5 "raid5"
#define SEG_TYPE_NAME_RAID5_0 "raid5_0"
#define SEG_TYPE_NAME_RAID5_N "raid5_n"
#define SEG_TYPE_NAME_RAID5_LA "raid5_la"
#define SEG_TYPE_NAME_RAID5_LS "raid5_ls"
#define SEG_TYPE_NAME_RAID5_RA "raid5_ra"
#define SEG_TYPE_NAME_RAID5_RS "raid5_rs"
#define SEG_TYPE_NAME_RAID6 "raid6"
#define SEG_TYPE_NAME_RAID6_NC "raid6_nc"
#define SEG_TYPE_NAME_RAID6_NR "raid6_nr"
#define SEG_TYPE_NAME_RAID6_ZR "raid6_zr"
#define SEG_TYPE_NAME_RAID6_LA_6 "raid6_la_6"
#define SEG_TYPE_NAME_RAID6_LS_6 "raid6_ls_6"
#define SEG_TYPE_NAME_RAID6_RA_6 "raid6_ra_6"
#define SEG_TYPE_NAME_RAID6_RS_6 "raid6_rs_6"
#define SEG_TYPE_NAME_RAID6_0_6 "raid6_0_6"
#define SEG_TYPE_NAME_RAID6_N_6 "raid6_n_6"
#define segtype_is_raid0(segtype) (!strcmp((segtype)->name, SEG_TYPE_NAME_RAID0))
#define segtype_is_raid1(segtype) (!strcmp((segtype)->name, SEG_TYPE_NAME_RAID1))
#define segtype_is_raid10(segtype) (!strcmp((segtype)->name, SEG_TYPE_NAME_RAID10))
#define segtype_is_any_raid4(segtype) (!strncmp((segtype)->name, SEG_TYPE_NAME_RAID4, 5))
#define segtype_is_raid4(segtype) (!strcmp((segtype)->name, SEG_TYPE_NAME_RAID4))
#define segtype_is_raid4_n(segtype) (!strcmp((segtype)->name, SEG_TYPE_NAME_RAID4_N))
#define segtype_is_any_raid5(segtype) (!strncmp((segtype)->name, SEG_TYPE_NAME_RAID5, 5))
#define segtype_is_raid5_ls(segtype) (!strcmp((segtype)->name, SEG_TYPE_NAME_RAID5_LS))
#define segtype_is_raid5_rs(segtype) (!strcmp((segtype)->name, SEG_TYPE_NAME_RAID5_RS))
#define segtype_is_raid5_la(segtype) (!strcmp((segtype)->name, SEG_TYPE_NAME_RAID5_LA))
#define segtype_is_raid5_ra(segtype) (!strcmp((segtype)->name, SEG_TYPE_NAME_RAID5_RA))
#define segtype_is_raid5_0(segtype) (!strcmp((segtype)->name, SEG_TYPE_NAME_RAID5_0))
#define segtype_is_raid5_n(segtype) (!strcmp((segtype)->name, SEG_TYPE_NAME_RAID5_N))
#define segtype_is_any_raid6(segtype) (!strncmp((segtype)->name, SEG_TYPE_NAME_RAID6, 5))
#define segtype_is_raid6_ls_6(segtype) (!strcmp((segtype)->name, SEG_TYPE_NAME_RAID6_LS_6))
#define segtype_is_raid6_rs_6(segtype) (!strcmp((segtype)->name, SEG_TYPE_NAME_RAID6_RS_6))
#define segtype_is_raid6_la_6(segtype) (!strcmp((segtype)->name, SEG_TYPE_NAME_RAID6_LA_6))
#define segtype_is_raid6_ra_6(segtype) (!strcmp((segtype)->name, SEG_TYPE_NAME_RAID6_RA_6))
#define segtype_is_raid6_0_6(segtype) (!strcmp((segtype)->name, SEG_TYPE_NAME_RAID6_0_6))
#define segtype_is_raid6_n_6(segtype) (!strcmp((segtype)->name, SEG_TYPE_NAME_RAID6_N_6))
#define seg_is_raid0(seg) segtype_is_raid0((seg)->segtype)
#define seg_is_raid1(seg) segtype_is_raid1((seg)->segtype)
#define seg_is_raid10(seg) segtype_is_raid10((seg)->segtype)
#define seg_is_any_raid4(seg) segtype_is_any_raid4((seg)->segtype)
#define seg_is_raid4(seg) segtype_is_raid4((seg)->segtype)
#define seg_is_raid4_n(seg) segtype_is_raid4_n((seg)->segtype)
#define seg_is_any_raid5(seg) segtype_is_any_raid5((seg)->segtype)
#define seg_is_raid5_ls(seg) segtype_is_raid5_ls((seg)->segtype)
#define seg_is_raid5_rs(seg) segtype_is_raid5_rs((seg)->segtype)
#define seg_is_raid5_la(seg) segtype_is_raid5_la((seg)->segtype)
#define seg_is_raid5_ra(seg) segtype_is_raid5_ra((seg)->segtype)
#define seg_is_raid5_0(seg) segtype_is_raid5_0((seg)->segtype)
#define seg_is_raid5_n(seg) segtype_is_raid5_n((seg)->segtype)
#define seg_is_any_raid6(seg) segtype_is_any_raid6((seg)->segtype)
#define seg_is_raid6_ls_6(seg) segtype_is_raid6_ls_6((seg)->segtype)
#define seg_is_raid6_rs_6(seg) segtype_is_raid6_rs_6((seg)->segtype)
#define seg_is_raid6_la_6(seg) segtype_is_raid6_la_6((seg)->segtype)
#define seg_is_raid6_ra_6(seg) segtype_is_raid6_ra_6((seg)->segtype)
#define seg_is_raid6_0_6(seg) segtype_is_raid6_0_6((seg)->segtype)
#define seg_is_raid6_n_6(seg) segtype_is_raid6_n_6((seg)->segtype)
#ifdef REPLICATOR_INTERNAL
int init_replicator_segtype(struct cmd_context *cmd, struct segtype_library *seglib);


@ -1,5 +1,5 @@
/*
* Copyright (C) 2011-2013 Red Hat, Inc. All rights reserved.
* Copyright (C) 2011-2014 Red Hat, Inc. All rights reserved.
*
* This file is part of LVM2.
*
@ -34,7 +34,7 @@ static void _raid_display(const struct lv_segment *seg)
display_stripe(seg, s, " ");
}
for (s = 0; s < seg->area_count; ++s)
for (s = 0; seg->meta_areas && s < seg->area_count; ++s)
log_print(" Raid Metadata LV%2d\t%s", s, seg_metalv(seg, s)->name);
log_print(" ");
@ -43,8 +43,9 @@ static void _raid_display(const struct lv_segment *seg)
static int _raid_text_import_area_count(const struct dm_config_node *sn,
uint32_t *area_count)
{
if (!dm_config_get_uint32(sn, "device_count", area_count)) {
log_error("Couldn't read 'device_count' for "
if (!dm_config_get_uint32(sn, "device_count", area_count) &&
!dm_config_get_uint32(sn, "stripe_count", area_count)) {
log_error("Couldn't read '{device|stripe}_count' for "
"segment '%s'.", dm_config_parent_name(sn));
return 0;
}
@ -56,7 +57,7 @@ static int _raid_text_import_areas(struct lv_segment *seg,
const struct dm_config_value *cv)
{
unsigned int s;
struct logical_volume *lv1;
struct logical_volume *lv;
const char *seg_name = dm_config_parent_name(sn);
if (!seg->area_count) {
@ -70,29 +71,34 @@ static int _raid_text_import_areas(struct lv_segment *seg,
return 0;
}
if (!cv->next) {
log_error("Missing data device in areas array for segment %s.", seg_name);
return 0;
}
/* Metadata device comes first unless RAID0 optionally w/o metadata dev */
if (strcmp(cv->v.str, "-")) {
if (!cv->next) {
log_error("Missing data device in areas array for segment %s.", seg_name);
return 0;
}
/* Metadata device comes first */
if (!(lv1 = find_lv(seg->lv->vg, cv->v.str))) {
log_error("Couldn't find volume '%s' for segment '%s'.",
cv->v.str ? : "NULL", seg_name);
return 0;
}
if (!set_lv_segment_area_lv(seg, s, lv1, 0, RAID_META))
if (!(lv = find_lv(seg->lv->vg, cv->v.str))) {
log_error("Couldn't find volume '%s' for segment '%s'.",
cv->v.str ? : "NULL", seg_name);
return 0;
}
if (!set_lv_segment_area_lv(seg, s, lv, 0, RAID_META))
return_0;
}
/* Data device comes second */
cv = cv->next;
if (!(lv1 = find_lv(seg->lv->vg, cv->v.str))) {
/* Data device comes second unless RAID0 */
if (!(lv = find_lv(seg->lv->vg, cv->v.str))) {
log_error("Couldn't find volume '%s' for segment '%s'.",
cv->v.str ? : "NULL", seg_name);
return 0;
}
if (!set_lv_segment_area_lv(seg, s, lv1, 0, RAID_IMAGE))
return_0;
if (!set_lv_segment_area_lv(seg, s, lv, 0, RAID_IMAGE))
return_0;
}
/*
@ -111,50 +117,29 @@ static int _raid_text_import(struct lv_segment *seg,
const struct dm_config_node *sn,
struct dm_hash_table *pv_hash)
{
int i;
const struct dm_config_value *cv;
const struct {
const char *attr_name;
void *var;
} node_import[] = {
{ "region_size", &seg->region_size },
{ "stripe_size", &seg->stripe_size },
{ "writebehind", &seg->writebehind },
{ "min_recovery_rate", &seg->min_recovery_rate },
{ "max_recovery_rate", &seg->max_recovery_rate },
}, *nip = node_import;
if (dm_config_has_node(sn, "region_size")) {
if (!dm_config_get_uint32(sn, "region_size", &seg->region_size)) {
log_error("Couldn't read 'region_size' for "
"segment %s of logical volume %s.",
dm_config_parent_name(sn), seg->lv->name);
return 0;
}
}
if (dm_config_has_node(sn, "stripe_size")) {
if (!dm_config_get_uint32(sn, "stripe_size", &seg->stripe_size)) {
log_error("Couldn't read 'stripe_size' for "
"segment %s of logical volume %s.",
dm_config_parent_name(sn), seg->lv->name);
return 0;
}
}
if (dm_config_has_node(sn, "writebehind")) {
if (!dm_config_get_uint32(sn, "writebehind", &seg->writebehind)) {
log_error("Couldn't read 'writebehind' for "
"segment %s of logical volume %s.",
dm_config_parent_name(sn), seg->lv->name);
return 0;
}
}
if (dm_config_has_node(sn, "min_recovery_rate")) {
if (!dm_config_get_uint32(sn, "min_recovery_rate",
&seg->min_recovery_rate)) {
log_error("Couldn't read 'min_recovery_rate' for "
"segment %s of logical volume %s.",
dm_config_parent_name(sn), seg->lv->name);
return 0;
}
}
if (dm_config_has_node(sn, "max_recovery_rate")) {
if (!dm_config_get_uint32(sn, "max_recovery_rate",
&seg->max_recovery_rate)) {
log_error("Couldn't read 'max_recovery_rate' for "
"segment %s of logical volume %s.",
dm_config_parent_name(sn), seg->lv->name);
return 0;
for (i = 0; i < DM_ARRAY_SIZE(node_import); i++, nip++) {
if (dm_config_has_node(sn, nip->attr_name)) {
if (!dm_config_get_uint32(sn, nip->attr_name, nip->var)) {
log_error("Couldn't read '%s' for segment %s of logical volume %s.",
nip->attr_name, dm_config_parent_name(sn), seg->lv->name);
return 0;
}
}
}
if (!dm_config_get_list(sn, "raids", &cv)) {
log_error("Couldn't find RAID array for "
"segment %s of logical volume %s.",
@ -174,17 +159,29 @@ static int _raid_text_import(struct lv_segment *seg,
static int _raid_text_export(const struct lv_segment *seg, struct formatter *f)
{
outf(f, "device_count = %u", seg->area_count);
if (seg->region_size)
outf(f, "region_size = %" PRIu32, seg->region_size);
int raid0 = seg_is_raid0(seg);
if (raid0)
outfc(f, (seg->area_count == 1) ? "# linear" : NULL,
"stripe_count = %u", seg->area_count);
else {
outf(f, "device_count = %u", seg->area_count);
if (seg->region_size)
outf(f, "region_size = %" PRIu32, seg->region_size);
}
if (seg->stripe_size)
outf(f, "stripe_size = %" PRIu32, seg->stripe_size);
if (seg->writebehind)
outf(f, "writebehind = %" PRIu32, seg->writebehind);
if (seg->min_recovery_rate)
outf(f, "min_recovery_rate = %" PRIu32, seg->min_recovery_rate);
if (seg->max_recovery_rate)
outf(f, "max_recovery_rate = %" PRIu32, seg->max_recovery_rate);
if (!raid0) {
if (seg->writebehind)
outf(f, "writebehind = %" PRIu32, seg->writebehind);
if (seg->min_recovery_rate)
outf(f, "min_recovery_rate = %" PRIu32, seg->min_recovery_rate);
if (seg->max_recovery_rate)
outf(f, "max_recovery_rate = %" PRIu32, seg->max_recovery_rate);
}
return out_areas(f, seg, "raid");
}
@ -216,35 +213,43 @@ static int _raid_add_target_line(struct dev_manager *dm __attribute__((unused)),
* 64 device restriction imposed by kernel as well. It is
* not strictly a userspace limitation.
*/
/* FIXME: HM: is this actually a constraint still (253 seems to be the limit)? */
if (seg->area_count > 64) {
log_error("Unable to handle more than 64 devices in a "
"single RAID array");
return 0;
}
if (!seg->region_size) {
log_error("Missing region size for mirror segment.");
return 0;
if (!seg_is_raid0(seg)) {
if (!seg->region_size) {
log_error("Missing region size for raid segment in %s.",
seg_lv(seg, 0)->name);
return 0;
}
for (s = 0; s < seg->area_count; s++)
if (seg_lv(seg, s)->status & LV_REBUILD)
rebuilds |= 1ULL << s;
for (s = 0; s < seg->area_count; s++)
if (seg_lv(seg, s)->status & LV_WRITEMOSTLY)
writemostly |= 1ULL << s;
if (mirror_in_sync())
flags = DM_NOSYNC;
}
for (s = 0; s < seg->area_count; s++)
if (seg_lv(seg, s)->status & LV_REBUILD)
rebuilds |= 1ULL << s;
for (s = 0; s < seg->area_count; s++)
if (seg_lv(seg, s)->status & LV_WRITEMOSTLY)
writemostly |= 1ULL << s;
if (mirror_in_sync())
flags = DM_NOSYNC;
params.raid_type = lvseg_name(seg);
if (seg->segtype->parity_devs) {
/* RAID 4/5/6 */
params.mirrors = 1;
params.stripes = seg->area_count - seg->segtype->parity_devs;
} else if (strcmp(seg->segtype->name, SEG_TYPE_NAME_RAID10)) {
} else if (seg_is_raid0(seg)) {
params.mirrors = 1;
params.stripes = seg->area_count;
} else if (seg_is_raid10(seg)) {
/* RAID 10 only supports 2 mirrors now */
/* FIXME: HM: is this actually a constraint still? */
params.mirrors = 2;
params.stripes = seg->area_count / 2;
} else {
@ -253,12 +258,17 @@ static int _raid_add_target_line(struct dev_manager *dm __attribute__((unused)),
params.stripes = 1;
params.writebehind = seg->writebehind;
}
params.region_size = seg->region_size;
/* RAID 0 doesn't have a bitmap, thus no region_size etc. */
if (!seg_is_raid0(seg)) {
params.region_size = seg->region_size;
params.rebuilds = rebuilds;
params.writemostly = writemostly;
params.min_recovery_rate = seg->min_recovery_rate;
params.max_recovery_rate = seg->max_recovery_rate;
}
params.stripe_size = seg->stripe_size;
params.rebuilds = rebuilds;
params.writemostly = writemostly;
params.min_recovery_rate = seg->min_recovery_rate;
params.max_recovery_rate = seg->max_recovery_rate;
params.flags = flags;
if (!dm_tree_node_add_raid_target_with_params(node, len, &params))
@ -332,6 +342,7 @@ static int _raid_target_present(struct cmd_context *cmd,
const char *feature;
} _features[] = {
{ 1, 3, RAID_FEATURE_RAID10, SEG_TYPE_NAME_RAID10 },
{ 1, 7, RAID_FEATURE_RAID0, SEG_TYPE_NAME_RAID0 },
};
static int _raid_checked = 0;
@ -437,18 +448,28 @@ static const struct raid_type {
unsigned parity;
int extra_flags;
} _raid_types[] = {
{ SEG_TYPE_NAME_RAID1, 0, SEG_AREAS_MIRRORED },
{ SEG_TYPE_NAME_RAID10, 0, SEG_AREAS_MIRRORED },
{ SEG_TYPE_NAME_RAID4, 1 },
{ SEG_TYPE_NAME_RAID5, 1 },
{ SEG_TYPE_NAME_RAID5_LA, 1 },
{ SEG_TYPE_NAME_RAID5_LS, 1 },
{ SEG_TYPE_NAME_RAID5_RA, 1 },
{ SEG_TYPE_NAME_RAID5_RS, 1 },
{ SEG_TYPE_NAME_RAID6, 2 },
{ SEG_TYPE_NAME_RAID6_NC, 2 },
{ SEG_TYPE_NAME_RAID6_NR, 2 },
{ SEG_TYPE_NAME_RAID6_ZR, 2 }
{ SEG_TYPE_NAME_RAID0, 0 },
{ SEG_TYPE_NAME_RAID1, 0, SEG_AREAS_MIRRORED },
{ SEG_TYPE_NAME_RAID10, 0, SEG_AREAS_MIRRORED },
{ SEG_TYPE_NAME_RAID4, 1 },
{ SEG_TYPE_NAME_RAID4_N, 1 },
{ SEG_TYPE_NAME_RAID5, 1 },
{ SEG_TYPE_NAME_RAID5_0, 1 },
{ SEG_TYPE_NAME_RAID5_N, 1 },
{ SEG_TYPE_NAME_RAID5_LA, 1 },
{ SEG_TYPE_NAME_RAID5_LS, 1 },
{ SEG_TYPE_NAME_RAID5_RA, 1 },
{ SEG_TYPE_NAME_RAID5_RS, 1 },
{ SEG_TYPE_NAME_RAID6, 2 },
{ SEG_TYPE_NAME_RAID6_NC, 2 },
{ SEG_TYPE_NAME_RAID6_NR, 2 },
{ SEG_TYPE_NAME_RAID6_ZR, 2 },
{ SEG_TYPE_NAME_RAID6_LA_6, 2 },
{ SEG_TYPE_NAME_RAID6_LS_6, 2 },
{ SEG_TYPE_NAME_RAID6_RA_6, 2 },
{ SEG_TYPE_NAME_RAID6_RS_6, 2 },
{ SEG_TYPE_NAME_RAID6_0_6, 2 },
{ SEG_TYPE_NAME_RAID6_N_6, 2 },
};
static struct segment_type *_init_raid_segtype(struct cmd_context *cmd,


@ -42,9 +42,13 @@ enum {
SEG_ZERO,
SEG_THIN_POOL,
SEG_THIN,
SEG_RAID0,
SEG_RAID1,
SEG_RAID10,
SEG_RAID4,
SEG_RAID4_N,
SEG_RAID5_0,
SEG_RAID5_N,
SEG_RAID5_LA,
SEG_RAID5_RA,
SEG_RAID5_LS,
@ -52,6 +56,12 @@ enum {
SEG_RAID6_ZR,
SEG_RAID6_NR,
SEG_RAID6_NC,
SEG_RAID6_LA_6,
SEG_RAID6_RA_6,
SEG_RAID6_LS_6,
SEG_RAID6_RS_6,
SEG_RAID6_0_6,
SEG_RAID6_N_6,
};
/* FIXME Add crypt and multipath support */
@ -74,9 +84,13 @@ static const struct {
{ SEG_ZERO, "zero"},
{ SEG_THIN_POOL, "thin-pool"},
{ SEG_THIN, "thin"},
{ SEG_RAID0, "raid0"},
{ SEG_RAID1, "raid1"},
{ SEG_RAID10, "raid10"},
{ SEG_RAID4, "raid4"},
{ SEG_RAID4_N, "raid4_n"},
{ SEG_RAID5_0, "raid5_0"},
{ SEG_RAID5_N, "raid5_n"},
{ SEG_RAID5_LA, "raid5_la"},
{ SEG_RAID5_RA, "raid5_ra"},
{ SEG_RAID5_LS, "raid5_ls"},
@ -84,9 +98,15 @@ static const struct {
{ SEG_RAID6_ZR, "raid6_zr"},
{ SEG_RAID6_NR, "raid6_nr"},
{ SEG_RAID6_NC, "raid6_nc"},
{ SEG_RAID6_LA_6, "raid6_la_6"},
{ SEG_RAID6_RA_6, "raid6_ra_6"},
{ SEG_RAID6_LS_6, "raid6_ls_6"},
{ SEG_RAID6_RS_6, "raid6_rs_6"},
{ SEG_RAID6_0_6, "raid6_0_6"},
{ SEG_RAID6_N_6, "raid6_n_6"},
/*
*WARNING: Since 'raid' target overloads this 1:1 mapping table
* WARNING: Since 'raid' target overloads this 1:1 mapping table
* for search do not add new enum elements past them!
*/
{ SEG_RAID5_LS, "raid5"}, /* same as "raid5_ls" (default for MD also) */
@ -2088,9 +2108,12 @@ static int _emit_areas_line(struct dm_task *dmt __attribute__((unused)),
EMIT_PARAMS(*pos, "%s", synctype);
}
break;
case SEG_RAID0:
case SEG_RAID1:
case SEG_RAID10:
case SEG_RAID4:
case SEG_RAID4_N:
case SEG_RAID5_0:
case SEG_RAID5_N:
case SEG_RAID5_LA:
case SEG_RAID5_RA:
case SEG_RAID5_LS:
@ -2098,6 +2121,12 @@ static int _emit_areas_line(struct dm_task *dmt __attribute__((unused)),
case SEG_RAID6_ZR:
case SEG_RAID6_NR:
case SEG_RAID6_NC:
case SEG_RAID6_LA_6:
case SEG_RAID6_RA_6:
case SEG_RAID6_LS_6:
case SEG_RAID6_RS_6:
case SEG_RAID6_0_6:
case SEG_RAID6_N_6:
if (!area->dev_node) {
EMIT_PARAMS(*pos, " -");
break;
@ -2285,6 +2314,12 @@ static int _mirror_emit_segment_line(struct dm_task *dmt, struct load_segment *s
return 1;
}
/* Return 2 if @p != 0 */
static int _add_2_if_value(unsigned p)
{
return p ? 2 : 0;
}
static int _raid_emit_segment_line(struct dm_task *dmt, uint32_t major,
uint32_t minor, struct load_segment *seg,
uint64_t *seg_start, char *params,
@ -2297,23 +2332,16 @@ static int _raid_emit_segment_line(struct dm_task *dmt, uint32_t major,
if ((seg->flags & DM_NOSYNC) || (seg->flags & DM_FORCESYNC))
param_count++;
if (seg->region_size)
param_count += 2;
if (seg->writebehind)
param_count += 2;
if (seg->min_recovery_rate)
param_count += 2;
if (seg->max_recovery_rate)
param_count += 2;
param_count += _add_2_if_value(seg->region_size) +
_add_2_if_value(seg->writebehind) +
_add_2_if_value(seg->min_recovery_rate) +
_add_2_if_value(seg->max_recovery_rate);
/* rebuilds is 64-bit */
param_count += 2 * hweight32(seg->rebuilds & 0xFFFFFFFF);
param_count += 2 * hweight32(seg->rebuilds >> 32);
/* rebuilds is 64-bit */
/* writemostly is 64-bit */
param_count += 2 * hweight32(seg->writemostly & 0xFFFFFFFF);
param_count += 2 * hweight32(seg->writemostly >> 32);
@ -2351,7 +2379,7 @@ static int _raid_emit_segment_line(struct dm_task *dmt, uint32_t major,
seg->max_recovery_rate);
/* Print number of metadata/data device pairs */
EMIT_PARAMS(pos, " %u", seg->area_count/2);
EMIT_PARAMS(pos, " %u", seg->area_count / 2);
if (_emit_areas_line(dmt, seg, params, paramsize, &pos) <= 0)
return_0;
@ -2522,9 +2550,13 @@ static int _emit_segment_line(struct dm_task *dmt, uint32_t major,
seg->iv_offset != DM_CRYPT_IV_DEFAULT ?
seg->iv_offset : *seg_start);
break;
case SEG_RAID0:
case SEG_RAID1:
case SEG_RAID10:
case SEG_RAID4:
case SEG_RAID4_N:
case SEG_RAID5_0:
case SEG_RAID5_N:
case SEG_RAID5_LA:
case SEG_RAID5_RA:
case SEG_RAID5_LS:
@ -2532,6 +2564,12 @@ static int _emit_segment_line(struct dm_task *dmt, uint32_t major,
case SEG_RAID6_ZR:
case SEG_RAID6_NR:
case SEG_RAID6_NC:
case SEG_RAID6_LA_6:
case SEG_RAID6_RA_6:
case SEG_RAID6_LS_6:
case SEG_RAID6_RS_6:
case SEG_RAID6_0_6:
case SEG_RAID6_N_6:
target_type_is_raid = 1;
r = _raid_emit_segment_line(dmt, major, minor, seg, seg_start,
params, paramsize);
@ -4050,8 +4088,12 @@ int dm_tree_node_add_null_area(struct dm_tree_node *node, uint64_t offset)
seg = dm_list_item(dm_list_last(&node->props.segs), struct load_segment);
switch (seg->type) {
case SEG_RAID0:
case SEG_RAID1:
case SEG_RAID4:
case SEG_RAID4_N:
case SEG_RAID5_0:
case SEG_RAID5_N:
case SEG_RAID5_LA:
case SEG_RAID5_RA:
case SEG_RAID5_LS:
@ -4059,6 +4101,12 @@ int dm_tree_node_add_null_area(struct dm_tree_node *node, uint64_t offset)
case SEG_RAID6_ZR:
case SEG_RAID6_NR:
case SEG_RAID6_NC:
case SEG_RAID6_LA_6:
case SEG_RAID6_RA_6:
case SEG_RAID6_LS_6:
case SEG_RAID6_RS_6:
case SEG_RAID6_0_6:
case SEG_RAID6_N_6:
break;
default:
log_error("dm_tree_node_add_null_area() called on an unsupported segment type");


@ -238,6 +238,7 @@ static int _check_conversion_type(struct cmd_context *cmd, const char *type_str)
/* FIXME: Check thin-pool and thin more thoroughly! */
if (!strcmp(type_str, "snapshot") ||
!strcmp(type_str, "striped") ||
!strncmp(type_str, "raid", 4) ||
!strcmp(type_str, "cache-pool") || !strcmp(type_str, "cache") ||
!strcmp(type_str, "thin-pool") || !strcmp(type_str, "thin"))
@ -253,7 +254,9 @@ static int _snapshot_type_requested(struct cmd_context *cmd, const char *type_st
}
/* mirror/raid* (1,10,4,5,6 and their variants) reshape */
static int _mirror_or_raid_type_requested(struct cmd_context *cmd, const char *type_str) {
return (arg_count(cmd, mirrors_ARG) || !strncmp(type_str, "raid", 4) || !strcmp(type_str, "mirror"));
return (arg_count(cmd, mirrors_ARG) ||
!strncmp(type_str, "raid", 4) ||
!strcmp(type_str, "mirror"));
}
static int _read_pool_params(struct cmd_context *cmd, int *pargc, char ***pargv,
@ -377,6 +380,11 @@ static int _read_params(struct cmd_context *cmd, int argc, char **argv,
if (!_check_conversion_type(cmd, type_str))
return_0;
/* FIXME: TESTME */
if (!arg_count(cmd, type_ARG) &&
!get_stripe_params(cmd, &lp->stripes, &lp->stripe_size))
return_0;
if (arg_count(cmd, repair_ARG) &&
arg_outside_list_is_set(cmd, "cannot be used with --repair",
repair_ARG,
@ -438,9 +446,11 @@ static int _read_params(struct cmd_context *cmd, int argc, char **argv,
}
if ((arg_count(cmd, stripes_long_ARG) || arg_count(cmd, stripesize_ARG)) &&
!(_mirror_or_raid_type_requested(cmd, type_str) ||
arg_count(cmd, repair_ARG) ||
arg_count(cmd, thinpool_ARG))) {
#if 1
(!_mirror_or_raid_type_requested(cmd, type_str) ||
#endif
arg_count(cmd, repair_ARG) ||
arg_count(cmd, thinpool_ARG))) {
log_error("--stripes or --stripesize argument is only valid "
"with --mirrors/--type mirror/--type raid*, --repair and --thinpool");
return 0;
@ -1326,7 +1336,8 @@ static int _lvconvert_mirrors_parse_params(struct cmd_context *cmd,
*new_mimage_count = lp->mirrors;
/* Too many mimages? */
if (lp->mirrors > DEFAULT_MIRROR_MAX_IMAGES) {
if ((!arg_count(cmd, type_ARG) || strcmp(arg_str_value(cmd, type_ARG, NULL), SEG_TYPE_NAME_RAID1)) &&
lp->mirrors > DEFAULT_MIRROR_MAX_IMAGES) {
log_error("Only up to %d images in mirror supported currently.",
DEFAULT_MIRROR_MAX_IMAGES);
return 0;
@ -1756,7 +1767,6 @@ static int _lvconvert_mirrors(struct cmd_context *cmd,
if (!_lvconvert_validate_thin(lv, lp))
return_0;
if (lv_is_thin_type(lv)) {
log_error("Mirror segment type cannot be used for thinpool%s.\n"
"Try \"%s\" segment type instead.",
@ -1811,10 +1821,39 @@ static int _is_valid_raid_conversion(const struct segment_type *from_segtype,
if (from_segtype == to_segtype)
return 1;
if (!segtype_is_raid(from_segtype) && !segtype_is_raid(to_segtype))
return_0; /* Not converting to or from RAID? */
/* From striped to mirror or vice-versa */
if (segtype_is_striped(from_segtype) &&
segtype_is_mirror(to_segtype))
return 1;
return 1;
if (segtype_is_mirror(from_segtype) &&
segtype_is_striped(to_segtype))
return 1;
/* From striped to raid0 or vice-versa */
if (segtype_is_striped(from_segtype) &&
segtype_is_raid0(to_segtype))
return 1;
if (segtype_is_raid0(from_segtype) &&
segtype_is_striped(to_segtype))
return 1;
/* From striped to raid1 or vice-versa */
if (segtype_is_striped(from_segtype) &&
segtype_is_raid1(to_segtype))
return 1;
if (segtype_is_raid1(from_segtype) &&
segtype_is_striped(to_segtype))
return 1;
/* From raid to raid */
if (segtype_is_raid(from_segtype) &&
segtype_is_raid(to_segtype))
return 1;
return 0;
}
static void _lvconvert_raid_repair_ask(struct cmd_context *cmd,
@ -1846,7 +1885,7 @@ static void _lvconvert_raid_repair_ask(struct cmd_context *cmd,
static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *lp)
{
int replace = 0, image_count = 0;
int replace = 0, image_count = 0, reshape_args = 0;
struct dm_list *failed_pvs;
struct cmd_context *cmd = lv->vg->cmd;
struct lv_segment *seg = first_seg(lv);
@ -1855,6 +1894,7 @@ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *l
if (!arg_count(cmd, type_ARG))
lp->segtype = seg->segtype;
/* FIXME: remove constraint on mirror/raid1 */
/* Can only change image count for raid1 and linear */
if (arg_count(cmd, mirrors_ARG) &&
!seg_is_mirrored(seg) && !seg_is_linear(seg)) {
@ -1875,6 +1915,8 @@ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *l
/* Change number of RAID1 images */
if (arg_count(cmd, mirrors_ARG) || arg_count(cmd, splitmirrors_ARG)) {
int track;
image_count = lv_raid_image_count(lv);
if (lp->mirrors_sign == SIGN_PLUS)
image_count += lp->mirrors;
@ -1883,10 +1925,11 @@ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *l
else
image_count = lp->mirrors + 1;
if (image_count < 1) {
log_error("Unable to %s images by specified amount",
arg_count(cmd, splitmirrors_ARG) ?
"split" : "reduce");
track = arg_count(cmd, trackchanges_ARG);
if (image_count < 1 || (track && lp->mirrors != 1)) {
log_error("Unable to %s images by specified amount%s",
arg_count(cmd, splitmirrors_ARG) ? "split" : "reduce",
track ? "; only one with tracking" : "");
return 0;
}
}
@ -1904,8 +1947,19 @@ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *l
if (arg_count(cmd, mirrors_ARG))
return lv_raid_change_image_count(lv, image_count, lp->pvh);
if (arg_count(cmd, type_ARG))
return lv_raid_reshape(lv, lp->segtype);
if (!arg_count(cmd, stripes_long_ARG))
lp->stripes = seg->area_count - seg->segtype->parity_devs;
else
reshape_args++;
/* FIXME: stripesize arg alone does not work */
if (!arg_count(cmd, stripesize_ARG))
lp->stripe_size = seg->stripe_size;
else
reshape_args++;
if (arg_count(cmd, type_ARG) || reshape_args)
return lv_raid_reshape(lv, lp->segtype, lp->stripes, lp->stripe_size, lp->pvh);
if (arg_count(cmd, replace_ARG))
return lv_raid_replace(lv, lp->replace_pvh, lp->pvh);
@ -1919,7 +1973,9 @@ static int _lvconvert_raid(struct logical_volume *lv, struct lvconvert_params *l
return 0;
}
if (!lv_raid_percent(lv, &sync_percent)) {
if (!seg_is_striped(seg) &&
!seg_is_raid0(seg) &&
!lv_raid_percent(lv, &sync_percent)) {
log_error("Unable to determine sync status of %s/%s.",
lv->vg->name, lv->name);
return 0;
@ -3362,7 +3418,7 @@ static int _lvconvert_single(struct cmd_context *cmd, struct logical_volume *lv,
(lp->cache && !_lvconvert_cache(cmd, lv, lp)))
return_ECMD_FAILED;
} else if (segtype_is_raid(lp->segtype) ||
(lv->status & RAID) || lp->merge_mirror) {
lv_is_raid(lv) || lp->merge_mirror) {
if (!archive(lv->vg))
return_ECMD_FAILED;
@ -3390,7 +3446,8 @@ static int _lvconvert_single(struct cmd_context *cmd, struct logical_volume *lv,
/* If repairing and using policies, remove missing PVs from VG */
if (arg_count(cmd, repair_ARG) && arg_count(cmd, use_policies_ARG))
_remove_missing_empty_pv(lv->vg, failed_pvs);
}
} else
log_error("Nothing to do");
return ECMD_PROCESSED;
}


@ -453,7 +453,7 @@ static int _read_mirror_params(struct cmd_context *cmd,
static int _read_raid_params(struct cmd_context *cmd,
struct lvcreate_params *lp)
{
if ((lp->stripes < 2) && !strcmp(lp->segtype->name, SEG_TYPE_NAME_RAID10)) {
if ((lp->stripes < 2) && segtype_is_raid10(lp->segtype)) {
if (arg_count(cmd, stripes_ARG)) {
/* User supplied the bad argument */
log_error("Segment type 'raid10' requires 2 or more stripes.");
@ -467,8 +467,9 @@ static int _read_raid_params(struct cmd_context *cmd,
/*
* RAID1 does not take a stripe arg
*/
if ((lp->stripes > 1) && seg_is_mirrored(lp) &&
strcmp(lp->segtype->name, SEG_TYPE_NAME_RAID10)) {
if ((lp->stripes > 1) &&
(seg_is_mirrored(lp) || segtype_is_raid1(lp->segtype)) &&
!segtype_is_raid10(lp->segtype)) {
log_error("Stripe argument cannot be used with segment type, %s",
lp->segtype->name);
return 0;
@ -496,15 +497,26 @@ static int _read_mirror_and_raid_params(struct cmd_context *cmd,
/* Common mirror and raid params */
if (arg_count(cmd, mirrors_ARG)) {
lp->mirrors = arg_uint_value(cmd, mirrors_ARG, 0) + 1;
unsigned max_images;
const char *type;
if (lp->mirrors > DEFAULT_MIRROR_MAX_IMAGES) {
log_error("Only up to " DM_TO_STRING(DEFAULT_MIRROR_MAX_IMAGES)
" images in mirror supported currently.");
lp->mirrors = arg_uint_value(cmd, mirrors_ARG, 0) + 1;
if (segtype_is_raid1(lp->segtype)) {
type = SEG_TYPE_NAME_RAID1;
max_images = DEFAULT_RAID_MAX_IMAGES;
} else {
type = "mirror";
max_images = DEFAULT_MIRROR_MAX_IMAGES;
}
if (lp->mirrors > max_images) {
log_error("Only up to %u images in %s supported currently.",
max_images, type);
return 0;
}
if ((lp->mirrors > 2) && !strcmp(lp->segtype->name, SEG_TYPE_NAME_RAID10)) {
if (lp->mirrors > 2 &&
segtype_is_raid10(lp->segtype)) {
/*
* FIXME: When RAID10 is no longer limited to
* 2-way mirror, 'lv_mirror_count()'
@ -526,6 +538,14 @@ static int _read_mirror_and_raid_params(struct cmd_context *cmd,
/* Default to 2 mirrored areas if '--type mirror|raid1|raid10' */
lp->mirrors = seg_is_mirrored(lp) ? 2 : 1;
if (lp->stripes < 2 &&
(segtype_is_raid0(lp->segtype) || segtype_is_raid10(lp->segtype)))
if (arg_count(cmd, stripes_ARG)) {
/* User supplied the bad argument */
log_error("Segment type 'raid(1)0' requires 2 or more stripes.");
return 0;
}
lp->nosync = arg_is_set(cmd, nosync_ARG);
if (!(lp->region_size = arg_uint_value(cmd, regionsize_ARG, 0)) &&
@ -540,6 +560,18 @@ static int _read_mirror_and_raid_params(struct cmd_context *cmd,
return 0;
}
/*
* RAID1 does not take a stripe arg
*/
if ((lp->stripes > 1) &&
(seg_is_mirrored(lp) || segtype_is_raid1(lp->segtype)) &&
!segtype_is_raid0(lp->segtype) &&
!segtype_is_raid10(lp->segtype)) {
log_error("Stripe argument cannot be used with segment type, %s",
lp->segtype->name);
return 0;
}
if (lp->region_size % (pagesize >> SECTOR_SHIFT)) {
log_error("Region size (%" PRIu32 ") must be a multiple of "
"machine memory page size (%d)",
@ -950,7 +982,13 @@ static int _lvcreate_params(struct cmd_context *cmd,
return 0;
}
if (!strcmp(lp->segtype->name, SEG_TYPE_NAME_RAID10) &&
if (segtype_is_raid0(lp->segtype) &&
!(lp->target_attr & RAID_FEATURE_RAID0)) {
log_error("RAID module does not support RAID0.");
return 0;
}
if (segtype_is_raid10(lp->segtype) &&
!(lp->target_attr & RAID_FEATURE_RAID10)) {
log_error("RAID module does not support RAID10.");
return 0;
@ -1192,17 +1230,18 @@ static int _check_raid_parameters(struct volume_group *vg,
if (!lp->stripe_size)
lp->stripe_size = find_config_tree_int(cmd, metadata_stripesize_CFG, NULL) * 2;
if (lp->stripes <= lp->segtype->parity_devs) {
if (lp->stripes < 2) { // <= lp->segtype->parity_devs) {
log_error("Number of stripes must be at least %d for %s",
lp->segtype->parity_devs + 1,
lp->segtype->name);
return 0;
}
} else if (!strcmp(lp->segtype->name, SEG_TYPE_NAME_RAID10)) {
} else if (segtype_is_raid0(lp->segtype) ||
segtype_is_raid10(lp->segtype)) {
if (!arg_count(cmd, stripes_ARG))
lp->stripes = devs / lp->mirrors;
if (lp->stripes < 2) {
log_error("Unable to create RAID10 LV,"
log_error("Unable to create RAID(1)0 LV,"
" insufficient number of devices.");
return 0;
}
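
The lvcreate changes above add raid0 awareness (a stripe-count check and the RAID_FEATURE_RAID0 target test). A sketch of the corresponding command lines; names and sizes are placeholders, not from the commit:

    lvcreate --type raid0  -i 2 -I 64 -L 1G -n lv0 vg   # 2 stripes, 64KiB stripe size
    lvcreate --type raid10 -i 2 -m 1 -L 1G -n lv1 vg    # raid10: 2 stripes, 2-way mirrored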


@ -1096,10 +1096,14 @@ static int _validate_stripe_params(struct cmd_context *cmd, uint32_t *stripes,
*/
int get_stripe_params(struct cmd_context *cmd, uint32_t *stripes, uint32_t *stripe_size)
{
/* stripes_long_ARG takes precedence (for lvconvert) */
*stripes = arg_uint_value(cmd, arg_count(cmd, stripes_long_ARG) ? stripes_long_ARG : stripes_ARG, 1);
int r;
uint32_t s;
*stripe_size = arg_uint_value(cmd, stripesize_ARG, 0);
/* stripes_long_ARG takes precedence (for lvconvert) */
s = arg_uint_value(cmd, arg_count(cmd, stripes_long_ARG) ? stripes_long_ARG : stripes_ARG, ~0);
*stripes = (s == ~0) ? 2 : s;
*stripe_size = arg_uint_value(cmd, stripesize_ARG, DEFAULT_STRIPESIZE);
if (*stripe_size) {
if (arg_sign_value(cmd, stripesize_ARG, SIGN_NONE) == SIGN_MINUS) {
log_error("Negative stripesize is invalid.");
@ -1113,7 +1117,11 @@ int get_stripe_params(struct cmd_context *cmd, uint32_t *stripes, uint32_t *stri
}
}
return _validate_stripe_params(cmd, stripes, stripe_size);
r = _validate_stripe_params(cmd, stripes, stripe_size);
if (s == ~0)
*stripes = 1;
return r;
}
static int _validate_cachepool_params(struct dm_config_tree *tree)